hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1574ff932e5ed7ac4764df4b3be08fbe2f0fb201
| 183
|
py
|
Python
|
signalwire/relay/calling/results/dial_result.py
|
ramarketing/signalwire-python
|
c0663bdd0454faaa39f42af7c936cea1d43e1842
|
[
"MIT"
] | 23
|
2018-12-19T14:48:18.000Z
|
2022-01-11T03:58:36.000Z
|
signalwire/relay/calling/results/dial_result.py
|
ramarketing/signalwire-python
|
c0663bdd0454faaa39f42af7c936cea1d43e1842
|
[
"MIT"
] | 13
|
2018-10-17T12:57:54.000Z
|
2021-09-01T21:46:01.000Z
|
signalwire/relay/calling/results/dial_result.py
|
ramarketing/signalwire-python
|
c0663bdd0454faaa39f42af7c936cea1d43e1842
|
[
"MIT"
] | 12
|
2020-01-21T14:29:43.000Z
|
2022-01-11T07:48:06.000Z
|
from . import BaseResult
class DialResult(BaseResult):
def __init__(self, component):
super().__init__(component)
@property
def call(self):
return self.component.call
| 18.3
| 32
| 0.726776
| 21
| 183
| 5.952381
| 0.619048
| 0.208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169399
| 183
| 9
| 33
| 20.333333
| 0.822368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0.142857
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
ecc512fb53d557df78232b6d617a16ceab1491a6
| 25,721
|
py
|
Python
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 3
|
2019-04-01T11:03:04.000Z
|
2019-12-31T02:17:15.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-04-15T18:46:45.000Z
|
2021-04-15T18:46:45.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-09-23T13:43:07.000Z
|
2021-09-23T13:43:07.000Z
|
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.core.framework.attr_value_pb2 import AttrValue
from tensorflow.core.framework.attr_value_pb2 import NameAttrList
from tensorflow.core.framework.graph_pb2 import GraphDef
from tensorflow.core.framework.node_def_pb2 import NodeDef
from tensorflow.core.framework.summary_pb2 import HistogramProto
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.framework.summary_pb2 import SummaryMetadata
from tensorflow.core.protobuf.config_pb2 import ConfigProto
from tensorflow.core.protobuf.config_pb2 import GPUOptions
from tensorflow.core.protobuf.config_pb2 import GraphOptions
from tensorflow.core.protobuf.config_pb2 import OptimizerOptions
from tensorflow.core.protobuf.config_pb2 import RunMetadata
from tensorflow.core.protobuf.config_pb2 import RunOptions
from tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef
from tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo
from tensorflow.core.util.event_pb2 import Event
from tensorflow.core.util.event_pb2 import LogMessage
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python import AggregationMethod
from tensorflow.python import Assert
from tensorflow.python import ConditionalAccumulator
from tensorflow.python import ConditionalAccumulatorBase
from tensorflow.python import DType
from tensorflow.python import DeviceSpec
from tensorflow.python import Dimension
from tensorflow.python import FIFOQueue
from tensorflow.python import FixedLenFeature
from tensorflow.python import FixedLenSequenceFeature
from tensorflow.python import FixedLengthRecordReader
from tensorflow.python import GradientTape
from tensorflow.python import Graph
from tensorflow.python import GraphKeys
from tensorflow.python import IdentityReader
from tensorflow.python import IndexedSlices
from tensorflow.python import InteractiveSession
from tensorflow.python import LMDBReader
from tensorflow.python import NoGradient
from tensorflow.python import NoGradient as NotDifferentiable
from tensorflow.python import OpError
from tensorflow.python import Operation
from tensorflow.python import PaddingFIFOQueue
from tensorflow.python import Print
from tensorflow.python import PriorityQueue
from tensorflow.python import QueueBase
from tensorflow.python import RandomShuffleQueue
from tensorflow.python import ReaderBase
from tensorflow.python import RegisterGradient
from tensorflow.python import Session
from tensorflow.python import SparseConditionalAccumulator
from tensorflow.python import SparseFeature
from tensorflow.python import SparseTensor
from tensorflow.python import SparseTensorValue
from tensorflow.python import TFRecordReader
from tensorflow.python import Tensor
from tensorflow.python import TensorArray
from tensorflow.python import TensorShape
from tensorflow.python import TextLineReader
from tensorflow.python import VarLenFeature
from tensorflow.python import Variable
from tensorflow.python import VariableScope
from tensorflow.python import WholeFileReader
from tensorflow.python import abs
from tensorflow.python import accumulate_n
from tensorflow.python import acos
from tensorflow.python import acosh
from tensorflow.python import add
from tensorflow.python import add_check_numerics_ops
from tensorflow.python import add_n
from tensorflow.python import add_to_collection
from tensorflow.python import add_to_collections
from tensorflow.python import all_variables
from tensorflow.python import angle
from tensorflow.python import arg_max
from tensorflow.python import arg_min
from tensorflow.python import argmax
from tensorflow.python import argmin
from tensorflow.python import as_dtype
from tensorflow.python import as_string
from tensorflow.python import asin
from tensorflow.python import asinh
from tensorflow.python import assert_equal
from tensorflow.python import assert_greater
from tensorflow.python import assert_greater_equal
from tensorflow.python import assert_integer
from tensorflow.python import assert_less
from tensorflow.python import assert_less_equal
from tensorflow.python import assert_near
from tensorflow.python import assert_negative
from tensorflow.python import assert_non_negative
from tensorflow.python import assert_non_positive
from tensorflow.python import assert_none_equal
from tensorflow.python import assert_positive
from tensorflow.python import assert_proper_iterable
from tensorflow.python import assert_rank
from tensorflow.python import assert_rank_at_least
from tensorflow.python import assert_rank_in
from tensorflow.python import assert_same_float_dtype
from tensorflow.python import assert_scalar
from tensorflow.python import assert_type
from tensorflow.python import assert_variables_initialized
from tensorflow.python import assign
from tensorflow.python import assign_add
from tensorflow.python import assign_sub
from tensorflow.python import atan
from tensorflow.python import atan2
from tensorflow.python import atanh
from tensorflow.python import batch_to_space
from tensorflow.python import batch_to_space_nd
from tensorflow.python import betainc
from tensorflow.python import bincount
from tensorflow.python import bitcast
from tensorflow.python import boolean_mask
from tensorflow.python import broadcast_dynamic_shape
from tensorflow.python import broadcast_static_shape
from tensorflow.python import case
from tensorflow.python import cast
from tensorflow.python import ceil
from tensorflow.python import check_numerics
from tensorflow.python import cholesky
from tensorflow.python import cholesky_solve
from tensorflow.python import clip_by_average_norm
from tensorflow.python import clip_by_global_norm
from tensorflow.python import clip_by_norm
from tensorflow.python import colocate_with
from tensorflow.python import complex
from tensorflow.python import concat
from tensorflow.python import cond
from tensorflow.python import confusion_matrix
from tensorflow.python import conj
from tensorflow.python import constant
from tensorflow.python import constant_initializer
from tensorflow.python import container
from tensorflow.python import control_dependencies
from tensorflow.python import convert_to_tensor
from tensorflow.python import convert_to_tensor_or_indexed_slices
from tensorflow.python import convert_to_tensor_or_sparse_tensor
from tensorflow.python import cos
from tensorflow.python import cosh
from tensorflow.python import count_nonzero
from tensorflow.python import count_up_to
from tensorflow.python import create_partitioned_variables
from tensorflow.python import cross
from tensorflow.python import cumprod
from tensorflow.python import cumsum
from tensorflow.python import custom_gradient
from tensorflow.python import decode_base64
from tensorflow.python import decode_csv
from tensorflow.python import decode_json_example
from tensorflow.python import decode_raw
from tensorflow.python import delete_session_tensor
from tensorflow.python import depth_to_space
from tensorflow.python import dequantize
from tensorflow.python import deserialize_many_sparse
from tensorflow.python import device
from tensorflow.python import diag
from tensorflow.python import diag_part
from tensorflow.python import digamma
from tensorflow.python import div
from tensorflow.python import divide
from tensorflow.python import dynamic_partition
from tensorflow.python import dynamic_stitch
from tensorflow.python import edit_distance
from tensorflow.python import einsum
from tensorflow.python import enable_eager_execution
from tensorflow.python import encode_base64
from tensorflow.python import equal
from tensorflow.python import erf
from tensorflow.python import erfc
from tensorflow.python import executing_eagerly
from tensorflow.python import exp
from tensorflow.python import expand_dims
from tensorflow.python import expm1
from tensorflow.python import extract_image_patches
from tensorflow.python import eye
from tensorflow.python import fake_quant_with_min_max_args
from tensorflow.python import fake_quant_with_min_max_args_gradient
from tensorflow.python import fake_quant_with_min_max_vars
from tensorflow.python import fake_quant_with_min_max_vars_gradient
from tensorflow.python import fake_quant_with_min_max_vars_per_channel
from tensorflow.python import fake_quant_with_min_max_vars_per_channel_gradient
from tensorflow.python import fft
from tensorflow.python import fft2d
from tensorflow.python import fft3d
from tensorflow.python import fill
from tensorflow.python import fixed_size_partitioner
from tensorflow.python import floor
from tensorflow.python import floor_div
from tensorflow.python import floordiv
from tensorflow.python import floormod
from tensorflow.python import floormod as mod
from tensorflow.python import foldl
from tensorflow.python import foldr
from tensorflow.python import gather
from tensorflow.python import gather_nd
from tensorflow.python import get_collection
from tensorflow.python import get_collection_ref
from tensorflow.python import get_default_graph
from tensorflow.python import get_default_session
from tensorflow.python import get_local_variable
from tensorflow.python import get_seed
from tensorflow.python import get_session_handle
from tensorflow.python import get_session_tensor
from tensorflow.python import get_variable
from tensorflow.python import get_variable_scope
from tensorflow.python import global_norm
from tensorflow.python import global_variables
from tensorflow.python import global_variables_initializer
from tensorflow.python import glorot_normal_initializer
from tensorflow.python import glorot_uniform_initializer
from tensorflow.python import gradients
from tensorflow.python import greater
from tensorflow.python import greater_equal
from tensorflow.python import group
from tensorflow.python import guarantee_const
from tensorflow.python import hessians
from tensorflow.python import histogram_fixed_width
from tensorflow.python import histogram_fixed_width_bins
from tensorflow.python import identity
from tensorflow.python import identity_n
from tensorflow.python import ifft
from tensorflow.python import ifft2d
from tensorflow.python import ifft3d
from tensorflow.python import igamma
from tensorflow.python import igammac
from tensorflow.python import imag
from tensorflow.python import import_graph_def
from tensorflow.python import initialize_all_tables
from tensorflow.python import initialize_all_variables
from tensorflow.python import initialize_local_variables
from tensorflow.python import initialize_variables
from tensorflow.python import invert_permutation
from tensorflow.python import is_finite
from tensorflow.python import is_inf
from tensorflow.python import is_nan
from tensorflow.python import is_non_decreasing
from tensorflow.python import is_numeric_tensor
from tensorflow.python import is_strictly_increasing
from tensorflow.python import is_variable_initialized
from tensorflow.python import lbeta
from tensorflow.python import less
from tensorflow.python import less_equal
from tensorflow.python import lgamma
from tensorflow.python import lin_space
from tensorflow.python import lin_space as linspace
from tensorflow.python import load_file_system_library
from tensorflow.python import load_op_library
from tensorflow.python import local_variables
from tensorflow.python import local_variables_initializer
from tensorflow.python import log
from tensorflow.python import log1p
from tensorflow.python import log_sigmoid
from tensorflow.python import logical_and
from tensorflow.python import logical_not
from tensorflow.python import logical_or
from tensorflow.python import logical_xor
from tensorflow.python import make_ndarray
from tensorflow.python import make_template
from tensorflow.python import make_tensor_proto
from tensorflow.python import map_fn
from tensorflow.python import matching_files
from tensorflow.python import matmul
from tensorflow.python import matrix_band_part
from tensorflow.python import matrix_determinant
from tensorflow.python import matrix_diag
from tensorflow.python import matrix_diag_part
from tensorflow.python import matrix_inverse
from tensorflow.python import matrix_set_diag
from tensorflow.python import matrix_solve
from tensorflow.python import matrix_solve_ls
from tensorflow.python import matrix_transpose
from tensorflow.python import matrix_triangular_solve
from tensorflow.python import maximum
from tensorflow.python import meshgrid
from tensorflow.python import min_max_variable_partitioner
from tensorflow.python import minimum
from tensorflow.python import model_variables
from tensorflow.python import moving_average_variables
from tensorflow.python import multinomial
from tensorflow.python import multiply
from tensorflow.python import name_scope
from tensorflow.python import negative
from tensorflow.python import no_op
from tensorflow.python import no_regularizer
from tensorflow.python import norm
from tensorflow.python import not_equal
from tensorflow.python import one_hot
from tensorflow.python import ones
from tensorflow.python import ones_initializer
from tensorflow.python import ones_like
from tensorflow.python import op_scope
from tensorflow.python import orthogonal_initializer
from tensorflow.python import pad
from tensorflow.python import parallel_stack
from tensorflow.python import parse_example
from tensorflow.python import parse_single_example
from tensorflow.python import parse_single_sequence_example
from tensorflow.python import parse_tensor
from tensorflow.python import placeholder
from tensorflow.python import placeholder_with_default
from tensorflow.python import polygamma
from tensorflow.python import pow
from tensorflow.python import py_func
from tensorflow.python import qr
from tensorflow.python import quantize
from tensorflow.python import quantize_v2
from tensorflow.python import quantized_concat
from tensorflow.python import random_crop
from tensorflow.python import random_gamma
from tensorflow.python import random_normal
from tensorflow.python import random_normal_initializer
from tensorflow.python import random_poisson
from tensorflow.python import random_shuffle
from tensorflow.python import random_uniform
from tensorflow.python import random_uniform_initializer
from tensorflow.python import range
from tensorflow.python import rank
from tensorflow.python import read_file
from tensorflow.python import real
from tensorflow.python import realdiv
from tensorflow.python import reciprocal
from tensorflow.python import reduce_all
from tensorflow.python import reduce_any
from tensorflow.python import reduce_join
from tensorflow.python import reduce_logsumexp
from tensorflow.python import reduce_max
from tensorflow.python import reduce_mean
from tensorflow.python import reduce_min
from tensorflow.python import reduce_prod
from tensorflow.python import reduce_sum
from tensorflow.python import regex_replace
from tensorflow.python import register_tensor_conversion_function
from tensorflow.python import report_uninitialized_variables
from tensorflow.python import required_space_to_batch_paddings
from tensorflow.python import reset_default_graph
from tensorflow.python import reshape
from tensorflow.python import reverse
from tensorflow.python import reverse_sequence
from tensorflow.python import reverse_v2
from tensorflow.python import rint
from tensorflow.python import round
from tensorflow.python import rsqrt
from tensorflow.python import saturate_cast
from tensorflow.python import scalar_mul
from tensorflow.python import scan
from tensorflow.python import scatter_add
from tensorflow.python import scatter_div
from tensorflow.python import scatter_max
from tensorflow.python import scatter_min
from tensorflow.python import scatter_mul
from tensorflow.python import scatter_nd
from tensorflow.python import scatter_nd_add
from tensorflow.python import scatter_nd_sub
from tensorflow.python import scatter_nd_update
from tensorflow.python import scatter_sub
from tensorflow.python import scatter_update
from tensorflow.python import segment_max
from tensorflow.python import segment_mean
from tensorflow.python import segment_min
from tensorflow.python import segment_prod
from tensorflow.python import segment_sum
from tensorflow.python import self_adjoint_eig
from tensorflow.python import self_adjoint_eigvals
from tensorflow.python import sequence_mask
from tensorflow.python import serialize_many_sparse
from tensorflow.python import serialize_sparse
from tensorflow.python import serialize_tensor
from tensorflow.python import set_random_seed
from tensorflow.python import setdiff1d
from tensorflow.python import shape
from tensorflow.python import shape_n
from tensorflow.python import sigmoid
from tensorflow.python import sign
from tensorflow.python import sin
from tensorflow.python import sinh
from tensorflow.python import size
from tensorflow.python import slice
from tensorflow.python import space_to_batch
from tensorflow.python import space_to_batch_nd
from tensorflow.python import space_to_depth
from tensorflow.python import sparse_add
from tensorflow.python import sparse_concat
from tensorflow.python import sparse_fill_empty_rows
from tensorflow.python import sparse_mask
from tensorflow.python import sparse_matmul
from tensorflow.python import sparse_maximum
from tensorflow.python import sparse_merge
from tensorflow.python import sparse_minimum
from tensorflow.python import sparse_placeholder
from tensorflow.python import sparse_reduce_max
from tensorflow.python import sparse_reduce_max_sparse
from tensorflow.python import sparse_reduce_sum
from tensorflow.python import sparse_reduce_sum_sparse
from tensorflow.python import sparse_reorder
from tensorflow.python import sparse_reset_shape
from tensorflow.python import sparse_reshape
from tensorflow.python import sparse_retain
from tensorflow.python import sparse_segment_mean
from tensorflow.python import sparse_segment_sqrt_n
from tensorflow.python import sparse_segment_sum
from tensorflow.python import sparse_slice
from tensorflow.python import sparse_softmax
from tensorflow.python import sparse_split
from tensorflow.python import sparse_tensor_dense_matmul
from tensorflow.python import sparse_tensor_to_dense
from tensorflow.python import sparse_to_dense
from tensorflow.python import sparse_to_indicator
from tensorflow.python import sparse_transpose
from tensorflow.python import split
from tensorflow.python import sqrt
from tensorflow.python import square
from tensorflow.python import squared_difference
from tensorflow.python import squeeze
from tensorflow.python import stack
from tensorflow.python import stop_gradient
from tensorflow.python import strided_slice
from tensorflow.python import string_join
from tensorflow.python import string_split
from tensorflow.python import string_to_hash_bucket
from tensorflow.python import string_to_hash_bucket_fast
from tensorflow.python import string_to_hash_bucket_strong
from tensorflow.python import string_to_number
from tensorflow.python import substr
from tensorflow.python import subtract
from tensorflow.python import svd
from tensorflow.python import tables_initializer
from tensorflow.python import tan
from tensorflow.python import tanh
from tensorflow.python import tensordot
from tensorflow.python import tile
from tensorflow.python import timestamp
from tensorflow.python import to_bfloat16
from tensorflow.python import to_complex128
from tensorflow.python import to_complex64
from tensorflow.python import to_double
from tensorflow.python import to_float
from tensorflow.python import to_int32
from tensorflow.python import to_int64
from tensorflow.python import trace
from tensorflow.python import trainable_variables
from tensorflow.python import transpose
from tensorflow.python import truediv
from tensorflow.python import truncated_normal
from tensorflow.python import truncated_normal_initializer
from tensorflow.python import truncatediv
from tensorflow.python import truncatemod
from tensorflow.python import tuple
from tensorflow.python import uniform_unit_scaling_initializer
from tensorflow.python import unique
from tensorflow.python import unique_with_counts
from tensorflow.python import unravel_index
from tensorflow.python import unsorted_segment_max
from tensorflow.python import unsorted_segment_mean
from tensorflow.python import unsorted_segment_min
from tensorflow.python import unsorted_segment_prod
from tensorflow.python import unsorted_segment_sqrt_n
from tensorflow.python import unsorted_segment_sum
from tensorflow.python import unstack
from tensorflow.python import variable_axis_size_partitioner
from tensorflow.python import variable_op_scope
from tensorflow.python import variable_scope
from tensorflow.python import variables_initializer
from tensorflow.python import variance_scaling_initializer
from tensorflow.python import verify_tensor_all_finite
from tensorflow.python import where
from tensorflow.python import while_loop
from tensorflow.python import write_file
from tensorflow.python import zeros
from tensorflow.python import zeros_initializer
from tensorflow.python import zeros_like
from tensorflow.python import zeta
from tensorflow.python.framework.dtypes import QUANTIZED_DTYPES
from tensorflow.python.framework.dtypes import bfloat16
from tensorflow.python.framework.dtypes import bool
from tensorflow.python.framework.dtypes import complex128
from tensorflow.python.framework.dtypes import complex64
from tensorflow.python.framework.dtypes import double
from tensorflow.python.framework.dtypes import float16
from tensorflow.python.framework.dtypes import float32
from tensorflow.python.framework.dtypes import float64
from tensorflow.python.framework.dtypes import half
from tensorflow.python.framework.dtypes import int16
from tensorflow.python.framework.dtypes import int32
from tensorflow.python.framework.dtypes import int64
from tensorflow.python.framework.dtypes import int8
from tensorflow.python.framework.dtypes import qint16
from tensorflow.python.framework.dtypes import qint32
from tensorflow.python.framework.dtypes import qint8
from tensorflow.python.framework.dtypes import quint16
from tensorflow.python.framework.dtypes import quint8
from tensorflow.python.framework.dtypes import resource
from tensorflow.python.framework.dtypes import string
from tensorflow.python.framework.dtypes import uint16
from tensorflow.python.framework.dtypes import uint32
from tensorflow.python.framework.dtypes import uint64
from tensorflow.python.framework.dtypes import uint8
from tensorflow.python.framework.dtypes import variant
from tensorflow.python.framework.versions import COMPILER_VERSION
from tensorflow.python.framework.versions import COMPILER_VERSION as __compiler_version__
from tensorflow.python.framework.versions import CXX11_ABI_FLAG
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as __cxx11_abi_flag__
from tensorflow.python.framework.versions import GIT_VERSION
from tensorflow.python.framework.versions import GIT_VERSION as __git_version__
from tensorflow.python.framework.versions import GRAPH_DEF_VERSION
from tensorflow.python.framework.versions import GRAPH_DEF_VERSION_MIN_CONSUMER
from tensorflow.python.framework.versions import GRAPH_DEF_VERSION_MIN_PRODUCER
from tensorflow.python.framework.versions import MONOLITHIC_BUILD
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as __monolithic_build__
from tensorflow.python.framework.versions import VERSION
from tensorflow.python.framework.versions import VERSION as __version__
from tensorflow.python.ops.array_ops import newaxis
from tensorflow.python.ops.clip_ops import clip_by_value
from tensorflow.python.ops.variable_scope import AUTO_REUSE
from tensorflow.tools.api.generator.api import app
from tensorflow.tools.api.generator.api import bitwise
from tensorflow.tools.api.generator.api import compat
from tensorflow.tools.api.generator.api import contrib
from tensorflow.tools.api.generator.api import data
from tensorflow.tools.api.generator.api import distributions
from tensorflow.tools.api.generator.api import errors
from tensorflow.tools.api.generator.api import estimator
from tensorflow.tools.api.generator.api import feature_column
from tensorflow.tools.api.generator.api import gfile
from tensorflow.tools.api.generator.api import graph_util
from tensorflow.tools.api.generator.api import image
from tensorflow.tools.api.generator.api import initializers
from tensorflow.tools.api.generator.api import keras
from tensorflow.tools.api.generator.api import layers
from tensorflow.tools.api.generator.api import linalg
from tensorflow.tools.api.generator.api import logging
from tensorflow.tools.api.generator.api import losses
from tensorflow.tools.api.generator.api import manip
from tensorflow.tools.api.generator.api import math
from tensorflow.tools.api.generator.api import metrics
from tensorflow.tools.api.generator.api import nn
from tensorflow.tools.api.generator.api import profiler
from tensorflow.tools.api.generator.api import python_io
from tensorflow.tools.api.generator.api import resource_loader
from tensorflow.tools.api.generator.api import saved_model
from tensorflow.tools.api.generator.api import sets
from tensorflow.tools.api.generator.api import spectral
from tensorflow.tools.api.generator.api import summary
from tensorflow.tools.api.generator.api import sysconfig
from tensorflow.tools.api.generator.api import test
from tensorflow.tools.api.generator.api import train
from tensorflow.tools.api.generator.api import user_ops
_names_with_underscore = ['__version__', '__git_version__', '__compiler_version__', '__cxx11_abi_flag__', '__monolithic_build__']
__all__ = [s for s in dir() if not s.startswith('_')]
__all__.extend([s for s in _names_with_underscore])
| 46.427798
| 129
| 0.881498
| 3,534
| 25,721
| 6.251273
| 0.147991
| 0.345374
| 0.447221
| 0.531957
| 0.694007
| 0.415173
| 0.188213
| 0.069211
| 0.030871
| 0.023402
| 0
| 0.003662
| 0.086933
| 25,721
| 553
| 130
| 46.511754
| 0.937024
| 0.00556
| 0
| 0
| 1
| 0
| 0.003324
| 0
| 0
| 0
| 0
| 0
| 0.038321
| 1
| 0
| false
| 0
| 0.994526
| 0
| 0.994526
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
eccf618df223cb69aa328dddc3b78dcca7aca4f0
| 1,140
|
py
|
Python
|
desicos/conecylDB/__init__.py
|
saullocastro/desicos
|
922db8ac4fb0fb4d09df18ce2a14011f207f6fa8
|
[
"BSD-3-Clause"
] | 1
|
2020-10-22T22:15:24.000Z
|
2020-10-22T22:15:24.000Z
|
desicos/conecylDB/__init__.py
|
saullocastro/desicos
|
922db8ac4fb0fb4d09df18ce2a14011f207f6fa8
|
[
"BSD-3-Clause"
] | 1
|
2020-10-09T12:42:02.000Z
|
2020-10-09T12:42:02.000Z
|
desicos/conecylDB/__init__.py
|
saullocastro/desicos
|
922db8ac4fb0fb4d09df18ce2a14011f207f6fa8
|
[
"BSD-3-Clause"
] | 2
|
2020-07-14T07:45:31.000Z
|
2020-12-29T00:22:41.000Z
|
r"""
===================================================
Cone / Cylinder DataBase (:mod:`desicos.conecylDB`)
===================================================
.. currentmodule:: desicos.conecylDB
The ``desicos.conecylDB`` module includes all the information about cones
and cylinders required to reproduce structures that were investigated
by many publications and in the context of DESICOS.
It also includes the tools necessary to work with the Imperfection
DataBase. Unfortunately, the files composing this database cannot be made
available with the repository, but all the tools required to post process
an imperfection file had been made available.
.. automodule:: desicos.conecylDB.conecylDB
:members:
.. automodule:: desicos.conecylDB.ccs
:members:
.. automodule:: desicos.conecylDB.laminaprops
:members:
.. automodule:: desicos.conecylDB.allowables
:members:
.. automodule:: desicos.conecylDB.fit_data
:members:
.. automodule:: desicos.conecylDB.interpolate
:members:
.. automodule:: desicos.conecylDB.read_write
:members:
"""
from __future__ import absolute_import
from .conecylDB import *
| 27.804878
| 74
| 0.698246
| 123
| 1,140
| 6.414634
| 0.544715
| 0.202788
| 0.230672
| 0.250951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135965
| 1,140
| 40
| 75
| 28.5
| 0.801015
| 0.934211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
01ed4d27492826991aaf5773395a462b2614c1b7
| 158
|
py
|
Python
|
players/admin.py
|
rymcimcim/django-foosball
|
6df4d05a81d13827d9734519fdf6a203e5c9a78e
|
[
"MIT"
] | 1
|
2016-10-20T12:29:03.000Z
|
2016-10-20T12:29:03.000Z
|
players/admin.py
|
rymcimcim/django-foosball
|
6df4d05a81d13827d9734519fdf6a203e5c9a78e
|
[
"MIT"
] | null | null | null |
players/admin.py
|
rymcimcim/django-foosball
|
6df4d05a81d13827d9734519fdf6a203e5c9a78e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from players.models import Player
class PlayerAdmin(admin.ModelAdmin):
pass
admin.site.register(Player, PlayerAdmin)
| 15.8
| 40
| 0.797468
| 20
| 158
| 6.3
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132911
| 158
| 9
| 41
| 17.555556
| 0.919708
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
1720f0bd32cce8fd3476851d53b391a8cbb5bca4
| 70
|
py
|
Python
|
blitzdb/fields/integer.py
|
marcinguy/blitzdb3
|
8b8bca02b205d7ff33d3902e5abb166e10a7b624
|
[
"MIT"
] | 252
|
2015-01-02T13:05:12.000Z
|
2021-12-29T13:36:47.000Z
|
blitzdb/fields/integer.py
|
epatters/blitzdb
|
4b459e0bcde9e1f6224dd4e3bea74194586864b0
|
[
"MIT"
] | 33
|
2015-01-09T20:05:10.000Z
|
2019-11-08T15:48:34.000Z
|
blitzdb/fields/integer.py
|
epatters/blitzdb
|
4b459e0bcde9e1f6224dd4e3bea74194586864b0
|
[
"MIT"
] | 39
|
2015-01-20T01:15:04.000Z
|
2022-03-26T01:01:15.000Z
|
from .base import BaseField
class IntegerField(BaseField):
pass
| 11.666667
| 30
| 0.757143
| 8
| 70
| 6.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185714
| 70
| 5
| 31
| 14
| 0.929825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
17473a106f867e736a3ebc582dbe38dae38efe6a
| 30
|
py
|
Python
|
server/data_access/__init__.py
|
jdayton3/Geney
|
a1278d3945b1ee6d7303506d7297035b57897523
|
[
"Apache-2.0"
] | 2
|
2018-01-02T03:40:46.000Z
|
2018-03-16T05:56:47.000Z
|
server/data_access/__init__.py
|
jdayton3/Geney
|
a1278d3945b1ee6d7303506d7297035b57897523
|
[
"Apache-2.0"
] | 19
|
2017-12-05T20:27:35.000Z
|
2019-07-01T19:51:57.000Z
|
server/data_access/__init__.py
|
jdayton3/Geney
|
a1278d3945b1ee6d7303506d7297035b57897523
|
[
"Apache-2.0"
] | 4
|
2018-01-03T04:01:40.000Z
|
2019-07-19T17:04:43.000Z
|
from .GeneyJob import GeneyJob
| 30
| 30
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 30
| 1
| 30
| 30
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
177cf6581e4bb33c35d6e92bf67858578612256d
| 99
|
py
|
Python
|
histoprep/preprocess/__init__.py
|
jopo666/HistoPrep
|
1b74c346b38c7ca44f92269246571f5f850836af
|
[
"MIT"
] | 11
|
2021-04-21T10:37:22.000Z
|
2021-12-19T22:32:59.000Z
|
histoprep/preprocess/__init__.py
|
jopo666/HistoPrep
|
1b74c346b38c7ca44f92269246571f5f850836af
|
[
"MIT"
] | 1
|
2021-02-24T09:15:13.000Z
|
2021-04-19T06:38:58.000Z
|
histoprep/preprocess/__init__.py
|
jopo666/HistoPrep
|
1b74c346b38c7ca44f92269246571f5f850836af
|
[
"MIT"
] | 1
|
2021-09-16T05:00:21.000Z
|
2021-09-16T05:00:21.000Z
|
from ._metadata import *
from ._visualise import *
from ._writer import *
from . import functional
| 19.8
| 25
| 0.767677
| 12
| 99
| 6.083333
| 0.5
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161616
| 99
| 4
| 26
| 24.75
| 0.879518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
179138d3c27fe05d8d5cf8f6441ea698ed182489
| 42
|
py
|
Python
|
demos/python_demos/common/pipelines/__init__.py
|
ivanvikhrev/open_model_zoo
|
322e7ac5ed8a17611b56c46e5e56bfef05d8cc2a
|
[
"Apache-2.0"
] | 4
|
2019-09-17T13:11:02.000Z
|
2021-02-22T15:39:15.000Z
|
demos/python_demos/common/pipelines/__init__.py
|
ivanvikhrev/open_model_zoo
|
322e7ac5ed8a17611b56c46e5e56bfef05d8cc2a
|
[
"Apache-2.0"
] | null | null | null |
demos/python_demos/common/pipelines/__init__.py
|
ivanvikhrev/open_model_zoo
|
322e7ac5ed8a17611b56c46e5e56bfef05d8cc2a
|
[
"Apache-2.0"
] | 1
|
2021-02-24T00:40:03.000Z
|
2021-02-24T00:40:03.000Z
|
from .async_pipeline import AsyncPipeline
| 21
| 41
| 0.880952
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 42
| 1
| 42
| 42
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
179fb0ac02a761881f32ab849809174b33d74568
| 155
|
py
|
Python
|
main.py
|
ashish-khulbey/Test-Repo
|
2f1e998bdffca65f400046bdfb3fd354c959f784
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
ashish-khulbey/Test-Repo
|
2f1e998bdffca65f400046bdfb3fd354c959f784
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
ashish-khulbey/Test-Repo
|
2f1e998bdffca65f400046bdfb3fd354c959f784
|
[
"Apache-2.0"
] | null | null | null |
<<<<<<< HEAD
print("Hello, from local repo!")
=======
print("Hello, World!")
print("Hello, from Github!")
>>>>>>> afa8fcd69f454d2ea375b62996d0354e2ad6cb1e
| 22.142857
| 48
| 0.658065
| 14
| 155
| 7.285714
| 0.642857
| 0.294118
| 0.27451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156028
| 0.090323
| 155
| 6
| 49
| 25.833333
| 0.567376
| 0
| 0
| 0
| 0
| 0
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
bdea9e6b97593d90098f8d8b3ebe1705ac2d52bf
| 1,790
|
py
|
Python
|
sesion_03/songs/migrations/0001_initial.py
|
bernest/modulo-django-desarrollo-web-cdmx-20-05pt
|
33f971f032f7d3902a49a993d46e3ecefb21d59b
|
[
"MIT"
] | null | null | null |
sesion_03/songs/migrations/0001_initial.py
|
bernest/modulo-django-desarrollo-web-cdmx-20-05pt
|
33f971f032f7d3902a49a993d46e3ecefb21d59b
|
[
"MIT"
] | null | null | null |
sesion_03/songs/migrations/0001_initial.py
|
bernest/modulo-django-desarrollo-web-cdmx-20-05pt
|
33f971f032f7d3902a49a993d46e3ecefb21d59b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2020-10-27 02:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('cover', models.ImageField(upload_to='covers')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.album')),
('artist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='songs.artist')),
],
),
]
| 38.913043
| 114
| 0.576536
| 183
| 1,790
| 5.47541
| 0.311475
| 0.047904
| 0.125749
| 0.149701
| 0.741517
| 0.741517
| 0.741517
| 0.741517
| 0.741517
| 0.741517
| 0
| 0.018692
| 0.282682
| 1,790
| 45
| 115
| 39.777778
| 0.761682
| 0.02514
| 0
| 0.631579
| 1
| 0
| 0.082616
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
da622549f118b2499293419b2f7a2e3b38746885
| 31
|
py
|
Python
|
models/__init__.py
|
jmojoo/MLMO
|
6afe03b8a9d5bbbd88641a851652a23c93a77c9f
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
jmojoo/MLMO
|
6afe03b8a9d5bbbd88641a851652a23c93a77c9f
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
jmojoo/MLMO
|
6afe03b8a9d5bbbd88641a851652a23c93a77c9f
|
[
"MIT"
] | null | null | null |
from .extractors import AlexNet
| 31
| 31
| 0.870968
| 4
| 31
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da622f99b2d993c21b77db9d7d489ea443ffe7a9
| 21
|
py
|
Python
|
astroquery/ned/__init__.py
|
cdeil/astroquery
|
1bbb9f8ea0347abb1081dba52a66772a03408f11
|
[
"BSD-3-Clause"
] | null | null | null |
astroquery/ned/__init__.py
|
cdeil/astroquery
|
1bbb9f8ea0347abb1081dba52a66772a03408f11
|
[
"BSD-3-Clause"
] | null | null | null |
astroquery/ned/__init__.py
|
cdeil/astroquery
|
1bbb9f8ea0347abb1081dba52a66772a03408f11
|
[
"BSD-3-Clause"
] | null | null | null |
from .nedpy import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
da724647bf52b2ccccc8a9cb64b82732b9781a67
| 6,187
|
py
|
Python
|
custom_auth/tests.py
|
qbrc-cnap/cnap
|
624683e91a64c3b4934b578c59db850242d2f94c
|
[
"MIT"
] | 1
|
2021-07-08T14:06:04.000Z
|
2021-07-08T14:06:04.000Z
|
custom_auth/tests.py
|
qbrc-cnap/cnap
|
624683e91a64c3b4934b578c59db850242d2f94c
|
[
"MIT"
] | 12
|
2020-02-12T00:10:53.000Z
|
2021-06-10T21:24:45.000Z
|
custom_auth/tests.py
|
qbrc-cnap/cnap
|
624683e91a64c3b4934b578c59db850242d2f94c
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from rest_framework.test import APIClient
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.conf import settings
def create_data(testcase_obj):
# create two users-- one is admin, other is regular
testcase_obj.regular_user = get_user_model().objects.create_user(email=settings.REGULAR_TEST_EMAIL, password='abcd123!')
testcase_obj.admin_user = get_user_model().objects.create_user(email=settings.ADMIN_TEST_EMAIL, password='abcd123!', is_staff=True)
testcase_obj.other_user = get_user_model().objects.create_user(email=settings.OTHER_TEST_EMAIL, password='abcd123!')
'''
Tests for listing Users:
- admin users can list all users
- admin users can list info about specific user
- non-admin users do not have access to anything (returns 403)
'''
class UserListTestCase(TestCase):
def setUp(self):
create_data(self)
def test_list_all_users_by_admin(self):
# establish the admin client:
client = APIClient()
client.login(email=settings.ADMIN_TEST_EMAIL, password='abcd123!')
url = reverse('user-list')
response = client.get(url)
data = response.data
self.assertEqual(response.status_code,200)
self.assertEqual(len(data), 3)
def test_list_specific_user_by_admin(self):
# establish the admin client:
client = APIClient()
client.login(email=settings.ADMIN_TEST_EMAIL, password='abcd123!')
# get the regular user's pk:
u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)[0]
reguser_pk = u.pk
url = reverse('user-detail', args=[reguser_pk])
response = client.get(url)
data = response.data
self.assertEqual(response.status_code,200)
self.assertEqual(data['email'], settings.REGULAR_TEST_EMAIL)
def test_regular_user_cannot_list_users(self):
# establish a "regular" client:
client = APIClient()
client.login(email=settings.REGULAR_TEST_EMAIL, password='abcd123!')
# check that querying all users is blocked:
url = reverse('user-list')
response = client.get(url)
data = response.data
self.assertEqual(response.status_code,403)
# Now check that they cannot check even their own data:
# get the regular user's pk:
u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)[0]
reguser_pk = u.pk
url = reverse('user-detail', args=[reguser_pk])
response = client.get(url)
data = response.data
self.assertEqual(response.status_code,403)
'''
Tests for creating Users:
- admin user can create a user with a specific username/email/pwd
- non-admin users do not have access to anything (returns 403)
'''
class UserCreateTestCase(TestCase):
def setUp(self):
create_data(self)
def test_admin_can_create_user(self):
# establish the admin client:
client = APIClient()
client.login(email=settings.ADMIN_TEST_EMAIL, password='abcd123!')
# get the admin user's pk:
u = get_user_model().objects.filter(email=settings.ADMIN_TEST_EMAIL)[0]
adminuser_pk = u.pk
url = reverse('user-list')
data = {'email':settings.YET_ANOTHER_TEST_EMAIL, \
'password': 'abcd123!', \
}
response = client.post(url, data, format='json')
self.assertEqual(response.status_code, 201)
u = get_user_model().objects.filter(email=settings.YET_ANOTHER_TEST_EMAIL)
self.assertEqual(len(u), 1)
def test_no_duplicate_user(self):
# establish the admin client:
client = APIClient()
client.login(email=settings.ADMIN_TEST_EMAIL, password='abcd123!')
# get the admin user's pk:
u = get_user_model().objects.filter(email=settings.ADMIN_TEST_EMAIL)[0]
adminuser_pk = u.pk
url = reverse('user-list')
data = {'email':settings.OTHER_TEST_EMAIL, \
'password': 'abcd123!', \
}
response = client.post(url, data, format='json')
self.assertEqual(response.status_code, 400)
def test_regular_user_cannot_create_user(self):
# establish the admin client:
client = APIClient()
client.login(email=settings.REGULAR_TEST_EMAIL, password='abcd123!')
# get the admin user's pk:
u = get_user_model().objects.filter(email=settings.ADMIN_TEST_EMAIL)[0]
adminuser_pk = u.pk
url = reverse('user-list')
data = {'email':settings.YET_ANOTHER_TEST_EMAIL, \
'password': 'abcd123!', \
}
response = client.post(url, data, format='json')
self.assertEqual(response.status_code, 403)
'''
Tests for User deletion:
- deletion of User causes deletion of all associated entities
- non-admin cannot delete users
'''
class UserDeleteTestCase(TestCase):
def setUp(self):
create_data(self)
def test_admin_can_delete_user(self):
# establish the admin client:
client = APIClient()
client.login(email=settings.ADMIN_TEST_EMAIL, password='abcd123!')
u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)
self.assertEqual(len(u), 1)
# get the reg user's pk:
u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)[0]
reguser_pk = u.pk
url = reverse('user-detail', args=[reguser_pk,])
response = client.delete(url)
u = get_user_model().objects.filter(email=settings.REGULAR_TEST_EMAIL)
self.assertEqual(len(u), 0)
def test_reguser_cannot_delete_user(self):
# establish the admin client:
client = APIClient()
client.login(email=settings.REGULAR_TEST_EMAIL, password='abcd123!')
u = get_user_model().objects.filter(email=settings.OTHER_TEST_EMAIL)
self.assertEqual(len(u), 1)
otheruser_pk = u[0].pk
url = reverse('user-detail', args=[otheruser_pk,])
response = client.delete(url)
self.assertEqual(response.status_code, 403)
| 35.557471
| 135
| 0.663326
| 788
| 6,187
| 5.016497
| 0.143401
| 0.082216
| 0.042499
| 0.084999
| 0.807741
| 0.767265
| 0.749051
| 0.710347
| 0.695674
| 0.650392
| 0
| 0.017603
| 0.228705
| 6,187
| 173
| 136
| 35.763006
| 0.810771
| 0.084532
| 0
| 0.673077
| 0
| 0
| 0.049739
| 0
| 0
| 0
| 0
| 0
| 0.134615
| 1
| 0.115385
| false
| 0.134615
| 0.048077
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
e528a6317e03a90a0e3909067889b9df15608bff
| 118
|
py
|
Python
|
week1/1.12 tasks of the week/step02 interval.py
|
project-cemetery/stepik-programming-on-python
|
3ca4e6a74b9ca5deb50336737fe2e6d74722a95f
|
[
"MIT"
] | 7
|
2020-08-03T22:10:29.000Z
|
2022-02-23T16:08:44.000Z
|
week1/1.12 tasks of the week/step02 interval.py
|
Joni2701/stepik-programming-on-python
|
3ca4e6a74b9ca5deb50336737fe2e6d74722a95f
|
[
"MIT"
] | null | null | null |
week1/1.12 tasks of the week/step02 interval.py
|
Joni2701/stepik-programming-on-python
|
3ca4e6a74b9ca5deb50336737fe2e6d74722a95f
|
[
"MIT"
] | 9
|
2020-05-19T16:42:39.000Z
|
2022-02-24T22:41:21.000Z
|
def is_in_interval(n):
return (-15 < n <= 12) or (14 < n < 17) or (19 <= n)
print(is_in_interval(int(input())))
| 19.666667
| 56
| 0.576271
| 22
| 118
| 2.909091
| 0.681818
| 0.125
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107527
| 0.211864
| 118
| 5
| 57
| 23.6
| 0.580645
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
e5a9a9867a7b67ecb65337229a603aba10abfeaf
| 187
|
py
|
Python
|
tests/main_tests/main2/libraries/user/__init__.py
|
ruke47/squadron
|
311bd93f11502917ce4e479cb89bde6764c83f27
|
[
"MIT"
] | null | null | null |
tests/main_tests/main2/libraries/user/__init__.py
|
ruke47/squadron
|
311bd93f11502917ce4e479cb89bde6764c83f27
|
[
"MIT"
] | null | null | null |
tests/main_tests/main2/libraries/user/__init__.py
|
ruke47/squadron
|
311bd93f11502917ce4e479cb89bde6764c83f27
|
[
"MIT"
] | null | null | null |
import os
def schema():
return {
'title': 'User schema',
'type': 'string',
}
def verify(**kwargs):
return []
def apply(**kwargs):
return []
| 13.357143
| 35
| 0.475936
| 18
| 187
| 4.944444
| 0.666667
| 0.269663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.358289
| 187
| 13
| 36
| 14.384615
| 0.741667
| 0
| 0
| 0.2
| 0
| 0
| 0.139037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.3
| true
| 0
| 0.1
| 0.3
| 0.7
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
f91abf0584c41eee3c43995b13acae675cae9c4d
| 32
|
py
|
Python
|
pybrain/__init__.py
|
metabacchi/FuzzyClassificator
|
f59c10364b872edce342403db6ef26e30d7f69b8
|
[
"MIT"
] | 28
|
2017-03-28T07:47:41.000Z
|
2022-03-22T13:45:03.000Z
|
pybrain/__init__.py
|
metabacchi/FuzzyClassificator
|
f59c10364b872edce342403db6ef26e30d7f69b8
|
[
"MIT"
] | 23
|
2017-03-28T09:17:33.000Z
|
2020-03-17T08:46:02.000Z
|
pybrain/__init__.py
|
metabacchi/FuzzyClassificator
|
f59c10364b872edce342403db6ef26e30d7f69b8
|
[
"MIT"
] | 10
|
2018-12-03T15:20:15.000Z
|
2021-04-29T06:39:26.000Z
|
from pybrain.structure import *
| 16
| 31
| 0.8125
| 4
| 32
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f9295958301faa8702c77e2cdd7dfa0b65ab3fcb
| 171
|
py
|
Python
|
src/morpheus/__init__.py
|
eliavw/morpheus
|
01c6d15416188254cfdcc7fe9461069cbf8dbe63
|
[
"MIT"
] | null | null | null |
src/morpheus/__init__.py
|
eliavw/morpheus
|
01c6d15416188254cfdcc7fe9461069cbf8dbe63
|
[
"MIT"
] | null | null | null |
src/morpheus/__init__.py
|
eliavw/morpheus
|
01c6d15416188254cfdcc7fe9461069cbf8dbe63
|
[
"MIT"
] | null | null | null |
from .composition.ParallelComposition import ParallelComposition
from .composition.SequentialComposition import SequentialComposition
from .core.Morpheus import Morpheus
| 34.2
| 68
| 0.888889
| 15
| 171
| 10.133333
| 0.466667
| 0.197368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076023
| 171
| 4
| 69
| 42.75
| 0.962025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
006e887513b8d73d7f7d031b3053ea43b36204e5
| 83
|
py
|
Python
|
TorchProteinLibrary/FullAtomModel/PDB2Coords/__init__.py
|
dendisuhubdy/TorchProteinLibrary
|
89f0f6c311658b9313484cd92804682a251b1b97
|
[
"MIT"
] | null | null | null |
TorchProteinLibrary/FullAtomModel/PDB2Coords/__init__.py
|
dendisuhubdy/TorchProteinLibrary
|
89f0f6c311658b9313484cd92804682a251b1b97
|
[
"MIT"
] | null | null | null |
TorchProteinLibrary/FullAtomModel/PDB2Coords/__init__.py
|
dendisuhubdy/TorchProteinLibrary
|
89f0f6c311658b9313484cd92804682a251b1b97
|
[
"MIT"
] | null | null | null |
from .PDB2Coords import PDB2CoordsBiopython, PDB2CoordsOrdered, PDB2CoordsUnordered
| 83
| 83
| 0.903614
| 6
| 83
| 12.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 0.060241
| 83
| 1
| 83
| 83
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
00849d43ffb9410704093ba002078f834f37dcb6
| 8,052
|
py
|
Python
|
analysis/drift_velocity/plots.py
|
lconaboy/seren3
|
5a2ec80adf0d69664d2ee874f5ba12cc02d6c337
|
[
"CNRI-Python"
] | 1
|
2017-09-21T14:58:23.000Z
|
2017-09-21T14:58:23.000Z
|
analysis/drift_velocity/plots.py
|
lconaboy/seren3
|
5a2ec80adf0d69664d2ee874f5ba12cc02d6c337
|
[
"CNRI-Python"
] | 1
|
2020-09-09T08:52:43.000Z
|
2020-09-09T08:52:43.000Z
|
analysis/drift_velocity/plots.py
|
lconaboy/seren3
|
5a2ec80adf0d69664d2ee874f5ba12cc02d6c337
|
[
"CNRI-Python"
] | 1
|
2019-01-21T10:57:41.000Z
|
2019-01-21T10:57:41.000Z
|
def plot_power_spectra(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf, ax=None):
'''
Plot density and velocity power spectra and compare with CAMB
'''
import numpy as np
import matplotlib.pylab as plt
from seren3.cosmology.transfer_function import TF
if ax is None:
ax = plt.gca()
k, pkb = tf.TF_Pk(TF.B)
k, pkc = tf.TF_Pk(TF.C)
ax.loglog(kbins, deltac_2, label="CDM", color="royalblue", linewidth=2.)
ax.loglog(kbins, deltac_2_nodeconv, color="navy", linestyle='--')
ax.loglog(kbins, deltab_2, label="Baryons", color="darkorange", linewidth=2.)
# CAMB
deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
# direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
# fname = "%s/input_powerspec_baryon.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# k = ps_data[0]
# P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# fname = "%s/input_powerspec_cdm.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# deltac_2_CAMB = P_cdm * (k ** 3.)
# deltab_2_CAMB = P_bar * (k ** 3.)
ax.loglog(k, deltac_2_CAMB, color="royalblue", linestyle=":")
ax.loglog(k, deltab_2_CAMB, color="darkorange", linestyle=":")
ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
# ax.set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
ax.legend(loc="upper left", frameon=False, prop={"size" : 18})
# plt.xlim(0.001, 100)
ax.set_xlim(0.01, 1e4)
ax.set_ylim(1e-12, 2)
def plot_velocity_power_spectra(kbins, vdeltab_2, vdeltac_2, tf, ax=None):
'''
Plot density and velocity power spectra and compare with CAMB
'''
import numpy as np
import matplotlib.pylab as plt
from seren3.cosmology import linear_velocity_ps
from seren3.cosmology.transfer_function import TF
if ax is None:
ax = plt.gca()
k, pkb = tf.TF_Pk(TF.B)
k, pkc = tf.TF_Pk(TF.C)
ix = np.where(~np.isnan(vdeltab_2))
ax.loglog(kbins[ix][3:], vdeltac_2[ix][3:], label="CDM", color="royalblue", linewidth=2.)
ax.loglog(kbins[ix][3:], vdeltab_2[ix][3:], label="Baryons", color="darkorange", linewidth=2.)
# CAMB
deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
cosmo = tf.cosmo
vdeltab_2_CAMB = linear_velocity_ps(k, np.sqrt(deltab_2_CAMB), **cosmo)**2
vdeltac_2_CAMB = linear_velocity_ps(k, np.sqrt(deltac_2_CAMB), **cosmo)**2
vnorm = vdeltab_2_CAMB/deltab_2_CAMB
k, pkb = tf.TF_Pk(TF.VBARYON)
k, pkc = tf.TF_Pk(TF.VCDM)
vdeltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.) * vnorm * 0.702
vdeltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.) * vnorm * 0.702
# direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
# fname = "%s/input_powerspec_baryon.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# k = ps_data[0]
# P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# fname = "%s/input_powerspec_cdm.txt" % direc
# ps_data = np.loadtxt(fname, unpack=True)
# P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
# deltac_2_CAMB = P_cdm * (k ** 3.)
# deltab_2_CAMB = P_bar * (k ** 3.)
ax.loglog(k, vdeltac_2_CAMB, color="royalblue", linestyle=":")
ax.loglog(k, vdeltab_2_CAMB, color="darkorange", linestyle=":")
ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
ax.set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
ax.legend(loc="lower left", frameon=False, prop={"size" : 18})
# plt.xlim(0.001, 100)
ax.set_xlim(0.01, 1e4)
# ax.set_ylim(1e-12, 2)
def plot_velocity(data_9, data_14):
import matplotlib.pylab as plt
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
for ax, data in zip(axs.flatten(), [data_9, data_14]):
kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf = data
kbins = kbins[3:]
deltab_2 = deltab_2[3:]
deltac_2 = deltac_2[3:]
deltac_2_nodeconv = deltac_2_nodeconv[3:]
plot_velocity_power_spectra(kbins, deltab_2, deltac_2, tf, ax=ax)
# axs[0].set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
axs[0].set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
fig.tight_layout()
plt.show()
def plot(data_9, data_14):
import matplotlib.pylab as plt
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))
for ax, data in zip(axs.flatten(), [data_9, data_14]):
kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf = data
kbins = kbins[ix][3:]
deltab_2 = deltab_2[3:]
deltac_2 = deltac_2[3:]
deltac_2_nodeconv = deltac_2_nodeconv[3:]
plot_power_spectra(kbins, deltab_2, deltac_2, deltac_2_nodeconv, tf, ax=ax)
# kbins, vdeltab_2, vdeltac_2, tf = data
# plot_velocity_power_spectra(kbins, deltab_2, deltac_2, tf, ax=ax)
axs[0].set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
# axs[0].set_ylabel(r"$\mathcal{P}_{v}(k)$ [km s$^{-1}$]", fontsize=20)
fig.tight_layout()
plt.show()
def plot_power_spectra_bias(kbins_bias, deltab_2_bias, deltac_2_bias, kbins, deltab_2, deltac_2, tf, ax=None):
    '''
    Plot biased and unbiased density power spectra (CDM and baryons),
    the bias ratio b(k) in an upper panel, and the CAMB prediction
    for comparison.

    NOTE(review): the `ax` parameter is accepted but immediately
    overwritten by `fig.add_subplot` below, so it has no effect —
    confirm whether callers rely on passing their own axes.
    '''
    import numpy as np
    import matplotlib.pylab as plt
    from seren3.cosmology.transfer_function import TF
    import matplotlib.gridspec as gridspec
    # Two stacked panels sharing the x axis: spectra (bottom, 3/5 height)
    # and the bias ratio (top, 2/5 height).
    fig = plt.figure(figsize=(8,6))
    gs = gridspec.GridSpec(5,4,wspace=0.,hspace=0.)
    ax = fig.add_subplot(gs[2:,:])
    ax2 = fig.add_subplot(gs[:2,:], sharex=ax)
    # Linear-theory power spectra for baryons (B) and CDM (C) from the
    # transfer-function object.
    k, pkb = tf.TF_Pk(TF.B)
    k, pkc = tf.TF_Pk(TF.C)
    # Mask NaN bins in the biased baryon spectrum before plotting.
    ix = np.where(~np.isnan(deltab_2_bias))
    ax.loglog(kbins_bias[ix], deltac_2_bias[ix], label="CDM", color="royalblue", linewidth=2.)
    ax.loglog(kbins_bias[ix], deltab_2_bias[ix], label="Baryons", color="darkorange", linewidth=2.)
    # `ix` is rebuilt from the *unbiased* baryon spectrum and then used to
    # index both the biased and unbiased arrays below — this assumes the
    # two runs share the same binning and NaN positions (TODO confirm).
    ix = np.where(~np.isnan(deltab_2))
    ax.loglog(kbins[ix], deltac_2[ix], color="royalblue", linewidth=2., linestyle="--")
    ax.loglog(kbins[ix], deltab_2[ix], color="darkorange", linewidth=2., linestyle="--")
    # Off-plot dummy points purely to add "Biased"/"Unbiased" legend entries.
    ax.loglog([0.0001, 0.0001], [100, 100], color="k", linewidth=2., linestyle="-", label="Biased")
    ax.loglog([0.0001, 0.0001], [100, 100], color="k", linewidth=2., linestyle="--", label="Unbiased")
    # Bias ratio b(k) = biased / unbiased, per species, plus a unity line.
    ax2.plot(kbins_bias[ix], deltac_2_bias[ix]/deltac_2[ix], color="royalblue", linewidth=2.)
    ax2.plot(kbins_bias[ix], deltab_2_bias[ix]/deltab_2[ix], color="darkorange", linewidth=2.)
    ax2.plot(np.linspace(0.1, 3000), np.ones(50), linestyle=":", color="k", label="Unity")
    # CAMB: dimensionless power Delta^2(k) = P(k) k^3 / (2 pi^2).
    deltab_2_CAMB = pkb * (k ** 3.) / (2. * np.pi ** 2.)
    deltac_2_CAMB = pkc * (k ** 3.) / (2. * np.pi ** 2.)
    # direc = '/lustre/scratch/astro/ds381/simulations/baryon_drift/100Mpc/z200/zoom/lvl14/'
    # fname = "%s/input_powerspec_baryon.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # k = ps_data[0]
    # P_bar = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # fname = "%s/input_powerspec_cdm.txt" % direc
    # ps_data = np.loadtxt(fname, unpack=True)
    # P_cdm = ps_data[1] * (2 * np.pi) ** 3 * tf._norm
    # deltac_2_CAMB = P_cdm * (k ** 3.)
    # deltab_2_CAMB = P_bar * (k ** 3.)
    ax.loglog(k, deltac_2_CAMB, color="royalblue", linestyle=":", alpha=0.5)
    ax.loglog(k, deltab_2_CAMB, color="darkorange", linestyle=":", alpha=0.5)
    ax.set_xlabel(r"k [Mpc$^{-1}$ h a$^{-1}$]", fontsize=20)
    ax.set_ylabel(r"$\mathcal{P}(k)$", fontsize=20)
    ax.legend(loc="lower left", ncol=2, frameon=False, prop={"size" : 18})
    # plt.xlim(0.001, 100)
    ax.set_xlim(1, 2000)
    ax.set_ylim(1e-8, 2)
    ax2.set_ylim(-0.2, 1.2)
    ax2.set_ylabel(r"$b(k,v_{bc})$", fontsize=20)
    # Hard-coded for a specific stream-velocity realization — presumably
    # |v_bc| at recombination for this particular simulation; verify.
    ax2.set_title(r"$|v_{bc,\mathrm{rec}}|$ = 19.06 km s$^{-1}$", fontsize=20)
    ax2.legend(loc="lower left", frameon=False, prop={"size" : 20})
    # Upper panel shares the x axis; hide its tick labels.
    plt.setp(ax2.get_xticklabels(), visible=False)
| 38.526316
| 111
| 0.620964
| 1,308
| 8,052
| 3.636086
| 0.129969
| 0.051514
| 0.026913
| 0.013457
| 0.876787
| 0.839781
| 0.813709
| 0.783011
| 0.722666
| 0.671362
| 0
| 0.059231
| 0.192747
| 8,052
| 208
| 112
| 38.711538
| 0.672462
| 0.234724
| 0
| 0.440367
| 0
| 0
| 0.084129
| 0.003787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045872
| false
| 0
| 0.119266
| 0
| 0.165138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dad1b13cfc78bb0d60a151c6bb98e3620bff455b
| 785
|
py
|
Python
|
clients_1.3/python/test/test_solver_async_response.py
|
MetaAnalyticsAdmin/meta-analytics
|
9a8408d3e414e837d84939ad711d0ae9f83c46ed
|
[
"Apache-2.0"
] | 5
|
2020-05-04T17:02:35.000Z
|
2022-01-13T17:41:09.000Z
|
clients_1.3/python/test/test_solver_async_response.py
|
MetaAnalytics/meta-analytics
|
9a8408d3e414e837d84939ad711d0ae9f83c46ed
|
[
"Apache-2.0"
] | null | null | null |
clients_1.3/python/test/test_solver_async_response.py
|
MetaAnalytics/meta-analytics
|
9a8408d3e414e837d84939ad711d0ae9f83c46ed
|
[
"Apache-2.0"
] | null | null | null |
"""
QUBO API solvers
QUBO solvers from Meta Analytics # noqa: E501
The version of the OpenAPI document: v1
Contact: rajesh@craftingdata.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import meta_analytics
from meta_analytics.model.solver_async_response import SolverAsyncResponse
class TestSolverAsyncResponse(unittest.TestCase):
    """Unit-test stubs for the generated ``SolverAsyncResponse`` model."""

    def setUp(self):
        # No fixtures required for these stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testSolverAsyncResponse(self):
        """Test SolverAsyncResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = SolverAsyncResponse() # noqa: E501
        pass
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 21.216216
| 79
| 0.698089
| 83
| 785
| 6.457831
| 0.638554
| 0.072761
| 0.063433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011494
| 0.224204
| 785
| 36
| 80
| 21.805556
| 0.868637
| 0.456051
| 0
| 0.230769
| 1
| 0
| 0.020672
| 0
| 0
| 0
| 0
| 0.027778
| 0
| 1
| 0.230769
| false
| 0.230769
| 0.307692
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
dad31ea392b5ccf9f81d1cb1ddbee5b3b2633a4e
| 21,114
|
py
|
Python
|
api/test/test_main.py
|
diego-hermida/ClimateChangeApp
|
576d49ec5b76f709cc86874ffb03f4a38dbbbbfd
|
[
"MIT"
] | 2
|
2018-07-01T20:36:46.000Z
|
2019-11-01T22:47:06.000Z
|
api/test/test_main.py
|
diego-hermida/ClimateChangeApp
|
576d49ec5b76f709cc86874ffb03f4a38dbbbbfd
|
[
"MIT"
] | 1
|
2021-06-10T20:28:53.000Z
|
2021-06-10T20:28:53.000Z
|
api/test/test_main.py
|
diego-hermida/ClimateChangeApp
|
576d49ec5b76f709cc86874ffb03f4a38dbbbbfd
|
[
"MIT"
] | null | null | null |
import global_config.config
import api.main as main
import json
from pymongo.errors import DuplicateKeyError
from unittest import TestCase, mock
from unittest.mock import Mock
from utilities.mongo_util import create_user, bulk_create_authorized_users, drop_user, drop_database
# Name of the throwaway MongoDB database used by this test module.
database = 'test_database'
# Point both the shared global config and the API module's own config at
# the test database so these tests never touch the production database.
global_config.config.GLOBAL_CONFIG['MONGODB_DATABASE'] = database
main.GLOBAL_CONFIG['MONGODB_DATABASE'] = database
class TestMain(TestCase):
    """Integration-style tests for the API's Flask endpoints.

    Uses a real MongoDB test database (created in ``setUpClass``) for
    authorization, and ``mock.patch`` to stub the statistics collection
    and database-ping behaviour per test.
    """

    @classmethod
    def setUpClass(cls):
        # Create the API user in the test database; tolerate it already
        # existing from a previous (possibly aborted) run.
        try:
            create_user(global_config.config.GLOBAL_CONFIG['MONGODB_API_USERNAME'],
                        global_config.config.GLOBAL_CONFIG['MONGODB_API_USER_PASSWORD'],
                        roles=[{'role': 'dbOwner', 'db': database}], database=database)
        except DuplicateKeyError:
            pass
        # Three tokens: scope 1, scope 2, and a token with no scope at all.
        bulk_create_authorized_users([{'_id': 'test_user', 'token': 'test_token', 'scope': 1},
                                      {'_id': 'test_user2', 'token': 'test_token2', 'scope': 2}, {'_id': 'test_user3', 'token':
                                      'test_token_with_no_scope', 'scope': None}], database=database)
        # Rebind the module-level collections so they hit the test database.
        main._stats_collection = main.MongoDBCollection(
            collection_name=global_config.config.GLOBAL_CONFIG['MONGODB_STATS_COLLECTION'],
            username=global_config.config.GLOBAL_CONFIG['MONGODB_API_USERNAME'],
            password=global_config.config.GLOBAL_CONFIG['MONGODB_API_USER_PASSWORD'],
            database=database)
        main._auth_collection = main.MongoDBCollection(
            collection_name=global_config.config.GLOBAL_CONFIG['MONGODB_API_AUTHORIZED_USERS_COLLECTION'],
            username=global_config.config.GLOBAL_CONFIG['MONGODB_API_USERNAME'],
            password=global_config.config.GLOBAL_CONFIG['MONGODB_API_USER_PASSWORD'],
            database=database)

    def setUp(self):
        # Reset the /alive cache before every test so cache-related tests
        # start from a known state.
        main._api_alive = None
        main._api_alive_last_update = None
        self.app = main.app.test_client()

    @classmethod
    def tearDownClass(cls):
        # Remove the API user and drop the whole test database.
        drop_user(global_config.config.GLOBAL_CONFIG['MONGODB_API_USERNAME'], database=database)
        drop_database(global_config.config.GLOBAL_CONFIG['MONGODB_DATABASE'])

    def test_execution_fails_if_environment_variable_does_not_exist(self):
        # Temporarily remove MONGODB_IP so main() exits with code 1, then
        # restore it for the rest of the suite.
        from os import environ
        localhost_ip = environ.get('MONGODB_IP')
        if localhost_ip:
            del environ['MONGODB_IP']
        with self.assertRaises(SystemExit) as e:
            main.main(log_to_stdout=False, log_to_telegram=False, log_to_file=False)
        self.assertEqual(1, e.exception.code)
        if localhost_ip is not None:
            environ['MONGODB_IP'] = localhost_ip

    @mock.patch('api.main.ping_database',
                Mock(side_effect=EnvironmentError('Test error to verify 503 HTTP status code is obtained.')))
    def test_authorized_calls_return_503_if_database_down(self):
        r = self.app.get('/modules', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(503, r.status_code)

    # --- Authentication ---

    def test_require_auth_succeed(self):
        r = self.app.get('/modules', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(200, r.status_code)

    def test_require_auth_rejected_invalid_token(self):
        r = self.app.get('/modules', headers={'Authorization': 'Bearer INVALID_TOKEN_K0R9A1NWOIvwCBO2fR'})
        self.assertEqual(401, r.status_code)

    def test_require_auth_rejected_no_token(self):
        r = self.app.get('/modules')
        self.assertEqual(401, r.status_code)

    # --- /modules endpoint ---

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(return_value={'_id': 'aggregated', 'per_module':
                {'air_pollution': {}, 'countries': {}}}))
    def test_modules_endpoint(self):
        r = self.app.get('/modules', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertListEqual(['air_pollution', 'countries'], data['modules'])

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(return_value=None))
    def test_modules_endpoint_with_no_modules(self):
        r = self.app.get('/modules', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertListEqual([], data['modules'])

    # --- /executionStats endpoint ---

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(side_effect=[{'_id': 'aggregated',
                'last_execution_id': 1}, {"more_values": "should_go_here", "execution_id": 1}]))
    def test_execution_stats_endpoint_last_execution(self):
        r = self.app.get('/executionStats', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertEqual(1, data['execution_id'])

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(return_value=None))
    def test_execution_stats_endpoint_last_execution_and_subsystem_not_executed(self):
        r = self.app.get('/executionStats', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(404, r.status_code)

    def test_execution_stats_endpoint_with_execution_id_invalid_type(self):
        r = self.app.get('/executionStats?executionId=foo', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r.status_code)

    def test_execution_stats_endpoint_with_execution_id_negative(self):
        r = self.app.get('/executionStats?executionId=-1', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r.status_code)

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(side_effect=[None, {'_id': 'aggregated',
                'last_execution_id': 1}]))
    def test_execution_stats_endpoint_with_execution_id_non_existing_but_subsystem_executed(self):
        r = self.app.get('/executionStats?executionId=2', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(404, r.status_code)
        self.assertEqual(1, data['last_execution_id'])

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(return_value=None))
    def test_execution_stats_endpoint_with_execution_id_non_existing_and_subsystem_not_executed(self):
        r = self.app.get('/executionStats?executionId=2', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(404, r.status_code)
        self.assertIsNone(data.get('last_execution_id'))

    # --- /data endpoint ---

    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    @mock.patch('api.main.MongoDBCollection')
    def test_data_endpoint_no_parameters(self, mock_collection):
        data = []
        for i in range(1, 6):
            data.append({'_id': i, 'value': i})
        mock_collection.return_value.find.return_value = (data, None)
        mock_collection.return_value.collection.find_one.return_value = {'_id': 'aggregated',
                                                                         'per_module': {'module': {}}}
        r = self.app.get('/data/module', headers={'Authorization': 'Bearer test_token'})
        result = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertListEqual(data, result['data'])

    def test_data_endpoint_with_last_index_invalid_type(self):
        r = self.app.get('/data/module?startIndex=foo', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r.status_code)

    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    @mock.patch('api.main.MongoDBCollection')
    def test_data_endpoint_with_last_index_compatible_datatype(self, mock_collection):
        data = []
        for i in range(5, 9):
            data.append({'_id': i, 'value': i})
        mock_collection.return_value.find.return_value = (data, None)
        mock_collection.return_value.collection.find_one.return_value = {'_id': 'aggregated',
                                                                         'per_module': {'module': {}}}
        r = self.app.get('/data/module?startIndex=0', headers={'Authorization': 'Bearer test_token'})
        result = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertListEqual(data, result['data'])

    def test_data_endpoint_with_limit_invalid_type(self):
        r = self.app.get('/data/module?limit=foo', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r.status_code)

    def test_data_endpoint_with_limit_invalid_value(self):
        # Limit must be a strictly positive integer.
        r1 = self.app.get('/data/module?limit=0', headers={'Authorization': 'Bearer test_token'})
        r2 = self.app.get('/data/module?limit=-1', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r1.status_code)
        self.assertEqual(400, r2.status_code)

    def test_data_endpoint_with_execution_id_invalid_type(self):
        r = self.app.get('/data/module?executionId=foo', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r.status_code)

    def test_data_endpoint_with_execution_id_invalid_value(self):
        r2 = self.app.get('/data/module?executionId=-1', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r2.status_code)

    @mock.patch('api.main._get_module_names', Mock(return_value=['other_module']))
    @mock.patch('api.main.MongoDBCollection')
    def test_data_endpoint_with_invalid_module(self, mock_collection):
        mock_collection.return_value.collection.find_one.return_value = {'_id': 'aggregated',
                                                                         'per_module': {'air_pollution': {}, 'countries': {}}}
        r = self.app.get('/data/invalid_module', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(404, r.status_code)

    # --- /alive endpoint (open, cached) ---

    def test_alive_endpoint(self):
        r = self.app.get('/alive')
        self.assertEqual(200, r.status_code)
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertTrue(data['alive'])

    def test_alive_endpoint_with_authenticated_request(self):
        r = self.app.get('/alive', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(200, r.status_code)
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertTrue(data['alive'])

    @mock.patch('api.main.ping_database', Mock(side_effect=EnvironmentError('Database is down!')))
    def test_alive_endpoint_with_inactive_database(self):
        r = self.app.get('/alive')
        self.assertEqual(503, r.status_code)
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertFalse(data['alive'])

    @mock.patch('api.main.ping_database',
                Mock(side_effect=Exception('Test error (to verify anomalous exit). This is OK.')))
    def test_alive_endpoint_with_inactive_database_uncaught_exception(self):
        r = self.app.get('/alive')
        self.assertEqual(500, r.status_code)

    def test_unknown_endpoint_requests_are_rejected(self):
        r = self.app.get('/unknown_endpoint')
        self.assertEqual(404, r.status_code)

    @mock.patch('api.main.ping_database')
    def test_alive_endpoint_close_requests_use_cache(self, mock_ping):
        # Two back-to-back requests should only ping the database once.
        r1 = self.app.get('/alive')
        r1_data = r1.get_data()
        self.assertEqual(200, r1.status_code)
        self.assertEqual(1, mock_ping.call_count)
        r2 = self.app.get('/alive')
        r2_data = r2.get_data()
        self.assertEqual(1, mock_ping.call_count)
        self.assertEqual(200, r2.status_code)
        self.assertEqual(r1_data, r2_data)

    @mock.patch('api.main.ping_database')
    @mock.patch('api.main.API_CONFIG', {'API_ALIVE_CACHE_TIME': -1})
    def test_alive_endpoint_distant_requests_do_not_use_cache(self, mock_ping):
        # Negative cache time forces the cache to always be stale.
        r1 = self.app.get('/alive')
        r1_data = json.loads(r1.get_data().decode('utf-8'))
        self.assertEqual(200, r1.status_code)
        self.assertEqual(1, mock_ping.call_count)
        r2 = self.app.get('/alive')
        r2_data = json.loads(r2.get_data().decode('utf-8'))
        self.assertEqual(2, mock_ping.call_count)
        self.assertEqual(200, r2.status_code)
        self.assertEqual(r1_data['alive'], r2_data['alive'])
        self.assertNotEqual(r1_data['updated'], r2_data['updated'])

    def test_scopes(self):
        # Inserts real stats documents for two subsystems (scopes) and
        # verifies each token only sees its own subsystem's data.
        # NOTE(review): cleanup (remove_all) only runs on failure — on
        # success the inserted documents remain; confirm whether that is
        # intentional (database is dropped in tearDownClass anyway).
        try:
            main._stats_collection.get_collection().insert_one({'_id': {'subsystem_id': 1, 'type': 'aggregated'},
                                                                'per_module': {'module1': {}, 'module2': {}, 'module3': {}}, 'last_execution_id': 4})
            main._stats_collection.get_collection().insert_one({'_id': {'subsystem_id': 2, 'type': 'aggregated'},
                                                                'per_module': {'module4': {}, 'module5': {}}, 'last_execution_id': 1})
            main._stats_collection.get_collection().insert_one({'_id': {'subsystem_id': 1, 'execution_id': 3, 'type':
                                                                'last_execution'}, 'modules_with_pending_work': {'module1': {}, 'module2': {}}})
            main._stats_collection.get_collection().insert_one({'_id': {'subsystem_id': 1, 'execution_id': 4, 'type':
                                                                'last_execution'}, 'modules_with_pending_work': None})
            main._stats_collection.get_collection().insert_one({'_id': {'subsystem_id': 2, 'execution_id': 1, 'type':
                                                                'last_execution'}, 'modules_with_pending_work': {'module4': {}}})
            # Testing module access
            r1 = self.app.get('/modules', headers={'Authorization': 'Bearer test_token'})
            r1_data = json.loads(r1.get_data().decode('utf-8'))
            r2 = self.app.get('/modules', headers={'Authorization': 'Bearer test_token2'})
            r2_data = json.loads(r2.get_data().decode('utf-8'))
            self.assertEqual(['module1', 'module2', 'module3'], r1_data['modules'])
            self.assertEqual(['module4', 'module5'], r2_data['modules'])
            # Testing data access
            r1 = self.app.get('/data/module1', headers={'Authorization': 'Bearer test_token'})
            r2 = self.app.get('/data/module1', headers={'Authorization': 'Bearer test_token2'})
            self.assertEqual(200, r1.status_code)
            self.assertEqual(404, r2.status_code)
            # Testing execution stats (with custom executionId)
            r1 = self.app.get('/executionStats?executionId=3', headers={'Authorization': 'Bearer test_token'})
            r2 = self.app.get('/executionStats?executionId=3', headers={'Authorization': 'Bearer test_token2'})
            self.assertEqual(200, r1.status_code)
            self.assertEqual(404, r2.status_code)
            # Testing execution stats (last execution)
            r1 = self.app.get('/executionStats', headers={'Authorization': 'Bearer test_token'})
            r2 = self.app.get('/executionStats', headers={'Authorization': 'Bearer test_token2'})
            r1_data = json.loads(r1.get_data().decode('utf-8'))
            r2_data = json.loads(r2.get_data().decode('utf-8'))
            self.assertEqual(200, r1.status_code)
            self.assertEqual(200, r2.status_code)
            self.assertEqual(4, r1_data['_id']['execution_id'])
            self.assertEqual(1, r2_data['_id']['execution_id'])
            self.assertIsNone(r1_data['modules_with_pending_work'])
            self.assertDictEqual({'module4': {}}, r2_data['modules_with_pending_work'])
        except Exception as e:
            main._stats_collection.remove_all()
            raise e

    @mock.patch('api.main._user', {'_id': 'test_user3', 'token': 'test_token_with_no_scope', 'scope': None})
    @mock.patch('api.main._AppTokenAuth.authorized', Mock(return_value=True))
    def test_calls_fail_if_token_has_no_scope(self):
        # An authenticated token without a scope gets 403 on scoped endpoints.
        r = self.app.get('/modules', headers={'Authorization': 'Bearer test_token_with_no_scope'})
        self.assertEqual(403, r.status_code)
        r = self.app.get('/data/module', headers={'Authorization': 'Bearer test_token_with_no_scope'})
        self.assertEqual(403, r.status_code)
        r = self.app.get('/executionStats', headers={'Authorization': 'Bearer test_token_with_no_scope'})
        self.assertEqual(403, r.status_code)
        # Scope does not affect the open endpoints
        r = self.app.get('/alive', headers={'Authorization': 'Bearer test_token_with_no_scope'})
        self.assertEqual(200, r.status_code)

    def test_calls_to_endpoints_which_use_statistics_collection_use_the_same_MongoDBCollection(self):
        # The module-level stats collection must not be rebound per request.
        self.app.get('/executionStats', headers={'Authorization': 'Bearer test_token'})
        collection = main._stats_collection
        self.app.get('/modules', headers={'Authorization': 'Bearer test_token'})
        collection2 = main._stats_collection
        self.app.get('/pendingWork/module/', headers={'Authorization': 'Bearer test_token'})
        collection3 = main._stats_collection
        self.assertIs(collection, collection2)
        self.assertIs(collection2, collection3)

    # --- /pendingWork endpoint ---

    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    @mock.patch('api.main._stats_collection._collection.find_one', Mock(side_effect=[{'_id': 'aggregated',
                'last_execution_id': 1}, {"modules_with_pending_work": {'module': {'saved_elements': 100}},
                "execution_id": 1}]))
    def test_pending_work_endpoint_last_execution_module_had_pending_work_and_data_saved(self):
        r = self.app.get('/pendingWork/module/', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertDictEqual({'pending_work': True, 'saved_elements': 100}, data)

    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    @mock.patch('api.main._stats_collection._collection.find_one', Mock(side_effect=[{'_id': 'aggregated',
                'last_execution_id': 1}, {"modules_with_pending_work": {'module': {'saved_elements': 0}},
                "execution_id": 1}]))
    def test_pending_work_endpoint_last_execution_module_had_pending_work_but_data_not_saved(self):
        r = self.app.get('/pendingWork/module/', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertDictEqual({'pending_work': True, 'saved_elements': 0}, data)

    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    @mock.patch('api.main._stats_collection._collection.find_one', Mock(side_effect=[{'_id': 'aggregated',
                'last_execution_id': 1}, {"modules_with_pending_work": {}, "execution_id": 1}]))
    def test_pending_work_endpoint_last_execution_module_no_pending_work(self):
        r = self.app.get('/pendingWork/module/', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(200, r.status_code)
        self.assertDictEqual({'pending_work': False, 'saved_elements': None}, data)

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(return_value=None))
    @mock.patch('api.main._get_module_names', Mock(return_value=[]))
    def test_pending_work_endpoint_last_execution_and_subsystem_not_executed(self):
        r = self.app.get('/pendingWork/module/', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(404, r.status_code)

    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    def test_pending_work_endpoint_with_execution_id_invalid_type(self):
        r = self.app.get('/pendingWork/module?executionId=foo', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r.status_code)

    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    def test_pending_work_endpoint_with_execution_id_negative(self):
        r = self.app.get('/pendingWork/module?executionId=-1', headers={'Authorization': 'Bearer test_token'})
        self.assertEqual(400, r.status_code)

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(side_effect=[None, {'_id': 'aggregated',
                'last_execution_id': 1}]))
    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    def test_pending_work_endpoint_with_execution_id_non_existing_but_subsystem_executed(self):
        r = self.app.get('/pendingWork/module?executionId=2', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(404, r.status_code)
        self.assertEqual(1, data['last_execution_id'])

    @mock.patch('api.main._stats_collection._collection.find_one', Mock(return_value=None))
    @mock.patch('api.main._get_module_names', Mock(return_value=['module']))
    def test_pending_work_endpoint_with_execution_id_non_existing_and_subsystem_not_executed(self):
        r = self.app.get('/pendingWork/module?executionId=2', headers={'Authorization': 'Bearer test_token'})
        data = json.loads(r.get_data().decode('utf-8'))
        self.assertEqual(404, r.status_code)
        self.assertIsNone(data.get('last_execution_id'))
| 56.454545
| 120
| 0.682012
| 2,660
| 21,114
| 5.106391
| 0.088346
| 0.068468
| 0.039019
| 0.094972
| 0.821468
| 0.800928
| 0.786719
| 0.754399
| 0.71641
| 0.671354
| 0
| 0.018147
| 0.172682
| 21,114
| 373
| 121
| 56.605898
| 0.759446
| 0.008194
| 0
| 0.474843
| 0
| 0
| 0.261763
| 0.097779
| 0
| 0
| 0
| 0
| 0.251572
| 1
| 0.132075
| false
| 0.012579
| 0.025157
| 0
| 0.160377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9748343c503064d72e90da2b87d1b59c31fe37c2
| 31
|
py
|
Python
|
rosetta/__init__.py
|
UnitedLexCorp/rosetta
|
7acde4547b8a3b88c8f1e62bfcbb2aed870b104c
|
[
"BSD-3-Clause"
] | 132
|
2015-01-15T22:06:19.000Z
|
2021-11-25T08:31:46.000Z
|
rosetta/__init__.py
|
UnitedLexCorp/rosetta
|
7acde4547b8a3b88c8f1e62bfcbb2aed870b104c
|
[
"BSD-3-Clause"
] | 16
|
2015-01-02T21:51:02.000Z
|
2017-11-15T21:28:51.000Z
|
rosetta/__init__.py
|
UnitedLexCorp/rosetta
|
7acde4547b8a3b88c8f1e62bfcbb2aed870b104c
|
[
"BSD-3-Clause"
] | 36
|
2015-01-01T17:54:21.000Z
|
2021-02-23T12:11:48.000Z
|
from rosetta.text.api import *
| 15.5
| 30
| 0.774194
| 5
| 31
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
979a00fe5e4d9d2458b1bccdc3913f123575adcd
| 12,731
|
py
|
Python
|
graph_as923.py
|
tanupoo/lorawan_toa
|
57ff520583cd3c06c6918a2763471c1edba4dc47
|
[
"MIT"
] | 22
|
2018-01-03T05:45:19.000Z
|
2021-04-08T02:27:26.000Z
|
graph_as923.py
|
radiojitter/lorawan_toa
|
fb1ed3b47b3b5cc3452d10a03b65f150f42009fb
|
[
"MIT"
] | 2
|
2019-05-05T10:33:12.000Z
|
2019-05-10T08:10:24.000Z
|
graph_as923.py
|
radiojitter/lorawan_toa
|
fb1ed3b47b3b5cc3452d10a03b65f150f42009fb
|
[
"MIT"
] | 17
|
2017-09-30T13:48:28.000Z
|
2021-06-22T21:37:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import matplotlib.pyplot as plt
from lorawan_toa import *
def get_line(list_size, n_sf, bw=125):
    """Return the time-on-air (t_packet, ms) for every payload size in
    *list_size*, at spreading factor *n_sf* and bandwidth *bw* kHz."""
    toa_values = []
    for size in list_size:
        toa_values.append(get_toa(size, n_sf, n_bw=bw)["t_packet"])
    return toa_values
#########
# Figure: ToA versus PHY payload size for each SF at BW=125 kHz.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("SF and ToA (BW=125 kHz)")
x = range(0, 255)
# One curve per spreading factor, slowest (SF12) first.
for sf, color, name in ((12, "b", "SF12"), (11, "g", "SF11"), (10, "k", "SF10"),
                        (9, "c", "SF9"), (8, "m", "SF8"), (7, "y", "SF7")):
    ax.plot(x, get_line(x, sf), color + "-", label=name, linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/lora-toa-125.png")
#########
# Figure: AS923 ToA when the dwell-time limit is ignored.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("AS923 No DwellTime")
x = range(0, 255)
# Faint full-range background curves.
for sf, color in ((12, "b"), (11, "g"), (10, "k"), (9, "c"), (8, "m"), (7, "y")):
    ax.plot(x, get_line(x, sf), color + "-", linewidth=3, alpha=0.05)
# no dwellTime consideration
for sf, max_size, color, name in ((12, 59, "b", "SF12"), (11, 59, "g", "SF11"),
                                  (10, 59, "k", "SF10"), (9, 123, "c", "SF9"),
                                  (8, 250, "m", "SF8"), (7, 250, "y", "SF7")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf), color + "-", label=name,
            linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-without-dwelltime.png")
#########
# Figure: AS923 ToA with the 400 ms dwell-time limit applied.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("AS923 DwellTime 400ms")
x = range(0, 255)
# Faint full-range background curves.
for sf, color in ((12, "b"), (11, "g"), (10, "k"), (9, "c"), (8, "m"), (7, "y")):
    ax.plot(x, get_line(x, sf), color + "-", linewidth=3, alpha=0.05)
# required dwellTime consideration
# SF12/SF11 fit no payload within 400 ms; plot a dummy point only to get
# a legend entry for them.
ax.plot([0], [0], "b-", label="SF12", linewidth=3, alpha=1)
# BUGFIX: the SF11 legend entry was drawn with "c-" (cyan), which mismatched
# the green SF11 background curve and collided with SF9's cyan; use "g-" as
# every other figure in this script does.
ax.plot([0], [0], "g-", label="SF11", linewidth=3, alpha=1)
for sf, max_size, color, name in ((10, 19, "k", "SF10"), (9, 61, "c", "SF9"),
                                  (8, 133, "m", "SF8"), (7, 250, "y", "SF7")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf), color + "-", label=name,
            linewidth=3, alpha=1)
# Horizontal marker at the 400 ms dwell-time limit.
ax.plot(x, [400 for i in range(0, 255)], "r,", linewidth=1, alpha=0.7)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-with-dwelltime.png")
#########
# Figure: AS923 summary — faint full curves, mid-weight no-dwell curves,
# bold dwell-time-limited curves, plus the SF7/250kHz option.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("AS923")
x = range(0, 255)
for sf, color in ((12, "b"), (11, "g"), (10, "k"), (9, "c"), (8, "m"), (7, "y")):
    ax.plot(x, get_line(x, sf), color + "-", linewidth=3, alpha=0.05)
# no dwellTime consideration
for sf, max_size, color in ((12, 59, "b"), (11, 59, "g"), (10, 59, "k"),
                            (9, 123, "c"), (8, 250, "m"), (7, 250, "y")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf), color + "-", linewidth=1.2, alpha=0.7)
# required dwellTime consideration
# SF12/SF11 fit no payload under the dwell limit; dummy points give legend entries.
ax.plot([0], [0], "b-", label="SF12/125kHz", linewidth=3, alpha=1)
ax.plot([0], [0], "g-", label="SF11/125kHz", linewidth=3, alpha=1)
for sf, max_size, color, name in ((10, 19, "k", "SF10/125kHz"),
                                  (9, 61, "c", "SF9/125kHz"),
                                  (8, 133, "m", "SF8/125kHz"),
                                  (7, 250, "y", "SF7/125kHz")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf), color + "-", label=name,
            linewidth=3, alpha=1)
sizes = mpsrange(8, 250)
ax.plot(sizes, get_line(sizes, 7, bw=250), "b--", label="SF7/250kHz",
        linewidth=3, alpha=0.5)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-toa.png")
#########
# Figure: AS923 curves against the ARIB STD-T108 time limits.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("AS923 and ARIB STD-T108")
x = range(0, 255)
for sf, color in ((12, "b"), (11, "g"), (10, "k"), (9, "c"), (8, "m"), (7, "y")):
    ax.plot(x, get_line(x, sf), color + "-", linewidth=3, alpha=0.05)
# no dwellTime consideration
for sf, max_size, color in ((12, 59, "b"), (11, 59, "g"), (10, 59, "k"),
                            (9, 123, "c"), (8, 250, "m"), (7, 250, "y")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf), color + "-", linewidth=1.2, alpha=0.7)
# required dwellTime consideration
# SF12/SF11 fit no payload under the dwell limit; dummy points give legend entries.
ax.plot([0], [0], "b-", label="SF12/125kHz", linewidth=3, alpha=1)
ax.plot([0], [0], "g-", label="SF11/125kHz", linewidth=3, alpha=1)
for sf, max_size, color, name in ((10, 19, "k", "SF10/125kHz"),
                                  (9, 61, "c", "SF9/125kHz"),
                                  (8, 133, "m", "SF8/125kHz"),
                                  (7, 250, "y", "SF7/125kHz")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf), color + "-", label=name,
            linewidth=3, alpha=1)
sizes = mpsrange(8, 250)
ax.plot(sizes, get_line(sizes, 7, bw=250), "b--", label="SF7/250kHz",
        linewidth=3, alpha=0.5)
# Red dashed reference lines at the regulatory time limits (ms).
for limit in (400, 200, 4000):
    ax.plot(x, [limit for i in range(0, 255)], "r--", linewidth=2, alpha=0.7)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper left", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-with-arib180.png")
#########
# Figure: SF12 — AS923 (125 kHz) versus other-region 500 kHz operation.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("AS923 vs Others (SF12)")
x = range(0, 255)
ax.plot(x, get_line(x, 12), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 12, bw=500), "r-", linewidth=3, alpha=0.05)
# no dwellTime consideration
sizes = mpsrange(8, 59)
ax.plot(sizes, get_line(sizes, 12), "b-", label="SF12/125kHz",
        linewidth=3.0, alpha=1)
# LoRa: SF12 / 500 kHz
sizes = mpsrange(8, 61)
ax.plot(sizes, get_line(sizes, 12, bw=500), "r-", label="SF12/500kHz",
        linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="best", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-vs-others-sf12.png")
#########
# Figure: SF10 — AS923 (125 kHz) versus other-region 500 kHz operation.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("AS923 vs Others (SF10)")
x = range(0, 255)
ax.plot(x, get_line(x, 10), "b-", linewidth=3, alpha=0.05)
ax.plot(x, get_line(x, 10, bw=500), "r-", linewidth=3, alpha=0.05)
# no dwellTime consideration
sizes = mpsrange(8, 59)
ax.plot(sizes, get_line(sizes, 10), "b-", label="SF10/125kHz",
        linewidth=3.0, alpha=1)
# LoRa: SF10 / 500 kHz
sizes = mpsrange(8, 250)
ax.plot(sizes, get_line(sizes, 10, bw=500), "r-", label="SF10/500kHz",
        linewidth=3, alpha=1)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="best", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/as923-vs-others-sf10.png")
#########
# Figure: full LoRaWAN ToA chart across 125/250/500 kHz data rates.
fig = plt.figure(num=None, figsize=(16, 8), facecolor='w', edgecolor='k')
ax = fig.add_subplot(1, 1, 1)
ax.set_title("LoRaWAN")
x = range(0, 255)
for sf, color in ((12, "b"), (11, "g"), (10, "k"), (9, "c"), (8, "m"), (7, "y")):
    ax.plot(x, get_line(x, sf), color + "-", linewidth=3, alpha=0.05)
# SF BW bit rate Max. MACPayload
# 12 125 250 59
# 11 125 440 59
# 10 125 980 59
# 9 125 1760 123
# 8 125 3125 250
# 7 125 5470 250
for sf, max_size, color, name in ((12, 59, "b", "SF12/125kHz"),
                                  (11, 59, "g", "SF11/125kHz"),
                                  (10, 59, "k", "SF10/125kHz"),
                                  (9, 123, "c", "SF9/125kHz"),
                                  (8, 250, "m", "SF8/125kHz"),
                                  (7, 250, "y", "SF7/125kHz")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf), color + "-", label=name, linewidth=2.0)
# SF BW bit rate Max. MACPayload
# 7 250 11000 250
sizes = mpsrange(8, 250)
ax.plot(sizes, get_line(sizes, 7, bw=250), "b-.", label="SF7/250kHz",
        linewidth=2.0)
# SF BW bit rate Max. MACPayload
# 12 500 980 61
# 11 500 1760 137
# 10 500 3900 250
# 9 500 7000 250
# 8 500 12500 250
# 7 500 21900 250
for sf, max_size, color, name in ((12, 61, "b", "SF12/500kHz"),
                                  (11, 137, "g", "SF11/500kHz"),
                                  (10, 250, "k", "SF10/500kHz"),
                                  (9, 250, "c", "SF9/500kHz"),
                                  (8, 250, "m", "SF8/500kHz"),
                                  (7, 250, "y", "SF7/500kHz")):
    sizes = mpsrange(8, max_size)
    ax.plot(sizes, get_line(sizes, sf, bw=500), color + "--", label=name,
            linewidth=2.0)
ax.set_xlim(0, 260)
ax.set_ylim(0, 5000)
ax.set_xlabel("PHY Payload Size (Byte)")
ax.set_ylabel("Time on Air (ms)")
ax.grid(True)
ax.legend(loc="upper right", fancybox=True, shadow=True)
fig.tight_layout()
plt.show()
fig.savefig("image/lorawan-toa.png")
| 37.66568
| 97
| 0.636242
| 2,353
| 12,731
| 3.374841
| 0.065448
| 0.074802
| 0.128447
| 0.092558
| 0.928221
| 0.923813
| 0.922302
| 0.913361
| 0.906939
| 0.89762
| 0
| 0.126485
| 0.133689
| 12,731
| 337
| 98
| 37.777448
| 0.593526
| 0.05734
| 0
| 0.673554
| 0
| 0
| 0.114194
| 0.016313
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004132
| false
| 0
| 0.016529
| 0.004132
| 0.024793
| 0.004132
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
97ae848b114ecd7f15ed548822e71b264b2b1846
| 88
|
py
|
Python
|
ztlearn/dl/__init__.py
|
jefkine/zeta-learn
|
04388f90093b52f5df2f334c898f3a1224f5a13f
|
[
"MIT"
] | 30
|
2018-03-12T19:16:27.000Z
|
2021-12-16T05:32:38.000Z
|
ztlearn/dl/__init__.py
|
jefkine/zeta-learn
|
04388f90093b52f5df2f334c898f3a1224f5a13f
|
[
"MIT"
] | 4
|
2018-06-13T03:47:15.000Z
|
2018-11-05T21:33:34.000Z
|
ztlearn/dl/__init__.py
|
jefkine/zeta-learn
|
04388f90093b52f5df2f334c898f3a1224f5a13f
|
[
"MIT"
] | 4
|
2018-04-30T07:42:47.000Z
|
2022-01-31T11:35:53.000Z
|
# -*- coding: utf-8 -*-
# import packages(s)
from . import layers
from . import models
| 14.666667
| 23
| 0.647727
| 12
| 88
| 4.75
| 0.75
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.193182
| 88
| 5
| 24
| 17.6
| 0.788732
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
97c76b575796c8154fd5fd0278b2ba245cedfe48
| 36
|
py
|
Python
|
pureskillgg_dsdk/ds_models/__init__.py
|
pureskillgg/dsdk
|
2e91a815dc06cc37ac8272d87014301c64c1b46e
|
[
"MIT"
] | null | null | null |
pureskillgg_dsdk/ds_models/__init__.py
|
pureskillgg/dsdk
|
2e91a815dc06cc37ac8272d87014301c64c1b46e
|
[
"MIT"
] | 1
|
2022-03-31T15:16:17.000Z
|
2022-03-31T19:38:05.000Z
|
pureskillgg_dsdk/ds_models/__init__.py
|
pureskillgg/dsdk
|
2e91a815dc06cc37ac8272d87014301c64c1b46e
|
[
"MIT"
] | null | null | null |
from .model import create_ds_models
| 18
| 35
| 0.861111
| 6
| 36
| 4.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 36
| 1
| 36
| 36
| 0.90625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8ae24344b66c1646de2cf34b2ae76615987a0811
| 107
|
py
|
Python
|
gubbing/models/networks/temporal_net.py
|
mychiux413/gubbing
|
45856d242fc217af35109baa8432c9979076dc40
|
[
"MIT"
] | null | null | null |
gubbing/models/networks/temporal_net.py
|
mychiux413/gubbing
|
45856d242fc217af35109baa8432c9979076dc40
|
[
"MIT"
] | null | null | null |
gubbing/models/networks/temporal_net.py
|
mychiux413/gubbing
|
45856d242fc217af35109baa8432c9979076dc40
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import layers, Model
class TemporalNetwork(Model):
    """Stub tf.keras Model subclass; no layers or call() are defined yet."""
    pass
| 17.833333
| 42
| 0.794393
| 14
| 107
| 6.071429
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.158879
| 107
| 5
| 43
| 21.4
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
8af17dfb07eb6bba9c4f97673c738e0249b39530
| 6,253
|
py
|
Python
|
src/epython/poke/eprequests.py
|
elibs/epython
|
8e490976f510ab6393739c42f9981495503fb454
|
[
"MIT"
] | null | null | null |
src/epython/poke/eprequests.py
|
elibs/epython
|
8e490976f510ab6393739c42f9981495503fb454
|
[
"MIT"
] | 1
|
2021-07-19T17:20:49.000Z
|
2021-07-19T17:20:49.000Z
|
src/epython/poke/eprequests.py
|
elibs/epython
|
8e490976f510ab6393739c42f9981495503fb454
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Description:
This module houses the Poke class, and the wrapped requests methods.
Author:
Ray Gomez
Date:
3/16/21
"""
import requests
from epython.environment import EPYTHON_REQUEST_ID, EPYTHON_REQUEST_RETRIES, EPYTHON_REQUEST_INTERVAL
from epython.handlers import basic_retry_handler
# Default headers applied when a caller passes headers=None; the request ID
# header lets a request be traced through downstream services.
POKE_HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
    "X-Request-ID": EPYTHON_REQUEST_ID
}
# These are the most common requests exceptions that will trigger a retry
# (this is explicit to show what is actually being retried on)
COMMON_REQUEST_EXCEPTIONS = (requests.exceptions.RequestException,
                             requests.exceptions.HTTPError,
                             requests.exceptions.ConnectionError,
                             requests.exceptions.Timeout,
                             requests.exceptions.ConnectTimeout,
                             requests.exceptions.ReadTimeout)
def get(url, params=None, data=None, auth=None, headers=None, timeout=None,
        verify=None, retries=EPYTHON_REQUEST_RETRIES, interval=EPYTHON_REQUEST_INTERVAL, **kwargs):
    """Perform an HTTP GET, retrying on common transient request failures.

    Args:
        url (str): Target URL.
        params (dict): Query-string parameters.
        data (obj): Body payload (dict, list of tuples, bytes, or file-like).
        auth (tuple): Basic/Digest/Custom HTTP auth credentials.
        headers (dict): Request headers; defaults to POKE_HEADERS when None.
        timeout (int): Seconds to wait for the server to send data.
        verify (bool): Whether to verify the server's TLS certificate.
        retries (int): Maximum number of retry attempts.
        interval (int): Seconds to wait between retries.

    Returns:
        (obj): The vanilla response object, or a ResponseProcessor if one was requested.
    """
    headers = POKE_HEADERS if headers is None else headers

    @basic_retry_handler(COMMON_REQUEST_EXCEPTIONS, retries=retries, interval=interval)
    def _send():
        return requests.get(url, params=params, data=data, auth=auth,
                            headers=headers, timeout=timeout, verify=verify,
                            **kwargs)

    return _send()
def put(url, params=None, data=None, auth=None, headers=None, timeout=None,
        verify=None, retries=EPYTHON_REQUEST_RETRIES, interval=EPYTHON_REQUEST_INTERVAL, **kwargs):
    """Perform an HTTP PUT, retrying on common transient request failures.

    Args:
        url (str): Target URL.
        params (dict): Query-string parameters.
        data (obj): Body payload (dict, list of tuples, bytes, or file-like).
        auth (tuple): Basic/Digest/Custom HTTP auth credentials.
        headers (dict): Request headers; defaults to POKE_HEADERS when None.
        timeout (int): Seconds to wait for the server to send data.
        verify (bool): Whether to verify the server's TLS certificate.
        retries (int): Maximum number of retry attempts.
        interval (int): Seconds to wait between retries.

    Returns:
        (obj): The vanilla response object, or a ResponseProcessor if one was requested.
    """
    headers = POKE_HEADERS if headers is None else headers

    @basic_retry_handler(COMMON_REQUEST_EXCEPTIONS, retries=retries, interval=interval)
    def _send():
        return requests.put(url, params=params, data=data, auth=auth,
                            headers=headers, timeout=timeout, verify=verify,
                            **kwargs)

    return _send()
def post(url, params=None, data=None, auth=None, headers=None, timeout=None,
         verify=None, retries=EPYTHON_REQUEST_RETRIES, interval=EPYTHON_REQUEST_INTERVAL, **kwargs):
    """Perform an HTTP POST, retrying on common transient request failures.

    Args:
        url (str): Target URL.
        params (dict): Query-string parameters.
        data (obj): Body payload (dict, list of tuples, bytes, or file-like).
        auth (tuple): Basic/Digest/Custom HTTP auth credentials.
        headers (dict): Request headers; defaults to POKE_HEADERS when None.
        timeout (int): Seconds to wait for the server to send data.
        verify (bool): Whether to verify the server's TLS certificate.
        retries (int): Maximum number of retry attempts.
        interval (int): Seconds to wait between retries.

    Returns:
        (obj): The vanilla response object, or a ResponseProcessor if one was requested.
    """
    headers = POKE_HEADERS if headers is None else headers

    @basic_retry_handler(COMMON_REQUEST_EXCEPTIONS, retries=retries, interval=interval)
    def _send():
        return requests.post(url, params=params, data=data, auth=auth,
                             headers=headers, timeout=timeout, verify=verify,
                             **kwargs)

    return _send()
def delete(url, params=None, data=None, auth=None, headers=None, timeout=None,
           verify=None, retries=EPYTHON_REQUEST_RETRIES, interval=EPYTHON_REQUEST_INTERVAL, **kwargs):
    """Perform an HTTP DELETE, retrying on common transient request failures.

    Args:
        url (str): Target URL.
        params (dict): Query-string parameters.
        data (obj): Body payload (dict, list of tuples, bytes, or file-like).
        auth (tuple): Basic/Digest/Custom HTTP auth credentials.
        headers (dict): Request headers; defaults to POKE_HEADERS when None.
        timeout (int): Seconds to wait for the server to send data.
        verify (bool): Whether to verify the server's TLS certificate.
        retries (int): Maximum number of retry attempts.
        interval (int): Seconds to wait between retries.

    Returns:
        (obj): The vanilla response object, or a ResponseProcessor if one was requested.
    """
    headers = POKE_HEADERS if headers is None else headers

    @basic_retry_handler(COMMON_REQUEST_EXCEPTIONS, retries=retries, interval=interval)
    def _send():
        return requests.delete(url, params=params, data=data, auth=auth,
                               headers=headers, timeout=timeout, verify=verify,
                               **kwargs)

    return _send()
| 42.537415
| 104
| 0.674716
| 839
| 6,253
| 4.951132
| 0.154946
| 0.02311
| 0.015407
| 0.021184
| 0.834136
| 0.834136
| 0.834136
| 0.834136
| 0.834136
| 0.834136
| 0
| 0.001278
| 0.249
| 6,253
| 146
| 105
| 42.828767
| 0.883305
| 0.515433
| 0
| 0.54
| 0
| 0
| 0.02227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.06
| 0.08
| 0.38
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c107527c49b130deb750446b43d6dc4fb1a68c6a
| 136
|
py
|
Python
|
LOBDeepPP/__init__.py
|
mariussterling/LOBDeepPP_code
|
010782f8db9a745940753f49d953361c32ee1190
|
[
"MIT"
] | 1
|
2021-07-09T08:40:58.000Z
|
2021-07-09T08:40:58.000Z
|
LOBDeepPP/__init__.py
|
mariussterling/LOBDeepPP_code
|
010782f8db9a745940753f49d953361c32ee1190
|
[
"MIT"
] | null | null | null |
LOBDeepPP/__init__.py
|
mariussterling/LOBDeepPP_code
|
010782f8db9a745940753f49d953361c32ee1190
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from . import LOBDeepPP_class
from . import LOBDeepPP_model
from . import LOBDeepPP_params_files
| 27.2
| 38
| 0.860294
| 18
| 136
| 6
| 0.5
| 0.277778
| 0.527778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 136
| 4
| 39
| 34
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
c1190beb19fed97ac40d1f214a9763ac22f66996
| 111
|
py
|
Python
|
app/main/__init__.py
|
krayzpipes/ACE-1
|
138bf2aecad949f0b72b66519c32893df033de39
|
[
"Apache-2.0"
] | 28
|
2018-08-08T11:57:31.000Z
|
2022-01-12T23:06:18.000Z
|
app/main/__init__.py
|
krayzpipes/ACE-1
|
138bf2aecad949f0b72b66519c32893df033de39
|
[
"Apache-2.0"
] | 108
|
2018-08-08T12:35:06.000Z
|
2019-07-19T22:57:19.000Z
|
app/main/__init__.py
|
krayzpipes/ACE-1
|
138bf2aecad949f0b72b66519c32893df033de39
|
[
"Apache-2.0"
] | 16
|
2018-08-03T18:48:00.000Z
|
2021-11-09T00:35:35.000Z
|
# vim: sw=4:ts=4:et
from flask import Blueprint
main = Blueprint('main', __name__)  # blueprint named "main"; the trailing `from . import views, errors` registers its routes/handlers
from . import views, errors
| 22.2
| 34
| 0.72973
| 18
| 111
| 4.277778
| 0.722222
| 0.337662
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 0.144144
| 111
| 4
| 35
| 27.75
| 0.789474
| 0.153153
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
c19a7b56de4de890b7430283340bc504ae5d4af8
| 2,969
|
py
|
Python
|
gwlfe/MultiUse_Fxns/Erosion/ErosSum.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | null | null | null |
gwlfe/MultiUse_Fxns/Erosion/ErosSum.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 6
|
2018-07-24T22:46:28.000Z
|
2018-07-29T19:13:09.000Z
|
gwlfe/MultiUse_Fxns/Erosion/ErosSum.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 1
|
2018-07-24T18:22:01.000Z
|
2018-07-24T18:22:01.000Z
|
from numpy import sum
from numpy import zeros
from gwlfe.Memoization import memoize
from gwlfe.MultiUse_Fxns.Erosion.Erosion_1 import Erosion_1
from gwlfe.MultiUse_Fxns.Erosion.Erosion_1 import Erosion_1_f
@memoize
def ErosSum(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,
            AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN, UnsatStor_0, KV, PcntET,
            DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef, Qretention, PctAreaInfil,
            n25b, Landuse, TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
            NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope, SedAAdjust,
            StreamLength, n42b, n46c, n85d, AgLength, n42, n45, n85, UrbBankStab,
            SedDelivRatio_0, Acoef, KF, LS, C, P):
    """Return a (NYrs,) array: per-year erosion totals, each entry being the
    sum of that year's 12 monthly Erosion_1 values.
    """
    # Hoist the Erosion_1 call out of the loops.  The original re-invoked it
    # for every (year, month) pair and relied solely on @memoize to avoid
    # recomputation; a single call is equivalent and skips NYrs*12 cache
    # lookups keyed on this very long argument tuple.
    erosion = Erosion_1(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,
                        AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN, UnsatStor_0, KV, PcntET,
                        DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef, Qretention, PctAreaInfil,
                        n25b, Landuse, TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
                        NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope, SedAAdjust,
                        StreamLength, n42b, n46c, n85d, AgLength, n42, n45, n85, UrbBankStab,
                        SedDelivRatio_0, Acoef, KF, LS, C, P)
    result = zeros((NYrs,))
    for Y in range(NYrs):
        # Accumulate month-by-month in the original order so floating-point
        # results stay bit-identical to the previous implementation.
        for i in range(12):
            result[Y] += erosion[Y][i]
    return result
@memoize
def ErosSum_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,
              AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN, UnsatStor_0, KV, PcntET,
              DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef, Qretention, PctAreaInfil,
              n25b, Landuse, TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
              NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope, SedAAdjust,
              StreamLength, n42b, n46c, n85d, AgLength, n42, n45, n85, UrbBankStab,
              SedDelivRatio_0, Acoef, KF, LS, C, P):
    """Vectorized counterpart of ErosSum: collapse the month axis of the
    Erosion_1_f output with one numpy sum, yielding per-year totals.
    """
    monthly_erosion = Erosion_1_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,
                                  AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN, UnsatStor_0, KV, PcntET,
                                  DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef, Qretention, PctAreaInfil,
                                  n25b, Landuse, TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
                                  NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope, SedAAdjust,
                                  StreamLength, n42b, n46c, n85d, AgLength, n42, n45, n85, UrbBankStab,
                                  SedDelivRatio_0, Acoef, KF, LS, C, P)
    # Axis 1 is the month dimension; summing it leaves one value per year.
    return sum(monthly_erosion, axis=1)
| 63.170213
| 118
| 0.630852
| 324
| 2,969
| 5.635802
| 0.243827
| 0.026287
| 0.03724
| 0.054765
| 0.88609
| 0.88609
| 0.88609
| 0.88609
| 0.88609
| 0.88609
| 0
| 0.047865
| 0.28225
| 2,969
| 46
| 119
| 64.543478
| 0.80901
| 0
| 0
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.125
| 0.025
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
c1f478684234a287a64001cf0f99063dadc5f8af
| 31
|
py
|
Python
|
src/api/log/__init__.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 216
|
2015-01-05T12:48:10.000Z
|
2022-03-08T00:12:23.000Z
|
src/api/log/__init__.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 55
|
2015-02-28T12:10:26.000Z
|
2020-11-18T17:45:16.000Z
|
src/api/log/__init__.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 34
|
2015-05-02T15:15:10.000Z
|
2020-06-15T19:20:37.000Z
|
import rebase
import partition
| 10.333333
| 16
| 0.870968
| 4
| 31
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 2
| 17
| 15.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
de025c4b340cb55000bf3bafcb10a20043da87e2
| 29
|
py
|
Python
|
plugins/pelican-jinja2content/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 17
|
2020-07-16T10:46:42.000Z
|
2022-02-04T13:33:10.000Z
|
plugins/pelican-jinja2content/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 197
|
2016-01-01T18:26:21.000Z
|
2019-11-14T03:36:56.000Z
|
plugins/pelican-jinja2content/__init__.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 20
|
2019-10-16T20:41:33.000Z
|
2021-12-18T22:00:06.000Z
|
from .jinja2content import *
| 14.5
| 28
| 0.793103
| 3
| 29
| 7.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.137931
| 29
| 1
| 29
| 29
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a9a9700acbe8d4b83232569e31a1d7260b1f8b80
| 41
|
py
|
Python
|
visualization/visualizer/__init__.py
|
zhigangjiang/LGT-Net
|
d9a619158b2dc66a50c100e7fa7e491f1df16fd7
|
[
"MIT"
] | 11
|
2022-03-03T17:49:33.000Z
|
2022-03-25T11:23:11.000Z
|
visualization/visualizer/__init__.py
|
zhigangjiang/LGT-Net
|
d9a619158b2dc66a50c100e7fa7e491f1df16fd7
|
[
"MIT"
] | null | null | null |
visualization/visualizer/__init__.py
|
zhigangjiang/LGT-Net
|
d9a619158b2dc66a50c100e7fa7e491f1df16fd7
|
[
"MIT"
] | 1
|
2022-03-04T06:39:50.000Z
|
2022-03-04T06:39:50.000Z
|
"""
@Date: 2021/11/06
@description:
"""
| 8.2
| 17
| 0.560976
| 5
| 41
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228571
| 0.146341
| 41
| 4
| 18
| 10.25
| 0.428571
| 0.756098
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e713a520f0192970c234d7de1f2b16e666ef2cf2
| 327
|
py
|
Python
|
hubspot/crm/products/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 117
|
2020-04-06T08:22:53.000Z
|
2022-03-18T03:41:29.000Z
|
hubspot/crm/products/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 62
|
2020-04-06T16:21:06.000Z
|
2022-03-17T16:50:44.000Z
|
hubspot/crm/products/api/__init__.py
|
fakepop/hubspot-api-python
|
f04103a09f93f5c26c99991b25fa76801074f3d3
|
[
"Apache-2.0"
] | 45
|
2020-04-06T16:13:52.000Z
|
2022-03-30T21:33:17.000Z
|
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.crm.products.api.associations_api import AssociationsApi
from hubspot.crm.products.api.basic_api import BasicApi
from hubspot.crm.products.api.batch_api import BatchApi
from hubspot.crm.products.api.search_api import SearchApi
| 32.7
| 69
| 0.847095
| 48
| 327
| 5.583333
| 0.4375
| 0.164179
| 0.208955
| 0.328358
| 0.373134
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003378
| 0.094801
| 327
| 9
| 70
| 36.333333
| 0.902027
| 0.125382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e71f44037092f5de35f466c2eff64005820fc873
| 52
|
py
|
Python
|
python/GMatElastoPlasticQPot3d/Cartesian3d.py
|
tdegeus/ElastoPlasticQPot3d
|
c61987f0dd001d218e067231e1a71b775815a849
|
[
"MIT"
] | null | null | null |
python/GMatElastoPlasticQPot3d/Cartesian3d.py
|
tdegeus/ElastoPlasticQPot3d
|
c61987f0dd001d218e067231e1a71b775815a849
|
[
"MIT"
] | 15
|
2018-11-13T08:44:45.000Z
|
2021-08-30T07:09:55.000Z
|
python/GMatElastoPlasticQPot3d/Cartesian3d.py
|
tdegeus/ElastoPlasticQPot3d
|
c61987f0dd001d218e067231e1a71b775815a849
|
[
"MIT"
] | null | null | null |
from ._GMatElastoPlasticQPot3d.Cartesian3d import *
| 26
| 51
| 0.865385
| 4
| 52
| 11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.076923
| 52
| 1
| 52
| 52
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e7e089613ccfe0324f946b6e7045fe1c57e1d223
| 11
|
py
|
Python
|
demo.py
|
OddBloke/RustPython
|
a40ef57ba4dddc18f9f5a9ba5224203f4a983ed6
|
[
"MIT"
] | null | null | null |
demo.py
|
OddBloke/RustPython
|
a40ef57ba4dddc18f9f5a9ba5224203f4a983ed6
|
[
"MIT"
] | null | null | null |
demo.py
|
OddBloke/RustPython
|
a40ef57ba4dddc18f9f5a9ba5224203f4a983ed6
|
[
"MIT"
] | null | null | null |
print(42)
| 3.666667
| 9
| 0.636364
| 2
| 11
| 3.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0.181818
| 11
| 2
| 10
| 5.5
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
99bb2949a0056b3440c6bc1efe0bb804fb4c07e2
| 37
|
py
|
Python
|
webhdfspy/__init__.py
|
fsouto/webhdfspy
|
562528580adfa0f06ea07e34ef7089f1e59532d9
|
[
"MIT"
] | null | null | null |
webhdfspy/__init__.py
|
fsouto/webhdfspy
|
562528580adfa0f06ea07e34ef7089f1e59532d9
|
[
"MIT"
] | null | null | null |
webhdfspy/__init__.py
|
fsouto/webhdfspy
|
562528580adfa0f06ea07e34ef7089f1e59532d9
|
[
"MIT"
] | null | null | null |
from .webhdfspy import WebHDFSClient
| 18.5
| 36
| 0.864865
| 4
| 37
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99f3f71812280b5943de738ea02e43f630b35e63
| 35
|
py
|
Python
|
src/container_build/__init__.py
|
solomem/sagemaker_lambda_pipeline
|
c94f6ef78a53dbdf9a4782e27b6440e058e1964f
|
[
"Apache-2.0"
] | 58
|
2020-04-02T22:41:04.000Z
|
2022-03-16T14:43:44.000Z
|
src/container_build/__init__.py
|
IronOnet/sagemaker-deep-demand-forecast
|
3c7081186146fa167b46cbe21715929f37079ada
|
[
"Apache-2.0"
] | 8
|
2020-10-12T08:05:32.000Z
|
2021-08-17T00:31:23.000Z
|
src/container_build/__init__.py
|
IronOnet/sagemaker-deep-demand-forecast
|
3c7081186146fa167b46cbe21715929f37079ada
|
[
"Apache-2.0"
] | 29
|
2020-04-03T01:43:03.000Z
|
2022-03-16T14:43:47.000Z
|
from .container_build import build
| 17.5
| 34
| 0.857143
| 5
| 35
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99f4e78fbfec4998ba7bba847a56b205c1839a37
| 199
|
py
|
Python
|
virtual/Lib/site-packages/pylint/test/functional/too_many_arguments.py
|
JamesKimari/pitch-one
|
aac9007716bf2e3b6446588a06508fac068f3d20
|
[
"MIT"
] | 35
|
2016-09-22T22:53:14.000Z
|
2020-02-13T15:12:21.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/functional/too_many_arguments.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | 32
|
2018-05-01T05:24:43.000Z
|
2022-03-11T23:20:39.000Z
|
virtual/lib/python3.6/site-packages/pylint/test/functional/too_many_arguments.py
|
evantoh/patient-management-system
|
6637eb1344775633759165260ed99843581c0e72
|
[
"Unlicense"
] | 88
|
2016-11-27T02:16:11.000Z
|
2020-02-28T05:10:26.000Z
|
# pylint: disable=missing-docstring
def stupid_function(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9): # [too-many-arguments]
return arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9
| 39.8
| 97
| 0.708543
| 29
| 199
| 4.827586
| 0.689655
| 0.114286
| 0.171429
| 0.228571
| 0.514286
| 0.514286
| 0.514286
| 0.514286
| 0.514286
| 0
| 0
| 0.106509
| 0.150754
| 199
| 4
| 98
| 49.75
| 0.721893
| 0.271357
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
823ba2b7444f6b352c9e118a02c9be2cc0bd039f
| 180
|
py
|
Python
|
utils/errors.py
|
DiNitride/gafbot
|
fdf0b8c89f3ac0b23681ce656ca08a3e8f26071d
|
[
"MIT"
] | 51
|
2016-10-05T18:05:17.000Z
|
2017-10-01T10:41:43.000Z
|
utils/errors.py
|
DiNitride/gafbot
|
fdf0b8c89f3ac0b23681ce656ca08a3e8f26071d
|
[
"MIT"
] | 6
|
2017-05-19T22:32:39.000Z
|
2018-10-14T18:12:12.000Z
|
utils/errors.py
|
DiNitride/gafbot
|
fdf0b8c89f3ac0b23681ce656ca08a3e8f26071d
|
[
"MIT"
] | 9
|
2016-10-08T07:11:47.000Z
|
2019-11-04T03:30:24.000Z
|
import discord
from discord.ext import commands
class UserBlacklisted(commands.CommandError):
"""
An exception when the user is blacklisted from the bot
"""
pass
| 18
| 58
| 0.722222
| 22
| 180
| 5.909091
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216667
| 180
| 9
| 59
| 20
| 0.921986
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
415d679dfdfe1e189c25d929013580ba1234f540
| 31
|
py
|
Python
|
simple_xntwist_ui/__init__.py
|
xn-twist/demo-ui
|
00f04add3a84b4e0f4ecf4e48063a9bb737f1814
|
[
"MIT"
] | null | null | null |
simple_xntwist_ui/__init__.py
|
xn-twist/demo-ui
|
00f04add3a84b4e0f4ecf4e48063a9bb737f1814
|
[
"MIT"
] | null | null | null |
simple_xntwist_ui/__init__.py
|
xn-twist/demo-ui
|
00f04add3a84b4e0f4ecf4e48063a9bb737f1814
|
[
"MIT"
] | null | null | null |
from . import simple_xntwist_ui
| 31
| 31
| 0.870968
| 5
| 31
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096774
| 31
| 1
| 31
| 31
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41741b8352830c8f1ee32187b482c56b03ca6098
| 15,221
|
py
|
Python
|
CrossLanguageOPMining-bert-3languageTrain-PGNAdapter/data/SRL.py
|
zenRRan/Chinese-ORL-with-Corpus-Translation
|
cf0e3db1c03c8f13f48ccd5ea1343d56e5513299
|
[
"Apache-2.0"
] | 1
|
2021-09-09T02:38:19.000Z
|
2021-09-09T02:38:19.000Z
|
CrossLanguageOPMining-bert-3languageTrain-PGNAdapter/data/SRL.py
|
zenRRan/Chinese-ORL-with-Corpus-Translation
|
cf0e3db1c03c8f13f48ccd5ea1343d56e5513299
|
[
"Apache-2.0"
] | null | null | null |
CrossLanguageOPMining-bert-3languageTrain-PGNAdapter/data/SRL.py
|
zenRRan/Chinese-ORL-with-Corpus-Translation
|
cf0e3db1c03c8f13f48ccd5ea1343d56e5513299
|
[
"Apache-2.0"
] | 1
|
2021-09-09T02:38:20.000Z
|
2021-09-09T02:38:20.000Z
|
# encoding:utf-8
class Word:
def __init__(self, id, form, label):
self.id = id
self.org_form = form
self.form = form.lower()
self.label = label
def __str__(self):
values = [str(self.id), self.org_form, self.label]
return '\t'.join(values)
class Sentence:
def __init__(self, words, bert_token=None, lang_id=None):
self.words = list(words)
self.length = len(self.words)
self.key_head = -1
self.key_start = -1
self.key_end = -1
self.key_label = ""
self.lang_id = lang_id
if bert_token is not None:
sentence_list = [word.org_form for word in self.words]
self.list_bert_indice, self.list_segments_id, self.list_piece_id = bert_token.bert_ids(sentence_list)
for idx in range(self.length):
if words[idx].label.endswith("-*"):
self.key_head = idx
self.key_label = words[idx].label[2:-2]
break
for idx in range(self.length):
cur_label = words[idx].label
if cur_label.startswith("B-"+self.key_label) or cur_label.startswith("S-"+self.key_label):
self.key_start = idx
if cur_label.startswith("E-"+self.key_label) or cur_label.startswith("S-"+self.key_label):
self.key_end = idx
def label_to_entity(labels):
length = len(labels)
entities = set()
idx = 0
while idx < length:
if labels[idx] == "O":
idx = idx + 1
elif labels[idx].startswith("B-"):
label = labels[idx][2:]
predict = False
if label.endswith("-*"):
label = label[0:-2]
predict = True
next_idx = idx + 1
end_idx = idx
while next_idx < length:
if labels[next_idx] == "O" or labels[next_idx].startswith("B-") \
or labels[next_idx].startswith("S-"):
break
next_label = labels[next_idx][2:]
if next_label.endswith("-*"):
next_label = next_label[0:-2]
predict = True
if next_label != label:
break
end_idx = next_idx
next_idx = next_idx + 1
if end_idx == idx:
new_label = "S-" + labels[idx][2:]
print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
if not predict:
entities.add("[%d,%d]%s"%(idx, end_idx, label))
idx = end_idx + 1
elif labels[idx].startswith("S-"):
label = labels[idx][2:]
predict = False
if label.endswith("-*"):
label = label[0:-2]
predict = True
if not predict:
entities.add("[%d,%d]%s"%(idx, idx, label))
idx = idx + 1
elif labels[idx].startswith("M-"):
new_label = "B-" + labels[idx][2:]
print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
else:
new_label = "S-" + labels[idx][2:]
print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
return entities
def normalize_labels(labels):
length = len(labels)
change = 0
normed_labels = []
for idx in range(length):
normed_labels.append(labels[idx])
idx = 0
while idx < length:
if labels[idx] == "O":
idx = idx + 1
elif labels[idx].startswith("B-"):
label = labels[idx][2:]
if label.endswith("-*"):
label = label[0:-2]
next_idx = idx + 1
end_idx = idx
while next_idx < length:
if labels[next_idx] == "O" or labels[next_idx].startswith("B-") \
or labels[next_idx].startswith("S-"):
break
next_label = labels[next_idx][2:]
if next_label.endswith("-*"):
next_label = next_label[0:-2]
if next_label != label:
break
end_idx = next_idx
next_idx = next_idx + 1
if end_idx == idx:
new_label = "S-" + labels[idx][2:]
#print("Change %s to %s" % (labels[idx], new_label))
labels[idx] = new_label
normed_labels[idx] = new_label
change = change + 1
idx = end_idx + 1
elif labels[idx].startswith("S-"):
idx = idx + 1
elif labels[idx].startswith("M-"):
new_label = "B-" + labels[idx][2:]
#print("Change %s to %s" % (labels[idx], new_label))
normed_labels[idx] = new_label
labels[idx] = new_label
change = change + 1
else:
new_label = "S-" + labels[idx][2:]
#print("Change %s to %s" % (labels[idx], new_label))
normed_labels[idx] = new_label
labels[idx] = new_label
change = change + 1
return normed_labels, change
def getListFromStr(entity):
entity_del_start = ''.join(list(entity)[1:])
# entity_del_start: '2,3]TARGET'
new_entity = entity_del_start.split(']')
start, end = new_entity[0].split(',')
start, end = int(start), int(end)
# start: 2 end: 3
label = new_entity[1]
# label: 'TARGET'
return [start, end, label]
def evalSRLExact(gold, predict):
glength, plength = gold.length, predict.length
if glength != plength:
raise Exception('gold length does not match predict length.')
goldlabels, predictlabels = [], []
for idx in range(glength):
goldlabels.append(gold.words[idx].label)
predictlabels.append(predict.words[idx].label)
# class set{'[2,4]TARGET', '[0,0]AGENT'}
gold_entities = label_to_entity(goldlabels)
# class set{'[2,3]TARGET', '[0,1]AGENT', '[2,4]ad>'}
predict_entities = label_to_entity(predictlabels)
gold_entity_num, predict_entity_num, correct_entity_num = len(gold_entities), len(predict_entities), 0
gold_agent_entity_num, predict_agent_entity_num, correct_agent_entity_num = 0, 0, 0
gold_target_entity_num, predict_target_entity_num, correct_target_entity_num = 0, 0, 0
for entity in gold_entities:
if entity.endswith('AGENT'):
gold_agent_entity_num += 1
elif entity.endswith('TARGET'):
gold_target_entity_num += 1
for entity in predict_entities:
if entity.endswith('AGENT'):
predict_agent_entity_num += 1
elif entity.endswith('TARGET'):
predict_target_entity_num += 1
for one_entity in gold_entities:
if one_entity in predict_entities:
correct_entity_num = correct_entity_num + 1
if one_entity.endswith('AGENT'):
correct_agent_entity_num += 1
elif one_entity.endswith('TARGET'):
correct_target_entity_num += 1
return gold_entity_num, predict_entity_num, correct_entity_num, \
gold_agent_entity_num, predict_agent_entity_num, correct_agent_entity_num, \
gold_target_entity_num, predict_target_entity_num, correct_target_entity_num
def jiaoji(a1, a2, b1, b2):
if a1 == b1 and a2 == b2:
return True
else:
list1 = list(range(a1, a2+1))
list2 = list(range(b1, b2+1))
if len(set(list1).intersection(set(list2))) != 0:
return True
return False
def contain_len(a1, a2, b1, b2):
return len(set(list(range(a1, a2 + 1))).intersection(set(list(range(b1, b2 + 1)))))
def evalSRLBinary(gold, predict):
glength, plength = gold.length, predict.length
if glength != plength:
raise Exception('gold length does not match predict length.')
goldlabels, predictlabels = [], []
for idx in range(glength):
goldlabels.append(gold.words[idx].label)
predictlabels.append(predict.words[idx].label)
# class set{'[2,4]TARGET', '[0,0]AGENT'}
gold_entities = label_to_entity(goldlabels)
# class set{'[2,3]TARGET', '[0,1]AGENT', '[2,4]ad>'}
predict_entities = label_to_entity(predictlabels)
gold_entity_num, predict_entity_num, gold_correct_entity_num, predict_correct_entity_num = len(gold_entities), len(
predict_entities), 0, 0
gold_agent_entity_num, predict_agent_entity_num, gold_correct_agent_entity_num, predict_correct_agent_entity_num = 0, 0, 0, 0
gold_target_entity_num, predict_target_entity_num, gold_correct_target_entity_num, predict_correct_target_entity_num = 0, 0, 0, 0
for entity in gold_entities:
if entity.endswith('AGENT'):
gold_agent_entity_num += 1
elif entity.endswith('TARGET'):
gold_target_entity_num += 1
for entity in predict_entities:
if entity.endswith('AGENT'):
predict_agent_entity_num += 1
elif entity.endswith('TARGET'):
predict_target_entity_num += 1
for gold_entity in gold_entities:
for predict_entity in predict_entities:
gold_start, gold_end, gold_label = getListFromStr(gold_entity)
predict_start, predict_end, predict_label = getListFromStr(predict_entity)
if gold_label == predict_label and jiaoji(gold_start, gold_end, predict_start, predict_end):
gold_correct_entity_num += 1
if gold_label == 'AGENT':
gold_correct_agent_entity_num += 1
elif gold_label == 'TARGET':
gold_correct_target_entity_num += 1
break
for predict_entity in predict_entities:
for gold_entity in gold_entities:
gold_start, gold_end, gold_label = getListFromStr(gold_entity)
predict_start, predict_end, predict_label = getListFromStr(predict_entity)
if gold_label == predict_label and jiaoji(gold_start, gold_end, predict_start, predict_end):
predict_correct_entity_num += 1
if gold_label == 'AGENT':
predict_correct_agent_entity_num += 1
elif gold_label == 'TARGET':
predict_correct_target_entity_num += 1
break
return gold_entity_num, predict_entity_num, gold_correct_entity_num, predict_correct_entity_num, \
gold_agent_entity_num, predict_agent_entity_num, gold_correct_agent_entity_num, predict_correct_agent_entity_num, \
gold_target_entity_num, predict_target_entity_num, gold_correct_target_entity_num, predict_correct_target_entity_num
def evalSRLProportional(gold, predict):
glength, plength = gold.length, predict.length
if glength != plength:
raise Exception('gold length does not match predict length.')
goldlabels, predictlabels = [], []
for idx in range(glength):
goldlabels.append(gold.words[idx].label)
predictlabels.append(predict.words[idx].label)
# class set{'[2,4]TARGET', '[0,0]AGENT'}
gold_entities = label_to_entity(goldlabels)
# class set{'[2,3]TARGET', '[0,1]AGENT', '[2,4]ad>'}
predict_entities = label_to_entity(predictlabels)
gold_entity_num, predict_entity_num, gold_correct_entity_num, predict_correct_entity_num = len(gold_entities), len(
predict_entities), 0, 0
gold_agent_entity_num, predict_agent_entity_num, gold_correct_agent_entity_num, predict_correct_agent_entity_num = 0, 0, 0, 0
gold_target_entity_num, predict_target_entity_num, gold_correct_target_entity_num, predict_correct_target_entity_num = 0, 0, 0, 0
for entity in gold_entities:
if entity.endswith('AGENT'):
gold_agent_entity_num += 1
elif entity.endswith('TARGET'):
gold_target_entity_num += 1
for entity in predict_entities:
if entity.endswith('AGENT'):
predict_agent_entity_num += 1
elif entity.endswith('TARGET'):
predict_target_entity_num += 1
for gold_entity in gold_entities:
for predict_entity in predict_entities:
gold_start, gold_end, gold_label = getListFromStr(gold_entity)
predict_start, predict_end, predict_label = getListFromStr(predict_entity)
if gold_label == predict_label and jiaoji(gold_start, gold_end, predict_start, predict_end):
correct_len = contain_len(gold_start, gold_end, predict_start, predict_end)
gold_correct_rate = (correct_len / (gold_end - gold_start + 1))
gold_correct_entity_num += gold_correct_rate
if gold_label == 'AGENT':
gold_correct_agent_entity_num += gold_correct_rate
elif gold_label == 'TARGET':
gold_correct_target_entity_num += gold_correct_rate
break
for predict_entity in predict_entities:
for gold_entity in gold_entities:
gold_start, gold_end, gold_label = getListFromStr(gold_entity)
predict_start, predict_end, predict_label = getListFromStr(predict_entity)
if gold_label == predict_label and jiaoji(gold_start, gold_end, predict_start, predict_end):
correct_len = contain_len(gold_start, gold_end, predict_start, predict_end)
predict_correct_rate = (correct_len / (predict_end - predict_start + 1))
predict_correct_entity_num += predict_correct_rate
if gold_label == 'AGENT':
predict_correct_agent_entity_num += predict_correct_rate
elif gold_label == 'TARGET':
predict_correct_target_entity_num += predict_correct_rate
break
return gold_entity_num, predict_entity_num, gold_correct_entity_num, predict_correct_entity_num, \
gold_agent_entity_num, predict_agent_entity_num, gold_correct_agent_entity_num, predict_correct_agent_entity_num, \
gold_target_entity_num, predict_target_entity_num, gold_correct_target_entity_num, predict_correct_target_entity_num
def readSRL(file, bert_token=None, lang_id=None):
min_count = 1
total = 0
words = []
for line in file:
tok = line.strip().split()
if not tok or line.strip() == '' or line.strip().startswith('#'):
if len(words) > min_count:
total += 1
yield Sentence(words, bert_token, lang_id)
words = []
elif len(tok) == 3:
try:
words.append(Word(int(tok[0]), tok[1], tok[2]))
except Exception:
pass
else:
pass
if len(words) > min_count:
total += 1
yield Sentence(words, bert_token, lang_id)
print("Total num: ", total)
def writeSRL(filename, sentences):
with open(filename, 'w') as file:
for sentence in sentences:
for entry in sentence.words:
file.write(str(entry) + '\n')
file.write('\n')
def printSRL(output, sentence):
for entry in sentence.words:
output.write(str(entry) + '\n')
output.write('\n')
| 40.481383
| 133
| 0.603968
| 1,914
| 15,221
| 4.503657
| 0.07001
| 0.098144
| 0.061253
| 0.029582
| 0.801276
| 0.773782
| 0.751044
| 0.745708
| 0.743387
| 0.681207
| 0
| 0.015531
| 0.293542
| 15,221
| 376
| 134
| 40.481383
| 0.786106
| 0.032849
| 0
| 0.669841
| 0
| 0
| 0.026584
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0.006349
| 0
| 0.003175
| 0.085714
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
418bde481cd9a862ca6b0f7d948b2cb9c4751dc4
| 5,165
|
py
|
Python
|
src/data/dataloader.py
|
TencentYoutuResearch/SelfSupervisedLearning-DSM
|
655a0a23a47bf2559f3d435384ae59a8871a5ff5
|
[
"Apache-2.0"
] | 27
|
2021-01-07T11:09:33.000Z
|
2021-08-31T02:46:23.000Z
|
src/data/dataloader.py
|
TencentYoutuResearch/SelfSupervisedLearning-DSM
|
655a0a23a47bf2559f3d435384ae59a8871a5ff5
|
[
"Apache-2.0"
] | null | null | null |
src/data/dataloader.py
|
TencentYoutuResearch/SelfSupervisedLearning-DSM
|
655a0a23a47bf2559f3d435384ae59a8871a5ff5
|
[
"Apache-2.0"
] | 3
|
2021-01-08T08:31:06.000Z
|
2021-11-26T04:10:23.000Z
|
import torch
def pt_data_loader_init(args, data_length, image_tmpl, train_transforms, test_transforms, eval_transforms):
if args.pt_dataset in ['ucf101', 'hmdb51', 'diving48', 'sth_v1']:
from data.dataset import DataSet as DataSet
elif args.pt_dataset == 'kinetics':
from data.video_dataset import VideoDataSet as DataSet
else:
Exception("unsupported dataset")
train_dataset = DataSet(args, args.pt_root, args.pt_train_list, num_segments=1, new_length=data_length,
stride=args.pt_stride, modality=args.pt_mode, dataset=args.pt_dataset, test_mode=False,
image_tmpl=image_tmpl if args.pt_mode in ["rgb", "RGBDiff"]
else args.pt_flow_prefix + "{}_{:05d}.jpg", transform=train_transforms)
print("training samples:{}".format(train_dataset.__len__()))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.pt_batch_size, shuffle=True,
num_workers=args.pt_workers, pin_memory=True)
val_dataset = DataSet(args, args.pt_root, args.pt_val_list, num_segments=1, new_length=data_length,
stride=args.pt_stride, modality=args.pt_mode, test_mode=True, dataset=args.pt_dataset,
image_tmpl=image_tmpl if args.pt_mode in ["rgb", "RGBDiff"] else args.pt_flow_prefix + "{}_{:05d}.jpg",
random_shift=False, transform=test_transforms)
print("val samples:{}".format(val_dataset.__len__()))
val_data_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.pt_batch_size, shuffle=False,
num_workers=args.pt_workers, pin_memory=True)
eval_dataset = DataSet(args, args.pt_root, args.pt_val_list, num_segments=1, new_length=data_length,
stride=args.pt_stride, modality=args.pt_mode, test_mode=True, dataset=args.pt_dataset,
image_tmpl=image_tmpl if args.pt_mode in ["rgb", "RGBDiff"] else args.pt_flow_prefix + "{}_{:05d}.jpg",
random_shift=False, transform=eval_transforms, full_video=True)
print("eval samples:{}".format(eval_dataset.__len__()))
eval_data_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=args.pt_batch_size, shuffle=False,
num_workers=args.pt_workers, pin_memory=True)
return train_data_loader, val_data_loader, eval_data_loader, train_dataset.__len__(), val_dataset.__len__(), eval_dataset.__len__()
def ft_data_loader_init(args, data_length, image_tmpl, train_transforms, test_transforms, eval_transforms):
if args.ft_dataset in ['ucf101', 'hmdb51', 'diving48', 'sth_v1']:
from data.dataset import DataSet as DataSet
elif args.ft_dataset == 'kinetics':
from data.video_dataset import VideoDataSet as DataSet
else:
Exception("unsupported dataset")
train_dataset = DataSet(args, args.ft_root, args.ft_train_list, num_segments=1, new_length=data_length,
stride=args.ft_stride, modality=args.ft_mode, dataset=args.ft_dataset, test_mode=False,
image_tmpl=image_tmpl if args.ft_mode in ["rgb", "RGBDiff"]
else args.flow_prefix + "{}_{:05d}.jpg", transform=train_transforms)
print("training samples:{}".format(train_dataset.__len__()))
train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.ft_batch_size, shuffle=True,
num_workers=args.ft_workers, pin_memory=True)
val_dataset = DataSet(args, args.ft_root, args.ft_val_list, num_segments=1, new_length=data_length,
stride=args.ft_stride, modality=args.ft_mode, test_mode=True, dataset=args.ft_dataset,
image_tmpl=image_tmpl if args.ft_mode in ["rgb", "RGBDiff"] else args.flow_prefix + "{}_{:05d}.jpg",
random_shift=False, transform=test_transforms)
print("val samples:{}".format(val_dataset.__len__()))
val_data_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.ft_batch_size, shuffle=False,
num_workers=args.ft_workers, pin_memory=True)
eval_dataset = DataSet(args, args.ft_root, args.ft_val_list, num_segments=1, new_length=data_length,
stride=args.ft_stride, modality=args.ft_mode, test_mode=True, dataset=args.ft_dataset,
image_tmpl=image_tmpl if args.ft_mode in ["rgb", "RGBDiff"] else args.ft_flow_prefix + "{}_{:05d}.jpg",
random_shift=False, transform=eval_transforms, full_video=True)
print("eval samples:{}".format(eval_dataset.__len__()))
eval_data_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=args.ft_batch_size, shuffle=False,
num_workers=args.ft_workers, pin_memory=True)
return train_data_loader, val_data_loader, eval_data_loader, train_dataset.__len__(), val_dataset.__len__(), eval_dataset.__len__()
| 80.703125
| 135
| 0.66544
| 672
| 5,165
| 4.724702
| 0.102679
| 0.054803
| 0.034016
| 0.041575
| 0.982047
| 0.982047
| 0.982047
| 0.979528
| 0.950551
| 0.933543
| 0
| 0.008528
| 0.228074
| 5,165
| 63
| 136
| 81.984127
| 0.78781
| 0
| 0
| 0.542373
| 0
| 0
| 0.065828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.084746
| 0
| 0.152542
| 0.101695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
41cccc5396748efdf0684e52b6a1992b167b0dcf
| 3,612
|
py
|
Python
|
test/test_server.py
|
gomezportillo/apolo
|
8811fd805086d22ac15243eef43fc9be77be5484
|
[
"MIT"
] | 1
|
2018-10-08T09:45:53.000Z
|
2018-10-08T09:45:53.000Z
|
test/test_server.py
|
gomezportillo/cloud-computing
|
8811fd805086d22ac15243eef43fc9be77be5484
|
[
"MIT"
] | 30
|
2018-10-26T15:48:37.000Z
|
2019-01-22T18:52:42.000Z
|
test/test_server.py
|
gomezportillo/cloud-computing
|
8811fd805086d22ac15243eef43fc9be77be5484
|
[
"MIT"
] | 3
|
2018-11-30T14:40:29.000Z
|
2019-01-01T23:07:32.000Z
|
import unittest
import requests
import json
class TestServer(unittest.TestCase):
@classmethod
def setUpClass(self):
self.port = 80
self.URL_BASE = 'http://localhost:{}'.format( self.port )
self.URL_USERS = self.URL_BASE + '/rest/users'
self.URL_USERS_ALL = self.URL_USERS + '/all'
requests.delete( self.URL_USERS_ALL )
@classmethod
def tearDownClass(self):
requests.delete( self.URL_USERS_ALL )
def test_index_is_up(self):
response = requests.get( self.URL_BASE )
self.assertEqual(response.status_code, 200)
def test_index_message(self):
response = requests.get( self.URL_BASE )
response_json = response.json()
self.assertEqual(response_json['status'], 'OK')
def test_insert_is_up(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
response = requests.put( self.URL_USERS, data=user )
self.assertEqual( response.status_code, 200 )
requests.delete( self.URL_USERS_ALL )
def test_insert(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
response = requests.put( self.URL_USERS, data=user)
response_json = response.json()
self.assertEqual(response_json['status'], 'SUCCESS')
requests.delete( self.URL_USERS_ALL )
def test_find_is_up(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) )
self.assertEqual( response.status_code, 200 )
requests.delete( self.URL_USERS_ALL )
def test_find_user(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
requests.put( self.URL_USERS, data=user )
response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) )
response_json = response.json()
response_json['message']
self.assertEqual( [user], response_json['message'] )
requests.delete( self.URL_USERS_ALL )
def test_update_is_up(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
response = requests.post( self.URL_USERS, data=user )
self.assertEqual(response.status_code, 200)
requests.delete( self.URL_USERS_ALL )
def test_update_user(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
requests.put(self.URL_USERS, data=user)
user['instrument'] = 'bass'
requests.post(self.URL_USERS, data=user)
response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) )
response_json = response.json()
self.assertEqual( [user], response_json['message'] )
requests.delete( self.URL_USERS_ALL )
def test_delete_is_up(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
response = requests.delete( '{}/{}'.format( self.URL_USERS, user['email'] ) )
self.assertEqual( response.status_code, 200 )
def test_delete_user(self):
user={'email':'jhon@doe', 'instrument': 'guitar'}
requests.put( self.URL_USERS, data=user )
response = requests.delete( '{}/{}'.format( self.URL_USERS, user['email'] ) )
response = requests.get( '{}/{}'.format( self.URL_USERS, user['email'] ) )
response_json = response.json()
self.assertEqual( [], response_json['message'] )
requests.delete( self.URL_USERS_ALL )
def test_readall_is_up(self):
response = requests.get( self.URL_USERS_ALL )
self.assertEqual( response.status_code, 200 )
if __name__ == '__main__':
unittest.main()
| 31.964602
| 85
| 0.629014
| 428
| 3,612
| 5.091122
| 0.123832
| 0.096374
| 0.143185
| 0.082607
| 0.843047
| 0.830656
| 0.800826
| 0.776044
| 0.705369
| 0.608077
| 0
| 0.007135
| 0.223976
| 3,612
| 112
| 86
| 32.25
| 0.770246
| 0
| 0
| 0.592105
| 0
| 0
| 0.109911
| 0
| 0
| 0
| 0
| 0
| 0.144737
| 1
| 0.171053
| false
| 0
| 0.039474
| 0
| 0.223684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
68bd8231126ef0d9ff6ee74b1940ca4d28ca1498
| 5,074
|
py
|
Python
|
timelight_ai_python_api_client/models/__init__.py
|
timelight-ai/python-api-client
|
7e14341a89e8b7e1b4b0730416f6ddd3ef66ef39
|
[
"MIT"
] | null | null | null |
timelight_ai_python_api_client/models/__init__.py
|
timelight-ai/python-api-client
|
7e14341a89e8b7e1b4b0730416f6ddd3ef66ef39
|
[
"MIT"
] | null | null | null |
timelight_ai_python_api_client/models/__init__.py
|
timelight-ai/python-api-client
|
7e14341a89e8b7e1b4b0730416f6ddd3ef66ef39
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
timelight
This is the timelight api. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from timelight_ai_python_api_client.models.alert_comment_dto import AlertCommentDto
from timelight_ai_python_api_client.models.alert_dto import AlertDto
from timelight_ai_python_api_client.models.alert_favorite_dto import AlertFavoriteDto
from timelight_ai_python_api_client.models.alert_list_dto import AlertListDto
from timelight_ai_python_api_client.models.alert_ref_dto import AlertRefDto
from timelight_ai_python_api_client.models.alert_ref_list_dto import AlertRefListDto
from timelight_ai_python_api_client.models.alert_ref_result_dto import AlertRefResultDto
from timelight_ai_python_api_client.models.anomalies_response_dto import AnomaliesResponseDto
from timelight_ai_python_api_client.models.create_source_day_dto import CreateSourceDayDto
from timelight_ai_python_api_client.models.create_source_dto import CreateSourceDto
from timelight_ai_python_api_client.models.day_context import DayContext
from timelight_ai_python_api_client.models.day_list_dto import DayListDto
from timelight_ai_python_api_client.models.day_model_dto import DayModelDto
from timelight_ai_python_api_client.models.day_patch_dto import DayPatchDto
from timelight_ai_python_api_client.models.day_trend import DayTrend
from timelight_ai_python_api_client.models.day_trend_input import DayTrendInput
from timelight_ai_python_api_client.models.day_trend_input_list_dto import DayTrendInputListDto
from timelight_ai_python_api_client.models.day_trend_list_dto import DayTrendListDto
from timelight_ai_python_api_client.models.days_near_date_result_dto import DaysNearDateResultDto
from timelight_ai_python_api_client.models.days_patch_dto import DaysPatchDto
from timelight_ai_python_api_client.models.generated_day_context_bulk_dto import GeneratedDayContextBulkDto
from timelight_ai_python_api_client.models.generated_day_trend_bulk_dto import GeneratedDayTrendBulkDto
from timelight_ai_python_api_client.models.import_day_dto import ImportDayDto
from timelight_ai_python_api_client.models.import_days_dto import ImportDaysDto
from timelight_ai_python_api_client.models.login_dto import LoginDto
from timelight_ai_python_api_client.models.login_response_dto import LoginResponseDto
from timelight_ai_python_api_client.models.model_dto import ModelDto
from timelight_ai_python_api_client.models.model_list_dto import ModelListDto
from timelight_ai_python_api_client.models.model_patch_dto import ModelPatchDto
from timelight_ai_python_api_client.models.model_post_dto import ModelPostDto
from timelight_ai_python_api_client.models.models_patch_dto import ModelsPatchDto
from timelight_ai_python_api_client.models.models_post_dto import ModelsPostDto
from timelight_ai_python_api_client.models.prevision_apply_group_dto import PrevisionApplyGroupDto
from timelight_ai_python_api_client.models.prevision_apply_group_response_dto import PrevisionApplyGroupResponseDto
from timelight_ai_python_api_client.models.prevision_bulk_save_dto import PrevisionBulkSaveDto
from timelight_ai_python_api_client.models.prevision_bulk_save_result_dto import PrevisionBulkSaveResultDto
from timelight_ai_python_api_client.models.prevision_dto import PrevisionDto
from timelight_ai_python_api_client.models.prevision_list_dto import PrevisionListDto
from timelight_ai_python_api_client.models.prevision_patch_dto import PrevisionPatchDto
from timelight_ai_python_api_client.models.prevision_save_dto import PrevisionSaveDto
from timelight_ai_python_api_client.models.prevision_update_result_dto import PrevisionUpdateResultDto
from timelight_ai_python_api_client.models.recompute_day_models_response_dto import RecomputeDayModelsResponseDto
from timelight_ai_python_api_client.models.recompute_days_projection_response_dto import RecomputeDaysProjectionResponseDto
from timelight_ai_python_api_client.models.recompute_models_response_dto import RecomputeModelsResponseDto
from timelight_ai_python_api_client.models.recompute_source_models_response_dto import RecomputeSourceModelsResponseDto
from timelight_ai_python_api_client.models.request_demo_dto import RequestDemoDto
from timelight_ai_python_api_client.models.source_dto import SourceDto
from timelight_ai_python_api_client.models.source_group_create_dto import SourceGroupCreateDto
from timelight_ai_python_api_client.models.source_group_dto import SourceGroupDto
from timelight_ai_python_api_client.models.source_group_list_dto import SourceGroupListDto
from timelight_ai_python_api_client.models.source_group_patch_dto import SourceGroupPatchDto
from timelight_ai_python_api_client.models.source_list_dto import SourceListDto
from timelight_ai_python_api_client.models.source_patch_dto import SourcePatchDto
from timelight_ai_python_api_client.models.source_patch_group_dto import SourcePatchGroupDto
from timelight_ai_python_api_client.models.user_dto import UserDto
| 69.506849
| 123
| 0.917422
| 720
| 5,074
| 5.969444
| 0.169444
| 0.166356
| 0.19195
| 0.26873
| 0.56282
| 0.56282
| 0.56282
| 0.537692
| 0.234528
| 0.073057
| 0
| 0.001461
| 0.055577
| 5,074
| 72
| 124
| 70.472222
| 0.895451
| 0.040402
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
68c576960ed4b14647e54e0f97d34c5fc34587e3
| 22
|
py
|
Python
|
src/cnpj/seek/__init__.py
|
pedromxavier/cnpj
|
0b0f4914a49866990e484a03111f3b2c80d83aaa
|
[
"MIT"
] | 1
|
2021-03-13T13:47:40.000Z
|
2021-03-13T13:47:40.000Z
|
src/cnpj/seek/__init__.py
|
pedromxavier/cnpj
|
0b0f4914a49866990e484a03111f3b2c80d83aaa
|
[
"MIT"
] | null | null | null |
src/cnpj/seek/__init__.py
|
pedromxavier/cnpj
|
0b0f4914a49866990e484a03111f3b2c80d83aaa
|
[
"MIT"
] | null | null | null |
from .seek import seek
| 22
| 22
| 0.818182
| 4
| 22
| 4.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ec2784fa11213b2e0722999417e866beffaf7090
| 16,106
|
py
|
Python
|
PR_BCI_team/Team_StarLab/DKHan/examples/eeg_dg/train_eval.py
|
PatternRecognition/OpenBMI
|
d9291ddb81f4319fb3764d7192e0363939a62ee9
|
[
"MIT"
] | 217
|
2015-11-02T11:10:29.000Z
|
2022-03-22T07:01:12.000Z
|
PR_BCI_team/Team_StarLab/DKHan/examples/eeg_dg/train_eval.py
|
deep-bci-g/OpenBMI
|
75daf901b2dbe215852cbff243606dcfcd10f05c
|
[
"MIT"
] | 24
|
2015-11-02T11:10:45.000Z
|
2021-09-08T11:10:33.000Z
|
PR_BCI_team/Team_StarLab/DKHan/examples/eeg_dg/train_eval.py
|
deep-bci-g/OpenBMI
|
75daf901b2dbe215852cbff243606dcfcd10f05c
|
[
"MIT"
] | 112
|
2016-01-22T01:45:44.000Z
|
2022-03-22T07:08:19.000Z
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class LabelSmoothingCrossEntropy(torch.nn.Module):
    """Cross-entropy with uniform label smoothing.

    The per-sample loss is a convex blend of the ordinary NLL term (weight
    ``1 - smoothing``) and a uniform-distribution term (weight ``smoothing``).
    """

    def __init__(self):
        super(LabelSmoothingCrossEntropy, self).__init__()

    def forward(self, x, target, smoothing=0.1):
        keep = 1. - smoothing
        log_probs = F.log_softmax(x, dim=-1)
        # NLL of the true class, one value per sample.
        per_sample_nll = -log_probs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        # Mean negative log-prob over all classes == KL-to-uniform term (up to a constant).
        uniform_term = -log_probs.mean(dim=-1)
        return (keep * per_sample_nll + smoothing * uniform_term).mean()
class ConfidenceLabelSmoothingCrossEntropy(torch.nn.Module):
    """Label-smoothing cross-entropy whose smoothing varies per subject.

    Each subject id indexes a hard-coded confidence table; the smoothing
    applied to a sample is ``1 - confidence[subject_id]``, so low-confidence
    subjects lean more heavily on the uniform term.
    """
    def __init__(self):
        super(ConfidenceLabelSmoothingCrossEntropy, self).__init__()
        # Earlier (54-entry) confidence table, kept for reference only.
        # self.confidence = [0.7425, 0.9325, 0.965, 0.5395, 0.86025, 0.754, 0.66475, 0.618, 0.7925, 0.6525, 0.5415,
        #                    0.5705, 0.6525, 0.59625, 0.6145, 0.62125, 0.7755, 0.866, 0.83425, 0.64125, 0.986, 0.82225,
        #                    0.70525, 0.5625, 0.5145, 0.5275, 0.57775, 0.918, 0.9175, 0.69575, 0.6555, 0.867, 0.945,
        #                    0.5155, 0.593, 0.976, 0.963, 0.591, 0.749, 0.5575, 0.52625, 0.6125, 0.83725, 0.97225,
        #                    0.93725, 0.6415, 0.61225, 0.584, 0.69175, 0.60825, 0.63575, 0.756, 0.61375, 0.53575]
        # Per-subject confidence, indexed by subject id (sid).
        # NOTE(review): the final 11 values (0.985 ... 0.49) appear twice in a
        # row at the end of the table -- confirm whether the duplication is
        # intentional or a copy-paste slip.
        self.confidence = [0.713, 0.953, 0.947, 0.514, 0.933, 0.725, 0.6025, 0.5855, 0.821, 0.6175, 0.547, 0.5605, 0.7,
                           0.609, 0.5785, 0.638, 0.8005, 0.824, 0.834, 0.5155, 0.9775, 0.8615, 0.6305, 0.549, 0.517,
                           0.5915, 0.5285, 0.923, 0.855, 0.751, 0.675, 0.773, 0.9805, 0.53, 0.5255, 0.9685, 0.9535,
                           0.5515, 0.8795, 0.497, 0.529, 0.5335, 0.8645, 0.9595, 0.9245, 0.5265, 0.452, 0.6415, 0.696,
                           0.617, 0.683, 0.7255, 0.5995, 0.5815, 0.772, 0.912, 0.983, 0.565, 0.7875, 0.783, 0.727,
                           0.6505, 0.764, 0.6875, 0.536, 0.5805, 0.605, 0.5835, 0.6505, 0.6045, 0.7505, 0.908, 0.8345,
                           0.767, 0.9945, 0.783, 0.78, 0.576, 0.512, 0.4635, 0.627, 0.913, 0.98, 0.6405, 0.636, 0.961,
                           0.9095, 0.501, 0.6605, 0.9835, 0.9725, 0.6305, 0.6185, 0.618, 0.5235, 0.6915, 0.81, 0.985,
                           0.95, 0.7565, 0.7725, 0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49, 0.985, 0.95,
                           0.7565, 0.7725, 0.5265, 0.6875, 0.5995, 0.5885, 0.7865, 0.628, 0.49
                           ]
    def forward(self, x, target, sid):
        # Look up a confidence value for every sample from its subject id.
        confidencemat = torch.zeros_like(target,dtype=torch.float32)
        for i in range(len(target)):
            confidencemat[i] = self.confidence[sid[i]]
        smoothing = 1 - confidencemat
        logprobs = F.log_softmax(x, dim=-1)
        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
        nll_loss = nll_loss.squeeze(1)
        smooth_loss = -logprobs.mean(dim=-1)
        # Per-sample convex combination of the NLL and uniform terms.
        loss = torch.mul(confidencemat,nll_loss) + torch.mul(smoothing,smooth_loss)
        return loss.mean()
class CroppedLoss:
    """Adapts a loss function to time-cropped predictions.

    Predictions of shape ``(batch, 1, crops)`` are averaged over the crop
    axis and squeezed to ``(batch,)``-compatible logits before being handed
    to the wrapped loss.
    """

    def __init__(self, loss_function):
        self.loss_function = loss_function

    def __call__(self, preds, targets):
        crop_mean = torch.mean(preds, dim=2).squeeze(dim=1)
        return self.loss_function(crop_mean, targets)
def train_crop(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1):
    """One training epoch for cropped-decoding models.

    Runs ``model.embedding_net`` on each batch, averages predictions over
    crops via ``CroppedLoss(NLLLoss)``, and steps the LR scheduler once at
    the end of the epoch.  ``cuda`` and ``gpuidx`` are accepted only for
    signature compatibility with the other train_* helpers.
    """
    criterion = torch.nn.NLLLoss()
    lossfn = CroppedLoss(criterion)
    model.train()
    for batch_idx, datas in enumerate(train_loader):
        data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
        optimizer.zero_grad()
        # Fix: the original ran `output = model(data)` and immediately
        # overwrote the result with the embedding_net forward -- a wasted
        # full forward pass per batch (and, in train mode, a needless extra
        # update of e.g. BatchNorm running statistics).
        output = model.embedding_net(data)
        loss = lossfn(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    scheduler.step()
def eval_crop(model, device, test_loader):
    """Evaluate a cropped-decoding model on ``test_loader``.

    Two overlapping 1000-sample crops (offset by 125) are scored and their
    outputs stitched along the crop axis before averaging per trial.
    Returns ``(average_loss, accuracy_percent)``.
    """
    model.eval()
    test_loss = []
    correct = []
    with torch.no_grad():
        for datas in test_loader:
            data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
            outputs = []
            # Crops [0:1000] and [125:1125] -- assumes the last axis holds at
            # least 1125 time samples (TODO confirm against the data loader).
            for i in range(2):
                outputs.append(model(data[:, :, :, i * 125:i * 125 + 1000]))
            # Keep the first crop whole and only the trailing 125 output
            # columns of the second, so overlapping positions are not counted twice.
            result = torch.cat([outputs[0], outputs[1][:, :, model.out_size - 125:model.out_size]], dim=2)
            y_preds_per_trial = result.mean(dim=2)
            test_loss.append(F.nll_loss(y_preds_per_trial, target, reduction='sum').item())  # sum up batch loss
            pred = y_preds_per_trial.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct.append(pred.eq(target.view_as(pred)).sum().item())
    loss = sum(test_loss) / len(test_loader.dataset)
    # print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
        loss, sum(correct), len(test_loader.dataset),
        100. * sum(correct) / len(test_loader.dataset)))
    return loss, 100. * sum(correct) / len(test_loader.dataset)
class MAE_loss(torch.nn.Module):
    """L1 loss against a two-class one-hot encoding of integer targets."""

    def __init__(self, device):
        super(MAE_loss, self).__init__()
        self.device = device
        self.loss_function = torch.nn.L1Loss()

    def __call__(self, preds, targets):
        # Expand class indices into a (batch, 2) one-hot matrix on the
        # configured device, then compare with mean absolute error.
        batch = targets.size(0)
        one_hot = torch.FloatTensor(batch, 2).to(self.device)
        one_hot.zero_()
        one_hot.scatter_(1, targets.unsqueeze(1), 1)
        return self.loss_function(preds, one_hot)
# NOTE(review): this class is a byte-for-byte duplicate of the MAE_loss
# defined immediately above; at import time it silently re-binds the name to
# an identical definition.  Harmless today, but one of the two should be
# removed so future edits cannot diverge.
class MAE_loss(torch.nn.Module):
    """L1 loss against a two-class one-hot encoding of integer targets."""
    def __init__(self, device):
        super(MAE_loss, self).__init__()
        self.device = device
        self.loss_function = torch.nn.L1Loss()
    def __call__(self, preds, targets):
        # Build a (batch, 2) one-hot matrix from the integer class indices.
        y_onehot = torch.FloatTensor(targets.size(0), 2).to(self.device)
        y_onehot.zero_()
        y_onehot.scatter_(1, targets.unsqueeze(1), 1)
        return self.loss_function(preds, y_onehot)
import utils
import time
def train(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch):
    """One training epoch; logs loss every ``log_interval`` batches.

    The loss function is taken from the model itself (``model.criterion``,
    unwrapping DataParallel when needed).  ``scheduler``, ``cuda`` and
    ``gpuidx`` are unused here and kept for signature compatibility with the
    other train_* helpers.
    """
    losses = utils.AverageMeter('Loss', ':.4e')
    # DataParallel wraps the real model in .module
    if isinstance(model, torch.nn.DataParallel):
        lossfn = model.module.criterion
    else:
        lossfn = model.criterion
    # lossfn = LabelSmoothingCrossEntropy()
    # lossfn = ConfidenceLabelSmoothingCrossEntropy()
    correct = []
    start = time.time()
    model.train()
    # t_data / t_model accumulate data-loading vs. forward/backward wall time.
    t_data = []
    t_model = []
    t3 = time.time()
    for batch_idx, datas in enumerate(train_loader):
        data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
        t2 = time.time()
        t_data.append(t2 - t3)
        # print(t2)
        optimizer.zero_grad()
        # A channel dimension is inserted: the model expects (batch, 1, ...).
        output = model(data.unsqueeze(dim=1))
        pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True)  # get the index of the max log-probability
        correct.append(pred.eq(target.view_as(pred)).sum().item())
        loss = lossfn(output, target)
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), data.size(0))
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
        t3 = time.time()
        t_model.append(t3 - t2)
    print("time :", time.time() - start)
    print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
    print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def train_mtl(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch):
    """Multi-task training epoch: the label fed to the loss is
    ``2 * subject_id + target``, i.e. two classes per subject.

    NOTE(review): the printed accuracy compares the argmax over the combined
    subject*2+class output space against the plain 0/1 target, which does not
    match the loss's label space -- confirm whether that readout is intended.
    ``scheduler``, ``cuda`` and ``gpuidx`` are unused.
    """
    losses = utils.AverageMeter('Loss', ':.4e')
    # DataParallel wraps the real model in .module
    if isinstance(model, torch.nn.DataParallel):
        lossfn = model.module.criterion
    else:
        lossfn = model.criterion
    # lossfn = LabelSmoothingCrossEntropy()
    # lossfn = ConfidenceLabelSmoothingCrossEntropy()
    correct = []
    start = time.time()
    model.train()
    # t_data / t_model accumulate data-loading vs. forward/backward wall time.
    t_data = []
    t_model = []
    t3 = time.time()
    for batch_idx, datas in enumerate(train_loader):
        data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device, dtype=torch.int64)
        t2 = time.time()
        t_data.append(t2 - t3)
        # print(t2)
        optimizer.zero_grad()
        output = model(data.unsqueeze(dim=1))
        pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True)  # get the index of the max log-probability
        correct.append(pred.eq(target.view_as(pred)).sum().item())
        # Combined multi-task label: two consecutive classes per subject.
        loss = lossfn(output, 2*subjid+target)
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), data.size(0))
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
        t3 = time.time()
        t_model.append(t3 - t2)
    print("time :", time.time() - start)
    print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
    print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def train_gpu(log_interval, model, device, train_loader, optimizer, scheduler, cuda, gpuidx, epoch=1):
    """One training epoch for batches that are used as yielded by the loader.

    Unlike ``train``, no ``.to(device)`` transfer is done -- presumably the
    loader already yields device-resident tensors; verify against the caller.
    The scheduler is stepped with the epoch-average loss, as a
    ReduceLROnPlateau-style scheduler expects.
    """
    losses = utils.AverageMeter('Loss', ':.4e')
    # DataParallel wraps the real model in .module
    if isinstance(model, torch.nn.DataParallel):
        lossfn = model.module.criterion
    else:
        lossfn = model.criterion
    correct = []
    # Fix: dropped the redundant function-local `import time`; the module
    # already imports time at top level.
    start = time.time()
    model.train()
    # t_data / t_model accumulate data-loading vs. forward/backward wall time.
    t_data = []
    t_model = []
    t3 = time.time()
    for batch_idx, datas in enumerate(train_loader):
        data, target = datas[0], datas[1]
        t2 = time.time()
        t_data.append(t2 - t3)
        optimizer.zero_grad()
        # A channel dimension is inserted: the model expects (batch, 1, ...).
        output = model(data.unsqueeze(dim=1))
        pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True)  # get the index of the max log-probability
        correct.append(pred.eq(target.view_as(pred)).sum().item())
        loss = lossfn(output, target)
        loss.backward()
        optimizer.step()
        losses.update(loss.item(), data.size(0))
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
        t3 = time.time()
        t_model.append(t3 - t2)
    print("time :", time.time() - start)
    print(f"t_data : {sum(t_data)} , t_model : {sum(t_model)}")
    scheduler.step(losses.avg)
    print(f'Train set: Accuracy: {sum(correct)}/{len(train_loader.dataset)} ({100. * sum(correct) / len(train_loader.dataset):.4f}%)')
def eval(model, device, test_loader):
    """Evaluate ``model`` on ``test_loader``.

    Prints a summary line and returns ``(average_loss, accuracy_percent)``.
    (Name shadows the builtin ``eval`` -- kept for caller compatibility.)
    """
    model.eval()
    batch_losses = []
    batch_correct = []
    with torch.no_grad():
        for batch in test_loader:
            data = batch[0].to(device)
            target = batch[1].to(device, dtype=torch.int64)
            # Insert the channel dimension the model expects: (batch, 1, ...).
            output = model(data.unsqueeze(dim=1))
            # Summed (not averaged) loss, so the division below is per-sample.
            batch_losses.append(F.cross_entropy(output, target, reduction='sum').item())
            pred = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True)
            batch_correct.append(pred.eq(target.view_as(pred)).sum().item())
    n = len(test_loader.dataset)
    loss = sum(batch_losses) / n
    accuracy = 100. * sum(batch_correct) / n
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
        loss, sum(batch_correct), n, accuracy))
    return loss, accuracy
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
def eval_cali(model, device, test_loader):
    """Evaluate average loss and per-batch ROC AUC.

    NOTE(review): ``correct`` accumulates per-batch AUC values (floats in
    [0, 1]), not hit counts, yet the summary is printed with the same
    "Accuracy" wording and ``100 * sum(...) / len(dataset)`` formula as the
    other eval helpers -- the reported percentage is a scaled sum of AUCs,
    not an accuracy.  Confirm before comparing numbers across helpers.
    """
    model.eval()
    test_loss = []
    correct = []
    with torch.no_grad():
        for datas in test_loader:
            data, target = datas[0].to(device), datas[1].to(device, dtype=torch.int64)
            output = model(data.unsqueeze(dim=1))
            test_loss.append(F.cross_entropy(output, target, reduction='sum').item())  # sum up batch loss
            pred = F.softmax(output, dim=1)
            # ROC computed against the predicted probability of class 0.
            fpr, tpr, thresholds = roc_curve(target.cpu(), pred.cpu()[:,0])
            AUC = auc(fpr, tpr)
            correct.append(AUC)
    loss = sum(test_loss) / len(test_loader.dataset)
    # print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
        loss, sum(correct), len(test_loader.dataset),
        100. * sum(correct) / len(test_loader.dataset)))
    return loss, 100. * sum(correct) / len(test_loader.dataset)
def vote(output, target, topk=(1,)):
    """Majority vote over the top-k predicted class indices, folded mod 2.

    Each of the top-k class indices is mapped to a binary label via ``% 2``
    and the per-sample mode of those labels is returned.  ``target`` only
    participates in the one-hot normalization branch; it does not affect the
    returned vote.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    log_probs = F.log_softmax(output, dim=1)
    _, top_idx = log_probs.topk(maxk, 1, True, True)
    if target.ndimension() > 1:
        # One-hot targets: collapse to class indices.
        target = target.max(1)[1]
    return torch.mode(top_idx % 2)[0]
def eval_mtl(model, device, test_loader):
    """Evaluate a multi-task model trained on ``2*subject_id + target`` labels.

    Predictions are reduced to a binary label through ``vote`` (top-k class
    indices modulo 2) and compared against the plain 0/1 target.
    Returns ``(average_loss, accuracy_percent)``.
    """
    model.eval()
    test_loss = []
    correct = []
    with torch.no_grad():
        for datas in test_loader:
            data, target, subjid = datas[0].to(device), datas[1].to(device, dtype=torch.int64), datas[2].to(device,
                                                                                                            dtype=torch.int64)
            output = model(data.unsqueeze(dim=1))
            # Top-1 and top-5 candidate classes vote on the binary label.
            pred = vote(output, subjid*2+target, (1,5))
            test_loss.append(F.cross_entropy(output, subjid*2+target, reduction='sum').item())  # sum up batch loss
            # pred_0 = F.log_softmax(output, dim=1).argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            # pred = pred_0%2
            correct.append(pred.eq(target.view_as(pred)).sum().item())
    loss = sum(test_loss) / len(test_loader.dataset)
    # print('{:.0f}'.format(100. * correct / len(test_loader.dataset)))
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
        loss, sum(correct), len(test_loader.dataset),
        100. * sum(correct) / len(test_loader.dataset)))
    return loss, 100. * sum(correct) / len(test_loader.dataset)
def eval_ensemble(models, device, test_loader):
    """Evaluate an ensemble by averaging the member models' outputs.

    Every member's logits are stacked along a new crop axis and mean-reduced
    before loss and accuracy are computed.  Returns
    ``(average_loss, accuracy_percent)``.
    """
    for member in models:
        member.eval()
    batch_losses = []
    batch_correct = []
    with torch.no_grad():
        for batch in test_loader:
            data = batch[0].to(device)
            target = batch[1].to(device, dtype=torch.int64)
            # One forward per member; stack on dim 2 and average the ensemble.
            stacked = torch.cat(
                [member(data.unsqueeze(dim=1)).unsqueeze(dim=2) for member in models],
                dim=2)
            mean_output = stacked.mean(dim=2)
            batch_losses.append(F.cross_entropy(mean_output, target, reduction='sum').item())
            pred = F.log_softmax(mean_output, dim=1).argmax(dim=1, keepdim=True)
            batch_correct.append(pred.eq(target.view_as(pred)).sum().item())
    n = len(test_loader.dataset)
    loss = sum(batch_losses) / n
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)'.format(
        loss, sum(batch_correct), n,
        100. * sum(batch_correct) / n))
    return loss, 100. * sum(batch_correct) / n
| 37.282407
| 134
| 0.593009
| 2,212
| 16,106
| 4.198463
| 0.150543
| 0.048993
| 0.034995
| 0.053839
| 0.757941
| 0.746097
| 0.739098
| 0.715732
| 0.707979
| 0.699365
| 0
| 0.087914
| 0.249969
| 16,106
| 431
| 135
| 37.36891
| 0.680877
| 0.099342
| 0
| 0.706897
| 0
| 0.010345
| 0.071675
| 0.015344
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.031034
| 0
| 0.155172
| 0.062069
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6b56670fe95c7afaefb5ca0333577c6f8d06cce1
| 118
|
py
|
Python
|
cct/core2/algorithms/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | 1
|
2015-11-04T16:37:39.000Z
|
2015-11-04T16:37:39.000Z
|
cct/core2/algorithms/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | null | null | null |
cct/core2/algorithms/__init__.py
|
awacha/cct
|
be1adbed2533df15c778051f3f4f9da0749c873a
|
[
"BSD-3-Clause"
] | 1
|
2020-03-05T02:50:43.000Z
|
2020-03-05T02:50:43.000Z
|
from . import radavg, readcbf, peakfit, centering, orderforleastmotormovement, correlmatrix, schilling, matrixaverager
| 118
| 118
| 0.847458
| 10
| 118
| 10
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084746
| 118
| 1
| 118
| 118
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6b71c90f630e14826b11c070cb5b2c08da6ce8f6
| 6,557
|
py
|
Python
|
libretto/migrations/0010_removes_useless_db_index.py
|
adrienlachaize/dezede
|
584ec30cedab95152e2f95595b7691a04e6736e2
|
[
"BSD-3-Clause"
] | 15
|
2015-02-10T21:16:31.000Z
|
2021-03-25T16:46:20.000Z
|
libretto/migrations/0010_removes_useless_db_index.py
|
adrienlachaize/dezede
|
584ec30cedab95152e2f95595b7691a04e6736e2
|
[
"BSD-3-Clause"
] | 4
|
2021-02-10T15:42:08.000Z
|
2022-03-11T23:20:38.000Z
|
libretto/migrations/0010_removes_useless_db_index.py
|
adrienlachaize/dezede
|
584ec30cedab95152e2f95595b7691a04e6736e2
|
[
"BSD-3-Clause"
] | 6
|
2016-07-10T14:20:48.000Z
|
2022-01-19T18:34:02.000Z
|
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration (see filename: removes useless
    db_index) -- it re-declares several fields without their index and tweaks
    the Pupitre model options.  Migration files are replayed verbatim on
    fresh databases; do not hand-edit field definitions.
    """

    dependencies = [
        ('libretto', '0009_auto_20150423_2042'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='pupitre',
            options={'ordering': ('-soliste', 'partie'), 'verbose_name': 'pupitre', 'verbose_name_plural': 'pupitres'},
        ),
        migrations.AlterField(
            model_name='elementdeprogramme',
            name='autre',
            field=models.CharField(max_length=500, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='engagement',
            name='individus',
            field=models.ManyToManyField(related_name='engagements', to='libretto.Individu'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='evenement',
            name='circonstance',
            field=models.CharField(max_length=500, verbose_name='circonstance', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='evenement',
            name='debut_date_approx',
            field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='evenement',
            name='debut_heure_approx',
            field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='evenement',
            name='debut_lieu_approx',
            field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='evenement',
            name='fin_date_approx',
            field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='evenement',
            name='fin_heure_approx',
            field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='evenement',
            name='fin_lieu_approx',
            field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='individu',
            name='deces_date_approx',
            field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='individu',
            name='deces_lieu_approx',
            field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='individu',
            name='naissance_date_approx',
            field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='individu',
            name='naissance_lieu_approx',
            field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='oeuvre',
            name='creation_date_approx',
            field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='oeuvre',
            name='creation_heure_approx',
            field=models.CharField(help_text='Ne remplir que si l\u2019heure est impr\xe9cise.', max_length=30, verbose_name='heure (approximative)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='oeuvre',
            name='creation_lieu_approx',
            field=models.CharField(help_text='Ne remplir que si le lieu (ou institution) est impr\xe9cis(e).', max_length=50, verbose_name='lieu (approximatif)', blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='partie',
            name='professions',
            field=models.ManyToManyField(related_name='parties', to='libretto.Profession', blank=True, help_text='La ou les profession(s) capable(s) de jouer ce r\xf4le ou cet instrument.', null=True, verbose_name='occupations'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='personnel',
            name='engagements',
            field=models.ManyToManyField(related_name='personnels', to='libretto.Engagement'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='pupitre',
            name='quantite_max',
            field=models.IntegerField(default=1, verbose_name='quantit\xe9 maximale'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='pupitre',
            name='quantite_min',
            field=models.IntegerField(default=1, verbose_name='quantit\xe9 minimale'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='source',
            name='date_approx',
            field=models.CharField(help_text='Ne remplir que si la date est impr\xe9cise.', max_length=60, verbose_name='date (approximative)', blank=True),
            preserve_default=True,
        ),
    ]
| 46.176056
| 229
| 0.618423
| 698
| 6,557
| 5.624642
| 0.15043
| 0.106979
| 0.133724
| 0.15512
| 0.845644
| 0.817371
| 0.80107
| 0.776617
| 0.776617
| 0.720071
| 0
| 0.016854
| 0.267043
| 6,557
| 141
| 230
| 46.503546
| 0.800042
| 0
| 0
| 0.686131
| 0
| 0.007299
| 0.275888
| 0.013116
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007299
| 0
| 0.029197
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d41c8c0be9a5740cf81ee2596760f90253aa73e4
| 6,072
|
py
|
Python
|
tests/test_tracing_trace.py
|
rgstephens/opentracing-decorator
|
b69f4e8f998509dd51f4e27b16fcf6f6a069cd22
|
[
"MIT"
] | 4
|
2021-01-26T14:07:57.000Z
|
2021-06-19T04:25:53.000Z
|
tests/test_tracing_trace.py
|
rgstephens/opentracing-decorator
|
b69f4e8f998509dd51f4e27b16fcf6f6a069cd22
|
[
"MIT"
] | null | null | null |
tests/test_tracing_trace.py
|
rgstephens/opentracing-decorator
|
b69f4e8f998509dd51f4e27b16fcf6f6a069cd22
|
[
"MIT"
] | 2
|
2021-06-15T07:25:08.000Z
|
2021-06-28T20:06:39.000Z
|
import numbers
import unittest
import uuid
from unittest.mock import MagicMock, create_autospec
from opentracing.mocktracer import MockTracer
from opentracing_decorator.tracing import Tracing
class TestTracing(unittest.TestCase):
    """Tests for ``Tracing.trace``: call forwarding, span creation, optional
    span injection, parameter tagging, and return-value logging -- all
    verified against an opentracing ``MockTracer``.
    """

    def setUp(self):
        # A fresh MockTracer per test so finished_spans() starts empty.
        self.tracer = MockTracer()
        self.tracing = Tracing(self.tracer)

    def test_func_called(self):
        # The wrapped function must still be invoked through the tracer.
        func = MagicMock()
        traced_func = self.tracing.trace("TestTrace", func)
        traced_func()
        self.assertTrue(func.called)

    def test_function_traced(self):
        # One call should finish exactly one span.
        func = MagicMock()
        traced_func = self.tracing.trace(str(uuid.uuid4()), func)
        traced_func()
        self.assertEqual(
            len(self.tracer.finished_spans()),
            1,
        )

    def test_span_passed(self):
        # pass_span=True injects the active span as keyword argument `span`.
        func = MagicMock()
        traced_func = self.tracing.trace("TestTrace", func, pass_span=True)
        traced_func()
        func.assert_called_with(span=self.tracer.finished_spans()[0])

    def test_parameters_tagged(self):
        # tag_parameters=True records each bound argument as a span tag.
        def func_signature(a, b, c):
            pass
        func = create_autospec(func_signature, spec_set=True)
        traced_func = self.tracing.trace("TestTrace", func, tag_parameters=True)
        traced_func(10, 20, 30)
        correct = {"a": 10, "b": 20, "c": 30}
        span_tags = self.tracer.finished_spans()[0].tags
        self.assertDictEqual(correct, span_tags)

    def test_parameters_tagged_with_prefix(self):
        # parameter_prefix namespaces the tag keys ("test.a", ...).
        def func_signature(a, b, c):
            pass
        func = create_autospec(func_signature, spec_set=True)
        traced_func = self.tracing.trace("TestTrace", func, tag_parameters=True, parameter_prefix="test")
        traced_func(10, 20, 30)
        correct = {"test.a": 10, "test.b": 20, "test.c": 30}
        span_tags = self.tracer.finished_spans()[0].tags
        self.assertDictEqual(correct, span_tags)

    def test_parameters_tagged_type_class(self):
        # Non-primitive argument values are stringified before tagging.
        def func_signature(a: int, b: MockTracer, c: str = "Test"):
            pass
        func = create_autospec(func_signature, spec_set=True)
        traced_func = self.tracing.trace("TestTrace", func, tag_parameters=True)
        traced_func(10, MockTracer())
        span_tags = self.tracer.finished_spans()[0].tags
        self.assertIsInstance(span_tags["b"], str)

    def test_parameters_tagged_type_number(self):
        # Numeric argument values keep their numeric type.
        def func_signature(a: int, b: MockTracer, c: str = "Test"):
            pass
        func = create_autospec(func_signature, spec_set=True)
        traced_func = self.tracing.trace("TestTrace", func, tag_parameters=True)
        traced_func(10, MockTracer())
        span_tags = self.tracer.finished_spans()[0].tags
        self.assertIsInstance(span_tags["a"], numbers.Number)

    def test_parameters_tagged_type_bool(self):
        # Boolean argument values keep their bool type.
        def func_signature(a: bool, b: MockTracer, c: str = "Test"):
            pass
        func = create_autospec(func_signature, spec_set=True)
        traced_func = self.tracing.trace("TestTrace", func, tag_parameters=True)
        traced_func(True, MockTracer())
        span_tags = self.tracer.finished_spans()[0].tags
        self.assertIsInstance(span_tags["a"], bool)

    def test_function_log_return_only_once(self):
        # log_return=True must emit exactly one log record per call.
        func = MagicMock(return_value=3)
        traced_func = self.tracing.trace("TestTrace", func, log_return=True)
        traced_func()
        logs = self.tracer.finished_spans()[0].logs
        self.assertEqual(len(logs), 1, "There should only be one log.")

    def test_log_return_int(self):
        # Scalar returns are logged under the "return" key.
        func = MagicMock(return_value=3)
        traced_func = self.tracing.trace("TestTrace", func, log_return=True)
        traced_func()
        logs = self.tracer.finished_spans()[0].logs
        correct = {"return": 3}
        self.assertDictEqual(correct, logs[0].key_values)

    def test_log_return_dict(self):
        # Dict returns are flattened with dotted keys ("return.<key>").
        func = MagicMock(return_value={"test_1": 3, "test_2": 6})
        traced_func = self.tracing.trace("TestTrace", func, log_return=True)
        traced_func()
        logs = self.tracer.finished_spans()[0].logs
        correct = {"return.test_1": 3, "return.test_2": 6}
        self.assertDictEqual(correct, logs[0].key_values)

    def test_log_return_list(self):
        # List elements are flattened with their index ("return.<key>.<i>").
        func = MagicMock(return_value={"test_1": 3, "test_2": [1, 2, 3]})
        traced_func = self.tracing.trace("TestTrace", func, log_return=True)
        traced_func()
        logs = self.tracer.finished_spans()[0].logs
        correct = {
            "return.test_1": 3,
            "return.test_2.0": 1,
            "return.test_2.1": 2,
            "return.test_2.2": 3,
        }
        self.assertDictEqual(correct, logs[0].key_values)

    def test_log_return_num_key(self):
        # Non-string dict keys are stringified in the flattened log keys.
        func = MagicMock(return_value={3: 3, "test_2": [1, 2, 3]})
        traced_func = self.tracing.trace("TestTrace", func, log_return=True)
        traced_func()
        logs = self.tracer.finished_spans()[0].logs
        correct = {
            "return.3": 3,
            "return.test_2.0": 1,
            "return.test_2.1": 2,
            "return.test_2.2": 3,
        }
        self.assertDictEqual(correct, logs[0].key_values)

    def test_log_return_tuple_key(self):
        # Tuple keys are rendered via their repr ("(1, 1)").
        func = MagicMock(return_value={(1, 1): 3, "test_2": [1, 2, 3]})
        traced_func = self.tracing.trace("TestTrace", func, log_return=True)
        traced_func()
        logs = self.tracer.finished_spans()[0].logs
        correct = {
            "return.(1, 1)": 3,
            "return.test_2.0": 1,
            "return.test_2.1": 2,
            "return.test_2.2": 3,
        }
        self.assertDictEqual(correct, logs[0].key_values)

    def test_log_return_object_key(self):
        # Arbitrary object keys fall back to str(obj) in the log key.
        test_object = MockTracer()
        func = MagicMock(return_value={test_object: "Hello"})
        traced_func = self.tracing.trace("TestTrace", func, log_return=True)
        traced_func()
        logs = self.tracer.finished_spans()[0].logs
        correct = {f"return.{str(test_object)}": "Hello"}
        self.assertDictEqual(correct, logs[0].key_values)
| 30.512563
| 105
| 0.626318
| 771
| 6,072
| 4.70428
| 0.111543
| 0.082713
| 0.065619
| 0.086849
| 0.788255
| 0.752137
| 0.725393
| 0.714364
| 0.702509
| 0.702509
| 0
| 0.024929
| 0.246871
| 6,072
| 198
| 106
| 30.666667
| 0.768205
| 0
| 0
| 0.567164
| 0
| 0
| 0.07691
| 0.004117
| 0
| 0
| 0
| 0
| 0.11194
| 1
| 0.156716
| false
| 0.052239
| 0.044776
| 0
| 0.208955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d44d3d981f82a0cdf972455e9d88a4445545d2f0
| 161
|
py
|
Python
|
livestock/slaughtering/doctype/chicken_co_packing/test_chicken_co_packing.py
|
jayan13/livestock
|
75f4ccb246818d9cd55400d88fefbb36c168c713
|
[
"MIT"
] | null | null | null |
livestock/slaughtering/doctype/chicken_co_packing/test_chicken_co_packing.py
|
jayan13/livestock
|
75f4ccb246818d9cd55400d88fefbb36c168c713
|
[
"MIT"
] | null | null | null |
livestock/slaughtering/doctype/chicken_co_packing/test_chicken_co_packing.py
|
jayan13/livestock
|
75f4ccb246818d9cd55400d88fefbb36c168c713
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2022, alantechnologies and Contributors
# See license.txt
# import frappe
import unittest
class TestChickenCoPacking(unittest.TestCase):
pass
| 17.888889
| 55
| 0.801242
| 18
| 161
| 7.166667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.130435
| 161
| 8
| 56
| 20.125
| 0.892857
| 0.515528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d476da1bc58d7a1e677fb40a03e596a33fb0f3e3
| 44
|
py
|
Python
|
first_step.py
|
xulu199705/RT_Check
|
9f1ade3bca18cf720839c182bd0e772ce0dc6212
|
[
"MIT"
] | null | null | null |
first_step.py
|
xulu199705/RT_Check
|
9f1ade3bca18cf720839c182bd0e772ce0dc6212
|
[
"MIT"
] | null | null | null |
first_step.py
|
xulu199705/RT_Check
|
9f1ade3bca18cf720839c182bd0e772ce0dc6212
|
[
"MIT"
] | null | null | null |
import get_hwnd
get_hwnd.prt_hwnd_title()
| 8.8
| 25
| 0.818182
| 8
| 44
| 4
| 0.625
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 4
| 26
| 11
| 0.820513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d48ad58f431f6c4e67424407e8851da4431564a6
| 125
|
py
|
Python
|
test_git01.py
|
KSeungBin/rapa_detect_line
|
07cbed583027cd29124d9e473db894e4e53acc88
|
[
"Apache-2.0"
] | null | null | null |
test_git01.py
|
KSeungBin/rapa_detect_line
|
07cbed583027cd29124d9e473db894e4e53acc88
|
[
"Apache-2.0"
] | null | null | null |
test_git01.py
|
KSeungBin/rapa_detect_line
|
07cbed583027cd29124d9e473db894e4e53acc88
|
[
"Apache-2.0"
] | null | null | null |
import os
os.getcwd()
print('line 5')
print('line 5')
print('line 7')
print('line 5')
print('line 8')
print('line 5')
안떠요
| 9.615385
| 15
| 0.632
| 23
| 125
| 3.434783
| 0.391304
| 0.683544
| 0.506329
| 0.56962
| 0.607595
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 0.152
| 125
| 12
| 16
| 10.416667
| 0.688679
| 0
| 0
| 0.444444
| 0
| 0
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
2e47beb63f02eb95ecd44859a6e7f28918626cae
| 172
|
py
|
Python
|
AntiAnalysis/__init__.py
|
Devtion/CrazyPy
|
c3bde86f3925439bc3b9df308027c57c515fc712
|
[
"MIT"
] | 2
|
2020-05-15T17:13:37.000Z
|
2020-05-15T17:14:52.000Z
|
AntiAnalysis/__init__.py
|
Devtion/CrazyPy
|
c3bde86f3925439bc3b9df308027c57c515fc712
|
[
"MIT"
] | null | null | null |
AntiAnalysis/__init__.py
|
Devtion/CrazyPy
|
c3bde86f3925439bc3b9df308027c57c515fc712
|
[
"MIT"
] | null | null | null |
# Import all modules
import AntiAnalysis.VirtualBox
import AntiAnalysis.SandBox
import AntiAnalysis.Debugger
import AntiAnalysis.Emulator
import AntiAnalysis.Process
| 24.571429
| 31
| 0.848837
| 18
| 172
| 8.111111
| 0.5
| 0.616438
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 172
| 7
| 32
| 24.571429
| 0.960526
| 0.104651
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e4c23eadd4c50d7402fb819476b5a8a578e2a5b
| 60
|
py
|
Python
|
tests/python/kaolin/metrics/__init__.py
|
mlej8/kaolin
|
19fd610fff68c4d9ad9035386b76e6fd51b0b67c
|
[
"ECL-2.0",
"Apache-2.0"
] | 3,747
|
2019-11-13T02:18:16.000Z
|
2022-03-31T21:12:31.000Z
|
tests/python/kaolin/metrics/__init__.py
|
mlej8/kaolin
|
19fd610fff68c4d9ad9035386b76e6fd51b0b67c
|
[
"ECL-2.0",
"Apache-2.0"
] | 371
|
2019-11-13T14:50:59.000Z
|
2022-03-22T19:40:06.000Z
|
tests/python/kaolin/metrics/__init__.py
|
mlej8/kaolin
|
19fd610fff68c4d9ad9035386b76e6fd51b0b67c
|
[
"ECL-2.0",
"Apache-2.0"
] | 482
|
2019-11-13T05:04:38.000Z
|
2022-03-31T10:20:26.000Z
|
from . import test_trianglemesh
from . import test_voxelgrid
| 30
| 31
| 0.85
| 8
| 60
| 6.125
| 0.625
| 0.408163
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 60
| 2
| 32
| 30
| 0.924528
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
2e6c6a1736374811aabc13ef07cf0a50c3370621
| 74
|
py
|
Python
|
erri/python/trials_preparation/trial2.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
erri/python/trials_preparation/trial2.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | 16
|
2020-05-30T12:38:13.000Z
|
2022-02-19T09:23:31.000Z
|
erri/python/trials_preparation/trial2.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
def somme(liste_nombres):
pass
def moyenne(liste_nombres):
pass
| 10.571429
| 27
| 0.702703
| 10
| 74
| 5
| 0.6
| 0.48
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.216216
| 74
| 6
| 28
| 12.333333
| 0.862069
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
2e75770b92a6d34a174f44d7a7fbf15962d178f4
| 39
|
py
|
Python
|
python/testData/requirement/generation/newFileGeneration/main.py
|
Sajaki/intellij-community
|
6748af2c40567839d11fd652ec77ba263c074aad
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/requirement/generation/newFileGeneration/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2022-02-19T09:45:05.000Z
|
2022-02-27T20:32:55.000Z
|
python/testData/requirement/generation/newFileGeneration/main.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 1
|
2020-10-15T05:56:42.000Z
|
2020-10-15T05:56:42.000Z
|
from django import apps
import requests
| 19.5
| 23
| 0.871795
| 6
| 39
| 5.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 2
| 24
| 19.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5cf1cef302f9debcd36250730a2c06effc4f0705
| 31
|
py
|
Python
|
podcast-ml/service/src/app/podcastml/utils/__init__.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
podcast-ml/service/src/app/podcastml/utils/__init__.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
podcast-ml/service/src/app/podcastml/utils/__init__.py
|
cuappdev/archives
|
061d0f9cccf278363ffaeb27fc655743b1052ae5
|
[
"MIT"
] | null | null | null |
"""Init."""
from app import db
| 10.333333
| 18
| 0.612903
| 5
| 31
| 3.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 31
| 2
| 19
| 15.5
| 0.730769
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5cf2c5b79b531eee635dfb18ef717dc2f5b0afb6
| 12,963
|
py
|
Python
|
src/arguments.py
|
volkancirik/refer360
|
8ae7e739b812345204aa83514cdf5a271dfa812c
|
[
"MIT"
] | 7
|
2020-06-03T16:08:09.000Z
|
2021-09-30T07:30:34.000Z
|
src/arguments.py
|
volkancirik/refer360
|
8ae7e739b812345204aa83514cdf5a271dfa812c
|
[
"MIT"
] | null | null | null |
src/arguments.py
|
volkancirik/refer360
|
8ae7e739b812345204aa83514cdf5a271dfa812c
|
[
"MIT"
] | 1
|
2021-01-25T14:11:07.000Z
|
2021-01-25T14:11:07.000Z
|
"""
Arguments for train/test
"""
import argparse
def get_train_rl():
parser = argparse.ArgumentParser(
description='advantage-actor-critic RL training for localizing Waldo!')
parser.add_argument('--multi-gpu',
action='store_true', help='Use multiple gpu')
parser.add_argument('--epoch', type=int, default=200,
help='# of epochs to train (default: 200)')
parser.add_argument('--resume', type=str, default='',
help='to resume point for backtranslation (default: "")')
parser.add_argument('--prefix', type=str, default='rl',
help='prefix for model files (default: rl)')
parser.add_argument('--exp-dir', type=str, default='./exp',
help='experiment directory (default: ./exp)')
parser.add_argument('--data-path', type=str, default='../data',
help='data path for loading (default: ../data)')
# Model parameters
parser.add_argument('--model', type=str, default='lingunet',
help='model name concat2conv, rnn2conv, lingunet, visualbert, textonly, visiononly (default: lingunet)')
parser.add_argument('--n-img-channels', type=int, default=64,
help='# of image channels (default: 64)')
parser.add_argument('--n-layers', type=int, default=2,
help='# of layers for models (default: 2)')
parser.add_argument('--n-hid', type=int, default=128,
help='hidden dimension (default: 128)')
parser.add_argument('--n-obs', type=int, default=10000,
help='pre-trained wordvector size (default: 10000)')
parser.add_argument('--n-head', type=int, default=8,
help='# of heads for transformers (default: 8)')
parser.add_argument('--n-emb', type=int, default=128,
help='pre-trained wordvector size (default: 128)')
parser.add_argument('--cnn-layer', type=int, default=6,
help='cnn layer to use (default: 6)')
parser.add_argument('--dropout', type=float, default=0.25,
help='dropout rate (default: 0.25)')
parser.add_argument('--bidirectional',
action='store_true', help='Use bidirectional RNN')
parser.add_argument('--wordvec-file', type=str, default='../data/glove.840B.300d.txt',
help='data path for loading word vectors (default: ../data/glove.840B.300d.txt)')
parser.add_argument('--memorize', type=int, default=-1,
help='memorize first k batches (default: -1)')
parser.add_argument('--val-freq', type=int, default=1,
help='validation frequency for every n epochs (default: 1)')
parser.add_argument('--clip', type=float, default=5.0,
help='gradient clipping (default: 5.0)')
parser.add_argument('--metric', type=str, default='fov',
help='target metric completion|loss|fov|reward (default: fov)')
parser.add_argument('--gamma', type=float, default=0.99,
help='discount factor (default: 0.99)')
parser.add_argument('--seed', type=int, default=0,
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=1,
help='interval between training status logs (default: 1)')
parser.add_argument('--max-step', type=int, default=24,
help='Max # of steps (default: 24)')
parser.add_argument('--batch-size', type=int, default=4,
help='Batch size (default: 4)')
parser.add_argument('--lr', type=float, default=0.00001,
help='learning rate (default: 0.00001)')
parser.add_argument('--weight-decay', type=float, default=0.00001,
help='L2 regularization (weight decay) term (default: 0.00001)')
parser.add_argument('--verbose',
action='store_true', help='Print to a progressbar or lines in stdout')
parser.add_argument('--degrees', type=int, default=15,
help='degrees in FoV change, default=15')
parser.add_argument('--use-gpu-camera',
action='store_true', help='Use gpu camera')
parser.add_argument('--use-look-ahead',
action='store_true', help='Use look ahead')
parser.add_argument('--oracle-mode',
action='store_true', help='Oracle Mode')
parser.add_argument('--random-agent',
action='store_true', help='Random agent')
parser.add_argument('--greedy',
action='store_true', help='Greedy action prediction')
parser.add_argument('--debug', type=int, default=0,
help='Debug Mode if debug>0, default = 0')
parser.add_argument('--trn-images', type=str, default='all',
help='list of image categories for training separated by comma, options are restaurant, bedroom, living_room, plaza_courtyard, shop, street or all, default=all')
parser.add_argument('--val-images', type=str, default='all',
help='list of image categories for validation separated by comma, options are restaurant, bedroom, living_room, plaza_courtyard, shop, street or all, default=all')
parser.add_argument('--use-masks',
action='store_true', help='Use masks for text embeddings')
# LXRT Model Config
# Note: LXRT = L, X, R (three encoders), Transformer
parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,
help='Load the pre-trained LXMERT model.')
parser.add_argument("--llayers", default=9, type=int,
help='Number of Language layers')
parser.add_argument("--xlayers", default=5, type=int,
help='Number of CROSS-modality layers.')
parser.add_argument("--rlayers", default=5, type=int,
help='Number of object Relationship layers.')
parser.add_argument("--fromScratch", dest='from_scratch', action='store_const', default=False, const=True,
help='If none of the --load, --loadLXMERT, --loadLXMERTQA is set, '
'the model would be trained from scratch. If --fromScratch is'
' not specified, the model would load BERT-pre-trained weights by'
' default. ')
parser.add_argument('--use-detectron',
action='store_true', help='Use detectron for visual input')
return parser
def get_train_rl_sentence2sentence():
parser = get_train_rl()
parser.add_argument('--max-sentence', type=int, default=5,
help='max number of sentences, default=5')
return parser
def get_train_fovpretraining():
parser = argparse.ArgumentParser(
description='FoV pretraining for localizing nearby objects!')
parser.add_argument('--epoch', type=int, default=200,
help='# of epochs to train (default: 200)')
parser.add_argument('--resume', type=str, default='',
help='to resume point for backtranslation (default: "")')
parser.add_argument('--prefix', type=str, default='rl',
help='prefix for model files (default: rl)')
parser.add_argument('--exp-dir', type=str, default='./exp',
help='experiment directory (default: ./exp)')
parser.add_argument('--data-path', type=str, default='../data',
help='data path for loading (default: ../data)')
# Model parameters
parser.add_argument('--model', type=str, default='hallucinator',
help='model name hallucinator|lxmert (default: hallucinator)')
parser.add_argument('--n-img-channels', type=int, default=64,
help='# of image channels (default: 64)')
parser.add_argument('--n-layers', type=int, default=2,
help='# of layers for models (default: 2)')
parser.add_argument('--n-hid', type=int, default=128,
help='hidden dimension (default: 128)')
parser.add_argument('--n-obs', type=int, default=10000,
help='pre-trained wordvector size (default: 10000)')
parser.add_argument('--n-head', type=int, default=16,
help='# of heads for transformers (default: 16)')
parser.add_argument('--n-emb', type=int, default=128,
help='pre-trained wordvector size (default: 128)')
parser.add_argument('--cnn-layer', type=int, default=6,
help='cnn layer to use (default: 6)')
parser.add_argument('--dropout', type=float, default=0.25,
help='dropout rate (default: 0.25)')
parser.add_argument('--bidirectional',
action='store_true', help='Use bidirectional RNN')
parser.add_argument('--wordvec-file', type=str, default='../data/glove.840B.300d.txt',
help='data path for loading word vectors (default: ../data/glove.840B.300d.txt)')
parser.add_argument('--memorize', type=int, default=-1,
help='memorize first k batches (default: -1)')
parser.add_argument('--val-freq', type=int, default=1,
help='validation frequency for every n epochs (default: 1)')
parser.add_argument('--clip', type=float, default=5.0,
help='gradient clipping (default: 5.0)')
parser.add_argument('--metric', type=str, default='accuracy',
help='target metric loss,accuracy (default: accuracy)')
parser.add_argument('--seed', type=int, default=0,
help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=1,
help='interval between training status logs (default: 1)')
parser.add_argument('--batch-size', type=int, default=64,
help='Batch size (default: 64)')
parser.add_argument('--lr', type=float, default=0.00001,
help='learning rate (default: 0.00001)')
parser.add_argument('--weight-decay', type=float, default=0.00001,
help='L2 regularization (weight decay) term (default: 0.00001)')
parser.add_argument('--verbose',
action='store_true', help='Print to a progressbar or lines in stdout')
parser.add_argument('--debug', type=int, default=0,
help='Debug Mode if debug>0, default = 0')
parser.add_argument('--trn-images', type=str, default='all',
help='list of image categories for training separated by comma, options are restaurant, bedroom, living_room, plaza_courtyard, shop, street or all, default=all')
parser.add_argument('--val-images', type=str, default='all',
help='list of image categories for validation separated by comma, options are restaurant, bedroom, living_room, plaza_courtyard, shop, street or all, default=all')
parser.add_argument('--task', type=str, default='task1',
help='name of the task, options are task1, task2 default=task1')
parser.add_argument('--data-root', type=str, default='../data/fov_pretraining_all',
help='load data from data_root path default ../data/fov_pretraining_all')
parser.add_argument('--obj-dict-file', type=str, default='../data/vg_object_dictionaries.all.json',
help='object dictionary file, default=../data/vg_object_dictionaries.all.json')
parser.add_argument('--direction', type=str, default='canonical',
help='direction method used canonical | cartesian | lup | canonical_proximity, default=canonical')
parser.add_argument('--fov-emb-mode', type=int, default=2,
help='Fov embeddings mode 0: dont use it 1:only fov embeddings 2: fov embeddings + image features default=2')
parser.add_argument(
'--ignore-list', help='Comma separated list of ground-truth moves to ignore, default=""', default='')
# LXRT Model Config
# Note: LXRT = L, X, R (three encoders), Transformer
parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,
help='Load the pre-trained LXMERT model.')
parser.add_argument("--llayers", default=9, type=int,
help='Number of Language layers')
parser.add_argument("--xlayers", default=5, type=int,
help='Number of CROSS-modality layers.')
parser.add_argument("--rlayers", default=5, type=int,
help='Number of object Relationship layers.')
parser.add_argument("--fromScratch", dest='from_scratch', action='store_const', default=False, const=True,
help='If none of the --load, --loadLXMERT, --loadLXMERTQA is set, '
'the model would be trained from scratch. If --fromScratch is'
' not specified, the model would load BERT-pre-trained weights by'
' default. ')
return parser
| 57.105727
| 185
| 0.612744
| 1,567
| 12,963
| 4.985322
| 0.168475
| 0.099078
| 0.187148
| 0.029186
| 0.78725
| 0.751152
| 0.742704
| 0.732975
| 0.72491
| 0.72491
| 0
| 0.023887
| 0.244311
| 12,963
| 226
| 186
| 57.358407
| 0.773581
| 0.015197
| 0
| 0.689474
| 0
| 0.026316
| 0.432526
| 0.023367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015789
| false
| 0
| 0.005263
| 0
| 0.036842
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5cfb200f1eeacbf33033a73aa7fafc834cf99eaf
| 44
|
py
|
Python
|
src/approot/__init__.py
|
omkumar01/major-project
|
57dc229a17c7783142d0270f52181883f5b8a7f0
|
[
"MIT"
] | null | null | null |
src/approot/__init__.py
|
omkumar01/major-project
|
57dc229a17c7783142d0270f52181883f5b8a7f0
|
[
"MIT"
] | null | null | null |
src/approot/__init__.py
|
omkumar01/major-project
|
57dc229a17c7783142d0270f52181883f5b8a7f0
|
[
"MIT"
] | null | null | null |
from approot.celery import app as celery_app
| 44
| 44
| 0.863636
| 8
| 44
| 4.625
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 1
| 44
| 44
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d84d0913e3105408f6f2a94b26bbe2105679d3cd
| 31
|
py
|
Python
|
xvision/ops/functional.py
|
jimmysue/xvision
|
bf5aa567a197b3e4c9fdd285c80b4f7512d14d7a
|
[
"MIT"
] | 3
|
2021-04-08T10:50:53.000Z
|
2021-11-15T07:26:16.000Z
|
xvision/ops/functional.py
|
jimmysue/xvision
|
bf5aa567a197b3e4c9fdd285c80b4f7512d14d7a
|
[
"MIT"
] | 3
|
2021-08-05T07:40:52.000Z
|
2021-11-16T05:53:29.000Z
|
xvision/ops/functional.py
|
jimmysue/xvision
|
bf5aa567a197b3e4c9fdd285c80b4f7512d14d7a
|
[
"MIT"
] | 1
|
2021-12-15T05:57:48.000Z
|
2021-12-15T05:57:48.000Z
|
from .emd_loss import emd_loss
| 15.5
| 30
| 0.83871
| 6
| 31
| 4
| 0.666667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 31
| 1
| 31
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d857c01199881432f946a813c2b1eacbefca1fa8
| 93
|
py
|
Python
|
slurm_jupyter_kernel/__main__.py
|
mawigh/slurm_jupyter_kernel
|
37050b49eba3d6c71ea59f067917739dafa55d44
|
[
"MIT"
] | 1
|
2021-12-06T11:26:04.000Z
|
2021-12-06T11:26:04.000Z
|
slurm_jupyter_kernel/__main__.py
|
mawigh/slurm_jupyter_kernel
|
37050b49eba3d6c71ea59f067917739dafa55d44
|
[
"MIT"
] | 1
|
2021-12-06T08:23:59.000Z
|
2021-12-06T08:23:59.000Z
|
slurm_jupyter_kernel/__main__.py
|
mawigh/slurm_jupyter_kernel
|
37050b49eba3d6c71ea59f067917739dafa55d44
|
[
"MIT"
] | 3
|
2021-03-31T07:20:19.000Z
|
2021-12-04T01:50:06.000Z
|
from slurm_jupyter_kernel.start_kernel import slurm_jupyter_kernel;
slurm_jupyter_kernel();
| 23.25
| 67
| 0.88172
| 13
| 93
| 5.769231
| 0.461538
| 0.48
| 0.72
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 93
| 3
| 68
| 31
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
d862b299142d3eb41f91e81221844f92c71b3de3
| 28
|
py
|
Python
|
capsules/__init__.py
|
yuranusduke/DynamicRoutingCapsule
|
1a5e7e015673bcc3feaca0ff6e06e1f385769712
|
[
"MIT"
] | null | null | null |
capsules/__init__.py
|
yuranusduke/DynamicRoutingCapsule
|
1a5e7e015673bcc3feaca0ff6e06e1f385769712
|
[
"MIT"
] | null | null | null |
capsules/__init__.py
|
yuranusduke/DynamicRoutingCapsule
|
1a5e7e015673bcc3feaca0ff6e06e1f385769712
|
[
"MIT"
] | null | null | null |
from .CapsNet import CapsNet
| 28
| 28
| 0.857143
| 4
| 28
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d86b8d2cb1203098a91c8835cb29d38215e19a08
| 22
|
py
|
Python
|
smartlinks/tests/management/__init__.py
|
ixc/glamkit-smartlinks
|
c550f372cecd08bdc81795b18f6b0cec38ac7bd2
|
[
"BSD-3-Clause"
] | 3
|
2016-11-28T22:04:40.000Z
|
2021-05-23T22:35:37.000Z
|
smartlinks/tests/management/__init__.py
|
ixc/glamkit-smartlinks
|
c550f372cecd08bdc81795b18f6b0cec38ac7bd2
|
[
"BSD-3-Clause"
] | null | null | null |
smartlinks/tests/management/__init__.py
|
ixc/glamkit-smartlinks
|
c550f372cecd08bdc81795b18f6b0cec38ac7bd2
|
[
"BSD-3-Clause"
] | 2
|
2017-08-13T06:44:56.000Z
|
2017-10-04T00:14:35.000Z
|
from commands import *
| 22
| 22
| 0.818182
| 3
| 22
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 22
| 1
| 22
| 22
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d879e3d6b9d970febe5da5daf9d69dc466db3949
| 121
|
py
|
Python
|
module10-modules.and.packages/deepcloudlabs/utils.py
|
deepcloudlabs/dcl160-2021-jun-28
|
51ca7b419ea8d1af8e1c6b468cf583b2141a992d
|
[
"MIT"
] | null | null | null |
module10-modules.and.packages/deepcloudlabs/utils.py
|
deepcloudlabs/dcl160-2021-jun-28
|
51ca7b419ea8d1af8e1c6b468cf583b2141a992d
|
[
"MIT"
] | null | null | null |
module10-modules.and.packages/deepcloudlabs/utils.py
|
deepcloudlabs/dcl160-2021-jun-28
|
51ca7b419ea8d1af8e1c6b468cf583b2141a992d
|
[
"MIT"
] | null | null | null |
def is_even(n):
return n % 2 == 0
def is_odd(n):
return not is_even(n)
lost_numbers = (4, 8, 15, 16, 23, 42)
| 12.1
| 37
| 0.570248
| 25
| 121
| 2.6
| 0.68
| 0.153846
| 0.215385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 0.272727
| 121
| 9
| 38
| 13.444444
| 0.602273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.4
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
d88244ea9a924e467b7f42e0af0468cc89297a86
| 148
|
py
|
Python
|
src/utils/overpass_wrapper/__init__.py
|
Informatik-HS-KL/BEGGEL-SP-Map-Matcher-WS19
|
c3532a86640631dcafa4e88d6efb32360f0faf0f
|
[
"Apache-2.0"
] | 1
|
2021-07-21T13:51:18.000Z
|
2021-07-21T13:51:18.000Z
|
src/utils/overpass_wrapper/__init__.py
|
Informatik-HS-KL/BEGGEL-SP-Map-Matcher-WS19
|
c3532a86640631dcafa4e88d6efb32360f0faf0f
|
[
"Apache-2.0"
] | null | null | null |
src/utils/overpass_wrapper/__init__.py
|
Informatik-HS-KL/BEGGEL-SP-Map-Matcher-WS19
|
c3532a86640631dcafa4e88d6efb32360f0faf0f
|
[
"Apache-2.0"
] | null | null | null |
from .client_side import OverpassWrapperClientSide
from .server_side import OverpassWrapperServerSide
from .overpass_wrapper import OverpassWrapper
| 37
| 50
| 0.898649
| 15
| 148
| 8.666667
| 0.666667
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 148
| 3
| 51
| 49.333333
| 0.955882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d88b1e0403585fc919505fbfb36497f335ba306a
| 902
|
py
|
Python
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test23.py
|
YangHao666666/hawq
|
10cff8350f1ba806c6fec64eb67e0e6f6f24786c
|
[
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 450
|
2015-09-05T09:12:51.000Z
|
2018-08-30T01:45:36.000Z
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test23.py
|
YangHao666666/hawq
|
10cff8350f1ba806c6fec64eb67e0e6f6f24786c
|
[
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 1,274
|
2015-09-22T20:06:16.000Z
|
2018-08-31T22:14:00.000Z
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test23.py
|
YangHao666666/hawq
|
10cff8350f1ba806c6fec64eb67e0e6f6f24786c
|
[
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 278
|
2015-09-21T19:15:06.000Z
|
2018-08-31T00:36:51.000Z
|
'doc'
class X:
'doc'
def __init__(self):
self.fff = 0
def x(self):
pass
def y(self):
'should generate a warning'
if self.x:
pass
if self.x and globals():
pass
if globals() and self.x:
pass
def z(self):
'should NOT generate a warning'
if globals() :
pass
if self.x() and self.fff:
pass
if self.x() and globals():
pass
if globals() and self.x():
pass
if self.fff:
pass
print self.x
print self.fff
class Y(X):
'doc'
def j(self):
'should generate a warning'
if self.x:
pass
print self.fff
def h(self):
'should NOT generate a warning'
if self.x():
pass
print self.x
print self.fff
| 18.408163
| 39
| 0.446785
| 113
| 902
| 3.530973
| 0.19469
| 0.125313
| 0.105263
| 0.180451
| 0.776942
| 0.726817
| 0.726817
| 0.518797
| 0.518797
| 0.230576
| 0
| 0.002066
| 0.463415
| 902
| 48
| 40
| 18.791667
| 0.822314
| 0
| 0
| 0.595238
| 1
| 0
| 0.129712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.261905
| 0
| null | null | 0.119048
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d89c3d1405d9b16f198b69e2e136cfec97f32d86
| 160
|
py
|
Python
|
Exerc_Python/desafio21a.py
|
BotoniLucas/Curso_Python
|
6d6243e65758998d5cdd81b0ad06224784b88cb8
|
[
"MIT"
] | null | null | null |
Exerc_Python/desafio21a.py
|
BotoniLucas/Curso_Python
|
6d6243e65758998d5cdd81b0ad06224784b88cb8
|
[
"MIT"
] | null | null | null |
Exerc_Python/desafio21a.py
|
BotoniLucas/Curso_Python
|
6d6243e65758998d5cdd81b0ad06224784b88cb8
|
[
"MIT"
] | null | null | null |
# desafio 21
"""Play 'Deutschland.mp3' and block until playback finishes."""
import time

import pygame

pygame.mixer.init()
pygame.mixer.music.load('Deutschland.mp3')
pygame.mixer.music.play()
# Poll with a short sleep instead of the original `while ...: pass`
# busy-wait, which span a CPU core at 100% for the whole song.
while pygame.mixer.music.get_busy():
    time.sleep(0.1)
| 22.857143
| 43
| 0.76875
| 24
| 160
| 5.083333
| 0.625
| 0.360656
| 0.393443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020134
| 0.06875
| 160
| 7
| 43
| 22.857143
| 0.798658
| 0.0625
| 0
| 0
| 0
| 0
| 0.100671
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d8aa4d674619a35ccf24adbf6c26f5b008a9aa9c
| 12,270
|
py
|
Python
|
manila_tempest_tests/tests/api/admin/test_quotas.py
|
scality/manila
|
b4a67d033cdcbc1389ae52f35ad281be7a18c9ae
|
[
"Apache-2.0"
] | 1
|
2015-05-28T22:28:08.000Z
|
2015-05-28T22:28:08.000Z
|
manila_tempest_tests/tests/api/admin/test_quotas.py
|
scality/manila
|
b4a67d033cdcbc1389ae52f35ad281be7a18c9ae
|
[
"Apache-2.0"
] | 5
|
2015-08-13T15:17:28.000Z
|
2016-08-02T02:55:01.000Z
|
manila_tempest_tests/tests/api/admin/test_quotas.py
|
scality/manila
|
b4a67d033cdcbc1389ae52f35ad281be7a18c9ae
|
[
"Apache-2.0"
] | 2
|
2015-08-29T08:19:58.000Z
|
2016-08-02T02:46:10.000Z
|
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config # noqa
from tempest import test # noqa
from manila_tempest_tests import clients_share as clients
from manila_tempest_tests.tests.api import base
CONF = config.CONF
class SharesAdminQuotasTest(base.BaseSharesAdminTest):
    """Read-only sanity checks on default, tenant and per-user quotas."""

    # Quota keys validated by every test below, in assertion order.
    _QUOTA_KEYS = (
        "gigabytes",
        "snapshot_gigabytes",
        "shares",
        "snapshots",
        "share_networks",
    )

    @classmethod
    def resource_setup(cls):
        cls.os = clients.AdminManager()
        super(SharesAdminQuotasTest, cls).resource_setup()
        cls.user_id = cls.shares_v2_client.user_id
        cls.tenant_id = cls.shares_v2_client.tenant_id

    def _verify_quota_values(self, quotas):
        # Each quota must be an integer >= -1 (-1 conventionally means
        # "unlimited"), hence the "> -2" comparison.
        for key in self._QUOTA_KEYS:
            self.assertGreater(int(quotas[key]), -2)

    @test.attr(type=["gate", "smoke", ])
    def test_default_quotas(self):
        self._verify_quota_values(
            self.shares_v2_client.default_quotas(self.tenant_id))

    @test.attr(type=["gate", "smoke", ])
    def test_show_quotas(self):
        self._verify_quota_values(
            self.shares_v2_client.show_quotas(self.tenant_id))

    @test.attr(type=["gate", "smoke", ])
    def test_show_quotas_for_user(self):
        self._verify_quota_values(
            self.shares_v2_client.show_quotas(self.tenant_id, self.user_id))
class SharesAdminQuotasUpdateTest(base.BaseSharesAdminTest):
    """Tests that update, reset and unset (set to -1) manila quotas.

    Tenant isolation is forced so that quota changes made here cannot
    leak into other tests' tenants.
    """
    force_tenant_isolation = True
    client_version = '2'

    def setUp(self):
        # Bug fix: the original called super(self.__class__, self), which
        # recurses infinitely as soon as this class is subclassed -- always
        # name the class explicitly in Python 2 style super() calls.
        super(SharesAdminQuotasUpdateTest, self).setUp()
        self.client = self.get_client_with_isolated_creds(
            client_version=self.client_version)
        self.tenant_id = self.client.tenant_id
        self.user_id = self.client.user_id

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_shares(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id)
        new_quota = int(quotas["shares"]) + 2
        # set new quota for shares
        updated = self.client.update_quotas(self.tenant_id, shares=new_quota)
        self.assertEqual(int(updated["shares"]), new_quota)

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_shares(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        new_quota = int(quotas["shares"]) - 1
        # set new quota for shares
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id, shares=new_quota)
        self.assertEqual(int(updated["shares"]), new_quota)

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_snapshots(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id)
        new_quota = int(quotas["snapshots"]) + 2
        # set new quota for snapshots
        updated = self.client.update_quotas(
            self.tenant_id, snapshots=new_quota)
        self.assertEqual(int(updated["snapshots"]), new_quota)

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_snapshots(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        new_quota = int(quotas["snapshots"]) - 1
        # set new quota for snapshots
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id, snapshots=new_quota)
        self.assertEqual(int(updated["snapshots"]), new_quota)

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id)
        # make quotas for update
        gigabytes = int(custom["gigabytes"]) + 2
        # set new quota for shares
        updated = self.client.update_quotas(
            self.tenant_id, gigabytes=gigabytes)
        self.assertEqual(int(updated["gigabytes"]), gigabytes)

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_snapshot_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id)
        # make quotas for update
        snapshot_gigabytes = int(custom["snapshot_gigabytes"]) + 2
        # set new quota for shares
        updated = self.client.update_quotas(
            self.tenant_id,
            snapshot_gigabytes=snapshot_gigabytes)
        self.assertEqual(
            int(updated["snapshot_gigabytes"]), snapshot_gigabytes)

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id, self.user_id)
        # make quotas for update
        gigabytes = int(custom["gigabytes"]) - 1
        # set new quota for shares
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id, gigabytes=gigabytes)
        self.assertEqual(int(updated["gigabytes"]), gigabytes)

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_snapshot_gigabytes(self):
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id, self.user_id)
        # make quotas for update
        snapshot_gigabytes = int(custom["snapshot_gigabytes"]) - 1
        # set new quota for shares
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id,
            snapshot_gigabytes=snapshot_gigabytes)
        self.assertEqual(
            int(updated["snapshot_gigabytes"]), snapshot_gigabytes)

    @test.attr(type=["gate", "smoke", ])
    def test_update_tenant_quota_share_networks(self):
        # get current quotas
        quotas = self.client.show_quotas(self.tenant_id)
        new_quota = int(quotas["share_networks"]) + 2
        # set new quota for share-networks
        updated = self.client.update_quotas(
            self.tenant_id, share_networks=new_quota)
        self.assertEqual(int(updated["share_networks"]), new_quota)

    @test.attr(type=["gate", "smoke", ])
    def test_update_user_quota_share_networks(self):
        # get current quotas
        quotas = self.client.show_quotas(
            self.tenant_id, self.user_id)
        new_quota = int(quotas["share_networks"]) - 1
        # set new quota for share-networks
        updated = self.client.update_quotas(
            self.tenant_id, self.user_id,
            share_networks=new_quota)
        self.assertEqual(int(updated["share_networks"]), new_quota)

    @test.attr(type=["gate", "smoke", ])
    def test_reset_tenant_quotas(self):
        # get default_quotas
        default = self.client.default_quotas(self.tenant_id)
        # get current quotas
        custom = self.client.show_quotas(self.tenant_id)
        # make quotas for update
        shares = int(custom["shares"]) + 2
        snapshots = int(custom["snapshots"]) + 2
        gigabytes = int(custom["gigabytes"]) + 2
        snapshot_gigabytes = int(custom["snapshot_gigabytes"]) + 2
        share_networks = int(custom["share_networks"]) + 2
        # set new quota
        updated = self.client.update_quotas(
            self.tenant_id,
            shares=shares,
            snapshots=snapshots,
            gigabytes=gigabytes,
            snapshot_gigabytes=snapshot_gigabytes,
            share_networks=share_networks)
        self.assertEqual(int(updated["shares"]), shares)
        self.assertEqual(int(updated["snapshots"]), snapshots)
        self.assertEqual(int(updated["gigabytes"]), gigabytes)
        self.assertEqual(
            int(updated["snapshot_gigabytes"]), snapshot_gigabytes)
        self.assertEqual(int(updated["share_networks"]), share_networks)
        # reset customized quotas
        self.client.reset_quotas(self.tenant_id)
        # verify every quota went back to its default (the original test
        # updated snapshot_gigabytes but forgot to verify its reset)
        after_reset = self.client.show_quotas(self.tenant_id)
        self.assertEqual(int(after_reset["shares"]), int(default["shares"]))
        self.assertEqual(int(after_reset["snapshots"]),
                         int(default["snapshots"]))
        self.assertEqual(int(after_reset["gigabytes"]),
                         int(default["gigabytes"]))
        self.assertEqual(int(after_reset["snapshot_gigabytes"]),
                         int(default["snapshot_gigabytes"]))
        self.assertEqual(int(after_reset["share_networks"]),
                         int(default["share_networks"]))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_shares(self):
        self.client.update_quotas(self.tenant_id, shares=-1)
        quotas = self.client.show_quotas(self.tenant_id)
        self.assertEqual(-1, quotas.get('shares'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_shares(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, shares=-1)
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        self.assertEqual(-1, quotas.get('shares'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_snapshots(self):
        self.client.update_quotas(self.tenant_id, snapshots=-1)
        quotas = self.client.show_quotas(self.tenant_id)
        self.assertEqual(-1, quotas.get('snapshots'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_snapshots(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, snapshots=-1)
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        self.assertEqual(-1, quotas.get('snapshots'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_gigabytes(self):
        self.client.update_quotas(self.tenant_id, gigabytes=-1)
        quotas = self.client.show_quotas(self.tenant_id)
        self.assertEqual(-1, quotas.get('gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_snapshot_gigabytes(self):
        self.client.update_quotas(
            self.tenant_id, snapshot_gigabytes=-1)
        quotas = self.client.show_quotas(self.tenant_id)
        self.assertEqual(-1, quotas.get('snapshot_gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_gigabytes(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, gigabytes=-1)
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        self.assertEqual(-1, quotas.get('gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_snapshot_gigabytes(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, snapshot_gigabytes=-1)
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        self.assertEqual(-1, quotas.get('snapshot_gigabytes'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_quota_for_share_networks(self):
        self.client.update_quotas(self.tenant_id, share_networks=-1)
        quotas = self.client.show_quotas(self.tenant_id)
        self.assertEqual(-1, quotas.get('share_networks'))

    @test.attr(type=["gate", "smoke", ])
    def test_unlimited_user_quota_for_share_networks(self):
        self.client.update_quotas(
            self.tenant_id, self.user_id, share_networks=-1)
        quotas = self.client.show_quotas(self.tenant_id, self.user_id)
        self.assertEqual(-1, quotas.get('share_networks'))
| 37.87037
| 78
| 0.66251
| 1,520
| 12,270
| 5.134868
| 0.086842
| 0.090967
| 0.075336
| 0.110698
| 0.802947
| 0.785778
| 0.764894
| 0.75157
| 0.742601
| 0.717233
| 0
| 0.006647
| 0.21524
| 12,270
| 323
| 79
| 37.987616
| 0.803926
| 0.103993
| 0
| 0.543689
| 0
| 0
| 0.083036
| 0
| 0
| 0
| 0
| 0
| 0.213592
| 1
| 0.126214
| false
| 0
| 0.019417
| 0
| 0.165049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d8c0ca9e336b129f3d4a5c72ff83a6eef593b36e
| 78
|
py
|
Python
|
iic/archs/__init__.py
|
felizang/IIC-pytorch3
|
c16928fd497089b3776c7dc3a2ac89b863314a62
|
[
"MIT"
] | 1
|
2020-09-18T23:37:58.000Z
|
2020-09-18T23:37:58.000Z
|
iic/archs/__init__.py
|
felizang/IIC-pytorch3
|
c16928fd497089b3776c7dc3a2ac89b863314a62
|
[
"MIT"
] | null | null | null |
iic/archs/__init__.py
|
felizang/IIC-pytorch3
|
c16928fd497089b3776c7dc3a2ac89b863314a62
|
[
"MIT"
] | null | null | null |
from .cluster import *
# from .segmentation import *
# from .semisup import *
| 19.5
| 29
| 0.717949
| 9
| 78
| 6.222222
| 0.555556
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 78
| 3
| 30
| 26
| 0.875
| 0.641026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2b12e2495088fdbd215d105931b7da7c7cd9ed14
| 712
|
py
|
Python
|
finders/sublime.py
|
commoncode/djangogirls-offline
|
c43db2910f45dbee5335518f38166b55358b0709
|
[
"MIT"
] | null | null | null |
finders/sublime.py
|
commoncode/djangogirls-offline
|
c43db2910f45dbee5335518f38166b55358b0709
|
[
"MIT"
] | null | null | null |
finders/sublime.py
|
commoncode/djangogirls-offline
|
c43db2910f45dbee5335518f38166b55358b0709
|
[
"MIT"
] | null | null | null |
def sublime():
    """Return Sublime Text 3126 download entries.

    Each entry is a ``(platform label, download URL, local target path)``
    tuple, one per supported platform.
    """
    downloads = [
        ('MacOS',
         'https://download.sublimetext.com/Sublime%20Text%20Build%203126.dmg',
         'sublime/sublime.dmg'),
        ('Windows (32-bit)',
         'https://download.sublimetext.com/Sublime%20Text%20Build%203126%20Setup.exe',
         'sublime/sublime-x86.exe'),
        ('Windows (64-bit)',
         'https://download.sublimetext.com/Sublime%20Text%20Build%203126%20x64%20Setup.exe',
         'sublime/sublime-amd64.exe'),
        ('Ubuntu (32-bit)',
         'https://download.sublimetext.com/sublime-text_build-3126_i386.deb',
         'sublime/sublime-x86.deb'),
        ('Ubuntu (64-bit)',
         'https://download.sublimetext.com/sublime-text_build-3126_amd64.deb',
         'sublime/sublime-amd64.deb'),
    ]
    return downloads
| 71.2
| 151
| 0.679775
| 89
| 712
| 5.393258
| 0.314607
| 0.135417
| 0.25
| 0.28125
| 0.56875
| 0.56875
| 0.56875
| 0.552083
| 0.441667
| 0
| 0
| 0.1072
| 0.122191
| 712
| 9
| 152
| 79.111111
| 0.6608
| 0.026685
| 0
| 0
| 0
| 0.125
| 0.775837
| 0.139738
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| true
| 0
| 0
| 0.125
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
2b3a2886cced8166d79a209e36b74c73cb5dec34
| 14,328
|
py
|
Python
|
Contrib-Inspur/openbmc/poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 5
|
2019-11-11T07:57:26.000Z
|
2022-03-28T08:26:53.000Z
|
Contrib-Inspur/openbmc/poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 3
|
2019-09-05T21:47:07.000Z
|
2019-09-17T18:10:45.000Z
|
Contrib-Inspur/openbmc/poky/bitbake/lib/toaster/tests/functional/test_functional_basic.py
|
opencomputeproject/Rack-Manager
|
e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a
|
[
"MIT"
] | 11
|
2019-07-20T00:16:32.000Z
|
2022-01-11T14:17:48.000Z
|
#! /usr/bin/env python
#
# BitBake Toaster functional tests implementation
#
# Copyright (C) 2017 Intel Corporation
#
# SPDX-License-Identifier: GPL-2.0-only
#
import time
import re
from tests.functional.functional_helpers import SeleniumFunctionalTestCase
from orm.models import Project
class FuntionalTestBasic(SeleniumFunctionalTestCase):
    """Basic functional UI tests for Toaster, driven through Selenium.

    Each test navigates the Toaster web UI for the 'selenium-project'
    project and asserts that the expected page elements are present.

    Bug fix applied throughout: the original used bare ``except:`` clauses,
    which also swallow SystemExit and KeyboardInterrupt; they are narrowed
    to ``except Exception:`` (AssertionError is still caught, so the
    assert-then-fail pattern behaves the same).
    """

    # testcase (1514)
    def test_create_slenium_project(self):
        """Create a project from the landing page and verify it persists."""
        project_name = 'selenium-project'
        self.get('')
        self.driver.find_element_by_link_text("To start building, create your first Toaster project").click()
        self.driver.find_element_by_id("new-project-name").send_keys(project_name)
        self.driver.find_element_by_id('projectversion').click()
        self.driver.find_element_by_id("create-project-button").click()
        element = self.wait_until_visible('#project-created-notification')
        self.assertTrue(self.element_exists('#project-created-notification'), 'Project creation notification not shown')
        self.assertTrue(project_name in element.text,
                        "New project name not in new project notification")
        # The project must also have landed in the database, not just the UI.
        self.assertTrue(Project.objects.filter(name=project_name).count(),
                        "New project not found in database")

    # testcase (1515)
    def test_verify_left_bar_menu(self):
        """Follow every link in the project's left-hand configuration menu."""
        self.get('')
        self.wait_until_visible('#projectstable')
        self.find_element_by_link_text_in_table('projectstable', 'selenium-project').click()
        self.assertTrue(self.element_exists('#config-nav'), 'Configuration Tab does not exist')
        project_URL = self.get_URL()
        self.driver.find_element_by_xpath('//a[@href="'+project_URL+'"]').click()
        try:
            self.driver.find_element_by_xpath("//*[@id='config-nav']/ul/li/a[@href="+'"'+project_URL+'customimages/"'+"]").click()
            self.assertTrue(re.search("Custom images", self.driver.find_element_by_xpath("//div[@class='col-md-10']").text), 'Custom images information is not loading properly')
        except Exception:
            self.fail(msg='No Custom images tab available')
        try:
            self.driver.find_element_by_xpath("//*[@id='config-nav']/ul/li/a[@href="+'"'+project_URL+'images/"'+"]").click()
            self.assertTrue(re.search("Compatible image recipes", self.driver.find_element_by_xpath("//div[@class='col-md-10']").text), 'The Compatible image recipes information is not loading properly')
        except Exception:
            self.fail(msg='No Compatible image tab available')
        try:
            self.driver.find_element_by_xpath("//*[@id='config-nav']/ul/li/a[@href="+'"'+project_URL+'softwarerecipes/"'+"]").click()
            self.assertTrue(re.search("Compatible software recipes", self.driver.find_element_by_xpath("//div[@class='col-md-10']").text), 'The Compatible software recipe information is not loading properly')
        except Exception:
            self.fail(msg='No Compatible software recipe tab available')
        try:
            self.driver.find_element_by_xpath("//*[@id='config-nav']/ul/li/a[@href="+'"'+project_URL+'machines/"'+"]").click()
            self.assertTrue(re.search("Compatible machines", self.driver.find_element_by_xpath("//div[@class='col-md-10']").text), 'The Compatible machine information is not loading properly')
        except Exception:
            self.fail(msg='No Compatible machines tab available')
        try:
            self.driver.find_element_by_xpath("//*[@id='config-nav']/ul/li/a[@href="+'"'+project_URL+'layers/"'+"]").click()
            self.assertTrue(re.search("Compatible layers", self.driver.find_element_by_xpath("//div[@class='col-md-10']").text), 'The Compatible layer information is not loading properly')
        except Exception:
            self.fail(msg='No Compatible layers tab available')
        try:
            self.driver.find_element_by_xpath("//*[@id='config-nav']/ul/li/a[@href="+'"'+project_URL+'configuration"'+"]").click()
            self.assertTrue(re.search("Bitbake variables", self.driver.find_element_by_xpath("//div[@class='col-md-10']").text), 'The Bitbake variables information is not loading properly')
        except Exception:
            self.fail(msg='No Bitbake variables tab available')

    # testcase (1516)
    def test_review_configuration_information(self):
        """Check machine, most-built, release and layer sections together."""
        self.get('')
        self.driver.find_element_by_xpath("//div[@id='global-nav']/ul/li/a[@href="+'"'+'/toastergui/projects/'+'"'+"]").click()
        self.wait_until_visible('#projectstable')
        self.find_element_by_link_text_in_table('projectstable', 'selenium-project').click()
        project_URL = self.get_URL()
        try:
            self.assertTrue(self.element_exists('#machine-section'), 'Machine section for the project configuration page does not exist')
            self.assertTrue(re.search("qemux86", self.driver.find_element_by_xpath("//span[@id='project-machine-name']").text), 'The machine type is not assigned')
            self.driver.find_element_by_xpath("//span[@id='change-machine-toggle']").click()
            self.wait_until_visible('#select-machine-form')
            self.wait_until_visible('#cancel-machine-change')
            self.driver.find_element_by_xpath("//form[@id='select-machine-form']/a[@id='cancel-machine-change']").click()
        except Exception:
            self.fail(msg='The machine information is wrong in the configuration page')
        try:
            self.driver.find_element_by_id('no-most-built')
        except Exception:
            self.fail(msg='No Most built information in project detail page')
        try:
            self.assertTrue(re.search("Yocto Project master", self.driver.find_element_by_xpath("//span[@id='project-release-title']").text), 'The project release is not defined')
        except Exception:
            self.fail(msg='No project release title information in project detail page')
        try:
            self.driver.find_element_by_xpath("//div[@id='layer-container']")
            self.assertTrue(re.search("3", self.driver.find_element_by_id("project-layers-count").text), 'There should be 3 layers listed in the layer count')
            layer_list = self.driver.find_element_by_id("layers-in-project-list")
            layers = layer_list.find_elements_by_tag_name("li")
            for layer in layers:
                if re.match("openembedded-core", layer.text):
                    print("openembedded-core layer is a default layer in the project configuration")
                elif re.match("meta-poky", layer.text):
                    print("meta-poky layer is a default layer in the project configuration")
                elif re.match("meta-yocto-bsp", layer.text):
                    # typo fix: message previously read "configuratoin"
                    print("meta-yocto-bsp is a default layer in the project configuration")
                else:
                    self.fail(msg='default layers are missing from the project configuration')
        except Exception:
            self.fail(msg='No Layer information in project detail page')

    # testcase (1517)
    def test_verify_machine_information(self):
        """Verify the machine section and the change-machine toggle."""
        self.get('')
        self.driver.find_element_by_xpath("//div[@id='global-nav']/ul/li/a[@href="+'"'+'/toastergui/projects/'+'"'+"]").click()
        self.wait_until_visible('#projectstable')
        self.find_element_by_link_text_in_table('projectstable', 'selenium-project').click()
        try:
            self.assertTrue(self.element_exists('#machine-section'), 'Machine section for the project configuration page does not exist')
            self.assertTrue(re.search("qemux86", self.driver.find_element_by_id("project-machine-name").text), 'The machine type is not assigned')
            self.driver.find_element_by_id("change-machine-toggle").click()
            self.wait_until_visible('#select-machine-form')
            self.wait_until_visible('#cancel-machine-change')
            self.driver.find_element_by_id("cancel-machine-change").click()
        except Exception:
            self.fail(msg='The machine information is wrong in the configuration page')

    # testcase (1518)
    def test_verify_most_built_recipes_information(self):
        """Verify the 'no builds yet' notice and its recipe-chooser link."""
        self.get('')
        self.driver.find_element_by_xpath("//div[@id='global-nav']/ul/li/a[@href="+'"'+'/toastergui/projects/'+'"'+"]").click()
        self.wait_until_visible('#projectstable')
        self.find_element_by_link_text_in_table('projectstable', 'selenium-project').click()
        project_URL = self.get_URL()
        try:
            self.assertTrue(re.search("You haven't built any recipes yet", self.driver.find_element_by_id("no-most-built").text), 'Default message of no builds is not present')
            self.driver.find_element_by_xpath("//div[@id='no-most-built']/p/a[@href="+'"'+project_URL+'images/"'+"]").click()
            self.assertTrue(re.search("Compatible image recipes", self.driver.find_element_by_xpath("//div[@class='col-md-10']").text), 'The Choose a recipe to build link is not working properly')
        except Exception:
            self.fail(msg='No Most built information in project detail page')

    # testcase (1519)
    def test_verify_project_release_information(self):
        """Verify the project release title on the detail page."""
        self.get('')
        self.driver.find_element_by_xpath("//div[@id='global-nav']/ul/li/a[@href="+'"'+'/toastergui/projects/'+'"'+"]").click()
        self.wait_until_visible('#projectstable')
        self.find_element_by_link_text_in_table('projectstable', 'selenium-project').click()
        try:
            self.assertTrue(re.search("Yocto Project master", self.driver.find_element_by_id("project-release-title").text), 'The project release is not defined')
        except Exception:
            self.fail(msg='No project release title information in project detail page')

    # testcase (1520)
    def test_verify_layer_information(self):
        """Verify default layers and the layer-management widgets."""
        self.get('')
        self.driver.find_element_by_xpath("//div[@id='global-nav']/ul/li/a[@href="+'"'+'/toastergui/projects/'+'"'+"]").click()
        self.wait_until_visible('#projectstable')
        self.find_element_by_link_text_in_table('projectstable', 'selenium-project').click()
        project_URL = self.get_URL()
        try:
            self.driver.find_element_by_xpath("//div[@id='layer-container']")
            self.assertTrue(re.search("3", self.driver.find_element_by_id("project-layers-count").text), 'There should be 3 layers listed in the layer count')
            layer_list = self.driver.find_element_by_id("layers-in-project-list")
            layers = layer_list.find_elements_by_tag_name("li")
            for layer in layers:
                if re.match("openembedded-core", layer.text):
                    print("openembedded-core layer is a default layer in the project configuration")
                elif re.match("meta-poky", layer.text):
                    print("meta-poky layer is a default layer in the project configuration")
                elif re.match("meta-yocto-bsp", layer.text):
                    # typo fix: message previously read "configuratoin"
                    print("meta-yocto-bsp is a default layer in the project configuration")
                else:
                    self.fail(msg='default layers are missing from the project configuration')
            self.driver.find_element_by_xpath("//input[@id='layer-add-input']")
            self.driver.find_element_by_xpath("//button[@id='add-layer-btn']")
            self.driver.find_element_by_xpath("//div[@id='layer-container']/form[@class='form-inline']/p/a[@id='view-compatible-layers']")
            self.driver.find_element_by_xpath("//div[@id='layer-container']/form[@class='form-inline']/p/a[@href="+'"'+project_URL+'importlayer"'+"]")
        except Exception:
            self.fail(msg='No Layer information in project detail page')

    # testcase (1521)
    def test_verify_project_detail_links(self):
        """Verify the project top-bar tabs and the pages they lead to."""
        self.get('')
        self.driver.find_element_by_xpath("//div[@id='global-nav']/ul/li/a[@href="+'"'+'/toastergui/projects/'+'"'+"]").click()
        self.wait_until_visible('#projectstable')
        self.find_element_by_link_text_in_table('projectstable', 'selenium-project').click()
        project_URL = self.get_URL()
        self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li[@id='topbar-configuration-tab']/a[@href="+'"'+project_URL+'"'+"]").click()
        self.assertTrue(re.search("Configuration", self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li[@id='topbar-configuration-tab']/a[@href="+'"'+project_URL+'"'+"]").text), 'Configuration tab in project topbar is misspelled')
        try:
            self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li/a[@href="+'"'+project_URL+'builds/"'+"]").click()
            self.assertTrue(re.search("Builds", self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li/a[@href="+'"'+project_URL+'builds/"'+"]").text), 'Builds tab in project topbar is misspelled')
            self.driver.find_element_by_xpath("//div[@id='empty-state-projectbuildstable']")
        except Exception:
            self.fail(msg='Builds tab information is not present')
        try:
            self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li/a[@href="+'"'+project_URL+'importlayer"'+"]").click()
            self.assertTrue(re.search("Import layer", self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li/a[@href="+'"'+project_URL+'importlayer"'+"]").text), 'Import layer tab in project topbar is misspelled')
            self.driver.find_element_by_xpath("//fieldset[@id='repo-select']")
            self.driver.find_element_by_xpath("//fieldset[@id='git-repo']")
        except Exception:
            self.fail(msg='Import layer tab not loading properly')
        try:
            self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li/a[@href="+'"'+project_URL+'newcustomimage/"'+"]").click()
            self.assertTrue(re.search("New custom image", self.driver.find_element_by_xpath("//div[@id='project-topbar']/ul[@class='nav nav-tabs']/li/a[@href="+'"'+project_URL+'newcustomimage/"'+"]").text), 'New custom image tab in project topbar is misspelled')
            self.assertTrue(re.search("Select the image recipe you want to customise", self.driver.find_element_by_xpath("//div[@class='col-md-12']/h2").text), 'The new custom image tab is not loading correctly')
        except Exception:
            self.fail(msg='New custom image tab not loading properly')
| 61.758621
| 268
| 0.661991
| 1,870
| 14,328
| 4.903209
| 0.114439
| 0.07678
| 0.090741
| 0.130549
| 0.801069
| 0.769877
| 0.746646
| 0.728324
| 0.717963
| 0.704657
| 0
| 0.005344
| 0.177205
| 14,328
| 231
| 269
| 62.025974
| 0.772415
| 0.020031
| 0
| 0.587912
| 0
| 0.06044
| 0.414705
| 0.153473
| 0.120879
| 0
| 0
| 0
| 0.137363
| 1
| 0.043956
| false
| 0
| 0.043956
| 0
| 0.093407
| 0.032967
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2b4420ed0d3eb5ee2284257b45a567c89eda83bb
| 88
|
py
|
Python
|
deepART/nlp/text_preproc/__init__.py
|
nicholaslaw/deepART
|
ffd425128bec02ca22ac64a6b55425fc1b29f9ba
|
[
"MIT"
] | 1
|
2021-05-22T00:34:25.000Z
|
2021-05-22T00:34:25.000Z
|
deepART/nlp/text_preproc/__init__.py
|
nicholaslaw/deepART
|
ffd425128bec02ca22ac64a6b55425fc1b29f9ba
|
[
"MIT"
] | null | null | null |
deepART/nlp/text_preproc/__init__.py
|
nicholaslaw/deepART
|
ffd425128bec02ca22ac64a6b55425fc1b29f9ba
|
[
"MIT"
] | null | null | null |
from .preprocessors import *
from .corpus_handler import *
from .bow_vectorizer import *
| 29.333333
| 29
| 0.806818
| 11
| 88
| 6.272727
| 0.636364
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 88
| 3
| 30
| 29.333333
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
99139706c6cdeed0ff8ca7a2e98f407754c6d821
| 5,551
|
py
|
Python
|
app/SuperPhy/models/sparql/genomes.py
|
superphy/semantic
|
06b3885a6a3e424ba57be0c839ae2bd8a46364ac
|
[
"Apache-2.0"
] | 16
|
2015-09-23T17:19:35.000Z
|
2020-05-26T16:02:54.000Z
|
app/SuperPhy/models/sparql/genomes.py
|
superphy/semantic
|
06b3885a6a3e424ba57be0c839ae2bd8a46364ac
|
[
"Apache-2.0"
] | 21
|
2015-11-03T15:43:58.000Z
|
2017-10-30T02:23:37.000Z
|
app/SuperPhy/models/sparql/genomes.py
|
superphy/semantic
|
06b3885a6a3e424ba57be0c839ae2bd8a46364ac
|
[
"Apache-2.0"
] | 5
|
2015-10-19T17:12:39.000Z
|
2018-09-10T16:19:56.000Z
|
#!/usr/bin/python
from SuperPhy.models.sparql.endpoint import Endpoint
from SuperPhy.models.sparql.prefixes import prefixes
def get_all_syndromes():
    """
    input - None
    output - list of all the unique syndromes
    """
    string = prefixes + """
    SELECT ?syndromes
    WHERE
    {
        ?_Syndrome_Uri rdf:type :isolation_syndrome .
        ?_Syndrome_Uri rdfs:label ?syndromes
    }
    group by ?syndromes"""
    syndrome_query = Endpoint.query(string)
    # The query projects a single variable; its name is in head/vars[0].
    # Hoist that lookup out of the loop (the original re-indexed
    # ['head']['vars'][0] on every iteration) and build the list with a
    # comprehension instead of append-in-a-loop.
    var = syndrome_query['head']['vars'][0]
    return [item[var]['value']
            for item in syndrome_query['results']['bindings']]
def get_all_genome_metadata():
    """Fetch metadata for every genome in the triple store.

    Builds one SPARQL query that OPTIONALLY joins each metadata property
    (accession, biosample/bioproject ids, serotypes, host names, isolation
    date/location, syndromes) and collapses multiple values per genome with
    GROUP_CONCAT (comma+newline separated), so each genome comes back as a
    single row.

    Returns the raw ``Endpoint.query`` result (SPARQL JSON: head + bindings).
    """
    string = prefixes + """
    SELECT ?Genome_Uri
    (GROUP_CONCAT (DISTINCT ?_Syndrome ; separator=',\\n') AS ?Syndromes)(GROUP_CONCAT (DISTINCT ?_Accession ; separator=',\\n') AS ?Accession) (GROUP_CONCAT (DISTINCT ?_Biosample_Id ; separator=',\\n') AS ?Biosample_Id)(GROUP_CONCAT (DISTINCT ?_Bioproject_Id ; separator=',\\n') AS ?Bioproject_Id)(GROUP_CONCAT (DISTINCT ?_Strain ; separator=',\\n') AS ?Strain)(GROUP_CONCAT (DISTINCT ?_Serotype_O ; separator=',\\n') AS ?Serotype_O)(GROUP_CONCAT (DISTINCT ?_Serotype_H ; separator=',\\n') AS ?Serotype_H)(GROUP_CONCAT (DISTINCT ?_Scientific_Name ; separator=',\\n') AS ?Scientific_Name)(GROUP_CONCAT (DISTINCT ?_Common_Name ; separator=',\\n') AS ?Common_Name)(GROUP_CONCAT (DISTINCT ?_Isolation_Date ; separator=',\\n') AS ?Isolation_Date)(GROUP_CONCAT (DISTINCT ?_Geographic_Location ; separator=',\\n') AS ?Geographic_Location)
    WHERE
    { { ?Genome_Uri a gfvo:Genome }
      OPTIONAL
      { ?Genome_Uri :has_bioproject ?_Bioproject_Id}
      OPTIONAL
      { ?Genome_Uri :has_biosample ?_Biosample_Id}
      OPTIONAL
      { ?Genome_Uri :has_Htype ?_Serotype_H_Uri .
        ?_Serotype_H_Uri rdfs:label ?_Serotype_H
      }
      OPTIONAL
      { ?Genome_Uri :has_Otype ?_Serotype_O_Uri .
        ?_Serotype_O_Uri rdfs:label ?_Serotype_O
      }
      OPTIONAL
      { ?Genome_Uri :has_geographic_location ?_Geographic_Location}
      OPTIONAL
      { ?Genome_Uri :has_accession ?_Accession}
      OPTIONAL
      { ?Genome_Uri :has_strain ?_Strain}
      OPTIONAL
      { ?Genome_Uri :has_attribute ?_From_Host_Uri .
        ?_From_Host_Uri rdf:type :isolation_from_host .
        ?_From_Host_Uri :has_attribute ?_Host_Uri .
        ?_Host_Uri :scientific_name ?_Scientific_Name .
        ?_Host_Uri :common_name ?_Common_Name
      }
      OPTIONAL
      { ?Genome_Uri :has_isolation_date ?_Isolation_Date}
      OPTIONAL
      { ?Genome_Uri :has_isolation_attribute ?_Syndrome_Uri .
        ?_Syndrome_Uri rdf:type :isolation_syndrome .
        ?_Syndrome_Uri rdfs:label ?_Syndrome
      }
    }
    GROUP BY ?Genome_Uri
    ORDER BY (?Genome_Uri)
    """
    return Endpoint.query(string)
def get_genome_metadata(accession):
    """Fetch metadata for the single genome with the given accession.

    Same query shape as ``get_all_genome_metadata`` but restricted to the
    genome whose ``:has_accession`` equals *accession*.

    NOTE(review): *accession* is spliced into the query text with ``%s``;
    this is fine for trusted callers but is a SPARQL-injection risk if the
    value ever comes from untrusted input — confirm callers sanitise it.

    NOTE(review): the PREFIX lines below duplicate what ``prefixes`` already
    prepends; they look redundant.
    """
    string = prefixes + """
    PREFIX : <https://github.com/superphy#>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX owl: <http://www.w3.org/2002/07/owl#>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    PREFIX gfvo: <http://www.biointerchange.org/gfvo#>
    SELECT ?Genome_Uri
    (GROUP_CONCAT (DISTINCT ?_Syndrome ; separator=',\\n') AS ?Syndromes)(GROUP_CONCAT (DISTINCT ?_Accession ; separator=',\\n') AS ?Accession) (GROUP_CONCAT (DISTINCT ?_Biosample_Id ; separator=',\\n') AS ?Biosample_Id)(GROUP_CONCAT (DISTINCT ?_Bioproject_Id ; separator=',\\n') AS ?Bioproject_Id)(GROUP_CONCAT (DISTINCT ?_Strain ; separator=',\\n') AS ?Strain)(GROUP_CONCAT (DISTINCT ?_Serotype_O ; separator=',\\n') AS ?Serotype_O)(GROUP_CONCAT (DISTINCT ?_Serotype_H ; separator=',\\n') AS ?Serotype_H)(GROUP_CONCAT (DISTINCT ?_Scientific_Name ; separator=',\\n') AS ?Scientific_Name)(GROUP_CONCAT (DISTINCT ?_Common_Name ; separator=',\\n') AS ?Common_Name)(GROUP_CONCAT (DISTINCT ?_Isolation_Date ; separator=',\\n') AS ?Isolation_Date)(GROUP_CONCAT (DISTINCT ?_Geographic_Location ; separator=',\\n') AS ?Geographic_Location)
    WHERE
    {
      { ?Genome_Uri a gfvo:Genome .
        ?Genome_Uri :has_accession "%s"^^xsd:string}
      OPTIONAL
      { ?Genome_Uri :has_bioproject ?_Bioproject_Id}
      OPTIONAL
      { ?Genome_Uri :has_biosample ?_Biosample_Id}
      OPTIONAL
      { ?Genome_Uri :has_Htype ?_Serotype_H_Uri .
        ?_Serotype_H_Uri rdfs:label ?_Serotype_H
      }
      OPTIONAL
      { ?Genome_Uri :has_Otype ?_Serotype_O_Uri .
        ?_Serotype_O_Uri rdfs:label ?_Serotype_O
      }
      OPTIONAL
      { ?Genome_Uri :has_geographic_location ?_Geographic_Location}
      OPTIONAL
      { ?Genome_Uri :has_strain ?_Strain}
      OPTIONAL
      { ?Genome_Uri :has_attribute ?_From_Host_Uri .
        ?_From_Host_Uri rdf:type :isolation_from_host .
        ?_From_Host_Uri :has_attribute ?_Host_Uri .
        ?_Host_Uri :scientific_name ?_Scientific_Name .
        ?_Host_Uri :common_name ?_Common_Name
      }
      OPTIONAL
      { ?Genome_Uri :has_isolation_date ?_Isolation_Date}
      OPTIONAL
      { ?Genome_Uri :has_isolation_attribute ?_Syndrome_Uri .
        ?_Syndrome_Uri rdf:type :isolation_syndrome .
        ?_Syndrome_Uri rdfs:label ?_Syndrome
      }
    }
    GROUP BY ?Genome_Uri
    ORDER BY (?Genome_Uri)
    """ % (accession)
    return Endpoint.query(string)
| 46.647059
| 832
| 0.652135
| 634
| 5,551
| 5.299685
| 0.152997
| 0.075
| 0.124405
| 0.113095
| 0.784821
| 0.784821
| 0.784821
| 0.784821
| 0.784821
| 0.784821
| 0
| 0.005631
| 0.23221
| 5,551
| 118
| 833
| 47.042373
| 0.782731
| 0.012971
| 0
| 0.654206
| 0
| 0.028037
| 0.895585
| 0.12713
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028037
| false
| 0
| 0.018692
| 0
| 0.074766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
992723148f30fd087631440427dc71c1155bf987
| 35
|
py
|
Python
|
tfrecord_handler/io.py
|
m-zayan/tfrecord-handler
|
fef50743cbc61c6ffd937c47428d9c6102a8e047
|
[
"Apache-2.0"
] | null | null | null |
tfrecord_handler/io.py
|
m-zayan/tfrecord-handler
|
fef50743cbc61c6ffd937c47428d9c6102a8e047
|
[
"Apache-2.0"
] | null | null | null |
tfrecord_handler/io.py
|
m-zayan/tfrecord-handler
|
fef50743cbc61c6ffd937c47428d9c6102a8e047
|
[
"Apache-2.0"
] | null | null | null |
from tfrecord_handler._io import *
| 17.5
| 34
| 0.828571
| 5
| 35
| 5.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
993605e64d34991227020ec250dc90010bc12fb4
| 682
|
py
|
Python
|
jmeter_api/configs/__init__.py
|
dashawn888/jmeter_api
|
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
|
[
"Apache-2.0"
] | 11
|
2020-03-22T13:30:21.000Z
|
2021-12-25T06:23:44.000Z
|
jmeter_api/configs/__init__.py
|
dashawn888/jmeter_api
|
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
|
[
"Apache-2.0"
] | 2
|
2020-03-23T00:06:42.000Z
|
2021-02-24T21:41:40.000Z
|
jmeter_api/configs/__init__.py
|
dashawn888/jmeter_api
|
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
|
[
"Apache-2.0"
] | 3
|
2020-11-09T14:14:25.000Z
|
2021-05-27T02:54:38.000Z
|
from jmeter_api.configs.counter.elements import Counter
from jmeter_api.configs.csv_data_set_config.elements import CsvDataSetConfig
from jmeter_api.configs.http_auth_manager.elements import HTTPAuthManager
from jmeter_api.configs.http_cache_manager.elements import HTTPCacheManager
from jmeter_api.configs.http_cookie_manager.elements import HTTPCookieManager
from jmeter_api.configs.http_header_manager.elements import HTTPHeaderManager
from jmeter_api.configs.http_request_defaults.elements import HTTPRequestDefaults
from jmeter_api.configs.random_csv_data_set_config.elements import RandomCsvDataSetConfig
from jmeter_api.configs.random_variable.elements import RandomVariable
| 68.2
| 89
| 0.907625
| 90
| 682
| 6.577778
| 0.322222
| 0.152027
| 0.197635
| 0.304054
| 0.391892
| 0.101351
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052786
| 682
| 9
| 90
| 75.777778
| 0.916409
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9946addc08217b2a1101ee5cf8cd56d32f207148
| 32
|
py
|
Python
|
mhkit/river/io/__init__.py
|
Matthew-Boyd/MHKiT-Python
|
016e9e67dbe1ac1ec24b3a6f8eb2771f73dfefa6
|
[
"BSD-3-Clause"
] | 21
|
2020-04-20T19:10:03.000Z
|
2022-03-30T18:46:03.000Z
|
mhkit/river/io/__init__.py
|
Matthew-Boyd/MHKiT-Python
|
016e9e67dbe1ac1ec24b3a6f8eb2771f73dfefa6
|
[
"BSD-3-Clause"
] | 110
|
2020-03-06T22:11:08.000Z
|
2022-03-25T20:28:36.000Z
|
mhkit/river/io/__init__.py
|
Matthew-Boyd/MHKiT-Python
|
016e9e67dbe1ac1ec24b3a6f8eb2771f73dfefa6
|
[
"BSD-3-Clause"
] | 32
|
2020-03-05T20:33:10.000Z
|
2022-03-24T20:19:34.000Z
|
from mhkit.river.io import usgs
| 16
| 31
| 0.8125
| 6
| 32
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
41f17590d6717522c7db08810a1ce7c01ea93dbc
| 79
|
py
|
Python
|
EDA/index.py
|
jbofill10/Movies-EDA
|
97652d21df32d4984fafeb629db00edd93812da2
|
[
"MIT"
] | 1
|
2020-04-17T20:47:40.000Z
|
2020-04-17T20:47:40.000Z
|
EDA/index.py
|
jbofill10/Movies-EDA
|
97652d21df32d4984fafeb629db00edd93812da2
|
[
"MIT"
] | 12
|
2020-04-16T03:18:37.000Z
|
2020-04-19T02:53:58.000Z
|
EDA/index.py
|
jbofill10/Movies-EDA
|
97652d21df32d4984fafeb629db00edd93812da2
|
[
"MIT"
] | null | null | null |
from EDA import MetaData
def run(meta_df):
    """Run the metadata EDA.

    meta_df: the movies metadata table (presumably a pandas DataFrame —
    TODO confirm against callers); passed straight through to
    ``MetaData.metadata_eda``.
    """
    MetaData.metadata_eda(meta_df)
| 15.8
| 34
| 0.772152
| 13
| 79
| 4.461538
| 0.615385
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151899
| 79
| 4
| 35
| 19.75
| 0.865672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
511b9c12335aaf16efd9724689690cb44f4b60ac
| 114
|
py
|
Python
|
montepython/likelihoods/Planck15_highl_TTTEEE/__init__.py
|
archaeo-pteryx/montepython_public
|
6fbcaa3266fd3a10a8e3ed4190dc65e6f29f1a37
|
[
"MIT"
] | 69
|
2018-04-20T07:38:33.000Z
|
2022-03-11T06:55:36.000Z
|
montepython/likelihoods/Planck15_highl_TTTEEE/__init__.py
|
archaeo-pteryx/montepython_public
|
6fbcaa3266fd3a10a8e3ed4190dc65e6f29f1a37
|
[
"MIT"
] | 263
|
2018-05-20T21:58:11.000Z
|
2022-03-30T21:45:48.000Z
|
montepython/likelihoods/Planck15_highl_TTTEEE/__init__.py
|
archaeo-pteryx/montepython_public
|
6fbcaa3266fd3a10a8e3ed4190dc65e6f29f1a37
|
[
"MIT"
] | 78
|
2018-04-21T13:11:54.000Z
|
2022-02-01T01:57:31.000Z
|
from montepython.likelihood_class import Likelihood_clik
class Planck15_highl_TTTEEE(Likelihood_clik):
    """Planck 2015 high-l TT,TE,EE likelihood.

    All behaviour is inherited from ``Likelihood_clik``; this subclass adds
    nothing — presumably it exists only so montepython can resolve the
    likelihood by its class/directory name (verify against the framework).
    """
    pass
| 19
| 56
| 0.850877
| 14
| 114
| 6.571429
| 0.714286
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.114035
| 114
| 5
| 57
| 22.8
| 0.891089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 6
|
51435d897e226fc136654a5ded40c1b260922fd7
| 151
|
py
|
Python
|
uploads/core/admin.py
|
AsciencioAlex/simple-file-upload
|
bfd00a4d04147772027fd55454ac813813a11f19
|
[
"MIT"
] | null | null | null |
uploads/core/admin.py
|
AsciencioAlex/simple-file-upload
|
bfd00a4d04147772027fd55454ac813813a11f19
|
[
"MIT"
] | null | null | null |
uploads/core/admin.py
|
AsciencioAlex/simple-file-upload
|
bfd00a4d04147772027fd55454ac813813a11f19
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from uploads.core.forms import DocumentForm  # NOTE(review): imported but unused here
from uploads.core.models import Document

# Expose Document in the Django admin with the default ModelAdmin.
admin.site.register(Document)
| 18.875
| 43
| 0.834437
| 21
| 151
| 6
| 0.619048
| 0.174603
| 0.238095
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10596
| 151
| 7
| 44
| 21.571429
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5ab7e313c6428a3c8abd1e203bae7c3287bf003f
| 9,640
|
py
|
Python
|
app/tests/teams_tests/test_views.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 101
|
2018-04-11T14:48:04.000Z
|
2022-03-28T00:29:48.000Z
|
app/tests/teams_tests/test_views.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 1,733
|
2018-03-21T11:56:16.000Z
|
2022-03-31T14:58:30.000Z
|
app/tests/teams_tests/test_views.py
|
njmhendrix/grand-challenge.org
|
9bc36f5e26561a78bd405e8ea5e4c0f86c95f011
|
[
"Apache-2.0"
] | 42
|
2018-06-08T05:49:07.000Z
|
2022-03-29T08:43:01.000Z
|
import pytest
from django.conf import settings
from django.test import Client
from tests.factories import TeamFactory, TeamMemberFactory
from tests.utils import (
assert_viewname_redirect,
assert_viewname_status,
get_view_for_user,
validate_admin_or_participant_view,
validate_open_view,
)
def validate_owner_or_admin_view(
    *, two_challenge_set, client: Client, **kwargs
):
    """
    Assert that a view is only accessible to the object's owner
    (challenge-1 participant) or administrators of that particular
    challenge. (The original docstring said "administrators or
    participants", but participant1 is expected to get a 403 below.)
    """
    # No user: anonymous requests must be redirected to the login page.
    assert_viewname_redirect(
        redirect_url=settings.LOGIN_URL,
        challenge=two_challenge_set.challenge_set_1.challenge,
        client=client,
        **kwargs,
    )
    # (expected status code, user) pairs: only the owner and challenge-1
    # admins get 200; all challenge-2 users and other participants get 403.
    tests = [
        (403, two_challenge_set.challenge_set_1.non_participant),
        (200, two_challenge_set.challenge_set_1.participant),
        (403, two_challenge_set.challenge_set_1.participant1),
        (200, two_challenge_set.challenge_set_1.creator),
        (200, two_challenge_set.challenge_set_1.admin),
        (403, two_challenge_set.challenge_set_2.non_participant),
        (403, two_challenge_set.challenge_set_2.participant),
        (403, two_challenge_set.challenge_set_2.participant1),
        (403, two_challenge_set.challenge_set_2.creator),
        (403, two_challenge_set.challenge_set_2.admin),
        (200, two_challenge_set.admin12),
        (403, two_challenge_set.participant12),
        (200, two_challenge_set.admin1participant2),
    ]
    for test in tests:
        assert_viewname_status(
            code=test[0],
            challenge=two_challenge_set.challenge_set_1.challenge,
            client=client,
            user=test[1],
            **kwargs,
        )
def validate_member_owner_or_admin_view(
    *, two_challenge_set, client: Client, **kwargs
):
    """
    Assert that a view is only accessible to the team member concerned
    (participant1), the team owner (participant) or administrators of that
    particular challenge. Differs from ``validate_owner_or_admin_view``
    only in that participant1 is also expected to get a 200.
    """
    # No user: anonymous requests must be redirected to the login page.
    assert_viewname_redirect(
        redirect_url=settings.LOGIN_URL,
        challenge=two_challenge_set.challenge_set_1.challenge,
        client=client,
        **kwargs,
    )
    # (expected status code, user) pairs.
    tests = [
        (403, two_challenge_set.challenge_set_1.non_participant),
        (200, two_challenge_set.challenge_set_1.participant),
        (200, two_challenge_set.challenge_set_1.participant1),
        (200, two_challenge_set.challenge_set_1.creator),
        (200, two_challenge_set.challenge_set_1.admin),
        (403, two_challenge_set.challenge_set_2.non_participant),
        (403, two_challenge_set.challenge_set_2.participant),
        (403, two_challenge_set.challenge_set_2.participant1),
        (403, two_challenge_set.challenge_set_2.creator),
        (403, two_challenge_set.challenge_set_2.admin),
        (200, two_challenge_set.admin12),
        (403, two_challenge_set.participant12),
        (200, two_challenge_set.admin1participant2),
    ]
    for test in tests:
        assert_viewname_status(
            code=test[0],
            challenge=two_challenge_set.challenge_set_1.challenge,
            client=client,
            user=test[1],
            **kwargs,
        )
@pytest.mark.django_db
@pytest.mark.parametrize(
    "view", ["teams:list", "teams:create", "teams:member-create"]
)
def test_admin_or_participant_permissions(client, two_challenge_sets, view):
    """Team list/create views are restricted to challenge admins/participants."""
    team = TeamFactory(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        owner=two_challenge_sets.challenge_set_1.participant,
    )
    # Only member-create needs a pk in the url.
    # NOTE(review): "teams:detail" is checked here but never parametrized
    # above, so that part of the condition is currently dead.
    if view in ("teams:detail", "teams:member-create"):
        pk = team.pk
    else:
        pk = None
    validate_admin_or_participant_view(
        viewname=view,
        reverse_kwargs={"pk": pk},
        two_challenge_set=two_challenge_sets,
        client=client,
    )
@pytest.mark.django_db
def test_open_views(client, challenge_set):
    """The team detail page is publicly visible (no membership required)."""
    team = TeamFactory(
        challenge=challenge_set.challenge, owner=challenge_set.participant
    )
    validate_open_view(
        viewname="teams:detail",
        reverse_kwargs={"pk": team.pk},
        challenge_set=challenge_set,
        client=client,
    )
@pytest.mark.django_db
@pytest.mark.parametrize("view", ["teams:update", "teams:delete"])
def test_team_update_delete_permissions(client, two_challenge_sets, view):
    """Only the team owner or challenge admins may update/delete a team."""
    team = TeamFactory(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        owner=two_challenge_sets.challenge_set_1.participant,
    )
    # A second team (owned by participant1) exists so participant1 has a
    # team of their own but still must not touch the first team.
    TeamFactory(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        owner=two_challenge_sets.challenge_set_1.participant1,
    )
    validate_owner_or_admin_view(
        viewname=view,
        reverse_kwargs={"pk": team.pk},
        two_challenge_set=two_challenge_sets,
        client=client,
    )
@pytest.mark.django_db
def test_team_member_delete_permissions(client, two_challenge_sets):
    """Member removal is allowed for the member, team owner, or admins."""
    team = TeamFactory(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        owner=two_challenge_sets.challenge_set_1.participant,
    )
    team_member = TeamMemberFactory(
        team=team, user=two_challenge_sets.challenge_set_1.participant1
    )
    validate_member_owner_or_admin_view(
        viewname="teams:member-delete",
        reverse_kwargs={"pk": team_member.pk},
        two_challenge_set=two_challenge_sets,
        client=client,
    )
@pytest.mark.django_db
@pytest.mark.parametrize("team_name", ["test_team_name"])
def test_team_creation(client, two_challenge_sets, team_name):
    """A participant can create a team and is redirected to its page."""
    response = get_view_for_user(
        viewname="teams:create",
        challenge=two_challenge_sets.challenge_set_1.challenge,
        client=client,
        method=client.post,
        user=two_challenge_sets.challenge_set_1.participant,
        data={"name": team_name},
    )
    # Successful creation redirects to the new team's detail page.
    assert response.status_code == 302
    response = get_view_for_user(
        url=response.url,
        client=client,
        user=two_challenge_sets.challenge_set_1.participant,
    )
    assert response.status_code == 200
    assert team_name in response.rendered_content.lower()
@pytest.mark.django_db
def test_team_member_addition(client, two_challenge_sets):
    """A challenge participant can join an existing team."""
    team = TeamFactory(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        owner=two_challenge_sets.challenge_set_1.participant,
    )
    # The owner is a member from the start; participant1 is not yet.
    assert two_challenge_sets.challenge_set_1.participant in team.get_members()
    assert (
        two_challenge_sets.challenge_set_1.participant1
        not in team.get_members()
    )
    # Participant1 requests to join team
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=two_challenge_sets.challenge_set_1.challenge,
        client=client,
        method=client.post,
        user=two_challenge_sets.challenge_set_1.participant1,
        reverse_kwargs={"pk": team.pk},
    )
    assert (
        two_challenge_sets.challenge_set_1.participant1 in team.get_members()
    )
    assert response.status_code == 302
@pytest.mark.django_db
def test_unique_membership(client, two_challenge_sets):
    """A user may belong to at most one team per challenge.

    Covers: creating a second team while already in one (denied), joining
    a second team (denied), and that membership limits are per-challenge
    (participant12 may have a team in challenge 2 and join one in
    challenge 1, but not two in challenge 1).
    """
    team = TeamFactory(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        owner=two_challenge_sets.challenge_set_1.participant,
    )
    team1 = TeamFactory(
        challenge=two_challenge_sets.challenge_set_1.challenge,
        owner=two_challenge_sets.challenge_set_1.participant1,
    )
    # Try to create a new team, should be denied
    response = get_view_for_user(
        viewname="teams:create",
        challenge=two_challenge_sets.challenge_set_1.challenge,
        client=client,
        method=client.post,
        user=two_challenge_sets.challenge_set_1.participant,
        data={"name": "thisteamshouldnotbecreated"},
    )
    assert response.status_code == 200
    assert (
        "You are already a member of another team for this challenge"
        in response.rendered_content
    )
    # Participant1 requests to join team, should be denied
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=two_challenge_sets.challenge_set_1.challenge,
        client=client,
        method=client.post,
        user=two_challenge_sets.challenge_set_1.participant1,
        reverse_kwargs={"pk": team.pk},
    )
    assert response.status_code == 200
    assert (
        "You are already a member of another team for this challenge"
        in response.rendered_content
    )
    # participant12 should be able to create a team in their challenge and join
    # another team
    response = get_view_for_user(
        viewname="teams:create",
        challenge=two_challenge_sets.challenge_set_2.challenge,
        client=client,
        method=client.post,
        user=two_challenge_sets.participant12,
        data={"name": "thisteamshouldbecreated"},
    )
    assert response.status_code == 302
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=two_challenge_sets.challenge_set_1.challenge,
        client=client,
        method=client.post,
        user=two_challenge_sets.participant12,
        reverse_kwargs={"pk": team.pk},
    )
    assert response.status_code == 302
    assert two_challenge_sets.participant12 in team.get_members()
    # But not a second team in challenge 1.
    response = get_view_for_user(
        viewname="teams:member-create",
        challenge=two_challenge_sets.challenge_set_1.challenge,
        client=client,
        method=client.post,
        user=two_challenge_sets.participant12,
        reverse_kwargs={"pk": team1.pk},
    )
    assert response.status_code == 200
    assert (
        "You are already a member of another team for this challenge"
        in response.rendered_content
    )
| 34.184397
| 79
| 0.696992
| 1,169
| 9,640
| 5.400342
| 0.094098
| 0.178679
| 0.088547
| 0.118802
| 0.848408
| 0.808966
| 0.781562
| 0.774751
| 0.733407
| 0.708221
| 0
| 0.026055
| 0.215664
| 9,640
| 281
| 80
| 34.30605
| 0.808888
| 0.045436
| 0
| 0.639676
| 0
| 0
| 0.057224
| 0.005351
| 0
| 0
| 0
| 0
| 0.089069
| 1
| 0.036437
| false
| 0
| 0.020243
| 0
| 0.05668
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5ad2241cc583c3362b86effbfc3733ddb6a8d949
| 254
|
py
|
Python
|
api/organisations/managers.py
|
mevinbabuc/flagsmith
|
751bd6cb4a34bd2f80af5a9c547559da9c2fa010
|
[
"BSD-3-Clause"
] | 1,259
|
2021-06-10T11:24:09.000Z
|
2022-03-31T10:30:44.000Z
|
api/organisations/managers.py
|
mevinbabuc/flagsmith
|
751bd6cb4a34bd2f80af5a9c547559da9c2fa010
|
[
"BSD-3-Clause"
] | 392
|
2021-06-10T11:12:29.000Z
|
2022-03-31T10:13:53.000Z
|
api/organisations/managers.py
|
mevinbabuc/flagsmith
|
751bd6cb4a34bd2f80af5a9c547559da9c2fa010
|
[
"BSD-3-Clause"
] | 58
|
2021-06-11T03:18:07.000Z
|
2022-03-31T14:39:10.000Z
|
from django.db.models import Manager
from permissions.models import ORGANISATION_PERMISSION_TYPE
class OrganisationPermissionManager(Manager):
    """Model manager that limits querysets to organisation-level permissions."""

    def get_queryset(self):
        # Filter the default queryset down to rows whose ``type`` equals
        # ORGANISATION_PERMISSION_TYPE.
        return super().get_queryset().filter(type=ORGANISATION_PERMISSION_TYPE)
| 28.222222
| 79
| 0.814961
| 29
| 254
| 6.931034
| 0.655172
| 0.119403
| 0.258706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114173
| 254
| 8
| 80
| 31.75
| 0.893333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
5aeb24e96a5e5665f338ac97453dbfb6aac41415
| 4,281
|
py
|
Python
|
model/detection_model/TextSnake_pytorch/dataset/ratio_analysis.py
|
JinGyeSetBirdsFree/FudanOCR
|
e6b18b0eefaf832b2eb7198f5df79e00bd4cee36
|
[
"MIT"
] | 25
|
2020-02-29T12:14:10.000Z
|
2020-04-24T07:56:06.000Z
|
model/detection_model/TextSnake_pytorch/dataset/ratio_analysis.py
|
dun933/FudanOCR
|
fd79b679044ea23fd9eb30691453ed0805d2e98b
|
[
"MIT"
] | 33
|
2020-12-10T19:15:39.000Z
|
2022-03-12T00:17:30.000Z
|
model/detection_model/TextSnake_pytorch/dataset/ratio_analysis.py
|
dun933/FudanOCR
|
fd79b679044ea23fd9eb30691453ed0805d2e98b
|
[
"MIT"
] | 4
|
2020-02-29T12:14:18.000Z
|
2020-04-12T12:26:50.000Z
|
import cv2
import os
import numpy as np
from model.detection_model.TextSnake_pytorch.dataset.read_json import read_json, read_dict
def recorder(record, ratio):
    """Increment the histogram bucket of *record* that contains *ratio*.

    record: dict whose keys are 'lo~hi' strings (e.g. '1~2', '7~99999999')
            mapping to integer counts; mutated in place.
    ratio:  numeric value to bucket; counted in the first key where
            lo <= ratio < hi. Values outside every bucket are ignored.
    Returns the same (mutated) dict.
    """
    # Iterate the keys directly instead of shadowing the builtin `range`
    # and re-building the key string with format().
    for key in record:
        lo, hi = key.split('~')
        if int(lo) <= ratio < int(hi):
            record[key] += 1
            break
    return record
if __name__ == '__main__':
    # Compute the aspect ratio (long side / short side) of every text
    # polygon in the ICDAR19 train and test sets, write the raw ratios to
    # record.txt / record2.txt, and bucket them into legal/illegible
    # histograms via recorder().
    # NOTE(review): the train and test passes below are identical except
    # for the image folder and the output file — candidates for a helper.
    path = '/home/shf/fudan_ocr_system/datasets/ICDAR19/'
    json_name = 'train_labels.json'
    maxlen = 1280  # images are downscaled so their long side is <= maxlen
    train_files = os.listdir(os.path.join(path, 'train_images'))
    test_files = os.listdir(os.path.join(path, 'test_images'))
    data_dict = read_json(os.path.join(path, json_name))
    # Ratio histogram for legible (legal) polygons.
    legal_record = {
        '1~2': 0,
        '2~3': 0,
        '3~4': 0,
        '4~5': 0,
        '5~6': 0,
        '6~7': 0,
        '7~99999999': 0,
    }
    # Ratio histogram for illegible polygons.
    illegal_record = {
        '1~2': 0,
        '2~3': 0,
        '3~4': 0,
        '4~5': 0,
        '5~6': 0,
        '6~7': 0,
        '7~99999999': 0,
    }
    # max_area = -1
    # min_area = 999999999
    with open('record.txt', 'w') as f:
        for idx, file in enumerate(train_files):
            polygons = read_dict(data_dict, file)
            im = cv2.imread(os.path.join(path, 'train_images', file))
            h, w = im.shape[:2]
            scale = 1.0
            if max(h, w) > maxlen:
                scale = float(maxlen) / h if h > w else float(maxlen) / w
                im = cv2.resize(im, (int(w*scale), int(h*scale)))
            print(idx, file, len(polygons))
            for polygon in polygons:
                # Rescale polygon coordinates to match the resized image.
                polygon.points[:, 0] = (polygon.points[:, 0] * scale).astype(np.int32)
                polygon.points[:, 1] = (polygon.points[:, 1] * scale).astype(np.int32)
                if not polygon.illegibility:
                    # NOTE(review): `drawing` is never used — presumably a
                    # leftover from visual debugging.
                    drawing = np.zeros(im.shape[:2], np.uint8)
                    # minAreaRect gives the rotated bounding box; the aspect
                    # ratio is long side over short side.
                    _, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
                    ratio = float(max(w, h)) / min(w, h)
                    f.write(str(ratio) + '\n')
                    recorder(legal_record, ratio)
                else:
                    drawing = np.zeros(im.shape[:2], np.uint8)
                    _, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
                    ratio = float(max(w, h)) / min(w, h)
                    f.write(str(ratio) + '\n')
                    recorder(illegal_record, ratio)
            # Progress report every 10 images.
            if idx % 10 == 0:
                print('record: ', legal_record)
                print('illegal: ', illegal_record)
    print('record: ', legal_record)
    print('illegal: ', illegal_record)
    print("Test Images")
    with open('record2.txt', 'w') as f:
        for idx, file in enumerate(test_files):
            polygons = read_dict(data_dict, file)
            im = cv2.imread(os.path.join(path, 'test_images', file))
            h, w = im.shape[:2]
            scale = 1.0
            if max(h, w) > maxlen:
                scale = float(maxlen) / h if h > w else float(maxlen) / w
                im = cv2.resize(im, (int(w * scale), int(h * scale)))
            print(idx, file, len(polygons))
            for polygon in polygons:
                polygon.points[:, 0] = (polygon.points[:, 0] * scale).astype(np.int32)
                polygon.points[:, 1] = (polygon.points[:, 1] * scale).astype(np.int32)
                if not polygon.illegibility:
                    drawing = np.zeros(im.shape[:2], np.uint8)
                    _, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
                    ratio = float(max(w, h)) / min(w, h)
                    f.write(str(ratio) + '\n')
                    recorder(legal_record, ratio)
                else:
                    drawing = np.zeros(im.shape[:2], np.uint8)
                    _, (w, h), _ = cv2.minAreaRect(polygon.points.astype(np.int32))
                    ratio = float(max(w, h)) / min(w, h)
                    f.write(str(ratio) + '\n')
                    recorder(illegal_record, ratio)
            if idx % 10 == 0:
                print('record: ', legal_record)
                print('illegal: ', illegal_record)
    print('record: ', legal_record)
    print('illegal: ', illegal_record)
| 33.708661
| 90
| 0.494277
| 531
| 4,281
| 3.879473
| 0.188324
| 0.075728
| 0.050485
| 0.033981
| 0.777184
| 0.777184
| 0.762621
| 0.735437
| 0.735437
| 0.705825
| 0
| 0.047412
| 0.35459
| 4,281
| 126
| 91
| 33.97619
| 0.698154
| 0.007942
| 0
| 0.701031
| 0
| 0
| 0.067657
| 0.010372
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010309
| false
| 0
| 0.041237
| 0
| 0.061856
| 0.113402
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5af87e3a5039ec68981b94637fb70be999a158cf
| 148
|
py
|
Python
|
home/views.py
|
chiragkhandhar/ODAS
|
3b461e54a668ae1457caf71f3b28aa82ca2dd305
|
[
"MIT"
] | null | null | null |
home/views.py
|
chiragkhandhar/ODAS
|
3b461e54a668ae1457caf71f3b28aa82ca2dd305
|
[
"MIT"
] | null | null | null |
home/views.py
|
chiragkhandhar/ODAS
|
3b461e54a668ae1457caf71f3b28aa82ca2dd305
|
[
"MIT"
] | 1
|
2020-04-24T03:10:40.000Z
|
2020-04-24T03:10:40.000Z
|
from django.http import HttpResponse
from django.shortcuts import render
def index0(request):
    """Render the home page template for any incoming request."""
    return render(request, 'home/homepage.html')
| 24.666667
| 48
| 0.77027
| 19
| 148
| 6
| 0.736842
| 0.175439
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007937
| 0.148649
| 148
| 5
| 49
| 29.6
| 0.896825
| 0
| 0
| 0
| 0
| 0
| 0.125874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
851503cfbdc95af276c1681f8ca5cd8776353d27
| 43
|
py
|
Python
|
workon/templatetags/workon_chart.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/templatetags/workon_chart.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/templatetags/workon_chart.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
from ..contrib.chart.templatetags import *
| 21.5
| 42
| 0.790698
| 5
| 43
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 43
| 2
| 42
| 21.5
| 0.871795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
51805cd494063c08b036672d1f73408bdab210b2
| 77,579
|
py
|
Python
|
Othello.py
|
ojasonbernal/PythonOthelloGame
|
9f26b49a9cb84bc6e468081ba2cd37824d879f2a
|
[
"MIT"
] | null | null | null |
Othello.py
|
ojasonbernal/PythonOthelloGame
|
9f26b49a9cb84bc6e468081ba2cd37824d879f2a
|
[
"MIT"
] | null | null | null |
Othello.py
|
ojasonbernal/PythonOthelloGame
|
9f26b49a9cb84bc6e468081ba2cd37824d879f2a
|
[
"MIT"
] | null | null | null |
# Importing modules
import pygame
import numpy as np
import random
# Initializing the Pygame module
pygame.init()
def console_screen():
    """This function is meant for the user to enter specifications for the game as the player plays.

    Prompts on the console for the two player names and a music choice,
    starts the chosen soundtrack (looped) via pygame.mixer, and returns
    the two names as a tuple (user, user2).
    """
    print('Note: Enter nicknames to name the players in the game')
    user = ''
    user2 = ''
    try:
        user = input("Enter the name of player 1(Enter 'Computer 1' if you don't want to be named): ")
        user2 = input("Enter the name of player 2(Enter 'Computer 2' if there is no other player): ")
        print('1 Minecraft Music Remix\n'
              '2 Minecraft Calm Music\n'
              '3 No Music')
        music = input('Pick an option for music: ')
        if music == '1':
            pygame.mixer_music.load('MinecraftThemeSong.mp3')
            pygame.mixer.music.set_volume(.1)
            pygame.mixer_music.play(loops=100, start=0.0)
        elif music == '2':
            pygame.mixer_music.load('MinecraftThemeSong2.mp3')
            pygame.mixer_music.play(loops=100, start=0.0)
        elif music == '3':
            pass
        else:
            # Any other entry is treated as invalid input.
            raise ValueError
    except ValueError:  # Except statement for invalid user input
        music = ''
        # NOTE(review): this condition is always true (it should be `and`);
        # the loop only exits via the explicit `break`s below, so it behaves
        # like `while True`.
        while music != '1' or music != '2' or music != '3':
            print('Invalid input. Please enter a valid choice')
            print('1 Minecraft Music Remix\n'
                  '2 Minecraft Calm Music\n'
                  '3 No Music')
            music = input('Pick an option for music: ')
            if music == '1':
                pygame.mixer_music.load('MinecraftThemeSong.mp3')
                pygame.mixer.music.set_volume(.1)
                pygame.mixer_music.play(loops=100, start=0.0)
                break
            elif music == '2':
                pygame.mixer_music.load('MinecraftThemeSong2.mp3')
                pygame.mixer_music.play(loops=100, start=0.0)
                break
            elif music == '3':
                break
    except IOError:  # Except statement if a file could not be opened
        print('Could not open file. File may not exist')
    except AttributeError:  # Except statement if a module is not found
        print('No module found.')
    return user, user2
# Setting up Pygame Display window
display_width = 800
display_height = 600
# Defining colors as RGB tuples
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 220, 0)
LIGHTER_GREEN = (0, 255, 0)
DARK_GREEN = (0, 150, 0)
BLUE = (0, 100, 100)
LIGHTER_BLUE = (0, 128, 128)
ORANGE = (255, 150, 0)
LIGHTER_ORANGE = (255, 165, 0)
YELLOW = (235, 235, 0)
LIGHTER_YELLOW = (255, 255, 0)
# Game Initialization and Settings: prompt for names/music, then open the window
name1, name2 = console_screen()
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('OTHELLO GAME')
clock = pygame.time.Clock()
# NOTE(review): these capture the mouse state once at import time;
# presumably later code re-polls them each frame — verify.
click = pygame.mouse.get_pressed()
mouse = pygame.mouse.get_pos()
# Images used in the game
OthelloImage = pygame.image.load('reversi.png')
DirectionsImage = pygame.image.load('directions2.png')
Othello_background_image = pygame.image.load('background_othello_image.png')
Wood_background = pygame.image.load('wood_background.png')
# Dimensions of the board (8x8 grid)
rows = 8
columns = 8
# Circle Radius for the playing pieces
circle_radius = int((40 / 2) - 2)
# Displaying the Othello Image
def othello_image(x, y):
    """Draw the Othello board image on the game display at (x, y)."""
    position = (x, y)
    gameDisplay.blit(OthelloImage, position)
# Displaying the Directions Image
def directions_image(x, y):
    """Blit the how-to-play instructions image onto the game display.

    (x, y) is the top-left corner where the image is placed.
    """
    position = (x, y)
    gameDisplay.blit(DirectionsImage, position)
# Displaying the Background Othello Image
def background_othello_image(x, y):
    """Blit the Othello background image onto the game display.

    (x, y) is the top-left corner where the image is placed.
    """
    position = (x, y)
    gameDisplay.blit(Othello_background_image, position)
# Displaying the Wood Background Image
def wood_background_image(x, y):
    """Blit the wood-texture background image onto the game display.

    (x, y) is the top-left corner where the image is placed.
    """
    position = (x, y)
    gameDisplay.blit(Wood_background, position)
# Creating the board
def game_board():
    """Create and return the empty game board.

    The board is a rows x columns (8x8) numpy array of zeros; 0 marks an
    empty square.
    """
    shape = (rows, columns)
    return np.zeros(shape)
def piece_placed(x, y, player, board):
    """Write the given player's piece onto the board.

    Player 0 stores a 1 at board[x][y]; player 1 stores a 2.  Any other
    player value leaves the board untouched.  The (mutated) board is
    returned for convenience.
    """
    piece_values = {0: 1, 1: 2}
    if player in piece_values:
        board[x][y] = piece_values[player]
    return board
# Reversing the order of array elements along the specified axis
def print_board(board):
    """Print the board to the console, flipped along axis 0.

    Row 0 is rendered at the bottom of the pygame window, so flipping
    vertically prints the board the same way the player sees it.
    """
    flipped = np.flip(board, 0)
    print(flipped)
# Shared module-level game state: the single 8x8 board array that every
# screen function reads and mutates in place.
board = game_board()
# Function to create text objects
def text_objects(text, font, color):
    """Render *text* with *font* in *color*.

    Returns a (surface, rect) pair: the rendered text surface and its
    bounding rectangle, ready for positioning and blitting.
    """
    surface = font.render(text, True, color)
    rect = surface.get_rect()
    return surface, rect
# Displaying the first intro text
def message_display(text, color):
    """Blit a 35-pt intro message near the bottom of the window."""
    intro_font = pygame.font.Font('freesansbold.ttf', 35)
    surface, rect = text_objects(text, intro_font, color)
    rect.center = (display_width / 2, display_height / 1.2)
    gameDisplay.blit(surface, rect)
# Displaying the second intro text
def message_display2(text, color):
    """Blit a 45-pt intro title near the top of the window."""
    title_font = pygame.font.Font('freesansbold.ttf', 45)
    surface, rect = text_objects(text, title_font, color)
    rect.center = (display_width / 2, display_height / 4.5)
    gameDisplay.blit(surface, rect)
# Message display for the scoreboard and Othello title
def message_display3(text, color):
    """Blit the 45-pt scoreboard/title text at its fixed position (280, 540)."""
    label_font = pygame.font.Font('times new roman.ttf', 45)
    surface, rect = text_objects(text, label_font, color)
    rect.center = (280, 540)
    gameDisplay.blit(surface, rect)
# Displaying the Player win text
def winner_or_tie_text(text, color):
    """Blit the 70-pt end-of-game banner near the top of the window."""
    banner_font = pygame.font.Font('times new roman.ttf', 70)
    surface, rect = text_objects(text, banner_font, color)
    rect.center = (display_width / 2, display_height / 9)
    gameDisplay.blit(surface, rect)
# Displaying the return text
def return_text(text, color):
    """Blit the small "return to main menu" hint in the bottom-right corner."""
    hint_font = pygame.font.Font('freesansbold.ttf', 15)
    surface, rect = text_objects(text, hint_font, color)
    rect.center = (display_width / 1.2, display_height / 1.05)
    gameDisplay.blit(surface, rect)
# Button function
def button(message, x, y, width, height, inactive_color, active_color, action=None):
    """Draw a rectangular button and fire *action* when it is clicked.

    Parameters:
        message: label drawn centered on the button (may be '' for plain squares).
        x, y, width, height: button rectangle in screen coordinates.
        inactive_color / active_color: fill when the mouse is outside / over it.
        action: zero-argument callable invoked while the left mouse button is
            held down over the button; None for a purely decorative rectangle.

    Note: the action fires on every frame the left button is held over the
    rectangle, not once per click.
    """
    text_color = BLACK
    pressed = pygame.mouse.get_pressed()
    mouse_pos = pygame.mouse.get_pos()
    # Strict inequalities match the original hit test.
    hovered = x < mouse_pos[0] < x + width and y < mouse_pos[1] < y + height
    if hovered:
        pygame.draw.rect(gameDisplay, active_color, (x, y, width, height))
        if pressed[0] == 1 and action is not None:  # `is not None`, not `!= None`
            action()
    else:
        pygame.draw.rect(gameDisplay, inactive_color, (x, y, width, height))
    # Center the label inside the rectangle.
    label_font = pygame.font.Font('freesansbold.ttf', 20)
    text_surface, text_rect = text_objects(message, label_font, text_color)
    text_rect.center = (x + (width / 2), y + (height / 2))
    gameDisplay.blit(text_surface, text_rect)
# Intro Screen
def game_intro():
    """Show the title screen and wait for the SPACE key to start the game.

    Draws the Othello image and the title/prompt text once, then polls
    events until SPACE opens the main menu or the window is closed.
    """
    gameDisplay.fill(WHITE)
    othello_image(0, 0)
    message_display('Press Space to Play', BLACK)
    message_display2('REVERSI (OTHELLO)', BLACK)
    pygame.display.update()
    waiting = True
    while waiting:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_game()
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
                second_display()
                waiting = False
# Second Screen
def second_display():
    """Show the main menu: background image plus the four mode/help buttons.

    Each button dispatches to its game-mode function when clicked; the
    loop itself only exits via the window-close handler (quit_game).
    """
    gameDisplay.fill(WHITE)
    background_othello_image(0, 0)
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_game()
        button('Player VS Player', 200, 115, 400, 70, BLUE, LIGHTER_BLUE, player_player)
        button('Player VS Computer', 200, 200, 400, 70, ORANGE, LIGHTER_ORANGE, player_computer)
        button('Computer VS Computer', 200, 285, 400, 70, YELLOW, LIGHTER_YELLOW, computer_computer)
        button('How To Play', 200, 370, 400, 70, GREEN, LIGHTER_GREEN, how_to_play)
        pygame.display.update()
        clock.tick(60)
def display_board():
    """Draw the empty Othello board, scoreboard circles, and menu hint.

    Renders the wood background, a black frame, and the 8x8 grid of green
    squares (the original spelled out all 64 button calls by hand; the
    nested loops below draw the identical, non-overlapping squares).
    """
    wood_background_image(0, 0)
    # Black frame behind the grid so the gaps between squares read as lines.
    button('', 90, 90, 413, 413, BLACK, BLACK, None)
    # 8x8 grid: 40x40 squares spaced 50 px apart, top-left corners from
    # (100, 100) to (450, 450).
    for square_x in range(100, 451, 50):
        for square_y in range(100, 451, 50):
            button('', square_x, square_y, 40, 40, DARK_GREEN, DARK_GREEN, None)
    return_text('Press the letter "m" for Main Menu', WHITE)
    # Scoreboard marker circles next to where player_score draws the numbers.
    pygame.draw.circle(gameDisplay, WHITE, (530, 170), circle_radius)
    pygame.draw.circle(gameDisplay, BLACK, (530, 120), circle_radius)
    message_display3('OTHELLO', WHITE)
    pygame.display.update()
# Player vs Player Screen
def player_player():
    """Run the player-vs-player game screen.

    Draws the board, resets and seeds the shared module-level board, then
    alternates turns: a mouse-button release inside the grid area
    (100 < x < 490, 100 < y < 490) lets enforce_rules() resolve the move
    for the current player.  Pressing "m" returns to the main menu; the
    loop also ends when player_score() reports the board is full.
    """
    turn = 0  # 0 -> piece value 1 (drawn black), 1 -> piece value 2 (drawn white)
    display_board()
    reset_array(board)
    setting_up_board(board)
    player_score(board)
    pygame.display.update()
    game_exit = False
    while not game_exit:
        mouse = pygame.mouse.get_pos()
        # Paint the clicked square's piece; the helper reads mouse state itself.
        draw_piece_in_display(turn)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_game()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_m:
                    second_display()
                    game_exit = True
            # Only a release inside the board rectangle counts as a move.
            if event.type == pygame.MOUSEBUTTONUP and (100 < mouse[0] < 490 and 100 < mouse[1] < 490):
                if turn == 0:
                    enforce_rules(board, 1)
                else:
                    enforce_rules(board, 2)
                if player_score(board):
                    game_exit = True
                turn += 1
                turn %= 2  # alternate between 0 and 1
        pygame.display.update()
# Player vs Computer Screen
def player_computer():
    """Run the player-vs-computer game screen.

    On every mouse-button release the human's click is resolved by
    enforce_rules(board, 1), then the computer immediately answers with a
    random placement (computer_move) resolved by enforce_rules(board, 2).
    Pressing "m" returns to the main menu; the loop also ends when
    player_score() reports the board is full.
    """
    turn = 0  # parity passed to draw_piece_in_display: 0 draws black, 1 draws white
    display_board()
    reset_array(board)
    setting_up_board(board)
    game_exit = False
    while not game_exit:
        draw_piece_in_display(turn)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_game()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_m:
                    second_display()
                    game_exit = True
            if event.type == pygame.MOUSEBUTTONUP:
                enforce_rules(board, 1)  # resolve the human's click on the board array
                computer_move(board, 1)  # computer picks a random empty square
                enforce_rules(board, 2)  # resolve the computer's placement
                if player_score(board):
                    game_exit = True
                turn += 1
                turn %= 2  # alternate the drawn piece color for the next click
        pygame.display.update()
# Player vs Computer Screen
def computer_computer():
    """Run the computer-vs-computer demo screen.

    Roughly every 500 ms both computer sides make one random move each
    (values 1 then 2), resolved by enforce_rules.  Pressing "m" returns to
    the main menu; the loop also ends when player_score() reports the
    board is full.
    """
    display_board()
    reset_array(board)
    setting_up_board(board)
    game_exit = False
    while not game_exit:
        pygame.time.wait(500)  # slow the loop down so the moves are visible
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_game()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_m:
                    second_display()
                    game_exit = True
        computer_move(board, 0)  # first computer places a 1
        enforce_rules(board, 1)
        computer_move(board, 1)  # second computer places a 2
        enforce_rules(board, 2)
        if player_score(board):
            game_exit = True
        pygame.display.update()
# How to Play Screen
def how_to_play():
    """Show the instructions image until the player presses "m".

    Draws the directions screen once, then polls events; "m" returns to
    the main menu and closing the window quits the game.
    """
    gameDisplay.fill(WHITE)
    directions_image(0, 0)
    return_text('Press the letter "m" for Main Menu', BLACK)
    pygame.display.update()
    viewing = True
    while viewing:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                quit_game()
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_m:
                second_display()
                viewing = False
# New definition
def setting_up_board(board):
    """Place the four standard Othello starting pieces.

    Writes the starting values into the board array and draws the matching
    circles on the display.  Returns the mutated board.
    """
    # (row, col, board value, circle color, circle center on screen)
    start_pieces = (
        (3, 3, 1, BLACK, (270, 320)),
        (3, 4, 2, WHITE, (320, 320)),
        (4, 3, 2, WHITE, (270, 270)),
        (4, 4, 1, BLACK, (320, 270)),
    )
    for row, col, value, piece_color, center in start_pieces:
        board[row][col] = value
        pygame.draw.circle(gameDisplay, piece_color, center, circle_radius)
    return board
def computer_move(board, move):
    """Place a random computer piece on an empty cell of the 8x8 board.

    Writes a 1 when move == 0 and a 2 when move == 1, on a uniformly
    chosen empty (value 0) cell.  Returns False after placing a piece, or
    True when no placement is possible (board full, or move is not 0/1).

    The original implementation probed random cells in an unbounded loop
    and therefore hung forever on a full board; enumerating the empty
    cells first guarantees termination.
    """
    empty_cells = [(x, y) for x in range(8) for y in range(8) if board[x][y] == 0]
    if not empty_cells or move not in (0, 1):
        return True  # nothing playable: avoid the original infinite loop
    x, y = random.choice(empty_cells)
    board[x][y] = 1 if move == 0 else 2
    return False
def reset_array(array):
    """Zero every element of a (possibly nested) sequence in place.

    Recurses into list elements; any non-list element is overwritten
    with 0.  Used to clear the board between games.
    """
    for index, element in enumerate(array):
        if isinstance(element, list):
            reset_array(element)
        else:
            array[index] = 0
def score(text, color, posx, posy):
    """Blit a single 35-pt score value centered at (posx, posy)."""
    score_font = pygame.font.Font('times new roman.ttf', 35)
    surface, rect = text_objects(text, score_font, color)
    rect.center = (posx, posy)
    gameDisplay.blit(surface, rect)
# Function to keep track of the scores of each player
def player_score(board):
    """Count each player's pieces, refresh the scoreboard, and detect game over.

    Scans the board (1 = player 1, 2 = player 2), redraws each player's
    score, and when the board is full shows the winner/tie banner.
    Returns True when the game is over, otherwise False (the original
    implicitly returned None; both are falsy to the callers).

    The original redrew the score box once per piece found — up to 64
    repaints per call; here each score is drawn at most once.  A score of
    zero is not drawn, matching the original's behavior of never painting
    an absent score.
    """
    player1_score = 0
    player2_score = 0
    for row in range(rows):
        for column in range(columns):
            if board[row][column] == 1:
                player1_score += 1
            elif board[row][column] == 2:
                player2_score += 1
    if player1_score:
        button('', 568, 100, 40, 40, WHITE, WHITE, action=None)
        score(str(player1_score), BLACK, 590, 120)
    if player2_score:
        button('', 568, 150, 40, 40, WHITE, WHITE, action=None)
        score(str(player2_score), BLACK, 590, 170)
    # Board full (no zeros left) -> announce the result.
    if player1_score + player2_score >= rows * columns:
        if player1_score > player2_score:
            player_1_win()
        elif player2_score > player1_score:
            player_2_win()
        else:
            player_tie()
        return True
    return False
def player_1_win():
    """Show the end-of-game banner naming player 1 as the winner."""
    winner_name = str(name1)
    winner_or_tie_text(winner_name, WHITE)
def player_2_win():
    """Show the end-of-game banner naming player 2 as the winner."""
    winner_name = str(name2)
    winner_or_tie_text(winner_name, WHITE)
def player_tie():
    """Show the end-of-game banner for a drawn game."""
    banner = "Tie!"
    winner_or_tie_text(banner, WHITE)
def draw_piece_in_display(move):
"""This function draws the circles over the squares when clicked.
It takes the location of the click and draws a circle with specifications such as location, color, and size. """
mouse = pygame.mouse.get_pos()
click = pygame.mouse.get_pressed()
# First Column
if click[0] == 1 and (100 + 40 > mouse[0] > 100 and 450 + 40 > mouse[1] > 450) and (board[0][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 470), circle_radius) # Surface, color, position x, radius
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 470), circle_radius)
piece_placed(0, 0, move, board)
elif click[0] == 1 and (100 + 40 > mouse[0] > 100 and 400 + 40 > mouse[1] > 400) and (board[1][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 420), circle_radius)
piece_placed(1, 0, move, board)
elif click[0] == 1 and (100 + 40 > mouse[0] > 100 and 350 + 40 > mouse[1] > 350) and (board[2][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 370), circle_radius)
piece_placed(2, 0, move, board)
elif click[0] == 1 and (100 + 40 > mouse[0] > 100 and 300 + 40 > mouse[1] > 300) and (board[3][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 320), circle_radius)
piece_placed(3, 0, move, board)
elif click[0] == 1 and (100 + 40 > mouse[0] > 100 and 250 + 40 > mouse[1] > 250) and (board[4][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 270), circle_radius)
piece_placed(4, 0, move, board)
elif click[0] == 1 and (100 + 40 > mouse[0] > 100 and 200 + 40 > mouse[1] > 200) and (board[5][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 220), circle_radius)
piece_placed(5, 0, move, board)
elif click[0] == 1 and (100 + 40 > mouse[0] > 100 and 150 + 40 > mouse[1] > 150) and (board[6][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 170), circle_radius)
piece_placed(6, 0, move, board)
elif click[0] == 1 and (100 + 40 > mouse[0] > 100 and 100 + 40 > mouse[1] > 100) and (board[7][0] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (120, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (120, 120), circle_radius)
piece_placed(7, 0, move, board)
# Second Column
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 450 + 40 > mouse[1] > 450) and (board[0][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 470), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 470), circle_radius)
piece_placed(0, 1, move, board)
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 400 + 40 > mouse[1] > 400) and (board[1][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 420), circle_radius)
piece_placed(1, 1, move, board)
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 350 + 40 > mouse[1] > 350) and (board[2][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 370), circle_radius)
piece_placed(2, 1, move, board)
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 300 + 40 > mouse[1] > 300) and (board[3][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 320), circle_radius)
piece_placed(3, 1, move, board)
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 250 + 40 > mouse[1] > 250) and (board[4][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 270), circle_radius)
piece_placed(4, 1, move, board)
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 200 + 40 > mouse[1] > 200) and (board[5][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 220), circle_radius)
piece_placed(5, 1, move, board)
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 150 + 40 > mouse[1] > 150) and (board[6][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 170), circle_radius)
piece_placed(6, 1, move, board)
elif click[0] == 1 and (150 + 40 > mouse[0] > 150 and 100 + 40 > mouse[1] > 100) and (board[7][1] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (170, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (170, 120), circle_radius)
piece_placed(7, 1, move, board)
# Third Column
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 450 + 40 > mouse[1] > 450) and (board[0][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 470), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 470), circle_radius)
piece_placed(0, 2, move, board)
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 400 + 40 > mouse[1] > 400) and (board[1][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 420), circle_radius)
piece_placed(1, 2, move, board)
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 350 + 40 > mouse[1] > 350) and (board[2][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 370), circle_radius)
piece_placed(2, 2, move, board)
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 300 + 40 > mouse[1] > 300) and (board[3][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 320), circle_radius)
piece_placed(3, 2, move, board)
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 250 + 40 > mouse[1] > 250) and (board[4][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 270), circle_radius)
piece_placed(4, 2, move, board)
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 200 + 40 > mouse[1] > 200) and (board[5][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 220), circle_radius)
piece_placed(5, 2, move, board)
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 150 + 40 > mouse[1] > 150) and (board[6][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 170), circle_radius)
piece_placed(6, 2, move, board)
elif click[0] == 1 and (200 + 40 > mouse[0] > 200 and 100 + 40 > mouse[1] > 100) and (board[7][2] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (220, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (220, 120), circle_radius)
piece_placed(7, 2, move, board)
# Fourth Column
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 450 + 40 > mouse[1] > 450) and (board[0][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 470), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 470), circle_radius)
piece_placed(0, 3, move, board)
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 400 + 40 > mouse[1] > 400) and (board[1][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 420), circle_radius)
piece_placed(1, 3, move, board)
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 350 + 40 > mouse[1] > 350) and (board[2][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 370), circle_radius)
piece_placed(2, 3, move, board)
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 300 + 40 > mouse[1] > 300) and (board[3][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 320), circle_radius)
piece_placed(3, 3, move, board)
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 250 + 40 > mouse[1] > 250) and (board[4][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 270), circle_radius)
piece_placed(4, 3, move, board)
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 200 + 40 > mouse[1] > 200) and (board[5][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 220), circle_radius)
piece_placed(5, 3, move, board)
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 150 + 40 > mouse[1] > 150) and (board[6][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 170), circle_radius)
piece_placed(6, 3, move, board)
elif click[0] == 1 and (250 + 40 > mouse[0] > 250 and 100 + 40 > mouse[1] > 100) and (board[7][3] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (270, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (270, 120), circle_radius)
piece_placed(7, 3, move, board)
# Fifth Column
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 450 + 40 > mouse[1] > 450) and (board[0][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 470), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 470), circle_radius)
piece_placed(0, 4, move, board)
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 400 + 40 > mouse[1] > 400) and (board[1][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 420), circle_radius)
piece_placed(1, 4, move, board)
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 350 + 40 > mouse[1] > 350) and (board[2][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 370), circle_radius)
piece_placed(2, 4, move, board)
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 300 + 40 > mouse[1] > 300) and (board[3][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 320), circle_radius)
piece_placed(3, 4, move, board)
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 250 + 40 > mouse[1] > 250) and (board[4][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 270), circle_radius)
piece_placed(4, 4, move, board)
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 200 + 40 > mouse[1] > 200) and (board[5][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 220), circle_radius)
piece_placed(5, 4, move, board)
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 150 + 40 > mouse[1] > 150) and (board[6][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 170), circle_radius)
piece_placed(6, 4, move, board)
elif click[0] == 1 and (300 + 40 > mouse[0] > 300 and 100 + 40 > mouse[1] > 100) and (board[7][4] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (320, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (320, 120), circle_radius)
piece_placed(7, 4, move, board)
# Sixth Column
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 450 + 40 > mouse[1] > 450) and (board[0][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 470), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 470), circle_radius)
piece_placed(0, 5, move, board)
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 400 + 40 > mouse[1] > 400) and (board[1][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 420), circle_radius)
piece_placed(1, 5, move, board)
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 350 + 40 > mouse[1] > 350) and (board[2][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 370), circle_radius)
piece_placed(2, 5, move, board)
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 300 + 40 > mouse[1] > 300) and (board[3][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 320), circle_radius)
piece_placed(3, 5, move, board)
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 250 + 40 > mouse[1] > 250) and (board[4][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 270), circle_radius)
piece_placed(4, 5, move, board)
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 200 + 40 > mouse[1] > 200) and (board[5][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 220), circle_radius)
piece_placed(5, 5, move, board)
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 150 + 40 > mouse[1] > 150) and (board[6][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 170), circle_radius)
piece_placed(6, 5, move, board)
elif click[0] == 1 and (350 + 40 > mouse[0] > 350 and 100 + 40 > mouse[1] > 100) and (board[7][5] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (370, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (370, 120), circle_radius)
piece_placed(7, 5, move, board)
# Seventh Column
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 450 + 40 > mouse[1] > 450) and (board[0][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 470), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 470), circle_radius)
piece_placed(0, 6, move, board)
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 400 + 40 > mouse[1] > 400) and (board[1][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 420), circle_radius)
piece_placed(1, 6, move, board)
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 350 + 40 > mouse[1] > 350) and (board[2][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 370), circle_radius)
piece_placed(2, 6, move, board)
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 300 + 40 > mouse[1] > 300) and (board[3][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 320), circle_radius)
piece_placed(3, 6, move, board)
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 250 + 40 > mouse[1] > 250) and (board[4][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 270), circle_radius)
piece_placed(4, 6, move, board)
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 200 + 40 > mouse[1] > 200) and (board[5][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 220), circle_radius)
piece_placed(5, 6, move, board)
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 150 + 40 > mouse[1] > 150) and (board[6][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 170), circle_radius)
piece_placed(6, 6, move, board)
elif click[0] == 1 and (400 + 40 > mouse[0] > 400 and 100 + 40 > mouse[1] > 100) and (board[7][6] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (420, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (420, 120), circle_radius)
piece_placed(7, 6, move, board)
# Eight Column
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 450 + 40 > mouse[1] > 450) and (board[0][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 470), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 470), circle_radius)
piece_placed(0, 7, move, board)
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 400 + 40 > mouse[1] > 400) and (board[1][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 420), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 420), circle_radius)
piece_placed(1, 7, move, board)
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 350 + 40 > mouse[1] > 350) and (board[2][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 370), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 370), circle_radius)
piece_placed(2, 7, move, board)
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 300 + 40 > mouse[1] > 300) and (board[3][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 320), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 320), circle_radius)
piece_placed(3, 7, move, board)
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 250 + 40 > mouse[1] > 250) and (board[4][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 270), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 270), circle_radius)
piece_placed(4, 7, move, board)
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 200 + 40 > mouse[1] > 200) and (board[5][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 220), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 220), circle_radius)
piece_placed(5, 7, move, board)
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 150 + 40 > mouse[1] > 150) and (board[6][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 170), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 170), circle_radius)
piece_placed(6, 7, move, board)
elif click[0] == 1 and (450 + 40 > mouse[0] > 450 and 100 + 40 > mouse[1] > 100) and (board[7][7] == 0):
if move == 0:
pygame.draw.circle(gameDisplay, BLACK, (470, 120), circle_radius)
else:
pygame.draw.circle(gameDisplay, WHITE, (470, 120), circle_radius)
piece_placed(7, 7, move, board)
pygame.display.update()
def draw_flipped_piece(board, move):
    """Redraw on screen every piece belonging to the player who just moved.

    ``board`` is the 8x8 game matrix where 0 means empty, 1 a black piece
    and 2 a white piece.  ``move`` selects whose pieces are repainted:
    1 repaints every cell holding a 1 in black; any other value repaints
    every cell holding a 2 in white.  Drawing over the existing circles is
    how flipped pieces change color on screen.

    The screen position of a cell follows directly from its indices:
    column ``c`` maps to x = 120 + 50*c and row ``r`` maps to
    y = 470 - 50*r (row 0 is the bottom row of the drawn board).
    """
    # Select the matrix value to look for and the color to paint it with.
    if move == 1:
        target, color = 1, BLACK
    else:
        target, color = 2, WHITE
    for row in range(8):
        for column in range(8):
            if board[row][column] == target:
                # Surface, color, (x, y) center, radius
                center = (120 + 50 * column, 470 - 50 * row)
                pygame.draw.circle(gameDisplay, color, center, circle_radius)
    pygame.display.update()
# This is what changes the matrix
# This is what changes the matrix
def enforce_rules(board, move):
    """Flip sandwiched pieces in the board matrix after a move.

    ``board`` is the 8x8 game matrix (0 empty, 1 black, 2 white) and
    ``move`` is the value (1 or 2) of the player who just placed a piece.
    For every straight run — horizontal, vertical, or either diagonal —
    that both starts and ends with the player's value and contains no
    empty cell in between, every cell of the run is set to the player's
    value, and the flipped pieces are immediately redrawn on screen.

    This replaces 24 hand-unrolled loop nests.  Two defects in the
    originals are fixed here: a ``!= 0and`` token (missing space, a
    syntax error on Python >= 3.12) in the length-8 positive-diagonal
    check, and interior-cell conditions that inconsistently required
    ``== move`` instead of ``!= 0`` in several of the longer
    horizontal/vertical checks, contrary to the pattern established by
    the length-3/4 checks and all diagonal checks.

    NOTE(review): as in the original, interior cells only need to be
    non-empty (they may already belong to the mover), and runs are
    processed direction-major, shortest span first, scanning rows then
    columns — preserving the original flip ordering.
    """
    # Step vectors: horizontal, vertical, positive diagonal, negative diagonal.
    directions = ((0, 1), (1, 0), (1, 1), (-1, 1))
    for d_row, d_col in directions:
        # span = number of steps between the two endpoints (runs of 3..8 cells).
        for span in range(2, 8):
            for row in range(rows):
                for column in range(columns):
                    end_row = row + d_row * span
                    end_col = column + d_col * span
                    # Skip runs that would leave the board.
                    if not (0 <= end_row < rows and 0 <= end_col < columns):
                        continue
                    # Both endpoints must be the mover's pieces.
                    if board[row][column] != move or board[end_row][end_col] != move:
                        continue
                    # Every interior cell must be occupied (non-zero).
                    if any(board[row + d_row * k][column + d_col * k] == 0
                           for k in range(1, span)):
                        continue
                    # Flip the whole run to the mover's value and redraw.
                    for k in range(span + 1):
                        board[row + d_row * k][column + d_col * k] = move
                    draw_flipped_piece(board, move)
# Ending the game function
def quit_game():
    """Shut down pygame and terminate the program.

    ``pygame.quit()`` releases the display and other pygame resources.
    ``raise SystemExit`` then ends the interpreter; it replaces the
    ``quit()`` builtin, which is an interactive-shell helper provided by
    the ``site`` module and is not guaranteed to exist when a script is
    run with ``python -S``.  The observable effect (SystemExit raised)
    is unchanged.
    """
    pygame.quit()
    raise SystemExit
# Entry point: show the intro screen, which in turn starts the game loop.
game_intro()
| 49.256508
| 298
| 0.568285
| 10,553
| 77,579
| 4.108595
| 0.039704
| 0.04908
| 0.096683
| 0.163153
| 0.844665
| 0.827921
| 0.79577
| 0.793049
| 0.760275
| 0.447138
| 0
| 0.093133
| 0.297593
| 77,579
| 1,574
| 299
| 49.287802
| 0.702543
| 0.110068
| 0
| 0.58548
| 0
| 0.000781
| 0.014309
| 0.001761
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026542
| false
| 0.000781
| 0.002342
| 0
| 0.03669
| 0.006245
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
51c9ef92baa48076ee825944c62e0184d4538d3d
| 10,451
|
py
|
Python
|
fhir/resources/tests/test_specimendefinition.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/tests/test_specimendefinition.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/tests/test_specimendefinition.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/SpecimenDefinition
Release: R4
Version: 4.0.1
Build ID: 9346c8cc45
Last updated: 2019-11-01T09:29:23.356+11:00
"""
from pydantic.validators import bytes_validator # noqa: F401
from .. import fhirtypes # noqa: F401
from .. import specimendefinition
def impl_specimendefinition_1(inst):
    """Field-by-field assertions for the serum/plasma SpecimenDefinition
    example fixture (specimendefinition-example-serum-plasma.json).

    Shared by test_specimendefinition_1 for both the freshly parsed
    instance and the dict round-trip rebuild.
    """
    # --- resource-level metadata ---
    assert inst.id == "2364"
    assert inst.meta.tag[0].code == "HTEST"
    assert inst.meta.tag[0].display == "test health data"
    assert (
        inst.meta.tag[0].system == "http://terminology.hl7.org/CodeSystem/v3-ActReason"
    )
    assert inst.patientPreparation[0].text == "12 hour fasting"
    assert inst.patientPreparation[1].coding[0].code == "263678003"
    assert inst.patientPreparation[1].coding[0].display == "At rest"
    assert inst.patientPreparation[1].coding[0].system == "http://snomed.info/sct"
    assert inst.text.status == "generated"
    # "preferrably" is a typo carried by the upstream example JSON itself;
    # it must NOT be corrected here or the assertion stops matching.
    assert inst.timeAspect == "preferrably morning time"
    assert inst.typeCollected.coding[0].code == "122555007"
    assert inst.typeCollected.coding[0].display == "Venous blood specimen"
    assert inst.typeCollected.coding[0].system == "http://snomed.info/sct"
    # --- typeTested[0]: preferred serum specimen ---
    assert inst.typeTested[0].container.cap.coding[0].code == "yellow"
    assert inst.typeTested[0].container.cap.coding[0].display == "yellow cap"
    assert (
        inst.typeTested[0].container.cap.coding[0].system == "urn:iso:std:iso:6710:2017"
    )
    assert inst.typeTested[0].container.material.coding[0].code == "61088005"
    assert inst.typeTested[0].container.material.coding[0].display == "plastic"
    assert (
        inst.typeTested[0].container.material.coding[0].system
        == "http://snomed.info/sct"
    )
    assert inst.typeTested[0].container.minimumVolumeQuantity.code == "mL"
    assert (
        inst.typeTested[0].container.minimumVolumeQuantity.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[0].container.minimumVolumeQuantity.unit == "ml"
    assert float(inst.typeTested[0].container.minimumVolumeQuantity.value) == float(2)
    assert inst.typeTested[0].container.type.coding[0].code == "702281005"
    assert inst.typeTested[0].container.type.coding[0].display == (
        "Evacuated blood collection tube, thrombin/clot activator/gel" " separator"
    )
    assert (
        inst.typeTested[0].container.type.coding[0].system == "http://snomed.info/sct"
    )
    assert inst.typeTested[0].handling[0].maxDuration.code == "min"
    assert (
        inst.typeTested[0].handling[0].maxDuration.system == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[0].handling[0].maxDuration.unit == "minute"
    assert float(inst.typeTested[0].handling[0].maxDuration.value) == float(60)
    assert (
        inst.typeTested[0].handling[0].temperatureQualifier.text
        == "Ambient temperature"
    )
    assert inst.typeTested[0].handling[0].temperatureRange.high.code == "Cel"
    assert (
        inst.typeTested[0].handling[0].temperatureRange.high.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[0].handling[0].temperatureRange.high.unit == "°C"
    assert float(inst.typeTested[0].handling[0].temperatureRange.high.value) == float(
        25
    )
    assert inst.typeTested[0].handling[0].temperatureRange.low.code == "Cel"
    assert (
        inst.typeTested[0].handling[0].temperatureRange.low.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[0].handling[0].temperatureRange.low.unit == "°C"
    assert float(inst.typeTested[0].handling[0].temperatureRange.low.value) == float(15)
    assert inst.typeTested[0].handling[1].maxDuration.code == "h"
    assert (
        inst.typeTested[0].handling[1].maxDuration.system == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[0].handling[1].maxDuration.unit == "hour"
    assert float(inst.typeTested[0].handling[1].maxDuration.value) == float(8)
    assert (
        inst.typeTested[0].handling[1].temperatureQualifier.text
        == "Refrigerated temperature"
    )
    assert inst.typeTested[0].handling[1].temperatureRange.high.code == "Cel"
    assert (
        inst.typeTested[0].handling[1].temperatureRange.high.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[0].handling[1].temperatureRange.high.unit == "°C"
    assert float(inst.typeTested[0].handling[1].temperatureRange.high.value) == float(8)
    assert inst.typeTested[0].handling[1].temperatureRange.low.code == "Cel"
    assert (
        inst.typeTested[0].handling[1].temperatureRange.low.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[0].handling[1].temperatureRange.low.unit == "°C"
    assert float(inst.typeTested[0].handling[1].temperatureRange.low.value) == float(2)
    assert inst.typeTested[0].preference == "preferred"
    assert inst.typeTested[0].type.coding[0].code == "119364003"
    assert inst.typeTested[0].type.coding[0].display == "Serum specimen"
    assert inst.typeTested[0].type.coding[0].system == "http://snomed.info/sct"
    # --- typeTested[1]: alternate plasma specimen ---
    assert inst.typeTested[1].container.cap.coding[0].code == "green"
    assert inst.typeTested[1].container.cap.coding[0].display == "green cap"
    assert (
        inst.typeTested[1].container.cap.coding[0].system == "urn:iso:std:iso:6710:2017"
    )
    assert inst.typeTested[1].container.material.coding[0].code == "32039001"
    assert inst.typeTested[1].container.material.coding[0].display == "glass"
    assert (
        inst.typeTested[1].container.material.coding[0].system
        == "http://snomed.info/sct"
    )
    assert inst.typeTested[1].container.minimumVolumeQuantity.code == "mL"
    assert (
        inst.typeTested[1].container.minimumVolumeQuantity.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[1].container.minimumVolumeQuantity.unit == "ml"
    assert float(inst.typeTested[1].container.minimumVolumeQuantity.value) == float(2)
    assert inst.typeTested[1].container.type.coding[0].code == "767390000"
    assert inst.typeTested[1].container.type.coding[0].display == (
        "Evacuated blood collection tube with heparin lithium and gel" " separator"
    )
    assert (
        inst.typeTested[1].container.type.coding[0].system == "http://snomed.info/sct"
    )
    assert inst.typeTested[1].handling[0].maxDuration.code == "min"
    assert (
        inst.typeTested[1].handling[0].maxDuration.system == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[1].handling[0].maxDuration.unit == "minute"
    assert float(inst.typeTested[1].handling[0].maxDuration.value) == float(60)
    assert (
        inst.typeTested[1].handling[0].temperatureQualifier.text
        == "Ambient temperature"
    )
    assert inst.typeTested[1].handling[0].temperatureRange.high.code == "Cel"
    assert (
        inst.typeTested[1].handling[0].temperatureRange.high.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[1].handling[0].temperatureRange.high.unit == "°C"
    assert float(inst.typeTested[1].handling[0].temperatureRange.high.value) == float(
        25
    )
    assert inst.typeTested[1].handling[0].temperatureRange.low.code == "Cel"
    assert (
        inst.typeTested[1].handling[0].temperatureRange.low.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[1].handling[0].temperatureRange.low.unit == "°C"
    assert float(inst.typeTested[1].handling[0].temperatureRange.low.value) == float(15)
    assert inst.typeTested[1].handling[1].maxDuration.code == "h"
    assert (
        inst.typeTested[1].handling[1].maxDuration.system == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[1].handling[1].maxDuration.unit == "hour"
    assert float(inst.typeTested[1].handling[1].maxDuration.value) == float(8)
    assert (
        inst.typeTested[1].handling[1].temperatureQualifier.text
        == "Refrigerated temperature"
    )
    assert inst.typeTested[1].handling[1].temperatureRange.high.code == "Cel"
    assert (
        inst.typeTested[1].handling[1].temperatureRange.high.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[1].handling[1].temperatureRange.high.unit == "°C"
    assert float(inst.typeTested[1].handling[1].temperatureRange.high.value) == float(8)
    assert inst.typeTested[1].handling[1].temperatureRange.low.code == "Cel"
    assert (
        inst.typeTested[1].handling[1].temperatureRange.low.system
        == "http://unitsofmeasure.org"
    )
    assert inst.typeTested[1].handling[1].temperatureRange.low.unit == "°C"
    assert float(inst.typeTested[1].handling[1].temperatureRange.low.value) == float(2)
    assert inst.typeTested[1].preference == "alternate"
    assert inst.typeTested[1].rejectionCriterion[0].coding[0].code == "insufficient"
    assert (
        inst.typeTested[1].rejectionCriterion[0].coding[0].display
        == "insufficient specimen volume"
    )
    assert (
        inst.typeTested[1].rejectionCriterion[0].coding[0].system
        == "http://terminology.hl7.org/CodeSystem/rejection-criteria"
    )
    assert inst.typeTested[1].rejectionCriterion[1].coding[0].code == "hemolized"
    assert (
        inst.typeTested[1].rejectionCriterion[1].coding[0].display
        == "hemolized specimen"
    )
    assert (
        inst.typeTested[1].rejectionCriterion[1].coding[0].system
        == "http://terminology.hl7.org/CodeSystem/rejection-criteria"
    )
    assert inst.typeTested[1].type.coding[0].code == "119361006"
    assert inst.typeTested[1].type.coding[0].display == "Plasma specimen"
    assert inst.typeTested[1].type.coding[0].system == "http://snomed.info/sct"
def test_specimendefinition_1(base_settings):
    """No. 1 tests collection for SpecimenDefinition.
    Test File: specimendefinition-example-serum-plasma.json
    """
    json_path = (
        base_settings["unittest_data_dir"]
        / "specimendefinition-example-serum-plasma.json"
    )
    inst = specimendefinition.SpecimenDefinition.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert inst.resource_type == "SpecimenDefinition"
    impl_specimendefinition_1(inst)

    # Round-trip: serialize to a plain dict, rebuild the model from it,
    # and re-run the exact same field assertions on the rebuilt instance.
    payload = inst.dict()
    assert payload["resourceType"] == "SpecimenDefinition"
    inst2 = specimendefinition.SpecimenDefinition(**payload)
    impl_specimendefinition_1(inst2)
| 45.43913
| 88
| 0.685102
| 1,235
| 10,451
| 5.791093
| 0.138462
| 0.180089
| 0.218121
| 0.123322
| 0.824804
| 0.78146
| 0.766779
| 0.725811
| 0.56264
| 0.361298
| 0
| 0.040707
| 0.160846
| 10,451
| 229
| 89
| 45.637555
| 0.773888
| 0.035595
| 0
| 0.23445
| 0
| 0
| 0.148896
| 0.00935
| 0
| 0
| 0
| 0
| 0.511962
| 1
| 0.009569
| false
| 0
| 0.014354
| 0
| 0.023923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
51cb8b25a68504ed83b49eae0373b0aa4644b17f
| 10,265
|
py
|
Python
|
data/reanalysis_data/download-scripts/download_uwind_sigma995.py
|
Skye777/transformer
|
177834bcb55e59f8ea0fbe666734c148effbec8d
|
[
"Apache-2.0"
] | null | null | null |
data/reanalysis_data/download-scripts/download_uwind_sigma995.py
|
Skye777/transformer
|
177834bcb55e59f8ea0fbe666734c148effbec8d
|
[
"Apache-2.0"
] | null | null | null |
data/reanalysis_data/download-scripts/download_uwind_sigma995.py
|
Skye777/transformer
|
177834bcb55e59f8ea0fbe666734c148effbec8d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#################################################################
# Python Script to retrieve 164 online Data files of 'ds131.2',
# total 4.49G. This script uses 'requests' to download data.
#
# Highlight this script by Select All, Copy and Paste it into a file;
# make the file executable and run it on command line.
#
# You need pass in your password as a parameter to execute
# this script; or you can set an environment variable RDAPSWD
# if your Operating System supports it.
#
# Contact rpconroy@ucar.edu (Riley Conroy) for further assistance.
#################################################################
import sys, os
import requests
def check_file_status(filepath, filesize):
    """Overwrite the current terminal line with download progress.

    Parameters
    ----------
    filepath : str
        Path of the file currently being written.
    filesize : int
        Expected final size in bytes (taken from the Content-length header).
    """
    sys.stdout.write('\r')
    sys.stdout.flush()
    size = int(os.stat(filepath).st_size)
    # Guard against a zero/absent Content-length so we never divide by zero;
    # treat an unknown total as fully complete rather than crashing mid-download.
    percent_complete = (size / filesize) * 100 if filesize else 100.0
    sys.stdout.write('%.3f %s' % (percent_complete, '% Completed'))
    sys.stdout.flush()
# Try to get password: prefer the command-line argument, then the RDAPSWD
# environment variable, otherwise fall back to an interactive prompt.
if len(sys.argv) < 2 and not 'RDAPSWD' in os.environ:
    try:
        import getpass
        # Rebind the builtin so the prompt below hides what is typed.
        input = getpass.getpass
    except:  # NOTE(review): bare except, kept as-is — presumably guards
        try:  # environments without getpass; Python 2 fallback below.
            input = raw_input
        except:
            pass
    pswd = input('Password: ')
else:
    try:
        pswd = sys.argv[1]
    except:  # no CLI argument given; use the environment variable instead
        pswd = os.environ['RDAPSWD']
url = 'https://rda.ucar.edu/cgi-bin/login'
values = {'email': '1811017@tongji.edu.cn', 'passwd': pswd, 'action': 'login'}
# Authenticate against the RDA login endpoint; the session cookie returned
# in ret.cookies is reused by the download loop further below.
ret = requests.post(url, data=values)
if ret.status_code != 200:
    print('Bad Authentication')
    print(ret.text)
    exit(1)
# Base URL all dataset file paths are appended to.
dspath = 'https://rda.ucar.edu/data/ds131.2/'
# One yearly-mean UGRD sigma-level GRIB file per year, 1851 through 2014
# inclusive (164 files, ~4.49 GB total — matching the header comment).
# Generated instead of hand-listing 164 consecutive literals.
filelist = [
    'pgrbanl/pgrbanl_mean_%d_UGRD_sigma.grib' % year
    for year in range(1851, 2015)
]
# Download every file in the list, streaming to disk with a progress line.
for file in filelist:  # NOTE(review): 'file' shadows the builtin; kept as-is
    filename = dspath + file
    # Save under ../meta-data/uwind/ using only the basename of the dataset path.
    file_base = '../meta-data/uwind/' + os.path.basename(file)
    print('Downloading', file_base)
    # stream=True fetches lazily so multi-GB GRIB files are never held in RAM;
    # ret.cookies carries the authenticated RDA session from the login above.
    req = requests.get(filename, cookies=ret.cookies, allow_redirects=True, stream=True)
    filesize = int(req.headers['Content-length'])
    with open(file_base, 'wb') as outfile:
        chunk_size = 1048576  # 1 MiB per chunk
        for chunk in req.iter_content(chunk_size=chunk_size):
            outfile.write(chunk)
            # Only show incremental progress for files larger than one chunk.
            if chunk_size < filesize:
                check_file_status(file_base, filesize)
    # Final status line once the file is fully written.
    check_file_status(file_base, filesize)
    print()
| 43.495763
| 88
| 0.758305
| 1,440
| 10,265
| 4.934722
| 0.221528
| 0.323107
| 0.415424
| 0.458767
| 0.72094
| 0.72094
| 0.009851
| 0
| 0
| 0
| 0
| 0.07724
| 0.124696
| 10,265
| 235
| 89
| 43.680851
| 0.713634
| 0.05017
| 0
| 0.047393
| 0
| 0
| 0.722777
| 0.702165
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004739
| false
| 0.023697
| 0.014218
| 0
| 0.018957
| 0.018957
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cfb3214c3f6a84da7ae3a26cd3ea7f301f7b740a
| 2,220
|
py
|
Python
|
src/pysonata/sonata/tests/circuit/test_file.py
|
AllenInstitute/project7
|
901350cdf5c57a56b7efece2a309c72bdf5f2608
|
[
"BSD-3-Clause"
] | 35
|
2018-04-10T13:54:27.000Z
|
2022-03-12T09:22:31.000Z
|
src/pysonata/sonata/tests/circuit/test_file.py
|
AllenInstitute/project7
|
901350cdf5c57a56b7efece2a309c72bdf5f2608
|
[
"BSD-3-Clause"
] | 92
|
2018-03-19T10:14:18.000Z
|
2022-01-29T15:21:47.000Z
|
src/pysonata/sonata/tests/circuit/test_file.py
|
AllenInstitute/project7
|
901350cdf5c57a56b7efece2a309c72bdf5f2608
|
[
"BSD-3-Clause"
] | 25
|
2018-03-16T23:14:42.000Z
|
2022-02-09T19:37:05.000Z
|
import pytest
import tempfile
from conftest import load_circuit_files
def test_load_files():
    """Nodes-only, edges-only, and combined loads expose the right accessors."""
    # Nodes only: the edge accessors must stay empty.
    circuit = load_circuit_files(
        data_files='examples/v1_nodes.h5',
        data_type_files='examples/v1_node_types.csv')
    assert circuit.nodes is not None
    assert circuit.has_nodes
    assert circuit.edges is None
    assert not circuit.has_edges

    # Edges only: the node accessors must stay empty.
    circuit = load_circuit_files(
        data_files='examples/v1_v1_edges.h5',
        data_type_files='examples/v1_v1_edge_types.csv')
    assert circuit.nodes is None
    assert not circuit.has_nodes
    assert circuit.edges is not None
    assert circuit.has_edges

    # Nodes and edges together: both sides populated.
    circuit = load_circuit_files(
        data_files=['examples/v1_nodes.h5', 'examples/v1_v1_edges.h5'],
        data_type_files=['examples/v1_node_types.csv',
                         'examples/v1_v1_edge_types.csv'])
    assert circuit.nodes is not None
    assert circuit.has_nodes
    assert circuit.edges is not None
    assert circuit.has_edges
def test_version():
    """A fully-loaded circuit reports SONATA format version '0.1'."""
    circuit = load_circuit_files(
        data_files=['examples/v1_nodes.h5', 'examples/v1_v1_edges.h5'],
        data_type_files=['examples/v1_node_types.csv',
                         'examples/v1_v1_edge_types.csv'])
    assert circuit.version == '0.1'
def test_bad_magic():
    """HDF5 files with a missing or wrong 'magic' attribute are rejected."""
    import h5py
    import os
    tmp_file, tmp_file_name = tempfile.mkstemp(suffix='.hdf5')
    # Fix: mkstemp returns an open OS-level descriptor that the original
    # never closed (fd leak); close it before h5py reopens the path.
    os.close(tmp_file)
    # no magic
    with h5py.File(tmp_file_name, 'r+') as h5:
        h5.create_group('nodes')
    with pytest.raises(Exception):
        load_circuit_files(data_files=tmp_file_name, data_type_files='examples/v1_node_types.csv')
    # bad magic
    with h5py.File(tmp_file_name, 'r+') as h5:
        h5.attrs['magic'] = 0x0A7B
    with pytest.raises(Exception):
        load_circuit_files(data_files=tmp_file_name, data_type_files='examples/v1_node_types.csv')
def test_no_files():
    """Loading with empty data and type lists must raise."""
    with pytest.raises(Exception):
        load_circuit_files(data_files=[], data_type_files=[])
def test_no_node_types():
    """A nodes file without its node-types CSV must raise."""
    with pytest.raises(Exception):
        load_circuit_files(data_files='examples/v1_nodes.h5', data_type_files=[])
def test_mixed_files():
    """Mismatched inputs (nodes file with edge-types CSV) must raise."""
    with pytest.raises(Exception):
        load_circuit_files(data_files='examples/v1_nodes.h5', data_type_files='examples/v1_v1_edge_types.csv')
| 32.647059
| 115
| 0.709009
| 336
| 2,220
| 4.357143
| 0.151786
| 0.11612
| 0.133197
| 0.122951
| 0.816257
| 0.797814
| 0.775956
| 0.775956
| 0.775956
| 0.748634
| 0
| 0.024563
| 0.174775
| 2,220
| 67
| 116
| 33.134328
| 0.774563
| 0.031982
| 0
| 0.477273
| 0
| 0
| 0.20392
| 0.14699
| 0
| 0
| 0.0028
| 0
| 0.295455
| 1
| 0.136364
| false
| 0
| 0.090909
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cff62439bd9f68cd8b9fbda08f8270d240786404
| 219
|
py
|
Python
|
hello/views.py
|
CupOfJoe-L/python-docs-hello-django
|
cc33b187e1b34731f036aa3bbd1d48270e3e7b5d
|
[
"MIT"
] | null | null | null |
hello/views.py
|
CupOfJoe-L/python-docs-hello-django
|
cc33b187e1b34731f036aa3bbd1d48270e3e7b5d
|
[
"MIT"
] | null | null | null |
hello/views.py
|
CupOfJoe-L/python-docs-hello-django
|
cc33b187e1b34731f036aa3bbd1d48270e3e7b5d
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
def hello(request):
    """Return the plain-text greeting page for any incoming request."""
    greeting = ("Hello, World! And everyone out there! This is the Django "
                "Version of the App I'm trying to Deploy.")
    return HttpResponse(greeting)
| 36.5
| 124
| 0.780822
| 34
| 219
| 5.029412
| 0.794118
| 0.116959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155251
| 219
| 5
| 125
| 43.8
| 0.924324
| 0
| 0
| 0
| 0
| 0.25
| 0.442922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
5c540f4414022745fd23cc4a4a838f528acc04c4
| 4,413
|
py
|
Python
|
Datasets/paddy_millet_data.py
|
kabbas570/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using
|
87a8d231625adcdce6168a25ee146bc74f22ba5d
|
[
"CC-BY-4.0"
] | null | null | null |
Datasets/paddy_millet_data.py
|
kabbas570/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using
|
87a8d231625adcdce6168a25ee146bc74f22ba5d
|
[
"CC-BY-4.0"
] | null | null | null |
Datasets/paddy_millet_data.py
|
kabbas570/CED-Net-Crops-and-Weeds-Segmentation-for-Smart-Farming-Using
|
87a8d231625adcdce6168a25ee146bc74f22ba5d
|
[
"CC-BY-4.0"
] | 1
|
2021-03-29T01:49:49.000Z
|
2021-03-29T01:49:49.000Z
|
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
# Training-set file lists, sorted so images and masks pair up by index.
# sorted(glob.glob(...)) replaces the original append loops — identical result.
mask_id = sorted(glob.glob('/home/user01/data_ssd/Abbas/PAPER/Datasets/Paddy_Millet/train/masks/*.png'))  # path to masks of train data
image_id = sorted(glob.glob('/home/user01/data_ssd/Abbas/PAPER/Datasets/Paddy_Millet/train/images/*.png'))  # path to images of train data

# Full-resolution tile size; the half-resolution variant used below is
# height//2 x width//2.
height = 896
width = 896

# 5x5 structuring element for the morphological opening applied to masks.
kernel = np.ones((5, 5), np.uint8)
def gene():
    """Build training mask targets; return (half_res_masks, full_res_masks)."""
    full_res = []
    half_res = []
    for mask_path in mask_id:
        # Two-channel one-hot target: channel 0 marks background pixels
        # (value 0), channel 1 marks foreground pixels (value 255).
        onehot = np.zeros([896, 896, 2])
        gray = cv2.imread(mask_path, 0)
        onehot[:, :, 0][np.where(gray == 0)] = 1
        onehot[:, :, 1][np.where(gray == 255)] = 1
        onehot = cv2.morphologyEx(onehot, cv2.MORPH_OPEN, kernel)
        shrunk = cv2.resize(onehot, (height // 2, width // 2),
                            interpolation=cv2.INTER_AREA)
        shrunk[np.where(shrunk >= 0.25)] = 1
        shrunk = cv2.morphologyEx(shrunk, cv2.MORPH_OPEN, kernel)
        full_res.append(onehot)
        half_res.append(shrunk)
    return np.array(half_res), np.array(full_res)
def img():
    """Load training images scaled to [0, 1]; return (half_res, full_res)."""
    full_res = []
    half_res = []
    # NOTE(review): the loop length comes from mask_id while image_id is
    # indexed — assumes the two sorted lists pair one-to-one; confirm upstream.
    for idx in range(len(mask_id)):
        frame = cv2.imread(image_id[idx])
        frame = frame / 255
        small = cv2.resize(frame, (height // 2, width // 2),
                           interpolation=cv2.INTER_AREA)
        full_res.append(frame)
        half_res.append(small)
    return np.array(half_res), np.array(full_res)
# Validation-set file lists, sorted so images and masks pair up by index.
# sorted(glob.glob(...)) replaces the original append loops — identical result.
mask_idV = sorted(glob.glob('/home/user01/data_ssd/Abbas/PAPER/Datasets/Paddy_Millet/valid/masks/*.png'))  # path to masks of validation data
image_idV = sorted(glob.glob('/home/user01/data_ssd/Abbas/PAPER/Datasets/Paddy_Millet/valid/images/*.png'))  # path to images of validation data
def geneV():
    """Build validation mask targets; return (half_res_masks, full_res_masks)."""
    full_res = []
    half_res = []
    # NOTE(review): loop length comes from image_idV while mask_idV is
    # indexed — assumes both sorted lists pair one-to-one; confirm upstream.
    for idx in range(len(image_idV)):
        # Two-channel one-hot target: channel 0 = background (pixel == 0),
        # channel 1 = foreground (pixel == 255).
        onehot = np.zeros([896, 896, 2])
        gray = cv2.imread(mask_idV[idx], 0)
        onehot[:, :, 0][np.where(gray == 0)] = 1
        onehot[:, :, 1][np.where(gray == 255)] = 1
        onehot = cv2.morphologyEx(onehot, cv2.MORPH_OPEN, kernel)
        shrunk = cv2.resize(onehot, (height // 2, width // 2),
                            interpolation=cv2.INTER_AREA)
        shrunk[np.where(shrunk >= 0.25)] = 1
        shrunk = cv2.morphologyEx(shrunk, cv2.MORPH_OPEN, kernel)
        full_res.append(onehot)
        half_res.append(shrunk)
    return np.array(half_res), np.array(full_res)
def imgV():
    """Load validation images scaled to [0, 1]; return (half_res, full_res)."""
    full_res = []
    half_res = []
    for idx in range(len(image_idV)):
        frame = cv2.imread(image_idV[idx])
        frame = frame / 255
        small = cv2.resize(frame, (height // 2, width // 2),
                           interpolation=cv2.INTER_AREA)
        full_res.append(frame)
        half_res.append(small)
    return np.array(half_res), np.array(full_res)
import glob  # NOTE(review): redundant — glob is already imported at the top; kept unchanged
# Test-set file lists, sorted so images and masks pair up by index.
# sorted(glob.glob(...)) replaces the original append loops — identical result.
mask_idT = sorted(glob.glob('/home/user01/data_ssd/Abbas/PAPER/Datasets/Paddy_Millet/test/masks/*.png'))  # path to masks of test data
image_idT = sorted(glob.glob('/home/user01/data_ssd/Abbas/PAPER/Datasets/Paddy_Millet/test/images/*.png'))  # path to images of test data
def geneT():
    """Build test-set mask targets; return (half_res_masks, full_res_masks)."""
    full_res = []
    half_res = []
    for idx in range(len(mask_idT)):
        # Two-channel one-hot target: channel 0 = background (pixel == 0),
        # channel 1 = foreground (pixel == 255).
        onehot = np.zeros([896, 896, 2])
        gray = cv2.imread(mask_idT[idx], 0)
        onehot[:, :, 0][np.where(gray == 0)] = 1
        onehot[:, :, 1][np.where(gray == 255)] = 1
        onehot = cv2.morphologyEx(onehot, cv2.MORPH_OPEN, kernel)
        shrunk = cv2.resize(onehot, (height // 2, width // 2),
                            interpolation=cv2.INTER_AREA)
        shrunk[np.where(shrunk >= 0.25)] = 1
        shrunk = cv2.morphologyEx(shrunk, cv2.MORPH_OPEN, kernel)
        full_res.append(onehot)
        half_res.append(shrunk)
    return np.array(half_res), np.array(full_res)
def imgT():
    """Load test-set images scaled to [0, 1]; return (half_res, full_res)."""
    full_res = []
    half_res = []
    # NOTE(review): loop length comes from mask_idT while image_idT is
    # indexed — assumes both sorted lists pair one-to-one; confirm upstream.
    for idx in range(len(mask_idT)):
        frame = cv2.imread(image_idT[idx])
        frame = frame / 255
        small = cv2.resize(frame, (height // 2, width // 2),
                           interpolation=cv2.INTER_AREA)
        full_res.append(frame)
        half_res.append(small)
    return np.array(half_res), np.array(full_res)
| 32.211679
| 148
| 0.62044
| 623
| 4,413
| 4.317817
| 0.12199
| 0.040149
| 0.024535
| 0.037918
| 0.843866
| 0.843866
| 0.794796
| 0.794796
| 0.794796
| 0.727881
| 0
| 0.055032
| 0.230002
| 4,413
| 136
| 149
| 32.448529
| 0.73661
| 0.040335
| 0
| 0.681416
| 0
| 0
| 0.103881
| 0.103881
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053097
| false
| 0
| 0.044248
| 0
| 0.150442
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c8788d8a7dbb2f817010ae5c52e2e1b70fe733a
| 145
|
py
|
Python
|
src/sage/sat/solvers/__init__.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 5
|
2015-01-04T07:15:06.000Z
|
2022-03-04T15:15:18.000Z
|
src/sage/sat/solvers/__init__.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | null | null | null |
src/sage/sat/solvers/__init__.py
|
switzel/sage
|
7eb8510dacf61b691664cd8f1d2e75e5d473e5a0
|
[
"BSL-1.0"
] | 10
|
2016-09-28T13:12:40.000Z
|
2022-02-12T09:28:34.000Z
|
from satsolver import SatSolver
from dimacs import Glucose, RSat
# CryptoMiniSat is an optional backend: its bindings may not be installed,
# in which case the remaining solvers above are still importable.
try:
    from cryptominisat import CryptoMiniSat
except ImportError:
    pass
| 16.111111
| 43
| 0.793103
| 17
| 145
| 6.764706
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186207
| 145
| 8
| 44
| 18.125
| 0.974576
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
7a251a70e104374e1417ba0f70ba4314b680e323
| 11,457
|
py
|
Python
|
tests/unit/loop/test_splitfuncs.py
|
benkrikler/alphatwirl
|
cda7d12fec21291ea33af23234fc08be19430934
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/loop/test_splitfuncs.py
|
benkrikler/alphatwirl
|
cda7d12fec21291ea33af23234fc08be19430934
|
[
"BSD-3-Clause"
] | 7
|
2018-02-26T10:32:26.000Z
|
2018-03-19T12:27:12.000Z
|
tests/unit/loop/test_splitfuncs.py
|
benkrikler/alphatwirl
|
cda7d12fec21291ea33af23234fc08be19430934
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import unittest
from alphatwirl.loop.splitfuncs import *
from alphatwirl.loop.splitfuncs import _apply_max_events_total
from alphatwirl.loop.splitfuncs import _file_start_length_list
from alphatwirl.loop.splitfuncs import _start_length_pairs_for_split_lists
from alphatwirl.loop.splitfuncs import _minimum_positive_value
##__________________________________________________________________||
class TestSplitfuncs(unittest.TestCase):
    def test_create_file_start_length_list(self):
        """End-to-end split: two 100-event files capped at 140 events total,
        30 events per run, at most 2 files per run."""
        # simple
        file_nevents_list = [('A', 100), ('B', 100)]
        max_events_per_run = 30
        max_events_total = 140
        max_files_per_run = 2
        # Each tuple is (file-name list, start offset, length); the last run
        # is shortened to 20 so the 140-event total cap is respected.
        expected = [
            (['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A', 'B'], 90, 30),
            (['B'], 20, 20)
        ]
        self.assertEqual(expected, create_file_start_length_list(file_nevents_list, max_events_per_run, max_events_total, max_files_per_run))
def test_apply_max_events_total(self):
# simple
file_nevents_list = [('A', 100), ('B', 100)]
max_events_total = 120
expected = [('A', 100), ('B', 20)]
self.assertEqual(expected, _apply_max_events_total(file_nevents_list, max_events_total))
# exact
file_nevents_list = [('A', 100), ('B', 200)]
max_events_total = 300
expected = [('A', 100), ('B', 200)]
self.assertEqual(expected, _apply_max_events_total(file_nevents_list, max_events_total))
# default
file_nevents_list = [('A', 100), ('B', 200)]
expected = [('A', 100), ('B', 200)]
self.assertEqual(expected, _apply_max_events_total(file_nevents_list))
# zero
file_nevents_list = [('A', 100), ('B', 200)]
max_events_total = 0
expected = [ ]
self.assertEqual(expected, _apply_max_events_total(file_nevents_list, max_events_total))
# empty
file_nevents_list = [ ]
max_events_total = 10
expected = [ ]
self.assertEqual(expected, _apply_max_events_total(file_nevents_list, max_events_total))
def test_file_start_length_list_empty_01(self):
args = ([ ], 20, 2) # empty file list
expected = [ ]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 0)], 20, 2) # no events
expected = [ ]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 0), ('B', 0), ('C', 0)], 20, 2) # no events
expected = [ ]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 0), ('B', 10), ('C', 0)], 20, 2) # no events in some files
expected = [(['B'], 0, 10)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 0), ('B', 20), ('C', 0)], 20, 2) # the last file has no events
# the 2nd last has max_events_per_run
expected = [(['B'], 0, 20)] # shouldn't be [(['B'], 0, 20), ([ ], 0, 0)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20), ('B', 0), ('C', 0)], 20, 2)
expected = [(['A'], 0, 20)]
self.assertEqual(expected, _file_start_length_list(*args))
def test_file_start_length_list_onefile_01(self):
args = ([('A', 20)], 30, 2)
expected = [(['A'], 0, 20)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20)], 20, 2)
expected = [(['A'], 0, 20)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20)], 10, 2)
expected = [(['A'], 0, 10), (['A'], 10, 10)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20)], 7, 2)
expected = [(['A'], 0, 7), (['A'], 7, 7), (['A'], 14, 6)]
self.assertEqual(expected, _file_start_length_list(*args))
def test_file_start_length_twofiles_01(self):
args = ([('A', 20), ('B', 20)], 20, 2) # exact
expected = [(['A'], 0, 20), (['B'], 0, 20)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20), ('B', 25)], 20, 2) # exact first file
expected = [(['A'], 0, 20), (['B'], 0, 20), (['B'], 20, 5)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 40), ('B', 25)], 20, 2) # twice the exact first file
expected = [(['A'], 0, 20), (['A'], 20, 20), (['B'], 0, 20), (['B'], 20, 5)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 60), ('B', 25)], 20, 2) # three times the exact first file
expected = [(['A'], 0, 20), (['A'], 20, 20), (['A'], 40, 20), (['B'], 0, 20), (['B'], 20, 5)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 20)], 110, 2) # short first file
expected = [(['A', 'B'], 0, 110), (['B'], 10, 10)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20), ('B', 25)], 30, 2) # short first file
expected = [(['A', 'B'], 0, 30), (['B'], 10, 15)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20), ('B', 100)], 30, 2) # short first file
expected = [(['A', 'B'], 0, 30), (['B'], 10, 30), (['B'], 40, 30), (['B'], 70, 30)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 30)], 30, 2) # long first file
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A', 'B'], 90, 30), (['B'], 20, 10)]
self.assertEqual(expected, _file_start_length_list(*args))
def test_file_start_length_twofiles_02_maxfile1(self):
args = ([('A', 20), ('B', 20)], 20, 1) # exact
expected = [(['A'], 0, 20), (['B'], 0, 20)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20), ('B', 25)], 20, 1) # exact fist file
expected = [(['A'], 0, 20), (['B'], 0, 20), (['B'], 20, 5)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 20)], 110, 1) # short first file
expected = [(['A'], 0, 100), (['B'], 0, 20)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20), ('B', 25)], 30, 1) # short first file
expected = [(['A'], 0, 20), (['B'], 0, 25)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 20), ('B', 100)], 30, 1) # short first file
expected = [(['A'], 0, 20), (['B'], 0, 30), (['B'], 30, 30), (['B'], 60, 30), (['B'], 90, 10)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 30)], 30, 1) # long first file
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A'], 90, 10), (['B'], 0, 30)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 90), ('B', 30)], 30, 1) # long first file
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['B'], 0, 30)]
self.assertEqual(expected, _file_start_length_list(*args))
def test_file_start_length_list_03(self):
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 30)], 30, 10)
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A', 'B', 'C', 'D'], 90, 30), (['D'], 8, 22)]
self.assertEqual(expected, _file_start_length_list(*args))
def test_file_start_length_list_04(self):
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 30, 10)
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A', 'B', 'C', 'D'], 90, 30), (['D'], 8, 30), (['D'], 38, 30), (['D'], 68, 30), (['D'], 98, 2)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 30, 3)
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A', 'B', 'C'], 90, 22), (['D'], 0, 30), (['D'], 30, 30), (['D'], 60, 30), (['D'], 90, 10)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('C', 7), ('D', 100)], 30, 2)
expected = [(['C', 'D'], 0, 30), (['D'], 23, 30), (['D'], 53, 30), (['D'], 83, 17)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 30, 2)
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A', 'B'], 90, 15), (['C', 'D'], 0, 30), (['D'], 23, 30), (['D'], 53, 30), (['D'], 83, 17)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 30, 1)
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A'], 90, 10), (['B'], 0, 5), (['C'], 0, 7), (['D'], 0, 30), (['D'], 30, 30), (['D'], 60, 30), (['D'], 90, 10)]
self.assertEqual(expected, _file_start_length_list(*args))
def test_file_start_length_list_05(self):
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 30, -1) # max_files_per_run = -1
expected = [(['A'], 0, 30), (['A'], 30, 30), (['A'], 60, 30), (['A', 'B', 'C', 'D'], 90, 30), (['D'], 8, 30), (['D'], 38, 30), (['D'], 68, 30), (['D'], 98, 2)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], -1, 2) # max_events_per_run = -1
expected = [(['A', 'B'], 0, 105), (['C', 'D'], 0, 107)]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], -1, -1) # both are -1
expected = [(['A', 'B', 'C', 'D'], 0, 212)]
self.assertEqual(expected, _file_start_length_list(*args))
def test_file_start_length_list_06(self):
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 30, 0) # max_files_per_run = 0
expected = [ ]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 0, 2) # max_events_per_run = 0
expected = [ ]
self.assertEqual(expected, _file_start_length_list(*args))
args = ([('A', 100), ('B', 5), ('C', 7), ('D', 100)], 0, 0) # both are 0
expected = [ ]
self.assertEqual(expected, _file_start_length_list(*args))
def test_start_length_pairs_for_split_lists(self):
self.assertEqual([(0, 10), (10, 10), (20, 10), (30, 10)], _start_length_pairs_for_split_lists(40, 10))
self.assertEqual([(0, 10), (10, 10), (20, 10), (30, 10), (40, 1)], _start_length_pairs_for_split_lists(41, 10))
self.assertEqual([(0, 40)], _start_length_pairs_for_split_lists(40, 40))
self.assertEqual([(0, 40)], _start_length_pairs_for_split_lists(40, 50))
self.assertEqual([(0, 40)], _start_length_pairs_for_split_lists(40, -1))
def test_minimum_positive_value(self):
# empty
self.assertEqual(-1, _minimum_positive_value([]))
# all negative
self.assertEqual(-1, _minimum_positive_value([-1, -2, - 3]))
# all positive
self.assertEqual(10, _minimum_positive_value([10, 20, 30]))
# zero or positive
self.assertEqual(0, _minimum_positive_value([10, 20, 0, 30]))
# general
self.assertEqual(10, _minimum_positive_value([10, 20, 30, -2, -3]))
##__________________________________________________________________||
| 46.763265
| 183
| 0.528498
| 1,568
| 11,457
| 3.544005
| 0.072704
| 0.108872
| 0.129566
| 0.157279
| 0.860176
| 0.801152
| 0.759403
| 0.735469
| 0.72989
| 0.711355
| 0
| 0.101654
| 0.234966
| 11,457
| 244
| 184
| 46.954918
| 0.532345
| 0.067121
| 0
| 0.384615
| 0
| 0
| 0.023673
| 0
| 0
| 0
| 0
| 0
| 0.313609
| 1
| 0.071006
| false
| 0
| 0.04142
| 0
| 0.118343
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a2beccb20b101469add30621b17d27226afd39b
| 110
|
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/pg-7.4-local-variable.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/pg-7.4-local-variable.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-007/pg-7.4-local-variable.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
def myfnc(value):
    """Demonstrate local variables: rebinding the parameter inside the
    function does not affect the caller's variable."""
    print("inside myfnc", value)
    # This assignment creates/rebinds only the function-local name.
    value = 10
    print("inside myfnc", value)


x = 20
myfnc(x)
# Still 20: the rebinding inside myfnc was local to the call.
print(x)
| 11
| 28
| 0.554545
| 19
| 110
| 3.210526
| 0.368421
| 0.393443
| 0.360656
| 0.557377
| 0.590164
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.272727
| 110
| 9
| 29
| 12.222222
| 0.7125
| 0
| 0
| 0.285714
| 0
| 0
| 0.218182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
7a31c8b4ec0476f6ea703da5244286c922d12e7a
| 1,648
|
py
|
Python
|
tests/unit/test_engine_detection.py
|
blinkhealth/vault-anyconfig
|
5f23cbcdf8d6bccb2e1a79d62b8757d5262fc4b4
|
[
"Apache-2.0"
] | 6
|
2019-04-10T06:02:07.000Z
|
2021-09-18T19:13:09.000Z
|
tests/unit/test_engine_detection.py
|
tomtom-international/vault-anyconf
|
5292319c483a56108d6a1d6888520c964ae185b2
|
[
"Apache-2.0"
] | 11
|
2019-04-01T08:08:56.000Z
|
2021-01-08T20:34:58.000Z
|
tests/unit/test_engine_detection.py
|
LaudateCorpus1/vault-anyconfig
|
5292319c483a56108d6a1d6888520c964ae185b2
|
[
"Apache-2.0"
] | 5
|
2019-03-29T14:59:08.000Z
|
2021-09-14T04:04:07.000Z
|
import pytest
from hypothesis import given, example
import hypothesis.strategies as strat
from vault_anyconfig.vault_anyconfig import VaultAnyConfig
@given(
    contents=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
    secret_key=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
)
@example(contents="aoeu", secret_key="data")
@example(contents="aoeu", secret_key="metadata")
def test_detect_kv_v1(contents, secret_key, gen_vault_response_kv1, gen_vault_response_kv2):
    """A KV v1 response is recognised as v1; a KV v2 response is not."""
    # Access the name-mangled private detector once, for readability.
    is_kv_v1 = VaultAnyConfig._VaultAnyConfig__is_key_value_v1
    kv1_payload = gen_vault_response_kv1(contents, secret_key)
    kv2_payload = gen_vault_response_kv2(contents, secret_key)
    assert is_kv_v1(kv1_payload, secret_key)
    assert not is_kv_v1(kv2_payload, secret_key)
@given(
    contents=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
    secret_key=strat.text(min_size=1, alphabet=strat.characters(blacklist_categories=("C"))),
)
@example(contents="aoeu", secret_key="data")
@example(contents="aoeu", secret_key="metadata")
def test_detect_kv_v2(contents, secret_key, gen_vault_response_kv1, gen_vault_response_kv2):
    """A KV v2 response is recognised as v2; a KV v1 response is not."""
    # Access the name-mangled private detector once, for readability.
    is_kv_v2 = VaultAnyConfig._VaultAnyConfig__is_key_value_v2
    kv1_payload = gen_vault_response_kv1(contents, secret_key)
    kv2_payload = gen_vault_response_kv2(contents, secret_key)
    assert is_kv_v2(kv2_payload)
    assert not is_kv_v2(kv1_payload)
| 41.2
| 93
| 0.796723
| 225
| 1,648
| 5.417778
| 0.204444
| 0.103363
| 0.105004
| 0.052502
| 0.864643
| 0.864643
| 0.864643
| 0.849877
| 0.741591
| 0.741591
| 0
| 0.018919
| 0.101942
| 1,648
| 39
| 94
| 42.25641
| 0.80473
| 0.044296
| 0
| 0.538462
| 0
| 0
| 0.028497
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a880650ca92d9038cfd1525741d5fec7515eb91
| 389
|
py
|
Python
|
idangr_core.py
|
IMULMUL/IDAngr
|
dea91475893178b3a665b7be57eb4140ea002f84
|
[
"BSD-2-Clause"
] | 237
|
2018-02-07T11:18:09.000Z
|
2022-02-18T13:28:29.000Z
|
idangr_core.py
|
IMULMUL/IDAngr
|
dea91475893178b3a665b7be57eb4140ea002f84
|
[
"BSD-2-Clause"
] | 5
|
2018-02-15T14:48:10.000Z
|
2019-10-01T18:18:41.000Z
|
idangr_core.py
|
IMULMUL/IDAngr
|
dea91475893178b3a665b7be57eb4140ea002f84
|
[
"BSD-2-Clause"
] | 34
|
2018-02-14T12:28:09.000Z
|
2022-02-18T13:28:47.000Z
|
######################################################
# Author: Andrea Fioraldi <andreafioraldi@gmail.com> #
# License: BSD 2-Clause                              #
######################################################

import idangr

# Print the usage banner on load.
# Parenthesized single-argument print calls behave identically under
# Python 2 (the parens are just grouping) and Python 3 (a function call),
# so the script no longer requires the Python-2-only print statement.
print("")
print("################### IDAngr ###################")
print(" usage: idangr.init(is_remote, host, port)")
print(" import angrdbg")
print("")
| 25.933333
| 54
| 0.377892
| 27
| 389
| 5.407407
| 0.740741
| 0.150685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003165
| 0.187661
| 389
| 14
| 55
| 27.785714
| 0.458861
| 0.187661
| 0
| 0.333333
| 0
| 0
| 0.654971
| 0.128655
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.833333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
8f830bdf8ea6b9fb08df352f98d824cf4af0553d
| 9,837
|
py
|
Python
|
tests/test_pep585_integration.py
|
loyada/typed-py
|
8f946ed0cddb38bf7fd463a4c8111a592ccae31a
|
[
"MIT"
] | 14
|
2018-02-14T13:28:47.000Z
|
2022-02-12T08:03:21.000Z
|
tests/test_pep585_integration.py
|
loyada/typed-py
|
8f946ed0cddb38bf7fd463a4c8111a592ccae31a
|
[
"MIT"
] | 142
|
2017-11-22T14:02:33.000Z
|
2022-03-23T21:26:29.000Z
|
tests/test_pep585_integration.py
|
loyada/typed-py
|
8f946ed0cddb38bf7fd463a4c8111a592ccae31a
|
[
"MIT"
] | 4
|
2017-12-14T16:46:45.000Z
|
2021-12-15T16:33:31.000Z
|
import typing
import sys
from typing import List
from pytest import raises, mark
from typedpy import (
Array,
Deserializer,
Integer,
SerializableField,
Serializer,
String,
Structure,
)
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_string_var_1():
    """PEP 585 builtin generic list[String] round-trips through (de)serialization."""
    class Foo(Structure):
        a: list[String]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=["abc", "def"])
    assert foo.a[0] == "abc"
    serialized = {"a": ["abc", "def"], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    assert Serializer(deserialized).serialize() == serialized
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_string_var_2():
    """Nested builtin generics list[set[String]] round-trip; set order is not fixed."""
    class Foo(Structure):
        a: list[set[String]]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=[{"abc", "def"}])
    assert "abc" in foo.a[0]
    serialized = {"a": [{"abc", "def"}], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    # Sets have no order; check membership element-by-element.
    for s in deserialized.a[0]:
        assert s in result["a"][0]
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_string_var_3():
    """typing.List[String] behaves the same as builtin list[String]."""
    class Foo(Structure):
        a: List[String]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=["abc", "def"])
    assert foo.a[0] == "abc"
    serialized = {"a": ["abc", "def"], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    assert Serializer(deserialized).serialize() == serialized
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_string_var_4():
    """Mixing builtin list[...] with typing.Set[...] is supported."""
    class Foo(Structure):
        a: list[typing.Set[String]]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=[{"abc", "def"}])
    assert "abc" in foo.a[0]
    serialized = {"a": [{"abc", "def"}], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    # Sets have no order; check membership element-by-element.
    for s in deserialized.a[0]:
        assert s in result["a"][0]
    assert result["i"] == 5
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_string_var_5():
    """Fully typing-based List[Set[String]] round-trips like the builtin form."""
    class Foo(Structure):
        a: typing.List[typing.Set[String]]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=[{"abc", "def"}])
    assert "abc" in foo.a[0]
    serialized = {"a": [{"abc", "def"}], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    # Sets have no order; check membership element-by-element.
    for s in deserialized.a[0]:
        assert s in result["a"][0]
    assert result["i"] == 5
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_string_var_6():
    """Same scenario as test_list_of_string_var_5."""
    # NOTE(review): this test is a byte-for-byte duplicate of
    # test_list_of_string_var_5 and adds no coverage — likely a
    # copy-paste leftover; consider removing or varying the annotation.
    class Foo(Structure):
        a: typing.List[typing.Set[String]]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=[{"abc", "def"}])
    assert "abc" in foo.a[0]
    serialized = {"a": [{"abc", "def"}], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    for s in deserialized.a[0]:
        assert s in result["a"][0]
    assert result["i"] == 5
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_array_of_str():
    """Typedpy Array[str] accepts strings and rejects non-string items on deserialize."""
    class Foo(Structure):
        a: Array[str]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=["abc", "def"])
    assert "abc" in foo.a[0]
    serialized = {"a": ["abc", "def"], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    assert result == serialized
    with raises(ValueError) as excinfo:
        deserializer.deserialize({"a": ["abc", 123], "i": 5})
    assert "a_1: Got 123; Expected a string" in str(excinfo.value)
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_array_of_dict_1():
    """Array of dict[str, Integer]: round-trip plus value-type validation."""
    class Foo(Structure):
        a: Array[dict[str, Integer]]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=[{"abc": 1}, {"abc": 2}])
    assert foo.a[0] == {"abc": 1}
    serialized = {"a": [{"abc": 1}, {"abc": 2}], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    assert result == serialized
    with raises(ValueError) as excinfo:
        deserializer.deserialize({"a": [{"abc": "xxx"}], "i": 5})
    assert "a_0: a_1_value: Expected <class 'int'>; Got 'xxx'" in str(excinfo.value)
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_array_of_dict_2():
    """Array of dict[String, int]: both key-type and value-type are validated."""
    class Foo(Structure):
        a: Array[dict[String, int]]
        i: int

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=[{"abc": 1}, {"abc": 2}])
    assert foo.a[0] == {"abc": 1}
    serialized = {"a": [{"abc": 1}, {"abc": 2}], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    assert result == serialized
    with raises(ValueError) as excinfo:
        deserializer.deserialize({"a": [{"abc": "xxx"}], "i": 5})
    assert "a_0: a_1_value: Expected <class 'int'>; Got 'xxx'" in str(excinfo.value)
    with raises(ValueError) as excinfo:
        deserializer.deserialize({"a": [{1: 123}], "i": 5})
    assert "a_0: a_1_key: Got 1; Expected a string" in str(excinfo.value)
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_array_of_dict_3():
    """A parameterized Field instance (String(minLength=5)) works as a dict key type."""
    class Foo(Structure):
        a: Array[dict[String(minLength=5), int]]
        i: int

    # Keys shorter than minLength are rejected at construction time.
    with raises(ValueError) as excinfo:
        Foo(i=5, a=[{"abc": 1}, {"abc": 2}])
    assert "a_0_key: Got 'abc'; Expected a minimum length of 5" in str(excinfo.value)

    deserializer = Deserializer(Foo)
    foo = Foo(i=5, a=[{"abcde": 1}, {"abc123": 2}])
    assert foo.a[0] == {"abcde": 1}
    serialized = {"a": [{"abcde": 1}, {"abc123": 2}], "i": 5}
    deserialized = deserializer.deserialize(serialized)
    assert deserialized == foo
    result = Serializer(deserialized).serialize()
    assert result == serialized
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_string_var_2_invalid():
    """Invalid items in list[set[String]] raise on construction and on deserialize."""
    class Foo(Structure):
        a: list[set[String]]
        i: int

    deserializer = Deserializer(Foo)
    with raises(TypeError) as excinfo:
        Foo(i=5, a=[{"abc", "def"}, 123])
    assert "a_1: Got 123; Expected <class 'set'>" in str(excinfo.value)
    serialized = {"a": [{"abc", "def"}, 123], "i": 5}
    with raises(ValueError) as excinfo:
        deserializer.deserialize(serialized)
    assert "a_1: Got 123; Expected a list, set, or tuple" in str(excinfo.value)
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_optional_simple():
    """typing.Optional wrapping a PEP 585 generic: field may be absent or set later."""
    class Foo(Structure):
        a: typing.Optional[set[String]]
        i: int

    foo = Foo(i=5)
    foo.a = {"abc"}
    assert foo.a == {"abc"}
    assert Foo(i=5, a={"abc"}) == foo
    assert Deserializer(Foo).deserialize({"i": 5}) == Foo(i=5)
    assert Deserializer(Foo).deserialize({"i": 5, "a": ["abc"]}) == foo
    assert Serializer(foo).serialize() == {"i": 5, "a": ["abc"]}
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_default_factory_invalid_default():
    """A default factory whose product violates the field type fails at class creation."""
    with raises(TypeError) as excinfo:
        class Foo(Structure):
            a: list[str] = lambda: [1, 2, 3]
            i: int
    assert (
        "a: Invalid default value: [1, 2, 3]; Reason: value_0: Got 1; Expected a string"
        in str(excinfo.value)
    )
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_default_factory_valid():
    """Each instance gets its own default list; mutating one does not leak."""
    class Foo(Structure):
        a: list[str] = lambda: ["abc", "def"]
        i: int

    foo1 = Foo(i=1)
    foo2 = Foo(i=2)
    foo1.a.append("xxx")
    assert foo1.a == ["abc", "def", "xxx"]
    assert foo2.a == ["abc", "def"]
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_dict_to_map():
    """A custom SerializableField works as the value type of a PEP 585 dict."""
    class TestSerializable(SerializableField):
        # Strip the trailing space on the way out...
        def serialize(self, value):
            return value.rstrip()

        # ...and add it back on the way in.
        def deserialize(self, value):
            return value + " "

    class Foo(Structure):
        a: dict[str, TestSerializable]
        i: int

    foo = Deserializer(Foo).deserialize({"i": 5, "a": {"abc": "xxx"}})
    assert foo.a["abc"] == "xxx" + " "
    assert Serializer(foo).serialize() == {"i": 5, "a": {"abc": "xxx"}}
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_dict_to_map_invalid():
    """Validation errors inside nested dict values surface on deserialize."""
    class Foo(Structure):
        a: dict[str, Array[String]]
        i: int

    with raises(ValueError) as excinfo:
        Deserializer(Foo).deserialize({"i": 5, "a": {"abc": ["xxx", "yyy", 2]}})
    assert "a_2: Expected a string" in str(excinfo.value)
@mark.skipif(sys.version_info < (3, 9), reason="requires python3.9 or higher")
def test_list_of_class_reference():
    """A Structure subclass may be used as the item type of list[...]."""
    class Bar(Structure):
        a: int
        b: int

    class Foo(Structure):
        a: list[Bar]
        i: int

    with raises(ValueError) as excinfo:
        Deserializer(Foo).deserialize({"i": 5, "a": [{"a": "x", "b": 5}]})
    assert "a_0: a: Expected <class 'int'>; Got 'x'" in str(excinfo.value)
    assert Deserializer(Foo).deserialize({"i": 5, "a": [{"a": 1, "b": 5}]}) == Foo(
        a=[Bar(a=1, b=5)], i=5
    )
| 31.228571
| 88
| 0.610959
| 1,331
| 9,837
| 4.436514
| 0.069121
| 0.014225
| 0.010161
| 0.057578
| 0.865368
| 0.824725
| 0.799661
| 0.739543
| 0.706012
| 0.702625
| 0
| 0.028508
| 0.219071
| 9,837
| 314
| 89
| 31.328025
| 0.740172
| 0
| 0
| 0.625514
| 0
| 0.004115
| 0.129104
| 0
| 0.004115
| 0
| 0
| 0
| 0.218107
| 1
| 0.078189
| false
| 0
| 0.020576
| 0.00823
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8fb98d32cab778ab9fe8d0731121a6f7138d8c46
| 12,013
|
py
|
Python
|
tests/test_decorators.py
|
Dominik1123/click-inspect
|
d8c6616fa556785fce9bd4ef3d5bb5fb54219956
|
[
"MIT"
] | null | null | null |
tests/test_decorators.py
|
Dominik1123/click-inspect
|
d8c6616fa556785fce9bd4ef3d5bb5fb54219956
|
[
"MIT"
] | null | null | null |
tests/test_decorators.py
|
Dominik1123/click-inspect
|
d8c6616fa556785fce9bd4ef3d5bb5fb54219956
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import sys
from typing import List, Sequence, Tuple, Union
import click
import pytest
from click_inspect.decorators import add_options_from, _parse_type_hint_into_kwargs
def test_add_options_from(base_function):
    """All documented parameters of base_function become click options with
    name, type, default, required-ness and help text carried over."""
    @click.command()
    @add_options_from(base_function)
    def test(): pass

    assert len(test.params) == 4
    assert test.params[0].name == 'b'
    assert test.params[0].opts == ['--b']
    assert test.params[0].secondary_opts == []
    assert test.params[0].type is click.INT
    assert test.params[0].default == 1
    assert test.params[0].required is False
    assert test.params[0].help == 'This one should be added.'
    assert test.params[1].name == 'c'
    assert test.params[1].opts == ['--c']
    assert test.params[1].secondary_opts == []
    assert test.params[1].type is click.INT
    assert test.params[1].required is True
    assert test.params[1].help == 'This one should be added too.'
    assert test.params[2].name == 'd'
    assert test.params[2].opts == ['--d']
    assert test.params[2].secondary_opts == []
    assert test.params[2].type is click.STRING
    assert test.params[2].default == 'test'
    assert test.params[2].required is False
    assert test.params[2].help == 'And so should this one.'
    assert test.params[3].name == 'e'
    assert test.params[3].opts == ['--e']
    assert test.params[3].secondary_opts == ['--no-e']
    assert test.params[3].type is click.BOOL
    assert test.params[3].is_flag is True
    assert test.params[3].default is True
    assert test.params[3].required is False
    assert test.params[3].help == 'Boolean flag.'
def test_add_options_from_infer_types_from_docstring(base_function):
    """With annotations removed, the same options are inferred from the docstring."""
    base_function.__annotations__ = {}
    # Re-run the full option check against the annotation-free function.
    test_add_options_from(base_function)
def test_add_options_from_include(base_function):
    """include={...} restricts the generated options to the named parameters."""
    @click.command()
    @add_options_from(base_function, include={'a', 'b'})
    def test(): pass

    assert len(test.params) == 2
    assert test.params[0].name == 'a'
    assert test.params[0].opts == ['--a']
    assert test.params[0].secondary_opts == []
    assert test.params[0].type is click.STRING
    assert test.params[0].required is True
    assert test.params[0].help == 'This parameter should be skipped.'
    assert test.params[1].name == 'b'
    assert test.params[1].opts == ['--b']
    assert test.params[1].secondary_opts == []
    assert test.params[1].type is click.INT
    assert test.params[1].default == 1
    assert test.params[1].required is False
    assert test.params[1].help == 'This one should be added.'
def test_add_options_from_include_via_names_and_custom():
    """Parameters mentioned in names= or custom= are implicitly included."""
    def func(a: int, b: int, c: int): pass

    @click.command()
    @add_options_from(func, names={'a': ['-a']}, custom={'c': {'default': 1}})
    def test(): pass

    assert len(test.params) == 2
    assert test.params[0].name == 'a'
    assert test.params[1].name == 'c'
def test_add_options_from_exclude(base_function):
    """exclude={...} removes the named parameters from the generated options."""
    @click.command()
    @add_options_from(base_function, exclude={'b', 'c'})
    def test(): pass

    assert len(test.params) == 2
    assert test.params[0].name == 'd'
    assert test.params[0].opts == ['--d']
    assert test.params[0].secondary_opts == []
    assert test.params[0].type is click.STRING
    assert test.params[0].default == 'test'
    assert test.params[0].required is False
    assert test.params[0].help == 'And so should this one.'
    assert test.params[1].name == 'e'
    assert test.params[1].opts == ['--e']
    assert test.params[1].secondary_opts == ['--no-e']
    assert test.params[1].type is click.BOOL
    assert test.params[1].is_flag is True
    assert test.params[1].default is True
    assert test.params[1].required is False
    assert test.params[1].help == 'Boolean flag.'
def test_add_options_from_names(base_function):
    """names={...} overrides the auto-generated option strings per parameter."""
    @click.command()
    @add_options_from(base_function, names={'b': ['-b'], 'd': ['-test', '--d']})
    def test(): pass

    assert len(test.params) == 4
    assert test.params[0].name == 'b'
    assert test.params[0].opts == ['-b']
    assert test.params[0].secondary_opts == []
    assert test.params[1].name == 'c'
    assert test.params[1].opts == ['--c']
    assert test.params[1].secondary_opts == []
    assert test.params[2].name == 'd'
    assert test.params[2].opts == ['-test', '--d']
    assert test.params[2].secondary_opts == []
    assert test.params[3].name == 'e'
    assert test.params[3].opts == ['--e']
    assert test.params[3].secondary_opts == ['--no-e']
def test_add_options_from_custom(base_function):
    """custom={...} overrides individual click.Option keyword arguments."""
    @click.command()
    @add_options_from(base_function, custom={'d': dict(default='custom_default')})
    def test(): pass

    assert len(test.params) == 4
    assert test.params[2].default == 'custom_default'
def test_add_options_from_single_switch_boolean_flag(base_function):
    """Overriding a boolean flag's names with a single switch drops the --no- form."""
    @click.command()
    @add_options_from(base_function, names={'e': ['--e']})
    def test(): pass

    assert len(test.params) == 4
    assert test.params[3].name == 'e'
    assert test.params[3].opts == ['--e']
    assert test.params[3].secondary_opts == []
    assert test.params[3].type is click.BOOL
    assert test.params[3].is_flag is True
    assert test.params[3].default is True
    assert test.params[3].required is False
    assert test.params[3].help == 'Boolean flag.'
def test_add_options_from_warn_if_no_type():
    """A parameter with neither type hint nor default triggers a UserWarning."""
    def func(*, a):
        """Test func.

        Args:
            a: Test parameter
        """
    with pytest.warns(UserWarning) as warninfo:
        @add_options_from(func)
        def test(): pass

    assert len(warninfo) == 1
    assert str(warninfo[0].message.args[0]) == "No type hint for parameter 'a'"
def test_add_options_from_no_warn_if_no_type_but_default():
    """No warning when the type can be inferred from the default value."""
    def func(*, a = 1):
        """Test func.

        Args:
            a: Test parameter
        """
    @click.command()
    @add_options_from(func)
    def test(): pass

    assert len(test.params) == 1
    assert test.params[0].type is click.INT
    assert test.params[0].default == 1
def test_add_options_from_unsupported_docstring_style():
    """An unrecognized docstring section ('Params:') yields no help text."""
    def func(*, a: int):
        """Test func.

        Params:
            a: This is the only parameter.
        """
    @click.command()
    @add_options_from(func)
    def test(): pass

    assert len(test.params) == 1
    assert test.params[0].help is None
def test_add_options_from_readme_example_func(readme_example_function):
    """The README example function produces the options the README documents."""
    @click.command()
    @add_options_from(readme_example_function)
    def test(): pass

    assert len(test.params) == 3
    assert test.params[0].name == 'size'
    assert test.params[0].opts == ['--size']
    assert test.params[0].type is click.INT
    assert test.params[0].required is True
    assert test.params[0].help == 'Size of the grid in both dimensions.'
    assert test.params[1].name == 'symbol'
    assert test.params[1].opts == ['--symbol']
    assert test.params[1].type is click.STRING
    assert test.params[1].default == 'x'
    assert test.params[1].required is False
    assert test.params[1].help == 'Symbol for displaying data points.'
    assert test.params[2].name == 'empty'
    assert test.params[2].opts == ['--empty']
    assert test.params[2].type is click.STRING
    assert test.params[2].default == ' '
    assert test.params[2].required is False
    assert test.params[2].help == 'Symbol for displaying empty space.'
def test_add_options_from_list_type_hint(list_type_hint_function):
    """A List[...] hint maps to a multiple=True option of the element type."""
    @click.command()
    @add_options_from(list_type_hint_function)
    def test(): pass

    assert len(test.params) == 1
    assert test.params[0].name == 'x'
    assert test.params[0].opts == ['--x']
    assert test.params[0].type is click.INT
    assert test.params[0].required is True
    assert test.params[0].multiple is True
    assert test.params[0].help == '...'
def test_add_options_from_list_type_hint_via_docstring(list_type_hint_function):
list_type_hint_function.__annotations__ = {}
test_add_options_from_list_type_hint(list_type_hint_function)
def test_add_options_from_tuple_type_hint(tuple_type_hint_function):
@click.command()
@add_options_from(tuple_type_hint_function)
def test(): pass
assert len(test.params) == 1
assert test.params[0].name == 'x'
assert test.params[0].opts == ['--x']
assert type(test.params[0].type) is click.Tuple
assert test.params[0].type.types == [click.INT, click.STRING]
assert test.params[0].nargs == 2
assert test.params[0].required is True
assert test.params[0].help == '...'
def test_add_options_from_tuple_type_hint_via_docstring(tuple_type_hint_function):
tuple_type_hint_function.__annotations__ = {}
test_add_options_from_tuple_type_hint(tuple_type_hint_function)
def test_add_options_from_union_type_hint(union_type_hint_function):
@click.command()
@add_options_from(union_type_hint_function)
def test(): pass
assert len(test.params) == 1
assert test.params[0].name == 'x'
assert test.params[0].opts == ['--x']
assert test.params[0].type is click.INT
assert test.params[0].required is True
assert test.params[0].help == '...'
def test_add_options_from_union_type_hint_via_docstring(union_type_hint_function):
union_type_hint_function.__annotations__ = {}
test_add_options_from_union_type_hint(union_type_hint_function)
def test_add_options_from_nested_union_and_sequence():
def func(*, x: Union[List[int], str]): pass
@click.command()
@add_options_from(func)
def test(): pass
assert len(test.params) == 1
assert test.params[0].type is click.INT
def test_add_options_from_no_type_warning_for_excluded_parameters():
def func(*, x: int): # Use some valid type hint here to prevent further warnings.
"""
Args:
x (UnknownType): If 'x' gets excluded, no warning should be issued.
"""
with pytest.warns(UserWarning) as warninfo:
@add_options_from(func)
def test(): pass
assert len(warninfo) == 1
@click.command()
@add_options_from(func, exclude={'x'})
def test(): pass
assert len(test.params) == 0
@pytest.mark.skipif(sys.version_info >= (3, 9),
reason='Starting with Python 3.9 get_type_hints works without raising TypeError.')
def test_add_options_from_warn_on_standard_collections_as_typing_generics():
def func(*, a: list[str]): pass
with pytest.warns(UserWarning) as warninfo:
@add_options_from(func)
def test(): pass
assert len(warninfo) == 2 # Warns another time because no type hint is available.
def test_parse_type_hint_into_kwargs_bool():
assert _parse_type_hint_into_kwargs(bool) == dict(is_flag=True, type=bool)
@pytest.mark.parametrize('tp', [Sequence, List])
def test_parse_type_hint_into_kwargs_list(tp):
assert _parse_type_hint_into_kwargs(tp[int]) == dict(multiple=True, type=int)
assert _parse_type_hint_into_kwargs(tp[str]) == dict(multiple=True, type=str)
assert _parse_type_hint_into_kwargs(tp[bool]) == dict(multiple=True, type=bool)
def test_parse_type_hint_into_kwargs_tuple():
assert _parse_type_hint_into_kwargs(Tuple[int, str]) == dict(type=(int, str))
assert _parse_type_hint_into_kwargs(Tuple[int, int, int]) == dict(type=(int, int, int))
def test_parse_type_hint_into_kwargs_union():
assert _parse_type_hint_into_kwargs(Union[int, str]) == dict(type=int)
assert _parse_type_hint_into_kwargs(Union[str, int]) == dict(type=str)
def test_parse_type_hint_into_kwargs_list_with_union():
assert _parse_type_hint_into_kwargs(List[Union[int, str]]) == dict(multiple=True, type=int)
assert _parse_type_hint_into_kwargs(List[Union[str, float]]) == dict(multiple=True, type=str)
def test_parse_type_hint_into_kwargs_union_with_list():
assert _parse_type_hint_into_kwargs(Union[List[int], List[str]]) == dict(multiple=True, type=int)
assert _parse_type_hint_into_kwargs(Union[List[str], List[int]]) == dict(multiple=True, type=str)
| 31.949468
| 101
| 0.682427
| 1,784
| 12,013
| 4.366031
| 0.084641
| 0.170754
| 0.240339
| 0.109128
| 0.829246
| 0.751701
| 0.693542
| 0.640647
| 0.571575
| 0.523816
| 0
| 0.014986
| 0.177891
| 12,013
| 375
| 102
| 32.034667
| 0.773694
| 0.027137
| 0
| 0.511811
| 0
| 0
| 0.052663
| 0
| 0
| 0
| 0
| 0
| 0.586614
| 1
| 0.204724
| false
| 0.082677
| 0.023622
| 0
| 0.228346
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
8fdf6def1199362a429b295f53c3e9fb5cf09573
| 168
|
py
|
Python
|
app/admin.py
|
VickramMS/arccp
|
40d1664b8b491cd743d88f9b512d014b74b6e921
|
[
"bzip2-1.0.6"
] | null | null | null |
app/admin.py
|
VickramMS/arccp
|
40d1664b8b491cd743d88f9b512d014b74b6e921
|
[
"bzip2-1.0.6"
] | 5
|
2020-06-05T20:17:36.000Z
|
2021-06-04T22:13:23.000Z
|
app/admin.py
|
VickramMS/arccp
|
40d1664b8b491cd743d88f9b512d014b74b6e921
|
[
"bzip2-1.0.6"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(Test)
admin.site.register(Question)
admin.site.register(Report)
admin.site.register(Formula)
| 24
| 32
| 0.815476
| 24
| 168
| 5.708333
| 0.5
| 0.262774
| 0.49635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 168
| 7
| 33
| 24
| 0.878205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8fe3ba9889578ae07b17d66b4e9120dfc571dcd7
| 45
|
py
|
Python
|
detector/__init__.py
|
EvenDBL/License-Plate-Detection-and-Recognition
|
edeead48c2a12736a0fcd79ba8d1ae3cbdf60aee
|
[
"Unlicense"
] | 1
|
2022-03-28T09:42:45.000Z
|
2022-03-28T09:42:45.000Z
|
detector/__init__.py
|
EvenDBL/License-Plate-Detection-and-Recognition
|
edeead48c2a12736a0fcd79ba8d1ae3cbdf60aee
|
[
"Unlicense"
] | null | null | null |
detector/__init__.py
|
EvenDBL/License-Plate-Detection-and-Recognition
|
edeead48c2a12736a0fcd79ba8d1ae3cbdf60aee
|
[
"Unlicense"
] | null | null | null |
from .segmentation_detector import SEGDetecor
| 45
| 45
| 0.911111
| 5
| 45
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 45
| 1
| 45
| 45
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.