hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
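
The columns above describe one row per source file: repository metadata, the raw file `content`, and per-file quality signals. As a minimal sketch of how such a table might be queried — assuming, hypothetically, that the rows are stored in a Parquet file named data.parquet (this dump does not say how the rows are stored, and the filter thresholds below are illustrative only):

    import pandas as pd

    # Hypothetical storage location; not specified by the dump itself.
    df = pd.read_parquet("data.parquet")

    # Keep Python files that are mostly alphanumeric, not too long,
    # and not flagged as auto-generated by the quality signals.
    mask = (
        (df["lang"] == "Python")
        & (df["alphanum_fraction"] > 0.25)
        & (df["qsc_code_num_lines_quality_signal"] < 1000)
        & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    )
    for content in df.loc[mask, "content"].head(3):
        print(content[:80])  # preview the first 80 characters of each file
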
f7c9101c72ad1ded9a9c2e62600c587a0e6679a4
109
py
Python
codiag/flib/__init__.py
jimustafa/codiag
3b275da7f00551d5af5e26ce0432a6d91710fb15
[ "BSD-3-Clause" ]
null
null
null
codiag/flib/__init__.py
jimustafa/codiag
3b275da7f00551d5af5e26ce0432a6d91710fb15
[ "BSD-3-Clause" ]
null
null
null
codiag/flib/__init__.py
jimustafa/codiag
3b275da7f00551d5af5e26ce0432a6d91710fb15
[ "BSD-3-Clause" ]
null
null
null
from __future__ import absolute_import, division, print_function

from .codiag import *
from . import givens
21.8
64
0.816514
14
109
5.928571
0.642857
0
0
0
0
0
0
0
0
0
0
0
0.137615
109
4
65
27.25
0.882979
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.333333
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f7fa41af81c49e6f6e970ef316571ea89fcdd869
240
py
Python
pacote-download/d012 - valor do produto de dar 5% desconto.py
Carlos-DOliveira/cursoemvideo-python3
4546c8a7360155243e2f7ecbbb80c57868f770a2
[ "MIT" ]
null
null
null
pacote-download/d012 - valor do produto de dar 5% desconto.py
Carlos-DOliveira/cursoemvideo-python3
4546c8a7360155243e2f7ecbbb80c57868f770a2
[ "MIT" ]
null
null
null
pacote-download/d012 - valor do produto de dar 5% desconto.py
Carlos-DOliveira/cursoemvideo-python3
4546c8a7360155243e2f7ecbbb80c57868f770a2
[ "MIT" ]
null
null
null
'''012: Write a program that reads the price of a product and shows its new price with a 5% discount'''
valor = float(input('Enter the product price: R$ '))
print(f'The product price with a 5% discount is {valor - (valor * 5)/100:.2f}')
48
103
0.683333
45
240
3.644444
0.644444
0.04878
0.073171
0.170732
0
0
0
0
0
0
0
0.051282
0.1875
240
5
104
48
0.789744
0.4
0
0
0
0.5
0.717391
0
0
0
0
0
0
1
0
false
0
0
0
0
0.5
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
7930719bb140601d6f74af8523ec00b5538c6d64
75
py
Python
satispy/io/__init__.py
NickGardi/satispy
ff0485921fc9fa290446837a10432929c4e04d90
[ "BSD-3-Clause" ]
54
2015-04-01T06:17:06.000Z
2021-08-19T23:04:17.000Z
satispy/io/__init__.py
NickGardi/satispy
ff0485921fc9fa290446837a10432929c4e04d90
[ "BSD-3-Clause" ]
11
2015-04-25T15:05:29.000Z
2019-04-12T18:34:41.000Z
satispy/io/__init__.py
NickGardi/satispy
ff0485921fc9fa290446837a10432929c4e04d90
[ "BSD-3-Clause" ]
21
2015-03-09T20:41:36.000Z
2019-06-15T17:26:57.000Z
from __future__ import absolute_import

from satispy.io.dimacs_cnf import *
25
38
0.853333
11
75
5.272727
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.106667
75
2
39
37.5
0.865672
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f7188a5d6697e456a89894a7330aa0af3fad00a2
146
py
Python
tests/filetwo.py
alexandrevicenzi/lazyconfig
a03aa0b92cf8f810a8652728d80dd0d792dd66ed
[ "MIT" ]
null
null
null
tests/filetwo.py
alexandrevicenzi/lazyconfig
a03aa0b92cf8f810a8652728d80dd0d792dd66ed
[ "MIT" ]
null
null
null
tests/filetwo.py
alexandrevicenzi/lazyconfig
a03aa0b92cf8f810a8652728d80dd0d792dd66ed
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
import sys

sys.path.append('./')

from lazyconfig import lazyconfig


def get_name():
    return lazyconfig.config.name
12.166667
33
0.678082
19
146
5.157895
0.736842
0
0
0
0
0
0
0
0
0
0
0.008264
0.171233
146
11
34
13.272727
0.801653
0.143836
0
0
0
0
0.01626
0
0
0
0
0
0
1
0.2
true
0
0.4
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
1
1
0
0
5
f71dc8aeb93f7835f2caaf5b59252fc6ba16d798
135
py
Python
tests/fixtures/unused_import_comment_6.py
cdce8p/python-typing-update
2ad78b9ce4b5e3d8e8ff5dd35474c8e214d69983
[ "MIT" ]
5
2021-03-17T16:12:09.000Z
2021-09-12T22:19:51.000Z
tests/fixtures/unused_import_comment_6.py
cdce8p/python-typing-update
2ad78b9ce4b5e3d8e8ff5dd35474c8e214d69983
[ "MIT" ]
10
2021-03-23T18:14:24.000Z
2022-03-28T03:05:18.000Z
tests/fixtures/unused_import_comment_6.py
cdce8p/python-typing-update
2ad78b9ce4b5e3d8e8ff5dd35474c8e214d69983
[ "MIT" ]
2
2021-03-20T08:47:52.000Z
2021-06-07T04:02:02.000Z
"""Test unused import retention.""" from logging import DEBUG # unused-import from typing import Any, List var1: List[str] var2: Any
19.285714
42
0.740741
20
135
5
0.65
0.24
0
0
0
0
0
0
0
0
0
0.017544
0.155556
135
6
43
22.5
0.859649
0.325926
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
f72394108b2b48963e86a1dfb5530319995e885c
4375
py
Python
tests/test_graph.py
Nikolay-Lysenko/gpn
a59f43e90536f85f8b0051c5ce6d0497081a5a8f
[ "MIT" ]
null
null
null
tests/test_graph.py
Nikolay-Lysenko/gpn
a59f43e90536f85f8b0051c5ce6d0497081a5a8f
[ "MIT" ]
null
null
null
tests/test_graph.py
Nikolay-Lysenko/gpn
a59f43e90536f85f8b0051c5ce6d0497081a5a8f
[ "MIT" ]
null
null
null
""" Test `graph.py` module. Author: Nikolay Lysenko """ from typing import List, Tuple import pytest import tensorflow as tf import numpy as np from gpn.graph import sample_multiple_fragments @pytest.mark.parametrize( "images, corners, fragment_size, frame_size, n_channels, expected", [ ( # `images` np.array([ [ [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], [[1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]] ], [ [[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]] ] ]).swapaxes(1, 3), # `corners` [(1, 1), (0, 2)], # `fragment_size` 4, # `frame_size` 1, # `n_channels` 3, # `expected` np.array([ [ [[1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1]], [[1, 1, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1]] ], [ [[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]] ], [ [[0, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 0], [0, 1, 1, 1], [0, 0, 0, 0]] ], [ [[0, 1, 1, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0], [0, 0, 0, 0]], [[0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0]] ], ]).swapaxes(1, 3) ) ] ) def test_sample_multiple_fragments( images: np.ndarray, corners: List[Tuple[int, int]], fragment_size: int, frame_size: int, n_channels: int, expected: np.ndarray ) -> None: """Test `sample_multiple_fragments` function.""" graph = tf.Graph() with graph.as_default(): tensor_images = tf.placeholder(tf.float32, images.shape) tensor_corners = [ tf.placeholder(tf.int32, (2,), name=f'corner_{i}') for i, _ in enumerate(corners) ] tensor_fragments = sample_multiple_fragments( tensor_images, tensor_corners, fragment_size, frame_size, n_channels ) with tf.Session(graph=graph) as sess: feed_dict = { tensor_images: images, **{k: v for k, v in zip(tensor_corners, corners)} } fragments = tensor_fragments.eval(feed_dict, sess) np.testing.assert_array_equal(fragments, expected)
29.965753
71
0.275429
468
4375
2.497863
0.147436
0.311377
0.400342
0.492729
0.321642
0.321642
0.321642
0.244654
0.244654
0.242087
0
0.162828
0.573257
4375
145
72
30.172414
0.46331
0.037486
0
0.582677
0
0
0.017648
0
0
0
0
0
0.007874
1
0.007874
false
0
0.03937
0
0.047244
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f732e9b2a6ebaaa5570c84740a20877a5638855d
162
py
Python
tests/test_models/test_backbones/__init__.py
mrzhuzhe/mmdetection
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
[ "Apache-2.0" ]
null
null
null
tests/test_models/test_backbones/__init__.py
mrzhuzhe/mmdetection
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
[ "Apache-2.0" ]
null
null
null
tests/test_models/test_backbones/__init__.py
mrzhuzhe/mmdetection
c04ca2c2a65500bc248a5d2ab6ace5b15f00064d
[ "Apache-2.0" ]
null
null
null
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_norm_state, is_block, is_norm

__all__ = ['is_block', 'is_norm', 'check_norm_state']
32.4
55
0.740741
24
162
4.5
0.583333
0.166667
0.259259
0.240741
0
0
0
0
0
0
0
0
0.141975
162
4
56
40.5
0.776978
0.277778
0
0
0
0
0.279279
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
f7850ce08a9f9ce42c7a3f146540abc49e1c1f12
41598
py
Python
stable_baselines/td3/policies.py
Ow-woo/stable-baselines
ece376f62b0eaa3b58e90593b7db5fb9de3d82c5
[ "MIT" ]
1
2021-03-03T14:59:11.000Z
2021-03-03T14:59:11.000Z
stable_baselines/td3/policies.py
Ow-woo/stable-baselines
ece376f62b0eaa3b58e90593b7db5fb9de3d82c5
[ "MIT" ]
null
null
null
stable_baselines/td3/policies.py
Ow-woo/stable-baselines
ece376f62b0eaa3b58e90593b7db5fb9de3d82c5
[ "MIT" ]
4
2019-10-07T23:11:26.000Z
2021-08-24T13:00:40.000Z
import tensorflow as tf
import numpy as np
from gym.spaces import Box
import copy

from stable_baselines.common.policies import BasePolicy, nature_cnn, register_policy, cnn_1d_extractor
from stable_baselines.sac.policies import mlp
from stable_baselines.a2c.utils import lstm, batch_to_seq, seq_to_batch


class TD3Policy(BasePolicy):
    """
    Policy object that implements a TD3-like actor critic

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param scale: (bool) whether or not to scale the input
    """

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False,
                 scale=False, add_action_ph=False):
        super(TD3Policy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch,
                                        reuse=reuse, scale=scale, add_action_ph=add_action_ph)
        assert isinstance(ac_space, Box), "Error: the action space must be of type gym.spaces.Box"

        self.qf1 = None
        self.qf2 = None
        self.q_discrepancy = None
        self.policy = None

    def make_actor(self, obs=None, reuse=False, scope="pi"):
        """
        Creates an actor object

        :param obs: (TensorFlow Tensor) The observation placeholder (can be None for default placeholder)
        :param reuse: (bool) whether or not to reuse parameters
        :param scope: (str) the scope name of the actor
        :return: (TensorFlow Tensor) the output tensor
        """
        raise NotImplementedError

    def make_critics(self, obs=None, action=None, reuse=False, scope="qvalues_fn"):
        """
        Creates the two Q-Values approximator

        :param obs: (TensorFlow Tensor) The observation placeholder (can be None for default placeholder)
        :param action: (TensorFlow Tensor) The action placeholder
        :param reuse: (bool) whether or not to reuse parameters
        :param scope: (str) the scope name
        :return: ([tf.Tensor]) Mean, action and log probability
        """
        raise NotImplementedError

    def step(self, obs, state=None, mask=None):
        """
        Returns the policy for a single step

        :param obs: ([float] or [int]) The current observation of the environment
        :param state: ([float]) The last states (used in recurrent policies)
        :param mask: ([float]) The last masks (used in recurrent policies)
        :return: ([float]) actions
        """
        raise NotImplementedError

    def proba_step(self, obs, state=None, mask=None):
        """
        Returns the policy for a single step

        :param obs: ([float] or [int]) The current observation of the environment
        :param state: ([float]) The last states (used in recurrent policies)
        :param mask: ([float]) The last masks (used in recurrent policies)
        :return: ([float]) actions
        """
        return self.step(obs, state, mask)


class FeedForwardPolicy(TD3Policy):
    """
    Policy object that implements a DDPG-like actor critic, using a feed forward neural network.

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])
    :param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction
    :param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
    :param layer_norm: (bool) enable layer normalisation
    :param act_fun: (tf.func) the activation function to use in the neural network.
    :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, layers=None,
                 cnn_extractor=nature_cnn, feature_extraction="cnn", layer_norm=False, act_fun=tf.nn.relu,
                 obs_module_indices=None, **kwargs):
        super(FeedForwardPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse,
                                                scale=(feature_extraction == "cnn" and cnn_extractor == nature_cnn))

        self._kwargs_check(feature_extraction, kwargs)
        self.layer_norm = layer_norm
        self.feature_extraction = feature_extraction
        self.cnn_kwargs = kwargs
        self.cnn_extractor = cnn_extractor
        self.cnn_vf = self.cnn_kwargs.pop("cnn_vf", True)
        self.reuse = reuse
        if layers is None:
            layers = [64, 64]
        self.layers = layers
        self.obs_module_indices = obs_module_indices
        self.policy_pre_activation = None

        assert len(layers) >= 1, "Error: must have at least one hidden layer for the policy."

        self.activ_fn = act_fun

    def make_actor(self, obs=None, reuse=False, scope="pi"):
        if obs is None:
            obs = self.processed_obs

        if self.obs_module_indices is not None:
            obs = tf.gather(obs, self.obs_module_indices["pi"], axis=-1)

        with tf.variable_scope(scope, reuse=reuse):
            if self.feature_extraction == "cnn":
                pi_h = self.cnn_extractor(obs, name="pi_c1", act_fun=self.activ_fn, **self.cnn_kwargs)
            else:
                pi_h = tf.layers.flatten(obs)

            pi_h = mlp(pi_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)

            self.policy_pre_activation = tf.layers.dense(pi_h, self.ac_space.shape[0])
            self.policy = policy = tf.tanh(self.policy_pre_activation)

        return policy

    def make_critics(self, obs=None, action=None, reuse=False, scope="values_fn", extracted_callback=None):
        if obs is None:
            obs = self.processed_obs

        if self.obs_module_indices is not None:
            obs = tf.gather(obs, self.obs_module_indices["vf"], axis=-1)

        with tf.variable_scope(scope, reuse=reuse):
            if self.feature_extraction == "cnn" and self.cnn_vf:
                critics_h = self.cnn_extractor(obs, name="vf_c1", act_fun=self.activ_fn, **self.cnn_kwargs)
            else:
                critics_h = tf.layers.flatten(obs)

            if extracted_callback is not None:
                critics_h = extracted_callback(critics_h)

            # Concatenate preprocessed state and action
            qf_h = tf.concat([critics_h, action], axis=-1)

            # Double Q values to reduce overestimation
            with tf.variable_scope('qf1', reuse=reuse):
                qf1_h = mlp(qf_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)
                qf1 = tf.layers.dense(qf1_h, 1, name="qf1")

            with tf.variable_scope('qf2', reuse=reuse):
                qf2_h = mlp(qf_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)
                qf2 = tf.layers.dense(qf2_h, 1, name="qf2")

            self.qf1 = qf1
            self.qf2 = qf2
            # TODO: assumes that all qf1 and qf2 can never have opposite signs
            #self.q_discrepancy = tf.square(self.qf1 - self.qf2) / tf.square(tf.maximum(self.qf1, self.qf2))
            #self.q_discrepancy = tf.abs(self.qf1 - self.qf2)

        return self.qf1, self.qf2

    def step(self, obs, state=None, mask=None):
        return self.sess.run(self.policy, {self.obs_ph: obs})

    def get_q_discrepancy(self, obs):
        if isinstance(obs, np.ndarray) and len(obs.shape) == 1:
            # TODO: check for MLP or CNN policy here
            obs = np.expand_dims(obs, axis=0)
        return self.sess.run(self.q_discrepancy, {self.obs_ph: obs})


class RecurrentPolicy(TD3Policy):
    """
    Policy object that implements a DDPG-like actor critic, using a feed forward neural network.

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])
    :param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction
    :param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
    :param layer_norm: (bool) enable layer normalisation
    :param act_fun: (tf.func) the activation function to use in the neural network.
    :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    recurrent = True

    def __init__(self, sess, ob_space, ac_space, layers, n_env=1, n_steps=1, n_batch=None, reuse=False,
                 cnn_extractor=nature_cnn, feature_extraction="mlp", n_lstm=128, share_lstm=False,
                 save_state=False, save_target_state=False, layer_norm=False, act_fun=tf.nn.relu,
                 obs_module_indices=None, **kwargs):
        super(RecurrentPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse=reuse,
                                              add_action_ph=True,
                                              scale=(feature_extraction == "cnn" and cnn_extractor == nature_cnn))

        self._kwargs_check(feature_extraction, kwargs)
        self.layer_norm = layer_norm
        self.feature_extraction = feature_extraction
        self.cnn_kwargs = kwargs
        self.cnn_extractor = cnn_extractor
        self.cnn_vf = self.cnn_kwargs.pop("cnn_vf", True)
        self.reuse = reuse
        self.layers = layers
        self.obs_module_indices = obs_module_indices
        self.activ_fn = act_fun
        self.n_lstm = n_lstm
        self.share_lstm = share_lstm

        self._obs_ph = self.processed_obs  # Base class has self.obs_ph as property getting self._obs_ph
        self.obs_tp1_ph = self.processed_obs

        assert self.n_batch % self.n_steps == 0, "The batch size must be a multiple of sequence length (n_steps)"
        self._lstm_n_batch = self.n_batch // self.n_steps

        self.action_prev = np.zeros((1, *self.ac_space.shape))

        self._initial_state = np.zeros((self._lstm_n_batch, self.n_lstm * 2), dtype=np.float32)

        if self.share_lstm:
            self.state = None
        else:
            self.pi_state = None
            self.qf1_state = None
            self.qf2_state = None

        with tf.variable_scope("input", reuse=False):
            self.dones_ph = tf.placeholder_with_default(np.zeros((self.n_batch,), dtype=np.float32),
                                                        (self.n_batch,), name="dones_ph")  # (done t-1)
            if self.share_lstm:
                self.state_ph = tf.placeholder_with_default(self.initial_state,
                                                            (self._lstm_n_batch, self.n_lstm * 2),
                                                            name="state_ph")
            else:
                self.pi_state_ph = tf.placeholder_with_default(self.initial_state,
                                                               (self._lstm_n_batch, self.n_lstm * 2),
                                                               name="pi_state_ph")
                self.qf1_state_ph = tf.placeholder_with_default(self.initial_state,
                                                                (self._lstm_n_batch, self.n_lstm * 2),
                                                                name="qf1_state_ph")
                self.qf2_state_ph = tf.placeholder_with_default(self.initial_state,
                                                                (self._lstm_n_batch, self.n_lstm * 2),
                                                                name="qf2_state_ph")
            self.action_prev_ph = tf.placeholder(np.float32, (self.n_batch, *self.ac_space.shape),
                                                 name="action_prev_ph")

        self.save_state = save_state
        self.save_target_state = save_target_state

        self.extra_phs = ["action_prev"]
        self.rnn_inputs = ["obs", "action_prev"]
        self.extra_data_names = ["action_prev"]

        if self.save_target_state:
            self.extra_data_names = sorted(self.extra_data_names + ["target_action_prev"])
            self.rnn_inputs = sorted(self.rnn_inputs + ["obs_tp1"])
            self.extra_phs = sorted(self.extra_phs + ["target_action_prev"])

        if self.save_state:
            state_names = ["state"] if self.share_lstm else ["pi_state", "qf1_state", "qf2_state"]
            if self.save_target_state:
                state_names.extend(["target_" + state_name for state_name in state_names])
            if self.share_lstm:
                self.extra_data_names = sorted(self.extra_data_names + state_names)
                self.extra_phs = sorted(self.extra_phs + state_names)
            else:
                self.extra_data_names = sorted(self.extra_data_names + state_names)
                self.extra_phs = sorted(self.extra_phs + state_names)

    def _process_phs(self, **phs):
        for ph_name, ph_val in phs.items():
            if ph_val is None:
                phs[ph_name] = getattr(self, ph_name + "_ph")
            else:
                try:
                    setattr(self, ph_name + "_ph", ph_val)
                except AttributeError:
                    setattr(self, "_" + ph_name + "_ph", ph_val)

        return phs.values()

    def _make_branch(self, branch_name, input_tensor, dones=None, state_ph=None):
        if branch_name == "lstm":
            for i, fc_layer_units in enumerate(self.layers["lstm"]):
                input_tensor = self.activ_fn(tf.layers.dense(input_tensor, fc_layer_units,
                                                             name="lstm_fc{}".format(i)))
            input_tensor = batch_to_seq(input_tensor, self._lstm_n_batch, self.n_steps)
            masks = batch_to_seq(dones, self._lstm_n_batch, self.n_steps)
            input_tensor, state = lstm(input_tensor, masks, state_ph, "lstm", n_hidden=self.n_lstm,
                                       layer_norm=self.layer_norm)
            input_tensor = seq_to_batch(input_tensor)
            return input_tensor, state
        else:
            for i, fc_layer_units in enumerate(self.layers[branch_name]):
                input_tensor = self.activ_fn(tf.layers.dense(input_tensor, fc_layer_units,
                                                             name="{}_fc{}".format(branch_name, i)))
            return input_tensor

    def make_actor(self, ff_phs=None, rnn_phs=None, dones=None, reuse=False, scope="pi"):
        lstm_branch = tf.concat([tf.layers.flatten(ph) for ph in rnn_phs], axis=-1)
        if ff_phs is not None:
            ff_branch = tf.concat([tf.layers.flatten(ph) for ph in ff_phs], axis=-1)

        if dones is None:
            dones = self.dones_ph

        if self.share_lstm:
            with tf.variable_scope("shared", reuse=tf.AUTO_REUSE):
                lstm_branch, self.state = self._make_branch("lstm", lstm_branch, dones, self.state_ph)

        with tf.variable_scope(scope, reuse=reuse):
            if self.layers["ff"] is not None:
                ff_branch = self._make_branch("ff", ff_branch)

            if not self.share_lstm:
                lstm_branch, self.pi_state = self._make_branch("lstm", lstm_branch, dones, self.pi_state_ph)

            if ff_phs is not None:
                head = tf.concat([ff_branch, lstm_branch], axis=-1)
            else:
                head = lstm_branch
            head = self._make_branch("head", head)

            self.policy_pre_activation = tf.layers.dense(head, self.ac_space.shape[0])
            self.policy = policy = tf.tanh(self.policy_pre_activation)

        return policy

    def make_critics(self, ff_phs=None, rnn_phs=None, dones=None, reuse=False, scope="values_fn"):
        lstm_branch_in = tf.concat([tf.layers.flatten(ph) for ph in rnn_phs], axis=-1)
        if ff_phs is not None:
            ff_branch_in = tf.concat([tf.layers.flatten(ph) for ph in ff_phs], axis=-1)

        if dones is None:
            dones = self.dones_ph

        self.qf1, self.qf2 = None, None
        self.qf1_state, self.qf2_state = None, None

        if self.share_lstm:
            with tf.variable_scope("shared", reuse=tf.AUTO_REUSE):
                lstm_branch_s, self.state = self._make_branch("lstm", lstm_branch_in, dones, self.state_ph)

        with tf.variable_scope(scope, reuse=reuse):
            # Double Q values to reduce overestimation
            for qf_i in range(1, 3):
                with tf.variable_scope('qf{}'.format(qf_i), reuse=reuse):
                    lstm_branch = lstm_branch_in
                    if self.layers["ff"] is not None:
                        ff_branch = self._make_branch("ff", ff_branch_in)
                    elif ff_phs is not None:
                        ff_branch = ff_branch_in

                    if not self.share_lstm:
                        lstm_branch, state = self._make_branch("lstm", lstm_branch, dones,
                                                               getattr(self, "qf{}_state_ph".format(qf_i)))
                        setattr(self, "qf{}_state".format(qf_i), state)
                    else:
                        lstm_branch = lstm_branch_s

                    if ff_phs is not None:
                        head = tf.concat([ff_branch, lstm_branch], axis=-1)
                    else:
                        head = lstm_branch
                    head = self._make_branch("head", head)

                    setattr(self, "qf{}".format(qf_i), tf.layers.dense(head, 1, name="qf{}".format(qf_i)))

        return self.qf1, self.qf2

    def step(self, obs, action_prev=None, state=None, mask=None, feed_dict=None, **kwargs):
        if feed_dict is None:
            feed_dict = {}

        if state is None:
            state = self.initial_state
        if mask is None:
            mask = np.array([False])

        if action_prev is None:
            assert obs.shape[0] == 1
            if mask[0]:
                self.action_prev = np.zeros((1, *self.ac_space.shape))
            action_prev = self.action_prev

        rnn_node = self.state if self.share_lstm else self.pi_state
        state_ph = self.state_ph if self.share_lstm else self.pi_state_ph
        feed_dict.update({self.obs_ph: obs, state_ph: state, self.dones_ph: mask,
                          self.action_prev_ph: action_prev})

        action, out_state = self.sess.run([self.policy, rnn_node], feed_dict)

        self.action_prev = action

        return action, out_state

    @property
    def initial_state(self):
        return self._initial_state

    def collect_data(self, _locals, _globals):
        data = {}
        if self.save_state:
            if self.share_lstm:
                data["state"] = _locals["prev_policy_state"][0, :]
            else:
                data["pi_state"] = _locals["prev_policy_state"][0, :]
                if len(_locals["episode_data"]) == 0:
                    qf1_state, qf2_state = self.initial_state, self.initial_state
                else:
                    qf_feed_dict = {
                        self.qf1_state_ph: _locals["episode_data"][-1]["qf1_state"][None],
                        self.qf2_state_ph: _locals["episode_data"][-1]["qf2_state"][None],
                    }
                    qf_feed_dict.update({getattr(self, data_name + "_ph"):
                                         _locals["episode_data"][-1][data_name][None]
                                         for data_name in self.rnn_inputs})
                    qf1_state, qf2_state = self.sess.run([self.qf1_state, self.qf2_state],
                                                         feed_dict=qf_feed_dict)
                data["qf1_state"] = qf1_state[0, :]
                data["qf2_state"] = qf2_state[0, :]

        if len(_locals["episode_data"]) == 0:
            data["action_prev"] = np.zeros(*self.ac_space.shape, dtype=np.float32)
        else:
            data["action_prev"] = _locals["episode_data"][-1]["action"]
        if self.save_target_state:
            data["target_action_prev_rnn"] = _locals["action"]

        return data


class DRPolicy(RecurrentPolicy):
    """
    Policy object that implements a DDPG-like actor critic, using a feed forward neural network.

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param layers: ([int]) The size of the Neural network for the policy (if None, default to [64, 64])
    :param cnn_extractor: (function (TensorFlow Tensor, ``**kwargs``): (TensorFlow Tensor)) the CNN feature extraction
    :param feature_extraction: (str) The feature extraction type ("cnn" or "mlp")
    :param layer_norm: (bool) enable layer normalisation
    :param act_fun: (tf.func) the activation function to use in the neural network.
    :param kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    recurrent = True

    def __init__(self, sess, ob_space, ac_space, goal_size, my_size, n_env=1, n_steps=1, n_batch=None,
                 reuse=False, layers=None, cnn_extractor=nature_cnn, feature_extraction="mlp", n_lstm=128,
                 share_lstm=False, layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):
        if layers is None:
            layers = {"ff": [128], "lstm": [128], "head": [128, 128]}
        super().__init__(sess, ob_space, ac_space, layers, n_env, n_steps, n_batch, reuse=reuse,
                         cnn_extractor=cnn_extractor, feature_extraction=feature_extraction, n_lstm=n_lstm,
                         share_lstm=share_lstm, layer_norm=layer_norm, act_fun=act_fun,
                         obs_module_indices=obs_module_indices, **kwargs)

        with tf.variable_scope("input", reuse=False):
            self.my_ph = tf.placeholder(tf.float32, (None, my_size), name="my_ph")  # the dynamics of the environment

        self.goal_size = goal_size
        self.extra_phs = sorted(self.extra_phs + ["my"])
        self.extra_data_names = sorted(self.extra_data_names + ["my"])

    def make_actor(self, obs_ff=None, obs_rnn=None, action_prev=None, dones=None, reuse=False, scope="pi"):
        if obs_ff is None:
            obs_ff = self.processed_obs
        if obs_rnn is None:
            obs_rnn = self.processed_obs
        if action_prev is None:
            action_prev = self.action_prev_ph

        obs_ff, goal = obs_ff[:, :-self.goal_size], obs_ff[:, -self.goal_size:]
        goal = tf.subtract(goal, obs_ff[:, -self.goal_size:], name="goal_relative")
        obs_rnn = obs_rnn[:, :-self.goal_size]

        ff_phs = [obs_ff, goal]
        rnn_phs = [obs_rnn, action_prev]
        return super().make_actor(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)

    def make_critics(self, obs_ff=None, action_ff=None, my=None, obs_rnn=None, action_prev=None, dones=None,
                     reuse=False, scope="values_fn"):
        if obs_ff is None:
            obs_ff = self.processed_obs
        if action_ff is None:
            action_ff = self.action_ph
        if my is None:
            my = self.my_ph
        if obs_rnn is None:
            obs_rnn = self.processed_obs
        if action_prev is None:
            action_prev = self.action_prev_ph

        obs_ff, goal = obs_ff[:, :-self.goal_size], obs_ff[:, -self.goal_size:]
        goal = tf.subtract(goal, obs_ff[:, -self.goal_size:], name="goal_relative")
        obs_rnn = obs_rnn[:, :-self.goal_size]

        ff_phs = [obs_ff, goal, my, action_ff]
        rnn_phs = [obs_rnn, action_prev]
        return super().make_critics(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)

    def collect_data(self, _locals, _globals, **kwargs):
        data = super().collect_data(_locals, _globals)
        if "my" not in _locals or _locals["episode_data"]:
            data["my"] = _locals["self"].env.get_env_parameters()

        return data


class LstmMlpPolicy(RecurrentPolicy):
    recurrent = True

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, layers=None,
                 cnn_extractor=nature_cnn, feature_extraction="mlp", n_lstm=128, share_lstm=False,
                 layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):
        if layers is None:
            layers = {"ff": None, "lstm": [64, 64], "head": []}
        else:
            assert layers["ff"] is None
        super().__init__(sess, ob_space, ac_space, layers, n_env, n_steps, n_batch, reuse=reuse,
                         cnn_extractor=cnn_extractor, feature_extraction=feature_extraction, n_lstm=n_lstm,
                         share_lstm=share_lstm, layer_norm=layer_norm, act_fun=act_fun,
                         obs_module_indices=obs_module_indices, **kwargs)

    def make_actor(self, obs=None, action_prev=None, dones=None, reuse=False, scope="pi"):
        obs, action_prev, dones = self._process_phs(obs=obs, action_prev=action_prev, dones=dones)

        ff_phs = None
        rnn_phs = [obs, action_prev]
        return super().make_actor(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)

    def make_critics(self, obs=None, action=None, action_prev=None, dones=None, reuse=False, scope="values_fn"):
        obs, action, action_prev, dones = self._process_phs(obs=obs, action=action, action_prev=action_prev,
                                                            dones=dones)

        ff_phs = [action]
        rnn_phs = [obs, action_prev]
        return super().make_critics(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)


class LstmFFMlpPolicy(RecurrentPolicy):
    recurrent = True

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, layers=None,
                 cnn_extractor=nature_cnn, feature_extraction="mlp", n_lstm=128, share_lstm=False,
                 layer_norm=False, act_fun=tf.nn.relu, obs_module_indices=None, **kwargs):
        if layers is None:
            layers = {"ff": [64], "lstm": [64, 64], "head": []}
        super().__init__(sess, ob_space, ac_space, layers, n_env, n_steps, n_batch, reuse=reuse,
                         cnn_extractor=cnn_extractor, feature_extraction=feature_extraction, n_lstm=n_lstm,
                         share_lstm=share_lstm, layer_norm=layer_norm, act_fun=act_fun,
                         obs_module_indices=obs_module_indices, **kwargs)

    def make_actor(self, obs=None, action_prev=None, dones=None, reuse=False, scope="pi"):
        obs, action_prev, dones = self._process_phs(obs=obs, action_prev=action_prev, dones=dones)

        ff_phs = [obs]
        rnn_phs = [obs, action_prev]
        return super().make_actor(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)

    def make_critics(self, obs=None, action=None, action_prev=None, dones=None, reuse=False, scope="values_fn"):
        obs, action, action_prev, dones = self._process_phs(obs=obs, action=action, action_prev=action_prev,
                                                            dones=dones)

        ff_phs = [obs, action]
        rnn_phs = [obs, action_prev]
        return super().make_critics(ff_phs=ff_phs, rnn_phs=rnn_phs, dones=dones, reuse=reuse, scope=scope)


class CnnPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a CNN (the nature CNN)

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
        super(CnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                                        feature_extraction="cnn", **_kwargs)


class CnnMlpPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a CNN (the nature CNN)

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
        super(CnnMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                                           cnn_extractor=cnn_1d_extractor, feature_extraction="cnn", **_kwargs)


class DRCnnMlpPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a CNN (the nature CNN)

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, my_size, n_env=1, n_steps=1, n_batch=None, reuse=False,
                 **_kwargs):
        super(DRCnnMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                                             cnn_extractor=cnn_1d_extractor, feature_extraction="cnn", **_kwargs)
        with tf.variable_scope("input", reuse=False):
            self.my_ph = tf.placeholder(tf.float32, (self.n_batch, *my_size), name="my_ph")  # (done t-1)
        self.extra_phs = ["my", "target_my"]
        self.extra_data_names = ["my", "target_my"]

    def make_critics(self, obs=None, action=None, my=None, reuse=False, scope="values_fn"):
        if my is None:
            my = self.my_ph

        return super().make_critics(obs, action, reuse, scope,
                                    extracted_callback=lambda x: tf.concat([x, my], axis=-1))

    def collect_data(self, _locals, _globals):
        data = []
        for env_i in range(_locals["self"].n_envs):
            d = {}
            if len(_locals["episode_data"][env_i]) == 0 or "my" not in _locals["episode_data"][env_i]:
                if _locals["self"].n_envs == 1:
                    d["my"] = _locals["self"].env.get_env_parameters()
                else:
                    d["my"] = _locals["self"].env.env_method("get_env_parameters", indices=env_i)[0]
            else:
                d["my"] = _locals["episode_data"][env_i][-1]["my"]
            d["target_my"] = d["my"]
            data.append(d)

        return data


class DRMyEstPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a CNN (the nature CNN)

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, my_size, n_env=1, n_steps=1, n_batch=None, reuse=False,
                 loss_weight=1e-3, **_kwargs):
        super().__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                         cnn_extractor=cnn_1d_extractor, feature_extraction="mlp", **_kwargs)
        self._obs_ph = self.processed_obs  # Base class has self.obs_ph as property getting self._obs_ph
        with tf.variable_scope("input", reuse=False):
            self.my_ph = tf.placeholder(tf.float32, (self.n_batch, *my_size), name="my_ph")  # (done t-1)
            self.action_prev_ph = tf.placeholder(tf.float32, (self.n_batch, *self.ac_space.shape),
                                                 name="action_prev_ph")
            self.obs_prev_ph = tf.placeholder(tf.float32, (self.n_batch, *self.ob_space.shape),
                                              name="obs_prev_ph")
        self.loss_weight = loss_weight
        self.obs_prev = np.zeros((1, *self.ob_space.shape))
        self.action_prev = np.zeros((1, *self.ac_space.shape))
        self.my_est_loss_op = None
        self.my_est_op = None
        self.policy_loss = None
        self.my_est = None
        self.extra_phs = ["my", "action_prev", "obs_prev", "target_my", "target_action_prev",
                          "target_obs_prev"]
        self.extra_data_names = ["my", "action_prev", "obs_prev", "target_my", "target_action_prev",
                                 "target_obs_prev"]

    def _process_phs(self, **phs):
        for ph_name, ph_val in phs.items():
            if ph_val is None:
                phs[ph_name] = getattr(self, ph_name + "_ph")
            else:
                try:
                    setattr(self, ph_name + "_ph", ph_val)
                except AttributeError:
                    setattr(self, "_" + ph_name + "_ph", ph_val)

        return phs.values()

    def make_actor(self, obs=None, obs_prev=None, action_prev=None, my_gt=None, reuse=False, scope="pi"):
        obs, obs_prev, action_prev, my_gt = self._process_phs(obs=obs, obs_prev=obs_prev,
                                                              action_prev=action_prev, my=my_gt)

        if self.obs_module_indices is not None:
            obs = tf.gather(obs, self.obs_module_indices["pi"], axis=-1)
            obs_prev = tf.gather(obs_prev, self.obs_module_indices["pi"], axis=-1)

        with tf.variable_scope(scope + "/my", reuse=reuse):
            my_h = tf.concat([obs, obs_prev, action_prev], axis=-1)
            my_h = mlp(my_h, [64, 64], self.activ_fn, layer_norm=self.layer_norm)
            self.my_est_op = tf.layers.dense(my_h, self.my_ph.shape[-1])
            self.my_est_loss_op = tf.reduce_mean((self.my_est_op - my_gt) ** 2)
            self.policy_loss = self.loss_weight * self.my_est_loss_op

        obs = tf.concat([obs, self.my_est_op], axis=-1)

        with tf.variable_scope(scope, reuse=reuse):
            if self.feature_extraction == "cnn":
                pi_h = self.cnn_extractor(obs, name="pi_c1", act_fun=self.activ_fn, **self.cnn_kwargs)
            else:
                pi_h = tf.layers.flatten(obs)

            pi_h = mlp(pi_h, self.layers, self.activ_fn, layer_norm=self.layer_norm)

            self.policy_pre_activation = tf.layers.dense(pi_h, self.ac_space.shape[0])
            self.policy = policy = tf.tanh(self.policy_pre_activation)

        return policy

    def make_critics(self, obs=None, action=None, my=None, reuse=False, scope="values_fn"):
        obs, action, my = self._process_phs(obs=obs, action=action, my=my)
        return super().make_critics(obs, action, reuse, scope,
                                    extracted_callback=lambda x: tf.concat([x, my], axis=-1))

    def collect_data(self, _locals, _globals):
        data = {}
        if "my" not in _locals or _locals["episode_data"]:
            data["my"] = _locals["self"].env.get_env_parameters()
            data["target_my"] = data["my"]
        if len(_locals["episode_data"]) == 0:
            data["obs_prev"] = _locals["obs"]
            data["action_prev"] = _locals["action"]
        else:
            data["obs_prev"] = _locals["episode_data"][-1]["obs"]
            data["action_prev"] = _locals["episode_data"][-1]["action"]
        data["target_obs_prev"] = data["obs_prev"]
        data["target_action_prev"] = data["action_prev"]

        return data

    def step(self, obs, obs_prev=None, action_prev=None, mask=None):
        if action_prev is None:
            assert obs.shape[0] == 1
            if mask is not None and mask[0]:
                self.action_prev = np.zeros((1, *self.ac_space.shape))
            action_prev = self.action_prev
        if obs_prev is None:
            if mask is not None and mask[0]:
                self.obs_prev = np.zeros((1, *self.ob_space.shape))
            obs_prev = self.obs_prev

        action, my_est = self.sess.run([self.policy, self.my_est_op],
                                       {self.obs_ph: obs, self.action_prev_ph: action_prev,
                                        self.obs_prev_ph: obs_prev})
        self.action_prev = action
        self.obs_prev = obs
        self.my_est = my_est

        #return action, my_est
        return action


class LnCnnPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a CNN (the nature CNN), with layer normalisation

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
        super(LnCnnPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                                          feature_extraction="cnn", layer_norm=True, **_kwargs)


class MlpPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a MLP (2 layers of 64)

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
        super(MlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                                        feature_extraction="mlp", **_kwargs)


class LnMlpPolicy(FeedForwardPolicy):
    """
    Policy object that implements actor critic, using a MLP (2 layers of 64), with layer normalisation

    :param sess: (TensorFlow session) The current TensorFlow session
    :param ob_space: (Gym Space) The observation space of the environment
    :param ac_space: (Gym Space) The action space of the environment
    :param n_env: (int) The number of environments to run
    :param n_steps: (int) The number of steps to run for each environment
    :param n_batch: (int) The number of batch to run (n_envs * n_steps)
    :param reuse: (bool) If the policy is reusable or not
    :param _kwargs: (dict) Extra keyword arguments for the nature CNN feature extraction
    """

    def __init__(self, sess, ob_space, ac_space, n_env=1, n_steps=1, n_batch=None, reuse=False, **_kwargs):
        super(LnMlpPolicy, self).__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, reuse,
                                          feature_extraction="mlp", layer_norm=True, **_kwargs)


register_policy("LstmFFMlpPolicy", LstmFFMlpPolicy)
register_policy("LstmMlpPolicy", LstmMlpPolicy)
register_policy("DRPolicy", DRPolicy)
register_policy("CnnPolicy", CnnPolicy)
register_policy("LnCnnPolicy", LnCnnPolicy)
register_policy("MlpPolicy", MlpPolicy)
register_policy("LnMlpPolicy", LnMlpPolicy)
register_policy("CnnMlpPolicy", CnnMlpPolicy)
register_policy("DRCnnMlpPolicy", DRCnnMlpPolicy)
register_policy("DRMyEstPolicy", DRMyEstPolicy)
48.53909
148
0.634093
5798
41598
4.303208
0.051052
0.029259
0.015872
0.018517
0.797756
0.762365
0.735431
0.722485
0.704249
0.688096
0
0.007872
0.26095
41598
856
149
48.595794
0.803695
0.23405
0
0.509728
0
0
0.050319
0.000711
0
0
0
0.001168
0.011673
1
0.081712
false
0
0.013619
0.003891
0.180934
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
f7871a0dda7b9908779e676055916a3ce7be9906
247
py
Python
students/k3342/practical_works/Demin_Danil/django_project_demin/project_first_app/admin.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
10
2020-03-20T09:06:12.000Z
2021-07-27T13:06:02.000Z
students/k3342/practical_works/Demin_Danil/django_project_demin/project_first_app/admin.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
134
2020-03-23T09:47:48.000Z
2022-03-12T01:05:19.000Z
students/k3342/practical_works/Demin_Danil/django_project_demin/project_first_app/admin.py
TonikX/ITMO_ICT_-WebProgramming_2020
ba566c1b3ab04585665c69860b713741906935a0
[ "MIT" ]
71
2020-03-20T12:45:56.000Z
2021-10-31T19:22:25.000Z
from django.contrib import admin

from .models import Owner
admin.site.register(Owner)

from .models import Car
admin.site.register(Car)

from .models import Owning
admin.site.register(Owning)

from .models import License
admin.site.register(License)
24.7
32
0.817814
37
247
5.459459
0.324324
0.19802
0.316832
0
0
0
0
0
0
0
0
0
0.097166
247
9
33
27.444444
0.90583
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.555556
0
0.555556
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e38fcc638f5f4491adecf79f6bf800c40de85b7c
101
py
Python
expressly/__init__.py
expressly/expressly-plugin-sdk-python3-core
97b28e78b69a30bb2bd087e9df48da1f30ef757c
[ "MIT" ]
null
null
null
expressly/__init__.py
expressly/expressly-plugin-sdk-python3-core
97b28e78b69a30bb2bd087e9df48da1f30ef757c
[ "MIT" ]
null
null
null
expressly/__init__.py
expressly/expressly-plugin-sdk-python3-core
97b28e78b69a30bb2bd087e9df48da1f30ef757c
[ "MIT" ]
null
null
null
from expressly.api import Api
from expressly.routes import routes

api_url = 'prod.expresslyapp.com'
20.2
35
0.811881
15
101
5.4
0.6
0.320988
0
0
0
0
0
0
0
0
0
0
0.118812
101
4
36
25.25
0.910112
0
0
0
0
0
0.207921
0.207921
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
e3907a7a3ed197b3e8c41adfbfed302ddb7fb9d7
195
py
Python
casepro/msgs/context_processors.py
rapidpro/ureport-partners
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
[ "BSD-3-Clause" ]
21
2015-07-21T15:57:49.000Z
2021-11-04T18:26:35.000Z
casepro/msgs/context_processors.py
rapidpro/ureport-partners
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
[ "BSD-3-Clause" ]
357
2015-05-22T07:26:45.000Z
2022-03-12T01:08:28.000Z
casepro/msgs/context_processors.py
rapidpro/ureport-partners
16e5b95eae36ecbbe8ab2a59f34a2f5fd32ceacd
[ "BSD-3-Clause" ]
24
2015-05-28T12:30:25.000Z
2021-11-19T01:57:38.000Z
from django.conf import settings


def messages(request):
    """
    Context processor for information relating to messages
    """
    return {"max_msg_chars": settings.SITE_MAX_MESSAGE_CHARS}
21.666667
61
0.733333
24
195
5.75
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.184615
195
8
62
24.375
0.867925
0.276923
0
0
0
0
0.104
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
e39afee45c1e7a0f10eac0d5def54c2f1c91b6c0
15368
py
Python
flextensor/testing/others/test_conv2d_cuda_different_schedule.py
imxian/FlexTensor
311af3362856ea1b0073404fffad42c54585c205
[ "MIT" ]
135
2020-03-15T11:28:48.000Z
2022-03-26T00:54:32.000Z
flextensor/testing/others/test_conv2d_cuda_different_schedule.py
imxian/FlexTensor
311af3362856ea1b0073404fffad42c54585c205
[ "MIT" ]
11
2020-03-23T11:06:38.000Z
2022-01-24T06:25:41.000Z
flextensor/testing/others/test_conv2d_cuda_different_schedule.py
imxian/FlexTensor
311af3362856ea1b0073404fffad42c54585c205
[ "MIT" ]
32
2020-03-17T05:12:59.000Z
2022-03-26T00:54:33.000Z
""" Test different schedule on conv2d_nchw Target NVIDIA GPU ==================================== **Author**: `Size Zheng` """ import tvm import json from flextensor.measure import _evaluate from flextensor.nn import conv2d_nchw from flextensor.configs.conv2d_config import yolo_shapes_b8 from flextensor.utils import any_factor_split class Parameter(object): def __init__(self): self.b_factors = [2, 4, 1, 1] self.k_factors = [8, 4, 8, 2] self.p_factors = [7, 1, 2, 1] self.q_factors = [1, 1, 14, 1] self.rc_factors = [1, 32, 32] self.ry_factors = [1, 1, 1] self.rx_factors = [1, 1, 1] def __str__(self): ret = "" ret += str(self.b_factors) + "\n" ret += str(self.k_factors) + "\n" ret += str(self.p_factors) + "\n" ret += str(self.q_factors) + "\n" ret += str(self.rc_factors) + "\n" ret += str(self.ry_factors) + "\n" ret += str(self.rx_factors) + "\n" return ret def schedule_yolo_conv_cuda_1(s, outputs, inputs, weight, parameter): # inline the padding operation padded = outputs.op.input_tensors[0] # create cache write_cache = s.cache_write(outputs, "local") read_share_weight = s.cache_read(weight, "shared", [write_cache]) # read_local_weight = s.cache_read(read_share_weight, "local", [write_cache]) read_share_inputs = s.cache_read(padded, "shared", [write_cache]) # read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache]) b_factors = parameter.b_factors k_factors = parameter.k_factors p_factors = parameter.p_factors q_factors = parameter.q_factors rc_factors = parameter.rc_factors ry_factors = parameter.ry_factors rx_factors = parameter.rx_factors # prepare thread_axis bx = tvm.te.thread_axis("blockIdx.x") by = tvm.te.thread_axis("blockIdx.y") bz = tvm.te.thread_axis("blockIdx.z") vx = tvm.te.thread_axis("vthread") vy = tvm.te.thread_axis("vthread") vz = tvm.te.thread_axis("vthread") tx = tvm.te.thread_axis("threadIdx.x") ty = tvm.te.thread_axis("threadIdx.y") tz = tvm.te.thread_axis("threadIdx.z") # split the spatial axes b, k, p, q = s[outputs].op.axis kernel_scope, b = s[outputs].split(b, nparts=1) bo, bi = s[outputs].split(b, nparts=b_factors[0]) ko, ki = s[outputs].split(k, nparts=k_factors[0]) po, pi = s[outputs].split(p, nparts=p_factors[0]) qo, qi = s[outputs].split(q, nparts=q_factors[0]) vbo, bi = s[outputs].split(bi, nparts=b_factors[1]) vko, ki = s[outputs].split(ki, nparts=k_factors[1]) vpo, pi = s[outputs].split(pi, nparts=p_factors[1]) vqo, qi = s[outputs].split(qi, nparts=q_factors[1]) tbo, bi = s[outputs].split(bi, nparts=b_factors[2]) tko, ki = s[outputs].split(ki, nparts=k_factors[2]) tpo, pi = s[outputs].split(pi, nparts=p_factors[2]) tqo, qi = s[outputs].split(qi, nparts=q_factors[2]) # reorder s[outputs].reorder(bo, ko, po, qo, vbo, vko, vpo, vqo, tbo, tko, tpo, tqo, bi, ki, pi, qi) # fuse bko = s[outputs].fuse(bo, ko) vbko = s[outputs].fuse(vbo, vko) tbko = s[outputs].fuse(tbo, tko) bki = s[outputs].fuse(bi, ki) # bind s[outputs].bind(bko, bz) s[outputs].bind(po, by) s[outputs].bind(qo, bx) s[outputs].bind(vbko, vz) s[outputs].bind(vpo, vy) s[outputs].bind(vqo, vx) s[outputs].bind(tbko, tz) s[outputs].bind(tpo, ty) s[outputs].bind(tqo, tx) # compute at write cache s[write_cache].compute_at(s[outputs], tqo) rc, ry, rx = s[write_cache].op.reduce_axis rco, rci = s[write_cache].split(rc, nparts=rc_factors[0]) rcm, rci = s[write_cache].split(rci, nparts=rc_factors[1]) ryo, ryi = s[write_cache].split(ry, nparts=ry_factors[0]) rym, ryi = s[write_cache].split(ryi, nparts=ry_factors[1]) rxo, rxi = s[write_cache].split(rx, nparts=rx_factors[0]) rxm, rxi = 
s[write_cache].split(rxi, nparts=rx_factors[1])
    a, b, c, d = s[write_cache].op.axis
    s[write_cache].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, a, b, c, d)

    # compute at read cache
    s[read_share_weight].compute_at(s[write_cache], rxm)
    # s[read_local_weight].compute_at(s[write_cache], rxi)
    s[read_share_inputs].compute_at(s[write_cache], rxm)
    # s[read_local_inputs].compute_at(s[write_cache], rxi)

    # cooperative fetching
    for cache in [read_share_inputs, read_share_weight]:
        cb, ck, ch, cw = s[cache].op.axis
        fused = s[cache].fuse(cb, ck, ch, cw)
        fused, bindx = s[cache].split(fused, factor=q_factors[2])
        fused, bindy = s[cache].split(fused, factor=p_factors[2])
        fused, bindz = s[cache].split(fused, factor=b_factors[2] * k_factors[2])
        s[cache].bind(bindx, tx)
        s[cache].bind(bindy, ty)
        s[cache].bind(bindz, tz)

    s[outputs].pragma(kernel_scope, 'auto_unroll_max_step', 1500)
    s[outputs].pragma(kernel_scope, 'unroll_explicit', 1)

    s[padded].compute_inline()


def schedule_yolo_conv_cuda_2(s, outputs, inputs, weight, parameter):
    # inline the padding operation
    padded = outputs.op.input_tensors[0]

    # create cache
    write_cache = s.cache_write(outputs, "local")
    read_share_weight = s.cache_read(weight, "shared", [write_cache])
    # read_local_weight = s.cache_read(read_share_weight, "local", [write_cache])
    read_share_inputs = s.cache_read(padded, "shared", [write_cache])
    # read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache])

    b_factors = parameter.b_factors
    k_factors = parameter.k_factors
    p_factors = parameter.p_factors
    q_factors = parameter.q_factors
    rc_factors = parameter.rc_factors
    ry_factors = parameter.ry_factors
    rx_factors = parameter.rx_factors

    # prepare thread_axis
    bx = tvm.te.thread_axis("blockIdx.x")
    by = tvm.te.thread_axis("blockIdx.y")
    bz = tvm.te.thread_axis("blockIdx.z")
    vx = tvm.te.thread_axis("vthread")
    vy = tvm.te.thread_axis("vthread")
    vz = tvm.te.thread_axis("vthread")
    tx = tvm.te.thread_axis("threadIdx.x")
    ty = tvm.te.thread_axis("threadIdx.y")
    tz = tvm.te.thread_axis("threadIdx.z")

    # split the spatial axes
    b, k, p, q = s[outputs].op.axis

    kernel_scope, b = s[outputs].split(b, nparts=1)

    ko, ki = s[outputs].split(k, nparts=k_factors[0])
    po, pi = s[outputs].split(p, nparts=p_factors[0])
    qo, qi = s[outputs].split(q, nparts=q_factors[0])

    vko, ki = s[outputs].split(ki, nparts=k_factors[1])
    vpo, pi = s[outputs].split(pi, nparts=p_factors[1])
    vqo, qi = s[outputs].split(qi, nparts=q_factors[1])

    tko, ki = s[outputs].split(ki, nparts=k_factors[2])
    tpo, pi = s[outputs].split(pi, nparts=p_factors[2])
    tqo, qi = s[outputs].split(qi, nparts=q_factors[2])

    # reorder
    s[outputs].reorder(ko, po, qo, vko, vpo, vqo, tko, tpo, tqo, ki, pi, qi)
    # s[outputs].reorder(po, bko, qo, vqo, vbko, vpo, tbko, tpo, tqo, bki, pi, qi)

    # fuse
    bko = s[outputs].fuse(b, ko)

    # bind
    s[outputs].bind(bko, bz)
    s[outputs].bind(po, by)
    s[outputs].bind(qo, bx)
    s[outputs].bind(vko, vz)
    s[outputs].bind(vpo, vy)
    s[outputs].bind(vqo, vx)
    s[outputs].bind(tko, tz)
    s[outputs].bind(tpo, ty)
    s[outputs].bind(tqo, tx)

    # compute at write cache
    s[write_cache].compute_at(s[outputs], tqo)

    rc, ry, rx = s[write_cache].op.reduce_axis
    rco, rci = s[write_cache].split(rc, nparts=rc_factors[0])
    rcm, rci = s[write_cache].split(rci, nparts=rc_factors[1])
    ryo, ryi = s[write_cache].split(ry, nparts=ry_factors[0])
    rym, ryi = s[write_cache].split(ryi, nparts=ry_factors[1])
    rxo, rxi = s[write_cache].split(rx, nparts=rx_factors[0])
    rxm, rxi = s[write_cache].split(rxi, nparts=rx_factors[1])
    a, b, c, d = s[write_cache].op.axis
    s[write_cache].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, a, b, c, d)

    # compute at read cache
    s[read_share_weight].compute_at(s[write_cache], rxm)
    # s[read_local_weight].compute_at(s[write_cache], rxi)
    s[read_share_inputs].compute_at(s[write_cache], rxm)
    # s[read_local_inputs].compute_at(s[write_cache], rxi)

    # cooperative fetching
    for cache in [read_share_inputs, read_share_weight]:
        cb, ck, ch, cw = s[cache].op.axis
        fused = s[cache].fuse(cb, ck, ch, cw)
        fused, bindx = s[cache].split(fused, factor=q_factors[2])
        fused, bindy = s[cache].split(fused, factor=p_factors[2])
        fused, bindz = s[cache].split(fused, factor=k_factors[2])
        s[cache].bind(bindx, tx)
        s[cache].bind(bindy, ty)
        s[cache].bind(bindz, tz)

    s[outputs].pragma(kernel_scope, 'auto_unroll_max_step', 1500)
    s[outputs].pragma(kernel_scope, 'unroll_explicit', 1)

    s[padded].compute_inline()


def schedule_yolo_conv_cuda_3(s, outputs, inputs, weight, parameter):
    # inline the padding operation
    padded = outputs.op.input_tensors[0]

    # create cache
    write_cache = s.cache_write(outputs, "local")
    read_share_weight = s.cache_read(weight, "shared", [write_cache])
    # read_local_weight = s.cache_read(read_share_weight, "local", [write_cache])
    read_share_inputs = s.cache_read(padded, "shared", [write_cache])
    # read_local_inputs = s.cache_read(read_share_inputs, "local", [write_cache])

    b_factors = parameter.b_factors
    k_factors = parameter.k_factors
    p_factors = parameter.p_factors
    q_factors = parameter.q_factors
    rc_factors = parameter.rc_factors
    ry_factors = parameter.ry_factors
    rx_factors = parameter.rx_factors

    # prepare thread_axis
    bx = tvm.te.thread_axis("blockIdx.x")
    by = tvm.te.thread_axis("blockIdx.y")
    bz = tvm.te.thread_axis("blockIdx.z")
    vx = tvm.te.thread_axis("vthread")
    vy = tvm.te.thread_axis("vthread")
    vz = tvm.te.thread_axis("vthread")
    tx = tvm.te.thread_axis("threadIdx.x")
    ty = tvm.te.thread_axis("threadIdx.y")
    tz = tvm.te.thread_axis("threadIdx.z")

    # split the spatial axes
    b, k, p, q = s[outputs].op.axis

    kernel_scope, b = s[outputs].split(b, nparts=1)

    bo, bi = s[outputs].split(b, nparts=b_factors[0])
    ko, ki = s[outputs].split(k, nparts=k_factors[0])
    po, pi = s[outputs].split(p, nparts=p_factors[0])
    qo, qi = s[outputs].split(q, nparts=q_factors[0])

    vbo, bi = s[outputs].split(bi, nparts=b_factors[1])
    vko, ki = s[outputs].split(ki, nparts=k_factors[1])
    vpo, pi = s[outputs].split(pi, nparts=p_factors[1])
    vqo, qi = s[outputs].split(qi, nparts=q_factors[1])

    tbo, bi = s[outputs].split(bi, nparts=b_factors[2])
    tko, ki = s[outputs].split(ki, nparts=k_factors[2])
    tpo, pi = s[outputs].split(pi, nparts=p_factors[2])
    tqo, qi = s[outputs].split(qi, nparts=q_factors[2])

    # reorder
    s[outputs].reorder(bo, ko, po, qo, vbo, vko, vpo, vqo, tbo, tko, tpo, tqo, bi, ki, pi, qi)

    # fuse
    outer = s[outputs].fuse(bo, ko, po, qo)
    middle = s[outputs].fuse(vbo, vko, vpo, vqo)
    inner = s[outputs].fuse(tbo, tko, tpo, tqo)

    # bind
    s[outputs].bind(outer, bx)
    s[outputs].bind(inner, tx)

    # compute at write cache
    s[write_cache].compute_at(s[outputs], inner)

    rc, ry, rx = s[write_cache].op.reduce_axis
    rco, rci = s[write_cache].split(rc, nparts=rc_factors[0])
    rcm, rci = s[write_cache].split(rci, nparts=rc_factors[1])
    ryo, ryi = s[write_cache].split(ry, nparts=ry_factors[0])
    rym, ryi = s[write_cache].split(ryi, nparts=ry_factors[1])
    rxo, rxi = s[write_cache].split(rx, nparts=rx_factors[0])
    rxm, rxi = s[write_cache].split(rxi, nparts=rx_factors[1])
    a, b, c, d = s[write_cache].op.axis
    s[write_cache].reorder(rco, ryo, rxo, rcm, rym, rxm, rci, ryi, rxi, a, b, c, d)

    # compute at read cache
    s[read_share_weight].compute_at(s[write_cache], rxm)
    # s[read_local_weight].compute_at(s[write_cache], rxi)
    s[read_share_inputs].compute_at(s[write_cache], rxm)
    # s[read_local_inputs].compute_at(s[write_cache], rxi)

    # cooperative fetching
    for cache in [read_share_inputs, read_share_weight]:
        cb, ck, ch, cw = s[cache].op.axis
        fused = s[cache].fuse(cb, ck, ch, cw)
        fused, bindx = s[cache].split(fused, factor=b_factors[2] * k_factors[2] * p_factors[2] * q_factors[2])
        s[cache].bind(bindx, tx)

    s[outputs].pragma(kernel_scope, 'auto_unroll_max_step', 1500)
    s[outputs].pragma(kernel_scope, 'unroll_explicit', 1)

    s[padded].compute_inline()


def schedule_yolo_conv_opencl(s, outputs, inputs, weight):
    # inline the padding operation
    padded = outputs.op.input_tensors[0]

    # prepare thread_axis
    bx = tvm.te.thread_axis("blockIdx.x")

    # split the spatial axes
    b, k, p, q = s[outputs].op.axis
    bo, bi = s[outputs].split(b, nparts=1)
    s[outputs].bind(bo, bx)

    s[padded].compute_inline()


def try_yolo_conv(config, parameter, fsch):
    # get the compute
    # (1, 3, 448, 448, 64, 3, 7, 7, 1, 2, 3, 1, 1)
    batch, CI, H, W, CO, _, kh, kw, _, st, pad, dilation, group = config
    inputs = tvm.te.placeholder((batch, CI, H, W), dtype="float32")
    weight = tvm.te.placeholder((CO, CI, kh, kw), dtype="float32")
    outputs = conv2d_nchw(inputs, weight, stride=st, padding=pad, dilation=dilation, groups=group)

    s = tvm.te.create_schedule(outputs.op)
    fsch(s, outputs, inputs, weight, parameter)

    arg_bufs = [inputs, weight, outputs]
    stmt = tvm.lower(s, arg_bufs, simple_mode=True)
    # print(stmt)
    dev_id = 2
    ctx = tvm.nd.context("cuda", dev_id)
    max_dims = ctx.max_thread_dimensions
    kwargs = {
        "max_shared_memory_per_block": ctx.max_shared_memory_per_block,
        "max_threads_per_block": ctx.max_threads_per_block,
        "max_thread_x": max_dims[0],
        "max_thread_y": max_dims[1],
        "max_thread_z": max_dims[2]
    }
    verify = tvm.tir.ir_pass.VerifyGPUCode(stmt, kwargs)
    # print("config is:\n %s" % (str(config)))
    if verify:
        print("Valid kernel")
        time_cost = _evaluate(s, arg_bufs, "cuda", dev_id, 10)
        print("Yolo conv use", time_cost, "ms\n")
    else:
        print("Invalid kernel")
        time_cost = float("inf")
    return time_cost


if __name__ == "__main__":
    res = []
    parameters = []
    with open("yolo_conv_b8_parameters.txt", "r") as fin:
        for line in fin:
            _, content = line.split(":", 1)
            obj = json.loads(content)
            op_parameters = obj[0]
            conv_parameters = op_parameters[1]
            parameter = Parameter()
            parameter.b_factors = conv_parameters["spatial"][0]
            parameter.k_factors = conv_parameters["spatial"][1]
            parameter.p_factors = conv_parameters["spatial"][2]
            parameter.q_factors = conv_parameters["spatial"][3]
            parameter.rc_factors = conv_parameters["reduce"][0]
            parameter.ry_factors = conv_parameters["reduce"][1]
            parameter.rx_factors = conv_parameters["reduce"][2]
            parameters.append(parameter)
    for config, parameter in list(zip(yolo_shapes_b8, parameters))[:]:
        cost = try_yolo_conv(config, parameter, schedule_yolo_conv_cuda_3)
        res.append(cost)
    for ele in res:
        print(ele)
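All three CUDA schedules above follow the same tiling recipe: split each spatial axis into block, virtual-thread, thread, and serial pieces, reorder, bind the outer pieces to GPU block and thread axes, and anchor the reduction under the innermost bound loop. Below is a minimal sketch of that split-reorder-bind pattern on a standalone matrix multiply, assuming the same tvm.te API used above; the matrix size and tile counts are illustrative, not values from the tuned parameter file.

import tvm

N = 1024  # illustrative problem size
A = tvm.te.placeholder((N, N), name="A")
B = tvm.te.placeholder((N, N), name="B")
k = tvm.te.reduce_axis((0, N), name="k")
C = tvm.te.compute((N, N), lambda i, j: tvm.te.sum(A[i, k] * B[k, j], axis=k), name="C")

s = tvm.te.create_schedule(C.op)
i, j = s[C].op.axis
# block/thread/serial tiling, mirroring the split-reorder-bind steps above
bi, ii = s[C].split(i, nparts=16)   # outer piece -> blockIdx.y
bj, jj = s[C].split(j, nparts=16)   # outer piece -> blockIdx.x
ti, ii = s[C].split(ii, nparts=8)   # middle piece -> threadIdx.y
tj, jj = s[C].split(jj, nparts=8)   # middle piece -> threadIdx.x
s[C].reorder(bi, bj, ti, tj, ii, jj)
s[C].bind(bi, tvm.te.thread_axis("blockIdx.y"))
s[C].bind(bj, tvm.te.thread_axis("blockIdx.x"))
s[C].bind(ti, tvm.te.thread_axis("threadIdx.y"))
s[C].bind(tj, tvm.te.thread_axis("threadIdx.x"))
print(tvm.lower(s, [A, B, C], simple_mode=True))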
36.853717
117
0.642309
2,397
15,368
3.928661
0.099291
0.074758
0.04906
0.0446
0.794414
0.752469
0.752363
0.745779
0.743124
0.743124
0
0.013248
0.204321
15,368
416
118
36.942308
0.756869
0.108277
0
0.644366
0
0
0.048875
0.005496
0
0
0
0
0
1
0.024648
false
0.003521
0.021127
0
0.056338
0.014085
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e39dc932026baaeb449bc58a709ca31ff928d9e3
24
py
Python
localstack/__init__.py
supaflysnooka/localstack
078d477a42244d58bd0c8606a0fe80a048f06cb7
[ "Apache-2.0" ]
null
null
null
localstack/__init__.py
supaflysnooka/localstack
078d477a42244d58bd0c8606a0fe80a048f06cb7
[ "Apache-2.0" ]
null
null
null
localstack/__init__.py
supaflysnooka/localstack
078d477a42244d58bd0c8606a0fe80a048f06cb7
[ "Apache-2.0" ]
null
null
null
__version__ = "0.12.20"
12
23
0.666667
4
24
3
1
0
0
0
0
0
0
0
0
0
0
0.238095
0.125
24
1
24
24
0.333333
0
0
0
0
0
0.291667
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e3f3cb2bd55edec10da6e557fc4ebf9a637fd0f3
83
py
Python
models/__init__.py
mpiannucci/crosswynds-promo
66ab0250b5c587374d4f538335b0994f70cf739b
[ "MIT" ]
null
null
null
models/__init__.py
mpiannucci/crosswynds-promo
66ab0250b5c587374d4f538335b0994f70cf739b
[ "MIT" ]
5
2015-03-06T18:46:28.000Z
2015-03-11T16:42:00.000Z
models/__init__.py
mpiannucci/crosswynds-promo
66ab0250b5c587374d4f538335b0994f70cf739b
[ "MIT" ]
null
null
null
# Add the models to the parent namespace
from ipaddress import *
from user import *
27.666667
40
0.783133
13
83
5
0.769231
0
0
0
0
0
0
0
0
0
0
0
0.180723
83
3
41
27.666667
0.955882
0.457831
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5809a89e7673754ce1e6624b49d6a2657e29b62f
125
py
Python
test/sqlalchemy_filterparams_tests/models/__init__.py
cbrand/python-sqlalchemy-filterparams
6e555cfe9e2f0f2c5f6d6606485de50bc76aaf73
[ "MIT" ]
2
2016-02-24T03:07:26.000Z
2016-05-22T22:00:40.000Z
test/sqlalchemy_filterparams_tests/models/__init__.py
cbrand/python-sqlalchemy-filterparams
6e555cfe9e2f0f2c5f6d6606485de50bc76aaf73
[ "MIT" ]
null
null
null
test/sqlalchemy_filterparams_tests/models/__init__.py
cbrand/python-sqlalchemy-filterparams
6e555cfe9e2f0f2c5f6d6606485de50bc76aaf73
[ "MIT" ]
null
null
null
# -*- encoding: utf-8 -*-
from .base import Base
from .domain import Domain
from .email import EMail
from .user import User
17.857143
26
0.72
19
125
4.736842
0.473684
0
0
0
0
0
0
0
0
0
0
0.009709
0.176
125
6
27
20.833333
0.864078
0.184
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5821bc72acbf3496afadd43ff46c69e4c31d3104
67
py
Python
federatedscope/core/regularizer/__init__.py
alibaba/FederatedScope
fcf6d237624769ea094cfd68803901622f14fc23
[ "Apache-2.0" ]
9
2022-03-24T07:59:37.000Z
2022-03-31T06:47:52.000Z
federatedscope/core/regularizer/__init__.py
alibaba/FederatedScope
fcf6d237624769ea094cfd68803901622f14fc23
[ "Apache-2.0" ]
1
2022-03-28T13:52:17.000Z
2022-03-28T13:52:17.000Z
federatedscope/core/regularizer/__init__.py
alibaba/FederatedScope
fcf6d237624769ea094cfd68803901622f14fc23
[ "Apache-2.0" ]
null
null
null
from federatedscope.core.regularizer.proximal_regularizer import *
33.5
66
0.880597
7
67
8.285714
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.059701
67
1
67
67
0.920635
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
582fab8494179614d32acf2f9ae3bf70b45cdace
19,013
py
Python
test/graph/parse/test_cdg_java.py
acheshkov/program_slicing
124d2dcf6c9c2cd8e505b96f4f47f3ea98f0a260
[ "MIT" ]
5
2021-11-06T04:35:17.000Z
2022-03-21T09:11:54.000Z
test/graph/parse/test_cdg_java.py
acheshkov/program_slicing
124d2dcf6c9c2cd8e505b96f4f47f3ea98f0a260
[ "MIT" ]
19
2021-11-15T14:42:56.000Z
2022-02-01T14:30:34.000Z
test/graph/parse/test_cdg_java.py
acheshkov/program_slicing
124d2dcf6c9c2cd8e505b96f4f47f3ea98f0a260
[ "MIT" ]
null
null
null
__licence__ = 'MIT'
__author__ = 'kuyaki'
__credits__ = ['kuyaki']
__maintainer__ = 'kuyaki'
__date__ = '2021/03/30'

from typing import List, Dict
from unittest import TestCase

from program_slicing.graph.parse import cdg_java
from program_slicing.graph.statement import Statement, StatementType


class CDGJavaTestCase(TestCase):

    def __check_cdg_children(self, children: List[Statement], statement_type_map: Dict[int, StatementType]) -> None:
        for i, child in enumerate(children):
            statement_type = statement_type_map.get(i, StatementType.UNKNOWN)
            self.assertEqual(statement_type, child.statement_type)

    def test_switch(self) -> None:
        source_code = """
        {
            switch(a) {
                default: a = 1;
                case 10: myFoo();
                case 5: break;
                case 4: a = -1;
            }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(23, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(6, len(function_children))
        self.__check_cdg_children(function_children, {
            0: StatementType.SCOPE, 3: StatementType.SCOPE, 4: StatementType.BRANCH, 5: StatementType.EXIT})
        branch_children = [child for child in cdg.successors(function_children[4])]
        self.assertEqual(16, len(branch_children))
        self.__check_cdg_children(branch_children, {
            0: StatementType.SCOPE, 4: StatementType.ASSIGNMENT, 5: StatementType.SCOPE,
            7: StatementType.CALL, 8: StatementType.SCOPE, 9: StatementType.GOTO,
            10: StatementType.SCOPE, 15: StatementType.ASSIGNMENT})

    def test_while(self) -> None:
        source_code = """
        {
            while (1) {
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(7, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(5, len(function_children))
        self.__check_cdg_children(function_children, {
            0: StatementType.SCOPE, 3: StatementType.LOOP, 4: StatementType.EXIT})
        loop_children = [child for child in cdg.successors(function_children[3])]
        self.assertEqual(1, len(loop_children))
        self.__check_cdg_children(loop_children, {0: StatementType.SCOPE})

    def test_for_each(self) -> None:
        source_code = """
        class A {
            int main(String word) {
                for (char a : word) {
                    foo(a);
                }
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(16, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(9, len(function_children))
        self.__check_cdg_children(function_children, {
            1: StatementType.VARIABLE, 2: StatementType.SCOPE, 3: StatementType.VARIABLE,
            7: StatementType.LOOP, 8: StatementType.EXIT})
        loop_children = [child for child in cdg.successors(function_children[7])]
        self.assertEqual(3, len(loop_children))
        self.__check_cdg_children(loop_children, {0: StatementType.SCOPE, 2: StatementType.CALL})
        self.assertEqual({"a"}, loop_children[2].affected_by)

    def test_for_each_modifiers(self) -> None:
        source_code = """
        class A {
            int main(String word) {
                for (final char a : word) {
                }
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(15, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(10, len(function_children))
        self.__check_cdg_children(function_children, {
            1: StatementType.VARIABLE, 2: StatementType.SCOPE, 3: StatementType.VARIABLE,
            8: StatementType.LOOP, 9: StatementType.EXIT})
        loop_children = [child for child in cdg.successors(function_children[8])]
        self.assertEqual(1, len(loop_children))
        self.__check_cdg_children(loop_children, {0: StatementType.SCOPE})

    def test_try_catch(self) -> None:
        source_code = """
        class A {
            int main(String args) {
                try {
                    a = args[10];
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    System.out.println("The 'try catch' is finished.");
                }
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(25, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(9, len(function_children))
        self.__check_cdg_children(function_children, {
            1: StatementType.VARIABLE, 2: StatementType.SCOPE, 3: StatementType.BRANCH,
            5: StatementType.SCOPE, 7: StatementType.CALL, 8: StatementType.EXIT})
        try_children = [child for child in cdg.successors(function_children[3])]
        self.assertEqual(9, len(try_children))
        self.__check_cdg_children(try_children, {
            0: StatementType.SCOPE, 6: StatementType.ASSIGNMENT, 7: StatementType.VARIABLE,
            8: StatementType.BRANCH})
        catch_children = [child for child in cdg.successors(try_children[8])]
        self.assertEqual(3, len(catch_children))
        self.__check_cdg_children(catch_children, {0: StatementType.SCOPE, 2: StatementType.CALL})

    def test_resourced_try_multi_catch(self) -> None:
        source_code = """
        class A {
            int main(String args) {
                try (int i = 10) {
                    a = args[i];
                } catch (MyException1 e) {
                    e.printStackTrace();
                } catch (MyException2 e) {
                }
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(28, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(9, len(function_children))
        self.__check_cdg_children(function_children, {
            1: StatementType.VARIABLE, 2: StatementType.SCOPE, 7: StatementType.BRANCH,
            8: StatementType.EXIT})
        try_children = [child for child in cdg.successors(function_children[7])]
        self.assertEqual(9, len(try_children))
        self.__check_cdg_children(try_children, {
            0: StatementType.SCOPE, 6: StatementType.ASSIGNMENT, 7: StatementType.VARIABLE,
            8: StatementType.BRANCH})
        catch_1_children = [child for child in cdg.successors(try_children[8])]
        self.assertEqual(5, len(catch_1_children))
        self.__check_cdg_children(catch_1_children, {
            0: StatementType.SCOPE, 2: StatementType.CALL, 3: StatementType.VARIABLE,
            4: StatementType.BRANCH})
        catch_2_children = [child for child in cdg.successors(catch_1_children[4])]
        self.assertEqual(1, len(catch_2_children))
        self.__check_cdg_children(catch_2_children, {0: StatementType.SCOPE})

    def test_update(self) -> None:
        source_code = """
        {
            int n = 0;
            for (int i = 0; i < 10; i++) {
                ++n;
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(19, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(12, len(function_children))
        self.__check_cdg_children(function_children, {
            0: StatementType.SCOPE, 3: StatementType.VARIABLE, 6: StatementType.VARIABLE,
            10: StatementType.LOOP, 11: StatementType.EXIT})
        loop_children = [child for child in cdg.successors(function_children[10])]
        self.assertEqual(6, len(loop_children))
        self.__check_cdg_children(loop_children, {
            0: StatementType.SCOPE, 3: StatementType.ASSIGNMENT, 5: StatementType.ASSIGNMENT})

    def test_multiple_returns(self) -> None:
        source_code = """
        {
            int n = 0;
            int a = 10;
            if (n < a) {
                return n;
            }
            return a;
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(19, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(15, len(function_children))
        self.__check_cdg_children(function_children, {
            0: StatementType.SCOPE, 3: StatementType.VARIABLE, 6: StatementType.VARIABLE,
            11: StatementType.BRANCH, 13: StatementType.GOTO, 14: StatementType.EXIT})
        self.assertEqual({"a", "n"}, function_children[11].affected_by)
        branch_children = [child for child in cdg.successors(function_children[11])]
        self.assertEqual(3, len(branch_children))
        self.__check_cdg_children(branch_children, {0: StatementType.SCOPE, 2: StatementType.GOTO})

    def test_synchronized(self) -> None:
        source_code = """
        {
            synchronized(a) {
                a = -1;
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(12, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(5, len(function_children))
        self.__check_cdg_children(function_children, {
            0: StatementType.SCOPE, 3: StatementType.LOOP, 4: StatementType.EXIT})
        loop_children = [child for child in cdg.successors(function_children[3])]
        self.assertEqual(6, len(loop_children))
        self.__check_cdg_children(loop_children, {0: StatementType.SCOPE, 5: StatementType.ASSIGNMENT})

    def test_parse(self) -> None:
        source_code = """
        class A {
            public static int main() {
                int n = 10;
                for(int i = 0; i < n; i += 1) {
                    if (i < 4) {
                        System.out.println("lol");
                        continue;
                    }
                    if (i > 6) {
                        System.out.println("che bu rek");
                        break;
                    } else
                        System.out.println("kek");
                }
                return n;
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(44, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(15, len(function_children))
        self.__check_cdg_children(function_children, {
            1: StatementType.SCOPE, 4: StatementType.VARIABLE, 7: StatementType.VARIABLE,
            11: StatementType.LOOP, 13: StatementType.GOTO, 14: StatementType.EXIT})
        loop_children = [child for child in cdg.successors(function_children[11])]
        self.assertEqual(14, len(loop_children))
        self.__check_cdg_children(loop_children, {
            0: StatementType.SCOPE, 5: StatementType.BRANCH, 10: StatementType.BRANCH,
            13: StatementType.ASSIGNMENT})
        branch_1_children = [child for child in cdg.successors(loop_children[5])]
        self.assertEqual(4, len(branch_1_children))
        self.__check_cdg_children(branch_1_children, {
            0: StatementType.SCOPE, 2: StatementType.CALL, 3: StatementType.GOTO})
        branch_2_children = [child for child in cdg.successors(loop_children[10])]
        self.assertEqual(7, len(branch_2_children))
        self.__check_cdg_children(branch_1_children, {
            0: StatementType.SCOPE, 2: StatementType.CALL, 3: StatementType.GOTO,
            4: StatementType.GOTO, 6: StatementType.CALL})

    def test_parse_without_class(self) -> None:
        source_code = """
        public static int main(int arg) {
            int n = 10 + arg;
            return n;
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(10, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(9, len(function_children))
        self.__check_cdg_children(function_children, {
            1: StatementType.VARIABLE, 2: StatementType.SCOPE, 5: StatementType.VARIABLE,
            7: StatementType.GOTO, 8: StatementType.EXIT})

    def test_parse_without_function(self) -> None:
        source_code = """
        int n = 10 + arg;
        return n;
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(7, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(6, len(function_children))
        self.__check_cdg_children(function_children, {
            2: StatementType.VARIABLE, 4: StatementType.GOTO, 5: StatementType.EXIT})

    def test_parse_with_inner_functions(self) -> None:
        source_code = """
        class A {
            int main() {
                int n = 0;
                class B {
                    int gain() {
                        int k = 0;
                    }
                }
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(19, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(2, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION, 1: StatementType.FUNCTION})
        for entry_point in entry_points:
            function_children = [child for child in cdg.successors(entry_point)]
            if len(function_children) == 6:
                self.__check_cdg_children(function_children, {
                    1: StatementType.SCOPE, 4: StatementType.VARIABLE, 5: StatementType.EXIT})
            elif len(function_children) == 8:
                self.__check_cdg_children(function_children, {
                    1: StatementType.SCOPE, 4: StatementType.VARIABLE, 6: StatementType.SCOPE,
                    7: StatementType.EXIT})
            else:
                self.assertFalse(True)

    def test_parse_constructor(self) -> None:
        source_code = """
        class MyClass {
            int a;

            MyClass() {
                this.a = 0;
            }
        }
        """
        cdg = cdg_java.parse(source_code)
        self.assertEqual(16, len(cdg.nodes))
        entry_points = [entry_point for entry_point in cdg.entry_points]
        self.assertEqual(1, len(entry_points))
        self.__check_cdg_children(entry_points, {0: StatementType.FUNCTION})
        function_children = [child for child in cdg.successors(entry_points[0])]
        self.assertEqual(9, len(function_children))
        self.__check_cdg_children(function_children, {
            1: StatementType.SCOPE, 7: StatementType.ASSIGNMENT, 8: StatementType.EXIT})
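For orientation, every test above repeats the same parse-then-walk pattern; a minimal usage sketch built only from calls the tests themselves exercise (the Java snippet is illustrative):

cdg = cdg_java.parse("{ int n = 0; }")
for entry_point in cdg.entry_points:
    # each entry point is a FUNCTION statement; its control-dependent
    # children carry the StatementType labels the assertions check
    print([child.statement_type for child in cdg.successors(entry_point)])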
36.423372
116
0.564771
1,970
19,013
5.195431
0.074619
0.075232
0.070347
0.085979
0.794822
0.77616
0.750171
0.739521
0.712262
0.697509
0
0.024262
0.340977
19,013
521
117
36.493282
0.792578
0
0
0.619522
0
0
0.151475
0.005154
0
0
0
0
0.119522
1
0.02988
false
0
0.007968
0
0.049801
0.011952
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5840454d831c1e7ac8805dae83f87938318dd1d6
4,078
py
Python
gators/feature_generation_dt/tests/test_cyclic_day_of_week.py
Aditya-Kapadiya/gators
d7c9967e3a8e304a601b6a92ad834d03d3e36338
[ "Apache-2.0" ]
4
2021-10-29T18:20:52.000Z
2022-03-31T22:53:03.000Z
gators/feature_generation_dt/tests/test_cyclic_day_of_week.py
Aditya-Kapadiya/gators
d7c9967e3a8e304a601b6a92ad834d03d3e36338
[ "Apache-2.0" ]
1
2022-02-21T20:02:16.000Z
2022-02-21T20:02:16.000Z
gators/feature_generation_dt/tests/test_cyclic_day_of_week.py
Aditya-Kapadiya/gators
d7c9967e3a8e304a601b6a92ad834d03d3e36338
[ "Apache-2.0" ]
5
2021-11-17T20:16:54.000Z
2022-02-21T18:21:02.000Z
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal

from gators.feature_generation_dt import CyclicDayOfWeek

ks.set_option("compute.default_index_type", "distributed-sequence")


@pytest.fixture
def data():
    X = pd.DataFrame(
        {
            "A": ["2020-05-04T00", None, np.nan],
            "B": ["2020-05-06T06", None, np.nan],
            "C": ["2020-05-08T23", None, np.nan],
            "D": ["2020-05-09T06", None, np.nan],
            "E": ["2020-05-10T06", None, np.nan],
            "X": ["x", None, np.nan],
        }
    )
    columns = ["A", "B", "C", "D", "E"]
    X["A"] = X["A"].astype("datetime64[ns]")
    X["B"] = X["B"].astype("datetime64[ms]")
    X["C"] = X["C"].astype("datetime64[s]")
    X["D"] = X["D"].astype("datetime64[m]")
    X["E"] = X["E"].astype("datetime64[h]")
    X_expected = pd.DataFrame(
        {
            "A__day_of_week_cos": [1.0, np.nan, np.nan],
            "A__day_of_week_sin": [0.0, np.nan, np.nan],
            "B__day_of_week_cos": [-0.4999999999999998, np.nan, np.nan],
            "B__day_of_week_sin": [0.8660254037844388, np.nan, np.nan],
            "C__day_of_week_cos": [-0.5000000000000004, np.nan, np.nan],
            "C__day_of_week_sin": [-0.8660254037844384, np.nan, np.nan],
            "D__day_of_week_cos": [0.4999999999999993, np.nan, np.nan],
            "D__day_of_week_sin": [-0.866025403784439, np.nan, np.nan],
            "E__day_of_week_cos": [1.0, None, np.nan],
            "E__day_of_week_sin": [-2.4492935982947064e-16, None, np.nan],
        }
    )
    X_expected = pd.concat([X.copy(), X_expected], axis=1)
    obj = CyclicDayOfWeek(columns=columns).fit(X)
    return obj, X, X_expected


@pytest.fixture
def data_ks():
    X = ks.DataFrame(
        {
            "A": ["2020-05-04T00", None, np.nan],
            "B": ["2020-05-06T06", None, np.nan],
            "C": ["2020-05-08T23", None, np.nan],
            "D": ["2020-05-09T06", None, np.nan],
            "E": ["2020-05-10T06", None, np.nan],
            "X": ["x", None, np.nan],
        }
    )
    columns = ["A", "B", "C", "D", "E"]
    X[columns] = X[columns].astype("datetime64[ns]")
    X_expected = pd.DataFrame(
        {
            "A__day_of_week_cos": [1.0, np.nan, np.nan],
            "A__day_of_week_sin": [0.0, np.nan, np.nan],
            "B__day_of_week_cos": [-0.4999999999999998, np.nan, np.nan],
            "B__day_of_week_sin": [0.8660254037844388, np.nan, np.nan],
            "C__day_of_week_cos": [-0.5000000000000004, np.nan, np.nan],
            "C__day_of_week_sin": [-0.8660254037844384, np.nan, np.nan],
            "D__day_of_week_cos": [0.4999999999999993, np.nan, np.nan],
            "D__day_of_week_sin": [-0.866025403784439, np.nan, np.nan],
            "E__day_of_week_cos": [1.0, None, np.nan],
            "E__day_of_week_sin": [-2.4492935982947064e-16, None, np.nan],
        }
    )
    X_expected = pd.concat([X.to_pandas().copy(), X_expected], axis=1)
    obj = CyclicDayOfWeek(columns=columns).fit(X)
    return obj, X, X_expected


def test_pd(data):
    obj, X, X_expected = data
    X_new = obj.transform(X)
    assert_frame_equal(X_new, X_expected)


@pytest.mark.koalas
def test_ks(data_ks):
    obj, X, X_expected = data_ks
    X_new = obj.transform(X)
    assert_frame_equal(X_new.to_pandas(), X_expected)


def test_pd_np(data):
    obj, X, X_expected = data
    X_numpy_new = obj.transform_numpy(X.to_numpy())
    X_new = pd.DataFrame(X_numpy_new)
    X_expected = pd.DataFrame(X_expected.values)
    assert_frame_equal(X_new, X_expected)


@pytest.mark.koalas
def test_ks_np(data_ks):
    obj, X, X_expected = data_ks
    X_numpy_new = obj.transform_numpy(X.to_numpy())
    X_new = pd.DataFrame(X_numpy_new)
    X_expected = pd.DataFrame(X_expected.values)
    assert_frame_equal(X_new, X_expected)


def test_init():
    with pytest.raises(TypeError):
        _ = CyclicDayOfWeek(columns=0)
    with pytest.raises(ValueError):
        _ = CyclicDayOfWeek(columns=[])
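For reference, the expected *_cos/*_sin columns in the fixtures follow from placing the weekday index (Monday = 0 through Sunday = 6) on the unit circle; the fixture values match an angle of 2*pi*dow/6. That reading is inferred from the expected numbers themselves, not quoted from the library internals:

import numpy as np
import pandas as pd

dates = pd.to_datetime(["2020-05-04", "2020-05-06", "2020-05-08", "2020-05-09", "2020-05-10"])
dow = dates.dayofweek.to_numpy()   # [0, 2, 4, 5, 6]
angle = 2 * np.pi * dow / 6.0
print(np.cos(angle))  # [ 1.  -0.5 -0.5  0.5  1. ]  -> the *_cos fixture values
print(np.sin(angle))  # [ 0.  0.866 -0.866 -0.866 -0. ]  -> the *_sin fixture values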
34.559322
74
0.592202
611
4,078
3.672668
0.148936
0.106952
0.080214
0.071301
0.759358
0.747326
0.747326
0.72861
0.72861
0.706328
0
0.118156
0.234183
4,078
117
75
34.854701
0.600384
0.004659
0
0.59
0
0
0.160463
0.006409
0
0
0
0
0.05
1
0.07
false
0
0.06
0
0.15
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
58816e377b7292b3b4b138724d8702d6205b486f
170
py
Python
inventory4h4h/logs/admin.py
Yashub/InventoryForHabit
6b4811bc6e48dcfbde54160311f043afff626e4f
[ "MIT" ]
null
null
null
inventory4h4h/logs/admin.py
Yashub/InventoryForHabit
6b4811bc6e48dcfbde54160311f043afff626e4f
[ "MIT" ]
null
null
null
inventory4h4h/logs/admin.py
Yashub/InventoryForHabit
6b4811bc6e48dcfbde54160311f043afff626e4f
[ "MIT" ]
null
null
null
from django.contrib import admin

from .models import DonationData, Address

admin.site.register(DonationData)
admin.site.register(Address)

# Register your models here.
18.888889
41
0.811765
22
170
6.272727
0.545455
0.130435
0.246377
0
0
0
0
0
0
0
0
0
0.111765
170
8
42
21.25
0.913907
0.152941
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
58991d9ea4f4e39bd45def89c88764d3497c0f5c
49
py
Python
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/server/__init__.py
Maximilien-R/cookiecutter-tartiflette-aiohttp
66e7e0897b315df6a1908c6c31ec58b74e0b3a6f
[ "MIT" ]
3
2020-06-01T14:16:19.000Z
2021-11-07T19:54:08.000Z
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/server/__init__.py
Maximilien-R/cookiecutter-tartiflette-aiohttp
66e7e0897b315df6a1908c6c31ec58b74e0b3a6f
[ "MIT" ]
88
2019-11-15T17:35:54.000Z
2021-08-02T04:50:51.000Z
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/server/__init__.py
Maximilien-R/cookiecutter-tartiflette-aiohttp
66e7e0897b315df6a1908c6c31ec58b74e0b3a6f
[ "MIT" ]
2
2020-05-04T08:35:34.000Z
2020-10-22T17:47:26.000Z
from .app import run_app

__all__ = ("run_app",)
12.25
24
0.693878
8
49
3.5
0.625
0.428571
0
0
0
0
0
0
0
0
0
0
0.163265
49
3
25
16.333333
0.682927
0
0
0
0
0
0.142857
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
58a59d58b3c391b86e3ac22ed5480efa7463ff0e
46
py
Python
test/login.py
shuxiang-python/second_git
24245972b96dadebd3068a7942cb5333538b3a0f
[ "MIT" ]
null
null
null
test/login.py
shuxiang-python/second_git
24245972b96dadebd3068a7942cb5333538b3a0f
[ "MIT" ]
null
null
null
test/login.py
shuxiang-python/second_git
24245972b96dadebd3068a7942cb5333538b3a0f
[ "MIT" ]
null
null
null
num1 = 100
num2 = 200
num3 = 300
num5 = 500
5.111111
10
0.608696
8
46
3.5
1
0
0
0
0
0
0
0
0
0
0
0.5
0.304348
46
8
11
5.75
0.375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
543978d21fb8085d99daafd5a10cff343031d949
44
py
Python
tests/components/panel_custom/__init__.py
domwillcode/home-assistant
f170c80bea70c939c098b5c88320a1c789858958
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
tests/components/panel_custom/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
31,101
2020-03-02T13:00:16.000Z
2022-03-31T23:57:36.000Z
tests/components/panel_custom/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Tests for the panel_custom component."""
22
43
0.727273
6
44
5.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.113636
44
1
44
44
0.794872
0.840909
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
544e71351ee9ff4c4c0b27e6decf6603806e2633
16,657
py
Python
cogs/karaoke.py
Focus1019/Discord-Karaoke-Handler
af9bc211338e1e782fa0632fcc611d762a8ee751
[ "MIT" ]
1
2021-11-23T07:41:14.000Z
2021-11-23T07:41:14.000Z
cogs/karaoke.py
Focus1019/Discord-Karaoke-Handler
af9bc211338e1e782fa0632fcc611d762a8ee751
[ "MIT" ]
null
null
null
cogs/karaoke.py
Focus1019/Discord-Karaoke-Handler
af9bc211338e1e782fa0632fcc611d762a8ee751
[ "MIT" ]
null
null
null
import discord
from discord.ext import commands

import checks


class Karaoke(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.current_users = {}
        self.locked = False

    @commands.command(name='help')
    @checks.is_karaoke_channel()
    @commands.guild_only()
    async def _help(self, ctx):
        embed = discord.Embed(colour=discord.Colour.dark_teal())
        embed.title = 'Karaoke Help'
        embed.description = '{prefix}queue **-** Show the current karaoke queue.\n' \
                            '{prefix}join **-** Join a karaoke queue.\n' \
                            '{prefix}leave **-** Leave a karaoke queue.\n' \
                            '{prefix}next **-** Select the next user in queue.\n' \
                            'Admin Commands.\n' \
                            '{prefix}channel add|remove **-** add or remove karaoke queue channel.\n' \
                            '{prefix}clear **-** Clear the current queue.\n' \
                            '{prefix}remove [user] **-** Removes the mentioned user from the queue.\n' \
                            '{prefix}add [user] **-** Add the mentioned user to the queue.\n' \
                            '{prefix}swap [user1] [user2] **-** Swaps the mentioned users on the queue.\n' \
                            '{prefix}lock **-** lock or unlock the command for the queue.\n' \
                            'need help? kindly DM Focus™#0001'
        embed.set_footer(text='set your footer here')
        await ctx.send(embed=embed)

    @commands.command(name='queue')
    @checks.is_karaoke_channel()
    @commands.guild_only()
    async def _queue(self, ctx):
        embed = discord.Embed()
        embed.title = 'Karaoke Queue'
        embed.set_footer(text='karaoke queue')
        if ctx.channel not in self.current_users:
            embed.colour = discord.Colour.gold()
            embed.description = 'The queue is currently empty.'
            return await ctx.send(embed=embed)
        members = self.current_users[ctx.channel]
        formatted_members = []
        for i, member in enumerate(members):
            member: discord.Member
            f = lambda x: f'{member}{f"({member.nick})" if member.nick else ""}'
            if i == 0:
                formatted_members.append(f'**Current turn:** {f(member)}')
                continue
            formatted_members.append(f'**{i}** | {f(member)}')
        embed.colour = discord.Colour.dark_teal()
        embed.description = '\n\n'.join(formatted_members)
        return await ctx.send(embed=embed)

    @commands.command(name='join')
    @checks.is_karaoke_channel()
    @commands.guild_only()
    async def _join(self, ctx):
        if self.locked:
            embed = discord.Embed(colour=discord.Colour.red())
            embed.description = 'The queue is currently locked.'
            embed.set_footer(text='karaoke queue')
            return await ctx.send(embed=embed)
        if ctx.channel not in self.current_users:
            self.current_users[ctx.channel] = []
        if ctx.author in self.current_users[ctx.channel]:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'You are already in the queue.'
            embed.set_footer(text='set your footer here')
            return await ctx.send(embed=embed)
        self.current_users[ctx.channel].append(ctx.author)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = 'You got successfully added to the queue.'
        embed.set_footer(text='set your footer here')
        return await ctx.send(embed=embed)

    @commands.command(name='leave')
    @checks.is_karaoke_channel()
    @commands.guild_only()
    async def _leave(self, ctx):
        if self.locked:
            embed = discord.Embed(colour=discord.Colour.red())
            embed.description = 'The queue is currently locked.'
            embed.set_footer(text='set your footer here')
            return await ctx.send(embed=embed)
        if ctx.channel not in self.current_users:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The queue is currently empty'
            embed.set_footer(text='set your footer here')
            return await ctx.send(embed=embed)
        if ctx.author not in self.current_users[ctx.channel]:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'You are not in the queue.'
            embed.set_footer(text='set your footer here')
            return await ctx.send(embed=embed)
        if len(self.current_users[ctx.channel]) == 1:
            del self.current_users[ctx.channel]
        else:
            self.current_users[ctx.channel].remove(ctx.author)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = 'You got successfully removed from the queue.'
        embed.set_footer(text='set your footer here')
        return await ctx.send(embed=embed)

    @commands.command(name='next')
    @checks.is_karaoke_channel()
    @commands.guild_only()
    async def _next(self, ctx):
        if ctx.channel not in self.current_users:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The queue is currently empty'
            embed.set_footer(text='set your footer here')
            return await ctx.send(embed=embed)
        if ctx.author is not self.current_users[ctx.channel][0]:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'Only the current member can execute this command.'
            embed.set_footer(text='set your footer here')
            return await ctx.send(embed=embed)
        finished_member = self.current_users[ctx.channel].pop(0)
        if len(self.current_users[ctx.channel]) > 0:
            current_member = self.current_users[ctx.channel][0]
            embed = discord.Embed(colour=discord.Colour.green())
            embed.description = f'{finished_member.name} finished.\n\n' \
                                f'**{current_member.name} now it\'s your turn.**'
            embed.set_footer(text='set your footer here')
            return await ctx.send(content=current_member.mention, embed=embed)
        else:
            del self.current_users[ctx.channel]
            embed = discord.Embed(colour=discord.Colour.gold())
            embed.description = f'{finished_member.name} finished.\n\n' \
                                f'**The queue is now empty.**'
            embed.set_footer(text='set your footer here')
            return await ctx.send(embed=embed)

    # Admin Commands

    @commands.command(name='clear')
    @commands.has_permissions(administrator=True)
    @checks.is_karaoke_channel()
    async def _clear(self, ctx):
        if ctx.channel not in self.current_users:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The queue is already empty.'
            return await ctx.send(embed=embed)
        del self.current_users[ctx.channel]
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = f'Successfully cleared the queue.'
        return await ctx.send(embed=embed)

    @commands.command(name='remove')
    @commands.has_permissions(administrator=True)
    @checks.is_karaoke_channel()
    async def _remove(self, ctx, member: discord.Member):
        if ctx.channel not in self.current_users:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The queue is empty.'
            return await ctx.send(embed=embed)
        if member not in self.current_users[ctx.channel]:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The member is not in the queue.'
            return await ctx.send(embed=embed)
        self.current_users[ctx.channel].remove(member)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = f'Successfully removed {member.name} from the queue.'
        return await ctx.send(embed=embed)

    @_remove.error
    async def _remove_error(self, ctx: commands.Context, error):
        if isinstance(error, commands.MissingRequiredArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'Please use `{ctx.prefix}remove <Member>`.'
            return await ctx.send(embed=embed)
        elif isinstance(error, commands.BadArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The given argument is no member.'
            return await ctx.send(embed=embed)

    @commands.command(name='add')
    @commands.has_permissions(administrator=True)
    @checks.is_karaoke_channel()
    async def _add(self, ctx, member: discord.Member):
        if ctx.channel not in self.current_users:
            self.current_users[ctx.channel] = []
        if member in self.current_users[ctx.channel]:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The member is already in the queue.'
            return await ctx.send(embed=embed)
        self.current_users[ctx.channel].append(member)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = f'Successfully added {member.name} to the queue.'
        return await ctx.send(embed=embed)

    @_add.error
    async def _add_error(self, ctx: commands.Context, error):
        if isinstance(error, commands.MissingRequiredArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'Please use `{ctx.prefix}add <Member>`.'
            return await ctx.send(embed=embed)
        elif isinstance(error, commands.BadArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The given argument is no member.'
            return await ctx.send(embed=embed)

    @commands.command(name='swap')
    @commands.has_permissions(administrator=True)
    @checks.is_karaoke_channel()
    async def _swap(self, ctx, member1: discord.Member, member2: discord.Member):
        if ctx.channel not in self.current_users:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The queue is empty.'
            return await ctx.send(embed=embed)
        for member in [member1, member2]:
            if member not in self.current_users[ctx.channel]:
                embed = discord.Embed(colour=discord.Colour.dark_red())
                embed.description = 'The member is not in the queue.'
                return await ctx.send(embed=embed)
        pos1 = self.current_users[ctx.channel].index(member1)
        pos2 = self.current_users[ctx.channel].index(member2)
        self.current_users[ctx.channel].remove(member1)
        self.current_users[ctx.channel].remove(member2)
        self.current_users[ctx.channel].insert(pos2, member1)
        self.current_users[ctx.channel].insert(pos1, member2)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = f'Successfully switched {member1.name} and {member2.name} position.'
        return await ctx.send(embed=embed)

    @_swap.error
    async def _swap_error(self, ctx: commands.Context, error):
        if isinstance(error, commands.MissingRequiredArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'Please use `{ctx.prefix}swap <Member1> <Member2>`.'
            return await ctx.send(embed=embed)
        elif isinstance(error, commands.BadArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The given argument is no member.'
            return await ctx.send(embed=embed)

    @commands.command(name='prefix')
    @commands.has_permissions(administrator=True)
    async def _prefix(self, ctx, prefix: str):
        ctx.bot.cfg.set('core.token', prefix)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = f'Successfully set prefix.\n' \
                            f'> Prefix: `{prefix}`'
        return await ctx.send(embed=embed)

    @_prefix.error
    async def _prefix_error(self, ctx: commands.Context, error):
        if isinstance(error, commands.MissingRequiredArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'Please use `{ctx.prefix}prefix <Prefix>`.'
            return await ctx.send(embed=embed)

    @commands.command(name='lock')
    @commands.has_permissions(administrator=True)
    async def _lock(self, ctx):
        if self.locked:
            self.locked = False
            embed = discord.Embed(colour=discord.Colour.green())
            embed.description = f'Successfully unlocked commands.'
            return await ctx.send(embed=embed)
        else:
            self.locked = True
            embed = discord.Embed(colour=discord.Colour.green())
            embed.description = f'Successfully locked commands.'
            return await ctx.send(embed=embed)

    @commands.group(name='channel', invoke_without_command=True)
    @commands.has_permissions(administrator=True)
    async def _channel(self, ctx):
        channel_ids = ctx.bot.cfg.get('karaoke.channels')
        channels = [ctx.bot.get_channel(cid) for cid in channel_ids]
        formatted_channels = [c.mention if c else 'Not Found' for c in channels]
        embed = discord.Embed(colour=discord.Colour.green())
        embed.title = 'Karaoke Channels'
        embed.description = ', '.join(formatted_channels)
        embed.add_field(name='**Help**',
                        value=f'`{ctx.prefix}channel add <Channel>` - Add a karaoke channel\n'
                              f'`{ctx.prefix}channel remove <Channel>` - Remove a karaoke channel')
        return await ctx.send(embed=embed)

    @_channel.command(name='add')
    async def _channel_add(self, ctx, channel: discord.TextChannel):
        channel_ids = ctx.bot.cfg.get('karaoke.channels')
        if channel.id in channel_ids:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'The channel is already a karaoke channel.'
            return await ctx.send(embed=embed)
        channel_ids.append(channel.id)
        ctx.bot.cfg.set('karaoke.channels', channel_ids)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = f'Successfully added {channel.mention}.'
        return await ctx.send(embed=embed)

    @_channel_add.error
    async def _channel_add_error(self, ctx: commands.Context, error):
        if isinstance(error, commands.MissingRequiredArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'Please use `{ctx.prefix}channel add <Channel>`.'
            return await ctx.send(embed=embed)
        elif isinstance(error, commands.BadArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The given argument is no channel.'
            return await ctx.send(embed=embed)

    @_channel.command(name='remove')
    async def _channel_remove(self, ctx, channel: discord.TextChannel):
        channel_ids = ctx.bot.cfg.get('karaoke.channels')
        if channel.id not in channel_ids:
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'The channel is no karaoke channel.'
            return await ctx.send(embed=embed)
        channel_ids.remove(channel.id)
        ctx.bot.cfg.set('karaoke.channels', channel_ids)
        embed = discord.Embed(colour=discord.Colour.green())
        embed.description = f'Successfully removed {channel.mention}.'
        return await ctx.send(embed=embed)

    @_channel_remove.error
    async def _channel_remove_error(self, ctx: commands.Context, error):
        if isinstance(error, commands.MissingRequiredArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = f'Please use `{ctx.prefix}channel remove <Channel>`.'
            return await ctx.send(embed=embed)
        elif isinstance(error, commands.BadArgument):
            embed = discord.Embed(colour=discord.Colour.dark_red())
            embed.description = 'The given argument is no channel.'
            return await ctx.send(embed=embed)


def setup(bot):
    bot.add_cog(Karaoke(bot))
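For completeness, the setup function at the bottom is the hook discord.py calls when this file is loaded as an extension; a minimal wiring sketch, assuming discord.py 1.x and a hypothetical token placeholder:

from discord.ext import commands

bot = commands.Bot(command_prefix='!')
bot.load_extension('cogs.karaoke')  # calls setup(bot) above, registering the Karaoke cog
bot.run('YOUR_TOKEN_HERE')  # hypothetical placeholder, not a real token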
48.704678
108
0.631146
2,012
16,657
5.139662
0.079026
0.042549
0.074848
0.099797
0.808529
0.794507
0.75689
0.713084
0.678174
0.628663
0
0.002417
0.254788
16,657
341
109
48.847507
0.8305
0.00084
0
0.53871
0
0
0.169942
0.005649
0
0
0
0
0
1
0.006452
false
0
0.009677
0
0.154839
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
545cf5f598dc74b4c13da6876364a1b882db8d47
191
py
Python
shirt.py
BeniJan/palletization_team
0bae088c3d2b7629eef339f1af42ff192eeb6c47
[ "MIT" ]
null
null
null
shirt.py
BeniJan/palletization_team
0bae088c3d2b7629eef339f1af42ff192eeb6c47
[ "MIT" ]
null
null
null
shirt.py
BeniJan/palletization_team
0bae088c3d2b7629eef339f1af42ff192eeb6c47
[ "MIT" ]
null
null
null
class Shirt:
    def __init__(self, size, color):
        self.size = size.lower()
        self.color = color.lower()

    def __repr__(self):
        return str(self.size + "&" + self.color)
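Since __init__ lower-cases both fields, the representation is insensitive to the casing of the constructor arguments; for example:

shirt = Shirt("M", "Blue")
print(shirt)  # m&blue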
27.285714
48
0.581152
24
191
4.291667
0.458333
0.23301
0
0
0
0
0
0
0
0
0
0
0.272251
191
7
48
27.285714
0.741007
0
0
0
0
0
0.005208
0
0
0
0
0
0
1
0.333333
false
0
0
0.166667
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
546d168d4d6d5568624a5b842abb2841c19cb914
91
py
Python
pythran/tests/user_defined_import/global_init_alias_main.py
davidbrochart/pythran
24b6c8650fe99791a4091cbdc2c24686e86aa67c
[ "BSD-3-Clause" ]
1,647
2015-01-13T01:45:38.000Z
2022-03-28T01:23:41.000Z
pythran/tests/user_defined_import/global_init_alias_main.py
davidbrochart/pythran
24b6c8650fe99791a4091cbdc2c24686e86aa67c
[ "BSD-3-Clause" ]
1,116
2015-01-01T09:52:05.000Z
2022-03-18T21:06:40.000Z
pythran/tests/user_defined_import/global_init_alias_main.py
davidbrochart/pythran
24b6c8650fe99791a4091cbdc2c24686e86aa67c
[ "BSD-3-Clause" ]
180
2015-02-12T02:47:28.000Z
2022-03-14T10:28:18.000Z
import global_init as gi

XX = [gi.aa(), 3]

#pythran export bb()
def bb():
    return XX
10.111111
24
0.615385
16
91
3.4375
0.8125
0
0
0
0
0
0
0
0
0
0
0.014493
0.241758
91
8
25
11.375
0.782609
0.208791
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
5471c2fb9bcaf084d221ea30c73362ed0ec94385
83
py
Python
segnet/__init__.py
Abdiel-EMT/segnet
474a68079000a85d1e62ad9723d316074bb1eb8d
[ "MIT" ]
null
null
null
segnet/__init__.py
Abdiel-EMT/segnet
474a68079000a85d1e62ad9723d316074bb1eb8d
[ "MIT" ]
null
null
null
segnet/__init__.py
Abdiel-EMT/segnet
474a68079000a85d1e62ad9723d316074bb1eb8d
[ "MIT" ]
null
null
null
name = "segnet"

from .metrics import *
from .models import *
from .utils import *
13.833333
22
0.698795
11
83
5.272727
0.636364
0.344828
0
0
0
0
0
0
0
0
0
0
0.192771
83
5
23
16.6
0.865672
0
0
0
0
0
0.072289
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
547a1b1e370ec4d3064c444c33912f861fcb8191
203
py
Python
testapp/custom_runner.py
Smyle/django-nose
0590bfcb4024987035623664eea94e01c0bb67a7
[ "BSD-3-Clause" ]
null
null
null
testapp/custom_runner.py
Smyle/django-nose
0590bfcb4024987035623664eea94e01c0bb67a7
[ "BSD-3-Clause" ]
null
null
null
testapp/custom_runner.py
Smyle/django-nose
0590bfcb4024987035623664eea94e01c0bb67a7
[ "BSD-3-Clause" ]
null
null
null
"""Custom runner to test overriding runner.""" from django_nose import NoseTestSuiteRunner class CustomNoseTestSuiteRunner(NoseTestSuiteRunner): """Custom test runner, to test overring runner."""
25.375
54
0.783251
21
203
7.52381
0.619048
0.101266
0.151899
0
0
0
0
0
0
0
0
0
0.128079
203
7
55
29
0.892655
0.418719
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
54809f788453a5e208c9a4de3924fdd65904fea9
200
py
Python
applications/alkaid/alkaid/strategy_base.py
FrederichRiver/neutrino
e91db53486e56ddeb83ae9714311d606b33fb165
[ "BSD-3-Clause" ]
2
2019-02-10T15:14:23.000Z
2019-02-12T13:59:52.000Z
applications/alkaid/alkaid/strategy_base.py
FrederichRiver/neutrino
e91db53486e56ddeb83ae9714311d606b33fb165
[ "BSD-3-Clause" ]
null
null
null
applications/alkaid/alkaid/strategy_base.py
FrederichRiver/neutrino
e91db53486e56ddeb83ae9714311d606b33fb165
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/python3


class strategyBase(object):
    def __init__(self):
        pass

    def _get_data(self):
        pass

    def _settle(self):
        pass


if __name__ == "__main__":
    pass
11.764706
27
0.575
23
200
4.347826
0.695652
0.24
0.22
0
0
0
0
0
0
0
0
0.007299
0.315
200
16
28
12.5
0.722628
0.085
0
0.444444
0
0
0.043956
0
0
0
0
0
0
1
0.333333
false
0.444444
0
0
0.444444
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
0
0
0
5
5490e61394ee15f2a555a5910ca6937e6c850945
167
py
Python
general/recursion/string/3. binary_2_decimal.py
SaPhyoThuHtet/problem-solving
f73dd0f14942574f4cb91fbfc86a554be274796f
[ "MIT" ]
null
null
null
general/recursion/string/3. binary_2_decimal.py
SaPhyoThuHtet/problem-solving
f73dd0f14942574f4cb91fbfc86a554be274796f
[ "MIT" ]
null
null
null
general/recursion/string/3. binary_2_decimal.py
SaPhyoThuHtet/problem-solving
f73dd0f14942574f4cb91fbfc86a554be274796f
[ "MIT" ]
null
null
null
def decimal_to_binary(num):
    if (num == 0):
        return 0
    return num % 2 + 10 * decimal_to_binary(num // 2)


print(decimal_to_binary(2))
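Note the function converts decimal to binary (despite the file name reading the other way), accumulating the binary digits inside a base-10 integer: each step contributes num % 2 as the lowest digit and shifts earlier digits left by multiplying by 10. A few spot checks:

for n in [0, 2, 5, 10]:
    print(n, decimal_to_binary(n))
# 0 -> 0, 2 -> 10, 5 -> 101, 10 -> 1010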
15.181818
45
0.550898
24
167
3.583333
0.458333
0.313953
0.523256
0.418605
0
0
0
0
0
0
0
0.063063
0.335329
167
10
46
16.7
0.711712
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.6
0.2
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
54b1a86a5d7e48afe4dbb2a290188e8fd2c4a47b
141
py
Python
tests/module/__init__.py
MD-Studio/MDStudio_SMARTCyp
92ebd48af891188d509c23e297437218c00ec136
[ "Apache-2.0" ]
3
2019-10-17T01:10:27.000Z
2022-01-19T23:11:49.000Z
tests/module/__init__.py
MD-Studio/MDStudio_SMARTCyp
92ebd48af891188d509c23e297437218c00ec136
[ "Apache-2.0" ]
1
2019-10-18T22:07:16.000Z
2019-10-21T11:19:48.000Z
tests/module/__init__.py
MD-Studio/MDStudio_SMARTCyp
92ebd48af891188d509c23e297437218c00ec136
[ "Apache-2.0" ]
1
2019-10-17T01:14:04.000Z
2019-10-17T01:14:04.000Z
import sys

version = sys.version_info
MAJOR_PY_VERSION = sys.version_info.major
PY_VERSION = '{0}.{1}'.format(version.major, version.minor)
23.5
59
0.77305
22
141
4.727273
0.454545
0.288462
0.326923
0.403846
0.605769
0.605769
0.605769
0
0
0
0
0.015625
0.092199
141
5
60
28.2
0.796875
0
0
0
0
0
0.049645
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
54bb35806e71240058384e4ede8654dc9c80aae4
49
py
Python
blueprint_example/run.py
gtsofa/mnseriesflask
58a84b698527ff3e790f0f7179193335bd440e3c
[ "MIT" ]
null
null
null
blueprint_example/run.py
gtsofa/mnseriesflask
58a84b698527ff3e790f0f7179193335bd440e3c
[ "MIT" ]
null
null
null
blueprint_example/run.py
gtsofa/mnseriesflask
58a84b698527ff3e790f0f7179193335bd440e3c
[ "MIT" ]
null
null
null
# run.py

from blue import app

app.run(debug=True)
16.333333
20
0.755102
10
49
3.7
0.8
0
0
0
0
0
0
0
0
0
0
0
0.122449
49
3
21
16.333333
0.860465
0.122449
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
49d59637ab6bdb39099fd750090f7c99375854cd
4,702
py
Python
model-optimizer/extensions/ops/activation_test.py
JOCh1958/openvino
070201feeec5550b7cf8ec5a0ffd72dc879750be
[ "Apache-2.0" ]
1
2021-04-06T03:32:12.000Z
2021-04-06T03:32:12.000Z
model-optimizer/extensions/ops/activation_test.py
JOCh1958/openvino
070201feeec5550b7cf8ec5a0ffd72dc879750be
[ "Apache-2.0" ]
28
2021-09-24T09:29:02.000Z
2022-03-28T13:20:46.000Z
model-optimizer/extensions/ops/activation_test.py
JOCh1958/openvino
070201feeec5550b7cf8ec5a0ffd72dc879750be
[ "Apache-2.0" ]
1
2020-08-30T11:48:03.000Z
2020-08-30T11:48:03.000Z
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import unittest

import numpy as np

from extensions.ops.activation_ops import Elu, SoftPlus, Mish
from mo.graph.graph import Node
from mo.utils.unittest.graph import build_graph


class TestActivationOp(unittest.TestCase):
    nodes_attributes = {
        'node_1': {'shape': np.array([4]), 'value': None},
        'activation_node': {'op': 'Activation', 'kind': 'op', 'operation': None},
        'node_3': {'shape': None}
    }

    def test_activation_elu_infer(self):
        graph = build_graph(self.nodes_attributes,
                            [('node_1', 'activation_node'), ('activation_node', 'node_3')],
                            {'node_1': {'value': np.array([6, -4, -2, -1])},
                             'activation_node': {'operation': 'elu', 'alpha': 1.0},
                             'node_3': {'value': None}})
        graph.graph['layout'] = 'NCHW'
        activation_node = Node(graph, 'activation_node')
        Elu.infer(activation_node)
        exp_shape = np.array([4])
        res_shape = graph.node['node_3']['shape']
        res_value = graph.node['node_3']['value']
        exp_value = np.array([6., -0.98168436, -0.86466472, -0.63212056])
        for i, value in enumerate(exp_shape):
            self.assertEqual(res_shape[i], value)
        for i, value in enumerate(exp_value):
            self.assertAlmostEqual(res_value[i], value)

    def test_activation_softplus_infer(self):
        graph = build_graph(self.nodes_attributes,
                            [('node_1', 'activation_node'), ('activation_node', 'node_3')],
                            {'node_1': {'value': np.array([-1.0, 0.0, 1.0, 20.0])},
                             'activation_node': {'op': 'SoftPlus', 'operation': SoftPlus.operation},
                             'node_3': {'value': None}})
        graph.graph['layout'] = 'NCHW'
        activation_node = Node(graph, 'activation_node')
        SoftPlus.infer(activation_node)
        exp_shape = np.array([4])
        res_shape = graph.node['node_3']['shape']
        res_value = graph.node['node_3']['value']
        exp_value = np.array([0.3132617, 0.6931472, 1.3132617, 20.0])
        for i, value in enumerate(exp_shape):
            self.assertEqual(res_shape[i], value)
        for i, value in enumerate(exp_value):
            self.assertAlmostEqual(res_value[i], value)

    def test_activation_mish_infer(self):
        graph = build_graph(self.nodes_attributes,
                            [('node_1', 'activation_node'), ('activation_node', 'node_3')],
                            {'node_1': {'value': np.array([-1.0, 0.0, 1.0, 20.0])},
                             'activation_node': {'op': 'Mish', 'operation': Mish.operation},
                             'node_3': {'value': None}})
        graph.graph['layout'] = 'NCHW'
        activation_node = Node(graph, 'activation_node')
        Mish.infer(activation_node)
        exp_shape = np.array([4])
        res_shape = graph.node['node_3']['shape']
        res_value = graph.node['node_3']['value']
        exp_value = np.array([-0.30340146, 0.0, 0.8650984, 20.0])
        for i, value in enumerate(exp_shape):
            self.assertEqual(res_shape[i], value)
        for i, value in enumerate(exp_value):
            self.assertAlmostEqual(res_value[i], value)
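For reference, the expected arrays in these tests agree with the textbook activation definitions; a quick numpy sketch reproducing them (these are the standard formulas, not code from the model optimizer itself):

import numpy as np

elu = lambda x, alpha=1.0: np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
softplus = lambda x: np.log(1.0 + np.exp(x))
mish = lambda x: x * np.tanh(softplus(x))

print(elu(np.array([6.0, -4.0, -2.0, -1.0])))      # [ 6. -0.98168436 -0.86466472 -0.63212056]
print(softplus(np.array([-1.0, 0.0, 1.0, 20.0])))  # [ 0.3132617  0.6931472  1.3132617 20. ]
print(mish(np.array([-1.0, 0.0, 1.0, 20.0])))      # [-0.30340146  0.  0.8650984 20. ]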
39.847458
77
0.406635
407
4,702
4.498772
0.167076
0.145276
0.044238
0.045877
0.703987
0.703987
0.703987
0.703987
0.703987
0.703987
0
0.055328
0.481072
4,702
117
78
40.188034
0.695082
0.016376
0
0.560748
0
0
0.107746
0
0
0
0
0
0.056075
1
0.028037
false
0
0.046729
0
0.093458
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
49f31a36f9f75998ad7a5713b883c956808b310e
291
py
Python
atari/experiment_1_atari/random_agent.py
arcosin/Task_Detector
318f61aa45c17059941b8ad5417208bb891ed64e
[ "MIT" ]
null
null
null
atari/experiment_1_atari/random_agent.py
arcosin/Task_Detector
318f61aa45c17059941b8ad5417208bb891ed64e
[ "MIT" ]
null
null
null
atari/experiment_1_atari/random_agent.py
arcosin/Task_Detector
318f61aa45c17059941b8ad5417208bb891ed64e
[ "MIT" ]
null
null
null
import random


class RandomAgent:
    def __init__(self, actSize):
        super().__init__()
        self.actSize = actSize

    def act(self, state):
        return random.randint(0, self.actSize - 1)

#===============================================================================
16.166667
80
0.439863
24
291
5
0.625
0.275
0.25
0
0
0
0
0
0
0
0
0.008696
0.209622
291
17
81
17.117647
0.513043
0.271478
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.714286
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
49f44766855124ba8a658773e50cf7c70e6967a7
71
py
Python
strong_glm/utils/__init__.py
strongio/strong-glm
db05cb8a297858e46961e5d91105a515531dfdbb
[ "MIT" ]
2
2021-04-20T17:00:03.000Z
2022-03-03T16:33:01.000Z
strong_glm/utils/__init__.py
strongio/strong-glm
db05cb8a297858e46961e5d91105a515531dfdbb
[ "MIT" ]
1
2020-02-26T16:48:56.000Z
2020-02-26T16:48:56.000Z
strong_glm/utils/__init__.py
strongio/strong-glm
db05cb8a297858e46961e5d91105a515531dfdbb
[ "MIT" ]
null
null
null
from .tensor import to_tensor
from .simulate_data import simulate_data
23.666667
40
0.859155
11
71
5.272727
0.545455
0.413793
0
0
0
0
0
0
0
0
0
0
0.112676
71
2
41
35.5
0.920635
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b70a094b3c9510fb785270947cb699b2b7adb112
192
py
Python
app/dept/__init__.py
xasos/IlliniGuide
a2695decde1479843503e52fb48677c9d75d559a
[ "MIT" ]
null
null
null
app/dept/__init__.py
xasos/IlliniGuide
a2695decde1479843503e52fb48677c9d75d559a
[ "MIT" ]
null
null
null
app/dept/__init__.py
xasos/IlliniGuide
a2695decde1479843503e52fb48677c9d75d559a
[ "MIT" ]
null
null
null
from flask import Blueprint

dept = Blueprint('dept', __name__, url_prefix="/dept", template_folder="templates",
                 static_folder="static", static_url_path='/static/dept')

from . import views
32
141
0.760417
25
192
5.48
0.56
0.189781
0
0
0
0
0
0
0
0
0
0
0.104167
192
5
142
38.4
0.796512
0
0
0
0
0
0.1875
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
5
b70ed27f13c6996dc61afb06a40031934add3791
143
py
Python
solutions/python3/1238.py
sm2774us/amazon_interview_prep_2021
f580080e4a6b712b0b295bb429bf676eb15668de
[ "MIT" ]
42
2020-08-02T07:03:49.000Z
2022-03-26T07:50:15.000Z
solutions/python3/1238.py
ajayv13/leetcode
de02576a9503be6054816b7444ccadcc0c31c59d
[ "MIT" ]
null
null
null
solutions/python3/1238.py
ajayv13/leetcode
de02576a9503be6054816b7444ccadcc0c31c59d
[ "MIT" ]
40
2020-02-08T02:50:24.000Z
2022-03-26T15:38:10.000Z
from typing import List  # needed for the List[int] annotation outside the LeetCode harness


class Solution:
    def circularPermutation(self, n: int, start: int) -> List[int]:
        return [start ^ i ^ i >> 1 for i in range(1 << n)]
35.75
67
0.594406
22
143
3.863636
0.681818
0
0
0
0
0
0
0
0
0
0
0.018692
0.251748
143
3
68
47.666667
0.775701
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
b713f41c30fa90add9409d8aebc099674049b09d
197
py
Python
CodeForces/SpecificTastesOfAndre/SpecificTastesOfAndre.py
GeorgianBadita/algorithmic-problems
6b260050b7a1768b5e47a1d7d4ef7138a52db210
[ "MIT" ]
1
2021-07-05T16:32:14.000Z
2021-07-05T16:32:14.000Z
CodeForces/SpecificTastesOfAndre/SpecificTastesOfAndre.py
GeorgianBadita/algorithmic-problems
6b260050b7a1768b5e47a1d7d4ef7138a52db210
[ "MIT" ]
null
null
null
CodeForces/SpecificTastesOfAndre/SpecificTastesOfAndre.py
GeorgianBadita/algorithmic-problems
6b260050b7a1768b5e47a1d7d4ef7138a52db210
[ "MIT" ]
1
2021-05-14T15:40:09.000Z
2021-05-14T15:40:09.000Z
def perfect_array(length):
    return ' '.join(['1'] * length)


def main():
    t = int(input())
    for _ in range(t):
        length = int(input())
        print(perfect_array(length))


main()
13.133333
36
0.553299
25
197
4.24
0.6
0.226415
0.339623
0
0
0
0
0
0
0
0
0.006944
0.269036
197
14
37
14.071429
0.729167
0
0
0
0
0
0.010152
0
0
0
0
0
0
1
0.25
false
0
0
0.125
0.375
0.125
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
b7278a6d4cc1b8ed4600ae2fcb4f2567be411146
99
py
Python
app/core/admin.py
akagrv/StockMonitor
af0175b9ef0a9678fd358f9ddfab8f167b58b0aa
[ "MIT" ]
null
null
null
app/core/admin.py
akagrv/StockMonitor
af0175b9ef0a9678fd358f9ddfab8f167b58b0aa
[ "MIT" ]
1
2021-05-11T16:29:16.000Z
2021-05-11T16:29:16.000Z
app/core/admin.py
akagrv/StockMonitor
af0175b9ef0a9678fd358f9ddfab8f167b58b0aa
[ "MIT" ]
1
2021-09-25T06:29:30.000Z
2021-09-25T06:29:30.000Z
from django.contrib import admin
from core.models import WatchList

admin.site.register(WatchList)
19.8
33
0.838384
14
99
5.928571
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.10101
99
4
34
24.75
0.932584
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b72f3c2622c3b8d60304e8a0161a837176afde31
38
py
Python
errors.py
jackbicknell14/spotify-apps
a44a76b541bd27880c3aa088e34e89fba441314c
[ "Apache-2.0" ]
null
null
null
errors.py
jackbicknell14/spotify-apps
a44a76b541bd27880c3aa088e34e89fba441314c
[ "Apache-2.0" ]
null
null
null
errors.py
jackbicknell14/spotify-apps
a44a76b541bd27880c3aa088e34e89fba441314c
[ "Apache-2.0" ]
null
null
null
class TrackError(Exception):
    pass
12.666667
28
0.736842
4
38
7
1
0
0
0
0
0
0
0
0
0
0
0
0.184211
38
2
29
19
0.903226
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
3f97dc6d5e675ae86e5b3b1abbb20fb22825f9ab
197
py
Python
extract/extract_location_and_date.py
emmacunningham/court-reminder
0494e7f864c3922d1ac0bc41c6e255cd88e021a8
[ "MIT" ]
2
2019-10-02T05:31:37.000Z
2021-07-31T16:24:24.000Z
extract/extract_location_and_date.py
emmacunningham/court-reminder
0494e7f864c3922d1ac0bc41c6e255cd88e021a8
[ "MIT" ]
10
2017-02-11T05:35:31.000Z
2018-12-31T19:58:51.000Z
extract/extract_location_and_date.py
emmacunningham/court-reminder
0494e7f864c3922d1ac0bc41c6e255cd88e021a8
[ "MIT" ]
2
2017-02-12T19:11:49.000Z
2019-09-30T22:44:13.000Z
class Extractor(object):
    def __init__(self, transcript):
        self.transcript = transcript

    def get_location(self):
        return "TBD"

    def get_date(self):
        return "TBD"
15.153846
36
0.614213
22
197
5.227273
0.545455
0.243478
0.226087
0
0
0
0
0
0
0
0
0
0.28934
197
12
37
16.416667
0.821429
0
0
0.285714
0
0
0.030769
0
0
0
0
0
0
1
0.428571
false
0
0
0.285714
0.857143
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
3fb31f38b96e905ee893b0165c0942adaf415fd6
65
py
Python
tests/test_client.py
josephschorr/Python-Wrapper
2b0cb60049ddf621a44f329b1b374bca2063eb20
[ "MIT" ]
45
2016-04-07T04:38:34.000Z
2021-12-19T02:10:38.000Z
tests/test_client.py
josephschorr/Python-Wrapper
2b0cb60049ddf621a44f329b1b374bca2063eb20
[ "MIT" ]
30
2016-08-05T22:50:10.000Z
2021-05-18T08:51:00.000Z
tests/test_client.py
josephschorr/Python-Wrapper
2b0cb60049ddf621a44f329b1b374bca2063eb20
[ "MIT" ]
19
2016-08-05T15:18:23.000Z
2020-05-07T23:00:09.000Z
# Still need to write these...
def test_client():
    assert 1 == 1
21.666667
30
0.661538
11
65
3.818182
0.909091
0
0
0
0
0
0
0
0
0
0
0.038462
0.2
65
3
31
21.666667
0.769231
0.430769
0
0
0
0
0
0
0
0
0
0
0.5
1
0.5
true
0
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
1
0
0
0
0
0
0
5
3fbca29903ceeed10a2e4538e65476233e8a4e85
76
py
Python
vary/information_bottlekneck/__init__.py
joshloyal/Vary
cd79a941336c7b335dead8ca718a2d0d949d44bb
[ "MIT" ]
1
2017-05-14T11:54:09.000Z
2017-05-14T11:54:09.000Z
vary/information_bottlekneck/__init__.py
joshloyal/Vary
cd79a941336c7b335dead8ca718a2d0d949d44bb
[ "MIT" ]
null
null
null
vary/information_bottlekneck/__init__.py
joshloyal/Vary
cd79a941336c7b335dead8ca718a2d0d949d44bb
[ "MIT" ]
1
2020-11-17T11:44:56.000Z
2020-11-17T11:44:56.000Z
from vary.information_bottlekneck.bottlekneck import InformationBottlekneck
38
75
0.921053
7
76
9.857143
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.052632
76
1
76
76
0.958333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
3fe7a88e56218951474549a1771d6e7ca0654c52
103
py
Python
src/affe/io/__init__.py
eliavw/affe
0e57d7f40cb67f9a300292e03e3f83b4b591d1e3
[ "MIT" ]
1
2020-12-02T06:16:00.000Z
2020-12-02T06:16:00.000Z
src/affe/io/__init__.py
eliavw/affe
0e57d7f40cb67f9a300292e03e3f83b4b591d1e3
[ "MIT" ]
null
null
null
src/affe/io/__init__.py
eliavw/affe
0e57d7f40cb67f9a300292e03e3f83b4b591d1e3
[ "MIT" ]
null
null
null
from .dirs import *
from .file import *
from .tree import *
from .dump import dump_object, load_object
20.6
42
0.757282
16
103
4.75
0.5
0.394737
0
0
0
0
0
0
0
0
0
0
0.165049
103
4
43
25.75
0.883721
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b20aa3c0ea92adc16044211540a9711b673aad30
52
py
Python
django_command_cron/models/__init__.py
andrewp-as-is/django-command-cron.py
6b2b20cb1a9ccd80e2377b316316d6af68c8f9c6
[ "Unlicense" ]
1
2021-09-23T18:16:56.000Z
2021-09-23T18:16:56.000Z
django_command_cron/models/__init__.py
andrewp-as-is/django-command-cron.py
6b2b20cb1a9ccd80e2377b316316d6af68c8f9c6
[ "Unlicense" ]
null
null
null
django_command_cron/models/__init__.py
andrewp-as-is/django-command-cron.py
6b2b20cb1a9ccd80e2377b316316d6af68c8f9c6
[ "Unlicense" ]
null
null
null
from .call import Call
from .command import Command
17.333333
28
0.807692
8
52
5.25
0.5
0
0
0
0
0
0
0
0
0
0
0
0.153846
52
2
29
26
0.954545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b7476f18a3132a504732c99123ed32a15248e962
31
py
Python
tianyancha/__init__.py
iamsk/tianyancha
388302f20ea199a775f83b903bd6915bf21afd3a
[ "MIT" ]
2
2020-12-11T05:26:09.000Z
2020-12-11T12:25:10.000Z
tianyancha/__init__.py
iamsk/tianyancha
388302f20ea199a775f83b903bd6915bf21afd3a
[ "MIT" ]
null
null
null
tianyancha/__init__.py
iamsk/tianyancha
388302f20ea199a775f83b903bd6915bf21afd3a
[ "MIT" ]
null
null
null
from .client import Tianyancha
15.5
30
0.83871
4
31
6.5
1
0
0
0
0
0
0
0
0
0
0
0
0.129032
31
1
31
31
0.962963
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
b74dc0ec22e8846c08502fe4ab1e1cea7c0f1f4e
301
py
Python
payload/Payload.py
avegao/pybatrium
9f24d84f0b57888afb841121b308527c6b7365e4
[ "MIT" ]
null
null
null
payload/Payload.py
avegao/pybatrium
9f24d84f0b57888afb841121b308527c6b7365e4
[ "MIT" ]
null
null
null
payload/Payload.py
avegao/pybatrium
9f24d84f0b57888afb841121b308527c6b7365e4
[ "MIT" ]
null
null
null
from __future__ import annotations

from abc import abstractmethod
from typing import Dict


class Payload:
    @staticmethod
    @abstractmethod
    def parse(data: bytes) -> Dict:
        pass

    # @staticmethod
    # @abstractmethod
    # def __from_struct(data: Dict) -> Payload:
    #     pass
17.705882
47
0.671096
31
301
6.290323
0.548387
0.266667
0.297436
0
0
0
0
0
0
0
0
0
0.259136
301
16
48
18.8125
0.874439
0.265781
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0.125
0.375
0
0.625
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
5
b768f50334c5a6644de17381036d6aa8d89342a5
6,239
py
Python
tests/test_menu.py
ivddorrka/OP_nutriotionproject
20aa3c707fa1141a7425a158e47ef1f12744b1c7
[ "FSFAP" ]
1
2021-04-08T20:34:14.000Z
2021-04-08T20:34:14.000Z
tests/test_menu.py
ivddorrka/OP_nutriotionproject
20aa3c707fa1141a7425a158e47ef1f12744b1c7
[ "FSFAP" ]
3
2021-04-03T22:20:36.000Z
2021-05-17T16:32:26.000Z
tests/test_menu.py
ivddorrka/OP_nutriotionproject
20aa3c707fa1141a7425a158e47ef1f12744b1c7
[ "FSFAP" ]
null
null
null
""" Module for testing class Menu. """ import unittest from unittest import TestCase from modules.menu import Menu class TestMenu(TestCase): """ Class for testing menu.py. """ def setUp(self) -> None: """ Set up menus for tests. """ self.first_menu = Menu(2000, 70, 60, 300, ['tomato']) self.second_menu = Menu(1900, 60, 65, 290, []) def tearDown(self) -> None: """ Refreshes menus (makes them empty). """ self.first_menu = Menu(2000, 70, 60, 300, ['tomato']) self.second_menu = Menu(1900, 60, 65, 290, []) def test_init(self): """ Testing init (it also checks method choose_dishes()). """ self.assertEqual(self.first_menu.calories, 2000) self.assertEqual(self.first_menu.proteins, 70) self.assertEqual(self.first_menu.fats, 60) self.assertEqual(self.first_menu.carbohydrates, 300) self.assertEqual(self.first_menu.daily_calories, 0) self.assertEqual(self.first_menu.daily_fats, 0) self.assertEqual(self.first_menu.daily_proteins, 0) self.assertEqual(self.first_menu.daily_carbohydrates, 0) self.assertEqual(len(self.first_menu.all_dishes), 6580) self.assertEqual(len(self.second_menu.all_dishes), 7344) def test_menu(self): """ Testing methods __str__(), generate_menu(), accept_dish(), generate_dish() and delete_dish(). """ self.assertEqual(self.first_menu.__str__(), '') self.first_menu.generate_menu() self.assertNotEqual(self.first_menu.__str__(), '') self.assertEqual(len(self.first_menu.menu), 3) self.assertTrue(0.85*self.first_menu.calories <= self.first_menu.menu[0].calories + self.first_menu.menu[1].calories + self.first_menu.menu[2].calories <= 1.15*self.first_menu.calories) self.assertTrue(0.85*self.first_menu.proteins <= self.first_menu.menu[0].proteins + self.first_menu.menu[1].proteins + self.first_menu.menu[2].proteins <= 1.15*self.first_menu.proteins) self.assertTrue(0.85*self.first_menu.fats <= self.first_menu.menu[0].fats + self.first_menu.menu[1].fats + self.first_menu.menu[2].fats <= 1.15*self.first_menu.fats) self.assertTrue(0.85*self.first_menu.carbohydrates <= self.first_menu.menu[0].carbohydrates + self.first_menu.menu[1].carbohydrates + self.first_menu.menu[2].carbohydrates <= 1.15*self.first_menu.carbohydrates) self.first_menu.accept_dish(self.first_menu.menu[0]) self.assertTrue(self.first_menu.daily_calories <= 0.4*self.first_menu.calories) self.assertTrue(self.first_menu.daily_proteins <= 0.4*self.first_menu.proteins) self.assertTrue(self.first_menu.daily_fats <= 0.4*self.first_menu.calories) self.assertTrue(self.first_menu.daily_fats <= 0.4*self.first_menu.carbohydrates) self.first_menu.accept_dish(self.first_menu.menu[1]) self.assertTrue(0.5*self.first_menu.calories <= self.first_menu.daily_calories <= 0.9*self.first_menu.calories) self.assertTrue(0.5*self.first_menu.proteins <= self.first_menu.daily_proteins <= 0.9*self.first_menu.proteins) self.assertTrue(0.5*self.first_menu.fats <= self.first_menu.daily_fats <= 0.9*self.first_menu.fats) self.assertTrue(0.5*self.first_menu.carbohydrates <= self.first_menu.daily_carbohydrates <= 0.9*self.first_menu.carbohydrates) self.first_menu.accept_dish(self.first_menu.menu[2]) self.assertTrue(0.85*self.first_menu.calories <= self.first_menu.daily_calories <= 1.15*self.first_menu.calories) self.assertTrue(0.85*self.first_menu.proteins <= self.first_menu.daily_proteins <= 1.15*self.first_menu.proteins) self.assertTrue(0.85*self.first_menu.fats <= self.first_menu.daily_fats <= 1.15*self.first_menu.fats) self.assertTrue(0.85*self.first_menu.carbohydrates <= self.first_menu.daily_carbohydrates <= 1.15*self.first_menu.carbohydrates) 
self.second_menu.generate_menu() self.second_menu.delete_dish(self.second_menu.menu[2]) self.second_menu.accept_dish(self.second_menu.menu[0]) self.second_menu.accept_dish(self.second_menu.menu[1]) self.second_menu.accept_dish(self.second_menu.menu[2]) self.assertTrue(0.85*self.first_menu.calories <= self.first_menu.daily_calories <= 1.15*self.first_menu.calories) self.assertTrue(0.85*self.first_menu.proteins <= self.first_menu.daily_proteins <= 1.15*self.first_menu.proteins) self.assertTrue(0.85*self.first_menu.fats <= self.first_menu.daily_fats <= 1.15*self.first_menu.fats) self.assertTrue(0.85*self.first_menu.carbohydrates <= self.first_menu.daily_carbohydrates <= 1.15*self.first_menu.carbohydrates) def test_product(self): """ Testing methods search_product(), choose_product(). """ self.assertEqual(len(self.first_menu.search_product('tomato')), 50) self.assertEqual( len(self.second_menu.search_product('mango juice')), 50) self.first_menu.choose_product('tomatoes, raw', 100) self.assertAlmostEqual(self.first_menu.daily_calories, 18.0) self.assertAlmostEqual(self.first_menu.daily_proteins, 0.88) self.assertAlmostEqual(self.first_menu.daily_fats, 0.2) self.assertAlmostEqual(self.first_menu.daily_carbohydrates, 3.89) self.first_menu.choose_product('mango nectar', 150) self.assertAlmostEqual(self.first_menu.daily_calories, 94.5) self.assertAlmostEqual(self.first_menu.daily_proteins, 1.045) self.assertAlmostEqual(self.first_menu.daily_fats, 0.29) self.assertAlmostEqual(self.first_menu.daily_carbohydrates, 23.54) if __name__ == '__main__': unittest.main()
51.561983
140
0.650745
814
6,239
4.764128
0.114251
0.222795
0.321815
0.129964
0.825941
0.709386
0.632027
0.49278
0.46493
0.385508
0
0.045079
0.224876
6,239
120
141
51.991667
0.756824
0.050809
0
0.266667
0
0
0.010719
0
0
0
0
0
0.477778
1
0.055556
false
0
0.033333
0
0.1
0
0
0
0
null
1
1
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
5
b7992b33c3ef1bcd3e499a57e61569e7d14b6c0d
124
py
Python
tools/__init__.py
santiagoRuizSchiphol/squeezenext-tensorflow
e514f3d173fac098116ac28ade2862fb5acb498f
[ "MIT" ]
58
2018-07-19T11:52:57.000Z
2021-04-22T07:36:24.000Z
tools/__init__.py
santiagoRuizSchiphol/squeezenext-tensorflow
e514f3d173fac098116ac28ade2862fb5acb498f
[ "MIT" ]
6
2018-09-28T19:04:36.000Z
2020-10-18T10:27:28.000Z
tools/__init__.py
santiagoRuizSchiphol/squeezenext-tensorflow
e514f3d173fac098116ac28ade2862fb5acb498f
[ "MIT" ]
21
2018-09-11T02:00:42.000Z
2021-01-20T22:31:11.000Z
from tools import define_first_dim, get_checkpoint_step, get_or_create_global_step, warmup_phase
import stats
import fine_tune
41.333333
94
0.91129
21
124
4.904762
0.809524
0
0
0
0
0
0
0
0
0
0
0
0.064516
124
3
95
41.333333
0.887931
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b7ea540cf8dffbaef60c32b8d6d6d6babbf12fc8
110
py
Python
cwApp/admin.py
cs-fullstack-2019-spring/django-validation-cw-Litterial
8b1fb8f4cd3feca57210fe4a48c8938bbc6d50d6
[ "Apache-2.0" ]
null
null
null
cwApp/admin.py
cs-fullstack-2019-spring/django-validation-cw-Litterial
8b1fb8f4cd3feca57210fe4a48c8938bbc6d50d6
[ "Apache-2.0" ]
null
null
null
cwApp/admin.py
cs-fullstack-2019-spring/django-validation-cw-Litterial
8b1fb8f4cd3feca57210fe4a48c8938bbc6d50d6
[ "Apache-2.0" ]
null
null
null
from django.contrib import admin
from .models import Car

# Register your models here.
admin.site.register(Car)
27.5
32
0.809091
17
110
5.235294
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.118182
110
4
33
27.5
0.917526
0.236364
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
b7f3c638a8bc5d4246744618385437f1ff42a19c
322
py
Python
asteroid/data/__init__.py
mhu-coder/AsSteroid
56dd2b81bb16c1f081b0b91e3bbb8b29dd587dbd
[ "MIT" ]
null
null
null
asteroid/data/__init__.py
mhu-coder/AsSteroid
56dd2b81bb16c1f081b0b91e3bbb8b29dd587dbd
[ "MIT" ]
null
null
null
asteroid/data/__init__.py
mhu-coder/AsSteroid
56dd2b81bb16c1f081b0b91e3bbb8b29dd587dbd
[ "MIT" ]
null
null
null
from .wham_dataset import WhamDataset
from .whamr_dataset import WhamRDataset
from .dns_dataset import DNSDataset
from .librimix_dataset import LibriMix
from .wsj0_mix import Wsj0mixDataset
from .musdb18_dataset import MUSDB18Dataset
from .sms_wsj_dataset import SmsWsjDataset
from .kinect_wsj import KinectWsjMixDataset
32.2
43
0.872671
41
322
6.634146
0.487805
0.286765
0
0
0
0
0
0
0
0
0
0.020761
0.102484
322
9
44
35.777778
0.920415
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4d14584056aecc5a90dcaeb0ac6374a0209c7655
115
py
Python
utils/__init__.py
Justin900429/vision-transformer
e149092efbb83c166449944137db0ee5200f9325
[ "MIT" ]
1
2021-09-01T03:29:03.000Z
2021-09-01T03:29:03.000Z
utils/__init__.py
Justin900429/vision-transformer
e149092efbb83c166449944137db0ee5200f9325
[ "MIT" ]
null
null
null
utils/__init__.py
Justin900429/vision-transformer
e149092efbb83c166449944137db0ee5200f9325
[ "MIT" ]
null
null
null
from .EMA import EMA
from .loss import LabelSmoothing
from .stochastic import StochasticDepth
from .utils import *
23
39
0.817391
15
115
6.266667
0.533333
0
0
0
0
0
0
0
0
0
0
0
0.13913
115
4
40
28.75
0.949495
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4d277c5eae28a56d4edf0781f3ca5b2093b4f60f
44
py
Python
sphinx/cmd/__init__.py
daobook/sphinx
ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe
[ "BSD-2-Clause" ]
null
null
null
sphinx/cmd/__init__.py
daobook/sphinx
ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe
[ "BSD-2-Clause" ]
1,662
2015-01-02T11:45:27.000Z
2015-01-03T12:21:29.000Z
sphinx/cmd/__init__.py
daobook/sphinx
ef8daca1f9a82ede9b4b0b5cde93f3414cee3dfe
[ "BSD-2-Clause" ]
null
null
null
"""Modules for command line executables."""
22
43
0.727273
5
44
6.4
1
0
0
0
0
0
0
0
0
0
0
0
0.113636
44
1
44
44
0.820513
0.840909
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
4d3674fd0e3f53c047ce044d830a93509b3affa1
66
py
Python
test/data/70.py
suliveevil/vista.vim
a0469c645dcbe4033b857da27d35491f39e2f776
[ "MIT" ]
1,764
2019-02-16T04:36:30.000Z
2022-03-29T07:00:42.000Z
test/data/70.py
suliveevil/vista.vim
a0469c645dcbe4033b857da27d35491f39e2f776
[ "MIT" ]
358
2019-02-16T09:33:47.000Z
2022-03-25T03:51:38.000Z
test/data/70.py
suliveevil/vista.vim
a0469c645dcbe4033b857da27d35491f39e2f776
[ "MIT" ]
108
2019-02-16T06:55:59.000Z
2022-02-15T13:38:19.000Z
class Foo:
    class Bar:
        def baz(self):
            pass
13.2
22
0.454545
8
66
3.75
0.875
0
0
0
0
0
0
0
0
0
0
0
0.469697
66
4
23
16.5
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0.25
0
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
4d47c43ffa02b032c2aa6029613079eadfacf4e4
729
py
Python
pavucina.py
filipjenis/Python
fb5aa05caf175b46a4fb6e9830191218b8c4404b
[ "CNRI-Python" ]
null
null
null
pavucina.py
filipjenis/Python
fb5aa05caf175b46a4fb6e9830191218b8c4404b
[ "CNRI-Python" ]
null
null
null
pavucina.py
filipjenis/Python
fb5aa05caf175b46a4fb6e9830191218b8c4404b
[ "CNRI-Python" ]
2
2020-05-07T13:16:53.000Z
2020-06-01T16:53:57.000Z
from random import *
import tkinter

canvas = tkinter.Canvas(bg='black', width=1000, height=800)
canvas.pack()

x = 10
y = 10
for i in range(1, 26):
    x = x + 20
    y = y + 20
    canvas.create_line(x, 10, 510, y, fill='white', width=2)
    canvas.update()
    canvas.after(100)

x = x + 20
y = y + 20
for i in range(1, 26):
    x = x - 20
    y = y - 20
    canvas.create_line(x, 510, 30, y, fill='white', width=2)
    canvas.update()
    canvas.after(100)

x = x - 20
y = 510
for i in range(1, 26):
    x = x + 20
    y = y - 20
    canvas.create_line(x, 10, 30, y, fill='white', width=2)
    canvas.update()
    canvas.after(100)

x = x + 20
y = y - 20
for i in range(1, 26):
    x = x - 20
    y = y + 20
    canvas.create_line(x, 510, 510, y, fill='white', width=2)
    canvas.update()
    canvas.after(100)
17.780488
56
0.632373
149
729
3.067114
0.214765
0.030635
0.061269
0.076586
0.798687
0.798687
0.798687
0.798687
0.798687
0.798687
0
0.148398
0.186557
729
40
57
18.225
0.62226
0
0
0.694444
0
0
0.034294
0
0
0
0
0
0
1
0
false
0
0.055556
0
0.055556
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4d61e64a3bae8fae68693acb229013ab9bcd213d
67
py
Python
python_lessons/MtMk_Test_Files/Install_modules.py
1986MMartin/coding-sections-markus
e13be32e5d83e69250ecfb3c76a04ee48a320607
[ "Apache-2.0" ]
null
null
null
python_lessons/MtMk_Test_Files/Install_modules.py
1986MMartin/coding-sections-markus
e13be32e5d83e69250ecfb3c76a04ee48a320607
[ "Apache-2.0" ]
null
null
null
python_lessons/MtMk_Test_Files/Install_modules.py
1986MMartin/coding-sections-markus
e13be32e5d83e69250ecfb3c76a04ee48a320607
[ "Apache-2.0" ]
null
null
null
import pandas_datareader as pdr

print(pdr.get_data_fred('GS10'))
22.333333
33
0.791045
11
67
4.545455
0.909091
0
0
0
0
0
0
0
0
0
0
0.033333
0.104478
67
2
34
33.5
0.8
0
0
0
0
0
0.061538
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
4d9063821f7e37387458404241c009b7a34fe815
6,504
py
Python
Thirdparty/libwebp/build.py
stinvi/dava.engine
2b396ca49cdf10cdc98ad8a9ffcf7768a05e285e
[ "BSD-3-Clause" ]
26
2018-09-03T08:48:22.000Z
2022-02-14T05:14:50.000Z
Thirdparty/libwebp/build.py
ANHELL-blitz/dava.engine
ed83624326f000866e29166c7f4cccfed1bb41d4
[ "BSD-3-Clause" ]
null
null
null
Thirdparty/libwebp/build.py
ANHELL-blitz/dava.engine
ed83624326f000866e29166c7f4cccfed1bb41d4
[ "BSD-3-Clause" ]
45
2018-05-11T06:47:17.000Z
2022-02-03T11:30:55.000Z
import os
import shutil

import build_utils


def get_supported_targets(platform):
    if platform == 'win32':
        return ['win32', 'win10', 'android']
    elif platform == 'darwin':
        return ['macos', 'ios', 'android']
    elif platform == 'linux':
        return ['android', 'linux']
    else:
        return []


def get_dependencies_for_target(target):
    return []


def build_for_target(target, working_directory_path, root_project_path):
    if target == 'win32':
        _build_win32(working_directory_path, root_project_path)
    elif target == 'win10':
        _build_win10(working_directory_path, root_project_path)
    elif target == 'macos':
        _build_macos(working_directory_path, root_project_path)
    elif target == 'ios':
        _build_ios(working_directory_path, root_project_path)
    elif target == 'android':
        _build_android(working_directory_path, root_project_path)
    elif target == 'linux':
        _build_linux(working_directory_path, root_project_path)


def get_download_info():
    return 'https://github.com/webmproject/libwebp/archive/v0.4.3.tar.gz'


def _download_and_extract(working_directory_path):
    source_folder_path = os.path.join(working_directory_path, 'libwebp_source')
    url = get_download_info()
    build_utils.download_and_extract(
        url, working_directory_path, source_folder_path, 'libwebp-0.4.3')
    return source_folder_path


@build_utils.run_once
def _patch_sources(source_folder_path, working_directory_path):
    build_utils.apply_patch(
        os.path.abspath('patch_win.diff'), working_directory_path)


def _build_win32(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    # x86
    x86_env = build_utils.get_win32_vs_x86_env()
    build_utils.run_process(
        ['nmake', '-f', 'Makefile.vc', 'CFG=release-static',
         'RTLIBCFG=dynamic', 'OBJDIR=output'],
        process_cwd=source_folder_path, environment=x86_env, shell=True)
    build_utils.run_process(
        ['nmake', '-f', 'Makefile.vc', 'CFG=debug-static',
         'RTLIBCFG=dynamic', 'OBJDIR=output'],
        process_cwd=source_folder_path, environment=x86_env, shell=True)

    # x64
    x64_env = build_utils.get_win32_vs_x64_env()
    build_utils.run_process(
        ['nmake', '-f', 'Makefile.vc', 'CFG=release-static',
         'RTLIBCFG=dynamic', 'OBJDIR=output'],
        process_cwd=source_folder_path, environment=x64_env, shell=True)
    build_utils.run_process(
        ['nmake', '-f', 'Makefile.vc', 'CFG=debug-static',
         'RTLIBCFG=dynamic', 'OBJDIR=output'],
        process_cwd=source_folder_path, environment=x64_env, shell=True)

    libs_win_root = os.path.join(root_project_path, 'Libs/lib_CMake/win')
    shutil.copyfile(
        os.path.join(source_folder_path, 'output/debug-static/x86/lib/libwebp_debug.lib'),
        os.path.join(libs_win_root, 'x86/Debug/libwebp.lib'))
    shutil.copyfile(
        os.path.join(source_folder_path, 'output/release-static/x86/lib/libwebp.lib'),
        os.path.join(libs_win_root, 'x86/Release/libwebp.lib'))
    shutil.copyfile(
        os.path.join(source_folder_path, 'output/debug-static/x64/lib/libwebp_debug.lib'),
        os.path.join(libs_win_root, 'x64/Debug/libwebp.lib'))
    shutil.copyfile(
        os.path.join(source_folder_path, 'output/release-static/x64/lib/libwebp.lib'),
        os.path.join(libs_win_root, 'x64/Release/libwebp.lib'))

    _copy_headers(source_folder_path, root_project_path)


def _build_win10(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    build_utils.build_and_copy_libraries_win10_cmake(
        os.path.join(working_directory_path, 'gen'),
        source_folder_path,
        root_project_path,
        'libwebp.sln', 'webp',
        'webp.lib', 'webp.lib',
        'libwebp.lib', 'libwebp.lib',
        'libwebp.lib', 'libwebp.lib',
        'libwebp.lib', 'libwebp.lib',
        ['-DCMAKE_SYSTEM_PROCESSOR=arm'])

    _copy_headers(source_folder_path, root_project_path)


def _build_macos(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    build_utils.build_and_copy_libraries_macos_cmake(
        os.path.join(working_directory_path, 'gen'),
        source_folder_path,
        root_project_path,
        'libwebp.xcodeproj', 'webp',
        'libwebp.a', 'libwebp.a')

    _copy_headers(source_folder_path, root_project_path)


def _build_ios(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    build_utils.build_and_copy_libraries_ios_cmake(
        os.path.join(working_directory_path, 'gen'),
        source_folder_path,
        root_project_path,
        'libwebp.xcodeproj', 'webp',
        'libwebp.a', 'libwebp.a')

    _copy_headers(source_folder_path, root_project_path)


def _build_android(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    build_utils.build_and_copy_libraries_android_cmake(
        os.path.join(working_directory_path, 'gen'),
        source_folder_path,
        root_project_path,
        'libwebp.a', 'libwebp.a',
        arm_abi='armeabi-v7a')

    _copy_headers(source_folder_path, root_project_path)


def _build_linux(working_directory_path, root_project_path):
    source_folder_path = _download_and_extract(working_directory_path)
    _patch_sources(source_folder_path, working_directory_path)

    build_utils.build_and_copy_libraries_linux_cmake(
        gen_folder_path=os.path.join(working_directory_path, 'gen'),
        source_folder_path=source_folder_path,
        root_project_path=root_project_path,
        target="all",
        lib_name='libwebp.a')

    _copy_headers(source_folder_path, root_project_path)


def _copy_headers(source_folder_path, root_project_path):
    include_path = os.path.join(root_project_path, 'Libs/include/webp')
    build_utils.copy_files(
        os.path.join(source_folder_path, 'src/webp'), include_path, '*.h')
35.347826
98
0.71802
841
6,504
5.104637
0.122473
0.090846
0.141626
0.115071
0.813883
0.813184
0.762404
0.748894
0.641975
0.626136
0
0.012479
0.174508
6,504
183
99
35.540984
0.787111
0.001076
0
0.478873
0
0.007042
0.155836
0.044349
0
0
0
0
0
1
0.091549
false
0
0.021127
0.014085
0.161972
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4d96e335fcfbb51e0f4654cdcde1b1f9004ee887
23,433
py
Python
tests/finance/test_slippage.py
nathanwolfe/zipline-minute-bars
bcc6532731503c4521c6f7c4f9ee5e7ee545c013
[ "Apache-2.0" ]
null
null
null
tests/finance/test_slippage.py
nathanwolfe/zipline-minute-bars
bcc6532731503c4521c6f7c4f9ee5e7ee545c013
[ "Apache-2.0" ]
null
null
null
tests/finance/test_slippage.py
nathanwolfe/zipline-minute-bars
bcc6532731503c4521c6f7c4f9ee5e7ee545c013
[ "Apache-2.0" ]
null
null
null
# # Copyright 2013 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Unit tests for finance.slippage ''' import datetime import pytz from nose_parameterized import parameterized import pandas as pd from pandas.tslib import normalize_date from zipline.finance.slippage import VolumeShareSlippage from zipline.protocol import DATASOURCE_TYPE from zipline.finance.blotter import Order from zipline.data.data_portal import DataPortal from zipline.protocol import BarData from zipline.testing import tmp_bcolz_equity_minute_bar_reader from zipline.testing.fixtures import ( WithDataPortal, WithSimParams, ZiplineTestCase, ) class SlippageTestCase(WithSimParams, WithDataPortal, ZiplineTestCase): START_DATE = pd.Timestamp('2006-01-05 14:31', tz='utc') END_DATE = pd.Timestamp('2006-01-05 14:36', tz='utc') SIM_PARAMS_CAPITAL_BASE = 1.0e5 SIM_PARAMS_DATA_FREQUENCY = 'minute' SIM_PARAMS_EMISSION_RATE = 'daily' ASSET_FINDER_EQUITY_SIDS = (133,) ASSET_FINDER_EQUITY_START_DATE = pd.Timestamp('2006-01-05', tz='utc') ASSET_FINDER_EQUITY_END_DATE = pd.Timestamp('2006-01-07', tz='utc') minutes = pd.DatetimeIndex( start=START_DATE, end=END_DATE - pd.Timedelta('1 minute'), freq='1min' ) @classmethod def make_equity_minute_bar_data(cls): yield 133, pd.DataFrame( { 'open': [3.0, 3.0, 3.5, 4.0, 3.5], 'high': [3.15, 3.15, 3.15, 3.15, 3.15], 'low': [2.85, 2.85, 2.85, 2.85, 2.85], 'close': [3.0, 3.5, 4.0, 3.5, 3.0], 'volume': [2000, 2000, 2000, 2000, 2000], }, index=cls.minutes, ) @classmethod def init_class_fixtures(cls): super(SlippageTestCase, cls).init_class_fixtures() cls.ASSET133 = cls.env.asset_finder.retrieve_asset(133) def test_volume_share_slippage(self): assets = ( (133, pd.DataFrame( { 'open': [3.00], 'high': [3.15], 'low': [2.85], 'close': [3.00], 'volume': [200], }, index=[self.minutes[0]], )), ) days = pd.date_range( start=normalize_date(self.minutes[0]), end=normalize_date(self.minutes[-1]) ) with tmp_bcolz_equity_minute_bar_reader(self.trading_calendar, days, assets) \ as reader: data_portal = DataPortal( self.env.asset_finder, self.trading_calendar, first_trading_day=reader.first_trading_day, equity_minute_reader=reader, ) slippage_model = VolumeShareSlippage() open_orders = [ Order( dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), amount=100, filled=0, sid=self.ASSET133 ) ] bar_data = BarData(data_portal, lambda: self.minutes[0], 'minute') orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 1) _, txn = orders_txns[0] expected_txn = { 'price': float(3.0001875), 'dt': datetime.datetime( 2006, 1, 5, 14, 31, tzinfo=pytz.utc), 'amount': int(5), 'sid': int(133), 'commission': None, 'type': DATASOURCE_TYPE.TRANSACTION, 'order_id': open_orders[0].id } self.assertIsNotNone(txn) # TODO: Make expected_txn an Transaction object and ensure there # is a __eq__ for that class. 
self.assertEquals(expected_txn, txn.__dict__) open_orders = [ Order( dt=datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), amount=100, filled=0, sid=self.ASSET133 ) ] # Set bar_data to be a minute ahead of last trade. # Volume share slippage should not execute when there is no trade. bar_data = BarData(data_portal, lambda: self.minutes[1], 'minute') orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) def test_orders_limit(self): slippage_model = VolumeShareSlippage() slippage_model.data_portal = self.data_portal # long, does not trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': 100, 'filled': 0, 'sid': self.ASSET133, 'limit': 3.5}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[3], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # long, does not trade - impacted price worse than limit price open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': 100, 'filled': 0, 'sid': self.ASSET133, 'limit': 3.5}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[3], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # long, does trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': 100, 'filled': 0, 'sid': self.ASSET133, 'limit': 3.6}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[3], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 1) txn = orders_txns[0][1] expected_txn = { 'price': float(3.50021875), 'dt': datetime.datetime( 2006, 1, 5, 14, 34, tzinfo=pytz.utc), # we ordered 100 shares, but default volume slippage only allows # for 2.5% of the volume. 
2.5% * 2000 = 50 shares 'amount': int(50), 'sid': int(133), 'order_id': open_orders[0].id } self.assertIsNotNone(txn) for key, value in expected_txn.items(): self.assertEquals(value, txn[key]) # short, does not trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': -100, 'filled': 0, 'sid': self.ASSET133, 'limit': 3.5}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[0], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # short, does not trade - impacted price worse than limit price open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': -100, 'filled': 0, 'sid': self.ASSET133, 'limit': 3.5}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[0], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # short, does trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': -100, 'filled': 0, 'sid': self.ASSET133, 'limit': 3.4}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[1], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 1) _, txn = orders_txns[0] expected_txn = { 'price': float(3.49978125), 'dt': datetime.datetime( 2006, 1, 5, 14, 32, tzinfo=pytz.utc), 'amount': int(-50), 'sid': int(133) } self.assertIsNotNone(txn) for key, value in expected_txn.items(): self.assertEquals(value, txn[key]) STOP_ORDER_CASES = { # Stop orders can be long/short and have their price greater or # less than the stop. # # A stop being reached is conditional on the order direction. # Long orders reach the stop when the price is greater than the stop. # Short orders reach the stop when the price is less than the stop. # # Which leads to the following 4 cases: # # | long | short | # | price > stop | | | # | price < stop | | | # # Currently the slippage module acts according to the following table, # where 'X' represents triggering a transaction # | long | short | # | price > stop | | X | # | price < stop | X | | # # However, the following behavior *should* be followed. 
# # | long | short | # | price > stop | X | | # | price < stop | | X | 'long | price gt stop': { 'order': { 'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'), 'amount': 100, 'filled': 0, 'sid': 133, 'stop': 3.5 }, 'event': { 'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'), 'volume': 2000, 'price': 4.0, 'high': 3.15, 'low': 2.85, 'sid': 133, 'close': 4.0, 'open': 3.5 }, 'expected': { 'transaction': { 'price': 4.00025, 'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'), 'amount': 50, 'sid': 133, } } }, 'long | price lt stop': { 'order': { 'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'), 'amount': 100, 'filled': 0, 'sid': 133, 'stop': 3.6 }, 'event': { 'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'), 'volume': 2000, 'price': 3.5, 'high': 3.15, 'low': 2.85, 'sid': 133, 'close': 3.5, 'open': 4.0 }, 'expected': { 'transaction': None } }, 'short | price gt stop': { 'order': { 'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'), 'amount': -100, 'filled': 0, 'sid': 133, 'stop': 3.4 }, 'event': { 'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'), 'volume': 2000, 'price': 3.5, 'high': 3.15, 'low': 2.85, 'sid': 133, 'close': 3.5, 'open': 3.0 }, 'expected': { 'transaction': None } }, 'short | price lt stop': { 'order': { 'dt': pd.Timestamp('2006-01-05 14:30', tz='UTC'), 'amount': -100, 'filled': 0, 'sid': 133, 'stop': 3.5 }, 'event': { 'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'), 'volume': 2000, 'price': 3.0, 'high': 3.15, 'low': 2.85, 'sid': 133, 'close': 3.0, 'open': 3.0 }, 'expected': { 'transaction': { 'price': 2.9998125, 'dt': pd.Timestamp('2006-01-05 14:31', tz='UTC'), 'amount': -50, 'sid': 133, } } }, } @parameterized.expand([ (name, case['order'], case['event'], case['expected']) for name, case in STOP_ORDER_CASES.items() ]) def test_orders_stop(self, name, order_data, event_data, expected): data = order_data data['sid'] = self.ASSET133 order = Order(**data) assets = ( (133, pd.DataFrame( { 'open': [event_data['open']], 'high': [event_data['high']], 'low': [event_data['low']], 'close': [event_data['close']], 'volume': [event_data['volume']], }, index=[pd.Timestamp('2006-01-05 14:31', tz='UTC')], )), ) days = pd.date_range( start=normalize_date(self.minutes[0]), end=normalize_date(self.minutes[-1]) ) with tmp_bcolz_equity_minute_bar_reader(self.trading_calendar, days, assets) \ as reader: data_portal = DataPortal( self.env.asset_finder, self.trading_calendar, first_trading_day=reader.first_trading_day, equity_minute_reader=reader, ) slippage_model = VolumeShareSlippage() try: dt = pd.Timestamp('2006-01-05 14:31', tz='UTC') bar_data = BarData(data_portal, lambda: dt, 'minute') _, txn = next(slippage_model.simulate( bar_data, self.ASSET133, [order], )) except StopIteration: txn = None if expected['transaction'] is None: self.assertIsNone(txn) else: self.assertIsNotNone(txn) for key, value in expected['transaction'].items(): self.assertEquals(value, txn[key]) def test_orders_stop_limit(self): slippage_model = VolumeShareSlippage() slippage_model.data_portal = self.data_portal # long, does not trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': 100, 'filled': 0, 'sid': self.ASSET133, 'stop': 4.0, 'limit': 3.0}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[2], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) bar_data = BarData(self.data_portal, lambda: self.minutes[3], self.sim_params.data_frequency) orders_txns = 
list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # long, does not trade - impacted price worse than limit price open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': 100, 'filled': 0, 'sid': self.ASSET133, 'stop': 4.0, 'limit': 3.5}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[2], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) bar_data = BarData(self.data_portal, lambda: self.minutes[3], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # long, does trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': 100, 'filled': 0, 'sid': self.ASSET133, 'stop': 4.0, 'limit': 3.6}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[2], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) bar_data = BarData(self.data_portal, lambda: self.minutes[3], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 1) _, txn = orders_txns[0] expected_txn = { 'price': float(3.50021875), 'dt': datetime.datetime( 2006, 1, 5, 14, 34, tzinfo=pytz.utc), 'amount': int(50), 'sid': int(133) } for key, value in expected_txn.items(): self.assertEquals(value, txn[key]) # short, does not trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': -100, 'filled': 0, 'sid': self.ASSET133, 'stop': 3.0, 'limit': 4.0}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[0], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) bar_data = BarData(self.data_portal, lambda: self.minutes[1], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # short, does not trade - impacted price worse than limit price open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': -100, 'filled': 0, 'sid': self.ASSET133, 'stop': 3.0, 'limit': 3.5}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[0], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) bar_data = BarData(self.data_portal, lambda: self.minutes[1], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) # short, does trade open_orders = [ Order(**{ 'dt': datetime.datetime(2006, 1, 5, 14, 30, tzinfo=pytz.utc), 'amount': -100, 'filled': 0, 'sid': self.ASSET133, 'stop': 3.0, 'limit': 3.4}) ] bar_data = BarData(self.data_portal, lambda: self.minutes[0], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) self.assertEquals(len(orders_txns), 0) bar_data = BarData(self.data_portal, lambda: self.minutes[1], self.sim_params.data_frequency) orders_txns = list(slippage_model.simulate( bar_data, self.ASSET133, open_orders, )) 
self.assertEquals(len(orders_txns), 1) _, txn = orders_txns[0] expected_txn = { 'price': float(3.49978125), 'dt': datetime.datetime( 2006, 1, 5, 14, 32, tzinfo=pytz.utc), 'amount': int(-50), 'sid': int(133) } for key, value in expected_txn.items(): self.assertEquals(value, txn[key])
31.119522
86
0.463193
2,340
23,433
4.493162
0.117094
0.0428
0.027963
0.047936
0.749857
0.738159
0.71809
0.705916
0.678048
0.666825
0
0.071365
0.42534
23,433
752
87
31.160904
0.709416
0.092818
0
0.73494
0
0
0.061524
0
0
0
0
0.00133
0.053356
1
0.010327
false
0
0.020654
0
0.049914
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4da96ab0a9cc14afad4ace395ccd5246ac5b2156
59
py
Python
call.py
hearteam/Linebot_project
532b81d3c8bd1a658e0ec8f1bf473ee3fa4d232d
[ "MIT" ]
null
null
null
call.py
hearteam/Linebot_project
532b81d3c8bd1a658e0ec8f1bf473ee3fa4d232d
[ "MIT" ]
null
null
null
call.py
hearteam/Linebot_project
532b81d3c8bd1a658e0ec8f1bf473ee3fa4d232d
[ "MIT" ]
2
2021-08-24T13:21:24.000Z
2021-08-25T02:18:51.000Z
from ECdic import ECdic

print(ECdic().EtoC("tofu_skin"))
19.666667
32
0.728814
9
59
4.666667
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.118644
59
3
32
19.666667
0.807692
0
0
0
0
0
0.155172
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
4dc4b3d941d4972ee4a3a2d0bc36f411bcc1a7ce
101
py
Python
exceptions.py
AmitaiF/Dlu-Bot
fb5e5a04f550b951f6299e0be302a9295653c58d
[ "MIT" ]
null
null
null
exceptions.py
AmitaiF/Dlu-Bot
fb5e5a04f550b951f6299e0be302a9295653c58d
[ "MIT" ]
null
null
null
exceptions.py
AmitaiF/Dlu-Bot
fb5e5a04f550b951f6299e0be302a9295653c58d
[ "MIT" ]
null
null
null
class NoLastBookException(Exception):
    pass


class OpenLastBookFileFailed(Exception):
    pass
14.428571
41
0.762376
8
101
9.625
0.625
0.337662
0
0
0
0
0
0
0
0
0
0
0.178218
101
6
42
16.833333
0.927711
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
4dd28c9262f1dde6c2aeae961768059f14bd1e6d
42
py
Python
helper.py
mollysall/cs3240-labdemo
c08212df44433daf99e143d8a07a8a5a67c9f018
[ "MIT" ]
null
null
null
helper.py
mollysall/cs3240-labdemo
c08212df44433daf99e143d8a07a8a5a67c9f018
[ "MIT" ]
null
null
null
helper.py
mollysall/cs3240-labdemo
c08212df44433daf99e143d8a07a8a5a67c9f018
[ "MIT" ]
null
null
null
def greeting(message):
    print(message)
14
22
0.714286
5
42
6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.166667
42
2
23
21
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0
0.5
0.5
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
1
0
5
4ddb0a4290fd4d8c2b24e222d292bc86af6fdb3c
174
py
Python
alphazero/__init__.py
zhiyiYo/Alpha-Gobang-Zero
b0e90ae456b02754956be83a0d6495391390e666
[ "MIT" ]
14
2021-04-01T14:19:10.000Z
2022-03-17T06:29:35.000Z
alphazero/__init__.py
zhiyiYo/Alpha-Gobang-Zero
b0e90ae456b02754956be83a0d6495391390e666
[ "MIT" ]
1
2021-06-20T13:21:52.000Z
2021-06-22T12:41:06.000Z
alphazero/__init__.py
zhiyiYo/Alpha-Gobang-Zero
b0e90ae456b02754956be83a0d6495391390e666
[ "MIT" ]
4
2021-06-24T13:18:19.000Z
2021-12-26T06:00:54.000Z
from .alpha_zero_mcts import AlphaZeroMCTS
from .chess_board import ChessBoard, ColorError
from .policy_value_net import PolicyValueNet
from .rollout_mcts import RolloutMCTS
34.8
47
0.873563
23
174
6.347826
0.695652
0.136986
0
0
0
0
0
0
0
0
0
0
0.097701
174
4
48
43.5
0.929936
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4de08cf4ab49f9964f23248e0ce6ed7921005f46
152
py
Python
mpa/modules/datasets/pipelines/__init__.py
openvinotoolkit/model_preparation_algorithm
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
[ "Apache-2.0" ]
null
null
null
mpa/modules/datasets/pipelines/__init__.py
openvinotoolkit/model_preparation_algorithm
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
[ "Apache-2.0" ]
null
null
null
mpa/modules/datasets/pipelines/__init__.py
openvinotoolkit/model_preparation_algorithm
8d36bf5944837b7a3d22fc2c3a4cb93423619fc2
[ "Apache-2.0" ]
null
null
null
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# flake8: noqa

from . import transforms
from . import torchvision2mmdet
19
38
0.763158
19
152
6.105263
0.894737
0.172414
0
0
0
0
0
0
0
0
0
0.061538
0.144737
152
7
39
21.714286
0.830769
0.559211
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4dfca92764519d80dcb93e207e757fbd87a987ee
177
py
Python
put_together.py
Mattjez914/Blackjack_Microchallenge
c4f60b62a3ada14663eb30ce72563af994e1eda4
[ "Apache-2.0" ]
null
null
null
put_together.py
Mattjez914/Blackjack_Microchallenge
c4f60b62a3ada14663eb30ce72563af994e1eda4
[ "Apache-2.0" ]
null
null
null
put_together.py
Mattjez914/Blackjack_Microchallenge
c4f60b62a3ada14663eb30ce72563af994e1eda4
[ "Apache-2.0" ]
1
2019-04-17T06:12:23.000Z
2019-04-17T06:12:23.000Z
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex3 import q7 as blackjack
from should_hit import should_hit

blackjack.simulate_one_game()
19.666667
58
0.819209
26
177
5.423077
0.653846
0.198582
0
0
0
0
0
0
0
0
0
0.012739
0.112994
177
8
59
22.125
0.88535
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
127f0cffd99e7aef789ca8d2014b9af03881a033
210
py
Python
panelserverextension.py
bjrnfrdnnd/panel-test
4609a259e749825b2a2012d8a7e48ed8e8a78deb
[ "MIT" ]
null
null
null
panelserverextension.py
bjrnfrdnnd/panel-test
4609a259e749825b2a2012d8a7e48ed8e8a78deb
[ "MIT" ]
1
2019-07-26T22:12:19.000Z
2019-10-31T17:48:51.000Z
panelserverextension.py
bjrnfrdnnd/panel-test
4609a259e749825b2a2012d8a7e48ed8e8a78deb
[ "MIT" ]
1
2019-09-19T11:54:45.000Z
2019-09-19T11:54:45.000Z
from subprocess import Popen


def load_jupyter_server_extension(nbapp):
    """serve the dnmr_ab.ipynb directory with bokeh server"""
    Popen(["panel", "serve", "dnmr_ab.ipynb", "--allow-websocket-origin=*"])
42
76
0.728571
28
210
5.285714
0.785714
0.081081
0.148649
0
0
0
0
0
0
0
0
0
0.119048
210
5
76
42
0.8
0.242857
0
0
0
0
0.318182
0.168831
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
5
128804fe67b902d3aaa44bde8604d4a0af8e6a6c
121
py
Python
pyDataProcesser/IO/__init__.py
Psicowired87/pyProcesser
8c45f98869cddd833442908a0616a329ce4a2085
[ "MIT" ]
null
null
null
pyDataProcesser/IO/__init__.py
Psicowired87/pyProcesser
8c45f98869cddd833442908a0616a329ce4a2085
[ "MIT" ]
null
null
null
pyDataProcesser/IO/__init__.py
Psicowired87/pyProcesser
8c45f98869cddd833442908a0616a329ce4a2085
[ "MIT" ]
null
null
null
from aux_functions import get_extension_file
from parse_dataframe import parse_manual_csv, parse_xlsx, parse_dataframe
24.2
73
0.884298
18
121
5.5
0.666667
0.282828
0
0
0
0
0
0
0
0
0
0
0.099174
121
4
74
30.25
0.908257
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
12b4a84f93b547a8346793bc37e9f589bde6a3e7
242
py
Python
django18/views.py
dresl/django18-bootstrap
6e17572f8fbb0cacd2ca1e56c3a3fb5f276d4de9
[ "Apache-2.0" ]
null
null
null
django18/views.py
dresl/django18-bootstrap
6e17572f8fbb0cacd2ca1e56c3a3fb5f276d4de9
[ "Apache-2.0" ]
null
null
null
django18/views.py
dresl/django18-bootstrap
6e17572f8fbb0cacd2ca1e56c3a3fb5f276d4de9
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import render


def home(request):
    return render(request, 'apps/about.html', {})


def about(request):
    return render(request, 'apps/about.html', {})


def contact(request):
    return render(request, 'apps/contact.html', {})
24.2
48
0.72314
32
242
5.46875
0.40625
0.222857
0.325714
0.445714
0.651429
0.48
0.48
0.48
0
0
0
0
0.11157
242
10
48
24.2
0.813953
0
0
0.285714
0
0
0.193416
0
0
0
0
0
0
1
0.428571
false
0
0.142857
0.428571
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
0
0
0
5
12c7b5d233df00c8d230d020d16216b5d702ed04
1,235
py
Python
tests/unit/bokeh/model/test_data_model.py
g-parki/bokeh
664ead5306bba64609e734d4105c8aa8cfb76d81
[ "BSD-3-Clause" ]
null
null
null
tests/unit/bokeh/model/test_data_model.py
g-parki/bokeh
664ead5306bba64609e734d4105c8aa8cfb76d81
[ "BSD-3-Clause" ]
null
null
null
tests/unit/bokeh/model/test_data_model.py
g-parki/bokeh
664ead5306bba64609e734d4105c8aa8cfb76d81
[ "BSD-3-Clause" ]
null
null
null
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip

import pytest ; pytest

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

# Module under test
import bokeh.model.data_model as bmd # isort:skip

#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------

def test_DataModel() -> None:
    assert bmd.DataModel.__data_model__ is True
38.59375
78
0.269636
62
1,235
5.193548
0.758065
0.055901
0
0
0
0
0
0
0
0
0
0.006969
0.070445
1235
31
79
39.83871
0.273519
0.820243
0
0
0
0
0
0
0
0
0
0
0.2
1
0.2
true
0
0.6
0
0.8
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
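qsc_code_frac_chars_comments is high (about 0.82) for the banner-heavy Bokeh test above. Below is a hedged sketch of one plausible definition using the standard tokenize module; whether the real signal also counts docstrings, and what denominator it uses, is not documented in this dump.

```python
import io
import tokenize

def frac_chars_comments(source: str) -> float:
    """Fraction of characters inside '#' comments (assumed definition)."""
    comment_chars = 0
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.COMMENT:
            comment_chars += len(tok.string)
    return comment_chars / len(source) if source else 0.0
```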
12e414fc3bf00e6152f953b989914f034edfe9e1
45
py
Python
crabageprediction/venv/Lib/site-packages/fontTools/otlLib/__init__.py
13rianlucero/CrabAgePrediction
92bc7fbe1040f49e820473e33cc3902a5a7177c7
[ "MIT" ]
38667
2015-01-01T00:15:34.000Z
2022-03-31T22:57:03.000Z
crabageprediction/venv/Lib/site-packages/fontTools/otlLib/__init__.py
13rianlucero/CrabAgePrediction
92bc7fbe1040f49e820473e33cc3902a5a7177c7
[ "MIT" ]
1599
2016-09-27T09:07:36.000Z
2022-03-31T23:04:51.000Z
crabageprediction/venv/Lib/site-packages/fontTools/otlLib/__init__.py
13rianlucero/CrabAgePrediction
92bc7fbe1040f49e820473e33cc3902a5a7177c7
[ "MIT" ]
11269
2015-01-01T08:41:17.000Z
2022-03-31T16:12:52.000Z
"""OpenType Layout-related functionality."""
22.5
44
0.755556
4
45
8.5
1
0
0
0
0
0
0
0
0
0
0
0
0.066667
45
1
45
45
0.809524
0.844444
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
12e6c2e1c26f603e0570b41f2a7cf244db2c1b53
11601
py
Python
src/unittest/python/install_utils_tests.py
klr8/pybuilder
2812021c18ce850009ce5ec7f7c18195eff73b10
[ "Apache-2.0" ]
1419
2015-01-02T20:51:04.000Z
2022-03-23T21:26:00.000Z
src/unittest/python/install_utils_tests.py
klr8/pybuilder
2812021c18ce850009ce5ec7f7c18195eff73b10
[ "Apache-2.0" ]
670
2015-01-01T10:26:03.000Z
2022-02-23T16:33:13.000Z
src/unittest/python/install_utils_tests.py
klr8/pybuilder
2812021c18ce850009ce5ec7f7c18195eff73b10
[ "Apache-2.0" ]
270
2015-01-02T05:01:53.000Z
2022-01-20T10:22:59.000Z
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from os.path import normcase as nc, join as jp

from pybuilder.core import (Project, Logger, Dependency, RequirementsFile)
from pybuilder.install_utils import install_dependencies
from pybuilder.pip_utils import PIP_MODULE_STANZA
from pybuilder.plugins.python.install_dependencies_plugin import initialize_install_dependencies_plugin
from test_utils import Mock, ANY, patch

__author__ = "Arcadiy Ivanov"


class InstallDependencyTest(unittest.TestCase):
    def setUp(self):
        self.project = Project("unittest", ".")
        self.project.set_property("dir_install_logs", "any_directory")
        self.project.set_property("dir_target", "/any_target_directory")
        self.logger = Mock(Logger)
        self.pyb_env = Mock()
        self.pyb_env.executable = ["exec"]
        self.pyb_env.site_paths = []
        self.pyb_env.env_dir = "a"
        self.pyb_env.execute_command.return_value = 0
        initialize_install_dependencies_plugin(self.project)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_requirements_file_dependency(self, *_):
        dependency = RequirementsFile("requirements.txt")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "-r", "requirements.txt"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_without_version(self, *_):
        dependency = Dependency("spam")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch",
                             constraints_file_name="constraint_file")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "-c", nc(jp(self.pyb_env.env_dir, "constraint_file")), "spam"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_without_version_on_windows_derivate(self, *_):
        dependency = Dependency("spam")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA + ["install", "spam"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_insecurely_when_property_is_set(self, *_):
        dependency = Dependency("spam")
        self.project.set_property("install_dependencies_insecure_installation", ["spam"])

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "--allow-unverified", "spam", "--allow-external", "spam", "spam"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_securely_when_property_is_not_set_to_dependency(self, *_):
        dependency = Dependency("spam")
        self.project.set_property("install_dependencies_insecure_installation",
                                  ["some-other-dependency"])

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch",
                             constraints_file_name="constraint_file")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "-c", ANY,
             "--allow-unverified", "some-other-dependency",
             "--allow-external", "some-other-dependency", "spam"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)
        # some-other-dependency might be a dependency of "spam"
        # so we always have to put the insecure dependencies in the command line :-(

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_using_custom_index_url(self, *_):
        self.project.set_property("install_dependencies_index_url", "some_index_url")
        dependency = Dependency("spam")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "--index-url", "some_index_url", "spam"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_use_extra_index_url_when_index_url_is_not_set(self, *_):
        self.project.set_property("install_dependencies_extra_index_url", "some_extra_index_url")
        dependency = Dependency("spam")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "--extra-index-url", "some_extra_index_url", "spam"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_use_index_and_extra_index_url_when_index_and_extra_index_url_are_set(self, *_):
        self.project.set_property("install_dependencies_index_url", "some_index_url")
        self.project.set_property("install_dependencies_extra_index_url", "some_extra_index_url")
        dependency = Dependency("spam")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install",
             "--index-url", "some_index_url",
             "--extra-index-url", "some_extra_index_url", "spam"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_with_version(self, *_):
        dependency = Dependency("spam", "0.1.2")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA + ["install", "spam>=0.1.2"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_with_version_and_operator(self, *_):
        dependency = Dependency("spam", "==0.1.2")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA + ["install", "spam==0.1.2"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    def test_should_install_dependency_with_wrong_version_and_operator(self):
        self.assertRaises(ValueError, Dependency, "spam", "~=1")

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_with_url(self, *_):
        dependency = Dependency("spam", url="some_url")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "--force-reinstall", "some_url"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)

    @patch("pybuilder.install_utils.tail_log")
    @patch("pybuilder.install_utils.open")
    @patch("pybuilder.install_utils.create_constraint_file")
    @patch("pybuilder.install_utils.get_packages_info", return_value={})
    def test_should_install_dependency_with_url_even_if_version_is_given(self, *_):
        dependency = Dependency("spam", version="0.1.2", url="some_url")

        install_dependencies(self.logger, self.project, dependency, self.pyb_env,
                             "install_batch")

        self.pyb_env.execute_command.assert_called_with(
            self.pyb_env.executable + PIP_MODULE_STANZA +
            ["install", "--force-reinstall", "some_url"],
            cwd=ANY, env=ANY, error_file_name=ANY, outfile_name=ANY,
            shell=False, no_path_search=True)
50.659389
110
0.717697
1469
11601
5.319946
0.124575
0.10032
0.13167
0.159693
0.782598
0.760333
0.755982
0.755982
0.754703
0.754703
0
0.003101
0.16602
11601
228
111
50.881579
0.804651
0.065684
0
0.664634
0
0
0.269021
0.190811
0
0
0
0
0.079268
1
0.085366
false
0
0.042683
0
0.134146
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
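The qsc_code_frac_chars_dupe_5grams through dupe_10grams fields sit above 0.75 for the PyBuilder test module above, whose decorator stanzas and assert blocks repeat almost verbatim. Below is a sketch of one plausible definition (character mass of words covered by a word n-gram that occurs more than once); the whitespace tokenization and the coverage rule are assumptions, since the dump does not include the real implementation.

```python
from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int) -> float:
    """Approximate the duplicated-n-gram character fraction (assumed definition)."""
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:           # this n-gram appears elsewhere too
            for j in range(i, i + n):
                covered[j] = True
    total = sum(len(w) for w in words)
    duplicated = sum(len(w) for w, hit in zip(words, covered) if hit)
    return duplicated / total if total else 0.0
```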
4219cd53dd0257d94d5d48f540651b8297643d47
9302
py
Python
pycotcp/pycotcp/adapter.py
matteobarato/pyvdetelweb
a49da458536aca4f0efa407b6db55c1455c3f75c
[ "MIT" ]
1
2018-09-19T11:28:05.000Z
2018-09-19T11:28:05.000Z
pycotcp/pycotcp/adapter.py
matteobarato/pyvdetelweb
a49da458536aca4f0efa407b6db55c1455c3f75c
[ "MIT" ]
null
null
null
pycotcp/pycotcp/adapter.py
matteobarato/pyvdetelweb
a49da458536aca4f0efa407b6db55c1455c3f75c
[ "MIT" ]
null
null
null
#!/usr/bin/env python


class Adapter:
    NOT_IMPLEMENTED = "Not yet implemented"

    def __init__(self):
        print "initing adapter"
        pass

    def testfunc(self):
        print "I'm an adapter"

    def deleteLink4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def deleteLink6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def deleteSocketBox(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def createDevice(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def createMreq(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def deleteMreq(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def createMreqSource(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def deleteMreqSource(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def createKvVector(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def deleteKvVector(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def createRTree(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def stackTick(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def idle(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def getError(self): #was getPicoError
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isNetmaskIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isUnicastIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def findSourceIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def natEnableIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def natDisableIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def linkFindIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def linkGetIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def linkDelIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routeAddIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routeDelIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def portForwardIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routeGetGatewayIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pingStartIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pingAbortIp4(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isMulticastIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isUnicastIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isGlobalIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isUniqueLocalIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isSiteLocalIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isLocalHostIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def isUnspecifiedIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def findSourceIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def linkFindIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def linkAddIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def linkAddIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routeAddIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routeDelIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routingEnableIpv6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routingDisableIpv6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def routeGetGatewayIp6(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketOpen(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketBind(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketConnect(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketSend(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketRecv(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketRecvFrom(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketRecvFromExt(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketWrite(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketRead(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketClose(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketShutdown(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketListen(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketAccept(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketSendTo(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketSendToExt(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketgetName(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketGetPeerName(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketSetOption(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketSetOptionMreq(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketSetOptionMreqSource(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def socketGetOption(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dhcpClientInitiate(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dhcpClientAbort(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dhcpServerInitiate(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dhcpServerDestroy(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def sntpSync(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def sntpGetTimeOfTheDay(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def slaacv4UnregisterIP(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dnsNameServer(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dnsGetAddr(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dnsGetName(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def filterIpv4Add(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def filterIpv4Del(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def olsrAdd(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def aodvAdd(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppSetSerialread(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppSetSerialWrite(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppSetSerialSpeed(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppSetAPN(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppSetUsername(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppSetPassword(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppConnect(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def pppDisconnect(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dnssdInit(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dnssdRegisterService(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def dnssdKVVectorAdd(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def mdnsInit(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def mdnsGetHostname(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def mdnsSetHostname(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def mdnsClaim(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def mdnsGetRecord(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def mdnsRecordCreate(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def mdnsIsHostnameRecord(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpListen(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpRejectRequest(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpSessionSetup(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpSetOption(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpGetOption(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpParseRequestArgs(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpSend(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpCloseServer(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpAppSetup(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpAppStartRx(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpAppStartTx(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpGet(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpPut(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpStartTx(self):
        raise NotImplementedError(NOT_IMPLEMENTED)

    def tftpStartRx(self):
        raise NotImplementedError(NOT_IMPLEMENTED)
26.653295
50
0.72608
812
9302
8.173645
0.165025
0.238361
0.455628
0.641254
0.761338
0.748832
0.023354
0.023354
0.023354
0.023354
0
0.004772
0.21146
9302
348
51
26.729885
0.900068
0.00387
0
0.493506
0
0
0.005181
0
0
0
0
0
0
0
null
null
0.008658
0
null
null
0.008658
0
0
0
null
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
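The adapter above still uses Python 2 print statements, so it cannot be parsed with the Python 3 grammar, which is consistent with several of its qsc_codepython_* fields being null in this record. One plausible reading of qsc_codepython_cate_ast is a simple parse check, sketched below; the actual parser the pipeline uses is an assumption.

```python
import ast

def parses_as_python3(source: str) -> bool:
    """True when the file parses under the Python 3 grammar (assumed
    meaning of qsc_codepython_cate_ast); legacy print statements fail."""
    try:
        ast.parse(source)
        return True
    except SyntaxError:
        return False
```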
4268dedc4f196ce3c22a737e9a8814bb72fec166
140
py
Python
pyaz/netappfiles/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/netappfiles/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
null
null
null
pyaz/netappfiles/__init__.py
py-az-cli/py-az-cli
9a7dc44e360c096a5a2f15595353e9dad88a9792
[ "MIT" ]
1
2022-02-03T09:12:01.000Z
2022-02-03T09:12:01.000Z
'''
Manage Azure NetApp Files (ANF) Resources.
'''

from .. pyaz_utils import _call_az
from . import account, pool, snapshot, vault, volume
20
52
0.728571
19
140
5.210526
0.894737
0
0
0
0
0
0
0
0
0
0
0
0.157143
140
6
53
23.333333
0.838983
0.3
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
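The word-level signals (qsc_code_num_words, qsc_code_mean_word_length, qsc_code_frac_words_unique) invite the same treatment. Below is a sketch under a plain whitespace tokenization; the recorded counts for the small files in this dump suggest the real tokenizer handles punctuation differently, so exact agreement should not be expected.

```python
def word_stats(content: str) -> dict:
    # Plain whitespace tokenization is an assumption; the dataset's
    # recorded values imply a slightly different tokenizer.
    words = content.split()
    n = max(len(words), 1)
    return {
        "num_words": len(words),
        "mean_word_length": sum(len(w) for w in words) / n,
        "frac_words_unique": len(set(words)) / n,
    }
```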
35fe41c3bbfa008331efcfd72dcee99276f0c045
1430
py
Python
specHdl/rawdata/PacketProcess.py
huhub/prototypeTester
3ebb1af5afef26c678fad8d36f945ca2fd804b7d
[ "Apache-2.0" ]
null
null
null
specHdl/rawdata/PacketProcess.py
huhub/prototypeTester
3ebb1af5afef26c678fad8d36f945ca2fd804b7d
[ "Apache-2.0" ]
null
null
null
specHdl/rawdata/PacketProcess.py
huhub/prototypeTester
3ebb1af5afef26c678fad8d36f945ca2fd804b7d
[ "Apache-2.0" ]
null
null
null
PacketProcess = {'CtlPpGapConfig': ['gapValue'], 'DsMemRouteTsn': ['isTsn', 'tsnHandle'], 'DsMemRoute': ['destMap', 'nexthopIdx', 'flowPolicePtr', 'flowPoliceValid', 'flowStatsValid', 'flowStatsPtr', 'mirrorEn', 'discard', 'copyToCpu', 'nat'], 'DsMemMac': ['pending', 'flowPolicePtr', 'flowPoliceValid', 'flowStatsValid', 'flowStatsPtr', 'destMap', 'isMcast', 'mirrorEn', 'dstDiscard', 'copyToCpu'], 'DsMemMacTsn': ['isTsn', 'tsnHandle'], 'DsMemCustomFdb': ['isTsn', 'tsnHandle', 'flowPolicePtr', 'flowPoliceValid', 'flowStatsValid', 'flowStatsPtr', 'destMap', 'isMcast', 'mirrorEn'], 'CtlStormCntl': ['enable', 'stormCurPtr', 'stormInterval', 'stormMaxPtr', 'stormMinPtr', 'stormFinalDelay', 'stormCurRound', 'stormUpdRound'], 'DsMemStormCtrl': ['stormCtrlEn', 'threshold', 'usePktCount'], 'DsMemStorm': ['runningCounter'], 'DsRegPortLearnCtrl': ['lock', 'violationToCpu', 'maxMacNum', 'macNumLimitEn', 'lrnNumExceedDiscard'], 'DsRegPortLearnNum': ['learntMacNum'], 'CtlPktProcLog': ['cpuFifoFullNum', 'hwFifoFullNum', 'aclQosLogEn', 'aclDiscard', 'routeDiscard', 'routeExcpDiscard', 'routeProcess', 'bridgeProcess', 'destMacKnown', 'l2UcastSrcMatchDiscard', 'macDaDiscard', 'igrStpCheckDiscard', 'stormDrop', 'lrnPortLockDiscard', 'lrnNumExceedDiscard', 'isTsn', 'tsnHandle', 'igrFlowSpan', 'entryPend'], 'CtlMacLearn': ['cpuLearnEn', 'cpuLearnNum', 'cpuFifoDepth', 'cpuLrnNumThrd', 'hwLearnNum', 'hwFifoDepth', 'hwLrnNumThrd']}
1430
1430
0.725175
92
1430
11.271739
0.771739
0.054002
0.121504
0.15622
0.146577
0.146577
0.146577
0
0
0
0
0.000747
0.064336
1430
1
1430
1430
0.77429
0
0
0
0
0
0.715584
0.015374
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
c444238b992aa2e729ccd8592544ab07c94b5c39
86
py
Python
accounts/admin.py
mirsazzathossain/SPMS-Project
eb2b9144b6ddb8d18c146a4c4d6f79b9f7a7eeb5
[ "MIT" ]
190
2021-02-06T10:47:54.000Z
2022-02-15T23:45:07.000Z
accounts/admin.py
mirsazzathossain/SPMS-Project
eb2b9144b6ddb8d18c146a4c4d6f79b9f7a7eeb5
[ "MIT" ]
105
2020-06-17T19:40:51.000Z
2022-03-01T20:23:04.000Z
accounts/admin.py
mirsazzathossain/SPMS-Project
eb2b9144b6ddb8d18c146a4c4d6f79b9f7a7eeb5
[ "MIT" ]
52
2018-03-08T11:18:12.000Z
2021-08-02T16:07:04.000Z
from django.contrib import admin

from .models import User

admin.site.register(User)
14.333333
32
0.802326
13
86
5.307692
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.127907
86
5
33
17.2
0.92
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c4567fd2bde78f229a843af7d5b833c0f0b99b70
215
py
Python
apps/public/apps.py
aeasringnar/-django-RESTfulAPI
3065f7617dc3534005ab94cd08324c2b51526634
[ "MIT" ]
null
null
null
apps/public/apps.py
aeasringnar/-django-RESTfulAPI
3065f7617dc3534005ab94cd08324c2b51526634
[ "MIT" ]
null
null
null
apps/public/apps.py
aeasringnar/-django-RESTfulAPI
3065f7617dc3534005ab94cd08324c2b51526634
[ "MIT" ]
null
null
null
from django.apps import AppConfig


class PublicConfig(AppConfig):
    name = 'apps.public'
    # todo: rename the app so it can live under the apps directory and still be imported into settings normally

    # activate signals
    def ready(self):
        import apps.public.signals
21.5
74
0.730233
23
215
6.826087
0.782609
0.127389
0
0
0
0
0
0
0
0
0
0
0.190698
215
9
75
23.888889
0.902299
0.265116
0
0
0
0
0.070968
0
0
0
0
0.111111
0
1
0.2
false
0
0.4
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
1
0
0
5
c4667abf783360e90a5080d3edac2a45dd910901
219
py
Python
backend/app/tests/test_tasks.py
huideyeren/odaikun
31ebacdce3398e442891c93fc0877416ed902c27
[ "MIT" ]
null
null
null
backend/app/tests/test_tasks.py
huideyeren/odaikun
31ebacdce3398e442891c93fc0877416ed902c27
[ "MIT" ]
20
2020-11-12T03:21:24.000Z
2020-11-24T00:10:40.000Z
backend/app/tests/test_tasks.py
huideyeren/odaikun
31ebacdce3398e442891c93fc0877416ed902c27
[ "MIT" ]
null
null
null
from app import tasks


def test_example_task():
    """
    test_example_task
    Test that the sample task is being received.
    """
    task_output = tasks.example_task("Hello World")
    assert task_output == "test task returns Hello World"
21.9
57
0.712329
27
219
5.518519
0.518519
0.221477
0.201342
0
0
0
0
0
0
0
0
0
0.200913
219
9
58
24.333333
0.851429
0.178082
0
0
0
0
0.243902
0
0
0
0
0
0.25
1
0.25
false
0
0.25
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
c473d6491a0eb0339c728bd0ce69fb79c1114e95
2744
py
Python
tests/test_logger.py
gosion/pyPvm
d7326799c907b660db11b02fd16843fdb4733eb7
[ "MIT" ]
null
null
null
tests/test_logger.py
gosion/pyPvm
d7326799c907b660db11b02fd16843fdb4733eb7
[ "MIT" ]
null
null
null
tests/test_logger.py
gosion/pyPvm
d7326799c907b660db11b02fd16843fdb4733eb7
[ "MIT" ]
null
null
null
import os
import pytest
import sys
import time

from pvm.features.logging import LogFeature, StreamWriter, FileWriter
from tests.processes import pause_and_continute


@pytest.fixture
def init():
    file_name = (
        "abc." + time.strftime("%Y%m%d", time.localtime(time.time())) + ".log"
    )
    if os.path.exists(file_name):
        os.remove(file_name)
    yield file_name


def test_log_to_console(capsys):
    process = pause_and_continute(StreamWriter())
    prices = [26, 32, 15]
    initData1 = {
        "price1": prices[0],
        "price2": prices[1],
    }
    process.start(initData1)
    waiting_ids = process.process_context.scope.get("waiting_ids", [])
    assert waiting_ids is not None
    assert len(waiting_ids) == 1
    assert process.process_context.scope.get("total") == (
        prices[0] + prices[1]
    )

    initData2 = {"user_input": 0}
    process.proceed(waiting_ids[0], initData2)
    assert process.process_context.scope.get("total", prices[0] + prices[1])

    initData2["user_input"] = prices[2]
    process.proceed(waiting_ids[0], initData2)
    assert process.process_context.scope.get("total") == (
        prices[0] + prices[1] + prices[2]
    )

    expected = [
        "occurs.",
        "is ready to execute.",
        "inished ths executions.",
        "passed.",
        "is ready to execute.",
        "I am waiting.",
    ]
    out, err = capsys.readouterr()
    lines = err.split(os.linesep)
    for i, e in enumerate(expected):
        assert lines[i].split("-")[-1].strip().endswith(e)


def test_log_to_file(init):
    process = pause_and_continute(FileWriter("abc.log"))
    prices = [26, 32, 15]
    initData1 = {
        "price1": prices[0],
        "price2": prices[1],
    }
    process.start(initData1)
    waiting_ids = process.process_context.scope.get("waiting_ids", [])
    assert waiting_ids is not None
    assert len(waiting_ids) == 1
    assert process.process_context.scope.get("total") == (
        prices[0] + prices[1]
    )

    file_name = init
    initData2 = {"user_input": 0}
    process.proceed(waiting_ids[0], initData2)
    assert process.process_context.scope.get("total", prices[0] + prices[1])

    initData2["user_input"] = prices[2]
    process.proceed(waiting_ids[0], initData2)
    assert process.process_context.scope.get("total") == (
        prices[0] + prices[1] + prices[2]
    )

    expected = [
        "occurs.",
        "is ready to execute.",
        "inished ths executions.",
        "passed.",
        "is ready to execute.",
        "I am waiting.",
    ]
    with open(file_name, mode="r") as f:
        lines = f.readlines()
    for i, e in enumerate(expected):
        assert lines[i].split("-")[-1].strip().endswith(e)
24.5
78
0.612609
342
2744
4.792398
0.274854
0.073215
0.102502
0.126907
0.708969
0.708969
0.708969
0.708969
0.708969
0.708969
0
0.027831
0.240525
2744
111
79
24.720721
0.758637
0
0
0.626506
0
0
0.116618
0
0
0
0
0
0.144578
1
0.036145
false
0.024096
0.072289
0
0.108434
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6709ef99dbc39e6aafe1755a80e82b6351387391
55
py
Python
opy/testdata/hello_py2_print.py
bb010g/oil
660f6ad283d53e3b9c4b1088b39ff1002e6a8d55
[ "Apache-2.0" ]
1
2018-10-15T10:09:32.000Z
2018-10-15T10:09:32.000Z
opy/testdata/hello_py2_print.py
bb010g/oil
660f6ad283d53e3b9c4b1088b39ff1002e6a8d55
[ "Apache-2.0" ]
1
2018-05-28T21:30:28.000Z
2018-05-28T21:30:28.000Z
opy/testdata/hello_py2_print.py
bb010g/oil
660f6ad283d53e3b9c4b1088b39ff1002e6a8d55
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
import sys

print >>sys.stderr, 'hi'
11
24
0.672727
9
55
4.111111
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.127273
55
4
25
13.75
0.770833
0.290909
0
0
0
0
0.052632
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
1
0
5
6738fdf7fa1dbd32d627d0c964d9456d21566f86
2185
py
Python
checkov/common/checks_infra/solvers/attribute_solvers/__init__.py
Devocean8-Official/checkov
8ce61421fa838a97981ab3bd0ae2a12e541666b2
[ "Apache-2.0" ]
1
2022-02-15T20:46:07.000Z
2022-02-15T20:46:07.000Z
checkov/common/checks_infra/solvers/attribute_solvers/__init__.py
Devocean8-Official/checkov
8ce61421fa838a97981ab3bd0ae2a12e541666b2
[ "Apache-2.0" ]
3
2022-03-07T20:37:31.000Z
2022-03-21T20:20:14.000Z
checkov/common/checks_infra/solvers/attribute_solvers/__init__.py
Devocean8-Official/checkov
8ce61421fa838a97981ab3bd0ae2a12e541666b2
[ "Apache-2.0" ]
null
null
null
from checkov.common.checks_infra.solvers.attribute_solvers.any_attribute_solver import AnyResourceSolver
from checkov.common.checks_infra.solvers.attribute_solvers.contains_attribute_solver import ContainsAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.not_contains_attribute_solver import NotContainsAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.ending_with_attribute_solver import EndingWithAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.equals_attribute_solver import EqualsAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.regex_match_attribute_solver import RegexMatchAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.exists_attribute_solver import ExistsAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.not_ending_with_attribute_solver import NotEndingWithAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.not_equals_attribute_solver import NotEqualsAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.not_regex_match_attribute_solver import NotRegexMatchAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.not_exists_attribute_solver import NotExistsAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.not_starting_with_attribute_solver import NotStartingWithAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.starting_with_attribute_solver import StartingWithAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.within_attribute_solver import WithinAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.greater_than_attribute_solver import GreaterThanAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.greater_than_or_equal_attribute_solver import GreaterThanOrEqualAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.less_than_attribute_solver import LessThanAttributeSolver
from checkov.common.checks_infra.solvers.attribute_solvers.less_than_or_equal_attribute_solver import LessThanOrEqualAttributeSolver
115
138
0.925858
254
2185
7.602362
0.169291
0.102538
0.158467
0.214397
0.631797
0.533402
0.504402
0.504402
0.293112
0.125324
0
0
0.032952
2185
18
139
121.388889
0.913867
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
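qsc_code_frac_chars_top_2grams rewards repetitive phrasing; the checkov __init__ above repeats the "from checkov.common.checks_infra.solvers.attribute_solvers" prefix in all 18 lines, which drives its n-gram signals. Below is one plausible definition (character mass of the single most frequent word n-gram, counted once per occurrence); the normalization is an assumption.

```python
from collections import Counter

def frac_chars_top_ngram(content: str, n: int) -> float:
    """Character share of the most frequent word n-gram (assumed definition)."""
    words = content.split()
    if len(words) < n:
        return 0.0
    counts = Counter(tuple(words[i:i + n]) for i in range(len(words) - n + 1))
    gram, freq = counts.most_common(1)[0]
    total = sum(len(w) for w in words)
    return freq * sum(len(w) for w in gram) / total if total else 0.0
```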
675235c0ada6906064d1f7efdbb6ab2363adb9be
239
py
Python
PYTHON/ex074.py
george-git-dev/CursoemVideo
307933ef91f1ad3a0be11bfb326fe86211f2f156
[ "MIT" ]
null
null
null
PYTHON/ex074.py
george-git-dev/CursoemVideo
307933ef91f1ad3a0be11bfb326fe86211f2f156
[ "MIT" ]
null
null
null
PYTHON/ex074.py
george-git-dev/CursoemVideo
307933ef91f1ad3a0be11bfb326fe86211f2f156
[ "MIT" ]
1
2021-04-01T22:31:19.000Z
2021-04-01T22:31:19.000Z
from random import randint
n = (randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10))
print(f"Eu sorteei os valores {n}")
print(f"O maior valor sorteado foi {max(n)}")
print(f"O menor valor sorteado foi {min(n)}")
47.8
84
0.682008
45
239
3.622222
0.466667
0.245399
0.306748
0.417178
0.306748
0.306748
0.306748
0.306748
0.306748
0.306748
0
0.072464
0.133891
239
5
85
47.8
0.714976
0
0
0
0
0
0.395833
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0.6
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
67606b53b22673f3a87f2a297ad0ea1434e5893b
2656
py
Python
demo/plot_wavelets.py
SalvoCas/pywt
75b3b7b37102aad27780153b4b0fdaf184b205a4
[ "MIT" ]
1435
2015-07-29T18:28:27.000Z
2022-03-31T10:16:46.000Z
demo/plot_wavelets.py
SalvoCas/pywt
75b3b7b37102aad27780153b4b0fdaf184b205a4
[ "MIT" ]
547
2015-07-29T18:10:15.000Z
2022-03-24T18:42:57.000Z
demo/plot_wavelets.py
SalvoCas/pywt
75b3b7b37102aad27780153b4b0fdaf184b205a4
[ "MIT" ]
421
2015-07-30T13:08:25.000Z
2022-03-24T11:10:07.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Plot scaling and wavelet functions for db, sym, coif, bior and rbio families

import itertools

import matplotlib.pyplot as plt

import pywt

plot_data = [('db', (4, 3)), ('sym', (4, 3)), ('coif', (3, 2))]

for family, (rows, cols) in plot_data:
    fig = plt.figure()
    fig.subplots_adjust(hspace=0.2, wspace=0.2, bottom=.02, left=.06,
                        right=.97, top=.94)
    colors = itertools.cycle('bgrcmyk')

    wnames = pywt.wavelist(family)
    i = iter(wnames)
    for col in range(cols):
        for row in range(rows):
            try:
                wavelet = pywt.Wavelet(next(i))
            except StopIteration:
                break
            phi, psi, x = wavelet.wavefun(level=5)

            color = next(colors)
            ax = fig.add_subplot(rows, 2 * cols, 1 + 2 * (col + row * cols))
            ax.set_title(wavelet.name + " phi")
            ax.plot(x, phi, color)
            ax.set_xlim(min(x), max(x))

            ax = fig.add_subplot(rows, 2*cols, 1 + 2*(col + row*cols) + 1)
            ax.set_title(wavelet.name + " psi")
            ax.plot(x, psi, color)
            ax.set_xlim(min(x), max(x))

for family, (rows, cols) in [('bior', (4, 3)), ('rbio', (4, 3))]:
    fig = plt.figure()
    fig.subplots_adjust(hspace=0.5, wspace=0.2, bottom=.02, left=.06,
                        right=.97, top=.94)
    colors = itertools.cycle('bgrcmyk')

    wnames = pywt.wavelist(family)
    i = iter(wnames)
    for col in range(cols):
        for row in range(rows):
            try:
                wavelet = pywt.Wavelet(next(i))
            except StopIteration:
                break
            phi, psi, phi_r, psi_r, x = wavelet.wavefun(level=5)
            row *= 2

            color = next(colors)
            ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
            ax.set_title(wavelet.name + " phi")
            ax.plot(x, phi, color)
            ax.set_xlim(min(x), max(x))

            ax = fig.add_subplot(2*rows, 2*cols, 2*(1 + col + row*cols))
            ax.set_title(wavelet.name + " psi")
            ax.plot(x, psi, color)
            ax.set_xlim(min(x), max(x))

            row += 1
            ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols))
            ax.set_title(wavelet.name + " phi_r")
            ax.plot(x, phi_r, color)
            ax.set_xlim(min(x), max(x))

            ax = fig.add_subplot(2*rows, 2*cols, 1 + 2*(col + row*cols) + 1)
            ax.set_title(wavelet.name + " psi_r")
            ax.plot(x, psi_r, color)
            ax.set_xlim(min(x), max(x))

plt.show()
31.247059
78
0.508283
378
2656
3.497355
0.219577
0.045386
0.036309
0.068079
0.826021
0.765507
0.765507
0.742814
0.680787
0.662632
0
0.035755
0.336596
2656
84
79
31.619048
0.714529
0.044804
0
0.634921
0
0
0.023283
0
0
0
0
0
0
1
0
false
0
0.047619
0
0.047619
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
6782ba7147a1da69b9c83481218f3f3b394e9bfb
7547
py
Python
tests/trees/test_hoeffding_adaptive_tree.py
jiahy0825/scikit-multiflow
910fa62605de49dea3e4599bb233c3d9c6f4527b
[ "BSD-3-Clause" ]
null
null
null
tests/trees/test_hoeffding_adaptive_tree.py
jiahy0825/scikit-multiflow
910fa62605de49dea3e4599bb233c3d9c6f4527b
[ "BSD-3-Clause" ]
null
null
null
tests/trees/test_hoeffding_adaptive_tree.py
jiahy0825/scikit-multiflow
910fa62605de49dea3e4599bb233c3d9c6f4527b
[ "BSD-3-Clause" ]
null
null
null
import numpy as np
from array import array
import os
from skmultiflow.data import ConceptDriftStream, SEAGenerator, HyperplaneGenerator
from skmultiflow.trees import HAT


def test_hat_mc(test_path):
    stream = ConceptDriftStream(stream=SEAGenerator(random_state=1, noise_percentage=0.05),
                                drift_stream=SEAGenerator(random_state=2,
                                                          classification_function=2,
                                                          noise_percentage=0.05),
                                random_state=1, position=250, width=10)
    stream.prepare_for_use()

    learner = HAT(leaf_prediction='mc')

    cnt = 0
    max_samples = 1000
    y_pred = array('i')
    y_proba = []
    wait_samples = 20

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_proba.append(learner.predict_proba(X)[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('i', [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                                       1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    assert np.alltrue(y_pred == expected_predictions)

    test_file = os.path.join(test_path, 'test_hoeffding_adaptive_tree_mc.npy')
    data = np.load(test_file)
    assert np.allclose(y_proba, data)

    expected_info = "HAT(binary_split=False, bootstrap_sampling=True, grace_period=200,\n" \
                    " leaf_prediction='mc', max_byte_size=33554432,\n" \
                    " memory_estimate_period=1000000, nb_threshold=0, no_preprune=False,\n" \
                    " nominal_attributes=None, remove_poor_atts=False, split_confidence=1e-07,\n" \
                    " split_criterion='info_gain', stop_mem_management=False, tie_threshold=0.05)"
    assert learner.get_info() == expected_info

    expected_model_1 = 'Leaf = Class 1.0 | {0.0: 398.0, 1.0: 1000.0}\n'
    assert (learner.get_model_description() == expected_model_1)

    assert type(learner.predict(X)) == np.ndarray
    assert type(learner.predict_proba(X)) == np.ndarray

    stream.restart()
    X, y = stream.next_sample(5000)

    learner = HAT(max_byte_size=30, leaf_prediction='mc', grace_period=10)
    learner.partial_fit(X, y)


def test_hat_nb(test_path):
    stream = ConceptDriftStream(stream=SEAGenerator(random_state=1, noise_percentage=0.05),
                                drift_stream=SEAGenerator(random_state=2,
                                                          classification_function=2,
                                                          noise_percentage=0.05),
                                random_state=1, position=250, width=10)
    stream.prepare_for_use()

    learner = HAT(leaf_prediction='nb')

    cnt = 0
    max_samples = 1000
    y_pred = array('i')
    y_proba = []
    wait_samples = 20

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_proba.append(learner.predict_proba(X)[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('i', [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1,
                                       1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0,
                                       0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1,
                                       1, 0, 1, 1, 1, 1, 1, 0, 1, 1])
    assert np.alltrue(y_pred == expected_predictions)

    test_file = os.path.join(test_path, 'test_hoeffding_adaptive_tree_nb.npy')
    data = np.load(test_file)
    assert np.allclose(y_proba, data)

    expected_info = "HAT(binary_split=False, bootstrap_sampling=True, grace_period=200,\n" \
                    " leaf_prediction='nb', max_byte_size=33554432,\n" \
                    " memory_estimate_period=1000000, nb_threshold=0, no_preprune=False,\n" \
                    " nominal_attributes=None, remove_poor_atts=False, split_confidence=1e-07,\n" \
                    " split_criterion='info_gain', stop_mem_management=False, tie_threshold=0.05)"
    assert learner.get_info() == expected_info

    assert type(learner.predict(X)) == np.ndarray
    assert type(learner.predict_proba(X)) == np.ndarray


def test_hat_nba(test_path):
    stream = HyperplaneGenerator(mag_change=0.001, noise_percentage=0.1, random_state=2)
    stream.prepare_for_use()

    learner = HAT(leaf_prediction='nba')

    cnt = 0
    max_samples = 5000
    y_pred = array('i')
    y_proba = []
    wait_samples = 100

    while cnt < max_samples:
        X, y = stream.next_sample()
        # Test every n samples
        if (cnt % wait_samples == 0) and (cnt != 0):
            y_pred.append(learner.predict(X)[0])
            y_proba.append(learner.predict_proba(X)[0])
        learner.partial_fit(X, y)
        cnt += 1

    expected_predictions = array('i', [1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0,
                                       1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
                                       0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0,
                                       1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0])
    assert np.alltrue(y_pred == expected_predictions)

    test_file = os.path.join(test_path, 'test_hoeffding_adaptive_tree_nba.npy')
    data = np.load(test_file)
    assert np.allclose(y_proba, data)

    expected_info = "HAT(binary_split=False, bootstrap_sampling=True, grace_period=200,\n" \
                    " leaf_prediction='nba', max_byte_size=33554432,\n" \
                    " memory_estimate_period=1000000, nb_threshold=0, no_preprune=False,\n" \
                    " nominal_attributes=None, remove_poor_atts=False, split_confidence=1e-07,\n" \
                    " split_criterion='info_gain', stop_mem_management=False, tie_threshold=0.05)"
    assert learner.get_info() == expected_info

    assert type(learner.predict(X)) == np.ndarray
    assert type(learner.predict_proba(X)) == np.ndarray


def test_hoeffding_adaptive_tree_categorical_features(test_path):
    data_path = os.path.join(test_path, 'ht_categorical_features_testcase.npy')
    stream = np.load(data_path)
    # Removes the last two columns (regression targets)
    stream = stream[:, :-2]
    X, y = stream[:, :-1], stream[:, -1]

    nominal_attr_idx = np.arange(7).tolist()
    learner = HAT(nominal_attributes=nominal_attr_idx)

    learner.partial_fit(X, y, classes=np.unique(y))

    expected_description = "if Attribute 0 = -15.0:\n" \
                           " Leaf = Class 2 | {2: 475.0}\n" \
                           "if Attribute 0 = 0.0:\n" \
                           " Leaf = Class 0 | {0: 560.0, 1: 345.0}\n" \
                           "if Attribute 0 = 1.0:\n" \
                           " Leaf = Class 1 | {0: 416.0, 1: 464.0}\n" \
                           "if Attribute 0 = 2.0:\n" \
                           " Leaf = Class 1 | {0: 335.0, 1: 504.0}\n" \
                           "if Attribute 0 = 3.0:\n" \
                           " Leaf = Class 1 | {0: 244.0, 1: 644.0}\n" \
                           "if Attribute 0 = -30.0:\n" \
                           " Leaf = Class 3.0 | {3.0: 65.0, 4.0: 55.0}\n"

    assert learner.get_model_description() == expected_description
41.927778
102
0.551875
1015
7547
3.902463
0.15468
0.043928
0.053774
0.060591
0.797526
0.760414
0.745014
0.744761
0.702095
0.699823
0
0.079632
0.322777
7547
179
103
42.162011
0.695363
0.01484
0
0.580882
0
0.014706
0.219381
0.120188
0
0
0
0
0.125
1
0.029412
false
0
0.036765
0
0.066176
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
678a715802279b3ffa2b1e247706b3f4c90568b3
88
py
Python
finance/__init__.py
codingwithchad/finance
4202692024ec0137670ff02a13b2f92f17da0cb2
[ "MIT" ]
null
null
null
finance/__init__.py
codingwithchad/finance
4202692024ec0137670ff02a13b2f92f17da0cb2
[ "MIT" ]
null
null
null
finance/__init__.py
codingwithchad/finance
4202692024ec0137670ff02a13b2f92f17da0cb2
[ "MIT" ]
null
null
null
from categorize import categorize
from financeutil import toFloat
from category import *
29.333333
33
0.863636
11
88
6.909091
0.545455
0
0
0
0
0
0
0
0
0
0
0
0.125
88
3
34
29.333333
0.987013
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
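The finance/__init__.py record above is a convenient anchor for the Python-specific signals: a three-line, import-only file with a recorded qsc_codepython_frac_lines_import_quality_signal of 1. Below is a sketch of one AST-based derivation; whether the real signal is AST-based or line-based is an assumption.

```python
import ast

def frac_lines_import(source: str) -> float:
    """Share of non-blank lines that are import statements (assumed definition)."""
    lines = [line for line in source.splitlines() if line.strip()]
    if not lines:
        return 0.0
    tree = ast.parse(source)
    imports = sum(isinstance(node, (ast.Import, ast.ImportFrom))
                  for node in ast.walk(tree))
    return imports / len(lines)
```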
6795af2659c58f77f302d69349e684baeaff81a1
137
py
Python
app/loader.py
ganggas95/E-Wisata
fb66fc7d3d4cc5a45ad9acea42fb306140a6449f
[ "Apache-2.0" ]
null
null
null
app/loader.py
ganggas95/E-Wisata
fb66fc7d3d4cc5a45ad9acea42fb306140a6449f
[ "Apache-2.0" ]
null
null
null
app/loader.py
ganggas95/E-Wisata
fb66fc7d3d4cc5a45ad9acea42fb306140a6449f
[ "Apache-2.0" ]
1
2020-02-12T09:21:15.000Z
2020-02-12T09:21:15.000Z
from .create_app import login
from .user_app import User


@login.user_loader
def load_user(user_id):
    return User.get_by_id(user_id)
17.125
34
0.788321
25
137
4
0.52
0.18
0
0
0
0
0
0
0
0
0
0
0.138686
137
7
35
19.571429
0.847458
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
67d105d412565b0a5337b25419ff11939265dc1b
10095
py
Python
senlin/tests/unit/api/openstack/v1/test_policy_types.py
openstack/senlin
390779ca1e08f819683e79993696f945f1c0393e
[ "Apache-2.0" ]
45
2015-10-18T02:56:50.000Z
2022-03-01T15:28:02.000Z
senlin/tests/unit/api/openstack/v1/test_policy_types.py
openstack/senlin
390779ca1e08f819683e79993696f945f1c0393e
[ "Apache-2.0" ]
2
2019-04-26T10:44:47.000Z
2020-12-16T19:45:34.000Z
senlin/tests/unit/api/openstack/v1/test_policy_types.py
openstack/senlin
390779ca1e08f819683e79993696f945f1c0393e
[ "Apache-2.0" ]
45
2015-10-19T02:35:57.000Z
2021-09-28T09:01:42.000Z
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from webob import exc

from senlin.api.common import util
from senlin.api.middleware import fault
from senlin.api.openstack.v1 import policy_types
from senlin.common import exception as senlin_exc
from senlin.common import policy
from senlin.rpc import client as rpc_client
from senlin.tests.unit.api import shared
from senlin.tests.unit.common import base


@mock.patch.object(policy, 'enforce')
class PolicyTypeControllerTest(shared.ControllerTest, base.SenlinTestCase):
    def setUp(self):
        super(PolicyTypeControllerTest, self).setUp()

        class DummyConfig(object):
            bind_port = 8777

        cfgopts = DummyConfig()
        self.controller = policy_types.PolicyTypeController(options=cfgopts)

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_list(self, mock_call, mock_parse, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'index', True)
        req = self._get('/policy_types')

        engine_response = [
            {'name': 'senlin.policy.p1', 'version': '1.0', 'attr': 'v1'},
            {'name': 'senlin.policy.p2', 'version': '1.0', 'attr': 'v2'}
        ]
        mock_call.return_value = engine_response
        obj = mock.Mock()
        mock_parse.return_value = obj

        response = self.controller.index(req)

        self.assertEqual(
            [
                {'name': 'senlin.policy.p1-1.0'},
                {'name': 'senlin.policy.p2-1.0'},
            ],
            response['policy_types']
        )
        mock_parse.assert_called_once_with(
            'PolicyTypeListRequest', req, {})
        mock_call.assert_called_once_with(
            req.context, 'policy_type_list', mock.ANY)

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_list_old_version(self, mock_call, mock_parse, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'index', True)
        req = self._get('/policy_types', version='1.3')

        engine_response = [
            {'name': 'senlin.policy.p1', 'version': '1.0'},
            {'name': 'senlin.policy.p2', 'version': '1.1'}
        ]
        mock_call.return_value = engine_response
        obj = mock.Mock()
        mock_parse.return_value = obj

        response = self.controller.index(req)

        self.assertEqual(
            [
                {'name': 'senlin.policy.p1-1.0'},
                {'name': 'senlin.policy.p2-1.1'}
            ],
            response['policy_types']
        )
        mock_parse.assert_called_once_with(
            'PolicyTypeListRequest', req, {})
        mock_call.assert_called_once_with(
            req.context, 'policy_type_list', mock.ANY)

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_list_new_version(self, mock_call, mock_parse, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'index', True)
        req = self._get('/policy_types', version='1.5')

        engine_response = [
            {'name': 'senlin.policy.p1', 'version': '1.0', 'a1': 'v1'},
            {'name': 'senlin.policy.p2', 'version': '1.1', 'a2': 'v2'}
        ]
        mock_call.return_value = engine_response
        obj = mock.Mock()
        mock_parse.return_value = obj

        response = self.controller.index(req)

        self.assertEqual(engine_response, response['policy_types'])
        mock_parse.assert_called_once_with(
            'PolicyTypeListRequest', req, {})
        mock_call.assert_called_once_with(
            req.context, 'policy_type_list', mock.ANY)

    def test_list_err_denied_policy(self, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'index', False)
        req = self._get('/policy_types')

        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.index,
                                              req)
        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', str(resp))

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_get_old_version(self, mock_call, mock_parse, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'get', True)
        type_name = 'SimplePolicy'
        req = self._get('/policy_types/%(type)s' % {'type': type_name},
                        version='1.3')

        engine_response = {
            'name': type_name,
            'schema': {
                'Foo': {'type': 'String', 'required': False},
                'Bar': {'type': 'Integer', 'required': False},
            },
        }
        mock_call.return_value = engine_response
        obj = mock.Mock()
        mock_parse.return_value = obj

        response = self.controller.get(req, type_name=type_name)

        self.assertEqual(engine_response, response['policy_type'])
        mock_parse.assert_called_once_with(
            'PolicyTypeGetRequest', req, {'type_name': type_name})
        mock_call.assert_called_once_with(
            req.context, 'policy_type_get', mock.ANY)

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_get_new_version(self, mock_call, mock_parse, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'get', True)
        type_name = 'SimplePolicy'
        req = self._get('/policy_types/%(type)s' % {'type': type_name},
                        version='1.5')

        engine_response = {
            'name': type_name,
            'schema': {
                'Foo': {'type': 'String', 'required': False},
                'Bar': {'type': 'Integer', 'required': False},
            },
            'support_status': 'faked_status'
        }
        mock_call.return_value = engine_response
        obj = mock.Mock()
        mock_parse.return_value = obj

        response = self.controller.get(req, type_name=type_name)

        self.assertEqual(engine_response, response['policy_type'])
        mock_parse.assert_called_once_with(
            'PolicyTypeGetRequest', req, {'type_name': type_name})
        mock_call.assert_called_once_with(
            req.context, 'policy_type_get', mock.ANY)

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_policy_type_get(self, mock_call, mock_parse, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'get', True)
        type_name = 'SimplePolicy'
        req = self._get('/policy_types/%(type)s' % {'type': type_name})

        engine_response = {
            'name': type_name,
            'schema': {
                'Foo': {'type': 'String', 'required': False},
                'Bar': {'type': 'Integer', 'required': False},
            },
        }
        mock_call.return_value = engine_response
        obj = mock.Mock()
        mock_parse.return_value = obj

        response = self.controller.get(req, type_name=type_name)

        self.assertEqual(engine_response, response['policy_type'])
        mock_parse.assert_called_once_with(
            'PolicyTypeGetRequest', req, {'type_name': type_name})
        mock_call.assert_called_once_with(
            req.context, 'policy_type_get', mock.ANY)

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_policy_type_get_not_found(self, mock_call, mock_parse,
                                       mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'get', True)
        type_name = 'BogusPolicyType'
        req = self._get('/policy_types/%(type)s' % {'type': type_name})

        error = senlin_exc.ResourceNotFound(type='policy_type', id=type_name)
        mock_call.side_effect = shared.to_remote_error(error)

        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.get,
                                              req, type_name=type_name)

        self.assertEqual(404, resp.json['code'])
        self.assertEqual('ResourceNotFound', resp.json['error']['type'])

    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_policy_type_get_bad_param(self, mock_call, mock_parse,
                                       mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'get', True)
        type_name = 11
        req = self._get('/policy_types/%(type)s' % {'type': type_name})

        mock_parse.side_effect = exc.HTTPBadRequest("bad param")

        ex = self.assertRaises(exc.HTTPBadRequest,
                               self.controller.get,
                               req, type_name=type_name)

        self.assertEqual("bad param", str(ex))
        mock_parse.assert_called_once_with(
            'PolicyTypeGetRequest', req, {'type_name': type_name})
        self.assertEqual(0, mock_call.call_count)

    def test_policy_type_schema_err_denied_policy(self, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'get', False)
        type_name = 'FakePolicyType'
        req = self._get('/policy_types/%(type)s' % {'type': type_name})

        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.get,
                                              req, type_name=type_name)

        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', str(resp))
39.280156
77
0.613967
1171
10095
5.029035
0.149445
0.048905
0.043301
0.04415
0.762608
0.759042
0.754627
0.737816
0.737816
0.705043
0
0.009573
0.26528
10095
256
78
39.433594
0.784414
0.052006
0
0.648241
0
0
0.143545
0.020402
0
0
0
0
0.140704
1
0.055276
false
0
0.050251
0
0.115578
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
db145e992bf04f84c24bb5ff99b6daeae33b35b1
1184
py
Python
src/players/tests/test_parse_spectators/test_spectators.py
codacy-badger/hbscorez
215e4d2617ac9be91bb9d561bbfc552349cd4781
[ "MIT" ]
12
2018-03-20T21:38:53.000Z
2021-10-31T10:00:12.000Z
src/players/tests/test_parse_spectators/test_spectators.py
codacy-badger/hbscorez
215e4d2617ac9be91bb9d561bbfc552349cd4781
[ "MIT" ]
79
2018-03-18T14:26:47.000Z
2022-03-01T15:51:40.000Z
src/players/tests/test_parse_spectators/test_spectators.py
codacy-badger/hbscorez
215e4d2617ac9be91bb9d561bbfc552349cd4781
[ "MIT" ]
4
2018-05-18T15:39:56.000Z
2020-10-29T09:28:41.000Z
import os

import tabula
from django.test import TestCase

from players.management.commands.parse_report import parse_spectators


class ParseSpectators(TestCase):

    def test_value(self):
        base = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(base, 'report-with-spectators.pdf')
        table = tabula.read_pdf(path, output_format='json', **{'pages': 1, 'lattice': True})[0]
        spectators = parse_spectators(table)
        self.assertEqual(spectators, 60)

    def test_unknown(self):
        base = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(base, 'report-with-unknown-spectators.pdf')
        table = tabula.read_pdf(path, output_format='json', **{'pages': 1, 'lattice': True})[0]
        spectators = parse_spectators(table)
        self.assertEqual(spectators, None)

    def test_invalid(self):
        base = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(base, 'report-with-invalid-spectators.pdf')
        table = tabula.read_pdf(path, output_format='json', **{'pages': 1, 'lattice': True})[0]
        spectators = parse_spectators(table)
        self.assertEqual(spectators, None)
34.823529
95
0.673142
149
1184
5.174497
0.275168
0.070039
0.038911
0.054475
0.749676
0.749676
0.749676
0.749676
0.749676
0.749676
0
0.008316
0.1875
1184
33
96
35.878788
0.793139
0
0
0.478261
0
0
0.119932
0.079392
0
0
0
0
0.130435
1
0.130435
false
0
0.173913
0
0.347826
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
db1b947d4cd98708096ef8e54e90ee5cade83492
26
py
Python
stop/__init__.py
iboraham/senior-project
766fa2c9dd8b4beaa85d48ef71e3c70b525beef2
[ "MIT" ]
1
2021-01-28T07:55:26.000Z
2021-01-28T07:55:26.000Z
stop/__init__.py
iboraham/senior-project
766fa2c9dd8b4beaa85d48ef71e3c70b525beef2
[ "MIT" ]
null
null
null
stop/__init__.py
iboraham/senior-project
766fa2c9dd8b4beaa85d48ef71e3c70b525beef2
[ "MIT" ]
2
2020-02-03T11:30:44.000Z
2020-02-03T11:58:06.000Z
import stop

stop.main()
5.2
11
0.692308
4
26
4.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.192308
26
4
12
6.5
0.857143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
db3897bc53520c5189403c48c14a796fc7c94191
16
py
Python
num/test1.py
ziyecen/redis_test
705b539b62eb613d9a2b528c02028ff299d85483
[ "MIT" ]
null
null
null
num/test1.py
ziyecen/redis_test
705b539b62eb613d9a2b528c02028ff299d85483
[ "MIT" ]
null
null
null
num/test1.py
ziyecen/redis_test
705b539b62eb613d9a2b528c02028ff299d85483
[ "MIT" ]
null
null
null
num1 = 1
num2 = 2
8
8
0.5625
4
16
2.25
1
0
0
0
0
0
0
0
0
0
0
0.363636
0.3125
16
2
8
8
0.454545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e1f0df7570fd2cded81150bc2a7f43a8fefd308c
223
py
Python
qtools3/errors.py
jkpr/qtools3
f3e97619177a71db091ee04eb904479810978025
[ "MIT" ]
null
null
null
qtools3/errors.py
jkpr/qtools3
f3e97619177a71db091ee04eb904479810978025
[ "MIT" ]
8
2019-08-06T07:59:46.000Z
2019-10-07T18:55:07.000Z
qtools3/errors.py
jkpr/qtools3
f3e97619177a71db091ee04eb904479810978025
[ "MIT" ]
3
2019-07-18T18:34:14.000Z
2020-07-31T20:26:30.000Z
"""A module with errors used in the qtools3 package.""" class XlsformError(Exception): pass class ConvertError(Exception): pass class XformError(Exception): pass class QxmleditError(Exception): pass
12.388889
55
0.713004
25
223
6.36
0.64
0.327044
0.339623
0
0
0
0
0
0
0
0
0.005618
0.201794
223
17
56
13.117647
0.88764
0.219731
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
c01ce96afeb76e2c7ea71c31197c10b631d9c72e
146
py
Python
client/src/grafana_launcher.py
estcube/telemetry-forwarding-client
be659c8dd8e4bd26d1d1974d63f90acffd150e34
[ "MIT" ]
3
2020-06-11T12:34:25.000Z
2020-09-16T12:06:32.000Z
client/src/grafana_launcher.py
estcube/telemetry-forwarding-client
be659c8dd8e4bd26d1d1974d63f90acffd150e34
[ "MIT" ]
57
2020-09-16T09:11:04.000Z
2022-02-28T01:32:13.000Z
client/src/grafana_launcher.py
estcube/Telemetry-Forwarding-Client
be659c8dd8e4bd26d1d1974d63f90acffd150e34
[ "MIT" ]
null
null
null
""" Hook to start Grafana server """ import subprocess subprocess.Popen([r"../grafana/bin/grafana-server.exe", "--homepath=grafana"], cwd="..")
24.333333
88
0.684932
18
146
5.555556
0.722222
0.26
0
0
0
0
0
0
0
0
0
0
0.09589
146
5
89
29.2
0.757576
0.191781
0
0
0
0
0.481818
0.3
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c021586bf901ce06c374aa8ba599d3129aca7020
38
py
Python
pandas2tensorboard/test/__init__.py
Anselmoo/pandas2tensorboard
ec5e16135416d23b83daa6bc618d701cf6feb30d
[ "MIT" ]
null
null
null
pandas2tensorboard/test/__init__.py
Anselmoo/pandas2tensorboard
ec5e16135416d23b83daa6bc618d701cf6feb30d
[ "MIT" ]
16
2022-02-06T18:50:39.000Z
2022-03-28T16:30:27.000Z
pandas2tensorboard/test/__init__.py
Anselmoo/pandas2tensorboard
ec5e16135416d23b83daa6bc618d701cf6feb30d
[ "MIT" ]
null
null
null
"""Test of the Pandas2Tensorboard."""
19
37
0.710526
4
38
6.75
1
0
0
0
0
0
0
0
0
0
0
0.029412
0.105263
38
1
38
38
0.764706
0.815789
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
c02afe1eb2695be8d6342fb95a78446b6e09d5c3
229
py
Python
walle/core/__init__.py
kaixin-bai/walle
031e48c080fe439418d017c689ea7e6350ebbbb1
[ "MIT" ]
null
null
null
walle/core/__init__.py
kaixin-bai/walle
031e48c080fe439418d017c689ea7e6350ebbbb1
[ "MIT" ]
null
null
null
walle/core/__init__.py
kaixin-bai/walle
031e48c080fe439418d017c689ea7e6350ebbbb1
[ "MIT" ]
null
null
null
"""Module importing all core classes. """ from walle.core.matrix import RotationMatrix from walle.core.orientation import Orientation from walle.core.pose import Pose from walle.core.quaternion import UnitQuaternion, Quaternion
28.625
60
0.825328
30
229
6.3
0.466667
0.190476
0.275132
0
0
0
0
0
0
0
0
0
0.104803
229
7
61
32.714286
0.921951
0.148472
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c02e9582e259a2a723cd5f593585f282cc44b380
321
py
Python
pysigfox/exceptions.py
optimdata/pysigfox
9998a3fb4813d80b44aa0974fbd4f5936de54fa6
[ "MIT" ]
1
2021-03-12T10:22:07.000Z
2021-03-12T10:22:07.000Z
pysigfox/exceptions.py
optimdata/pysigfox
9998a3fb4813d80b44aa0974fbd4f5936de54fa6
[ "MIT" ]
1
2021-04-30T13:31:03.000Z
2021-04-30T13:31:03.000Z
pysigfox/exceptions.py
optimdata/pysigfox
9998a3fb4813d80b44aa0974fbd4f5936de54fa6
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

# Note: these derive from BaseException rather than Exception, so a plain
# `except Exception` handler will not catch them.


class SigfoxBaseException(BaseException):
    pass


class SigfoxConnectionError(SigfoxBaseException):
    pass


class SigfoxBadStatusError(SigfoxBaseException):
    pass


class SigfoxResponseError(SigfoxBaseException):
    pass


class SigfoxTooManyRequestsError(SigfoxBaseException):
    pass
16.05
54
0.775701
23
321
10.826087
0.478261
0.144578
0.337349
0
0
0
0
0
0
0
0
0.003663
0.149533
321
19
55
16.894737
0.908425
0.065421
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
1
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
c0428808ab2e951ae05bf84c69ce853b1cf337d3
159
py
Python
scripts/taxes_Baybay.py
mgbaybay/Data-Science
4c077b5bd6f693f1d5f0a5fa1996b2ebb4260caf
[ "MIT" ]
null
null
null
scripts/taxes_Baybay.py
mgbaybay/Data-Science
4c077b5bd6f693f1d5f0a5fa1996b2ebb4260caf
[ "MIT" ]
null
null
null
scripts/taxes_Baybay.py
mgbaybay/Data-Science
4c077b5bd6f693f1d5f0a5fa1996b2ebb4260caf
[ "MIT" ]
null
null
null
# Flat 12% tax on the entered income.
income = float(input())
gross_pay = income
taxes_owed = income * .12
net_pay = gross_pay - taxes_owed

print(gross_pay)
print(taxes_owed)
print(net_pay)
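As a worked check of the arithmetic: an input of 1000 yields a gross pay of 1000.0, taxes owed of 1000 * 0.12 = 120.0, and a net pay of 1000 - 120 = 880.0.

# Worked example with a fixed input instead of input():
income = 1000.0
taxes_owed = income * .12      # 120.0
net_pay = income - taxes_owed  # 880.0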
19.875
33
0.72956
25
159
4.32
0.4
0.222222
0.259259
0
0
0
0
0
0
0
0
0.015038
0.163522
159
8
34
19.875
0.796992
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.428571
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
5
c051f98e0d65bdd8119a2205f764e931bce686fa
3,791
py
Python
dolphindb/vector.py
ShenHongFei/dolphindb-python
36f6cc0ded6d9b4b3f25d5eadd83dc3f3314fd8c
[ "Apache-2.0" ]
1
2020-12-29T11:23:07.000Z
2020-12-29T11:23:07.000Z
dolphindb/vector.py
ShenHongFei/dolphindb-python
36f6cc0ded6d9b4b3f25d5eadd83dc3f3314fd8c
[ "Apache-2.0" ]
null
null
null
dolphindb/vector.py
ShenHongFei/dolphindb-python
36f6cc0ded6d9b4b3f25d5eadd83dc3f3314fd8c
[ "Apache-2.0" ]
null
null
null
from pandas import Series


class Vector(object):
    """A named column that builds DolphinDB filter strings via operator overloading."""

    def __init__(self, name=None, data=None, s=None, tableName=None):
        self.__name = name
        self.__tableName = tableName
        self.__session = s  # type : session
        if isinstance(data, list):
            self.__vec = Series(data)
        elif isinstance(data, Series):
            self.__vec = data
        else:
            self.__vec = None

    def name(self):
        return self.__name

    def tableName(self):
        return self.__tableName

    def as_series(self, useCache=False):
        # Return the cached Series if allowed, otherwise fetch
        # "<tableName>.<name>" from the server and cache it.
        if useCache is True and self.__vec is not None:
            return self.__vec
        self.__vec = Series(self.__session.run('.'.join((self.__tableName, self.__name))))
        return self.__vec

    def __str__(self):
        return self.__name

    # Each operator returns a FilterCond rather than evaluating anything,
    # so expressions like (v > 3) render as server-side filter strings.
    def __lt__(self, other):
        return FilterCond(self.__name, '<', str(other))

    def __le__(self, other):
        return FilterCond(self.__name, '<=', str(other))

    def __gt__(self, other):
        return FilterCond(self.__name, '>', str(other))

    def __ge__(self, other):
        return FilterCond(self.__name, '>=', str(other))

    def __eq__(self, other):
        return FilterCond(self.__name, '==', str(other))

    def __ne__(self, other):
        return FilterCond(self.__name, '!=', str(other))

    def __add__(self, other):
        return FilterCond(self.__name, '+', str(other))

    def __sub__(self, other):
        return FilterCond(self.__name, '-', str(other))

    def __mul__(self, other):
        return FilterCond(self.__name, '*', str(other))

    def __div__(self, other):
        return FilterCond(self.__name, '/', str(other))

    def __mod__(self, other):
        return FilterCond(self.__name, '%', str(other))

    def __lshift__(self, other):
        return FilterCond(self.__name, '<<', str(other))

    def __rshift__(self, other):
        return FilterCond(self.__name, '>>', str(other))

    def __floordiv__(self, other):
        # Reuses the lhs/op/rhs slots to render a cast: "(int( <expr> ))".
        return FilterCond('int(', str(self), ')')


class FilterCond(object):
    """A printable (lhs op rhs) triple; composing conditions nests the strings."""

    def __init__(self, lhs, op, rhs):
        self.__lhs = lhs
        self.__op = op
        self.__rhs = rhs

    def __str__(self):
        return '(' + str(self.__lhs) + ' ' + str(self.__op) + ' ' + str(self.__rhs) + ')'

    def __or__(self, other):
        return FilterCond(str(self), 'or', str(other))

    def __and__(self, other):
        return FilterCond(str(self), 'and', str(other))

    def __lt__(self, other):
        return FilterCond(str(self), '<', str(other))

    def __le__(self, other):
        return FilterCond(str(self), '<=', str(other))

    def __gt__(self, other):
        return FilterCond(str(self), '>', str(other))

    def __ge__(self, other):
        return FilterCond(str(self), '>=', str(other))

    def __eq__(self, other):
        return FilterCond(str(self), '==', str(other))

    def __ne__(self, other):
        return FilterCond(str(self), '!=', str(other))

    def __add__(self, other):
        return FilterCond(str(self), '+', str(other))

    def __sub__(self, other):
        return FilterCond(str(self), '-', str(other))

    def __mul__(self, other):
        return FilterCond(str(self), '*', str(other))

    def __div__(self, other):
        return FilterCond(str(self), '/', str(other))

    def __mod__(self, other):
        return FilterCond(str(self), '%', str(other))

    def __lshift__(self, other):
        return FilterCond(str(self), '<<', str(other))

    def __rshift__(self, other):
        return FilterCond(str(self), '>>', str(other))

    def __floordiv__(self, other):
        return FilterCond('int(', str(self), ')')
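None of the overloads above evaluate anything: each comparison returns a FilterCond whose __str__ renders a parenthesized expression, so chaining operators composes a DolphinDB where-clause string. A minimal sketch of that composition; the column names are illustrative, and no live session is needed because only __str__ runs.

# Illustrative only: compose a filter string without connecting to a server.
price = Vector(name='price')
qty = Vector(name='qty')

cond = (price > 100) & (qty <= 10)
print(str(cond))  # ((price > 100) and (qty <= 10))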
29.161538
91
0.572408
430
3,791
4.565116
0.127907
0.137545
0.229241
0.382068
0.715232
0.695364
0.657667
0.657667
0.619969
0.055018
0
0
0.276972
3,791
129
92
29.387597
0.716162
0.003693
0
0.4
0
0
0.015908
0
0
0
0
0
0
1
0.411111
false
0
0.011111
0.377778
0.844444
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
fbeb96861175ca463a76b8d5d17b8e8149caad30
181
py
Python
dsalgos/tests/test_linear_search.py
psd314/dsalgos
c8f40c99ee00009c2b32317f85aa11fdff6693ff
[ "MIT" ]
null
null
null
dsalgos/tests/test_linear_search.py
psd314/dsalgos
c8f40c99ee00009c2b32317f85aa11fdff6693ff
[ "MIT" ]
null
null
null
dsalgos/tests/test_linear_search.py
psd314/dsalgos
c8f40c99ee00009c2b32317f85aa11fdff6693ff
[ "MIT" ]
null
null
null
from src.algos.search.linear_search import linear_search


def test_linear_search():
    a = [1, 2, 3]
    assert linear_search(a, 1)
    assert not linear_search(a, 0)
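The test pins down the contract: a membership scan that returns a boolean. A minimal sketch of an implementation satisfying these assertions; the real src.algos.search.linear_search may differ.

# Hypothetical implementation, consistent with the test above.
def linear_search(items, target):
    # Scan left to right: O(n) comparisons in the worst case.
    for item in items:
        if item == target:
            return True
    return False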
25.857143
56
0.696133
29
181
4.137931
0.551724
0.5
0.325
0.233333
0
0
0
0
0
0
0
0.034014
0.187845
181
6
57
30.166667
0.782313
0
0
0
0
0
0
0
0
0
0
0
0.4
1
0.2
false
0
0.2
0
0.4
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
224fc41a4e713cb22b93b5f81c752a3eeeb5cf69
43
py
Python
Scripts/dk/timer.py
hhg128/DKGL
c61bc6546ac5655da97462cc532a9034ba08516d
[ "PSF-2.0", "BSD-3-Clause" ]
14
2015-09-12T01:32:05.000Z
2021-10-13T02:52:53.000Z
Scripts/dk/timer.py
hhg128/DKGL
c61bc6546ac5655da97462cc532a9034ba08516d
[ "PSF-2.0", "BSD-3-Clause" ]
null
null
null
Scripts/dk/timer.py
hhg128/DKGL
c61bc6546ac5655da97462cc532a9034ba08516d
[ "PSF-2.0", "BSD-3-Clause" ]
3
2015-11-10T03:12:49.000Z
2018-10-15T15:38:31.000Z
import _dk_core as core

Timer = core.Timer
14.333333
23
0.790698
8
43
4
0.625
0.5625
0
0
0
0
0
0
0
0
0
0
0.162791
43
3
24
14.333333
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
226be7067891ded045daeeaa4ca3a2d4ff543c87
3,624
py
Python
LogReg.py
cedorman/footballmodel
7300e631d3d460b04b69e2769f4f5ac784f6ceb1
[ "Apache-2.0" ]
null
null
null
LogReg.py
cedorman/footballmodel
7300e631d3d460b04b69e2769f4f5ac784f6ceb1
[ "Apache-2.0" ]
null
null
null
LogReg.py
cedorman/footballmodel
7300e631d3d460b04b69e2769f4f5ac784f6ceb1
[ "Apache-2.0" ]
null
null
null
#
# Simple wrapper for Logistic Regression
#
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import roc_auc_score

import logger


class LogReg:
    def __init__(self, X_train, X_test, y_train, y_test):
        """Run a 'standard' LogReg process, use CV to optimize, then print
        results on train and test."""
        self.log = logger.getLogger()
        self.X_train = X_train
        self.X_test = X_test
        self.y_train = y_train
        self.y_test = y_test

        # without cross validation
        # logistic_regression = LogisticRegression(random_state=0, max_iter=2000).fit(X_train, y_train)

        # With cross validation.
        self.logistic_regression = LogisticRegressionCV(random_state=0, max_iter=2000).fit(X_train, y_train)

    def score(self):
        """ Score, where the y_train / y_test is binary. """
        x_shape = self.X_train.shape
        y_shape = self.y_train.shape
        if x_shape[0] != y_shape[0]:
            self.log.warning(f"Problem with shape of x/y {x_shape} {y_shape}")
        if len(y_shape) != 1:
            self.log.warning(f"Problem with shape of x/y {x_shape} {y_shape}")

        # ----------------------------------
        # Training data

        # Score on training data.
        # Note that this uses the 'natural' scoring for this sort of classifier,
        # which is accuracy_score from _classification.py, which is simply
        # the % that match.
        score = self.logistic_regression.score(self.X_train, self.y_train)
        self.log.info(f"Train: {score}")

        # AUC score
        prediction = self.logistic_regression.predict_proba(self.X_train)[:, 1]
        auc_score = roc_auc_score(self.y_train, prediction, multi_class='ovr')
        self.log.info(f"Train: {auc_score}")

        # ----------------------------------
        # Test data
        score = self.logistic_regression.score(self.X_test, self.y_test)
        self.log.info(f"Test: {score}")

        # AUC score
        prediction = self.logistic_regression.predict_proba(self.X_test)[:, 1]
        auc_score = roc_auc_score(self.y_test, prediction, multi_class='ovr')
        self.log.info(f"Test: {auc_score}")

    def score_multi_class(self):
        """ Score, where the y_train / y_test is multiclass. """
        x_shape = self.X_train.shape
        y_shape = self.y_train.shape
        if x_shape[0] != y_shape[0]:
            self.log.warning(f"Problem with shape of x/y {x_shape} {y_shape}")
        if len(y_shape) > 1:
            self.log.warning(f"Problem with shape of x/y {x_shape} {y_shape}")

        # ----------------------------------
        # Training data

        # Score on training data.
        # Note that this uses the 'natural' scoring for this sort of classifier,
        # which is accuracy_score from _classification.py, which is simply
        # the % that match.
        score = self.logistic_regression.score(self.X_train, self.y_train)
        self.log.info(f"Train: {score}")

        # AUC score
        prediction = self.logistic_regression.predict_proba(self.X_train)
        auc_score = roc_auc_score(self.y_train, prediction, multi_class='ovr')
        self.log.info(f"Train: {auc_score}")

        # ----------------------------------
        # Test data
        score = self.logistic_regression.score(self.X_test, self.y_test)
        self.log.info(f"Test: {score}")

        # AUC score
        prediction = self.logistic_regression.predict_proba(self.X_test)
        auc_score = roc_auc_score(self.y_test, prediction, multi_class='ovr')
        self.log.info(f"Test: {auc_score}")
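A minimal usage sketch under stated assumptions: synthetic binary data generated with scikit-learn, and the project's own logger module importable on the path.

# Illustrative only: exercise the wrapper on synthetic binary data.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = LogReg(X_train, X_test, y_train, y_test)  # fits LogisticRegressionCV in __init__
model.score()  # logs accuracy and ROC-AUC for both train and test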
36.979592
108
0.612859
494
3,624
4.275304
0.176113
0.064394
0.09375
0.045455
0.769886
0.769886
0.769886
0.769886
0.768939
0.74053
0
0.006593
0.246689
3,624
97
109
37.360825
0.767033
0.263245
0
0.577778
0
0
0.122184
0
0
0
0
0
0
1
0.066667
false
0
0.066667
0
0.155556
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2274abf6a22cee96952d2bf90ddaa9d8d4c86f7e
126
py
Python
odin/utilities/__init__.py
gsamarakoon/Odin
e2e9d638c68947d24f1260d35a3527dd84c2523f
[ "MIT" ]
103
2017-01-14T19:38:14.000Z
2022-03-10T12:52:09.000Z
odin/utilities/__init__.py
gsamarakoon/Odin
e2e9d638c68947d24f1260d35a3527dd84c2523f
[ "MIT" ]
6
2017-01-19T01:38:53.000Z
2020-03-09T19:03:18.000Z
odin/utilities/__init__.py
JamesBrofos/Odin
e2e9d638c68947d24f1260d35a3527dd84c2523f
[ "MIT" ]
33
2017-02-05T21:51:17.000Z
2021-12-22T20:38:30.000Z
from .odin_init import odin_init
from .compute_days_elapsed import compute_days_elapsed
from .fund_actions import period_dict
31.5
54
0.880952
20
126
5.15
0.55
0.15534
0.349515
0
0
0
0
0
0
0
0
0
0.095238
126
3
55
42
0.903509
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
97e97c98e0747a68f726e7e83f4d6d9c5a7729a7
40
py
Python
classes/Loss/__init__.py
coopersigrist/DnDML
782b8908147fc9d90c6fb1dbb25a394ca4022b14
[ "MIT" ]
2
2021-05-31T22:44:50.000Z
2021-09-12T03:19:21.000Z
classes/Loss/__init__.py
coopersigrist/DnDML
782b8908147fc9d90c6fb1dbb25a394ca4022b14
[ "MIT" ]
null
null
null
classes/Loss/__init__.py
coopersigrist/DnDML
782b8908147fc9d90c6fb1dbb25a394ca4022b14
[ "MIT" ]
1
2021-07-22T12:54:47.000Z
2021-07-22T12:54:47.000Z
from .wrapper import create_loss_wrapper
40
40
0.9
6
40
5.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.075
40
1
40
40
0.918919
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
97f19c3836dff71534af3b763817774ff1d5b3f6
151
py
Python
academy/www/index.py
frappe/academy
052090fd714542e35997eb3a1285e6ccd7cebeaa
[ "MIT" ]
3
2019-06-18T04:57:58.000Z
2020-03-24T09:56:05.000Z
academy/www/index.py
frappe/academy
052090fd714542e35997eb3a1285e6ccd7cebeaa
[ "MIT" ]
1
2019-06-22T14:38:16.000Z
2019-06-22T14:38:16.000Z
academy/www/index.py
frappe/academy
052090fd714542e35997eb3a1285e6ccd7cebeaa
[ "MIT" ]
10
2019-12-04T07:47:34.000Z
2022-03-15T07:23:27.000Z
from __future__ import unicode_literals

import frappe

no_cache = 1


def get_context(context):
    context.academy = frappe.get_single("Academy Settings")
21.571429
56
0.81457
21
151
5.47619
0.714286
0.243478
0
0
0
0
0
0
0
0
0
0.007463
0.112583
151
7
56
21.571429
0.850746
0
0
0
0
0
0.105263
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5