hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
4f741ee37b51a1290b1acafa2b316f8926d3afe4
162
py
Python
projects/PointsColletion/pointscollection/layers/points_collection_ops/__init__.py
li-haoran/detectron2
84aebaaed19b07cce9dfd579f98b09ad4ed22e90
[ "Apache-2.0" ]
null
null
null
projects/PointsColletion/pointscollection/layers/points_collection_ops/__init__.py
li-haoran/detectron2
84aebaaed19b07cce9dfd579f98b09ad4ed22e90
[ "Apache-2.0" ]
null
null
null
projects/PointsColletion/pointscollection/layers/points_collection_ops/__init__.py
li-haoran/detectron2
84aebaaed19b07cce9dfd579f98b09ad4ed22e90
[ "Apache-2.0" ]
null
null
null
from .functions.points_collect import points_collect from .modules.points_collect import PointsCollectPack __all__ = [ 'point_collect','PointsCollectPack' ]
23.142857
53
0.814815
17
162
7.294118
0.529412
0.314516
0.306452
0
0
0
0
0
0
0
0
0
0.111111
162
6
54
27
0.861111
0
0
0
0
0
0.185185
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
4fa63979787a5ce275ba187d8f73d121afe2f38f
61
py
Python
webapp/api/__init__.py
scorpio975/d-repr
1d08024192642233d42d29e1d05f8713ee265bca
[ "MIT" ]
5
2019-10-02T01:04:50.000Z
2022-03-08T09:39:50.000Z
webapp/api/__init__.py
scorpio975/d-repr
1d08024192642233d42d29e1d05f8713ee265bca
[ "MIT" ]
3
2020-06-13T22:09:48.000Z
2021-04-23T08:23:49.000Z
webapp/api/__init__.py
scorpio975/d-repr
1d08024192642233d42d29e1d05f8713ee265bca
[ "MIT" ]
5
2019-10-02T03:01:27.000Z
2021-02-02T13:34:35.000Z
from typing import List, Dict, Tuple, Callable, Any, Optional
61
61
0.786885
9
61
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.131148
61
1
61
61
0.90566
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
96c934bb290b679b9f916460995097ec236cc3d3
149
py
Python
telethon/tl/__init__.py
mohammadtat83/Telethon
e2b523eaa358ef8a6136b6bb3f6d66f563892f7e
[ "MIT" ]
2
2021-01-06T12:49:49.000Z
2021-04-23T16:32:13.000Z
telethon/tl/__init__.py
mohammadtat83/Telethon
e2b523eaa358ef8a6136b6bb3f6d66f563892f7e
[ "MIT" ]
1
2018-03-20T21:15:47.000Z
2018-03-20T21:15:47.000Z
telethon/tl/__init__.py
mohammadtat83/Telethon
e2b523eaa358ef8a6136b6bb3f6d66f563892f7e
[ "MIT" ]
7
2019-07-12T17:11:49.000Z
2022-01-05T19:41:12.000Z
from .tlobject import TLObject from .gzip_packed import GzipPacked from .tl_message import TLMessage from .message_container import MessageContainer
29.8
47
0.865772
19
149
6.631579
0.578947
0
0
0
0
0
0
0
0
0
0
0
0.107383
149
4
48
37.25
0.947368
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
96e5803fd8974e0998f837dfd9a4b5fa832b8946
33
py
Python
Vyom/OnOff.py
mu2x/rPI
9f01013bda666e56f19858b63b0bbc32615a9b0e
[ "MIT" ]
null
null
null
Vyom/OnOff.py
mu2x/rPI
9f01013bda666e56f19858b63b0bbc32615a9b0e
[ "MIT" ]
null
null
null
Vyom/OnOff.py
mu2x/rPI
9f01013bda666e56f19858b63b0bbc32615a9b0e
[ "MIT" ]
null
null
null
#Written by Vyom print('onoff')
8.25
16
0.69697
5
33
4.6
1
0
0
0
0
0
0
0
0
0
0
0
0.151515
33
3
17
11
0.821429
0.454545
0
0
0
0
0.294118
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
8c1625cc000223f318eb4a7e6894b0d9806986b3
32
py
Python
language-python-test/test/features/strings/newline.py
wbadart/language-python
6c048c215ff7fe4a5d5cc36ba3c17a666af74821
[ "BSD-3-Clause" ]
null
null
null
language-python-test/test/features/strings/newline.py
wbadart/language-python
6c048c215ff7fe4a5d5cc36ba3c17a666af74821
[ "BSD-3-Clause" ]
null
null
null
language-python-test/test/features/strings/newline.py
wbadart/language-python
6c048c215ff7fe4a5d5cc36ba3c17a666af74821
[ "BSD-3-Clause" ]
null
null
null
"\n" "\nfoo" "bar\n" "foo\nbar"
6.4
10
0.5
6
32
2.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.125
32
4
11
8
0.571429
0
0
0
0
0
0.625
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
8c58ac86519aaee68a5c8b1e32a7a53e65a9e7ef
118
py
Python
src/core/settings/__init__.py
Alirezaja1384/MajazAmooz
9200e46bed33aeb60d578a5c4c02013a8032cf08
[ "MIT" ]
3
2021-04-01T19:42:53.000Z
2022-03-01T09:50:17.000Z
src/core/settings/__init__.py
Alirezaja1384/MajazAmooz
9200e46bed33aeb60d578a5c4c02013a8032cf08
[ "MIT" ]
null
null
null
src/core/settings/__init__.py
Alirezaja1384/MajazAmooz
9200e46bed33aeb60d578a5c4c02013a8032cf08
[ "MIT" ]
null
null
null
from .base import DEBUG if DEBUG: from .development import * # noqa else: from .production import * # noqa
16.857143
38
0.669492
15
118
5.266667
0.6
0.253165
0
0
0
0
0
0
0
0
0
0
0.254237
118
6
39
19.666667
0.897727
0.076271
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
8c5b68b4a19c7db7d2e7a1404b4b96f88d5111a3
16,359
py
Python
pycce/run/clusters.py
MICCoMpy/PyCCE
b24a311f54d04ce452ef4b75f52a61a35d502563
[ "MIT" ]
null
null
null
pycce/run/clusters.py
MICCoMpy/PyCCE
b24a311f54d04ce452ef4b75f52a61a35d502563
[ "MIT" ]
null
null
null
pycce/run/clusters.py
MICCoMpy/PyCCE
b24a311f54d04ce452ef4b75f52a61a35d502563
[ "MIT" ]
3
2021-12-18T16:25:01.000Z
2022-03-15T03:02:44.000Z
""" This module contains information about the way the cluster expansion is implemented in the package. """ import functools import operator import warnings import numpy as np from pycce.sm import _smc try: from mpi4py import MPI mpiop = {'imul': MPI.PROD, 'mul': MPI.PROD, 'prod': MPI.PROD, 'iadd': MPI.SUM, 'add': MPI.SUM, 'sum': MPI.SUM } except ImportError: mpiop = None def cluster_expansion_decorator(_func=None, *, result_operator=operator.imul, contribution_operator=operator.ipow, removal_operator=operator.itruediv, addition_operator=np.prod): """ Decorator for creating cluster correlation expansion of the method of ``RunObject`` class. Args: _func (func): Function to expand. result_operator (func): Operator which will combine the result of expansion (default: operator.imul). contribution_operator (func): Operator which will combine multiple contributions of the same cluster (default: operator.ipow) in the optimized approach. result_operator (func): Operator which will combine the result of expansion (default: operator.imul). removal_operator (func): Operator which will remove subcluster contribution from the given cluster contribution. First argument cluster contribution, second - subcluster contribution (default: operator.itruediv). addition_operator (func): Group operation which will combine contributions from the different clusters into one contribution (default: np.prod). Returns: func: Expanded function. 
""" def inner_cluster_expansion_decorator(function): @functools.wraps(function) def cluster_expansion(self, *arg, **kwarg): if self.direct: return direct_approach(function, self, *arg, result_operator=result_operator, removal_operator=removal_operator, addition_operator=addition_operator, **kwarg) else: return optimized_approach(function, self, *arg, result_operator=result_operator, contribution_operator=contribution_operator, **kwarg) return cluster_expansion if _func is None: return inner_cluster_expansion_decorator else: return inner_cluster_expansion_decorator(_func) def optimized_approach(function, self, *arg, result_operator=operator.imul, contribution_operator=operator.ipow, **kwarg): """ Optimized approach to compute cluster correlation expansion. Args: function (func): Function to expand. self (RunObject): Object whose method is expanded. *arg: list of positional arguments of the expanded function. result_operator (func): Operator which will combine the result of expansion (default: operator.imul). contribution_operator (func): Operator which will combine multiple contributions of the same cluster (default: operator.ipow). **kwarg: Dictionary containing all keyword arguments of the expanded function. Returns: func: Expanded function. """ subclusters = self.clusters revorders = sorted(subclusters)[::-1] norders = len(revorders) if self.parallel: try: from mpi4py import MPI except ImportError: warnings.warn('Parallel failed: mpi4py is not found. 
Running serial.') self.parallel = False if self.parallel: comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() else: rank = 0 # If there is only one set of indexes for only one order, # Then for this subcluster nelements < maximum CCE order if norders == 1 and subclusters[revorders[0]].shape[0] == 1: verticles = subclusters[revorders[0]][0] return function(self, verticles, *arg, **kwarg) result = 1 result = contribution_operator(result, 0) # The Highest possible L will have all powers of 1 power = {} # Number of visited orders from highest to lowest visited = 0 for order in revorders: nclusters = subclusters[order].shape[0] current_power = np.ones(nclusters, dtype=np.int32) # indexes of the cluster of size order are stored in v if self.parallel: remainder = nclusters % size add = int(rank < remainder) each = nclusters // size block = each + add start = rank * each + rank if rank < remainder else rank * each + remainder else: start = 0 block = nclusters for index in range(start, start + block): v = subclusters[order][index] # First, find the correct power. Iterate over all higher orders for higherorder in revorders[:visited]: # np.isin gives bool array of shape subclusters[higherorder], # which is np.array of # indexes of subclusters with order = higherorder. # Entries are True if value is # present in v and False if values are not present in v. # Sum bool entries in inside cluster, # if the sum equal to size of v, # then v is inside the given subcluster. 
# containv is 1D bool array with values of i-element True # if i-subcluster of # subclusters[higherorder] contains v containv = np.count_nonzero( np.isin(subclusters[higherorder], v), axis=1) == v.size # Power of cluster v is decreased by sum of powers of all the higher orders, # As all of them have to be divided by v current_power[index] -= np.sum(power[higherorder][containv], dtype=np.int32) vcalc = function(v, *arg, **kwarg) vcalc = contribution_operator(vcalc, current_power[index]) result = result_operator(result, vcalc) if self.parallel: buffer = np.empty(current_power.shape, dtype=np.int32) comm.Allreduce(current_power, buffer, MPI.SUM) current_power = buffer - size + 1 power[order] = current_power visited += 1 # print('Computed {} of order {} for {} clusters'.format( # function.__name__, order, subclusters[order].shape[0])) _smc.clear() if self.parallel: if rank == 0: result_shape = result.shape else: result_shape = None result_shape = comm.bcast(result_shape, root=0) if np.asarray(result).shape != result_shape: result = np.ones(result_shape, dtype=np.complex128) result = contribution_operator(result, 0) root_result = np.zeros(result_shape, dtype=np.complex128) comm.Allreduce(result.astype(np.complex128), root_result, mpiop[result_operator.__name__]) else: root_result = result return root_result def direct_approach(function, self, *arg, result_operator=operator.imul, removal_operator=operator.itruediv, addition_operator=np.prod, **kwarg): """ Direct approach to compute cluster correlation expansion. Args: function (func): Function to expand. self (RunObject): Object whose method is expanded. result_operator (func): Operator which will combine the result of expansion (default: operator.imul). removal_operator (func): Operator which will remove subcluster contribution from the given cluster contribution. First argument cluster contribution, second - subcluster contribution (default: operator.itruediv). 
addition_operator (func): Group operation which will combine contributions from the different clusters into one contribution (default: np.prod). **kwarg: Dictionary containing all keyword arguments of the expanded function. Returns: func: Expanded method. """ subclusters = self.clusters if self.parallel: try: from mpi4py import MPI except ImportError: warnings.warn('Parallel failed: mpi4py is not found. Running serial') self.parallel = False MPI = None orders = sorted(subclusters) norders = len(orders) if self.parallel: comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() else: rank = 0 comm = None # print(dms_zero.mask) # If there is only one set of indexes for only one order, # Then for this subcluster nelements < maximum CCE order if norders == 1 and subclusters[orders[0]].shape[0] == 1: verticles = subclusters[orders[0]][0] return function(self, verticles, *arg, **kwarg) # print(zero_power) # The Highest possible L will have all powers of 1 result_tilda = {} visited = 0 result = 1 - result_operator(1, 0) for order in orders: current_order = [] # indexes of the cluster of size order are stored in v nclusters = subclusters[order].shape[0] if self.parallel: remainder = nclusters % size add = int(rank < remainder) each = nclusters // size block = each + add start = rank * each + rank if rank < remainder else rank * each + remainder else: start = 0 block = nclusters for index in range(start, start + block): v = subclusters[order][index] vcalc = function(v, *arg, **kwarg) for lowerorder in orders[:visited]: contained_in_v = np.all(np.isin(subclusters[lowerorder], v), axis=1) lower_vcalc = addition_operator(result_tilda[lowerorder][contained_in_v], axis=0) vcalc = removal_operator(vcalc, lower_vcalc) current_order.append(vcalc) current_order = np.array(current_order, copy=False) if self.parallel: comm.Barrier() result_shape = vcalc.shape if rank == 0 else None result_shape = comm.bcast(result_shape, root=0) chunk = np.zeros((nclusters, *result_shape), 
dtype=np.complex128) chunk[start:start + block] = current_order.reshape(block, *result_shape) currrent_buffer = np.zeros((nclusters, *result_shape), dtype=np.complex128) comm.Allreduce(chunk, currrent_buffer, MPI.SUM) current_order = currrent_buffer result_tilda[order] = current_order visited += 1 for o in orders: result = result_operator(result, addition_operator(result_tilda[o], axis=0)) return result def interlaced_decorator(_func=None, *, result_operator=operator.imul, contribution_operator=operator.ipow): """ Decorator for creating interlaced cluster correlation expansion of the method of ``RunObject`` class. Args: _func (func): Function to expand. result_operator (func): Operator which will combine the result of expansion (default: operator.imul). contribution_operator (func): Operator which will combine multiple contributions of the same cluster (default: operator.ipow) in the optimized approach. Returns: func: Expanded method. """ def inner_interlaced_decorator(function): @functools.wraps(function) def cluster_expansion(self, *arg, **kwarg): subclusters = self.clusters revorders = sorted(subclusters)[::-1] norders = len(revorders) if self.parallel: try: from mpi4py import MPI except ImportError: warnings.warn('Parallel failed: mpi4py is not found. 
Running serial.') self.parallel = False if self.parallel: comm = MPI.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() else: rank = 0 # If there is only one set of indexes for only one order, # Then for this subcluster nelements < maximum CCE order if norders == 1 and subclusters[revorders[0]].shape[0] == 1: verticles = subclusters[revorders[0]][0] return function(self, verticles, *arg, **kwarg) result = 1 result = contribution_operator(result, 0) # The Highest possible L will have all powers of 1 power = {} # Number of visited orders from highest to lowest visited = 0 for order in revorders: nclusters = subclusters[order].shape[0] current_power = np.ones(nclusters, dtype=np.int32) # indexes of the cluster of size order are stored in v if self.parallel: remainder = nclusters % size add = int(rank < remainder) each = nclusters // size block = each + add start = rank * each + rank if rank < remainder else rank * each + remainder else: start = 0 block = nclusters for index in range(start, start + block): v = subclusters[order][index] supercluster = [] for higherorder in revorders[:visited]: containv = np.count_nonzero( np.isin(subclusters[higherorder], v), axis=1) == v.size supercluster.append(subclusters[higherorder][containv].ravel()) current_power[index] -= np.sum(power[higherorder][containv], dtype=np.int32) try: supercluster = np.unique(np.concatenate(supercluster)) except ValueError: supercluster = v if not supercluster.size: supercluster = v vcalc = function(v, supercluster, *arg, **kwarg) vcalc = contribution_operator(vcalc, current_power[index]) result = result_operator(result, vcalc) if self.parallel: buffer = np.empty(current_power.shape, dtype=np.int32) comm.Allreduce(current_power, buffer, MPI.SUM) current_power = buffer - size + 1 power[order] = current_power visited += 1 # print('Computed {} of order {} for {} clusters'.format( # function.__name__, order, subclusters[order].shape[0])) _smc.clear() if self.parallel: if rank == 0: result_shape = 
result.shape else: result_shape = None result_shape = comm.bcast(result_shape, root=0) if np.asarray(result).shape != result_shape: result = np.ones(result_shape, dtype=np.complex128) result = contribution_operator(result, 0) root_result = np.zeros(result_shape, dtype=np.complex128) comm.Allreduce(result.astype(np.complex128), root_result, mpiop[result_operator.__name__]) else: root_result = result return root_result return cluster_expansion if _func is None: return inner_interlaced_decorator else: return inner_interlaced_decorator(_func)
37.011312
111
0.577786
1,733
16,359
5.352568
0.127525
0.028461
0.02113
0.026951
0.774903
0.746119
0.7431
0.738465
0.688551
0.674536
0
0.009919
0.34672
16,359
441
112
37.095238
0.858052
0.278929
0
0.740157
0
0
0.015586
0
0
0
0
0
0
1
0.031496
false
0
0.051181
0
0.137795
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
4fc0be0c1be7ef6144975be87e94a305af19f340
188
py
Python
netta/setup.py
zhangdafu12/web
64ce7db4697167215bf9ee25cd5bdc0bd15b5831
[ "MIT" ]
null
null
null
netta/setup.py
zhangdafu12/web
64ce7db4697167215bf9ee25cd5bdc0bd15b5831
[ "MIT" ]
1
2020-03-30T09:26:59.000Z
2020-03-30T09:26:59.000Z
netta/setup.py
zhangdafu12/web
64ce7db4697167215bf9ee25cd5bdc0bd15b5831
[ "MIT" ]
null
null
null
# coding:utf-8 # _author_:Junjie # date:2018/11/6 from distutils.core import setup from Cython.Build import cythonize setup(name='command_node',ext_modules=cythonize("./command_node.py"))
26.857143
69
0.787234
29
188
4.931034
0.793103
0.153846
0
0
0
0
0
0
0
0
0
0.046243
0.079787
188
7
69
26.857143
0.780347
0.228723
0
0
0
0
0.204225
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
4fd6bd8ac1b5d891109673be180b702565ed76d0
55
py
Python
jupyterlabpymolpysnips/Programming/printAtomNumbers.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Programming/printAtomNumbers.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Programming/printAtomNumbers.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
cmd.do('iterate (resi 1), print(name + " %i5" % ID);')
27.5
54
0.545455
9
55
3.333333
1
0
0
0
0
0
0
0
0
0
0
0.043478
0.163636
55
1
55
55
0.608696
0
0
0
0
0
0.8
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
4fff2a0e6ffe8febe7819282ad7540de5c273d36
5,459
py
Python
rdr_service/services/gcp_config.py
all-of-us/raw-data-repository
d28ad957557587b03ff9c63d55dd55e0508f91d8
[ "BSD-3-Clause" ]
39
2017-10-13T19:16:27.000Z
2021-09-24T16:58:21.000Z
rdr_service/services/gcp_config.py
all-of-us/raw-data-repository
d28ad957557587b03ff9c63d55dd55e0508f91d8
[ "BSD-3-Clause" ]
312
2017-09-08T15:42:13.000Z
2022-03-23T18:21:40.000Z
rdr_service/services/gcp_config.py
all-of-us/raw-data-repository
d28ad957557587b03ff9c63d55dd55e0508f91d8
[ "BSD-3-Clause" ]
19
2017-09-15T13:58:00.000Z
2022-02-07T18:33:20.000Z
# # # !!! This file is python 3.x compliant !!! # from collections import OrderedDict from enum import Enum import os # path where temporary service account credential keys are stored GCP_SERVICE_KEY_STORE = "{0}/.rdr/service-keys".format(os.path.expanduser("~")) GCP_PROJECTS = [ "all-of-us-rdr-prod", "all-of-us-rdr-stable", "all-of-us-rdr-staging", "all-of-us-rdr-sandbox", "pmi-drc-api-test", "all-of-us-rdr-careevo-test", "all-of-us-rdr-ptsc-1-test", "all-of-us-rdr-ptsc-2-test", "all-of-us-rdr-ptsc-3-test", "aou-pdr-data-prod" ] class RdrEnvironment(Enum): PROD = "all-of-us-rdr-prod" STABLE = "all-of-us-rdr-stable" STAGING = "all-of-us-rdr-staging" SANDBOX = "all-of-us-rdr-sandbox" TEST = "pmi-drc-api-test" CAREEVO_TEST = "all-of-us-rdr-careevo-test" PTSC_1_TEST = "all-of-us-rdr-ptsc-1-test" PTSC_2_TEST = "all-of-us-rdr-ptsc-2-test" PTSC_3_TEST = "all-of-us-rdr-ptsc-3-test" GCP_INSTANCES = { # List of RDR's GCP projects mapped to their database instance names "all-of-us-rdr-prod": "all-of-us-rdr-prod:us-central1:rdrmaindb", "all-of-us-rdr-stable": "all-of-us-rdr-stable:us-central1:rdrmaindb", "all-of-us-rdr-staging": "all-of-us-rdr-staging:us-central1:rdrmaindb", "all-of-us-rdr-sandbox": "all-of-us-rdr-sandbox:us-central1:rdrmaindb", "pmi-drc-api-test": "pmi-drc-api-test:us-central1:rdrmaindb", "all-of-us-rdr-careevo-test": "all-of-us-rdr-careevo-test:us-central1:rdrmaindb", "all-of-us-rdr-ptsc-1-test": "all-of-us-rdr-ptsc-1-test:us-central1:rdrmaindb", "all-of-us-rdr-ptsc-2-test": "all-of-us-rdr-ptsc-2-test:us-central1:rdrmaindb", "all-of-us-rdr-ptsc-3-test": "all-of-us-rdr-ptsc-3-test:us-central1:rdrmaindb", } GCP_REPLICA_INSTANCES = { "all-of-us-rdr-prod": "all-of-us-rdr-prod:us-central1:rdrbackupdb-a", "all-of-us-rdr-stable": "all-of-us-rdr-stable:us-central1:rdrbackupdb", "all-of-us-rdr-staging": "all-of-us-rdr-staging:us-central1:rdrbackupdb", "all-of-us-rdr-sandbox": "all-of-us-rdr-sandbox:us-central1:rdrmaindb", "pmi-drc-api-test": 
"pmi-drc-api-test:us-central1:rdrbackupdb", "all-of-us-rdr-careevo-test": "all-of-us-rdr-careevo-test:us-central1:rdrbackupdb", "all-of-us-rdr-ptsc-1-test": "all-of-us-rdr-ptsc-1-test:us-central1:rdrbackupdb", "all-of-us-rdr-ptsc-2-test": "all-of-us-rdr-ptsc-2-test:us-central1:rdrbackupdb", "all-of-us-rdr-ptsc-3-test": "all-of-us-rdr-ptsc-3-test:us-central1:rdrbackupdb", } GCP_SERVICES = [ 'default', 'offline', 'resource' ] # Map GCP app service to configuration yaml files. GCP_SERVICE_CONFIG_MAP = OrderedDict({ 'prod': { 'default': { 'type': 'service', 'config_file': "app.yaml", 'default': [ 'rdr_service/app_base.yaml', 'rdr_service/app_prod.yaml' ] }, 'offline': { 'type': 'service', 'default': [ 'rdr_service/offline.yaml' ] }, 'resource': { 'type': 'service', 'default': [ 'rdr_service/resource.yaml' ] }, 'cron': { 'type': 'config', 'default': [ 'rdr_service/cron_default.yaml', 'rdr_service/cron_prod.yaml' ] }, 'queue': { 'type': 'config', 'default': [ 'rdr_service/queue.yaml' ] }, 'index': { 'type': 'config', 'default': [ 'rdr_service/index.yaml' ] } }, 'nonprod': { 'default': { 'type': 'service', 'config_file': "app.yaml", 'default': [ 'rdr_service/app_base.yaml', 'rdr_service/app_nonprod.yaml' ], 'sandbox': [ 'rdr_service/app_base.yaml', 'rdr_service/app_sandbox.yaml' ] }, 'offline': { 'type': 'service', 'default': [ 'rdr_service/offline.yaml' ] }, 'resource': { 'type': 'service', 'default': [ 'rdr_service/resource.yaml' ] }, 'cron': { 'type': 'config', 'default': [ 'rdr_service/cron_default.yaml', ], 'careevo': [ 'rdr_service/cron_default.yaml', 'rdr_service/cron_careevo.yaml' ], 'ptsc': [ 'rdr_service/cron_default.yaml', 'rdr_service/cron_ptsc.yaml' ], 'sandbox': [ 'rdr_service/cron_default.yaml', 'rdr_service/cron_sandbox.yaml' ], 'stable': [ 'rdr_service/cron_default.yaml', 'rdr_service/cron_stable.yaml' ], 'test': [ 'rdr_service/cron_default.yaml', 'rdr_service/cron_test.yaml' ] }, 'queue': { 'type': 'config', 'default': [ 'rdr_service/queue.yaml' ] }, 
'index': { 'type': 'config', 'default': [ 'rdr_service/index.yaml' ] } } })
30.841808
87
0.517311
650
5,459
4.243077
0.123077
0.08702
0.121827
0.174039
0.786439
0.777737
0.74583
0.726613
0.617839
0.575417
0
0.010957
0.314526
5,459
176
88
31.017045
0.726082
0.040484
0
0.4125
0
0
0.522471
0.403519
0
0
0
0
0
1
0
false
0
0.01875
0
0.08125
0
0
0
0
null
0
0
1
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8b17e0dae3968b0d7015c50e22d2f728f2c23982
138
py
Python
models/__init__.py
shinke-li/SimpleView
78a3ca006304df36f04bbb9037a7db7183ebe8a9
[ "BSD-3-Clause" ]
95
2021-06-09T09:44:14.000Z
2022-03-13T12:10:50.000Z
SimpleView/models/__init__.py
jiawei-ren/modelnetc
1187b20954e955c340b545c2ae9a055351b0242f
[ "Apache-2.0" ]
7
2021-06-23T04:44:25.000Z
2022-01-14T15:45:27.000Z
SimpleView/models/__init__.py
jiawei-ren/modelnetc
1187b20954e955c340b545c2ae9a055351b0242f
[ "Apache-2.0" ]
13
2021-07-01T23:55:15.000Z
2022-01-04T12:29:02.000Z
from .mv import MVModel from .rscnn import RSCNN from .pointnet2 import PointNet2 from .dgcnn import DGCNN from .pointnet import PointNet
23
32
0.818841
20
138
5.65
0.4
0
0
0
0
0
0
0
0
0
0
0.016949
0.144928
138
5
33
27.6
0.940678
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8b305df789a525fcdec085f7edbf14be75bcaa31
241
py
Python
lib/algorithms/__init__.py
xuzhiying9510/ncflow
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
[ "Artistic-1.0-cl8" ]
10
2021-02-09T19:25:46.000Z
2022-03-29T13:49:23.000Z
lib/algorithms/__init__.py
xuzhiying9510/ncflow
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
[ "Artistic-1.0-cl8" ]
null
null
null
lib/algorithms/__init__.py
xuzhiying9510/ncflow
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
[ "Artistic-1.0-cl8" ]
5
2020-12-23T15:24:40.000Z
2022-01-06T09:42:38.000Z
from .abstract_formulation import Objective from .path_formulation import PathFormulation from .edge_formulation import EdgeFormulation from .min_max_flow_on_edge import MinMaxFlowOnEdgeOverCap from .smore import SMORE from .ncflow import *
34.428571
57
0.871369
30
241
6.766667
0.533333
0.251232
0
0
0
0
0
0
0
0
0
0
0.099585
241
6
58
40.166667
0.935484
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8b3489762765ff1de2acb63146af397a76e006d6
118
py
Python
mlinsights/timeseries/__init__.py
sdpython/mlinsights
bae59cda775a69bcce83b16b88df2f34a092cb60
[ "MIT" ]
48
2017-11-19T14:59:41.000Z
2022-03-03T15:50:24.000Z
mlinsights/timeseries/__init__.py
sdpython/mlinsights
bae59cda775a69bcce83b16b88df2f34a092cb60
[ "MIT" ]
87
2017-11-20T00:10:32.000Z
2021-11-20T01:48:09.000Z
mlinsights/timeseries/__init__.py
sdpython/mlinsights
bae59cda775a69bcce83b16b88df2f34a092cb60
[ "MIT" ]
12
2019-05-09T07:45:52.000Z
2021-06-28T06:55:53.000Z
""" @file @brief Shortcut to *timeseries*. """ from .ar import ARTimeSeriesRegressor from .utils import build_ts_X_y
14.75
37
0.754237
16
118
5.375
0.875
0
0
0
0
0
0
0
0
0
0
0
0.135593
118
7
38
16.857143
0.843137
0.322034
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8c772b1c1a718d7a66f05bf4ae89dfa8219322e3
37,787
py
Python
utils/graphUtils/GraphMLSimple.py
VishnuDuttSharma/gnn_pathplanning
57f7f46fa8ba4888e2a2044cfb0bc476ee235765
[ "MIT" ]
86
2020-04-04T17:01:20.000Z
2022-03-21T02:28:35.000Z
utils/graphUtils/GraphMLSimple.py
VishnuDuttSharma/gnn_pathplanning
57f7f46fa8ba4888e2a2044cfb0bc476ee235765
[ "MIT" ]
4
2021-03-05T06:38:27.000Z
2021-12-13T03:36:15.000Z
utils/graphUtils/GraphMLSimple.py
VishnuDuttSharma/gnn_pathplanning
57f7f46fa8ba4888e2a2044cfb0bc476ee235765
[ "MIT" ]
14
2020-05-06T03:59:27.000Z
2021-08-02T20:08:56.000Z
# 2018/11/01~2018/07/12 # Fernando Gama, fgama@seas.upenn.edu. # GraphRNN editted by Qingbiao Li """ graphML.py Module for basic GSP and graph machine learning functions. Functionals LSIGF: Applies a linear shift-invariant graph filter spectralGF: Applies a linear shift-invariant graph filter in spectral form NVGF: Applies a node-variant graph filter EVGF: Applies an edge-variant graph filter learnAttentionGSO: Computes the GSO following the attention mechanism graphAttention: Applies a graph attention layer Filtering Layers (nn.Module) GraphFilter: Creates a graph convolutional layer using LSI graph filters SpectralGF: Creates a graph convolutional layer using LSI graph filters in spectral form NodeVariantGF: Creates a graph filtering layer using node-variant graph filters EdgeVariantGF: Creates a graph filtering layer using edge-variant graph filters GraphAttentional: Creates a layer using graph attention mechanisms Activation Functions - Nonlinearities (nn.Module) MaxLocalActivation: Creates a localized max activation function layer MedianLocalActivation: Creates a localized median activation function layer NoActivation: Creates a layer for no activation function Summarizing Functions - Pooling (nn.Module) NoPool: No summarizing function. MaxPoolLocal: Max-summarizing function """ import math import numpy as np import torch import torch.nn as nn import utils.graphUtils.graphTools as graphTools zeroTolerance = 1e-9 # Values below this number are considered zero. infiniteNumber = 1e12 # infinity equals this number # WARNING: Only scalar bias. def LSIGF(h, S, x, b=None): """ LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear shift-invariant graph filter on input and then adds bias. 
Denote as G the number of input features, F the number of output features, E the number of edge features, K the number of filter taps, N the number of nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{G x N} the input data where x_{g} in R^{N} is the graph signal representing feature g, and b in R^{F x N} the bias vector, with b_{f} in R^{N} representing the bias for feature f. Then, the LSI-GF is computed as y_{f} = \sum_{e=1}^{E} \sum_{k=0}^{K-1} \sum_{g=1}^{G} [h_{f,g,e}]_{k} S_{e}^{k} x_{g} + b_{f} for f = 1, ..., F. Inputs: filter_taps (torch.tensor): array of filter taps; shape: output_features x edge_features x filter_taps x input_features GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes input (torch.tensor): input signal; shape: batch_size x input_features x number_nodes bias (torch.tensor): shape: output_features x number_nodes if the same bias is to be applied to all nodes, set number_nodes = 1 so that b_{f} vector becomes b_{f} \mathbf{1}_{N} Outputs: output: filtered signals; shape: batch_size x output_features x number_nodes """ # The basic idea of what follows is to start reshaping the input and the # GSO so the filter coefficients go just as a very plain and simple # linear operation, so that all the derivatives and stuff on them can be # easily computed. 
# h is output_features x edge_weights x filter_taps x input_features # S is edge_weighs x number_nodes x number_nodes # x is batch_size x input_features x number_nodes # b is output_features x number_nodes # Output: # y is batch_size x output_features x number_nodes # Get the parameter numbers: F = h.shape[0] E = h.shape[1] K = h.shape[2] G = h.shape[3] assert S.shape[0] == E N = S.shape[1] assert S.shape[2] == N B = x.shape[0] assert x.shape[1] == G assert x.shape[2] == N # Or, in the notation we've been using: # h in F x E x K x G # S in E x N x N # x in B x G x N # b in F x N # y in B x F x N # Now, we have x in B x G x N and S in E x N x N, and we want to come up # with matrix multiplication that yields z = x * S with shape # B x E x K x G x N. # For this, we first add the corresponding dimensions x = x.reshape([B, 1, G, N]) S = S.reshape([1, E, N, N]) z = x.reshape([B, 1, 1, G, N]).repeat(1, E, 1, 1, 1) # This is for k = 0 # We need to repeat along the E dimension, because for k=0, S_{e} = I for # all e, and therefore, the same signal values have to be used along all # edge feature dimensions. for k in range(1, K): x = torch.matmul(x, S) # B x E x G x N xS = x.reshape([B, E, 1, G, N]) # B x E x 1 x G x N z = torch.cat((z, xS), dim=2) # B x E x k x G x N # This output z is of size B x E x K x G x N # Now we have the x*S_{e}^{k} product, and we need to multiply with the # filter taps. # We multiply z on the left, and h on the right, the output is to be # B x N x F (the multiplication is not along the N dimension), so we reshape # z to be B x N x E x K x G and reshape it to B x N x EKG (remember we # always reshape the last dimensions), and then make h be E x K x G x F and # reshape it to EKG x F, and then multiply y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E * K * G]), h.reshape([F, E * K * G]).permute(1, 0)).permute(0, 2, 1) # And permute againt to bring it from B x N x F to B x F x N. 
# Finally, add the bias if b is not None: y = y + b return y class GraphFilter(nn.Module): """ GraphFilter Creates a (linear) layer that applies a graph filter Initialization: GraphFilter(in_features, out_features, filter_taps, edge_features=1, bias=True) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) filter_taps (int): number of filter taps edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilter.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = GraphFilter(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, K, E=1, bias=True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. 
# Initialize parent super().__init__() # Save parameters: self.G = G self.F = F self.K = K self.E = E self.S = None # No GSO assigned yet # Create parameters: self.weight = nn.parameter.Parameter(torch.Tensor(F, E, K, G)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.G * self.K) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 3 dimensions. assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N - Nin) \ .type(x.dtype).to(x.device) ), dim=2) # Compute the filter output u = LSIGF(self.weight, self.S, x, self.bias) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, " % ( self.G, self.F) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) + \ "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class GraphFilterRNN(nn.Module): """ GraphFilterRNN Creates a (linear) layer that applies a graph filter with Hidden Markov Model Initialization: GraphFilterRNN(in_features, out_features, hidden_features, filter_taps, edge_features=1, bias=True) Inputs: 
in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) hidden_features (int): number of hidden features (each feature is a graph signal) filter_taps (int): number of filter taps edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilter.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: y = GraphFilter(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, H, F, K, E=1, bias=True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. 
# Initialize parent super().__init__() # Save parameters: self.G = G # in_features self.F = F # out_features self.H = H # hidden_features self.K = K # filter_taps self.E = E # edge_features self.S = None # No GSO assigned yet # Create parameters: self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G)) self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) self.weight_U = nn.parameter.Parameter(torch.Tensor(F, E, K, H)) if bias: self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1)) self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1)) self.bias_U = nn.parameter.Parameter(torch.Tensor(F, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv_a = 1. / math.sqrt(self.G * self.K) self.weight_A.data.uniform_(-stdv_a, stdv_a) if self.bias_A is not None: self.bias_A.data.uniform_(-stdv_a, stdv_a) stdv_b = 1. / math.sqrt(self.H * self.K) self.weight_B.data.uniform_(-stdv_b, stdv_b) if self.bias_B is not None: self.bias_B.data.uniform_(-stdv_b, stdv_b) stdv_u = 1. / math.sqrt(self.H * self.K) self.weight_U.data.uniform_(-stdv_u, stdv_u) if self.bias_U is not None: self.bias_U.data.uniform_(-stdv_u, stdv_u) def addGSO(self, S): # Every S has 3 dimensions. 
assert len(S.shape) == 3 # S is of shape E x N x N assert S.shape[0] == self.E self.N = S.shape[1] assert S.shape[2] == self.N self.S = S def forward(self, x, h): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N - Nin) \ .type(x.dtype).to(x.device) ), dim=2) # Compute the filter output u_a = LSIGF(self.weight_A, self.S, x, self.bias_A) u_b = LSIGF(self.weight_B, self.S, h, self.bias_B) h = u_a + u_b u = LSIGF(self.weight_U, self.S, h, self.bias_U) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, hidden_features=%d" % ( self.G, self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) + \ "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString def BatchLSIGF(h, S, x, b=None): """ LSIGF(filter_taps, GSO, input, bias=None) Computes the output of a linear shift-invariant graph filter on input and then adds bias. Denote as F the number of input features, G the number of output features, E the number of edge features, K the number of filter taps, N the number of nodes, S_{e} in R^{N x N} the GSO for edge feature e, x in R^{f x N} the input data where x_{g} in R^{N} is the graph signal representing feature g, and b in R^{G x N} the bias vector, with b_{g} in R^{N} representing the bias for feature f. Then, the LSI-GF is computed as y_{g} = \sum_{e=1}^{E} \sum_{k=0}^{K-1} \sum_{g=1}^{F} [h_{f,g,e}]_{k} S_{e}^{k} x_{f} + b_{f} for g = 1, ..., G. 
Inputs: filter_taps (torch.tensor): array of filter taps; shape: output_features x edge_features x filter_taps x input_features GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes input (torch.tensor): input signal; shape: batch_size x input_features x number_nodes bias (torch.tensor): shape: output_features x number_nodes if the same bias is to be applied to all nodes, set number_nodes = 1 so that b_{f} vector becomes b_{f} \mathbf{1}_{N} Outputs: output: filtered signals; shape: batch_size x output_features x number_nodes """ # The basic idea of what follows is to start reshaping the input and the # GSO so the filter coefficients go just as a very plain and simple # linear operation, so that all the derivatives and stuff on them can be # easily computed. # h is output_features x edge_weights x filter_taps x input_features # S is edge_weighs x number_nodes x number_nodes # x is batch_size x input_features x number_nodes # b is output_features x number_nodes # Output: # y is batch_size x output_features x number_nodes # Get the parameter numbers: G = h.shape[0] E = h.shape[1] K = h.shape[2] F = h.shape[3] assert S.shape[1] == E N = S.shape[2] assert S.shape[3] == N B = x.shape[0] assert x.shape[1] == F assert x.shape[2] == N # Or, in the notation we've been using: # h in G x E x K x F # S in B x E x N x N # x in B x F x N # b in G x N # y in B x G x N # Now, we have x in B x F x N and S in B x E x N x N, and we want to come up # with matrix multiplication that yields z = x * S with shape # B x E x K x F x N. # For this, we first add the corresponding dimensions x = x.reshape([B, 1, F, N]) S = S.reshape([B, E, N, N]) z = x.reshape([B, 1, 1, F, N]).repeat(1, E, 1, 1, 1) # This is for k = 0 # We need to repeat along the E dimension, because for k=0, S_{e} = I for # all e, and therefore, the same signal values have to be used along all # edge feature dimensions. 
for k in range(1, K): x = torch.matmul(x, S) # B x E x F x N xS = x.reshape([B, E, 1, F, N]) # B x E x 1 x F x N z = torch.cat((z, xS), dim=2) # B x E x k x F x N # This output z is of size B x E x K x F x N # Now we have the x*S_{e}^{k} product, and we need to multiply with the # filter taps. # We multiply z on the left, and h on the right, the output is to be # B x N x F (the multiplication is not along the N dimension), so we reshape # z to be B x N x E x K x F and reshape it to B x N x EKG (remember we # always reshape the last dimensions), and then make h be E x K x F x G and # reshape it to EKF x G, and then multiply y = torch.matmul(z.permute(0, 4, 1, 2, 3).reshape([B, N, E * K * F]), h.reshape([F, E * K * G]).permute(1, 0)).permute(0, 2, 1) # And permute againt to bring it from B x N x G to B x G x N. # Finally, add the bias if b is not None: y = y + b return y class GraphFilterBatch(nn.Module): """ GraphFilter Creates a (linear) layer that applies a graph filter Initialization: GraphFilter(in_features, out_features, filter_taps, edge_features=1, bias=True) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) filter_taps (int): number of filter taps edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilter.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). 
Inputs: GSO (torch.tensor): graph shift operator; shape: Batch edge_features x number_nodes x number_nodes Forward call: y = GraphFilter(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, F, G, K, E=1, bias=True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. # Initialize parent super().__init__() # Save parameters: self.F = F self.G = G self.K = K self.E = E self.S = None # No GSO assigned yet # Create parameters: self.weight = nn.parameter.Parameter(torch.Tensor(G, E, K, F)) if bias: self.bias = nn.parameter.Parameter(torch.Tensor(G, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv = 1. / math.sqrt(self.F * self.K) self.weight.data.uniform_(-stdv, stdv) if self.bias is not None: self.bias.data.uniform_(-stdv, stdv) def addGSO(self, S): # Every S has 4 dimensions. 
assert len(S.shape) == 4 # S is of shape B x E x N x N assert S.shape[1] == self.E self.N = S.shape[2] assert S.shape[3] == self.N self.S = S def forward(self, x): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N - Nin) \ .type(x.dtype).to(x.device) ), dim=2) # Compute the filter output u = BatchLSIGF(self.weight, self.S, x, self.bias) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, " % ( self.F, self.G) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) + \ "bias=%s, " % (self.bias is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class GraphFilterRNNBatch(nn.Module): """ GraphFilter Creates a (linear) layer that applies a graph filter Initialization: GraphFilter(in_features, out_features, filter_taps, edge_features=1, bias=True) Inputs: in_features (int): number of input features (each feature is a graph signal) out_features (int): number of output features (each feature is a graph signal) filter_taps (int): number of filter taps edge_features (int): number of features over each edge bias (bool): add bias vector (one bias per feature) after graph filtering Output: torch.nn.Module for a graph filtering layer (also known as graph convolutional layer). Observation: Filter taps have shape out_features x edge_features x filter_taps x in_features Add graph shift operator: GraphFilter.addGSO(GSO) Before applying the filter, we need to define the GSO that we are going to use. 
This allows to change the GSO while using the same filtering coefficients (as long as the number of edge features is the same; but the number of nodes can change). Inputs: GSO (torch.tensor): graph shift operator; shape: Batch edge_features x number_nodes x number_nodes Forward call: y = GraphFilter(x) Inputs: x (torch.tensor): input data; shape: batch_size x in_features x number_nodes Outputs: y (torch.tensor): output; shape: batch_size x out_features x number_nodes """ def __init__(self, G, F, H, K, E=1, bias=True): # K: Number of filter taps # GSOs will be added later. # This combines both weight scalars and weight vectors. # Bias will always be shared and scalar. # Initialize parent super().__init__() # Save parameters: self.F = F self.G = G self.H = H self.K = K self.E = E self.S = None # No GSO assigned yet # Create parameters: self.weight_A = nn.parameter.Parameter(torch.Tensor(H, E, K, G)) self.weight_B = nn.parameter.Parameter(torch.Tensor(H, E, K, H)) self.weight_D = nn.parameter.Parameter(torch.Tensor(F, E, K, H)) if bias: self.bias_A = nn.parameter.Parameter(torch.Tensor(H, 1)) self.bias_B = nn.parameter.Parameter(torch.Tensor(H, 1)) self.bias_D = nn.parameter.Parameter(torch.Tensor(G, 1)) else: self.register_parameter('bias', None) # Initialize parameters self.reset_parameters() def reset_parameters(self): # Taken from _ConvNd initialization of parameters: stdv_a = 1. / math.sqrt(self.F * self.K) self.weight_A.data.uniform_(-stdv_a, stdv_a) if self.bias_A is not None: self.bias_A.data.uniform_(-stdv_a, stdv_a) stdv_b = 1. / math.sqrt(self.H * self.K) self.weight_B.data.uniform_(-stdv_b, stdv_b) if self.bias_B is not None: self.bias_B.data.uniform_(-stdv_b, stdv_b) stdv_d = 1. / math.sqrt(self.H * self.K) self.weight_U.data.uniform_(-stdv_d, stdv_d) if self.bias_U is not None: self.bias_U.data.uniform_(-stdv_d, stdv_d) def addGSO(self, S): # Every S has 4 dimensions. 
assert len(S.shape) == 4 # S is of shape B x E x N x N assert S.shape[1] == self.E self.N = S.shape[2] assert S.shape[3] == self.N self.S = S def updateHiddenState(self, hiddenState): self.hiddenState = hiddenState def forward(self, x, hidden_prev): # x is of shape: batchSize x dimInFeatures x numberNodesIn B = x.shape[0] F = x.shape[1] Nin = x.shape[2] # And now we add the zero padding if Nin < self.N: x = torch.cat((x, torch.zeros(B, F, self.N - Nin) \ .type(x.dtype).to(x.device) ), dim=2) # Compute the filter output u_a = BatchLSIGF(self.weight_A, self.S, x, self.bias_A) u_b = BatchLSIGF(self.weight_B, self.S, self.hiddenState, self.bias_B) sigma = nn.ReLU(inplace=True) self.hiddenStateNext = sigma(u_a + u_b) u = BatchLSIGF(self.weight_D, self.S, self.hiddenStateNext, self.bias_D) self.updateHiddenState(self.hiddenStateNext) # So far, u is of shape batchSize x dimOutFeatures x numberNodes # And we want to return a tensor of shape # batchSize x dimOutFeatures x numberNodesIn # since the nodes between numberNodesIn and numberNodes are not required if Nin < self.N: u = torch.index_select(u, 2, torch.arange(Nin).to(u.device)) return u def extra_repr(self): reprString = "in_features=%d, out_features=%d, hidden_features=%d," % ( self.G, self.F, self.H) + "filter_taps=%d, " % ( self.K) + "edge_features=%d, " % (self.E) + \ "bias=%s, " % (self.bias_D is not None) if self.S is not None: reprString += "GSO stored" else: reprString += "no GSO stored" return reprString class NoActivation(nn.Module): """ NoActivation creates an activation layer that does nothing It is for completeness, to be able to switch between linear models and nonlinear models, without altering the entire architecture model Initialization: NoActivation() Output: torch.nn.Module for an empty activation layer Forward call: y = NoActivation(x) Inputs: x (torch.tensor): input data; shape: batch_size x dim_features x number_nodes Outputs: y (torch.tensor): activated data; shape: batch_size x dim_features x 
number_nodes """ def __init__(self): super().__init__() def forward(self, x): return x def extra_repr(self): reprString = "No Activation Function" return reprString class NoPool(nn.Module): """ This is a pooling layer that actually does no pooling. It has the same input structure and methods of MaxPoolLocal() for consistency. Basically, this allows us to change from pooling to no pooling without necessarily creating a new architecture. In any case, we're pretty sure this function should never ship, and pooling can be avoided directly when defining the architecture. """ def __init__(self, nInputNodes, nOutputNodes, nHops): super().__init__() self.nInputNodes = nInputNodes self.nOutputNodes = nOutputNodes self.nHops = nHops self.neighborhood = None def addGSO(self, GSO): # This is necessary to keep the form of the other pooling strategies # within the SelectionGNN framework. But we do not care about any GSO. pass def forward(self, x): # x should be of shape batchSize x dimNodeSignals x nInputNodes assert x.shape[2] == self.nInputNodes # Check that there are at least the same number of nodes that # we will keep (otherwise, it would be unpooling, instead of # pooling) assert x.shape[2] >= self.nOutputNodes # And do not do anything return x def extra_repr(self): reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % ( self.nInputNodes, self.nOutputNodes, self.nHops) reprString += "no neighborhood needed" return reprString class MaxPoolLocal(nn.Module): """ MaxPoolLocal Creates a pooling layer on graphs by selecting nodes Initialization: MaxPoolLocal(in_dim, out_dim, number_hops) Inputs: in_dim (int): number of nodes at the input out_dim (int): number of nodes at the output number_hops (int): number of hops to pool information Output: torch.nn.Module for a local max-pooling layer. Observation: The selected nodes for the output are always the top ones. 
Add a neighborhood set: Add graph shift operator: GraphFilter.addGSO(GSO) Before being used, we need to define the GSO that will determine the neighborhood that we are going to pool. Inputs: GSO (torch.tensor): graph shift operator; shape: edge_features x number_nodes x number_nodes Forward call: v = MaxPoolLocal(x) Inputs: x (torch.tensor): input data; shape: batch_size x dim_features x in_dim Outputs: y (torch.tensor): pooled data; shape: batch_size x dim_features x out_dim """ def __init__(self, nInputNodes, nOutputNodes, nHops): super().__init__() self.nInputNodes = nInputNodes self.nOutputNodes = nOutputNodes self.nHops = nHops self.neighborhood = None def addGSO(self, S): # Every S has 3 dimensions. assert len(S.shape) == 3 # S is of shape E x N x N (And I don't care about E, because the # computeNeighborhood function takes care of it) self.N = S.shape[1] assert S.shape[2] == self.N # Get the device (before operating with S and losing it, it's cheaper # to store the device now, than to duplicate S -i.e. 
keep a numpy and a # tensor copy of S) device = S.device # Move the GSO to cpu and to np.array so it can be handled by the # computeNeighborhood function S = np.array(S.cpu()) # Compute neighborhood neighborhood = graphTools.computeNeighborhood(S, self.nHops, self.nOutputNodes, self.nInputNodes, 'matrix') # And move the neighborhood back to a tensor neighborhood = torch.tensor(neighborhood).to(device) # The neighborhood matrix has to be a tensor of shape # nOutputNodes x maxNeighborhoodSize assert neighborhood.shape[0] == self.nOutputNodes assert neighborhood.max() <= self.nInputNodes # Store all the relevant information self.maxNeighborhoodSize = neighborhood.shape[1] self.neighborhood = neighborhood def forward(self, x): # x should be of shape batchSize x dimNodeSignals x nInputNodes batchSize = x.shape[0] dimNodeSignals = x.shape[1] assert x.shape[2] == self.nInputNodes # Check that there are at least the same number of nodes that # we will keep (otherwise, it would be unpooling, instead of # pooling) assert x.shape[2] >= self.nOutputNodes # And given that the self.neighborhood is already a torch.tensor matrix # we can just go ahead and get it. # So, x is of shape B x F x N. But we need it to be of shape # B x F x N x maxNeighbor. Why? Well, because we need to compute the # maximum between the value of each node and those of its neighbors. # And we do this by applying a torch.max across the rows (dim = 3) so # that we end up again with a B x F x N, but having computed the max. # How to fill those extra dimensions? Well, what we have is neighborhood # matrix, and we are going to use torch.gather to bring the right # values (torch.index_select, while more straightforward, only works # along a single dimension). # Each row of the matrix neighborhood determines all the neighbors of # each node: the first row contains all the neighbors of the first node, # etc. # The values of the signal at those nodes are contained in the dim = 2 # of x. 
So, just for now, let's ignore the batch and feature dimensions # and imagine we have a column vector: N x 1. We have to pick some of # the elements of this vector and line them up alongside each row # so that then we can compute the maximum along these rows. # When we torch.gather along dimension 0, we are selecting which row to # pick according to each column. Thus, if we have that the first row # of the neighborhood matrix is [1, 2, 0] means that we want to pick # the value at row 1 of x, at row 2 of x in the next column, and at row # 0 of the last column. For these values to be the appropriate ones, we # have to repeat x as columns to build our b x F x N x maxNeighbor # matrix. x = x.unsqueeze(3) # B x F x N x 1 x = x.repeat([1, 1, 1, self.maxNeighborhoodSize]) # BxFxNxmaxNeighbor # And the neighbors that we need to gather are the same across the batch # and feature dimensions, so we need to repeat the matrix along those # dimensions gatherNeighbor = self.neighborhood.reshape([1, 1, self.nOutputNodes, self.maxNeighborhoodSize]) gatherNeighbor = gatherNeighbor.repeat([batchSize, dimNodeSignals, 1, 1]) # And finally we're in position of getting all the neighbors in line xNeighbors = torch.gather(x, 2, gatherNeighbor) # B x F x nOutput x maxNeighbor # Note that this gather function already reduces the dimension to # nOutputNodes. # And proceed to compute the maximum along this dimension v, _ = torch.max(xNeighbors, dim=3) return v def extra_repr(self): reprString = "in_dim=%d, out_dim=%d, number_hops = %d, " % ( self.nInputNodes, self.nOutputNodes, self.nHops) if self.neighborhood is not None: reprString += "neighborhood stored" else: reprString += "NO neighborhood stored" return reprString
38.053374
81
0.601715
5,571
37,787
4.006103
0.087058
0.005108
0.021507
0.025988
0.755444
0.743973
0.734206
0.718254
0.710548
0.695492
0
0.007719
0.317702
37,787
992
82
38.091734
0.857924
0.566465
0
0.691429
0
0
0.041222
0
0
0
0
0
0.08
1
0.097143
false
0.002857
0.014286
0.002857
0.177143
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8c78a8c9a07a6fafb68713783229d94c3bfc38b6
1,916
py
Python
data_log/migrations/0013_auto_20190613_2107.py
Itori/swarfarm
7192e2d8bca093b4254023bbec42b6a2b1887547
[ "Apache-2.0" ]
66
2017-09-11T04:46:00.000Z
2021-03-13T00:02:42.000Z
data_log/migrations/0013_auto_20190613_2107.py
Itori/swarfarm
7192e2d8bca093b4254023bbec42b6a2b1887547
[ "Apache-2.0" ]
133
2017-09-24T21:28:59.000Z
2021-04-02T10:35:31.000Z
data_log/migrations/0013_auto_20190613_2107.py
Itori/swarfarm
7192e2d8bca093b4254023bbec42b6a2b1887547
[ "Apache-2.0" ]
28
2017-08-30T19:04:32.000Z
2020-11-16T04:09:00.000Z
# Generated by Django 2.1.7 on 2019-06-14 04:07 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('data_log', '0012_auto_20190428_0842'), ] operations = [ migrations.AddField( model_name='craftrunelog', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='dungeonrunedrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='magicboxcraftrunecraftdrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='magicboxcraftrunedrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='riftdungeonrunecraftdrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='riftdungeonrunedrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='riftraidrunecraftdrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='shoprefreshrunedrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='wishlogrunedrop', name='ancient', field=models.BooleanField(default=False), ), migrations.AddField( model_name='worldbosslogrunedrop', name='ancient', field=models.BooleanField(default=False), ), ]
29.9375
53
0.566806
150
1,916
7.146667
0.3
0.16791
0.214552
0.251866
0.655784
0.655784
0.655784
0.612873
0.612873
0.612873
0
0.023994
0.325679
1,916
63
54
30.412698
0.805728
0.023486
0
0.701754
1
0
0.156768
0.06153
0
0
0
0
0
1
0
false
0
0.017544
0
0.070175
0
0
0
0
null
0
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
8ca54727dec462aeebe74dde3ec0aed1c59be77a
38
py
Python
experiments/chaotic_artist/test.py
enjalot/adventures_in_opencl
c222d15c076ee3f5f81b529eb47e87c8d8057096
[ "MIT" ]
152
2015-01-04T00:58:08.000Z
2022-02-02T00:11:58.000Z
experiments/wave/test.py
ahmadm-atallah/adventures_in_opencl
c222d15c076ee3f5f81b529eb47e87c8d8057096
[ "MIT" ]
1
2017-09-21T13:36:15.000Z
2017-09-21T13:36:15.000Z
experiments/wave/test.py
ahmadm-atallah/adventures_in_opencl
c222d15c076ee3f5f81b529eb47e87c8d8057096
[ "MIT" ]
71
2015-02-11T17:12:09.000Z
2021-12-06T14:05:28.000Z
import import wave wv = wave.Wave()
7.6
16
0.684211
6
38
4.333333
0.5
0
0
0
0
0
0
0
0
0
0
0
0.210526
38
4
17
9.5
0.866667
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.666667
null
null
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
8cdc59f920e9b61a82af4f3cd312239beb3437ed
181
wsgi
Python
insights.wsgi
DamianFekete/cppinsights-web
dc89f1702bf65c98c0b4556bad7f69a059b62a4c
[ "MIT" ]
17
2018-05-17T12:07:25.000Z
2022-03-09T10:36:42.000Z
insights.wsgi
huntdog1541/cppinsights-web
a8256e2fa1b095d4a30f8afe324b8d1e61b0e245
[ "MIT" ]
34
2018-10-21T17:47:40.000Z
2022-02-21T09:08:01.000Z
insights.wsgi
huntdog1541/cppinsights-web
a8256e2fa1b095d4a30f8afe324b8d1e61b0e245
[ "MIT" ]
10
2018-05-17T12:07:27.000Z
2021-08-24T06:42:18.000Z
#!/usr/bin/python import sys import logging logging.basicConfig(stream=sys.stderr) sys.path.insert(0,"/home/insights/public_html/insights") from insights import app as application
22.625
56
0.80663
27
181
5.37037
0.740741
0
0
0
0
0
0
0
0
0
0
0.005988
0.077348
181
7
57
25.857143
0.862275
0.088398
0
0
0
0
0.213415
0.213415
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5086720c613c63666c8b37e9de9f414b8e07ed0c
119
py
Python
IEProtLib/pc/__init__.py
luwei0917/IEConv_proteins
9c79ea000c20088fa48234f1868e42883a9b5a21
[ "MIT" ]
24
2021-03-09T02:42:12.000Z
2022-03-25T23:48:14.000Z
IEProtLib/pc/__init__.py
luwei0917/IEConv_proteins
9c79ea000c20088fa48234f1868e42883a9b5a21
[ "MIT" ]
1
2021-11-05T20:06:16.000Z
2021-11-05T20:06:16.000Z
IEProtLib/pc/__init__.py
luwei0917/IEConv_proteins
9c79ea000c20088fa48234f1868e42883a9b5a21
[ "MIT" ]
8
2021-05-21T14:07:56.000Z
2022-01-24T09:52:42.000Z
from .AABB import AABB from .PointCloud import PointCloud from .Grid import Grid from .Neighborhood import Neighborhood
29.75
38
0.840336
16
119
6.25
0.375
0
0
0
0
0
0
0
0
0
0
0
0.12605
119
4
38
29.75
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
508bb3a1caafcef71a4fa6e2890529d8fcd5a329
2,246
py
Python
lib/django-0.96/django/contrib/admin/urls.py
MiCHiLU/google_appengine_sdk
3da9f20d7e65e26c4938d2c4054bc4f39cbc5522
[ "Apache-2.0" ]
790
2015-01-03T02:13:39.000Z
2020-05-10T19:53:57.000Z
AppServer/lib/django-0.96/django/contrib/admin/urls.py
nlake44/appscale
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
[ "Apache-2.0" ]
1,361
2015-01-08T23:09:40.000Z
2020-04-14T00:03:04.000Z
AppServer/lib/django-0.96/django/contrib/admin/urls.py
nlake44/appscale
6944af660ca4cb772c9b6c2332ab28e5ef4d849f
[ "Apache-2.0" ]
155
2015-01-08T22:59:31.000Z
2020-04-08T08:01:53.000Z
from django.conf import settings from django.conf.urls.defaults import * if settings.USE_I18N: i18n_view = 'django.views.i18n.javascript_catalog' else: i18n_view = 'django.views.i18n.null_javascript_catalog' urlpatterns = patterns('', ('^$', 'django.contrib.admin.views.main.index'), ('^r/(\d+)/(.*)/$', 'django.views.defaults.shortcut'), ('^jsi18n/$', i18n_view, {'packages': 'django.conf'}), ('^logout/$', 'django.contrib.auth.views.logout'), ('^password_change/$', 'django.contrib.auth.views.password_change'), ('^password_change/done/$', 'django.contrib.auth.views.password_change_done'), ('^template_validator/$', 'django.contrib.admin.views.template.template_validator'), # Documentation ('^doc/$', 'django.contrib.admin.views.doc.doc_index'), ('^doc/bookmarklets/$', 'django.contrib.admin.views.doc.bookmarklets'), ('^doc/tags/$', 'django.contrib.admin.views.doc.template_tag_index'), ('^doc/filters/$', 'django.contrib.admin.views.doc.template_filter_index'), ('^doc/views/$', 'django.contrib.admin.views.doc.view_index'), ('^doc/views/(?P<view>[^/]+)/$', 'django.contrib.admin.views.doc.view_detail'), ('^doc/models/$', 'django.contrib.admin.views.doc.model_index'), ('^doc/models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$', 'django.contrib.admin.views.doc.model_detail'), # ('^doc/templates/$', 'django.views.admin.doc.template_index'), ('^doc/templates/(?P<template>.*)/$', 'django.contrib.admin.views.doc.template_detail'), # "Add user" -- a special-case view ('^auth/user/add/$', 'django.contrib.admin.views.auth.user_add_stage'), # "Change user password" -- another special-case view ('^auth/user/(\d+)/password/$', 'django.contrib.admin.views.auth.user_change_password'), # Add/change/delete/history ('^([^/]+)/([^/]+)/$', 'django.contrib.admin.views.main.change_list'), ('^([^/]+)/([^/]+)/add/$', 'django.contrib.admin.views.main.add_stage'), ('^([^/]+)/([^/]+)/(.+)/history/$', 'django.contrib.admin.views.main.history'), ('^([^/]+)/([^/]+)/(.+)/delete/$', 
'django.contrib.admin.views.main.delete_stage'), ('^([^/]+)/([^/]+)/(.+)/$', 'django.contrib.admin.views.main.change_stage'), ) del i18n_view
51.045455
114
0.64114
264
2,246
5.318182
0.227273
0.194444
0.230769
0.294872
0.495727
0.331197
0
0
0
0
0
0.00789
0.097061
2,246
43
115
52.232558
0.684418
0.08504
0
0
0
0
0.732552
0.644217
0
0
0
0
0
1
0
false
0.09375
0.0625
0
0.0625
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
50abb726c1afe0bb312cacaf312d5451b7f4337b
37
py
Python
test/lmp/util/__init__.py
ProFatXuanAll/char-RNN
531f101b3d1ba20bafd28ca060aafe6f583d1efb
[ "Beerware" ]
null
null
null
test/lmp/util/__init__.py
ProFatXuanAll/char-RNN
531f101b3d1ba20bafd28ca060aafe6f583d1efb
[ "Beerware" ]
null
null
null
test/lmp/util/__init__.py
ProFatXuanAll/char-RNN
531f101b3d1ba20bafd28ca060aafe6f583d1efb
[ "Beerware" ]
null
null
null
"""Test :py:mod:`lmp.util` entry."""
18.5
36
0.567568
6
37
3.5
1
0
0
0
0
0
0
0
0
0
0
0
0.081081
37
1
37
37
0.617647
0.810811
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
50c5461e9740f07ccdd54d37f2bf9797d4917771
113
py
Python
venv/Lib/site-packages/xero_python/payrollnz/api/__init__.py
RobMilinski/Xero-Starter-Branched-Test
c82382e674b34c2336ee164f5a079d6becd1ed46
[ "MIT" ]
77
2020-02-16T03:50:18.000Z
2022-03-11T03:53:26.000Z
venv/Lib/site-packages/xero_python/payrollnz/api/__init__.py
RobMilinski/Xero-Starter-Branched-Test
c82382e674b34c2336ee164f5a079d6becd1ed46
[ "MIT" ]
50
2020-04-06T10:15:52.000Z
2022-03-29T21:27:50.000Z
venv/Lib/site-packages/xero_python/payrollnz/api/__init__.py
RobMilinski/Xero-Starter-Branched-Test
c82382e674b34c2336ee164f5a079d6becd1ed46
[ "MIT" ]
27
2020-06-04T11:16:17.000Z
2022-03-19T06:27:36.000Z
# flake8: noqa # import apis into api package from xero_python.payrollnz.api.payroll_nz_api import PayrollNzApi
22.6
65
0.823009
17
113
5.294118
0.823529
0
0
0
0
0
0
0
0
0
0
0.010101
0.123894
113
4
66
28.25
0.89899
0.362832
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
50dd44e658414d3c8ee23269e759762570657c01
3,768
py
Python
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtGui/QBoxLayout.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
1
2020-04-20T02:27:20.000Z
2020-04-20T02:27:20.000Z
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QBoxLayout.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
null
null
null
resources/dot_PyCharm/system/python_stubs/cache/8cdc475d469a13122bc4bc6c3ac1c215d93d5f120f5cc1ef33a8f3088ee54d8e/PySide/QtGui/QBoxLayout.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
null
null
null
# encoding: utf-8 # module PySide.QtGui # from C:\Python27\lib\site-packages\PySide\QtGui.pyd # by generator 1.147 # no doc # imports import PySide.QtCore as __PySide_QtCore import Shiboken as __Shiboken from QLayout import QLayout class QBoxLayout(QLayout): # no doc def addItem(self, *args, **kwargs): # real signature unknown pass def addLayout(self, *args, **kwargs): # real signature unknown pass def addSpacerItem(self, *args, **kwargs): # real signature unknown pass def addSpacing(self, *args, **kwargs): # real signature unknown pass def addStretch(self, *args, **kwargs): # real signature unknown pass def addStrut(self, *args, **kwargs): # real signature unknown pass def addWidget(self, *args, **kwargs): # real signature unknown pass def count(self, *args, **kwargs): # real signature unknown pass def direction(self, *args, **kwargs): # real signature unknown pass def expandingDirections(self, *args, **kwargs): # real signature unknown pass def hasHeightForWidth(self, *args, **kwargs): # real signature unknown pass def heightForWidth(self, *args, **kwargs): # real signature unknown pass def insertItem(self, *args, **kwargs): # real signature unknown pass def insertLayout(self, *args, **kwargs): # real signature unknown pass def insertSpacerItem(self, *args, **kwargs): # real signature unknown pass def insertSpacing(self, *args, **kwargs): # real signature unknown pass def insertStretch(self, *args, **kwargs): # real signature unknown pass def insertWidget(self, *args, **kwargs): # real signature unknown pass def invalidate(self, *args, **kwargs): # real signature unknown pass def itemAt(self, *args, **kwargs): # real signature unknown pass def maximumSize(self, *args, **kwargs): # real signature unknown pass def minimumHeightForWidth(self, *args, **kwargs): # real signature unknown pass def minimumSize(self, *args, **kwargs): # real signature unknown pass def setDirection(self, *args, **kwargs): # real signature unknown pass def setGeometry(self, *args, 
**kwargs): # real signature unknown pass def setSpacing(self, *args, **kwargs): # real signature unknown pass def setStretch(self, *args, **kwargs): # real signature unknown pass def setStretchFactor(self, *args, **kwargs): # real signature unknown pass def sizeHint(self, *args, **kwargs): # real signature unknown pass def spacing(self, *args, **kwargs): # real signature unknown pass def stretch(self, *args, **kwargs): # real signature unknown pass def takeAt(self, *args, **kwargs): # real signature unknown pass def __init__(self, *args, **kwargs): # real signature unknown pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass BottomToTop = PySide.QtGui.QBoxLayout.Direction.BottomToTop Direction = None # (!) real value is "<type 'PySide.QtGui.QBoxLayout.Direction'>" Down = PySide.QtGui.QBoxLayout.Direction.Down LeftToRight = PySide.QtGui.QBoxLayout.Direction.LeftToRight RightToLeft = PySide.QtGui.QBoxLayout.Direction.RightToLeft staticMetaObject = None # (!) real value is '<PySide.QtCore.QMetaObject object at 0x0000000003F42FC8>' TopToBottom = PySide.QtGui.QBoxLayout.Direction.TopToBottom Up = PySide.QtGui.QBoxLayout.Direction.Up
28.984615
106
0.654989
424
3,768
5.761792
0.224057
0.180925
0.278346
0.243144
0.580434
0.552599
0.552599
0.537045
0
0
0
0.007322
0.238854
3,768
129
107
29.209302
0.844491
0.30759
0
0.419753
0
0
0
0
0
0
0
0
0
1
0.419753
false
0.419753
0.037037
0
0.567901
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
0
1
0
0
5
0f9d31c6295facea46f9c57934b429ed0fa6f51c
992
py
Python
appEngine-DataStore/labs/fortune-teller/solution/model.py
aa1215/cssi_2018
0f18fefb1e681f1abe8b3b22277d8f441d8e973a
[ "Apache-2.0" ]
null
null
null
appEngine-DataStore/labs/fortune-teller/solution/model.py
aa1215/cssi_2018
0f18fefb1e681f1abe8b3b22277d8f441d8e973a
[ "Apache-2.0" ]
null
null
null
appEngine-DataStore/labs/fortune-teller/solution/model.py
aa1215/cssi_2018
0f18fefb1e681f1abe8b3b22277d8f441d8e973a
[ "Apache-2.0" ]
null
null
null
from google.appengine.ext import ndb class Movie(ndb.Model): title = ndb.StringProperty() # media_type = ndb.StringProperty(required=True, default="Movie") runtime = ndb.IntegerProperty(required=False) rating = ndb.FloatProperty(required=False) year = ndb.IntegerProperty(required=False) # def __init__(self, movie_title, run_time, user_rating): # self.title = movie_title # self.runtime_mins = run_time # self.rating = user_rating class User(ndb.Model): username = ndb.StringProperty(required=True) password = ndb.StringProperty(required=True) billing = ndb.StringProperty(required=True) email = ndb.StringProperty(required=True) # def __init__(self, user, passw, bill, mail): # self.username = user # self.password = passw # self.bill = bill # self.email = mail # class TVShow(ndb.model): # title = ndb.StringProperty(required=True) # genre = ndb.StringProperty(required=True)
33.066667
69
0.683468
115
992
5.756522
0.330435
0.205438
0.26435
0.306647
0.090634
0
0
0
0
0
0
0
0.203629
992
29
70
34.206897
0.837975
0.470766
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0.090909
0.090909
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
1
0
0
5
ba140fc4172e66019e3d6cfe4f3ce01a8917e532
273
py
Python
mysite/ct/tests/selenium/selenium_integrate.py
raccoongang/socraticqs2
06201005136ee139846f857dbb2f518736e441de
[ "Apache-2.0" ]
3
2015-11-20T07:33:28.000Z
2017-01-15T23:33:50.000Z
mysite/ct/tests/selenium/selenium_integrate.py
raccoongang/socraticqs2
06201005136ee139846f857dbb2f518736e441de
[ "Apache-2.0" ]
28
2015-07-14T11:33:24.000Z
2017-11-17T15:21:22.000Z
mysite/ct/tests/selenium/selenium_integrate.py
raccoongang/socraticqs2
06201005136ee139846f857dbb2f518736e441de
[ "Apache-2.0" ]
4
2015-04-29T09:04:59.000Z
2017-07-19T14:11:16.000Z
""" Selenium integration tests. """ from django.core.urlresolvers import reverse def test_main_page(selenium, live_server): selenium.get(live_server.url) def test_user_courses(selenium, live_server): selenium.get('%s%s' % (live_server.url, reverse('ct:home')))
21
64
0.74359
38
273
5.131579
0.578947
0.205128
0.184615
0.266667
0.297436
0
0
0
0
0
0
0
0.117216
273
12
65
22.75
0.809129
0.098901
0
0
0
0
0.046218
0
0
0
0
0
0
1
0.4
false
0
0.2
0
0.6
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
ba27aa4b460ee207f25a0a2f9b83f044105855ce
386
py
Python
chempy/properties/tests/test_gas_sol_electrolytes_schumpe_1993.py
matecsaj/chempy
2c93f185e4547739331193c06d77282206621517
[ "BSD-2-Clause" ]
null
null
null
chempy/properties/tests/test_gas_sol_electrolytes_schumpe_1993.py
matecsaj/chempy
2c93f185e4547739331193c06d77282206621517
[ "BSD-2-Clause" ]
null
null
null
chempy/properties/tests/test_gas_sol_electrolytes_schumpe_1993.py
matecsaj/chempy
2c93f185e4547739331193c06d77282206621517
[ "BSD-2-Clause" ]
null
null
null
from chempy.util.testing import requires from chempy.units import units_library, default_units as u from ..gas_sol_electrolytes_schumpe_1993 import lg_solubility_ratio @requires(units_library) def test_lg_solubility_ratio(): lgr = lg_solubility_ratio({'Br-': 0.05*u.molar, 'Na+': 0.050*u.molar}, 'N2O', units=u) assert lgr != 0 # TODO: calculate by hand the reference value
38.6
90
0.764249
61
386
4.606557
0.606557
0.128114
0.181495
0
0
0
0
0
0
0
0
0.03869
0.129534
386
9
91
42.888889
0.797619
0.111399
0
0
0
0
0.026393
0
0
0
0
0.111111
0.142857
1
0.142857
false
0
0.428571
0
0.571429
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
1
0
1
0
0
5
e850c6aec9c3a178205a890a9215d5b9a903e1d0
82
py
Python
tests/test_example.py
HBPSP8Repo/ansible-airflow
be62a762ea2ce1396bd80176984171f1d4eb759f
[ "MIT" ]
21
2016-04-25T02:29:33.000Z
2019-10-22T06:10:35.000Z
tests/test_example.py
HBPSP8Repo/ansible-airflow
be62a762ea2ce1396bd80176984171f1d4eb759f
[ "MIT" ]
1
2020-04-24T07:33:43.000Z
2020-04-24T07:33:43.000Z
tests/test_example.py
HBPSP8Repo/ansible-airflow
be62a762ea2ce1396bd80176984171f1d4eb759f
[ "MIT" ]
9
2016-05-10T12:11:05.000Z
2020-02-19T12:03:39.000Z
def test_airflow_version(Command): assert Command('airflow', 'version').rc == 0
27.333333
46
0.731707
11
82
5.272727
0.727273
0.482759
0
0
0
0
0
0
0
0
0
0.013699
0.109756
82
2
47
41
0.780822
0
0
0
0
0
0.170732
0
0
0
0
0
0.5
1
0.5
false
0
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
0
0
0
0
5
e8932d16a3040c36774444fc8f91a2485c332aac
238
py
Python
pydamain/port/__init__.py
by-Exist/pydamain
40d90dbb2a854bc8286dfb5531754e4651097790
[ "MIT" ]
null
null
null
pydamain/port/__init__.py
by-Exist/pydamain
40d90dbb2a854bc8286dfb5531754e4651097790
[ "MIT" ]
null
null
null
pydamain/port/__init__.py
by-Exist/pydamain
40d90dbb2a854bc8286dfb5531754e4651097790
[ "MIT" ]
null
null
null
# type: ignore from .email_sender import EmailSender from .outbox import Outbox from .repository import ( CollectionOrientedRepository, PersistenceOrientedRepository, GenerateIdentifier, ) from .unit_of_work import UnitOfWork
23.8
37
0.806723
23
238
8.217391
0.695652
0
0
0
0
0
0
0
0
0
0
0
0.147059
238
9
38
26.444444
0.931034
0.05042
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
e8af183d3fb971cb0c019f650382dc94bea8f46e
14,365
py
Python
pysit/core/acquisition.py
zfang-slim/PysitForPython3
dc60537b26018e28d92b7a956a2cf96775f0bdf9
[ "BSD-3-Clause" ]
null
null
null
pysit/core/acquisition.py
zfang-slim/PysitForPython3
dc60537b26018e28d92b7a956a2cf96775f0bdf9
[ "BSD-3-Clause" ]
null
null
null
pysit/core/acquisition.py
zfang-slim/PysitForPython3
dc60537b26018e28d92b7a956a2cf96775f0bdf9
[ "BSD-3-Clause" ]
1
2020-06-13T07:13:07.000Z
2020-06-13T07:13:07.000Z
import copy import numpy as np from mpi4py import MPI from .shot import * from .receivers import * from .sources import * from pysit.util.parallel import ParallelWrapShotNull from pysit.util.compute_tools import * __all__ = ['equispaced_acquisition', 'equispaced_acquisition_given_data', 'equispaced_acquisition_given_locations', 'marine_acquisition'] def marine_acquisition(mesh, wavelet, sources_x_locations=None, sources_y_locations=None, max_offset_x=None, max_offset_y=None, receivers_dx=None, receivers_dy=None, source_depth=None, source_kwargs={}, receiver_depth=None, receiver_kwargs={}, parallel_shot_wrap=ParallelWrapShotNull()): if sources_x_locations is None: raise ValueError( "The horizontal locations of sources are not defined, please set values to variable 'sources_x_locations' ") if max_offset_x is None: raise ValueError( "The horizontal maximal offset is not defined, please set values to variable 'max_offset_x' ") if receivers_dx is None: raise ValueError( "The horizontal receiver sampling interval is not defined, please set values to variable 'receivers_dx' ") m = mesh d = mesh.domain xmin = d.x.lbound xmax = d.x.rbound zmin = d.z.lbound zmax = d.z.rbound if m.dim == 3: raise ValueError( "3D Marine tow string acquisition has not been implemented") if source_depth is None: source_depth = zmin if receiver_depth is None: receiver_depth = zmin shots = list() max_sources = len(sources_x_locations) if m.dim == 2: sources = len(sources_x_locations) local_sources = sources / parallel_shot_wrap.size for k in range(int(local_sources)): index_true = int(local_sources) * parallel_shot_wrap.rank + k subindex = np.unravel_index(index_true, sources) idx = subindex[0] if m.dim == 3: ## 3D marine acquisition has not been implemented jdx = subindex[1] if m.dim == 2: # srcpos = (xmin + (xmax-xmin)*(idx+1.0)/(sources+1.0), source_depth) srcpos = (sources_x_locations[idx], source_depth) elif m.dim == 3: # srcpos = (xmin + (xmax-xmin)*(idx+1.0)/(sources[0]+1.0), ymin + ( # 
ymax-ymin)*(jdx+1.0)/(sources[1]+1.0), source_depth) srcpos = (sources_x_locations[idx], sources_y_locations[jdx]) # Define source location and type source = PointSource(m, srcpos, wavelet, **source_kwargs) # Define set of receivers xpos = np.arange( sources_x_locations[idx], max_offset_x+sources_x_locations[idx], receivers_dx) receiversbase = ReceiverSet( m, [PointReceiver(m, (x, receiver_depth), **receiver_kwargs) for x in xpos]) receivers = copy.deepcopy(receiversbase) # Create and store the shot shot = Shot(source, receivers) shots.append(shot) return shots def equispaced_acquisition(mesh, wavelet, sources=1, receivers='max', source_depth=None, source_kwargs={}, receiver_depth=None, receiver_kwargs={}, parallel_shot_wrap=ParallelWrapShotNull() ): m = mesh d = mesh.domain xmin = d.x.lbound xmax = d.x.rbound zmin = d.z.lbound zmax = d.z.rbound if m.dim == 3: ymin = d.y.lbound ymax = d.y.rbound if source_depth is None: source_depth = zmin if receiver_depth is None: receiver_depth = zmin shots = list() max_sources = m.x.n if m.dim == 2: if receivers == 'max': receivers = m.x.n if sources == 'max': sources = m.x.n if receivers > m.x.n: raise ValueError('Number of receivers exceeds mesh nodes.') if sources > m.x.n: raise ValueError('Number of sources exceeds mesh nodes.') xpos = np.linspace(xmin, xmax, receivers) receiversbase = ReceiverSet(m, [PointReceiver(m, (x, receiver_depth), **receiver_kwargs) for x in xpos]) local_sources = sources / parallel_shot_wrap.size if m.dim == 3: if receivers == 'max': receivers = (m.x.n, m.y.n) # x, y if sources == 'max': sources = (m.x.n, m.y.n) # x, y if receivers[0] > m.x.n or receivers[1] > m.y.n: raise ValueError('Number of receivers exceeds mesh nodes.') if sources[0] > m.x.n or sources[1] > m.y.n: raise ValueError('Number of sources exceeds mesh nodes.') xpos = np.linspace(xmin, xmax, receivers[0]) ypos = np.linspace(ymin, ymax, receivers[1]) receiversbase = ReceiverSet(m, [PointReceiver(m, (x, y, receiver_depth), 
**receiver_kwargs) for x in xpos for y in ypos]) local_sources = np.prod(sources) / parallel_shot_wrap.size print(type(local_sources)) print(local_sources) for k in range(int(local_sources)): index_true = int(local_sources) * parallel_shot_wrap.rank + k subindex = np.unravel_index(index_true, sources) idx = subindex[0] if m.dim == 3: jdx = subindex[1] if m.dim == 2: srcpos = (xmin + (xmax-xmin)*(idx+1.0)/(sources+1.0), source_depth) elif m.dim == 3: srcpos = (xmin + (xmax-xmin)*(idx+1.0)/(sources[0]+1.0), ymin + (ymax-ymin)*(jdx+1.0)/(sources[1]+1.0), source_depth) # Define source location and type source = PointSource(m, srcpos, wavelet, **source_kwargs) # Define set of receivers receivers = copy.deepcopy(receiversbase) # Create and store the shot shot = Shot(source, receivers) shots.append(shot) return shots def equispaced_acquisition_given_locations(mesh, wavelet, sources_x_locations=None, sources_y_locations=None, receivers_x_locations=None, receivers_y_locations=None, source_depth=None, source_kwargs={}, receiver_depth=None, receiver_kwargs={}, parallel_shot_wrap=ParallelWrapShotNull() ): ## Define the acquisition geometry for given sources locations and receivers locations if sources_x_locations is None: raise ValueError("The horizontal locations of sources are not defined, please set values to variable 'sources_x_locations' ") if receivers_x_locations is None: raise ValueError("The horizontal locations of receivers are not defined, please set values to variable 'receivers_x_locations' ") m = mesh d = mesh.domain xmin = d.x.lbound xmax = d.x.rbound zmin = d.z.lbound zmax = d.z.rbound if m.dim == 3: ymin = d.y.lbound ymax = d.y.rbound if source_depth is None: source_depth = zmin if receiver_depth is None: receiver_depth = zmin shots = list() max_sources = len(sources_x_locations) if m.dim == 2: receivers = len(receivers_x_locations) sources = len(sources_x_locations) xpos = receivers_x_locations receiversbase = ReceiverSet(m, [PointReceiver(m, (x, 
receiver_depth), **receiver_kwargs) for x in xpos]) local_sources = sources / parallel_shot_wrap.size if m.dim == 3: receivers = (len(receivers_x_locations), len(receivers_y_locations)) # x, y sources = (len(sources_x_locations), len(sources_y_locations)) # x, y if receivers[0] > m.x.n or receivers[1] > m.y.n: raise ValueError('Number of receivers exceeds mesh nodes.') if sources[0] > m.x.n or sources[1] > m.y.n: raise ValueError('Number of sources exceeds mesh nodes.') xpos = receivers_x_locations ypos = receivers_y_locations receiversbase = ReceiverSet(m, [PointReceiver( m, (x, y, receiver_depth), **receiver_kwargs) for x in xpos for y in ypos]) local_sources = np.prod(sources) / parallel_shot_wrap.size print(type(local_sources)) print(local_sources) for k in range(int(local_sources)): index_true = int(local_sources) * parallel_shot_wrap.rank + k subindex = np.unravel_index(index_true, sources) idx = subindex[0] if m.dim == 3: jdx = subindex[1] if m.dim == 2: # srcpos = (xmin + (xmax-xmin)*(idx+1.0)/(sources+1.0), source_depth) srcpos = (sources_x_locations[idx], source_depth) elif m.dim == 3: # srcpos = (xmin + (xmax-xmin)*(idx+1.0)/(sources[0]+1.0), ymin + ( # ymax-ymin)*(jdx+1.0)/(sources[1]+1.0), source_depth) srcpos = (sources_x_locations[idx], sources_y_locations[jdx]) # Define source location and type source = PointSource(m, srcpos, wavelet, **source_kwargs) # Define set of receivers receivers = copy.deepcopy(receiversbase) # Create and store the shot shot = Shot(source, receivers) shots.append(shot) return shots def equispaced_acquisition_given_data(data, mesh, wavelet, odata, ddata, ndata, source_kwargs={}, receiver_kwargs={}, parallel_shot_wrap=ParallelWrapShotNull() ): source_depth=None, receiver_depth=None, m = mesh d = mesh.domain xmin = d.x.lbound xmax = d.x.rbound zmin = d.z.lbound zmax = d.z.rbound if m.dim == 2: data_time, data_xrec, data_zrec, data_xsrc, data_zsrc = odn2grid_data_2D_time(odata, ddata, ndata) if m.dim == 3: data_time, data_xrec, 
data_yrec, data_zrec, data_xsrc, data_ysrc, data_zsrc = odn2grid_data_3D_time(odata, ddata, ndata) if m.dim == 3: ymin = d.y.lbound ymax = d.y.rbound source_depth = data_zsrc[0] receiver_depth = data_zrec[0] shots = list() max_sources = m.x.n if m.dim == 2: receivers = ndata[1] sources = ndata[3] xpos_rec = data_xrec receiversbase = ReceiverSet(m, [PointReceiver(m, (x, receiver_depth), **receiver_kwargs) for x in xpos_rec]) if np.mod(sources, parallel_shot_wrap.size) != 0: raise ValueError('Currently, we only support the case that mod(number of sources, number of processes) = 0') local_sources = sources / parallel_shot_wrap.size if m.dim == 3: receivers = (ndata[1], ndata[2]) sources = (ndata[4], ndata[5]) xpos_rec = data_xrec ypos_rec = data_yrec receiversbase = ReceiverSet(m, [PointReceiver(m, (x, y, receiver_depth), **receiver_kwargs) for x in xpos_rec for y in ypos_rec]) if np.mod(np.prod(sources), parallel_shot_wrap.size) != 0: raise('Currently, we only support the case that mod(number of sources, number of processes) = 0') local_sources = np.prod(sources) / parallel_shot_wrap.size print(type(local_sources)) local_sources = int(local_sources) if m.dim == 2: if parallel_shot_wrap.rank == 0: data_local = data[:,:,:,0:local_sources,:].squeeze() for i in range(1, parallel_shot_wrap.size): data_send = data[:,:,:,i*local_sources:(i+1)*local_sources,:] parallel_shot_wrap.comm.send(data_send, dest=i, tag=i) else: data_receive=parallel_shot_wrap.comm.recv(source=0, tag=parallel_shot_wrap.rank) print('Receive data from process ', 0) data_local = data_receive.squeeze() if m.dim == 3: if parallel_shot_wrap.rank == 0: data_local = get_local_data(data, n, local_sources, 0) for k in range(1, parallel_shot_wrap.size): data_send = get_local_data(data, n, local_source, k) parallel_shot_wrap.comm.send(data_send, dest=k, tag=k) else: data_local=parallel_shot_wrap.comm.recv(source=0, tag=parallel_shot_wrap.rank) print('Receive data from process ', 0) # data_local = 
np.zeros((data_time, data_xrec*data_yrec, local_sources)) # for k in range(local_sources): for k in range(int(local_sources)): index_true = int(local_sources) * parallel_shot_wrap.rank + k subindex = np.unravel_index(index_true, sources) if m.dim == 2: idx = subindex if m.dim == 3: idx = subindex[0] jdx = subindex[1] if m.dim == 2: srcpos = (data_xsrc[idx], source_depth) elif m.dim == 3: srcpos = (data_xsrc[idx], data_ysrc[jdx], source_depth) # Define source location and type source = PointSource(m, srcpos, wavelet, **source_kwargs) # Define set of receivers receivers = copy.deepcopy(receiversbase) receivers.data = data_local[:,:,k] # Create and store the shot shot = Shot(source, receivers) shots.append(shot) return shots def get_local_data(data, n, local_sources, rank): n_out = (n[0], n[1]*n[2], local_sources) data_out = np.zeros(n_out) for k in range(local_sources): indx_k = rank * local_sources + k indx_sub = np.unravel_index(indx_k, (n[4], n[5])) data_tmp = np.reshape(data[:,:,:,:,indx_sub[0],indx_sub[1],:], (n[0], n[1]*n[2])) data_out[:,:,k] = data_tmp return data_out
31.993318
137
0.577376
1,808
14,365
4.406527
0.090708
0.048199
0.054224
0.040417
0.808083
0.770051
0.738923
0.703904
0.65809
0.646291
0
0.013238
0.321615
14,365
448
138
32.064732
0.80431
0.066968
0
0.662069
0
0
0.086144
0.011815
0
0
0
0
0
1
0.017241
false
0
0.027586
0
0.062069
0.024138
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2cd88c41f40b9bf4f60ec2175eeaa0b64a654267
4,498
py
Python
smart_meter/migrations/0018_auto_20210310_1823.py
GPXenergy/gpx_server_api
9b021522be4414ac95159a0ed576848c463637f9
[ "MIT" ]
null
null
null
smart_meter/migrations/0018_auto_20210310_1823.py
GPXenergy/gpx_server_api
9b021522be4414ac95159a0ed576848c463637f9
[ "MIT" ]
null
null
null
smart_meter/migrations/0018_auto_20210310_1823.py
GPXenergy/gpx_server_api
9b021522be4414ac95159a0ed576848c463637f9
[ "MIT" ]
null
null
null
# Generated by Django 3.0.8 on 2021-03-10 17:23 from django.db import migrations, models def delete_gas_measurements(apps, schema_editor): GasMeasurement = apps.get_model("smart_meter", "GasMeasurement") GasMeasurement.objects.all().delete() class Migration(migrations.Migration): dependencies = [ ('smart_meter', '0017_auto_20210105_1028'), ] operations = [ migrations.RunPython(delete_gas_measurements), migrations.RenameField( model_name='powermeasurement', old_name='power_exp', new_name='actual_export', ), migrations.RenameField( model_name='powermeasurement', old_name='power_imp', new_name='actual_import', ), migrations.RenameField( model_name='smartmeter', old_name='solar', new_name='actual_solar', ), migrations.RenameField( model_name='smartmeter', old_name='gas', new_name='total_gas', ), migrations.RenameField( model_name='smartmeter', old_name='power_export_1', new_name='total_power_export_1', ), migrations.RenameField( model_name='smartmeter', old_name='power_export_2', new_name='total_power_export_2', ), migrations.RenameField( model_name='smartmeter', old_name='power_import_1', new_name='total_power_import_1', ), migrations.RenameField( model_name='smartmeter', old_name='power_import_2', new_name='total_power_import_2', ), migrations.RenameField( model_name='solarmeasurement', old_name='solar', new_name='actual_solar', ), migrations.RemoveField( model_name='gasmeasurement', name='gas', ), migrations.RemoveField( model_name='gasmeasurement', name='total', ), migrations.AddField( model_name='gasmeasurement', name='actual_gas', field=models.DecimalField(decimal_places=3, default=0, max_digits=9), preserve_default=False, ), migrations.AddField( model_name='gasmeasurement', name='total_gas', field=models.DecimalField(decimal_places=3, default=0, max_digits=9), preserve_default=False, ), migrations.AddField( model_name='groupparticipant', name='solar_joined', field=models.DecimalField(decimal_places=3, max_digits=9, null=True), ), migrations.AddField( 
model_name='groupparticipant', name='solar_left', field=models.DecimalField(decimal_places=3, max_digits=9, null=True), ), migrations.AddField( model_name='powermeasurement', name='total_export_1', field=models.DecimalField(decimal_places=3, default=0, max_digits=9), preserve_default=False, ), migrations.AddField( model_name='powermeasurement', name='total_export_2', field=models.DecimalField(decimal_places=3, default=0, max_digits=9), preserve_default=False, ), migrations.AddField( model_name='powermeasurement', name='total_import_1', field=models.DecimalField(decimal_places=3, default=0, max_digits=9), preserve_default=False, ), migrations.AddField( model_name='powermeasurement', name='total_import_2', field=models.DecimalField(decimal_places=3, default=0, max_digits=9), preserve_default=False, ), migrations.AddField( model_name='smartmeter', name='actual_gas', field=models.DecimalField(decimal_places=3, max_digits=9, null=True), ), migrations.AddField( model_name='smartmeter', name='total_solar', field=models.DecimalField(decimal_places=3, max_digits=9, null=True), ), migrations.AddField( model_name='solarmeasurement', name='total_solar', field=models.DecimalField(decimal_places=3, default=0, max_digits=9), preserve_default=False, ), ]
33.318519
81
0.578479
427
4,498
5.807963
0.17096
0.079839
0.102016
0.119758
0.830645
0.775
0.708468
0.657258
0.563306
0.448387
0
0.023407
0.316141
4,498
134
82
33.567164
0.782835
0.010004
0
0.730159
1
0
0.161312
0.005167
0
0
0
0
0
1
0.007937
false
0
0.063492
0
0.095238
0
0
0
0
null
0
0
0
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
fa13a1a0762b1a88b5ba0d4f857289025b890686
130
py
Python
withdraw/admin.py
10sujitkhanal/forzza
d51332fe0655f85deb5acd612754f0b0ed9d2f3f
[ "MIT" ]
null
null
null
withdraw/admin.py
10sujitkhanal/forzza
d51332fe0655f85deb5acd612754f0b0ed9d2f3f
[ "MIT" ]
null
null
null
withdraw/admin.py
10sujitkhanal/forzza
d51332fe0655f85deb5acd612754f0b0ed9d2f3f
[ "MIT" ]
null
null
null
from django.contrib import admin # Register your models here. from withdraw.models import Withdraw admin.site.register(Withdraw)
21.666667
36
0.823077
18
130
5.944444
0.611111
0
0
0
0
0
0
0
0
0
0
0
0.115385
130
6
37
21.666667
0.930435
0.2
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
fa1a80d6dc2976790439636ea4a744d12210f102
147
py
Python
docs/config.py
tonyfast/literacy
c1713a1e2f0aa68fe190a33c73d6a97eccf2ee1e
[ "BSD-3-Clause" ]
13
2016-04-10T19:11:11.000Z
2021-01-25T00:22:23.000Z
docs/config.py
tonyfast/literacy
c1713a1e2f0aa68fe190a33c73d6a97eccf2ee1e
[ "BSD-3-Clause" ]
5
2017-09-25T16:08:36.000Z
2017-10-18T03:26:22.000Z
docs/config.py
tonyfast/literacy
c1713a1e2f0aa68fe190a33c73d6a97eccf2ee1e
[ "BSD-3-Clause" ]
1
2016-04-13T00:08:52.000Z
2016-04-13T00:08:52.000Z
c.TemplateExporter.exclude_input = True c.Exporter.preprocessors = ['literacy.Execute'] #c.Exporter.preprocessors = ['literacy.template.Execute']
29.4
57
0.789116
16
147
7.1875
0.625
0.156522
0.382609
0.521739
0
0
0
0
0
0
0
0
0.068027
147
5
57
29.4
0.839416
0.380952
0
0
0
0
0.175824
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
fa37701da25fbbc5f0745cdeffc1a206008c91ca
165
py
Python
main.py
pawelkunicki/roulette-martingale-simulator
4448be306ed7d256e9a3cecac789cb1669ec4507
[ "MIT" ]
null
null
null
main.py
pawelkunicki/roulette-martingale-simulator
4448be306ed7d256e9a3cecac789cb1669ec4507
[ "MIT" ]
null
null
null
main.py
pawelkunicki/roulette-martingale-simulator
4448be306ed7d256e9a3cecac789cb1669ec4507
[ "MIT" ]
null
null
null
from roulette_simulator import RouletteSimulator from roulette_simulator_gu2i import RouletteSimulatorGUI if __name__ == '__main__': import os app = Qt
15
56
0.781818
18
165
6.555556
0.722222
0.20339
0.355932
0
0
0
0
0
0
0
0
0.007407
0.181818
165
10
57
16.5
0.866667
0
0
0
0
0
0.04878
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
d7081d08e85fb2a0e798366ddf1b5552c035e29c
96
py
Python
venv/lib/python3.8/site-packages/tomlkit/container.py
GiulianaPola/select_repeats
17a0d053d4f874e42cf654dd142168c2ec8fbd11
[ "MIT" ]
2
2022-03-13T01:58:52.000Z
2022-03-31T06:07:54.000Z
venv/lib/python3.8/site-packages/tomlkit/container.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
19
2021-11-20T04:09:18.000Z
2022-03-23T15:05:55.000Z
venv/lib/python3.8/site-packages/tomlkit/container.py
DesmoSearch/Desmobot
b70b45df3485351f471080deb5c785c4bc5c4beb
[ "MIT" ]
null
null
null
/home/runner/.cache/pip/pool/2c/6f/d1/f6cd637b6cb4d8e145912cdfe3e0a4fc73add49774a2ee5a0e2224c989
96
96
0.895833
9
96
9.555556
1
0
0
0
0
0
0
0
0
0
0
0.375
0
96
1
96
96
0.520833
0
0
0
0
0
0
0
0
1
0
0
0
0
null
null
0
0
null
null
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
0
0
0
null
1
0
0
0
1
0
0
0
0
0
0
0
0
5
d762e6cce924432fc18a14e2b3ca96528b49caac
10,348
py
Python
apps/calculator/calculator.py
squirrelcom/TYOS
8fa140fe5c46e5af26a5b504bd6554664abff463
[ "MIT" ]
null
null
null
apps/calculator/calculator.py
squirrelcom/TYOS
8fa140fe5c46e5af26a5b504bd6554664abff463
[ "MIT" ]
null
null
null
apps/calculator/calculator.py
squirrelcom/TYOS
8fa140fe5c46e5af26a5b504bd6554664abff463
[ "MIT" ]
null
null
null
import pygame, sys from pygame.locals import * from CalculatorFunctions import * #My first Pytgon game pygame.init() pygame.display.set_caption('Calculator') pygame.display.set_icon(pygame.image.load('calculator.xpm')) FPS = 30 white=(255,255,255) red=(255,5,0) clock=pygame.time.Clock() gameDisplay = pygame.display.set_mode((700,700)) rectpos = (0,0) font = pygame.font.SysFont(None,40) equasion = '' y=0 answer=None text=font.render(str(answer), False, red) mouse = pygame.draw.rect(gameDisplay,red,Rect((rectpos),(10,10))) button1 = pygame.draw.rect(gameDisplay,red,Rect((100,200),(30,30))) gameDisplay.blit(font.render('1',True,(0,0,200)), (100,200)) button2 = pygame.draw.rect(gameDisplay,red,Rect((150,200),(30,30))) gameDisplay.blit(font.render('2',True,(0,0,200)), (150,200)) button3 = pygame.draw.rect(gameDisplay,red,Rect((200,200),(30,30))) gameDisplay.blit(font.render('3',True,(0,0,200)), (200,200)) button4 = pygame.draw.rect(gameDisplay,red,Rect((250,200),(30,30))) gameDisplay.blit(font.render('4',True,(0,0,200)), (250,200)) button5 = pygame.draw.rect(gameDisplay,red,Rect((300,200),(30,30))) gameDisplay.blit(font.render('5',True,(0,0,200)), (300,200)) button6 = pygame.draw.rect(gameDisplay,red,Rect((350,200),(30,30))) gameDisplay.blit(font.render('6',True,(0,0,200)), (350,200)) button7 = pygame.draw.rect(gameDisplay,red,Rect((400,200),(30,30))) gameDisplay.blit(font.render('7',True,(0,0,200)), (400,200)) button8 = pygame.draw.rect(gameDisplay,red,Rect((450,200),(30,30))) gameDisplay.blit(font.render('8',True,(0,0,200)), (450,200)) button9 = pygame.draw.rect(gameDisplay,red,Rect((500,200),(30,30))) gameDisplay.blit(font.render('9',True,(0,0,200)), (500,200)) button0 = pygame.draw.rect(gameDisplay,red,Rect((550,250),(30,30))) gameDisplay.blit(font.render('0',True,(0,0,200)), (550,200)) buttonAdd = pygame.draw.rect(gameDisplay,red,Rect((100,250),(30,30))) gameDisplay.blit(font.render('+',True,(0,0,200)), (100,250)) buttonSubtract = 
pygame.draw.rect(gameDisplay,red,Rect((150,250),(30,30))) gameDisplay.blit(font.render('-',True,(0,0,200)), (150,250)) buttonMultiply = pygame.draw.rect(gameDisplay,red,Rect((200,250),(30,30))) gameDisplay.blit(font.render('*',True,(0,0,200)), (200,250)) buttonDivide = pygame.draw.rect(gameDisplay,red,Rect((250,250),(30,30))) gameDisplay.blit(font.render('/',True,(0,0,200)), (250,250)) buttonEquals = pygame.draw.rect(gameDisplay,red,Rect((250,250),(30,30))) gameDisplay.blit(font.render('=',True,(0,0,200)), (250,250)) buttonClear = pygame.draw.rect(gameDisplay,red,Rect((300,250),(30,30))) gameDisplay.blit(font.render('C',True,(0,0,200)), (300,250)) buttonDecimal = pygame.draw.rect(gameDisplay,red,Rect((300,250),(30,30))) gameDisplay.blit(font.render('.',True,(0,0,200)), (300,250)) buttonTan = pygame.draw.rect(gameDisplay,red,Rect((450,250),(30,30))) gameDisplay.blit(font.render('tan',True,(0,0,200)), (450,250)) buttonCos = pygame.draw.rect(gameDisplay,red,Rect((500,250),(30,30))) gameDisplay.blit(font.render('cos',True,(0,0,200)), (500,250)) buttonSin = pygame.draw.rect(gameDisplay,red,Rect((550,250),(30,30))) gameDisplay.blit(font.render('sin',True,(0,0,200)), (550,250)) buttonLeftBracket = pygame.draw.rect(gameDisplay,red,Rect((600,250),(30,30))) gameDisplay.blit(font.render('(',True,(0,0,200)), (600,250)) buttonRightBracket = pygame.draw.rect(gameDisplay,red,Rect((650,250),(30,30))) gameDisplay.blit(font.render(')',True,(0,0,200)), (650,250)) buttonSqrttt = pygame.draw.rect(gameDisplay,red,Rect((650,250),(30,30))) gameDisplay.blit(font.render('sqrt',True,(0,0,200)), (100,300)) while True: try: pygame.display.set_caption('Calculator') for event in pygame.event.get(): if event.type == pygame.MOUSEMOTION: rectpos = event.pos if event.type == pygame.MOUSEBUTTONDOWN: if mouse.colliderect(button1): equasion = equasion + '1' if mouse.colliderect(button2): equasion = equasion + '2' if mouse.colliderect(button3): equasion = equasion + '3' if mouse.colliderect(button4): 
equasion = equasion + '4' if mouse.colliderect(button5): equasion = equasion + '5' if mouse.colliderect(button6): equasion = equasion + '6' if mouse.colliderect(button7): equasion = equasion + '7' if mouse.colliderect(button8): equasion = equasion + '8' if mouse.colliderect(button9): equasion = equasion + '9' if mouse.colliderect(button0): equasion = equasion + '0' if mouse.colliderect(buttonAdd): equasion = equasion + '+' if mouse.colliderect(buttonSubtract): equasion = equasion + '-' if mouse.colliderect(buttonMultiply): equasion = equasion + '*' if mouse.colliderect(buttonDivide): equasion = equasion + '/' if mouse.colliderect(buttonDecimal): equasion = equasion + '.' if mouse.colliderect(buttonTan): equasion = equasion + 'tan(' if mouse.colliderect(buttonCos): equasion = equasion + 'cos(' if mouse.colliderect(buttonSin): equasion = equasion + 'sin(' if mouse.colliderect(buttonLeftBracket): equasion = equasion + '(' if mouse.colliderect(buttonRightBracket): equasion = equasion + ')' if mouse.colliderect(buttonSqrttt): equasion = equasion + 'sqrt(' if mouse.colliderect(buttonEquals): if equasion[0] != '+' or equasion[0] != '-' or equasion[0] != '*' or equasion[0] != '/': answer = eval(equasion) text = font.render('='+str(answer), False, red) gameDisplay.blit(text, (0,40)) else: answer = '=Error' if mouse.colliderect(buttonClear): equasion = '' if event.type == QUIT: pygame.quit() sys.exit() gameDisplay.fill(white) mouse = pygame.draw.rect(gameDisplay,red,Rect((rectpos),(10,10))) button1 = pygame.draw.rect(gameDisplay,red,Rect((100,200),(30,30))) gameDisplay.blit(font.render('1',True,(0,0,200)), (100,200)) button2 = pygame.draw.rect(gameDisplay,red,Rect((150,200),(30,30))) gameDisplay.blit(font.render('2',True,(0,0,200)), (150,200)) button3 = pygame.draw.rect(gameDisplay,red,Rect((200,200),(30,30))) gameDisplay.blit(font.render('3',True,(0,0,200)), (200,200)) button4 = pygame.draw.rect(gameDisplay,red,Rect((250,200),(30,30))) 
gameDisplay.blit(font.render('4',True,(0,0,200)), (250,200)) button5 = pygame.draw.rect(gameDisplay,red,Rect((300,200),(30,30))) gameDisplay.blit(font.render('5',True,(0,0,200)), (300,200)) button6 = pygame.draw.rect(gameDisplay,red,Rect((350,200),(30,30))) gameDisplay.blit(font.render('6',True,(0,0,200)), (350,200)) button7 = pygame.draw.rect(gameDisplay,red,Rect((400,200),(30,30))) gameDisplay.blit(font.render('7',True,(0,0,200)), (400,200)) button8 = pygame.draw.rect(gameDisplay,red,Rect((450,200),(30,30))) gameDisplay.blit(font.render('8',True,(0,0,200)), (450,200)) button9 = pygame.draw.rect(gameDisplay,red,Rect((500,200),(30,30))) gameDisplay.blit(font.render('9',True,(0,0,200)), (500,200)) button0 = pygame.draw.rect(gameDisplay,red,Rect((550,200),(30,30))) gameDisplay.blit(font.render('0',True,(0,0,200)), (550,200)) buttonAdd = pygame.draw.rect(gameDisplay,red,Rect((100,250),(30,30))) gameDisplay.blit(font.render('+',True,(0,0,200)), (100,250)) buttonSubtract = pygame.draw.rect(gameDisplay,red,Rect((150,250),(30,30))) gameDisplay.blit(font.render('-',True,(0,0,200)), (150,250)) buttonMultiply = pygame.draw.rect(gameDisplay,red,Rect((200,250),(30,30))) gameDisplay.blit(font.render('*',True,(0,0,200)), (200,250)) buttonDivide = pygame.draw.rect(gameDisplay,red,Rect((250,250),(30,30))) gameDisplay.blit(font.render('/',True,(0,0,200)), (250,250)) buttonEquals = pygame.draw.rect(gameDisplay,red,Rect((300,250),(30,30))) gameDisplay.blit(font.render('=',True,(0,0,200)), (300,250)) buttonClear = pygame.draw.rect(gameDisplay,red,Rect((350,250),(30,30))) gameDisplay.blit(font.render('C',True,(0,0,200)), (350,250)) buttonDecimal = pygame.draw.rect(gameDisplay,red,Rect((400,250),(30,30))) gameDisplay.blit(font.render('.',True,(0,0,200)), (400,250)) buttonTan = pygame.draw.rect(gameDisplay,red,Rect((450,250),(30,30))) gameDisplay.blit(font.render('tan',True,(0,0,200)), (450,250)) buttonCos = pygame.draw.rect(gameDisplay,red,Rect((500,250),(30,30))) 
gameDisplay.blit(font.render('cos',True,(0,0,200)), (500,250)) buttonSin = pygame.draw.rect(gameDisplay,red,Rect((550,250),(30,30))) gameDisplay.blit(font.render('sin',True,(0,0,200)), (550,250)) buttonLeftBracket = pygame.draw.rect(gameDisplay,red,Rect((600,250),(30,30))) gameDisplay.blit(font.render('(',True,(0,0,200)), (600,250)) buttonRightBracket = pygame.draw.rect(gameDisplay,red,Rect((650,250),(30,30))) gameDisplay.blit(font.render(')',True,(0,0,200)), (650,250)) buttonSqrttt = pygame.draw.rect(gameDisplay,red,Rect((100,300),(30,30))) gameDisplay.blit(font.render('sqrt',True,(0,0,200)), (100,300)) gameDisplay.blit(font.render(equasion,True,red), (250,y)) gameDisplay.blit(text, (250,40)) clock.tick(FPS) pygame.display.update() except SyntaxError: answer = 'ERROR'
44.603448
108
0.597797
1,329
10,348
4.651618
0.090293
0.079262
0.108703
0.194112
0.773051
0.722905
0.72242
0.71045
0.684568
0.684568
0
0.131508
0.209316
10,348
231
109
44.796537
0.624053
0.001933
0
0.458101
0
0
0.014331
0
0
0
0
0
0
1
0
false
0
0.01676
0
0.01676
0
0
0
0
null
0
0
1
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
d7827282572a2afca09956c26ebcf47237092a79
16,247
py
Python
tests/test_features_enricher.py
upgini/upgini
b7cc154bd2452a2233b46df585b3e8f5c13b6074
[ "BSD-3-Clause" ]
39
2021-12-03T08:55:25.000Z
2022-02-23T03:43:00.000Z
tests/test_features_enricher.py
upgini/upgini
b7cc154bd2452a2233b46df585b3e8f5c13b6074
[ "BSD-3-Clause" ]
null
null
null
tests/test_features_enricher.py
upgini/upgini
b7cc154bd2452a2233b46df585b3e8f5c13b6074
[ "BSD-3-Clause" ]
3
2021-12-29T10:07:39.000Z
2022-01-28T13:30:54.000Z
import os import pandas as pd import pytest from requests_mock.mocker import Mocker from upgini import FeaturesEnricher, SearchKey from upgini.metadata import RuntimeParameters from .utils import ( mock_default_requests, mock_get_features_meta, mock_get_metadata, mock_initial_search, mock_initial_summary, mock_raw_features, mock_validation_raw_features, mock_validation_search, mock_validation_summary, ) def test_search_keys_validation(requests_mock: Mocker): url = "http://fake_url2" mock_default_requests(requests_mock, url) with pytest.raises(Exception, match="Date and datetime search keys are presented simultaniously"): FeaturesEnricher( search_keys={"d1": SearchKey.DATE, "dt2": SearchKey.DATETIME}, endpoint=url, ) with pytest.raises(Exception, match="COUNTRY search key should be provided if POSTAL_CODE is presented"): FeaturesEnricher(search_keys={"postal_code": SearchKey.POSTAL_CODE}, endpoint=url) def test_features_enricher(requests_mock: Mocker): pd.set_option("mode.chained_assignment", "raise") url = "http://fake_url2" path_to_mock_features = os.path.join( os.path.dirname(os.path.realpath(__file__)), "test_data/binary/mock_features.parquet" ) mock_default_requests(requests_mock, url) search_task_id = mock_initial_search(requests_mock, url) ads_search_task_id = mock_initial_summary( requests_mock, url, search_task_id, hit_rate=99.9, auc=0.66, uplift=0.1, eval_set_metrics=[ {"eval_set_index": 1, "hit_rate": 1.0, "auc": 0.5}, {"eval_set_index": 2, "hit_rate": 0.99, "auc": 0.77}, ], ) mock_get_metadata(requests_mock, url, search_task_id) mock_get_features_meta( requests_mock, url, ads_search_task_id, ads_features=[{"name": "feature", "importance": 10.1, "matchedInPercent": 99.0, "valueType": "NUMERIC"}], etalon_features=[{"name": "SystemRecordId_473310000", "importance": 1.0, "matchedInPercent": 100.0}], ) mock_raw_features(requests_mock, url, search_task_id, path_to_mock_features) path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 
"test_data/binary/data.csv") df = pd.read_csv(path, sep=",") train_df = df.head(10000) train_features = train_df.drop(columns="target") train_target = train_df["target"] eval1_df = df[10000:11000] eval1_features = eval1_df.drop(columns="target") eval1_target = eval1_df["target"] eval2_df = df[11000:12000] eval2_features = eval2_df.drop(columns="target") eval2_target = eval2_df["target"] enricher = FeaturesEnricher( search_keys={"phone_num": SearchKey.PHONE, "rep_date": SearchKey.DATE}, endpoint=url, api_key="fake_api_key", date_format="%Y-%m-%d", ) enriched_train_features = enricher.fit_transform( train_features, train_target, eval_set=[(eval1_features, eval1_target), (eval2_features, eval2_target)], keep_input=True, ) assert enriched_train_features.shape == (10000, 4) metrics = enricher.calculate_metrics( train_features, train_target, eval_set=[(eval1_features, eval1_target), (eval2_features, eval2_target)] ) expected_metrics = pd.DataFrame( [ { "match_rate": 99.9, "baseline roc_auc": 0.5, "enriched roc_auc": 0.4926257640349131, "uplift": -0.007374235965086906, }, {"match_rate": 100.0, "baseline roc_auc": 0.5, "enriched roc_auc": 0.5, "uplift": 0.0}, {"match_rate": 99.0, "baseline roc_auc": 0.5, "enriched roc_auc": 0.5, "uplift": 0.0}, ], index=["train", "eval 1", "eval 2"], ) print("Expected metrics: ") print(expected_metrics) print("Actual metrics: ") print(metrics) assert metrics is not None for segment in expected_metrics.index: for col in expected_metrics.columns: assert metrics.loc[segment, col] == expected_metrics.loc[segment, col] print(enricher.features_info) assert enricher.feature_names_ == ["feature"] assert enricher.feature_importances_ == [10.1] assert len(enricher.features_info) == 2 first_feature_info = enricher.features_info.iloc[0] assert first_feature_info["feature_name"] == "feature" assert first_feature_info["shap_value"] == 10.1 second_feature_info = enricher.features_info.iloc[1] assert second_feature_info["feature_name"] == 
"SystemRecordId_473310000" assert second_feature_info["shap_value"] == 1.0 def test_features_enricher_fit_transform_runtime_parameters(requests_mock: Mocker): pd.set_option("mode.chained_assignment", "raise") url = "http://fake_url2" path_to_mock_features = os.path.join( os.path.dirname(os.path.realpath(__file__)), "test_data/binary/mock_features.parquet" ) mock_default_requests(requests_mock, url) search_task_id = mock_initial_search(requests_mock, url) ads_search_task_id = mock_initial_summary( requests_mock, url, search_task_id, hit_rate=99.9, auc=0.66, uplift=0.1, eval_set_metrics=[ {"eval_set_index": 1, "hit_rate": 100, "auc": 0.5}, {"eval_set_index": 2, "hit_rate": 99, "auc": 0.77}, ], ) mock_get_metadata(requests_mock, url, search_task_id) mock_get_features_meta( requests_mock, url, ads_search_task_id, ads_features=[{"name": "feature", "importance": 10.1, "matchedInPercent": 99.0, "valueType": "NUMERIC"}], etalon_features=[{"name": "SystemRecordId_473310000", "importance": 1.0, "matchedInPercent": 100.0}], ) mock_raw_features(requests_mock, url, search_task_id, path_to_mock_features) path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/binary/data.csv") df = pd.read_csv(path, sep=",") train_df = df.head(10000) train_features = train_df.drop(columns="target") train_target = train_df["target"] eval1_df = df[10000:11000] eval1_features = eval1_df.drop(columns="target") eval1_target = eval1_df["target"] eval2_df = df[11000:12000] eval2_features = eval2_df.drop(columns="target") eval2_target = eval2_df["target"] enricher = FeaturesEnricher( search_keys={"phone_num": SearchKey.PHONE, "rep_date": SearchKey.DATE}, date_format="%Y-%m-%d", endpoint=url, api_key="fake_api_key", runtime_parameters=RuntimeParameters(properties={"runtimeProperty1": "runtimeValue1"}), ) assert enricher.runtime_parameters is not None enricher.fit( train_features, train_target, eval_set=[(eval1_features, eval1_target), (eval2_features, eval2_target)], ) fit_req = None 
initial_search_url = url + "/public/api/v2/search/initial" for elem in requests_mock.request_history: if elem.url == initial_search_url: fit_req = elem # TODO: can be better with # https://metareal.blog/en/post/2020/05/03/validating-multipart-form-data-with-requests-mock/ # It"s do-able to parse req with cgi module and verify contents assert fit_req is not None assert "runtimeProperty1" in str(fit_req.body) assert "runtimeValue1" in str(fit_req.body) validation_search_task_id = mock_validation_search(requests_mock, url, search_task_id) mock_validation_summary( requests_mock, url, search_task_id, ads_search_task_id, validation_search_task_id, hit_rate=99.9, auc=0.66, uplift=0.1, eval_set_metrics=[ {"eval_set_index": 1, "hit_rate": 100, "auc": 0.5}, {"eval_set_index": 2, "hit_rate": 99, "auc": 0.77}, ], ) mock_validation_raw_features(requests_mock, url, validation_search_task_id, path_to_mock_features) transformed = enricher.transform(train_features, keep_input=True) transform_req = None transform_url = url + "/public/api/v2/search/validation?initialSearchTaskId=" + search_task_id for elem in requests_mock.request_history: if elem.url == transform_url: transform_req = elem assert transform_req is not None assert "runtimeProperty1" in str(transform_req.body) assert "runtimeValue1" in str(transform_req.body) assert transformed.shape == (10000, 4) def test_search_with_only_personal_keys(requests_mock: Mocker): url = "https://some.fake.url" mock_default_requests(requests_mock, url) with pytest.raises(Exception): FeaturesEnricher(search_keys={"phone": SearchKey.PHONE, "email": SearchKey.EMAIL}, endpoint=url) def test_filter_by_importance(requests_mock: Mocker): url = "https://some.fake.url" path_to_mock_features = os.path.join( os.path.dirname(os.path.realpath(__file__)), "test_data/binary/mock_features.parquet" ) mock_default_requests(requests_mock, url) search_task_id = mock_initial_search(requests_mock, url) ads_search_task_id = mock_initial_summary( requests_mock, 
url, search_task_id, hit_rate=99.9, auc=0.66, uplift=0.1, eval_set_metrics=[ {"eval_set_index": 1, "hit_rate": 1.0, "auc": 0.5}, {"eval_set_index": 2, "hit_rate": 0.99, "auc": 0.77}, ], ) mock_get_metadata(requests_mock, url, search_task_id) mock_get_features_meta( requests_mock, url, ads_search_task_id, ads_features=[{"name": "feature", "importance": 0.7, "matchedInPercent": 99.0, "valueType": "NUMERIC"}], etalon_features=[{"name": "SystemRecordId_473310000", "importance": 0.3, "matchedInPercent": 100.0}], ) mock_raw_features(requests_mock, url, search_task_id, path_to_mock_features) path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/binary/data.csv") df = pd.read_csv(path, sep=",") train_df = df.head(10000) train_features = train_df.drop(columns="target") train_target = train_df["target"] eval1_df = df[10000:11000] eval1_features = eval1_df.drop(columns="target") eval1_target = eval1_df["target"] eval2_df = df[11000:12000] eval2_features = eval2_df.drop(columns="target") eval2_target = eval2_df["target"] enricher = FeaturesEnricher( search_keys={"phone_num": SearchKey.PHONE, "rep_date": SearchKey.DATE}, date_format="%Y-%m-%d", endpoint=url, api_key="fake_api_key", ) eval_set = [(eval1_features, eval1_target), (eval2_features, eval2_target)] enricher.fit(train_features, train_target, eval_set=eval_set, importance_threshold=0.8) assert enricher.enriched_X is not None # assert len(enricher.enriched_X) == 10000 # assert enricher.enriched_X.columns.to_list() == ["SystemRecordId_473310000", "phone_num", "rep_date"] # assert enricher.enriched_eval_set is not None # assert len(enricher.enriched_eval_set) == 2000 # assert enricher.enriched_eval_set.columns.to_list() == [ # "SystemRecordId_473310000", # "phone_num", # "rep_date", # "eval_set_index" # ] metrics = enricher.calculate_metrics(train_features, train_target, eval_set, importance_threshold=0.8) assert metrics.loc["train", "baseline roc_auc"] == 0.5 assert metrics.loc["eval 1", "baseline 
roc_auc"] == 0.5 assert metrics.loc["eval 2", "baseline roc_auc"] == 0.5 train_features = enricher.fit_transform( train_features, train_target, eval_set=eval_set, keep_input=True, importance_threshold=0.8 ) assert train_features.shape == (10000, 3) validation_search_task_id = mock_validation_search(requests_mock, url, search_task_id) mock_validation_summary( requests_mock, url, search_task_id, ads_search_task_id, validation_search_task_id, hit_rate=99.9, auc=0.66, uplift=0.1, eval_set_metrics=[ {"eval_set_index": 1, "hit_rate": 100, "auc": 0.5}, {"eval_set_index": 2, "hit_rate": 99, "auc": 0.77}, ], ) mock_validation_raw_features(requests_mock, url, validation_search_task_id, path_to_mock_features) test_features = enricher.transform(eval1_features, keep_input=True, importance_threshold=0.8) assert test_features.shape == (1000, 3) def test_filter_by_max_features(requests_mock: Mocker): url = "https://some.fake.url" path_to_mock_features = os.path.join( os.path.dirname(os.path.realpath(__file__)), "test_data/binary/mock_features.parquet" ) mock_default_requests(requests_mock, url) search_task_id = mock_initial_search(requests_mock, url) ads_search_task_id = mock_initial_summary( requests_mock, url, search_task_id, hit_rate=99.9, auc=0.66, uplift=0.1, eval_set_metrics=[ {"eval_set_index": 1, "hit_rate": 1.0, "auc": 0.5}, {"eval_set_index": 2, "hit_rate": 0.99, "auc": 0.77}, ], ) mock_get_metadata(requests_mock, url, search_task_id) mock_get_features_meta( requests_mock, url, ads_search_task_id, ads_features=[{"name": "feature", "importance": 0.7, "matchedInPercent": 99.0, "valueType": "NUMERIC"}], etalon_features=[{"name": "SystemRecordId_473310000", "importance": 0.3, "matchedInPercent": 100.0}], ) mock_raw_features(requests_mock, url, search_task_id, path_to_mock_features) path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data/binary/data.csv") df = pd.read_csv(path, sep=",") train_df = df.head(10000) train_features = 
train_df.drop(columns="target") train_target = train_df["target"] eval1_df = df[10000:11000] eval1_features = eval1_df.drop(columns="target") eval1_target = eval1_df["target"] eval2_df = df[11000:12000] eval2_features = eval2_df.drop(columns="target") eval2_target = eval2_df["target"] enricher = FeaturesEnricher( search_keys={"phone_num": SearchKey.PHONE, "rep_date": SearchKey.DATE}, date_format="%Y-%m-%d", endpoint=url, api_key="fake_api_key", ) eval_set = [(eval1_features, eval1_target), (eval2_features, eval2_target)] enricher.fit(train_features, train_target, eval_set=eval_set, max_features=0) # assert enricher.enriched_X is not None # assert len(enricher.enriched_X) == 10000 # assert enricher.enriched_X.columns.to_list() == ["SystemRecordId_473310000", "phone_num", "rep_date"] # assert enricher.enriched_eval_set is not None # assert len(enricher.enriched_eval_set) == 2000 # assert enricher.enriched_eval_set.columns.to_list() == [ # "SystemRecordId_473310000", # "phone_num", # "rep_date", # "eval_set_index" # ] metrics = enricher.calculate_metrics(train_features, train_target, eval_set, max_features=0) assert metrics.loc["train", "baseline roc_auc"] == 0.5 assert metrics.loc["eval 1", "baseline roc_auc"] == 0.5 assert metrics.loc["eval 2", "baseline roc_auc"] == 0.5 train_features = enricher.fit_transform( train_features, train_target, eval_set=eval_set, keep_input=True, max_features=0 ) assert train_features.shape == (10000, 3) validation_search_task_id = mock_validation_search(requests_mock, url, search_task_id) mock_validation_summary( requests_mock, url, search_task_id, ads_search_task_id, validation_search_task_id, hit_rate=99.9, auc=0.66, uplift=0.1, eval_set_metrics=[ {"eval_set_index": 1, "hit_rate": 100, "auc": 0.5}, {"eval_set_index": 2, "hit_rate": 99, "auc": 0.77}, ], ) mock_validation_raw_features(requests_mock, url, validation_search_task_id, path_to_mock_features) test_features = enricher.transform(eval1_features, keep_input=True, max_features=0) 
assert test_features.shape == (1000, 3)
36.346756
113
0.670893
2,127
16,247
4.787024
0.09779
0.054213
0.050678
0.045374
0.797093
0.794539
0.763602
0.753879
0.73679
0.733746
0
0.046846
0.207731
16,247
446
114
36.428251
0.744173
0.060319
0
0.665722
0
0
0.145519
0.032804
0
0
0
0.002242
0.082153
1
0.016997
false
0
0.05949
0
0.076487
0.014164
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ad24f3360b6ebbfb837304a55ad5bbe9e72450df
71
py
Python
network/__init__.py
ZeeChono/DeepLabV3Plus-Pytorch
88dc8fb03c591e3159a072cd68be3e91aacbb2f8
[ "MIT" ]
729
2019-12-02T13:37:51.000Z
2022-03-30T23:16:26.000Z
network/__init__.py
ZeeChono/DeepLabV3Plus-Pytorch
88dc8fb03c591e3159a072cd68be3e91aacbb2f8
[ "MIT" ]
64
2019-12-18T10:46:13.000Z
2022-03-25T08:45:57.000Z
network/__init__.py
ZeeChono/DeepLabV3Plus-Pytorch
88dc8fb03c591e3159a072cd68be3e91aacbb2f8
[ "MIT" ]
210
2019-12-12T07:44:37.000Z
2022-03-29T09:33:50.000Z
from .modeling import * from ._deeplab import convert_to_separable_conv
35.5
47
0.859155
10
71
5.7
0.8
0
0
0
0
0
0
0
0
0
0
0
0.098592
71
2
47
35.5
0.890625
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
ad5327170bf835bb622b7434597704b92e530cdb
343
py
Python
sols/1672.py
Paul11100/LeetCode
9896c579dff1812c0c76964db8d60603ee715e35
[ "MIT" ]
null
null
null
sols/1672.py
Paul11100/LeetCode
9896c579dff1812c0c76964db8d60603ee715e35
[ "MIT" ]
null
null
null
sols/1672.py
Paul11100/LeetCode
9896c579dff1812c0c76964db8d60603ee715e35
[ "MIT" ]
null
null
null
class Solution: # Max Sum LC (Accepted), O(m * n) time, O(m) space def maximumWealth(self, accounts: List[List[int]]) -> int: return max(sum(row) for row in accounts) # Max Map Sum (Top Voted), O(m * n) time, O(m) space def maximumWealth(self, accounts: List[List[int]]) -> int: return max(map(sum, accounts))
38.111111
62
0.618076
54
343
3.925926
0.444444
0.037736
0.028302
0.066038
0.613208
0.613208
0.613208
0.613208
0.613208
0.613208
0
0
0.230321
343
8
63
42.875
0.80303
0.28863
0
0.4
0
0
0
0
0
0
0
0
0
1
0.4
false
0
0
0.4
1
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
ad9cb5fc8b1f0eda7ecb330a2e5819ac83d3f9f0
91
py
Python
password/apps.py
pyprism/Hiren-Pass
04f5f2b3b0e4b4caab43953acfd9021d51108388
[ "MIT" ]
4
2021-09-27T08:59:23.000Z
2021-09-30T17:45:31.000Z
password/apps.py
pyprism/Hiren-Pass
04f5f2b3b0e4b4caab43953acfd9021d51108388
[ "MIT" ]
141
2017-03-08T10:43:15.000Z
2021-02-04T08:31:08.000Z
password/apps.py
pyprism/Hiren-Pass
04f5f2b3b0e4b4caab43953acfd9021d51108388
[ "MIT" ]
1
2021-09-30T17:45:32.000Z
2021-09-30T17:45:32.000Z
from django.apps import AppConfig class PasswordConfig(AppConfig): name = 'password'
15.166667
33
0.758242
10
91
6.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.164835
91
5
34
18.2
0.907895
0
0
0
0
0
0.087912
0
0
0
0
0
0
1
0
false
0.666667
0.333333
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
1
0
0
5
a8dec56cecda919e3b92b4c4f62d965e2ee23137
164
py
Python
my_tsp/__init__.py
vmeta42/metaai
7800549f34bc9c041a07bddfb8d4c6e72248961c
[ "Apache-2.0" ]
null
null
null
my_tsp/__init__.py
vmeta42/metaai
7800549f34bc9c041a07bddfb8d4c6e72248961c
[ "Apache-2.0" ]
null
null
null
my_tsp/__init__.py
vmeta42/metaai
7800549f34bc9c041a07bddfb8d4c6e72248961c
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import from . import datasets from . import evaluation_metrics from . import models from . import utils from . import trainer
23.428571
39
0.786585
21
164
5.857143
0.47619
0.406504
0
0
0
0
0
0
0
0
0
0
0.182927
164
7
40
23.428571
0.91791
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a8e968ee3c6d4a31b3b4ed9a4b6631720f27c053
76
py
Python
001-099/20/20.py
lunixbochs/project-euler
aa974c5ae68547309f33adbb4e633fe040964855
[ "MIT" ]
6
2015-07-21T20:45:08.000Z
2021-03-13T14:07:48.000Z
001-099/20/20.py
lunixbochs/project-euler
aa974c5ae68547309f33adbb4e633fe040964855
[ "MIT" ]
null
null
null
001-099/20/20.py
lunixbochs/project-euler
aa974c5ae68547309f33adbb4e633fe040964855
[ "MIT" ]
2
2017-10-28T09:52:08.000Z
2019-04-11T00:55:36.000Z
import math print sum(int(c) for c in str(math.factorial(100)).rstrip('L'))
25.333333
63
0.710526
15
76
3.6
0.866667
0
0
0
0
0
0
0
0
0
0
0.044118
0.105263
76
2
64
38
0.75
0
0
0
0
0
0.013158
0
0
0
0
0
0
0
null
null
0
0.5
null
null
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
1
0
5
d12d1757e3773c3fe9a0dbd42883c232a4c0323d
13
py
Python
kt18data/pyt.py
term1830/function3unit
2c68bcda2bd6873c3e4a6ec6300466d93bc201d7
[ "MIT" ]
null
null
null
kt18data/pyt.py
term1830/function3unit
2c68bcda2bd6873c3e4a6ec6300466d93bc201d7
[ "MIT" ]
null
null
null
kt18data/pyt.py
term1830/function3unit
2c68bcda2bd6873c3e4a6ec6300466d93bc201d7
[ "MIT" ]
null
null
null
print('odoo')
13
13
0.692308
2
13
4.5
1
0
0
0
0
0
0
0
0
0
0
0
0
13
1
13
13
0.692308
0
0
0
0
0
0.285714
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
0f01dbaf2d6ac671d043ec2de681447e716b86fb
193
py
Python
moto/dynamodb/__init__.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
null
null
null
moto/dynamodb/__init__.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
1
2022-03-07T07:39:03.000Z
2022-03-07T07:39:03.000Z
moto/dynamodb/__init__.py
symroe/moto
4e106995af6f2820273528fca8a4e9ee288690a5
[ "Apache-2.0" ]
null
null
null
from moto.dynamodb.models import dynamodb_backends from ..core.models import base_decorator dynamodb_backend = dynamodb_backends["us-east-1"] mock_dynamodb = base_decorator(dynamodb_backends)
32.166667
50
0.84456
26
193
6
0.538462
0.307692
0.269231
0
0
0
0
0
0
0
0
0.005618
0.07772
193
5
51
38.6
0.870787
0
0
0
0
0
0.046632
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
0f2d1d5ec021f15188fe98c84a5e1debbde169bc
151
py
Python
email_confirm_la/apps.py
Robert-Chiang/django-email-confirm-la
c930b1722d1cb15e59e802938c7b68b8b25cf092
[ "MIT" ]
null
null
null
email_confirm_la/apps.py
Robert-Chiang/django-email-confirm-la
c930b1722d1cb15e59e802938c7b68b8b25cf092
[ "MIT" ]
null
null
null
email_confirm_la/apps.py
Robert-Chiang/django-email-confirm-la
c930b1722d1cb15e59e802938c7b68b8b25cf092
[ "MIT" ]
1
2017-01-03T00:47:03.000Z
2017-01-03T00:47:03.000Z
# coding: utf-8 from django.apps import AppConfig class ECLAAppConf(AppConfig): name = 'email_confirm_la' verbose_name = 'Email Confirm La'
16.777778
37
0.728477
20
151
5.35
0.75
0.168224
0.299065
0.336449
0
0
0
0
0
0
0
0.00813
0.18543
151
8
38
18.875
0.861789
0.086093
0
0
0
0
0.235294
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
0f5a0610708d6a4844226e21f032e414eefbfb92
81,654
py
Python
lib/scitools/avplotter.py
jayvdb/scitools
8df53a3a3bc95377f9fa85c04f3a329a0ec33e67
[ "BSD-3-Clause" ]
62
2015-03-28T18:07:51.000Z
2022-02-12T20:32:36.000Z
lib/scitools/avplotter.py
jayvdb/scitools
8df53a3a3bc95377f9fa85c04f3a329a0ec33e67
[ "BSD-3-Clause" ]
7
2015-06-09T09:56:03.000Z
2021-05-20T17:53:15.000Z
lib/scitools/avplotter.py
jayvdb/scitools
8df53a3a3bc95377f9fa85c04f3a329a0ec33e67
[ "BSD-3-Clause" ]
29
2015-04-16T03:48:57.000Z
2022-02-03T22:06:52.000Z
""" avplotter ("ascii vertical plotter") is a simple ASCII plotter for curve plots, where the x axis points downward and the y axis is horizontal. The plot is realized by printing it line by line. There are two main applications: 1) very long time series, and 2) plots that would be convenient to have as pure text. See the documentation of class Plotter for examples of various types of plots. """ class Plotter: """ ASCII plotter with x axis downwards and y axis horizontal. Can make a plot by writing out new x values line by line in a terminal window or a file. Very suited for long time series. Example: >>> a = 0.2 >>> p = Plotter(-1-a, 1+a, width=50) >>> from math import sin, pi >>> from numpy import linspace >>> num_periods = 2 >>> resolution_per_period = 22 >>> tp = linspace(0, num_periods*2*pi, ... num_periods*resolution_per_period + 1) >>> for t in tp: ... y = (1 + a*sin(0.5*t))*sin(t) ... print 't=%5.2f' % t, p.plot(t, y), '%5.2f' % y ... t= 0.00 | 0.00 t= 0.29 | * 0.29 t= 0.57 | * 0.57 t= 0.86 | * 0.82 t= 1.14 | * 1.01 t= 1.43 | * 1.12 t= 1.71 | * 1.14 t= 2.00 | * 1.06 t= 2.28 | * 0.89 t= 2.57 | * 0.64 t= 2.86 | * 0.34 t= 3.14 | 0.00 t= 3.43 * | -0.34 t= 3.71 * | -0.64 t= 4.00 * | -0.89 t= 4.28 * | -1.06 t= 4.57 * | -1.14 t= 4.86 * | -1.12 t= 5.14 * | -1.01 t= 5.43 * | -0.82 t= 5.71 * | -0.57 t= 6.00 * | -0.29 t= 6.28 | -0.00 t= 6.57 | * 0.27 t= 6.85 | * 0.51 t= 7.14 | * 0.69 t= 7.43 | * 0.81 t= 7.71 | * 0.86 t= 8.00 | * 0.84 t= 8.28 | * 0.76 t= 8.57 | * 0.62 t= 8.85 | * 0.44 t= 9.14 | * 0.23 t= 9.42 | 0.00 t= 9.71 * | -0.23 t=10.00 * | -0.44 t=10.28 * | -0.62 t=10.57 * | -0.76 t=10.85 * | -0.84 t=11.14 * | -0.86 t=11.42 * | -0.81 t=11.71 * | -0.69 t=12.00 * | -0.51 t=12.28 * | -0.27 t=12.57 | -0.00 Here is a one-dimensional random walk example:: from scitools.avplotter importer Plotter import time, numpy as np p = Plotter(-1, 1, width=75) # Horizontal axis: 75 chars wide dx = 0.05 np.random.seed(10) x = 0 while True: random_step = 1 if np.random.random() > 
0.5 else -1 x = x + dx*random_step if x < -1: print 'HOME!!!' break print p.plot(0, x) # Allow Ctrl+c to abort the simulation try: time.sleep(0.1) # Wait for interrupt except KeyboardInterrupt: print 'Interrupted by Ctrl+c' break One can easily plot two or more curves side by side. Here we plot two curves (sine and cosine), each with a width of 25 characters:: p_sin = Plotter(-1, 1, width=25, symbols='s') p_cos = Plotter(-1, 1, width=25, symbols='c') from math import sin, cos, pi from numpy import linspace tp = linspace(0, 6*pi, 6*8+1) for t in tp: print p_sin.plot(t, sin(t)), p_cos.plot(t, cos(t)) The output reads:: | | c | s | c | s | c | s | c | s | | s c | | s c | | s c | | c | s | c | s | c | s | c | s | c| s | | c s | | c s | | c s| | c | s | c | s | c | s | c | s | | s c | | s c | | s c | | c | s | c | s | c | s | c | s | c| s | | c s | | c s | | c s| | c | s | c | s | c | s | c | s | | s c | | s c | | s c | | c | s | c | s | c | s | c | s | c| s | | c s | | c s | | c s| | c Alternatively, two curves (here sine and cosine) can be plotted in the same coordinate system:: p = Plotter(-1, 1, width=50, symbols='sc') from math import sin, cos, pi from numpy import linspace tp = linspace(0, 6*pi, 6*8+1) for t in tp: print p.plot(t, sin(t), cos(t)) The output from this code becomes:: | c | s c | c | c s | s c | s c | s c | s c | c s | c | s c | s | s | c s | c s | c | c | s c | c | c s | s c | s c | s c | s c | c s | c | s c | s | s | c s | c s | c | c | s c | c | c s | s c | s c | s c | s c | c s | c | s c | s | s | c s | c s | c | c """ def __init__(self, ymin, ymax, width=68, symbols='*o+x@', vertical_line=0): """ Create a line by line plotter with the x axis pointing downward. The `ymin` and `ymax` variables define the extent of the y axis. The `width` parameter is the number of characters used for the y domain (axis). The symbols used for curves are given by the `symbols` string (first symbol, by default is ``*``, next is ``o``). 
The `vertical_line` parameter specifies for which y value where the x axis is drawn (y=0 by default). """ self.yaxis = float(ymin), float(ymax) self.width = width self.symbols = symbols self.vertical_line = vertical_line def _map(self, y): """Return the column no. corresponding to y.""" ymin, ymax = self.yaxis if y < ymin: self.too_small = True self.too_large = False c = 0 elif y > ymax: self.too_small = False self.too_large = True c = -1 else: self.too_small = self.too_large = False y_in_01 = (y-ymin)/(ymax - ymin) c = int(round(y_in_01*self.width)) return c def plot(self, x, *y, **kwargs): """ Return next line in plot, given x and some y values. Supported kwargs: print_out_of_range_value: if True, print the value if it is out of range. """ print_out_of_range_value = \ kwargs.get('print_out_of_range_value', True) line = [' ']*(self.width + 1) y_value = '' for yi, symbol in zip(y, self.symbols): c = self._map(yi) if self.too_small or self.too_large: symbol = '|' if print_out_of_range_value: y_value = '%.1E' % yi else: line[c] = symbol # Mark 'x' axis if self.yaxis[0] < self.vertical_line and \ self.yaxis[1] > self.vertical_line: c = self._map(0) line[c] = '|' return ''.join(line) + y_value def plot(*args, **kwargs): """ Easyviz-style plot command. args holds x1, y1, x2, y2, ...:: plot(t, u1, t, u2, axis=[0, 10, -1, 1]) No other keyword arguments has any effect. 
""" if 'axis' in kwargs: ymin, ymax = kwargs['axis'][2:] else: ymin = 1E+20 ymax = -ymin for i in range(1,len(args),2): ymin = max(ymin, args[i].min()) ymax = max(ymax, args[i].max()) p = Plotter(ymin, ymax, width=70) num_curves = len(args)/2 if num_curves > 4: raise ValueError('avplotter.plot: cannot plot more than 4 curves') x_length = len(args[0]) for i in range(2,len(args),2): if len(args[i]) != x_length: raise ValueError('avplotter.plot: all x coordinates for all curves must have the same length (%d vs %d)' % (len(args[i]), x_length)) x_array = args[0] for i, x in enumerate(x_array): try: y = [args[j][i] for j in range(1,len(args),2)] except IndexError: raise ValueError('index %d in x_array is illegal in args[%d] (length=%d)' % (i, j, len(args[j]))) print p.plot(x_array, *y) def test_sin(): a = 0.2 p = Plotter(-1-a, 1+a, width=50) from math import sin, pi from numpy import linspace num_periods = 2 resolution_per_period = 22 s = '' tp = linspace(0, num_periods*2*pi, num_periods*resolution_per_period + 1) for t in tp: y = (1 + a*sin(0.5*t))*sin(t) s += 't=%5.2f %s %5.2f\n' % (t, p.plot(t, y), y) ans = """\ t= 0.00 | 0.00 t= 0.29 | * 0.29 t= 0.57 | * 0.57 t= 0.86 | * 0.82 t= 1.14 | * 1.01 t= 1.43 | * 1.12 t= 1.71 | * 1.14 t= 2.00 | * 1.06 t= 2.28 | * 0.89 t= 2.57 | * 0.64 t= 2.86 | * 0.34 t= 3.14 | 0.00 t= 3.43 * | -0.34 t= 3.71 * | -0.64 t= 4.00 * | -0.89 t= 4.28 * | -1.06 t= 4.57 * | -1.14 t= 4.86 * | -1.12 t= 5.14 * | -1.01 t= 5.43 * | -0.82 t= 5.71 * | -0.57 t= 6.00 * | -0.29 t= 6.28 | -0.00 t= 6.57 | * 0.27 t= 6.85 | * 0.51 t= 7.14 | * 0.69 t= 7.43 | * 0.81 t= 7.71 | * 0.86 t= 8.00 | * 0.84 t= 8.28 | * 0.76 t= 8.57 | * 0.62 t= 8.85 | * 0.44 t= 9.14 | * 0.23 t= 9.42 | 0.00 t= 9.71 * | -0.23 t=10.00 * | -0.44 t=10.28 * | -0.62 t=10.57 * | -0.76 t=10.85 * | -0.84 t=11.14 * | -0.86 t=11.42 * | -0.81 t=11.71 * | -0.69 t=12.00 * | -0.51 t=12.28 * | -0.27 t=12.57 | -0.00 """ assert _compare(ans, s) def test_2_curves_v1(): p_sin = Plotter(-1, 1, width=25, 
symbols='s') p_cos = Plotter(-1, 1, width=25, symbols='c') from math import sin, cos, pi from numpy import linspace tp = linspace(0, 6*pi, 6*8+1) s = '' for t in tp: s += '%s %s\n' % (p_sin.plot(t, sin(t)), p_cos.plot(t, cos(t))) ans = """\ | | c | s | c | s | c | s | c | s | | s c | | s c | | s c | | c | s | c | s | c | s | c | s | c| s | | c s | | c s | | c s| | c | s | c | s | c | s | c | s | | s c | | s c | | s c | | c | s | c | s | c | s | c | s | c| s | | c s | | c s | | c s| | c | s | c | s | c | s | c | s | | s c | | s c | | s c | | c | s | c | s | c | s | c | s | c| s | | c s | | c s | | c s| | c """ assert _compare(ans, s) def test_2_curves_v2(): p = Plotter(-1, 1, width=50, symbols='sc') from math import sin, cos, pi from numpy import linspace tp = linspace(0, 6*pi, 6*8+1) s = '' for t in tp: s += '%s\n' % (p.plot(t, sin(t), cos(t))) ans = """\ | c | s c | c | c s | s c | s c | s c | s c | c s | c | s c | s | s | c s | c s | c | c | s c | c | c s | s c | s c | s c | s c | c s | c | s c | s | s | c s | c s | c | c | s c | c | c s | s c | s c | s c | s c | c s | c | s c | s | s | c s | c s | c | c """ assert _compare(ans, s) def test_random_walk(): import time, numpy as np p = Plotter(-1, 1, width=75) np.random.seed(10) y = 0 s = '' while True: random_step = 1 if np.random.random() > 0.5 else -1 y = y + 0.05*random_step if y < -1: break s += '%s\n' % (p.plot(0, y)) # t is just dummy ans = """\ |* | |* | * |* | * | | * | * | * | | * | | |* | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * |* | * |* | |* | * | * | * | * | * | * | * | * | * |* | |* | * |* | * | | * | * | * | * | * | * | * | | * | * | * | | |* | * | | * | | |* | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | | |* | * |* | |* | * | * | * | | * | | |* | 
* |* | * | * | * | * | * | * | * | * | * | * | * | | |* | * | * | * | * | * |* | * |* | * |* | |* | * |* | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | | * | | |* | |* | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | | |* | * | * | * | * | * | * | * | * | * | | * | * | * | * | * | * | * | * | * | * | * | * | * | | * | * | * | * | * | | |* | * | * | * | | * | | |* | |* | * | * | * |* | |* | * | | * | * | * | | * | * | * | * | * | * | * | | |* | |* | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * |* | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | 1.0E+00 | 1.1E+00 | 1.1E+00 | 1.1E+00 | 1.0E+00 | * | * | * | * | * | 1.0E+00 | 1.1E+00 | 1.0E+00 | 1.1E+00 | 1.1E+00 | 1.1E+00 | 1.0E+00 | * | 1.0E+00 | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * 
|* *| * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | *| |* | * | * | * | * | * | * | * | * | * |* | * |* *| * | * | * | * | * | * | * | * | * | * | * | *| |* *| * | *| * | *| |* *| * | * | * | * | * | * | * | *| * | *| * | *| |* *| |* *| * | *| |* | * | * | * | * | * | * | * |* | * |* | * |* | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | 1.0E+00 | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | 1.0E+00 | 1.1E+00 | 1.0E+00 | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * |* | * |* *| * | * | * | *| |* | * |* *| * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | * | """ assert 
_compare(ans, s) def run_random_walk(): import time, numpy as np p = Plotter(-1, 1, width=75) # Horizontal axis: 75 chars wide dx = 0.05 np.random.seed(10) x = 0 while True: random_step = 1 if np.random.random() > 0.5 else -1 x = x + dx*random_step if x < -1: print 'HOME!!!' break print p.plot(0, x) # Allow Ctrl+c to abort the simulation try: time.sleep(0.1) # Wait for interrupt except KeyboardInterrupt: print 'Interrupted by Ctrl+c' break def _compare(ans, s): for line1, line2 in zip(ans.splitlines(), s.splitlines()): if line1.strip() != line2.strip(): return False return True if __name__ == '__main__': import sys try: if sys.argv[1] == 'random_walk': run_random_walk() except: pass
47.528522
144
0.068913
2,093
81,654
2.636885
0.137602
0.052183
0.068491
0.076826
0.527813
0.500091
0.490669
0.480341
0.469107
0.469107
0
0.084646
0.881071
81,654
1,717
145
47.556203
0.483678
0.001421
0
0.866852
0
0.000693
0.937415
0.000343
0
0
0
0
0.002774
0
null
null
0.000693
0.006241
null
null
0.004854
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
1
0
0
1
0
0
1
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
0f784df9ba01ec00480804c795c2c2c2a869bf12
54
py
Python
spynoza/denoising/motion_confounds/__init__.py
spinoza-centre/spynoza
d71d69e3ea60c9544f4e63940f053a2d1b3ac65f
[ "MIT" ]
7
2016-06-21T11:51:07.000Z
2018-08-10T15:41:37.000Z
spynoza/denoising/motion_confounds/__init__.py
spinoza-centre/spynoza
d71d69e3ea60c9544f4e63940f053a2d1b3ac65f
[ "MIT" ]
12
2017-07-05T09:14:31.000Z
2018-09-13T12:19:14.000Z
spynoza/denoising/motion_confounds/__init__.py
spinoza-centre/spynoza
d71d69e3ea60c9544f4e63940f053a2d1b3ac65f
[ "MIT" ]
8
2016-09-26T12:35:59.000Z
2021-06-05T05:50:23.000Z
from .workflows import create_motion_confound_workflow
54
54
0.925926
7
54
6.714286
1
0
0
0
0
0
0
0
0
0
0
0
0.055556
54
1
54
54
0.921569
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7e1f088fd881ab871418e7bd70ba9c0b332fcae0
118
py
Python
v1/api.py
ofekron/chef360
edbec22629781063c7f15fdbd772532a43253e94
[ "Apache-2.0" ]
null
null
null
v1/api.py
ofekron/chef360
edbec22629781063c7f15fdbd772532a43253e94
[ "Apache-2.0" ]
null
null
null
v1/api.py
ofekron/chef360
edbec22629781063c7f15fdbd772532a43253e94
[ "Apache-2.0" ]
null
null
null
from utils import version blueprint,api=version("v1") from v1.restaurants import routes from v1.visitors import routes
29.5
33
0.830508
18
118
5.444444
0.555556
0.122449
0
0
0
0
0
0
0
0
0
0.028302
0.101695
118
4
34
29.5
0.896226
0
0
0
0
0
0.016807
0
0
0
0
0
0
1
0
true
0
0.75
0
0.75
0.25
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7e25c0d1df82a4928e53876db03dbfb44401f94c
104
py
Python
label_studio/ml/__init__.py
beringresearch/label-studio
ab8b9b5605ec9eab76c4f90967874898239ed94e
[ "Apache-2.0" ]
2
2021-04-06T13:38:59.000Z
2021-04-06T13:43:28.000Z
label_studio/ml/__init__.py
beringresearch/label-studio
ab8b9b5605ec9eab76c4f90967874898239ed94e
[ "Apache-2.0" ]
null
null
null
label_studio/ml/__init__.py
beringresearch/label-studio
ab8b9b5605ec9eab76c4f90967874898239ed94e
[ "Apache-2.0" ]
null
null
null
from .api import init_app from .model import LabelStudioMLBase from .helpers import LabelStudioMLChoices
34.666667
41
0.865385
13
104
6.846154
0.692308
0
0
0
0
0
0
0
0
0
0
0
0.105769
104
3
41
34.666667
0.956989
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7e491bd34e28d4a0ce2bb9707c746c392cd42fdb
79
py
Python
just_another_settings/__init__.py
andreyrusanov/temp
8f493766c1dcf99fd55dae5e1bc1079725f5b801
[ "MIT" ]
null
null
null
just_another_settings/__init__.py
andreyrusanov/temp
8f493766c1dcf99fd55dae5e1bc1079725f5b801
[ "MIT" ]
null
null
null
just_another_settings/__init__.py
andreyrusanov/temp
8f493766c1dcf99fd55dae5e1bc1079725f5b801
[ "MIT" ]
null
null
null
from .selectors import EnvSelector, ValueSelector from .fields import EnvField
26.333333
49
0.848101
9
79
7.444444
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.113924
79
2
50
39.5
0.957143
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7e58a76f66722d65e90e33fe9879808ed28a09c7
345
py
Python
src/python/WMCore/WMBS/Oracle/Locations/GetPNNtoPSNMapping.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
21
2015-11-19T16:18:45.000Z
2021-12-02T18:20:39.000Z
src/python/WMCore/WMBS/Oracle/Locations/GetPNNtoPSNMapping.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
5,671
2015-01-06T14:38:52.000Z
2022-03-31T22:11:14.000Z
src/python/WMCore/WMBS/Oracle/Locations/GetPNNtoPSNMapping.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
67
2015-01-21T15:55:38.000Z
2022-02-03T19:53:13.000Z
""" _GetPNNtoPSNMapping_ Oracle implementation of Locations.GetPNNtoPSNMapping """ from __future__ import (print_function, division) from WMCore.WMBS.MySQL.Locations.GetPNNtoPSNMapping import GetPNNtoPSNMapping as MySQLGetPNNtoPSNMapping class GetPNNtoPSNMapping(MySQLGetPNNtoPSNMapping): """ Same as MySQL version """ pass
20.294118
104
0.794203
30
345
8.9
0.666667
0.202247
0
0
0
0
0
0
0
0
0
0
0.13913
345
16
105
21.5625
0.89899
0.281159
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.25
0.5
0
0.75
0.25
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
7e631608a68a481f8c7768b50e5a6d969ea0c9be
211
py
Python
custom_user/cust_user/myapp/admin.py
SameerGurjar/Cutomized-User-Authentication
6063c0e9e6d5d3f07c17ab7b7358bdb8cb554012
[ "MIT" ]
null
null
null
custom_user/cust_user/myapp/admin.py
SameerGurjar/Cutomized-User-Authentication
6063c0e9e6d5d3f07c17ab7b7358bdb8cb554012
[ "MIT" ]
null
null
null
custom_user/cust_user/myapp/admin.py
SameerGurjar/Cutomized-User-Authentication
6063c0e9e6d5d3f07c17ab7b7358bdb8cb554012
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import City, Country, Countrylanguage # Register your models here. admin.site.register(City) admin.site.register(Country) admin.site.register(Countrylanguage)
30.142857
51
0.800948
27
211
6.259259
0.481481
0.159763
0.301775
0
0
0
0
0
0
0
0
0
0.113744
211
6
52
35.166667
0.903743
0.123223
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.4
0
0.4
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
7e818a40b038130a2cc794008a5fc68b24c32459
11,117
py
Python
code/pyto/segmentation/test/test_connected.py
anmartinezs/pyseg_system
5bb07c7901062452a34b73f376057cabc15a13c3
[ "Apache-2.0" ]
12
2020-01-08T01:33:02.000Z
2022-03-16T00:25:34.000Z
code/pyto/segmentation/test/test_connected.py
anmartinezs/pyseg_system
5bb07c7901062452a34b73f376057cabc15a13c3
[ "Apache-2.0" ]
8
2019-12-19T19:34:56.000Z
2022-03-10T10:11:28.000Z
code/pyto/segmentation/test/test_connected.py
anmartinezs/pyseg_system
5bb07c7901062452a34b73f376057cabc15a13c3
[ "Apache-2.0" ]
2
2022-03-30T13:12:22.000Z
2022-03-30T18:12:10.000Z
""" Tests module connected. # Author: Vladan Lucic # $Id$ """ from __future__ import unicode_literals from __future__ import absolute_import __version__ = "$Revision$" from copy import copy, deepcopy import importlib import unittest import numpy import numpy.testing as np_test import scipy from pyto.segmentation.grey import Grey from pyto.segmentation.segment import Segment from pyto.segmentation.connected import Connected from pyto.segmentation.test import common class TestConnected(np_test.TestCase): """ """ def setUp(self): importlib.reload(common) # to avoid problems when running multiple tests def testMake(self): """ Tests make() """ conn, contacts = \ Connected.make(image=common.image_1, boundary=common.bound_1, thresh=4, boundaryIds=[3, 4], mask=5, nBoundary=1, boundCount='ge') np_test.assert_equal(conn.ids, [1,2]) i1 = conn.data[2,2] i2 = conn.data[2,5] desired = numpy.zeros((10,10), dtype=int) desired[2:6, 1:9] = numpy.array(\ [[0, 1, 0, 0, 2, 2, 2, 0], [0, 1, 0, 0, 2, 0, 2, 0], [1, 1, 1, 0, 2, 0, 2, 2], [1, 0, 1, 0, 2, 0, 2, 0]]) self.id_correspondence(conn.data, desired) conn, contacts = \ Connected.make(image=common.image_1, boundary=common.bound_1, thresh=4, boundaryIds=[3, 4], mask=5, nBoundary=1, boundCount='exact') np_test.assert_equal(conn.ids, []) conn, contacts = \ Connected.make(image=common.image_1, boundary=common.bound_1, thresh=2, boundaryIds=[3, 4], mask=5, nBoundary=2, boundCount='eq') np_test.assert_equal(conn.ids, [1]) conn, contacts = \ Connected.make(image=common.image_1, boundary=common.bound_1, thresh=2, boundaryIds=[3, 4], mask=5, nBoundary=1, boundCount='at_most') np_test.assert_equal(conn.ids, [1,2]) # test ids and data conn, contacts = \ Connected.make(image=common.image_1, boundary=common.bound_1, thresh=2, boundaryIds=[3, 4], mask=5, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2,3]) desired = numpy.zeros((10,10), dtype=int) desired[2:6, 1:9] = numpy.array(\ [[0, 1, 0, 0, 0, 3, 3, 0], [0, 1, 0, 0, 0, 0, 
3, 0], [0, 0, 0, 0, 0, 0, 3, 0], [2, 0, 0, 0, 0, 0, 3, 0]]) self.id_correspondence(conn.data, desired) # use insets conn, contacts = Connected.make( image=common.image_1in2, boundary=common.bound_1in, thresh=2, boundaryIds=[3, 4], mask=5, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2,3]) self.id_correspondence(conn.data, desired[1:7, 1:9]) # mask Segment mask = Segment(data=numpy.where(common.bound_1.data==5, 1, 0)) image_inset = copy(common.image_1.inset) bound_inset = copy(common.bound_1.inset) image_data = common.image_1.data.copy() bound_data = common.bound_1.data.copy() conn, contacts = Connected.make( image=common.image_1, boundary=common.bound_1, thresh=2., boundaryIds=[3, 4], mask=mask, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2,3]) desired = numpy.zeros((10,10), dtype=int) desired[2:6, 1:9] = numpy.array( [[0, 1, 0, 0, 0, 3, 3, 0], [0, 1, 0, 0, 0, 0, 3, 0], [0, 0, 0, 0, 0, 0, 3, 0], [2, 0, 0, 0, 0, 0, 3, 0]]) np_test.assert_equal(conn.data>0, desired>0) np_test.assert_equal(image_inset, common.image_1.inset) np_test.assert_equal(bound_inset, common.bound_1.inset) np_test.assert_equal(image_data, common.image_1.data) np_test.assert_equal(bound_data, common.bound_1.data) # boundary inset, mask Segment same inset mask = Segment(data=numpy.where(common.bound_1in.data==5, 1, 0)) mask.setInset(inset=[slice(1,7), slice(1,9)], mode='abs') conn, contacts = Connected.make( image=common.image_1, boundary=common.bound_1in, thresh=2, boundaryIds=[3, 4], mask=mask, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2,3]) np_test.assert_equal(conn.data.shape, (6,8)) np_test.assert_equal(conn.inset, [slice(1,7), slice(1,9)]) desired = numpy.zeros((6,8), dtype=int) desired[1:5, 0:8] = numpy.array( [[0, 1, 0, 0, 0, 3, 3, 0], [0, 1, 0, 0, 0, 0, 3, 0], [0, 0, 0, 0, 0, 0, 3, 0], [2, 0, 0, 0, 0, 0, 3, 0]]) np_test.assert_equal(conn.data>0, desired>0) # boundary inset, mask Segment smaller inset (inside 
boundaries) mask = Segment(data=numpy.where(common.bound_1in.data==5, 1, 0)) mask.setInset(inset=[slice(1,7), slice(1,9)], mode='abs') mask.useInset([slice(2,6), slice(1,9)], mode='abs') conn, contacts = Connected.make( image=common.image_1, boundary=common.bound_1in, thresh=2, boundaryIds=[3, 4], mask=mask, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2,3]) np_test.assert_equal(conn.data.shape, (6,8)) np_test.assert_equal(conn.inset, [slice(1,7), slice(1,9)]) desired = numpy.zeros((6,8), dtype=int) desired[1:5, 0:8] = numpy.array( [[0, 1, 0, 0, 0, 3, 3, 0], [0, 1, 0, 0, 0, 0, 3, 0], [0, 0, 0, 0, 0, 0, 3, 0], [2, 0, 0, 0, 0, 0, 3, 0]]) np_test.assert_equal(conn.data>0, desired>0) # boundary inset, mask Segment even smaller inset (inside boundaries) mask = Segment(data=numpy.where(common.bound_1in.data==5, 1, 0)) mask.setInset(inset=[slice(1,7), slice(1,9)], mode='abs') mask.useInset([slice(2,6), slice(2,9)], mode='abs') image_inset = copy(common.image_1.inset) bound_inset = copy(common.bound_1in.inset) image_data = common.image_1.data.copy() bound_data = common.bound_1in.data.copy() conn, contacts = Connected.make( image=common.image_1, boundary=common.bound_1in, thresh=2, boundaryIds=[3, 4], mask=mask, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2]) np_test.assert_equal(conn.data.shape, (6,8)) np_test.assert_equal(conn.inset, [slice(1,7), slice(1,9)]) desired = numpy.zeros((6,8), dtype=int) desired[1:5, 1:8] = numpy.array( [[1, 0, 0, 0, 2, 2, 0], [1, 0, 0, 0, 0, 2, 0], [0, 0, 0, 0, 0, 2, 0], [0, 0, 0, 0, 0, 2, 0]]) np_test.assert_equal(conn.data, desired) np_test.assert_equal(image_inset, common.image_1.inset) np_test.assert_equal(bound_inset, common.bound_1in.inset) np_test.assert_equal(image_data, common.image_1.data) np_test.assert_equal(bound_data, common.bound_1in.data) # image smaller than boundaries mask = Segment(data=numpy.where(common.bound_1in.data==5, 1, 0)) mask.setInset(inset=[slice(1,7), 
slice(1,9)], mode='abs') mask.useInset([slice(2,6), slice(1,9)], mode='abs') image_inset = copy(common.image_1in.inset) image_data = common.image_1in.data.copy() bound_inset = copy(common.bound_1in.inset) bound_data = common.bound_1in.data.copy() conn, contacts = Connected.make( image=common.image_1in, boundary=common.bound_1in, thresh=2, boundaryIds=[3, 4], mask=mask, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2,3]) np_test.assert_equal(conn.data.shape, (6,8)) np_test.assert_equal(conn.inset, [slice(1,7), slice(1,9)]) desired = numpy.zeros((6,8), dtype=int) desired[1:5, 0:8] = numpy.array( [[0, 1, 0, 0, 0, 3, 3, 0], [0, 1, 0, 0, 0, 0, 3, 0], [0, 0, 0, 0, 0, 0, 3, 0], [2, 0, 0, 0, 0, 0, 3, 0]]) np_test.assert_equal(conn.data>0, desired>0) np_test.assert_equal(image_inset, common.image_1in.inset) np_test.assert_equal(image_data, common.image_1in.data) np_test.assert_equal(bound_inset, common.bound_1in.inset) np_test.assert_equal(bound_data, common.bound_1in.data) # image smaller than boundaries and intersects with free, boundaries # intersects with free image = Grey(data=common.image_1.data.copy()) image.useInset(inset=[slice(2,6), slice(2,9)], mode='abs') image_inset = copy(image.inset) image_data = image.data.copy() common.bound_1in.useInset(inset=[slice(1, 7), slice(1, 8)], mode='abs') bound_1in_inset = copy(common.bound_1in.inset) bound_data = common.bound_1in.data.copy() mask = Segment(data=numpy.where(common.bound_1in.data==5, 1, 0)) mask.setInset(inset=[slice(1,7), slice(1,9)], mode='abs') mask.useInset([slice(2,6), slice(1,9)], mode='abs') conn, contacts = Connected.make( image=image, boundary=common.bound_1in, thresh=3, boundaryIds=[3, 4], mask=mask, nBoundary=1, boundCount='at_least') np_test.assert_equal(conn.ids, [1,2,3,4]) np_test.assert_equal(conn.data.shape, (6,7)) np_test.assert_equal(conn.inset, [slice(1,7), slice(1,8)]) desired = numpy.zeros((6,7), dtype=int) desired[1:5, 0:8] = numpy.array( [[0, 1, 0, 0, 0, 3, 3], [0, 1, 
0, 0, 0, 0, 3], [0, 1, 0, 0, 0, 0, 3], [0, 0, 4, 0, 2, 0, 3]]) np_test.assert_equal(conn.data>0, desired>0) np_test.assert_equal(image_inset, image.inset) np_test.assert_equal(bound_1in_inset, common.bound_1in.inset) np_test.assert_equal(image_data, image.data) np_test.assert_equal(bound_data, common.bound_1in.data) common.bound_1in.useInset( inset=[slice(1, 7), slice(1, 9)], mode='abs', expand=True) def id_correspondence(self, actual, desired): """ Check that data (given in actual and desired) agree and return dictionary with actual_id : desired_id pairs """ # check overall agreement np_test.assert_equal(actual>0, desired>0) # checl that individual segments agree desired_ids = numpy.unique(desired[desired>0]) id_dict = {} for d_id in desired_ids: a_id = actual[desired==d_id][0] np_test.assert_equal(actual==a_id, desired==d_id) id_dict[d_id] = a_id return id_dict if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestConnected) unittest.TextTestRunner(verbosity=2).run(suite)
43.089147
80
0.580372
1,640
11,117
3.789634
0.082927
0.034433
0.034272
0.125825
0.797426
0.78214
0.756235
0.722607
0.705229
0.701529
0
0.070048
0.269317
11,117
257
81
43.256809
0.695063
0.055411
0
0.596059
0
0
0.012866
0
0
0
0
0
0.226601
1
0.014778
false
0
0.064039
0
0.08867
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
0e6be9da5e86d4e97d90f7454085e06bfd884ddb
85
py
Python
alpha_zero/alpha_zero/game/__init__.py
tsukushibito/python_alpha_zero
59412fe417175cbb6ecd1dd90b6a2d47781c6e38
[ "MIT" ]
null
null
null
alpha_zero/alpha_zero/game/__init__.py
tsukushibito/python_alpha_zero
59412fe417175cbb6ecd1dd90b6a2d47781c6e38
[ "MIT" ]
null
null
null
alpha_zero/alpha_zero/game/__init__.py
tsukushibito/python_alpha_zero
59412fe417175cbb6ecd1dd90b6a2d47781c6e38
[ "MIT" ]
null
null
null
from .game import Game from .game_state import GameState from .action import Action
21.25
33
0.811765
14
85
4.928571
0.5
0.231884
0
0
0
0
0
0
0
0
0
0
0.141176
85
3
34
28.333333
0.931507
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
1
null
null
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
0e74359d09e351c0ab4862a9077144fcabc3e3a7
71
py
Python
krsh/cmd/group_create/templates/pipeline/pipeline.py
riiid/krsh
2238daa591b19d88722892f9a9f6ada3fe83c742
[ "Apache-2.0" ]
133
2021-05-28T07:41:49.000Z
2022-02-21T23:07:31.000Z
krsh/cmd/group_create/templates/pipeline/pipeline.py
DolceLatte/krsh
2238daa591b19d88722892f9a9f6ada3fe83c742
[ "Apache-2.0" ]
null
null
null
krsh/cmd/group_create/templates/pipeline/pipeline.py
DolceLatte/krsh
2238daa591b19d88722892f9a9f6ada3fe83c742
[ "Apache-2.0" ]
7
2021-06-04T00:53:04.000Z
2022-01-10T15:26:29.000Z
import kfp @kfp.dsl.pipeline(name="{name}") def pipeline(): pass
10.142857
32
0.647887
10
71
4.6
0.7
0
0
0
0
0
0
0
0
0
0
0
0.169014
71
6
33
11.833333
0.779661
0
0
0
0
0
0.084507
0
0
0
0
0
0
1
0.25
true
0.25
0.25
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
5
7ed633503e0c4710dbfe2d854a7124206a3e73d0
163
py
Python
tests/web_platform/CSS2/normal_flow/test_block_in_inline_margins.py
fletchgraham/colosseum
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
[ "BSD-3-Clause" ]
null
null
null
tests/web_platform/CSS2/normal_flow/test_block_in_inline_margins.py
fletchgraham/colosseum
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
[ "BSD-3-Clause" ]
null
null
null
tests/web_platform/CSS2/normal_flow/test_block_in_inline_margins.py
fletchgraham/colosseum
77be4896ee52b8f5956a3d77b5f2ccd2c8608e8f
[ "BSD-3-Clause" ]
1
2020-01-16T01:56:41.000Z
2020-01-16T01:56:41.000Z
from tests.utils import W3CTestCase class TestBlockInInlineMargins(W3CTestCase): vars().update(W3CTestCase.find_tests(__file__, 'block-in-inline-margins-'))
27.166667
79
0.797546
18
163
6.944444
0.833333
0
0
0
0
0
0
0
0
0
0
0.020134
0.08589
163
5
80
32.6
0.818792
0
0
0
0
0
0.148148
0.148148
0
0
0
0
0
1
0
true
0
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7edd35228292e75670ea1de566b2dc4efd48dae0
22
py
Python
PYTHON/HelloWorld/projeto1.py
Diegosds/Projeto-Hello-world
dbd46fb87ac02e9dc0984896a8e77cd0d56a00d8
[ "Apache-2.0" ]
null
null
null
PYTHON/HelloWorld/projeto1.py
Diegosds/Projeto-Hello-world
dbd46fb87ac02e9dc0984896a8e77cd0d56a00d8
[ "Apache-2.0" ]
null
null
null
PYTHON/HelloWorld/projeto1.py
Diegosds/Projeto-Hello-world
dbd46fb87ac02e9dc0984896a8e77cd0d56a00d8
[ "Apache-2.0" ]
null
null
null
print ('olá, mundo!')
11
21
0.590909
3
22
4.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.136364
22
1
22
22
0.684211
0
0
0
0
0
0.5
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
7d2171d5a4628f9005431b5dded739fe748b1a02
172
py
Python
src/euring/config.py
zostera/euring
a508f14ea20a690341e8435653e6f5337262b63d
[ "BSD-3-Clause" ]
null
null
null
src/euring/config.py
zostera/euring
a508f14ea20a690341e8435653e6f5337262b63d
[ "BSD-3-Clause" ]
null
null
null
src/euring/config.py
zostera/euring
a508f14ea20a690341e8435653e6f5337262b63d
[ "BSD-3-Clause" ]
null
null
null
import os PROJECT_DIR = os.path.dirname(__file__) SRC_DIR = os.path.abspath(os.path.join(PROJECT_DIR, "..")) API_DIR = os.path.abspath(os.path.join(SRC_DIR, "..", "api"))
28.666667
61
0.703488
29
172
3.862069
0.37931
0.267857
0.241071
0.285714
0.464286
0.464286
0.464286
0
0
0
0
0
0.087209
172
5
62
34.4
0.713376
0
0
0
0
0
0.040698
0
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7d2636aa79801c73e1da40ffee688182a13b36ae
49
py
Python
backend/core/__init__.py
GLY0826/flask-bigger4edu
663a4dedb39e2abb12e9fe98ed8eb5d1314fe413
[ "MIT" ]
29
2018-11-13T09:03:29.000Z
2021-11-07T20:20:38.000Z
backend/core/__init__.py
GLY0826/flask-bigger4edu
663a4dedb39e2abb12e9fe98ed8eb5d1314fe413
[ "MIT" ]
null
null
null
backend/core/__init__.py
GLY0826/flask-bigger4edu
663a4dedb39e2abb12e9fe98ed8eb5d1314fe413
[ "MIT" ]
21
2018-11-14T01:11:24.000Z
2021-12-08T09:20:30.000Z
# -*- coding: utf-8 -*- '''Web后端(业务无关的操作、配置)核心'''
24.5
25
0.530612
7
49
3.714286
1
0
0
0
0
0
0
0
0
0
0
0.022727
0.102041
49
2
25
24.5
0.568182
0.857143
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
7d3a750d2fc150a951b2961b4bfdf74cf5ac57ec
285
py
Python
haipproxy/tests/test2.py
jiyeme/haipproxy
de9003da8b19b5a29e157a757a1071ff5b166ac8
[ "MIT" ]
null
null
null
haipproxy/tests/test2.py
jiyeme/haipproxy
de9003da8b19b5a29e157a757a1071ff5b166ac8
[ "MIT" ]
null
null
null
haipproxy/tests/test2.py
jiyeme/haipproxy
de9003da8b19b5a29e157a757a1071ff5b166ac8
[ "MIT" ]
null
null
null
import requests proxies = {'http': 'http://127.0.0.1:3128'} resp = requests.get('http://httpbin.org/ip', proxies=proxies, timeout=5) print(resp.text) proxies = {'https': 'http://127.0.0.1:3128'} resp = requests.get('https://httpbin.org/ip', proxies=proxies, timeout=5) print(resp.text)
40.714286
73
0.694737
46
285
4.304348
0.391304
0.070707
0.080808
0.090909
0.767677
0.767677
0.767677
0.767677
0.767677
0.474747
0
0.083333
0.073684
285
7
74
40.714286
0.666667
0
0
0.285714
0
0
0.328671
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.285714
0
0
0
null
0
0
0
0
1
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ada591352e597aeeeb4256f01c7e1d6be7daaead
21,872
py
Python
rhea/manager.py
gardleopard/rhea
36a8e908281ca9af232c5ce2e2cf64259221c3a6
[ "MIT" ]
null
null
null
rhea/manager.py
gardleopard/rhea
36a8e908281ca9af232c5ce2e2cf64259221c3a6
[ "MIT" ]
null
null
null
rhea/manager.py
gardleopard/rhea
36a8e908281ca9af232c5ce2e2cf64259221c3a6
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import json import six from collections import Mapping from distutils.util import strtobool # pylint:disable=import-error from rhea import reader from rhea.exceptions import RheaError from rhea.specs import UriSpec class Rhea(object): def __init__(self, **params): self._params = params self._requested_keys = set() self._secret_keys = set() self._local_keys = set() @classmethod def read_configs(cls, config_values): # pylint:disable=redefined-outer-name config = reader.read(config_values) # pylint:disable=redefined-outer-name return cls(**config) if config else None def params_startswith(self, term): return [k for k in self._params if k.startswith(term)] def params_endswith(self, term): return [k for k in self._params if k.endswith(term)] def get_requested_params(self, include_secrets=False, include_locals=False, to_str=False): params = {} for key in self._requested_keys: if not include_secrets and key in self._secret_keys: continue if not include_locals and key in self._local_keys: continue value = self._params[key] params[key] = '{}'.format(value) if to_str else value return params def get_int(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts it to `int`/`list(int)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `int`: value corresponding to the key. 
""" if is_list: return self._get_typed_list_value(key=key, target_type=int, type_convert=int, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=int, type_convert=int, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) def get_float(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts it to `float`/`list(float)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `float`: value corresponding to the key. """ if is_list: return self._get_typed_list_value(key=key, target_type=float, type_convert=float, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=float, type_convert=float, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) def get_boolean(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts it to `bool`/`list(str)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `bool`: value corresponding to the key. 
""" if is_list: return self._get_typed_list_value(key=key, target_type=bool, type_convert=lambda x: bool(strtobool(x)), is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=bool, type_convert=lambda x: bool(strtobool(x)), is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) def get_string(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts it to `str`/`list(str)`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key. """ if is_list: return self._get_typed_list_value(key=key, target_type=str, type_convert=str, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=str, type_convert=str, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) def get_dict(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts it to `dict`. Args: key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key. 
""" def convert_to_dict(x): x = json.loads(x) if not isinstance(x, Mapping): raise RheaError("Cannot convert value `{}` (key: `{}`) to `dict`".format(x, key)) return x if is_list: return self._get_typed_list_value(key=key, target_type=Mapping, type_convert=convert_to_dict, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) value = self._get_typed_value(key=key, target_type=Mapping, type_convert=convert_to_dict, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) if not value: return default if not isinstance(value, Mapping): raise RheaError("Cannot convert value `{}` (key: `{}`) " "to `dict`".format(value, key)) return value def get_dict_of_dicts(self, key, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts it to `dict`. Add an extra validation that all keys have a dict as values. Args: key: the dict key. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key. """ value = self.get_dict( key=key, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options, ) if not value: return default for k in value: if not isinstance(value[k], Mapping): raise RheaError( "`{}` must be an object. " "Received a non valid configuration for key `{}`.".format(value[k], key)) return value def get_uri(self, key, is_list=False, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts it to `UriSpec`. Args key: the dict key. is_list: If this is one element or a list of elements. is_optional: To raise an error if key was not found. 
is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key. """ if is_list: return self._get_typed_list_value(key=key, target_type=UriSpec, type_convert=self.parse_uri_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) return self._get_typed_value(key=key, target_type=UriSpec, type_convert=self.parse_uri_spec, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) def get_list(self, key, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Get a the value corresponding to the key and converts comma separated values to a list. Args: key: the dict key. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: `str`: value corresponding to the key. """ def parse_list(v): parts = v.split(',') results = [] for part in parts: part = part.strip() if part: results.append(part) return results return self._get_typed_value(key=key, target_type=list, type_convert=parse_list, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) def _get(self, key): """ Get key from the dictionary made out of the configs passed. Args: key: the dict key. Returns: The corresponding value of the key if found. 
Raises: KeyError """ return self._params[key] def _add_key(self, key, is_secret=False, is_local=False): self._requested_keys.add(key) if is_secret: self._secret_keys.add(key) if is_local: self._local_keys.add(key) @staticmethod def _check_options(key, value, options): if options and value not in options: raise RheaError( 'The value `{}` provided for key `{}` ' 'is not one of the possible values.'.format(value, key)) def _get_typed_value(self, key, target_type, type_convert, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Return the value corresponding to the key converted to the given type. Args: key: the dict key. target_type: The type we expect the variable or key to be in. type_convert: A lambda expression that converts the key to the desired type. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. Returns: The corresponding value of the key converted. 
""" try: value = self._get(key) except KeyError: if not is_optional: raise RheaError( 'No value was provided for the non optional key `{}`.'.format(key)) return default if isinstance(value, six.string_types): try: self._add_key(key, is_secret=is_secret, is_local=is_local) self._check_options(key=key, value=value, options=options) return type_convert(value) except ValueError: raise RheaError("Cannot convert value `{}` (key: `{}`) " "to `{}`".format(value, key, target_type)) if isinstance(value, target_type): self._add_key(key, is_secret=is_secret, is_local=is_local) self._check_options(key=key, value=value, options=options) return value raise RheaError("Cannot convert value `{}` (key: `{}`) " "to `{}`".format(value, key, target_type)) def _get_typed_list_value(self, key, target_type, type_convert, is_optional=False, is_secret=False, is_local=False, default=None, options=None): """ Return the value corresponding to the key converted first to list than each element to the given type. Args: key: the dict key. target_type: The type we expect the variable or key to be in. type_convert: A lambda expression that converts the key to the desired type. is_optional: To raise an error if key was not found. is_secret: If the key is a secret. is_local: If the key is a local to this service. default: default value if is_optional is True. options: list/tuple if provided, the value must be one of these values. 
""" value = self._get_typed_value(key=key, target_type=list, type_convert=json.loads, is_optional=is_optional, is_secret=is_secret, is_local=is_local, default=default, options=options) if not value: return default raise_type = 'dict' if target_type == Mapping else target_type if not isinstance(value, list): raise RheaError("Cannot convert value `{}` (key: `{}`) " "to `{}`".format(value, key, raise_type)) # If we are here the value must be a list result = [] for v in value: if isinstance(v, six.string_types): try: result.append(type_convert(v)) except ValueError: raise RheaError("Cannot convert value `{}` (found in list key: `{}`) " "to `{}`".format(v, key, raise_type)) elif isinstance(v, target_type): result.append(v) else: raise RheaError("Cannot convert value `{}` (found in list key: `{}`) " "to `{}`".format(v, key, raise_type)) return result def parse_uri_spec(self, uri_spec): parts = uri_spec.split('@') if len(parts) != 2: raise RheaError( 'Received invalid uri_spec `{}`. ' 'The uri must be in the format `user:pass@host`'.format(uri_spec)) user_pass, host = parts user_pass = user_pass.split(':') if len(user_pass) != 2: raise RheaError( 'Received invalid uri_spec `{}`. `user:host` is not conform.' 'The uri must be in the format `user:pass@host`'.format(uri_spec)) return UriSpec(user=user_pass[0], password=user_pass[1], host=host)
40.279926
97
0.465527
2,273
21,872
4.306643
0.076991
0.062315
0.049035
0.020431
0.772398
0.763714
0.76116
0.734907
0.734907
0.732046
0
0.000433
0.472065
21,872
542
98
40.354244
0.84732
0.246159
0
0.684685
0
0
0.047718
0
0
0
0
0
0
1
0.063063
false
0.018018
0.024024
0.006006
0.177177
0.003003
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
bc1a91bd1ca209bd58872fed52cc89e7aee544bf
528
py
Python
modelmaker/client/__init__.py
wangjm12138/modelmaker
aa42ce9d504cc13a636b0c9f4ac49b71538c7cda
[ "MIT" ]
null
null
null
modelmaker/client/__init__.py
wangjm12138/modelmaker
aa42ce9d504cc13a636b0c9f4ac49b71538c7cda
[ "MIT" ]
null
null
null
modelmaker/client/__init__.py
wangjm12138/modelmaker
aa42ce9d504cc13a636b0c9f4ac49b71538c7cda
[ "MIT" ]
null
null
null
# coding: utf-8 """ ModelMaker SDK ModelMaker SDK # noqa: E501 OpenAPI spec version: 1.0.0 """ from __future__ import absolute_import # import apis into sdk package from modelmaker.client.api.train_job_api import TrainJobApi from modelmaker.client.api.framewrok_api import FrameworkApi from modelmaker.client.api.spec_api import SpecApi from modelmaker.client.api.model_api import ModelApi from modelmaker.client.api.service_api import ServiceApi from modelmaker.client.api.algorithm_api import AlgorithmApi
24
60
0.804924
74
528
5.581081
0.445946
0.20339
0.290557
0.33414
0
0
0
0
0
0
0
0.015284
0.132576
528
21
61
25.142857
0.886463
0.221591
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
70b47444beeb4fdcc221d3c79e0490707d297815
208
py
Python
mpsci/polyapprox/__init__.py
WarrenWeckesser/mpsci
675f0f3b76700529558a3bae2a1b2ca09552233b
[ "BSD-2-Clause" ]
7
2019-03-27T17:25:41.000Z
2022-03-31T03:55:29.000Z
mpsci/polyapprox/__init__.py
WarrenWeckesser/mpsci
675f0f3b76700529558a3bae2a1b2ca09552233b
[ "BSD-2-Clause" ]
2
2019-05-09T16:09:45.000Z
2021-01-04T03:55:09.000Z
mpsci/polyapprox/__init__.py
WarrenWeckesser/mpsci
675f0f3b76700529558a3bae2a1b2ca09552233b
[ "BSD-2-Clause" ]
null
null
null
""" ``polyapprox`` -------------- Some tools for forming polynomial or rational approximations of the inverse of a function. """ from ._inverse_approximant_tools import revert, inverse_taylor, inverse_pade
20.8
76
0.740385
25
208
5.96
0.8
0
0
0
0
0
0
0
0
0
0
0
0.125
208
9
77
23.111111
0.818681
0.581731
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
70b6fe6312f5e185a491446b875d2594564eda1c
80
py
Python
tests/pyflakes_bears/pep8_naming_test_files/E03/invalid.py
MacBox7/coala-pyflakes
637f8a2e77973384be79d30b0dae1f43072e60c8
[ "MIT" ]
null
null
null
tests/pyflakes_bears/pep8_naming_test_files/E03/invalid.py
MacBox7/coala-pyflakes
637f8a2e77973384be79d30b0dae1f43072e60c8
[ "MIT" ]
12
2018-05-21T06:12:59.000Z
2018-07-30T10:37:16.000Z
tests/pyflakes_bears/pep8_naming_test_files/E03/invalid.py
MacBox7/coala-pyflakes
637f8a2e77973384be79d30b0dae1f43072e60c8
[ "MIT" ]
1
2018-06-10T16:16:47.000Z
2018-06-10T16:16:47.000Z
def foo(): ''' >>> from mod import GoodFile as bad ''' pass
13.333333
43
0.45
9
80
4
1
0
0
0
0
0
0
0
0
0
0
0
0.4
80
5
44
16
0.75
0.4375
0
0
0
0
0
0
0
0
0
0
0
1
0.5
true
0.5
0
0
0.5
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
5
cb26322ce1283d68641aebc887090ac680eddfed
20,498
py
Python
python/pixel/banner_bash.py
virtualvivek/BannerBash
979074457da6f009ba80688bfe4390725adb0861
[ "Apache-2.0" ]
11
2020-07-31T10:59:23.000Z
2021-09-24T15:33:01.000Z
python/pixel/banner_bash.py
virtualvivek/BannerBash
979074457da6f009ba80688bfe4390725adb0861
[ "Apache-2.0" ]
4
2020-08-13T02:01:56.000Z
2021-06-05T15:36:43.000Z
python/pixel/banner_bash.py
virtualvivek/BannerBash
979074457da6f009ba80688bfe4390725adb0861
[ "Apache-2.0" ]
1
2020-08-26T15:16:00.000Z
2020-08-26T15:16:00.000Z
# encoding=utf8 def banner_bash( bannerstring ): length= len(bannerstring) i=0 v_str="" while ( i < 6 ): j=0 while (j < length): char = bannerstring[j].lower() if ( i == 0 ): if ( char == "0" ):v_str+=" ██████╗ " elif ( char == "1" ):v_str+=" ██╗" elif ( char == "2" ):v_str+=" ██████╗ " elif ( char == "3" ):v_str+=" ██████╗ " elif ( char == "4" ):v_str+=" ██╗ ██╗" elif ( char == "5" ):v_str+=" ███████╗" elif ( char == "6" ):v_str+=" ██████╗ " elif ( char == "7" ):v_str+=" ███████╗" elif ( char == "8" ):v_str+=" █████╗ " elif ( char == "9" ):v_str+=" █████╗ " elif ( char == "a" ):v_str+=" █████╗ " elif ( char == "b" ):v_str+=" ██████╗ " elif ( char == "c" ):v_str+=" █████╗ " elif ( char == "d" ):v_str+=" ██████╗ " elif ( char == "e" ):v_str+=" ███████╗" elif ( char == "f" ):v_str+=" ███████╗" elif ( char == "g" ):v_str+=" ██████╗ " elif ( char == "h" ):v_str+=" ██╗ ██╗" elif ( char == "i" ):v_str+=" ██╗" elif ( char == "j" ):v_str+=" ██╗" elif ( char == "k" ):v_str+=" ██╗ ██╗" elif ( char == "l" ):v_str+=" ██╗ " elif ( char == "m" ):v_str+=" ███╗ ███╗" elif ( char == "n" ):v_str+=" ███╗ ██╗" elif ( char == "o" ):v_str+=" █████╗ " elif ( char == "p" ):v_str+=" ██████╗ " elif ( char == "q" ):v_str+=" ██████╗ " elif ( char == "r" ):v_str+=" ██████╗ " elif ( char == "s" ):v_str+=" ██████╗" elif ( char == "t" ):v_str+=" ████████╗" elif ( char == "u" ):v_str+=" ██╗ ██╗" elif ( char == "v" ):v_str+=" ██╗ ██╗" elif ( char == "w" ):v_str+=" ██╗ ██╗" elif ( char == "x" ):v_str+=" ██╗ ██╗" elif ( char == "y" ):v_str+=" ██╗ ██╗" elif ( char == "z" ):v_str+=" ███████╗" elif ( char == "+" ):v_str+=" " elif ( char == "-" ):v_str+=" " elif ( char == "*" ):v_str+=" " elif ( char == "/" ):v_str+=" ██╗" elif ( char == "=" ):v_str+=" " elif ( char == "." ):v_str+=" " elif ( char == "<" ):v_str+=" ██╗" elif ( char == ">" ):v_str+=" ██╗ " elif ( char == "%" ):v_str+=" ██╗ ██╗" elif ( char == "^" ):v_str+=" ██ " elif ( char == "!" ):v_str+=" ██╗" elif ( char == "?" 
):v_str+=" █████╗ " elif ( char == ":" ):v_str+=" ██╗" elif ( char == "" ):v_str+=" ██╗" elif ( char == "$" ):v_str+=" ███████╗" elif ( char == "@" ):v_str+=" █████╗ " elif ( char == "," ):v_str+=" " elif ( char == "&" ):v_str+=" ╔██████╗ " elif ( char == "'" ):v_str+=" ██╗" elif ( char == "[" ):v_str+=" ████╗" elif ( char == "]" ):v_str+=" ████╗" elif ( char == "#" ):v_str+=" ██╗ ██╗ " elif ( char == " " ):v_str+=" " if ( i == 1 and j == 0 ):v_str+="\n" if ( i == 1 ): if ( char == "0" ):v_str+=" ██╔═████╗" elif ( char == "1" ):v_str+=" ███║" elif ( char == "2" ):v_str+=" ╚════██╗" elif ( char == "3" ):v_str+=" ╚════██╗" elif ( char == "4" ):v_str+=" ██║ ██║" elif ( char == "5" ):v_str+=" ██╔════╝" elif ( char == "6" ):v_str+=" ██╔════╝ " elif ( char == "7" ):v_str+=" ╚════██║" elif ( char == "8" ):v_str+=" ██╔══██╗" elif ( char == "9" ):v_str+=" ██╔══██╗" elif ( char == "a" ):v_str+=" ██╔══██╗" elif ( char == "b" ):v_str+=" ██╔══██╗" elif ( char == "c" ):v_str+=" ██╔══██╗" elif ( char == "d" ):v_str+=" ██╔══██╗" elif ( char == "e" ):v_str+=" ██╔════╝" elif ( char == "f" ):v_str+=" ██╔════╝" elif ( char == "g" ):v_str+=" ██╔════╝ " elif ( char == "h" ):v_str+=" ██║ ██║" elif ( char == "i" ):v_str+=" ██║" elif ( char == "j" ):v_str+=" ██║" elif ( char == "k" ):v_str+=" ██║ ██╔╝" elif ( char == "l" ):v_str+=" ██║ " elif ( char == "m" ):v_str+=" ████╗ ████║" elif ( char == "n" ):v_str+=" ████╗ ██║" elif ( char == "o" ):v_str+=" ██╔══██╗" elif ( char == "p" ):v_str+=" ██╔══██╗" elif ( char == "q" ):v_str+=" ██╔═══██╗" elif ( char == "r" ):v_str+=" ██╔══██╗" elif ( char == "s" ):v_str+=" ██╔════╝" elif ( char == "t" ):v_str+=" ╚══██╔══╝" elif ( char == "u" ):v_str+=" ██║ ██║" elif ( char == "v" ):v_str+=" ██║ ██║" elif ( char == "w" ):v_str+=" ██║ ██╗ ██║" elif ( char == "x" ):v_str+=" ╚██╗██╔╝" elif ( char == "y" ):v_str+=" ╚██╗ ██╔╝" elif ( char == "z" ):v_str+=" ╚════██║" elif ( char == "+" ):v_str+=" ██╗ " elif ( char == "-" ):v_str+=" " elif ( char == "*" 
):v_str+=" ██ ██" elif ( char == "/" ):v_str+=" ██╔╝" elif ( char == "=" ):v_str+=" ██████╗" elif ( char == "." ):v_str+=" " elif ( char == "<" ):v_str+=" ██╔╝" elif ( char == ">" ):v_str+=" ╚██╗ " elif ( char == "%" ):v_str+=" ╚═╝██╔╝" elif ( char == "^" ):v_str+=" ██ ██ " elif ( char == "!" ):v_str+=" ██║" elif ( char == "?" ):v_str+=" ██╔══██╗" elif ( char == ":" ):v_str+=" ╚═╝" elif ( char == "" ):v_str+=" ╚═╝" elif ( char == "$" ):v_str+=" ██╔██╔══╝" elif ( char == "@" ):v_str+=" ██╔══█═██" elif ( char == "," ):v_str+=" " elif ( char == "&" ):v_str+=" █════██║ " elif ( char == "'" ):v_str+=" ╚█║" elif ( char == "[" ):v_str+=" ██╔═╝" elif ( char == "]" ):v_str+=" ╚═██║" elif ( char == "#" ):v_str+=" ██████████╗" elif ( char == " " ):v_str+=" " if ( i == 2 and j == 0 ):v_str+="\n" if ( i == 2 ): if ( char == "0" ):v_str+=" ██║██╔██║" elif ( char == "1" ):v_str+=" ╚██║" elif ( char == "2" ):v_str+=" █████╔╝" elif ( char == "3" ):v_str+=" █████╔╝" elif ( char == "4" ):v_str+=" ███████║" elif ( char == "5" ):v_str+=" ███████╗" elif ( char == "6" ):v_str+=" ███████╗ " elif ( char == "7" ):v_str+=" ██╔╝" elif ( char == "8" ):v_str+=" ╚█████╔╝" elif ( char == "9" ):v_str+=" ╚██████║" elif ( char == "a" ):v_str+=" ███████║" elif ( char == "b" ):v_str+=" ██████╦╝" elif ( char == "c" ):v_str+=" ██║ ╚═╝" elif ( char == "d" ):v_str+=" ██║ ██║" elif ( char == "e" ):v_str+=" █████╗ " elif ( char == "f" ):v_str+=" █████╗ " elif ( char == "g" ):v_str+=" ██║ ██╗ " elif ( char == "h" ):v_str+=" ███████║" elif ( char == "i" ):v_str+=" ██║" elif ( char == "j" ):v_str+=" ██║" elif ( char == "k" ):v_str+=" █████═╝ " elif ( char == "l" ):v_str+=" ██║ " elif ( char == "m" ):v_str+=" ██╔████╔██║" elif ( char == "n" ):v_str+=" ██╔██╗██║" elif ( char == "o" ):v_str+=" ██║ ██║" elif ( char == "p" ):v_str+=" ██████╔╝" elif ( char == "q" ):v_str+=" ██║██╗██║" elif ( char == "r" ):v_str+=" ██████╔╝" elif ( char == "s" ):v_str+=" ╚█████╗ " elif ( char == "t" ):v_str+=" ██║ " elif ( char == 
"u" ):v_str+=" ██║ ██║" elif ( char == "v" ):v_str+=" ╚██╗ ██╔╝" elif ( char == "w" ):v_str+=" ╚██╗████╗██╔╝" elif ( char == "x" ):v_str+=" ╚███╔╝ " elif ( char == "y" ):v_str+=" ╚████╔╝ " elif ( char == "z" ):v_str+=" ███╔═╝" elif ( char == "+" ):v_str+=" ██████╗" elif ( char == "-" ):v_str+=" █████╗" elif ( char == "*" ):v_str+=" ████ " elif ( char == "/" ):v_str+=" ██╔╝ " elif ( char == "=" ):v_str+=" ╚═════╝" elif ( char == "." ):v_str+=" " elif ( char == "<" ):v_str+=" ██╔╝ " elif ( char == ">" ):v_str+=" ╚██╗" elif ( char == "%" ):v_str+=" ██╔╝ " elif ( char == "^" ):v_str+=" ██ ██" elif ( char == "!" ):v_str+=" ██║" elif ( char == "?" ):v_str+=" ╚═╝███╔╝" elif ( char == ":" ):v_str+=" " elif ( char == "" ):v_str+=" " elif ( char == "$" ):v_str+=" ╚██████╗ " elif ( char == "@" ):v_str+=" ██║ ████" elif ( char == "," ):v_str+=" " elif ( char == "&" ):v_str+=" ███ ╚╝ " elif ( char == "'" ):v_str+=" ╚╝" elif ( char == "[" ):v_str+=" ██║ " elif ( char == "]" ):v_str+=" ██║" elif ( char == "#" ):v_str+=" ╚═██╔═██╔═╝" elif ( char == " " ):v_str+=" " if ( i == 3 and j == 0 ):v_str+="\n" if ( i == 3 ): if ( char == "0" ):v_str+=" ████╔╝██║" elif ( char == "1" ):v_str+=" ██║" elif ( char == "2" ):v_str+=" ██╔═══╝ " elif ( char == "3" ):v_str+=" ╚═══██╗" elif ( char == "4" ):v_str+=" ╚════██║" elif ( char == "5" ):v_str+=" ╚════██║" elif ( char == "6" ):v_str+=" ██╔═══██╗" elif ( char == "7" ):v_str+=" ██╔╝ " elif ( char == "8" ):v_str+=" ██╔══██╗" elif ( char == "9" ):v_str+=" ╚═══██║" elif ( char == "a" ):v_str+=" ██╔══██║" elif ( char == "b" ):v_str+=" ██╔══██╗" elif ( char == "c" ):v_str+=" ██║ ██╗" elif ( char == "d" ):v_str+=" ██║ ██║" elif ( char == "e" ):v_str+=" ██╔══╝ " elif ( char == "f" ):v_str+=" ██╔══╝ " elif ( char == "g" ):v_str+=" ██║ ╚██╗" elif ( char == "h" ):v_str+=" ██╔══██║" elif ( char == "i" ):v_str+=" ██║" elif ( char == "j" ):v_str+=" ██╗ ██║" elif ( char == "k" ):v_str+=" ██╔═██╗ " elif ( char == "l" ):v_str+=" ██║ " elif ( char == "m" 
):v_str+=" ██║╚██╔╝██║" elif ( char == "n" ):v_str+=" ██║╚████║" elif ( char == "o" ):v_str+=" ██║ ██║" elif ( char == "p" ):v_str+=" ██╔═══╝ " elif ( char == "q" ):v_str+=" ╚██████╔╝" elif ( char == "r" ):v_str+=" ██╔══██╗" elif ( char == "s" ):v_str+=" ╚═══██╗" elif ( char == "t" ):v_str+=" ██║ " elif ( char == "u" ):v_str+=" ██║ ██║" elif ( char == "v" ):v_str+=" ╚████╔╝ " elif ( char == "w" ):v_str+=" ████╔═████║ " elif ( char == "x" ):v_str+=" ██╔██╗ " elif ( char == "y" ):v_str+=" ╚██╔╝ " elif ( char == "z" ):v_str+=" ██╔══╝ " elif ( char == "+" ):v_str+=" ╚═██╔═╝" elif ( char == "-" ):v_str+=" ╚════╝" elif ( char == "*" ):v_str+=" ████ " elif ( char == "/" ):v_str+=" ██╔╝ " elif ( char == "=" ):v_str+=" ██████╗" elif ( char == "." ):v_str+=" " elif ( char == "<" ):v_str+=" ╚██╗ " elif ( char == ">" ):v_str+=" ██╔╝" elif ( char == "%" ):v_str+=" ██╔╝ " elif ( char == "^" ):v_str+=" " elif ( char == "!" ):v_str+=" ╚═╝" elif ( char == "?" ):v_str+=" ╚══╝ " elif ( char == ":" ):v_str+=" " elif ( char == "" ):v_str+=" ██╗" elif ( char == "$" ):v_str+=" ╚═██╔██╗" elif ( char == "@" ):v_str+=" ██╚════╝ " elif ( char == "," ):v_str+=" ██╗" elif ( char == "&" ):v_str+=" ██╔══██ " elif ( char == "'" ):v_str+=" " elif ( char == "[" ):v_str+=" ██║ " elif ( char == "]" ):v_str+=" ██║" elif ( char == "#" ):v_str+=" ██████████╗" elif ( char == " " ):v_str+=" " if ( i == 4 and j == 0 ):v_str+="\n" if ( i == 4 ): if ( char == "0" ):v_str+=" ╚██████╔╝" elif ( char == "1" ):v_str+=" ██║" elif ( char == "2" ):v_str+=" ███████╗" elif ( char == "3" ):v_str+=" ██████╔╝" elif ( char == "4" ):v_str+=" ██║" elif ( char == "5" ):v_str+=" ███████║" elif ( char == "6" ):v_str+=" ╚██████╔╝" elif ( char == "7" ):v_str+=" ██║ " elif ( char == "8" ):v_str+=" ╚█████╔╝" elif ( char == "9" ):v_str+=" █████╔╝" elif ( char == "a" ):v_str+=" ██║ ██║" elif ( char == "b" ):v_str+=" ██████╦╝" elif ( char == "c" ):v_str+=" ╚█████╔╝" elif ( char == "d" ):v_str+=" ██████╔╝" elif ( char == "e" 
):v_str+=" ███████╗" elif ( char == "f" ):v_str+=" ██║ " elif ( char == "g" ):v_str+=" ╚██████╔╝" elif ( char == "h" ):v_str+=" ██║ ██║" elif ( char == "i" ):v_str+=" ██║" elif ( char == "j" ):v_str+=" ╚█████╔╝" elif ( char == "k" ):v_str+=" ██║ ╚██╗" elif ( char == "l" ):v_str+=" ███████╗" elif ( char == "m" ):v_str+=" ██║ ╚═╝ ██║" elif ( char == "n" ):v_str+=" ██║ ╚███║" elif ( char == "o" ):v_str+=" ╚█████╔╝" elif ( char == "p" ):v_str+=" ██║ " elif ( char == "q" ):v_str+=" ╚═██╔═╝ " elif ( char == "r" ):v_str+=" ██║ ██║" elif ( char == "s" ):v_str+=" ██████╔╝" elif ( char == "t" ):v_str+=" ██║ " elif ( char == "u" ):v_str+=" ╚██████╔╝" elif ( char == "v" ):v_str+=" ╚██╔╝ " elif ( char == "w" ):v_str+=" ╚██╔╝ ╚██╔╝ " elif ( char == "x" ):v_str+=" ██╔╝╚██╗" elif ( char == "y" ):v_str+=" ██║ " elif ( char == "z" ):v_str+=" ███████╗" elif ( char == "+" ):v_str+=" ╚═╝ " elif ( char == "-" ):v_str+=" " elif ( char == "*" ):v_str+=" ██ ██" elif ( char == "/" ):v_str+=" ██╔╝ " elif ( char == "=" ):v_str+=" ╚═════╝" elif ( char == "." ):v_str+=" ██╗" elif ( char == "<" ):v_str+=" ╚██╗" elif ( char == ">" ):v_str+=" ██╔╝ " elif ( char == "%" ):v_str+=" ██╔╝██╗" elif ( char == "^" ):v_str+=" " elif ( char == "!" ):v_str+=" ██╗" elif ( char == "?" 
):v_str+=" ██╗ " elif ( char == ":" ):v_str+=" ██╗" elif ( char == "" ):v_str+=" ╚█║" elif ( char == "$" ):v_str+=" ███████╔╝" elif ( char == "@" ):v_str+=" ╚████████" elif ( char == "," ):v_str+=" ╚█║" elif ( char == "&" ):v_str+=" █████████╗" elif ( char == "'" ):v_str+=" " elif ( char == "[" ):v_str+=" ████╗" elif ( char == "]" ):v_str+=" ████║" elif ( char == "#" ):v_str+=" ╚██╔═██╔══╝" elif ( char == " " ):v_str+=" " if ( i == 5 and j == 0 ):v_str+="\n" if ( i == 5 ): if ( char == "0" ):v_str+=" ╚═════╝ " elif ( char == "1" ):v_str+=" ╚═╝" elif ( char == "2" ):v_str+=" ╚══════╝" elif ( char == "3" ):v_str+=" ╚═════╝ " elif ( char == "4" ):v_str+=" ╚═╝" elif ( char == "5" ):v_str+=" ╚══════╝" elif ( char == "6" ):v_str+=" ╚═════╝ " elif ( char == "7" ):v_str+=" ╚═╝ " elif ( char == "8" ):v_str+=" ╚════╝ " elif ( char == "9" ):v_str+=" ╚════╝ " elif ( char == "a" ):v_str+=" ╚═╝ ╚═╝" elif ( char == "b" ):v_str+=" ╚═════╝ " elif ( char == "c" ):v_str+=" ╚════╝ " elif ( char == "d" ):v_str+=" ╚═════╝ " elif ( char == "e" ):v_str+=" ╚══════╝" elif ( char == "f" ):v_str+=" ╚═╝ " elif ( char == "g" ):v_str+=" ╚═════╝ " elif ( char == "h" ):v_str+=" ╚═╝ ╚═╝" elif ( char == "i" ):v_str+=" ╚═╝" elif ( char == "j" ):v_str+=" ╚════╝ " elif ( char == "k" ):v_str+=" ╚═╝ ╚═╝" elif ( char == "l" ):v_str+=" ╚══════╝" elif ( char == "m" ):v_str+=" ╚═╝ ╚═╝" elif ( char == "n" ):v_str+=" ╚═╝ ╚══╝" elif ( char == "o" ):v_str+=" ╚════╝ " elif ( char == "p" ):v_str+=" ╚═╝ " elif ( char == "q" ):v_str+=" ╚═╝ " elif ( char == "r" ):v_str+=" ╚═╝ ╚═╝" elif ( char == "s" ):v_str+=" ╚═════╝ " elif ( char == "t" ):v_str+=" ╚═╝ " elif ( char == "u" ):v_str+=" ╚═════╝ " elif ( char == "v" ):v_str+=" ╚═╝ " elif ( char == "w" ):v_str+=" ╚═╝ ╚═╝ " elif ( char == "x" ):v_str+=" ╚═╝ ╚═╝" elif ( char == "y" ):v_str+=" ╚═╝ " elif ( char == "z" ):v_str+=" ╚══════╝" elif ( char == "+" ):v_str+=" " elif ( char == "-" ):v_str+=" " elif ( char == "*" ):v_str+=" " elif ( char == "/" ):v_str+=" ╚═╝ " elif 
( char == "=" ):v_str+=" " elif ( char == "." ):v_str+=" ╚═╝" elif ( char == "<" ):v_str+=" ╚═╝" elif ( char == ">" ):v_str+=" ╚═╝ " elif ( char == "%" ):v_str+=" ╚═╝ ╚═╝" elif ( char == "^" ):v_str+=" " elif ( char == "!" ):v_str+=" ╚═╝" elif ( char == "?" ):v_str+=" ╚═╝ " elif ( char == ":" ):v_str+=" ╚═╝" elif ( char == "" ):v_str+=" ╚╝" elif ( char == "$" ):v_str+=" ╚══════╝ " elif ( char == "@" ):v_str+=" ╚══════╝" elif ( char == "," ):v_str+=" ╚╝" elif ( char == "&" ):v_str+=" ╚════════╝" elif ( char == "'" ):v_str+=" " elif ( char == "[" ):v_str+=" ╚═══╝" elif ( char == "]" ):v_str+=" ╚═══╝" elif ( char == "#" ):v_str+=" ╚═╝ ╚═╝ " elif ( char == " " ):v_str+=" " j+=1 i+=1 return v_str #============================================================== print(banner_bash("Hi Earth"))
48.921241
63
0.246463
2,112
20,498
3.155303
0.065814
0.216687
0.194478
0.248499
0.90021
0.555222
0.471489
0.465636
0.441477
0.381903
0
0.007297
0.4518
20,498
418
64
49.038278
0.409985
0.003659
0
0.310345
0
0
0.163369
0
0
0
0
0
0
1
0.002653
false
0
0
0
0.005305
0.002653
0
0
0
null
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
cb3aff899bb4dde2a93d3708bffb192fd7c2df8e
3,542
py
Python
stack.py
edwintcloud/cs1.3_exercises
cd9cf117995a4bfc1ff8cfefac157a05179ee44f
[ "MIT" ]
null
null
null
stack.py
edwintcloud/cs1.3_exercises
cd9cf117995a4bfc1ff8cfefac157a05179ee44f
[ "MIT" ]
3
2019-04-20T08:50:05.000Z
2019-05-18T17:08:16.000Z
stack.py
edwintcloud/cs1.3_exercises
cd9cf117995a4bfc1ff8cfefac157a05179ee44f
[ "MIT" ]
null
null
null
#!python from linkedlist import LinkedList class LinkedStack(object): def __init__(self, iterable=None): """Initialize this stack and push the given items, if any.""" # Initialize a new linked list to store the items self.list = LinkedList() if iterable is not None: for item in iterable: self.push(item) def __repr__(self): """Return a string representation of this stack.""" return 'Stack({} items, top={})'.format(self.length(), self.peek()) def is_empty(self): """Return True if this stack is empty, or False otherwise.""" return self.list.is_empty() def length(self): """Return the number of items in this stack.""" return self.list.length() def push(self, item): """Insert the given item on the top of this stack. Best Case: O(1) Worse Case: O(1)""" self.list.append(item) def peek(self): """Return the item on the top of this stack without removing it, or None if this stack is empty.""" # return None if stack is empty if self.list.is_empty(): return None # return last item of linked_list return self.list.get_at_index(self.list.length()-1) def pop(self): """Remove and return the item on the top of this stack, or raise ValueError if this stack is empty. 
Best Case: O(n) Worse Case: O(n)""" # raise value error if stack is empty if self.list.is_empty(): raise ValueError("stack is empty") # get last item in stack last_item = self.peek() # delete item from stack self.list.delete(last_item) # return item return last_item class ArrayStack(object): def __init__(self, iterable=None): """Initialize this stack and push the given items, if any.""" # Initialize a new list (dynamic array) to store the items self.list = list() if iterable is not None: for item in iterable: self.push(item) def __repr__(self): """Return a string representation of this stack.""" return 'Stack({} items, top={})'.format(self.length(), self.peek()) def is_empty(self): """Return True if this stack is empty, or False otherwise.""" return self.length() <= 0 def length(self): """Return the number of items in this stack.""" return len(self.list) def push(self, item): """Insert the given item on the top of this stack. Best Case: O(1) Worse Case: O(1)""" self.list.append(item) def peek(self): """Return the item on the top of this stack without removing it, or None if this stack is empty.""" # return None if stack is empty if self.is_empty(): return None # return last item of list return self.list[-1] def pop(self): """Remove and return the item on the top of this stack, or raise ValueError if this stack is empty. Best Case: O(1) Worse Case: O(1)""" # raise value error if stack is empty if self.is_empty(): raise ValueError("stack is empty") # return last item from list and remove item return self.list.pop() # Implement LinkedStack and ArrayStack above, then change the assignment below # to use each of your Stack implementations to verify they each pass all tests Stack = LinkedStack # Stack = ArrayStack
30.016949
78
0.602484
497
3,542
4.235412
0.183099
0.063183
0.068409
0.037055
0.743943
0.743943
0.72209
0.696437
0.659382
0.615677
0
0.003639
0.301807
3,542
117
79
30.273504
0.847554
0.447487
0
0.666667
0
0
0.040952
0
0
0
0
0
0
1
0.291667
false
0
0.020833
0
0.604167
0
0
0
0
null
0
0
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
cb5584c796b846554f39c2b99d8b7a298ef9b123
54
py
Python
tonic/models/vic/__init__.py
jhamman/VICpy
67cc1a1efa481a65e304917bc8af36c2a30af055
[ "MIT" ]
18
2015-07-16T15:39:10.000Z
2021-10-12T15:22:08.000Z
tonic/models/vic/__init__.py
jhamman/VICpy
67cc1a1efa481a65e304917bc8af36c2a30af055
[ "MIT" ]
46
2015-07-16T18:00:45.000Z
2021-01-13T19:08:12.000Z
tonic/models/vic/__init__.py
jhamman/VICpy
67cc1a1efa481a65e304917bc8af36c2a30af055
[ "MIT" ]
24
2015-07-16T00:00:59.000Z
2020-08-19T05:02:50.000Z
from .vic import VIC, VICRuntimeError, read_vic_ascii
27
53
0.833333
8
54
5.375
0.75
0
0
0
0
0
0
0
0
0
0
0
0.111111
54
1
54
54
0.895833
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cb6450a428eb76a170c898678e7d1a5e50e6837b
222
py
Python
play-travis/test.py
Otus-DevOps-2021-11/lawn58_infra
9dc63e588df1b5588bfa9615caaddb852868f29c
[ "MIT" ]
null
null
null
play-travis/test.py
Otus-DevOps-2021-11/lawn58_infra
9dc63e588df1b5588bfa9615caaddb852868f29c
[ "MIT" ]
3
2021-12-21T17:08:08.000Z
2022-01-17T23:27:43.000Z
play-travis/test.py
Otus-DevOps-2021-11/lawn58_infra
9dc63e588df1b5588bfa9615caaddb852868f29c
[ "MIT" ]
null
null
null
import unittest class NumbersTest(unittest.TestCase): def test_equal(self): play-travis self.assertEqual(1 + 1, 2) ======= self.assertEqual(1 , 1) main if __name__ == '__main__': unittest.main()
15.857143
37
0.63964
27
222
4.925926
0.62963
0.225564
0.240602
0.255639
0
0
0
0
0
0
0
0.028736
0.216216
222
13
38
17.076923
0.735632
0
0
0
0
0
0.036036
0
0
0
0
0
0.2
0
null
null
0
0.1
null
null
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
cba6f86847bd81267a1f10c778f6f0584b7bacd0
62
py
Python
tests/unit/killself.py
tholom/pake
6777d63255eb3e4e834b77c9a1504b72dd2ed296
[ "BSD-3-Clause" ]
3
2019-08-28T21:54:30.000Z
2021-10-13T22:00:59.000Z
tests/unit/killself.py
tholom/pake
6777d63255eb3e4e834b77c9a1504b72dd2ed296
[ "BSD-3-Clause" ]
1
2021-01-05T01:37:57.000Z
2021-01-05T14:10:17.000Z
tests/unit/killself.py
tholom/pake
6777d63255eb3e4e834b77c9a1504b72dd2ed296
[ "BSD-3-Clause" ]
1
2021-01-16T18:44:36.000Z
2021-01-16T18:44:36.000Z
import os import signal os.kill(os.getpid(), signal.SIGKILL)
12.4
36
0.758065
10
62
4.7
0.6
0
0
0
0
0
0
0
0
0
0
0
0.112903
62
4
37
15.5
0.854545
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cbd902f96ea4734b268d1e64666eef50c66e85bb
157
py
Python
simplifiedmc/__init__.py
jpmvferreira/simplifiedmc
88f6f40463fec07c47f94c7c7ec08dcea5c6ddd6
[ "MIT" ]
null
null
null
simplifiedmc/__init__.py
jpmvferreira/simplifiedmc
88f6f40463fec07c47f94c7c7ec08dcea5c6ddd6
[ "MIT" ]
null
null
null
simplifiedmc/__init__.py
jpmvferreira/simplifiedmc
88f6f40463fec07c47f94c7c7ec08dcea5c6ddd6
[ "MIT" ]
null
null
null
# ez-emcee specific functions from .emcee import load, save, autocorrelation, timeseries, runlog # shared functions from .shared import corner, syslog, CIs
26.166667
66
0.789809
20
157
6.2
0.75
0.209677
0
0
0
0
0
0
0
0
0
0
0.140127
157
5
67
31.4
0.918519
0.280255
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
1dd8ead5d60aa6c6a9520eacfc744a9979fe0346
58
py
Python
start.py
heEXDe/password_generator
c546c09be927abc2a02971cab5f2d19817208cda
[ "MIT" ]
null
null
null
start.py
heEXDe/password_generator
c546c09be927abc2a02971cab5f2d19817208cda
[ "MIT" ]
null
null
null
start.py
heEXDe/password_generator
c546c09be927abc2a02971cab5f2d19817208cda
[ "MIT" ]
null
null
null
# start here import GUI import functions GUI.gui_start()
9.666667
16
0.775862
9
58
4.888889
0.555556
0
0
0
0
0
0
0
0
0
0
0
0.155172
58
5
17
11.6
0.897959
0.172414
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
1de597219c0e30f86fde4e0cf42be90cb3fbcbb3
215
py
Python
implementations/samsung_tv.py
heseba/wotdl2api
ff4f76d66b45c74d318a6e0701e0decd40623e76
[ "MIT" ]
1
2020-12-11T07:44:07.000Z
2020-12-11T07:44:07.000Z
implementations/samsung_tv.py
heseba/wotdl2api
ff4f76d66b45c74d318a6e0701e0decd40623e76
[ "MIT" ]
1
2021-04-24T19:20:50.000Z
2021-04-26T07:40:35.000Z
implementations/samsung_tv.py
heseba/wotdl2api
ff4f76d66b45c74d318a6e0701e0decd40623e76
[ "MIT" ]
null
null
null
from flask import Response print('samsungTV imported') def switch_on_tv(path_param, power): return Response(path_param + str(power), status=200) def switch_off_tv(): return Response('Running', status=200)
23.888889
56
0.753488
31
215
5.032258
0.645161
0.115385
0
0
0
0
0
0
0
0
0
0.032258
0.134884
215
9
57
23.888889
0.806452
0
0
0
0
0
0.115741
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
5
382beeda3863de2cee1f95073193c5de2b738d69
85
py
Python
qtt/algorithms/__init__.py
dpfranke/qtt
f60e812fe8b329e67f7b38d02eef552daf08d7c9
[ "MIT" ]
null
null
null
qtt/algorithms/__init__.py
dpfranke/qtt
f60e812fe8b329e67f7b38d02eef552daf08d7c9
[ "MIT" ]
null
null
null
qtt/algorithms/__init__.py
dpfranke/qtt
f60e812fe8b329e67f7b38d02eef552daf08d7c9
[ "MIT" ]
null
null
null
""" Methods for analysis of quantom dots and spin-qubits """ from . import functions
28.333333
60
0.741176
12
85
5.25
1
0
0
0
0
0
0
0
0
0
0
0
0.164706
85
3
61
28.333333
0.887324
0.611765
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
697f60fb438d02255f17afe91a38534d7dd923f3
114
py
Python
lightyear/metrics/__init__.py
alvations/lightyear
56a327ab11547fa13770109ef4ec481a9b341b15
[ "MIT" ]
null
null
null
lightyear/metrics/__init__.py
alvations/lightyear
56a327ab11547fa13770109ef4ec481a9b341b15
[ "MIT" ]
1
2022-01-10T07:03:01.000Z
2022-01-10T07:03:01.000Z
lightyear/metrics/__init__.py
alvations/lightyear
56a327ab11547fa13770109ef4ec481a9b341b15
[ "MIT" ]
null
null
null
from .bert_score import BERTScore from .bleu import BLEUScore, CHRFScore, TERScore from .comet import COMETScore
22.8
48
0.824561
15
114
6.2
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.131579
114
4
49
28.5
0.939394
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
699e9b7060333f035e58a72573e727c5720fbdae
63
py
Python
faces/__init__.py
blandfort/mirror
70ae41fd151275d42506d07117aa2ea3ce59ad23
[ "MIT" ]
null
null
null
faces/__init__.py
blandfort/mirror
70ae41fd151275d42506d07117aa2ea3ce59ad23
[ "MIT" ]
6
2020-11-06T22:40:05.000Z
2022-03-12T00:51:06.000Z
faces/__init__.py
blandfort/mirror
70ae41fd151275d42506d07117aa2ea3ce59ad23
[ "MIT" ]
null
null
null
from .shards import FaceShard from .lenses import FaceswapLens
21
32
0.84127
8
63
6.625
0.75
0
0
0
0
0
0
0
0
0
0
0
0.126984
63
2
33
31.5
0.963636
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
69c9d40ba0b9337cd6b3826113e7032642c27ec0
35
py
Python
examples/class_attribute.py
doboy/Underscore
d98273db3144cda79191d2c90f45d81b6d700b1f
[ "MIT" ]
7
2016-09-23T00:44:05.000Z
2021-10-04T21:19:12.000Z
examples/class_attribute.py
jameswu1991/Underscore
d98273db3144cda79191d2c90f45d81b6d700b1f
[ "MIT" ]
1
2016-09-23T00:45:05.000Z
2019-02-16T19:05:37.000Z
examples/class_attribute.py
jameswu1991/Underscore
d98273db3144cda79191d2c90f45d81b6d700b1f
[ "MIT" ]
3
2016-09-23T01:13:15.000Z
2018-07-20T21:22:17.000Z
class Bar: x = 1 print(Bar.x)
7
12
0.542857
7
35
2.714286
0.714286
0.421053
0
0
0
0
0
0
0
0
0
0.041667
0.314286
35
4
13
8.75
0.75
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0.666667
0.333333
1
1
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
5
69e8dfe03b7e7df68cf13d5e9720b8f986f2bb46
562
py
Python
src/py_dss_interface/models/Lines/Lines.py
davilamds/py_dss_interface
a447c97787aeac962381db88dd622ccb235eef4b
[ "MIT" ]
null
null
null
src/py_dss_interface/models/Lines/Lines.py
davilamds/py_dss_interface
a447c97787aeac962381db88dd622ccb235eef4b
[ "MIT" ]
null
null
null
src/py_dss_interface/models/Lines/Lines.py
davilamds/py_dss_interface
a447c97787aeac962381db88dd622ccb235eef4b
[ "MIT" ]
null
null
null
# -*- encoding: utf-8 -*- """ Created by eniocc at 11/10/2020 """ from py_dss_interface.models.Lines.LinesV import LinesV from py_dss_interface.models.Lines.LinesS import LinesS from py_dss_interface.models.Lines.LinesI import LinesI from py_dss_interface.models.Lines.LinesF import LinesF class Lines(LinesV, LinesS, LinesI, LinesF): """ This interface implements the Lines (ILines) interface of OpenDSS by declaring 4 procedures for accessing the different properties included in this interface: LinesV, LinesS, LinesI, LinesF. """ pass
33.058824
113
0.763345
79
562
5.329114
0.481013
0.057007
0.085511
0.171021
0.275534
0.275534
0
0
0
0
0
0.020964
0.151246
562
16
114
35.125
0.861635
0.439502
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.166667
0.666667
0
0.833333
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
69feee6209f49eb851b33bf51978c3252390303c
44
py
Python
minato_namikaze/lib/database/__init__.py
EitoZX/yondaime-hokage
c86285b385a60e3e47b9a7205ae36e7249b47eee
[ "Apache-2.0" ]
8
2021-05-20T07:32:20.000Z
2022-02-09T17:09:38.000Z
minato_namikaze/lib/database/__init__.py
EitoZX/yondaime-hokage
c86285b385a60e3e47b9a7205ae36e7249b47eee
[ "Apache-2.0" ]
77
2021-06-18T08:55:12.000Z
2022-03-31T07:15:12.000Z
minato_namikaze/lib/database/__init__.py
EitoZX/yondaime-hokage
c86285b385a60e3e47b9a7205ae36e7249b47eee
[ "Apache-2.0" ]
8
2021-08-14T11:29:49.000Z
2022-03-16T17:37:53.000Z
from .backup import * from .badges import *
14.666667
21
0.727273
6
44
5.333333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.181818
44
2
22
22
0.888889
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
38558821ab5bb4b5b363efde46a63be32d51c618
15,320
py
Python
comments/migrations/0001_initial.py
RichardHirtle/c4all
a09c4b098cf1a58ed5e3ab6116a749a17ec035e0
[ "MIT" ]
null
null
null
comments/migrations/0001_initial.py
RichardHirtle/c4all
a09c4b098cf1a58ed5e3ab6116a749a17ec035e0
[ "MIT" ]
null
null
null
comments/migrations/0001_initial.py
RichardHirtle/c4all
a09c4b098cf1a58ed5e3ab6116a749a17ec035e0
[ "MIT" ]
1
2021-07-08T09:50:05.000Z
2021-07-08T09:50:05.000Z
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'CustomUser' db.create_table(u'comments_customuser', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('password', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)), ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)), ('email', self.gf('django.db.models.fields.EmailField')(db_index=True, unique=True, max_length=255, blank=True)), ('full_name', self.gf('django.db.models.fields.CharField')(max_length=255)), ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)), ('is_admin', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)), ('avatar_num', self.gf('django.db.models.fields.IntegerField')(default=6)), ('hidden', self.gf('django.db.models.fields.BooleanField')(default=False)), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), )) db.send_create_signal(u'comments', ['CustomUser']) # Adding M2M table for field groups on 'CustomUser' db.create_table(u'comments_customuser_groups', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('customuser', models.ForeignKey(orm[u'comments.customuser'], null=False)), ('group', models.ForeignKey(orm[u'auth.group'], null=False)) )) db.create_unique(u'comments_customuser_groups', ['customuser_id', 'group_id']) # Adding M2M table for field user_permissions on 'CustomUser' db.create_table(u'comments_customuser_user_permissions', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('customuser', models.ForeignKey(orm[u'comments.customuser'], null=False)), 
('permission', models.ForeignKey(orm[u'auth.permission'], null=False)) )) db.create_unique(u'comments_customuser_user_permissions', ['customuser_id', 'permission_id']) # Adding model 'Site' db.create_table(u'comments_site', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('domain', self.gf('django.db.models.fields.CharField')(max_length=255)), ('anonymous_allowed', self.gf('django.db.models.fields.BooleanField')(default=False)), ('rs_customer_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)), )) db.send_create_signal(u'comments', ['Site']) # Adding model 'Thread' db.create_table(u'comments_thread', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('site', self.gf('django.db.models.fields.related.ForeignKey')(related_name='threads', to=orm['comments.Site'])), ('url', self.gf('django.db.models.fields.CharField')(max_length=255)), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('allow_comments', self.gf('django.db.models.fields.BooleanField')(default=True)), ('liked_by_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ('disliked_by_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ('titles', self.gf('jsonfield.fields.JSONField')(default={'page_title': '', 'h1_title': '', 'selector_title': ''})), )) db.send_create_signal(u'comments', ['Thread']) # Adding unique constraint on 'Thread', fields ['site', 'url'] db.create_unique(u'comments_thread', ['site_id', 'url']) # Adding M2M table for field liked_by on 'Thread' db.create_table(u'comments_thread_liked_by', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('thread', models.ForeignKey(orm[u'comments.thread'], null=False)), ('customuser', models.ForeignKey(orm[u'comments.customuser'], null=False)) )) db.create_unique(u'comments_thread_liked_by', ['thread_id', 'customuser_id']) # Adding M2M table for field 
disliked_by on 'Thread' db.create_table(u'comments_thread_disliked_by', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('thread', models.ForeignKey(orm[u'comments.thread'], null=False)), ('customuser', models.ForeignKey(orm[u'comments.customuser'], null=False)) )) db.create_unique(u'comments_thread_disliked_by', ['thread_id', 'customuser_id']) # Adding model 'Comment' db.create_table(u'comments_comment', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', null=True, to=orm['comments.CustomUser'])), ('poster_name', self.gf('django.db.models.fields.CharField')(max_length=100)), ('thread', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['comments.Thread'])), ('text', self.gf('django.db.models.fields.TextField')()), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('liked_by_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ('disliked_by_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ('avatar_num', self.gf('django.db.models.fields.IntegerField')(default=6)), ('hidden', self.gf('django.db.models.fields.BooleanField')(default=False)), ('ip_address', self.gf('django.db.models.fields.GenericIPAddressField')(max_length=39, null=True)), )) db.send_create_signal(u'comments', ['Comment']) # Adding M2M table for field liked_by on 'Comment' db.create_table(u'comments_comment_liked_by', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('comment', models.ForeignKey(orm[u'comments.comment'], null=False)), ('customuser', models.ForeignKey(orm[u'comments.customuser'], null=False)) )) db.create_unique(u'comments_comment_liked_by', ['comment_id', 'customuser_id']) # Adding M2M table for field disliked_by on 'Comment' db.create_table(u'comments_comment_disliked_by', ( ('id', 
models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('comment', models.ForeignKey(orm[u'comments.comment'], null=False)), ('customuser', models.ForeignKey(orm[u'comments.customuser'], null=False)) )) db.create_unique(u'comments_comment_disliked_by', ['comment_id', 'customuser_id']) def backwards(self, orm): # Removing unique constraint on 'Thread', fields ['site', 'url'] db.delete_unique(u'comments_thread', ['site_id', 'url']) # Deleting model 'CustomUser' db.delete_table(u'comments_customuser') # Removing M2M table for field groups on 'CustomUser' db.delete_table('comments_customuser_groups') # Removing M2M table for field user_permissions on 'CustomUser' db.delete_table('comments_customuser_user_permissions') # Deleting model 'Site' db.delete_table(u'comments_site') # Deleting model 'Thread' db.delete_table(u'comments_thread') # Removing M2M table for field liked_by on 'Thread' db.delete_table('comments_thread_liked_by') # Removing M2M table for field disliked_by on 'Thread' db.delete_table('comments_thread_disliked_by') # Deleting model 'Comment' db.delete_table(u'comments_comment') # Removing M2M table for field liked_by on 'Comment' db.delete_table('comments_comment_liked_by') # Removing M2M table for field disliked_by on 'Comment' db.delete_table('comments_comment_disliked_by') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': 
('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'comments.comment': { 'Meta': {'ordering': "['created']", 'object_name': 'Comment'}, 'avatar_num': ('django.db.models.fields.IntegerField', [], {'default': '6'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'disliked_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'disliked_comments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['comments.CustomUser']"}), 'disliked_by_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}), 'liked_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'liked_comments'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['comments.CustomUser']"}), 'liked_by_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'poster_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'text': ('django.db.models.fields.TextField', [], {}), 'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': u"orm['comments.Thread']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': u"orm['comments.CustomUser']"}) }, u'comments.customuser': { 'Meta': {'object_name': 'CustomUser'}, 'avatar_num': ('django.db.models.fields.IntegerField', [], {'default': '6'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '255', 'blank': 'True'}), 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'comments.site': { 'Meta': {'object_name': 'Site'}, 'anonymous_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'domain': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'rs_customer_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, u'comments.thread': { 'Meta': {'unique_together': "(('site', 'url'),)", 'object_name': 'Thread'}, 'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'disliked_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'disliked_threads'", 'null': 'True', 
'symmetrical': 'False', 'to': u"orm['comments.CustomUser']"}), 'disliked_by_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'liked_by': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'liked_threads'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['comments.CustomUser']"}), 'liked_by_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'threads'", 'to': u"orm['comments.Site']"}), 'titles': ('jsonfield.fields.JSONField', [], {'default': "{'page_title': '', 'h1_title': '', 'selector_title': ''}"}), 'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['comments']
66.899563
218
0.61436
1,746
15,320
5.230241
0.076747
0.075339
0.130311
0.186159
0.843079
0.806943
0.790298
0.746934
0.671375
0.544131
0
0.007202
0.184334
15,320
229
219
66.899563
0.723592
0.06312
0
0.331461
0
0
0.491277
0.279274
0
0
0
0
0
1
0.011236
false
0.011236
0.022472
0
0.050562
0
0
0
0
null
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
388e54ae59751fe96e59a6cf8d99ec1d947f56ed
60
py
Python
src/analyzer/components/__init__.py
ComeOnGetMe/checkee-stats
d6d3249fd6d99b7bc673423155a8714dea06fe3c
[ "Unlicense" ]
null
null
null
src/analyzer/components/__init__.py
ComeOnGetMe/checkee-stats
d6d3249fd6d99b7bc673423155a8714dea06fe3c
[ "Unlicense" ]
null
null
null
src/analyzer/components/__init__.py
ComeOnGetMe/checkee-stats
d6d3249fd6d99b7bc673423155a8714dea06fe3c
[ "Unlicense" ]
null
null
null
from page import Page from plots import ScatterPlot, BoxPlot
30
38
0.85
9
60
5.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.133333
60
2
38
30
0.980769
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2a4179c67bfd94049f20eda11d894027cb80e826
127
py
Python
backend/dict/admin.py
RagAndRoll/wordbook
495ae9f222a03323c6eddb542aa8b2b9200da8bd
[ "MIT" ]
null
null
null
backend/dict/admin.py
RagAndRoll/wordbook
495ae9f222a03323c6eddb542aa8b2b9200da8bd
[ "MIT" ]
null
null
null
backend/dict/admin.py
RagAndRoll/wordbook
495ae9f222a03323c6eddb542aa8b2b9200da8bd
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Expressions # Register your models here. admin.site.register(Expressions)
25.4
32
0.826772
17
127
6.176471
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.110236
127
5
33
25.4
0.929204
0.204724
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2a66daf53ff9e540b383a93a80ea40f0acc03fb2
37
py
Python
LociAnalysis/whitelistdb/__init__.py
bnbowman/LociAnalysis
c0f11c2a2b80c7cde61b9991283a17f97062118e
[ "BSD-3-Clause" ]
3
2017-09-22T15:17:42.000Z
2020-05-12T04:59:07.000Z
LociAnalysis/whitelistdb/__init__.py
bnbowman/LociAnalysis
c0f11c2a2b80c7cde61b9991283a17f97062118e
[ "BSD-3-Clause" ]
null
null
null
LociAnalysis/whitelistdb/__init__.py
bnbowman/LociAnalysis
c0f11c2a2b80c7cde61b9991283a17f97062118e
[ "BSD-3-Clause" ]
null
null
null
from .whitelistdb import WhitelistDb
18.5
36
0.864865
4
37
8
0.75
0
0
0
0
0
0
0
0
0
0
0
0.108108
37
1
37
37
0.969697
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
aa531936cd58985b7f78687f03ac64a0eb11627a
1,554
py
Python
tiles.py
koffes/professorspillet
735b460c4757fe9dbc5f12e25dd3c808a8fe3d1f
[ "MIT" ]
null
null
null
tiles.py
koffes/professorspillet
735b460c4757fe9dbc5f12e25dd3c808a8fe3d1f
[ "MIT" ]
null
null
null
tiles.py
koffes/professorspillet
735b460c4757fe9dbc5f12e25dd3c808a8fe3d1f
[ "MIT" ]
null
null
null
"""Description of the cards and the given deck.""" from enum import Enum TILES_NUM = 16 EDGE_NUM = 4 DIR_N = 0 DIR_E = 1 DIR_S = 2 DIR_W = 3 class Bprt(Enum): """The two possible side bodyparts. Torso or legs.""" T = 0 L = 1 class Clr(Enum): """All possible edge colors.""" G = 'green' R = 'red' P = 'purple' B = 'blue' DECK = [ [[Clr.P, Bprt.L], [Clr.G, Bprt.T], [Clr.B, Bprt.T], [Clr.R, Bprt.L]], [[Clr.G, Bprt.L], [Clr.R, Bprt.T], [Clr.P, Bprt.T], [Clr.B, Bprt.L]], [[Clr.B, Bprt.L], [Clr.R, Bprt.T], [Clr.P, Bprt.T], [Clr.G, Bprt.L]], [[Clr.P, Bprt.L], [Clr.B, Bprt.T], [Clr.R, Bprt.T], [Clr.G, Bprt.L]], [[Clr.B, Bprt.L], [Clr.P, Bprt.T], [Clr.G, Bprt.T], [Clr.R, Bprt.L]], [[Clr.P, Bprt.L], [Clr.R, Bprt.T], [Clr.B, Bprt.T], [Clr.R, Bprt.L]], [[Clr.B, Bprt.L], [Clr.G, Bprt.T], [Clr.G, Bprt.T], [Clr.R, Bprt.L]], [[Clr.P, Bprt.L], [Clr.G, Bprt.T], [Clr.B, Bprt.T], [Clr.R, Bprt.L]], [[Clr.P, Bprt.L], [Clr.R, Bprt.T], [Clr.B, Bprt.T], [Clr.G, Bprt.L]], [[Clr.G, Bprt.L], [Clr.B, Bprt.T], [Clr.P, Bprt.T], [Clr.R, Bprt.L]], [[Clr.G, Bprt.L], [Clr.G, Bprt.T], [Clr.P, Bprt.T], [Clr.R, Bprt.L]], [[Clr.R, Bprt.L], [Clr.R, Bprt.T], [Clr.B, Bprt.T], [Clr.B, Bprt.L]], [[Clr.B, Bprt.L], [Clr.G, Bprt.T], [Clr.P, Bprt.T], [Clr.G, Bprt.L]], [[Clr.R, Bprt.L], [Clr.P, Bprt.T], [Clr.B, Bprt.T], [Clr.G, Bprt.L]], [[Clr.G, Bprt.L], [Clr.R, Bprt.T], [Clr.P, Bprt.T], [Clr.G, Bprt.L]], [[Clr.P, Bprt.L], [Clr.G, Bprt.T], [Clr.R, Bprt.T], [Clr.R, Bprt.L]]]
34.533333
73
0.507079
317
1,554
2.466877
0.14511
0.204604
0.327366
0.11509
0.736573
0.736573
0.7289
0.654731
0.654731
0.654731
0
0.007177
0.19305
1,554
44
74
35.318182
0.616427
0.075933
0
0.0625
0
0
0.012676
0
0
0
0
0
0
1
0
false
0
0.03125
0
0.28125
0
0
0
0
null
1
1
0
0
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
aadca0f53dea81ab450baf7d69a7f7d95d4489ef
79
py
Python
AutoWorkup/TestSuite/__init__.py
pnlbwh/BRAINSTools
a2fe63ab5b795f03da140a4081d1fef6314dab95
[ "Apache-2.0" ]
89
2015-02-09T16:47:09.000Z
2022-02-21T07:19:27.000Z
AutoWorkup/TestSuite/__init__.py
pnlbwh/BRAINSTools
a2fe63ab5b795f03da140a4081d1fef6314dab95
[ "Apache-2.0" ]
166
2015-01-07T22:14:05.000Z
2021-12-26T06:58:00.000Z
AutoWorkup/TestSuite/__init__.py
BRAINSia/BRAINSTools
f09f74bd28ad07cd2347c2528921b1a43b97fa1d
[ "Apache-2.0" ]
80
2015-01-05T17:18:07.000Z
2022-01-06T12:46:29.000Z
# import utilities # import workflows from AutoWorkup import setup_environment
19.75
40
0.848101
9
79
7.333333
0.777778
0
0
0
0
0
0
0
0
0
0
0
0.126582
79
3
41
26.333333
0.956522
0.417722
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2ab9687d2fb43b537527b4904b5955c78b8a3cde
2,912
py
Python
sources/praline/client/project/pipeline/stages/validate_project_test.py
dansandu/praline
f1e87c8048787480262b330e6cc6d92d473eb50c
[ "MIT" ]
null
null
null
sources/praline/client/project/pipeline/stages/validate_project_test.py
dansandu/praline
f1e87c8048787480262b330e6cc6d92d473eb50c
[ "MIT" ]
null
null
null
sources/praline/client/project/pipeline/stages/validate_project_test.py
dansandu/praline
f1e87c8048787480262b330e6cc6d92d473eb50c
[ "MIT" ]
null
null
null
from os.path import normpath from praline.client.project.pipeline.stages.validate_project import validate_project, IllformedProjectError from praline.common.testing.file_system_mock import FileSystemMock from unittest import TestCase class ValidateProjectStageTest(TestCase): def test_validate_project(self): file_system = FileSystemMock({ 'my/project/resources/my_organization/my_artifact', 'my/project/sources/my_organization/my_artifact' }) resources = { 'project_directory': 'my/project', 'pralinefile': { 'organization': 'my_organization', 'artifact': 'my_artifact' } } validate_project(file_system, resources, None, None, None, None) expected_directories = { 'my/project/resources/my_organization/my_artifact', 'my/project/sources/my_organization/my_artifact' } self.assertEqual(file_system.directories, {normpath(p) for p in expected_directories}) self.assertEqual(len(file_system.files), 0) def test_invalid_resources_project(self): file_system = FileSystemMock({ 'my/project', 'my/project/resources/my_organization/my_artifact', 'my/project/sources/my_organization/my_artifact' }, {'my/project/resources/my_organization/somefile': b''}) resources = { 'project_directory': 'my/project', 'pralinefile': { 'organization': 'my_organization', 'artifact': 'my_artifact' } } self.assertRaises(IllformedProjectError, validate_project, file_system, resources, None, None, None, None) def test_invalid_sources_project(self): file_system = FileSystemMock({ 'my/project/resources/my_organization/my_artifact', 'my/project/sources/my_organization/my_artifact' }, {'my/project/sources/somefile': b''}) resources = { 'project_directory': 'my/project', 'pralinefile': { 'organization': 'my_organization', 'artifact': 'my_artifact' } } self.assertRaises(IllformedProjectError, validate_project, file_system, resources, None, None, None, None) def test_valid_project_with_hidden_file(self): file_system = FileSystemMock({ 'my/project', 'my/project/resources/my_organization/my_artifact', 
'my/project/sources/my_organization/my_artifact' }, {'my/project/sources/.hidden': b''}) resources = { 'project_directory': 'my/project', 'pralinefile': { 'organization': 'my_organization', 'artifact': 'my_artifact' } } validate_project(file_system, resources, None, None, None, None)
35.512195
114
0.618475
272
2,912
6.378676
0.180147
0.098559
0.092219
0.138329
0.742363
0.729107
0.729107
0.725072
0.725072
0.725072
0
0.000474
0.275069
2,912
81
115
35.950617
0.821412
0
0
0.615385
0
0
0.317308
0.195055
0
0
0
0
0.061538
1
0.061538
false
0
0.061538
0
0.138462
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2af6c5b8f7b7206ffef6469c997eea66ed721db7
166
py
Python
ghia/__init__.py
asatur96/ghia_asatur96
9f8f460d60abddcad5a23725b691b12eb80438f9
[ "CC0-1.0" ]
null
null
null
ghia/__init__.py
asatur96/ghia_asatur96
9f8f460d60abddcad5a23725b691b12eb80438f9
[ "CC0-1.0" ]
1
2019-10-09T05:36:43.000Z
2019-10-10T02:04:16.000Z
ghia/__init__.py
asatur96/ghia
9f8f460d60abddcad5a23725b691b12eb80438f9
[ "CC0-1.0" ]
null
null
null
from ghia.cli import cli from ghia.logic import GHIA from ghia.github import GitHub from ghia.web import create_app __all__ = ['cli', 'create_app', 'GitHub', 'GHIA']
27.666667
49
0.759036
27
166
4.444444
0.37037
0.266667
0
0
0
0
0
0
0
0
0
0
0.13253
166
6
49
27.666667
0.833333
0
0
0
0
0
0.137725
0
0
0
0
0
0
1
0
false
0
0.8
0
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
2affc83f6fb29384849b433d1c555b45d2141d12
45
py
Python
enthought/pyface/i_window.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
3
2016-12-09T06:05:18.000Z
2018-03-01T13:00:29.000Z
enthought/pyface/i_window.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
1
2020-12-02T00:51:32.000Z
2020-12-02T08:48:55.000Z
enthought/pyface/i_window.py
enthought/etsproxy
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
[ "BSD-3-Clause" ]
null
null
null
# proxy module from pyface.i_window import *
15
29
0.777778
7
45
4.857143
1
0
0
0
0
0
0
0
0
0
0
0
0.155556
45
2
30
22.5
0.894737
0.266667
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
2d6bd6b27faf36c9f0bd3b76f1551acc6c921cf3
215
py
Python
services/games/players/player_service.py
project-lolquiz/the-backend
f8a84bd19f400b7c3a2c9b2dfbe305871c1e866e
[ "MIT" ]
null
null
null
services/games/players/player_service.py
project-lolquiz/the-backend
f8a84bd19f400b7c3a2c9b2dfbe305871c1e866e
[ "MIT" ]
19
2021-02-01T19:52:49.000Z
2021-09-26T13:52:41.000Z
services/games/players/player_service.py
project-lolquiz/the-backend
f8a84bd19f400b7c3a2c9b2dfbe305871c1e866e
[ "MIT" ]
null
null
null
def get_current_players(current_room): host_user = current_room['host_user'] users = current_room['game']['users'] players = [player for player in users] players.append(host_user) return players
30.714286
42
0.716279
29
215
5.034483
0.482759
0.226027
0.205479
0.260274
0
0
0
0
0
0
0
0
0.176744
215
6
43
35.833333
0.824859
0
0
0
0
0
0.083721
0
0
0
0
0
0
1
0.166667
false
0
0
0
0.333333
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2da0e0929b24606fa257233430d871e65ae8d545
126
py
Python
message/admin.py
FGAUnB-REQ-GM/2021.2-PousadaAnimal
b7371aebccad0da23073de0db642a6ce824f919e
[ "MIT" ]
null
null
null
message/admin.py
FGAUnB-REQ-GM/2021.2-PousadaAnimal
b7371aebccad0da23073de0db642a6ce824f919e
[ "MIT" ]
95
2022-02-04T19:40:09.000Z
2022-03-31T20:24:11.000Z
message/admin.py
FGAUnB-REQ-GM/2021.2-PousadaAnimal
b7371aebccad0da23073de0db642a6ce824f919e
[ "MIT" ]
4
2022-01-26T23:51:48.000Z
2022-01-27T18:28:16.000Z
from django.contrib import admin from message.models import Message # Register your models here. admin.site.register(Message)
25.2
34
0.825397
18
126
5.777778
0.611111
0
0
0
0
0
0
0
0
0
0
0
0.111111
126
5
35
25.2
0.928571
0.206349
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
2dd8d58120fa53d25d68d423dd4307c9bd086591
891
py
Python
tablut/utils/bitboards.py
carlo98/tablut-THOR
b990d49c66735c40afbfa7d26aeec7694a80a729
[ "MIT" ]
null
null
null
tablut/utils/bitboards.py
carlo98/tablut-THOR
b990d49c66735c40afbfa7d26aeec7694a80a729
[ "MIT" ]
null
null
null
tablut/utils/bitboards.py
carlo98/tablut-THOR
b990d49c66735c40afbfa7d26aeec7694a80a729
[ "MIT" ]
null
null
null
""" Constant bitboards: castle, escapes and camps. """ import numpy as np MAX_NUM_CHECKERS = 25 castle_bitboard = np.array([ 0b000000000, 0b000000000, 0b000000000, 0b000000000, 0b000010000, 0b000000000, 0b000000000, 0b000000000, 0b000000000], dtype=np.int) escapes_bitboard = np.array([ 0b011000110, 0b100000001, 0b100000001, 0b000000000, 0b000000000, 0b000000000, 0b100000001, 0b100000001, 0b011000110], dtype=np.int) camps_bitboard = np.array([ 0b000111000, 0b000010000, 0B000000000, 0b100000001, 0b110000011, 0b100000001, 0b000000000, 0b000010000, 0b000111000], dtype=np.int) blocks_bitboard = np.array([ 0b000000000, 0b001000100, 0b010000010, 0b000000000, 0b000000000, 0b000000000, 0b010000010, 0b001000100, 0b000000000], dtype=np.int)
16.811321
46
0.667789
78
891
7.551282
0.346154
0.373514
0.336163
0.088285
0
0
0
0
0
0
0
0.53789
0.244669
891
52
47
17.134615
0.337296
0.051627
0
0.714286
0
0
0
0
0
0
0
0
0
1
0
false
0
0.02381
0
0.02381
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2df1fbb9661353508df56b1c493efc9d46055b12
205
py
Python
jupyterlabpymolpysnips/Salt-bridge/his31asp70.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Salt-bridge/his31asp70.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
jupyterlabpymolpysnips/Salt-bridge/his31asp70.py
MooersLab/pymolpysnips
50a89c85adf8006d85c1d6cd3f8aad7e440a0b92
[ "MIT" ]
null
null
null
cmd.do('fetch 1lw9, async=0; ') cmd.do('zoom (resi 31 or resi 70); ') cmd.do('preset.technical(selection='all'); ') cmd.do('bg_color gray70; ') cmd.do('clip slab, 7,(resi 31 or resi 70);') cmd.do('rock;')
29.285714
45
0.639024
38
205
3.421053
0.578947
0.230769
0.123077
0.184615
0.292308
0.292308
0.292308
0
0
0
0
0.077348
0.117073
205
6
46
34.166667
0.640884
0
0
0
0
0
0.653659
0.131707
0
0
0
0
0
0
null
null
0
0
null
null
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
93081106ddbaa6461b91732d03b99f5db630d51f
6,095
py
Python
OPMS_v3-dev3.1/apps/host_management/migrations/0014_domainnameinfo_domainnameresolveinfo_networkdviceinfo_porttoportinfo.py
litiian/asyncstudy
a59119f189ca96fdd7f64b0b3212207572165dce
[ "Apache-2.0" ]
null
null
null
OPMS_v3-dev3.1/apps/host_management/migrations/0014_domainnameinfo_domainnameresolveinfo_networkdviceinfo_porttoportinfo.py
litiian/asyncstudy
a59119f189ca96fdd7f64b0b3212207572165dce
[ "Apache-2.0" ]
null
null
null
OPMS_v3-dev3.1/apps/host_management/migrations/0014_domainnameinfo_domainnameresolveinfo_networkdviceinfo_porttoportinfo.py
litiian/asyncstudy
a59119f189ca96fdd7f64b0b3212207572165dce
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.0.6 on 2018-07-13 14:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Add the domain-name, DNS-resolution, network-device and port-mapping tables.

    Every table shares the same bookkeeping columns: free-form description,
    created/updated timestamps, an active/disabled status flag, and foreign
    keys to the user who created and last modified the row.
    """

    dependencies = [
        # The add_user/update_user foreign keys target the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('host_management', '0013_userhostoperationrecord'),
    ]

    operations = [
        migrations.CreateModel(
            name='DomainNameInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='名称')),
                ('desc', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('add_time', models.DateTimeField(auto_now_add=True, verbose_name='添加时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                ('status', models.PositiveSmallIntegerField(choices=[(1, '正常'), (0, '停用')], default=1, verbose_name='状态')),
                ('add_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dom_add_user', to=settings.AUTH_USER_MODEL, verbose_name='添加人')),
                ('update_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dom_update_user', to=settings.AUTH_USER_MODEL, verbose_name='修改人')),
            ],
            options={
                'verbose_name': '域名表',
                'verbose_name_plural': '域名表',
            },
        ),
        migrations.CreateModel(
            name='DomainNameResolveInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.GenericIPAddressField(verbose_name='IP地址')),
                ('desc', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('add_time', models.DateTimeField(auto_now_add=True, verbose_name='添加时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                ('status', models.PositiveSmallIntegerField(choices=[(1, '正常'), (0, '停用')], default=1, verbose_name='状态')),
                ('add_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dom_res_add_user', to=settings.AUTH_USER_MODEL, verbose_name='添加人')),
                # Each resolution row belongs to one DomainNameInfo record.
                ('domain_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='host_management.DomainNameInfo', verbose_name='域名')),
                ('update_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dom_res_update_user', to=settings.AUTH_USER_MODEL, verbose_name='修改人')),
            ],
            options={
                'verbose_name': '域名解析表',
                'verbose_name_plural': '域名解析表',
            },
        ),
        # NOTE(review): 'NetworkDviceInfo' looks like a typo for
        # 'NetworkDeviceInfo', but the model name is baked into migration
        # state — renaming here would break migration history. Fix (if
        # desired) belongs in a new RenameModel migration.
        migrations.CreateModel(
            name='NetworkDviceInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category', models.CharField(max_length=20, verbose_name='设备分类')),
                ('name', models.CharField(max_length=20, verbose_name='设备型号')),
                ('address', models.CharField(max_length=20, verbose_name='设备地址')),
                ('ip_in', models.GenericIPAddressField(verbose_name='内网 IP')),
                ('ip_out', models.GenericIPAddressField(blank=True, null=True, verbose_name='公网 IP')),
                ('admin_user', models.CharField(max_length=20, verbose_name='管理用户')),
                # NOTE(review): admin_pass is a plain CharField — the device
                # password appears to be stored unencrypted; confirm this is
                # intentional.
                ('admin_pass', models.CharField(max_length=20, verbose_name='管理密码')),
                ('desc', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('add_time', models.DateTimeField(auto_now_add=True, verbose_name='添加时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                ('status', models.PositiveSmallIntegerField(choices=[(1, '正常'), (0, '停用')], default=1, verbose_name='状态')),
                ('add_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='net_add_user', to=settings.AUTH_USER_MODEL, verbose_name='添加人')),
                ('update_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='net_update_user', to=settings.AUTH_USER_MODEL, verbose_name='修改人')),
            ],
            options={
                'verbose_name': '网络设备表',
                'verbose_name_plural': '网络设备表',
            },
        ),
        migrations.CreateModel(
            name='PortToPortInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip_out', models.GenericIPAddressField(blank=True, null=True, verbose_name='公网 IP')),
                ('port_out', models.IntegerField(verbose_name='外网端口')),
                ('ip_in', models.GenericIPAddressField(verbose_name='内网 IP')),
                ('port_in', models.IntegerField(verbose_name='内网端口')),
                ('use', models.CharField(max_length=20, verbose_name='用途')),
                ('desc', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('add_time', models.DateTimeField(auto_now_add=True, verbose_name='添加时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
                ('status', models.PositiveSmallIntegerField(choices=[(1, '正常'), (0, '停用')], default=1, verbose_name='状态')),
                ('add_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='port_add_user', to=settings.AUTH_USER_MODEL, verbose_name='添加人')),
                ('update_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='port_update_user', to=settings.AUTH_USER_MODEL, verbose_name='修改人')),
            ],
            options={
                'verbose_name': '端口映射表',
                'verbose_name_plural': '端口映射表',
            },
        ),
    ]
63.489583
181
0.617391
666
6,095
5.403904
0.174174
0.155877
0.05835
0.061128
0.781606
0.772715
0.772715
0.711031
0.685468
0.670742
0
0.009601
0.231009
6,095
95
182
64.157895
0.758268
0.007383
0
0.494382
1
0
0.143188
0.013062
0
0
0
0
0
1
0
false
0.011236
0.033708
0
0.067416
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
93723677b100b2e1c2a4f6d71741b61b9c0a3974
85
py
Python
smpy_plugin/__init__.py
ismtabo/SemanticMergePythonPlugin
39a701533bf3679de12ede2ba7e8bf80ceebdffd
[ "MIT" ]
4
2019-04-06T02:57:31.000Z
2021-12-23T22:24:29.000Z
smpy_plugin/__init__.py
ismtabo/SemanticMergePythonPlugin
39a701533bf3679de12ede2ba7e8bf80ceebdffd
[ "MIT" ]
null
null
null
smpy_plugin/__init__.py
ismtabo/SemanticMergePythonPlugin
39a701533bf3679de12ede2ba7e8bf80ceebdffd
[ "MIT" ]
null
null
null
""" Main package """ from smpy_plugin._version import __version__, __version_info__
14.166667
62
0.788235
10
85
5.6
0.8
0
0
0
0
0
0
0
0
0
0
0
0.117647
85
5
63
17
0.746667
0.141176
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
faf878d75491090c5b2915e716b94afd3086707a
161
py
Python
interest/admin.py
ianpierreg/recroom
86c21332ab533ea6aaf7b4a3428f18ba2c4d1ebe
[ "MIT" ]
null
null
null
interest/admin.py
ianpierreg/recroom
86c21332ab533ea6aaf7b4a3428f18ba2c4d1ebe
[ "MIT" ]
4
2021-05-02T01:14:59.000Z
2022-02-13T17:58:36.000Z
interest/admin.py
ianpierreg/recroom
86c21332ab533ea6aaf7b4a3428f18ba2c4d1ebe
[ "MIT" ]
null
null
null
# interest/admin.py
"""Expose the interest models in the Django admin site."""
from django.contrib import admin

from .models import InterestType, Interest

# Register both models with the default admin (no custom ModelAdmin needed).
for _model in (InterestType, Interest):
    admin.site.register(_model)
23
42
0.826087
21
161
6.333333
0.52381
0.195489
0.255639
0
0
0
0
0
0
0
0
0
0.086957
161
7
43
23
0.904762
0.10559
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
878e0c17318946dcea97b44cb6e3d6bd4d8df71d
37
py
Python
tests/components/sonos/__init__.py
domwillcode/home-assistant
f170c80bea70c939c098b5c88320a1c789858958
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
tests/components/sonos/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
31,101
2020-03-02T13:00:16.000Z
2022-03-31T23:57:36.000Z
tests/components/sonos/__init__.py
jagadeeshvenkatesh/core
1bd982668449815fee2105478569f8e4b5670add
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Tests for the Sonos component."""
18.5
36
0.675676
5
37
5
1
0
0
0
0
0
0
0
0
0
0
0
0.135135
37
1
37
37
0.78125
0.810811
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
87c83250a16cd110aac207555727828b23c14858
36
py
Python
pyapps/rccat/__init__.py
VK/RcCat
9ae01a828ce1d5fa2deeb96676c127a0a5a010e2
[ "Apache-2.0" ]
1
2020-10-17T18:24:08.000Z
2020-10-17T18:24:08.000Z
pyapps/rccat/__init__.py
VK/RcCat
9ae01a828ce1d5fa2deeb96676c127a0a5a010e2
[ "Apache-2.0" ]
null
null
null
pyapps/rccat/__init__.py
VK/RcCat
9ae01a828ce1d5fa2deeb96676c127a0a5a010e2
[ "Apache-2.0" ]
null
null
null
from rccat.serialio import SerialIO
36
36
0.861111
5
36
6.2
0.8
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
0.96875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
87dd65085a4133a79a3aafd9584772d52c73e5aa
287
py
Python
src/lib/db/__init__.py
arnulfojr/money-manager
8600f1ff258a89f5742ffad4d5f589fd1def5259
[ "MIT" ]
1
2020-08-18T08:03:44.000Z
2020-08-18T08:03:44.000Z
src/lib/db/__init__.py
arnulfojr/money-manager
8600f1ff258a89f5742ffad4d5f589fd1def5259
[ "MIT" ]
null
null
null
src/lib/db/__init__.py
arnulfojr/money-manager
8600f1ff258a89f5742ffad4d5f589fd1def5259
[ "MIT" ]
null
null
null
from src.db import engine from src.db import Session from src.db import session from src.db import Model from models.mixin_model import ModelMixin from src.setup_app import setup_app from src.create_all import create_all from src.drop_all import drop_all from types.GUID import GUID
20.5
41
0.829268
52
287
4.442308
0.326923
0.212121
0.155844
0.25974
0.255411
0.255411
0.255411
0.255411
0.255411
0
0
0
0.139373
287
13
42
22.076923
0.935223
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
87e5f16041cb1914db6ba50b9e89e3ffa13d2d60
48
py
Python
babysploit/wpseku/modules/discovery/themes/__init__.py
kevinsegal/BabySploit
66bafc25e04e7512e8b87b161bd3b7201bb57b63
[ "MIT" ]
null
null
null
babysploit/wpseku/modules/discovery/themes/__init__.py
kevinsegal/BabySploit
66bafc25e04e7512e8b87b161bd3b7201bb57b63
[ "MIT" ]
null
null
null
babysploit/wpseku/modules/discovery/themes/__init__.py
kevinsegal/BabySploit
66bafc25e04e7512e8b87b161bd3b7201bb57b63
[ "MIT" ]
null
null
null
"""Support for discovering Wordpress themes."""
24
47
0.75
5
48
7.2
1
0
0
0
0
0
0
0
0
0
0
0
0.104167
48
1
48
48
0.837209
0.854167
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
357266baaa2fc70528cb3d0410122ca293d7e864
42
py
Python
tests/__init__.py
NooneBug/typing-model
228e851afc0c795f819da6ff800c7c4d0476b6a1
[ "MIT" ]
null
null
null
tests/__init__.py
NooneBug/typing-model
228e851afc0c795f819da6ff800c7c4d0476b6a1
[ "MIT" ]
null
null
null
tests/__init__.py
NooneBug/typing-model
228e851afc0c795f819da6ff800c7c4d0476b6a1
[ "MIT" ]
null
null
null
"""Unit test package for typing_model."""
21
41
0.714286
6
42
4.833333
1
0
0
0
0
0
0
0
0
0
0
0
0.119048
42
1
42
42
0.783784
0.833333
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
359b760602697de7c77f41cfd575c308f7894e01
194
py
Python
movie/permissions.py
lcbiplove/nepfdb
56e48bb0dcae34d409b7d75d210d2938e763a953
[ "MIT" ]
null
null
null
movie/permissions.py
lcbiplove/nepfdb
56e48bb0dcae34d409b7d75d210d2938e763a953
[ "MIT" ]
null
null
null
movie/permissions.py
lcbiplove/nepfdb
56e48bb0dcae34d409b7d75d210d2938e763a953
[ "MIT" ]
null
null
null
from rest_framework.permissions import BasePermission


class IsOwner(BasePermission):
    """Object-level permission: grant access only to the owning reviewer."""

    def has_object_permission(self, request, view, obj):
        # Ownership means the object's id equals the requesting user's
        # reviewer id. NOTE(review): assumes request.user carries a
        # `reviewer` relation — an anonymous user would raise
        # AttributeError here; confirm upstream authentication guarantees it.
        target_id = obj.id
        requester_id = request.user.reviewer.id
        return target_id == requester_id
27.714286
56
0.768041
24
194
6.083333
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.149485
194
6
57
32.333333
0.884848
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
35bc286c8d648fa27f1c5884fa61cee5fe838252
1,587
py
Python
adapters/actuators/overhead_display/hc595/test2_74HC595_shift_reg.py
diydsp/thirtybirds3.0
8d57c73f1c6597a3a5dddaaaca07511eaa2adaf8
[ "MIT" ]
2
2020-05-13T02:53:02.000Z
2021-03-21T05:54:53.000Z
adapters/actuators/overhead_display/hc595/test2_74HC595_shift_reg.py
diydsp/thirtybirds3.0
8d57c73f1c6597a3a5dddaaaca07511eaa2adaf8
[ "MIT" ]
null
null
null
adapters/actuators/overhead_display/hc595/test2_74HC595_shift_reg.py
diydsp/thirtybirds3.0
8d57c73f1c6597a3a5dddaaaca07511eaa2adaf8
[ "MIT" ]
1
2021-05-06T18:42:41.000Z
2021-05-06T18:42:41.000Z
#!/usr/bin/env python
"""Blink a five-channel beat pattern on a 74HC595 shift register.

Cycles through ``sequence`` forever: for each beat, the active channel
bits are packed into one register byte, pulsed on for ``ontime`` seconds,
then cleared for the remainder of ``period``. Ctrl-C exits cleanly and
the ``finally`` clause releases the register's output-enable line.
"""
import time
import math  # kept from the original script; currently unused

import HC595_shift_reg

shift_register = HC595_shift_reg.HC595()

# One row per beat; a 1 in column i drives channel i for that beat.
sequence = [
    [1, 0, 0, 0, 0], [0, 0, 1, 0, 0],
    [1, 0, 0, 0, 0], [0, 0, 1, 0, 0],
    [1, 0, 0, 0, 0], [0, 0, 0, 1, 0],
    [1, 0, 0, 0, 0], [0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0], [0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0], [0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0], [0, 0, 0, 0, 1],
    [0, 1, 0, 0, 0], [0, 0, 0, 0, 1],
    [0, 0, 1, 0, 0], [0, 0, 0, 0, 1],
    [0, 0, 1, 0, 0], [0, 0, 0, 0, 1],
    [0, 0, 1, 0, 0], [1, 0, 0, 0, 0],
    [0, 0, 1, 0, 0], [1, 0, 0, 0, 0],
    [0, 0, 0, 1, 0], [1, 0, 0, 0, 0],
    [0, 0, 0, 1, 0], [1, 0, 0, 0, 0],
    [0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
    [0, 0, 0, 1, 0], [0, 1, 0, 0, 0],
    [0, 0, 0, 0, 1], [0, 1, 0, 0, 0],
    [0, 0, 0, 0, 1], [0, 1, 0, 0, 0],
    [0, 0, 0, 0, 1], [0, 0, 1, 0, 0],
    [0, 0, 0, 0, 1], [0, 0, 1, 0, 0],
]

# Single-register chain: one byte of output state.
register_states = [0]

period = 0.8  # seconds per beat

try:
    ontime = 0.0100            # on-pulse width in seconds
    offtime = period - ontime  # remainder of the beat is dark
    while True:
        for beat in sequence:
            # Pack the beat's channel flags into one register byte
            # (bit i set when channel i is active this beat).
            register_states[0] = sum(
                1 << channel for channel, active in enumerate(beat) if active == 1
            )
            shift_register.write(register_states)
            time.sleep(ontime)

            # Clear all channels for the rest of the period.
            register_states[0] = 0x00
            shift_register.write([0x00])
            time.sleep(offtime)
except KeyboardInterrupt:
    print("You've exited the program.")
finally:
    print("cleaning up GPIO now.")
    shift_register.disable_Output_Enable()
17.831461
89
0.449275
301
1,587
2.305648
0.166113
0.348703
0.371758
0.386167
0.288184
0.288184
0.288184
0.288184
0.288184
0.288184
0
0.216418
0.324512
1,587
88
90
18.034091
0.43097
0.012602
0
0.58209
0
0
0.030109
0
0
0
0.005125
0
0
1
0
false
0
0.044776
0
0.044776
0.029851
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
35e6aaedea74a18fde3152ab798866748023eb3a
42
py
Python
model/error.py
ZegWe/Dota2Bot
ffc979d0cfa14bbd5f1961e997c54cc4a52b1367
[ "MIT" ]
12
2020-12-19T03:07:27.000Z
2021-12-20T13:50:34.000Z
model/error.py
ZegWe/Dota2Bot
ffc979d0cfa14bbd5f1961e997c54cc4a52b1367
[ "MIT" ]
4
2020-12-19T09:54:28.000Z
2021-11-02T11:23:00.000Z
model/error.py
ZegWe/Dota2Bot
ffc979d0cfa14bbd5f1961e997c54cc4a52b1367
[ "MIT" ]
1
2020-12-19T03:56:20.000Z
2020-12-19T03:56:20.000Z
class DOTA2HTTPError(Exception):
    """Exception type for HTTP failures in DOTA2-related requests."""
14
32
0.761905
4
42
8
1
0
0
0
0
0
0
0
0
0
0
0.028571
0.166667
42
2
33
21
0.885714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5