hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
daece99b2a82989e1c579f5ed5a84eefc556bf72
665
py
Python
lavajato/models.py
lolitthax/projeto-topicos-especiaisI
c2fa9ba2b8e55063be8e14f7d4012beb7be065dd
[ "MIT" ]
null
null
null
lavajato/models.py
lolitthax/projeto-topicos-especiaisI
c2fa9ba2b8e55063be8e14f7d4012beb7be065dd
[ "MIT" ]
null
null
null
lavajato/models.py
lolitthax/projeto-topicos-especiaisI
c2fa9ba2b8e55063be8e14f7d4012beb7be065dd
[ "MIT" ]
1
2020-11-03T13:11:40.000Z
2020-11-03T13:11:40.000Z
from django.db import models # Create your models here. class Cliente(models.Model): nome = models.CharField(max_length=30) email = models.CharField(max_length=60) cpf = models.CharField(max_length=11) telefone = models.CharField(max_length=15) def __str__(self): return self.nome class Veiculo(models.Model): placa = models.CharField(max_length=7) ano = models.IntegerField() modelo = models.CharField(max_length=30) cor = models.CharField(max_length=30) descricao = models.CharField(max_length=50) cliente = models.ForeignKey(Cliente, on_delete=models.CASCADE) def __str__(self): return self.placa
33.25
66
0.718797
88
665
5.238636
0.443182
0.260304
0.312364
0.416486
0.255965
0
0
0
0
0
0
0.027372
0.17594
665
20
67
33.25
0.813869
0.03609
0
0.117647
0
0
0
0
0
0
0
0
0
1
0.117647
false
0
0.058824
0.117647
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
5
9718c12608474f2540753bf4235e4e31ef3fb32e
224
py
Python
drivers/plot_S21_prot_vs_teff.py
lgbouma/gilly
b3bc7cf53c28eee6420cd85c3975062d4f46c611
[ "MIT" ]
null
null
null
drivers/plot_S21_prot_vs_teff.py
lgbouma/gilly
b3bc7cf53c28eee6420cd85c3975062d4f46c611
[ "MIT" ]
null
null
null
drivers/plot_S21_prot_vs_teff.py
lgbouma/gilly
b3bc7cf53c28eee6420cd85c3975062d4f46c611
[ "MIT" ]
null
null
null
from gilly.plotting import plot_S21_prot_vs_teff plot_S21_prot_vs_teff(koiflag=[0]) plot_S21_prot_vs_teff(koiflag=[0], ylim=[0.1,20]) plot_S21_prot_vs_teff(koiflag=[0,1]) plot_S21_prot_vs_teff(koiflag=[0,1], ylim=[0.1,20])
32
51
0.794643
47
224
3.361702
0.297872
0.221519
0.348101
0.411392
0.753165
0.64557
0.64557
0.329114
0
0
0
0.112676
0.049107
224
6
52
37.333333
0.629108
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.2
0
0.2
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
9736a81ef912c8509133559afc8f0df7e70c72ea
119
py
Python
coffemachine_project/coffemachine/machine/admin.py
Dejna93/django-coffee-machine
f7de7b0e5e3009d86b91512db0b5980196d95520
[ "MIT" ]
null
null
null
coffemachine_project/coffemachine/machine/admin.py
Dejna93/django-coffee-machine
f7de7b0e5e3009d86b91512db0b5980196d95520
[ "MIT" ]
null
null
null
coffemachine_project/coffemachine/machine/admin.py
Dejna93/django-coffee-machine
f7de7b0e5e3009d86b91512db0b5980196d95520
[ "MIT" ]
null
null
null
# Register your models here. from django.contrib import admin from .models import Coffee admin.site.register(Coffee)
17
32
0.798319
17
119
5.588235
0.647059
0
0
0
0
0
0
0
0
0
0
0
0.134454
119
6
33
19.833333
0.92233
0.218487
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
973f8dcb0481bff30ed685dd7c09ea8b65409194
27
py
Python
scanflow/deploy/__init__.py
gusseppe/autodeploy
a3caf2eb7db86cd111138a8cb5443d3f1ee4152c
[ "MIT" ]
2
2019-11-17T11:24:23.000Z
2020-02-07T10:57:54.000Z
scanflow/deploy/__init__.py
gusseppe/scanflow
16321a5380bebaa7ea9fff0bf5903c3bbf108cd2
[ "MIT" ]
6
2020-11-13T18:35:12.000Z
2022-02-10T01:55:33.000Z
scanflow/deploy/__init__.py
gusseppe/autodeploy
a3caf2eb7db86cd111138a8cb5443d3f1ee4152c
[ "MIT" ]
3
2020-11-27T09:29:40.000Z
2021-07-27T09:16:40.000Z
from .deploy import Deploy
13.5
26
0.814815
4
27
5.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
976654209133ce02df0cad5c79dd454fb1a53907
5,294
py
Python
ldklib/docker_models.py
lidingke/keras-yolo3
38748d814074dd9d311382a2c7b91505f9dceb58
[ "MIT" ]
null
null
null
ldklib/docker_models.py
lidingke/keras-yolo3
38748d814074dd9d311382a2c7b91505f9dceb58
[ "MIT" ]
null
null
null
ldklib/docker_models.py
lidingke/keras-yolo3
38748d814074dd9d311382a2c7b91505f9dceb58
[ "MIT" ]
null
null
null
import docker import grpc import ctpn_pb2 import ctpn_pb2_grpc import yolo_pb2 import yolo_pb2_grpc import crnn_pb2 import crnn_pb2_grpc import cv2 import json import numpy as np class CTPN_Docker(object): def __init__(self,docker_client = 'unix://var/run/docker.sock', host_port = 'localhost:50053',run_args=None,run_kwargs=None): self.client = docker.DockerClient(base_url=docker_client) channel = grpc.insecure_channel(host_port) self.stub = ctpn_pb2_grpc.ModelStub(channel) self.run_args = run_args if run_args else [] self.run_kwargs = {'image': "trnet/ctpn:1.0.1", 'runtime': 'nvidia', "command" : "python rpc/server.py", 'environment': ["CUDA_VISIBLE_DEVICES=1"], 'ports': {'50051/tcp': '50053'}, 'detach': True, 'auto_remove': True} if run_kwargs: self.run_kwargs.update(run_kwargs) # pass def run(self,img): assert isinstance(img,np.ndarray), 'img must be a numpy array.' imgstr = img.tobytes() shape = json.dumps(img.shape) # stub = ctpn_pb2_grpc.ModelStub(grpc.insecure_channel('localhost:50051')) response = self.stub.predict(ctpn_pb2.rect_request(img=imgstr, shape=shape)) return json.loads(response.message) def __enter__(self): self.container = self.client.containers.run(*self.run_args,**self.run_kwargs) for line in self.container.logs(stream=True): if line.strip().find(b'grpc_server_start') >= 0: break return self def __exit__(self, exc_type, exc_val, exc_tb): self.container.stop() print('container has stopped.') class YOLO_Docker(object): def __init__(self,docker_client = 'unix://var/run/docker.sock', host_port = 'localhost:50053'): self.client = docker.DockerClient(base_url=docker_client) channel = grpc.insecure_channel(host_port) self.stub = yolo_pb2_grpc.YOLOModelStub(channel) self.run_args = [] self.run_kwargs = {'image' : "yolo_server", 'runtime':'nvidia', 'environment' : ["CUDA_VISIBLE_DEVICES=1"], 'ports' : {'50051/tcp':'50053'}, 'detach':True, 'auto_remove' : True} def run(self,img): assert isinstance(img,np.ndarray), 'img must be a numpy array.' 
imgstr = img.tobytes() shape = json.dumps(img.shape) response = self.stub.predict(yolo_pb2.rect_request(img=imgstr, shape=shape)) return json.loads(response.message) def __enter__(self): self.container = self.client.containers.run(*self.run_args,**self.run_kwargs) for line in self.container.logs(stream=True): if line.strip() == b'grpc_server_start': break return self def __exit__(self, exc_type, exc_val, exc_tb): self.container.stop() print('container has stopped.') class CRNN_Docker(object): """ __init__ :param docker_client:unix://var/run/docker.sock :param host_port:'localhost:50053' :param run_args:[] :param run_kwargs:default- 'image': "trnet/crnn:1.0.2", 'runtime': 'nvidia', "command" : "python server.py", 'environment': ["CUDA_VISIBLE_DEVICES=1"], 'ports': {'50054/tcp': '50054'}, 'detach': True, 'auto_remove': True """ def __init__(self, docker_client='unix://var/run/docker.sock', host_port='localhost:50054',run_args=None,run_kwargs=None): self.client = docker.DockerClient(base_url=docker_client) channel = grpc.insecure_channel(host_port) self.stub = crnn_pb2_grpc.GreeterStub(channel) self.run_args = run_args if run_args else [] self.run_kwargs = {'image': "trnet/crnn:1.0.2", 'runtime': 'nvidia', "command" : "python server.py", 'environment': ["CUDA_VISIBLE_DEVICES=0"], 'ports': {'50054/tcp': '50054'}, 'detach': True, 'auto_remove': True} if run_kwargs: self.run_kwargs.update(run_kwargs) # self.run_kwargs = default_k def run(self,im): assert isinstance(im,np.ndarray), 'img must be a numpy array.' 
shape = json.dumps(im.shape) ymax, xmax, _ = im.shape xmin, ymin = 0, 0 boxline = xmin, ymin, xmax, ymax box = json.dumps([boxline]) response = self.stub.idc_crnn(crnn_pb2.CrnnRequest(img=im.tobytes(), shape=shape, box_list=box)) return response.message def __enter__(self): self.container = self.client.containers.run(*self.run_args,**self.run_kwargs) for line in self.container.logs(stream=True): # print(line) if line.strip().find(b'crnn_serve_start') >= 0: break return self def __exit__(self, exc_type, exc_val, exc_tb): self.container.stop() print('container has stopped.')
37.020979
104
0.584624
634
5,294
4.649842
0.190852
0.048847
0.039688
0.02578
0.760176
0.733718
0.725577
0.714722
0.701832
0.681479
0
0.025621
0.292218
5,294
143
105
37.020979
0.761142
0.093313
0
0.596154
0
0
0.141621
0.030393
0
0
0
0
0.028846
1
0.115385
false
0
0.105769
0
0.307692
0.028846
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
97abf2f9413abd54c0d76f68cb6670a25087922a
190
py
Python
imsearchtools/__init__.py
carandraug/imsearch-tools
9e6af18d63ffa43cef033bf5d75d32f62a8efcc7
[ "BSD-2-Clause" ]
7
2016-06-18T11:22:43.000Z
2019-08-28T23:28:41.000Z
imsearchtools/__init__.py
carandraug/imsearch-tools
9e6af18d63ffa43cef033bf5d75d32f62a8efcc7
[ "BSD-2-Clause" ]
null
null
null
imsearchtools/__init__.py
carandraug/imsearch-tools
9e6af18d63ffa43cef033bf5d75d32f62a8efcc7
[ "BSD-2-Clause" ]
2
2016-12-12T07:40:42.000Z
2018-02-19T13:26:07.000Z
import engines as query import process import utils import postproc_modules import http_service_helper from gevent import monkey monkey.patch_all(thread=False, select=False, httplib=False)
21.111111
59
0.852632
28
190
5.642857
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.105263
190
8
60
23.75
0.929412
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.857143
0
0.857143
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
97bd4fef8a915af2bee6d4d6ba8247fd48f32e29
83
py
Python
interleaving/simulation/__init__.py
mpkato/interleaving
7907f7dd61bfcad57ad602b5c93e601677025da7
[ "MIT" ]
107
2016-10-01T12:49:24.000Z
2022-02-23T23:48:26.000Z
interleaving/simulation/__init__.py
mpkato/interleaving
7907f7dd61bfcad57ad602b5c93e601677025da7
[ "MIT" ]
39
2016-09-25T01:41:25.000Z
2018-10-15T04:38:18.000Z
interleaving/simulation/__init__.py
mpkato/interleaving
7907f7dd61bfcad57ad602b5c93e601677025da7
[ "MIT" ]
20
2017-03-13T21:36:11.000Z
2022-03-24T17:57:46.000Z
from .simulator import Simulator from .ranker import Ranker from .user import User
20.75
32
0.819277
12
83
5.666667
0.416667
0
0
0
0
0
0
0
0
0
0
0
0.144578
83
3
33
27.666667
0.957746
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
97cee469a7e353e93fb3d68746f0e1145a322683
103
py
Python
MC/assets/__init__.py
Hoyin7123/ByTopicPastPaperCreator
083d6d59f634a7f8f6dc4d5b63471e56bd579f8a
[ "Apache-2.0" ]
null
null
null
MC/assets/__init__.py
Hoyin7123/ByTopicPastPaperCreator
083d6d59f634a7f8f6dc4d5b63471e56bd579f8a
[ "Apache-2.0" ]
null
null
null
MC/assets/__init__.py
Hoyin7123/ByTopicPastPaperCreator
083d6d59f634a7f8f6dc4d5b63471e56bd579f8a
[ "Apache-2.0" ]
null
null
null
from .pruner import * from .splitter import * from .sorter import * __all__ = ("Pruner", "Splitter")
14.714286
32
0.68932
12
103
5.583333
0.5
0.298507
0
0
0
0
0
0
0
0
0
0
0.174757
103
6
33
17.166667
0.788235
0
0
0
0
0
0.135922
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
97d0cd2387f29dad74ad0dcc461af6cad505f428
15
py
Python
test/integration/ListNewSized/list new sized.py
HighSchoolHacking/GLS-Draft
9e418b6290e7c8e3f2da87668784bdba1cde5a76
[ "MIT" ]
30
2019-10-29T12:47:50.000Z
2022-02-12T06:41:39.000Z
test/integration/ListNewSized/list new sized.py
HighSchoolHacking/GLS-Draft
9e418b6290e7c8e3f2da87668784bdba1cde5a76
[ "MIT" ]
247
2017-09-21T17:11:18.000Z
2019-10-08T12:59:07.000Z
test/integration/ListNewSized/list new sized.py
HighSchoolHacking/GLS-Draft
9e418b6290e7c8e3f2da87668784bdba1cde5a76
[ "MIT" ]
17
2017-10-01T16:53:20.000Z
2018-11-28T07:20:35.000Z
# [None] * 5 #
3.75
10
0.333333
2
15
2.5
1
0
0
0
0
0
0
0
0
0
0
0.1
0.333333
15
3
11
5
0.4
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
8afeb62f39b7504dfb14ac4c1f2d3f19910dd907
259
py
Python
keras_xlnet/__init__.py
weiyuhan9703/keras-xlnet
a50cca33e7948824ca4f392193689ce1a8535c14
[ "MIT" ]
2
2021-05-30T11:14:06.000Z
2021-05-30T11:18:29.000Z
keras_xlnet/__init__.py
Rukawa027/keras-xlnet
a50cca33e7948824ca4f392193689ce1a8535c14
[ "MIT" ]
null
null
null
keras_xlnet/__init__.py
Rukawa027/keras-xlnet
a50cca33e7948824ca4f392193689ce1a8535c14
[ "MIT" ]
1
2021-05-30T11:14:10.000Z
2021-05-30T11:14:10.000Z
from .permutation import * from .mask_embed import * from .position_embed import * from .segment_bias import * from .segment_embed import * from .attention import * from .xlnet import * from .loader import * from .tokenizer import * from .pretrained import *
23.545455
29
0.76834
34
259
5.735294
0.382353
0.461538
0.230769
0
0
0
0
0
0
0
0
0
0.15444
259
10
30
25.9
0.890411
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
c18cbf9b0ab0577e58c7e6a0a069ba62bb4d82bf
108
py
Python
setup.py
rfgil/quarere
1edd56379cb1fc883308f69a4b9bf7b606954d5d
[ "MIT" ]
null
null
null
setup.py
rfgil/quarere
1edd56379cb1fc883308f69a4b9bf7b606954d5d
[ "MIT" ]
null
null
null
setup.py
rfgil/quarere
1edd56379cb1fc883308f69a4b9bf7b606954d5d
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages setup(name='quarere', version='2.0', packages=find_packages())
27
62
0.777778
15
108
5.466667
0.733333
0.292683
0
0
0
0
0
0
0
0
0
0.020202
0.083333
108
3
63
36
0.808081
0
0
0
0
0
0.092593
0
0
0
0
0
0
1
0
true
0
0.5
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
c1a74458e4fd09a17299b3c4eee901b64e6d116b
92
py
Python
tools/checkkey.py
AyoubUmoru/raspi-box
e984bcf4cc18dd775cb455e674304c34f162f454
[ "MIT" ]
null
null
null
tools/checkkey.py
AyoubUmoru/raspi-box
e984bcf4cc18dd775cb455e674304c34f162f454
[ "MIT" ]
1
2021-06-02T01:00:19.000Z
2021-06-02T01:00:19.000Z
tools/checkkey.py
AyoubUmoru/raspi-box
e984bcf4cc18dd775cb455e674304c34f162f454
[ "MIT" ]
null
null
null
import sys import tty tty.setcbreak(sys.stdin) while True: print ord(sys.stdin.read(1))
15.333333
32
0.73913
16
92
4.25
0.6875
0.235294
0
0
0
0
0
0
0
0
0
0.012658
0.141304
92
5
33
18.4
0.848101
0
0
0
0
0
0
0
0
0
0
0
0
0
null
null
0
0.4
null
null
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
5
c1d38609937d0ae608227960c01763dacd095f03
1,463
py
Python
tests/test_auth.py
ionagamed/hexagonal
60c0e0839de0d616c50ad87d343a3da3ef6433a6
[ "MIT" ]
null
null
null
tests/test_auth.py
ionagamed/hexagonal
60c0e0839de0d616c50ad87d343a3da3ef6433a6
[ "MIT" ]
9
2018-02-14T07:41:40.000Z
2018-03-27T11:24:21.000Z
tests/test_auth.py
ionagamed/hexagonal
60c0e0839de0d616c50ad87d343a3da3ef6433a6
[ "MIT" ]
null
null
null
import pytest from tests.common import call, root_login, get_login_pair from hexagonal.auth import decode_token # decode_token raises an exception when unable to parse # # # def test__root_login__should_not_fail(): # decode_token(root_login()) # # # def test__registering_from_root__should_not_fail(): # token = root_login() # login, password = get_login_pair() # call('auth.register', { # 'login': login, # 'password': password, # 'role': 'student-patron', # 'address': '123', # 'name': 'One Two', # 'card_number': 123, # 'phone': 123 # }, token) # # # def test__just_registered_user__should_be_able_to_login(): # token = root_login() # login, password = get_login_pair() # call('auth.register', { # 'login': login, # 'password': password, # 'role': 'student-patron', # 'address': '123', # 'name': 'One Two', # 'card_number': 123, # 'phone': 123 # }, token) # decode_token(call('auth.login', { # 'login': login, # 'password': password # })) #ToDo #def test__creating_new_document_book_is_correct(): #def test__creating_new_document_journal_is_correct(): #def test_creating_new_document_av_file_is_correct(): #def access_of_librariant_to_adding_new_book_to_system_is_correct(): #def access_of_student_to_booking_system_is_correct(): #def access_of_TA_to_booking_system_is_correct():
26.125
68
0.650034
179
1,463
4.832402
0.346369
0.048555
0.104046
0.090173
0.565318
0.473988
0.413873
0.332948
0.332948
0.332948
0
0.015693
0.215995
1,463
55
69
26.6
0.738448
0.857143
0
0
0
0
0
0
0
0
0
0.018182
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
1
0
1
0
0
5
a9b4bc0810aa6a4c89a015b3a571c29fa97a826c
170
py
Python
my_project/apps/my_app/views.py
wen96/django-boilerplate
6e2b208de5730540ca4def28b296938582b8dac5
[ "MIT" ]
null
null
null
my_project/apps/my_app/views.py
wen96/django-boilerplate
6e2b208de5730540ca4def28b296938582b8dac5
[ "MIT" ]
null
null
null
my_project/apps/my_app/views.py
wen96/django-boilerplate
6e2b208de5730540ca4def28b296938582b8dac5
[ "MIT" ]
null
null
null
from django.shortcuts import render from django.views.generic import View class HomeView(View): def get(self, request): return render(request, 'home.html')
21.25
43
0.729412
23
170
5.391304
0.73913
0.16129
0
0
0
0
0
0
0
0
0
0
0.176471
170
7
44
24.285714
0.885714
0
0
0
0
0
0.052941
0
0
0
0
0
0
1
0.2
false
0
0.4
0.2
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
1
1
0
0
5
a9edf0e974ebe0aba27e58d30254a9ca44fef0f6
202
py
Python
api/context_processors.py
UQ-UQx/uqx_api
54c132ab345fa698be090c3ab4f72c8bd7b42bc3
[ "MIT" ]
3
2015-04-13T14:23:39.000Z
2018-02-13T15:09:30.000Z
api/context_processors.py
UQ-UQx/uqx_api
54c132ab345fa698be090c3ab4f72c8bd7b42bc3
[ "MIT" ]
7
2015-04-20T07:00:09.000Z
2021-12-13T19:45:12.000Z
api/context_processors.py
UQ-UQx/uqx_api
54c132ab345fa698be090c3ab4f72c8bd7b42bc3
[ "MIT" ]
3
2015-03-26T19:29:18.000Z
2016-01-19T23:17:00.000Z
import uqx_api.settings def test_view(request): extra = {} extra['settings_brand'] = uqx_api.settings.BRAND extra['settings_brand_website'] = uqx_api.settings.BRAND_WEBSITE return extra
28.857143
68
0.742574
27
202
5.259259
0.444444
0.366197
0.295775
0.267606
0
0
0
0
0
0
0
0
0.153465
202
7
69
28.857143
0.830409
0
0
0
0
0
0.17734
0.108374
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.5
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e712b95dc49750ca928ac86fa3a26d881979c0e0
15,130
py
Python
deeplift/layers/convolutional.py
jianwang-ntu/deeplift_tf2.0
957511e3e307fdb93f65bf54cc2b5214e5374f49
[ "MIT" ]
null
null
null
deeplift/layers/convolutional.py
jianwang-ntu/deeplift_tf2.0
957511e3e307fdb93f65bf54cc2b5214e5374f49
[ "MIT" ]
null
null
null
deeplift/layers/convolutional.py
jianwang-ntu/deeplift_tf2.0
957511e3e307fdb93f65bf54cc2b5214e5374f49
[ "MIT" ]
null
null
null
from __future__ import division from __future__ import print_function from __future__ import absolute_import from .core import * from .helper_functions import conv1d_transpose_via_conv2d from . import helper_functions as hf import tensorflow as tf PoolMode = deeplift.util.enum(max='max', avg='avg') PaddingMode = deeplift.util.enum(same='SAME', valid='VALID') DataFormat = deeplift.util.enum(channels_first='channels_first', channels_last='channels_last') class Conv(SingleInputMixin, Node): def __init__(self, conv_mxts_mode, **kwargs): self.conv_mxts_mode = conv_mxts_mode super(Conv, self).__init__(**kwargs) class Conv1D(Conv): """ Note: is ACTUALLY a cross-correlation i.e. weights are not 'flipped' """ def __init__(self, kernel, bias, stride, padding, **kwargs): """ The ordering of the dimensions is assumed to be: length, channels Note: this is ACTUALLY a cross-correlation, i.e. the weights are not 'flipped' as for a convolution. This is the tensorflow behaviour. """ super(Conv1D, self).__init__(**kwargs) #kernel has dimensions: #length x inp_channels x num output channels self.kernel = kernel self.bias = bias if (hasattr(stride, '__iter__')): assert len(stride)==1 stride=stride[0] self.stride = stride self.padding = padding def _compute_shape(self, input_shape): #assuming a theano dimension ordering here... 
shape_to_return = [None] if (input_shape is None or input_shape[1] is None): shape_to_return += [None] else: if (self.padding == PaddingMode.valid): #overhands are excluded shape_to_return.append( 1+int((input_shape[1]-self.kernel.shape[0])/self.stride)) elif (self.padding == PaddingMode.same): shape_to_return.append( int((input_shape[1]+self.stride-1)/self.stride)) else: raise RuntimeError("Please implement shape inference for" " padding mode: "+str(self.padding)) shape_to_return.append(self.kernel.shape[-1]) #num output channels return shape_to_return def _build_activation_vars(self, input_act_vars): conv_without_bias = self._compute_conv_without_bias( input_act_vars, kernel=self.kernel) return conv_without_bias + self.bias[None,None,:] def _build_pos_and_neg_contribs(self): if (self.conv_mxts_mode == ConvMxtsMode.Linear): inp_diff_ref = self._get_input_diff_from_reference_vars() pos_contribs = (self._compute_conv_without_bias( x=inp_diff_ref*hf.gt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.gt_mask(self.kernel,0.0)) +self._compute_conv_without_bias( x=inp_diff_ref*hf.lt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.lt_mask(self.kernel,0.0))) neg_contribs = (self._compute_conv_without_bias( x=inp_diff_ref*hf.lt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.gt_mask(self.kernel,0.0)) +self._compute_conv_without_bias( x=inp_diff_ref*hf.gt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.lt_mask(self.kernel,0.0))) else: raise RuntimeError("Unsupported conv_mxts_mode: "+ self.conv_mxts_mode) return pos_contribs, neg_contribs def _compute_conv_without_bias(self, x, kernel): conv_without_bias = tf.nn.conv1d( input=x, filters=kernel, stride=self.stride, padding=self.padding) return conv_without_bias def _get_mxts_increments_for_inputs(self): pos_mxts = self.get_pos_mxts() neg_mxts = self.get_neg_mxts() inp_diff_ref = self._get_input_diff_from_reference_vars() output_shape = self._get_input_shape() if (self.conv_mxts_mode == ConvMxtsMode.Linear): pos_inp_mask = 
hf.gt_mask(inp_diff_ref,0.0) neg_inp_mask = hf.lt_mask(inp_diff_ref,0.0) zero_inp_mask = hf.eq_mask(inp_diff_ref,0.0) inp_mxts_increments = pos_inp_mask*( conv1d_transpose_via_conv2d( value=pos_mxts, kernel=self.kernel*(hf.gt_mask(self.kernel,0.0)), tensor_with_output_shape=self.inputs.get_activation_vars(), padding=self.padding, stride=self.stride) +conv1d_transpose_via_conv2d( value=neg_mxts, kernel=self.kernel*(hf.lt_mask(self.kernel,0.0)), tensor_with_output_shape=self.inputs.get_activation_vars(), padding=self.padding, stride=self.stride)) inp_mxts_increments += neg_inp_mask*( conv1d_transpose_via_conv2d( value=pos_mxts, kernel=self.kernel*(hf.lt_mask(self.kernel,0.0)), tensor_with_output_shape=self.inputs.get_activation_vars(), padding=self.padding, stride=self.stride) +conv1d_transpose_via_conv2d( value=neg_mxts, kernel=self.kernel*(hf.gt_mask(self.kernel,0.0)), tensor_with_output_shape=self.inputs.get_activation_vars(), padding=self.padding, stride=self.stride)) inp_mxts_increments += zero_inp_mask*( conv1d_transpose_via_conv2d( value=0.5*(neg_mxts+pos_mxts), kernel=self.kernel, tensor_with_output_shape=self.inputs.get_activation_vars(), padding=self.padding, stride=self.stride)) pos_mxts_increments = inp_mxts_increments neg_mxts_increments = inp_mxts_increments else: raise RuntimeError("Unsupported conv mxts mode: " +str(self.conv_mxts_mode)) return pos_mxts_increments, neg_mxts_increments class Conv2D(Conv): """ Note: is ACTUALLY a cross-correlation i.e. weights are not 'flipped' """ def __init__(self, kernel, bias, strides, padding, data_format, **kwargs): """ Note: this is ACTUALLY a cross-correlation, i.e. the weights are not 'flipped' as for a convolution. This is the tensorflow behaviour. 
""" super(Conv2D, self).__init__(**kwargs) #kernel has dimensions: #rows_kern_width x cols_kern_width x inp_channels x num output channels self.kernel = kernel self.bias = bias self.strides = strides self.padding = padding self.data_format = data_format if (data_format not in ['channels_last', 'channels_first']): raise NotImplementedError(data_format+" data format" +" not implemented") def _compute_shape(self, input_shape): if (self.data_format == DataFormat.channels_first): input_shape = [input_shape[0], input_shape[2], input_shape[3], input_shape[1]] #assuming channels_last dimension ordering here shape_to_return = [None] if (input_shape is None): shape_to_return += [None, None] else: if (self.padding == PaddingMode.valid): for (dim_inp_len, dim_kern_width, dim_stride) in\ zip(input_shape[1:3], self.kernel.shape[:2], self.strides): #overhangs are excluded shape_to_return.append( 1+int((dim_inp_len-dim_kern_width)/dim_stride)) elif (self.padding == PaddingMode.same): for (dim_inp_len, dim_kern_width, dim_stride) in\ zip(input_shape[1:3], self.kernel.shape[:2], self.strides): shape_to_return.append( int((dim_inp_len+dim_stride-1)/dim_stride)) else: raise RuntimeError("Please implement shape inference for" " border mode: "+str(self.padding)) shape_to_return.append(self.kernel.shape[-1]) #num output channels if (self.data_format == DataFormat.channels_first): shape_to_return = [shape_to_return[0], shape_to_return[3], shape_to_return[1], shape_to_return[2]] return shape_to_return def _build_activation_vars(self, input_act_vars): if (self.data_format == DataFormat.channels_first): input_act_vars = tf.transpose(a=input_act_vars, perm=[0,2,3,1]) conv_without_bias = self._compute_conv_without_bias( x=input_act_vars, kernel=self.kernel) to_return = conv_without_bias + self.bias[None,None,None,:] if (self.data_format == DataFormat.channels_first): to_return = tf.transpose(a=to_return, perm=[0,3,1,2]) return to_return def _build_pos_and_neg_contribs(self): if 
(self.conv_mxts_mode == ConvMxtsMode.Linear): inp_diff_ref = self._get_input_diff_from_reference_vars() if (self.data_format == DataFormat.channels_first): inp_diff_ref = tf.transpose(a=inp_diff_ref, perm=[0,2,3,1]) pos_contribs = (self._compute_conv_without_bias( x=inp_diff_ref*hf.gt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.gt_mask(self.kernel,0.0)) +self._compute_conv_without_bias( x=inp_diff_ref*hf.lt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.lt_mask(self.kernel,0.0))) neg_contribs = (self._compute_conv_without_bias( x=inp_diff_ref*hf.lt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.gt_mask(self.kernel,0.0)) +self._compute_conv_without_bias( x=inp_diff_ref*hf.gt_mask(inp_diff_ref,0.0), kernel=self.kernel*hf.lt_mask(self.kernel,0.0))) else: raise RuntimeError("Unsupported conv_mxts_mode: "+ self.conv_mxts_mode) if (self.data_format == DataFormat.channels_first): pos_contribs = tf.transpose(a=pos_contribs, perm=[0,3,1,2]) neg_contribs = tf.transpose(a=neg_contribs, perm=[0,3,1,2]) return pos_contribs, neg_contribs def _compute_conv_without_bias(self, x, kernel): conv_without_bias = tf.nn.conv2d( input=x, filters=kernel, strides=[1]+list(self.strides)+[1], padding=self.padding) return conv_without_bias def _get_mxts_increments_for_inputs(self): pos_mxts = self.get_pos_mxts() neg_mxts = self.get_neg_mxts() inp_diff_ref = self._get_input_diff_from_reference_vars() inp_act_vars = self.inputs.get_activation_vars() strides_to_supply = [1]+list(self.strides)+[1] if (self.data_format == DataFormat.channels_first): pos_mxts = tf.transpose(a=pos_mxts, perm=(0,2,3,1)) neg_mxts = tf.transpose(a=neg_mxts, perm=(0,2,3,1)) inp_diff_ref = tf.transpose(a=inp_diff_ref, perm=(0,2,3,1)) inp_act_vars = tf.transpose(a=inp_act_vars, perm=(0,2,3,1)) output_shape = tf.shape(input=inp_act_vars) if (self.conv_mxts_mode == ConvMxtsMode.Linear): pos_inp_mask = hf.gt_mask(inp_diff_ref,0.0) neg_inp_mask = hf.lt_mask(inp_diff_ref,0.0) zero_inp_mask = hf.eq_mask(inp_diff_ref, 0.0) 
inp_mxts_increments = pos_inp_mask*( tf.nn.conv2d_transpose( input=pos_mxts, filters=self.kernel*hf.gt_mask(self.kernel, 0.0), output_shape=output_shape, padding=self.padding, strides=strides_to_supply ) +tf.nn.conv2d_transpose( input=neg_mxts, filters=self.kernel*hf.lt_mask(self.kernel, 0.0), output_shape=output_shape, padding=self.padding, strides=strides_to_supply )) inp_mxts_increments += neg_inp_mask*( tf.nn.conv2d_transpose( input=pos_mxts, filters=self.kernel*hf.lt_mask(self.kernel, 0.0), output_shape=output_shape, padding=self.padding, strides=strides_to_supply ) +tf.nn.conv2d_transpose( input=neg_mxts, filters=self.kernel*hf.gt_mask(self.kernel, 0.0), output_shape=output_shape, padding=self.padding, strides=strides_to_supply )) inp_mxts_increments += zero_inp_mask*tf.nn.conv2d_transpose( input=0.5*(pos_mxts+neg_mxts), filters=self.kernel, output_shape=output_shape, padding=self.padding, strides=strides_to_supply) pos_mxts_increments = inp_mxts_increments neg_mxts_increments = inp_mxts_increments else: raise RuntimeError("Unsupported conv mxts mode: " +str(self.conv_mxts_mode)) if (self.data_format == DataFormat.channels_first): pos_mxts_increments = tf.transpose(a=pos_mxts_increments, perm=(0,3,1,2)) neg_mxts_increments = tf.transpose(a=neg_mxts_increments, perm=(0,3,1,2)) return pos_mxts_increments, neg_mxts_increments
46.269113
79
0.548579
1,746
15,130
4.416953
0.087056
0.058351
0.0389
0.03112
0.82028
0.779305
0.731587
0.682184
0.63278
0.610996
0
0.016705
0.362987
15,130
326
80
46.411043
0.783461
0.054131
0
0.710526
0
0
0.022475
0
0
0
0
0
0.003759
1
0.048872
false
0
0.026316
0
0.12406
0.003759
0
0
0
null
0
0
0
1
1
1
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e727503520b94740decb311593c92fabee4dcda6
36
py
Python
static/data/api_key.py
KholiswaT/Satellite-Exploration
3aa828318457e66a5dd98cc816de09e166307560
[ "MIT" ]
1
2021-05-27T07:10:14.000Z
2021-05-27T07:10:14.000Z
static/data/api_key.py
KholiswaT/Satellite-Exploration
3aa828318457e66a5dd98cc816de09e166307560
[ "MIT" ]
null
null
null
static/data/api_key.py
KholiswaT/Satellite-Exploration
3aa828318457e66a5dd98cc816de09e166307560
[ "MIT" ]
1
2020-11-24T03:10:05.000Z
2020-11-24T03:10:05.000Z
API_Key= "RSNS74-FAEMYM-S26ZDF-4LIZ"
36
36
0.805556
6
36
4.666667
1
0
0
0
0
0
0
0
0
0
0
0.142857
0.027778
36
1
36
36
0.657143
0
0
0
0
0
0.675676
0.675676
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
e764e53d402e8d6e2db6395c2ddfe1d2e3ff11e9
95
py
Python
jpn_bpe_tokenizer/__init__.py
stsuchi/Japanese-BPE-Tokenizer
a7070913dcae5a84f87fb52362fb5ed7e8a55813
[ "MIT" ]
null
null
null
jpn_bpe_tokenizer/__init__.py
stsuchi/Japanese-BPE-Tokenizer
a7070913dcae5a84f87fb52362fb5ed7e8a55813
[ "MIT" ]
null
null
null
jpn_bpe_tokenizer/__init__.py
stsuchi/Japanese-BPE-Tokenizer
a7070913dcae5a84f87fb52362fb5ed7e8a55813
[ "MIT" ]
null
null
null
from .mecab_bpe_tokenizer import MecabBPETokenizer from .trainer import MecabBPETrainTokenizer
31.666667
50
0.894737
10
95
8.3
0.8
0
0
0
0
0
0
0
0
0
0
0
0.084211
95
2
51
47.5
0.954023
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e7a0f23a8beea3d75beb72809aa98f715c7e3f7b
50
py
Python
backend/accounts/signals/__init__.py
LloydTao/tickexe
a0262b4c8f11fdf57b8284d2a6b80dd2a3ad90ff
[ "MIT" ]
null
null
null
backend/accounts/signals/__init__.py
LloydTao/tickexe
a0262b4c8f11fdf57b8284d2a6b80dd2a3ad90ff
[ "MIT" ]
2
2021-10-15T19:28:59.000Z
2021-10-15T19:52:00.000Z
backend/accounts/signals/__init__.py
LloydTao/tickexe
a0262b4c8f11fdf57b8284d2a6b80dd2a3ad90ff
[ "MIT" ]
null
null
null
from .profile import create_profile, save_profile
25
49
0.86
7
50
5.857143
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.1
50
1
50
50
0.911111
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
e7d6dda3573072373b2c0ea60c3ecbe2d3d080ea
39
py
Python
intro/part01-01_emoticon/src/emoticon.py
Hannah-Abi/python-pro-21
2ce32c4bf118054329d19afdf83c50561be1ada8
[ "MIT" ]
null
null
null
intro/part01-01_emoticon/src/emoticon.py
Hannah-Abi/python-pro-21
2ce32c4bf118054329d19afdf83c50561be1ada8
[ "MIT" ]
null
null
null
intro/part01-01_emoticon/src/emoticon.py
Hannah-Abi/python-pro-21
2ce32c4bf118054329d19afdf83c50561be1ada8
[ "MIT" ]
null
null
null
# Write your solution here print(":-)")
19.5
26
0.666667
5
39
5.2
1
0
0
0
0
0
0
0
0
0
0
0
0.128205
39
2
27
19.5
0.764706
0.615385
0
0
0
0
0.214286
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
99dc7ee1d93ec3ee363347a8ca1f50b6c77f6159
15,240
py
Python
iotfunctions/ui.py
sedgewickmm18/functions
69d04a67b122601c4f207ded8e872d31b2ddafc8
[ "Apache-2.0" ]
null
null
null
iotfunctions/ui.py
sedgewickmm18/functions
69d04a67b122601c4f207ded8e872d31b2ddafc8
[ "Apache-2.0" ]
null
null
null
iotfunctions/ui.py
sedgewickmm18/functions
69d04a67b122601c4f207ded8e872d31b2ddafc8
[ "Apache-2.0" ]
null
null
null
# ***************************************************************************** # © Copyright IBM Corp. 2018. All Rights Reserved. # # This program and the accompanying materials # are made available under the terms of the Apache V2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 # # ***************************************************************************** import logging import datetime as dt logger = logging.getLogger(__name__) class BaseUIControl(object): def convert_datatype(self,from_datatype): conversions = {bool: 'BOOLEAN', str: 'LITERAL', float: 'NUMBER', int: 'NUMBER', dict: 'JSON', dt.datetime: 'TIMESTAMP', None: None } try: return conversions[from_datatype] except KeyError: msg = 'couldnt convert type %s ' %from_datatype raise TypeError(msg) class UIFunctionOutSingle(BaseUIControl): ''' Single output item Parameters ----------- name : str Name of function argument datatype: python datatype object Used to validate UI input. e.g. str, float, dt.datetime, bool description: str Help text to display in UI tags: list of strs Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT'] ''' def __init__(self,name, datatype=None, description=None, tags = None): self.name = name self.datatype = datatype if description is None: description = 'Choose an item name for the function output' self.description = description if tags is None: tags = [] self.tags = tags def to_metadata(self): meta = { 'name' : self.name, 'dataType' : self.convert_datatype(self.datatype), 'description' : self.description, 'tags' : self.tags } return meta class UIFunctionOutMulti(BaseUIControl): ''' Array of multiple outputs Parameters ----------- name : str Name of function argument cardinality_from: str Name of input argument that defines the number of items to expect from this array output. Specify an array input. 
is_datatype_derived: bool Specify true when the output datatypes are the same as the datatypes of the input array that drives this output array. datatype: python datatype object Used to validate UI input. e.g. str, float, dt.datetime, bool description: str Help text to display in UI tags: list of strs Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT'] ''' def __init__(self,name, cardinality_from, is_datatype_derived = False, datatype = None, description=None, tags = None, output_item = None): self.name = name self.cardinality_from = cardinality_from self.is_datatype_derived = is_datatype_derived if description is None: description = 'Provide names and datatypes for output items' self.description = description if datatype is not None: datatype = self.convert_datatype(datatype) self.datatype = datatype if tags is None: tags = [] self.tags = tags def to_metadata(self): if not self.datatype is None: datatype = [self.datatype] else: datatype= None meta = { 'name' : self.name, 'cardinalityFrom' : self.cardinality_from, 'dataTypeForArray' : datatype, 'description' : self.description, 'tags' : self.tags, 'jsonSchema' : { "$schema" : "http://json-schema.org/draft-07/schema#", "type" : "array", "items" : {"type": "string"} } } if self.is_datatype_derived: meta['dataTypeFrom'] = self.cardinality_from return meta class UISingleItem(BaseUIControl): ''' Choose a single item as a function argument Parameters ----------- name : str Name of function argument datatype: python datatype object Used to validate UI input. e.g. str, float, dt.datetime, bool required: bool Specify True when this argument is mandatory description: str Help text to display in UI tags: list of strs Optional tags, e.g. 
['DIMENSION', 'EVENT', 'ALERT'] ''' def __init__(self,name, datatype=None, description=None, required = True, tags = None): self.name = name self.datatype = datatype self.required = required if description is None: description = 'Choose one or more data item to use as a function input' self.description = description if tags is None: tags = [] self.tags = tags def to_metadata(self): if self.datatype is None: datatype = None else: datatype = [self.convert_datatype(self.datatype)] meta = { 'name' : self.name, 'type' : 'DATA_ITEM' , 'dataType' : datatype, 'required' : self.required, 'description' : self.description, 'tags' : self.tags } return meta class UIMultiItem(BaseUIControl): ''' Multi-select list of data items Parameters ----------- name : str Name of function argument datatype: python datatype object Used to validate UI input. e.g. str, float, dt.datetime, bool required: bool Specify True when this argument is mandatory min_items: int The minimum number of items that must be selected max_items: int The maximum number of items that can be selected description: str Help text to display in UI tags: list of strs Optional tags, e.g. 
['DIMENSION', 'EVENT', 'ALERT'] ''' def __init__(self,name, datatype=None, description=None, required = True, min_items = None, max_items = None, tags = None, output_item = None, is_output_datatype_derived = False, output_datatype = None ): self.name = name self.datatype = datatype self.required = required if description is None: description = 'Choose one or more data item to use as a function input' self.description = description if min_items is None: if self.required: min_items = 1 else: min_items = 0 self.min_items = min_items self.max_items = max_items if tags is None: tags = [] self.tags = tags #the following metadata is optional #used to create an array output for this input self.output_item = output_item self.is_output_datatype_derived = is_output_datatype_derived self.output_datatype = output_datatype def to_metadata(self): if self.datatype is None: datatype = None else: datatype = [self.convert_datatype(self.datatype)] meta = { 'name' : self.name, 'type' : 'DATA_ITEM' , 'dataType' : 'ARRAY', 'dataTypeForArray' : datatype, 'required' : self.required, 'description' : self.description, 'tags' : self.tags, 'jsonSchema' : { "$schema" : "http://json-schema.org/draft-07/schema#", "type" : "array", "minItems" : self.min_items, "maxItems" : self.max_items, "items" : {"type": "string"} } } return meta def to_output_metadata(self): if self.output_item is not None: if not self.output_datatype is None: datatype = [self.convert_datatype(self.output_datatype)] else: datatype= None meta = { 'name' : self.output_item, 'cardinalityFrom' : self.name, 'dataTypeForArray' : datatype, 'description' : self.description, 'tags' : self.tags, 'jsonSchema' : { "$schema" : "http://json-schema.org/draft-07/schema#", "type" : "array", "items" : {"type": "string"} } } if self.is_output_datatype_derived: meta['dataTypeFrom'] = self.name return meta else: return None class UIMulti(BaseUIControl): ''' Multi-select list of constants Parameters ----------- name : str Name of function argument 
datatype: python datatype object Used to validate UI input. e.g. str, float, dt.datetime, bool required: bool Specify True when this argument is mandatory min_items: int The minimum number of values that must be entered/selected max_items: int The maximum number of values that can be entered/selected description: str Help text to display in UI tags: list of strs Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT'] values: list Values to display in UI picklist ''' def __init__(self,name, datatype, description=None, required = True, min_items = None, max_items = None, tags = None, values = None, output_item = None, is_output_datatype_derived = False, output_datatype = None): self.name = name self.datatype = datatype self.required = required if description is None: description = 'Enter a list of comma separated values' self.description = description if min_items is None: if self.required: min_items = 1 else: min_items = 0 self.min_items = min_items self.max_items = max_items if tags is None: tags = [] self.tags = tags self.values = values #the following metadata is optional #used to create an array output for this input self.output_item = output_item self.is_output_datatype_derived = is_output_datatype_derived self.output_datatype = output_datatype def to_metadata(self): if self.datatype is None: msg = 'Datatype is required for multi constant array input %s' %self.name raise ValueError(msg) else: datatype = [self.convert_datatype(self.datatype)] meta = { 'name' : self.name, 'type' : 'CONSTANT' , 'dataType' : 'ARRAY', 'dataTypeForArray' : datatype, 'required' : self.required, 'description' : self.description, 'tags' : self.tags, 'values' : self.values, 'jsonSchema' : { "$schema" : "http://json-schema.org/draft-07/schema#", "type" : "array", "minItems" : self.min_items, "maxItems" : self.max_items, "items" : {"type": "string"} } } return meta def to_output_metadata(self): if self.output_item is not None: if self.output_datatype is not None: datatype = 
[self.convert_datatype(self.output_datatype)] else: datatype= None meta = { 'name' : self.output_item, 'cardinalityFrom' : self.name, 'dataTypeForArray' : datatype, 'description' : self.description, 'tags' : self.tags, 'jsonSchema' : { "$schema" : "http://json-schema.org/draft-07/schema#", "type" : "array", "items" : {"type": "string"} } } if self.is_output_datatype_derived: meta['dataTypeFrom'] = self.name return meta else: return None class UISingle(BaseUIControl): ''' Single valued constant Parameters ----------- name : str Name of function argument datatype: python datatype object Used to validate UI input. e.g. str, float, dt.datetime, bool required: bool Specify True when this argument is mandatory description: str Help text to display in UI tags: list of strs Optional tags, e.g. ['DIMENSION', 'EVENT', 'ALERT'] values: list Values to display in UI picklist ''' def __init__(self,name, datatype=None, description=None, tags = None, required = True, values = None, default = None): self.name = name self.datatype = datatype if description is None: description = 'Enter a constant value' self.description = description if tags is None: tags = [] self.tags = tags self.required = required self.values = values self.default = default def to_metadata(self): meta = { 'name' : self.name, 'type' : 'CONSTANT', 'dataType' : self.convert_datatype(self.datatype), 'description' : self.description, 'tags' : self.tags, 'required' : self.required, 'values' : self.values } if self.default is not None: if isinstance(self.default,dict): meta['value'] = self.default else: meta['value'] = {'value':self.default} return meta
33.791574
126
0.498163
1,456
15,240
5.119505
0.120879
0.024685
0.022538
0.013952
0.766702
0.74242
0.735042
0.720687
0.696002
0.690904
0
0.002452
0.41122
15,240
451
127
33.791574
0.828151
0.224344
0
0.716364
0
0
0.11251
0
0
0
0
0
0
1
0.054545
false
0
0.007273
0
0.127273
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
99ea14d5a26acfce667a74609393041dacbcf24b
46
py
Python
crawlib2/tests/dummy_site/music/controller/__init__.py
MacHu-GWU/crawlib2-project
618d72522d5b36d40607b53b7de7623976460712
[ "MIT" ]
1
2020-06-19T09:45:20.000Z
2020-06-19T09:45:20.000Z
crawlib2/tests/dummy_site/music/controller/__init__.py
MacHu-GWU/crawlib2-project
618d72522d5b36d40607b53b7de7623976460712
[ "MIT" ]
1
2019-12-27T18:41:21.000Z
2019-12-27T18:41:21.000Z
crawlib2/tests/dummy_site/music/controller/__init__.py
MacHu-GWU/crawlib2-project
618d72522d5b36d40607b53b7de7623976460712
[ "MIT" ]
1
2021-04-14T22:56:34.000Z
2021-04-14T22:56:34.000Z
# -*- coding: utf-8 -*- from .view import bp
11.5
23
0.565217
7
46
3.714286
1
0
0
0
0
0
0
0
0
0
0
0.027778
0.217391
46
3
24
15.333333
0.694444
0.456522
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
99fcfe3053da0dac86ac5e602bc9ca4a2b36e21a
60
py
Python
tests.py
gadse/mite-reader
a4ee10e2029708c05c104499c0e85049a0e7b10d
[ "MIT" ]
null
null
null
tests.py
gadse/mite-reader
a4ee10e2029708c05c104499c0e85049a0e7b10d
[ "MIT" ]
null
null
null
tests.py
gadse/mite-reader
a4ee10e2029708c05c104499c0e85049a0e7b10d
[ "MIT" ]
null
null
null
# TODO: Is there an example mite account we can access here?
60
60
0.766667
11
60
4.181818
1
0
0
0
0
0
0
0
0
0
0
0
0.183333
60
1
60
60
0.938776
0.966667
0
null
0
null
0
0
null
0
0
1
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
0
0
0
0
0
5
413e91172426529e257639e2d367bfa5d9c39252
293
py
Python
src/songbook/console/_remove.py
kipyin/-
5d372c7d987e6a1da380197c1b990def0d240298
[ "MIT" ]
1
2021-01-03T10:40:28.000Z
2021-01-03T10:40:28.000Z
src/songbook/console/_remove.py
kipyin/-
5d372c7d987e6a1da380197c1b990def0d240298
[ "MIT" ]
null
null
null
src/songbook/console/_remove.py
kipyin/-
5d372c7d987e6a1da380197c1b990def0d240298
[ "MIT" ]
1
2021-01-03T10:40:29.000Z
2021-01-03T10:40:29.000Z
import click @click.group() def remove(): pass @remove.command("song") def _remove_song(): pass @remove.command("arrangement") def _remove_arrangement(): pass @remove.command("worship") def _remove_worship(): pass @remove.command("hymn") def _remove_hymn(): pass
10.851852
30
0.675768
35
293
5.428571
0.314286
0.236842
0.357895
0
0
0
0
0
0
0
0
0
0.177474
293
26
31
11.269231
0.788382
0
0
0.3125
0
0
0.088737
0
0
0
0
0
0
1
0.3125
true
0.3125
0.0625
0
0.375
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
5
4143d0c8dab45ffb0f2d7668cc74f652d1aa68ad
3,185
py
Python
droxi/drox/omcdbase/transc1/models.py
andydude/droxtools
d608ceb715908fb00398c0d28eee74286fef3750
[ "MIT" ]
null
null
null
droxi/drox/omcdbase/transc1/models.py
andydude/droxtools
d608ceb715908fb00398c0d28eee74286fef3750
[ "MIT" ]
null
null
null
droxi/drox/omcdbase/transc1/models.py
andydude/droxtools
d608ceb715908fb00398c0d28eee74286fef3750
[ "MIT" ]
null
null
null
''' Created on Mar 31, 2014 @author: ajr ''' from ...models import Number from ..models import OMSym import math # Inverse Trig @OMSym.called("transc1", "arccos") class ArcCos(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arccosh") class ArcCosh(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arccot") class ArcCot(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arccoth") class ArcCoth(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arccsc") class ArcCsc(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arccsch") class ArcCsch(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arcsec") class ArcSec(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arcsech") class ArcSech(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arcsin") class ArcSin(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arcsinh") class ArcSinh(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arctan") class ArcTan(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "arctanh") class ArcTanh(OMSym): def __call__(self, arg): return None # Forward Trig @OMSym.called("transc1", "cos") class Cos(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "cosh") class Cosh(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "cot") class Cot(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "coth") class Coth(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "csc") class Csc(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "csch") class Csch(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "sec") class Sec(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "sech") class Sech(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", 
"sin") class Sin(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "sinh") class Sinh(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "tan") class Tan(OMSym): def __call__(self, arg): return None @OMSym.called("transc1", "tanh") class Tanh(OMSym): def __call__(self, arg): return None # Basic @OMSym.called("transc1", "exp") class Exp(OMSym): def __call__(self, arg): cls = type(arg) result = math.exp(arg.num) return cls(result) @OMSym.called("transc1", "ln") class Ln(OMSym): def __call__(self, arg): cls = type(arg) result = math.log(arg.num) return cls(result) @OMSym.called("transc1", "Log") class Log(OMSym): def __call__(self, base, arg): cls = Number.result_type(base, arg) result = math.log(arg.num, base.num) return cls(result)
20.158228
44
0.629513
406
3,185
4.669951
0.142857
0.156646
0.256329
0.227848
0.671414
0.671414
0.658228
0.627637
0.586498
0.586498
0
0.013258
0.218524
3,185
157
45
20.286624
0.748493
0.021978
0
0.470085
0
0
0.102126
0
0
0
0
0
0
1
0.230769
false
0
0.025641
0.205128
0.717949
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
4182225e976063680c2fe2b96ea88ba059cacec6
159
py
Python
openCV/OCR.py
untrobotics/IEEE-2019-R5
799bffc95b7be1c939d1ad1858b10faabb3cc842
[ "MIT" ]
null
null
null
openCV/OCR.py
untrobotics/IEEE-2019-R5
799bffc95b7be1c939d1ad1858b10faabb3cc842
[ "MIT" ]
6
2019-03-06T01:10:24.000Z
2020-06-17T05:04:43.000Z
openCV/OCR.py
untrobotics/IEEE-2019-R5
799bffc95b7be1c939d1ad1858b10faabb3cc842
[ "MIT" ]
3
2019-03-01T05:11:39.000Z
2019-11-22T15:01:02.000Z
try: from PIL import Image except ImportError: import Image import pytesseract print(pytesseract.image_to_string(Image.open('testocr.png')))
17.666667
61
0.72956
20
159
5.7
0.7
0.192982
0
0
0
0
0
0
0
0
0
0
0.188679
159
8
62
19.875
0.883721
0
0
0
0
0
0.06962
0
0
0
0
0
0
1
0
true
0
0.666667
0
0.666667
0.166667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
41902a781a0d8c79f7b55ae174b5f138ba9c49dc
5,816
py
Python
pp/components/manhattan_font.py
smartalecH/gdsfactory
66dfbf740704f1a6155f4812a1d9483ccf5c116c
[ "MIT" ]
16
2020-02-03T07:05:31.000Z
2021-12-29T18:40:09.000Z
pp/components/manhattan_font.py
smartalecH/gdsfactory
66dfbf740704f1a6155f4812a1d9483ccf5c116c
[ "MIT" ]
2
2020-01-31T20:01:40.000Z
2020-09-26T17:50:55.000Z
pp/components/manhattan_font.py
smartalecH/gdsfactory
66dfbf740704f1a6155f4812a1d9483ccf5c116c
[ "MIT" ]
7
2020-02-09T23:16:18.000Z
2020-10-30T03:12:04.000Z
# NOTE(review): this module was recovered from a whitespace-collapsed source
# dump. Line breaks inside the FONT table, the default glyph of pixel_array
# and docstring layout were reconstructed -- confirm against the upstream file.
import numpy as np
from omegaconf.listconfig import ListConfig
from pp.component import Component
from typing import List, Tuple
import pp
from pp.layers import LAYER
from pp.name import clean_name

""" A pixel based font, guaranteed to be manhattan, without accute angles """


def manhattan_text(
    text: str = "abcd",
    size: float = 10,
    position: Tuple[int, int] = (0, 0),
    justify: str = "left",
    layer: ListConfig = LAYER.M1,
    layers_cladding: List[ListConfig] = None,  # None -> no cladding (avoids mutable default)
    cladding_offset: int = 3,
) -> Component:
    """Return a Component with *text* rendered in the manhattan pixel font.

    Args:
        text: string to render; characters missing from CHARAC_MAP are
            upper-cased, and if still missing, skipped with a warning.
        size: pixel size of the font.
        position: (x, y) anchor of the first character.
        justify: "left", "right" or "center".
        layer: layer for the text pixels.
        layers_cladding: optional layers on which a rectangle surrounding the
            whole text is drawn.
        cladding_offset: margin of the cladding rectangle.

    .. plot::
      :include-source:

      import pp

      c = pp.c.text(text="abcd", size=10, position=(0, 0), justify="left", layer=1)
      pp.plotgds(c)
    """
    if layers_cladding is None:
        layers_cladding = []
    pixel_size = size
    xoffset = position[0]
    yoffset = position[1]
    t = pp.Component(
        name=clean_name(text) + "_{}_{}".format(int(position[0]), int(position[1]))
    )
    for i, line in enumerate(text.split("\n")):
        component = pp.Component(name=t.name + "{}".format(i))
        for c in line:
            try:
                if c not in CHARAC_MAP:
                    c = c.upper()
                pixels = CHARAC_MAP[c]
            except KeyError:  # was a bare except; only a missing glyph is expected here
                print(
                    "character {} could not be written (probably not part of dictionnary)".format(
                        c
                    )
                )
                continue
            _c = component.add_ref(
                pixel_array(pixels=pixels, pixel_size=pixel_size, layer=layer)
            )
            _c.move((xoffset, yoffset))
            component.absorb(_c)
            xoffset += pixel_size * 6
        t.add_ref(component)
        # Next text line goes below the current one, starting at the left edge.
        yoffset -= pixel_size * 6
        xoffset = position[0]
    justify = justify.lower()
    for ref in t.references:
        if justify == "left":
            pass
        if justify == "right":
            ref.xmax = position[0]
        if justify == "center":
            ref.move(origin=ref.center, destination=position, axis="x")
    # Optional cladding rectangle drawn around the whole rendered text.
    points = [
        [t.xmin - cladding_offset / 2, t.ymin - cladding_offset],
        [t.xmax + cladding_offset / 2, t.ymin - cladding_offset],
        [t.xmax + cladding_offset / 2, t.ymax + cladding_offset],
        [t.xmin - cladding_offset / 2, t.ymax + cladding_offset],
    ]
    for layer in layers_cladding:
        t.add_polygon(points, layer=layer)
    return t


@pp.autoname
def pixel_array(
    pixels: str = """
XXX
X X
XXXXX
X X
X X
""",
    pixel_size: float = 10.0,
    layer: ListConfig = LAYER.M1,
) -> Component:
    """Return a Component with one square polygon per 'X' (or '1') in *pixels*.

    Rows are drawn bottom-up so the string reads the same way the layout does.
    """
    component = pp.Component()
    grid_rows = [row for row in pixels.split("\n") if len(row) > 0]
    grid_rows.reverse()
    a = pixel_size
    for j, row in enumerate(grid_rows):
        for i, c in enumerate(row):
            if c in ["X", "1"]:
                p0 = np.array([i * a, j * a])
                pixel = [p0 + p for p in [(0, 0), (a, 0), (a, a), (0, a)]]
                component.add_polygon(pixel, layer=layer)
    return component


# Each entry: a one-character label line followed by five rows of 5 bits.
# NOTE(review): row breaks reconstructed from the collapsed dump -- verify.
FONT = """\
A
1 1 1 1 1
1 0 0 0 1
1 1 1 0 1
1 0 0 0 1
1 0 0 0 1
B
1 1 1 1 1
1 0 0 0 1
1 0 1 1 1
1 0 0 0 1
1 0 1 1 1
C
1 1 1 1 1
1 0 0 0 1
1 0 0 0 0
1 0 0 0 1
1 1 1 1 1
D
1 1 1 1 1
1 0 0 0 1
1 0 0 0 1
1 0 0 0 1
1 0 1 1 1
E
1 1 1 1 1
1 0 0 0 0
1 0 1 1 0
1 0 0 0 0
1 0 1 1 1
F
1 1 1 1 1
1 0 0 0 0
1 0 1 1 0
1 0 0 0 0
1 0 0 0 0
G
1 1 1 1 1
1 0 0 0 0
1 0 1 1 1
1 0 0 0 1
1 1 1 1 1
H
1 0 0 0 1
1 0 0 0 1
1 1 1 1 1
1 0 0 0 1
1 0 0 0 1
I
1 1 1 1 1
0 0 1 0 0
0 0 1 0 0
0 0 1 0 0
1 1 1 1 1
J
0 0 0 0 1
0 0 0 0 1
0 0 0 0 1
1 0 0 0 1
1 1 1 1 1
K
1 0 0 0 1
1 0 0 1 1
1 1 1 1 0
1 0 0 1 1
1 0 0 0 1
L
1 0 0 0 0
1 0 0 0 0
1 0 0 0 0
1 0 0 0 0
1 1 1 1 1
M
1 1 0 1 1
1 1 1 1 1
1 0 1 0 1
1 0 0 0 1
1 0 0 0 1
N
1 1 1 0 1
1 0 1 0 1
1 0 1 1 1
1 0 0 1 1
1 0 0 0 1
O
1 1 1 1 1
1 0 0 0 1
1 0 0 0 1
1 0 0 0 1
1 1 0 1 1
P
1 1 1 1 1
1 0 0 0 1
1 0 1 1 1
1 0 0 0 0
1 0 0 0 0
Q
1 1 1 1 0
1 0 0 1 0
1 0 0 1 0
1 0 0 1 0
1 1 1 1 1
R
1 1 1 1 0
1 0 0 1 0
1 0 1 1 1
1 0 0 0 1
1 0 0 0 1
S
1 1 1 1 1
1 1 0 0 0
0 1 1 1 0
0 0 0 1 1
1 1 1 1 1
T
1 1 1 1 1
0 0 1 0 0
0 0 1 0 0
0 0 1 0 0
0 0 1 0 0
U
1 0 0 0 1
1 0 0 0 1
1 0 0 0 1
1 0 0 0 1
1 1 1 1 1
V
1 0 0 0 1
1 0 0 0 1
1 0 0 0 1
0 1 0 1 0
0 0 1 0 0
W
1 0 0 0 1
1 0 0 0 1
1 0 1 0 1
1 1 1 1 1
1 1 0 1 1
X
1 1 0 1 1
1 1 1 1 1
0 1 1 1 0
1 1 1 1 1
1 1 0 1 1
Y
1 0 0 0 1
1 0 0 0 1
1 1 1 1 1
0 0 1 0 0
0 0 1 0 0
Z
1 1 1 1 1
0 0 0 1 1
0 0 1 1 0
0 1 1 0 0
1 1 1 1 1
1
0 1 1 0 0
0 0 1 0 0
0 0 1 0 0
0 0 1 0 0
0 1 1 1 0
2
1 1 1 1 1
0 0 0 1 1
1 1 1 1 1
1 1 0 0 0
1 1 1 1 1
3
1 1 1 1 1
0 0 0 1 1
1 1 1 1 1
0 0 0 1 1
1 1 1 1 1
4
1 0 0 1 1
1 0 0 1 1
1 1 1 1 1
0 0 0 1 1
0 0 0 1 1
5
1 1 1 1 1
1 1 0 0 0
1 1 1 1 1
0 0 0 1 1
1 1 1 1 1
6
1 1 1 1 1
1 0 0 0 0
1 1 1 1 1
1 1 0 1 1
1 1 1 1 1
7
1 1 1 1 1
0 0 0 1 1
0 0 0 1 1
0 0 0 1 1
0 0 0 1 1
8
1 1 1 1 1
1 1 0 1 1
1 1 1 1 1
1 1 0 1 1
1 1 1 1 1
9
1 1 1 1 1
1 1 0 1 1
1 1 1 1 1
0 0 0 1 1
0 0 0 1 1
0
0 1 1 1 1
0 1 0 0 1
0 1 0 0 1
0 1 0 0 1
0 1 1 1 1
+
0 0 0 0 0
0 0 1 0 0
0 1 1 1 0
0 0 1 0 0
0 0 0 0 0
-
0 0 0 0 0
0 0 0 0 0
0 1 1 1 0
0 0 0 0 0
0 0 0 0 0
_
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 1 1 1 0
.
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 0 1 0 0
 
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
"""

# character -> 5-row pixel string ("\n"-terminated rows of 0/1), filled by load_font().
CHARAC_MAP = {}


def load_font() -> None:
    """Parse FONT into CHARAC_MAP (mutates the module-level dict in place)."""
    lines = FONT.split("\n")
    while lines:
        line = lines.pop(0)
        if not line:
            break
        charac = line[0]
        pixels = ""
        for _ in range(5):
            pixels += lines.pop(0).replace("\t", "").replace(" ", "") + "\n"
        CHARAC_MAP[charac] = pixels


load_font()


if __name__ == "__main__":
    c = manhattan_text(
        text="The mask is nearly done. only 12345 drc errors remaining",
        layers_cladding=[(33, 44)],
    )
    pp.show(c)
14.686869
98
0.51685
1,524
5,816
1.937664
0.101706
0.24382
0.244836
0.22892
0.40061
0.40061
0.397223
0.374534
0.369116
0.369116
0
0.312482
0.400791
5,816
395
99
14.724051
0.534864
0.023728
0
0.601671
0
0
0.424645
0
0
0
0
0
0
1
0.008357
false
0.002786
0.019499
0
0.033426
0.002786
0
0
1
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
41b18c16b10395f112ebbec8aed3cb28a64d70bf
50,977
py
Python
mmhelper/tests/test_detection.py
jmetz/momanalysis
8d71490c99127568b184784890258e9a6ef876ef
[ "MIT" ]
null
null
null
mmhelper/tests/test_detection.py
jmetz/momanalysis
8d71490c99127568b184784890258e9a6ef876ef
[ "MIT" ]
3
2019-07-25T13:43:15.000Z
2019-11-04T12:39:22.000Z
mmhelper/tests/test_detection.py
jmetz/momanalysis
8d71490c99127568b184784890258e9a6ef876ef
[ "MIT" ]
1
2021-03-28T03:00:21.000Z
2021-03-28T03:00:21.000Z
# -*- coding: utf-8 -*- """ Created on Fri Nov 1 11:35 2016 @author: as624 """ import unittest from mmhelper.detection.bacteria import detect_bacteria_in_all_wells as detbac from mmhelper.comparison import match_labels, determine_precision_recall import mmhelper.detection.bacteria as mdet import mmhelper.detection.wells as mdet_wells import numpy as np import skimage.measure as skmeas class TestSubtractBackground(unittest.TestCase): """ class for testing background subtraction """ def setUp(self): self.sz0 = (100, 100) self.ground_truth = np.zeros(self.sz0) self.ground_truth_level = 400 # Add some objects self.ground_truth[10:20, 50:60] = self.ground_truth_level # Noisy background self.bg_std = 10 self.bg_offset = 100 self.bg_grad_max = 100 self.bkg = self.bg_std * np.random.randn(*self.sz0) + self.bg_offset # Add a constant gradient x0_ = np.meshgrid(np.arange(self.sz0[0]), np.arange(self.sz0[1]))[0] self.bkg += self.bg_grad_max * x0_ / x0_.max() self.image = {0: self.ground_truth + self.bkg} def test_subtract_background(self): """ Tests background subtraction """ removed = mdet_wells.remove_background(self.image, light_background=False) # For our current workflow, the background-removed images are inverted removed = -removed[0] #import matplotlib.pyplot as plt # plt.imshow(removed) # plt.colorbar() # plt.show() # Make sure the background is all relatively low now # NOTE: As background is subtracted, need to go double above reasonable # statisitcally realy unlikely values of ~4 sigma from normal # distribution self.assertTrue( np.all(removed[self.ground_truth == 0] < 8 * self.bg_std)) # Make sure the foreground is about right self.assertTrue( np.all( np.abs( removed[self.ground_truth > 0] - self.ground_truth_level) < 8 * self.bg_std)) class DetectBacteria(unittest.TestCase): """ Unittests for detecting bacteria """ def setUp(self): self.lbl1 = {1: np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 211, 1, 1, 1, 1, 1, 1, 1], [1, 211, 
211, 211, 1, 1, 1, 1, 1, 1], [1, 211, 211, 211, 1, 1, 1, 1, 1, 1], [1, 1, 211, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])} self.lbl2 = {2: np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 211, 211, 1, 1, 1], [1, 1, 1, 1, 211, 211, 211, 211, 1, 1], [1, 1, 1, 1, 211, 211, 211, 211, 1, 1], [1, 1, 1, 1, 211, 211, 211, 211, 1, 1], [1, 1, 1, 1, 1, 211, 211, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])} self.lbl_twobac1 = {3: np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 185, 185, 1, 1, 1, 1, 1], [1, 1, 185, 185, 185, 185, 1, 1, 1, 1], [1, 1, 185, 185, 185, 185, 1, 1, 1, 1], [1, 1, 185, 185, 185, 185, 1, 1, 1, 1], [1, 1, 1, 185, 185, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 215, 215, 1, 1, 1, 1, 1], [1, 1, 215, 215, 215, 215, 1, 1, 1, 1], [1, 1, 215, 215, 215, 215, 1, 1, 1, 1], [1, 1, 215, 215, 215, 215, 1, 1, 1, 1], [1, 1, 1, 215, 215, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )} # self.lbl2 = np.array([[0,0,0,0,0,0,0,0,0,0], # [0,0,0,0,0,0,0,0,0,0], # [0,1,1,1,0,0,0,0,0,0], # [0,1,1,1,0,0,0,0,0,0], # [0,1,1,1,0,0,0,0,0,0], # [0,1,1,1,0,0,0,0,0,0], # [0,0,0,0,0,0,0,0,0,0], # [0,0,0,0,0,0,0,0,0,0], # [0,0,0,0,0,0,0,0,0,0]]) # Seems to now remove a border.... 
self.res1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) self.res2 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) self.res_two1 = np.array( [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 2, 2, 0, 0, 0, 0, 0], [0, 0, 2, 2, 2, 2, 0, 0, 0, 0], [0, 0, 2, 2, 2, 2, 0, 0, 0, 0], [0, 0, 2, 2, 2, 2, 0, 0, 0, 0], [0, 0, 0, 2, 2, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] ) self.wellnum1 = [1] self.wellnum2 = [2] self.wellnum3 = [3] self.label_string = {1: '1'} self.label_string2 = {1: '1', 2: '2'} def test_detect_small_bacteria1(self): """ Test the detection of small bacteria """ detected = detbac(self.lbl1, # maximum area (in pixels) of an object to be # considered a bacteria maxsize=1500, # maximum area (in pixels) of an object to be # considered a bacteria minsize=0, # width (in pixels) at which something is definitely # a bacteria (can override relativewidth) absolwidth=0.1, # ignores anything labeled this distance from the # bottom of the well (prevents channel border being # labelled) ) for k in detected.values(): man_ids, det_ids, man_id_dict, det_id_dict = match_labels( self.res1, k) precision_scores, recall_scores = determine_precision_recall( self.res1, k, man_ids, det_ids, man_id_dict, det_id_dict) assert 
np.all(np.array(precision_scores) >= 0.75) assert np.all(np.array(recall_scores) >= 0.75) def test_detect_small_bacteria2(self): """ A test for detecting small bacteria """ detected = detbac(self.lbl2, # maximum area (in pixels) of an object to be # considered a bacteria maxsize=1500, # maximum area (in pixels) of an object to be # considered a bacteria minsize=0, # width (in pixels) at which something is definitely # a bacteria (can override relativewidth) absolwidth=0.1, ) for k in detected.values(): man_ids, det_ids, man_id_dict, det_id_dict = match_labels( self.res2, k) precision_scores, recall_scores = determine_precision_recall( self.res2, k, man_ids, det_ids, man_id_dict, det_id_dict) assert np.all(np.array(precision_scores) >= 0.75) assert np.all(np.array(recall_scores) >= 0.75) def test_detect_two_bacteria1(self): """ Test two detect two bacteria """ detected = detbac(self.lbl_twobac1, # maximum area (in pixels) of an object to be # considered a bacteria maxsize=1500, # maximum area (in pixels) of an object to be # considered a bacteria minsize=0, # width (in pixels) at which something is definitely # a bacteria (can override relativewidth) absolwidth=0.1, ) for k in detected.values(): man_ids, det_ids, man_id_dict, det_id_dict = match_labels( self.res_two1, k) precision_scores, recall_scores = determine_precision_recall( self.res_two1, k, man_ids, det_ids, man_id_dict, det_id_dict) assert np.all(np.array(precision_scores) >= 0.75) assert np.all(np.array(recall_scores) >= 0.75) class TestSplitBacteria(unittest.TestCase): """ Class for testing the splitting of bacteria """ def setUp(self): self.wells = {1: np.array([[0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 
0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])} self.wells2 = {2: np.array([[0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 
1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) } self.wells_int = {1: np.array([[0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 
0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])} self.wells2_int = {2: np.array([[0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 
400, 400, 400, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 400, 400, 400, 400, 400, 400, 0, 0], [0, 0, 0, 400, 400, 400, 400, 0, 0, 0], [0, 0, 0, 0, 400, 400, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) } self.out_wells = [np.array([[0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 
0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), ] self.out_wells2 = [np.array([[0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 2, 2, 2, 2, 2, 2, 0, 0], [0, 0, 0, 2, 2, 2, 2, 0, 0, 0], [0, 0, 0, 0, 2, 2, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 3, 3, 0, 0, 0, 0], [0, 0, 0, 3, 3, 3, 3, 0, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 3, 3, 3, 3, 3, 0, 0], [0, 0, 3, 
3, 3, 3, 3, 3, 0, 0], [0, 0, 0, 3, 3, 3, 3, 0, 0, 0], [0, 0, 0, 0, 3, 3, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) ] self.out_string = {1: '1', 2: '2'} self.out_string2 = {1: '1', 2: '2', 3: '3'} self.wellnum1 = [1] self.wellnum2 = [2] def test_bacteria_no_split(self): """ Test bacteria that don't need splitting """ split = mdet.split_bacteria_in_all_wells( self.wells, self.wells_int, min_skel_length=3, ) newarrays1 = [] wellnums1 = [] for j, k in split.items(): wellnums1.append(j) newarrays1.append(k) np.testing.assert_array_equal(newarrays1, self.out_wells) self.assertEqual(self.wellnum1, wellnums1) def test_bacteria_split(self): """ Test bacteria that need splitting """ split = mdet.split_bacteria_in_all_wells( self.wells2, self.wells2_int, min_skel_length=4, ) newarrays2 = [] wellnums2 = [] for j, k in split.items(): wellnums2.append(j) newarrays2.append(k) np.testing.assert_array_equal(newarrays2, self.out_wells2) self.assertEqual(self.wellnum2, wellnums2) class TestExtractWells(unittest.TestCase): """ Class for testing the extraction of well profiles """ def setUp(self): # Create test data for the extraction # Doesn't really matter what the image is self.image = np.random.rand(10, 10) self.channel = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 1, 1, 1, 1, 1, 1, 1, 1, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], dtype=bool) self.wells = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 2, 0, 0, 0, 0, 0, ], [0, 0, 1, 0, 2, 0, 0, 0, 3, 0, ], [0, 0, 1, 0, 2, 0, 0, 0, 3, 0, ], [0, 0, 1, 0, 2, 0, 0, 0, 3, 0, ], [0, 0, 1, 0, 0, 0, 0, 0, 3, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], 
dtype='uint16') self.wellstrue = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [1, 0, 2, 0, 3, 0, 4, 0, 5, 0, ], [1, 0, 2, 0, 3, 0, 4, 0, 5, 0, ], [1, 0, 2, 0, 3, 0, 4, 0, 5, 0, ], [1, 0, 2, 0, 3, 0, 4, 0, 5, 0, ], [1, 0, 2, 0, 3, 0, 4, 0, 5, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], dtype='uint16') self.wellwidth = 1 self.coords = { 1: ([6, 5, 4, 3, 2], [0, 0, 0, 0, 0]), 2: ([6, 5, 4, 3, 2], [2, 2, 2, 2, 2]), 3: ([6, 5, 4, 3, 2], [4, 4, 4, 4, 4]), 4: ([6, 5, 4, 3, 2], [6, 6, 6, 6, 6]), 5: ([6, 5, 4, 3, 2], [8, 8, 8, 8, 8]), } self.wellimages = {l: self.image[c][:, None] for l, c in self.coords.items() } def test_extract_well_profiles(self): """ Tests extract_well_profiles """ images, wellimage, coords = mdet_wells.extract_well_profiles( self.image, self.wells, wellwidth=self.wellwidth, min_well_sep_factor=0.5, ) self.assertEqual(len(images), len(self.wellimages)) for well1, well2 in zip(images, self.wellimages): np.testing.assert_array_equal(well1, well2) #core.assert_array_almost_equal(w1, w2) np.testing.assert_array_equal(wellimage, self.wellstrue) self.assertEqual(coords, self.coords) class TestDetectWells(unittest.TestCase): """ Class for testing well detection """ def setUp(self): # Create test data for the detection self.image = 10 + np.random.randn(30, 30) self.lbl = np.zeros((30, 30), dtype=int) self.lbl[6: 9, 6:24] = 1 self.lbl[16: 19, 6:24] = 2 self.image[5:10, 5:25] -= 20 self.image[6: 9, 6:24] += 40 self.image[15:20, 5:25] -= 20 self.image[16: 19, 6:24] += 40 self.scale_range = [1.0, 3.0] self.maxd = 40 self.mind = 3 self.maxperp = 10 self.minwidth = 0 self.min_outline_area = 0 def test_detect_initial_well_masks(self): """ Tests the detect_initial_well_masks function """ lblgood = mdet_wells.detect_initial_well_masks( self.image, scale_range=self.scale_range, maxd=self.maxd, mind=self.mind, maxperp=self.maxperp, min_outline_area=self.min_outline_area, 
merge_length=0, debug="", )[0] man_ids, det_ids, man_id_dict, det_id_dict = match_labels( self.lbl, lblgood) precision_scores, recall_scores = determine_precision_recall( self.lbl, lblgood, man_ids, det_ids, man_id_dict, det_id_dict) try: assert np.all(np.array(precision_scores) > 0.9) assert np.all(np.array(recall_scores) > 0.9) #core.assert_array_equal(lblgood, self.lbl) except BaseException: import matplotlib.pyplot as plt plt.figure() plt.imshow(self.image, cmap='gray') plt.title("Input image") plt.savefig("test_detect_initial_well_masks_fail_input_image.jpg") plt.figure() plt.imshow(lblgood) plt.title("Got labels") plt.savefig("test_detect_initial_well_masks_fail_detected_labels.jpg") plt.figure() plt.imshow(self.lbl) plt.title("Expected labels") plt.savefig("test_detect_initial_well_masks_fail_expected_labels.jpg") plt.close("all") raise class TestGetWellsAndUnitVectors(unittest.TestCase): """ Class for testing get_wells_and_unit_vectors """ def setUp(self): wells_vertical = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 3, 0, 0, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], dtype=int) self.props_vertical = skmeas.regionprops(wells_vertical) wells_horizontal = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 1, 1, 1, 1, 1, 1, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 2, 2, 2, 2, 2, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 3, 3, 3, 3, 3, 3, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 4, 4, 4, 4, 4, 4, 4, 4, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], dtype=int) self.props_horizontal = skmeas.regionprops(wells_horizontal) wells_vertical_with_outlier = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 5, 5, 5, 5, 5, 5, 5, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, ], [0, 0, 0, 0, 0, 3, 0, 0, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 1, 0, 2, 0, 3, 0, 4, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], dtype=int) self.props_vertical_with_outlier = skmeas.regionprops( wells_vertical_with_outlier) def test_get_wells_vertical(self): """ Test for get_wells_and_unit_vectors on vertical wells """ coms, oris, uvec_para, uvec_perp = mdet_wells.get_wells_and_unit_vectors( self.props_vertical) # Unit vectors can be +-, so make sure by abs-ing uvec_perp = np.abs(uvec_perp) uvec_para = np.abs(uvec_para) np.testing.assert_array_almost_equal(uvec_para, [0, 1]) np.testing.assert_array_almost_equal(uvec_perp, [1, 0]) def test_get_wells_horizontal(self): """ Test for get_wells_and_unit_vectors on horizontal wells """ coms, oris, uvec_para, uvec_perp = mdet_wells.get_wells_and_unit_vectors( self.props_horizontal) # Unit vectors can be +-, so make sure by abs-ing uvec_perp = np.abs(uvec_perp) uvec_para = np.abs(uvec_para) np.testing.assert_array_almost_equal(uvec_para, [1, 0]) np.testing.assert_array_almost_equal(uvec_perp, [0, 1]) def test_get_wells_vertical_outlier(self): """ Now added in a horizontal well - if the simple outlier detection doesn't filter it, the vectors should be off. 
""" coms, oris, uvec_para, uvec_perp = mdet_wells.get_wells_and_unit_vectors( self.props_vertical_with_outlier) # Unit vectors can be +-, so make sure by abs-ing uvec_perp = np.abs(uvec_perp) uvec_para = np.abs(uvec_para) np.testing.assert_array_almost_equal(uvec_para, [0, 1]) np.testing.assert_array_almost_equal(uvec_perp, [1, 0]) # Check that one region got rejected self.assertEqual(len(oris), len(self.props_vertical_with_outlier) - 1) class TestGetWellSpacingAndSeparations(unittest.TestCase): """ Class for testing well_spacing_and_seps """ def setUp(self): # Need, coms (centres of mass), the perpendicular unit vector # to project the coms along, and wellwidth for filtering # nearby coms # Simple case - all uniformly distributed self.coms_horizontal = [ [2, 5], [4, 6], [6, 4], [8, 5.5], [10, 5], ] self.uvec_perp_horizontal = [1, 0] self.coms_vertical = [ [5, 2], [6, 4], [4, 6], [5.5, 8], [5, 10], ] self.uvec_perp_vertical = [0, 1] self.coms_horizontal_with_gaps = [ [2, 5], [4, 6], [6, 4], [8, 5.5], [12, 5], [14, 6.5], [18, 4.5], ] self.wellwidth = 0.5 def test_well_spacing_horizontal(self): """ Test for the function well_spacing_and_seps on horizontal wells """ normseps, posperp_sorted = mdet_wells.well_spacing_and_seps( self.coms_horizontal, self.uvec_perp_horizontal, self.wellwidth, ) self.assertListEqual(normseps.tolist(), [1, 1, 1, 1]) self.assertListEqual(posperp_sorted.tolist(), [2, 4, 6, 8, 10]) def test_well_spacing_vertical(self): """ Test for the function well_spacing_and_seps on vertical wells """ normseps, posperp_sorted = mdet_wells.well_spacing_and_seps( self.coms_vertical, self.uvec_perp_vertical, self.wellwidth, ) self.assertListEqual(normseps.tolist(), [1, 1, 1, 1]) self.assertListEqual(posperp_sorted.tolist(), [2, 4, 6, 8, 10]) def test_well_spacing_horiz_gaps(self): """ Test for the function well_spacing_and_seps on horizontal wells with gaps """ normseps, posperp_sorted = mdet_wells.well_spacing_and_seps( self.coms_horizontal_with_gaps, 
self.uvec_perp_horizontal, self.wellwidth, ) self.assertListEqual(normseps.tolist(), [1, 1, 1, 2, 1, 2]) self.assertListEqual(posperp_sorted.tolist(), [2, 4, 6, 8, 12, 14, 18]) class TestInterpolatePositionsAndExtractProfiles(unittest.TestCase): """ Class for testing well interpolation and extracting well profiles """ def setUp(self): self.image = 10 + np.random.randn(10, 22) self.image += 20 * np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 10, 10, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, ], [0, 0, 10, 10, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, ], [0, 0, 10, 10, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, ], [0, 0, 10, 10, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, ], [0, 0, 10, 10, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, ], [0, 0, 10, 10, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ]) # Means for the regions after extrapolation self.image_means = {1: 210, 2: 30, 3: 50, 4: 70, 5: 90} labels = np.array([ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], dtype=int) self.finallabel = np.array([ [0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, 5, 5, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, 5, 5, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, 5, 5, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, 5, 5, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, 5, 5, 0, 0, ], [0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 3, 3, 0, 0, 4, 4, 0, 0, 5, 5, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ], ], dtype=int) self.normseps = [1, 1, 2] self.posperp_sorted = [2.5, 6.6, 10.5, 18.5] self.propsgood = skmeas.regionprops(labels) self.uvec_para = [0, 1] self.uvec_perp = [1, 0] self.wellwidth = 2 def test_interp_pos_extract_profs(self): """ Test for interpolate_pos_extract_profs """ images, wellimage, coords = mdet_wells.interpolate_pos_extract_profs( np.array( self.normseps), np.array( self.posperp_sorted), self.propsgood, np.array( self.uvec_para), np.array( self.uvec_perp), self.wellwidth, self.image, ) # We can check the stats for the regions for k, im0 in images.items(): # Have normally distributed noise with std 1, # averaged over 12 values... deviation should # be well less than 1... 
but just failed, set to delta=2 self.assertAlmostEqual( im0.mean(), self.image_means[k], delta=2, ) np.testing.assert_array_equal(wellimage, self.finallabel) class TestRelabelBacteria(unittest.TestCase): """ class for testing bacteria relabelling """ def setUp(self): self.old_labels = { 1 : np.array([[1,0,0,2],]), 2 : np.array([[1,0,0,2],]), } self.expected = { 1 : np.array([[1,0,0,2],]), 2 : np.array([[3,0,0,4],]), } def test_simple_relabel(self): """ Tests relabelling of bacteria """ result = mdet.relabel_bacteria(self.old_labels) self.assertListEqual(list(self.expected.keys()), list(result.keys())) for k, v in result.items(): np.testing.assert_array_equal(v, self.expected[k]) class TestFilterBacteria(unittest.TestCase): """ class for testing bacteria relabelling """ def setUp(self): self.old_labels = np.array([ [0,0,0,0,0,0,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,0,0,0,0,0,0], ]) self.expected = np.array([ [0,0,0,0,0,0,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,1,1,1,1,1,0], [0,0,0,0,0,0,0], ]) self.expected_nothing = np.array([ [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], [0,0,0,0,0,0,0], ]) self.min_av_width = 1 self.minsize = 30 self.maxsize = 50 self.min_width_too_big = 4 self.minsize_too_big = 100 self.maxsize_too_small = 20 def test_simple_filter_nothing(self): """ Tests filtering, with nothing to remove """ result = mdet.filter_bacteria( self.old_labels, self.min_av_width, self.minsize, self.maxsize, )[0] np.testing.assert_array_equal(result, self.expected) def test_filter_area_too_small(self): """ Tests filter when area is too small """ result = mdet.filter_bacteria( self.old_labels, self.min_av_width, self.minsize_too_big, self.maxsize, )[0] np.testing.assert_array_equal(result, self.expected_nothing) def 
test_filter_area_too_big(self): """ Tests filter when area is too big """ result = mdet.filter_bacteria( self.old_labels, self.min_av_width, self.minsize, self.maxsize_too_small, )[0] np.testing.assert_array_equal(result, self.expected_nothing) def test_filter_too_narrow(self): """ Tests filter when bacteria is too narrow """ result = mdet.filter_bacteria( self.old_labels, self.min_width_too_big, self.minsize, self.maxsize, )[0] np.testing.assert_array_equal(result, self.expected_nothing) if __name__ == '__main__': unittest.main()
46.554338
83
0.343253
7,664
50,977
2.220903
0.051409
0.341931
0.444157
0.518418
0.698431
0.648963
0.626403
0.613419
0.59785
0.572352
0
0.253129
0.492242
50,977
1,094
84
46.596892
0.404458
0.075053
0
0.683662
0
0
0.004993
0.003465
0
0
0
0
0.045191
1
0.034762
false
0
0.00927
0
0.05562
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
68d68474da3452d1e66846c039d8ff1a4637dd3e
724
py
Python
tests/test_calc.py
Cymptom-Research/QA_candidate_task
2a88dac877a50b6e5f71c205e870abb1bace2757
[ "MIT" ]
null
null
null
tests/test_calc.py
Cymptom-Research/QA_candidate_task
2a88dac877a50b6e5f71c205e870abb1bace2757
[ "MIT" ]
null
null
null
tests/test_calc.py
Cymptom-Research/QA_candidate_task
2a88dac877a50b6e5f71c205e870abb1bace2757
[ "MIT" ]
1
2022-02-01T14:08:14.000Z
2022-02-01T14:08:14.000Z
#!/usr/bin/env python3 # ___ _ # / __\ _ _ __ ___ _ __ | |_ ___ _ __ ___ # / / | | | | '_ ` _ \| '_ \| __/ _ \| '_ ` _ \ # / /__| |_| | | | | | | |_) | || (_) | | | | | | # \____/\__, |_| |_| |_| .__/ \__\___/|_| |_| |_| # |___/ |_| # # # Author: Ziv Kaspersky <ziv@cymptom.com> on 19/11/2021 from calc import Calculator def test_add(): # test basic functionality assert Calculator.add(4, 5) == 9 # test addition with negative numbers assert Calculator.add(4, -5) == -1 assert Calculator.add(-4, 5) == 1 assert Calculator.add(-56, -47) == -101 # ? assert Calculator.add(0, 0) == 0 # ? assert Calculator.add(2 ** 36, 1) == 2 ** 36 + 1
26.814815
55
0.480663
68
724
4.191176
0.544118
0.336842
0.4
0.210526
0.294737
0.221053
0.221053
0.221053
0.221053
0
0
0.072435
0.313536
724
26
56
27.846154
0.501006
0.530387
0
0
0
0
0
0
0
0
0
0
0.75
1
0.125
true
0
0.125
0
0.25
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
5
68d992a87a5b79d1a9bcc5fea9d86c8e5859ac6b
684
py
Python
my-modules/assets/controllers/controllers.py
vinit-ww/odoo_app
af8458ae1ca125737826eda743a918ed3acd88f2
[ "Apache-2.0" ]
null
null
null
my-modules/assets/controllers/controllers.py
vinit-ww/odoo_app
af8458ae1ca125737826eda743a918ed3acd88f2
[ "Apache-2.0" ]
null
null
null
my-modules/assets/controllers/controllers.py
vinit-ww/odoo_app
af8458ae1ca125737826eda743a918ed3acd88f2
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- from odoo import http # class Assets(http.Controller): # @http.route('/assets/assets/', auth='public') # def index(self, **kw): # return "Hello, world" # @http.route('/assets/assets/objects/', auth='public') # def list(self, **kw): # return http.request.render('assets.listing', { # 'root': '/assets/assets', # 'objects': http.request.env['assets.assets'].search([]), # }) # @http.route('/assets/assets/objects/<model("assets.assets"):obj>/', auth='public') # def object(self, obj, **kw): # return http.request.render('assets.object', { # 'object': obj # })
34.2
88
0.549708
75
684
5.013333
0.413333
0.191489
0.119681
0.167553
0.31383
0.164894
0
0
0
0
0
0.001912
0.23538
684
20
89
34.2
0.717017
0.916667
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
68dd7173cff844eb49217dd7652b0ba8a13278ba
386
py
Python
vendors/__init__.py
Globaldots/s3-trigger-purge-edgecast
5cc9356858030d4d5b74b0070bcf492ae1972c2d
[ "MIT" ]
2
2018-04-08T18:52:41.000Z
2018-12-05T15:18:51.000Z
vendors/__init__.py
Globaldots/s3-trigger-purge-cdn
5cc9356858030d4d5b74b0070bcf492ae1972c2d
[ "MIT" ]
null
null
null
vendors/__init__.py
Globaldots/s3-trigger-purge-cdn
5cc9356858030d4d5b74b0070bcf492ae1972c2d
[ "MIT" ]
null
null
null
from akamai.akamaiclient import Akamai as akamai from edgecast.edgecastclient import Edgecast as edgecast from highwinds.highwindsclient import Highwinds as highwinds from cloudflare.cloudflareclient import Cloudflare as cloudflare from fastly.fastlyclient import Fastly as fastly from chinacache.chinacacheclient import Chinacache as chinacache # TODO complete cloudinary integration
42.888889
64
0.873057
46
386
7.326087
0.413043
0
0
0
0
0
0
0
0
0
0
0
0.108808
386
8
65
48.25
0.979651
0.093264
0
0
0
0
0
0
0
0
0
0.125
0
1
0
true
0
1
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
0
1
0
1
0
0
5
68e3127c47201853f06aa94e672ee9c432ccb052
87
py
Python
booked_api_client/exceptions.py
abdelkafiahmed/booked-python-api-client
6b1995a8ca3f0176d29a8dcdad47b6eddfd707dd
[ "BSD-2-Clause" ]
null
null
null
booked_api_client/exceptions.py
abdelkafiahmed/booked-python-api-client
6b1995a8ca3f0176d29a8dcdad47b6eddfd707dd
[ "BSD-2-Clause" ]
null
null
null
booked_api_client/exceptions.py
abdelkafiahmed/booked-python-api-client
6b1995a8ca3f0176d29a8dcdad47b6eddfd707dd
[ "BSD-2-Clause" ]
1
2021-06-18T14:06:00.000Z
2021-06-18T14:06:00.000Z
class AuthenticationError(Exception): pass class APICallError(Exception): pass
17.4
37
0.770115
8
87
8.375
0.625
0.38806
0
0
0
0
0
0
0
0
0
0
0.16092
87
5
38
17.4
0.917808
0
0
0.5
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
68f84298c453717c3748c6636f036129f173faca
89
py
Python
app/main/services/conversions.py
pebblecode/cirrus-marketplace-search-api
01b643e0c45a6ca9586751f489da09ff8593e8a9
[ "MIT" ]
null
null
null
app/main/services/conversions.py
pebblecode/cirrus-marketplace-search-api
01b643e0c45a6ca9586751f489da09ff8593e8a9
[ "MIT" ]
null
null
null
app/main/services/conversions.py
pebblecode/cirrus-marketplace-search-api
01b643e0c45a6ca9586751f489da09ff8593e8a9
[ "MIT" ]
null
null
null
import re def strip_and_lowercase(value): return re.sub(r'\W+', '', value).lower()
14.833333
44
0.651685
14
89
4
0.857143
0
0
0
0
0
0
0
0
0
0
0
0.157303
89
5
45
17.8
0.746667
0
0
0
0
0
0.033708
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
0
0
0
5
ec1d8ef0b9a82f4b9a9e01206f8b88355f246846
4,857
py
Python
src/raport_slotow/tests/test_raport_slotow_autor/test_raport_slotow_autor.py
iplweb/django-bpp
85f183a99d8d5027ae4772efac1e4a9f21675849
[ "BSD-3-Clause" ]
1
2017-04-27T19:50:02.000Z
2017-04-27T19:50:02.000Z
src/raport_slotow/tests/test_raport_slotow_autor/test_raport_slotow_autor.py
mpasternak/django-bpp
434338821d5ad1aaee598f6327151aba0af66f5e
[ "BSD-3-Clause" ]
41
2019-11-07T00:07:02.000Z
2022-02-27T22:09:39.000Z
src/raport_slotow/tests/test_raport_slotow_autor/test_raport_slotow_autor.py
iplweb/bpp
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
[ "BSD-3-Clause" ]
null
null
null
from io import BytesIO import PyPDF2 import pytest from django.urls import reverse from openpyxl import load_workbook from raport_slotow import const from raport_slotow.forms import AutorRaportSlotowForm from raport_slotow.views import SESSION_KEY def test_raport_slotow_formularz(admin_client): res = admin_client.get(reverse("raport_slotow:index")) assert res.status_code == 200 def test_raport_slotow_autor_brak_danych(admin_client, autor_jan_kowalski, rok): url = reverse( "raport_slotow:raport", ) dane_raportu = { "obiekt": autor_jan_kowalski.pk, "od_roku": rok, "do_roku": rok, "dzialanie": const.DZIALANIE_WSZYSTKO, "minimalny_pk": 0, "slot": None, "_export": "html", } form = AutorRaportSlotowForm(dane_raportu) assert form.is_valid(), form._errors s = admin_client.session s.update({SESSION_KEY: dane_raportu}) s.save() res = admin_client.get(url) assert res.status_code == 200 assert "Brak danych" in res.rendered_content res = admin_client.get(url + "?_export=xlsx") assert res.status_code == 200 wb = load_workbook(BytesIO(res.content)) assert len(wb.get_sheet_names()) > 0 def test_raport_slotow_autor_sa_dane_eksport_wszystkiego( admin_client, autor_jan_kowalski, rekord_slotu, rok, ): url = reverse("raport_slotow:raport") dane_raportu = { "obiekt": autor_jan_kowalski.pk, "od_roku": rok, "do_roku": rok, "dzialanie": const.DZIALANIE_WSZYSTKO, "minimalny_pk": 0, "slot": None, "_export": "html", } s = admin_client.session s.update({SESSION_KEY: dane_raportu}) s.save() res = admin_client.get(url) assert res.status_code == 200 assert "Brak danych" not in res.rendered_content res = admin_client.get(url + "?_export=xlsx") assert res.status_code == 200 wb = load_workbook(BytesIO(res.content)) assert len(wb.get_sheet_names()) > 0 def test_raport_slotow_autor_sa_dane_eksport_wszystkiego_do_pdf( admin_client, autor_jan_kowalski, rekord_slotu, rok, ): url = reverse("raport_slotow:raport") dane_raportu = { "obiekt": autor_jan_kowalski.pk, "od_roku": rok, "do_roku": rok, 
"dzialanie": const.DZIALANIE_WSZYSTKO, "minimalny_pk": 0, "slot": None, "_export": "html", } s = admin_client.session s.update({SESSION_KEY: dane_raportu}) s.save() res = admin_client.get(url + "?_export=pdf") assert res.status_code == 200 pdfReader = PyPDF2.PdfFileReader(BytesIO(res.content)) assert pdfReader.numPages >= 1 def test_raport_slotow_autor_zbieraj_slot( admin_client, autor_jan_kowalski, rekord_slotu, rok ): url = reverse("raport_slotow:raport") dane_raportu = { "obiekt": autor_jan_kowalski.pk, "od_roku": rok, "do_roku": rok, "dzialanie": const.DZIALANIE_SLOT, "minimalny_pk": 0, "slot": 20, "_export": "html", } s = admin_client.session s.update({SESSION_KEY: dane_raportu}) s.save() res = admin_client.get(url) assert res.status_code == 200 assert "Brak danych" not in res.rendered_content res = admin_client.get(url + "?_export=xlsx") assert res.status_code == 200 wb = load_workbook(BytesIO(res.content)) assert len(wb.get_sheet_names()) > 0 def test_raport_slotow_autor_wartosci_poczatkowe(admin_client): url = reverse("raport_slotow:index") res = admin_client.get(url, dict(od_roku=5000)) assert b"5000" in res.content @pytest.mark.parametrize( "dzialanie,slot", [(const.DZIALANIE_WSZYSTKO, None), (const.DZIALANIE_SLOT, 20)] ) def test_raport_slotow_autor_sa_dane_minimalny_pk( admin_client, autor_jan_kowalski, rekord_slotu, rok, dzialanie, slot ): w = rekord_slotu.rekord w.punkty_pk = 10 w.save() url = reverse("raport_slotow:raport") dane_raportu = { "obiekt": autor_jan_kowalski.pk, "od_roku": rok, "do_roku": rok, "dzialanie": dzialanie, "minimalny_pk": 0, "slot": slot, "_export": "html", } s = admin_client.session s.update({SESSION_KEY: dane_raportu}) s.save() res = admin_client.get(url) assert res.status_code == 200 assert "Brak danych" not in res.rendered_content dane_raportu = { "obiekt": autor_jan_kowalski.pk, "od_roku": rok, "do_roku": rok, "dzialanie": dzialanie, "minimalny_pk": 200, "slot": slot, "_export": "html", } s = admin_client.session 
s.update({SESSION_KEY: dane_raportu}) s.save() res = admin_client.get(url) assert res.status_code == 200 assert "Brak danych" in res.rendered_content
25.973262
84
0.654725
623
4,857
4.807384
0.147673
0.088147
0.051419
0.062437
0.762938
0.716528
0.714524
0.704508
0.690818
0.690818
0
0.015529
0.231007
4,857
186
85
26.112903
0.786345
0
0
0.710526
0
0
0.123121
0
0
0
0
0
0.138158
1
0.046053
false
0
0.052632
0
0.098684
0
0
0
0
null
0
0
0
0
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
ec23453bfe4b1d06d03014a27747bf0f9024512b
141
py
Python
metriq/__init__.py
unitaryfund/metriq-client
7d8831d5015baa490ec77a04ea704d2e9aa9d8d0
[ "Apache-2.0" ]
null
null
null
metriq/__init__.py
unitaryfund/metriq-client
7d8831d5015baa490ec77a04ea704d2e9aa9d8d0
[ "Apache-2.0" ]
null
null
null
metriq/__init__.py
unitaryfund/metriq-client
7d8831d5015baa490ec77a04ea704d2e9aa9d8d0
[ "Apache-2.0" ]
null
null
null
__all__ = ["MetriqClient", "version", "__version__"] from metriq.client import MetriqClient from metriq.version import version, __version__
28.2
52
0.794326
15
141
6.666667
0.466667
0.28
0
0
0
0
0
0
0
0
0
0
0.106383
141
4
53
35.25
0.793651
0
0
0
0
0
0.212766
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
ec33984c3e3175f1c43fdbb188ad4d57c58e335c
221
py
Python
base/pylib/none.py
jpolitz/lambda-py-paper
746ef63fc1123714b4adaf78119028afbea7bd76
[ "Apache-2.0" ]
25
2015-04-16T04:31:49.000Z
2022-03-10T15:53:28.000Z
base/pylib/none.py
jpolitz/lambda-py-paper
746ef63fc1123714b4adaf78119028afbea7bd76
[ "Apache-2.0" ]
1
2018-11-21T22:40:02.000Z
2018-11-26T17:53:11.000Z
base/pylib/none.py
jpolitz/lambda-py-paper
746ef63fc1123714b4adaf78119028afbea7bd76
[ "Apache-2.0" ]
1
2021-03-26T03:36:19.000Z
2021-03-26T03:36:19.000Z
class NoneType(object): def __new__(self, *args): return None def __init__(self, *args): pass def __bool__(self): return False def __str__(self): return "None" ___assign("%NoneType", NoneType)
14.733333
32
0.656109
27
221
4.666667
0.555556
0.126984
0
0
0
0
0
0
0
0
0
0
0.221719
221
14
33
15.785714
0.732558
0
0
0
0
0
0.058824
0
0
0
0
0
0
1
0.4
false
0.1
0
0.3
0.8
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
1
0
1
1
0
0
5
6b8c8bc201525c06a544583d2122abe2b8024ccb
102
py
Python
sqlbucket/__init__.py
sp-anna-jones/sqlbucket
a9dda3ad0f8594c16e02f7a293b084b809920e92
[ "MIT" ]
null
null
null
sqlbucket/__init__.py
sp-anna-jones/sqlbucket
a9dda3ad0f8594c16e02f7a293b084b809920e92
[ "MIT" ]
null
null
null
sqlbucket/__init__.py
sp-anna-jones/sqlbucket
a9dda3ad0f8594c16e02f7a293b084b809920e92
[ "MIT" ]
null
null
null
__version__ = "0.3.0" from sqlbucket.core import SQLBucket from sqlbucket.project import Project
11.333333
37
0.77451
14
102
5.357143
0.571429
0.346667
0
0
0
0
0
0
0
0
0
0.034884
0.156863
102
8
38
12.75
0.837209
0
0
0
0
0
0.050505
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
6bda686042217cdad58b6e731ee1c83afc300202
88
py
Python
iscan/tests/test_modules/relative.py
ZhengnanZhao/importscanner
75ef3b10383b54a5d318a15d5a85c7fcb4ff762d
[ "MIT" ]
3
2021-04-15T14:11:28.000Z
2022-02-06T14:28:33.000Z
iscan/tests/test_modules/relative.py
zzhengnan/iscan
75ef3b10383b54a5d318a15d5a85c7fcb4ff762d
[ "MIT" ]
null
null
null
iscan/tests/test_modules/relative.py
zzhengnan/iscan
75ef3b10383b54a5d318a15d5a85c7fcb4ff762d
[ "MIT" ]
null
null
null
from ..grandparentutils import baz from ..parentutils import bar from .utils import foo
22
34
0.806818
12
88
5.916667
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.136364
88
3
35
29.333333
0.934211
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
6bf8ed61d9a8d59a0cac9e01d230e8a1e3f93410
328
py
Python
tests/test_prepare_data.py
ayasyrev/dup_finder
f5b9dca06bf183b016ea23c7f7fa1bb3adecac9e
[ "Apache-2.0" ]
null
null
null
tests/test_prepare_data.py
ayasyrev/dup_finder
f5b9dca06bf183b016ea23c7f7fa1bb3adecac9e
[ "Apache-2.0" ]
1
2020-11-13T13:03:55.000Z
2020-11-13T13:10:52.000Z
tests/test_prepare_data.py
ayasyrev/dup_finder
f5b9dca06bf183b016ea23c7f7fa1bb3adecac9e
[ "Apache-2.0" ]
null
null
null
import pathlib from dup_finder.prepare_test_data import TEST_DATA_PATH, TEST_ROOT, PACKAGE_ROOT, LIB_ROOT def test_path_names(): assert type(TEST_ROOT) == pathlib.PosixPath assert type(TEST_DATA_PATH) == pathlib.PosixPath assert type(PACKAGE_ROOT) == pathlib.PosixPath assert type(LIB_ROOT) == pathlib.PosixPath
41
90
0.786585
47
328
5.170213
0.361702
0.164609
0.246914
0.320988
0.246914
0
0
0
0
0
0
0
0.131098
328
8
91
41
0.852632
0
0
0
0
0
0
0
0
0
0
0
0.571429
1
0.142857
true
0
0.285714
0
0.428571
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
1
0
0
0
0
0
0
5
d46145f461d9d7deda5700cc4f0bf26477023c4a
860
py
Python
tests/integration_tests/foo_tests.py
FireXStuff/firex-bundle-foobar
e6d27987b880674e50470931c117aadc278e66d8
[ "BSD-3-Clause" ]
2
2019-04-05T14:14:34.000Z
2019-04-24T19:59:55.000Z
tests/integration_tests/foo_tests.py
FireXStuff/firex-bundle-foobar
e6d27987b880674e50470931c117aadc278e66d8
[ "BSD-3-Clause" ]
null
null
null
tests/integration_tests/foo_tests.py
FireXStuff/firex-bundle-foobar
e6d27987b880674e50470931c117aadc278e66d8
[ "BSD-3-Clause" ]
1
2019-04-05T14:15:10.000Z
2019-04-05T14:15:10.000Z
from firexapp.testing.config_base import FlowTestConfiguration, assert_is_good_run class MyFooDefaultTest(FlowTestConfiguration): def initial_firex_options(self) -> list: return ["submit", "--chain", "foo,bar"] def assert_expected_firex_output(self, cmd_output, cmd_err): assert "defeat No success!!!" in cmd_output def assert_expected_return_code(self, ret_value): assert_is_good_run(ret_value) class DefineMyOwnSuccessTest(FlowTestConfiguration): def initial_firex_options(self) -> list: return ["submit", "--chain", "foo,bar", "--define_success", "not likely"] def assert_expected_firex_output(self, cmd_output, cmd_err): assert "defeat No not likely" in cmd_output, "Good try... but no" def assert_expected_return_code(self, ret_value): assert_is_good_run(ret_value)
34.4
82
0.723256
110
860
5.318182
0.372727
0.061538
0.116239
0.076923
0.673504
0.673504
0.673504
0.673504
0.673504
0.673504
0
0
0.174419
860
24
83
35.833333
0.823944
0
0
0.533333
0
0
0.153667
0
0
0
0
0
0.6
1
0.4
false
0
0.066667
0.133333
0.733333
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
1
0
0
0
1
1
0
0
5
2e5543171ff5a6308629d1195e29d7f1f007d04b
4,366
py
Python
baidu_fy/baidu_fy.py
vleij/-
2b0f7bb91025b57e370ba4bfe7fe259a3ab78360
[ "Apache-2.0" ]
null
null
null
baidu_fy/baidu_fy.py
vleij/-
2b0f7bb91025b57e370ba4bfe7fe259a3ab78360
[ "Apache-2.0" ]
null
null
null
baidu_fy/baidu_fy.py
vleij/-
2b0f7bb91025b57e370ba4bfe7fe259a3ab78360
[ "Apache-2.0" ]
null
null
null
import execjs import requests with open('baiodu_fy.js', encoding='utf-8') as f: js_code = f.read() node = execjs.get() #编译js代码 ctx = node.compile(js_code) #compile方法去加载js代码,参数cwd指定本地安装模块所在目录 search = '你好呀' data1 = ctx.eval('data("'+search+'")') #eval方法中,整个函数调用包含在字符串内 data1['query'] = search headers = { 'Host':'fanyi.baidu.com', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36', 'X-Requested-With':'XMLHttpRequest', 'Origin': 'https://fanyi.baidu.com', 'Referer': 'https://fanyi.baidu.com/translate?aldtype=16047&query=%E4%BD%A0%E5%A5%BD&keyfrom=baidu&smartresult=dict&lang=auto2zh', 'Cookie':'PSTM=1625158021; BAIDUID=226BB1684C9766B09993FCD1A713C35C:FG=1; BIDUPSID=6FE83BD85D2A96B001DAA1FD295A1AA5; __yjs_duid=1_2253a89fdac985131d9133c4a173b2d81625489822668; REALTIME_TRANS_SWITCH=1; FANYI_WORD_SWITCH=1; HISTORY_SWITCH=1; SOUND_SPD_SWITCH=1; SOUND_PREFER_SWITCH=1; BDUSS_BFESS=FpQOEQ3a1dyRXhvY2M2SXlqb21nTUUtUjJ4eVFoeUdoUzZCWjE3V2l5UGVMWVZoRUFBQUFBJCQAAAAAAAAAAAEAAAAGNGeutPPAx7m32K8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAN6gXWHeoF1he; BDSFRCVID=DdAOJeC62lQ-nZ3HcqLwesrXsg0IRInTH6aoO5DD4o1we3OU9p85EG0PKx8g0Kubz7pbogKKLmOTHp-F_2uxOjjg8UtVJeC6EG0Ptf8g0f5; 
H_BDCLCKID_SF=JJ4eoIKbfIvbfP0k5PoEb-F_hmT22-us0-jR2hcHMPoosIJXXKcCybFqXlO-bbj8LIjiaKJjBMbUotoH2RbEhl5yQh3ianQp52jb_h5TtUJMbb3dLnoMqfAnMMTyKMniBIj9-pnMHlQrh459XP68bTkA5bjZKxtq3mkjbPbDfn028DKuj605D5QyeaRabK6aKC5bL6rJabC3hI3JXU6q2bDeQN3kWIrNWT5B2T6H--oBVb6oyT3JXp0vWq54WbbvLT7johRTWqR4ep6gjUonDh83eMJ33bQJHmJ7BnrO5hvv8b6O3M72qfKmDloOW-TB5bbPLUQF5l8-sq0x0bOte-bQXH_EJ58qJbIJ_DDQ5RbhKCKl-nOoq4tehHRx3R39WDTm_DonBtONoJcSK6OvW-LyKR7kK6OALe5X-pPKKRABVR3XLtPbXTkFyH525M5Q3mkjbUODfn02OP5PetorDt4syPRr2xRnWT4LKfA-b4ncjRcTehoM3xI8LNj405OTbIFO0KJDJCFabKtCjTAKDTPyMMrL2K60aR3Hbx7vWJ5TMCoz2q7ObP4nQPvzJ-Qe-j5-0-5TX-TKShPC-qjM3q0Z3MCDBnoN025Gon0h3l02VMQEe-t2ynLV34QB0PRMW23rWl7mWPtVsxA45J7cM4IseboJLfT-0bc4KKJxbnLWeIJIjjCKjj3QjHD8JTn-5TIX_CjJbnA_Hn7zeUOfLf4pbt-qJt7IHCozohnJ-lRbetO4WbbJDMFwyfTnBT5Ka28tKp-2aKjbeDOO3jr2e5LkQN3TBbkO5bRiLRoLJ-JDDn3oyUvVXp0nK2cly5jtMgOBBJ0yQ4b4OR5JjxonDh83bG7MJPKtfD7H3KChtI0hbf5; BDSFRCVID_BFESS=DdAOJeC62lQ-nZ3HcqLwesrXsg0IRInTH6aoO5DD4o1we3OU9p85EG0PKx8g0Kubz7pbogKKLmOTHp-F_2uxOjjg8UtVJeC6EG0Ptf8g0f5; H_BDCLCKID_SF_BFESS=JJ4eoIKbfIvbfP0k5PoEb-F_hmT22-us0-jR2hcHMPoosIJXXKcCybFqXlO-bbj8LIjiaKJjBMbUotoH2RbEhl5yQh3ianQp52jb_h5TtUJMbb3dLnoMqfAnMMTyKMniBIj9-pnMHlQrh459XP68bTkA5bjZKxtq3mkjbPbDfn028DKuj605D5QyeaRabK6aKC5bL6rJabC3hI3JXU6q2bDeQN3kWIrNWT5B2T6H--oBVb6oyT3JXp0vWq54WbbvLT7johRTWqR4ep6gjUonDh83eMJ33bQJHmJ7BnrO5hvv8b6O3M72qfKmDloOW-TB5bbPLUQF5l8-sq0x0bOte-bQXH_EJ58qJbIJ_DDQ5RbhKCKl-nOoq4tehHRx3R39WDTm_DonBtONoJcSK6OvW-LyKR7kK6OALe5X-pPKKRABVR3XLtPbXTkFyH525M5Q3mkjbUODfn02OP5PetorDt4syPRr2xRnWT4LKfA-b4ncjRcTehoM3xI8LNj405OTbIFO0KJDJCFabKtCjTAKDTPyMMrL2K60aR3Hbx7vWJ5TMCoz2q7ObP4nQPvzJ-Qe-j5-0-5TX-TKShPC-qjM3q0Z3MCDBnoN025Gon0h3l02VMQEe-t2ynLV34QB0PRMW23rWl7mWPtVsxA45J7cM4IseboJLfT-0bc4KKJxbnLWeIJIjjCKjj3QjHD8JTn-5TIX_CjJbnA_Hn7zeUOfLf4pbt-qJt7IHCozohnJ-lRbetO4WbbJDMFwyfTnBT5Ka28tKp-2aKjbeDOO3jr2e5LkQN3TBbkO5bRiLRoLJ-JDDn3oyUvVXp0nK2cly5jtMgOBBJ0yQ4b4OR5JjxonDh83bG7MJPKtfD7H3KChtI0hbf5; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; ZD_ENTRY=empty; delPer=0; PSINO=7; 
Hm_lvt_64ecd82404c51e03dc91cb9e8c025574=1641620515; Hm_lpvt_64ecd82404c51e03dc91cb9e8c025574=1641620515; APPGUIDE_10_0_2=1; __yjs_st=2_MzM3MTQ0NGJlZjViZGNmMjhhMTY0NjhkNTg0YWFlY2M3OTJjNGUyZjAwZWFjOGY1MTIwNWI0MzcwOGIzMzZkMGFmOWQxYmRlOWY2NTYyOGY3YTJkMGEwM2YxNTM2ZDFiOTBkY2YzZWNiYTE1NDU1ZGJjZWY5NmY1NWJjZDVmN2IwZmNlNzRlMmNkY2I0Yjg0MmU2ZjAwZTc0MTU5MzExYzM1ZjBiNGNjZGMxM2RhYmZhNjhjNTczY2UxOTEyOTQ3M2E4ZWNiZTNlNGNiNTQzZDM3Zjc3MDdlYmI2OWY5MWFiN2IwMmUxZmZiZjUwMGEzMTQ2NWFkNTI0MzY2OWZlYV83XzllMzRhMjg0; ab_sr=1.0.1_ZTA0MmUwOTlkM2ZhY2JmZTdhM2JmM2UwMDVlZWJmZmJjMGE5NTUxNjVmMTk1ZjFjMDUwM2FhNjM2N2I2NmUwY2FiN2QzNGI2NzRjOTkyMDRhZGVjYTA5MDRmZTc0OTA4YmIyODAzODZlNWYyNjAwMWJiNmRhOGU2YjNiZGQ4YWVjZWFhNzMyNmQ3OTIzYzBlZWFhNGY5NzVhZTI1MWZlZTc5MjIwZGYzNTQ1MGNlMDY2YzAwNzc4MmNkNWRlYTFk; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; H_PS_PSSID=35106_31254_35489_35604_35456_34584_35490_35700_34813_35664_35321_26350_22158; BA_HECTOR=8h04850ha501a0aksf1gtid4j0r', } r = requests.post("https://fanyi.baidu.com/v2transapi?from=zh&to=en",data=data1,headers=headers) print(r.text)
189.826087
3,538
0.905405
295
4,366
13.169492
0.637288
0.009009
0.013385
0.0139
0.494208
0.494208
0.494208
0.435006
0.435006
0.435006
0
0.179669
0.03115
4,366
22
3,539
198.454545
0.738771
0.013972
0
0
0
0.157895
0.918906
0.788502
0
0
0
0
0
1
0
false
0
0.105263
0
0.105263
0.052632
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
1
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
2e80040fac53e82ed710eeb1045512689988b8c3
233
py
Python
PEPit/examples/low_dimensional_worst_cases_scenarios/__init__.py
PerformanceEstimation/PEPit
7005bc9a9da11dea448966437365c897734ec341
[ "MIT" ]
1
2022-03-30T11:18:37.000Z
2022-03-30T11:18:37.000Z
PEPit/examples/low_dimensional_worst_cases_scenarios/__init__.py
PerformanceEstimation/PEPit
7005bc9a9da11dea448966437365c897734ec341
[ "MIT" ]
1
2022-02-23T10:26:38.000Z
2022-02-23T10:26:38.000Z
PEPit/examples/low_dimensional_worst_cases_scenarios/__init__.py
PerformanceEstimation/PEPit
7005bc9a9da11dea448966437365c897734ec341
[ "MIT" ]
null
null
null
from .inexact_gradient import wc_inexact_gradient from .optimized_gradient import wc_optimized_gradient __all__ = ['inexact_gradient', 'wc_inexact_gradient', 'optimized_gradient.py', 'wc_optimized_gradient', ]
33.285714
60
0.759657
26
233
6.192308
0.307692
0.372671
0.198758
0
0
0
0
0
0
0
0
0
0.16309
233
6
61
38.833333
0.825641
0
0
0
0
0
0.330472
0.180258
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
5cf6bf0b3d2c488b0c4a9e08aab9a9fde852eea6
530
py
Python
intents/connectors/dialogflow_es/names.py
dario-chiappetta/dialogflow_agents
ecb03bdce491a3c9d6769816507f3027fd5a60d1
[ "Apache-2.0" ]
6
2021-06-24T12:22:21.000Z
2021-07-21T21:06:19.000Z
intents/connectors/dialogflow_es/names.py
dario-chiappetta/dialogflow_agents
ecb03bdce491a3c9d6769816507f3027fd5a60d1
[ "Apache-2.0" ]
27
2021-06-05T10:41:08.000Z
2021-11-01T17:29:38.000Z
intents/connectors/dialogflow_es/names.py
dariowho/intents
ecb03bdce491a3c9d6769816507f3027fd5a60d1
[ "Apache-2.0" ]
null
null
null
from typing import Type from intents import Intent from intents.helpers.misc import camel_to_snake_case def context_name(intent_cls: Type[Intent]) -> str: return "c_" + camel_to_snake_case(intent_cls.name.replace(".", "_")) # TODO: refine def event_name(intent_cls: Type[Intent]) -> str: """ Generate the default event name that we associate with every intent. >>> event_name('test.intent_name') 'E_TEST_INTENT_NAME' """ return "E_" + camel_to_snake_case(intent_cls.name.replace(".", "_")).upper()
31.176471
87
0.711321
76
530
4.644737
0.447368
0.101983
0.101983
0.135977
0.351275
0.351275
0.203966
0.203966
0
0
0
0
0.156604
530
16
88
33.125
0.789709
0.262264
0
0
0
0
0.021798
0
0
0
0
0.0625
0
1
0.285714
false
0
0.428571
0.142857
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
1
0
0
1
1
1
0
0
5
cf3374eb700fea99113bffe97a268a36cf5e84b6
68
py
Python
models/segmentation/unet/__init__.py
neurips2021vat/Variance-Aware-Training
2dcd017ef06e81e299448bdd9da65fa682835127
[ "BSD-2-Clause" ]
null
null
null
models/segmentation/unet/__init__.py
neurips2021vat/Variance-Aware-Training
2dcd017ef06e81e299448bdd9da65fa682835127
[ "BSD-2-Clause" ]
null
null
null
models/segmentation/unet/__init__.py
neurips2021vat/Variance-Aware-Training
2dcd017ef06e81e299448bdd9da65fa682835127
[ "BSD-2-Clause" ]
null
null
null
from models.segmentation.unet.model import Model # pyflakes.ignore
34
67
0.823529
9
68
6.222222
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.102941
68
1
68
68
0.918033
0.220588
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cf7c89d4bc4a19c01768bccdde351e1c9b7ad481
40
py
Python
tests/databases/__init__.py
sethfischer/mundo-flags
20e5ad68d760b6c736701f6e43551c738456098d
[ "MIT" ]
null
null
null
tests/databases/__init__.py
sethfischer/mundo-flags
20e5ad68d760b6c736701f6e43551c738456098d
[ "MIT" ]
1
2021-09-06T01:48:18.000Z
2021-09-06T08:47:36.000Z
tests/databases/__init__.py
sethfischer/mundo-flags
20e5ad68d760b6c736701f6e43551c738456098d
[ "MIT" ]
null
null
null
"""Tests for manage_flags databases."""
20
39
0.725
5
40
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.1
40
1
40
40
0.777778
0.825
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
d84270c6cec886ec27e64f92ea306be0fa13d1db
250
py
Python
pymarket/conftest.py
taqtiqa-mark/pymarket
2f8db92010d5f9407a72941788500351e92cbe81
[ "MIT" ]
null
null
null
pymarket/conftest.py
taqtiqa-mark/pymarket
2f8db92010d5f9407a72941788500351e92cbe81
[ "MIT" ]
null
null
null
pymarket/conftest.py
taqtiqa-mark/pymarket
2f8db92010d5f9407a72941788500351e92cbe81
[ "MIT" ]
null
null
null
import numpy import pandas import pymarket import pytest @pytest.fixture(autouse=True) def add_namespace(doctest_namespace): doctest_namespace['np'] = numpy doctest_namespace['pd'] = pandas doctest_namespace['pm'] = pymarket
22.727273
42
0.728
29
250
6.103448
0.517241
0.361582
0.282486
0
0
0
0
0
0
0
0
0
0.18
250
10
43
25
0.863415
0
0
0
0
0
0.024
0
0
0
0
0
0
1
0.111111
false
0
0.444444
0
0.555556
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
d881fd1c7e59b4bcec3ccbd0b8558e306a842cba
252
py
Python
6 kyu/Drunk friend.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
6
2020-09-03T09:32:25.000Z
2020-12-07T04:10:01.000Z
6 kyu/Drunk friend.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
1
2021-12-13T15:30:21.000Z
2021-12-13T15:30:21.000Z
6 kyu/Drunk friend.py
mwk0408/codewars_solutions
9b4f502b5f159e68024d494e19a96a226acad5e5
[ "MIT" ]
null
null
null
def decode(string_): if not isinstance(string_, str): return "Input is not a string" return string_.translate(str.maketrans("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ", "zyxwvutsrqponmlkjihgfedcbaZYXWVUTSRQPONMLKJIHGFEDCBA"))
63
155
0.801587
21
252
9.47619
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.119048
252
4
155
63
0.896396
0
0
0
0
0
0.494071
0.411067
0
0
0
0
0
1
0.25
false
0
0
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
1
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
d8ef151827cad5109913a30783c7723e28a22f0d
34
py
Python
unleash/__init__.py
SerpentAI/Unleash
add22233f5280e462d6410bfc9ccc3f38b7d4a78
[ "MIT" ]
1
2020-06-10T06:39:22.000Z
2020-06-10T06:39:22.000Z
unleash/__init__.py
SerpentAI/Unleash
add22233f5280e462d6410bfc9ccc3f38b7d4a78
[ "MIT" ]
null
null
null
unleash/__init__.py
SerpentAI/Unleash
add22233f5280e462d6410bfc9ccc3f38b7d4a78
[ "MIT" ]
3
2021-09-05T21:49:40.000Z
2021-10-01T12:24:57.000Z
from unleash.logger import logger
17
33
0.852941
5
34
5.8
0.8
0
0
0
0
0
0
0
0
0
0
0
0.117647
34
1
34
34
0.966667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
d8f0a52563ffa96892286e741539c87ce3599c9f
1,239
py
Python
home/models.py
StoneMasons4106/clay-cabinet
6defd2fcf55d5589777d2e92154668344e923b52
[ "MIT" ]
null
null
null
home/models.py
StoneMasons4106/clay-cabinet
6defd2fcf55d5589777d2e92154668344e923b52
[ "MIT" ]
null
null
null
home/models.py
StoneMasons4106/clay-cabinet
6defd2fcf55d5589777d2e92154668344e923b52
[ "MIT" ]
null
null
null
from django.db import models # Create your models here. class HomePagePicture(models.Model): name = models.CharField(max_length=254) image = models.ImageField(null=True, blank=True) title = models.CharField(max_length=254, null=True, blank=True) description = models.CharField(max_length=254, null=True, blank=True) def __str__(self): return self.name class Testimonial(models.Model): name = models.CharField(max_length=254) image = models.ImageField(null=True, blank=True) testimonial = models.TextField(max_length=2048) date = models.DateField(null=True, blank=True) def __str__(self): return self.name class Content(models.Model): name = models.CharField(max_length=254) banner_text = models.CharField(max_length=254) gallery_title = models.CharField(max_length=254) gallery_text = models.CharField(max_length=254) video_title = models.CharField(max_length=254) video_content = models.CharField(max_length=100000, null=True, blank=True) video_text = models.CharField(max_length=254) testimonial_title = models.CharField(max_length=254) testimonial_text = models.CharField(max_length=254) def __str__(self): return self.name
33.486486
78
0.734463
164
1,239
5.341463
0.231707
0.143836
0.267123
0.356164
0.753425
0.753425
0.416667
0.416667
0.368721
0.287671
0
0.044231
0.160613
1,239
37
79
33.486486
0.798077
0.01937
0
0.407407
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0.037037
0.111111
1
0
0
0
0
null
0
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
1
0
0
5
2b569f8a7fe3ff47329054fe5a502067ff7a3836
223
py
Python
tests/urls.py
chewse/rest_condition
213e504a930a6c76331df936dbf998efa2450bb3
[ "MIT" ]
250
2015-01-07T14:37:32.000Z
2022-03-29T15:15:57.000Z
tests/urls.py
chewse/djangorestframework-signed-permissions
b1cc4c57999fc5be8361f60f0ada1d777b27feab
[ "MIT" ]
10
2015-04-06T18:38:07.000Z
2020-09-10T08:48:26.000Z
tests/urls.py
chewse/djangorestframework-signed-permissions
b1cc4c57999fc5be8361f60f0ada1d777b27feab
[ "MIT" ]
26
2015-08-10T14:17:06.000Z
2022-03-25T12:31:52.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- try: from django.conf.urls import include, patterns, url except ImportError: from django.conf.urls.defaults import include, patterns, url urlpatterns = patterns('', )
22.3
64
0.699552
29
223
5.37931
0.689655
0.128205
0.179487
0.230769
0
0
0
0
0
0
0
0.005348
0.161435
223
9
65
24.777778
0.828877
0.188341
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.6
0
0.6
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
2b5b668061af93827be5d4c184e169822aba151e
228
py
Python
Python/empire/aws/s3/storage_classes.py
Tombmyst/Empire
f28782787c5fa9127e353549b73ec90d3c82c003
[ "Apache-2.0" ]
null
null
null
Python/empire/aws/s3/storage_classes.py
Tombmyst/Empire
f28782787c5fa9127e353549b73ec90d3c82c003
[ "Apache-2.0" ]
null
null
null
Python/empire/aws/s3/storage_classes.py
Tombmyst/Empire
f28782787c5fa9127e353549b73ec90d3c82c003
[ "Apache-2.0" ]
null
null
null
from empire.python.typings import * class StorageClasses: STANDARD: Final[str] = 'STANDARD' @staticmethod def values() -> List[str]: return list(StorageClasses.__dict__['__annotations__'].keys())
22.8
71
0.662281
22
228
6.5
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.214912
228
9
72
25.333333
0.798883
0
0
0
0
0
0.105023
0
0
0
0
0
0
1
0.166667
true
0
0.166667
0.166667
0.833333
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
1
1
0
0
5
2b5d08180f1f8bddc00238e916e34bcfee23f15c
27
py
Python
bin/__init__.py
josephmje/datman
c18bbbbe11b679d3535d02edb6711c76a891a350
[ "Apache-2.0" ]
17
2015-09-08T13:56:40.000Z
2022-01-20T19:09:33.000Z
bin/__init__.py
josephmje/datman
c18bbbbe11b679d3535d02edb6711c76a891a350
[ "Apache-2.0" ]
169
2015-02-23T23:11:15.000Z
2022-03-28T20:32:22.000Z
bin/__init__.py
josephmje/datman
c18bbbbe11b679d3535d02edb6711c76a891a350
[ "Apache-2.0" ]
21
2015-09-15T16:22:44.000Z
2021-11-05T19:03:02.000Z
# Needed for tests to work
13.5
26
0.740741
5
27
4
1
0
0
0
0
0
0
0
0
0
0
0
0.222222
27
1
27
27
0.952381
0.888889
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
2b725d0cb4b8aa42c33bcd24e1c04091070ef136
149
py
Python
gpytorch/constraints/__init__.py
techshot25/gpytorch
b4aee6f81a3428172d4914e7e0fef0e71cd1f519
[ "MIT" ]
2
2019-04-19T00:35:49.000Z
2019-04-19T02:51:49.000Z
gpytorch/constraints/__init__.py
VonRosenchild/gpytorch
092d523027a844939ba85d7ea8c8c7b7511843d5
[ "MIT" ]
null
null
null
gpytorch/constraints/__init__.py
VonRosenchild/gpytorch
092d523027a844939ba85d7ea8c8c7b7511843d5
[ "MIT" ]
1
2019-04-19T00:42:35.000Z
2019-04-19T00:42:35.000Z
from .constraints import GreaterThan, Interval, LessThan, Positive __all__ = [ "GreaterThan", "Interval", "LessThan", "Positive", ]
16.555556
66
0.657718
12
149
7.833333
0.666667
0.404255
0.574468
0.744681
0
0
0
0
0
0
0
0
0.214765
149
8
67
18.625
0.803419
0
0
0
0
0
0.234899
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
9918e8785b560f6c5562e9fc93b33ebba0d19aff
46
py
Python
MicroRegEx/PatternSyntaxError.py
howl-anderson/MicroRegEx
2bfe48f1ac018398e1e77e7f1a6f5c64771399ca
[ "MIT" ]
44
2017-04-06T07:41:05.000Z
2021-04-02T16:09:29.000Z
MicroRegEx/PatternSyntaxError.py
howl-anderson/MicroRegEx
2bfe48f1ac018398e1e77e7f1a6f5c64771399ca
[ "MIT" ]
null
null
null
MicroRegEx/PatternSyntaxError.py
howl-anderson/MicroRegEx
2bfe48f1ac018398e1e77e7f1a6f5c64771399ca
[ "MIT" ]
5
2018-08-13T11:17:03.000Z
2020-09-04T09:11:55.000Z
class PatternSyntaxError(Exception): pass
15.333333
36
0.782609
4
46
9
1
0
0
0
0
0
0
0
0
0
0
0
0.152174
46
2
37
23
0.923077
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
9941cd5a627a421d97c20763a67dd725597ebc99
160
py
Python
heroku.py
rooted-cyber/Heroku-CLI
980085fb384a8dc4c2b77bc832e34f556a64ee4f
[ "Apache-2.0" ]
null
null
null
heroku.py
rooted-cyber/Heroku-CLI
980085fb384a8dc4c2b77bc832e34f556a64ee4f
[ "Apache-2.0" ]
null
null
null
heroku.py
rooted-cyber/Heroku-CLI
980085fb384a8dc4c2b77bc832e34f556a64ee4f
[ "Apache-2.0" ]
null
null
null
import os def banner(): os.system("toilet -f font -F metal Heroku") os.system("cd javascript;node menu.js") os.system("cd bash;bash start.sh") banner()
17.777778
45
0.68125
27
160
4.037037
0.666667
0.220183
0.183486
0
0
0
0
0
0
0
0
0
0.15625
160
8
46
20
0.807407
0
0
0
0
0
0.48125
0
0
0
0
0
0
1
0.166667
true
0
0.166667
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
9972dfb04979768e7e1f5ad4c5e7b9f7d2beeef7
2,094
py
Python
week_7/lab_1.py
assassinen/python_openedu
50805715f99cdc84fe1dd5d7007a13e37668ab6f
[ "Apache-2.0" ]
null
null
null
week_7/lab_1.py
assassinen/python_openedu
50805715f99cdc84fe1dd5d7007a13e37668ab6f
[ "Apache-2.0" ]
null
null
null
week_7/lab_1.py
assassinen/python_openedu
50805715f99cdc84fe1dd5d7007a13e37668ab6f
[ "Apache-2.0" ]
null
null
null
__author__ = 'NovikovII' #!/usr/bin/env python3 # -*- coding: utf-8 -*- # import sqlite3 # con = sqlite3.connect('books01.db') # con.close() # import sqlite3 # import os # os.chdir(r'C:\shareFolder\Dropbox\python_openedu\week_7') # print(os.getcwd()) # con=sqlite3.connect('books01.db') # con.close() # # # import sqlite3 # import os # #os.chdir(r'g:\sqlite_opros_6') # con=sqlite3.connect('books02.db') # cur=con.cursor() # sql=''' # CREATE TABLE IF NOT EXISTS author ( # id_author INTEGER PRIMARY KEY AUTOINCREMENT, # author_name TEXT, # author_descr TEXT # ); # ''' # cur.executescript(sql) # cur.close() # con.close() # import sqlite3 # import os # #os.chdir(r'g:\sqlite_opros_6') # con=sqlite3.connect('books02.db') # cur=con.cursor() # sql=''' # CREATE TABLE IF NOT EXISTS author ( # id_author INTEGER PRIMARY KEY AUTOINCREMENT, # author_name TEXT, # author_descr TEXT # ); # CREATE TABLE IF NOT EXISTS style ( # id_style INTEGER PRIMARY KEY AUTOINCREMENT, # style_name TEXT # ); # CREATE TABLE IF NOT EXISTS book ( # id_book INTEGER PRIMARY KEY AUTOINCREMENT, # id_author INTEGER, # id_style INTEGER, # title TEXT, # description TEXT, # number_ex INTEGER # ); # ''' # cur.executescript(sql) # cur.close() # con.close() # import sqlite3 # import os # #os.chdir(r'g:\sqlite_opros_6') # con=sqlite3.connect('books02.db') # cur = con.cursor() # sql = """\ # INSERT INTO author (author_name, author_descr) # VALUES ('Chukovskiy', 'Pisatel') # """ # # cur.executescript(sql) # cur.close() # con.commit() # con.close() # import sqlite3 # import os # #os.chdir(r'g:\sqlite_opros_6') # con=sqlite3.connect('books02.db') # cur = con.cursor() # sql = """\ # select * from author # """ # # cur.executescript(sql) # cur.close() # con.commit() # con.close() import sqlite3 import os #os.chdir(r'g:\sqlite_opros_6') con=sqlite3.connect('books02.db') cur=con.cursor() sql=''' CREATE TABLE style IF NOT EXISTS ( id_style INTEGER PRIMARY KEY AUTOINCREMENT, style_name TEXT );''' cur.executescript(sql) 
cur.close() con.close()
19.570093
59
0.664279
285
2,094
4.768421
0.224561
0.066961
0.087564
0.092715
0.772627
0.772627
0.74025
0.74025
0.714496
0.640912
0
0.020654
0.167622
2,094
107
60
19.570093
0.759036
0.787488
0
0
0
0
0.348189
0
0
0
0
0
0
1
0
false
0
0.153846
0
0.153846
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
41e4620427c5012ca8eafdeb5bfc1ded0401e9ec
150
py
Python
calculation/gmhazard_calc/gmhazard_calc/scenario/__init__.py
ucgmsim/gmhazard
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
[ "MIT" ]
null
null
null
calculation/gmhazard_calc/gmhazard_calc/scenario/__init__.py
ucgmsim/gmhazard
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
[ "MIT" ]
8
2021-10-13T02:33:23.000Z
2022-03-29T21:01:08.000Z
calculation/gmhazard_calc/gmhazard_calc/scenario/__init__.py
ucgmsim/gmhazard
d3d90b4c94b3d9605597a3efeccc8523a1e50c0e
[ "MIT" ]
null
null
null
from .scenario import run_ensemble_scenario, filter_ruptures from .ScenarioResult import EnsembleScenarioResult, BranchScenarioResult, ScenarioResult
50
88
0.893333
14
150
9.357143
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.073333
150
2
89
75
0.942446
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
513bdaed9c8611de52f39818e6308c92b66efb30
119
py
Python
sgnlp/models/lif_3way_ap/__init__.py
jonheng/sgnlp
aeee85b78de2e449ca1dc6b18686a060cb938d07
[ "MIT" ]
null
null
null
sgnlp/models/lif_3way_ap/__init__.py
jonheng/sgnlp
aeee85b78de2e449ca1dc6b18686a060cb938d07
[ "MIT" ]
null
null
null
sgnlp/models/lif_3way_ap/__init__.py
jonheng/sgnlp
aeee85b78de2e449ca1dc6b18686a060cb938d07
[ "MIT" ]
null
null
null
from .config import LIF3WayAPConfig from .modeling import LIF3WayAPModel from .preprocess import LIF3WayAPPreprocessor
29.75
45
0.87395
12
119
8.666667
0.666667
0
0
0
0
0
0
0
0
0
0
0.028037
0.10084
119
3
46
39.666667
0.943925
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5a970a4c88c3df8348f5365785d4400dd3e4160e
83
py
Python
data_hacking/simple_stats/__init__.py
c4pr1c3/data_hacking
a2c746375a2b8704eb8f263f6e2b3250ad7ec0ab
[ "MIT" ]
1
2022-02-19T11:36:37.000Z
2022-02-19T11:36:37.000Z
data_hacking/simple_stats/__init__.py
c4pr1c3/data_hacking
a2c746375a2b8704eb8f263f6e2b3250ad7ec0ab
[ "MIT" ]
null
null
null
data_hacking/simple_stats/__init__.py
c4pr1c3/data_hacking
a2c746375a2b8704eb8f263f6e2b3250ad7ec0ab
[ "MIT" ]
3
2017-09-23T01:17:54.000Z
2022-03-23T13:11:37.000Z
'''Package for the Simple Statistical Functionality''' from .simple_stats import *
27.666667
54
0.783133
10
83
6.4
0.9
0
0
0
0
0
0
0
0
0
0
0
0.120482
83
2
55
41.5
0.876712
0.578313
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5aac7332aa6cf8d07e3b45b4ef14ddbd65244e8f
172
py
Python
app/main/__init__.py
Bobfrat/sequence-alignment-app
64cbd790705ccd5a328da2798445e7b9ce65d647
[ "BSD-3-Clause" ]
null
null
null
app/main/__init__.py
Bobfrat/sequence-alignment-app
64cbd790705ccd5a328da2798445e7b9ce65d647
[ "BSD-3-Clause" ]
null
null
null
app/main/__init__.py
Bobfrat/sequence-alignment-app
64cbd790705ccd5a328da2798445e7b9ce65d647
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python ''' app/main/__init__.py ''' from flask import Blueprint main = Blueprint('main', __name__, static_folder="../ui/build") from app.main import views
17.2
63
0.715116
25
172
4.56
0.72
0.122807
0
0
0
0
0
0
0
0
0
0
0.116279
172
9
64
19.111111
0.75
0.238372
0
0
0
0
0.121951
0
0
0
0
0
0
1
0
false
0
0.666667
0
0.666667
0.666667
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
1
0
5
5ac2541df527bc46ac029ad16c61c9d293dbe8d7
184
py
Python
src/core/sessions/buffers/buffers/__init__.py
Oire/TheQube
fcfd8a68b15948e0740642d635db24adef8cc314
[ "MIT" ]
21
2015-08-02T21:26:14.000Z
2019-12-27T09:57:44.000Z
src/core/sessions/buffers/buffers/__init__.py
Oire/TheQube
fcfd8a68b15948e0740642d635db24adef8cc314
[ "MIT" ]
34
2015-01-12T00:38:14.000Z
2020-08-31T11:19:37.000Z
src/core/sessions/buffers/buffers/__init__.py
Oire/TheQube
fcfd8a68b15948e0740642d635db24adef8cc314
[ "MIT" ]
15
2015-03-24T15:42:30.000Z
2020-09-24T20:26:42.000Z
# -*- coding: utf-8 -*- from buffer import Buffer from dismissable import Dismissable from updating import Updating from filtered import Filtered from messages import Messages
23
36
0.771739
23
184
6.173913
0.434783
0
0
0
0
0
0
0
0
0
0
0.006623
0.179348
184
7
37
26.285714
0.933775
0.11413
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
5ae16f89ad51d4da0e6182f2806df670211dceec
53
py
Python
src/exceptions/action_not_supported_exception.py
mehsoy/jaws
b79723c1fc549741494ebf5d948e94a44e971f2a
[ "MIT" ]
1
2019-06-17T17:01:17.000Z
2019-06-17T17:01:17.000Z
src/exceptions/action_not_supported_exception.py
mehsoy/jaws
b79723c1fc549741494ebf5d948e94a44e971f2a
[ "MIT" ]
7
2021-02-08T20:46:15.000Z
2021-09-08T02:12:59.000Z
src/exceptions/action_not_supported_exception.py
mehsoy/jaws
b79723c1fc549741494ebf5d948e94a44e971f2a
[ "MIT" ]
null
null
null
class ActionNotSupportedException(Exception): pass
13.25
45
0.849057
4
53
11.25
1
0
0
0
0
0
0
0
0
0
0
0
0.09434
53
3
46
17.666667
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
5af4788582a4125cb61283099afaef85b9b25989
113
py
Python
auto_events/form/__init__.py
fedecech/form_automator
b20364803b000333b24ce55ef8c01a18d7b47f23
[ "MIT" ]
null
null
null
auto_events/form/__init__.py
fedecech/form_automator
b20364803b000333b24ce55ef8c01a18d7b47f23
[ "MIT" ]
null
null
null
auto_events/form/__init__.py
fedecech/form_automator
b20364803b000333b24ce55ef8c01a18d7b47f23
[ "MIT" ]
null
null
null
from .Form import Form from .FormComponent import FormComponent from .FormComponentType import FormComponentType
28.25
48
0.867257
12
113
8.166667
0.416667
0
0
0
0
0
0
0
0
0
0
0
0.106195
113
3
49
37.666667
0.970297
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8503be01b49daa229fd55657a591f2aa16a62ebd
188
py
Python
app/models/__init__.py
jattoabdul/vanhack-cms
ab2cb054e35765531833afd98051027d891baf10
[ "MIT" ]
null
null
null
app/models/__init__.py
jattoabdul/vanhack-cms
ab2cb054e35765531833afd98051027d891baf10
[ "MIT" ]
null
null
null
app/models/__init__.py
jattoabdul/vanhack-cms
ab2cb054e35765531833afd98051027d891baf10
[ "MIT" ]
null
null
null
from .admin import Admin from .student import Student from .event import Event from .lecture import Lecture from .student_event import StudentEvent from .lecture_admin import LectureAdmin
26.857143
39
0.840426
26
188
6
0.307692
0.141026
0
0
0
0
0
0
0
0
0
0
0.12766
188
6
40
31.333333
0.95122
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
85065a61a919d471077a777cc54e76b7a05a3a5e
40
py
Python
modules_imports_names/module.py
UWSEDS/lecture-materials
42f24ce191efc4a193ac4a84e067519045f7f7c3
[ "BSD-2-Clause" ]
null
null
null
modules_imports_names/module.py
UWSEDS/lecture-materials
42f24ce191efc4a193ac4a84e067519045f7f7c3
[ "BSD-2-Clause" ]
null
null
null
modules_imports_names/module.py
UWSEDS/lecture-materials
42f24ce191efc4a193ac4a84e067519045f7f7c3
[ "BSD-2-Clause" ]
4
2020-10-09T01:07:19.000Z
2020-12-11T23:11:35.000Z
print("Inside module.py") print("hai!")
13.333333
25
0.675
6
40
4.5
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.075
40
2
26
20
0.72973
0
0
0
0
0
0.5
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
8518659d193d15da1f865318d888cc09f41636db
6,680
py
Python
API_Scrapers/apiscraper_gamersclub.py
filipefborba/GamersNetwork
28f7254293192fb80f7d84b5e936893fa70ec474
[ "MIT" ]
null
null
null
API_Scrapers/apiscraper_gamersclub.py
filipefborba/GamersNetwork
28f7254293192fb80f7d84b5e936893fa70ec474
[ "MIT" ]
null
null
null
API_Scrapers/apiscraper_gamersclub.py
filipefborba/GamersNetwork
28f7254293192fb80f7d84b5e936893fa70ec474
[ "MIT" ]
null
null
null
import requests import json from bs4 import BeautifulSoup, SoupStrainer from get_match_html import get_partida_html #pip install bs4 #pip install lxml def get_partidas_campeonato(id): url = "https://gamersclub.com.br/campeonatos/csgo/" + str(id) querystring = {"pag":"partidas"} headers = { 'Referer': "https://gamersclub.com.br/campeonatos/csgo/846", 'X-Requested-With': "XMLHttpRequest", 'cache-control': "no-cache" } response = requests.request("GET", url, headers=headers, params=querystring) partidas_html = response.text camp_partidas = {id: {}} soup = BeautifulSoup(partidas_html, features="lxml") for link in soup.find_all('a'): partida_id = link.get("href").split("/")[-1:][0] if (partida_id not in camp_partidas[id]) and (partida_id != None) and (not partida_id.startswith("#")) : camp_partidas[id][partida_id] = {} return camp_partidas def get_partida(camp_id, partida_id): url = "https://gamersclub.com.br/api/ebacon2/stats/scoreboards/{}/{}".format(str(camp_id), str(partida_id)) headers = { 'Referer': "https://gamersclub.com.br/campeonatos/csgo/1257/partida/104124", 'Cookie': "__utma=203582342.232122176.1489463720.1497671483.1497749677.12; _ga=GA1.3.232122176.1489463720; rdtrk=%7B%22id%22%3A%2219e6bdb6-ab8f-4549-bf65-45ff0c86e846%22%7D; intercom-lou-gp4gdmdo=1; __trf.src=encoded_eyJmaXJzdF9zZXNzaW9uIjp7InZhbHVlIjoiMjAzNTgyMzQyLjE0ODk0NjM3MjEuMS4xLnV0bWNzcj0oZGlyZWN0KXx1dG1jY249KGRpcmVjdCl8dXRtY21kPShub25lKSIsImV4dHJhX3BhcmFtcyI6e319LCJjdXJyZW50X3Nlc3Npb24iOnsidmFsdWUiOiJodHRwczovL2dhbWVyc2NsdWIuY29tLmJyLyIsImV4dHJhX3BhcmFtcyI6e319LCJjcmVhdGVkX2F0IjoxNTE4MTM1OTYwMzAwfQ==; __cfduid=dfaae443a782f1d6d0b715bebbb0338321522639930; SL_C_23361dd035530_KEY=a14d3638cda988422792e3613234743b983fdd9e; crisp-client%2Fsession%2F839282a3-c2c1-4fd3-b493-0f2d3c1e2102=session_91c09cdd-f616-4e12-9599-8aad4f09988d; crisp-client%2Fsession%2F839282a3-c2c1-4fd3-b493-0f2d3c1e2102%2Fc3b2fd43f122359ddf0a576a7d4d75ab74b4d92f9feb92c94eeb2406a6d36192=session_91c09cdd-f616-4e12-9599-8aad4f09988d; 
SL_C_23361dd035530_VID=Nm59QhW-6TYP; SL_C_23361dd035530_SID=hMyy703pm5eY; gclubsess=fe6dc36080d6083b0e1060909e4d5a51218b6dd8; _gid=GA1.3.168545321.1543697640; _fbp=fb.2.1543697640249.576304885", 'cache-control': "no-cache", } response = requests.request("GET", url, headers=headers) resultados_partida = response.json() return resultados_partida try: campeonatos_ids = [846, 881, 915, 957, 1008, 1019, 1039, 1079, 1116, 1164, 1209, 1257] campeonatos = {} for str(camp_id) in campeonatos_ids: camp = get_partidas_campeonato(camp_id) campeonatos.update(camp) #campeonatos = {846: {'66817': {}, '66818': {}, '66386': {}, '66819': {}, '66387': {}, '66820': {}, '66385': {}, '66920': {}, '66885': {}, '66884': {}, '66919': {}, '66388': {}, '66917': {}, '66918': {}, '67081': {}, '67082': {}, '66981': {}, '66921': {}, '67083': {}, '67115': {}, '67119': {}, '67117': {}, '67116': {}, '67118': {}, '68711': {}, '68712': {}, '68775': {}}, 881: {'69447': {}, '69448': {}, '69449': {}, '69456': {}, '69455': {}, '69452': {}, '69454': {}, '69451': {}, '69489': {}, '69492': {}, '69490': {}, '69633': {}, '69493': {}, '69494': {}, '69495': {}, '69488': {}, '69634': {}, '69630': {}, '69631': {}, '69632': {}, '70936': {}, '71172': {}, '70935': {}, '70934': {}, '71187': {}, '71188': {}, '71704': {}}, 915: {'73003': {}, '73004': {}, '73007': {}, '73005': {}, '73076': {}, '73008': {}, '73011': {}, '73009': {}, '73010': {}, '73194': {}, '73195': {}, '73197': {}, '73196': {}, '73212': {}, '73213': {}, '73405': {}, '73272': {}, '73273': {}, '73599': {}, '73598': {}, '73406': {}, '73637': {}, '73636': {}, '73634': {}, '73600': {}, '76347': {}, '75972': {}, '76382': {}}, 957: {'76641': {}, '77119': {}, '77117': {}, '77120': {}, '76642': {}, '77121': {}, '76643': {}, '76640': {}, '77245': {}, '77248': {}, '77243': {}, '77242': {}, '77240': {}, '77247': {}, '77241': {}, '77244': {}, '77643': {}, '77611': {}, '77642': {}, '77610': {}, '79525': {}, '79524': {}, '77645': {}, '79597': {}, '79980': {}, '79981': 
{}, '80325': {}}, 1008: {'80886': {}, '80888': {}, '80889': {}, '80890': {}, '80892': {}, '80893': {}, '80887': {}, '80891': {}, '80960': {}, '80895': {}, '80961': {}, '80894': {}}, 1019: {'81425': {}, '81432': {}, '81430': {}, '81435': {}, '81428': {}, '81436': {}, '81427': {}, '81431': {}, '81429': {}, '81433': {}, '81426': {}, '81434': {}, '82905': {}, '82906': {}, '82908': {}, '82907': {}, '82950': {}, '82951': {}, '83230': {}}, 1039: {'84509': {}, '84510': {}, '84511': {}, '84512': {}, '84513': {}, '84514': {}, '84515': {}, '84516': {}, '84517': {}, '84518': {}, '84519': {}, '84520': {}, '84693': {}, '84696': {}, '84694': {}, '84695': {}, '84698': {}, '84699': {}, '85588': {}}, 1079: {'88339': {}, '88343': {}, '88340': {}, '88348': {}, '88337': {}, '88344': {}, '88342': {}, '88347': {}, '88338': {}, '88345': {}, '88341': {}, '88346': {}, '88494': {}, '88496': {}, '88493': {}, '88495': {}, '88504': {}, '88505': {}, '88628': {}}, 1116: {'91382': {}, '91392': {}, '91385': {}, '91387': {}, '91391': {}, '91381': {}, '91386': {}, '91388': {}, '91383': {}, '91389': {}, '91384': {}, '91390': {}, '91598': {}, '91599': {}, '91607': {}, '91608': {}, '91703': {}}, 1164: {'96587': {}, '96590': {}, '96586': {}, '96595': {}, '96589': {}, '96593': {}, '96584': {}, '96592': {}, '96585': {}, '96588': {}, '96594': {}, '96591': {}, '96985': {}, '96986': {}, '97064': {}, '97065': {}, '97521': {}}, 1209: {'101120': {}, '101117': {}, '101143': {}, '101138': {}, '101119': {}, '101118': {}, '101139': {}, '101142': {}, '101116': {}, '101121': {}, '101140': {}, '101141': {}, '102020': {}, '102019': {}, '102044': {}, '102047': {}, '102261': {}}, 1257: {'104124': {}, '104125': {}, '103994': {}, '103992': {}, '103988': {}, '103986': {}, '104127': {}, '104126': {}, '103993': {}, '103987': {}, '104128': {}, '104129': {}, '105105': {}, '105106': {}, '105235': {}, '105240': {}, '105376': {}}} for camp_id in campeonatos: for partida_id in campeonatos[camp_id]: if camp_id >= 1116: partida = 
get_partida(camp_id, partida_id) else: partida = get_partida_html(camp_id, partida_id) campeonatos[camp_id][partida_id] = partida with open("campeonatos_completo.json", 'w') as fp: json.dump(campeonatos, fp) except Exception as e: print(e) print("Um erro ocorreu. Salvando o restante...") with open("campeonatos_incompleto.json", 'w') as fp: json.dump(campeonatos, fp)
99.701493
3,344
0.564371
629
6,680
5.887122
0.653418
0.026735
0.014853
0.021604
0.1569
0.1569
0.10532
0.10532
0.033486
0.033486
0
0.321498
0.152545
6,680
66
3,345
101.212121
0.332627
0.504341
0
0.078431
0
0.019608
0.460538
0.343816
0
0
0
0
0
0
null
null
0
0.078431
null
null
0.039216
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
1
1
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
5
51a6517a7a2a18e1f5a2996d6a7f6710c0563839
30
py
Python
application/services/__init__.py
raphaelbh/timezone-api
bd273614685065a74df0c577673b43b42bae813c
[ "MIT" ]
2
2022-02-14T19:52:34.000Z
2022-02-14T19:52:39.000Z
application/services/__init__.py
raphaelbh/timezone-api
bd273614685065a74df0c577673b43b42bae813c
[ "MIT" ]
null
null
null
application/services/__init__.py
raphaelbh/timezone-api
bd273614685065a74df0c577673b43b42bae813c
[ "MIT" ]
null
null
null
from . import timezone_service
30
30
0.866667
4
30
6.25
1
0
0
0
0
0
0
0
0
0
0
0
0.1
30
1
30
30
0.925926
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
cfad419ee40a8dc8ac1df4ad10d5679f1daa4135
91
py
Python
tests/parser/edbidb.5.test.py
veltri/DLV2
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
[ "Apache-2.0" ]
null
null
null
tests/parser/edbidb.5.test.py
veltri/DLV2
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
[ "Apache-2.0" ]
null
null
null
tests/parser/edbidb.5.test.py
veltri/DLV2
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
[ "Apache-2.0" ]
null
null
null
input = """ a(1). a(2) :- true. true. """ output = """ a(1). a(2) :- true. true. """
8.272727
14
0.384615
14
91
2.5
0.428571
0.114286
0.171429
0.228571
0.685714
0.685714
0
0
0
0
0
0.059701
0.263736
91
10
15
9.1
0.462687
0
0
0.8
0
0
0.635294
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
cfb6aa456d2c2752350fbe4ccb98ce7dd3d89265
95
py
Python
torchrecipes/audio/source_separation/conf/__init__.py
nateanl/recipes-1
3b46a7479508608f73b6f24deffdc8fcffd25ee5
[ "BSD-3-Clause" ]
null
null
null
torchrecipes/audio/source_separation/conf/__init__.py
nateanl/recipes-1
3b46a7479508608f73b6f24deffdc8fcffd25ee5
[ "BSD-3-Clause" ]
null
null
null
torchrecipes/audio/source_separation/conf/__init__.py
nateanl/recipes-1
3b46a7479508608f73b6f24deffdc8fcffd25ee5
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 import torchrecipes.audio.source_separation.datamodule.librimix # noqa
31.666667
71
0.821053
12
95
6.416667
1
0
0
0
0
0
0
0
0
0
0
0.011364
0.073684
95
2
72
47.5
0.863636
0.273684
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
cfca8c4ad5c1bc9a4f8bb36916d1e68974f19aea
32
py
Python
python/testData/inspections/PyUnresolvedReferencesInspection/NamespacePackageNameDoesntMatchFileName/google/protobuf/service.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/inspections/PyUnresolvedReferencesInspection/NamespacePackageNameDoesntMatchFileName/google/protobuf/service.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/inspections/PyUnresolvedReferencesInspection/NamespacePackageNameDoesntMatchFileName/google/protobuf/service.py
truthiswill/intellij-community
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
class Service(object): pass
10.666667
22
0.6875
4
32
5.5
1
0
0
0
0
0
0
0
0
0
0
0
0.21875
32
2
23
16
0.88
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.5
0
0
0.5
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
0
0
0
0
0
5
320dd67193a6a4adc37b0b547b4128d8d24fd8b6
198
py
Python
components.py
Aspect13/scheduling
95d133efa49c10ab9d7c543631df9e3ce8e6302b
[ "Apache-2.0" ]
null
null
null
components.py
Aspect13/scheduling
95d133efa49c10ab9d7c543631df9e3ce8e6302b
[ "Apache-2.0" ]
null
null
null
components.py
Aspect13/scheduling
95d133efa49c10ab9d7c543631df9e3ce8e6302b
[ "Apache-2.0" ]
1
2022-01-20T09:49:33.000Z
2022-01-20T09:49:33.000Z
from flask import render_template def render_security_test_create(context, slot, payload): return render_template( 'scheduling:security_test_create.html', config=payload )
22
56
0.737374
23
198
6.043478
0.695652
0.201439
0.258993
0
0
0
0
0
0
0
0
0
0.19697
198
8
57
24.75
0.874214
0
0
0
0
0
0.181818
0.181818
0
0
0
0
0
1
0.166667
false
0
0.166667
0.166667
0.5
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
5
3225b50f657517d6323b186f71d8dfdce19be319
6,599
py
Python
modules/dbnd/test_dbnd/tracking/user_commands/test_tracking_datasets.py
busunkim96/dbnd
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
[ "Apache-2.0" ]
224
2020-01-02T10:46:37.000Z
2022-03-02T13:54:08.000Z
modules/dbnd/test_dbnd/tracking/user_commands/test_tracking_datasets.py
busunkim96/dbnd
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
[ "Apache-2.0" ]
16
2020-03-11T09:37:58.000Z
2022-01-26T10:22:08.000Z
modules/dbnd/test_dbnd/tracking/user_commands/test_tracking_datasets.py
busunkim96/dbnd
0191fdcd4c4fbd35006f1026d1a55b2abab9097b
[ "Apache-2.0" ]
24
2020-03-24T13:53:50.000Z
2022-03-22T11:55:18.000Z
import json import pytest from more_itertools import one from dbnd import dataset_op_logger, log_dataset_op, task from dbnd._core.constants import DbndDatasetOperationType, DbndTargetOperationStatus from dbnd._core.tracking.schemas.metrics import Metric from dbnd.testing.helpers_mocks import set_tracking_context from targets import target from test_dbnd.tracking.tracking_helpers import ( get_log_datasets, get_log_metrics, get_log_targets, ) @pytest.mark.usefixtures(set_tracking_context.__name__) class TestTrackingDatasets(object): def test_log_dataset(self, mock_channel_tracker): @task() def task_with_log_datasets(): log_dataset_op( "location://path/to/value.csv", DbndDatasetOperationType.read ) task_with_log_datasets() log_dataset_arg = one(get_log_datasets(mock_channel_tracker)) assert log_dataset_arg.operation_path == "location://path/to/value.csv" assert log_dataset_arg.operation_type == DbndDatasetOperationType.read assert log_dataset_arg.operation_status == DbndTargetOperationStatus.OK assert log_dataset_arg.value_preview == "" assert log_dataset_arg.data_dimensions is None assert log_dataset_arg.data_schema is None # no metrics reported log_metrics_args = list(get_log_metrics(mock_channel_tracker)) assert len(log_metrics_args) == 0 def test_log_dataset_with_wrapper(self, mock_channel_tracker, pandas_data_frame): @task() def task_with_log_dataset_wrapper(): with dataset_op_logger( op_path=target("/path/to/value.csv"), op_type="read", ) as logger: ans = 42 logger.set(data=pandas_data_frame) task_with_log_dataset_wrapper() log_dataset_arg = one(get_log_datasets(mock_channel_tracker)) assert log_dataset_arg.operation_path == "/path/to/value.csv" assert log_dataset_arg.operation_type == DbndDatasetOperationType.read assert log_dataset_arg.operation_status == DbndTargetOperationStatus.OK assert log_dataset_arg.value_preview is not None assert log_dataset_arg.data_dimensions == (5, 3) assert set(json.loads(log_dataset_arg.data_schema).keys()) == { "columns", 
"dtypes", "shape", "size.bytes", "type", } def test_failed_target_with_wrapper(self, mock_channel_tracker, pandas_data_frame): @task() def task_with_log_dataset_wrapper(): with dataset_op_logger( op_path=target("/path/to/value.csv"), data=pandas_data_frame, op_type="write", ) as logger: ans = 42 ans / 0 try: task_with_log_dataset_wrapper() except Exception: pass log_dataset_arg = one(get_log_datasets(mock_channel_tracker)) assert log_dataset_arg.operation_path == "/path/to/value.csv" assert log_dataset_arg.operation_type == DbndDatasetOperationType.write assert log_dataset_arg.operation_status == DbndTargetOperationStatus.NOK assert log_dataset_arg.value_preview is not None assert log_dataset_arg.data_dimensions == (5, 3) assert set(json.loads(log_dataset_arg.data_schema).keys()) == { "columns", "dtypes", "shape", "size.bytes", "type", } def test_failed_target(self, mock_channel_tracker): @task() def task_with_log_datasets(): log_dataset_op( "location://path/to/value.csv", "read", # Check passing str values too success=False, ) task_with_log_datasets() log_dataset_arg = one(get_log_datasets(mock_channel_tracker)) assert log_dataset_arg.operation_path == "location://path/to/value.csv" assert log_dataset_arg.operation_type == DbndDatasetOperationType.read assert log_dataset_arg.operation_status == DbndTargetOperationStatus.NOK assert log_dataset_arg.value_preview == "" assert log_dataset_arg.data_dimensions is None assert log_dataset_arg.data_schema is None log_metrics_args = get_log_metrics(mock_channel_tracker) assert len(list(log_metrics_args)) == 0 def test_with_actual_op_path(self, mock_channel_tracker): @task() def task_with_log_datasets(): a_target = target("/path/to/value.csv") log_dataset_op(a_target, DbndDatasetOperationType.read) task_with_log_datasets() log_dataset_arg = one(get_log_datasets(mock_channel_tracker)) assert log_dataset_arg.operation_path == "/path/to/value.csv" assert log_dataset_arg.operation_type == DbndDatasetOperationType.read assert 
log_dataset_arg.operation_status == DbndTargetOperationStatus.OK assert log_dataset_arg.value_preview == "" assert log_dataset_arg.data_dimensions is None assert log_dataset_arg.data_schema is None log_metrics_args = get_log_metrics(mock_channel_tracker) assert len(list(log_metrics_args)) == 0 def test_path_with_data_meta(self, mock_channel_tracker, pandas_data_frame): @task() def task_with_log_datasets(): log_dataset_op( "/path/to/value.csv", DbndDatasetOperationType.read, data=pandas_data_frame, with_preview=True, with_schema=True, ) task_with_log_datasets() log_dataset_arg = one(get_log_datasets(mock_channel_tracker)) assert log_dataset_arg.operation_path == "/path/to/value.csv" assert log_dataset_arg.operation_type == DbndDatasetOperationType.read assert log_dataset_arg.operation_status == DbndTargetOperationStatus.OK assert log_dataset_arg.value_preview is not None assert log_dataset_arg.data_dimensions == (5, 3) assert set(json.loads(log_dataset_arg.data_schema).keys()) == { "columns", "dtypes", "shape", "size.bytes", "type", } log_metrics_args = get_log_metrics(mock_channel_tracker) metrics_names = {metric_row["metric"].key for metric_row in log_metrics_args} assert metrics_names == { "path.to.schema", "path.to.shape0", "path.to.shape1", "path.to", }
37.925287
87
0.660858
783
6,599
5.176245
0.137931
0.130767
0.134715
0.1547
0.779176
0.761905
0.746114
0.746114
0.736245
0.720207
0
0.003269
0.258372
6,599
173
88
38.144509
0.824888
0.007274
0
0.62069
0
0
0.064142
0.017104
0
0
0
0
0.275862
1
0.082759
false
0.006897
0.062069
0
0.151724
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
5c4e773fe46ed33728bf1c32029d239fd38c681d
80
py
Python
Starting Out with Python/Chapter 2/2.3 Displaying Output with the print Function/2-2_double_quotes.py
jesushilarioh/Python
79c8b0d2c4f0ec9cccee26dcd563de0c55ba283e
[ "MIT" ]
null
null
null
Starting Out with Python/Chapter 2/2.3 Displaying Output with the print Function/2-2_double_quotes.py
jesushilarioh/Python
79c8b0d2c4f0ec9cccee26dcd563de0c55ba283e
[ "MIT" ]
null
null
null
Starting Out with Python/Chapter 2/2.3 Displaying Output with the print Function/2-2_double_quotes.py
jesushilarioh/Python
79c8b0d2c4f0ec9cccee26dcd563de0c55ba283e
[ "MIT" ]
null
null
null
print("Kate Austen") print("123 Full Circle Drive") print("Asheville, NC 28899")
26.666667
30
0.7375
12
80
4.916667
0.833333
0
0
0
0
0
0
0
0
0
0
0.111111
0.1
80
3
31
26.666667
0.708333
0
0
0
0
0
0.62963
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
5
5cd50937db1449f40e90303bb25e195677b58565
71
py
Python
datahub/company/admin/constants.py
uktrade/data-hub-api-actions-test
a72439dfeb34e4179491db42fea290b9c2afadb1
[ "MIT" ]
null
null
null
datahub/company/admin/constants.py
uktrade/data-hub-api-actions-test
a72439dfeb34e4179491db42fea290b9c2afadb1
[ "MIT" ]
16
2020-04-01T15:25:35.000Z
2020-04-14T14:07:30.000Z
datahub/company/admin/constants.py
uktrade/data-hub-api-actions-test
a72439dfeb34e4179491db42fea290b9c2afadb1
[ "MIT" ]
null
null
null
ADMIN_ADD_ADVISER_FROM_SSO_FEATURE_FLAG = 'admin-add-adviser-from-sso'
35.5
70
0.859155
12
71
4.583333
0.583333
0.290909
0.545455
0.690909
0.8
0
0
0
0
0
0
0
0.042254
71
1
71
71
0.808824
0
0
0
0
0
0.366197
0.366197
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7a2a306c62af325778b965f704dc45193e65081a
24,396
py
Python
jupiter/domain/schedules.py
horia141/jupiter
2c721d1d44e1cd2607ad9936e54a20ea254741dc
[ "MIT" ]
15
2019-05-05T14:34:58.000Z
2022-02-25T09:57:28.000Z
jupiter/domain/schedules.py
horia141/jupiter
2c721d1d44e1cd2607ad9936e54a20ea254741dc
[ "MIT" ]
3
2020-02-22T16:09:39.000Z
2021-12-18T21:33:06.000Z
jupiter/domain/schedules.py
horia141/jupiter
2c721d1d44e1cd2607ad9936e54a20ea254741dc
[ "MIT" ]
null
null
null
"""Module for working with schedules.""" import abc import typing from typing import Optional import pendulum from pendulum import UTC from pendulum.tz.timezone import Timezone from jupiter.domain.adate import ADate from jupiter.domain.entity_name import EntityName from jupiter.domain.recurring_task_due_at_day import RecurringTaskDueAtDay from jupiter.domain.recurring_task_due_at_month import RecurringTaskDueAtMonth from jupiter.domain.recurring_task_due_at_time import RecurringTaskDueAtTime from jupiter.domain.recurring_task_period import RecurringTaskPeriod from jupiter.domain.recurring_task_skip_rule import RecurringTaskSkipRule from jupiter.domain.timezone import Timezone as DomainTimezone from jupiter.framework.base.timestamp import Timestamp class Schedule(abc.ABC): """The base class for the schedule descriptors class.""" _should_skip: bool _actionable_date: Optional[pendulum.Date] _date: pendulum.Date _due_date: pendulum.Date _due_time: Optional[pendulum.DateTime] _full_name: EntityName _timeline: str def __str__(self) -> str: """String representation.""" return f"Schedule({self.period} {self.first_day} {self.end_day} {self.timeline})" def __repr__(self) -> str: """String representation.""" return f"Schedule({self.period} {self.first_day} {self.end_day} {self.timeline})" @staticmethod def year_two_digits(date: Timestamp) -> str: """Get the last two digits (decade and year) from a date.""" return str(date.value.year % 100) @staticmethod def month_to_quarter_num(date: pendulum.Date) -> int: """Map a date to one of the four quarters from the year.""" month_to_quarter_num = { 1: 1, 2: 1, 3: 1, 4: 2, 5: 2, 6: 2, 7: 3, 8: 3, 9: 3, 10: 4, 11: 4, 12: 4 } return month_to_quarter_num[date.month] @staticmethod def month_to_quarter(date: typing.Union[pendulum.Date, Timestamp]) -> str: """Map a date to the name of four quarters from the year.""" month_to_quarter = { 1: "Q1", 2: "Q1", 3: "Q1", 4: "Q2", 5: "Q2", 6: "Q2", 7: "Q3", 8: "Q3", 9: "Q3", 10: "Q4", 11: "Q4", 12: 
"Q4" } return month_to_quarter[date.month] @staticmethod def month_to_quarter_start(date: typing.Union[pendulum.Date, Timestamp]) -> int: """Map a month in a date to the first month of a quarter of which the date belongs.""" month_to_quarter = { 1: 1, 2: 1, 3: 1, 4: 4, 5: 4, 6: 4, 7: 7, 8: 7, 9: 7, 10: 10, 11: 10, 12: 10 } return month_to_quarter[date.month] @staticmethod def month_to_quarter_end(date: typing.Union[pendulum.Date, Timestamp]) -> int: """Map a month in a date to the last month of a quarter of which the date belongs.""" month_to_quarter = { 1: 3, 2: 3, 3: 3, 4: 6, 5: 6, 6: 6, 7: 9, 8: 9, 9: 9, 10: 12, 11: 12, 12: 12 } return month_to_quarter[date.month] @staticmethod def month_to_month(date: typing.Union[pendulum.Date, Timestamp]) -> str: """Map a month to the name it has.""" month_to_month = { 1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun", 7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec" } return month_to_month[date.month] @property def should_skip(self) -> bool: """Whether the date should be skipped according to the planning rules.""" return self._should_skip @property def actionable_date(self) -> Optional[ADate]: """The actionable date for the schedule, if any.""" return ADate.from_date(self._actionable_date) if self._actionable_date else None @property def due_time(self) -> ADate: """The due time of an event according to the schedule.""" if self._due_time: return ADate.from_date_and_time(self._due_time) else: return ADate.from_date(self._due_date) @property def full_name(self) -> EntityName: """The full name of the event with the schedule info in it.""" return self._full_name @property def timeline(self) -> str: """The timeline of an event.""" return self._timeline @staticmethod def _skip_helper(skip_rule: RecurringTaskSkipRule, param: int) -> bool: skip_rule_str = str(skip_rule) if skip_rule_str == "even": return param % 2 == 0 elif skip_rule_str == "odd": return param % 2 != 0 else: # Why don't you write better 
programs, bro? return skip_rule_str.find(str(param)) != -1 @property @abc.abstractmethod def period(self) -> RecurringTaskPeriod: """The period for the schedule.""" @property @abc.abstractmethod def first_day(self) -> ADate: """The first day of the interval represented by the schedule block.""" @property @abc.abstractmethod def end_day(self) -> ADate: """The end day of the interval represented by the schedule block.""" def contains_adate(self, adate: ADate) -> bool: """Tests whether a particular datetime is in the schedule block.""" first_day_dt = pendulum.DateTime(self.first_day.year, self.first_day.month, self.first_day.day, tzinfo=UTC) end_day_dt = \ pendulum.DateTime(self.end_day.year, self.end_day.month, self.end_day.day, tzinfo=UTC).end_of("day") adate_ts = adate.to_timestamp().value.end_of("day") return typing.cast(bool, first_day_dt <= adate_ts) and typing.cast(bool, adate_ts <= end_day_dt) def contains_timestamp(self, timestamp: Timestamp) -> bool: """Tests whether a particular datetime is in the schedule block.""" first_day_dt = pendulum.DateTime(self.first_day.year, self.first_day.month, self.first_day.day, tzinfo=UTC) end_day_dt = \ pendulum.DateTime(self.end_day.year, self.end_day.month, self.end_day.day, tzinfo=UTC).end_of("day") timestamp = timestamp.value.end_of("day") return typing.cast(bool, first_day_dt <= timestamp) and typing.cast(bool, timestamp <= end_day_dt) class DailySchedule(Schedule): """A daily schedule.""" def __init__( self, name: EntityName, right_now: Timestamp, timezone: Timezone, skip_rule: Optional[RecurringTaskSkipRule] = None, due_at_time: Optional[RecurringTaskDueAtTime] = None) -> None: """Construct a schedule.""" self._date = typing.cast(pendulum.Date, right_now.value.date()) self._due_date = typing.cast(pendulum.Date, right_now.value.date()) self._actionable_date = None if due_at_time: self._due_time = pendulum.parse( "{date} {time}".format(date=self._due_date.to_date_string(), time=due_at_time), tz=timezone) else: 
self._due_time = None self._full_name = EntityName("{name} {year}:{month}{day}".format( name=name, year=self.year_two_digits(right_now), month=self.month_to_month(right_now), day=right_now.value.day)) self._timeline = self._generate_timeline(right_now) self._should_skip = self._skip_helper(skip_rule, self._due_date.day_of_week) if skip_rule else False @property def period(self) -> RecurringTaskPeriod: """The period string.""" return RecurringTaskPeriod.DAILY @property def first_day(self) -> ADate: """The first day of the interval represented by the schedule block.""" return ADate.from_date(self._due_date) @property def end_day(self) -> ADate: """The end day of the interval represented by the schedule block.""" return ADate.from_date(self._due_date) def _generate_timeline(self, right_now: Timestamp) -> str: year = "{year}".format(year=right_now.value.year) quarter = self.month_to_quarter(right_now) month = self.month_to_month(right_now) week = "W{week}".format(week=right_now.value.week_of_year) day = "D{day}".format(day=right_now.value.day_of_week) return "{year},{quarter},{month},{week},{day}".format(year=year, quarter=quarter, month=month, week=week, day=day) class WeeklySchedule(Schedule): """A monthly schedule.""" def __init__( self, name: EntityName, right_now: Timestamp, timezone: Timezone, skip_rule: Optional[RecurringTaskSkipRule], actionable_from_day: Optional[RecurringTaskDueAtDay], due_at_time: Optional[RecurringTaskDueAtTime], due_at_day: Optional[RecurringTaskDueAtDay]) -> None: """Construct a schedule.""" super().__init__() start_of_week = right_now.value.start_of("week") self._date = typing.cast(pendulum.Date, right_now.value.date()) if actionable_from_day: self._actionable_date = \ typing.cast(pendulum.Date, start_of_week.add(days=actionable_from_day.as_int() - 1).date()) else: self._actionable_date = None if due_at_day: self._due_date = start_of_week.add(days=due_at_day.as_int() - 1).end_of("day") else: self._due_date = 
start_of_week.end_of("week").end_of("day") if due_at_time: self._due_time = pendulum.parse( "{date} {time}".format(date=self._due_date.to_date_string(), time=due_at_time), tz=timezone) else: self._due_time = None self._full_name = EntityName("{name} {year}:W{week}".format( name=name, year=self.year_two_digits(right_now), week=start_of_week.week_of_year)) self._timeline = self._generate_timeline(start_of_week) self._should_skip = self._skip_helper(skip_rule, self._due_date.week_of_year) if skip_rule else False @property def period(self) -> RecurringTaskPeriod: """The period string.""" return RecurringTaskPeriod.WEEKLY @property def first_day(self) -> ADate: """The first day of the interval represented by the schedule block.""" return ADate.from_date(self._date.start_of("week")) @property def end_day(self) -> ADate: """The end day of the interval represented by the schedule block.""" return ADate.from_date(self._date.end_of("week")) def _generate_timeline(self, right_now: pendulum.DateTime) -> str: year = "{year}".format(year=right_now.year) quarter = self.month_to_quarter(right_now) month = self.month_to_month(right_now) week = "W{week}".format(week=right_now.week_of_year) return "{year},{quarter},{month},{week}".format(year=year, quarter=quarter, month=month, week=week) class MonthlySchedule(Schedule): """A monthly schedule.""" def __init__( self, name: EntityName, right_now: Timestamp, timezone: Timezone, skip_rule: Optional[RecurringTaskSkipRule], actionable_from_day: Optional[RecurringTaskDueAtDay], due_at_time: Optional[RecurringTaskDueAtTime], due_at_day: Optional[RecurringTaskDueAtDay]) -> None: """Construct a schedule.""" super().__init__() start_of_month = right_now.value.start_of("month") self._date = typing.cast(pendulum.Date, right_now.value.date()) if actionable_from_day: self._actionable_date = \ typing.cast(pendulum.Date, start_of_month.add(days=actionable_from_day.as_int() - 1).date()) else: self._actionable_date = None if due_at_day: self._due_date 
= start_of_month.add(days=due_at_day.as_int() - 1).end_of("day") else: self._due_date = start_of_month.end_of("month").end_of("day") if due_at_time: self._due_time = pendulum.parse( "{date} {time}".format(date=self._due_date.to_date_string(), time=due_at_time), tz=timezone) else: self._due_time = None self._full_name = EntityName("{name} {year}:{month}".format( name=name, year=self.year_two_digits(right_now), month=self.month_to_month(right_now))) self._timeline = self._generate_timeline(Timestamp(start_of_month)) self._should_skip = self._skip_helper(skip_rule, self._due_date.month) if skip_rule else False @property def period(self) -> RecurringTaskPeriod: """The period string.""" return RecurringTaskPeriod.MONTHLY @property def first_day(self) -> ADate: """The first day of the interval represented by the schedule block.""" return ADate.from_date(self._date.start_of("month")) @property def end_day(self) -> ADate: """The end day of the interval represented by the schedule block.""" return ADate.from_date(self._date.end_of("month")) def _generate_timeline(self, right_now: Timestamp) -> str: year = "{year}".format(year=right_now.value.year) quarter = self.month_to_quarter(right_now) month = self.month_to_month(right_now) return "{year},{quarter},{month}".format(year=year, quarter=quarter, month=month) class QuarterlySchedule(Schedule): """A quarterly schedule.""" def __init__( self, name: EntityName, right_now: Timestamp, timezone: Timezone, skip_rule: Optional[RecurringTaskSkipRule], actionable_from_day: Optional[RecurringTaskDueAtDay], actionable_from_month: Optional[RecurringTaskDueAtMonth], due_at_time: Optional[RecurringTaskDueAtTime], due_at_day: Optional[RecurringTaskDueAtDay], due_at_month: Optional[RecurringTaskDueAtMonth]) -> None: """Construct a schedule.""" super().__init__() self._date = typing.cast(pendulum.Date, right_now.value.date()) if actionable_from_month: if actionable_from_day: self._actionable_date = typing.cast(pendulum.Date, right_now .value 
.on(right_now.value.year, self.month_to_quarter_start(right_now), 1) .start_of("month") .add(months=actionable_from_month.as_int() - 1) .add(days=actionable_from_day.as_int() - 1) .date()) else: self._actionable_date = typing.cast(pendulum.Date, right_now .value .on(right_now.value.year, self.month_to_quarter_start(right_now), 1) .start_of("month") .add(months=actionable_from_month.as_int() - 1) .date()) elif actionable_from_day: self._actionable_date = typing.cast(pendulum.Date, right_now .value .on(right_now.value.year, self.month_to_quarter_start(right_now), 1) .start_of("month") .add(days=actionable_from_day.as_int() - 1) .date()) else: self._actionable_date = None if due_at_month: if due_at_day: self._due_date = right_now\ .value\ .on(right_now.value.year, self.month_to_quarter_start(right_now), 1)\ .start_of("month")\ .add(months=due_at_month.as_int() - 1)\ .add(days=due_at_day.as_int() - 1)\ .end_of("day") else: self._due_date = right_now\ .value\ .on(right_now.value.year, self.month_to_quarter_start(right_now), 1)\ .start_of("month")\ .add(months=due_at_month.as_int() - 1)\ .end_of("month")\ .end_of("day") elif due_at_day: self._due_date = right_now\ .value\ .on(right_now.value.year, self.month_to_quarter_start(right_now), 1)\ .start_of("month")\ .add(days=due_at_day.as_int() - 1)\ .end_of("day") else: self._due_date = right_now\ .value\ .on(right_now.value.year, self.month_to_quarter_end(right_now), 1)\ .end_of("month")\ .end_of("day") if due_at_time: self._due_time = pendulum.parse( "{date} {time}".format(date=self._due_date.to_date_string(), time=due_at_time), tz=timezone) else: self._due_time = None self._full_name = EntityName("{name} {year}:{quarter}".format( name=name, year=self.year_two_digits(right_now), quarter=self.month_to_quarter(right_now))) self._timeline = self._generate_timeline(right_now) self._should_skip = \ self._skip_helper(skip_rule, self.month_to_quarter_num(self._due_date)) if skip_rule else False @property def period(self) -> 
RecurringTaskPeriod: """The period string.""" return RecurringTaskPeriod.QUARTERLY @property def first_day(self) -> ADate: """The first day of the interval represented by the schedule block.""" return ADate.from_date_and_time(pendulum\ .DateTime(self._date.year, self.month_to_quarter_start(self._date), self._date.day, tzinfo=UTC)\ .start_of("month")) @property def end_day(self) -> ADate: """The end day of the interval represented by the scedule block.""" return ADate.from_date_and_time(pendulum\ .DateTime(self._date.year, self.month_to_quarter_end(self._date), self._date.day, tzinfo=UTC)\ .end_of("month")) def _generate_timeline(self, right_now: Timestamp) -> str: year = "{year}".format(year=right_now.value.year) quarter = self.month_to_quarter(right_now) return "{year},{quarter}".format(year=year, quarter=quarter) class YearlySchedule(Schedule): """A yearly schedule.""" def __init__( self, name: EntityName, right_now: Timestamp, timezone: Timezone, actionable_from_day: Optional[RecurringTaskDueAtDay], actionable_from_month: Optional[RecurringTaskDueAtMonth], due_at_time: Optional[RecurringTaskDueAtTime], due_at_day: Optional[RecurringTaskDueAtDay], due_at_month: Optional[RecurringTaskDueAtMonth]) -> None: """Construct a schedule.""" super().__init__() self._date = typing.cast(pendulum.Date, right_now.value.date()) if actionable_from_month: if actionable_from_day: self._actionable_date = typing.cast(pendulum.Date, right_now .value .start_of("year") .add(months=actionable_from_month.as_int() - 1) .add(days=actionable_from_day.as_int() - 1) .date()) else: self._actionable_date = typing.cast(pendulum.Date, right_now .value .start_of("year") .add(months=actionable_from_month.as_int() - 1) .date()) elif actionable_from_day: self._actionable_date = typing.cast( pendulum.Date, right_now.value.start_of("year").add(days=actionable_from_day.as_int() - 1).date()) else: self._actionable_date = None if due_at_month: if due_at_day: self._due_date = right_now\ .value\ 
.start_of("year")\ .add(months=due_at_month.as_int() - 1)\ .add(days=due_at_day.as_int() - 1)\ .end_of("day") else: self._due_date = right_now\ .value\ .start_of("year")\ .add(months=due_at_month.as_int() - 1)\ .end_of("month")\ .end_of("day") elif due_at_day: self._due_date = right_now.value.start_of("year").add(days=due_at_day.as_int() - 1).end_of("day") else: self._due_date = right_now.value.end_of("year").end_of("day") if due_at_time: self._due_time = pendulum.parse( "{date} {time}".format(date=self._due_date.to_date_string(), time=due_at_time), tz=timezone) else: self._due_time = None self._full_name = EntityName("{name} {year}".format(name=name, year=self.year_two_digits(right_now))) self._timeline = self._generate_timeline(right_now) self._should_skip = False @property def period(self) -> RecurringTaskPeriod: """The period string.""" return RecurringTaskPeriod.YEARLY @property def first_day(self) -> ADate: """The first day of the interval represented by the schedule block.""" return ADate.from_date(self._date.start_of("year")) @property def end_day(self) -> ADate: """The end day of the interval represented by the schedule block.""" return ADate.from_date(self._date.end_of("year")) @staticmethod def _generate_timeline(right_now: Timestamp) -> str: year = "{year}".format(year=right_now.value.year) return year def get_schedule( period: RecurringTaskPeriod, name: EntityName, right_now: Timestamp, timezone: DomainTimezone, skip_rule: Optional[RecurringTaskSkipRule], actionable_from_day: Optional[RecurringTaskDueAtDay], actionable_from_month: Optional[RecurringTaskDueAtMonth], due_at_time: Optional[RecurringTaskDueAtTime], due_at_day: Optional[RecurringTaskDueAtDay], due_at_month: Optional[RecurringTaskDueAtMonth]) -> Schedule: """Build an appropriate schedule from the given parameters.""" pendulum_timezone = pendulum.timezone(str(timezone)) if period == RecurringTaskPeriod.DAILY: return DailySchedule(name, right_now, pendulum_timezone, skip_rule, due_at_time) 
elif period == RecurringTaskPeriod.WEEKLY: return WeeklySchedule( name, right_now, pendulum_timezone, skip_rule, actionable_from_day, due_at_time, due_at_day) elif period == RecurringTaskPeriod.MONTHLY: return MonthlySchedule( name, right_now, pendulum_timezone, skip_rule, actionable_from_day, due_at_time, due_at_day) elif period == RecurringTaskPeriod.QUARTERLY: return QuarterlySchedule( name, right_now, pendulum_timezone, skip_rule, actionable_from_day, actionable_from_month, due_at_time, due_at_day, due_at_month) elif period == RecurringTaskPeriod.YEARLY: return YearlySchedule( name, right_now, pendulum_timezone, actionable_from_day, actionable_from_month, due_at_time, due_at_day, due_at_month) else: raise Exception(f"Invalid period {period}")
41.989673
120
0.591244
2,891
24,396
4.707022
0.066413
0.046443
0.034392
0.024985
0.779615
0.748236
0.736111
0.707378
0.70194
0.685847
0
0.009627
0.30173
24,396
580
121
42.062069
0.789199
0.084481
0
0.54329
0
0.004329
0.033207
0.006153
0
0
0
0
0
1
0.097403
false
0
0.032468
0
0.253247
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
7a319c61790b7e7d01726f98e57f5651f0b50cd0
200
py
Python
wfdb/processing/__init__.py
Chirayu-sopho/Sleep_Disorder_Classification
1566c26b79ec089943cbeec5e4b9ed41e601477c
[ "Apache-2.0" ]
null
null
null
wfdb/processing/__init__.py
Chirayu-sopho/Sleep_Disorder_Classification
1566c26b79ec089943cbeec5e4b9ed41e601477c
[ "Apache-2.0" ]
null
null
null
wfdb/processing/__init__.py
Chirayu-sopho/Sleep_Disorder_Classification
1566c26b79ec089943cbeec5e4b9ed41e601477c
[ "Apache-2.0" ]
null
null
null
from .basic import resample_ann, resample_sig, resample_singlechan, resample_multichan, normalize from .gqrs import gqrs_detect from .hr import compute_hr from .peaks import find_peaks, correct_peaks
40
97
0.85
29
200
5.586207
0.551724
0
0
0
0
0
0
0
0
0
0
0
0.105
200
4
98
50
0.905028
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
7a4aaa58cb60c3bf29cc356b0c84992d3ba7402f
591
py
Python
src/shared/domain/service/logging/logger.py
fdelgados/python-ddd-skeleton
9c48588929d82e7cbb0e27bd9717123eb9bd26a0
[ "MIT" ]
null
null
null
src/shared/domain/service/logging/logger.py
fdelgados/python-ddd-skeleton
9c48588929d82e7cbb0e27bd9717123eb9bd26a0
[ "MIT" ]
null
null
null
src/shared/domain/service/logging/logger.py
fdelgados/python-ddd-skeleton
9c48588929d82e7cbb0e27bd9717123eb9bd26a0
[ "MIT" ]
null
null
null
import abc class Logger(metaclass=abc.ABCMeta): @abc.abstractmethod def debug(self, message: str, *args) -> None: raise NotImplementedError @abc.abstractmethod def info(self, message: str, *args) -> None: raise NotImplementedError @abc.abstractmethod def warning(self, message: str, *args) -> None: raise NotImplementedError @abc.abstractmethod def error(self, message: str, *args) -> None: raise NotImplementedError @abc.abstractmethod def critical(self, message: str) -> None: raise NotImplementedError
24.625
51
0.666667
61
591
6.459016
0.327869
0.215736
0.253807
0.182741
0.670051
0.670051
0.670051
0.670051
0.670051
0.670051
0
0
0.235195
591
23
52
25.695652
0.871681
0
0
0.588235
0
0
0
0
0
0
0
0
0
1
0.294118
false
0
0.058824
0
0.411765
0
0
0
0
null
1
1
1
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
5
7a5cde4682269d94cf9876e2ba2d3429c11a847b
75
py
Python
boa3_test/example/logical_test/MixedOperations.py
jplippi/neo3-boa
052be4adebb665113715bb80067d954f7ad85ad5
[ "Apache-2.0" ]
null
null
null
boa3_test/example/logical_test/MixedOperations.py
jplippi/neo3-boa
052be4adebb665113715bb80067d954f7ad85ad5
[ "Apache-2.0" ]
null
null
null
boa3_test/example/logical_test/MixedOperations.py
jplippi/neo3-boa
052be4adebb665113715bb80067d954f7ad85ad5
[ "Apache-2.0" ]
null
null
null
def Main(a: bool, b: bool, c: bool) -> bool: return not a and (b or c)
25
44
0.573333
16
75
2.6875
0.625
0
0
0
0
0
0
0
0
0
0
0
0.266667
75
2
45
37.5
0.781818
0
0
0
0
0
0
0
0
0
0
0
0
1
0.5
false
0
0
0.5
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5
7ab04ee207f871f90c4ce14b558a0f6a0340939e
96
py
Python
mongodm/__init__.py
tdeni/mongodm
e793373a7083d83d3f7f36fd1446844a39cf876e
[ "MIT" ]
null
null
null
mongodm/__init__.py
tdeni/mongodm
e793373a7083d83d3f7f36fd1446844a39cf876e
[ "MIT" ]
1
2021-08-03T07:16:46.000Z
2021-08-03T07:16:46.000Z
mongodm/__init__.py
tdeni/mongodm
e793373a7083d83d3f7f36fd1446844a39cf876e
[ "MIT" ]
null
null
null
# flake8: noqa from .mongo import MongoClient from .types import Document from .query import Q
16
30
0.78125
14
96
5.357143
0.714286
0
0
0
0
0
0
0
0
0
0
0.0125
0.166667
96
5
31
19.2
0.925
0.125
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
8f8bd8ce5ba3b27ff15a33109fb1a3fd67e1da3d
159
py
Python
tests/strategies/__init__.py
lycantropos/voronoi
977e0b3e5eff2dd294e2e6ce1a8030c763e86233
[ "MIT" ]
null
null
null
tests/strategies/__init__.py
lycantropos/voronoi
977e0b3e5eff2dd294e2e6ce1a8030c763e86233
[ "MIT" ]
null
null
null
tests/strategies/__init__.py
lycantropos/voronoi
977e0b3e5eff2dd294e2e6ce1a8030c763e86233
[ "MIT" ]
null
null
null
from .base import (doubles, integers_32, integers_64, sizes, unsigned_integers_32)
26.5
40
0.427673
12
159
5.333333
0.75
0.3125
0
0
0
0
0
0
0
0
0
0.08
0.528302
159
5
41
31.8
0.773333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.2
0
0.2
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
1
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
890df086266af089d02f91fe1b3ab8e36420310f
254
py
Python
psaw/exceptions.py
LeartS/PSAW
fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2
[ "MIT" ]
null
null
null
psaw/exceptions.py
LeartS/PSAW
fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2
[ "MIT" ]
null
null
null
psaw/exceptions.py
LeartS/PSAW
fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2
[ "MIT" ]
null
null
null
class SearchaniseException(Exception): def __init__(self, message): super(SearchaniseException, self).__init__(message) class PSAWException(Exception): def __init__(self, message): super(PSAWException, self).__init__(message)
23.090909
59
0.732283
24
254
7.083333
0.375
0.141176
0.188235
0.235294
0.376471
0.376471
0
0
0
0
0
0
0.165354
254
10
60
25.4
0.801887
0
0
0.333333
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0
0
0.666667
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
890f8d821a8e5baaaa4cabbd6a51552f6b111b72
177
py
Python
preprocessing/__init__.py
costaruan/kaggle-dogs-vs-cats-competition
d386e3267a3ab81e17f84c0c260973404b1e6808
[ "MIT" ]
null
null
null
preprocessing/__init__.py
costaruan/kaggle-dogs-vs-cats-competition
d386e3267a3ab81e17f84c0c260973404b1e6808
[ "MIT" ]
null
null
null
preprocessing/__init__.py
costaruan/kaggle-dogs-vs-cats-competition
d386e3267a3ab81e17f84c0c260973404b1e6808
[ "MIT" ]
null
null
null
from .image_preprocessing import create_testing_data from .image_preprocessing import create_training_data __all__ = ['create_testing_data', 'create_training_data']
29.5
53
0.80791
21
177
6.142857
0.428571
0.139535
0.341085
0.434109
0.527132
0
0
0
0
0
0
0
0.135593
177
5
54
35.4
0.843137
0
0
0
0
0
0.220339
0
0
0
0
0
0
1
0
false
0
0.5
0
0.5
0
1
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
891906192ecd0af6d29c3f7ce57aa6aa4a0c05d5
501
py
Python
dataset/__init__.py
Jueast/VLAE_Pytorch
8373390008d611909997e4a3de8396f617d53a49
[ "MIT" ]
null
null
null
dataset/__init__.py
Jueast/VLAE_Pytorch
8373390008d611909997e4a3de8396f617d53a49
[ "MIT" ]
null
null
null
dataset/__init__.py
Jueast/VLAE_Pytorch
8373390008d611909997e4a3de8396f617d53a49
[ "MIT" ]
null
null
null
try: # Works for python 3 from dataset.dataset import * from dataset.dataset_mnist import MnistDataset from dataset.dataset_SVHN import SVHNDataset from dataset.dataset_dsprites import DspritesDataset from dataset.dataset_HEART import HeartDataset except: # Works for python 2 from dataset import * from dataset_mnist import MnistDataset from dataset_SVHN import SVHNDataset from dataset_dsprites import DspritesDataset from dataset_HEART import HeartDataset
41.75
56
0.790419
61
501
6.360656
0.295082
0.283505
0.231959
0.123711
0.618557
0.618557
0
0
0
0
0
0.004914
0.187625
501
12
57
41.75
0.948403
0.073852
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.833333
0
0.833333
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
56eab0dcd9984691e100ac12be2044e800ca41bf
1,180
py
Python
lists_01/list_examples.py
YAtOff/vcs-internship
bcbd4f54402fd65c6c5f955e16ef50413c8cd7e4
[ "Apache-2.0" ]
null
null
null
lists_01/list_examples.py
YAtOff/vcs-internship
bcbd4f54402fd65c6c5f955e16ef50413c8cd7e4
[ "Apache-2.0" ]
null
null
null
lists_01/list_examples.py
YAtOff/vcs-internship
bcbd4f54402fd65c6c5f955e16ef50413c8cd7e4
[ "Apache-2.0" ]
1
2018-11-08T13:01:47.000Z
2018-11-08T13:01:47.000Z
""" Indexing ======== >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> ??? 1 Negative indexing ================= >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> ??? 10 >>> ??? 8 List slices (``a[start:end]``) ============================== >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> ??? [2, 3, 4, 5, 6, 7] List slices with negative indexing ================================== >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> ??? [7, 8] List slices with step (``a[start:end:step]``) ============================================= >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> ??? [0, 2, 4, 6, 8, 10] >>> ??? [0, 3, 6, 9] >>> ??? [2, 4, 6] List slices with negative step ============================== >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> ??? [10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] >>> ??? [10, 8, 6, 4, 2, 0] List slice assignment ===================== >>> a = [1, 2, 3, 4, 5] >>> a[2:3] = [0,0] >>> a ??? >>> a[1:1], a[4:4], a[4:5] = [8,9], [0], [] >>> a ??? >>> a[1:7] = [] >>> a ??? """ if __name__ == "__main__": import doctest doctest.testmod()
14.75
47
0.29322
178
1,180
1.898876
0.157303
0.053254
0.071006
0.094675
0.41716
0.402367
0.384615
0.384615
0.384615
0.384615
0
0.163876
0.291525
1,180
79
48
14.936709
0.240431
0.933051
0
0
0
0
0.112676
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
56fdfb66660db6efff34d8fd14cff0943c9e7cd7
1,312
py
Python
Connector/sendData.py
katarinagalic/pillow-clock
bad4003ded92a3faf2ca4913623f47b4630ac53d
[ "MIT" ]
null
null
null
Connector/sendData.py
katarinagalic/pillow-clock
bad4003ded92a3faf2ca4913623f47b4630ac53d
[ "MIT" ]
null
null
null
Connector/sendData.py
katarinagalic/pillow-clock
bad4003ded92a3faf2ca4913623f47b4630ac53d
[ "MIT" ]
1
2019-08-07T19:38:05.000Z
2019-08-07T19:38:05.000Z
import requests import datetime class sendData(): def __init__(self, userID, startTime, endTime): self.__date = datetime.datetime.now().date() self.__url = "http://127.0.0.1:8000/nights/" self.__user = userID self.__start = startTime self.__end = endTime self.__data = { 'sleeper': self.__user, 'start_sleep': self.__start, 'end_sleep': self.__end } def send (self): requests.post(url = self.__url, data = self.__data) if __name__ == "__main__": pass # testArray = ["2019-10-31 20:21:01", "2019-10-31 20:22:01"] # test = sendData ("2", testArray[0], testArray[1]) # test.send() # test = ("I got: getData" # "[2019/05/26 14:41:54, 2019/05/26 14:42:02]" # "[2019/05/26 14:42:03, 2019/05/26 14:42:03]" # "[2019/05/26 14:42:03, 2019/05/26 14:42:03]" # "[2019/05/26 14:42:03, 2019/05/26 14:42:03]" # "[2019/05/26 14:42:03, 2019/05/26 14:42:04]" # "[2019/05/26 14:42:04, 2019/05/26 14:42:05]" # "[2019/05/26 14:42:05, 2019/05/26 14:42:05]" # "[2019/05/26 14:42:05, 2019/05/26 14:42:05]" # "[2019/05/26 14:42:05, 2019/05/26 14:42:06]" # "[2019/05/26 14:42:06, 2019/05/26 14:42:13]" # "[2019/05/26 14:42:14, 2019/05/26 14:42:16]") # test = test.replace('I got: getData', '') # test = test.replace('[', '') # fin = test.split(',') # print (fin[0])
31.238095
61
0.604421
237
1,312
3.194093
0.257384
0.174373
0.232497
0.290621
0.377807
0.330251
0.330251
0.330251
0.330251
0.330251
0
0.32022
0.166921
1,312
41
62
32
0.37237
0.587652
0
0
0
0
0.122841
0
0
0
0
0
0
1
0.111111
false
0.055556
0.111111
0
0.277778
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
5
71247fd00182d2a6e325f812cf9fdc8b3d6a0051
33
py
Python
LogTelegram/__init__.py
xTruog94/LogTelegram
1c7ce557a4bb39e97f39e4038f66ac7563ceaa87
[ "MIT" ]
null
null
null
LogTelegram/__init__.py
xTruog94/LogTelegram
1c7ce557a4bb39e97f39e4038f66ac7563ceaa87
[ "MIT" ]
null
null
null
LogTelegram/__init__.py
xTruog94/LogTelegram
1c7ce557a4bb39e97f39e4038f66ac7563ceaa87
[ "MIT" ]
null
null
null
from .LogTele import send_message
33
33
0.878788
5
33
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.090909
33
1
33
33
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
713f4c94b04175a1092ee1b9928125ea67ce203a
39
py
Python
py_graph_t/vertex/__init__.py
sturmianseq/PyGraph
e81c3a0f543f5bfcda1a603c6dcecde13d582c57
[ "MIT" ]
17
2019-09-29T22:02:57.000Z
2020-04-03T00:04:34.000Z
py_graph_t/vertex/__init__.py
sturmianseq/PyGraph
e81c3a0f543f5bfcda1a603c6dcecde13d582c57
[ "MIT" ]
63
2019-10-01T12:13:35.000Z
2019-12-11T11:32:21.000Z
py_graph_t/vertex/__init__.py
sturmianseq/PyGraph
e81c3a0f543f5bfcda1a603c6dcecde13d582c57
[ "MIT" ]
24
2019-10-01T15:53:37.000Z
2020-03-08T13:36:06.000Z
from .SimpleVertex import SimpleVertex
19.5
38
0.871795
4
39
8.5
0.75
0
0
0
0
0
0
0
0
0
0
0
0.102564
39
1
39
39
0.971429
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
8579181b012eb6f5bd8dcdbfdd73cbc71b0d52fa
258
py
Python
Fase 4 - Temas avanzados/Tema 11 - Modulos/Apuntes/Leccion 01 (Apuntes) - Modulos/hola.py
ruben69695/python-course
a3d3532279510fa0315a7636c373016c7abe4f0a
[ "MIT" ]
1
2019-01-27T20:44:53.000Z
2019-01-27T20:44:53.000Z
Fase 4 - Temas avanzados/Tema 11 - Modulos/Apuntes/Leccion 02 (Apuntes) - Paquetes/paquete/saludos.py
ruben69695/python-course
a3d3532279510fa0315a7636c373016c7abe4f0a
[ "MIT" ]
null
null
null
Fase 4 - Temas avanzados/Tema 11 - Modulos/Apuntes/Leccion 02 (Apuntes) - Paquetes/paquete/saludos.py
ruben69695/python-course
a3d3532279510fa0315a7636c373016c7abe4f0a
[ "MIT" ]
null
null
null
# Este es un módulo con funciones que saludan def saludar(): print("Hola, te estoy saludando desde la función saludar() del módulo saludos") class Saludo(): def __init__(self): print("Hola, te estoy saludando desde el __init__ de la clase Saludo")
36.857143
81
0.732558
39
258
4.641026
0.692308
0.099448
0.121547
0.176796
0.331492
0.331492
0
0
0
0
0
0
0.182171
258
7
82
36.857143
0.85782
0.166667
0
0
0
0
0.629808
0
0
0
0
0
0
1
0.4
false
0
0
0
0.6
0.4
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
1
0
0
0
0
1
0
0
5
857970a9bbbaacfd684e380db747712f65196f31
133
py
Python
cwbbus/__init__.py
killertux/cwbbus
a57580a72ad2c5ead7b78e9381ccf80fbe8f6e31
[ "MIT" ]
null
null
null
cwbbus/__init__.py
killertux/cwbbus
a57580a72ad2c5ead7b78e9381ccf80fbe8f6e31
[ "MIT" ]
null
null
null
cwbbus/__init__.py
killertux/cwbbus
a57580a72ad2c5ead7b78e9381ccf80fbe8f6e31
[ "MIT" ]
1
2019-06-16T18:39:07.000Z
2019-06-16T18:39:07.000Z
from cwbbus.downloader import get_data, get_data_range from cwbbus.datareader import DataReader from cwbbus.filetype import FileType
33.25
54
0.87218
19
133
5.947368
0.473684
0.265487
0
0
0
0
0
0
0
0
0
0
0.097744
133
3
55
44.333333
0.941667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
85817df9c5ffb74dd7b128d64245f8f543c460fb
50
py
Python
boofuzz/unit_tests/__init__.py
youngcraft/boofuzz-modbus
bfeb48345b56797b48079e0620e7b06b27085789
[ "Apache-2.0" ]
23
2018-08-11T12:12:33.000Z
2022-01-28T10:22:49.000Z
boofuzz/unit_tests/__init__.py
ctf-fuzzer/boofuzz-modbus
bfeb48345b56797b48079e0620e7b06b27085789
[ "Apache-2.0" ]
2
2018-07-24T15:15:40.000Z
2020-07-12T13:06:56.000Z
boofuzz/unit_tests/__init__.py
ctf-fuzzer/boofuzz-modbus
bfeb48345b56797b48079e0620e7b06b27085789
[ "Apache-2.0" ]
10
2018-04-02T13:21:36.000Z
2022-01-17T09:20:27.000Z
import test_blocks import legos import primitives
12.5
18
0.88
7
50
6.142857
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.12
50
3
19
16.666667
0.977273
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
85b33a6aee8cbe25a47bbdedfaa5429e9f34bd76
6,206
py
Python
rcnn_dff/tools/monitor_log.py
tonysy/mx-rcnn-flow
b78c3c964c802bb874d673170d7452e7a573a998
[ "Apache-2.0" ]
2
2018-01-31T02:47:42.000Z
2019-07-05T03:48:54.000Z
rcnn_dff/tools/monitor_log.py
tonysy/mx-rcnn-flow
b78c3c964c802bb874d673170d7452e7a573a998
[ "Apache-2.0" ]
null
null
null
rcnn_dff/tools/monitor_log.py
tonysy/mx-rcnn-flow
b78c3c964c802bb874d673170d7452e7a573a998
[ "Apache-2.0" ]
null
null
null
# coding:utf-8 import tail import matplotlib.pyplot as plt from matplotlib.ticker import MultipleLocator, FormatStrFormatter import numpy as np import re import argparse parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves') parser.add_argument('--log_file', type=str,default="log_tr_va", help='the path of log file') parser.add_argument('--metric', type=str,default="L1Loss", help='the path of log file') parser.add_argument('--ylim_log', type=float,default=1.2, help='the path of log file') parser.add_argument('--ylim_l1', type=float,default=1.2, help='the path of log file') args = parser.parse_args() global args def print_line(txt): global args print(txt) plot_curve(args.metric, args.log_file) def plot_curve(metric, log_file): global args plt.ion() plt.clf() plt.style.use('ggplot') # ax = plt.subplot(1,1,1) metric_list = ["LogLoss", 'L1Loss'] ylimm_list = [args.ylim_log,args.ylim_l1] y_size_list = [0.1, 0.1] linestyle_list = ['-', '--'] for i, item in enumerate(metric_list): log_rpn, log_rcnn, idx, metric_name = log_parse(item, log_file) # if i+1 < 3: # ax = plt.subplot(2,1,i+1) # else: # ax = plt.subplot(2,2,i+1) ax = plt.subplot(1,2,i+1) plt.xlabel("Epoch") plt.ylabel(item) plt.plot(idx, log_rpn, '-', linestyle=linestyle_list[i], color="g", label=metric_name[0]) plt.plot(idx, log_rcnn, '-', linestyle=linestyle_list[i], color="b", label=metric_name[1]) plt.legend(loc="best") # plt.xticks(np.arange(min(idx), max(idx)+1, 100), ) # plt.yticks(np.arange(0, 1.2, 0.2)) plt.ylim([0,ylimm_list[i]]) xmajorLocator = MultipleLocator(1) #将x主刻度标签设置为20的倍数 xmajorFormatter = FormatStrFormatter('%5.1f') #设置x轴标签文本的格式 xminorLocator = MultipleLocator(0.2) #将x轴次刻度标签设置为5的倍数 ymajorLocator = MultipleLocator(0.1) #将y轴主刻度标签设置为0.5的倍数 ymajorFormatter = FormatStrFormatter('%1.1f') #设置y轴标签文本的格式 yminorLocator = MultipleLocator(y_size_list[i]) #将此y轴次刻度标签设置为0.1的倍数 ax.xaxis.set_major_locator(xmajorLocator) ax.xaxis.set_major_formatter(xmajorFormatter) 
ax.yaxis.set_major_locator(ymajorLocator) ax.yaxis.set_major_formatter(ymajorFormatter) #显示次刻度标签的位置,没有标签文本 ax.xaxis.set_minor_locator(xminorLocator) ax.yaxis.set_minor_locator(yminorLocator) ax.xaxis.grid(True, which='major') #x坐标轴的网格使用主刻度 ax.yaxis.grid(True, which='minor') #y坐标轴的网格使用次刻度 plt.tight_layout() plt.draw() plt.pause(0.01) def log_parse(metric, log_file): if metric == 'TRAIN_ACC': metric_name = ['Train-RPNAcc','Train-RCNNAcc'] elif metric == 'ACC': metric_name = ['RPNAcc', 'RCNNAcc'] elif metric == 'L1Loss': metric_name = ['RPNL1Loss', 'RCNNL1Loss'] elif metric == 'LogLoss': metric_name = ['RPNLogLoss', 'RCNNLogLoss'] else: assert 1==1, 'metric error!' if metric == 'TRAIN_ACC': RPN = re.compile('.*?]\s{}=([\d\.]+)'.format(metric_name[0])) RCNN = re.compile('.*?]\s{}=([\d\.]+)'.format(metric_name[1])) else: RPN = re.compile('.*{}=([\d\.]+).*?'.format(metric_name[0])) RCNN = re.compile('.*{}=([\d\.]+).*?'.format(metric_name[1])) log = open(log_file).read() log_rpn = [float(x) for x in RPN.findall(log)] log_rcnn = [float(x) for x in RCNN.findall(log)] idx = np.arange(len(log_rpn),dtype='float32') idx = idx / 186 return log_rpn, log_rcnn, idx, metric_name t = tail.Tail(args.log_file) t.register_callback(print_line) t.follow(s=1) # plot_curve(args.metric, args.log_file) # def plot_curve(metric, log_file): # if metric == 'TRAIN_ACC': # metric_name = ['Train-RPNAcc','Train-RCNNAcc'] # elif metric == 'ACC': # metric_name = ['RPNAcc', 'RCNNAcc'] # elif metric == 'L1Loss': # metric_name = ['RPNL1Loss', 'RCNNL1Loss'] # elif metric == 'LogLoss': # metric_name = ['RPNLogLoss', 'RCNNLogLoss'] # else: # assert 1==1, 'metric error!' 
# # if metric == 'TRAIN_ACC': # RPN = re.compile('.*?]\s{}=([\d\.]+)'.format(metric_name[0])) # RCNN = re.compile('.*?]\s{}=([\d\.]+)'.format(metric_name[1])) # else: # RPN = re.compile('.*{}=([\d\.]+).*?'.format(metric_name[0])) # RCNN = re.compile('.*{}=([\d\.]+).*?'.format(metric_name[1])) # log = open(log_file).read() # log_rpn = [float(x) for x in RPN.findall(log)] # log_rcnn = [float(x) for x in RCNN.findall(log)] # # idx = np.arange(len(log_rpn),dtype='float32') # idx = idx / 186 # # # plt.figure(figsize=(8, 6)) # plt.ion() # plt.clf() # ax = plt.subplot(111) # plt.xlabel("Epoch") # plt.ylabel(metric) # plt.plot(idx, log_rpn, '-', linestyle='-', color="r", # label=metric_name[0]) # # plt.plot(idx, log_rcnn, '-', linestyle='-', color="b", # label=metric_name[1]) # # plt.legend(loc="best") # # # plt.xticks(np.arange(min(idx), max(idx)+1, 100), ) # # plt.yticks(np.arange(0, 1.2, 0.2)) # plt.ylim([0,1.2]) # # xmajorLocator = MultipleLocator(1) #将x主刻度标签设置为20的倍数 # xmajorFormatter = FormatStrFormatter('%5.1f') #设置x轴标签文本的格式 # xminorLocator = MultipleLocator(0.2) #将x轴次刻度标签设置为5的倍数 # # # ymajorLocator = MultipleLocator(0.1) #将y轴主刻度标签设置为0.5的倍数 # ymajorFormatter = FormatStrFormatter('%1.1f') #设置y轴标签文本的格式 # yminorLocator = MultipleLocator(0.1) #将此y轴次刻度标签设置为0.1的倍数 # # ax.xaxis.set_major_locator(xmajorLocator) # ax.xaxis.set_major_formatter(xmajorFormatter) # # ax.yaxis.set_major_locator(ymajorLocator) # ax.yaxis.set_major_formatter(ymajorFormatter) # # #显示次刻度标签的位置,没有标签文本 # ax.xaxis.set_minor_locator(xminorLocator) # ax.yaxis.set_minor_locator(yminorLocator) # # ax.xaxis.grid(True, which='major') #x坐标轴的网格使用主刻度 # ax.yaxis.grid(True, which='minor') #y坐标轴的网格使用次刻度 # # plt.draw() # plt.pause(0.001)
33.010638
94
0.608605
793
6,206
4.629256
0.208071
0.059929
0.02833
0.037047
0.789975
0.760556
0.746935
0.73277
0.73277
0.722691
0
0.02637
0.217854
6,206
187
95
33.187166
0.729913
0.413149
0
0.130952
0
0
0.122743
0
0
0
0
0
0.011905
1
0.035714
false
0
0.071429
0
0.119048
0.035714
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a411fedfd58f7de76afb052efd977eb76b07577a
123
py
Python
borrowingMoneyManagement/apps.py
520MianXiangDuiXiang520/FamilyPropertyManageSystem
b4f9d681a96a6547c6755d0229f420b4112076c5
[ "MIT" ]
7
2019-11-24T08:24:33.000Z
2021-11-07T20:25:51.000Z
borrowingMoneyManagement/apps.py
520MianXiangDuiXiang520/FamilyPropertyManageSystem
b4f9d681a96a6547c6755d0229f420b4112076c5
[ "MIT" ]
6
2020-02-12T02:58:28.000Z
2022-02-10T08:52:38.000Z
borrowingMoneyManagement/apps.py
520MianXiangDuiXiang520/FamilyPropertyManageSystem
b4f9d681a96a6547c6755d0229f420b4112076c5
[ "MIT" ]
1
2019-11-30T03:11:32.000Z
2019-11-30T03:11:32.000Z
from django.apps import AppConfig class BorrowingmoneymanagementConfig(AppConfig): name = 'borrowingMoneyManagement'
20.5
48
0.821138
10
123
10.1
0.9
0
0
0
0
0
0
0
0
0
0
0
0.121951
123
5
49
24.6
0.935185
0
0
0
0
0
0.195122
0.195122
0
0
0
0
0
1
0
false
0
0.333333
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
5
a44ac13aedef01c9d27ae624b2360ccbd994a7f1
287
py
Python
Exercicios/exe002.py
Isaaquee/Curso-em-Video---Python
6d0ed4a9aac2b7a50df9f9d07a5b4de7e0999f88
[ "MIT" ]
null
null
null
Exercicios/exe002.py
Isaaquee/Curso-em-Video---Python
6d0ed4a9aac2b7a50df9f9d07a5b4de7e0999f88
[ "MIT" ]
null
null
null
Exercicios/exe002.py
Isaaquee/Curso-em-Video---Python
6d0ed4a9aac2b7a50df9f9d07a5b4de7e0999f88
[ "MIT" ]
null
null
null
print ('=====Crie um programa que pergunte seu nome, e imprima, É um prazer te conhecer====') nome=input('Qual seu nome?') #print ('É um prazer te conhecer', nome,'!') #Metodo do professor,{} - esse bloco sera substituido pelo format print ('É um prazer te conhecer, {}!'.format(nome))
41
93
0.686411
44
287
4.477273
0.568182
0.045685
0.137056
0.167513
0.380711
0.380711
0
0
0
0
0
0
0.149826
287
6
94
47.833333
0.807377
0.372822
0
0
0
0
0.702247
0
0
0
0
0.166667
0
1
0
false
0
0
0
0
0.666667
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
1
0
0
0
0
0
0
0
0
1
0
5
a452e63ad08ea6ae5e58a9d655a09d94d4354074
30
py
Python
py/redrock/_version.py
michaelJwilson/redrock
477c3d231514b1926dca493f8ab121aa194917bb
[ "BSD-3-Clause" ]
null
null
null
py/redrock/_version.py
michaelJwilson/redrock
477c3d231514b1926dca493f8ab121aa194917bb
[ "BSD-3-Clause" ]
null
null
null
py/redrock/_version.py
michaelJwilson/redrock
477c3d231514b1926dca493f8ab121aa194917bb
[ "BSD-3-Clause" ]
null
null
null
__version__ = '0.13.2.dev565'
15
29
0.7
5
30
3.4
1
0
0
0
0
0
0
0
0
0
0
0.259259
0.1
30
1
30
30
0.37037
0
0
0
0
0
0.433333
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
a46cc92d6fc4969c3b12dfe4a62e7b893b552204
29
py
Python
meteoalertapi/__init__.py
xbgmsharp/meteoalert-api
f6886faa57c3dc69df10630d824e880de96cc7fc
[ "MIT" ]
5
2019-05-18T10:39:23.000Z
2022-01-17T06:22:13.000Z
meteoalertapi/__init__.py
xbgmsharp/meteoalert-api
f6886faa57c3dc69df10630d824e880de96cc7fc
[ "MIT" ]
12
2019-05-18T10:35:46.000Z
2022-02-09T12:21:22.000Z
meteoalertapi/__init__.py
xbgmsharp/meteoalert-api
f6886faa57c3dc69df10630d824e880de96cc7fc
[ "MIT" ]
8
2019-05-24T20:53:28.000Z
2022-02-19T07:01:56.000Z
from .meteoalertapi import *
14.5
28
0.793103
3
29
7.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.137931
29
1
29
29
0.92
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
5
a46f4b86e8076de73d267180b206a413cd1c6d83
71
py
Python
grab/djangoui/grabstat/forms.py
subeax/grab
55518263c543da214d1f0cb54622bbc4fda66349
[ "MIT" ]
1
2021-05-10T16:03:24.000Z
2021-05-10T16:03:24.000Z
grab/djangoui/grabstat/forms.py
subeax/grab
55518263c543da214d1f0cb54622bbc4fda66349
[ "MIT" ]
null
null
null
grab/djangoui/grabstat/forms.py
subeax/grab
55518263c543da214d1f0cb54622bbc4fda66349
[ "MIT" ]
null
null
null
# coding: utf-8 from django import forms #from grabstat.models import
14.2
28
0.774648
11
71
5
0.818182
0
0
0
0
0
0
0
0
0
0
0.016667
0.15493
71
4
29
17.75
0.9
0.56338
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f100dc1ddda289aaf6937ccc63a34af09286b350
72
py
Python
wakati/__init__.py
DHI-GRAS/wakati
eb854464e46eae7b44c5a925b9d035b1bf9d3f82
[ "BSD-2-Clause" ]
2
2019-02-15T03:51:27.000Z
2021-06-30T12:49:06.000Z
wakati/__init__.py
DHI-GRAS/wakati
eb854464e46eae7b44c5a925b9d035b1bf9d3f82
[ "BSD-2-Clause" ]
null
null
null
wakati/__init__.py
DHI-GRAS/wakati
eb854464e46eae7b44c5a925b9d035b1bf9d3f82
[ "BSD-2-Clause" ]
null
null
null
from __future__ import absolute_import from wakati.wakati import Timer
18
38
0.861111
10
72
5.7
0.6
0
0
0
0
0
0
0
0
0
0
0
0.125
72
3
39
24
0.904762
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
5
f17acfbf2c1dd815df82261e781f38461b7df2d5
209
py
Python
centermask/__init__.py
MiXaiLL76/centermask2
612fa5f02b09c4167e14031be50c6e5e4e58ea77
[ "Apache-2.0" ]
null
null
null
centermask/__init__.py
MiXaiLL76/centermask2
612fa5f02b09c4167e14031be50c6e5e4e58ea77
[ "Apache-2.0" ]
null
null
null
centermask/__init__.py
MiXaiLL76/centermask2
612fa5f02b09c4167e14031be50c6e5e4e58ea77
[ "Apache-2.0" ]
null
null
null
from centermask import utils from centermask import layers from centermask import evaluation from centermask import config from centermask import modeling from centermask import model_zoo __version__ = "0.1"
23.222222
33
0.84689
28
209
6.142857
0.464286
0.488372
0.697674
0
0
0
0
0
0
0
0
0.01105
0.133971
209
8
34
26.125
0.939227
0
0
0
0
0
0.014354
0
0
0
0
0
0
1
0
false
0
0.857143
0
0.857143
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
5
74de7ef681499a8d3cf74df077ee838c246bf21e
206
py
Python
src/python/WMCore/ResourceControl/Oracle/Destroy.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
21
2015-11-19T16:18:45.000Z
2021-12-02T18:20:39.000Z
src/python/WMCore/ResourceControl/Oracle/Destroy.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
5,671
2015-01-06T14:38:52.000Z
2022-03-31T22:11:14.000Z
src/python/WMCore/ResourceControl/Oracle/Destroy.py
khurtado/WMCore
f74e252412e49189a92962945a94f93bec81cd1e
[ "Apache-2.0" ]
67
2015-01-21T15:55:38.000Z
2022-02-03T19:53:13.000Z
#/usr/bin/env python """ _Destroy_ Oracle implementation of ResourceControl.Destroy. """ from WMCore.ResourceControl.MySQL.Destroy import Destroy as MySQLDestroy class Destroy(MySQLDestroy): pass
13.733333
72
0.776699
23
206
6.869565
0.73913
0
0
0
0
0
0
0
0
0
0
0
0.135922
206
14
73
14.714286
0.88764
0.38835
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
5
74fd3d43402305d25e3681b0e67143d361009225
289
py
Python
src/interface/clients/provider.py
sdediego/forex-django-clean-architecture
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
[ "MIT" ]
8
2021-11-09T16:43:38.000Z
2022-03-25T16:04:26.000Z
src/interface/clients/provider.py
sdediego/forex-django-clean-architecture
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
[ "MIT" ]
null
null
null
src/interface/clients/provider.py
sdediego/forex-django-clean-architecture
915a8d844a8db5a40c726fe4cf9f6d50f7c95275
[ "MIT" ]
2
2021-11-16T21:17:31.000Z
2022-02-11T11:15:29.000Z
# coding: utf-8 from typing import Any class ProviderClient: def __init__(self, provider_driver: object): self.provider_driver = provider_driver def fetch_data(self, action: str, **kwargs: dict) -> Any: return self.provider_driver.fetch_data(action, **kwargs)
22.230769
64
0.705882
37
289
5.243243
0.594595
0.28866
0.278351
0
0
0
0
0
0
0
0
0.004292
0.193772
289
12
65
24.083333
0.828326
0.044983
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.166667
0.166667
0.833333
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
5