Schema (113 columns):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
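Each row pairs repository metadata with one source file (`content`) and its quality signals. A minimal sketch of reading such a dump and filtering on the signals, assuming a Parquet shard named `shard.parquet` and a pandas toolchain (both are assumptions; only the column names come from the schema above):

```python
# Hypothetical loading sketch: "shard.parquet" and pandas are assumptions,
# not part of the dump; the column names match the schema above.
import pandas as pd

df = pd.read_parquet("shard.parquet")

# Keep Python files that are not dominated by duplicated 10-grams
# and whose average line length looks sane.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
    & (df["avg_line_length"] < 100.0)
)
for _, row in df[mask].head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```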
**Record 1: tcstewar/davis_tracking, experiments/track_deep_trial.py**

- hexsha: 1967b5cbe9cc4d8e1820eb5ee22aea8355578e45; size: 8,306; ext: py; lang: Python
- max_stars: path experiments/track_deep_trial.py; repo tcstewar/davis_tracking; head 4813eeaf66bdfad7b90f17831f6b0946daa8cbdf; licenses ["MIT"]; count 5; events 2019-06-13T02:38:51.000Z to 2021-07-29T03:32:41.000Z
- max_issues: path experiments/track_deep_trial.py; repo tcstewar/davis_tracking; head 4813eeaf66bdfad7b90f17831f6b0946daa8cbdf; licenses ["MIT"]; count 1; events 2019-08-05T17:30:31.000Z to 2019-08-05T17:30:31.000Z
- max_forks: path experiments/track_deep_trial.py; repo tcstewar/davis_tracking; head 4813eeaf66bdfad7b90f17831f6b0946daa8cbdf; licenses ["MIT"]; count 3; events 2019-06-05T18:42:14.000Z to 2019-06-19T19:36:02.000Z

content:
```python
import pytry
import os
import random
import nengo
import nengo_extras
import numpy as np
import nengo_dl
import tensorflow as tf
import davis_tracking


class TrackingTrial(pytry.PlotTrial):
    def params(self):
        self.param('number of data sets to use', n_data=-1)
        self.param('data directory', dataset_dir=r'../dataset')
        self.param('dt', dt=0.1)
        self.param('dt_test', dt_test=0.001)
        self.param('decay time (input synapse)', decay_time=0.01)
        self.param('test set (odd|one)', test_set='one')
        self.param('augment training set with flips', augment=False)
        self.param('miniback size', minibatch_size=200)
        self.param('learning rate', learning_rate=1e-3)
        self.param('number of epochs', n_epochs=5)
        self.param('saturation', saturation=5)
        self.param('separate positive and negative channels', separate_channels=True)
        self.param('number of features in layer 1', n_features_1=28)
        self.param('number of features in layer 2', n_features_2=64)
        self.param('kernel size layer 1', kernel_size_1=5)
        self.param('stride layer 1', stride_1=3)
        self.param('kernel size layer 2', kernel_size_2=3)
        self.param('stride layer 2', stride_2=1)
        self.param('split spatial configuration', split_spatial=True)
        self.param('spatial stride', spatial_stride=10)
        self.param('spatial kernel size', spatial_size=20)
        self.param('number of parallel ensembles', n_parallel=2)
        self.param('merge pixels (to make a smaller image)', merge=3)
        self.param('normalize inputs', normalize=False)

    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))
        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)
        if p.n_data != -1:
            files = random.sample(files, p.n_data)
        inputs = []
        targets = []
        for f in files:
            times, imgs, targs = davis_tracking.load_data(f, dt=p.dt, decay_time=p.decay_time,
                                                          separate_channels=p.separate_channels,
                                                          saturation=p.saturation, merge=p.merge)
            inputs.append(imgs)
            targets.append(targs[:, :2])
        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)
        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
            dt_test = p.dt * 2
        elif p.test_set == 'one':
            times, imgs, targs = davis_tracking.load_data(test_file, dt=p.dt_test, decay_time=p.decay_time,
                                                          separate_channels=p.separate_channels,
                                                          saturation=p.saturation, merge=p.merge)
            inputs_test = imgs
            targets_test = targs[:, :2]
            inputs_train = inputs_all
            targets_train = targets_all
            dt_test = p.dt_test
        if p.augment:
            inputs_train, targets_train = davis_tracking.augment(inputs_train, targets_train,
                                                                 separate_channels=p.separate_channels)
        if p.separate_channels:
            shape = (2, 180 // p.merge, 240 // p.merge)
        else:
            shape = (1, 180 // p.merge, 240 // p.merge)
        dimensions = shape[0] * shape[1] * shape[2]
        if p.normalize:
            magnitude = np.linalg.norm(inputs_train.reshape(-1, dimensions), axis=1)
            inputs_train = inputs_train * (1.0 / magnitude[:, None, None])
            magnitude = np.linalg.norm(inputs_test.reshape(-1, dimensions), axis=1)
            inputs_test = inputs_test * (1.0 / magnitude[:, None, None])

        max_rate = 100
        amp = 1 / max_rate

        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            model.config[nengo.Connection].synapse = None

            inp = nengo.Node(
                nengo.processes.PresentInput(inputs_test.reshape(-1, dimensions), dt_test),
                size_out=dimensions,
            )
            out = nengo.Node(None, size_in=2)

            if not p.split_spatial:
                # do a standard convnet
                conv1 = nengo.Convolution(p.n_features_1, shape, channels_last=False, strides=(p.stride_1, p.stride_1),
                                          kernel_size=(p.kernel_size_1, p.kernel_size_1))
                layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
                nengo.Connection(inp, layer1.neurons, transform=conv1)

                conv2 = nengo.Convolution(p.n_features_2, conv1.output_shape, channels_last=False, strides=(p.stride_2, p.stride_2),
                                          kernel_size=(p.kernel_size_2, p.kernel_size_2))
                layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
                nengo.Connection(layer1.neurons, layer2.neurons, transform=conv2)

                nengo.Connection(layer2.neurons, out, transform=nengo_dl.dists.Glorot())
            else:
                # do the weird spatially split convnet
                convnet = davis_tracking.ConvNet(nengo.Network())
                convnet.make_input_layer(
                    shape,
                    spatial_stride=(p.spatial_stride, p.spatial_stride),
                    spatial_size=(p.spatial_size, p.spatial_size))
                nengo.Connection(inp, convnet.input)
                convnet.make_middle_layer(n_features=p.n_features_1, n_parallel=p.n_parallel, n_local=1,
                                          kernel_stride=(p.stride_1, p.stride_1), kernel_size=(p.kernel_size_1, p.kernel_size_1))
                convnet.make_middle_layer(n_features=p.n_features_2, n_parallel=p.n_parallel, n_local=1,
                                          kernel_stride=(p.stride_2, p.stride_2), kernel_size=(p.kernel_size_2, p.kernel_size_2))
                convnet.make_output_layer(2)
                nengo.Connection(convnet.output, out)

            p_out = nengo.Probe(out)

        N = len(inputs_train)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_train_data = {inp: np.resize(inputs_train, (p.minibatch_size, n_steps, dimensions)),
                         p_out: np.resize(targets_train, (p.minibatch_size, n_steps, 2))}
        N = len(inputs_test)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_test_data = {inp: np.resize(inputs_test, (p.minibatch_size, n_steps, dimensions)),
                        p_out: np.resize(targets_test, (p.minibatch_size, n_steps, 2))}
        with nengo_dl.Simulator(model, minibatch_size=p.minibatch_size) as sim:
            # loss_pre = sim.loss(dl_test_data)
            if p.n_epochs > 0:
                sim.train(dl_train_data, tf.train.RMSPropOptimizer(learning_rate=p.learning_rate),
                          n_epochs=p.n_epochs)
            loss_post = sim.loss(dl_test_data)
            sim.run_steps(n_steps, data=dl_test_data)
        data = sim.data[p_out].reshape(-1, 2)[:len(targets_test)]
        rmse_test = np.sqrt(np.mean((targets_test - data)**2, axis=0)) * p.merge
        if plt:
            plt.plot(data * p.merge)
            plt.plot(targets_test * p.merge, ls='--')
        return dict(
            rmse_test=rmse_test,
            max_n_neurons=max([ens.n_neurons for ens in model.all_ensembles]),
            test_targets=targets_test,
            test_output=data,
            test_loss=loss_post,
        )
```
- avg_line_length 44.655914; max_line_length 131; alphanum_fraction 0.567903
- quality signals (qsc_code_* / qsc_codepython_* `_quality_signal`): num_words 1,034; num_chars 8,306; mean_word_length 4.348162; frac_words_unique 0.181818; frac_chars_top_2grams 0.048043; top_3grams 0.019573; top_4grams 0.018906; dupe_5grams 0.355205; dupe_6grams 0.25089; dupe_7grams 0.216637; dupe_8grams 0.155694; dupe_9grams 0.155694; dupe_10grams 0.137456; replacement_symbols 0; frac_chars_digital 0.023576; frac_chars_whitespace 0.325909; size_file_byte 8,306; num_lines 185; num_chars_line_max 132; num_chars_line_mean 44.897297; frac_chars_alphabet 0.779425; frac_chars_comments 0.010956; cate_xml_start 0; frac_lines_dupe_lines 0.054422; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.062234; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.013605; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.061224; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.088435; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
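The qsc_* names are largely self-describing word and character statistics over `content`. As an illustration of what two of them measure, a rough re-computation on whitespace tokens (the pipeline's exact tokenizer and definitions are not given in this dump, so this is an approximation, not the reference implementation):

```python
# Rough approximation of two quality signals; whitespace tokenization is an
# assumption, so the numbers will not match the dump exactly.
from collections import Counter

def approx_signals(content: str) -> dict:
    words = content.split()
    # qsc_code_frac_words_unique: distinct words over total words
    frac_words_unique = len(set(words)) / len(words) if words else 0.0
    # qsc_code_frac_chars_top_2grams: characters covered by the most frequent
    # word 2-gram, as a fraction of all word characters
    bigrams = Counter(zip(words, words[1:]))
    word_chars = sum(len(w) for w in words)
    if bigrams and word_chars:
        (w1, w2), n = bigrams.most_common(1)[0]
        frac_top_2grams = n * (len(w1) + len(w2)) / word_chars
    else:
        frac_top_2grams = 0.0
    return {"frac_words_unique": frac_words_unique,
            "frac_chars_top_2grams": frac_top_2grams}
```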
**Record 2: magocod/django_repository, apps/collection/tests/public/test_public_collection_list.py**

- hexsha: 196901465c6a28bf050c45315b7c684ef13a92ea; size: 1,337; ext: py; lang: Python
- max_stars: path apps/collection/tests/public/test_public_collection_list.py; repo magocod/django_repository; head 660664ba2321499e92c3c5c23719756db2569e90; licenses ["MIT"]; count 1; events 2019-10-01T01:39:29.000Z to 2019-10-01T01:39:29.000Z
- max_issues: path apps/collection/tests/public/test_public_collection_list.py; repo magocod/django_repository; head 660664ba2321499e92c3c5c23719756db2569e90; licenses ["MIT"]; count 7; events 2019-12-04T21:40:40.000Z to 2020-06-26T21:49:51.000Z
- max_forks: path apps/collection/tests/public/test_public_collection_list.py; repo magocod/django_repository; head 660664ba2321499e92c3c5c23719756db2569e90; licenses ["MIT"]; count 1; events 2020-04-08T02:46:31.000Z to 2020-04-08T02:46:31.000Z

content:
"""
...
"""
# Django
from django.conf import settings
# local Django
from apps.collection.models import Collection
from apps.collection.serializers import CollectionSlugSerializer
from apps.tests.fixtures import RepositoryTestCase
PAGE_SIZE = settings.REST_FRAMEWORK["PAGE_SIZE"]
class CollectionPublicListTest(RepositoryTestCase):
"""
...
"""
serializer = CollectionSlugSerializer
def test_request_all_collections_without_parameters_in_the_route(self):
"""
...
"""
response = self.public_client.get("/api/collections/slug_articles/")
serializer = self.serializer(Collection.objects.all()[:PAGE_SIZE], many=True,)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data["results"]), PAGE_SIZE)
self.assertEqual(serializer.data, response.data["results"])
def test_request_all_collections_with_parameters_in_the_route(self):
"""
...
"""
response = self.public_client.get("/api/collections/slug_articles/?page=1")
serializer = self.serializer(Collection.objects.all()[:PAGE_SIZE], many=True,)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data["results"]), PAGE_SIZE)
self.assertEqual(serializer.data, response.data["results"])
- avg_line_length 31.093023; max_line_length 86; alphanum_fraction 0.703067
- quality signals: num_words 142; num_chars 1,337; mean_word_length 6.415493; frac_words_unique 0.359155; frac_chars_top_2grams 0.052689; top_3grams 0.083425; top_4grams 0.037322; dupe_5grams 0.645445; dupe_6grams 0.583974; dupe_7grams 0.583974; dupe_8grams 0.583974; dupe_9grams 0.583974; dupe_10grams 0.583974; replacement_symbols 0; frac_chars_digital 0.006335; frac_chars_whitespace 0.173523; size_file_byte 1,337; num_lines 42; num_chars_line_max 87; num_chars_line_mean 31.833333; frac_chars_alphabet 0.8181; frac_chars_comments 0.026926; cate_xml_start 0; frac_lines_dupe_lines 0.421053; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.086109; frac_chars_long_word_length 0.056052; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.315789; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.105263; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.210526; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.421053; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 3: uditarora/fashion-product-classification, main.py**

- hexsha: 196a103d422e501e45fed5d95aa6423587fcbc43; size: 2,033; ext: py; lang: Python
- max_stars: path main.py; repo uditarora/fashion-product-classification; head 686f8a6c34c54f2a09baa7fba89fe446582e8408; licenses ["MIT"]; count 9; events 2020-04-28T17:26:42.000Z to 2021-07-13T14:47:42.000Z
- max_issues: path main.py; repo uditarora/fashion-product-classification; head 686f8a6c34c54f2a09baa7fba89fe446582e8408; licenses ["MIT"]; count 2; events 2021-09-08T01:55:51.000Z to 2022-03-12T00:25:38.000Z
- max_forks: path main.py; repo uditarora/fashion-product-classification; head 686f8a6c34c54f2a09baa7fba89fe446582e8408; licenses ["MIT"]; count 1; events 2020-08-03T15:27:05.000Z to 2020-08-03T15:27:05.000Z

content:
```python
import argparse
import logging
import os

from src.train import setup_top20, setup_ft, setup_bottom

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('fashion')


def main(data_path, ckpt_path, mt, train_bottom=False):
    # Train on top-20 classes (train subsplit)
    logger.info("Training on top-20 classes")
    ckpt_path_top20 = os.path.join(ckpt_path, 'best_val_top20.ckpt')
    processor, trainer_top20, _ = setup_top20(ckpt_path=ckpt_path_top20,
                                              data_path=data_path, mt=mt)
    trainer_top20.train(20)
    acc_df = trainer_top20.get_test_accuracy()
    print("Test accuracy for top-20 classes:")
    print(acc_df)

    # Train on remaining classes (fine-tune subsplit)
    logger.info("Training on fine-tune subsplit")
    ckpt_path_ft = os.path.join(ckpt_path, 'best_val_ft.ckpt')
    processor, trainer_ft, _ = setup_ft(processor=processor, mt=mt,
                                        ckpt_path=ckpt_path_ft, model=trainer_top20.get_best_model())
    trainer_ft.train(20)
    acc_df_ft = trainer_ft.get_test_accuracy()
    print("Test accuracy for fine-tune classes:")
    print(acc_df_ft)

    if train_bottom:
        # Train on bottom 50 classes of fine-tune subsplit
        # with alternate data augmentations
        logger.info("Training on bottom 50 classes of fine-tune subsplit")
        trainer_b50 = setup_bottom(processor, trainer_ft, num=50)
        trainer_b50.train(20)
        acc_df_ft2 = trainer_ft.get_test_accuracy()
        print("Test accuracy for fine-tune data after second round of training:")
        print(acc_df_ft2)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--data", help="path to the dataset", required=True)
    parser.add_argument("--ckpt", help="path to checkpoint folder", default='ckpts')
    parser.add_argument("-m", "--multi", help="run multitask learning model", action="store_true")
    args = parser.parse_args()
    if not os.path.exists(args.ckpt):
        os.makedirs(args.ckpt)
    main(args.data, args.ckpt, args.multi)
```
- avg_line_length 37.648148; max_line_length 98; alphanum_fraction 0.711264
- quality signals: num_words 295; num_chars 2,033; mean_word_length 4.657627; frac_words_unique 0.291525; frac_chars_top_2grams 0.052402; top_3grams 0.046579; top_4grams 0.043668; dupe_5grams 0.229258; dupe_6grams 0.188501; dupe_7grams 0.188501; dupe_8grams 0.126638; dupe_9grams 0.075691; dupe_10grams 0.075691; replacement_symbols 0; frac_chars_digital 0.02512; frac_chars_whitespace 0.17757; size_file_byte 2,033; num_lines 53; num_chars_line_max 99; num_chars_line_mean 38.358491; frac_chars_alphabet 0.796651; frac_chars_comments 0.084112; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.215401; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.025641; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.102564; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.128205; codepython_frac_lines_print 0.153846
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 4: BD2KGenomics/toil-scripts, src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py**

- hexsha: 196bb8f934a37aeb286c8e9a73930e525b7e413c; size: 8,508; ext: py; lang: Python
- max_stars: path src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py; repo BD2KGenomics/toil-scripts; head f878d863defcdccaabb7fe06f991451b7a198fb7; licenses ["Apache-2.0"]; count 33; events 2015-10-28T18:26:31.000Z to 2021-10-10T21:19:31.000Z
- max_issues: path src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py; repo BD2KGenomics/toil-scripts; head f878d863defcdccaabb7fe06f991451b7a198fb7; licenses ["Apache-2.0"]; count 464; events 2015-08-11T04:12:10.000Z to 2018-02-21T21:29:11.000Z
- max_forks: path src/toil_scripts/transfer_gtex_to_s3/transfer_gtex_to_s3.py; repo BD2KGenomics/toil-scripts; head f878d863defcdccaabb7fe06f991451b7a198fb7; licenses ["Apache-2.0"]; count 21; events 2015-09-08T18:07:49.000Z to 2020-11-24T01:02:08.000Z

content:
```python
#!/usr/bin/env python2.7
"""
Toil script to move TCGA data into an S3 bucket.

Dependencies
Curl:       apt-get install curl
Docker:     wget -qO- https://get.docker.com/ | sh
Toil:       pip install toil
S3AM:       pip install --pre s3am
"""
import argparse
import glob
import hashlib
import os
import shutil
import subprocess
import tarfile

from toil.job import Job


def build_parser():
    parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-s', '--sra', default=None, required=True,
                        help='Path to a file with one analysis ID per line for data hosted on CGHub.')
    parser.add_argument('-k', '--dbgap_key', default=None, required=True,
                        help='Path to a CGHub key that has access to the TCGA data being requested. An exception will'
                             'be thrown if "-g" is set but not this argument.')
    parser.add_argument('--s3_dir', default=None, required=True, help='S3 Bucket. e.g. tcga-data')
    parser.add_argument('--ssec', default=None, required=True, help='Path to Key File for SSE-C Encryption')
    parser.add_argument('--single_end', default=None, action='store_true', help='Set this flag if data is single-end')
    parser.add_argument('--sudo', dest='sudo', default=None, action='store_true',
                        help='Docker usually needs sudo to execute locally, but not when running Mesos or when '
                             'the user is a member of a Docker group.')
    return parser


# Convenience Functions
def generate_unique_key(master_key_path, url):
    """
    master_key_path: str    Path to the BD2K Master Key (for S3 Encryption)
    url: str                S3 URL (e.g. https://s3-us-west-2.amazonaws.com/bucket/file.txt)

    Returns: str            32-byte unique key generated for that URL
    """
    with open(master_key_path, 'r') as f:
        master_key = f.read()
    assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
                                  'Key: {}, Length: {}'.format(master_key, len(master_key))
    new_key = hashlib.sha256(master_key + url).digest()
    assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
    return new_key


def docker_call(work_dir, tool_parameters, tool, java_opts=None, sudo=False, outfile=None):
    """
    Makes subprocess call of a command to a docker container.

    tool_parameters: list   An array of the parameters to be passed to the tool
    tool: str               Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools)
    java_opts: str          Optional commands to pass to a java jar execution. (e.g. '-Xmx15G')
    outfile: file           Filehandle that stderr will be passed to
    sudo: bool              If the user wants the docker command executed as sudo
    """
    base_docker_call = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if sudo:
        base_docker_call = ['sudo'] + base_docker_call
    if java_opts:
        base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)]
    try:
        if outfile:
            subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile)
        else:
            subprocess.check_call(base_docker_call + [tool] + tool_parameters)
    except subprocess.CalledProcessError:
        raise RuntimeError('docker command returned a non-zero exit status: {}'
                           ''.format(base_docker_call + [tool] + tool_parameters))
    except OSError:
        raise RuntimeError('docker not found on system. Install on all nodes.')


def parse_sra(path_to_config):
    """
    Parses genetorrent config file. Returns list of samples: [ [id1, id1 ], [id2, id2], ... ]
    Returns duplicate of ids to follow UUID/URL standard.
    """
    samples = []
    with open(path_to_config, 'r') as f:
        for line in f.readlines():
            if not line.isspace():
                samples.append(line.strip())
    return samples


def tarball_files(work_dir, tar_name, uuid=None, files=None):
    """
    Tars a group of files together into a tarball

    work_dir: str       Current Working Directory
    tar_name: str       Name of tarball
    uuid: str           UUID to stamp files with
    files: str(s)       List of filenames to place in the tarball from working directory
    """
    with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
        for fname in files:
            if uuid:
                f_out.add(os.path.join(work_dir, fname), arcname=uuid + '.' + fname)
            else:
                f_out.add(os.path.join(work_dir, fname), arcname=fname)


# Job Functions
def start_batch(job, input_args):
    """
    This function will administer 5 jobs at a time then recursively call itself until subset is empty
    """
    samples = parse_sra(input_args['sra'])
    # for analysis_id in samples:
    job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30')


def download_and_transfer_sample(job, input_args, samples):
    """
    Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3

    input_args: dict        Dictionary of input arguments
    analysis_id: str        An analysis ID for a sample in CGHub
    """
    if len(samples) > 1:
        a = samples[len(samples)/2:]
        b = samples[:len(samples)/2]
        job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G')
        job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G')
    else:
        analysis_id = samples[0]
        work_dir = job.fileStore.getLocalTempDir()
        sudo = input_args['sudo']
        # Acquire dbgap_key
        shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc'))
        # Call to fastq-dump to pull down SRA files and convert to fastq
        if input_args['single_end']:
            parameters = [analysis_id]
        else:
            parameters = ['--split-files', analysis_id]
        docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1',
                    work_dir=work_dir, tool_parameters=parameters, sudo=sudo)
        # Collect files and encapsulate into a tarball
        shutil.rmtree(os.path.join(work_dir, 'sra'))
        sample_name = analysis_id + '.tar.gz'
        if input_args['single_end']:
            r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))]
            tarball_files(work_dir, tar_name=sample_name, files=r)
        else:
            r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))]
            r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))]
            tarball_files(work_dir, tar_name=sample_name, files=r1 + r2)
        # Parse s3_dir to get bucket and s3 path
        key_path = input_args['ssec']
        s3_dir = input_args['s3_dir']
        bucket_name = s3_dir.lstrip('/').split('/')[0]
        base_url = 'https://s3-us-west-2.amazonaws.com/'
        url = os.path.join(base_url, bucket_name, sample_name)
        # Generate keyfile for upload
        with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
            f_out.write(generate_unique_key(key_path, url))
        # Upload to S3 via S3AM
        s3am_command = ['s3am',
                        'upload',
                        '--sse-key-file', os.path.join(work_dir, 'temp.key'),
                        'file://{}'.format(os.path.join(work_dir, sample_name)),
                        's3://' + bucket_name + '/']
        subprocess.check_call(s3am_command)


def main():
    """
    Transfer gTEX data from dbGaP (NCBI) to S3
    """
    # Define Parser object and add to toil
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Store inputs from argparse
    inputs = {'sra': args.sra,
              'dbgap_key': args.dbgap_key,
              'ssec': args.ssec,
              's3_dir': args.s3_dir,
              'single_end': args.single_end,
              'sudo': args.sudo}
    # Sanity checks
    if args.ssec:
        assert os.path.isfile(args.ssec)
    if args.sra:
        assert os.path.isfile(args.sra)
    if args.dbgap_key:
        assert os.path.isfile(args.dbgap_key)
    # Start Pipeline
    Job.Runner.startToil(Job.wrapJobFn(start_batch, inputs), args)


if __name__ == '__main__':
    main()
```
- avg_line_length 42.328358; max_line_length 118; alphanum_fraction 0.629525
- quality signals: num_words 1,180; num_chars 8,508; mean_word_length 4.383051; frac_words_unique 0.266102; frac_chars_top_2grams 0.027069; top_3grams 0.023202; top_4grams 0.029776; dupe_5grams 0.20959; dupe_6grams 0.174014; dupe_7grams 0.152552; dupe_8grams 0.118329; dupe_9grams 0.076179; dupe_10grams 0.040603; replacement_symbols 0; frac_chars_digital 0.015216; frac_chars_whitespace 0.258463; size_file_byte 8,508; num_lines 201; num_chars_line_max 119; num_chars_line_mean 42.328358; frac_chars_alphabet 0.804565; frac_chars_comments 0.239892; cate_xml_start 0; frac_lines_dupe_lines 0.057851; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.171651; frac_chars_long_word_length 0.011931; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.041322; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.066116; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.066116; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.157025; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 5: shareablee/apm-agent-python, tests/metrics/cpu_psutil_tests.py**

- hexsha: 196bec7ed8b6cf58b1516910c7416d81061dacce; size: 1,885; ext: py; lang: Python
- max_stars: path tests/metrics/cpu_psutil_tests.py; repo shareablee/apm-agent-python; head 29f12ceb410b3c1a7f933b29dcecccf628dbbb6c; licenses ["BSD-3-Clause"]; count null; events null
- max_issues: path tests/metrics/cpu_psutil_tests.py; repo shareablee/apm-agent-python; head 29f12ceb410b3c1a7f933b29dcecccf628dbbb6c; licenses ["BSD-3-Clause"]; count null; events null
- max_forks: path tests/metrics/cpu_psutil_tests.py; repo shareablee/apm-agent-python; head 29f12ceb410b3c1a7f933b29dcecccf628dbbb6c; licenses ["BSD-3-Clause"]; count null; events null

content:
```python
import time

import pytest

from elasticapm.utils import compat

cpu_psutil = pytest.importorskip("elasticapm.metrics.sets.cpu_psutil")

pytestmark = pytest.mark.psutil


def test_cpu_mem_from_psutil():
    metricset = cpu_psutil.CPUMetricSet()
    # do something that generates some CPU load
    for i in compat.irange(10 ** 6):
        j = i * i
    data = metricset.collect()
    # we can't really test any specific values here as it depends on the system state.
    # Mocking is also not really a viable choice, as we would then lose the "integration testing"
    # nature of this test with different versions of psutil
    assert 0 < data["samples"]["system.cpu.total.norm.pct"]["value"] < 1
    assert 0 < data["samples"]["system.process.cpu.total.norm.pct"]["value"] < 1
    assert data["samples"]["system.memory.total"]["value"] > 0
    assert data["samples"]["system.memory.actual.free"]["value"] > 0
    assert data["samples"]["system.process.memory.rss.bytes"]["value"] > 0
    assert data["samples"]["system.process.memory.size"]["value"] > 0


cpu_linux = pytest.importorskip("elasticapm.metrics.sets.cpu_linux")


def test_compare_psutil_linux_metricsets():
    psutil_metricset = cpu_psutil.CPUMetricSet()
    linux_metricset = cpu_linux.CPUMetricSet()
    # do something that generates some CPU load
    for i in compat.irange(10 ** 6):
        j = i * i
    psutil_data = psutil_metricset.collect()
    linux_data = linux_metricset.collect()
    assert (
        abs(
            psutil_data["samples"]["system.cpu.total.norm.pct"]["value"]
            - linux_data["samples"]["system.cpu.total.norm.pct"]["value"]
        )
        < 0.02
    )
    assert (
        abs(
            psutil_data["samples"]["system.process.cpu.total.norm.pct"]["value"]
            - linux_data["samples"]["system.process.cpu.total.norm.pct"]["value"]
        )
        < 0.02
    )
```
- avg_line_length 33.660714; max_line_length 97; alphanum_fraction 0.658886
- quality signals: num_words 250; num_chars 1,885; mean_word_length 4.868; frac_words_unique 0.348; frac_chars_top_2grams 0.090386; top_3grams 0.139688; top_4grams 0.073952; dupe_5grams 0.607231; dupe_6grams 0.527527; dupe_7grams 0.410025; dupe_8grams 0.393591; dupe_9grams 0.263763; dupe_10grams 0.11668; replacement_symbols 0; frac_chars_digital 0.013324; frac_chars_whitespace 0.203714; size_file_byte 1,885; num_lines 55; num_chars_line_max 98; num_chars_line_mean 34.272727; frac_chars_alphabet 0.797468; frac_chars_comments 0.164456; cate_xml_start 0; frac_lines_dupe_lines 0.263158; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.294455; frac_chars_long_word_length 0.205864; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.210526; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.052632; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.131579; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.184211; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 6: josteinl/advent2020, day11/main.py**

- hexsha: 196e9f2b4480f78817f1f39de8dbbca5da718152; size: 3,785; ext: py; lang: Python
- max_stars: path day11/main.py; repo josteinl/advent2020; head f109f9c1ef4fcb14582cda6b114d7648000decc6; licenses ["Apache-2.0"]; count null; events null
- max_issues: path day11/main.py; repo josteinl/advent2020; head f109f9c1ef4fcb14582cda6b114d7648000decc6; licenses ["Apache-2.0"]; count null; events null
- max_forks: path day11/main.py; repo josteinl/advent2020; head f109f9c1ef4fcb14582cda6b114d7648000decc6; licenses ["Apache-2.0"]; count null; events null

content:
"""
Day 11 - Seating
"""
from itertools import chain
import collections
def occupied_count(seating):
return ''.join(chain.from_iterable(seating)).count('#')
def adjacent_occupied(x, y, seating):
occupied = 0
# Above
for look_x in range(x - 1, x + 2):
for look_y in range(y - 1, y + 2):
if look_x == x and look_y == y:
continue
if seating[look_y][look_x] == '#':
occupied += 1
return occupied
def sit_down(seating, dim_x, dim_y):
return_seating = seating.copy()
for y in range(1, dim_y):
for x in range(1, dim_x):
occupied = adjacent_occupied(x, y, seating)
if seating[y][x] == 'L' and occupied == 0:
# Empty seat
# and no adjacent occupied
return_seating[y] = return_seating[y][:x] + '#' + return_seating[y][x + 1:]
elif seating[y][x] == '#' and occupied >= 4:
# Occupied and 4 or more adjecent seats, raise up
return_seating[y] = return_seating[y][:x] + 'L' + return_seating[y][x + 1:]
return return_seating
def see_occupied_in_direction(x, y, dir_x, dir_y, seating):
max_x = len(seating[0]) - 1
max_y = len(seating) - 1
cur_x = x
cur_y = y
cur_x += dir_x
cur_y += dir_y
while 0 <= cur_x <= max_x and 0 <= cur_y <= max_y:
if seating[cur_y][cur_x] == '#':
return 1
elif seating[cur_y][cur_x] == 'L':
return 0
cur_x += dir_x
cur_y += dir_y
return 0
def seen_occupied(x, y, seating):
occupied = 0
for look_x in range(- 1, + 2):
for look_y in range(- 1, + 2):
if look_x == 0 and look_y == 0:
continue
occupied += see_occupied_in_direction(x, y, look_x, look_y, seating)
return occupied
def sit_down_part_two(seating, dim_x, dim_y):
return_seating = seating.copy()
for y in range(0, dim_y):
for x in range(0, dim_x):
occupied = seen_occupied(x, y, seating)
if seating[y][x] == 'L' and occupied == 0:
# Empty seat
# and no adjacent occupied
return_seating[y] = return_seating[y][:x] + '#' + return_seating[y][x + 1:]
elif seating[y][x] == '#' and occupied >= 5:
# Occupied and 5 or more seen seats, raise up
return_seating[y] = return_seating[y][:x] + 'L' + return_seating[y][x + 1:]
return return_seating
def part_two():
with open('data.txt', 'r') as f:
seating = [data.strip() for data in f]
dimension_x = len(seating[0])
dimension_y = len(seating)
last_seating = None
while collections.Counter(last_seating) != collections.Counter(seating):
last_seating = seating.copy()
seating = sit_down_part_two(seating, dimension_x, dimension_y)
return occupied_count(last_seating)
def part_one():
with open('data.txt', 'r') as f:
seating = [data.strip() for data in f]
dimension_x = len(seating[0])
dimension_y = len(seating)
# Extend seating with empty space all around, makes it easier to count later
for row_number in range(dimension_y):
seating[row_number] = '.' + seating[row_number] + '.'
seating = ['.' * (dimension_x + 2)] + seating + ['.' * (dimension_x + 2)]
last_seating = None
while collections.Counter(last_seating) != collections.Counter(seating):
last_seating = seating.copy()
seating = sit_down(seating, dimension_x + 1, dimension_y + 1)
return occupied_count(last_seating)
if __name__ == '__main__':
# Part one:
# result = part_one()
# print(f'Result {result}')
result = part_two()
print(f'Result {result}')
- avg_line_length 29.115385; max_line_length 91; alphanum_fraction 0.57675
- quality signals: num_words 534; num_chars 3,785; mean_word_length 3.874532; frac_words_unique 0.155431; frac_chars_top_2grams 0.100532; top_3grams 0.052199; top_4grams 0.057999; dupe_5grams 0.647173; dupe_6grams 0.544224; dupe_7grams 0.465926; dupe_8grams 0.465926; dupe_9grams 0.450459; dupe_10grams 0.450459; replacement_symbols 0; frac_chars_digital 0.016147; frac_chars_whitespace 0.296433; size_file_byte 3,785; num_lines 129; num_chars_line_max 92; num_chars_line_mean 29.341085; frac_chars_alphabet 0.760796; frac_chars_comments 0.084016; cate_xml_start 0; frac_lines_dupe_lines 0.463415; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.016527; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.097561; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.02439; codepython_frac_lines_simplefunc 0.012195; codepython_score_lines_no_logic 0.243902; codepython_frac_lines_print 0.012195
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 7: mindspore-ai/models, research/cv/yolox/src/boxes.py**

- hexsha: 1971dea6d61ad931626506926b94703bc0412249; size: 2,843; ext: py; lang: Python
- max_stars: path research/cv/yolox/src/boxes.py; repo mindspore-ai/models; head 9127b128e2961fd698977e918861dadfad00a44c; licenses ["Apache-2.0"]; count 77; events 2021-10-15T08:32:37.000Z to 2022-03-30T13:09:11.000Z
- max_issues: path research/cv/yolox/src/boxes.py; repo mindspore-ai/models; head 9127b128e2961fd698977e918861dadfad00a44c; licenses ["Apache-2.0"]; count 3; events 2021-10-30T14:44:57.000Z to 2022-02-14T06:57:57.000Z
- max_forks: path research/cv/yolox/src/boxes.py; repo mindspore-ai/models; head 9127b128e2961fd698977e918861dadfad00a44c; licenses ["Apache-2.0"]; count 24; events 2021-10-15T08:32:45.000Z to 2022-03-24T18:45:20.000Z

content:
```python
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================================
"""
box iou related
"""
from mindspore.ops import operations as P
from mindspore.ops.primitive import constexpr


@constexpr
def raise_bbox_error():
    raise IndexError("Index error, shape of input must be 4!")


def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
    """
    calculate iou

    Args:
        bboxes_a:
        bboxes_b:
        xyxy:

    Returns:
    """
    if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:
        raise_bbox_error()
    if xyxy:
        tl = P.Maximum()(bboxes_a[:, None, :2], bboxes_b[:, :2])
        br = P.Minimum()(bboxes_a[:, None, 2:], bboxes_b[:, 2:])
        area_a = bboxes_a[:, 2:] - bboxes_a[:, :2]
        area_a = (area_a[:, 0:1] * area_a[:, 1:2]).squeeze(-1)
        area_b = bboxes_b[:, 2:] - bboxes_b[:, :2]
        area_b = (area_b[:, 0:1] * area_b[:, 1:2]).squeeze(-1)
    else:
        tl = P.Maximum()(
            (bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),
            (bboxes_b[:, :2] - bboxes_b[:, 2:] / 2),
        )
        br = P.Minimum()(
            (bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),
            (bboxes_b[:, :2] + bboxes_b[:, 2:] / 2),
        )
        area_a = (bboxes_a[:, 2:3] * bboxes_a[:, 3:4]).squeeze(-1)
        area_b = (bboxes_b[:, 2:3] * bboxes_b[:, 3:4]).squeeze(-1)
    en = (tl < br).astype(tl.dtype)
    en = (en[..., 0:1] * en[..., 1:2]).squeeze(-1)
    area_i = tl - br
    area_i = (area_i[:, :, 0:1] * area_i[:, :, 1:2]).squeeze(-1) * en
    return area_i / (area_a[:, None] + area_b - area_i)


def batch_bboxes_iou(batch_bboxes_a, batch_bboxes_b, xyxy=True):
    """
    calculate iou for one batch

    Args:
        batch_bboxes_a:
        batch_bboxes_b:
        xyxy:

    Returns:
    """
    if batch_bboxes_a.shape[-1] != 4 or batch_bboxes_b.shape[-1] != 4:
        raise_bbox_error()
    ious = []
    for i in range(len(batch_bboxes_a)):
        if xyxy:
            iou = bboxes_iou(batch_bboxes_a[i], batch_bboxes_b[i], True)
        else:
            iou = bboxes_iou(batch_bboxes_a[i], batch_bboxes_b[i], False)
        iou = P.ExpandDims()(iou, 0)
        ious.append(iou)
    ious = P.Concat(axis=0)(ious)
    return ious
```
- avg_line_length 30.569892; max_line_length 89; alphanum_fraction 0.562082
- quality signals: num_words 418; num_chars 2,843; mean_word_length 3.638756; frac_words_unique 0.287081; frac_chars_top_2grams 0.087442; top_3grams 0.047337; top_4grams 0.04142; dupe_5grams 0.378041; dupe_6grams 0.330703; dupe_7grams 0.25904; dupe_8grams 0.192636; dupe_9grams 0.130835; dupe_10grams 0.105194; replacement_symbols 0; frac_chars_digital 0.032471; frac_chars_whitespace 0.25255; size_file_byte 2,843; num_lines 92; num_chars_line_max 90; num_chars_line_mean 30.902174; frac_chars_alphabet 0.683294; frac_chars_comments 0.291242; cate_xml_start 0; frac_lines_dupe_lines 0.136364; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.019812; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.068182; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.045455; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.159091; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 8: chunchiehy/musst, qa_multi_span/model.py**

- hexsha: 1973e78021509b21a44ae56187ef66052677e85e; size: 2,029; ext: py; lang: Python
- max_stars: path qa_multi_span/model.py; repo chunchiehy/musst; head 1525f917d8802d18c302720125ef9720b9c743fd; licenses ["MIT"]; count 14; events 2021-02-26T15:19:21.000Z to 2022-03-31T18:49:12.000Z
- max_issues: path qa_multi_span/model.py; repo chunchiehy/musst; head 1525f917d8802d18c302720125ef9720b9c743fd; licenses ["MIT"]; count null; events null
- max_forks: path qa_multi_span/model.py; repo chunchiehy/musst; head 1525f917d8802d18c302720125ef9720b9c743fd; licenses ["MIT"]; count 1; events 2021-11-16T07:20:32.000Z to 2021-11-16T07:20:32.000Z

content:
```python
from transformers import AlbertModel
from transformers import AlbertPreTrainedModel
import torch.nn as nn
import torch
import torch.nn.functional as F


class MUSTTransformerModel(AlbertPreTrainedModel):
    def __init__(self, config, max_num_spans, max_seq_len):
        super(MUSTTransformerModel, self).__init__(config)
        self.max_num_spans = max_num_spans
        self.max_seq_len = max_seq_len

        self.albert = AlbertModel(config)
        self.dropout = nn.Dropout(config.classifier_dropout_prob)
        self.span_outputs = nn.ModuleList(
            [nn.Linear(config.hidden_size, 2) for _ in range(max_num_spans)])
        self.relu = nn.ReLU()

        self.init_weights()

    def forward(self,
                input_ids=None,
                attention_mask=None,
                token_type_ids=None,
                position_ids=None,
                head_mask=None,
                inputs_embeds=None):
        outputs = self.albert(input_ids=input_ids,
                              attention_mask=attention_mask,
                              token_type_ids=token_type_ids,
                              position_ids=position_ids,
                              head_mask=head_mask,
                              inputs_embeds=inputs_embeds)
        # (batch_size, seq_len, hidden_size)
        transformer_output = outputs[0]
        transformer_output = self.dropout(transformer_output)

        span_start_logits = []
        span_end_logits = []
        for span_output_layer in self.span_outputs:
            # (batch_size, seq_len)
            logits = span_output_layer(transformer_output)
            # (batch_size, seq_len)
            start_logits, end_logits = torch.split(logits, 1, dim=-1)
            start_logits = start_logits.squeeze(-1)
            end_logits = end_logits.squeeze(-1)
            span_start_logits.append(start_logits)
            span_end_logits.append(end_logits)

        # (batch_size, max_num_spans, seq_len)
        span_start_logits = torch.stack(span_start_logits, dim=1)
        span_end_logits = torch.stack(span_end_logits, dim=1)

        return (
            span_start_logits,
            span_end_logits,
        ) + outputs[2:]
```
- avg_line_length 33.816667; max_line_length 73; alphanum_fraction 0.667817
- quality signals: num_words 254; num_chars 2,029; mean_word_length 4.952756; frac_words_unique 0.255906; frac_chars_top_2grams 0.078696; top_3grams 0.04372; top_4grams 0.035771; dupe_5grams 0.063593; dupe_6grams 0.044515; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; frac_chars_digital 0.005921; frac_chars_whitespace 0.250862; size_file_byte 2,029; num_lines 59; num_chars_line_max 74; num_chars_line_mean 34.389831; frac_chars_alphabet 0.821711; frac_chars_comments 0.056678; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.043478; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.108696; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.195652; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 9: nclairesays/intro-python-hackforla, class_one/02_strings.py**

- hexsha: 19753cc35369937ec7625ed05051badbaa38c37c; size: 1,160; ext: py; lang: Python
- max_stars: path class_one/02_strings.py; repo nclairesays/intro-python-hackforla; head 65ab8584ab9d5082aaa405b40c47cbeacfff610c; licenses ["MIT"]; count null; events null
- max_issues: path class_one/02_strings.py; repo nclairesays/intro-python-hackforla; head 65ab8584ab9d5082aaa405b40c47cbeacfff610c; licenses ["MIT"]; count null; events null
- max_forks: path class_one/02_strings.py; repo nclairesays/intro-python-hackforla; head 65ab8584ab9d5082aaa405b40c47cbeacfff610c; licenses ["MIT"]; count null; events null

content:
```python
# STRINGS
# https://docs.python.org/3/tutorial/introduction.html#strings

s = str(42)
s  # convert another data type into a string (casting)
s = 'I like you'

# examine a string
s[0]     # returns 'I'
len(s)   # returns 10

# string slicing like lists
s[0:7]   # returns 'I like '
s[6:]    # returns 'you'
s[-1]    # returns 'u'

# EXERCISE: Book Titles (Part 1)
# 1) Extract the book title from the string
# 2) Save each book title to a variable (ie book1_title)
# 3) How many characters/elements are in each title?
# Hint: {bookTitle} by {author}, {years}
book1 = "Beyond the Door by Dick, Philip K., 1928-1982"
book2 = "The Variable Man by Dick, Philip K., 1928-1982"
book3 = "The Skull by Dick, Philip K., 1928-1982"

# STRINGS - Part II

# concatenate strings
s3 = 'The meaning of life is'
s4 = '42'
s3 + ' ' + s4       # returns 'The meaning of life is 42'
s3 + ' ' + str(42)  # same thing

# split a string into a list of substrings separated by a delimiter
s = 'I like you'
s.split(' ')  # returns ['I','like','you']
s.split()     # same thing

## Learn more with Automate the Boring Stuff:
## https://automatetheboringstuff.com/chapter1/
```
- avg_line_length 22.745098; max_line_length 67; alphanum_fraction 0.655172
- quality signals: num_words 186; num_chars 1,160; mean_word_length 4.080645; frac_words_unique 0.510753; frac_chars_top_2grams 0.02635; top_3grams 0.031621; top_4grams 0.051383; dupe_5grams 0.167325; dupe_6grams 0.083004; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; frac_chars_digital 0.059276; frac_chars_whitespace 0.214655; size_file_byte 1,160; num_lines 51; num_chars_line_max 68; num_chars_line_mean 22.745098; frac_chars_alphabet 0.773875; frac_chars_comments 0.622414; cate_xml_start 0; frac_lines_dupe_lines 0.111111; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.430657; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 10: veegee/amqpy, amqpy/transport.py**

- hexsha: 1975eb951bcdc39a34c55d4b1e7b13067352c07a; size: 11,809; ext: py; lang: Python
- max_stars: path amqpy/transport.py; repo veegee/amqpy; head c5346b1f6910b8553ca96769d2c88a7807f83417; licenses ["MIT"]; count 32; events 2015-02-04T03:57:28.000Z to 2021-01-17T13:19:02.000Z
- max_issues: path amqpy/transport.py; repo veegee/amqpy; head c5346b1f6910b8553ca96769d2c88a7807f83417; licenses ["MIT"]; count 33; events 2015-01-12T21:17:16.000Z to 2018-03-14T22:54:13.000Z
- max_forks: path amqpy/transport.py; repo veegee/amqpy; head c5346b1f6910b8553ca96769d2c88a7807f83417; licenses ["MIT"]; count 9; events 2015-02-17T04:44:31.000Z to 2021-12-09T20:36:29.000Z

content:
```python
from __future__ import absolute_import, division, print_function

__metaclass__ = type

import errno
import six
import socket
import ssl
from abc import ABCMeta, abstractmethod
import logging
from threading import RLock
from ssl import SSLError
import datetime
import time

from . import compat
from .proto import Frame
from .concurrency import synchronized
from .exceptions import UnexpectedFrame
from .utils import get_errno
from .spec import FrameType

log = logging.getLogger('amqpy')
compat.patch()

_UNAVAIL = {errno.EAGAIN, errno.EINTR, errno.ENOENT}

AMQP_PROTOCOL_HEADER = b'AMQP\x00\x00\x09\x01'  # bytes([65, 77, 81, 80, 0, 0, 9, 1])


class Transport:
    __metaclass__ = ABCMeta
    """Common superclass for TCP and SSL transports"""
    connected = False

    def __init__(self, host, port, connect_timeout, buf_size):
        """
        :param host: hostname or IP address
        :param port: port
        :param connect_timeout: connect timeout
        :type host: str
        :type port: int
        :type connect_timeout: float or None
        """
        self._rbuf = bytearray(buf_size)

        #: :type: datetime.datetime
        self.last_heartbeat_sent = None
        #: :type: datetime.datetime
        self.last_heartbeat_received = None

        self.last_heartbeat_sent_monotonic = 0.0

        # the purpose of the frame lock is to allow no more than one thread to read/write a frame
        # to the connection at any time
        self._frame_write_lock = RLock()
        self._frame_read_lock = RLock()

        self.sock = None

        # try to connect
        last_err = None
        for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.settimeout(connect_timeout)
                self.sock.connect(sa)
                break
            except socket.error as exc:
                self.sock.close()
                self.sock = None
                last_err = exc

        if not self.sock:
            # didn't connect, return the most recent error message
            raise socket.error(last_err)

        try:
            assert isinstance(self.sock, socket.socket)
            self.sock.settimeout(None)
            self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            self._setup_transport()
            self.write(AMQP_PROTOCOL_HEADER)
        except (OSError, IOError, socket.error) as exc:
            if get_errno(exc) not in _UNAVAIL:
                self.connected = False
            raise

        self.connected = True

    def __del__(self):
        try:
            # socket module may have been collected by gc if this is called by a thread at shutdown
            if socket is not None:
                # noinspection PyBroadException
                try:
                    self.close()
                except:
                    pass
        finally:
            self.sock = None

    def _read(self, n, initial, _errnos):
        """Read from socket

        This is the default implementation. Subclasses may implement `read()` to simply call this
        method, or provide their own `read()` implementation.

        Note: According to SSL_read(3), it can at most return 16kB of data. Thus, we use an internal
        read buffer like to get the exact number of bytes wanted.

        Note: ssl.sock.read may cause ENOENT if the operation couldn't be performed (?).

        :param int n: exact number of bytes to read
        :return: data read
        :rtype: memoryview
        """
        mview = memoryview(self._rbuf)
        to_read = n
        while to_read:
            try:
                bytes_read = self.sock.recv_into(mview, to_read)
                mview = mview[bytes_read:]
                to_read -= bytes_read
            except socket.error as exc:
                if not initial and exc.errno in _errnos:
                    continue
                raise
            if not bytes_read:
                raise IOError('socket closed')
        return memoryview(self._rbuf)[:n]

    @abstractmethod
    def read(self, n, initial=False):
        """Read exactly `n` bytes from the peer

        :param n: number of bytes to read
        :type n: int
        :return: data read
        :rtype: bytes
        """
        pass

    @abstractmethod
    def write(self, s):
        """Completely write a string to the peer
        """

    def _setup_transport(self):
        """Do any additional initialization of the class (used by the subclasses)
        """
        pass

    def close(self):
        if self.sock is not None:
            # call shutdown first to make sure that pending messages reach the AMQP broker if the
            # program exits after calling this method
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
            self.sock = None
        self.connected = False

    @synchronized('_frame_read_lock')
    def read_frame(self):
        """Read frame from connection

        Note that the frame may be destined for any channel. It is permitted to interleave frames
        from different channels.

        :return: frame
        :rtype: amqpy.proto.Frame
        """
        frame = Frame()
        try:
            # read frame header: 7 bytes
            frame_header = self.read(7, True)
            frame.data.extend(frame_header)

            # read frame payload
            payload = self.read(frame.payload_size)
            frame.data.extend(payload)

            # read frame terminator byte
            frame_terminator = self.read(1)
            frame.data.extend(frame_terminator)

            if six.PY2:
                #: :type: int
                i_last_byte = six.byte2int(frame_terminator)
            else:
                # this fixes the change in memoryview in Python 3.3 (accessing an element returns the
                # correct type)
                #: :type: int
                i_last_byte = six.byte2int(bytes(frame_terminator))
        except (OSError, IOError, socket.error) as exc:
            # don't disconnect for ssl read time outs (Python 3.2):
            # http://bugs.python.org/issue10272
            if isinstance(exc, SSLError) and 'timed out' in str(exc):
                raise socket.timeout()
            if get_errno(exc) not in _UNAVAIL and not isinstance(exc, socket.timeout):
                self.connected = False
            raise

        if i_last_byte == FrameType.END:
            if frame.frame_type == FrameType.HEARTBEAT:
                self.last_heartbeat_received = datetime.datetime.now()
            return frame
        else:
            raise UnexpectedFrame('Received {} while expecting 0xce (FrameType.END)'.format(hex(i_last_byte)))

    @synchronized('_frame_write_lock')
    def write_frame(self, frame):
        """Write frame to connection

        Note that the frame may be destined for any channel. It is permitted to interleave frames
        from different channels.

        :param frame: frame
        :type frame: amqpy.proto.Frame
        """
        try:
            self.write(frame.data)
        except socket.timeout:
            raise
        except (OSError, IOError, socket.error) as exc:
            if get_errno(exc) not in _UNAVAIL:
                self.connected = False
            raise

    def send_heartbeat(self):
        """Send a heartbeat to the server
        """
        self.last_heartbeat_sent = datetime.datetime.now()
        self.last_heartbeat_sent_monotonic = time.monotonic()
        self.write_frame(Frame(FrameType.HEARTBEAT))

    def is_alive(self):
        """Check if connection is alive

        This method is the primary way to check if the connection is alive.

        Side effects: This method may send a heartbeat as a last resort to check if the connection
        is alive.

        :return: True if connection is alive, else False
        :rtype: bool
        """
        if not self.sock:
            # we don't have a valid socket, this connection is definitely not alive
            return False

        if not self.connected:
            # the `transport` is not connected
            return False

        # recv with MSG_PEEK to check if the connection is alive
        # note: if there is data still in the buffer, this will not tell us anything
        # if hasattr(socket, 'MSG_PEEK') and not isinstance(self.sock, ssl.SSLSocket):
        #     prev = self.sock.gettimeout()
        #     self.sock.settimeout(0.0001)
        #     try:
        #         self.sock.recv(1, socket.MSG_PEEK)
        #     except socket.timeout:
        #         pass
        #     except socket.error:
        #         # the exception is usually (always?) a ConnectionResetError in Python 3.3+
        #         log.debug('socket.error, connection is closed')
        #         return False
        #     finally:
        #         self.sock.settimeout(prev)

        # send a heartbeat to check if the connection is alive
        try:
            self.send_heartbeat()
        except socket.error:
            return False

        return True


class SSLTransport(Transport):
    """Transport that works over SSL
    """

    def __init__(self, host, port, connect_timeout, frame_max, ssl_opts):
        self.ssl_opts = ssl_opts
        super(SSLTransport, self).__init__(host, port, connect_timeout, frame_max)

    def _setup_transport(self):
        """Wrap the socket in an SSL object
        """
        self.sock = ssl.wrap_socket(self.sock, **self.ssl_opts)

    def read(self, n, initial=False):
        """Read from socket

        According to SSL_read(3), it can at most return 16kb of data. Thus, we use an internal read
        buffer like `TCPTransport.read()` to get the exact number of bytes wanted.

        :param int n: exact number of bytes to read
        :return: data read
        :rtype: bytes
        """
        return self._read(n, initial, _errnos=(errno.ENOENT, errno.EAGAIN, errno.EINTR))

    def write(self, s):
        """Write a string out to the SSL socket fully
        """
        try:
            write = self.sock.write
        except AttributeError:
            # works around a bug in python socket library
            raise IOError('Socket closed')
        else:
            while s:
                n = write(s)
                if not n:
                    raise IOError('Socket closed')
                s = s[n:]


class TCPTransport(Transport):
    """Transport that deals directly with TCP socket
    """

    def read(self, n, initial=False):
        """Read exactly n bytes from the socket

        :param int n: exact number of bytes to read
        :return: data read
        :rtype: bytes
        """
        return self._read(n, initial, _errnos=(errno.EAGAIN, errno.EINTR))

    def write(self, s):
        self.sock.sendall(s)


def create_transport(host, port, connect_timeout, frame_max, ssl_opts=None):
    """Given a few parameters from the Connection constructor, select and create a subclass of
    Transport

    If `ssl_opts` is a dict, SSL will be used and `ssl_opts` will be passed to
    :func:`ssl.wrap_socket()`. In all other cases, SSL will not be used.

    :param host: host
    :param connect_timeout: connect timeout
    :param ssl_opts: ssl options passed to :func:`ssl.wrap_socket()`
    :type host: str
    :type connect_timeout: float or None
    :type ssl_opts: dict or None
    """
    if isinstance(ssl_opts, dict):
        return SSLTransport(host, port, connect_timeout, frame_max, ssl_opts)
    else:
        return TCPTransport(host, port, connect_timeout, frame_max)
```
- avg_line_length 32.53168; max_line_length 110; alphanum_fraction 0.597172
- quality signals: num_words 1,461; num_chars 11,809; mean_word_length 4.715264; frac_words_unique 0.225873; frac_chars_top_2grams 0.031354; top_3grams 0.013064; top_4grams 0.019161; dupe_5grams 0.280012; dupe_6grams 0.246044; dupe_7grams 0.203658; dupe_8grams 0.159675; dupe_9grams 0.125127; dupe_10grams 0.125127; replacement_symbols 0; frac_chars_digital 0.006899; frac_chars_whitespace 0.324922; size_file_byte 11,809; num_lines 362; num_chars_line_max 111; num_chars_line_mean 32.621547; frac_chars_alphabet 0.85725; frac_chars_comments 0.35329; cate_xml_start 0; frac_lines_dupe_lines 0.314286; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.021872; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0.000568; frac_lines_prompt_comments 0; frac_lines_assert 0.005714; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0.102857; codepython_cate_var_zero false; codepython_frac_lines_pass 0.017143; codepython_frac_lines_import 0.097143; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.285714; codepython_frac_lines_print 0.005714
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 11: KolibriSolutions/BepMarketplace, tracking/urls.py**

- hexsha: 19775af3ac0af2c8563350d3a05ceb55a427a779; size: 639; ext: py; lang: Python
- max_stars: path tracking/urls.py; repo KolibriSolutions/BepMarketplace; head c47d252fd744cde6b927e37c34d7a103c6162be5; licenses ["BSD-3-Clause"]; count 1; events 2019-06-29T15:24:24.000Z to 2019-06-29T15:24:24.000Z
- max_issues: path tracking/urls.py; repo KolibriSolutions/BepMarketplace; head c47d252fd744cde6b927e37c34d7a103c6162be5; licenses ["BSD-3-Clause"]; count 2; events 2020-01-12T17:47:33.000Z to 2020-01-12T17:47:45.000Z
- max_forks: path tracking/urls.py; repo KolibriSolutions/BepMarketplace; head c47d252fd744cde6b927e37c34d7a103c6162be5; licenses ["BSD-3-Clause"]; count 2; events 2019-06-29T15:24:26.000Z to 2020-01-08T15:15:03.000Z

content:
```python
# Bep Marketplace ELE
# Copyright (c) 2016-2021 Kolibri Solutions
# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from django.urls import path

from . import views

app_name = 'tracking'

urlpatterns = [
    path('user/login/', views.list_user_login, name='listuserlog'),
    path('user/<int:pk>/', views.telemetry_user_detail, name='userdetail'),
    path('project/', views.list_project_status_change, name='statuslist'),
    path('application/', views.list_application_change, name='applicationlist'),
    path('download/', views.download_telemetry, name='downloadtelemetry'),
]
```
- avg_line_length 37.588235; max_line_length 102; alphanum_fraction 0.744914
- quality signals: num_words 77; num_chars 639; mean_word_length 6.038961; frac_words_unique 0.61039; frac_chars_top_2grams 0.058065; top_3grams 0; top_4grams 0; dupe_5grams 0; dupe_6grams 0; dupe_7grams 0; dupe_8grams 0; dupe_9grams 0; dupe_10grams 0; replacement_symbols 0; frac_chars_digital 0.014134; frac_chars_whitespace 0.114241; size_file_byte 639; num_lines 16; num_chars_line_max 103; num_chars_line_mean 39.9375; frac_chars_alphabet 0.80742; frac_chars_comments 0.255086; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.265957; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0; codepython_cate_ast 1; codepython_frac_lines_func_ratio 0; codepython_cate_var_zero false; codepython_frac_lines_pass 0; codepython_frac_lines_import 0.2; codepython_frac_lines_simplefunc 0; codepython_score_lines_no_logic 0.2; codepython_frac_lines_print 0
- raw qsc_* columns: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat null); effective: 1; hits: 0
**Record 12: harry-consulting/SAEF, saefportal/saef/views/connection_view.py**

- hexsha: 197a1ef6fa46d979a05f557aa10ddfcfec221a69; size: 5,301; ext: py; lang: Python
- max_stars: path saefportal/saef/views/connection_view.py; repo harry-consulting/SAEF; head 12ef43bbcc3178b8a988e21c1bef035881cf6e6d; licenses ["BSD-2-Clause"]; count 4; events 2020-12-16T13:14:26.000Z to 2022-03-26T08:54:12.000Z
- max_issues: path saefportal/saef/views/connection_view.py; repo harry-consulting/SAEF; head 12ef43bbcc3178b8a988e21c1bef035881cf6e6d; licenses ["BSD-2-Clause"]; count 1; events 2022-03-26T09:09:04.000Z to 2022-03-26T09:09:04.000Z
- max_forks: path saefportal/saef/views/connection_view.py; repo harry-consulting/SAEF; head 12ef43bbcc3178b8a988e21c1bef035881cf6e6d; licenses ["BSD-2-Clause"]; count 1; events 2020-12-16T13:20:17.000Z to 2020-12-16T13:20:17.000Z

content:
from saef.connections import ConnectionFormHelper
from ..models import Connection
from ..forms import ConnectionTypeForm
from saefportal.settings import MSG_SUCCESS_CONNECTION_UPDATE, MSG_SUCCESS_CONNECTION_VALID, \
MSG_ERROR_CONNECTION_INVALID, MSG_SUCCESS_CONNECTION_SAVED, MSG_ERROR_CONNECTION_SELECT_INVALID, \
MSG_SUCCESS_CONNECTION_DELETED
from django.contrib import messages
from django.shortcuts import redirect, render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import generic
ADD_CONNECTION_TITLE = 'Add Connection'
ADD_CONNECTION_TEMPLATE_NAME = "connection/add_connection.html"
EDIT_CONNECTION_TEMPLATE_NAME = "connection/edit_connection_detail.html"
POSTGRESQL_NAME = "PostgreSQL"
class ConnectionView(LoginRequiredMixin, generic.ListView):
template_name = 'connection/connection_list.html'
model = Connection
context_object_name = 'connections'
@login_required()
def update_connection(request, connection_id):
helper = ConnectionFormHelper()
connection = get_object_or_404(Connection, id=connection_id)
if request.method == "POST":
if request.POST["Operation"] == 'Delete':
instance = Connection.objects.get(pk=connection_id)
instance.delete()
messages.success(request, MSG_SUCCESS_CONNECTION_DELETED)
return redirect('connection')
else:
edit_method = helper.lookup_connection(connection.connection_type.name, 'edit')
edit_form = edit_method(post_request=request.POST)
if edit_form.is_valid():
save_edit_method = helper.lookup_connection(connection.connection_type.name, 'save_edit')
save_edit_method(edit_form, connection_id)
messages.success(request, MSG_SUCCESS_CONNECTION_UPDATE)
return redirect("connection")
else:
messages.error(request, MSG_ERROR_CONNECTION_SELECT_INVALID)
context = {"connection_form": edit_form}
return render(request, EDIT_CONNECTION_TEMPLATE_NAME, context)
edit_method = helper.lookup_connection(connection.connection_type.name, 'edit')
edit_form = edit_method(connection_pk=connection.pk)
context = {"connection_form": edit_form}
return render(request, EDIT_CONNECTION_TEMPLATE_NAME, context)
@login_required()
def test_database_connection(request, form):
helper = ConnectionFormHelper()
connection_type = form.cleaned_data['connection_type'].name
add_form_method = helper.lookup_connection(connection_type, 'add')
connection_form = add_form_method(request.POST)
if connection_form.is_valid():
test_connection_method = helper.lookup_connection(connection_type, 'test')
result = test_connection_method(connection_form.cleaned_data, form.cleaned_data)
if result is True:
messages.success(request, MSG_SUCCESS_CONNECTION_VALID)
else:
messages.error(request, MSG_ERROR_CONNECTION_INVALID(result))
context = {
'form': form,
'connection_form': add_form_method(request.POST),
'connection_type': connection_type
}
return render(request, ADD_CONNECTION_TEMPLATE_NAME, context)
@login_required()
def save_connection(request, form):
helper = ConnectionFormHelper()
connection_type = form.cleaned_data['connection_type'].name
add_form_method = helper.lookup_connection(connection_type, 'add')
connection_form = add_form_method(request.POST)
if connection_form.is_valid():
save_method = helper.lookup_connection(connection_type, 'save')
save_method(connection_form.cleaned_data, form.cleaned_data)
messages.success(request, MSG_SUCCESS_CONNECTION_SAVED)
return redirect("connection")
messages.error(request, MSG_ERROR_CONNECTION_SELECT_INVALID)
connection_type = form.cleaned_data['connection_type'].name
context = {
"form": ConnectionTypeForm(request.POST),
"connection_form": add_form_method(request.POST),
"connection_type": connection_type
}
return render(request, ADD_CONNECTION_TEMPLATE_NAME, context)
@login_required()
def add_connection(request):
helper = ConnectionFormHelper()
if request.method == "POST":
form = ConnectionTypeForm(request.POST)
if form.is_valid() and form.cleaned_data['connection_type']:
connection_type = form.cleaned_data['connection_type'].name
if "Operation" not in request.POST:
add_form_method = helper.lookup_connection(connection_type, 'add')
context = {
"form": form,
"connection_form": add_form_method(),
"connection_type": connection_type
}
return render(request, ADD_CONNECTION_TEMPLATE_NAME, context)
elif request.POST["Operation"] == "Test":
return test_database_connection(request, form)
elif request.POST["Operation"] == "Save":
return save_connection(request, form)
else:
form = ConnectionTypeForm()
return render(request, ADD_CONNECTION_TEMPLATE_NAME, {'form': form})
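# Illustrative only (this would normally live in urls.py, not in the views
# module): one way to wire up the views above. The route name "connection" is
# taken from the redirect("connection") calls; the URL paths themselves are
# guesses, not taken from the SAEF repository.
from django.urls import path

urlpatterns = [
    path('connection/', ConnectionView.as_view(), name='connection'),
    path('connection/add/', add_connection, name='add_connection'),
    path('connection/<int:connection_id>/', update_connection, name='update_connection'),
]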
| 41.414063
| 105
| 0.716469
| 580
| 5,301
| 6.215517
| 0.139655
| 0.08932
| 0.037448
| 0.062136
| 0.556172
| 0.527046
| 0.457143
| 0.427739
| 0.332039
| 0.300971
| 0
| 0.001412
| 0.198642
| 5,301
| 127
| 106
| 41.740157
| 0.847222
| 0
| 0
| 0.409524
| 0
| 0
| 0.088097
| 0.018676
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038095
| false
| 0
| 0.085714
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
197a2e031fcb6bc8c3c7153d570a652bf400420f
| 6,206
|
py
|
Python
|
modules/niftitools.py
|
NeuroSainteAnne/synthFLAIR
|
ddb083e0ddbb5a7a3131e947c8a84809f25b93a1
|
[
"BSD-3-Clause"
] | 2
|
2022-01-09T11:25:40.000Z
|
2022-03-24T04:00:11.000Z
|
modules/niftitools.py
|
yunfei920406/synthFLAIR
|
ddb083e0ddbb5a7a3131e947c8a84809f25b93a1
|
[
"BSD-3-Clause"
] | null | null | null |
modules/niftitools.py
|
yunfei920406/synthFLAIR
|
ddb083e0ddbb5a7a3131e947c8a84809f25b93a1
|
[
"BSD-3-Clause"
] | 2
|
2022-03-24T04:00:17.000Z
|
2022-03-25T00:36:13.000Z
|
import os
import pydicom
import glob
import numpy as np
import nibabel as nib
from skimage import filters, morphology
from scipy.ndimage.morphology import binary_fill_holes
from scipy.ndimage import label
from dipy.segment.mask import median_otsu
def padvolume(volume):
"Applies a padding/cropping to a volume in order to hav 256x256 size"
padx1 = padx2 = pady1 = pady2 = 0
orig_shape = volume.shape
if orig_shape[0] < 256 or orig_shape[1] < 256:
if orig_shape[0] < 256:
padx1 = int((256.0 - orig_shape[0])/2)
padx2 = 256 - orig_shape[0] - padx1
if orig_shape[1] < 256:
pady1 = int((256.0 - orig_shape[1])/2)
pady2 = 256 - orig_shape[1] - pady1
volume = np.pad(volume, ((padx1, padx2),(pady1,pady2),(0,0)), mode="edge")
cutx1 = cutx2 = cuty1 = cuty2 = 0
if orig_shape[0] > 256 or orig_shape[1] > 256:
if orig_shape[0] > 256:
cutx1 = int((orig_shape[0]-256.0)/2)
cutx2 = orig_shape[0] - 256 - cutx1
volume = volume[cutx1:-cutx2,:,:]
if orig_shape[1] > 256:
cuty1 = int((orig_shape[1]-256.0)/2)
cuty2 = orig_shape[1] - 256 - cuty1
volume = volume[:,cuty1:-cuty2,:]
return volume, (padx1, padx2, pady1, pady2, cutx1, cutx2, cuty1, cuty2)
def zpadding(volume, zpad):
orig_shape = volume.shape
if orig_shape[2] < zpad:
padz1 = int((zpad - orig_shape[2])/2)
padz2 = zpad - orig_shape[2] - padz1
volume = np.pad(volume, ((0,0),(0,0),(padz1,padz2)), mode="minimum")
elif orig_shape[2] > zpad:
cutz1 = int((orig_shape[2] - zpad)/2)
        cutz2 = orig_shape[2] - zpad - cutz1
        volume = volume[:, :, cutz1:-cutz2]
return volume
def reversepad(volume, padspecs):
"Reserves a previously applied padding"
padx1 = padspecs[0]
padx2 = padspecs[1]
pady1 = padspecs[2]
pady2 = padspecs[3]
cutx1 = padspecs[4]
cutx2 = padspecs[5]
cuty1 = padspecs[6]
cuty2 = padspecs[7]
if cutx1>0 or cutx2>0:
volume = np.pad(volume, ((cutx1, cutx2),(0,0),(0,0)), mode="edge")
if cuty1>0 or cuty2>0:
volume = np.pad(volume, ((0,0),(cuty1,cuty2),(0,0)), mode="edge")
if padx1>0 or padx2>0:
volume = volume[padx1:-padx2,:,:]
if pady1>0 or pady2>0:
volume = volume[:,pady1:-pady2,:]
return volume
def brain_component(vol):
"Select the largest component in a mask (brain)"
label_im, nb_labels = label(vol)
    label_count = np.bincount(label_im.ravel().astype(int))
label_count[0] = 0
return label_im == label_count.argmax()
def normalize(vol, mask):
"Normalization of a volume"
masked_vol = vol[mask]
mean_val, sd_val = np.mean(masked_vol), np.std(masked_vol)
vol = (vol - mean_val) / sd_val
return vol
def adccompute(b0, b1000):
"Computes ADC map"
crudemask = (b0 >= 1) & (b1000 >= 1) # exclude zeros for ADC calculation
adc = np.zeros(b0.shape, b1000.dtype)
adc[crudemask] = -1. * float(1000) * np.log(b1000[crudemask] / b0[crudemask])
adc[adc < 0] = 0
return adc
def maskcompute(b0, b1000):
"Computes a brain mask using otsu method"
b0_mask, mask0 = median_otsu(b0, 1, 1)
b1000_mask, mask1000 = median_otsu(b1000, 1, 1)
mask = binary_fill_holes(morphology.binary_dilation(brain_component(mask0 & mask1000)))
mask = mask & (b0 >= 1) & (b1000 >= 1)
return mask
def splitdiffusion(diffdata):
"Splits b0 and b1000 based on value mean"
vol1 = diffdata[...,0]
vol2 = diffdata[...,1]
if vol1.mean() > vol2.mean():
b0 = vol1
b1000 = vol2
else:
b0 = vol2
b1000 = vol1
return b0, b1000
def nifti2array(diffpath):
# load diffusion
diffnib = nib.load(diffpath)
diffdata = diffnib.get_fdata()
    # differentiate b1000 from b0
b0, b1000 = splitdiffusion(diffdata)
    stacked, mask, padspecs = splitdwi2array(b0, b1000, adjust=True, z_pad=None)
stacked = stacked.transpose([3,2,1,0])[:,:,::-1,np.newaxis,:]
qualarr = np.tile(2, (stacked.shape[0],1))
return stacked, qualarr, padspecs, diffnib.affine
def twoniftis2array(b0path, b1000path,z_pad=None):
# load niftis
diff0nib = nib.load(b0path)
diff0data = diff0nib.get_fdata()
diff1000nib = nib.load(b1000path)
diff1000data = diff1000nib.get_fdata()
return splitdwi2array(diff0data,diff1000data,adjust=False,z_pad=z_pad)
def flairnifti2array(flairpath, mask, z_pad=None):
flairnib = nib.load(flairpath)
flair = flairnib.get_fdata()
# pad
flair, padspecs = padvolume(flair)
if z_pad is not None:
flair = zpadding(flair, z_pad)
# normalisation
flair = normalize(flair, mask)
return flair
def masknifti2array(mask, z_pad=None):
# load nifti
masknib = nib.load(mask)
mask = masknib.get_fdata()
# pad
mask, padspecs = padvolume(mask)
if z_pad is not None:
mask = zpadding(mask, z_pad)
return mask
def splitdwi2array(b0,b1000,adjust=False,z_pad=None):
# pad/crop volumes to 256x256
b0, _ = padvolume(b0)
b1000, padspecs = padvolume(b1000)
#Z-pad
if z_pad is not None:
b0 = zpadding(b0, z_pad)
b1000 = zpadding(b1000, z_pad)
# ADC calculation
adc = adccompute(b0, b1000)
# mask computation
mask = maskcompute(b0, b1000)
# normalisation
b0 = normalize(b0, mask)
b1000 = normalize(b1000, mask)
# adjust for model input
if adjust:
b0 = ((b0 + 5) / (12 + 5))*2-1
b1000 = ((b1000 + 5) / (12 + 5))*2-1
adc = ((adc) / (7500))*2-1
# export for model input
stacked = np.stack([b0,b1000,adc])
return stacked, mask, padspecs
def array2nifti(savearray, padspecs, affine, outpath):
synthflair = savearray[:,:,::-1,0].transpose(2,1,0)
synthflair = synthflair - synthflair.min()
synthflair = 256*(synthflair / synthflair.max())
synthflair = reversepad(synthflair, padspecs)
synthflairnib = nib.Nifti1Image(synthflair, affine=affine)
nib.save(synthflairnib, outpath)
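if __name__ == "__main__":
    # Illustrative smoke test only ("dwi.nii.gz" is an assumed file name): run
    # one DWI volume through the preprocessing above and write a NIfTI back
    # out. The b0 channel stands in for a real synthFLAIR model prediction.
    stacked, qualarr, padspecs, affine = nifti2array("dwi.nii.gz")
    fake_prediction = stacked[..., 0]  # shape (slices, 256, 256, 1)
    array2nifti(fake_prediction, padspecs, affine, "synthflair_smoke_test.nii.gz")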
| 31.502538
| 91
| 0.618595
| 838
| 6,206
| 4.497613
| 0.220764
| 0.05731
| 0.021226
| 0.020695
| 0.122314
| 0.059432
| 0.03741
| 0.023879
| 0.023879
| 0.023879
| 0
| 0.094673
| 0.25282
| 6,206
| 197
| 92
| 31.502538
| 0.718137
| 0.084434
| 0
| 0.059603
| 0
| 0
| 0.048452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092715
| false
| 0
| 0.059603
| 0
| 0.238411
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
197baac1afa4b35de9d60fc8d92a0d5870b20c39
| 1,937
|
py
|
Python
|
recognition.py
|
1159186649/Raspberry-Car
|
d1114793dd45be4e60a5d8a8da57b01ae3210f94
|
[
"Apache-2.0"
] | 1
|
2020-11-04T02:28:32.000Z
|
2020-11-04T02:28:32.000Z
|
recognition.py
|
1159186649/Raspberry-Car
|
d1114793dd45be4e60a5d8a8da57b01ae3210f94
|
[
"Apache-2.0"
] | null | null | null |
recognition.py
|
1159186649/Raspberry-Car
|
d1114793dd45be4e60a5d8a8da57b01ae3210f94
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from aip import AipFace
from picamera import PiCamera
import urllib.request
import RPi.GPIO as GPIO
import base64
import time
import Main
# Baidu AI Cloud face recognition app ID and keys
APP_ID = '*****'
API_KEY = '**********'
SECRET_KEY ='**********'
client = AipFace(APP_ID, API_KEY, SECRET_KEY)  # create a client to access Baidu Cloud
# image encoding format
IMAGE_TYPE = 'BASE64'
camera = PiCamera()  # create a camera object
# user group
GROUP = 'usr1'
# photo-capture function
# def getimage():
#     camera.resolution = (1024, 768)  # capture at 1024x768
#     camera.start_preview()  # start capturing
#     time.sleep(1)
#     camera.capture('faceimage.jpg')  # take a photo and save it
#     time.sleep(1)
#     camera.close()
# convert the image format to base64
def transimage():
f = open('faceimage.jpg','rb')
img = base64.b64encode(f.read())
return img
# upload to the Baidu API for face detection
def go_api(image):
    # search the Baidu Cloud face library for a matching face
    result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP)
    if result['error_msg'] == 'SUCCESS':  # the request succeeded
        name = result['result']['user_list'][0]['user_id']  # get the matched name
        score = result['result']['user_list'][0]['score']  # get the similarity score
        if score < 80:  # reject matches with similarity below 80
            print("Sorry, I don't recognize you!")
            name = 'Unknown'
            return 0
        current_time = time.asctime(time.localtime(time.time()))  # get the current time
        # save the entry/exit record to Log.txt
        f = open('Log.txt', 'a')
        f.write("Person: " + name + " " + "Time:" + str(current_time) + '\n')
        f.close()
        return 1
    if result['error_msg'] == 'pic not has face':
        # print('No face detected')
        time.sleep(2)
        return 1
    else:
        print(str(result['error_code']) + ' ' + result['error_msg'])
        return 0
# main entry point
if __name__ == '__main__':
    print('Ready')
    # getimage()  # take a photo
    img = transimage()  # convert the photo format
    res = go_api(img)  # upload the converted image to Baidu Cloud
    if res == 1:  # the person is in the face library
        print("Normal")
    else:
        print("Stranger detected")
    print('Waiting 40s before the next round')
time.sleep(1)
| 25.826667
| 87
| 0.573051
| 231
| 1,937
| 4.683983
| 0.502165
| 0.033272
| 0.027726
| 0.020333
| 0.075786
| 0.036969
| 0
| 0
| 0
| 0
| 0
| 0.029371
| 0.261745
| 1,937
| 74
| 88
| 26.175676
| 0.727273
| 0.224574
| 0
| 0.125
| 0
| 0
| 0.164393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.145833
| 0
| 0.291667
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
197f119da149ba3627c410ef123a293d7183c17e
| 4,986
|
py
|
Python
|
rssxkcd.py
|
bkentropy/xkcd-updater
|
732b60428a9fdc79c2cd847623c7416cb4b6022d
|
[
"MIT"
] | null | null | null |
rssxkcd.py
|
bkentropy/xkcd-updater
|
732b60428a9fdc79c2cd847623c7416cb4b6022d
|
[
"MIT"
] | null | null | null |
rssxkcd.py
|
bkentropy/xkcd-updater
|
732b60428a9fdc79c2cd847623c7416cb4b6022d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import requests
import feedparser
import time
import sys
import sqlite3
import datetime
# Command line args
parser = argparse.ArgumentParser(description='Provide HipChat integration url to post xkcd comics')
parser.add_argument('url', type=str, help='(string) a special url for posting to a hipchat room')
parser.add_argument('-c', '--commit', action='store_true', help='check the output, and commit if it looks right')
args = parser.parse_args()
class Entry:
def __init__(self, title, imglink, summary, link, pubts, posted):
self.title = title
self.imglink = imglink
self.summary = summary
self.link = link # this will be the id field in db
self.pubts = pubts
self.posted = 0
def analyze(self):
data = "Link: " + self.link + "\n"
data += "Title: " + self.title + "\n"
data += "Summary: " + self.summary + "\n"
data += "Pubts: " + self.pubts + "\n"
data += "Imglink: " + self.imglink + "\n"
data += "Posted: " + str(self.posted)
print(data)
# Get rss feed from URL (https://xkcd.com/rss.xml)
def check_rss_feed(cursor, feedurl, rssentries):
row = cursor.execute("SELECT id FROM lastpub")
lastts = row.fetchone() or ("",)
req = requests.get(feedurl, headers={
"If-Modified-Since": lastts[0]
})
if req.text:
# get the rss feed data from the feedurl
feed = feedparser.parse(feedurl)
entries = feed.entries
for i in range(len(entries)):
e = Entry(
entries[i]['title'],
entries[i]['summary'].split('\"')[3],
entries[i]['summary'].split('\"')[1],
entries[i]['link'],
entries[i]['published'],
0
)
rssentries.append(e)
return req
# Hipchat posting function
def post_to_hipchat(title, src, alttext, posturl):
payload = {
"color": "gray",
"message": "<span>" + title + "</span><br/><img src='" + src + "'/>" +
"<br/><span>(Alt-text: " + alttext + ")</span>",
"notify": True,
"message_format": "html"
}
if args.commit:
r = requests.post(posturl, data=payload)
print(title, "Posted!", args.commit)
# Database functions
def insert_entry(db, cursor, e):
if args.commit:
cursor.execute('''INSERT INTO entries(id, title, imglink, summary, pubts, posted)
VALUES(?,?,?,?,?,?)''', (e.link, e.title, e.imglink, e.summary, e.pubts, 0))
db.commit()
print("Saved entry in db", args.commit)
def update_to_posted(db, cursor, e):
if args.commit:
cursor.execute('UPDATE entries SET posted=1 WHERE id=?', (e.link,))
db.commit()
print("Updated posted for:", e.link, args.commit)
def check_if_in_db(db, cursor, e):
rc = cursor.execute('SELECT id FROM entries WHERE id=?', (e.link,))
if rc.fetchone():
return True
else:
return False
def check_if_posted(db, cursor, e):
rc = cursor.execute('SELECT posted FROM entries WHERE id=?', (e.link,))
exists = rc.fetchone()[0]
    if exists == 1:
return True
else:
return False
# Primary function
def check_and_post(db, cursor, ents, posturl):
# TODO: lines 96-99 and 102-106 are ripe for refactor
update_timestamp = False
for e in ents:
indb = check_if_in_db(db, cursor, e)
if indb:
posted = check_if_posted(db, cursor, e)
if not posted:
title = e.title + " (" + str(e.link) + ")"
post_to_hipchat(title, e.imglink, e.summary, posturl)
update_to_posted(db, cursor, e)
update_timestamp = True
print("in db, not posted", datetime.datetime.now())
else:
insert_entry(db, cursor, e)
title = e.title + " (" + str(e.link) + ")"
post_to_hipchat(title, e.imglink, e.summary, posturl)
update_to_posted(db, cursor, e)
update_timestamp = True
print("not in db at all", datetime.datetime.now())
return update_timestamp
def main():
# Globals
feedurl = 'https://xkcd.com/rss.xml'
    posturl = args.url  # use the argparse-parsed URL rather than raw sys.argv
RSSEntries = []
db = sqlite3.connect("feed.db")
cursor = db.cursor()
if feedurl and posturl:
req = check_rss_feed(cursor, feedurl, RSSEntries)
RSSEntries = sorted(RSSEntries, key=lambda e: e.link)
if len(RSSEntries) > 0:
need_update_timestamp = check_and_post(db, cursor, RSSEntries, posturl)
if need_update_timestamp:
newts = (req.headers["Last-Modified"],)
if args.commit:
cursor.execute("UPDATE lastpub set id=?", newts)
db.commit()
print('Updated lastpub date to: ', newts, args.commit)
else:
print("All up to date!", datetime.datetime.now())
if __name__ == "__main__":
main()
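# Illustrative only: the queries above assume feed.db already has `entries`
# and `lastpub` tables. A one-time bootstrap sketch (column types are guesses
# inferred from the SQL used above):
def bootstrap_db():
    db = sqlite3.connect("feed.db")
    db.execute('''CREATE TABLE IF NOT EXISTS entries(
        id TEXT PRIMARY KEY, title TEXT, imglink TEXT,
        summary TEXT, pubts TEXT, posted INTEGER)''')
    db.execute('CREATE TABLE IF NOT EXISTS lastpub(id TEXT)')
    db.execute("INSERT INTO lastpub(id) VALUES ('')")  # seed the single timestamp row
    db.commit()
    db.close()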
| 33.689189
| 113
| 0.583233
| 632
| 4,986
| 4.511076
| 0.265823
| 0.036478
| 0.028411
| 0.026307
| 0.27429
| 0.201684
| 0.130831
| 0.098913
| 0.075061
| 0.075061
| 0
| 0.006663
| 0.277577
| 4,986
| 147
| 114
| 33.918367
| 0.784842
| 0.056558
| 0
| 0.186992
| 0
| 0
| 0.170467
| 0.004475
| 0
| 0
| 0
| 0.006803
| 0
| 1
| 0.081301
| false
| 0
| 0.056911
| 0
| 0.195122
| 0.065041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1982e8dcbffc500675e9501f2db4214567aa2081
| 24,054
|
py
|
Python
|
boundaries/ca_qc_districts/definition.py
|
imhangoo/represent-canada-data
|
0d9cc818b343079f81a00c15438d79c079a10c9b
|
[
"OML"
] | null | null | null |
boundaries/ca_qc_districts/definition.py
|
imhangoo/represent-canada-data
|
0d9cc818b343079f81a00c15438d79c079a10c9b
|
[
"OML"
] | null | null | null |
boundaries/ca_qc_districts/definition.py
|
imhangoo/represent-canada-data
|
0d9cc818b343079f81a00c15438d79c079a10c9b
|
[
"OML"
] | null | null | null |
# coding: utf-8
import re
from datetime import date
import boundaries
# Noting that the "union" merge strategy fails with:
#
# GEOS_ERROR: TopologyException: found non-noded intersection between
# LINESTRING (...) and LINESTRING (...)
#
# django.contrib.gis.geos.error.GEOSException: Could not initialize GEOS Geometry with given input.
#
# So, we instead use the "combine" merge strategy.
# Generated by sets.rb and then edited.
sets = {
10043: ["Rimouski", "districts"],
10070: ["Saint-Fabien", "districts"],
1023: ["Les Îles-de-la-Madeleine", "districts"],
11040: ["Trois-Pistoles", "quartiers"],
12015: ["Saint-Antonin", "districts"],
12072: ["Rivière-du-Loup", "districts"],
13073: ["Témiscouata-sur-le-Lac", "districts"],
13095: ["Pohénégamook", "quartiers"],
15013: ["La Malbaie", "districts"],
15035: ["Clermont", "districts"],
15058: ["Saint-Siméon", "districts"],
16013: ["Baie-Saint-Paul", "districts"],
16055: ["Saint-Urbain", "districts"],
17055: ["Saint-Aubert", "districts"],
18050: ["Montmagny", "districts"],
19055: ["Sainte-Claire", "districts"],
19068: ["Saint-Henri", "districts"],
19097: ["Saint-Charles-de-Bellechasse", "districts"],
19105: ["Beaumont", "districts"],
2005: ["Percé", "districts"],
2010: ["Sainte-Thérèse-de-Gaspé", "districts"],
2015: ["Grande-Rivière", "districts"],
2028: ["Chandler", "districts"],
2047: ["Port-Daniel—Gascons", "districts"],
21010: ["Saint-Ferréol-les-Neiges", "districts"],
21045: ["Boischatel", "districts"],
22005: ["Sainte-Catherine-de-la-Jacques-Cartier", "districts"],
22010: ["Fossambault-sur-le-Lac", "districts"],
22015: ["Lac-Saint-Joseph", "districts"],
22035: ["Stoneham-et-Tewkesbury", "districts"],
22040: ["Lac-Beauport", "districts"],
22045: ["Sainte-Brigitte-de-Laval", "districts"],
23027: ["Québec", "districts"],
23057: ["L'Ancienne-Lorette", "districts"],
23072: ["Saint-Augustin-de-Desmaures", "districts"],
25213: ["Lévis", "districts"],
26030: ["Sainte-Marie", "districts"],
26063: ["Saint-Isidore", "districts"],
27028: ["Beauceville", "districts"],
27043: ["Saint-Joseph-de-Beauce", "districts"],
29073: ["Saint-Georges", "districts"],
30010: ["Notre-Dame-des-Bois", "districts"],
30025: ["Frontenac", "districts"],
30030: ["Lac-Mégantic", "districts"],
30045: ["Nantes", "districts"],
3005: ["Gaspé", "quartiers"],
3010: ["Cloridorme", "districts"],
31015: ["Disraeli", "districts"],
31056: ["Adstock", "districts"],
31084: ["Thetford Mines", "districts"],
31122: ["East Broughton", "districts"],
32013: ["Saint-Ferdinand", "districts"],
32033: ["Princeville", "districts"],
32040: ["Plessisville", "districts"],
32065: ["Lyster", "districts"],
33045: ["Saint-Agapit", "districts"],
33052: ["Saint-Flavien", "districts"],
34030: ["Cap-Santé", "districts"],
34038: ["Saint-Basile", "districts"],
34120: ["Lac-Sergent", "quartiers"],
35027: ["Saint-Tite", "districts"],
36033: ["Shawinigan", "districts"],
37067: ["Trois-Rivières", "districts"],
37230: ["Saint-Maurice", "districts"],
37235: ["Notre-Dame-du-Mont-Carmel", "districts"],
39060: ["Saint-Christophe-d'Arthabaska", "districts"],
39062: ["Victoriaville", "districts"],
4037: ["Sainte-Anne-des-Monts", "districts"],
41038: ["Cookshire-Eaton", "districts"],
41098: ["Weedon", "districts"],
42020: ["Saint-François-Xavier-de-Brompton", "districts"],
42025: ["Saint-Denis-de-Brompton", "districts"],
42032: ["Racine", "districts"],
42098: ["Richmond", "districts"],
42100: ["Saint-Claude", "districts"],
42110: ["Cleveland", "districts"],
43027: ["Sherbrooke", "districts"],
44071: ["Compton", "districts"],
45060: ["Sainte-Catherine-de-Hatley", "districts"],
45072: ["Magog", "districts"],
46050: ["Dunham", "districts"],
46058: ["Sutton", "districts"],
46075: ["Lac-Brome", "districts"],
46078: ["Bromont", "districts"],
46080: ["Cowansville", "quartiers"],
46112: ["Farnham", "districts"],
47017: ["Granby", "districts"],
47025: ["Waterloo", "districts"],
47047: ["Roxton Pond", "districts"],
48028: ["Acton Vale", "districts"],
49048: ["Saint-Germain-de-Grantham", "districts"],
49058: ["Drummondville", "districts"],
49070: ["Saint-Cyrille-de-Wendover", "districts"],
50042: ["Saint-Léonard-d'Aston", "districts"],
51015: ["Louiseville", "districts"],
52007: ["Lavaltrie", "districts"],
52017: ["Lanoraie", "districts"],
52035: ["Berthierville", "districts"],
52040: ["Sainte-Geneviève-de-Berthier", "districts"],
52045: ["Saint-Ignace-de-Loyola", "districts"],
52080: ["Saint-Gabriel", "districts"],
52095: ["Mandeville", "districts"],
53040: ["Saint-Roch-de-Richelieu", "districts"],
53050: ["Saint-Joseph-de-Sorel", "quartiers"],
53052: ["Sorel-Tracy", "districts"],
53065: ["Sainte-Anne-de-Sorel", "districts"],
54008: ["Saint-Pie", "districts"],
54017: ["Saint-Damase", "districts"],
54048: ["Saint-Hyacinthe", "districts"],
54060: ["Saint-Dominique", "districts"],
55008: ["Ange-Gardien", "districts"],
55023: ["Saint-Césaire", "districts"],
55037: ["Rougemont", "districts"],
55048: ["Marieville", "districts"],
55057: ["Richelieu", "districts"],
56083: ["Saint-Jean-sur-Richelieu", "districts"],
57005: ["Chambly", "districts"],
57010: ["Carignan", "districts"],
57020: ["Saint-Basile-le-Grand", "districts"],
57025: ["McMasterville", "districts"],
57030: ["Otterburn Park", "districts"],
57033: ["Saint-Jean-Baptiste", "districts"],
57035: ["Mont-Saint-Hilaire", "districts"],
57040: ["Beloeil", "districts"],
57045: ["Saint-Mathieu-de-Beloeil", "districts"],
58007: ["Brossard", "districts"],
58012: ["Saint-Lambert", "districts"],
58033: ["Boucherville", "districts"],
58037: ["Saint-Bruno-de-Montarville", "districts"],
58227: ["Longueuil", "districts"],
59010: ["Sainte-Julie", "districts"],
59015: ["Saint-Amable", "districts"],
59020: ["Varennes", "districts"],
59025: ["Verchères", "districts"],
59035: ["Contrecoeur", "districts"],
60005: ["Charlemagne", "districts"],
60013: ["Repentigny", "districts"],
60028: ["L'Assomption", "districts"],
60035: ["L'Épiphanie", "districts"],
61025: ["Joliette", "districts"],
61027: ["Saint-Thomas", "districts"],
61030: ["Notre-Dame-des-Prairies", "districts"],
61040: ["Saint-Ambroise-de-Kildare", "districts"],
61050: ["Sainte-Mélanie", "districts"],
62007: ["Saint-Félix-de-Valois", "districts"],
62025: ["Saint-Alphonse-Rodriguez", "districts"],
62037: ["Rawdon", "districts"],
62047: ["Chertsey", "districts"],
62060: ["Saint-Donat", "districts"],
62075: ["Saint-Damien", "districts"],
63030: ["Saint-Esprit", "districts"],
63035: ["Saint-Roch-de-l'Achigan", "districts"],
63048: ["Saint-Lin—Laurentides", "districts"],
63055: ["Saint-Calixte", "districts"],
63060: ["Sainte-Julienne", "districts"],
64008: ["Terrebonne", "districts"],
64015: ["Mascouche", "districts"],
65005: ["Laval", "districts"],
66007: ["Montréal-Est", "districts"],
66023: ["Montréal", "districts"],
66032: ["Westmount", "districts"],
66058: ["Côte-Saint-Luc", "districts"],
66072: ["Mont-Royal", "districts"],
66087: ["Dorval", "districts"],
66097: ["Pointe-Claire", "districts"],
66102: ["Kirkland", "districts"],
66107: ["Beaconsfield", "districts"],
66117: ["Sainte-Anne-de-Bellevue", "districts"],
66127: ["Senneville", "districts"],
66142: ["Dollard-Des Ormeaux", "districts"],
67010: ["Saint-Philippe", "districts"],
67015: ["La Prairie", "districts"],
67020: ["Candiac", "districts"],
67025: ["Delson", "quartiers"],
67030: ["Sainte-Catherine", "districts"],
67035: ["Saint-Constant", "districts"],
67045: ["Mercier", "districts"],
67050: ["Châteauguay", "districts"],
67055: ["Léry", "districts"],
68020: ["Sainte-Clotilde", "districts"],
68050: ["Saint-Michel", "districts"],
68055: ["Saint-Rémi", "districts"],
69017: ["Saint-Chrysostome", "districts"],
69055: ["Huntingdon", "quartiers"],
69070: ["Saint-Anicet", "districts"],
70012: ["Sainte-Martine", "districts"],
70022: ["Beauharnois", "districts"],
70035: ["Saint-Louis-de-Gonzague", "districts"],
70040: ["Saint-Stanislas-de-Kostka", "districts"],
70052: ["Salaberry-de-Valleyfield", "districts"],
7018: ["Causapscal", "districts"],
7047: ["Amqui", "districts"],
7057: ["Lac-au-Saumon", "districts"],
71025: ["Saint-Zotique", "districts"],
71033: ["Les Coteaux", "districts"],
71040: ["Coteau-du-Lac", "districts"],
71050: ["Les Cèdres", "districts"],
71060: ["L'Île-Perrot", "districts"],
71065: ["Notre-Dame-de-l'Île-Perrot", "districts"],
71070: ["Pincourt", "districts"],
71083: ["Vaudreuil-Dorion", "districts"],
71100: ["Hudson", "districts"],
71105: ["Saint-Lazare", "districts"],
71133: ["Rigaud", "districts"],
72005: ["Saint-Eustache", "districts"],
72010: ["Deux-Montagnes", "districts"],
72015: ["Sainte-Marthe-sur-le-Lac", "districts"],
72020: ["Pointe-Calumet", "districts"],
72025: ["Saint-Joseph-du-Lac", "districts"],
72032: ["Oka", "districts"],
72043: ["Saint-Placide", "districts"],
73005: ["Boisbriand", "districts"],
73010: ["Sainte-Thérèse", "districts"],
73015: ["Blainville", "districts"],
73035: ["Sainte-Anne-des-Plaines", "districts"],
74005: ["Mirabel", "districts"],
75005: ["Saint-Colomban", "districts"],
75017: ["Saint-Jérôme", "districts"],
75028: ["Sainte-Sophie", "districts"],
75040: ["Prévost", "districts"],
76008: ["Saint-André-d'Argenteuil", "districts"],
76020: ["Lachute", "districts"],
76043: ["Brownsburg-Chatham", "districts"],
77022: ["Sainte-Adèle", "districts"],
77035: ["Sainte-Anne-des-Lacs", "districts"],
77055: ["Lac-des-Seize-Îles", "districts"],
77060: ["Wentworth-Nord", "districts"],
78010: ["Val-David", "districts"],
78047: ["Saint-Faustin—Lac-Carré", "districts"],
78055: ["Montcalm", "districts"],
78070: ["Amherst", "districts"],
78095: ["Lac-Supérieur", "districts"],
78102: ["Mont-Tremblant", "districts"],
79078: ["Lac-des-Écorces", "districts"],
8053: ["Matane", "districts"],
81017: ["Gatineau", "districts"],
82005: ["L'Ange-Gardien", "districts"],
82015: ["Val-des-Monts", "districts"],
82020: ["Cantley", "districts"],
82025: ["Chelsea", "districts"],
82030: ["Pontiac", "districts"],
82035: ["La Pêche", "districts"],
83065: ["Maniwaki", "quartiers"],
85045: ["Saint-Bruno-de-Guigues", "districts"],
86042: ["Rouyn-Noranda", "districts"],
87058: ["Macamic", "districts"],
87090: ["La Sarre", "quartiers"],
88022: ["Barraute", "districts"],
89008: ["Val-d'Or", "districts"],
89015: ["Malartic", "districts"],
89040: ["Senneterre", "quartiers"],
90012: ["La Tuque", "districts"],
9077: ["Mont-Joli", "districts"],
93005: ["Desbiens", "quartiers"],
93012: ["Métabetchouan—Lac-à-la-Croix", "districts"],
93020: ["Hébertville", "districts"],
93030: ["Saint-Bruno", "districts"],
93035: ["Saint-Gédéon", "districts"],
93042: ["Alma", "districts"],
93045: ["Saint-Nazaire", "districts"],
93065: ["L'Ascension-de-Notre-Seigneur", "districts"],
93070: ["Saint-Henri-de-Taillon", "districts"],
94068: ["Saguenay", "districts"],
94235: ["Saint-Fulgence", "districts"],
94240: ["Saint-Honoré", "districts"],
94245: ["Saint-David-de-Falardeau", "districts"],
94255: ["Saint-Ambroise", "districts"],
96020: ["Baie-Comeau", "districts"],
96025: ["Pointe-Lebel", "districts"],
96030: ["Pointe-aux-Outardes", "districts"],
96040: ["Ragueneau", "districts"],
97007: ["Sept-Îles", "districts"],
99060: ["Eeyou Istchee Baie-James", "quartiers"],
}
# Check the names with (replace `CODE`):
# ogrinfo -al -geom=NO boundaries/ca_qc_districts | grep -B6 CODE | grep NM_DIS | sort
def district_namer(f):
import boundaries
type_id = f.get('NO_DIS')
code = f.get('CO_MUNCP')
name = f.get('NM_DIS')
# Québec
if code == 23027:
return {
# Hyphens.
'Cap-Rouge-Laurentien': 'Cap-Rouge—Laurentien',
'Chute-Montmorency-Seigneurial': 'Chute-Montmorency—Seigneurial',
'Lac-Saint-Charles-Saint-Émile': 'Lac-Saint-Charles—Saint-Émile',
'Montcalm-Saint-Sacrement': 'Montcalm—Saint-Sacrement',
'Saint-Louis-Sillery': 'Saint-Louis—Sillery',
'Saint-Roch-Saint-Sauveur': 'Saint-Roch—Saint-Sauveur',
}.get(name, name)
# Sherbrooke
elif code == 43027:
# https://cartes.ville.sherbrooke.qc.ca/monarrondissementenligne/
return {
1.10: 'Lac Magog',
1.20: 'Rock Forest',
1.30: 'Saint-Élie',
1.40: 'Brompton',
2.10: 'Hôtel-Dieu',
2.20: 'Desranleau',
2.30: 'Quatre-Saisons',
2.40: 'Pin-Solitaire',
3.10: 'Uplands',
3.20: 'Fairview',
4.10: 'Université',
4.20: 'Ascot',
4.30: 'Lac-des-Nations',
4.40: 'Golf',
4.50: 'Carrefour',
}[type_id]
# Longueuil
elif code == 58227:
return re.sub(r"\b(?:d'|de |du |des )", '', name)
# Montréal
elif code == 66023:
return {
'Est (Pierrefonds-Roxboro)': 'Bois-de-Liesse',
'Ouest (Pierrefonds-Roxboro)': 'Cap-Saint-Jacques',
'St-Henri-Petite-Bourgogne-Pte-St-Charles': 'Saint-Henri—Petite-Bourgogne—Pointe-Saint-Charles',
'Étienne-Desmarteaux': 'Étienne-Desmarteau',
# Articles.
"d'Ahuntsic": 'Ahuntsic',
'de Bordeaux-Cartierville': 'Bordeaux-Cartierville',
'de Saint-Sulpice': 'Saint-Sulpice',
'du Sault-au-Récollet': 'Sault-au-Récollet',
# Hyphens.
"Champlain-L'Île-des-Soeurs": "Champlain—L'Île-des-Soeurs",
'Maisonneuve-Longue-Pointe': 'Maisonneuve—Longue-Pointe',
'Saint-Paul-Émard': 'Saint-Paul—Émard',
}.get(name, name)
# Pointe-Claire
elif code == 66097:
# Check if required with:
# ogrinfo -al -geom=NO boundaries/ca_qc_districts | grep '/ '
return name.replace('/ ', '/')
# Gatineau
elif code == 81017:
return {
# Hyphens.
'de Hull-Val-Tétreau': 'de Hull—Val-Tétreau',
'de Saint-Raymond-Vanier': 'de Saint-Raymond—Vanier',
'de Wright-Parc-de-la-Montagne': 'de Wright—Parc-de-la-Montagne',
'du Plateau-Manoir-des-Trembles': 'du Plateau—Manoir-des-Trembles',
}.get(name, name)
else:
if name:
# Check if required with:
# ogrinfo -al -geom=NO boundaries/ca_qc_districts | grep ' no '
if 'District no ' in name:
return f.get('NM_DIS').replace(' no ', ' ') # Baie-Saint-Paul
else:
return boundaries.clean_attr('NM_DIS')(f)
elif f.get('MODE_SUFRG') == 'Q':
return 'Quartier %s' % int(type_id)
else:
return 'District %s' % int(type_id)
def borough_namer(f):
import boundaries
code = f.get('CO_MUNCP')
name = f.get('NM_ARON')
# Sherbrooke
if code == 43027:
return 'Arrondissement %s' % int(f.get('NO_ARON'))
# Montréal
elif code == 66023:
return {
'Le Plateau-Mont-Royal': 'Plateau-Mont-Royal',
'Le Sud-Ouest': 'Sud-Ouest',
'Pierrefond-Roxboro': 'Pierrefonds-Roxboro',
'Rosemont--La-Petite-Patrie': 'Rosemont—La Petite-Patrie',
}.get(name, boundaries.clean_attr('NM_ARON')(f))
else:
return boundaries.clean_attr('NM_ARON')(f)
# Check if required with:
# ogrinfo -al -geom=NO boundaries/ca_qc_districts | grep -A9 ' 1\.10'
def district_ider(f):
if f.get('CO_MUNCP') in (43027, 66023): # Sherbrooke, Montréal
return f.get('NO_DIS') # e.g. "1.10"
else:
return int(f.get('NO_DIS'))
for geographic_code, (name, type) in sets.items():
geographic_codes = [geographic_code]
boundaries.register('%s %s' % (name, type),
domain='%s, QC' % name,
last_updated=date(2017, 11, 30),
name_func=district_namer,
id_func=district_ider,
authority='Directeur général des élections du Québec',
licence_url='https://www.electionsquebec.qc.ca/francais/conditions-d-utilisation-de-notre-site-web.php',
encoding='utf-8',
extra={'division_id': 'ocd-division/country:ca/csd:24%05d' % geographic_code},
is_valid_func=lambda f, geographic_codes=geographic_codes: int(f.get('CO_MUNCP')) in geographic_codes,
notes='Load the shapefile manually:\nfab alpheus update_boundaries:args="-r --merge combine -d data/shapefiles/public/boundaries/ca_qc_districts"',
)
boundaries.register('Paroisse de Plessisville districts',
domain='Plessisville, QC',
last_updated=date(2017, 11, 30),
name_func=district_namer,
id_func=district_ider,
authority='Directeur général des élections du Québec',
licence_url='https://www.electionsquebec.qc.ca/francais/conditions-d-utilisation-de-notre-site-web.php',
encoding='utf-8',
extra={'division_id': 'ocd-division/country:ca/csd:2432045'},
is_valid_func=lambda f: int(f.get('CO_MUNCP')) == 32045,
)
# Check the names with (replace `CODE`):
# ogrinfo -al -geom=NO boundaries/ca_qc_districts | grep -B3 CODE | sort | uniq
# Check the identifiers with:
# ogrinfo -al -geom=NO boundaries/ca_qc_districts | grep -B4 CODE
municipalities_with_boroughs = [
{
'name': 'Lévis',
'geographic_code': 25213,
'boroughs': {
'ocd-division/country:ca/csd:2425213/borough:1': 'Desjardins',
'ocd-division/country:ca/csd:2425213/borough:2': 'Les Chutes-de-la-Chaudière-Est',
'ocd-division/country:ca/csd:2425213/borough:3': 'Les Chutes-de-la-Chaudière-Ouest',
},
},
{
'name': 'Longueuil',
'geographic_code': 58227,
'boroughs': {
'ocd-division/country:ca/csd:2458227/borough:1': 'Le Vieux-Longueuil',
'ocd-division/country:ca/csd:2458227/borough:2': 'Greenfield Park',
'ocd-division/country:ca/csd:2458227/borough:3': 'Saint-Hubert',
},
},
{
'name': 'Montréal',
'geographic_code': 66023,
'boroughs': {
'ocd-division/country:ca/csd:2466023/borough:1': "Ahuntsic-Cartierville",
'ocd-division/country:ca/csd:2466023/borough:2': "Anjou",
'ocd-division/country:ca/csd:2466023/borough:3': "Côte-des-Neiges—Notre-Dame-de-Grâce",
'ocd-division/country:ca/csd:2466023/borough:4': "Lachine",
'ocd-division/country:ca/csd:2466023/borough:5': "LaSalle",
'ocd-division/country:ca/csd:2466023/borough:6': "L'Île-Bizard—Sainte-Geneviève",
'ocd-division/country:ca/csd:2466023/borough:7': "Mercier—Hochelaga-Maisonneuve",
'ocd-division/country:ca/csd:2466023/borough:8': "Montréal-Nord",
'ocd-division/country:ca/csd:2466023/borough:9': "Outremont",
'ocd-division/country:ca/csd:2466023/borough:10': "Pierrefonds-Roxboro",
'ocd-division/country:ca/csd:2466023/borough:11': "Plateau-Mont-Royal",
'ocd-division/country:ca/csd:2466023/borough:12': "Rivière-des-Prairies—Pointe-aux-Trembles",
'ocd-division/country:ca/csd:2466023/borough:13': "Rosemont—La Petite-Patrie",
'ocd-division/country:ca/csd:2466023/borough:14': "Saint-Laurent",
'ocd-division/country:ca/csd:2466023/borough:15': "Saint-Léonard",
'ocd-division/country:ca/csd:2466023/borough:16': "Sud-Ouest",
'ocd-division/country:ca/csd:2466023/borough:17': "Verdun",
'ocd-division/country:ca/csd:2466023/borough:18': "Ville-Marie",
'ocd-division/country:ca/csd:2466023/borough:19': "Villeray—Saint-Michel—Parc-Extension",
},
},
{
'name': 'Québec',
'geographic_code': 23027,
'boroughs': {
'ocd-division/country:ca/csd:2423027/borough:1': 'La Cité-Limoilou',
'ocd-division/country:ca/csd:2423027/borough:2': 'Les Rivières',
'ocd-division/country:ca/csd:2423027/borough:3': 'Sainte-Foy—Sillery—Cap-Rouge',
'ocd-division/country:ca/csd:2423027/borough:4': 'Charlesbourg',
'ocd-division/country:ca/csd:2423027/borough:5': 'Beauport',
'ocd-division/country:ca/csd:2423027/borough:6': 'La Haute-Saint-Charles',
},
},
{
'name': 'Saguenay',
'geographic_code': 94068,
'boroughs': {
'ocd-division/country:ca/csd:2494068/borough:1': 'Chicoutimi',
'ocd-division/country:ca/csd:2494068/borough:2': 'Jonquière',
'ocd-division/country:ca/csd:2494068/borough:3': 'La Baie',
},
},
{
'name': 'Sherbrooke',
'geographic_code': 43027,
'boroughs': {
'ocd-division/country:ca/csd:2443027/borough:1': 'Arrondissement 1',
'ocd-division/country:ca/csd:2443027/borough:2': 'Arrondissement 2',
'ocd-division/country:ca/csd:2443027/borough:3': 'Arrondissement 3',
'ocd-division/country:ca/csd:2443027/borough:4': 'Arrondissement 4',
},
},
]
# @see http://www.toponymie.gouv.qc.ca/ct/toponymie-municipale/municipalites-arrondissements/arrondissement.aspx
# @see http://www.mamrot.gouv.qc.ca/repertoire-des-municipalites/fiche/arrondissement/?tx_mamrotrepertoire_pi1[order]=asc_nom_mun
for municipality in municipalities_with_boroughs:
geographic_code = municipality['geographic_code']
geographic_name = municipality['name']
for division_id, name in municipality['boroughs'].items():
subdivision_id = int(division_id.rsplit(':', 1)[-1])
boundaries.register('%s districts' % name,
domain='%s, %s, QC' % (name, geographic_name),
last_updated=date(2017, 11, 30),
name_func=district_namer,
id_func=district_ider,
authority='Directeur général des élections du Québec',
licence_url='https://www.electionsquebec.qc.ca/francais/conditions-d-utilisation-de-notre-site-web.php',
encoding='utf-8',
extra={'division_id': division_id},
is_valid_func=lambda f, geographic_code=geographic_code, subdivision_id=subdivision_id: int(f.get('CO_MUNCP')) == geographic_code and int(f.get('NO_ARON')) == subdivision_id,
notes='Load the shapefile manually:\nfab alpheus update_boundaries:args="-r --merge combine -d data/shapefiles/public/boundaries/ca_qc_districts"',
)
boundaries.register('%s boroughs' % geographic_name,
domain='%s, QC' % geographic_name,
last_updated=date(2017, 11, 30),
name_func=borough_namer,
id_func=lambda f: int(f.get('NO_ARON')),
authority='Directeur général des élections du Québec',
licence_url='https://www.electionsquebec.qc.ca/francais/conditions-d-utilisation-de-notre-site-web.php',
encoding='utf-8',
extra={'division_id': 'ocd-division/country:ca/csd:24%05d' % geographic_code},
is_valid_func=lambda f, geographic_code=geographic_code: int(f.get('CO_MUNCP')) == geographic_code,
notes='Load the shapefile manually:\nfab alpheus update_boundaries:args="-r --merge combine -d data/shapefiles/public/boundaries/ca_qc_districts"',
)
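# Illustrative only: how district_namer resolves one of the Montréal renames.
# A plain dict stands in for a real shapefile feature here, since only .get()
# is used by the function:
if __name__ == '__main__':
    feature = {'CO_MUNCP': 66023, 'NO_DIS': 7, 'NM_DIS': 'Étienne-Desmarteaux'}
    print(district_namer(feature))  # -> 'Étienne-Desmarteau'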
| 42.877005
| 186
| 0.61204
| 2,645
| 24,054
| 5.528166
| 0.342533
| 0.030844
| 0.050472
| 0.05608
| 0.25742
| 0.242443
| 0.230338
| 0.130352
| 0.130352
| 0.122555
| 0
| 0.096505
| 0.197015
| 24,054
| 560
| 187
| 42.953571
| 0.659021
| 0.059491
| 0
| 0.107071
| 0
| 0.014141
| 0.50341
| 0.181029
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006061
| false
| 0
| 0.010101
| 0
| 0.046465
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
198370b8aa4515542204d0344e7d286fb2f76907
| 1,803
|
py
|
Python
|
pdbfairy/commands/compare_interactions.py
|
dannyroberts/pdbfairy
|
2cefd4a5e6c40f32e6fa3974ffd595fc336c582b
|
[
"BSD-3-Clause"
] | null | null | null |
pdbfairy/commands/compare_interactions.py
|
dannyroberts/pdbfairy
|
2cefd4a5e6c40f32e6fa3974ffd595fc336c582b
|
[
"BSD-3-Clause"
] | null | null | null |
pdbfairy/commands/compare_interactions.py
|
dannyroberts/pdbfairy
|
2cefd4a5e6c40f32e6fa3974ffd595fc336c582b
|
[
"BSD-3-Clause"
] | null | null | null |
import difflib
import io
import click
from pdbfairy import utils
from pdbfairy.commands import find_interactions
@click.argument('pdb_file_1')
@click.argument('pdb_file_2')
@click.option('--max-distance', default=find_interactions.MAX_DISTANCE, help=(
"The distance in Angstroms under which atoms should "
"be considered to interact (default {})"
.format(find_interactions.MAX_DISTANCE)))
def compare_interactions(pdb_file_1, pdb_file_2, max_distance):
"""
Show how find-interactions differs for PDB_FILE_1 and PDB_FILE_2
"""
structure_1 = utils.load_pdb_file(pdb_file_1)
structure_2 = utils.load_pdb_file(pdb_file_2)
with utils.capture() as (interactions_text_1, _):
find_interactions.print_interactions(structure_1, max_distance)
with utils.capture() as (interactions_text_2, _):
find_interactions.print_interactions(structure_2, max_distance)
differ = difflib.Differ()
diff = list(differ.compare(
interactions_text_1.getvalue().splitlines()[5:],
interactions_text_2.getvalue().splitlines()[5:],
))
print("PDB file name 1\t{}".format(structure_1.id))
print("PDB file name 2\t{}".format(structure_2.id))
print("Distance cutoff\t{}".format(max_distance))
print()
print()
print()
print('File\t{}'.format(diff[0].strip()))
for line in sorted(diff[1:]):
marker, rest = line[:2], line[2:]
if marker == '- ':
print('{}\t{}'.format(structure_1.id, rest))
elif marker == '+ ':
print('{}\t{}'.format(structure_2.id, rest))
elif marker == ' ':
print('both\t{}'.format(rest))
elif marker in ('', '? '):
pass
else:
raise ValueError("This should never happen: {!r}".format(marker))
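if __name__ == '__main__':
    # Illustrative only: the decorators above attach arguments and options but
    # no @click.command(), so pdbfairy presumably registers this function in
    # its CLI module. A minimal stand-in entry point (command name is a guess):
    cli = click.command(name='compare-interactions')(compare_interactions)
    cli()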
| 33.388889
| 78
| 0.653356
| 233
| 1,803
| 4.832618
| 0.313305
| 0.0746
| 0.028419
| 0.035524
| 0.286856
| 0.101243
| 0
| 0
| 0
| 0
| 0
| 0.019512
| 0.204104
| 1,803
| 53
| 79
| 34.018868
| 0.765157
| 0.035496
| 0
| 0.071429
| 0
| 0
| 0.142774
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0.02381
| 0.119048
| 0
| 0.142857
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1983c2bf22bdcb42a662ebdbf2a359de7f422d6f
| 444
|
py
|
Python
|
accelerator/migrations/023_alter_topics_field_office_hours.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | 6
|
2017-06-14T19:34:01.000Z
|
2020-03-08T07:16:59.000Z
|
accelerator/migrations/023_alter_topics_field_office_hours.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | 160
|
2017-06-20T17:12:13.000Z
|
2022-03-30T13:53:12.000Z
|
accelerator/migrations/023_alter_topics_field_office_hours.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0022_add_meeting_info_on_office_hour_model'),
]
operations = [
migrations.AlterField(
model_name='mentorprogramofficehour',
name='topics',
field=models.CharField(blank=True, default='', max_length=2000),
),
]
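# Illustrative only: this migration is applied with Django's standard command,
# e.g. `python manage.py migrate accelerator`.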
| 23.368421
| 76
| 0.655405
| 42
| 444
| 6.595238
| 0.809524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023881
| 0.245496
| 444
| 18
| 77
| 24.666667
| 0.802985
| 0
| 0
| 0
| 0
| 0
| 0.184685
| 0.146396
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
198549dbef342bfbbd642fd61a76b181c129ac11
| 1,085
|
py
|
Python
|
guanabara/mundo1/ex018.py
|
thekilian/Python-pratica
|
875661addd5b8eb4364bc638832c7ab55dcefce4
|
[
"MIT"
] | null | null | null |
guanabara/mundo1/ex018.py
|
thekilian/Python-pratica
|
875661addd5b8eb4364bc638832c7ab55dcefce4
|
[
"MIT"
] | null | null | null |
guanabara/mundo1/ex018.py
|
thekilian/Python-pratica
|
875661addd5b8eb4364bc638832c7ab55dcefce4
|
[
"MIT"
] | null | null | null |
# 018 - Write a program that reads any angle and shows the sine, cosine, and tangent of that angle.
'''
from math import sin, cos, tan
ang = float(input('Enter an angle: '))
sen = sin(ang)
cos = cos(ang)
tan = tan(ang)
print('Angle of {}°: \n Sine = {:.2f} \n Cosine = {:.2f} \n Tangent = {:.2f}'.format(ang, sen, cos, tan))
'''
'''
# the only thing missing was converting to radians:
import math
ang = float(input('Enter an angle: '))
sen = math.sin(math.radians(ang))
cos = math.cos(math.radians(ang))
tan = math.tan(math.radians(ang))
print('The angle {} has a SINE of {:.2f}'.format(ang, sen))
print('The angle {} has a COSINE of {:.2f}'.format(ang, cos))
print('The angle {} has a TANGENT of {:.2f}'.format(ang, tan))
'''
# importing only what we are going to use: sin, cos, tan, radians
from math import sin, cos, tan, radians
ang = float(input('Enter an angle: '))
sen = sin(radians(ang))
cos = cos(radians(ang))
tan = tan(radians(ang))
print('Angle of {}°: \n Sine = {:.2f} \n Cosine = {:.2f} \n Tangent = {:.2f}'.format(ang, sen, cos, tan))
| 31
| 119
| 0.648848
| 181
| 1,085
| 3.900552
| 0.265193
| 0.09915
| 0.077904
| 0.080737
| 0.454674
| 0.430595
| 0.314448
| 0.271955
| 0.17847
| 0.17847
| 0
| 0.01323
| 0.164055
| 1,085
| 35
| 120
| 31
| 0.762955
| 0.375115
| 0
| 0
| 0
| 0.166667
| 0.332103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1989abd5efbf0b7dfe6d6083f611454b62427f76
| 654
|
py
|
Python
|
exp1_bot_detection/Cresci/Cresci.py
|
GabrielHam/SATAR_Twitter_Bot_Detection_with_Self-supervised_User_Representation_Learning
|
ac73e5deb9d748f02d1396d1458e716408470cc9
|
[
"MIT"
] | 5
|
2021-08-10T14:15:18.000Z
|
2022-03-09T07:06:19.000Z
|
exp1_bot_detection/Cresci/Cresci.py
|
GabrielHam/SATAR_Twitter_Bot_Detection_with_Self-supervised_User_Representation_Learning
|
ac73e5deb9d748f02d1396d1458e716408470cc9
|
[
"MIT"
] | null | null | null |
exp1_bot_detection/Cresci/Cresci.py
|
GabrielHam/SATAR_Twitter_Bot_Detection_with_Self-supervised_User_Representation_Learning
|
ac73e5deb9d748f02d1396d1458e716408470cc9
|
[
"MIT"
] | null | null | null |
import math

# Digital-DNA style analysis: result[k] ends up as the length of the longest
# substring over the alphabet {A, C, T} shared by at least k+1 of the loaded
# sequences (substrings unique to a single sequence are ignored).
result = [1] * 4355
dna_list = []  # renamed from `list` to avoid shadowing the builtin
flag = ''

def issub(strrr):
    "Counts how many DNA sequences contain the substring strrr."
    global dna_list
    cnt = 0
    for dna in dna_list:
        if strrr in dna:
            cnt = cnt + 1
    return cnt

def tryy(strrr):
    "Recursively extends strrr while it is still shared by at least two sequences."
    num = issub(strrr)
    if num > 1:
        for tmp in range(num):
            result[tmp] = max(result[tmp], len(strrr))
        tryy(strrr + 'A')
        tryy(strrr + 'C')
        tryy(strrr + 'T')

Sigma = ['A', 'C', 'T']
f = open('./cresci_text/finalTest1.txt', 'r', encoding='UTF-8')
for line in f:
    dna_list.append(line[line.find(' ', 2) + 1: -1])
f.close()
for substring in Sigma:
    tryy(substring)
for i in result:
    print(i)
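if __name__ == '__main__':
    # Illustrative only: a toy check of the routine above (not the Cresci
    # data). "ACT" is shared by two sequences and "CT" by all three, so the
    # first entries of a fresh result vector become [3, 3, 2].
    dna_list = ["ACTA", "TACT", "CCT"]
    result = [1] * 4355
    for substring in Sigma:
        tryy(substring)
    print(result[:3])  # -> [3, 3, 2]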
| 15.571429
| 63
| 0.524465
| 97
| 654
| 3.525773
| 0.474227
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028953
| 0.313456
| 654
| 41
| 64
| 15.95122
| 0.732739
| 0
| 0
| 0
| 0
| 0
| 0.063077
| 0.043077
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.035714
| 0
| 0.142857
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
198aafa829b773ab465bf52f7c98972087fa1385
| 3,119
|
py
|
Python
|
examples/smiley_face.py
|
Markichu/PythonAutoNifty
|
ab646601058297b6bfe14332f17b836ee3dfbe69
|
[
"MIT"
] | null | null | null |
examples/smiley_face.py
|
Markichu/PythonAutoNifty
|
ab646601058297b6bfe14332f17b836ee3dfbe69
|
[
"MIT"
] | 6
|
2021-11-24T00:48:57.000Z
|
2022-03-17T07:51:36.000Z
|
examples/smiley_face.py
|
Markichu/PythonAutoNifty
|
ab646601058297b6bfe14332f17b836ee3dfbe69
|
[
"MIT"
] | null | null | null |
import random
from pyautonifty.constants import DRAWING_SIZE, YELLOW, BLACK, MAGENTA, BLUE
from pyautonifty.pos import Pos
from pyautonifty.drawing import Drawing
from pyautonifty.renderer import Renderer
def smiley_face(drawing):
# Set a background colour to be used for the drawing
background_colour = MAGENTA
# Add a blue square that is not filled in around where the smiley face will be
square_position = Pos(DRAWING_SIZE / 2, DRAWING_SIZE / 2)
square_width = DRAWING_SIZE / 1.5
square_colour = BLUE
square_brush_radius = DRAWING_SIZE / 64
square_filled = False
# Set the position of the yellow face itself in the middle of the drawing (typically 500, 500)
face_position = Pos(DRAWING_SIZE / 2, DRAWING_SIZE / 2)
face_radius = DRAWING_SIZE / 4 # 250
face_colour = YELLOW # Yellow in RGBA (255, 255, 0, 1)
# Create the eyes
eye_horizontal_offset = DRAWING_SIZE / 10
eye_vertical_offset = DRAWING_SIZE / 16
left_eye_position = face_position - Pos(eye_horizontal_offset, eye_vertical_offset)
right_eye_position = face_position - Pos(-eye_horizontal_offset, eye_vertical_offset)
eye_radius = DRAWING_SIZE / 32
eye_colour = BLACK # Black in RGBA (0, 0, 0, 1)
# Create the curve for the smile
smile_horizontal_offset = DRAWING_SIZE / 8
smile_vertical_offset = DRAWING_SIZE / 12
smile_starting_point = face_position + Pos(-smile_horizontal_offset, smile_vertical_offset)
smile_control_point = face_position + Pos(0, smile_vertical_offset * 3)
smile_ending_point = face_position + Pos(smile_horizontal_offset, smile_vertical_offset)
smile_points = [smile_starting_point, smile_control_point, smile_ending_point]
smile_brush_radius = DRAWING_SIZE / 64
smile_colour = BLACK # Black in RGBA (0, 0, 0, 1)
# Put it all together in a drawing using chained methods
drawing.add_background(background_colour) \
.add_rounded_square(square_position, square_width, square_colour, square_brush_radius, square_filled) \
.add_point(face_position, face_colour, face_radius) \
.add_point(left_eye_position, eye_colour, eye_radius) \
.add_point(right_eye_position, eye_colour, eye_radius) \
.add_general_bezier_curve(smile_points, smile_colour, smile_brush_radius)
return drawing
if __name__ == "__main__":
example_drawing = smiley_face(Drawing())
output_data = example_drawing.to_nifty_import() # Replace previous canvas contents in Nifty.Ink
print(f"Lines: {len(example_drawing)}, "
f"Points: {sum([len(line['points']) for line in example_drawing])}, "
f"Size: {(len(output_data) / 1024.0 ** 2):.2f}MB")
with open("output.txt", "w") as file:
file.write(output_data)
# Init render class.
renderer = Renderer()
# Render in a very accurate (but slower) way.
renderer.render(example_drawing, filename="smiley_face_%Y_%m_%d_%H-%M-%S-%f.png",
simulate=True, allow_transparency=True, proper_line_thickness=True, draw_as_bezier=True,
step_size=10)
| 43.319444
| 114
| 0.721064
| 435
| 3,119
| 4.850575
| 0.314943
| 0.072986
| 0.042654
| 0.028436
| 0.23128
| 0.208531
| 0.208531
| 0.178199
| 0.145024
| 0.120379
| 0
| 0.022409
| 0.198782
| 3,119
| 71
| 115
| 43.929577
| 0.821929
| 0.167041
| 0
| 0
| 0
| 0
| 0.076625
| 0.032508
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.125
| 0
| 0.166667
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
198b6c8bdf9c4c45a62fc10ae148a99e4911a2b1
| 1,074
|
py
|
Python
|
cumulusci/tests/test_schema.py
|
davisagli/CumulusCI
|
fd74c324ad3ff662484b159395c639879011e711
|
[
"BSD-3-Clause"
] | 109
|
2015-01-20T14:28:48.000Z
|
2018-08-31T12:12:39.000Z
|
cumulusci/tests/test_schema.py
|
davisagli/CumulusCI
|
fd74c324ad3ff662484b159395c639879011e711
|
[
"BSD-3-Clause"
] | 365
|
2015-01-07T19:54:25.000Z
|
2018-09-11T15:10:02.000Z
|
cumulusci/tests/test_schema.py
|
davisagli/CumulusCI
|
fd74c324ad3ff662484b159395c639879011e711
|
[
"BSD-3-Clause"
] | 125
|
2015-01-17T16:05:39.000Z
|
2018-09-06T19:05:00.000Z
|
import json
import yaml
from jsonschema import validate
from cumulusci.utils.yaml import cumulusci_yml
class TestSchema:
def test_schema_validates(self, cumulusci_test_repo_root):
schemapath = (
cumulusci_test_repo_root / "cumulusci/schema/cumulusci.jsonschema.json"
)
with open(schemapath) as f:
schema = json.load(f)
with open(cumulusci_test_repo_root / "cumulusci.yml") as f:
data = yaml.safe_load(f)
assert validate(data, schema=schema) is None
def test_schema_is_current(self, cumulusci_test_repo_root):
current_schema = cumulusci_yml.CumulusCIRoot.schema()
schemapath = (
cumulusci_test_repo_root / "cumulusci/schema/cumulusci.jsonschema.json"
)
with open(schemapath) as f:
saved_schema = json.load(f)
assert current_schema == saved_schema, (
"The models used to validate cumulusci.yml do not match cumulusci.jsonschema.json. "
"Use `make schema` to update the jsonschema file."
)
| 32.545455
| 96
| 0.666667
| 129
| 1,074
| 5.341085
| 0.333333
| 0.09434
| 0.123367
| 0.152395
| 0.377358
| 0.261248
| 0.261248
| 0.261248
| 0.261248
| 0.261248
| 0
| 0
| 0.257914
| 1,074
| 32
| 97
| 33.5625
| 0.864492
| 0
| 0
| 0.24
| 0
| 0
| 0.211359
| 0.102421
| 0
| 0
| 0
| 0
| 0.08
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
198b7bfde744c3980b7b517559c0d495392afa36
| 7,650
|
py
|
Python
|
luxon/core/networking/sock.py
|
HieronymusCrouse/luxon
|
b0b08c103936adcbb3dd03b1701d44a65de8f61e
|
[
"BSD-3-Clause"
] | 7
|
2018-02-27T00:18:02.000Z
|
2019-05-16T16:57:00.000Z
|
luxon/core/networking/sock.py
|
HieronymusCrouse/luxon
|
b0b08c103936adcbb3dd03b1701d44a65de8f61e
|
[
"BSD-3-Clause"
] | 47
|
2018-01-23T13:49:28.000Z
|
2019-06-06T13:14:59.000Z
|
luxon/core/networking/sock.py
|
HieronymusCrouse/luxon
|
b0b08c103936adcbb3dd03b1701d44a65de8f61e
|
[
"BSD-3-Clause"
] | 14
|
2018-01-15T08:47:11.000Z
|
2019-12-27T12:05:41.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import ssl
import struct
import socket
import select
import pickle
from multiprocessing import Lock
def recv_pickle(sock):
header = sock.read(8)
length = struct.unpack('!Q', header)[0]
payload = sock.read(length)
return pickle.loads(payload)
def send_pickle(sock, data):
payload = pickle.dumps(data, 4)
sock.write(struct.pack('!Q', len(payload)) + payload)
return True
class Socket(object):
def __init__(self, sock, addr=None):
self._sock_lock = Lock()
self._transaction_lock = Lock()
self._sock = sock
self._addr = addr
self._closed = False
def _check_closed(self):
if self._closed:
raise ConnectionError('Connection closed')
@property
def raw_socket(self):
return self._sock
@property
def addr(self):
return self._addr
def setblocking(self, value):
self._check_closed()
try:
self._sock_lock.acquire()
self._sock.setblocking(bool(value))
finally:
self._sock_lock.release()
def settimeout(self, value):
self._check_closed()
try:
self._sock_lock.acquire()
try:
self._sock.setblocking(True)
self._sock.settimeout(float(value))
except ValueError:
self._sock.settimeout(None)
finally:
self._sock_lock.release()
def fileno(self):
self._check_closed()
try:
self._sock_lock.acquire()
return self._sock.fileno()
finally:
self._sock_lock.release()
def read(self, length=2, timeout=None):
self._check_closed()
if timeout and float(timeout) < float(0):
timeout = None
self._sock_lock.acquire()
try:
buf = b''
while True:
data = b''
try:
if timeout is not None and timeout <= 0:
raise socket.timeout('Socket Read Timeout')
# Should be ready to read
try:
select.select([self._sock], [], [], timeout)
except ValueError:
return b''
data = self._sock.recv(length)
if data:
buf += data
length -= len(data)
else:
self._close()
raise ConnectionError('Connection closed')
if length == 0:
return buf
try:
select.select([self._sock], [], [], timeout)
except ValueError:
return b''
if timeout:
timeout -= timeout
except (BlockingIOError, ssl.SSLWantReadError):
# Resource temporarily unavailable (errno EWOULDBLOCK)
try:
select.select([self._sock], [], [], timeout)
except ValueError:
return b''
if timeout:
timeout -= timeout
finally:
self._sock_lock.release()
def write(self, data, timeout=None):
self._check_closed()
totalsent = 0
data_size = len(data)
if timeout and float(timeout) < float(0):
timeout = None
try:
self._sock_lock.acquire()
while True:
try:
while totalsent < data_size:
select.select([], [self._sock], [], timeout)
if timeout is not None and timeout <= 0:
raise socket.timeout('Socket Write Timeout')
sent = self._sock.send(data[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent += sent
return totalsent
except (BlockingIOError, ssl.SSLWantWriteError):
# Resource temporarily unavailable (errno EWOULDBLOCK)
select.select([], [self._sock], [], timeout)
if timeout:
timeout -= timeout
except BrokenPipeError:
self._close()
raise ConnectionError('Connection closed')
finally:
self._sock_lock.release()
def recv(self, max_size=64):
self._check_closed()
try:
self._sock_lock.acquire()
return self._sock.recv(max_size)
finally:
self._sock_lock.release()
def send(self, data):
self._check_closed()
try:
self._sock_lock.acquire()
return self._sock.send(data)
finally:
self._sock_lock.release()
def sendall(self, data):
self._check_closed()
try:
self._sock_lock.acquire()
return self._sock.sendall(data)
finally:
self._sock_lock.release()
def _close(self):
self._closed = True
try:
self._sock.send(b'')
except Exception:
pass
try:
return self._sock.close()
except Exception:
pass
def close(self):
try:
self._sock_lock.acquire()
return self._close()
finally:
try:
self._sock_lock.release()
except ValueError:
pass
def __enter__(self):
self._check_closed()
self._transaction_lock.acquire()
return self
def __exit__(self, exception, value, traceback):
self._transaction_lock.release()
def Pipe():
sock1, sock2 = socket.socketpair()
sock1.setblocking(False)
sock2.setblocking(False)
return (Socket(sock1), Socket(sock2),)
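# A minimal usage sketch (not part of the original module): Pipe() pairs two
# framed non-blocking sockets, so pickled objects can be exchanged through
# send_pickle/recv_pickle.
if __name__ == '__main__':
    a, b = Pipe()
    send_pickle(a, {'ping': 1})
    print(recv_pickle(b))  # -> {'ping': 1}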
| 33.116883
| 79
| 0.555817
| 794
| 7,650
| 5.215365
| 0.278338
| 0.075344
| 0.055059
| 0.032601
| 0.369717
| 0.305723
| 0.241971
| 0.203091
| 0.203091
| 0.183289
| 0
| 0.005738
| 0.362092
| 7,650
| 230
| 80
| 33.26087
| 0.842828
| 0.218693
| 0
| 0.531429
| 0
| 0
| 0.019852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108571
| false
| 0.017143
| 0.034286
| 0.011429
| 0.245714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
198cce97dd571e2e8a46ea89d848568e280efd25
| 739
|
py
|
Python
|
merge.py
|
Graze-Lab/SDE-of-species
|
c53ca05ff840e722fec3d71b5794057713d221f8
|
[
"MIT"
] | null | null | null |
merge.py
|
Graze-Lab/SDE-of-species
|
c53ca05ff840e722fec3d71b5794057713d221f8
|
[
"MIT"
] | null | null | null |
merge.py
|
Graze-Lab/SDE-of-species
|
c53ca05ff840e722fec3d71b5794057713d221f8
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"A script to perform the linear regression and create the plot."
# Python program to
# demonstrate merging of
# two files
# Creating a list of filenames
filenames = ['A.591-search-immune-dmel-FlyBase_IDs.txt', 'B.432_search-defense-dmel-FlyBase_IDs.txt']
# Open file3 in write mode
with open('C.search-immune-defense-dmel-FlyBase_IDs.txt', 'w') as outfile:
# Iterate through list
for names in filenames:
# Open each file in read mode
with open(names) as infile:
# read the data from file1 and
# file2 and write it in file3
outfile.write(infile.read())
# Add '\n' to enter data of file2
# from next line
outfile.write("\n")
| 26.392857
| 101
| 0.653586
| 110
| 739
| 4.354545
| 0.563636
| 0.068894
| 0.087683
| 0.106472
| 0.100209
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021661
| 0.250338
| 739
| 27
| 102
| 27.37037
| 0.84296
| 0.464141
| 0
| 0
| 0
| 0
| 0.425056
| 0.279642
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1992027ec84925790a7d6fb4bda0cd8820388c0a
| 2,891
|
py
|
Python
|
src/rhc_hqa/rhc_hqa.py
|
Minyus/rhc_hqa
|
2b9f37a8b6ddb9dd36c08764acd2dcf9cbd7c551
|
[
"Apache-2.0"
] | null | null | null |
src/rhc_hqa/rhc_hqa.py
|
Minyus/rhc_hqa
|
2b9f37a8b6ddb9dd36c08764acd2dcf9cbd7c551
|
[
"Apache-2.0"
] | null | null | null |
src/rhc_hqa/rhc_hqa.py
|
Minyus/rhc_hqa
|
2b9f37a8b6ddb9dd36c08764acd2dcf9cbd7c551
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMRegressor
def cv_index_list_to_df(d):
out_dict = {}
for group, index_list in d.items():
for index in index_list:
out_dict[index] = group
df = pd.DataFrame.from_dict(out_dict, orient="index", columns=["split"])
df.index.name = "id"
return df
def estimate_cate(df, parameters):
col_outcome = parameters.get("col_outcome")
col_treatment = parameters.get("col_treatment")
cols_feature = parameters.get("cols_feature")
propensity_lower = parameters.get("propensity_lower")
propensity_upper = parameters.get("propensity_upper")
split_list = df["split"].drop_duplicates().to_list()
split_list = sorted(split_list)
ps_model_dict = {}
tot_model_dict = {}
np.random.seed(42)
for s in split_list:
ps_model = LogisticRegression(solver="liblinear", random_state=42)
tot_model = LGBMRegressor(
min_child_samples=400, importance_type="gain", random_state=42
)
col_propensity = "propensity_score_{}".format(s)
col_trans_outcome = "transformed_outcome_{}".format(s)
col_cate = "cate_{}".format(s)
train_df = df.query("split != @s")
ps_model.fit(train_df[cols_feature], train_df[col_treatment])
df[col_propensity] = ps_model.predict_proba(df[cols_feature])[:, 1]
df[col_propensity].clip(
inplace=True, lower=propensity_lower, upper=propensity_upper
)
df[col_trans_outcome] = (
df[col_outcome]
* (df[col_treatment] - df[col_propensity])
/ (df[col_propensity] * (1 - df[col_propensity]))
)
train_df = df.query("split != @s")
tot_model.fit(train_df[cols_feature], train_df[col_trans_outcome])
df[col_cate] = tot_model.predict(df[cols_feature])
col_cate_if_seps_1 = "cate_if_seps_1_{}".format(s)
col_cate_if_seps_0 = "cate_if_seps_0_{}".format(s)
col_cate_diff_seps = "cate_diff_seps_{}".format(s)
seps_1_df = df[cols_feature].copy()
seps_1_df["seps_1"] = 1.0
df[col_cate_if_seps_1] = tot_model.predict(seps_1_df)
seps_0_df = df[cols_feature].copy()
seps_0_df["seps_1"] = 0.0
df[col_cate_if_seps_0] = tot_model.predict(seps_0_df)
df[col_cate_diff_seps] = df[col_cate_if_seps_1] - df[col_cate_if_seps_0]
imp_df = pd.DataFrame(
{
"feature": cols_feature,
"propensity_model_coef": np.squeeze(ps_model.coef_),
"cate_model_importances": tot_model.feature_importances_,
}
)
ps_model_dict[s] = ps_model
tot_model_dict[s] = tot_model
model_dict = dict(ps=ps_model_dict, tot=tot_model_dict)
return df, imp_df, model_dict
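# A quick numeric check of the transformed-outcome identity used above
# (illustrative only, not part of the original module): for a treated unit
# (T=1) with propensity score e, Y * (T - e) / (e * (1 - e)) reduces to Y / e.
if __name__ == "__main__":
    y, t, e = 2.0, 1, 0.5
    assert y * (t - e) / (e * (1 - e)) == y / e == 4.0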
| 32.852273
| 80
| 0.649256
| 400
| 2,891
| 4.29
| 0.215
| 0.04662
| 0.04662
| 0.045455
| 0.200466
| 0.184149
| 0.041958
| 0.041958
| 0.041958
| 0
| 0
| 0.014034
| 0.235905
| 2,891
| 87
| 81
| 33.229885
| 0.762789
| 0
| 0
| 0.030303
| 0
| 0
| 0.097198
| 0.022484
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.090909
| 0
| 0.151515
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1992310a23c16a6b24ee79181f9ba76bf057b0f3
| 2,301
|
py
|
Python
|
257.py
|
wilbertgeng/LeetCode_exercise
|
f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc
|
[
"MIT"
] | null | null | null |
257.py
|
wilbertgeng/LeetCode_exercise
|
f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc
|
[
"MIT"
] | null | null | null |
257.py
|
wilbertgeng/LeetCode_exercise
|
f00c08e0d28ffa88d61d4262c6d1f49f1fa91ebc
|
[
"MIT"
] | null | null | null |
"""257. Binary Tree Paths"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
### Practice
if not root:
return []
self.res = []
self.dfs(root, str(root.val))
return self.res
def dfs(self, node, path):
if not node.left and not node.right:
self.res.append(path)
return
if node.left:
self.dfs(node.left, path+"->"+str(node.left.val))
if node.right:
self.dfs(node.right, path+"->"+str(node.right.val))
        ############
        # Orphaned alternative kept for reference; commented out because
        # `root` is not in scope where this block sits in the source file:
        # if not root:
        #     return []
        # res = []
        # self.dfs_backtrack(root, "", res)
        # return res
def dfs_backtrack(self, node, path, res):
if not node:
return None
if not node.left and not node.right:
res.append(path + str(node.val))
self.dfs_backtrack(node.left, path + str(node.val) + "->", res)
self.dfs_backtrack(node.right, path + str(node.val) + "->", res)
# dfs + stack
def binaryTreePaths1(self, root):
if not root:
return []
res, stack = [], [(root, "")]
while stack:
node, ls = stack.pop()
if not node.left and not node.right:
res.append(ls+str(node.val))
if node.right:
stack.append((node.right, ls+str(node.val)+"->"))
if node.left:
stack.append((node.left, ls+str(node.val)+"->"))
return res
# bfs + queue
def binaryTreePaths2(self, root):
if not root:
return []
res, queue = [], collections.deque([(root, "")])
while queue:
node, ls = queue.popleft()
if not node.left and not node.right:
res.append(ls+str(node.val))
if node.left:
queue.append((node.left, ls+str(node.val)+"->"))
if node.right:
queue.append((node.right, ls+str(node.val)+"->"))
return res
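# A small smoke test (assumes the TreeNode shape sketched in the header
# comment; not part of the original solution file):
if __name__ == "__main__":
    class TreeNode(object):
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    root = TreeNode(1, TreeNode(2, None, TreeNode(5)), TreeNode(3))
    print(Solution().binaryTreePaths(root))  # -> ['1->2->5', '1->3']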
| 29.883117
| 72
| 0.49761
| 276
| 2,301
| 4.119565
| 0.17029
| 0.084433
| 0.079156
| 0.063325
| 0.430079
| 0.335092
| 0.319261
| 0.153914
| 0.129288
| 0.129288
| 0
| 0.004051
| 0.356367
| 2,301
| 76
| 73
| 30.276316
| 0.763673
| 0.12299
| 0
| 0.433962
| 0
| 0
| 0.008167
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0
| 0
| 0.301887
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1993e0484cd76cbe8ee03395dd60ee7a00db241f
| 2,179
|
py
|
Python
|
crm/forms.py
|
zhangyafeii/CRM
|
4d93fb276c7210676590da48b18d8e72cc202ef0
|
[
"MIT"
] | null | null | null |
crm/forms.py
|
zhangyafeii/CRM
|
4d93fb276c7210676590da48b18d8e72cc202ef0
|
[
"MIT"
] | null | null | null |
crm/forms.py
|
zhangyafeii/CRM
|
4d93fb276c7210676590da48b18d8e72cc202ef0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Datetime: 2018/10/23
@Author: Zhang Yafei
"""
from django.forms import ModelForm, forms
from crm import models
class EnrollmentForm(ModelForm):
def __new__(cls, *args, **kwargs):
# print('__new__',cls,args,kwargs)
for field_name in cls.base_fields:
file_obj = cls.base_fields[field_name]
file_obj.widget.attrs.update({'class': 'form-control'})
if field_name in cls.Meta.readonly_fields:
file_obj.widget.attrs.update({'disabled':True})
return ModelForm.__new__(cls)
def clean(self):
if not self.cleaned_data['contract_approved']:
            self.add_error('contract_approved', 'Please tick the agreement box')
class Meta:
model = models.StudentEnrollment
# fields = ['name','consultant','status']
fields = '__all__'
exclude = ['contract_approved_date']
readonly_fields = []
# readonly_fields = ['contract_agreed']
class CustomerForm(ModelForm):
def __new__(cls, *args, **kwargs):
# print('__new__',cls,args,kwargs)
for field_name in cls.base_fields:
file_obj = cls.base_fields[field_name]
file_obj.widget.attrs.update({'class': 'form-control'})
if field_name in cls.Meta.readonly_fields:
file_obj.widget.attrs.update({'disabled':True})
return ModelForm.__new__(cls)
def clean(self):
if self.errors:
raise forms.ValidationError(('Please fix errors before re-submit'))
if self.instance.id is not None:
for field in self.Meta.readonly_fields:
old_field_val = getattr(self.instance,field)
form_val = self.cleaned_data[field]
if old_field_val != form_val:
self.add_error(field,'Readonly Field:field should be "{}",not "{}"'.format(old_field_val,form_val))
class Meta:
model = models.CustomerInfo
# fields = ['name','consultant','status']
fields = '__all__'
exclude = ['consult_content','status','consult_course']
readonly_fields = ['contact_type','contact','consultant','referral_from',]
| 35.145161
| 119
| 0.620009
| 254
| 2,179
| 5.019685
| 0.346457
| 0.028235
| 0.031373
| 0.050196
| 0.486275
| 0.458039
| 0.458039
| 0.392157
| 0.392157
| 0.392157
| 0
| 0.005535
| 0.253786
| 2,179
| 61
| 120
| 35.721311
| 0.778598
| 0.114273
| 0
| 0.5
| 0
| 0
| 0.146583
| 0.011476
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
199526a23444f21fe4ced48163607211a83fd853
| 3,911
|
py
|
Python
|
api/user_api.py
|
HuQi2018/BiSheServer
|
66fd77865e131f0a06313562b5d127e530128944
|
[
"Apache-2.0"
] | 44
|
2021-06-03T04:01:30.000Z
|
2022-03-31T15:46:00.000Z
|
api/user_api.py
|
HuQi2018/BiSheServer
|
66fd77865e131f0a06313562b5d127e530128944
|
[
"Apache-2.0"
] | 1
|
2022-02-21T05:40:01.000Z
|
2022-03-17T10:50:51.000Z
|
api/user_api.py
|
HuQi2018/BiSheServer
|
66fd77865e131f0a06313562b5d127e530128944
|
[
"Apache-2.0"
] | 8
|
2021-06-05T17:13:35.000Z
|
2022-03-24T05:04:30.000Z
|
import uuid
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.db.models import Q
from BiSheServer import settings
from api.model_json import queryset_to_json
from user.models import UsersPerfer, UsersDetail, UserTag
# User API
class User:
def __init__(self):
pass
    # Get the movie genres the user prefers
@staticmethod
def getUserPreferTag(user_id):
user_prefer_tag_rs = UsersDetail.objects.filter(user_id_id=user_id).values_list("user_prefer", flat=True)[0]
if not user_prefer_tag_rs:
return ""
return user_prefer_tag_rs
    # Get the user's hobbies
    @staticmethod
    def getUserHobbiesTag(user_id):
        # .values() does not exist on a model instance; query the field
        # through values_list() on the queryset instead
        user_hobbies_tag_rs = UsersDetail.objects.filter(user_id_id=user_id).values_list("user_hobbies", flat=True).first()
        if not user_hobbies_tag_rs:
            return ""
        return user_hobbies_tag_rs
    # Get all hobby tags
@staticmethod
def getHobbiesTag():
hobbies_tag_rs = UsersPerfer.objects.all()
return hobbies_tag_rs
def getHobbiesTagJson(self):
hobbies_tag_rs = queryset_to_json(self.getHobbiesTag().all())
return hobbies_tag_rs
    # Add a tag for the user
    @staticmethod
    def add_user_tag(user_id, tag_type, tag_name, tag_weight):
        # Except for rating and comment tags, every tag starts with weight 5 when created
        if tag_type != "rating_movie_id" and tag_type != "comment_movie_id" and tag_type != "info_movie_tag":
            UserTag.objects.create(user_id=user_id, tag_type=tag_type, tag_name=tag_name, tag_weight=5)
        else:
            if tag_type == "info_movie_tag":  # movie tags always default to weight 2
                tag_weight = 2
            UserTag.objects.create(user_id=user_id, tag_type=tag_type, tag_name=tag_name, tag_weight=tag_weight)
    # Modify the weight of a user's tag
    def modify_user_tag(self, user_id, tag_type, tag_name, tag_weight):
        user_tag = UserTag.objects.filter(Q(user_id=user_id) & Q(tag_type=tag_type) & Q(tag_name=tag_name))
        if user_tag.exists():  # the tag exists, so modify it
            if type(tag_weight) == str:  # a string means adjusting the weight; a number is assigned directly
                old_tag_weight = int(user_tag.first().tag_weight)
                try:
                    tag_weight = int(tag_weight)
                except Exception as ex:
                    print("Invalid weight! " + str(ex))
                    return ""
                if old_tag_weight != 0:  # adjust the existing weight
                    tag_weight = old_tag_weight + tag_weight
                else:  # the tag is being added for the second time
                    tag_weight = 5 + tag_weight
            user_tag.update(tag_weight=str(tag_weight))
        else:
            self.add_user_tag(user_id, tag_type, tag_name, tag_weight)
    # Check whether the user is logged in
@staticmethod
def isNotLogin(request):
try:
if not request.session['is_login'] or not request.session['user_id']:
raise Exception
except:
return True
    # User avatar upload
@staticmethod
def userImageUpload(user_img):
rs = []
imgName = uuid.uuid4().hex
img_size = user_img.size
img_name = user_img.name
img_ext = '.' in img_name and img_name.rsplit('.', 1)[-1]
        # print(img_size)
        # Check whether the file extension is in the allowed list
        def allowed_file(img_ext):
            return img_ext in settings.default['allow_extensions']
        if user_img:
            if not allowed_file(img_ext):
                rs = [False, "Upload is not an image type!"]
                # return JsonError("Upload is not an image type!")
            elif img_size > int(settings.default['allow_maxsize']):
                rs = [False, "Image exceeds 5MB, upload failed!"]
                # return JsonError("Image exceeds 5MB, upload failed!")
            else:
                img_path = default_storage.save(settings.default['avatars_upload_folder'] + imgName + "." + img_ext,
                                                ContentFile(user_img.read()))  # save the file
                # request.session['user_img'] = img_path
                rs = [True, img_path]
return rs
| 35.554545
| 116
| 0.611864
| 477
| 3,911
| 4.698113
| 0.272537
| 0.080321
| 0.035698
| 0.029005
| 0.21419
| 0.147702
| 0.147702
| 0.147702
| 0.134761
| 0.134761
| 0
| 0.004367
| 0.297366
| 3,911
| 109
| 117
| 35.880734
| 0.811135
| 0.076451
| 0
| 0.2125
| 0
| 0
| 0.049582
| 0.00585
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0.0125
| 0.0875
| 0.0125
| 0.35
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
199538a4a4f978d0b56aadb122776630718bac24
| 1,777
|
py
|
Python
|
Diretide/lib.py
|
sleibrock/dotacli
|
e361fbcf787f13232fcf8994b839d4c9f08bc67a
|
[
"MIT"
] | null | null | null |
Diretide/lib.py
|
sleibrock/dotacli
|
e361fbcf787f13232fcf8994b839d4c9f08bc67a
|
[
"MIT"
] | null | null | null |
Diretide/lib.py
|
sleibrock/dotacli
|
e361fbcf787f13232fcf8994b839d4c9f08bc67a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
lib.py
"""
from string import printable as printable_chrs
from collections import namedtuple
from requests import get as re_get
from datetime import datetime
# Define constants
API_URL = "http://dailydota2.com/match-api"
Match = namedtuple("Match", ["timediff", "series_type", "link", "starttime",
"status", "starttime_unix", "comment", "viewers",
"team1", "team2", "league"])
def get_url(url):
return re_get(url).json()
def get_longest(matches):
return max([max(len(x.team1["team_name"]),len(x.team2["team_name"]))
for x in matches])
def print_match(m, longest):
"Do all the match info here"
print("=== {} (best of {}) ===".format(m.league["name"], m.series_type))
print("{[team_name]:<{width}} vs. {[team_name]:>{width}}".format(
m.team1, m.team2, width=longest))
print(display_time(m))
print()
def display_time(m):
"""Convert unix time to readable"""
x = int(m.timediff)
if x <= 0:
return "***Currently Running***"
return "Time until: {}".format(
datetime.fromtimestamp(x).strftime("%H:%M:%S"))
def main(*args, **kwargs):
"""
Main function
Retrieves, forms data, and prints out information
"""
print()
# First retrieve the JSON
data = get_url(API_URL)
# Interpret results into a list of matches
matches = [Match(**unpack) for unpack in data["matches"]]
# Find the longest team name and create dynamic alignment
longest = get_longest(matches)
# Print out a list of all matches (possibly order by the Unix timestamp)
for match in matches:
print_match(match, longest)
if __name__ == "__main__":
main()
# end
| 26.924242
| 78
| 0.621835
| 234
| 1,777
| 4.602564
| 0.461538
| 0.03714
| 0.031569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00656
| 0.227912
| 1,777
| 65
| 79
| 27.338462
| 0.778426
| 0.214969
| 0
| 0.057143
| 0
| 0
| 0.215474
| 0.031815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.114286
| 0.057143
| 0.371429
| 0.228571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1996ff9cbd376245bbd4ada8ffdff6a62803fa08
| 417
|
py
|
Python
|
hash-tool/vv_hash.py
|
amjunliang/virtualview_tools
|
7fcf62134b0a23a71bb8f4b54a6aac19cf858ef6
|
[
"MIT"
] | 186
|
2017-12-06T09:17:07.000Z
|
2022-01-10T04:04:09.000Z
|
hash-tool/vv_hash.py
|
amjunliang/virtualview_tools
|
7fcf62134b0a23a71bb8f4b54a6aac19cf858ef6
|
[
"MIT"
] | 16
|
2017-12-18T04:15:57.000Z
|
2021-04-15T06:50:37.000Z
|
hash-tool/vv_hash.py
|
amjunliang/virtualview_tools
|
7fcf62134b0a23a71bb8f4b54a6aac19cf858ef6
|
[
"MIT"
] | 47
|
2018-01-12T06:23:26.000Z
|
2022-02-22T05:56:59.000Z
|
import sys
if len(sys.argv) <= 1:
print("python vv_hash.py property_name")
exit(0)
propertyName = sys.argv[1]
if len(propertyName) == 0:
print("empty element name")
exit(0)
hashCode = 0
for i in range(0, len(propertyName)):
hashCode = (31 * hashCode + ord(propertyName[i])) & 0xFFFFFFFF
if hashCode > 0x7FFFFFFF:
hashCode = hashCode - 0x100000000
print("hash code: %d" % (hashCode))
| 24.529412
| 66
| 0.654676
| 57
| 417
| 4.754386
| 0.526316
| 0.0369
| 0.059041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066465
| 0.206235
| 417
| 17
| 67
| 24.529412
| 0.752266
| 0
| 0
| 0.142857
| 0
| 0
| 0.148325
| 0
| 0
| 0
| 0.074163
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
199c463aaa4e8ff9bc128e9fe6eb09901f8c3ffb
| 2,354
|
py
|
Python
|
API/AdminPanel/smoke_all_pages/credentials.py
|
Mikhail-QA/HS
|
8ddbc09a0d1493128f3af6b8078c295609908dd7
|
[
"Apache-2.0"
] | null | null | null |
API/AdminPanel/smoke_all_pages/credentials.py
|
Mikhail-QA/HS
|
8ddbc09a0d1493128f3af6b8078c295609908dd7
|
[
"Apache-2.0"
] | null | null | null |
API/AdminPanel/smoke_all_pages/credentials.py
|
Mikhail-QA/HS
|
8ddbc09a0d1493128f3af6b8078c295609908dd7
|
[
"Apache-2.0"
] | null | null | null |
# Admin panel: course editor section
# Endpoints of the DSh course-editor menu for the current academic year
path_admin_schedules_grade_1 = '/schedules?grade=1&school=true&'
path_admin_schedules_grade_2 = '/schedules?grade=2&school=true&'
path_admin_schedules_grade_3 = '/schedules?grade=3&school=true&'
path_admin_schedules_grade_4 = '/schedules?grade=4&school=true&'
path_admin_schedules_grade_5 = '/schedules?grade=5&school=true&'
path_admin_schedules_grade_6 = '/schedules?grade=6&school=true&'
path_admin_schedules_grade_7 = '/schedules?grade=7&school=true&'
path_admin_schedules_grade_8 = '/schedules?grade=8&school=true&'
path_admin_schedules_grade_9 = '/schedules?grade=9&school=true&'
path_admin_schedules_grade_10 = '/schedules?grade=10&school=true&'
path_admin_schedules_grade_11 = '/schedules?grade=11&school=true&'
# Attach/detach a subject
path_admin_add_subject = '/schedules?'
path_admin_delete_subject = '/schedules/5583026?'
# Subject editing section
path_admin_item_editor = '/schedule_items.json?schedule_id=3908531&'  # open the subject editor
path_admin_add_topic = '/topics?'  # add a topic
path_admin_add_lesson = 'lessons.json?'  # create a new lesson
path_admin_lesson_for_day = '/schedule_items.json?'  # bind a lesson to a date
path_admin_remove_lesson = '/lessons/37865.json?'  # delete a lesson
path_admin_remove_topic = '/topics/24273?addLessonHide=true&addLessonNameEvent=click&calendarActive=false&editTopicNameHide=true&lessonsHide=false&name=тест&schedule_id=3908531&subject_id=201&'
path_admin_save_date_ege = '/schedules/3908531?'  # save the EGE exam date
# Monthly homework (MDZ) editor
path_admin_monthly_homework_editor = '/monthly_homeworks?schedule_id=3908531&'  # open the MDZ editor
path_admin_create_monthly_homework = '/monthly_homeworks?'  # create an MDZ assignment
path_admin_delete_monthly_homework = '/monthly_homeworks/7229?'  # delete an MDZ assignment
# Endpoints of the EGE course editor
path_admin_editor_ege = '/schedules?grade=11&school=false&'  # open the EGE editor
path_admin_attach_subject_ege = '/schedules?'  # attach an EGE subject
path_admin_delete_subject_ege = '/schedules/5583707?'  # delete an EGE subject
# The two methods below referenced `self` at module level in the source;
# wrapping them in a class (name assumed here) makes the file importable,
# and `self.access_token` is corrected to the `token` set in __init__.
class Credentials:
    def __init__(self, token=None):
        self.token = token

    def get_token(self):
        headers_user = {
            "Authorization": self.token,
        }
        return headers_user
| 52.311111
| 193
| 0.800765
| 322
| 2,354
| 5.493789
| 0.301242
| 0.137366
| 0.111928
| 0.143019
| 0.226116
| 0.226116
| 0.03957
| 0
| 0
| 0
| 0
| 0.04073
| 0.092608
| 2,354
| 44
| 194
| 53.5
| 0.787453
| 0.184792
| 0
| 0.058824
| 0
| 0.029412
| 0.434966
| 0.350711
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
199d8a7e9157f48b6df5a88dec9570242bb4b50a
| 963
|
py
|
Python
|
2021/aoc07.py
|
wolfgangmuender/AdventOfCode
|
3956c245f6d3e789cb5424ac54832ebcfc03eb21
|
[
"Apache-2.0"
] | null | null | null |
2021/aoc07.py
|
wolfgangmuender/AdventOfCode
|
3956c245f6d3e789cb5424ac54832ebcfc03eb21
|
[
"Apache-2.0"
] | null | null | null |
2021/aoc07.py
|
wolfgangmuender/AdventOfCode
|
3956c245f6d3e789cb5424ac54832ebcfc03eb21
|
[
"Apache-2.0"
] | null | null | null |
with open("input/input07.txt") as f:
content = f.read().splitlines()
positions = list(map(int, content[0].split(",")))
max_pos = max(positions)
min_pos = min(positions)
possible_pos = range(min_pos, max_pos + 1)
fuel_costs1 = []
for align_pos in possible_pos:
fuel_costs1.append(sum([abs(pos - align_pos) for pos in positions]))
final_fuel_cost1 = min(fuel_costs1)
align_position1 = possible_pos[fuel_costs1.index(final_fuel_cost1)]
print("Solution 1: the crabs need to spend {} fuel to align at position {}".format(final_fuel_cost1, align_position1))
fuel_costs2 = []
for align_pos in possible_pos:
fuel_costs2.append(sum([int(abs(pos - align_pos) * (abs(pos - align_pos) + 1) / 2) for pos in positions]))
final_fuel_cost2 = min(fuel_costs2)
align_position2 = possible_pos[fuel_costs2.index(final_fuel_cost2)]
print("Solution 2: the crabs need to spend {} fuel to align at position {}".format(final_fuel_cost2, align_position2))
| 35.666667
| 118
| 0.74351
| 154
| 963
| 4.402597
| 0.324675
| 0.079646
| 0.088496
| 0.061947
| 0.321534
| 0.321534
| 0.244838
| 0.162242
| 0.162242
| 0.162242
| 0
| 0.030952
| 0.127726
| 963
| 26
| 119
| 37.038462
| 0.77619
| 0
| 0
| 0.111111
| 0
| 0
| 0.15784
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
199dc81e3c59d04bf93fe983d5b45a4485997a89
| 2,227
|
py
|
Python
|
sources/install_deps.py
|
Radeon4650/PocketBudgetTracker
|
61b792f648e73e8043e42e0e1c55b3acf1868f90
|
[
"Apache-2.0"
] | 1
|
2018-12-03T12:41:50.000Z
|
2018-12-03T12:41:50.000Z
|
sources/install_deps.py
|
Radeon4650/PocketBudgetTracker
|
61b792f648e73e8043e42e0e1c55b3acf1868f90
|
[
"Apache-2.0"
] | 16
|
2018-10-08T17:37:52.000Z
|
2019-01-06T12:02:54.000Z
|
sources/install_deps.py
|
Radeon4650/PocketBudgetTracker
|
61b792f648e73e8043e42e0e1c55b3acf1868f90
|
[
"Apache-2.0"
] | 1
|
2018-10-11T20:01:05.000Z
|
2018-10-11T20:01:05.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright © 2018 PocketBudgetTracker. All rights reserved.
Author: Andrey Shelest (khadsl1305@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import os.path
import subprocess
import sys
modules_dir = os.path.dirname(__file__)
req_file_name = 'requirements.txt'
class Installer:
python_path = os.path.dirname(sys.executable)
pip_path = os.path.join(python_path, 'pip')
def __init__(self, upgrade=False):
print('Python location prefix is {}'.format(self.python_path))
self.upgrade = upgrade
def _pip_install(self, install_args=None):
args = [self.pip_path, 'install']
if install_args is not None:
args += install_args
if self.upgrade:
args.append('--upgrade')
if subprocess.call(args):
print("ERROR: installing of [{}] failed".format(' '.join(args)))
def install(self, name):
print("Installing [%s]" % name)
self._pip_install([name])
def install_modules_deps(self):
print('Search for submodules into {} dir'.format(modules_dir))
modules = next(os.walk(modules_dir))[1]
print('Found modules {}'.format(modules))
for module in modules:
dep_file = os.path.join(modules_dir, module, req_file_name)
if os.path.isfile(dep_file):
self._pip_install(['--requirement', dep_file])
else:
print('Module does not have {} file'.format(req_file_name))
if __name__ == '__main__':
installer = Installer(True)
# Upgrade pip
installer.install('pip')
# Install all modules dependencies
installer.install_modules_deps()
sys.exit(0)
| 28.922078
| 76
| 0.671756
| 297
| 2,227
| 4.882155
| 0.451178
| 0.041379
| 0.022759
| 0.022069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008646
| 0.220925
| 2,227
| 76
| 77
| 29.302632
| 0.826513
| 0.322407
| 0
| 0
| 0
| 0
| 0.141522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.289474
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
199fc92154f499c85aed3bce44e91934109ae7d3
| 5,305
|
py
|
Python
|
tools/utils/building/layer.py
|
shinh/dldt
|
693ab4e79a428e0801f17f4511b129a3fa8f4a62
|
[
"Apache-2.0"
] | 1
|
2021-02-20T21:48:36.000Z
|
2021-02-20T21:48:36.000Z
|
tools/utils/building/layer.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | null | null | null |
tools/utils/building/layer.py
|
erinpark33/dldt
|
edd86d090592f7779f4dbb2681546e1f4e81284f
|
[
"Apache-2.0"
] | 1
|
2021-02-19T01:06:12.000Z
|
2021-02-19T01:06:12.000Z
|
"""
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..biases import Biases
from ..weights import Weights
class Layer:
TEMPLATE = (
'<layer name="{name}" type="{type}" precision="FP32" id="{id}">'
'{data}'
'{input}'
'{output}'
'{weights}'
'{biases}'
'</layer>')
def __init__(
self, id: int,
type: str,
name: str,
params: dict,
input_dims: list,
output_dims: list,
weights: Weights = None,
biases: Biases = None):
self._id = id
self._type = type
self._name = name
self._params = params
self._input_dims = input_dims
self._output_dims = output_dims
self._weights = weights
self._biases = biases
@property
def id(self) -> str:
return self._id
@property
def type(self) -> str:
return self._type
@property
def name(self) -> str:
return self._name
@property
def params(self) -> dict:
return self._params
@property
def input_dims(self) -> list:
return self._input_dims
@property
def output_dims(self) -> list:
return self._output_dims
@property
def weights(self) -> Weights:
return self._weights
@property
def biases(self) -> Biases:
return self._biases
def _output_dims_to_xml(self) -> str:
if self._output_dims:
if len(self._output_dims) == 2:
output_xml = (
'<output>'
'<port id="1">'
'<dim>{}</dim>'
'<dim>{}</dim>'
'</port>'
'</output>').format(self._output_dims[0], self._output_dims[1])
elif len(self._output_dims) == 4:
output_xml = (
'<output>'
'<port id="1">'
'<dim>{}</dim>'
'<dim>{}</dim>'
'<dim>{}</dim>'
'<dim>{}</dim>'
'</port>'
'</output>').format(self._output_dims[0], self._output_dims[1], self._output_dims[2], self._output_dims[3])
else:
raise NotImplementedError("{} dimensions for outputs (layer name '{}', type '{}') are not supported".format(
len(self._output_dims),
self._name,
self._type))
else:
output_xml = None
return output_xml
def _input_dims_to_xml(self) -> str:
if self._input_dims:
if len(self._input_dims) == 2:
input_xml = (
'<input>'
'<port id="0">'
'<dim>{}</dim>'
'<dim>{}</dim>'
'</port>'
'</input>').format(self._input_dims[0], self._input_dims[1])
elif len(self._input_dims) == 4:
input_xml = (
'<input>'
'<port id="0">'
'<dim>{}</dim>'
'<dim>{}</dim>'
'<dim>{}</dim>'
'<dim>{}</dim>'
'</port>'
'</input>').format(self._input_dims[0], self._input_dims[1], self._input_dims[2], self._input_dims[3])
else:
raise NotImplementedError("{} dimensions for inputs (layer name '{}', type '{}') are not supported".format(
len(self._input_dims),
self._name,
self._type))
else:
input_xml = None
return input_xml
def __str__(self) -> str:
if self._params:
data_xml = "<data "
for param_key in self._params.keys():
data_xml += '{}="{}" '.format(param_key, self._params[param_key])
data_xml += " />"
else:
data_xml = None
return self.TEMPLATE.format(
name=self._name,
type=self._type,
id=self._id,
data=(data_xml if data_xml else ''),
input=(self._input_dims_to_xml() if self._input_dims else ''),
output=(self._output_dims_to_xml() if self._output_dims else ''),
weights=('<weights offset="{offset}" size="{size}"/>'.format(offset=self._weights.offset, size=self._weights.size) if self._weights else ''),
biases=('<biases offset="{offset}" size="{size}"/>'.format(offset=self._biases.offset, size=self._biases.size) if self._biases else '')
)
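# An illustrative sketch of the XML a Layer renders (hypothetical values;
# left as a comment because the module's relative imports keep it from
# running standalone):
#
#   fc = Layer(id=1, type="FullyConnected", name="fc1",
#              params={"out-size": 10}, input_dims=[1, 256], output_dims=[1, 10])
#   print(fc)
#   # -> '<layer name="fc1" type="FullyConnected" precision="FP32" id="1">
#   #     <data out-size="10" /><input>...</input><output>...</output></layer>'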
| 33.575949
| 153
| 0.489915
| 559
| 5,305
| 4.422182
| 0.205725
| 0.048544
| 0.058252
| 0.058252
| 0.31068
| 0.271845
| 0.252427
| 0.168285
| 0.168285
| 0.135113
| 0
| 0.01036
| 0.381338
| 5,305
| 157
| 154
| 33.789809
| 0.74284
| 0.107069
| 0
| 0.357143
| 0
| 0
| 0.137661
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.015873
| 0.063492
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19a02ddbcaad9d7bf3192c753f7e060611b72ae8
| 19,686
|
py
|
Python
|
tests/test_lightningStub.py
|
engenegr/lnd_grpc
|
89336bc83fcfa286dd7927e92fbc88293bd29547
|
[
"MIT"
] | null | null | null |
tests/test_lightningStub.py
|
engenegr/lnd_grpc
|
89336bc83fcfa286dd7927e92fbc88293bd29547
|
[
"MIT"
] | null | null | null |
tests/test_lightningStub.py
|
engenegr/lnd_grpc
|
89336bc83fcfa286dd7927e92fbc88293bd29547
|
[
"MIT"
] | null | null | null |
import os
import time
import unittest
import bitcoin.rpc
import grpc
import lnd_grpc.lnd_grpc as py_rpc
import lnd_grpc.protos.rpc_pb2 as rpc_pb2
# from google.protobuf.internal.containers import RepeatedCompositeFieldContainer
#######################
# Configure variables #
#######################
CWD = os.getcwd()
ALICE_LND_DIR = '/Users/will/regtest/.lnd/'
ALICE_NETWORK = 'regtest'
ALICE_RPC_HOST = '127.0.0.1'
ALICE_RPC_PORT = '10009'
ALICE_MACAROON_PATH = '/Users/will/regtest/.lnd/data/chain/bitcoin/regtest/admin.macaroon'
ALICE_PEER_PORT = '9735'
ALICE_HOST_ADDR = ALICE_RPC_HOST + ':' + ALICE_PEER_PORT
BOB_LND_DIR = '/Users/will/regtest/.lnd2/'
BOB_NETWORK = 'regtest'
BOB_RPC_HOST = '127.0.0.1'
BOB_RPC_PORT = '11009'
BOB_MACAROON_PATH = '/Users/will/regtest/.lnd2/data/chain/bitcoin/regtest/admin.macaroon'
BOB_PEER_PORT = '9734'
BOB_HOST_ADDR = BOB_RPC_HOST + ':' + BOB_PEER_PORT
BITCOIN_SERVICE_PORT = 18443
BITCOIN_CONF_FILE = '/Users/will/regtest/.bitcoin/bitcoin.conf'
BITCOIN_ADDR = None
DEBUG_LEVEL = 'error'
SLEEP_TIME = 0.5
def initialise_clients():
global BITCOIN_ADDR
alice = py_rpc.Client(lnd_dir=ALICE_LND_DIR,
network=ALICE_NETWORK,
grpc_host=ALICE_RPC_HOST,
grpc_port=ALICE_RPC_PORT,
macaroon_path=ALICE_MACAROON_PATH)
alice.pub_key = alice.get_info().identity_pubkey
alice.lightning_addr = py_rpc.Client.lightning_address(
pubkey=alice.pub_key,
host=ALICE_HOST_ADDR)
bob = py_rpc.Client(lnd_dir=BOB_LND_DIR,
network=BOB_NETWORK,
grpc_host=BOB_RPC_HOST,
grpc_port=BOB_RPC_PORT,
macaroon_path=BOB_MACAROON_PATH)
bob.pub_key = bob.get_info().identity_pubkey
bob.lightning_addr = py_rpc.Client.lightning_address(
pubkey=bob.pub_key,
host=BOB_HOST_ADDR)
bitcoin_rpc = bitcoin.rpc.RawProxy(service_port=BITCOIN_SERVICE_PORT,
btc_conf_file=BITCOIN_CONF_FILE)
BITCOIN_ADDR = bitcoin_rpc.getnewaddress()
return alice, bob, bitcoin_rpc
def ensure_peer_connected(alice, bob):
if len(alice.list_peers()) == 0:
alice.connect_peer(addr=bob.lightning_addr)
assert (len(alice.list_peers()) > 0)
def ensure_channel_open(alice, bob, bitcoin_rpc, address):
if len(alice.list_channels()) == 0:
alice.open_channel_sync(local_funding_amount=1_000_000,
push_sat=500_000,
node_pubkey_string=bob.pub_key)
bitcoin_rpc.generatetoaddress(3, address)
time.sleep(SLEEP_TIME)
assert (len(alice.list_channels()) > 0)
def disconnect_all_peers(alice):
for peer in alice.list_peers():
alice.disconnect_peer(pub_key=peer.pub_key)
time.sleep(SLEEP_TIME)
assert (0 == len(alice.list_peers()))
def close_all_channels(peer):
if len(peer.list_channels()) > 0:
peer.close_all_channels()
time.sleep(SLEEP_TIME)
assert (0 == len(peer.list_channels()))
##################
# Test framework #
##################
class TestLightningStubResponses(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.alice, cls.bob, cls.bitcoin_rpc = initialise_clients()
def test_aaa_assert_variables(self):
self.assertIsInstance(self.alice, py_rpc.Client)
self.assertIsInstance(self.alice.channel, grpc._channel.Channel)
self.assertIsInstance(self.bob, py_rpc.Client)
self.assertIsInstance(self.bitcoin_rpc, bitcoin.rpc.RawProxy)
def test_wallet_balance(self):
self.assertIsInstance(self.alice.wallet_balance(), rpc_pb2.WalletBalanceResponse)
# lambda function prevents TypeError being raised before assert is run.
self.assertRaises(TypeError, lambda: self.alice.wallet_balance('please'))
def test_channel_balance(self):
self.assertIsInstance(self.alice.channel_balance(), rpc_pb2.ChannelBalanceResponse)
self.assertRaises(TypeError, lambda: self.alice.channel_balance('please'))
def test_get_transactions(self):
self.assertIsInstance(self.alice.get_transactions(), rpc_pb2.TransactionDetails)
self.assertRaises(TypeError, lambda: self.alice.get_transactions('please'))
def test_send_coins(self):
response1 = self.alice.send_coins(self.alice.new_address(address_type='p2wkh').address,
amount=100000)
response2 = self.alice.send_coins(self.alice.new_address(address_type='np2wkh').address,
amount=100000)
self.assertIsInstance(response1, rpc_pb2.SendCoinsResponse)
self.assertIsInstance(response2, rpc_pb2.SendCoinsResponse)
# negative send
self.assertRaises(grpc.RpcError,
lambda: self.alice.send_coins(
self.alice.new_address(address_type='p2wkh').address,
amount=100000 * -1))
# impossibly large send
self.assertRaises(grpc.RpcError,
lambda: self.alice.send_coins(
self.alice.new_address(address_type='p2wkh').address,
amount=1000000000000000))
def test_list_unspent(self):
self.assertIsInstance(self.alice.list_unspent(0, 1000), rpc_pb2.ListUnspentResponse)
def subscribe_transactions(self):
self.assertIsInstance(self.alice.subscribe_transactions(), rpc_pb2.Transaction)
def send_many(self):
pass
def test_new_address(self):
self.address_p2wkh = self.alice.new_address(address_type='p2wkh')
self.address_np2wkh = self.alice.new_address(address_type='np2wkh')
self.assertIsInstance(self.address_p2wkh, rpc_pb2.NewAddressResponse)
self.assertIsInstance(self.address_np2wkh, rpc_pb2.NewAddressResponse)
        self.assertRaises(TypeError, lambda: self.alice.new_address(address_type='segwit'))
def test_sign_message(self):
self.assertIsInstance(self.alice.sign_message(msg='test message content'),
rpc_pb2.SignMessageResponse)
self.assertRaises(AttributeError,
lambda: self.alice.sign_message(msg=b'bytes message'))
def test_verify_message(self):
message = 'test message content'
message_sig_true = self.alice.sign_message(msg=message).signature
message_sig_false = message_sig_true + '1'
self.assertTrue(self.alice.verify_message(msg=message, signature=message_sig_true).valid)
self.assertFalse(self.alice.verify_message(msg=message, signature=message_sig_false).valid)
def test_disconnect_peer(self):
bitcoin_address = self.bitcoin_rpc.getnewaddress()
# make sure we have a peer
ensure_peer_connected(self.alice, self.bob)
# make sure all channels closed otherwise disconnect will fail
close_all_channels(self.alice)
self.bitcoin_rpc.generatetoaddress(3, bitcoin_address)
time.sleep(SLEEP_TIME)
# now disconnect all peers
for peer in self.alice.list_peers():
self.alice.disconnect_peer(pub_key=peer.pub_key)
time.sleep(SLEEP_TIME)
self.bitcoin_rpc.generatetoaddress(3, bitcoin_address)
time.sleep(SLEEP_TIME)
self.assertEqual(0, len(self.alice.list_peers()))
def test_connect(self):
# close any open channels:
close_all_channels(self.alice)
self.bitcoin_rpc.generatetoaddress(3, BITCOIN_ADDR)
time.sleep(SLEEP_TIME)
# check we are fully disconnected from peer before proceeding
disconnect_all_peers(self.alice)
time.sleep(SLEEP_TIME)
# now test the connect
self.alice.connect_peer(addr=self.bob.lightning_addr)
self.bitcoin_rpc.generatetoaddress(3, BITCOIN_ADDR)
time.sleep(SLEEP_TIME)
self.assertEqual(1, len(self.alice.list_peers()))
self.assertEqual(self.alice.list_peers()[0].pub_key, self.bob.pub_key)
def test_list_peers(self):
# make sure we are connected to one peer
ensure_peer_connected(self.alice, self.bob)
# Test length with connected peer
self.assertGreater(len(self.alice.list_peers()), 0)
# close and active channels before disconnect
close_all_channels(self.alice)
self.bitcoin_rpc.generatetoaddress(3, BITCOIN_ADDR)
time.sleep(SLEEP_TIME)
# disconnect
disconnect_all_peers(self.alice)
time.sleep(SLEEP_TIME)
# test after disconnect
self.assertEqual(0, len(self.alice.list_peers()))
def test_get_info(self):
self.assertIsInstance(self.alice.get_info(), rpc_pb2.GetInfoResponse)
def test_pending_channels(self):
self.assertIsInstance(self.alice.pending_channels(), rpc_pb2.PendingChannelsResponse)
def test_list_channels(self):
# self.assertTrue(self.test_connect())
# TODO: open a channel
# self.assertGreater(len(self.alice.list_channels()), 0)
pass
def test_closed_channels(self):
# TODO: open a channel
# ... then close it ...
# self.assertGreater(len(self.alice.closed_channels()), 0)
pass
def test_open_channel_sync(self):
# make sure we are connected
ensure_peer_connected(self.alice, self.bob)
start_channels = len(self.alice.list_channels())
self.assertIsInstance(self.alice.open_channel_sync(local_funding_amount=500_000,
node_pubkey_string=self.bob.pub_key),
rpc_pb2.ChannelPoint)
self.bitcoin_rpc.generatetoaddress(3, BITCOIN_ADDR)
time.sleep(SLEEP_TIME)
end_channels = len(self.alice.list_channels())
self.assertGreater(end_channels, start_channels)
def test_open_channel(self):
pass
def test_close_channel(self):
# setup
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
self.assertGreater(len(self.alice.list_channels()), 0)
# close all active channels
self.alice.close_all_channels()
self.bitcoin_rpc.generatetoaddress(3, BITCOIN_ADDR)
time.sleep(SLEEP_TIME)
self.assertEqual(0, len(self.alice.list_channels()))
def test_abandon_channel(self):
# this is a development function only and does not need test
pass
def test_send_payment(self):
pass
def test_send_payment_sync(self):
# setup
inv_amt = 10000
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
# test payment_request
payment_request = self.bob.add_invoice(value=inv_amt).payment_request
start_len = len(self.alice.list_payments().payments)
self.assertIsInstance(self.alice.send_payment_sync(payment_request=payment_request),
rpc_pb2.SendResponse)
time.sleep(SLEEP_TIME)
end_len = len(self.alice.list_payments().payments)
self.assertGreater(end_len, start_len)
# test manual request
invoice = self.bob.add_invoice(value=inv_amt)
start_len = len(self.alice.list_payments().payments)
self.assertIsInstance(self.alice.send_payment_sync(dest_string=self.bob.pub_key,
amt=inv_amt,
payment_hash=invoice.r_hash,
final_cltv_delta=144),
rpc_pb2.SendResponse)
time.sleep(SLEEP_TIME)
end_len = len(self.alice.list_payments().payments)
self.assertGreater(end_len, start_len)
def test_send_to_route(self):
pass
def test_send_to_route_sync(self):
pass
def test_add_invoice(self):
# setup
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
channel_0_balance = self.alice.list_channels()[0].local_balance
# test valid
start_index = self.alice.list_invoices().invoices[0].add_index
self.assertIsInstance(self.alice.add_invoice(memo='test',
value=channel_0_balance // 2,
expiry=3600),
rpc_pb2.AddInvoiceResponse)
end_index = self.alice.list_invoices().invoices[0].add_index
time.sleep(SLEEP_TIME)
self.assertGreater(end_index, start_index)
# test invalid
start_index = self.alice.list_invoices().invoices[0].add_index
self.assertRaises(grpc.RpcError,
lambda:
self.alice.add_invoice(memo='test',
value=100_000_000,
expiry=3600)
)
end_index = self.alice.list_invoices().invoices[0].add_index
self.assertEqual(end_index, start_index)
def test_list_invoices(self):
self.assertIsInstance(self.alice.list_invoices(),
rpc_pb2.ListInvoiceResponse)
def test_lookup_invoice(self):
test_invoice = self.alice.list_invoices().invoices[0]
self.assertIsInstance(self.alice.lookup_invoice(r_hash=test_invoice.r_hash),
rpc_pb2.Invoice)
def test_subscribe_invoices(self):
self.assertIsInstance(self.alice.subscribe_invoices(),
grpc._channel._Rendezvous)
def test_decode_pay_req(self):
inv_value = 15000
expiry = 3600
cltv_expiry = 144
invoice = self.bob.add_invoice(value=inv_value, expiry=expiry,
cltv_expiry=cltv_expiry)
decoded_inv = self.alice.decode_pay_req(pay_req=invoice.payment_request)
self.assertEqual(invoice.r_hash, bytes.fromhex(decoded_inv.payment_hash))
self.assertEqual(self.bob.pub_key, decoded_inv.destination)
self.assertEqual(inv_value, decoded_inv.num_satoshis)
self.assertEqual(expiry, decoded_inv.expiry)
self.assertEqual(cltv_expiry, decoded_inv.cltv_expiry)
def test_list_payments(self):
self.assertIsInstance(self.alice.list_payments(), rpc_pb2.ListPaymentsResponse)
def test_z_delete_all_payments(self):
# this will delete your payment history -- be careful
# pass
start_len = len(self.alice.list_payments().payments)
if start_len == 0:
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
invoice = self.bob.add_invoice(value=5000)
self.alice.pay_invoice(payment_request=invoice.payment_request)
self.assertGreater(start_len, 0)
self.assertIsInstance(self.alice.delete_all_payments(),
rpc_pb2.DeleteAllPaymentsResponse)
time.sleep(SLEEP_TIME)
end_len = len(self.alice.list_payments().payments)
self.assertEqual(end_len, 0)
def test_describe_graph(self):
self.assertIsInstance(self.alice.describe_graph(), rpc_pb2.ChannelGraph)
def test_get_chan_info(self):
if len(self.alice.list_channels()) == 0:
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
chan_id = self.alice.list_channels()[0].chan_id
self.assertIsInstance(self.alice.get_chan_info(chan_id=chan_id),
rpc_pb2.ChannelEdge)
def test_subscribe_channel_events(self):
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
results = []
subscription = self.alice.subscribe_channel_events()
self.assertIsInstance(subscription, grpc._channel._Rendezvous)
self.alice.open_channel_sync(local_funding_amount=500_000,
node_pubkey_string=self.bob.pub_key)
self.bitcoin_rpc.generatetoaddress(3, BITCOIN_ADDR)
time.sleep(SLEEP_TIME)
results.append(subscription.next())
self.assertGreater(len(results), 0)
def test_get_node_info(self):
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
self.assertIsInstance(self.alice.get_node_info(pub_key=self.bob.pub_key),
rpc_pb2.NodeInfo)
def test_query_routes(self):
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
self.assertIsInstance(self.alice.query_routes(pub_key=self.bob.pub_key,
amt=10000,
num_routes=1),
rpc_pb2.QueryRoutesResponse)
def test_network_info(self):
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
self.assertIsInstance(self.alice.get_network_info(), rpc_pb2.NetworkInfo)
def test_stop_daemon(self):
pass
def test_subscribe_channel_graph(self):
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
results = []
subscription = self.alice.subscribe_channel_graph()
self.assertIsInstance(subscription, grpc._channel._Rendezvous)
def test_debug_level(self):
self.assertIsInstance(self.alice.debug_level(level_spec='off'),
rpc_pb2.DebugLevelResponse)
def test_fee_report(self):
self.assertIsInstance(self.alice.fee_report(), rpc_pb2.FeeReportResponse)
def test_update_channel_policy(self):
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
# testing global policy setting
self.assertIsInstance(self.alice.update_channel_policy(base_fee_msat=1000,
fee_rate=1,
time_lock_delta=144),
rpc_pb2.PolicyUpdateResponse)
# test single channel
channel_point = self.alice.list_channels()[0].channel_point
self.assertIsInstance(self.alice.update_channel_policy(base_fee_msat=1000,
fee_rate=1,
time_lock_delta=144,
chan_point=channel_point),
rpc_pb2.PolicyUpdateResponse)
def test_forwarding_history(self):
ensure_peer_connected(self.alice, self.bob)
ensure_channel_open(self.alice, self.bob, self.bitcoin_rpc, BITCOIN_ADDR)
self.assertIsInstance(self.alice.forwarding_history(),
rpc_pb2.ForwardingHistoryResponse)
| 39.689516
| 99
| 0.642436
| 2,299
| 19,686
| 5.219226
| 0.129622
| 0.086257
| 0.066006
| 0.070089
| 0.549129
| 0.460122
| 0.372781
| 0.331528
| 0.307359
| 0.291358
| 0
| 0.017615
| 0.264655
| 19,686
| 495
| 100
| 39.769697
| 0.811274
| 0.056131
| 0
| 0.296512
| 0
| 0
| 0.021832
| 0.012189
| 0
| 0
| 0
| 0.00202
| 0.209302
| 1
| 0.148256
| false
| 0.026163
| 0.020349
| 0
| 0.174419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19a1cf2092a5e1441835b5519079835b5bb7b437
| 484
|
py
|
Python
|
20_valid_parens.py
|
claytonjwong/leetcode-py
|
16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7
|
[
"MIT"
] | 1
|
2020-07-15T14:16:23.000Z
|
2020-07-15T14:16:23.000Z
|
20_valid_parens.py
|
claytonjwong/leetcode-py
|
16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7
|
[
"MIT"
] | null | null | null |
20_valid_parens.py
|
claytonjwong/leetcode-py
|
16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7
|
[
"MIT"
] | null | null | null |
#
# 20. Valid Parentheses
#
# Q: https://leetcode.com/problems/valid-parentheses/
# A: https://leetcode.com/problems/valid-parentheses/discuss/9214/Kt-Js-Py3-Cpp-Stack
#
class Solution:
def isValid(self, A: str) -> bool:
s = []
for c in A:
if c == '(': s.append(')')
elif c == '[': s.append(']')
elif c == '{': s.append('}')
elif not len(s) or c != s.pop():
return False
return not len(s)
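# For example: Solution().isValid("([{}])") -> True, Solution().isValid("(]") -> False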
| 26.888889
| 85
| 0.506198
| 63
| 484
| 3.888889
| 0.571429
| 0.032653
| 0.097959
| 0.146939
| 0.473469
| 0.473469
| 0.146939
| 0.146939
| 0
| 0
| 0
| 0.020958
| 0.309917
| 484
| 17
| 86
| 28.470588
| 0.712575
| 0.32438
| 0
| 0
| 0
| 0
| 0.01875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19a1f5173da1a19ab9a723e6c007b756a7bf4d27
| 1,474
|
py
|
Python
|
2016/23/solution.py
|
Artemigos/advent-of-code
|
0b2ae6af3788b9e6891219b2b64ce0d510b3e1aa
|
[
"MIT"
] | null | null | null |
2016/23/solution.py
|
Artemigos/advent-of-code
|
0b2ae6af3788b9e6891219b2b64ce0d510b3e1aa
|
[
"MIT"
] | null | null | null |
2016/23/solution.py
|
Artemigos/advent-of-code
|
0b2ae6af3788b9e6891219b2b64ce0d510b3e1aa
|
[
"MIT"
] | null | null | null |
import common
lines = common.read_file('2016/23/data.txt').splitlines()
# part 1
# eggs = 7
# part 2
eggs = 12
registers = dict(a=eggs, b=0, c=0, d=0)
def get_val(val_spec):
if val_spec in registers.keys():
return registers[val_spec]
return int(val_spec)
def is_reg(val_spec):
return val_spec in registers.keys()
instructions = [x.split(' ') for x in lines]
i = 0
while i < len(instructions) and i >= 0:
print(registers)
instr = instructions[i]
code = instr[0]
if code == 'cpy':
if is_reg(instr[2]):
registers[instr[2]] = get_val(instr[1])
i += 1
elif code == 'inc':
if is_reg(instr[1]):
registers[instr[1]] += 1
i += 1
elif code == 'dec':
if is_reg(instr[1]):
registers[instr[1]] -= 1
i += 1
elif code == 'jnz':
if get_val(instr[1]) != 0:
i += get_val(instr[2])
else:
i += 1
else: # tgl
instr_offset = get_val(instr[1])
instr_idx = i + instr_offset
if 0 <= instr_idx < len(instructions):
to_tgl = instructions[instr_idx]
if to_tgl[0] == 'inc':
to_tgl[0] = 'dec'
elif to_tgl[0] == 'dec' or to_tgl[0] == 'tgl':
to_tgl[0] = 'inc'
elif to_tgl[0] == 'jnz':
to_tgl[0] = 'cpy'
else: # cpy
to_tgl[0] = 'jnz'
i += 1
print('a=', registers['a'])
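# The 'tgl' branch follows the puzzle's toggle rules as implemented above:
# inc becomes dec; dec and tgl become inc; jnz becomes cpy; cpy becomes jnz.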
| 24.566667
| 58
| 0.501357
| 213
| 1,474
| 3.328639
| 0.262911
| 0.06347
| 0.067701
| 0.050776
| 0.187588
| 0.110014
| 0.110014
| 0.110014
| 0.110014
| 0.110014
| 0
| 0.045738
| 0.347354
| 1,474
| 59
| 59
| 24.983051
| 0.691268
| 0.014247
| 0
| 0.204082
| 0
| 0
| 0.038674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040816
| false
| 0
| 0.020408
| 0.020408
| 0.122449
| 0.040816
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19a87db683c9165ec3ec45d64b90db5e4bc925f8
| 3,272
|
py
|
Python
|
src/experiment/experiment.py
|
ricoms/credit-fraud-dealing-with-imbalanced-datasets-mlops
|
aa483832493faa88affd00eff5489d03506abdeb
|
[
"MIT"
] | 5
|
2021-04-29T22:20:08.000Z
|
2021-05-21T03:29:52.000Z
|
src/experiment/experiment.py
|
ricoms/credit-fraud-dealing-with-imbalanced-datasets-mlops
|
aa483832493faa88affd00eff5489d03506abdeb
|
[
"MIT"
] | null | null | null |
src/experiment/experiment.py
|
ricoms/credit-fraud-dealing-with-imbalanced-datasets-mlops
|
aa483832493faa88affd00eff5489d03506abdeb
|
[
"MIT"
] | 3
|
2021-04-29T22:41:59.000Z
|
2021-10-03T20:06:05.000Z
|
import sys
from dataclasses import dataclass
from io import StringIO
from pathlib import Path
from typing import Any
import numpy as np
from sklearn.model_selection import StratifiedKFold
from utils.logger import logger
from .artifacts import ExperimentArtifacts
INPUT_COLUMNS = [
"Time", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V13", "V14", "V15", "V16", "V17", "V18", "V19", "V20",
"V21", "V22", "V23", "V24", "V25", "V26", "V27", "V28", "Amount", "Class",
]
@dataclass
class Experiment:
run_tag: str
model: Any
input_dir: Path
artifacts_handler: ExperimentArtifacts
training_portion: float = .8
random_state: int = 42
def load_data(self):
with open(self.input_dir, 'r') as file:
data = file.read().replace('"', '')
data = np.genfromtxt(StringIO(data), delimiter=',', skip_header=1) # , dtype=None
self.X, self.y = data[:, :-1], data[:, -1]
shape0 = self.y.shape[0]
bin_counts = np.bincount(self.y.astype(np.int32))
number_no_frauds = round(bin_counts[0]/shape0 * 100, 2)
number_frauds = round(bin_counts[1]/shape0 * 100, 2)
logger.info(f'Number of instances of the dataset: {shape0}')
logger.info(f'No Frauds {number_no_frauds} of the dataset')
logger.info(f'Frauds {number_frauds} of the dataset')
def split_data(self):
self.sss = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)
def train(self):
accuracy_lst = []
precision_lst = []
recall_lst = []
f1_lst = []
auc_lst = []
for train, validation in self.sss.split(self.X, self.y):
self.model.train(
self.X[train],
self.y[train],
)
acc, precision, recall, f1, auc = self.model.evaluate(
self.X[validation],
self.y[validation],
)
accuracy_lst.append(acc)
precision_lst.append(precision)
recall_lst.append(recall)
f1_lst.append(f1)
auc_lst.append(auc)
visual_results = self.model.gen_plots(
self.X[validation],
self.y[validation],
)
artifacts = {
"metrics": {
"avg_accuracy": np.mean(accuracy_lst),
"avg_precision": np.mean(precision_lst),
"avg_recall": np.mean(recall_lst),
"avg_f1": np.mean(f1_lst),
"avg_auc": np.mean(auc_lst),
},
"images": visual_results,
}
logger.info(artifacts)
return artifacts
def save(self):
self.artifacts_handler.save()
self.model.save(self.artifacts_handler.output_prefix)
def run(self):
logger.info(f"Begin Experiment {self.run_tag} for model {self.model.model_id}.")
self.load_data()
self.split_data()
try:
artifacts = self.train()
except Exception as e:
self.artifacts_handler.training_error(e)
sys.exit(255)
self.artifacts_handler.get_artifacts(
artifacts
)
self.save()
self.artifacts_handler.create_package_with_models()
| 31.161905
| 90
| 0.573655
| 394
| 3,272
| 4.611675
| 0.388325
| 0.019263
| 0.055036
| 0.011007
| 0.033021
| 0.033021
| 0
| 0
| 0
| 0
| 0
| 0.034783
| 0.297066
| 3,272
| 104
| 91
| 31.461538
| 0.755217
| 0.003667
| 0
| 0.044944
| 0
| 0
| 0.104972
| 0.006753
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.101124
| 0
| 0.247191
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
19b063208b212cda110697a8227b09dc8f98a0db
| 414
|
py
|
Python
|
catalog/bindings/csw/role.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/role.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/role.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from bindings.csw.code_type_1 import CodeType1
__NAMESPACE__ = "http://www.opengis.net/ows"
@dataclass
class Role(CodeType1):
"""Function performed by the responsible party.
Possible values of this Role shall include the values and the
meanings listed in Subclause B.5.5 of ISO 19115:2003.
"""
class Meta:
namespace = "http://www.opengis.net/ows"
| 24.352941
| 65
| 0.724638
| 58
| 414
| 5.068966
| 0.706897
| 0.088435
| 0.108844
| 0.156463
| 0.197279
| 0.197279
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.188406
| 414
| 16
| 66
| 25.875
| 0.833333
| 0.388889
| 0
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fd63dd37aacbace93081d1f24365379814ddc62
| 1,847
|
py
|
Python
|
discogstagger/lyrics.py
|
makzyt4/discogs-tagger
|
b578257922230b634d43349f2efd4fda72fdd008
|
[
"MIT"
] | 2
|
2019-09-05T04:26:20.000Z
|
2020-08-05T02:56:04.000Z
|
discogstagger/lyrics.py
|
makzyt4/discogs-tagger
|
b578257922230b634d43349f2efd4fda72fdd008
|
[
"MIT"
] | null | null | null |
discogstagger/lyrics.py
|
makzyt4/discogs-tagger
|
b578257922230b634d43349f2efd4fda72fdd008
|
[
"MIT"
] | 1
|
2019-10-02T13:07:34.000Z
|
2019-10-02T13:07:34.000Z
|
import warnings
import urllib
warnings.filterwarnings("ignore", category=UserWarning)
from fuzzywuzzy import fuzz
from bs4 import BeautifulSoup
from discogstagger.crawler import WebCrawler
class LyricsSearcher:
def __init__(self, artist_name):
self.artist_name = artist_name
self.url_base = "http://lyrics.wikia.com"
self.crawler = WebCrawler()
url_search_fragment = "/wiki/Special:Search?query="
artist_query = urllib.parse.quote_plus(artist_name)
url = self.url_base + url_search_fragment + artist_query
self.soup = self.crawler.get_soup(url)
def load(self):
li = self.soup.find("li", {"class": "result"})
if li is None:
return False
first_artist = li.find_all("a", {"class": "result-link"})[0].text
ratio = fuzz.ratio(first_artist, self.artist_name)
if ratio < 60:
return False
first_link = li.find_all("a", {"class": "result-link"})[0]['href']
self.soup = self.crawler.get_soup(first_link)
return True
def search_lyrics(self, track_title):
ols = self.soup.find_all("ol")
max_ratio = 0
found = None
for ol in ols:
links = ol.find_all("a")
for link in links:
ratio = fuzz.ratio(track_title, link.text)
if ratio > max_ratio:
max_ratio = ratio
found = {'link': link['href'], 'ratio': ratio}
        if found is None or found['ratio'] < 80:
return ''
track_soup = self.crawler.get_soup(self.url_base + found['link'])
lyricbox = track_soup.find("div", {"class": "lyricbox"})
if lyricbox is None:
return ''
for br in lyricbox.find_all("br"):
br.replace_with("\n")
return lyricbox.text
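# --- Usage sketch (editor's addition). Assumes network access to the
# lyrics.wikia site via the WebCrawler used above; the artist and track
# names are hypothetical.
if __name__ == "__main__":
    searcher = LyricsSearcher("Daft Punk")
    if searcher.load():
        lyrics = searcher.search_lyrics("Around the World")
        print(lyrics or "No confident match found.")
    else:
        print("Artist page not found.")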
| 35.519231
| 74
| 0.593936
| 231
| 1,847
| 4.575758
| 0.320346
| 0.047304
| 0.039735
| 0.051088
| 0.119205
| 0.098392
| 0.049196
| 0.049196
| 0
| 0
| 0
| 0.006084
| 0.288035
| 1,847
| 51
| 75
| 36.215686
| 0.797719
| 0
| 0
| 0.086957
| 0
| 0
| 0.082296
| 0.014618
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.108696
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fd7391d4fbb21e69c15f5d957bc435fc6d49a18
| 7,798
|
py
|
Python
|
env/lib/python3.7/site-packages/cleo/inputs/argv_input.py
|
Kolawole39/masonite-guides-tutorial
|
9a21cc635291a42f0722f69925be1809bb20e01c
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/cleo/inputs/argv_input.py
|
Kolawole39/masonite-guides-tutorial
|
9a21cc635291a42f0722f69925be1809bb20e01c
|
[
"MIT"
] | null | null | null |
env/lib/python3.7/site-packages/cleo/inputs/argv_input.py
|
Kolawole39/masonite-guides-tutorial
|
9a21cc635291a42f0722f69925be1809bb20e01c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import re
from .input import Input
from ..exceptions import NoSuchOption, BadOptionUsage, TooManyArguments
class ArgvInput(Input):
def __init__(self, argv=None, definition=None):
super(ArgvInput, self).__init__(definition)
if argv is None:
argv = sys.argv[:]
argv.pop(0)
self._tokens = argv
self._parsed = None
def parse(self):
parse_options = True
self._parsed = self._tokens
while True:
try:
token = self._parsed.pop(0)
except IndexError:
break
if parse_options and token == '':
self.parse_argument(token)
elif parse_options and token == '--':
parse_options = False
elif parse_options and token.find('--') == 0:
self.parse_long_option(token)
elif parse_options and token[0] == '-' and token != '-':
self.parse_short_option(token)
else:
self.parse_argument(token)
def parse_short_option(self, token):
name = token[1:]
if len(name) > 1:
if self.definition.has_shortcut(name[0])\
and self.definition.get_option_for_shortcut(name[0]).accept_value():
# an option with a value (with no space)
self.add_short_option(name[0], name[1:])
else:
self.parse_short_option_set(name)
else:
if self.definition.has_shortcut(name) and self.definition.get_option_for_shortcut(name).accept_value():
try:
value = self._parsed.pop(0)
except IndexError:
value = None
if value and value.startswith('-'):
self._parsed.insert(0, value)
value = None
self.add_short_option(name, value)
else:
self.add_short_option(name, None)
def parse_short_option_set(self, name):
        length = len(name)
        for i in range(0, length):
            if not self.definition.has_shortcut(name[i]):
                raise NoSuchOption('The "-%s" option does not exist.' % name[i])
            option = self.definition.get_option_for_shortcut(name[i])
            if option.accept_value():
                self.add_long_option(option.get_name(), None if length - 1 == i else name[i + 1:])
                break
            else:
                self.add_long_option(option.get_name(), None)
def parse_long_option(self, token):
name = token[2:]
pos = name.find('=')
if pos != -1:
self.add_long_option(name[:pos], name[pos + 1:])
else:
if self.definition.has_option(name) and self.definition.get_option(name).accept_value():
try:
value = self._parsed.pop(0)
except IndexError:
value = None
if value and value.startswith('-'):
self._parsed.insert(0, value)
value = None
self.add_long_option(name, value)
else:
self.add_long_option(name, None)
def parse_argument(self, token):
c = len(self.arguments)
# if input is expecting another argument, add it
if self.definition.has_argument(c):
arg = self.definition.get_argument(c)
self.arguments[arg.get_name()] = [token] if arg.is_list() else token
elif self.definition.has_argument(c - 1) and self.definition.get_argument(c - 1).is_list():
arg = self.definition.get_argument(c - 1)
self.arguments[arg.get_name()].append(token)
# unexpected argument
else:
raise TooManyArguments('Too many arguments.')
def add_short_option(self, shortcut, value):
if not self.definition.has_shortcut(shortcut):
raise NoSuchOption('The "-%s" option does not exist.' % shortcut)
self.add_long_option(self.definition.get_option_for_shortcut(shortcut).get_name(), value)
def add_long_option(self, name, value):
if not self.definition.has_option(name):
raise NoSuchOption('The "--%s" option does not exist.' % name)
option = self.definition.get_option(name)
if value is False:
value = None
if value is not None and not option.accept_value():
raise BadOptionUsage('The "--%s" option does not accept a value.' % name)
if value is None and option.accept_value() and len(self._parsed):
# if option accepts an optional or mandatory argument
# let's see if there is one provided
try:
nxt = self._parsed.pop(0)
except IndexError:
nxt = None
if nxt and len(nxt) >= 1 and nxt[0] != '-':
value = nxt
elif not nxt:
value = ''
else:
self._parsed.insert(0, nxt)
# This test is here to handle cases like --foo=
# and foo option value is optional
if value == '':
value = None
if value is None:
if option.is_value_required():
raise BadOptionUsage('The "--%s" option requires a value.' % name)
if not option.is_list():
value = option.get_default() if option.is_value_optional() else True
if option.is_list():
if name not in self.options:
self.options[name] = [value]
else:
self.options[name].append(value)
else:
self.options[name] = value
def get_first_argument(self):
for token in self._tokens:
if token and token[0] == '-':
continue
return token
def has_parameter_option(self, values):
values = [values] if not isinstance(values, (list, tuple)) else values
for token in self._tokens:
for value in values:
if token == value:
return True
# Options with values:
# For long options, test for '--option=' at beginning
# For short options, test for '-o' at beginning
leading = value + "=" if value.find("--") == 0 else value
if leading and token.find(leading) == 0:
return True
return False
def get_parameter_option(self, values, default=False):
values = [values] if not isinstance(values, (list, tuple)) else values
tokens = self._tokens[:]
while True:
try:
token = tokens.pop(0)
except IndexError:
break
for value in values:
if token == value:
try:
return tokens.pop(0)
except IndexError:
return
# Options with values:
# For long options, test for '--option=' at beginning
# For short options, test for '-o' at beginning
leading = value + "=" if value.find("--") == 0 else value
if leading and token.find(leading) == 0:
return token[len(leading):]
return default
def __str__(self):
def stringify(token):
m = re.match('^(-[^=]+=)(.+)', token)
if m:
return m.group(1) + self.escape_token(m.group(2))
if token and token[0] != '-':
return self.escape_token(token)
return token
tokens = map(stringify, self._tokens)
return ' '.join(tokens)
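# --- Usage sketch (editor's addition). Shown as comments because the
# InputDefinition import path differs across cleo versions; the flow itself
# matches the class above (argv defaults to sys.argv with the program name
# popped, then parse() fills the arguments and options).
#
#   definition = InputDefinition([...])              # declare args/options here
#   argv_input = ArgvInput(['prog', 'build', '-v'], definition)
#   argv_input.parse()
#   print(argv_input.get_first_argument())           # -> 'build'
#   print(argv_input.has_parameter_option('-v'))     # -> True (raw token scan)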
| 33.467811
| 115
| 0.530649
| 892
| 7,798
| 4.497758
| 0.141256
| 0.059322
| 0.038136
| 0.02991
| 0.552094
| 0.392074
| 0.268943
| 0.234048
| 0.187936
| 0.166501
| 0
| 0.007733
| 0.369838
| 7,798
| 232
| 116
| 33.612069
| 0.80871
| 0.068094
| 0
| 0.345238
| 0
| 0
| 0.031164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077381
| false
| 0
| 0.02381
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fda0bec3ea02ea3a6f8218190306677fcc6bbb6
| 3,371
|
py
|
Python
|
digibanner.py
|
w4mhi/digipi
|
1fb00450cf9ed14a30c293e744848a7cad09f489
|
[
"MIT"
] | null | null | null |
digibanner.py
|
w4mhi/digipi
|
1fb00450cf9ed14a30c293e744848a7cad09f489
|
[
"MIT"
] | null | null | null |
digibanner.py
|
w4mhi/digipi
|
1fb00450cf9ed14a30c293e744848a7cad09f489
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# direwatch
"""
Craig Lamparter KM6LYW, 2021, MIT License
modified by W4MHI February 2022
- see the init_display.py module for display settings
- see https://www.delftstack.com/howto/python/get-ip-address-python/ for the ip address
"""
import sys
import argparse
import time
from netifaces import interfaces, ifaddresses, AF_INET
sys.path.insert(0, '/home/pi/common')
from display_util import *
from constants import *
def parse_arguments():
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--fontsize", required=False, help="Font size for messages")
ap.add_argument("-b", "--big", required=False, help="large text to display")
ap.add_argument("-s", "--small", required=False, help="smaller text underneath")
args = vars(ap.parse_args())
return args
args = parse_arguments()
if args["fontsize"]:
fontsize = int(args["fontsize"])
    if fontsize > 34:
        print("The input " + str(fontsize) + " is greater than 34, the maximum supported value.")
        print("Setting to maximum value: 34.")
        fontsize = 34
    elif fontsize < 20:
        print("The input " + str(fontsize) + " is lower than 20, the minimum supported value.")
        print("Setting to minimum value: 20.")
        fontsize = 20
else:
print("Setting font size to default value: 24.")
fontsize = 24
if args["big"]:
message_big = args["big"]
else:
message_big = "DigiPi"
if args["small"]:
message_small = args["small"]
else:
message_small = "DigiPi Operational!"
# title
font_title = get_titlefont()
# define writing fonts
font_message = get_writingfont(fontsize)
spacing = get_spacing(fontsize)
last_line = get_lastline(fontsize)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill="#000000")
# title bar
draw.rectangle((0, 0, width, TITLE_BAR_H), outline=0, fill="#333333")
draw.text((10, 0) , TITLE, font=font_title, fill="#888888")
# ip addresses message
count = 1
draw.text((PAD_LEFT, count*spacing), "Net's IP Addresses", font=font_message, fill="#00FF00")
first_pass = True
ip_present = False
while not ip_present:
count = 1
# ip addresses
for ifaceName in interfaces():
for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr':'No IP yet'}]):
if ifaceName.startswith("wlan") or ifaceName.startswith("eth"):
# increment for interface name
count = count + 1
if first_pass:
# show the interface name
draw.text((PAD_LEFT, count*spacing), "[" + ifaceName + "]", font=font_message, fill="#00FF00")
# increment for the ip address
count = count + 1
                # clear the previous line if it exists
draw.rectangle((0, count*spacing, width, (count+1)*spacing), outline=0, fill="#000000")
draw.text((4*PAD_LEFT, count*spacing), i['addr'], font=font_message, fill="#00FF00")
                if not i['addr'].startswith('No IP'):
ip_present = True
disp.image(image)
first_pass = False
# wait and re-iterate if no ip address
    if not ip_present:
time.sleep(3)
# message
count = count + 1
if last_line >= count:
draw.text((PAD_LEFT, last_line*spacing), message_small, font=font_message, fill="#FFFF00")
#with display_lock:
disp.image(image)
print("DigiPi operational!\n")
exit(0)
| 29.570175
| 110
| 0.664195
| 463
| 3,371
| 4.742981
| 0.37149
| 0.016393
| 0.027322
| 0.034608
| 0.126138
| 0.04827
| 0
| 0
| 0
| 0
| 0
| 0.033445
| 0.201721
| 3,371
| 113
| 111
| 29.831858
| 0.782609
| 0.159893
| 0
| 0.142857
| 0
| 0
| 0.184763
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0.042857
| 0.085714
| 0
| 0.114286
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fdb29602cc883d0c3c33fa3a91c2d4157fe4739
| 7,007
|
py
|
Python
|
autotorrent/utils.py
|
jyggen/autotorrent
|
5a8f2b40ccc8c66c73dc520f98b886d21e163afa
|
[
"MIT"
] | 278
|
2015-02-12T19:19:53.000Z
|
2022-03-22T21:17:28.000Z
|
autotorrent/utils.py
|
jyggen/autotorrent
|
5a8f2b40ccc8c66c73dc520f98b886d21e163afa
|
[
"MIT"
] | 56
|
2015-03-27T00:38:37.000Z
|
2022-03-26T17:52:58.000Z
|
autotorrent/utils.py
|
jyggen/autotorrent
|
5a8f2b40ccc8c66c73dc520f98b886d21e163afa
|
[
"MIT"
] | 48
|
2015-03-10T16:50:19.000Z
|
2022-03-20T12:11:50.000Z
|
from __future__ import division
import hashlib
import logging
import os
import re
__all__ = [
'is_unsplitable',
'get_root_of_unsplitable',
'Pieces',
]
UNSPLITABLE_FILE_EXTENSIONS = [
set(['.rar', '.sfv']),
set(['.mp3', '.sfv']),
set(['.vob', '.ifo']),
]
logger = logging.getLogger(__name__)
def is_unsplitable(files):
"""
Checks if a list of files can be considered unsplitable, e.g. VOB/IFO or scene release.
This means the files can only be used in this combination.
"""
extensions = set(os.path.splitext(f)[1].lower() for f in files)
found_unsplitable_extensions = False
for exts in UNSPLITABLE_FILE_EXTENSIONS:
if len(extensions & exts) == len(exts):
found_unsplitable_extensions = True
break
lowercased_files = set([f.lower() for f in files])
found_magic_file = False
if 'movieobject.bdmv' in lowercased_files:
found_magic_file = True
return found_unsplitable_extensions or found_magic_file
def get_root_of_unsplitable(path):
"""
Scans a path for the actual scene release name, e.g. skipping cd1 folders.
Returns None if no scene folder could be found
"""
path = path[::-1]
for p in path:
if not p:
continue
if re.match(r'^(cd[1-9])|(samples?)|(proofs?)|((vob)?sub(title)?s?)$', p, re.IGNORECASE): # scene paths
continue
if re.match(r'^(bdmv)|(disc\d*)|(video_ts)$', p, re.IGNORECASE): # bluray / dd
continue
return p
class Pieces(object):
"""
Can help check if files match the files found in a torrent.
"""
def __init__(self, torrent):
self.piece_size = torrent[b'info'][b'piece length']
self.pieces = []
for i in range(0, len(torrent[b'info'][b'pieces']), 20):
self.pieces.append(torrent[b'info'][b'pieces'][i:i+20])
def get_complete_pieces(self, start_size, end_size):
"""
Finds complete pieces and returns the alignment needed from
the beginning and the end (to match the file).
"""
logger.debug('Getting complete pieces for file starting at %i and ending at %i. Piece size is %i' % (start_size, end_size, self.piece_size))
start_piece, start_offset = divmod(start_size, self.piece_size)
if start_offset:
start_piece += 1
if start_offset:
start_offset = self.piece_size - start_offset
end_piece, end_offset = divmod(end_size, self.piece_size)
logger.debug('Start piece:%i end piece:%i' % (start_piece, end_piece-1))
return start_offset, end_offset, self.pieces[start_piece:end_piece]
def find_piece_breakpoint(self, file_path, start_size, end_size):
"""
Finds the point where a file with a different size is modified and tries to align it with pieces.
"""
start_offset, end_offset, pieces = self.get_complete_pieces(start_size, end_size)
failed_pieces = (len(pieces) // 20) or 1 # number of pieces that can fail in a row and then put an end to checking
success_count = failed_pieces
piece_status = []
with open(file_path, 'rb') as f:
f.seek(start_offset)
for i, piece in enumerate(pieces):
logger.debug('Checking piece %i for breakingpoint' % (i, ))
h = hashlib.sha1(f.read(self.piece_size)).digest()
if h == piece:
logger.debug('Piece %i matched' % i)
if success_count < failed_pieces:
success_count += 1
piece_status.append(True)
else:
logger.debug('Piece %i did not match' % i)
success_count -= 1
piece_status.append(False)
if success_count <= 0:
logger.debug('The breakingpoint has been found after piece %i - more than %i failed pieces' % (i, failed_pieces))
break
for p in piece_status[::-1]:
if p:
break
i -= 1
breakingpoint = start_offset + self.piece_size*i
logger.debug('A total of %i pieces were ok, so we set breakingpoint at %i' % (i, breakingpoint))
return breakingpoint
def match_file(self, file_path, start_size, end_size):
"""
Try to match file starting at start_size and ending at end_size.
"""
start_offset, end_offset, pieces = self.get_complete_pieces(start_size, end_size)
logger.debug('Stuff to check start_offset:%i end_offset:%i pieces:%s' % (start_offset, end_offset, len(pieces)))
if not pieces:
logger.debug('No whole pieces found for %r, taking this as a not-match' % file_path)
return False, False
check_pieces = (len(pieces) // 10) or 1
match_start, match_end = 0, 0
size = os.path.getsize(file_path)
with open(file_path, 'rb') as f:
for i in range(check_pieces): # check from beginning
seek_offset = start_offset+self.piece_size*i
logger.debug('Checking piece %i from beginning of file, reading from %i bytes. Filesize: %i' % (i, seek_offset, size))
f.seek(seek_offset)
h = hashlib.sha1(f.read(self.piece_size)).digest()
logger.debug('Matching hash %r against %r' % (h, pieces[i]))
if h == pieces[i]:
logger.debug('Piece %i matched' % i)
match_start += 1
else:
logger.debug('Piece %i did not match' % i)
for i in range(check_pieces): # check from end
seek_offset = size-end_offset-self.piece_size*(i+1)
logger.debug('Checking piece %i from end of file, reading from %i bytes. Filesize: %i' % (i, seek_offset, size))
f.seek(seek_offset)
h = hashlib.sha1(f.read(self.piece_size)).digest()
piece = pieces[(i+1)*-1]
logger.debug('Matching hash %r against %r' % (h, piece))
if h == piece:
logger.debug('Piece %i matched' % i)
match_end += 1
else:
logger.debug('Piece %i did not match' % i)
logger.debug('Checked %i pieces from both start and end. %i matched from start and %i matched from end.' % (check_pieces, match_start, match_end))
if check_pieces < 4:
must_match = 1
elif check_pieces < 10:
must_match = 2
else:
must_match = max(check_pieces // 10, 3)
return (match_start and check_pieces - match_start <= must_match,
match_end and check_pieces - match_end <= must_match)
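# --- Usage sketch (editor's addition). Exercises the two path helpers on
# synthetic names; no torrent metadata is needed for these.
if __name__ == "__main__":
    # a .rar/.sfv pair marks a release whose files must stay together
    print(is_unsplitable(["group-movie.rar", "group-movie.sfv"]))  # True
    print(is_unsplitable(["episode.mkv"]))                         # False
    # components are passed root-first; the function reverses them and skips
    # cd1/sample/sub folders to find the release name
    print(get_root_of_unsplitable(["Some.Release-GRP", "cd1"]))    # Some.Release-GRP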
| 38.927778
| 154
| 0.572285
| 912
| 7,007
| 4.226974
| 0.210526
| 0.051362
| 0.037095
| 0.024903
| 0.318547
| 0.270558
| 0.222568
| 0.188067
| 0.136187
| 0.101167
| 0
| 0.008898
| 0.326388
| 7,007
| 179
| 155
| 39.145251
| 0.807839
| 0.104467
| 0
| 0.248
| 0
| 0.008
| 0.162973
| 0.017275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048
| false
| 0
| 0.04
| 0
| 0.144
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fdbaa30fa591b9ff594e38b62a435e122fd9e26
| 232
|
py
|
Python
|
Documentation/matplotlib/users/tight_layout_guide-11.py
|
leesavide/pythonista-docs-deprecated
|
9ec3363f07e328bde0a58738a16907f11dfd06e1
|
[
"Apache-2.0"
] | 16
|
2016-06-14T19:45:35.000Z
|
2020-11-30T19:02:58.000Z
|
Documentation/matplotlib/users/tight_layout_guide-11.py
|
leesavide/pythonista-docs
|
9ec3363f07e328bde0a58738a16907f11dfd06e1
|
[
"Apache-2.0"
] | 1
|
2016-06-15T07:10:27.000Z
|
2016-06-15T07:10:27.000Z
|
Documentation/matplotlib/users/tight_layout_guide-11.py
|
leesavide/pythonista-docs
|
9ec3363f07e328bde0a58738a16907f11dfd06e1
|
[
"Apache-2.0"
] | null | null | null |
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)
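# --- Context sketch (editor's addition). The snippet above assumes `fig`,
# `gridspec`, and `example_plot` from earlier in matplotlib's tight_layout
# guide; a minimal self-contained equivalent, under that assumption:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

def example_plot(ax):
    ax.plot([1, 2])
    ax.set_xlabel('x-label', fontsize=12)
    ax.set_ylabel('y-label', fontsize=12)
    ax.set_title('Title', fontsize=14)

fig = plt.figure()
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
    ax = fig.add_subplot(ss)
    example_plot(ax)
    ax.set_title("")
    ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)  # label only the bottom axes
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)
plt.show()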
| 21.090909
| 53
| 0.637931
| 44
| 232
| 3.204545
| 0.613636
| 0.106383
| 0.156028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072917
| 0.172414
| 232
| 11
| 53
| 21.090909
| 0.661458
| 0
| 0
| 0
| 0
| 0
| 0.030043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fde36f357f3d09233e7473cce22e0c33f10115a
| 5,203
|
py
|
Python
|
scripts/posts.py
|
flashreads/mediumish-theme-jekyll
|
428e3338c00a3aee9fde119f0885496edd082f12
|
[
"MIT"
] | null | null | null |
scripts/posts.py
|
flashreads/mediumish-theme-jekyll
|
428e3338c00a3aee9fde119f0885496edd082f12
|
[
"MIT"
] | null | null | null |
scripts/posts.py
|
flashreads/mediumish-theme-jekyll
|
428e3338c00a3aee9fde119f0885496edd082f12
|
[
"MIT"
] | null | null | null |
from argparse import ArgumentParser
from datetime import datetime
from os import path, listdir, getcwd, chdir
import subprocess
from dateutil.parser import parse as dt_parse
from ruamel.yaml import YAML
def transform_posts(src_dir, dest_dir, featured_path=None):
featured = load_featured(featured_path) if featured_path else []
for p in (src_dir, dest_dir):
if not path.isdir(p):
raise Exception('{} is not a directory.'.format(p))
for dir_name in listdir(src_dir):
if path.isdir(path.join(src_dir, dir_name)):
transform_category(dir_name, path.join(src_dir, dir_name), dest_dir, featured)
def transform_category(category, src_dir, dest_dir, featured):
for file_name in listdir(src_dir):
if not path.isfile(path.join(src_dir, file_name)):
continue
_, file_ext = path.splitext(file_name)
if file_ext.lower() not in ['.md', '.mdx']:
continue
if file_name.lower() in ['readme.md']:
continue
try:
transform_post(category, path.join(src_dir, file_name), dest_dir, featured)
except Exception as e:
print('Failed to transform post: ', file_name, 'Error: {}'.format(e))
def transform_post(category, post_file, dest_dir, featured):
frontmatter, fm_length = read_frontmatter(post_file)
post_date = frontmatter.get('date')
if not post_date:
dt = path.getmtime(post_file)
post_date = datetime.fromtimestamp(dt)
else:
if isinstance(post_date, str):
post_date = dt_parse(post_date)
frontmatter['date'] = post_date.isoformat()
if not frontmatter.get('layout'):
frontmatter['layout'] = 'post'
    categories = frontmatter.get('categories') or []
    if isinstance(categories, str):
        categories = [categories]
    if category not in categories:
        categories.append(category)
    frontmatter['categories'] = categories
    tags = frontmatter.get('tags') or []
    if isinstance(tags, str):
        tags = [tags]
    frontmatter['tags'] = tags
try:
frontmatter['author_email'] = get_email_from_git(post_file)
except Exception as e:
print('Failed to get author email: ', e)
file_name = path.basename(post_file)
file_name = '{}-{}'.format(
post_date.strftime('%Y-%m-%d'),
file_name,
)
post_id = frontmatter.get('id')
if post_id and post_id in featured:
frontmatter['featured'] = True
with open(post_file) as pf:
pf_content = pf.readlines()
pf_content = pf_content[fm_length:]
yaml = YAML(typ='safe')
with open(path.join(dest_dir, file_name), 'w') as tf:
tf.write('---\n')
yaml.dump(frontmatter, tf)
tf.write('---\n')
tf.write('\n')
tf.write('\n'.join(pf_content))
def read_frontmatter(post_file):
frontmatter = []
reading_fm = False
read_fm = False
count = 0
with open(post_file) as pf:
for line in pf:
count += 1
if line.strip() == '---':
if reading_fm:
read_fm = True
reading_fm = False
break
reading_fm = True
continue
elif reading_fm:
frontmatter.append(line)
if not read_fm:
if reading_fm:
raise Exception('Invalid front-matter in post: {}'.format(post_file))
raise Exception('No front-matter in post: {}'.format(post_file))
yaml = YAML(typ='safe')
yaml.allow_duplicate_keys = True
yaml.allow_unicode = True
return (yaml.load(''.join(frontmatter)), count)
def get_email_from_git(post_file):
cwd = getcwd()
try:
chdir(path.dirname(post_file))
file_name = path.basename(post_file)
#email = exec_cmd(['git', 'log', '-1', '--format=\'%ae\'', '--', file_name])
# --diff-filter=A
email = exec_cmd(['git', 'log', '--diff-filter=A', '--format=\'%ae\'', '--', file_name])
return email.strip().replace("'", '')
finally:
chdir(cwd)
def exec_cmd(cmd):
result = subprocess.run(cmd, capture_output=True, text=True)
return result.stdout
def load_featured(file_path):
if path.isfile(file_path):
featured = []
with open(file_path) as fp:
for line in fp:
featured += [feat.strip() for feat in line.strip().split(',')]
return featured
return []
if __name__ == '__main__':
parser = ArgumentParser(description='Transform the FlashReads posts to Jekyll-style MD posts.')
parser.add_argument('-s', '--source', type=str, dest='src_dir', help='The source directory where the flash-reads posts are contained.')
parser.add_argument('-d', '--dest', type=str, dest='dest_dir', help='The destination directory where the Jekyll-style posts will reside - usually <repo>/_posts.')
parser.add_argument('-f', '--featured', type=str, dest='featured_path', help='File containing the featured posts by id, separated by a comma.')
args = parser.parse_args()
transform_posts(args.src_dir, args.dest_dir, args.featured_path)
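# --- Usage sketch (editor's addition). Programmatic equivalent of the CLI
# above; directory and file names are hypothetical.
#
#   transform_posts('content/posts', '_posts', featured_path='featured.txt')
#
# is equivalent to:
#
#   python posts.py -s content/posts -d _posts -f featured.txt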
| 32.51875
| 166
| 0.617528
| 667
| 5,203
| 4.628186
| 0.244378
| 0.036281
| 0.024295
| 0.018141
| 0.160674
| 0.135407
| 0.040168
| 0
| 0
| 0
| 0
| 0.000775
| 0.256006
| 5,203
| 159
| 167
| 32.72327
| 0.796693
| 0.01749
| 0
| 0.170732
| 0
| 0
| 0.126614
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056911
| false
| 0
| 0.04878
| 0
| 0.146341
| 0.01626
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fe3ceaa4d994eb74303ce409f0c500eac7328b4
| 4,262
|
py
|
Python
|
gelweb/gel2clin/views.py
|
moka-guys/GeL2MDT
|
09bf25b8452e2e887dbf74b1cd4771d234c6166c
|
[
"MIT"
] | null | null | null |
gelweb/gel2clin/views.py
|
moka-guys/GeL2MDT
|
09bf25b8452e2e887dbf74b1cd4771d234c6166c
|
[
"MIT"
] | 1
|
2020-02-06T13:17:40.000Z
|
2020-02-06T13:17:40.000Z
|
gelweb/gel2clin/views.py
|
byronmews/GeL2MDT
|
1449831f0d7c570b71e7f46fb4dd1fcb805b0325
|
[
"MIT"
] | null | null | null |
"""Copyright (c) 2018 Great Ormond Street Hospital for Children NHS Foundation
Trust & Birmingham Women's and Children's NHS Foundation Trust
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django.shortcuts import render, redirect
from . import *
from gel2mdt.models import *
from gel2mdt.config import load_config
from django.contrib.auth.decorators import login_required
from .forms import ProbandCancerForm
from django.contrib import messages
@login_required
def index(request):
return render(request, 'gel2clin/index.html', {'sample_type': None})
@login_required
def cancer_main(request):
'''
Shows all the Cancer cases the user has access to and allows easy searching of cases
:param request:
:return:
'''
return render(request, 'gel2clin/cancer_main.html', {'sample_type': 'cancer'})
@login_required
def rare_disease_main(request):
'''
Shows all the RD cases the user has access to and allows easy searching of cases
:param request:
:return:
'''
return render(request, 'gel2clin/rare_disease_main.html', {'sample_type': 'raredisease'})
@login_required
def proband_view(request, report_id):
'''
Shows details about a particular proband, some fields are editable by clinical scientists
:param request:
:param report_id: GEL Report ID
:return:
'''
report = GELInterpretationReport.objects.get(id=report_id)
relatives = Relative.objects.filter(proband=report.ir_family.participant_family.proband)
proband_variants = ProbandVariant.objects.filter(interpretation_report=report)
proband_mdt = MDTReport.objects.filter(interpretation_report=report)
panels = InterpretationReportFamilyPanel.objects.filter(ir_family=report.ir_family)
    if request.method == 'POST':
        form = ProbandCancerForm(request.POST, instance=report.ir_family.participant_family.proband)
        if form.is_valid():
            form.save()
            messages.add_message(request, 25, 'Clinical History Updated')
if report.sample_type == 'cancer':
form = ProbandCancerForm(instance=report.ir_family.participant_family.proband)
return render(request, 'gel2clin/cancer_proband.html', {'report': report,
'relatives': relatives,
'proband_variants': proband_variants,
'proband_mdt': proband_mdt,
'panels': panels,
'sample_type': report.sample_type,
'form': form})
else:
return render(request, 'gel2clin/raredisease_proband.html', {'report': report,
'relatives': relatives,
'proband_variants': proband_variants,
'proband_mdt': proband_mdt,
'panels': panels,
'sample_type': report.sample_type})
| 45.827957
| 106
| 0.644533
| 477
| 4,262
| 5.660377
| 0.392034
| 0.032593
| 0.035185
| 0.05
| 0.298148
| 0.215556
| 0.201481
| 0.167407
| 0.167407
| 0.167407
| 0
| 0.004274
| 0.286251
| 4,262
| 92
| 107
| 46.326087
| 0.8833
| 0.358048
| 0
| 0.272727
| 0
| 0
| 0.128765
| 0.044051
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0.022727
| 0.386364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fea42b894dab2e2c30e2950d94a43b828f9c4c1
| 1,743
|
py
|
Python
|
research/controllers/cartpole_regression_sysid.py
|
MinRegret/TigerControl
|
b1ca0617cbb2198f9d5cb37f725f3d7accbab08f
|
[
"Apache-2.0"
] | 31
|
2019-11-08T06:01:54.000Z
|
2021-11-20T04:50:43.000Z
|
research/controllers/cartpole_regression_sysid.py
|
johnhallman/ctsb
|
b1ca0617cbb2198f9d5cb37f725f3d7accbab08f
|
[
"Apache-2.0"
] | 32
|
2019-06-27T15:05:04.000Z
|
2019-08-07T04:23:47.000Z
|
research/controllers/cartpole_regression_sysid.py
|
MinRegret/tigercontrol
|
b1ca0617cbb2198f9d5cb37f725f3d7accbab08f
|
[
"Apache-2.0"
] | 3
|
2020-09-30T17:06:50.000Z
|
2021-04-12T22:39:34.000Z
|
# NOTE: the original file uses these names without importing them; the imports
# below are reconstructed from usage (the np/onp split is the JAX convention of
# jax.numpy vs. "original" numpy) and are an editorial assumption.
import jax.numpy as np
import numpy as onp
import scipy.linalg as scilinalg
from numpy import random

class RegressionSystemID:
def __init__(self):
self.initialized = False
def initialize(self, n, m, K=None, learning_rate=0.001):
self.initialized = True
self.n, self.m = n, m
self.T = 0
        self.K = K if K is not None else np.zeros((m, n))
self.lr = learning_rate
self.stash = []
self.x_history = []
self.u_history = []
# initialize matrices
self.A = np.identity(n)
self.B = np.zeros((n, m))
def get_action(self, x_t, done):
""" return action """
self.T += 1
# regular numpy
eta_t = 1 - 2*random.randint(2, size=(self.m,))
u_t = - self.K @ x_t + np.expand_dims(eta_t, axis=1)
self.x_history.append(np.squeeze(x_t, axis=1))
self.u_history.append(np.squeeze(u_t, axis=1))
if done:
if len(self.x_history) > 1:
self.stash.append((self.x_history, self.u_history))
self.x_history = []
self.u_history = []
return u_t
def system_id(self):
""" returns current estimate of hidden system dynamics """
assert self.T > 1 # need at least 2 data points
if len(self.x_history) > 1:
self.stash.append((self.x_history, self.u_history))
# transform x and u into regular numpy arrays for least squares
x_t = onp.vstack([onp.array(x[:-1]) for x, u in self.stash])
u_t = onp.vstack([onp.array(u[:-1]) for x, u in self.stash])
x_t1 = onp.vstack([onp.array(x[1:]) for x, u in self.stash])
# regression on A and B jointly
A_B = scilinalg.lstsq(np.hstack((x_t, u_t)), x_t1)[0]
A, B = np.array(A_B[:self.n]).T, np.array(A_B[self.n:]).T
return (A, B)
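# --- Usage sketch (editor's addition). Identifies a known 2-state linear
# system x_{t+1} = A x_t + B u_t; relies on the reconstructed imports above,
# and A_true/B_true are arbitrary illustrative dynamics.
if __name__ == "__main__":
    n, m = 2, 1
    A_true = onp.array([[1.0, 0.1], [0.0, 1.0]])
    B_true = onp.array([[0.0], [0.1]])
    sysid = RegressionSystemID()
    sysid.initialize(n, m)
    x = onp.zeros((n, 1))
    for t in range(200):
        u = sysid.get_action(x, done=(t == 199))  # K=0, so u is pure exploration noise
        x = A_true @ x + B_true @ onp.asarray(u)
    A_est, B_est = sysid.system_id()
    print(A_est)  # should approximate A_true
    print(B_est)  # should approximate B_true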
| 36.3125
| 71
| 0.560528
| 277
| 1,743
| 3.389892
| 0.288809
| 0.042599
| 0.089457
| 0.068158
| 0.317359
| 0.297125
| 0.246006
| 0.195953
| 0.195953
| 0.195953
| 0
| 0.017959
| 0.297189
| 1,743
| 48
| 72
| 36.3125
| 0.748571
| 0.126793
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fead45ea3ba403eb9b2c9548ef8cb56dd3f5f96
| 1,121
|
py
|
Python
|
binary_tree/vertical_order_traversal.py
|
x899/algorithms
|
38a5de72db14ef2664489da9857b598d24c4e276
|
[
"MIT"
] | 472
|
2018-05-25T06:45:44.000Z
|
2020-01-06T15:46:09.000Z
|
binary_tree/vertical_order_traversal.py
|
pratik-a/algorithms-2
|
241115c64a7518c34f672eb2b851b05f353247f1
|
[
"MIT"
] | 6
|
2020-01-25T22:22:44.000Z
|
2021-06-01T04:53:25.000Z
|
binary_tree/vertical_order_traversal.py
|
pratik-a/algorithms-2
|
241115c64a7518c34f672eb2b851b05f353247f1
|
[
"MIT"
] | 44
|
2019-03-02T07:38:38.000Z
|
2020-01-01T16:05:06.000Z
|
""" Vertical Order Traversal On Binary Tree
"""
from collections import defaultdict
class Node:
""" class representing node in binary tree """
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def preorder(root, horizontal_dist, hd_map):
if root is None:
return
hd_map[horizontal_dist].append(root.data)
preorder(root.left, horizontal_dist-1, hd_map)
preorder(root.right, horizontal_dist+1, hd_map)
def vertical_order(root):
""" vertical order traversal of binary tree """
hd_map = defaultdict(list)
horizontal_dist = 0
preorder(root, horizontal_dist, hd_map)
    # sort by horizontal distance so columns print left-to-right
    for key, value in sorted(hd_map.items()):
        print(f"{key}: {value}")
def main():
""" operational function """
root = Node(2)
root.left = Node(7)
root.left.left = Node(2)
root.left.right = Node(6)
root.left.right.left = Node(5)
root.left.right.right = Node(11)
root.right = Node(5)
root.right.right = Node(9)
root.right.right.left = Node(4)
vertical_order(root)
if __name__ == "__main__":
main()
| 21.557692
| 52
| 0.6405
| 155
| 1,121
| 4.458065
| 0.335484
| 0.050651
| 0.05644
| 0.075253
| 0.147612
| 0.089725
| 0
| 0
| 0
| 0
| 0
| 0.015152
| 0.234612
| 1,121
| 51
| 53
| 21.980392
| 0.79021
| 0.127565
| 0
| 0
| 0
| 0
| 0.023134
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.032258
| 0
| 0.225806
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5feaf4c73afb53db46a1e5e5e4652d4a41e6e80e
| 1,452
|
py
|
Python
|
data_preprocessing/load_cifar_ten.py
|
gwenniger/multi-hare
|
fb3f655cbbf4af6ccbfc77d587b8ea2924b300cd
|
[
"Apache-2.0"
] | 7
|
2019-12-04T05:58:40.000Z
|
2021-08-04T07:19:55.000Z
|
data_preprocessing/load_cifar_ten.py
|
gwenniger/multi-hare
|
fb3f655cbbf4af6ccbfc77d587b8ea2924b300cd
|
[
"Apache-2.0"
] | null | null | null |
data_preprocessing/load_cifar_ten.py
|
gwenniger/multi-hare
|
fb3f655cbbf4af6ccbfc77d587b8ea2924b300cd
|
[
"Apache-2.0"
] | 4
|
2019-12-03T23:42:00.000Z
|
2020-12-19T19:48:04.000Z
|
import torchvision.transforms as transforms
import torch
import torchvision
__author__ = "Dublin City University"
__copyright__ = "Copyright 2019, Dublin City University"
__credits__ = ["Gideon Maillette de Buy Wenniger"]
__license__ = "Dublin City University Software License (enclosed)"
def get_train_set():
transform = transforms.Compose(
[transforms.Resize((32, 32)), transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
return trainset
def get_test_set():
transform = transforms.Compose(
[transforms.Resize((32, 32)), transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
return testset
def get_train_loader(batch_size: int):
train_loader = torch.utils.data.DataLoader(get_train_set(), batch_size=batch_size,
shuffle=True, num_workers=2)
return train_loader
def get_test_loader(batch_size: int):
test_loader = torch.utils.data.DataLoader(get_test_set(), batch_size=batch_size,
shuffle=True, num_workers=2)
return test_loader
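# --- Usage sketch (editor's addition). Pulls one batch to sanity-check
# shapes; CIFAR-10 is downloaded into ./data on first use.
if __name__ == "__main__":
    loader = get_train_loader(batch_size=4)
    images, labels = next(iter(loader))
    print(images.shape)  # torch.Size([4, 3, 32, 32])
    print(labels.shape)  # torch.Size([4])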
| 37.230769
| 86
| 0.637741
| 170
| 1,452
| 5.211765
| 0.311765
| 0.027088
| 0.03386
| 0.045147
| 0.575621
| 0.494357
| 0.331828
| 0.331828
| 0.331828
| 0.331828
| 0
| 0.038603
| 0.250689
| 1,452
| 38
| 87
| 38.210526
| 0.775735
| 0
| 0
| 0.344828
| 0
| 0
| 0.106061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.103448
| 0
| 0.37931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5feb07c430256fe7dba892fc904f0cd15150fcbf
| 1,018
|
py
|
Python
|
dprofiler/profile.py
|
disktnk/dprofiler
|
be820de440c21d3fef711db1220bfddb9bdee177
|
[
"MIT"
] | null | null | null |
dprofiler/profile.py
|
disktnk/dprofiler
|
be820de440c21d3fef711db1220bfddb9bdee177
|
[
"MIT"
] | 3
|
2018-10-16T04:03:50.000Z
|
2018-10-17T01:34:16.000Z
|
dprofiler/profile.py
|
disktnk/dprofiler
|
be820de440c21d3fef711db1220bfddb9bdee177
|
[
"MIT"
] | null | null | null |
import cProfile
import functools
import logging
import pstats
import sys
import six
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
_stream_handler = logging.StreamHandler(sys.stdout)
_stream_handler.setLevel(logging.DEBUG)
_logger.addHandler(_stream_handler)
def profile(
fn=None, sort_key='cumtime', n=20, prefix='', suffix='', logger=None):
if fn is None:
return functools.partial(
profile, sort_key=sort_key, n=n, prefix=prefix, suffix=suffix,
logger=logger)
@functools.wraps(fn)
def hook(*args, **kwargs):
out = logger
if out is None:
out = _logger
stream = six.StringIO()
cp = cProfile.Profile()
cp.enable()
stream.write(prefix)
ret = fn(*args, **kwargs)
st = pstats.Stats(cp, stream=stream)
st.strip_dirs().sort_stats(sort_key).print_stats(n)
stream.write(suffix)
out.debug(stream.getvalue())
return ret
return hook
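# --- Usage example (editor's addition). Decorate a function either bare
# (@profile) or parametrized as below; the pstats table is written to the
# module logger at DEBUG level.
if __name__ == "__main__":
    @profile(sort_key='tottime', n=5, prefix='--- busy() profile ---\n')
    def busy():
        return sum(i * i for i in range(100000))
    busy()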
| 23.674419
| 78
| 0.642436
| 125
| 1,018
| 5.064
| 0.384
| 0.044234
| 0.063191
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002608
| 0.246562
| 1,018
| 42
| 79
| 24.238095
| 0.822686
| 0
| 0
| 0
| 0
| 0
| 0.006876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.181818
| 0
| 0.333333
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fec5184bee3516b0d2cd771d0ad21b3d6f1997f
| 3,948
|
py
|
Python
|
server.py
|
HusseinKabbout/qwc-document-service
|
c8d856390006e0f5ecc0d28b0d53da55e5505381
|
[
"MIT"
] | null | null | null |
server.py
|
HusseinKabbout/qwc-document-service
|
c8d856390006e0f5ecc0d28b0d53da55e5505381
|
[
"MIT"
] | null | null | null |
server.py
|
HusseinKabbout/qwc-document-service
|
c8d856390006e0f5ecc0d28b0d53da55e5505381
|
[
"MIT"
] | 1
|
2020-04-24T11:36:26.000Z
|
2020-04-24T11:36:26.000Z
|
import os
import sys
from urllib.parse import urlencode
from flask import Flask, Response, abort, request, stream_with_context, jsonify
from flask_restx import Api, Resource, fields, reqparse
import requests
from qwc_services_core.api import CaseInsensitiveArgument
from qwc_services_core.app import app_nocache
from qwc_services_core.auth import auth_manager, optional_auth, get_auth_user
from qwc_services_core.tenant_handler import TenantHandler
from qwc_services_core.runtime_config import RuntimeConfig
from qwc_services_core.permissions_reader import PermissionsReader
# Flask application
app = Flask(__name__)
app_nocache(app)
api = Api(app, version='1.0', title='Document service API',
description="""API for QWC Document service.
The document service delivers reports from the Jasper reporting service.
""",
default_label='Document operations', doc='/api/')
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
# disable verbose 404 error message
app.config['ERROR_404_HELP'] = False
auth = auth_manager(app, api)
tenant_handler = TenantHandler(app.logger)
config_handler = RuntimeConfig("document", app.logger)
def get_document(tenant, template, format):
"""Return report with specified template and format.
:param str template: Template ID
:param str format: Document format
"""
config = config_handler.tenant_config(tenant)
jasper_service_url = config.get(
'jasper_service_url', 'http://localhost:8002/reports')
jasper_timeout = config.get("jasper_timeout", 60)
resources = config.resources().get('document_templates', [])
permissions_handler = PermissionsReader(tenant, app.logger)
permitted_resources = permissions_handler.resource_permissions(
'document_templates', get_auth_user()
)
if template in permitted_resources:
resource = list(filter(
lambda entry: entry.get("template") == template, resources))
if len(resource) != 1:
app.logger.info("Template '%s' not found in config", template)
abort(404)
jasper_template = resource[0]['report_filename']
# http://localhost:8002/reports/BelasteteStandorte/?format=pdf&p1=v1&..
url = "%s/%s/" % (jasper_service_url, jasper_template)
params = {"format": format}
for k, v in request.args.lists():
params[k] = v
app.logger.info("Forward request to %s?%s" %
(url, urlencode(params)))
response = requests.get(url, params=params, timeout=jasper_timeout)
r = Response(
stream_with_context(response.iter_content(chunk_size=16*1024)),
content_type=response.headers['content-type'],
status=response.status_code)
return r
else:
app.logger.info("Missing permissions for template '%s'", template)
abort(404)
# routes
@api.route('/<template>')
@api.param('template', 'The report template')
class Document(Resource):
@api.doc('document')
@optional_auth
def get(self, template):
"""Request document
Return report with specified template.
The extension is inferred from the template name, and defaults to PDF.
Query parameters are passed to the reporting engine.
"""
tenant = tenant_handler.tenant()
pos = template.rfind('.')
if pos != -1:
format = template[pos + 1:]
template = template[:pos]
else:
format = 'pdf'
return get_document(tenant, template, format)
""" readyness probe endpoint """
@app.route("/ready", methods=['GET'])
def ready():
return jsonify({"status": "OK"})
""" liveness probe endpoint """
@app.route("/healthz", methods=['GET'])
def healthz():
return jsonify({"status": "OK"})
# local webserver
if __name__ == '__main__':
print("Starting GetDocument service...")
app.run(host='localhost', port=5018, debug=True)
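# --- Client sketch (editor's addition). Template name, parameter, and a
# running local instance are all hypothetical; the URL shape follows the
# Document resource above (the extension selects the output format, and
# extra query parameters are forwarded to Jasper).
#
#   import requests
#   resp = requests.get("http://localhost:5018/MyReport.pdf", params={"p1": "v1"})
#   with open("MyReport.pdf", "wb") as f:
#       f.write(resp.content)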
| 32.360656
| 79
| 0.680091
| 470
| 3,948
| 5.546809
| 0.342553
| 0.01611
| 0.034522
| 0.043728
| 0.049099
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012775
| 0.20694
| 3,948
| 121
| 80
| 32.628099
| 0.819866
| 0.113222
| 0
| 0.075949
| 0
| 0
| 0.165532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050633
| false
| 0
| 0.151899
| 0.025316
| 0.265823
| 0.012658
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5fedb1afd0a343dd3c21ad7f3aee3e96bedef35d
| 403
|
py
|
Python
|
scripts/Government Spending.py
|
MarcosDaNight/Dashboard-COVID19
|
26967d2fb7b3dd44c3c51b8ac79608d91bf26d71
|
[
"MIT"
] | null | null | null |
scripts/Government Spending.py
|
MarcosDaNight/Dashboard-COVID19
|
26967d2fb7b3dd44c3c51b8ac79608d91bf26d71
|
[
"MIT"
] | null | null | null |
scripts/Government Spending.py
|
MarcosDaNight/Dashboard-COVID19
|
26967d2fb7b3dd44c3c51b8ac79608d91bf26d71
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
# spending data from the Federal Government plan
# (labels, in order: transfers to health; FPE and FPM funds; social assistance;
#  suspension of debts owed to the Union; renegotiation with banks)
planes = ['Transferência para saúde', 'FPE e FPM', 'Assistência Social',
'Suspensão de dívidas da União', 'Renegociação com bancos']
spend = [8, 16, 2, 12.6, 9.3]
plt.axis("equal")
plt.pie(spend, labels=planes, autopct='%1.2f%%')
plt.title("Gastos dos Planos Governamentais")
plt.show()
| 22.388889
| 73
| 0.665012
| 56
| 403
| 4.785714
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034268
| 0.203474
| 403
| 17
| 74
| 23.705882
| 0.800623
| 0.101737
| 0
| 0
| 0
| 0
| 0.431085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5ff029ed47a23de0243a31bef9cc5542860e44d1
| 867
|
py
|
Python
|
ngo_point1/ngo_requirements/urls.py
|
Sanayshah2/T046_Scrapshut
|
ecb269373e9b78a738ceb99675379ca21ee313a5
|
[
"MIT"
] | null | null | null |
ngo_point1/ngo_requirements/urls.py
|
Sanayshah2/T046_Scrapshut
|
ecb269373e9b78a738ceb99675379ca21ee313a5
|
[
"MIT"
] | null | null | null |
ngo_point1/ngo_requirements/urls.py
|
Sanayshah2/T046_Scrapshut
|
ecb269373e9b78a738ceb99675379ca21ee313a5
|
[
"MIT"
] | 1
|
2021-04-23T17:06:44.000Z
|
2021-04-23T17:06:44.000Z
|
from django.contrib import admin
from django.urls import path
from . import views
from django.conf.urls import url
urlpatterns = [
    path('', views.home, name='home'),
    path('register/', views.register, name='register'),
    path('login/', views.Login, name='login'),
path('logout/', views.logout_view, name='logout_view'),
path('add-requirement/', views.addRequirement, name='addRequirement'),
path('ngo-dashboard/', views.ngoDashboard, name='ngoDashboard'),
path('ngo-requirement-view/<int:rid>/', views.ngoRequirementView, name='ngoRequirementView'),
path('donor-requirement-view/<int:rid>/', views.donorRequirementView, name='donorRequirementView'),
path('donor-dashboard/', views.donorDashboard, name='donorDashboard'),
path('requirement-fulfillment/<int:rid>/', views.requirement_fulfillment, name='requirement_fulfillment'),
]
| 37.695652
| 110
| 0.726644
| 97
| 867
| 6.453608
| 0.309278
| 0.047923
| 0.052716
| 0.067093
| 0.083067
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103806
| 867
| 23
| 111
| 37.695652
| 0.805663
| 0
| 0
| 0
| 0
| 0
| 0.339862
| 0.139401
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5ff34356a914a7907c97352a87bdb8eb2ee14f4e
| 32,810
|
py
|
Python
|
parakeet/models/wavenet.py
|
lfchener/Parakeet
|
b0ba6e7bf9b44b6309ca45927d4405bb85fcd103
|
[
"Apache-2.0"
] | 1
|
2021-02-03T12:11:21.000Z
|
2021-02-03T12:11:21.000Z
|
parakeet/models/wavenet.py
|
gzfffff/Parakeet
|
a84b6d3383b2a8a5fb45d0c233bee1ed80d0b389
|
[
"Apache-2.0"
] | null | null | null |
parakeet/models/wavenet.py
|
gzfffff/Parakeet
|
a84b6d3383b2a8a5fb45d0c233bee1ed80d0b389
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import time
from typing import Union, Sequence, List
from tqdm import trange
import numpy as np
import paddle
from paddle import nn
from paddle.nn import functional as F
import paddle.fluid.initializer as I
import paddle.fluid.layers.distributions as D
from parakeet.modules.conv import Conv1dCell
from parakeet.modules.audio import quantize, dequantize, STFT
from parakeet.utils import checkpoint, layer_tools
__all__ = ["WaveNet", "ConditionalWaveNet"]
def crop(x, audio_start, audio_length):
"""Crop the upsampled condition to match audio_length.
The upsampled condition has the same time steps as the whole audio does.
But since audios are sliced to 0.5 seconds randomly while conditions are
    not, upsampled conditions should also be sliced to exactly match the time
steps of the audio slice.
Parameters
----------
x : Tensor [shape=(B, C, T)]
The upsampled condition.
audio_start : Tensor [shape=(B,), dtype:int]
The index of the starting point of the audio clips.
audio_length : int
        The length of the audio clip (number of samples it contains).
Returns
-------
Tensor [shape=(B, C, audio_length)]
Cropped condition.
"""
# crop audio
slices = [] # for each example
# paddle now supports Tensor of shape [1] in slice
# starts = audio_start.numpy()
for i in range(x.shape[0]):
start = audio_start[i]
end = start + audio_length
slice = paddle.slice(x[i], axes=[1], starts=[start], ends=[end])
slices.append(slice)
out = paddle.stack(slices)
return out
class UpsampleNet(nn.LayerList):
"""A network used to upsample mel spectrogram to match the time steps of
audio.
It consists of several layers of Conv2DTranspose. Each Conv2DTranspose
layer upsamples the time dimension by its `stride` times.
Also, each Conv2DTranspose's filter_size at frequency dimension is 3.
Parameters
----------
upscale_factors : List[int], optional
Time upsampling factors for each Conv2DTranspose Layer.
The ``UpsampleNet`` contains ``len(upscale_factor)`` Conv2DTranspose
Layers. Each upscale_factor is used as the ``stride`` for the
        corresponding Conv2DTranspose. Defaults to [16, 16], so the default
        upsampling factor is 256.
Notes
------
    ``np.prod(upscale_factors)`` should equal the ``hop_length`` of the stft
transformation used to extract spectrogram features from audio.
For example, ``16 * 16 = 256``, then the spectrogram extracted with a stft
transformation whose ``hop_length`` equals 256 is suitable.
See Also
---------
``librosa.core.stft``
"""
def __init__(self, upscale_factors=[16, 16]):
super(UpsampleNet, self).__init__()
self.upscale_factors = list(upscale_factors)
self.upscale_factor = 1
for item in upscale_factors:
self.upscale_factor *= item
for factor in self.upscale_factors:
self.append(
nn.utils.weight_norm(
nn.Conv2DTranspose(
1,
1,
kernel_size=(3, 2 * factor),
stride=(1, factor),
padding=(1, factor // 2))))
def forward(self, x):
r"""Compute the upsampled condition.
Parameters
-----------
x : Tensor [shape=(B, F, T)]
The condition (mel spectrogram here). ``F`` means the frequency
bands, which is the feature size of the input.
In the internal Conv2DTransposes, the frequency dimension
is treated as ``height`` dimension instead of ``in_channels``.
        Returns
        --------
        Tensor [shape=(B, F, T \* upscale_factor)]
            The upsampled condition.
"""
x = paddle.unsqueeze(x, 1)
for sublayer in self:
x = F.leaky_relu(sublayer(x), 0.4)
x = paddle.squeeze(x, 1)
return x
class ResidualBlock(nn.Layer):
"""A Residual block used in wavenet. Conv1D-gated-tanh Block.
It consists of a Conv1DCell and an Conv1D(kernel_size = 1) to integrate
information of the condition.
Notes
--------
It does not have parametric residual or skip connection.
Parameters
-----------
residual_channels : int
The feature size of the input. It is also the feature size of the
residual output and skip output.
condition_dim : int
The feature size of the condition.
filter_size : int
Kernel size of the internal convolution cells.
dilation :int
Dilation of the internal convolution cells.
"""
def __init__(self,
residual_channels: int,
condition_dim: int,
filter_size: Union[int, Sequence[int]],
dilation: int):
super(ResidualBlock, self).__init__()
dilated_channels = 2 * residual_channels
# following clarinet's implementation, we do not have parametric residual
# & skip connection.
_filter_size = filter_size[0] if isinstance(filter_size, (
list, tuple)) else filter_size
std = math.sqrt(1 / (_filter_size * residual_channels))
conv = Conv1dCell(
residual_channels,
dilated_channels,
filter_size,
dilation=dilation,
weight_attr=I.Normal(scale=std))
self.conv = nn.utils.weight_norm(conv)
std = math.sqrt(1 / condition_dim)
condition_proj = Conv1dCell(
condition_dim,
dilated_channels, (1, ),
weight_attr=I.Normal(scale=std))
self.condition_proj = nn.utils.weight_norm(condition_proj)
self.filter_size = filter_size
self.dilation = dilation
self.dilated_channels = dilated_channels
self.residual_channels = residual_channels
self.condition_dim = condition_dim
def forward(self, x, condition=None):
"""Forward pass of the ResidualBlock.
Parameters
-----------
x : Tensor [shape=(B, C, T)]
The input tensor.
condition : Tensor, optional [shape(B, C_cond, T)]
The condition.
It has been upsampled in time steps, so it has the same time steps
            as the input does (C_cond stands for the condition's channels).
Defaults to None.
Returns
-----------
residual : Tensor [shape=(B, C, T)]
The residual, which is used as the input to the next ResidualBlock.
skip_connection : Tensor [shape=(B, C, T)]
            The skip connection. This output is accumulated with that of
            other ResidualBlocks.
"""
h = x
# dilated conv
h = self.conv(h)
# condition
if condition is not None:
h += self.condition_proj(condition)
# gated tanh
content, gate = paddle.split(h, 2, axis=1)
z = F.sigmoid(gate) * paddle.tanh(content)
# projection
residual = paddle.scale(z + x, math.sqrt(.5))
skip_connection = z
return residual, skip_connection
def start_sequence(self):
"""Prepare the ResidualBlock to generate a new sequence.
Warnings
---------
This method should be called before calling ``add_input`` multiple times.
"""
self.conv.start_sequence()
self.condition_proj.start_sequence()
def add_input(self, x, condition=None):
"""Take a step input and return a step output.
        This method works similarly to ``forward`` but in a
``step-in-step-out`` fashion.
Parameters
----------
x : Tensor [shape=(B, C)]
Input for a step.
condition : Tensor, optional [shape=(B, C_cond)]
Condition for a step. Defaults to None.
Returns
----------
residual : Tensor [shape=(B, C)]
The residual for a step, which is used as the input to the next
layer of ResidualBlock.
skip_connection : Tensor [shape=(B, C)]
            The skip connection for a step. This output is accumulated with
            that of other ResidualBlocks.
"""
h = x
# dilated conv
h = self.conv.add_input(h)
# condition
if condition is not None:
h += self.condition_proj.add_input(condition)
# gated tanh
content, gate = paddle.split(h, 2, axis=1)
z = F.sigmoid(gate) * paddle.tanh(content)
# projection
residual = paddle.scale(z + x, math.sqrt(0.5))
skip_connection = z
return residual, skip_connection
class ResidualNet(nn.LayerList):
"""The residual network in wavenet.
It consists of ``n_stack`` stacks, each of which consists of ``n_loop``
ResidualBlocks.
Parameters
----------
n_stack : int
Number of stacks in the ``ResidualNet``.
n_loop : int
Number of ResidualBlocks in a stack.
residual_channels : int
Input feature size of each ``ResidualBlock``'s input.
condition_dim : int
Feature size of the condition.
filter_size : int
Kernel size of the internal ``Conv1dCell`` of each ``ResidualBlock``.
"""
def __init__(self,
n_stack: int,
n_loop: int,
residual_channels: int,
condition_dim: int,
filter_size: int):
super(ResidualNet, self).__init__()
# double the dilation at each layer in a stack
dilations = [2**i for i in range(n_loop)] * n_stack
self.context_size = 1 + sum(dilations)
for dilation in dilations:
self.append(
ResidualBlock(residual_channels, condition_dim, filter_size,
dilation))
def forward(self, x, condition=None):
"""Forward pass of ``ResidualNet``.
Parameters
----------
x : Tensor [shape=(B, C, T)]
The input.
condition : Tensor, optional [shape=(B, C_cond, T)]
The condition, it has been upsampled in time steps, so it has the
same time steps as the input does. Defaults to None.
Returns
--------
Tensor [shape=(B, C, T)]
The output.
"""
for i, func in enumerate(self):
x, skip = func(x, condition)
if i == 0:
skip_connections = skip
else:
skip_connections = paddle.scale(skip_connections + skip,
math.sqrt(0.5))
return skip_connections
def start_sequence(self):
"""Prepare the ResidualNet to generate a new sequence. This method
    should be called before calling ``add_input`` multiple times.
"""
for block in self:
block.start_sequence()
def add_input(self, x, condition=None):
"""Take a step input and return a step output.
        This method works similarly to ``forward`` but in a
``step-in-step-out`` fashion.
Parameters
----------
x : Tensor [shape=(B, C)]
Input for a step.
condition : Tensor, optional [shape=(B, C_cond)]
Condition for a step. Defaults to None.
Returns
----------
Tensor [shape=(B, C)]
The skip connection for a step. This output is accumulated with
that of other ResidualBlocks.
"""
for i, func in enumerate(self):
x, skip = func.add_input(x, condition)
if i == 0:
skip_connections = skip
else:
skip_connections = paddle.scale(skip_connections + skip,
math.sqrt(0.5))
return skip_connections
class WaveNet(nn.Layer):
"""Wavenet that transform upsampled mel spectrogram into waveform.
Parameters
-----------
n_stack : int
``n_stack`` for the internal ``ResidualNet``.
n_loop : int
``n_loop`` for the internal ``ResidualNet``.
residual_channels : int
Feature size of the input.
output_dim : int
        Feature size of the output. See ``loss_type`` for details.
condition_dim : int
Feature size of the condition (mel spectrogram bands).
filter_size : int
Kernel size of the internal ``ResidualNet``.
loss_type : str, optional ["mog" or "softmax"]
The output type and loss type of the model, by default "mog".
If "softmax", the model input is first quantized audio and the model
        outputs a discrete categorical distribution.
If "mog", the model input is audio in floating point format, and the
model outputs parameters for a mixture of gaussian distributions.
Namely, the weight, mean and log scale of each gaussian distribution.
        Thus, ``output_dim`` should be a multiple of 3.
log_scale_min : float, optional
Minimum value of the log scale of gaussian distributions, by default
-9.0.
        This is only used for computing loss when ``loss_type`` is "mog". If
the predicted log scale is less than -9.0, it is clipped at -9.0.
"""
def __init__(self, n_stack, n_loop, residual_channels, output_dim,
condition_dim, filter_size, loss_type, log_scale_min):
super(WaveNet, self).__init__()
if loss_type not in ["softmax", "mog"]:
raise ValueError("loss_type {} is not supported".format(loss_type))
if loss_type == "softmax":
self.embed = nn.Embedding(output_dim, residual_channels)
else:
if (output_dim % 3 != 0):
raise ValueError(
"with Mixture of Gaussians(mog) output, the output dim must be divisible by 3, but get {}".
format(output_dim))
self.embed = nn.utils.weight_norm(
nn.Linear(1, residual_channels), dim=1)
self.resnet = ResidualNet(n_stack, n_loop, residual_channels,
condition_dim, filter_size)
self.context_size = self.resnet.context_size
skip_channels = residual_channels # assume the same channel
self.proj1 = nn.utils.weight_norm(
nn.Linear(skip_channels, skip_channels), dim=1)
self.proj2 = nn.utils.weight_norm(
nn.Linear(skip_channels, skip_channels), dim=1)
# if loss_type is softmax, output_dim is n_vocab of waveform magnitude.
# if loss_type is mog, output_dim is 3 * gaussian, (weight, mean and stddev)
self.proj3 = nn.utils.weight_norm(
nn.Linear(skip_channels, output_dim), dim=1)
self.loss_type = loss_type
self.output_dim = output_dim
self.input_dim = 1
self.skip_channels = skip_channels
self.log_scale_min = log_scale_min
def forward(self, x, condition=None):
"""Forward pass of ``WaveNet``.
Parameters
-----------
x : Tensor [shape=(B, T)]
The input waveform.
condition : Tensor, optional [shape=(B, C_cond, T)]
the upsampled condition. Defaults to None.
Returns
-------
Tensor: [shape=(B, T, C_output)]
The parameters of the output distributions.
"""
# Causal Conv
if self.loss_type == "softmax":
x = paddle.clip(x, min=-1., max=0.99999)
x = quantize(x, self.output_dim)
x = self.embed(x) # (B, T, C)
else:
x = paddle.unsqueeze(x, -1) # (B, T, 1)
x = self.embed(x) # (B, T, C)
x = paddle.transpose(x, perm=[0, 2, 1]) # (B, C, T)
        # Residual & Skip-connection & linears
z = self.resnet(x, condition)
z = paddle.transpose(z, [0, 2, 1])
z = F.relu(self.proj2(F.relu(self.proj1(z))))
y = self.proj3(z)
return y
def start_sequence(self):
"""Prepare the WaveNet to generate a new sequence. This method should
        be called before calling ``add_input`` multiple times.
"""
self.resnet.start_sequence()
def add_input(self, x, condition=None):
"""Compute the output distribution (represented by its parameters) for
        a step. It works similarly to the ``forward`` method but in a
``step-in-step-out`` fashion.
Parameters
-----------
x : Tensor [shape=(B,)]
A step of the input waveform.
condition : Tensor, optional [shape=(B, C_cond)]
A step of the upsampled condition. Defaults to None.
Returns
--------
Tensor: [shape=(B, C_output)]
A step of the parameters of the output distributions.
"""
# Causal Conv
if self.loss_type == "softmax":
x = paddle.clip(x, min=-1., max=0.99999)
x = quantize(x, self.output_dim)
x = self.embed(x) # (B, C)
else:
x = paddle.unsqueeze(x, -1) # (B, 1)
x = self.embed(x) # (B, C)
        # Residual & Skip-connection & linears
z = self.resnet.add_input(x, condition)
z = F.relu(self.proj2(F.relu(self.proj1(z)))) # (B, C)
# Output
y = self.proj3(z)
return y
def compute_softmax_loss(self, y, t):
"""Compute the loss when output distributions are categorial
distributions.
Parameters
----------
y : Tensor [shape=(B, T, C_output)]
The logits of the output distributions.
t : Tensor [shape=(B, T)]
The target audio. The audio is first quantized then used as the
target.
Notes
-------
        Output distributions whose input contains padding are excluded from
        the loss computation, so the first ``context_size`` steps do not
        contribute to the loss.
Returns
--------
Tensor: [shape=(1,)]
The loss.
"""
# context size is not taken into account
y = y[:, self.context_size:, :]
t = t[:, self.context_size:]
t = paddle.clip(t, min=-1.0, max=0.99999)
quantized = quantize(t, n_bands=self.output_dim)
label = paddle.unsqueeze(quantized, -1)
loss = F.softmax_with_cross_entropy(y, label)
reduced_loss = paddle.mean(loss)
return reduced_loss
def sample_from_softmax(self, y):
"""Sample from the output distribution when the output distributions
        are categorical distributions.
Parameters
----------
y : Tensor [shape=(B, T, C_output)]
The logits of the output distributions.
Returns
--------
Tensor [shape=(B, T)]
Waveform sampled from the output distribution.
"""
# dequantize
        batch_size, time_steps, output_dim = y.shape
y = paddle.reshape(y, (batch_size * time_steps, output_dim))
prob = F.softmax(y)
quantized = paddle.fluid.layers.sampling_id(prob)
samples = dequantize(quantized, n_bands=self.output_dim)
samples = paddle.reshape(samples, (batch_size, -1))
return samples
def compute_mog_loss(self, y, t):
"""Compute the loss where output distributions is a mixture of
Gaussians distributions.
Parameters
-----------
y : Tensor [shape=(B, T, C_output)]
            The parameters of the output distribution. It is the concatenation
of 3 parts, the logits of every distribution, the mean of each
distribution and the log standard deviation of each distribution.
Each part's shape is (B, T, n_mixture), where ``n_mixture`` means
the number of Gaussians in the mixture.
t : Tensor [shape=(B, T)]
The target audio.
Notes
-------
        Output distributions whose input contains padding are excluded from
        the loss computation, so the first ``context_size`` steps do not
        contribute to the loss.
Returns
--------
Tensor: [shape=(1,)]
The loss.
"""
n_mixture = self.output_dim // 3
        # context size is not taken into account
y = y[:, self.context_size:, :]
t = t[:, self.context_size:]
w, mu, log_std = paddle.split(y, 3, axis=2)
# 100.0 is just a large float
log_std = paddle.clip(log_std, min=self.log_scale_min, max=100.)
inv_std = paddle.exp(-log_std)
p_mixture = F.softmax(w, -1)
t = paddle.unsqueeze(t, -1)
if n_mixture > 1:
# t = F.expand_as(t, log_std)
t = paddle.expand(t, [-1, -1, n_mixture])
x_std = inv_std * (t - mu)
exponent = paddle.exp(-0.5 * x_std * x_std)
pdf_x = 1.0 / math.sqrt(2.0 * math.pi) * inv_std * exponent
pdf_x = p_mixture * pdf_x
# pdf_x: [bs, len]
pdf_x = paddle.sum(pdf_x, -1)
per_sample_loss = -paddle.log(pdf_x + 1e-9)
loss = paddle.mean(per_sample_loss)
return loss
def sample_from_mog(self, y):
"""Sample from the output distribution when the output distribution
is a mixture of Gaussian distributions.
Parameters
------------
y : Tensor [shape=(B, T, C_output)]
            The parameters of the output distribution. It is the concatenation
of 3 parts, the logits of every distribution, the mean of each
distribution and the log standard deviation of each distribution.
Each part's shape is (B, T, n_mixture), where ``n_mixture`` means
the number of Gaussians in the mixture.
Returns
--------
Tensor: [shape=(B, T)]
Waveform sampled from the output distribution.
"""
batch_size, time_steps, output_dim = y.shape
n_mixture = output_dim // 3
w, mu, log_std = paddle.split(y, 3, -1)
reshaped_w = paddle.reshape(w, (batch_size * time_steps, n_mixture))
prob_ids = paddle.fluid.layers.sampling_id(F.softmax(reshaped_w))
prob_ids = paddle.reshape(prob_ids, (batch_size, time_steps))
prob_ids = prob_ids.numpy()
        # gather the mean and log_std of the Gaussian component sampled at each step
index = np.array([[[b, t, prob_ids[b, t]] for t in range(time_steps)]
for b in range(batch_size)]).astype("int32")
index_var = paddle.to_tensor(index)
mu_ = paddle.gather_nd(mu, index_var)
log_std_ = paddle.gather_nd(log_std, index_var)
dist = D.Normal(mu_, paddle.exp(log_std_))
samples = dist.sample(shape=[])
samples = paddle.clip(samples, min=-1., max=1.)
return samples
def sample(self, y):
"""Sample from the output distribution.
Parameters
----------
y : Tensor [shape=(B, T, C_output)]
            The parameters of the output distribution.
Returns
--------
Tensor [shape=(B, T)]
Waveform sampled from the output distribution.
"""
if self.loss_type == "softmax":
return self.sample_from_softmax(y)
else:
return self.sample_from_mog(y)
def loss(self, y, t):
"""Compute the loss given the output distribution and the target.
Parameters
----------
y : Tensor [shape=(B, T, C_output)]
The parameters of the output distribution.
t : Tensor [shape=(B, T)]
The target audio.
Returns
---------
Tensor: [shape=(1,)]
The loss.
"""
if self.loss_type == "softmax":
return self.compute_softmax_loss(y, t)
else:
return self.compute_mog_loss(y, t)
class ConditionalWaveNet(nn.Layer):
r"""Conditional Wavenet. An implementation of
`WaveNet: A Generative Model for Raw Audio <http://arxiv.org/abs/1609.03499>`_.
It contains an UpsampleNet as the encoder and a WaveNet as the decoder.
    It is an autoregressive model that generates raw audio.
Parameters
----------
upsample_factors : List[int]
The upsampling factors of the UpsampleNet.
n_stack : int
Number of convolution stacks in the WaveNet.
n_loop : int
Number of convolution layers in a convolution stack.
Convolution layers in a stack have exponentially growing dilations,
        from 1 to :math:`2^{n_{loop} - 1}`.
residual_channels : int
Feature size of each ResidualBlocks.
output_dim : int
Feature size of the output. See ``loss_type`` for details.
n_mels : int
The number of bands of mel spectrogram.
filter_size : int, optional
Convolution kernel size of each ResidualBlock, by default 2.
loss_type : str, optional ["mog" or "softmax"]
The output type and loss type of the model, by default "mog".
If "softmax", the model input should be quantized audio and the model
        outputs a discrete categorical distribution.
If "mog", the model input is audio in floating point format, and the
model outputs parameters for a mixture of gaussian distributions.
Namely, the weight, mean and logscale of each gaussian distribution.
        Thus, ``output_dim`` should be a multiple of 3.
log_scale_min : float, optional
Minimum value of the log scale of gaussian distributions, by default
-9.0.
        This is only used for computing loss when ``loss_type`` is "mog". If
the predicted log scale is less than -9.0, it is clipped at -9.0.
"""
def __init__(self,
upsample_factors: List[int],
n_stack: int,
n_loop: int,
residual_channels: int,
output_dim: int,
n_mels: int,
filter_size: int=2,
loss_type: str="mog",
log_scale_min: float=-9.0):
super(ConditionalWaveNet, self).__init__()
self.encoder = UpsampleNet(upsample_factors)
self.decoder = WaveNet(
n_stack=n_stack,
n_loop=n_loop,
residual_channels=residual_channels,
output_dim=output_dim,
condition_dim=n_mels,
filter_size=filter_size,
loss_type=loss_type,
log_scale_min=log_scale_min)
def forward(self, audio, mel, audio_start):
"""Compute the output distribution given the mel spectrogram and the input(for teacher force training).
Parameters
-----------
audio : Tensor [shape=(B, T_audio)]
ground truth waveform, used for teacher force training.
        mel : Tensor [shape=(B, F, T_mel)]
Mel spectrogram. Note that it is the spectrogram for the whole
utterance.
audio_start : Tensor [shape=(B,), dtype: int]
Audio slices' start positions for each utterance.
Returns
----------
        Tensor [shape=(B, T_audio - 1, C_output)]
            Parameters for the output distribution, where ``C_output`` is the
            ``output_dim`` of the decoder.
"""
audio_length = audio.shape[1] # audio clip's length
condition = self.encoder(mel)
condition_slice = crop(condition, audio_start, audio_length)
# shifting 1 step
audio = audio[:, :-1]
condition_slice = condition_slice[:, :, 1:]
y = self.decoder(audio, condition_slice)
return y
def loss(self, y, t):
"""Compute loss with respect to the output distribution and the target
audio.
Parameters
-----------
y : Tensor [shape=(B, T - 1, C_output)]
Parameters of the output distribution.
        t : Tensor [shape=(B, T)]
target waveform.
Returns
--------
Tensor: [shape=(1,)]
            The loss.
"""
t = t[:, 1:]
loss = self.decoder.loss(y, t)
return loss
def sample(self, y):
"""Sample from the output distribution.
Parameters
-----------
y : Tensor [shape=(B, T, C_output)]
Parameters of the output distribution.
Returns
--------
Tensor [shape=(B, T)]
Sampled waveform from the output distribution.
"""
samples = self.decoder.sample(y)
return samples
@paddle.no_grad()
def infer(self, mel):
r"""Synthesize waveform from mel spectrogram.
Parameters
-----------
mel : Tensor [shape=(B, F, T)]
            The condition (the mel spectrogram here).
Returns
-----------
        Tensor [shape=(B, T \* upscale_factor)]
            Synthesized waveform, where ``upscale_factor`` is the
            ``upscale_factor`` of the encoder ``UpsampleNet``.
"""
condition = self.encoder(mel)
batch_size, _, time_steps = condition.shape
samples = []
self.decoder.start_sequence()
x_t = paddle.zeros((batch_size, ), dtype=mel.dtype)
for i in trange(time_steps):
c_t = condition[:, :, i] # (B, C)
y_t = self.decoder.add_input(x_t, c_t) #(B, C)
y_t = paddle.unsqueeze(y_t, 1)
x_t = self.sample(y_t) # (B, 1)
x_t = paddle.squeeze(x_t, 1) #(B,)
samples.append(x_t)
samples = paddle.stack(samples, -1)
return samples
@paddle.no_grad()
def predict(self, mel):
r"""Synthesize audio from mel spectrogram.
The output and input are numpy arrays without batch.
Parameters
----------
mel : np.ndarray [shape=(C, T)]
Mel spectrogram of an utterance.
Returns
-------
        np.ndarray [shape=(T \* upsample_factor,)]
            The synthesized waveform of an utterance.
"""
mel = paddle.to_tensor(mel)
mel = paddle.unsqueeze(mel, 0)
audio = self.infer(mel)
audio = audio[0].numpy()
return audio
@classmethod
def from_pretrained(cls, config, checkpoint_path):
"""Build a ConditionalWaveNet model from a pretrained model.
Parameters
----------
config: yacs.config.CfgNode
model configs
checkpoint_path: Path or str
the path of pretrained model checkpoint, without extension name
Returns
-------
ConditionalWaveNet
The model built from pretrained result.
"""
model = cls(upsample_factors=config.model.upsample_factors,
n_stack=config.model.n_stack,
n_loop=config.model.n_loop,
residual_channels=config.model.residual_channels,
output_dim=config.model.output_dim,
n_mels=config.data.n_mels,
filter_size=config.model.filter_size,
loss_type=config.model.loss_type,
log_scale_min=config.model.log_scale_min)
layer_tools.summary(model)
checkpoint.load_parameters(model, checkpoint_path=checkpoint_path)
return model
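# --- Usage sketch (added; not part of the original module) ---
# A minimal, hedged example of synthesizing audio with a freshly constructed
# ConditionalWaveNet. Every hyperparameter below is an illustrative assumption,
# not a value taken from any released configuration.
def _synthesis_sketch():
    import numpy as np

    model = ConditionalWaveNet(
        upsample_factors=[16, 16],  # assumed: 256 audio samples per mel frame
        n_stack=2,
        n_loop=4,
        residual_channels=64,
        output_dim=30,  # mog output: 10 Gaussians x (weight, mean, log scale)
        n_mels=80,
        filter_size=2,
        loss_type="mog",
        log_scale_min=-9.0)
    model.eval()
    mel = np.random.randn(80, 20).astype("float32")  # (n_mels, T_mel) dummy input
    audio = model.predict(mel)  # autoregressive loop; returns a 1-D waveform
    print(audio.shape)  # (20 * 16 * 16,)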
5ff67231767a550306fa1eed298e972631db8363 | 1,407 | py | Python | 06/TemplateMaching.py | mastnk/GCIP | 1d1ec23388629f145a021efd69797aa18b3891e3 | ["MIT"]
import sys
import cv2 # it is necessary to use cv2 library
import numpy as np
# https://docs.opencv.org/4.5.2/d4/dc6/tutorial_py_template_matching.html
def main( input_filename, template_filename ):
img = cv2.imread(input_filename,0)
template = cv2.imread(template_filename,0)
w, h = template.shape[::-1]
    # choose a method
# https://docs.opencv.org/4.5.2/df/dfb/group__imgproc__object.html#ga3a7850640f1fe1f58fe91a2d7583695d
method = 'TM_SQDIFF' #SSD
# method = 'TM_SQDIFF_NORMED'#Normalized SSD
# method = 'TM_CCORR_NORMED' #NCC
# method = 'TM_CCOEFF_NORMED'#ZNCC
# Apply template Matching
    res = cv2.matchTemplate(img, template, getattr(cv2, method))
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in ['TM_SQDIFF', 'TM_SQDIFF_NORMED']:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(img,top_left, bottom_right, 255, 2)
cv2.imwrite( method+'_img.png', img )
res = res / np.max(res) * 255
res = res.clip(0,255).astype('uint8')
cv2.imwrite( method+'_res.png', res )
if( __name__ == '__main__' ):
if( len(sys.argv) >= 3 ):
main( sys.argv[1], sys.argv[2] )
else:
        print( 'usage: python '+sys.argv[0]+' input_filename template_filename' )
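# --- Usage note (added; the file names below are hypothetical placeholders) ---
# Any pair of grayscale-readable images works, where the second image is a
# (near-)crop of the first. With the default method the script writes:
#   TM_SQDIFF_img.png  - the input with the best match outlined
#   TM_SQDIFF_res.png  - the normalized response map
#
#   python TemplateMaching.py scene.png template.png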
5ff692081eaaca772a3292cf14f5309c58c43958 | 516 | py | Python | contest/tenka1-pbc2019/C_Stones.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | ["MIT"]
n = int(input())
s = input()
bcnt = [0]*n
wcnt = [0]*n
result = 2 * 10 ** 5
# prefix sums
if s[0] == "#":
bcnt[0] = 1
if s[n-1] == ".":
wcnt[n-1] = 1
for i in range(1,n):
bcnt[i] = bcnt[i-1]
if s[i] == "#":
bcnt[i] += 1
for i in range(n-2,-1,-1):
wcnt[i] = wcnt[i+1]
if s[i] == ".":
wcnt[i] += 1
# find the minimum cost
for i in range(0,n-1):
result = min(bcnt[i] + wcnt[i+1],result)
result = min(result,bcnt[n-1])
result = min(result,wcnt[0])
print(result)
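# --- Worked example (added for clarity; input values are illustrative) ---
# For n = 3, s = "#.#": the prefix '#' counts are bcnt = [1, 1, 2] and the
# suffix '.' counts are wcnt = [1, 1, 0]. The cheapest way to reach the target
# form "...###" is the split at i = 1 with cost bcnt[1] + wcnt[2] = 1, which
# ties with repainting every stone '#' (wcnt[0] = 1), so the program prints 1.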
5ff9658511513524516ba459b859ca485a53d2a3 | 9,413 | py | Python | scripts/myclient.py | Anant16/Networks_Project | a0f15bde4b2625a5ffe382da958f04d4538d23d2 | ["MIT"]
#!/usr/bin/env python3
"""Script for Tkinter GUI chat client."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
from tkinter import filedialog, Tk
import os
import time
def recv_file():
print("file request from ")
fname = client_socket.recv(BUFSIZ).decode("utf8")
print ("recieving file " + fname )
fsize = client_socket.recv(BUFSIZ)
fsize = int(fsize)
data_len = 0
print("fsize: {}".format(fsize))
local_file = "../received_files/" + fname
with open(local_file, 'wb') as f:
print ('opened file')
while data_len<fsize:
data = client_socket.recv(BUFSIZ)
if not data:
break
data_len += len(data)
f.write(data)
print("Done writing file at client")
return fname, fsize
def private_recv_file(pclient_socket):
print("file request from ")
fname = pclient_socket.recv(BUFSIZ).decode("utf8")
print ("recieving file " + fname )
fsize = pclient_socket.recv(BUFSIZ)
fsize = int(fsize)
data_len = 0
print("fsize: {}".format(fsize))
local_file = "../received_files/" + fname
with open(local_file, 'wb') as f:
print ('opened file')
while data_len<fsize:
data = pclient_socket.recv(BUFSIZ)
if not data:
break
data_len += len(data)
f.write(data)
print("Done writing file at client")
return fname, fsize
def send_file():
fpath = filedialog.askopenfilename(initialdir = "/",title = "Select file")
fname = fpath.split('/')[-1]
fsize = os.path.getsize(fpath)
client_socket.send(bytes('{file}', "utf8"))
time.sleep(0.5)
client_socket.send(bytes(fname, "utf8"))
time.sleep(0.5)
client_socket.send(bytes(str(fsize), "utf8"))
time.sleep(0.5)
with open(fpath, 'rb') as f:
while True:
data = f.read(BUFSIZ)
if not data:
break
client_socket.sendall(data)
print("File sent to server")
time.sleep(0.5)
def private_send_file(pclient_socket):
fpath = filedialog.askopenfilename(initialdir = "/",title = "Select file")
fname = fpath.split('/')[-1]
fsize = os.path.getsize(fpath)
pclient_socket.send(bytes('{file}', "utf8"))
time.sleep(0.5)
pclient_socket.send(bytes(fname, "utf8"))
time.sleep(0.5)
pclient_socket.send(bytes(str(fsize), "utf8"))
time.sleep(0.5)
with open(fpath, 'rb') as f:
while True:
data = f.read(BUFSIZ)
if not data:
break
pclient_socket.sendall(data)
print("File sent to server")
time.sleep(0.5)
def private_receive(pmsg_list, pclient_socket):
"""Handles receiving of messages."""
# pmsg_list = ptop.messages_frame.msg_list
while True:
try:
msg = pclient_socket.recv(BUFSIZ)
if msg == bytes("{file}", "utf8"):
pmsg_list.insert(tkinter.END, "Receiving File")
fname, fsize = private_recv_file(pclient_socket)
pmsg_list.insert(tkinter.END, "File Recieved")
elif msg == bytes("{quit}", "utf8"):
break
else:
msg = msg.decode('utf8')
pmsg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def receive():
"""Handles receiving of messages."""
buttons_frame = tkinter.Frame(top)
while True:
try:
msg = client_socket.recv(BUFSIZ).decode("utf8")
# print(msg)
if msg == '{quit}':
break
elif '{prequest}' in msg[0:12]:
name = msg[11:]
handle_connection_request(name)
elif '{name}' in msg[0:6]:
print(msg)
uname.insert(tkinter.END, msg[7:])
elif '{namelist}' in msg[0:12]:
nlist = msg.split('_')[1]
name_list = nlist.split(',')[1:]
print(name_list)
buttons_frame.destroy()
buttons_frame = tkinter.Frame(top)
for name in name_list:
private_button = tkinter.Button(buttons_frame, text=name, command=lambda user=name: create_private(user))
private_button.pack(side=tkinter.LEFT)
buttons_frame.pack(side=tkinter.LEFT)
else:
msg_list.insert(tkinter.END, msg)
except OSError: # Possibly client has left the chat.
break
def private_send(client_socket_no, pmy_msg, pmsg_list, event=None): # event is passed by binders.
"""Handles sending of messages."""
print("socket")
print(client_socket_no)
print(pmy_msg)
print(pmsg_list)
msg = pmy_msg.get()
pmy_msg.delete(0, 100) # Clears input field.
print("message sent is: " + msg)
try:
client_socket_no.send(bytes(msg, "utf8"))
except BrokenPipeError:
error_msg = "Unable to send"
pmsg_list.insert(tkinter.END, error_msg)
if msg == "{quit}":
client_socket_no.close()
top.quit()
def send(event=None): # event is passed by binders.
"""Handles sending of messages."""
print("socket")
print(client_socket)
msg = my_msg.get()
my_msg.set("") # Clears input field.
try:
client_socket.send(bytes(msg, "utf8"))
except BrokenPipeError:
error_msg = "Unable to send"
msg_list.insert(tkinter.END, error_msg)
if msg == "{quit}":
client_socket.close()
top.quit()
def create_private(name):
print("create_private")
print(name)
new_name = uname.get('1.0', tkinter.END) + '_' + name
new_name = new_name.replace('\n', '')
print(new_name)
Thread(target=private_client, args=(new_name,)).start()
def private_client(name):
pclient_socket = socket(AF_INET, SOCK_STREAM)
pclient_socket.connect(ADDR)
pclient_socket.send(bytes(name, "utf8"))
ptop = tkinter.Tk()
ptop.title("Private Chat - " + uname.get('1.0', tkinter.END))
messages_frame = tkinter.Frame(ptop)
my_msg = tkinter.StringVar() # For the messages to be sent.
# my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
print(my_msg)
entry_field = tkinter.Entry(ptop, textvariable=my_msg)
entry_field.bind("<Return>", lambda event, temp = pclient_socket: private_send(temp, entry_field, msg_list))
entry_field.pack()
send_button = tkinter.Button(ptop, text="Send", command=lambda: private_send(pclient_socket, entry_field, msg_list))
send_button.pack()
send_file_button = tkinter.Button(ptop, text="Send File", command= lambda: private_send_file(pclient_socket))
send_file_button.pack()
receive_thread = Thread(target=private_receive, args=(msg_list, pclient_socket,))
receive_thread.start()
ptop.mainloop() # Starts GUI execution.
def handle_connection_request(name):
new_name = uname.get('1.0', tkinter.END) + '_' + name + '_'
new_name = new_name.replace('\n', '')
Thread(target=private_client, args=(new_name,)).start()
# def on_closing(event=None):
# """This function is to be called when the window is closed."""
# my_msg.set("{quit}")
# try:
# send()
# except BrokenPipeError:
# print("BrokenPipeError")
# top.quit()
#----Now comes the sockets part----
HOST = input('Enter host: ')
PORT = input('Enter port: ')
if not PORT:
PORT = 35000
else:
PORT = int(PORT)
if not HOST:
HOST = '127.0.0.1'
BUFSIZ = 1024
ADDR = (HOST, PORT)
client_socket = socket(AF_INET, SOCK_STREAM)
client_socket.connect(ADDR)
top = tkinter.Tk()
top.title("Group Chat")
uname = tkinter.Text(top)
# uname.pack()
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar() # For the messages to be sent.
# my_msg.set("Type your messages here.")
scrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.
# Following will contain the messages.
msg_list = tkinter.Listbox(messages_frame, height=15, width=50, yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg)
entry_field.bind("<Return>", send)
entry_field.pack()
send_button = tkinter.Button(top, text="Send", command=send)
send_button.pack()
send_file_button = tkinter.Button(top, text="Send File", command=send_file)
send_file_button.pack()
# top.protocol("WM_DELETE_WINDOW", on_closing)
# #----Now comes the sockets part----
# HOST = input('Enter host: ')
# PORT = input('Enter port: ')
# if not PORT:
# PORT = 33000
# else:
# PORT = int(PORT)
# BUFSIZ = 1024
# ADDR = (HOST, PORT)
# client_socket = socket(AF_INET, SOCK_STREAM)
# client_socket.connect(ADDR)
receive_thread = Thread(target=receive)
receive_thread.start()
top.mainloop() # Starts GUI execution.
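# --- Protocol summary (comments added for clarity; derived from the handlers above) ---
# Chat text and control tokens share one socket. The tokens used are:
#   {file}      the peer is about to stream a file (name, size, then raw bytes)
#   {quit}      close the connection and tear down the GUI
#   {prequest}  the server relays a private-chat request from another user
#   {name}      the server assigns this client's display name
#   {namelist}  the server broadcasts the list of connected users, from which
#               the per-user private-chat buttons are rebuilt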
5ffb47974af4fe5ac023573e2b360b56ebf1ffbc | 1,689 | py | Python | issues/i10_parent.py | Raph-xyz/wexpect | c1e81b3a69d4d3821cb01b61bd5297b51a24539f | ["MIT"] | stars: 52 (2019-04-24 to 2022-03-08) | issues: 51 (2019-05-13 to 2021-12-15) | forks: 20 (2019-07-15 to 2022-03-27)
import wexpect
import time
import sys
import os
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, here)
from long_printer import puskas_wiki
print(wexpect.__version__)
# With quotes (C:\Program Files\Python37\python.exe needs quotes)
python_executable = '"' + sys.executable + '" '
child_script = here + '\\long_printer.py'
def main():
longPrinter = python_executable + ' ' + child_script
prompt = 'puskas> '
# Start the child process
p = wexpect.spawn(longPrinter)
# Wait for prompt
p.expect(prompt)
try:
for i in range(10):
print('.', end='')
p.sendline('0')
p.expect(prompt)
if p.before.splitlines()[1] != puskas_wiki[0]:
print(p.before.splitlines()[1])
raise Exception()
p.sendline('all')
p.expect(prompt)
for a,b in zip(p.before.splitlines()[1:], puskas_wiki):
if a!=b:
print(a)
print(b)
raise Exception()
for j, paragraph in enumerate(puskas_wiki):
p.sendline(str(j))
p.expect(prompt)
if p.before.splitlines()[1] != paragraph:
print(p.before.splitlines()[1])
print(i)
print(j)
print(paragraph)
raise Exception()
except:
p.interact()
time.sleep(5)
else:
print('')
print('[PASS]')
main()
5ffcd7c87c77f89439cef7d88357fcf225e1b8ea | 3,694 | py | Python | sir_sampler.py | bveitch/EpidPy | 455cd67afa2efbb774300115abb5fc7d4600b37d | ["BSD-3-Clause"]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 10:17:51 2020
description : Sampling methods from sir space to data space
author : bveitch
version : 1.0
project : EpidPy (epidemic modelling in python)
Usage:
Data fitting in least squares fit
Also acts on labels for data description and fitting
randSampler: adds random noise/sampling bias to synthetics
"""
import numpy as np
def labels_from_type(sir_type,nstage=0):
if(sir_type == 'sir'):
return list(sir_type)
elif(sir_type == 'seir'):
return list(sir_type)
elif(sir_type == 'sirs'):
return list(sir_type)
elif(sir_type == 'si2r'):
return ['s','i1','i2','r']
elif(sir_type == 'sir_nstage'):
assert(nstage>0),'nstage must be >0'
labels=['s']
        labels.extend('i' + str(istage) for istage in range(nstage))
labels.append('r')
return labels
else:
raise ValueError("Invalid modelling type")
class Sampler:
def __init__(self, nsize,intervals):
for ival in intervals:
assert(ival[1] > ival[0]),'interval must be increasing'
self.insize = nsize
self.intervals = intervals
self.outsize = len(intervals)
def F(self,data):
[nt,nsize]=data.shape
        assert(nsize == self.insize), "data size doesn't match insize"
data_samp=np.zeros((nt,self.outsize))
icount=0
for ival in self.intervals:
isum = np.zeros(nt)
for i in range(ival[0],ival[1]):
isum += data[:,i]
# isum=np.sum(data[:,ival[0]:ival[1]],1)
data_samp[:,icount]=isum
icount+=1
return data_samp
def Ft(self,data_samp):
[nt,isize]=data_samp.shape
        assert(isize == self.outsize), "data sampled size doesn't match outsize"
data=np.zeros((nt,self.insize))
icount=0
for ival in self.intervals:
isum = data_samp[:,icount]
# data[:,ival[0]:ival[1]] +=isum
for i in range(ival[0],ival[1]):
data[:,i]+=isum
icount+=1
return data
def Flabels(self,labels):
sampled_labels=[]
for ival in self.intervals:
if(ival[1] == ival[0]+1):
l0=labels[ival[0]]
sampled_labels.append(l0)
else:
l0=labels[ival[0] ]
l1=labels[ival[1]-1]
sampled_labels.append(l0 + '-' + l1)
return sampled_labels
def Flabels_from_type(self,sir_type,nstage=0):
labels=labels_from_type(sir_type,nstage)
return self.Flabels(labels)
def randSampler(data,pTT,pTF,Ntests):
def draw_samples(pT,n,Ntests):
np.random.seed(0)
s = np.random.normal(0, 1,n)
mu = Ntests*pT
sigma = np.sqrt(Ntests*pT*(1-pT))
data_samp = (sigma*s+mu)/Ntests
return data_samp
if(len(data.shape)==1):
n=data.shape[0]
pT=pTT*data+pTF*(1-data)
data_samp=draw_samples(pT,n,Ntests)
return data_samp
else:
[n,m]=data.shape
        assert (m == len(pTT)), "data size doesn't match size pTT"
pT=np.zeros(n)
for i in range(m):
pT=pTT[i]*data[:,i]+pTF*(1-np.sum(data,1))
data_samp=draw_samples(pT,n,Ntests)
return data_samp
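# --- Usage sketch (added; not part of the original module) ---
# A minimal example of Sampler acting on a 3-stage infection model whose
# compartments are (s, i0, i1, i2, r); the intervals below are illustrative.
if __name__ == "__main__":
    sampler = Sampler(5, [(0, 1), (1, 4), (4, 5)])  # s | all i stages | r
    data = np.ones((10, 5))  # nt = 10 time steps, 5 compartments
    sampled = sampler.F(data)  # shape (10, 3); middle column sums i0 + i1 + i2
    print(sampled[0])  # -> [1. 3. 1.]
    print(sampler.Flabels_from_type('sir_nstage', nstage=3))  # ['s', 'i0-i2', 'r']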
2704c694bb778a6cd16b19f2de02eece854903d3 | 4,491 | py | Python | reflect/menus.py | stefanthaler/daily-reflection | 2aba4873742b205f75a71b2ce382d126153f14c5 | ["MIT"]
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit import PromptSession,prompt,print_formatted_text
from reflect.style import *
import os
from prompt_toolkit.shortcuts import clear
from prompt_toolkit.formatted_text import FormattedText
def get_menu(menu_items):
message = []
for k in menu_items:
if "title" == k:
message.append( ('class:title', menu_items[k]["text"]+"\n") )
message.append(('class:separator',"="*20+"\n\n") )
continue
if "==" in k:
message.append( ('class:separator', "="*20+"\n" ) )
continue
if "--" in k:
message.append( ('class:separator', "-"*20+"\n" ) )
continue
if not menu_items[k]: # key only
continue
m=menu_items[k]
message.append( ('class:key', "(%s) " %k) )
message.append( ('class:menu_item' , "%s \n"%m["text"]) )
return message
def key_press_menu(menu_items, loop_display=""):
bindings = KeyBindings()
message = get_menu(menu_items)
global key_pressed
key_pressed = ""
@bindings.add('c-m')
@bindings.add('up')
@bindings.add('left')
@bindings.add('right')
@bindings.add('escape', eager=True)
@bindings.add('down')
@bindings.add('<any>')
def _(event):
event.app.exit()
global key_pressed
key_pressed=str(event.key_sequence[0].key)
session = PromptSession()
loop = True
current_key = 0
key_positions = []
for i, m in enumerate(message):
if m[0]=="class:key":
key_positions.append(i)
while loop:
if len(loop_display)>0:
print_formatted_text(FormattedText(loop_display), style=style)
for k in key_positions:
if not message[k]: # ignore menu items that don't have a message binding
continue
message[k]=('class:key', message[k][1])
message[k+1]=('class:menu_item', message[k+1][1])
message[key_positions[current_key]]=('class:current_key',message[key_positions[current_key]][1])
message[key_positions[current_key]+1]=('class:current_key',message[key_positions[current_key]+1][1])
session.prompt(message, style=style, key_bindings=bindings)
if str(key_pressed)=="Keys.Up":
if current_key>0:
current_key=current_key-1
clear()
continue
if str(key_pressed)=="Keys.Down":
if current_key<len(key_positions)-1:
current_key=current_key+1
clear()
continue
if str(key_pressed)=="Keys.ControlM": # enter has been pressed
key_pressed=message[key_positions[current_key]][1].split(")")[0][1:]
if str(key_pressed)=="Keys.Escape": # enter has been pressed
key_pressed="escape"
loop=False
return key_pressed
if not (str(key_pressed) in list(menu_items.keys())):
clear()
continue
else:
loop=False
break
return key_pressed
def time_menu():
items = {
"title":{"text":"Which type of reflection do you want to do?"},
"m":{"text":"Morning"},
"e":{"text":"Evening"},
"b":{"text":"Back"}
}
return items[key_press_menu(items)]["text"]
def formatted_questions(questions, title):
items = {
"title":{"text":title}
}
key_codes = [c for c in "123456789abcdefghijklmnoprstuvwxyz"]
for i,q in enumerate(questions):
items[key_codes[i]]={"text":q["text"]}
items["--1"]={}
items["q"]={"text":"Abort"}
return get_menu(items)
def menu_from_questions(questions, title, loop_display="", menu_generator=key_press_menu):
items = {
"title":{"text":title}
}
key_codes = [c for c in "123456789abcdefghijklmnoprstuvwxyz"]
for i,q in enumerate(questions):
items[key_codes[i]]={"text":q["text"]}
items["--1"]={}
items["q"]={"text":"Abort"}
return menu_generator(items,loop_display)
def browse_menu(day_string):
items = {
"title":{"text":"What do you want to do?"},
"n":{"text":"Next day", "handler": quit},
"p":{"text":"Previous day", "handler": quit },
"g":{"text":"Goto day", "handler": quit },
"b":{"text":"Back to main menu", "handler": quit },
"Keys.Left":False,
"Keys.Right":False
}
return key_press_menu(items,day_string)
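# --- Usage sketch (added; not part of the original module) ---
# Drives the arrow-key menu loop directly. Note that time_menu() assumes one of
# its listed keys is chosen; pressing Escape makes key_press_menu return
# "escape", which is not a key in time_menu's item dict.
if __name__ == '__main__':
    choice = time_menu()  # blocks until m/e/b is pressed (or arrows + Enter)
    print('You picked:', choice)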
27059812bfb36013c718b5579e1caa2896f39e29 | 636 | py | Python | matrix/main.py | oleggator/python-advanced | 0e951fe6e7ae74129536ded5c02a9f1ea3337a7d | ["MIT"] | issues: 2 (2019-04-28 to 2019-10-29)
from matrix import Matrix as CMatrix
from py_matrix import PyMatrix
def test(matrix_class, rows=3, columns=2):
m = matrix_class([
[column for column in range(row * columns, row * columns + columns)]
for row in range(rows)
])
# # or
# m = matrix_class([
# [1, 2],
# [3, 4],
# [5, 6],
# ])
n = 2 * m # multiply matrix by integer
n = n / 2 # divide matrix by integer
o = m + m # sum matrices
p = m.transpose() # transpose
q = m @ p # multiply matrices
    r = 2 in m  # membership test
def main():
test(CMatrix)
test(PyMatrix)
if __name__ == '__main__':
main()
27085e60eb21632a2fb266c8e1fc048c12001579 | 3,947 | py | Python | horizon_telemetry/compute/views.py | simonpasquier/horizon-telemetry-dashboard | f284ec6ae8b1932079852fe3e9ab4b7a27ff58d7 | ["Apache-2.0"] | issues: 2 (2017-10-10 to 2017-10-19)
import datetime
import json
from django.conf import settings
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
from horizon import exceptions, tables
from horizon_telemetry.forms import DateForm
from horizon_telemetry.utils.influxdb_client import (get_host_usage_metrics,
get_host_cpu_metric,
get_host_disk_metric,
get_host_memory_metric,
get_host_network_metric)
from openstack_dashboard import api
from . import tables as project_tables
class AdminIndexView(tables.DataTableView):
table_class = project_tables.AdminHypervisorsTable
template_name = 'telemetry/compute/index.html'
def get_data(self):
hypervisors = []
try:
hypervisors = api.nova.hypervisor_list(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor information.'))
return hypervisors
def get_context_data(self, **kwargs):
context = super(AdminIndexView, self).get_context_data(**kwargs)
try:
context["stats"] = api.nova.hypervisor_stats(self.request)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve hypervisor statistics.'))
return context
class AdminDetailView(TemplateView):
template_name = 'telemetry/compute/detail.html'
def get_data(self):
pass
def get_context_data(self, **kwargs):
context = super(AdminDetailView, self).get_context_data(**kwargs)
today = datetime.date.today()
date_range = {
'start': self.request.GET.get("start", today - datetime.timedelta(1)),
'end': self.request.GET.get('end', today)
}
form = DateForm(date_range)
self.request.session.update(date_range)
# convert inputs to date objects
if not isinstance(date_range['start'], datetime.date):
date_range['start'] = datetime.datetime.strptime(date_range['start'], "%Y-%m-%d").date()
date_range['end'] = datetime.datetime.strptime(date_range['end'], "%Y-%m-%d").date()
hour_interval = (date_range['end'] - date_range['start']).total_seconds() / 3600
if hour_interval > 24:
context['tickFormat'] = "%x"
else:
context['tickFormat'] = "%H:%M"
context['dateform'] = form
node = context['node'] = context['hypervisor']
context['cpu_data'] = json.dumps(
get_host_cpu_metric(settings.ENVIRONMENT_LABEL, node,
date_range['start'], date_range['end'])
)
context['mem_data'] = json.dumps(
get_host_memory_metric(settings.ENVIRONMENT_LABEL, node,
date_range['start'], date_range['end'])
)
context['hdd_data'] = json.dumps(
get_host_disk_metric(settings.ENVIRONMENT_LABEL, node,
date_range['start'], date_range['end'])
)
context['net_data'] = json.dumps(
get_host_network_metric(settings.ENVIRONMENT_LABEL, node,
date_range['start'], date_range['end'],
getattr(settings, 'TELEMETRY_COMPUTE_INTERFACES', None))
)
return context
class DataView(TemplateView):
template_name = 'telemetry/dummy.html'
def get(self, *args, **kwargs):
data = get_host_usage_metrics(settings.ENVIRONMENT_LABEL,
self.kwargs.get('hypervisor'))
return HttpResponse(json.dumps(data), content_type='application/json')
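# --- Hypothetical URL wiring (added for illustration only) ---
# The dashboard's real urls.py is not included in this file, so everything
# below is an assumption; it only shows how the three views expect to receive
# the `hypervisor` keyword argument from the URL resolver.
#
# from django.conf.urls import url  # Django 1.x style, matching this codebase's era
# from horizon_telemetry.compute import views
#
# urlpatterns = [
#     url(r'^$', views.AdminIndexView.as_view(), name='index'),
#     url(r'^(?P<hypervisor>[^/]+)/$', views.AdminDetailView.as_view(), name='detail'),
#     url(r'^(?P<hypervisor>[^/]+)/data/$', views.DataView.as_view(), name='data'),
# ]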
270baa9c084cf98e0065fdca116072edd2dacd09 | 1,335 | py | Python | dz3/task3_5.py | EsipenkoAnna/PycharmProjects-EsipenkoAnna_2242 | 46fe55de36b7b1cfdeafcf685ea01c6220e16546 | ["MIT"]
import random
nouns = ["автомобиль", "лес", "огонь", "город", "дом"]
adverbs = ["сегодня", "вчера", "завтра", "позавчера", "ночью"]
adjectives = ["веселый", "яркий", "зеленый", "утопичный", "мягкий"]
def get_jokes(count: int) -> list:
    # build a list of phrases; each phrase contains three words drawn at random
    # from the nouns, adverbs and adjectives lists
list_out = []
for i in range(count):
list_out.extend([random.choice(nouns)+" "+random.choice(adverbs)+" "+random.choice(adjectives)])
return list_out
print(get_jokes(6))
def get_jokes_adv(count: int, Double: bool) -> list:
    # if the Double flag is False, each word is removed from its list after it
    # has been used, so it cannot be reused
    list_out_1 = []
    for a in range(count):
        if not Double:
nouns_w = random.choice(nouns)
adverbs_w = random.choice(adverbs)
adjectives_w = random.choice(adjectives)
list_out_1.extend([nouns_w+" "+adverbs_w+" "+adjectives_w])
nouns.remove(nouns_w)
adverbs.remove(adverbs_w)
adjectives.remove(adjectives_w)
else:
list_out_1.extend([random.choice(nouns) + " " + random.choice(adverbs) + " " + random.choice(adjectives)])
return list_out_1
print(get_jokes_adv(2,False))
print(nouns, adverbs, adjectives)  # check that the used words were removed
27155b9a924aec60a88449cb892d18dddf705986 | 5,837 | py | Python | bundled/linter/linter_server.py | luabud/vscode-pylint | 2297f1a6210bf93f7d9c083e32c59ed03664ca05 | ["MIT"]
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Implementation of linting support over LSP.
"""
import json
import pathlib
import sys
from typing import Dict, Sequence, Union
# Ensure that we can import LSP libraries, and other bundled linter libraries
sys.path.append(str(pathlib.Path(__file__).parent.parent / "libs"))
# pylint: disable=wrong-import-position,import-error
import utils
from pygls import lsp, server
from pygls.lsp import types
all_configurations = {
"name": "Pylint",
"module": "pylint",
"patterns": {
"default": {
"regex": "",
"args": ["--reports=n", "--output-format=json"],
"lineStartsAt1": True,
"columnStartsAt1": False,
"useStdin": True,
}
},
}
SETTINGS = {}
LINTER = {}
MAX_WORKERS = 5
LSP_SERVER = server.LanguageServer(max_workers=MAX_WORKERS)
def _get_severity(
symbol: str, code: str, code_type: str, severity: Dict[str, str]
) -> types.DiagnosticSeverity:
"""Converts severity provided by linter to LSP specific value."""
value = (
severity.get(symbol, None)
or severity.get(code, None)
or severity.get(code_type, "Error")
)
try:
return types.DiagnosticSeverity[value]
except KeyError:
pass
return types.DiagnosticSeverity.Error
def _parse_output(
content: str,
line_at_1: bool,
column_at_1: bool,
severity: Dict[str, str],
additional_offset: int = 0,
) -> Sequence[types.Diagnostic]:
"""Parses linter messages and return LSP diagnostic object for each message."""
diagnostics = []
line_offset = (1 if line_at_1 else 0) + additional_offset
col_offset = 1 if column_at_1 else 0
messages = json.loads(content)
for data in messages:
start = types.Position(
line=int(data["line"]) - line_offset,
character=int(data["column"]) - col_offset,
)
if data["endLine"] is not None:
end = types.Position(
line=int(data["endLine"]) - line_offset,
character=int(data["endColumn"]) - col_offset,
)
else:
end = start
diagnostic = types.Diagnostic(
range=types.Range(
start=start,
end=end,
),
message=data["message"],
severity=_get_severity(
data["symbol"], data["message-id"], data["type"], severity
),
code=f"{data['message-id']}:{ data['symbol']}",
source=LINTER["name"],
)
diagnostics.append(diagnostic)
return diagnostics
def _lint_and_publish_diagnostics(
params: Union[types.DidOpenTextDocumentParams, types.DidSaveTextDocumentParams]
) -> None:
"""Runs linter, processes the output, and publishes the diagnostics over LSP."""
document = LSP_SERVER.workspace.get_document(params.text_document.uri)
if utils.is_stdlib_file(document.path):
# Don't lint standard library python files.
# Publishing empty diagnostics clears the entry.
LSP_SERVER.publish_diagnostics(document.uri, [])
return
module = LINTER["module"]
use_stdin = LINTER["useStdin"]
use_path = len(SETTINGS["path"]) > 0
argv = SETTINGS["path"] if use_path else [module]
argv += LINTER["args"] + SETTINGS["args"]
argv += ["--from-stdin", document.path] if use_stdin else [document.path]
if use_path:
result = utils.run_path(argv, use_stdin, document.source)
else:
# This is needed to preserve sys.path, pylint modifies
# sys.path and that might not work for this scenario
# next time around.
with utils.SubstituteAttr(sys, "path", sys.path[:]):
result = utils.run_module(module, argv, use_stdin, document.source)
if result.stderr:
LSP_SERVER.show_message_log(result.stderr)
LSP_SERVER.show_message_log(f"{document.uri} :\r\n{result.stdout}")
diagnostics = _parse_output(
result.stdout,
LINTER["lineStartsAt1"],
LINTER["columnStartsAt1"],
SETTINGS["severity"],
)
LSP_SERVER.publish_diagnostics(document.uri, diagnostics)
@LSP_SERVER.feature(lsp.INITIALIZE)
def initialize(params: types.InitializeParams):
"""LSP handler for initialize request."""
paths = "\r\n".join(sys.path)
LSP_SERVER.show_message_log(f"sys.path used to run Linter:\r\n{paths}")
# First get workspace settings to know if we are using linter
# module or binary.
global SETTINGS # pylint: disable=global-statement
SETTINGS = params.initialization_options["settings"]
global LINTER # pylint: disable=global-statement
LINTER = utils.get_linter_options_by_version(
all_configurations,
SETTINGS["path"] if len(SETTINGS["path"]) > 0 else None,
)
@LSP_SERVER.feature(lsp.TEXT_DOCUMENT_DID_OPEN)
def did_open(_server: server.LanguageServer, params: types.DidOpenTextDocumentParams):
"""LSP handler for textDocument/didOpen request."""
_lint_and_publish_diagnostics(params)
@LSP_SERVER.feature(lsp.TEXT_DOCUMENT_DID_SAVE)
def did_save(_server: server.LanguageServer, params: types.DidSaveTextDocumentParams):
"""LSP handler for textDocument/didOpen request."""
_lint_and_publish_diagnostics(params)
@LSP_SERVER.feature(lsp.TEXT_DOCUMENT_DID_CLOSE)
def did_close(_server: server.LanguageServer, params: types.DidCloseTextDocumentParams):
"""LSP handler for textDocument/didClose request."""
# Publishing empty diagnostics to clear the entries for this file.
text_document = LSP_SERVER.workspace.get_document(params.text_document.uri)
LSP_SERVER.publish_diagnostics(text_document.uri, [])
if __name__ == "__main__":
LSP_SERVER.start_io()
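# --- Launch note (added for illustration) ---
# The server speaks LSP over stdio, so an editor client typically spawns it as
# a subprocess and passes its settings via initializationOptions; the
# `initialize` handler above expects a "settings" dict with "path", "args" and
# "severity" keys. The command line below is an assumption about deployment:
#
#   python bundled/linter/linter_server.py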
271c49cbaa02490060b18e56f8f7193420d20ee7 | 2,411 | py | Python | src/utils/roles.py | dciborow/SubscriptionPolicy | 100718bca552fb92edcb1867a94aba1f2d131edc | ["MIT"]
from .cmdline import CmdUtils
class AzRole:
def __init__(self):
self.subscription = None
self.id = None
self.name = None
self.principalId = None
self.principalName = None
self.principalType = None
self.roleDefinitionName = None
self.roleDefinitionId = None
self.scope = None
def get_delete_command(self):
command = "az role assignment delete --assignee {} --role {} --scope {} --subscription {}".format(
self.principalId,
self.roleDefinitionId,
self.scope,
self.subscription
)
return command
def delete(self):
command = self.get_delete_command()
command = command.split(' ')
print("Deleting {} role for {}".format(
self.principalType,
self.principalName
))
CmdUtils.get_command_output(command,False)
def _load_raw(self, az_role_json:dict):
for key in az_role_json:
setattr(self, key, az_role_json[key])
class AzRolesUtils:
@staticmethod
def get_sub_roles(sub_id, raw:bool = True):
output = CmdUtils.get_command_output(
[
"az",
"role",
"assignment",
"list",
"--include-classic-administrators",
"false",
"--subscription",
sub_id
]
)
if raw is True:
return output
return AzRolesUtils._convert_raw_roles(output, sub_id)
@staticmethod
def get_all_roles(sub_id : str, raw:bool = True):
output = CmdUtils.get_command_output(
[
"az",
"role",
"assignment",
"list",
"--all",
"--include-classic-administrators",
"false",
"--subscription",
sub_id
]
)
if raw is True:
return output
return AzRolesUtils._convert_raw_roles(output, sub_id)
@staticmethod
def _convert_raw_roles(raw_roles, sub_id):
return_list = []
for r in raw_roles:
cur_role = AzRole()
cur_role._load_raw(r)
cur_role.subscription = sub_id
return_list.append(cur_role)
return return_list
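# --- Usage sketch (added; not part of the original module) ---
# Requires a logged-in Azure CLI on PATH; the subscription id below is a
# hypothetical placeholder.
if __name__ == "__main__":
    sub = "00000000-0000-0000-0000-000000000000"  # placeholder subscription id
    for role in AzRolesUtils.get_all_roles(sub, raw=False):
        print(role.principalName, role.roleDefinitionName, role.scope)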
271fa52916c14e0243668b5ba4593d20238992fd | 3,996 | py | Python | ExCon/explainer.py | DarrenZhang01/ExCon | 2467c2fa8c0c52edaf54091d2bfecd132eeae594 | ["Apache-2.0"] | stars: 17 (2021-11-30 to 2022-01-16) | issues: 1 (2021-12-04 to 2021-12-05)
"""
A utility class for initializing different explainer objects.
"""
from torch import nn
import numpy as np
from captum.attr import DeepLift, IntegratedGradients, ShapleyValueSampling, LayerGradCam, Saliency
from captum.attr._utils.attribution import LayerAttribution
class Explainer(nn.Module):
def __init__(self, method, model_name, dataset, model, training):
super(Explainer, self).__init__()
self.model = model
self.explainer = None
if method == "DeepLift":
self.explainer = DeepLift(self.model)
elif method == "IntegratedGradients":
self.explainer = IntegratedGradients(self.model)
elif method == "ShapleyValueSampling":
self.explainer = ShapleyValueSampling(self.model)
elif method == "Saliency":
self.explainer = Saliency(self.model)
elif method == "GradCAM":
# Need to retrieve the `module` in the data parallel mode for multi-GPU processing.
if model_name == 'vgg16':
if dataset.startswith('cifar'):
self.explainer = LayerGradCam(self.model, self.model[0].features[21])
else:
self.explainer = LayerGradCam(self.model, self.model[0].features[30])
elif model_name == 'resnet56':
self.explainer = LayerGradCam(self.model, self.model[0].layer3[8].conv2)
elif model_name == 'resnet50':
if dataset.startswith('cifar'):
if "CE" in training:
self.explainer = LayerGradCam(self.model, self.model.module.encoder.layer2[3].conv3)
else:
self.explainer = LayerGradCam(self.model, self.model[0].layer2[3].conv3)
else:
if "CE" in training:
self.explainer = LayerGradCam(self.model, self.model.module.encoder.layer4[2].conv3)
else:
self.explainer = LayerGradCam(self.model, self.model[0].layer4[2].conv3)
elif model_name == 'resnet18':
if dataset.startswith('cifar'):
if "CE" in training:
self.explainer = LayerGradCam(self.model, self.model.module.encoder.layer2[1].conv2)
else:
self.explainer = LayerGradCam(self.model, self.model[0].layer2[1].conv2)
else:
if "CE" in training:
self.explainer = LayerGradCam(self.model, self.model.module.encoder.layer4[1].conv2)
else:
self.explainer = LayerGradCam(self.model, self.model[0].layer4[1].conv2)
self.method = method
if dataset in ['cifar10', 'cifar100', 'SVHN']:
self.img_size = (32, 32)
elif dataset == 'ImageNet':
self.img_size = (64, 64)
def attribute(self, input, target, omit_channel=True):
"""
omit_channel: bool, whether to omit the channel dimension in the explanations.
"""
importances = self.explainer.attribute(inputs=input, target=target)
if self.method == "GradCAM":
importances = LayerAttribution.interpolate(importances,
self.img_size,
interpolate_mode="bilinear")
            if not omit_channel:
importances = importances.repeat((1, 3, 1, 1))
        elif omit_channel:
            # Non-GradCAM attributions keep the channel dimension; sum it out
            # unless the caller asked for per-channel explanations.
            importances = importances.sum(dim=1)
importances = importances.squeeze().detach().cpu().numpy()
return importances
def normalize(self, importances):
importances = importances - np.min(importances, axis=(0, 1))
importances = importances / (np.max(importances, axis=(0, 1)) + 1e-8)
return importances
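# A minimal usage sketch (hypothetical model and tensors; "Saliency" avoids the
# model-specific layer lookups that GradCAM needs):
#
#   explainer = Explainer("Saliency", "resnet18", "cifar10", model, training="CE")
#   heatmaps = explainer.attribute(images, target=labels)  # numpy array
#   heatmaps = explainer.normalize(heatmaps)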
| 47.571429
| 108
| 0.567067
| 400
| 3,996
| 5.61
| 0.27
| 0.108289
| 0.122549
| 0.142157
| 0.371658
| 0.371658
| 0.371658
| 0.334225
| 0.314617
| 0.266488
| 0
| 0.026129
| 0.32958
| 3,996
| 83
| 109
| 48.144578
| 0.811497
| 0.056056
| 0
| 0.28169
| 0
| 0
| 0.0417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0
| 0.211268
| 0
| 0.295775
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2723629eca545ab7058f06d730db5b5baa33b30a
| 3,516
|
py
|
Python
|
gdrive.py
|
abwilf/Factorized
|
64e7d2a54bbfbc8b1c5a2130f2b941c376402fe6
|
[
"MIT"
] | null | null | null |
gdrive.py
|
abwilf/Factorized
|
64e7d2a54bbfbc8b1c5a2130f2b941c376402fe6
|
[
"MIT"
] | null | null | null |
gdrive.py
|
abwilf/Factorized
|
64e7d2a54bbfbc8b1c5a2130f2b941c376402fe6
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from apiclient.http import MediaFileUpload
import gdown
def gdrive_up(credentials_path, file_list, folder_id, token_path='/work/awilf/utils/gdrive_token.json'):
'''
credentials_path: json containing gdrive credentials of form {"installed":{"client_id":"<something>.apps.googleusercontent.com","project_id":"decisive-engine-<something>","auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://oauth2.googleapis.com/token","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs","client_secret":"<client_secret>","redirect_uris":["urn:ietf:wg:oauth:2.0:oob","http://localhost"]}}
file_list: full path of files to upload, e.g. ['/work/awilf/tonicnet.tar']
folder_id: id of folder you've already created in google drive (awilf@andrew account, for these credentials)
e.g.
gdrive_up('gdrive_credentials.json', ['hi.txt', 'yo.txt'], '1E1ub35TDJP59rlIqDBI9SLEncCEaI4aT')
note: if token_path does not exist, you will need to authenticate. here are the instructions
ON MAC: ssh -N -f -L localhost:8080:localhost:8080 awilf@taro
ON MAC (CHROME): go to link provided
'''
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/drive.metadata.readonly', 'https://www.googleapis.com/auth/drive.file']
creds = None
    if os.path.exists(token_path):  # reuse the cached token so we don't log in each time
creds = Credentials.from_authorized_user_file(token_path, SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(credentials_path, SCOPES)
creds = flow.run_local_server(port=8080)
with open(token_path, 'w') as token:
token.write(creds.to_json())
service = build('drive', 'v3', credentials=creds)
for name in file_list:
file_metadata = {
'name': name,
'parents': [folder_id]
}
media = MediaFileUpload(file_metadata['name'], resumable=True)
        service.files().create(body=file_metadata, media_body=media, fields='id').execute()
def gdrive_down(url, out_path=None):
'''
first, make sure file in url is available for anyone to view
url should be of one of these forms:
https://drive.google.com/file/d/195C6CoqMBYzteJIx-FFOsNFATvu5cr_z/view?usp=sharing
https://drive.google.com/uc?id=1eGj8DSau66NiklH30UIGab55cUWR_qw9
    out_path can be None, in which case the file is saved under its google drive name in ./; else, it is saved to out_path
'''
if 'uc?' not in url:
        file_id = url.split('/')[-2]
        url = f'https://drive.google.com/uc?id={file_id}'
gdown.download(url, out_path)
# gdrive_down('https://drive.google.com/file/d/1dAvxdsHWbtA1ZIh3Ex9DPn9Nemx9M1-L/view', out_path='/work/awilf/mfa/')
# gdrive_down('https://drive.google.com/file/d/1XEsc6rLXtjfo2rtms2GR0hDqfTiat5Zo/view?usp=sharing')
# gdrive_up('/work/awilf/utils/gdrive_credentials.json', ['pyg.sif'], '1zBldu3ipR6LtrJBxxNlaKBPW_kio6nli')
gdrive_down('https://drive.google.com/file/d/1eEdRQVgBCcq8DyasduZpMzTlCIjrekLM/view?usp=sharing')
| 45.662338
| 455
| 0.71587
| 485
| 3,516
| 5.057732
| 0.412371
| 0.025683
| 0.039136
| 0.046474
| 0.094578
| 0.094578
| 0.041582
| 0.041582
| 0
| 0
| 0
| 0.021959
| 0.158134
| 3,516
| 76
| 456
| 46.263158
| 0.806757
| 0.493458
| 0
| 0
| 0
| 0
| 0.163915
| 0.020637
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.228571
| 0
| 0.285714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2725c4c1b926b1b3e672d43af2ed2535f3dcf9a6
| 705
|
py
|
Python
|
web_ext/sseq_gui/tests/test_load.py
|
hoodmane/sseq
|
0f19a29c95486a629b0d054c703ca0a58999ae97
|
[
"Apache-2.0",
"MIT"
] | 7
|
2021-04-22T04:06:09.000Z
|
2022-01-25T04:05:49.000Z
|
web_ext/sseq_gui/tests/test_load.py
|
hoodmane/sseq
|
0f19a29c95486a629b0d054c703ca0a58999ae97
|
[
"Apache-2.0",
"MIT"
] | 68
|
2020-03-21T22:37:24.000Z
|
2022-03-31T02:51:35.000Z
|
web_ext/sseq_gui/tests/test_load.py
|
hoodmane/sseq
|
0f19a29c95486a629b0d054c703ca0a58999ae97
|
[
"Apache-2.0",
"MIT"
] | 5
|
2021-02-17T06:37:43.000Z
|
2022-02-01T03:53:22.000Z
|
import pytest
from pathlib import Path
@pytest.mark.parametrize("module", ["S_2", "S_3", "C2v14"])
def test_load(driver, module: str):
driver.go("/")
driver.driver.find_element_by_css_selector(f'a[data="{module}"]').click()
driver.wait_complete()
driver.check_svg(f"{module}_load.svg")
@pytest.mark.parametrize("module", ["S_2", "S_3", "C2v14"])
def test_load_json(driver, module: str):
path = Path(__file__).parent / "../../../ext/steenrod_modules" / f"{module}.json"
path = path.resolve()
driver.go("/")
driver.driver.find_element_by_id("json-upload").send_keys(str(path))
driver.reply("40")
driver.wait_complete()
driver.check_svg(f"{module}_load.svg")
| 30.652174
| 85
| 0.673759
| 101
| 705
| 4.445545
| 0.435644
| 0.046771
| 0.093541
| 0.120267
| 0.561247
| 0.561247
| 0.561247
| 0.414254
| 0.414254
| 0.414254
| 0
| 0.019512
| 0.12766
| 705
| 22
| 86
| 32.045455
| 0.710569
| 0
| 0
| 0.470588
| 0
| 0
| 0.202837
| 0.041135
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2727cc439b34121ae42c9bebd5ffc173f65fc3c6
| 5,905
|
py
|
Python
|
foresight/environment/environment_info_support.py
|
thundra-io/thundra-agent-python
|
448e18c17d8730c381b2e2a773782cf80c5a7cfb
|
[
"Apache-2.0"
] | 15
|
2021-07-28T08:03:50.000Z
|
2021-11-08T08:36:06.000Z
|
foresight/environment/environment_info_support.py
|
thundra-io/thundra-agent-python
|
448e18c17d8730c381b2e2a773782cf80c5a7cfb
|
[
"Apache-2.0"
] | 1
|
2021-08-08T07:45:45.000Z
|
2021-08-08T12:41:36.000Z
|
foresight/environment/environment_info_support.py
|
thundra-io/thundra-agent-python
|
448e18c17d8730c381b2e2a773782cf80c5a7cfb
|
[
"Apache-2.0"
] | 3
|
2021-08-07T14:19:23.000Z
|
2021-12-08T15:35:40.000Z
|
import os, logging
from foresight.environment.git.git_helper import GitHelper
from foresight.environment.git.git_env_info_provider import GitEnvironmentInfoProvider
from foresight.environment.github.github_environment_info_provider import GithubEnvironmentInfoProvider
from foresight.environment.gitlab.gitlab_environment_info_provider import GitlabEnvironmentInfoProvider
from foresight.environment.jenkins.jenkins_environment_info_provider import JenkinsEnvironmentInfoProvider
from foresight.environment.travisci.travisci_environment_info_provider import TravisCIEnvironmentInfoProvider
from foresight.environment.circleci.circleci_environment_info_provider import CircleCIEnvironmentInfoProvider
from foresight.environment.bitbucket.bitbucket_environment_info_provider import BitbucketEnvironmentInfoProvider
from foresight.environment.azure.azure_environment_info_provider import AzureEnvironmentInfoProvider
from foresight.test_runner_tags import TestRunnerTags
from foresight.utils.generic_utils import print_debug_message_to_console
LOGGER = logging.getLogger(__name__)
class EnvironmentSupport:
ENVIRONMENTS_VARS = {
"GITHUB_ENV": GithubEnvironmentInfoProvider, # https://docs.github.com/en/actions/learn-github-actions/environment-variables
"CI_PROJECT_ID": GitlabEnvironmentInfoProvider, # https://docs.gitlab.com/ee/ci/variables/predefined_variables.html
"JENKINS_HOME": JenkinsEnvironmentInfoProvider, # https://e.printstacktrace.blog/jenkins-pipeline-environment-variables-the-definitive-guide/
"JENKINS_URL": JenkinsEnvironmentInfoProvider, # https://e.printstacktrace.blog/jenkins-pipeline-environment-variables-the-definitive-guide/
"TRAVIS": TravisCIEnvironmentInfoProvider, # https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
"CIRCLECI": CircleCIEnvironmentInfoProvider, # https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
"BITBUCKET_GIT_HTTP_ORIGIN": BitbucketEnvironmentInfoProvider, # https://support.atlassian.com/bitbucket-cloud/docs/variables-and-secrets/
"BITBUCKET_GIT_SSH_ORIGIN": BitbucketEnvironmentInfoProvider, # https://support.atlassian.com/bitbucket-cloud/docs/variables-and-secrets/
"TF_BUILD": AzureEnvironmentInfoProvider # https://docs.microsoft.com/en-us/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml
}
environment_info = None
@classmethod
def init(cls):
"""First check git provider, then iterate over ENVIRONMENTS_VARS dict.
"""
try:
LOGGER.debug("Checking ci environments...")
for key, clz in cls.ENVIRONMENTS_VARS.items():
LOGGER.debug("Current key, clz: {}, {}".format(key,clz))
if os.getenv(key):
ei = clz.build_env_info()
if ei:
cls.environment_info = ei
print_debug_message_to_console("Environment info: {}".format(cls.environment_info.to_json()))
print_debug_message_to_console("Founded key and class: {}, {}".format(key, clz))
break
            if cls.environment_info is None:
if GitHelper.get_repo_url():
print_debug_message_to_console("Couldn't find any ci envrionment! Trying .git file...")
cls.environment_info = GitEnvironmentInfoProvider.build_env_info()
print_debug_message_to_console("Environment info: {}".format(cls.environment_info.to_json()))
else:
print_debug_message_to_console("Couldn't find .git file!")
        except Exception as err:
            LOGGER.error("Environment Support environment_info could not be set: {}".format(err))
            cls.environment_info = None
            print_debug_message_to_console("environment_info is None!")
@classmethod
def set_span_tags(cls, span):
"""Set span data tags corresponds to TestRunner
Args:
obj (ThundraSpan): Span or invocation data
"""
try:
if cls.environment_info:
span.set_tag(TestRunnerTags.TEST_ENVIRONMENT, cls.environment_info.environment)
span.set_tag(TestRunnerTags.SOURCE_CODE_REPO_URL, cls.environment_info.repo_url)
span.set_tag(TestRunnerTags.SOURCE_CODE_REPO_NAME, cls.environment_info.repo_name)
span.set_tag(TestRunnerTags.SOURCE_CODE_BRANCH, cls.environment_info.branch)
span.set_tag(TestRunnerTags.SOURCE_CODE_COMMIT_HASH, cls.environment_info.commit_hash)
span.set_tag(TestRunnerTags.SOURCE_CODE_COMMIT_MESSAGE, cls.environment_info.commit_message)
        except Exception:
            LOGGER.error("Couldn't set environment info for span tags.")
@classmethod
def set_invocation_tags(cls, invocation_data):
"""Set invocation data tags corresponds to TestRunner
Args:
obj (invocation): Span or invocation data
"""
try:
if cls.environment_info:
invocation_data[TestRunnerTags.TEST_ENVIRONMENT] = cls.environment_info.environment
invocation_data[TestRunnerTags.SOURCE_CODE_REPO_URL] = cls.environment_info.repo_url
invocation_data[TestRunnerTags.SOURCE_CODE_REPO_NAME] = cls.environment_info.repo_name
invocation_data[TestRunnerTags.SOURCE_CODE_BRANCH] = cls.environment_info.branch
invocation_data[TestRunnerTags.SOURCE_CODE_COMMIT_HASH] = cls.environment_info.commit_hash
invocation_data[TestRunnerTags.SOURCE_CODE_COMMIT_MESSAGE] = cls.environment_info.commit_message
        except Exception:
            LOGGER.error("Couldn't set environment info for invocation tags.")
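# A minimal usage sketch (the span object is hypothetical; anything exposing a
# set_tag(key, value) method works):
#
#   EnvironmentSupport.init()
#   EnvironmentSupport.set_span_tags(span)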
| 60.255102
| 150
| 0.720745
| 636
| 5,905
| 6.437107
| 0.237421
| 0.124573
| 0.087934
| 0.049585
| 0.461895
| 0.431119
| 0.414998
| 0.348315
| 0.303859
| 0.282853
| 0
| 0.000423
| 0.198645
| 5,905
| 98
| 151
| 60.255102
| 0.864751
| 0.171041
| 0
| 0.194805
| 0
| 0
| 0.101037
| 0.010166
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038961
| false
| 0.038961
| 0.155844
| 0
| 0.233766
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
272ad5e735d859c51d641dfdb613ff9296e5cbee
| 660
|
py
|
Python
|
Problem_Solving/Algorithms/Warmup/6_Plus_Minus/Solution.py
|
CFLSousa/HackerRank
|
29ed039634e88d72981b2ecd619e5c65d37111e4
|
[
"MIT"
] | null | null | null |
Problem_Solving/Algorithms/Warmup/6_Plus_Minus/Solution.py
|
CFLSousa/HackerRank
|
29ed039634e88d72981b2ecd619e5c65d37111e4
|
[
"MIT"
] | null | null | null |
Problem_Solving/Algorithms/Warmup/6_Plus_Minus/Solution.py
|
CFLSousa/HackerRank
|
29ed039634e88d72981b2ecd619e5c65d37111e4
|
[
"MIT"
] | null | null | null |
import math
import os
import random
import re
import sys
def plusMinus(arr):
    posNums = 0.0
    negNums = 0.0
    zeroNums = 0.0
    arrLen = len(arr)
    for i in range(arrLen):
        if arr[i] == 0:
            zeroNums += 1
        elif arr[i] > 0:
            posNums += 1
        else:
            negNums += 1
    posFraction = posNums / arrLen
    negFraction = negNums / arrLen
    zeroFraction = zeroNums / arrLen
print(f'{posFraction:.6f}\n{negFraction:.6f}\n{zeroFraction:.6f}')
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().rstrip().split()))
plusMinus(arr)
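# Worked example: for the sample input "6" and "-4 3 -9 0 4 1", the array has
# 3 positives, 2 negatives and 1 zero, so the program prints 0.500000,
# 0.333333 and 0.166667 on separate lines.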
| 18.333333
| 71
| 0.59697
| 88
| 660
| 4.386364
| 0.431818
| 0.031088
| 0.025907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04158
| 0.271212
| 660
| 35
| 72
| 18.857143
| 0.760915
| 0
| 0
| 0
| 0
| 0
| 0.09697
| 0.084848
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.178571
| 0
| 0.214286
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
272af550504e85a1489b88397788f6d4d8f037c0
| 1,403
|
py
|
Python
|
golang/godoc.py
|
nlfiedler/devscripts
|
122c7b1424b457d7d5499552065da83d76f6b922
|
[
"BSD-3-Clause"
] | null | null | null |
golang/godoc.py
|
nlfiedler/devscripts
|
122c7b1424b457d7d5499552065da83d76f6b922
|
[
"BSD-3-Clause"
] | 1
|
2015-03-04T15:01:08.000Z
|
2015-03-04T15:01:08.000Z
|
golang/godoc.py
|
nlfiedler/devscripts
|
122c7b1424b457d7d5499552065da83d76f6b922
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Start the godoc server and open the docs in a browser window.
This uses the 'open' command to open a web browser.
"""
import argparse
import http
import http.client
import subprocess
import time
def is_ready(host, port):
"""Check if the web server returns an OK status."""
conn = http.client.HTTPConnection(host, port)
try:
conn.request('HEAD', '/')
resp = conn.getresponse()
return resp.status == 200
except ConnectionRefusedError:
return False
def main():
"""Do the thing."""
parser = argparse.ArgumentParser(description='Spawn godoc and open browser window.')
    parser.add_argument('--port', help='port on which to run godoc', type=int, default=6060)
args = parser.parse_args()
host = "localhost"
port = args.port
# If not already running, start godoc in the background and wait for it
# to be ready by making an HTTP request and checking the status.
if not is_ready(host, port):
subprocess.Popen(["godoc", "-http=:{port}".format(port=port)])
while True:
if is_ready(host, port):
break
print('Waiting for server to start...')
time.sleep(1)
# Open the docs in a browser window.
url = "http://{host}:{port}".format(host=host, port=port)
subprocess.check_call(["open", url])
if __name__ == "__main__":
main()
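# Example invocation (assumes the godoc binary is on PATH):
#   python3 godoc.py --port 6060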
| 26.980769
| 88
| 0.639344
| 190
| 1,403
| 4.647368
| 0.473684
| 0.05436
| 0.037373
| 0.050963
| 0.061155
| 0.061155
| 0.061155
| 0
| 0
| 0
| 0
| 0.008459
| 0.241625
| 1,403
| 51
| 89
| 27.509804
| 0.821429
| 0.259444
| 0
| 0
| 0
| 0
| 0.159136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.166667
| 0
| 0.3
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
272f0cf0754b945f03c4fe2b4a5977930d84644b
| 1,752
|
py
|
Python
|
7_kyu/grow_of_population.py
|
dimishpatriot/way_on_the_highway
|
4865db946632b7bd3d74509a20a307841c02169d
|
[
"MIT"
] | null | null | null |
7_kyu/grow_of_population.py
|
dimishpatriot/way_on_the_highway
|
4865db946632b7bd3d74509a20a307841c02169d
|
[
"MIT"
] | null | null | null |
7_kyu/grow_of_population.py
|
dimishpatriot/way_on_the_highway
|
4865db946632b7bd3d74509a20a307841c02169d
|
[
"MIT"
] | null | null | null |
"""In a small town the population is p0 = 1000 at the beginning of a year.
The population regularly increases by 2 percent per year and moreover 50 new
inhabitants per year come to live in the town.
How many years does the town need to see its population greater or equal to p = 1200 inhabitants?
1) At the end of the first year there will be:
1000 + 1000 * 0.02 + 50 => 1070 inhabitants
2) At the end of the 2nd year there will be:
1070 + 1070 * 0.02 + 50 => 1141 inhabitants (number of inhabitants is an integer)
3) At the end of the 3rd year there will be:
1141 + 1141 * 0.02 + 50 => 1213
It will need 3 entire years.
=== More generally given parameters: ===
p0, percent,
aug (inhabitants coming or leaving each year),
p (population to surpass)
the function nb_year should return n number of entire years needed to get
a population greater or equal to p.
aug is an integer,
percent a positive or null number,
p0 and p are positive integers (> 0).
=== Note: ===
Don't forget to convert the percent parameter as a percentage in the body of
your function: if the parameter percent is 2 you have to convert it to 0.02.
"""
import pytest
@pytest.mark.parametrize("test, result", [
((1500, 5, 100, 5000), 15),
((1500000, 2.5, 10000, 2000000), 10),
((1500000, 0.25, 1000, 2000000), 94),
((1000, 10, 0, 1000), 0),
((999, 10, 0, 1000), 1),
((999, 0, 1, 1000), 1),
((1000, 100, 0, 2000), 1),
])
def test_population(test, result):
assert nb_year(*test) == result
def nb_year(p0: int, percent: float, aug: int, p: int) -> int:
percent = percent / 100 # convert to percentage
current = p0
year = 0
while current < p:
        current = int(current + current * percent + aug)  # the number of inhabitants is an integer
year += 1
return year
| 30.736842
| 97
| 0.673516
| 295
| 1,752
| 3.986441
| 0.39322
| 0.017007
| 0.020408
| 0.02551
| 0.079082
| 0.045918
| 0
| 0
| 0
| 0
| 0
| 0.138971
| 0.223744
| 1,752
| 56
| 98
| 31.285714
| 0.725735
| 0.652968
| 0
| 0
| 0
| 0
| 0.019967
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
273038458cb6e72fc3b79b5c6ab692951f85b0c3
| 4,144
|
py
|
Python
|
pysimplegui/DemoPrograms/Demo_Multithreaded_Long_Task_Simple.py
|
konsan1101/py-etc
|
bcca13119b0d2453866988404fd1c4976f55d4d5
|
[
"MIT"
] | null | null | null |
pysimplegui/DemoPrograms/Demo_Multithreaded_Long_Task_Simple.py
|
konsan1101/py-etc
|
bcca13119b0d2453866988404fd1c4976f55d4d5
|
[
"MIT"
] | 2
|
2020-06-06T00:30:56.000Z
|
2021-06-10T22:30:37.000Z
|
pysimplegui/DemoPrograms/Demo_Multithreaded_Long_Task_Simple.py
|
konsan1101/py-etc
|
bcca13119b0d2453866988404fd1c4976f55d4d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import threading
import time
import PySimpleGUI as sg
"""
DESIGN PATTERN - Multithreaded Long Tasks GUI using shared global variables
Presents one method for running long-running operations in a PySimpleGUI environment.
The PySimpleGUI code, and thus the underlying GUI framework, runs as the primary, main thread
The "long work" is contained in the thread that is being started. Communicating is done (carefully) using global variables
There are 2 ways "progress" is being reported to the user. If the amount of time is known ahead of time and
the work can be broken down into countable units, then a progress bar is used. If a task is one long chunk
of time that cannot be broken down into smaller units, then an animated GIF is shown that spins as
long as the task is running.
"""
thread_done = False
message = ''
thread_counter = 0
total = 100
progress = 0
def long_operation_thread(seconds):
"""
A worker thread that communicates with the GUI through a global variable
This thread can block for as long as it wants and the GUI will not be affected
:param seconds: (int) How long to sleep, the ultimate blocking call
:return:
"""
global thread_done, message, thread_counter, progress
print('Thread started - will sleep for {} seconds'.format(seconds))
thread_counter += 1
for i in range(int(seconds*10)):
time.sleep(.1) # sleep for a while
progress += total/(seconds*10)
message = f'***This is a message from the thread {thread_counter} ***'
thread_done = True
def the_gui():
"""
Starts and executes the GUI
Reads data from a global variable and displays
Returns when the user exits / closes the window
"""
global thread_done, message, progress
sg.theme('Light Brown 3')
layout = [[sg.Text('Long task to perform example')],
[sg.Output(size=(80, 12))],
[sg.Text('Number of seconds your task will take'),
sg.Input(key='-SECONDS-', size=(5, 1)),
sg.Button('Do Long Task', bind_return_key=True),
sg.CBox('ONE chunk, cannot break apart', key='-ONE CHUNK-')],
[sg.Text('Work progress'), sg.ProgressBar(total, size=(20,20), orientation='h', key='-PROG-')],
[sg.Button('Click Me'), sg.Button('Exit')], ]
window = sg.Window('Multithreaded Window', layout)
one_chunk = False
sg.popup_animated(None)
# --------------------- EVENT LOOP ---------------------
while True:
event, values = window.read(timeout=100)
if event in (None, 'Exit'):
break
elif event.startswith('Do'):
seconds = float(values['-SECONDS-'])
one_chunk = values['-ONE CHUNK-']
print('Thread Starting! Long work....sending value of {} seconds'.format(seconds))
threading.Thread(target=long_operation_thread, args=(seconds, ), daemon=True).start()
elif event == 'Click Me':
print('Your GUI is alive and well')
# --------------- Check for incoming messages from threads ---------------
        if thread_done:
print('The thread has finished!')
print(f'message = {message}')
# reset everything for the next run
sg.popup_animated(None) # just in case one's running, stop it
thread_done = one_chunk = False
message, progress = '', 0
window['-PROG-'].update_bar(total, total) # show the bar as maxed out
            if one_chunk:  # flag signifies the thread is going to take one big chunk of time
sg.popup_animated(sg.DEFAULT_BASE64_LOADING_GIF, background_color='white', transparent_color='white', time_between_frames=100)
elif progress != 0:
window['-PROG-'].update_bar(progress, total) # update the progress bar if non-zero
# if user exits the window, then close the window and exit the GUI func
window.close()
if __name__ == '__main__':
the_gui()
print('Exiting Program')
| 41.029703
| 138
| 0.625483
| 555
| 4,144
| 4.594595
| 0.392793
| 0.021961
| 0.017647
| 0.018039
| 0.021961
| 0.021961
| 0
| 0
| 0
| 0
| 0
| 0.011468
| 0.263514
| 4,144
| 100
| 139
| 41.44
| 0.82405
| 0.189189
| 0
| 0.035088
| 0
| 0
| 0.195652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.052632
| 0
| 0.087719
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27346ed4d6dc889abd1e3fa083354a32e52ae98a
| 2,024
|
py
|
Python
|
chmap/examples/defunct/EUV_map_query_options.py
|
predsci/CHD
|
35f29d1b62861f4ffed57b38d18689b282664bcf
|
[
"Apache-2.0"
] | 3
|
2021-06-29T00:23:47.000Z
|
2021-09-17T18:29:05.000Z
|
chmap/examples/defunct/EUV_map_query_options.py
|
predsci/CHD
|
35f29d1b62861f4ffed57b38d18689b282664bcf
|
[
"Apache-2.0"
] | null | null | null |
chmap/examples/defunct/EUV_map_query_options.py
|
predsci/CHD
|
35f29d1b62861f4ffed57b38d18689b282664bcf
|
[
"Apache-2.0"
] | 1
|
2021-12-08T06:26:18.000Z
|
2021-12-08T06:26:18.000Z
|
import os
import datetime
import pandas as pd
from chmap.settings.app import App
from chmap.database.db_classes import *
from chmap.database.deprecated.db_funs import init_db_conn
from sqlalchemy.orm import joinedload
# Assume that we are using images from the 'reference_data' setup supplied with repo
# manually set the data dir
raw_data_dir = os.path.join(App.APP_HOME, 'reference_data', 'raw')
hdf_data_dir = os.path.join(App.APP_HOME, 'reference_data', 'processed')
# manually set the database location using the installed app settings.
database_dir = os.path.join(App.APP_HOME, 'reference_data')
# setup database path
use_db = "sqlite"
sqlite_filename = "dbtest.db"
sqlite_path = os.path.join(database_dir, sqlite_filename)
# re-initialize database file and establish a connection/session
db_session = init_db_conn(db_name=use_db, chd_base=Base, sqlite_path=sqlite_path)
# define query time range
query_time_min = datetime.datetime(2000, 1, 1, 1, 0, 0)
query_time_max = datetime.datetime(2019, 1, 1, 1, 0, 0)
mean_time_range = [query_time_min, query_time_max]
combo_query = db_session.query(Data_Combos.combo_id).filter(Data_Combos.date_mean.between(mean_time_range[0],
mean_time_range[1]))
euv_map_query = db_session.query(EUV_Maps).filter(EUV_Maps.combo_id.in_(combo_query))
euv_map_query_join = db_session.query(EUV_Maps, Data_Combos).filter(EUV_Maps.combo_id.in_(combo_query))
euv_map_rel_join = db_session.query(EUV_Maps).options(joinedload(EUV_Maps.combos)).filter(EUV_Maps.combo_id.in_(combo_query))
test = euv_map_query.all()
len(test)
# lazyload combo info
test[0].combos.combo_id
# lazyload which images the combo is made up of
for row in test[0].combos.images:
print(row.data_id)
test2 = pd.read_sql(euv_map_query.statement, db_session.bind)
test2_2 = pd.read_sql(euv_map_query_join.statement, db_session.bind)
test2_3 = pd.read_sql(euv_map_rel_join.statement, db_session.bind)
db_session.close()
| 37.481481
| 125
| 0.76334
| 332
| 2,024
| 4.358434
| 0.310241
| 0.055978
| 0.03801
| 0.026952
| 0.34416
| 0.225294
| 0.163096
| 0.163096
| 0.163096
| 0.107809
| 0
| 0.015499
| 0.139328
| 2,024
| 53
| 126
| 38.188679
| 0.815155
| 0.172925
| 0
| 0
| 0
| 0
| 0.041491
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.225806
| 0
| 0.225806
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27376a84a1f1ba1faffcc4132873b55e2e8ca9cd
| 308
|
py
|
Python
|
hasensor/sensors/announcer.py
|
eblanton/hasensor
|
4421bf939f0904fc85ecbdfbd404dabe33c1c443
|
[
"BSD-2-Clause"
] | 1
|
2020-01-02T21:53:22.000Z
|
2020-01-02T21:53:22.000Z
|
hasensor/sensors/announcer.py
|
eblanton/hasensor
|
4421bf939f0904fc85ecbdfbd404dabe33c1c443
|
[
"BSD-2-Clause"
] | null | null | null |
hasensor/sensors/announcer.py
|
eblanton/hasensor
|
4421bf939f0904fc85ecbdfbd404dabe33c1c443
|
[
"BSD-2-Clause"
] | null | null | null |
from ..sensor import ArgDict, Sensor
class Announcer(Sensor):
_argtypes: ArgDict = {
"value": str
}
def __init__(self, value: str = "ON", **kwargs):
super().__init__(**kwargs)
self._value = value
def fire(self):
self._loop.publish(self.name, self._value)
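# A minimal usage sketch (hypothetical: assumes the Sensor base class wires up
# self.name and self._loop, and that the loop exposes publish(topic, payload)):
#
#   announcer = Announcer(value="ON", name="hall/presence", loop=loop)
#   announcer.fire()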
| 19.25
| 52
| 0.594156
| 35
| 308
| 4.885714
| 0.542857
| 0.157895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266234
| 308
| 15
| 53
| 20.533333
| 0.756637
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
273b0739f92587e2927e090ad5eb2c6345702f86
| 7,295
|
py
|
Python
|
src/news/collector.py
|
aibazhang/multitrue-bot
|
189c5163010269eec57ae48f8e9d08d22ed5fcb8
|
[
"MIT"
] | null | null | null |
src/news/collector.py
|
aibazhang/multitrue-bot
|
189c5163010269eec57ae48f8e9d08d22ed5fcb8
|
[
"MIT"
] | null | null | null |
src/news/collector.py
|
aibazhang/multitrue-bot
|
189c5163010269eec57ae48f8e9d08d22ed5fcb8
|
[
"MIT"
] | null | null | null |
import os
import requests
import json
import pathlib
from abc import ABCMeta, abstractmethod
from .news import News, print_format_markdown, print_format_telebot
KEY_PATH = pathlib.Path(os.path.dirname(__file__), "../..")
class NewsCollector(metaclass=ABCMeta):
@abstractmethod
def format_news(self):
pass
@abstractmethod
def filter_news(self):
pass
@abstractmethod
def print_news(self):
pass
@abstractmethod
    def collect_news(self):
pass
@abstractmethod
def save_news(self):
pass
class WebNewsCollector(NewsCollector):
def __init__(
self,
params=None,
block_list=None,
print_format=None,
base_url=None,
mode=None,
news_list=None,
time_format=None,
headers=None,
):
self.params = params
self.print_format = print_format
self.base_url = base_url
self._mode = mode
if news_list is None:
news_list = list()
if block_list is None:
block_list = json.load(open(KEY_PATH / "block_list.json", "r"))["block_list"]
self.news_list = news_list
self.time_format = time_format
self.block_list = block_list
self.headers = headers
def _get(self):
self.response = requests.get(self.base_url + self.mode, headers=self.headers, params=self.params).text
data_json = json.loads(self.response)
if data_json["status"] == "error":
print("{}: {}".format(data_json["code"], data_json["message"]))
raise requests.exceptions.ConnectionError
def filter_news(self):
filtered_news_list = list()
for news in self.news_list:
if any(bl in news.title for bl in self.block_list):
continue
filtered_news_list.append(news)
self.news_list = filtered_news_list
def print_news(self, news):
if self.print_format not in ["markdown", "telebot"]:
raise NotImplementedError
if self.print_format == "telebot":
return print_format_telebot(news.source, news.author, news.published_time, news.title, news.url)
if self.print_format == "markdown":
print(print_format_markdown(news.published_time, news.title, news.url))
    def collect_news(self):
self._get()
self.format_news()
self.filter_news()
# transfer to local time
[news.trans_utc_to_local(news.published_time, self.time_format) for news in self.news_list]
self.news_list = [self.print_news(news) for news in self.news_list if news.is_latest()]
def format_news(self):
raise NotImplementedError
def save_news(self):
raise NotImplementedError
class NewsAPICollector(WebNewsCollector):
def __init__(
self,
print_format=None,
mode=None,
country=None,
category=None,
sources=None,
query=None,
page_size=None,
):
super().__init__()
self._mode = mode
self.print_format = print_format
self.base_url = "https://newsapi.org/v2/"
self.headers = {"X-Api-Key": json.load(open(KEY_PATH / "keys.json", "r"))["news_api_key"]}
self.time_format = "%Y-%m-%dT%H:%M:%S"
self.params = {
"country": country,
"category": category,
"sources": sources,
"q": query,
"pageSize": page_size,
}
@property
def mode(self):
if self._mode is None:
self._mode = "top-headlines"
if self._mode not in ["top-headlines", "everything", "sources"]:
raise NotImplementedError
return self._mode
def format_news(self):
for text in json.loads(self.response)["articles"]:
news = News()
news.title = text["title"]
news.source = text["source"]["name"]
news.author = text["author"]
news.url = text["url"]
news.published_time = text["publishedAt"]
self.news_list.append(news)
class NewsCatcherAPICollector(WebNewsCollector):
"""
{"message":"You are not subscribed to this API."}⏎
"""
def __init__(
self,
print_format=None,
mode=None,
language=None,
country=None,
category=None,
sources=None,
query=None,
page_size=None,
):
super().__init__()
self._mode = mode
self.print_format = print_format
self.time_format = "%Y-%m-%d %H:%M:%S"
self.base_url = "https://newscatcher.p.rapidapi.com/v1/"
self.headers = {
"x-rapidapi-key": json.load(open(KEY_PATH / "keys.json", "r"))["news_catcher_key"],
"x-rapidapi-host": "newscatcher.p.rapidapi.com",
}
self.params = {"lang": language, "country": country, "topic": category}
@property
def mode(self):
if self._mode is None:
self._mode = "latest_headlines"
if self._mode not in ["latest_headlines"]:
raise NotImplementedError
return self._mode
def format_news(self):
for text in json.loads(self.response)["articles"]:
news = News()
news.title = text["title"]
news.source = text["clean_url"]
news.author = text["author"]
news.url = text["link"]
news.published_time = text["published_date"]
news.country = text["country"]
news.language = text["language"]
news.copyright = text["rights"]
news.description = text["summary"]
self.news_list.append(news)
class MediastackCollector(WebNewsCollector):
"""
    "https_access_restricted" - "Access Restricted -
    Your current Subscription Plan does not support HTTPS Encryption."
"""
def __init__(
self,
print_format=None,
mode=None,
country=None,
category=None,
sources=None,
query=None,
page_size=None,
):
super().__init__()
self._mode = mode
self.print_format = print_format
self.base_url = "https://api.mediastack.com/v1/"
self.key = json.load(open(KEY_PATH / "keys.json", "r"))["mediastack_key"]
self.time_format = "%Y-%m-%dT%H:%M:%S"
self.params = {
"countries": country,
"categories": category,
"sources": sources,
"keywords": query,
"limit": page_size,
}
@property
def mode(self):
self._mode = "news"
return self._mode
def _get(self):
self.response = requests.get(
"{}{}?access_key={}".format(self.base_url, self._mode, self.key), params=self.params
).text
def format_news(self):
for text in json.loads(self.response)["data"]:
news = News()
news.author = text["author"]
news.title = text["title"]
news.description = text["description"]
news.url = text["url"]
news.source = text["source"]
news.image = text["image"]
news.published_time = text["published_at"]
self.news_list.append(news)
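# A minimal usage sketch (assumes a keys.json with a "news_api_key" entry at
# KEY_PATH; country/category values follow the News API conventions):
#
#   collector = NewsAPICollector(print_format="markdown", country="us",
#                                category="technology", page_size=10)
#   collector.collect_news()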
| 29.534413
| 110
| 0.580123
| 834
| 7,295
| 4.874101
| 0.173861
| 0.054121
| 0.0369
| 0.02091
| 0.439114
| 0.35695
| 0.318081
| 0.259041
| 0.241574
| 0.233948
| 0
| 0.000588
| 0.300754
| 7,295
| 246
| 111
| 29.654472
| 0.796118
| 0.026868
| 0
| 0.529703
| 0
| 0
| 0.101784
| 0.003681
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108911
| false
| 0.024752
| 0.029703
| 0
| 0.183168
| 0.089109
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
273c940a6cfe38710caeba8144fda95ba8e580c5
| 9,282
|
py
|
Python
|
scheduler/generation.py
|
richplane/PyREmatcher
|
93df987478849e5905f0ee5c9ee79801479acb1e
|
[
"Apache-2.0"
] | 1
|
2020-03-10T14:35:32.000Z
|
2020-03-10T14:35:32.000Z
|
scheduler/generation.py
|
richplane/PyREmatcher
|
93df987478849e5905f0ee5c9ee79801479acb1e
|
[
"Apache-2.0"
] | null | null | null |
scheduler/generation.py
|
richplane/PyREmatcher
|
93df987478849e5905f0ee5c9ee79801479acb1e
|
[
"Apache-2.0"
] | null | null | null |
# Renewable generation at Findhorn
from windpowerlib import WindFarm
from windpowerlib import WindTurbine
from windpowerlib import WindTurbineCluster
from windpowerlib.turbine_cluster_modelchain import TurbineClusterModelChain
import pvlib
from pvlib.pvsystem import PVSystem
from pvlib.location import Location
from pvlib.modelchain import ModelChain
from pvlib.forecast import GFS
from pvlib.irradiance import disc
import pandas as pd
import numpy as np
import datetime
import scipy
import sys
# University computers can't install tables (blosc needs a C++ compiler)
try:
import tables
except ImportError:
pass
class RenewablesException(Exception):
pass
class LocalRE(object):
forecast_height = 10 # for DarkSky API
def __init__(
self,
wind_turbines: list = [],
pv_arrays: list = [],
latitude: float = 57.6568,
longitude: float = -3.5818,
altitude: float = 10,
roughness_length: float = 0.15, # roughness length (bit of a guess)
hellman_exp: float = 0.2
):
""" Set up the renewable energy generation
"""
# This needs to be repeated in every forecast
self.roughness_length = roughness_length
# Initialise empty forecast dataframe, just so nothing complains
self.wind_forecast = pd.DataFrame()
self.pv_forecast = pd.DataFrame()
# Wind turbine(s)
turbines = []
for turbine in wind_turbines:
turbines.append(
{
'wind_turbine' : WindTurbine(
turbine['name'],
turbine['hub_height'],
nominal_power = turbine['nominal_power'],
rotor_diameter = turbine['rotor_diameter'],
power_curve = turbine['power_curve']
),
'number_of_turbines' : turbine['qty']
}
)
local_wind_farm = WindFarm(
'Local windfarm',
turbines,
[latitude, longitude]
)
# TODO - check for learned local data & overwrite power_curve
self.wind_modelchain = TurbineClusterModelChain(
local_wind_farm,
smoothing = False,
hellman_exp = hellman_exp,
)
# Initialise PV models
self.pv_location = Location(
latitude=latitude,
longitude=longitude,
altitude=altitude
)
# Now set up the PV array & system.
cec_pv_model_params = pvlib.pvsystem.retrieve_sam('CECMod')
sandia_pv_model_params = pvlib.pvsystem.retrieve_sam('SandiaMod')
cec_inverter_model_params = pvlib.pvsystem.retrieve_sam('CECInverter')
adr_inverter_model_params = pvlib.pvsystem.retrieve_sam('ADRInverter')
self.pv_modelchains = {}
for pv_array in pv_arrays:
# Try to find the module names in the libraries
if pv_array['module_name'] in cec_pv_model_params:
pv_array['module_parameters'] = cec_pv_model_params[
pv_array['module_name']
]
elif pv_array['module_name'] in sandia_pv_model_params:
pv_array['module_parameters'] = sandia_pv_model_params[
pv_array['module_name']
]
else:
raise RenewablesException('Could not retrieve PV module data')
# Do the same with the inverter(s)
if pv_array['inverter_name'] in cec_inverter_model_params:
pv_array['inverter_parameters'] = cec_inverter_model_params[
pv_array['inverter_name']
]
elif pv_array['inverter_name'] in adr_inverter_model_params:
pv_array['inverter_parameters'] = adr_inverter_model_params[
pv_array['inverter_name']
]
else:
raise RenewablesException('Could not retrieve PV module data')
self.pv_modelchains[pv_array['name']] = ModelChain(
PVSystem(**pv_array),
self.pv_location,
aoi_model='physical',
spectral_model='no_loss'
)
def make_generation_forecasts(self, forecast):
""" Makes generation forecast data from the supplied Dark Sky forecast
Arguments:
forecast {pandas.DataFrame} -- DarkSky originated forecast
"""
self.pv_forecast = self._make_pv_forecast(forecast)
self.wind_forecast = self._make_wind_forecast(forecast)
def _make_pv_forecast(self, forecast) -> pd.DataFrame:
"""Compile the forecast required for PV generation prediction
Uses pvlib to generate solar irradiance predictions.
Arguments:
forecast {pandas.DataFrame} -- DarkSky originated forecast
"""
# Annoyingly, the PV & wind libraries want temperature named differently
pv_forecast = forecast.rename(
columns={
'temperature' : 'air_temp',
'windSpeed' : 'wind_speed',
}
)
# Use PV lib to get insolation based on the cloud cover reported here
model = GFS()
# Next up, we get hourly solar irradiance using interpolated cloud cover
# We can get this from the clearsky GHI...
        if 'tables' in sys.modules:
# We can use Ineichen clear sky model (uses pytables for turbidity)
clearsky = self.pv_location.get_clearsky(pv_forecast.index)
else:
# We can't, so use 'Simplified Solis'
clearsky = self.pv_location.get_clearsky(
pv_forecast.index, model='simplified_solis'
)
# ... and by knowledge of where the sun is
solpos = self.pv_location.get_solarposition(pv_forecast.index)
ghi = model.cloud_cover_to_ghi_linear(
pv_forecast['cloudCover'] * 100, clearsky['ghi']
)
dni = disc(ghi, solpos['zenith'], pv_forecast.index)['dni']
dhi = ghi - dni * np.cos(np.radians(solpos['zenith']))
# Whump it all together and we have our forecast!
pv_forecast['dni'] = dni
pv_forecast['dhi'] = dhi
pv_forecast['ghi'] = ghi
return pv_forecast
def _make_wind_forecast(self, forecast) -> pd.DataFrame:
"""Creates forecast needed for wind generation prediction
Creates renamed multidimensional columns needed for the windpowerlib
system.
Arguments:
forecast {pandas.DataFrame} -- DarkSky originated forecast
"""
# Easiest to build multiindexes up one by one.
columns_index = pd.MultiIndex.from_tuples(
[
('wind_speed', 10), ('temperature', 10),
('pressure', 10), ('roughness_length', 0),
('wind_bearing', 10)
]
)
wind_forecast = pd.DataFrame(
index=forecast.index.copy(),
columns = columns_index
)
wind_forecast.loc[:,('wind_speed',10)] = forecast['windSpeed'].loc[:]
wind_forecast.loc[:,('temperature',10)] = forecast['temperature'].loc[:]
wind_forecast.loc[:,('pressure',10)] = forecast['pressure'].loc[:]
wind_forecast.loc[:,('wind_bearing',10)] = forecast['windBearing'].loc[:]
wind_forecast.loc[:,('roughness_length', 0)] = self.roughness_length
return wind_forecast
def predict_generation(self, reserved_wind_consumption = 0) -> pd.DataFrame:
""" Predict electricity generated from forecast
Will use the timestamp index of the forecast property to estimate
instantaneous electricity generation. Returns table giving amounts in
kWh.
Arguments:
reserved_wind_consumption {float} - constant amount that is assumed
to be required from wind generation to meet other local need
"""
prediction = pd.DataFrame(index = self.pv_forecast.index.copy())
# First up - PV
# Create a total gen column of zeros
prediction['PV_AC_TOTAL'] = 0
for pv_array, pv_model in self.pv_modelchains.items():
pv_model.run_model(prediction.index, self.pv_forecast)
output_column_name = 'PV_AC_' + pv_array
prediction[output_column_name] = pv_model.ac
# Add to the total column
prediction['PV_AC_TOTAL'] = prediction['PV_AC_TOTAL'] + pv_model.ac
# Next - wind power.
self.wind_modelchain.run_model(
self.wind_forecast
)
prediction['WIND_AC'] = self.wind_modelchain.power_output
# Convert everything into kWh
prediction = prediction * 0.001
        prediction['available_wind'] = prediction['WIND_AC'] - reserved_wind_consumption
        prediction.loc[prediction['available_wind'] < 0, 'available_wind'] = 0
        prediction['total'] = prediction['WIND_AC'] + prediction['PV_AC_TOTAL']
        prediction['surplus'] = prediction['available_wind'] + prediction['PV_AC_TOTAL']
        prediction.loc[prediction['surplus'] < 0, 'surplus'] = 0
return prediction
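# A minimal usage sketch (the turbine and array dicts are placeholders; real
# entries need power curves and SAM-library module/inverter names, and the
# forecast is a DarkSky-style DataFrame):
#
#   site = LocalRE(wind_turbines=[...], pv_arrays=[...])
#   site.make_generation_forecasts(forecast)
#   prediction = site.predict_generation()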
| 34
| 88
| 0.607735
| 997
| 9,282
| 5.447342
| 0.281846
| 0.0232
| 0.019149
| 0.026514
| 0.207697
| 0.176211
| 0.176211
| 0.039772
| 0.039772
| 0.022095
| 0
| 0.007939
| 0.307908
| 9,282
| 272
| 89
| 34.125
| 0.837484
| 0.215471
| 0
| 0.068323
| 0
| 0
| 0.112698
| 0
| 0
| 0
| 0
| 0.003676
| 0
| 1
| 0.031056
| false
| 0.012422
| 0.10559
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
273e3b061e019a5cb896032af82de6f73be47078
| 1,985
|
py
|
Python
|
app.py
|
twitterdev/getting-started-with-dash
|
f0c81a03d0f31ed787a90492d2412c9fe48eeb6d
|
[
"Apache-2.0"
] | 2
|
2022-03-13T22:54:16.000Z
|
2022-03-28T02:37:48.000Z
|
app.py
|
twitterdev/getting-started-with-dash
|
f0c81a03d0f31ed787a90492d2412c9fe48eeb6d
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
twitterdev/getting-started-with-dash
|
f0c81a03d0f31ed787a90492d2412c9fe48eeb6d
|
[
"Apache-2.0"
] | null | null | null |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import os
import requests
app = dash.Dash(external_stylesheets=[dbc.themes.JOURNAL])
server = app.server
search_url = "https://api.twitter.com/2/tweets/counts/recent"
query_params = {"query": "from:jessicagarson", "granularity": "day"}
bearer_token = os.environ.get("BEARER_TOKEN")
def bearer_oauth(r):
r.headers["Authorization"] = f"Bearer {bearer_token}"
r.headers["User-Agent"] = "GettingStartedDash"
return r
def connect_to_endpoint(url, tweet_fields):
response = requests.request("GET", url, auth=bearer_oauth, params=tweet_fields)
print(response.status_code)
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
return response.json()
json_response = connect_to_endpoint(search_url, query_params)
df = pd.DataFrame(json_response["data"])
df["start"] = pd.to_datetime(df["start"])
final = df[["start", "tweet_count"]]
fig = px.line(final, x="start", y="tweet_count")
colors = {"background": "#FFFFFF", "text": "#1DA1F2"}
fig.update_layout(
plot_bgcolor=colors["background"],
paper_bgcolor=colors["background"],
font_color=colors["text"],
)
app.layout = html.Div(
style={"backgroundColor": colors["background"]},
children=[
html.H1(
children="Tweets by Date",
style={"textAlign": "center", "color": colors["text"]},
),
html.Div(
children="An example using Dash and the Twitter API recent search counts to see how much I've been Tweeting this week",
style={"textAlign": "center", "color": colors["text"]},
),
dcc.Graph(id="example-twitter", figure=fig),
],
)
if __name__ == "__main__":
app.run_server(debug=True)
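# To run locally, export a Twitter API bearer token first:
#   BEARER_TOKEN=<your token> python app.py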
| 26.824324
| 131
| 0.665995
| 250
| 1,985
| 5.116
| 0.5
| 0.031274
| 0.04222
| 0.039093
| 0.05473
| 0.05473
| 0
| 0
| 0
| 0
| 0
| 0.004991
| 0.192443
| 1,985
| 73
| 132
| 27.191781
| 0.792888
| 0
| 0
| 0.074074
| 0
| 0.018519
| 0.255416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.148148
| 0
| 0.222222
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
273f6a820dd8f3aeae5864ba60eb32fd5d0541ad
| 2,398
|
py
|
Python
|
testing/statistic.py
|
methk/RadixDLT-IoTSimulation
|
886ba589c8e7be08c95cf3636438c10e97e16752
|
[
"MIT"
] | null | null | null |
testing/statistic.py
|
methk/RadixDLT-IoTSimulation
|
886ba589c8e7be08c95cf3636438c10e97e16752
|
[
"MIT"
] | 3
|
2021-03-09T21:18:22.000Z
|
2021-09-02T01:05:44.000Z
|
testing/statistic.py
|
methk/RadixDLT-IoTSimulation
|
886ba589c8e7be08c95cf3636438c10e97e16752
|
[
"MIT"
] | 1
|
2022-02-18T14:51:46.000Z
|
2022-02-18T14:51:46.000Z
|
import os
import csv
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
startingDir = 'data/output'
plotTestNumber = 12
totalRequests = 447
singleTestData = []
allLatencies = []
allErrors = 0
errorsData = []
tipsData = []
tipsDataSTD = []
powsData = []
powsDataSTD = []
plotData = {}
path = os.walk(startingDir)
next(path)
for directory in path:
tempTestData = {
'name': directory[0].split('/')[-1],
'tipsValue': [],
'powValue': [],
'errors': 0
}
for csvFilename in directory[2]:
with open(directory[0]+'/'+csvFilename, 'r') as csvFile:
reader = csv.reader(csvFile)
for row in reader:
srt = int(row[1])
tips = int(row[2])
fin = int(row[3])
                if fin == -1:
tempTestData['errors'] += 1
allErrors += 1
else:
tipsValue = tips - srt
powValue = fin - tips
tempTestData['powValue'].append(powValue)
tempTestData['tipsValue'].append(tipsValue)
allLatencies.append(tipsValue+powValue)
latence = fin - srt
                    if latence in plotData:
plotData[latence] += 1
else:
plotData[latence] = 1
errorsNotWritten = totalRequests - \
len(tempTestData['powValue']) - tempTestData['errors']
tempTestData['errors'] += errorsNotWritten
allErrors += errorsNotWritten
singleTestData.append(tempTestData)
print('Avg= ' + str(round(np.mean(allLatencies), 4)))
print('Err= ' + str(100 * round((allErrors / (totalRequests * len(singleTestData))), 4)) + '%')
print(mean_confidence_interval(allLatencies))
fig = plt.figure()
ax = plt.axes()
sortedData = {}
for k, v in sorted(plotData.items()):
sortedData[k] = v
mean = int(round(np.mean(allLatencies), 4))
plt.plot(range(len(sortedData)), list(sortedData.values()), color='#b42526')
plt.axvline(x = mean, color='#252525')
plt.show()
| 27.25
| 95
| 0.565888
| 259
| 2,398
| 5.223938
| 0.416988
| 0.022173
| 0.03252
| 0.033999
| 0.035477
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025989
| 0.293995
| 2,398
| 87
| 96
| 27.563218
| 0.773184
| 0
| 0
| 0.027778
| 0
| 0
| 0.052544
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013889
| false
| 0
| 0.069444
| 0
| 0.097222
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
273feec7e2e1888ae2ab4fb11f3421c980ee0353
| 4,844
|
py
|
Python
|
skutil/preprocessing/tests/test_balance.py
|
tgsmith61591/pynorm
|
672e353a721036791e1e32250879c3276961e05a
|
[
"BSD-3-Clause"
] | 38
|
2016-08-31T19:24:13.000Z
|
2021-06-28T17:10:20.000Z
|
skutil/preprocessing/tests/test_balance.py
|
tgsmith61591/pynorm
|
672e353a721036791e1e32250879c3276961e05a
|
[
"BSD-3-Clause"
] | 42
|
2016-06-20T19:07:21.000Z
|
2017-10-29T20:53:11.000Z
|
skutil/preprocessing/tests/test_balance.py
|
tgsmith61591/pynorm
|
672e353a721036791e1e32250879c3276961e05a
|
[
"BSD-3-Clause"
] | 17
|
2016-06-27T18:07:53.000Z
|
2019-04-09T12:33:59.000Z
|
from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from skutil.preprocessing import *
from skutil.preprocessing.balance import _BaseBalancer
from numpy.testing import assert_array_equal
from skutil.testing import assert_fails
import warnings
# Define data for testing
iris = load_iris()
X = pd.DataFrame(data=iris.data, columns=iris.feature_names)
X['target'] = iris.target
def _get_three_results(sampler):
x = X.iloc[:60] # 50 zeros, 10 ones
y = pd.concat([x, X.iloc[140:150]])
a, b = sampler.balance(x), sampler.balance(y)
sampler.ratio = 0.2
return a, b, sampler.balance(y)
def test_oversample():
a, b, c = _get_three_results(OversamplingClassBalancer(y='target', ratio=0.5))
expected_1_ct = 25
cts = a.target.value_counts()
assert cts[1] == expected_1_ct
cts = b.target.value_counts()
assert cts[1] == expected_1_ct
assert cts[2] == expected_1_ct
expected_2_ct = 10
cts = c.target.value_counts()
assert cts[1] == expected_2_ct
assert cts[2] == expected_2_ct
# test what happens when non-string passed as col name
failed = False
try:
OversamplingClassBalancer(y=1).balance(X)
except ValueError:
failed = True
assert failed
# test with too many classes
Y = X.copy()
Y['class'] = np.arange(Y.shape[0])
failed = False
try:
OversamplingClassBalancer(y='class').balance(Y)
except ValueError:
failed = True
assert failed
# test with one class
Y['class'] = np.zeros(Y.shape[0])
failed = False
try:
OversamplingClassBalancer(y='class').balance(Y)
except ValueError:
failed = True
assert failed
# test with bad ratio
for r in [0.0, 1.1, 'string']:
failed = False
try:
OversamplingClassBalancer(y='target', ratio=r).balance(X)
except ValueError:
failed = True
assert failed
# test where two classes are equally represented, and one has only a few
Y = X.iloc[:105]
d = OversamplingClassBalancer(y='target', ratio=1.0).balance(Y)
assert d.shape[0] == 150
cts = d.target.value_counts()
assert cts[0] == 50
assert cts[1] == 50
assert cts[2] == 50
def test_oversample_warning():
x = np.array([
[1, 2, 3],
[1, 2, 3],
[1, 2, 4]
])
df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
# catch the warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
OversamplingClassBalancer(y='c', ratio=1.0).balance(df)
assert len(w) == 1
def test_smote_error():
x = np.array([
[1, 2, 3],
[1, 2, 3],
[1, 2, 4]
])
df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
# this fails because we can't perform smote on single observation (obs='4', in this case)
assert_fails(SMOTEClassBalancer(y='c', ratio=1.0).balance, ValueError, df)
def test_smote():
a, b, c = _get_three_results(SMOTEClassBalancer(y='target', ratio=0.5))
expected_1_ct = 25
cts = a.target.value_counts()
assert cts[1] == expected_1_ct
cts = b.target.value_counts()
assert cts[1] == expected_1_ct
assert cts[2] == expected_1_ct
expected_2_ct = 10
cts = c.target.value_counts()
assert cts[1] == expected_2_ct
assert cts[2] == expected_2_ct
def test_undersample():
# since all classes are equal, should be no change here
b = UndersamplingClassBalancer(y='target').balance(X)
assert b.shape[0] == X.shape[0]
x = X.iloc[:60] # 50 zeros, 10 ones
b = UndersamplingClassBalancer(y='target', ratio=0.5).balance(x)
assert b.shape[0] == 30
cts = b.target.value_counts()
assert cts[0] == 20
assert cts[1] == 10
b = UndersamplingClassBalancer(y='target', ratio=0.25).balance(x)
assert b.shape[0] == 50
cts = b.target.value_counts()
assert cts[0] == 40
assert cts[1] == 10
def test_unneeded():
for sample_class in (UndersamplingClassBalancer,
SMOTEClassBalancer,
OversamplingClassBalancer):
sampler = sample_class(y='target', ratio=0.2, shuffle=False)
sampled = sampler.balance(X)
# assert array the same
assert_array_equal(X.index.values, sampled.index.values)
assert sampled.shape[0] == X.shape[0]
def test_superclass_not_implemented():
# anon balancer
class AnonBalancer(_BaseBalancer):
def __init__(self, y=None, ratio=0.2, as_df=True):
super(AnonBalancer, self).__init__(ratio, y, as_df)
def balance(self, X):
return super(AnonBalancer, self).balance(X)
assert_fails(AnonBalancer().balance, NotImplementedError, X)
| 27.367232
| 93
| 0.635632
| 678
| 4,844
| 4.41003
| 0.233038
| 0.051171
| 0.051171
| 0.069231
| 0.430435
| 0.396656
| 0.317057
| 0.317057
| 0.280936
| 0.247492
| 0
| 0.036987
| 0.240917
| 4,844
| 176
| 94
| 27.522727
| 0.776176
| 0.091453
| 0
| 0.48
| 0
| 0
| 0.021427
| 0
| 0
| 0
| 0
| 0
| 0.256
| 1
| 0.08
| false
| 0
| 0.072
| 0.008
| 0.176
| 0.008
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
274247c350250c602de94ec641a4010f931234e4
| 3,540
|
py
|
Python
|
options/running_options.py
|
kwshh/ImageDeconvlution
|
561468463372a5727b553efa0330fc75901e29fc
|
[
"MIT"
] | 25
|
2019-05-10T13:51:25.000Z
|
2021-10-13T01:35:43.000Z
|
options/running_options.py
|
kwshh/ImageDeconvlution
|
561468463372a5727b553efa0330fc75901e29fc
|
[
"MIT"
] | 8
|
2019-05-10T13:51:07.000Z
|
2021-06-03T07:13:28.000Z
|
options/running_options.py
|
kwshh/ImageDeconvlution
|
561468463372a5727b553efa0330fc75901e29fc
|
[
"MIT"
] | 7
|
2020-08-15T09:16:11.000Z
|
2021-07-06T21:54:20.000Z
|
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
class Options():
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self):
parser = argparse.ArgumentParser()
parser.add_argument('--ModelName', help='Model Name', default='RGDNbasic')
parser.add_argument('--UseCUDA', help='Use CUDA?', type=str2bool, nargs='?', const=True, default=True)
parser.add_argument('--UseGradAdj',
help='Use grad adj module?',
type=str2bool,
nargs='?',
const=True,
default=True)
parser.add_argument('--UseReg',
help='Use Reg?',
type=str2bool,
nargs='?',
const=True,
default=True)
parser.add_argument('--UseGradScaler',
help='Add the grad scaler?',
type=str2bool,
nargs='?',
const=True,
default=True)
parser.add_argument('--StepNum',
help='maximum number of steps',
type=int,
nargs='?',
const=True,
default=40)
parser.add_argument('--StopEpsilon',
help='stopping condition',
type=float,
# default=1e-7)
default=float("inf"))
# CropSize = 0 when no padding is applied to y in advance; -1 to pad with the kernel size in advance.
parser.add_argument('--CropSize', help='crop boundaries of results', type=int, default=-1)
parser.add_argument('--ImgPad', help='pad image before processing', type=str2bool, default=False)
parser.add_argument('--DataPath', help='DataPath', type=str, default='../rgdn_dataset/')
parser.add_argument('--OutPath', help='Path for output', type=str, default='../rgdn_results/')
parser.add_argument('--TrainedModelPath', help='path of trained model', type=str, default='./rgdn.tr')
parser.add_argument('--Suffix', type=str, help='Manually set suffix', default='Debug')
self.initialized = True
self.parser = parser
return parser
def print_options(self, opt):
# This function is adapted from the 'cycleGAN' project.
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
self.message = message
def parse(self, is_print):
parser = self.initialize()
opt = parser.parse_args()
if is_print:
self.print_options(opt)
self.opt = opt
return self.opt
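if __name__ == '__main__':
    # Hypothetical usage (not part of the original file): parse the CLI
    # arguments with the defaults above and print the formatted option table.
    opt = Options().parse(is_print=True)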
| 41.647059
| 110
| 0.487288
| 342
| 3,540
| 4.97076
| 0.383041
| 0.068824
| 0.13
| 0.061765
| 0.127059
| 0.127059
| 0.127059
| 0.127059
| 0.127059
| 0.127059
| 0
| 0.00849
| 0.367797
| 3,540
| 84
| 111
| 42.142857
| 0.751117
| 0.062712
| 0
| 0.197183
| 0
| 0
| 0.177093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070423
| false
| 0
| 0.014085
| 0
| 0.15493
| 0.070423
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2743e4857e6a05d46ea22a4c0d0bcac87b1eae86
| 982
|
py
|
Python
|
python/scanr_doiresolver-0.10/scanr_doiresolver/main.py
|
dataesr/scanr-backend
|
39681be69b9a96b4a07b9410754c897cd5b65c24
|
[
"MIT"
] | null | null | null |
python/scanr_doiresolver-0.10/scanr_doiresolver/main.py
|
dataesr/scanr-backend
|
39681be69b9a96b4a07b9410754c897cd5b65c24
|
[
"MIT"
] | null | null | null |
python/scanr_doiresolver-0.10/scanr_doiresolver/main.py
|
dataesr/scanr-backend
|
39681be69b9a96b4a07b9410754c897cd5b65c24
|
[
"MIT"
] | null | null | null |
import json
from companies_plugin import extractor
from companies_plugin.utils import add_logger
from scanr_doiresolver import LIB_PATH
from scanr_doiresolver.resolver import resolve_publications
@add_logger
class Extractor(extractor.Extractor):
def extract(self, headers, properties, message):
"""
The input message is JSON of the form
{"id": "", "url": "", "dois": [], "references": []};
the output is {"id": "", "url": "", "publications": ["", ""]}
"""
reply_to = properties["reply_to"]
msg = json.loads(message)
return json.dumps({
"id": msg.get("id"),
"url": msg.get("url"),
"publications": resolve_publications(msg.get("dois", []), msg.get("references", []))
}), reply_to
if __name__ == "__main__":
m = extractor.Main(batch_name="PUBLICATION_RESOLVER",
queue_name="PUBLICATION_RESOLVER",
extractor_class=Extractor,
mod_path=LIB_PATH)
m.launch()
| 30.6875
| 96
| 0.602851
| 104
| 982
| 5.432692
| 0.471154
| 0.042478
| 0.067257
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.272912
| 982
| 31
| 97
| 31.677419
| 0.791317
| 0.089613
| 0
| 0
| 0
| 0
| 0.107728
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.238095
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27484ca2bc8228c9f67c0d19e4ae6ba4c2b57e35
| 555
|
py
|
Python
|
tests/solvers/__init__.py
|
neuyhwu/MIPLearn
|
c6b31a827d6c6e682d45171f21478162c0bc46d6
|
[
"BSD-3-Clause"
] | 64
|
2020-02-27T01:24:40.000Z
|
2022-03-31T12:38:56.000Z
|
tests/solvers/__init__.py
|
neuyhwu/MIPLearn
|
c6b31a827d6c6e682d45171f21478162c0bc46d6
|
[
"BSD-3-Clause"
] | 3
|
2020-04-07T14:43:31.000Z
|
2021-01-15T14:02:01.000Z
|
tests/solvers/__init__.py
|
zclab/MIPLearn
|
9bd64c885aa645d41c30fa0ec4e0eedfaf703dac
|
[
"BSD-3-Clause"
] | 13
|
2020-03-30T16:41:38.000Z
|
2022-02-17T15:38:01.000Z
|
# MIPLearn: Extensible Framework for Learning-Enhanced Mixed-Integer Optimization
# Copyright (C) 2020-2021, UChicago Argonne, LLC. All rights reserved.
# Released under the modified BSD license. See COPYING.md for more details.
from io import StringIO
from miplearn.solvers import _RedirectOutput
def test_redirect_output() -> None:
import sys
original_stdout = sys.stdout
io = StringIO()
with _RedirectOutput([io]):
print("Hello world")
assert sys.stdout == original_stdout
assert io.getvalue() == "Hello world\n"
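# For intuition, a minimal sketch of what a redirect context manager with the
# behavior tested above plausibly looks like (an assumption for illustration,
# not MIPLearn's actual implementation):
import contextlib
import sys as _sys


@contextlib.contextmanager
def _redirect_output_sketch(streams):
    class _Tee:
        def write(self, data):
            # fan the data out to every registered stream
            for stream in streams:
                stream.write(data)

        def flush(self):
            for stream in streams:
                stream.flush()

    saved, _sys.stdout = _sys.stdout, _Tee()
    try:
        yield
    finally:
        # always restore the original stdout, even on error
        _sys.stdout = saved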
| 30.833333
| 82
| 0.731532
| 70
| 555
| 5.714286
| 0.728571
| 0.07
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01766
| 0.183784
| 555
| 17
| 83
| 32.647059
| 0.865342
| 0.403604
| 0
| 0
| 0
| 0
| 0.07362
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.4
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27486b3592aa44c1f3081be500edb4d9d40e6414
| 1,717
|
py
|
Python
|
QuantumBlack Machine Learning Software Engineer 2019/Correlation.py
|
sivolko/codeforce
|
4b00c4c012780036e56d2f0e79adb2f5db7559df
|
[
"MIT"
] | 3
|
2021-04-21T07:11:33.000Z
|
2022-01-09T00:05:55.000Z
|
InterviewChallenges/QuantumBlack Machine Learning Software Engineer 2019/Correlation.py
|
sweetpand/Algorithms
|
2e4dcf2d42de25531fae5b4ec0d96ce100043117
|
[
"MIT"
] | null | null | null |
InterviewChallenges/QuantumBlack Machine Learning Software Engineer 2019/Correlation.py
|
sweetpand/Algorithms
|
2e4dcf2d42de25531fae5b4ec0d96ce100043117
|
[
"MIT"
] | null | null | null |
import math
def Correlation(scores):
physics = []
maths = []
chemistry = []
for each_scores in scores:
values = each_scores.split("\t")
maths.append(int(values[0]))
physics.append(int(values[1]))
chemistry.append(int(values[2]))
length = len(physics)
value1 = calculate_correlation(maths, physics, length)
value2 = calculate_correlation(physics, chemistry, length)
value3 = calculate_correlation(chemistry, maths, length)
# print(value1)
# print(value2)
# print(value3)
return [str(value1), str(value2), str(value3)]
# return '{}\{}{}'.format(value1, value2, value3)
def calculate_correlation(list1, list2, length):
# print("into calculate_correlation", list2, list1, length)
multiply_list = [each[0] * each[1] for each in zip(list1, list2)]
num_termA = sum(multiply_list) * length
num_termB = sum(list1) * sum(list2)
numerator = num_termA - num_termB
# print("tA: {}, tB: {}, n: {}".format(num_termA, num_termB, numerator))
def denom_calculator(lis, l):
    return math.sqrt((sum(pow(each, 2) for each in lis) * l) - pow(sum(lis), 2))
denominator = denom_calculator(list1, length) * denom_calculator(list2, length)
value = round(numerator / denominator, 2)
# print("deno: {}, value: {}".format(denominator, value))
return value
class_scores = ['73\t72\t76', '48\t67\t76', '95\t92\t95', '95\t95\t96', '33\t59\t79', '47\t58\t74', '98\t95\t97',
'91\t94\t97', '95\t84\t90', '93\t83\t90', '70\t70\t78', '85\t79\t91', '33\t67\t76', '47\t73\t90',
'95\t87\t95', '84\t86\t95', '43\t63\t75', '95\t92\t100', '54\t80\t87', '72\t76\t90']
res = Correlation(class_scores)
print(res)
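# Quick sanity check (not part of the original challenge code): perfectly
# correlated columns should yield r == 1.0 under the formula above.
assert calculate_correlation([1, 2, 3], [2, 4, 6], 3) == 1.0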
| 33.019231
| 115
| 0.638905
| 235
| 1,717
| 4.582979
| 0.404255
| 0.092851
| 0.041783
| 0.029712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107321
| 0.180547
| 1,717
| 51
| 116
| 33.666667
| 0.658138
| 0.159581
| 0
| 0
| 0
| 0
| 0.141463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.172414
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27491dfc485679f55f2ea50c1629c408b70c365b
| 2,292
|
py
|
Python
|
src/gpuz/data_helper.py
|
scalasm/gpuz-log-analysis
|
c9a6dd46bf8a12120c9d284411c5b1562b97fdfd
|
[
"Apache-2.0"
] | null | null | null |
src/gpuz/data_helper.py
|
scalasm/gpuz-log-analysis
|
c9a6dd46bf8a12120c9d284411c5b1562b97fdfd
|
[
"Apache-2.0"
] | null | null | null |
src/gpuz/data_helper.py
|
scalasm/gpuz-log-analysis
|
c9a6dd46bf8a12120c9d284411c5b1562b97fdfd
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Optional, List, Any
import gpuz.utility as utility
from pathlib import Path
import os
import pandas as pd
from pandas.core.frame import DataFrame
import numpy as np
import matplotlib as mp
logger: logging.Logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class DataHelper:
def __init__(self, root_dir: str) -> None:
self.root_dir = root_dir
logger.info( f"DataHelper working on root dir \"{root_dir}\" with"
f"\n\t - Dataset dir = {self.data_dir}"
f"\n\t - Work dataset dir= {self.work_dir}"
)
if not self.work_dir.exists():
logger.debug( f"Work dir {self.work_dir} does not exist: creating it ..." )
self.work_dir.mkdir()
@property
def data_dir(self) -> Path:
return Path(self.root_dir) / "datasets"
@property
def work_dir(self) -> Path:
return Path(self.root_dir) / "datasets_work"
def get_dataset_path(self, dataset_name: str) -> str:
dataset_path = self.data_dir / dataset_name
return str(dataset_path)
def get_work_dataset_path(self, dataset_name: str) -> str:
dataset_path = self.work_dir / dataset_name
return str(dataset_path)
def create_clean_csv_dataset(self, dataset_name: str, clean_dataset_name: str) -> None:
original_dataset_path = self.get_dataset_path(dataset_name)
clean_dataset_path = self.get_work_dataset_path(clean_dataset_name)
utility.preprocess_gpuz_log_file(original_dataset_path, clean_dataset_path)
def load_gpuz_dataset(self, dataset_name: str) -> DataFrame:
dataset_path = self.get_work_dataset_path(dataset_name)
df: DataFrame = pd.read_csv( dataset_path )
# Force the right column data types
for column in df.columns:
logger.debug(column)
if str(column) == "date":
df[column] = pd.to_datetime( df[column], errors="coerce" )
else:
df[column] = pd.to_numeric( df[column], errors="coerce" )
return df
if __name__ == "__main__":
root_dir = os.getcwd()
data_helper = DataHelper( root_dir )
data_helper.create_clean_csv_dataset( "gpuz_sensor_log.txt", "clean_gpuz_sensor_log.csv" )
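# Note on the dtype coercion in load_gpuz_dataset (illustrative, synthetic
# values): a sensor column like ["56.0", "57.5", "-"] becomes
# [56.0, 57.5, NaN] via pd.to_numeric(errors="coerce"), so malformed readings
# turn into NaN instead of raising.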
| 32.742857
| 94
| 0.665794
| 313
| 2,292
| 4.57508
| 0.284345
| 0.115223
| 0.073324
| 0.050279
| 0.25419
| 0.219274
| 0.219274
| 0.173184
| 0.121508
| 0.065642
| 0
| 0
| 0.238656
| 2,292
| 69
| 95
| 33.217391
| 0.82063
| 0.014398
| 0
| 0.078431
| 0
| 0
| 0.116083
| 0.011077
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137255
| false
| 0
| 0.176471
| 0.039216
| 0.431373
| 0.019608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
274a678ce7ef66ccf7cfb21453ee41a8617d1632
| 4,173
|
py
|
Python
|
m5-101/content/solutions/web-crawler/section1&2&3.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | 4
|
2021-03-25T13:15:38.000Z
|
2021-11-10T12:29:19.000Z
|
m5-101/content/solutions/web-crawler/section1&2&3.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | null | null | null |
m5-101/content/solutions/web-crawler/section1&2&3.py
|
PaulCCCCCCH/m5-101
|
81201b00cd81c1747ea0cd5f042a09eda02d6d1c
|
[
"MIT"
] | 4
|
2021-03-25T13:18:10.000Z
|
2021-04-08T13:44:48.000Z
|
import requests
from bs4 import BeautifulSoup as bs
import math
import sys, getopt
import re
import os
def re_cleaner(target: str, rep: str) -> str:
return re.sub("[^0-9a-zA-Z]+", rep, target)
# For Oxford ==============================================================================
# base_url = "https://www.ox.ac.uk/"
# base_dir = "pages/oxford"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('https://www.ox.ac.uk/admissions/graduate/courses/courses-a-z-listing')
# root_soup = bs(url_pages.text, 'html.parser')
# # print(root_soup.prettify())
# # find by class attr
# course_divs = root_soup.find_all(attrs={"class": "course-title"})
# for div in course_divs:
# # take the <a> out of each div, then parse its URL
# # (using re.find_all directly on URLs matching ** graduate/courses/ ** would be cleaner)
# link, degree = div.children
# degree = degree.strip()
# if re.search("D", degree) is None and re.match("PG", degree) is None:
# r = requests.get(base_url + link.get('href'))
# course_name = link.text
# with open(os.path.join(base_dir, re_cleaner(course_name+' '+degree, '-')+'.html'), mode='wb') as f:
# f.write(r.content)
#UIUC ==============================================================================
# base_url = "http://catalog.illinois.edu/"
# base_dir = "pages/uiuc"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('http://catalog.illinois.edu/graduate/')
# root_soup = bs(url_pages.text, 'html.parser')
# # print(root_soup.prettify())
# course_heads = root_soup.find_all("h4")
# for h in course_heads:
# # take the <a> from each heading and parse its URL; skip headings that have a margin-left style
# if 'style' not in h.attrs:
# # split into at most two parts; degree may carry extra text here, but when the filename is built the correct degree ends up after the last dash
# major, degree = h.text.split(',' ,1)
# degree = degree.strip()
# if re.search("D", degree) is None and re.match("PG", degree) is None:
# r = requests.get(base_url + h.a['href'])
# with open(os.path.join(base_dir, re_cleaner(major + ' ' + degree, ' ')+'.html'), mode='wb') as f:
# f.write(r.content)
# IC ==============================================================================
#
# base_url = "https://www.imperial.ac.uk/"
# base_dir = "pages/ic"
# if not os.path.exists(base_dir):
# os.makedirs(base_dir)
# # Get the root page and extract target urls
# url_pages = requests.get('https://www.imperial.ac.uk/study/pg/courses/')
# root_soup = bs(url_pages.text, 'html.parser')
# # find by class attr
# course_lis = root_soup.find_all(attrs={"class": "course"})
# for li in course_lis:
# degree = li.a.contents[5].contents[1].strip()
# if re.match("D", degree) is None and re.match("PG", degree) is None:
# url = base_url + li.a['href']
# major = li.a['title']
# r = requests.get(url)
# with open(os.path.join(base_dir, re_cleaner(major + ' ' + degree, '-')+'.html'), mode='wb') as f:
# f.write(r.content)
# Make Index ==============================================================================
import json
import pickle
def clean_html(soup: bs):
ss = soup.find_all('script')
for s in ss:
s.decompose()
return re_cleaner(soup.get_text(), ' ')
data = {}
pages_path = os.path.join(os.getcwd(), 'pages')
idx = 1
for school in os.listdir(pages_path):
school_path = os.path.join(pages_path, school)
for filename in os.listdir(school_path):
filepath = os.path.join(school_path, filename)
program, degree_html = filename.rsplit('-', 1)
degree,_ = degree_html.split('.', 1)
print(filename)
with open(filepath) as f:
soup = bs(f, 'html.parser')
desc = clean_html(soup)
jsobj = json.dumps({"document_id": idx, "school_name": school, "program_name": program, "degree": degree, "file_path": filepath, "program_desc": desc})
data[idx] = jsobj
idx += 1
pkfile = 'programs.pkl'
with open(pkfile, 'wb') as f:
pickle.dump(data, f)
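# Quick illustration of re_cleaner (synthetic input, not from the crawl):
# every run of non-alphanumeric characters collapses into the replacement.
assert re_cleaner('M.Sc. (CS)', '-') == 'M-Sc-CS-'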
| 32.601563
| 163
| 0.578481
| 564
| 4,173
| 4.161348
| 0.264184
| 0.03579
| 0.030677
| 0.014061
| 0.438858
| 0.388581
| 0.388581
| 0.362164
| 0.34853
| 0.334043
| 0
| 0.00328
| 0.196262
| 4,173
| 127
| 164
| 32.858268
| 0.696482
| 0.677211
| 0
| 0
| 0
| 0
| 0.088767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.257143
| 0.028571
| 0.371429
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
274f3dedfd8af9c5162a6375c14921ce4ca86095
| 6,319
|
py
|
Python
|
sandbox/andrew/run_trpo_strike.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | 49
|
2017-12-11T11:00:02.000Z
|
2022-03-30T05:19:31.000Z
|
sandbox/andrew/run_trpo_strike.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | 2
|
2018-01-01T17:39:56.000Z
|
2019-07-24T04:49:08.000Z
|
sandbox/andrew/run_trpo_strike.py
|
leopauly/Observation-Learning-Simulations
|
462c04a87c45aae51537b8ea5b44646afa31d3a5
|
[
"MIT"
] | 12
|
2017-12-13T11:52:17.000Z
|
2020-12-03T00:53:29.000Z
|
import os
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
# from rllab.envs.mujoco.gather.swimmer_gather_env import SwimmerGatherEnv
os.environ["THEANO_FLAGS"] = "device=cpu"
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.envs.normalized_env import NormalizedEnv
from rllab.algos.trpo import TRPO
from rllab.misc.instrument import stub, run_experiment_lite
import itertools
from rllab import config
from sandbox.bradly.third_person.launchers.cyberpunk_aws import CyberpunkAWS
from sandbox.bradly.third_person.launchers.cyberpunk_aws_gail import CyberpunkAWSGAIL
stub(globals())
from distutils.dir_util import copy_tree
import numpy as np
import os, shutil
srcmodeldirs = ['../models/strikeinc/']
modeldir = 'model/'
if os.path.exists(modeldir):
shutil.rmtree(modeldir)
for srcdir in srcmodeldirs:
copy_tree(srcdir, modeldir)
# config.AWS_IMAGE_ID = "ami-7d23496b"#"ami-1263eb04"
# config.AWS_INSTANCE_TYPE = "g2.8xlarge"
# config.AWS_SPOT_PRICE = "2.6001"
# subnet = 'us-east-1d'
config.AWS_IMAGE_ID = "ami-20c1e740"
config.AWS_INSTANCE_TYPE = "g2.2xlarge"
config.AWS_SPOT_PRICE = "0.903"
subnet = 'us-west-1c'
# config.AWS_IMAGE_ID = "ami-ecdd408c"
# config.AWS_INSTANCE_TYPE = "g2.8xlarge"
# config.AWS_SPOT_PRICE = "2.601"
# subnet = 'us-west-2b'
# config.AWS_IMAGE_ID = "ami-b8f069d8"
# config.AWS_INSTANCE_TYPE = "g2.2xlarge"
# config.AWS_SPOT_PRICE = "0.601"
# subnet = 'us-west-2b'
config.AWS_NETWORK_INTERFACES = [
dict(
SubnetId=config.ALL_SUBNET_INFO[subnet]["SubnetID"],
Groups=[config.ALL_SUBNET_INFO[subnet]["Groups"]],
DeviceIndex=0,
AssociatePublicIpAddress=True,
)
]
def rand_strike():
vp = np.random.uniform(low=0, high=360, size=10).tolist()
angle = [45]#np.random.uniform(low=0, high=90, size=10).tolist()
ball = np.array([0.5, -0.175])
while True:
goal = np.concatenate([
np.random.uniform(low=0.15, high=0.7, size=1),
np.random.uniform(low=0.1, high=1.0, size=1)])
if np.linalg.norm(ball - goal) > 0.17:
break
return dict(vp=vp, goal=goal.tolist(), angle=angle,
imsize=(64, 64), name="strike", nvp=1,
modelname='model/model_90000_1408.57_1291.54_110.72',
modeldata='model/vdata_train.npy')
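# rand_strike() above uses simple rejection sampling: candidate goals are
# redrawn until they lie more than 0.17 units from the fixed ball position,
# so a strike target is never generated on top of the ball.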
strike_params = {
"env" : "Striker-v0",
"rand" : rand_strike,
}
oracle_mode = dict(mode='oracle', mode2='oracle')
# inception_mode = dict(mode='inception', imsize=(299, 299))
oursinception_mode = dict(mode='oursinception', mode2='oursinception', scale=0.1, imsize=(299, 299),
modelname='model/model_70000_225002.77_128751.15_96043.16_0')
ours_mode = dict(mode='ours', mode2='ours', scale=0.1)
ours_recon = dict(mode='ours', mode2='oursrecon', scale=1.0, ablation_type='recon')
tpil_mode = dict(mode='tpil', mode2='tpil', imsize=(48, 48))
gail_mode = dict(mode='tpil', mode2='gail')
ours_nofeat = dict(mode='ours', mode2='ours_nofeat', scale=1.0, ablation_type='nofeat')
ours_noimage = dict(mode='ours', mode2='ours_noimage', scale=1.0, ablation_type='noimage')
seeds = [123]
sanity = 'changing'
for params in [strike_params]:
for nvar in range(5):
randparams = params['rand']()
for modeparams in [oursinception_mode]:
for scale in [0.0, 0.1, 100.0]:#[1.0, 10.0, 100.0, 0.1]:
copyparams = randparams.copy()
copyparams.update(modeparams)
copyparams['scale'] = scale
mdp = normalize(GymEnv(params['env'], **copyparams))
if copyparams['mode'] == 'tpil':
if sanity == 'change1':
copyparams = params['rand']()
copyparams.update(modeparams)
mdp2 = normalize(GymEnv(params['env'], **copyparams))
elif sanity == 'same':
mdp2 = mdp
elif sanity == 'changing':
mdp2 = normalize(GymEnv(params['env'], mode='tpil'))
if 'imsize' in copyparams:
imsize = copyparams['imsize']
for seed in seeds:
if copyparams['mode'] == 'tpil':
del copyparams['imsize']
awsalgo = CyberpunkAWS
if modeparams == gail_mode:
awsalgo = CyberpunkAWSGAIL
algo = awsalgo(
expert_env=mdp2,#normalize(GymEnv(params['env'], mode='tpil')),
novice_env=mdp,
horizon=50,
itrs=200,
trajs=250,
imsize=imsize,
expert_pkl='expert_striker.pkl',
sanity=sanity,
**copyparams,
)
else:
policy = GaussianMLPPolicy(
env_spec=mdp.spec,
hidden_sizes=(32, 32),
init_std=1.0
)
baseline = LinearFeatureBaseline(
mdp.spec,
)
batch_size = 50*250
algo = TRPO(
env=mdp,
policy=policy,
baseline=baseline,
batch_size=batch_size,
whole_paths=True,
max_path_length=50,
n_itr=200,
step_size=0.01,
subsample_factor=1.0,
**copyparams
)
run_experiment_lite(
algo.train(),
exp_prefix="r-strike-ours-inception-7c-quad2",
n_parallel=4,
# dry=True,
snapshot_mode="all",
seed=seed,
mode="ec2_mujoco",
# terminate_machine=False
)
| 37.613095
| 100
| 0.547238
| 689
| 6,319
| 4.869376
| 0.34688
| 0.034873
| 0.021461
| 0.019076
| 0.273323
| 0.157377
| 0.12459
| 0.08763
| 0.05842
| 0.05842
| 0
| 0.057545
| 0.337237
| 6,319
| 167
| 101
| 37.838323
| 0.743553
| 0.109828
| 0
| 0.030534
| 0
| 0
| 0.090114
| 0.025161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007634
| false
| 0
| 0.114504
| 0
| 0.129771
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27516da8559c76c6cbc57c679759132dc516e07e
| 6,241
|
py
|
Python
|
kotori/config.py
|
joshuaavalon/kotori
|
3e7e1cf7b2c1834aea9a9404e80a53f8282aba1b
|
[
"Apache-2.0"
] | null | null | null |
kotori/config.py
|
joshuaavalon/kotori
|
3e7e1cf7b2c1834aea9a9404e80a53f8282aba1b
|
[
"Apache-2.0"
] | null | null | null |
kotori/config.py
|
joshuaavalon/kotori
|
3e7e1cf7b2c1834aea9a9404e80a53f8282aba1b
|
[
"Apache-2.0"
] | null | null | null |
import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from os.path import splitext
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from PIL import Image
from ruamel.yaml import YAML
from kotori.error import ConfigError
__all__ = [
"ItemKey", "RouteConfig", "TransformConfig", "StorageConfig", "Config",
"ConfigLoader", "DictConfigLoader", "JsonConfigLoader", "YamlConfigLoader"
]
@dataclass
class ItemKey:
path: str
key_path: str = field(init=False)
key: str = field(init=False)
transform: str = field(init=False)
suffix: str = field(init=False)
folder: str = field(init=False)
name: str = field(init=False)
format: str = field(init=False)
def __post_init__(self):
if self.path.endswith("/"):
raise ValueError("path cannot end with /")
path, suffix = splitext(self.path)
parts: List[str] = list(filter(None, path.split("/")))
self.format = Image.registered_extensions().get(suffix)
if self.format is None:
raise ValueError("Unknown format")
if len(parts) < 2:
raise ValueError("Too few arguments")
self.transform = parts[0]
key_parts = parts[1:]
self.key = "/".join(key_parts)
self.key_path = f"/{self.key}"
self.suffix = suffix
self.name = key_parts.pop()
self.folder = f"/{'/'.join(key_parts)}"
@dataclass
class SaveConfig:
format: str
options: Dict[str, Any]
@dataclass
class RouteConfig:
storage: str
transform: Union[bool, List[str], str] = False
expire: Optional[int] = None
save: Dict[str, Dict[str, Any]] = field(default_factory=dict)
@dataclass
class TransformConfig:
type: str
options: List[str] = field(default_factory=list)
@staticmethod
def from_query(query: str) -> "TransformConfig":
parts = [t.strip() for t in query.split("_")]
return TransformConfig(type=parts[0], options=parts[1:])
@staticmethod
def from_queries(queries: str) -> List["TransformConfig"]:
queries = [t.strip() for t in queries.split(",")]
for query in queries:
yield TransformConfig.from_query(query)
@dataclass
class StorageConfig:
type: str
options: Dict[str, Any] = field(default_factory=dict)
@dataclass
class Config:
storage: Dict[str, StorageConfig]
transform: Dict[str, List[TransformConfig]]
route: List[Tuple[str, RouteConfig]]
cache: Dict[str, Any]
def storage_of(self, key: ItemKey) -> StorageConfig:
route = self.route_of(key)
return self.storage[route.storage]
def route_of(self, key: ItemKey) -> RouteConfig:
for route in self.route:
pattern, config = route
if re.search(pattern, key.path) is not None:
return config
raise ConfigError(f"Cannot find config for {key.path}")
def transforms_of(self, key: ItemKey) -> List[TransformConfig]:
if key.transform in self.transform.keys():
return self.transform[key.transform]
return TransformConfig.from_queries(key.transform)
def allow_transform(self, key: ItemKey) -> bool:
route = self.route_of(key)
if not route.transform:
return False
if isinstance(route.transform, bool):
return True
if isinstance(route.transform, str):
transforms = [route.transform]
else:
transforms = route.transform
if key.transform in self.transform.keys():
return key.transform in transforms
configs = TransformConfig.from_queries(key.transform)
for config in configs:
if config.type not in transforms:
return False
return True
class ConfigLoader(ABC):
loaders: Dict[str, Type["ConfigLoader"]] = {}
def __init_subclass__(cls, *args, **kwargs):
super().__init_subclass__(*args, **kwargs)
for suffix in cls.support_suffixes():
if suffix not in cls.loaders:
cls.loaders[suffix] = cls
@classmethod
@abstractmethod
def support_suffixes(cls) -> List[str]:
raise NotImplementedError()
@classmethod
def load(cls, path: Union[Path, str]) -> Config:
if isinstance(path, str):
path = Path(path)
suffix = path.suffix
if suffix not in cls.loaders:
raise ConfigError(f"{suffix} is a unknown format")
loader = cls.loaders[suffix]()
config = loader._load(path) # pylint: disable=protected-access
return config
@abstractmethod
def _load(self, path: Path) -> Config:
raise NotImplementedError()
class DictConfigLoader(ConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return []
def _load(self, path: Path) -> Config:
config = self._load_dict(path)
storage = {}
for name, cfg in config.get("storage", {}).items():
storage[name] = StorageConfig(**cfg)
transform = {}
for name, cfg in config.get("transform", {}).items():
transform[name] = [TransformConfig(**c) for c in cfg]
route = []
for name, cfg in config.get("route", {}).items():
route.append((name, RouteConfig(**cfg)))
return Config(
storage=storage,
transform=transform,
route=route,
cache=config.get("cache", {})
)
@abstractmethod
def _load_dict(self, path: Path) -> Dict[str, Any]:
raise NotImplementedError()
class JsonConfigLoader(DictConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return [".json"]
def _load_dict(self, path: Path) -> Dict[str, Any]:
with open(path, "r", encoding="utf-8") as file:
return json.load(file)
class YamlConfigLoader(DictConfigLoader):
@classmethod
def support_suffixes(cls) -> List[str]:
return [".yml", ".yaml"]
def _load_dict(self, path: Path) -> Dict[str, Any]:
yaml = YAML(typ="safe")
with open(path, "r", encoding="utf-8") as file:
return yaml.load(file)
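# Hypothetical usage sketch (file name and values are placeholders, not from
# the original project). Given a YAML file such as:
#
#   storage:
#     local:
#       type: filesystem
#       options: {root: /var/kotori}
#   transform:
#     thumb:
#       - {type: resize, options: ["200", "200"]}
#   route:
#     "^/thumb/":
#       storage: local
#       transform: true
#
# the loader dispatches on the ".yml" suffix and builds the dataclasses:
#
#   config = ConfigLoader.load("kotori.yml")
#   config.storage["local"].type  # -> "filesystem"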
| 30.296117
| 78
| 0.621054
| 723
| 6,241
| 5.282158
| 0.193638
| 0.020162
| 0.021995
| 0.03116
| 0.227023
| 0.183032
| 0.134067
| 0.134067
| 0.101859
| 0.019377
| 0
| 0.001516
| 0.260054
| 6,241
| 205
| 79
| 30.443902
| 0.825466
| 0.005127
| 0
| 0.263473
| 0
| 0
| 0.058321
| 0.003544
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107784
| false
| 0
| 0.05988
| 0.017964
| 0.467066
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27542459664ee95574391c6b162f6e4cf09c76b9
| 4,587
|
py
|
Python
|
parser_utils.py
|
fgypas/panoptes
|
d85bf83905fd0e546cce11e00d4daf4da2199fbf
|
[
"MIT"
] | 1
|
2019-11-03T22:08:19.000Z
|
2019-11-03T22:08:19.000Z
|
parser_utils.py
|
fgypas/panoptes
|
d85bf83905fd0e546cce11e00d4daf4da2199fbf
|
[
"MIT"
] | 27
|
2019-10-23T19:24:38.000Z
|
2022-02-10T19:40:24.000Z
|
parser_utils.py
|
fgypas/panoptes
|
d85bf83905fd0e546cce11e00d4daf4da2199fbf
|
[
"MIT"
] | null | null | null |
import os
import re
from pprint import pprint
from pandas.io.json import json_normalize
import datetime
import argparse
import sys
DATE = 'date'
LEVEL = 'level'
TYPE = 'type'
CLASS = 'class'
MESSAGE = 'message'
def match_date(line):
match_this = ''
matched = re.match(r'\[\w\w\w\s\w\w\w \d\d \d\d:\d\d:\d\d\s\d\d\d\d\]', line)
if matched:
# matches a date and adds it to match_this
match_this = matched.group()
else:
match_this = 'NONE'
return match_this
def generate_dicts(log_fh):
current_dict = {}
for line in log_fh:
if line.startswith(match_date(line)):
if current_dict:
yield current_dict
current_dict = {DATE: line.split('__')[0][1:25],
# TYPE: temp[0],
# CLASS: temp[1].split(' ')[2],
MESSAGE: ''}
else:
if DATE in current_dict:
current_dict[MESSAGE] += line[:]
else:
pass
yield current_dict
def structure_snakemake_logs(logs):
"""
Takes as input a list of parsed log dictionaries.
Returns a structured object for each entry.
Two types of entries exist:
- Submitted rules/jobs
- Finished rules/jobs
Returns list of structured entries
"""
snakemake_log_objects = []
for log in logs:
if 'rule' in log['message']:
print(log["message"])
try:
rule = re.search(r'rule (\w+):', log['message']).group(1)
except Exception:
rule = None
try:
input = re.search(r'input:\s(.*)', log['message']).group(1).split(",")
except Exception as e:
input = None
try:
output = re.search(r'output:\s(.*)', log['message']).group(1).split(",")
except Exception:
output = None
try:
log_c = re.search(r'log:\s(.*)', log['message']).group(1)
except Exception:
log_c = None
try:
wildcards = re.search(r'wildcards:\s(.*)', log['message']).group(1).split(",")
except Exception as e:
wildcards = None
try:
jobid = re.search(r'jobid:\s(\d+)', log['message']).group(1)
except Exception as e:
jobid = None
snakemake_log_objects.append({"job_type": 'submitted',
"job_id": jobid,
"rule": rule,
"input": input,
"output": output,
"log": log_c,
"wildcards": wildcards
})
elif "Finished job" in log['message']:
try:
job_id = re.search(r'Finished job (\d+)\.', log['message']).group(1)
progress = re.search(r'(\d+) of (\d+) steps \((\d+%)\) done', log['message']).group(1,2,3)
current_job = progress[0]
total_jobs = progress[1]
percent = progress[2]
except Exception as e:
current_job = None
total_jobs = None
percent = None
snakemake_log_objects.append({"job_type": 'finished',
"job_id": job_id,
"current_job": current_job,
"total_jobs": total_jobs,
"percent": percent
})
return snakemake_log_objects
def main():
"""
-import_file "example_files/example.log"
-export_csv_file "exported_tabular.csv"
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('-import_file', metavar='import_file', type=str,
help='Path to the log file to import.')
parser.add_argument('-export_csv_file', metavar='export_csv_file', type=str,
help='Path to export the results')
args = parser.parse_args()
import_file = args.import_file
with open(import_file) as f:
parced_logs = list(generate_dicts(f))
pprint(parced_logs)
print(structure_snakemake_logs(parced_logs))
#data = parced_logs.jason_normalize()
#data.to_csv('exported.csv')
if __name__ == '__main__':
main()
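# Illustrative timestamp format (synthetic line, not from a real log): this is
# the shape match_date() expects at the start of each entry.
# >>> match_date('[Mon Jan 01 12:00:00 2020] __ rule all:')
# '[Mon Jan 01 12:00:00 2020]'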
| 30.177632
| 106
| 0.480706
| 490
| 4,587
| 4.334694
| 0.257143
| 0.051789
| 0.011299
| 0.060264
| 0.148305
| 0.108286
| 0.088512
| 0.037665
| 0.037665
| 0.037665
| 0
| 0.007278
| 0.400916
| 4,587
| 151
| 107
| 30.377483
| 0.765648
| 0.094615
| 0
| 0.203884
| 0
| 0.009709
| 0.129657
| 0.006373
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038835
| false
| 0.009709
| 0.106796
| 0
| 0.165049
| 0.038835
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2758815a9272e4ba22d391476a74de67b2fc6c02
| 7,746
|
py
|
Python
|
python/beached_probability.py
|
OceanParcels/BayesianAnalysis_SouthAtlantic
|
a808896ea9104931a6ad625531231525c6c12826
|
[
"MIT"
] | 1
|
2022-01-12T08:24:14.000Z
|
2022-01-12T08:24:14.000Z
|
python/beached_probability.py
|
OceanParcels/BayesianAnalysis_SouthAtlantic
|
a808896ea9104931a6ad625531231525c6c12826
|
[
"MIT"
] | null | null | null |
python/beached_probability.py
|
OceanParcels/BayesianAnalysis_SouthAtlantic
|
a808896ea9104931a6ad625531231525c6c12826
|
[
"MIT"
] | null | null | null |
"""
Computes the probability field of beached particles from Ocean Parcels
simulations. Computes the posterior probability in the latitude of the beached
particles.
"""
import numpy as np
import xarray as xr
import pandas as pd
import os
def time_averaging_coast(array, window=30):
"""It averages the counts_america and computes a probability map that adds
up to 100%. It is built for the Beached particles 2D array.
Parameters
----------
array: array
2D array with dimensions (time, space). The time averaging
happens in axis=0 of the array.
window: int, optional
The time window for the averaging. Default value is 30 (days).
normalized: bool, optional
Normalizes the average in space, axis=1&2. Default True.
Returns
-------
averaged: array
time averaged fields dimensions (time//window, space).
time_array:
1D array marking the window jumps; it's unused downstream.
"""
nt, ny = array.shape
new_t_dim = nt//window
averaged = np.zeros((new_t_dim, ny))
time_array = np.arange(window, nt, window)
for t in range(0, new_t_dim):
index_slice = slice((t)*window, (t+1)*window)
mean_aux = np.mean(array[index_slice, :], axis=0)
if mean_aux.sum() == 0:
print(f'-- mean_aux.sum() = {mean_aux.sum()}')
averaged[t] = np.zeros_like(mean_aux)
else:
averaged[t] = mean_aux/mean_aux.sum()
print('-- Normalized?', averaged[t].sum())
return averaged, time_array
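# Minimal illustration (synthetic data, not part of the analysis): averaging
# a (4, 2) array of ones with window=2 yields a (2, 2) array in which every
# row is normalized to sum to 1, i.e. each entry is 0.5.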
# Creating the directory to store the analysis dataset
newpath = r'../analysis/'
if not os.path.exists(newpath):
os.makedirs(newpath)
path2folder = '../PierardBassottoMeirervanSebille_AttributionofPlastic/'
###############################################################################
# Setting the parameters
###############################################################################
compute_mean = True # True if you want to compute the average probability
average_window = 1234 # window size for computing the probability
print(f'Compute mean == {compute_mean}!')
domain_limits = [[-73, 25], [-80, -5]]
number_bins = (98, 75) # defined with respect to domain_limits to be 1x1 cell
half_point = number_bins[0]//2
lat_range = np.linspace(domain_limits[1][0], domain_limits[1][1],
number_bins[1])
# Loading priors. Computed with release_points.py script.
priors = pd.read_csv(path2folder + 'priors_river_inputs.csv',
index_col=0)
sources = list(priors.index)
number_sources = len(sources)
# Empty dictionaries to store computed probabilities.
counts_america = {}
counts_africa = {}
likelihood_america = {}
posterior_america = {}
likelihood_africa = {}
posterior_africa = {}
avg_label = ''
###############################################################################
# Building the histograms
###############################################################################
print('Building histograms')
time_dimensions = []
for loc in sources:
print(f'- {loc}')
path_2_file = path2folder + f"sa-s06-{loc}.nc"
particles = xr.load_dataset(path_2_file)
n = particles.dims['traj']
time = particles.dims['obs']
time_dimensions.append(time)
# filter the particles that beached
particles = particles.where((particles.beach == 1))
h_ame = np.zeros((time, number_bins[1]))
h_afr = np.zeros((time, number_bins[1]))
# beached_loc = np.zeros(time)
for t in range(time):
lons = particles['lon'][:, t].values
lats = particles['lat'][:, t].values
index = np.where(~np.isnan(lons))
lons = lons[index]
lats = lats[index]
# Compute the histogram
H, x_edges, y_edges = np.histogram2d(lons, lats, bins=number_bins,
range=domain_limits)
H = np.nan_to_num(H)  # drop NaNs, i.e., convert them to zeros
count_ame = np.sum(H[:55, :], axis=0) # west meridional sum
count_afr = np.sum(H[80:-5, :], axis=0) # east meridional sum
h_ame[t] = count_ame
h_afr[t] = count_afr
counts_america[loc] = h_ame
counts_africa[loc] = h_afr
time = min(time_dimensions)
###############################################################################
# To average or not to average, that's the question.
###############################################################################
if compute_mean:
print('Averaging histograms and computing likelihood')
for loc in sources:
print(f'- {loc}')
mean_ame, time_range = time_averaging_coast(counts_america[loc],
window=average_window)
mean_afr, _ = time_averaging_coast(counts_africa[loc],
window=average_window)
likelihood_america[loc] = mean_ame
likelihood_africa[loc] = mean_afr
time = time//average_window
avg_label = f'average_{average_window}'
else:
# convert counts to likelihood. The counts were normalized in line ~120.
likelihood_america = counts_america
likelihood_africa = counts_africa
time_range = np.arange(0, time, 1)
###############################################################################
# Normalizing constant (sum of all hypothesis)
###############################################################################
print('Computing normalizing constant')
normalizing_constant = np.zeros((time, 2, number_bins[1]))
# normalizing_constant_afr = np.zeros((time, 2, number_bins))
for t in range(time):
total = np.zeros((number_sources, 2, number_bins[1]))
for j, loc in enumerate(sources):
total[j, 0] = likelihood_america[loc][t]*priors['prior'][loc]
total[j, 1] = likelihood_africa[loc][t]*priors['prior'][loc]
normalizing_constant[t, 0] = np.sum(total[:, 0, :], axis=0)
normalizing_constant[t, 1] = np.sum(total[:, 1, :], axis=0)
###############################################################################
# Posterior probability
###############################################################################
print('Computing posterior probability')
for k, loc in enumerate(sources):
aux_ame = np.zeros((time, number_bins[1]))
aux_afr = np.zeros((time, number_bins[1]))
for t in range(time):
aux_ame[t] = likelihood_america[loc][t]*priors['prior'][loc] / \
normalizing_constant[t, 0]
aux_afr[t] = likelihood_africa[loc][t]*priors['prior'][loc] / \
normalizing_constant[t, 1]
posterior_america[loc] = (["time", "y"], aux_ame)
posterior_africa[loc] = (["time", "y"], aux_afr)
###############################################################################
# Saving the likelihood & posterior as netCDFs
###############################################################################
coordinates = dict(time=time_range,
lat=(["y"], lat_range))
attributes = {'description': "Beached posterior probability for America.",
'average_window': average_window}
# Posterior dataset
post_ame = xr.Dataset(data_vars=posterior_america,
coords=coordinates,
attrs=attributes)
attributes = {'description': "Beached posterior probability for Africa.",
'average_window': average_window}
# Posterior dataset
post_afr = xr.Dataset(data_vars=posterior_africa,
coords=coordinates,
attrs=attributes)
output_path_ame = newpath + f'beach_posterior_America_{avg_label}.nc'
output_path_afr = newpath + f'beach_posterior_Africa_{avg_label}.nc'
post_ame.to_netcdf(output_path_ame)
post_afr.to_netcdf(output_path_afr)
| 35.53211
| 79
| 0.572941
| 901
| 7,746
| 4.748058
| 0.251942
| 0.025713
| 0.017999
| 0.010285
| 0.159654
| 0.13698
| 0.102852
| 0.034596
| 0.034596
| 0.025245
| 0
| 0.013271
| 0.202298
| 7,746
| 217
| 80
| 35.695853
| 0.679074
| 0.207333
| 0
| 0.139344
| 0
| 0
| 0.118491
| 0.034977
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008197
| false
| 0
| 0.032787
| 0
| 0.04918
| 0.07377
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2759eded27b5f65ad495b43a6846ccc30736fba9
| 2,985
|
py
|
Python
|
models/cam_decoder.py
|
BwCai/DCAA-UDA
|
359c2122060aebfbe4384c918768c261fe2dc9c7
|
[
"Apache-2.0"
] | 2
|
2022-01-28T10:35:53.000Z
|
2022-03-09T14:38:59.000Z
|
models/cam_decoder.py
|
BwCai/DCAA-UDA
|
359c2122060aebfbe4384c918768c261fe2dc9c7
|
[
"Apache-2.0"
] | 1
|
2022-03-07T10:48:11.000Z
|
2022-03-07T10:48:11.000Z
|
models/cam_decoder.py
|
BwCai/DCAA-UDA
|
359c2122060aebfbe4384c918768c261fe2dc9c7
|
[
"Apache-2.0"
] | null | null | null |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
from models.aspp import build_aspp
from models.decoder import build_decoder
import pdb
class AttentionDecoder(nn.Module):
def __init__(self, num_classes, modal_num, backbone, BatchNorm):
super(AttentionDecoder, self).__init__()
backbone = 'resnet'
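# NOTE: the assignment above overrides the backbone argument, so inplanes is
# always computed for the 'resnet' branch (256 * modal_num).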
if backbone == 'resnet' or backbone == 'drn':
inplanes = 256 * modal_num
elif backbone == 'xception':
inplanes = 128 * modal_num
elif backbone == 'mobilenet':
inplanes = 24 * modal_num
else:
raise NotImplementedError
self.modal_num = modal_num
# attention sequential
self.att_conv = nn.Sequential(
nn.Conv2d(inplanes, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256) if BatchNorm!=nn.GroupNorm else BatchNorm(16, 256),
nn.ReLU(),
nn.Dropout(p=0.5),
nn.Conv2d(256, modal_num, kernel_size=1, stride=1, bias=False),
nn.Softmax(),
)
self.last_conv = nn.Sequential(
nn.Conv2d(256 * (modal_num + 1), 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256) if BatchNorm!=nn.GroupNorm else BatchNorm(16, 256),
nn.ReLU(),
nn.Dropout(0.5),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm(256) if BatchNorm!=nn.GroupNorm else BatchNorm(16, 256),
nn.ReLU(),
nn.Dropout(0.1),
nn.Conv2d(256, num_classes, kernel_size=1, stride=1))
self._init_weight()
def forward(self, x_in, low_level_feat):
x = x_in.copy()
_b, _c, _w, _h = x[0].size()
modal_x = torch.cat(x, dim=1) # B x 2C x W x H
# attention module
att_mask = self.att_conv(modal_x) # B x 2 x W x H
feat_x = x[0] * torch.unsqueeze(att_mask[:, 0, :, :], 1)
for _i in range(1, self.modal_num):
feat_x += x[_i] * torch.unsqueeze(att_mask[:, _i, :, :], 1)
x.append(feat_x)
residual_x = torch.cat(x, dim=1)
for _j in range(len(self.last_conv)-1):
residual_x = self.last_conv[_j](residual_x)
out = self.last_conv[-1](residual_x)
return att_mask, residual_x, out
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def build_attention_decoder(num_classes, modal_num, backbone, BatchNorm):
return AttentionDecoder(num_classes, modal_num, backbone, BatchNorm)
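if __name__ == "__main__":
    # Hypothetical smoke test (not in the original repo): two modalities and
    # 21 classes; shapes follow the B x 2C x W x H comments in forward().
    decoder = build_attention_decoder(num_classes=21, modal_num=2,
                                      backbone='resnet', BatchNorm=nn.BatchNorm2d)
    feats = [torch.randn(2, 256, 33, 33) for _ in range(2)]
    att_mask, residual, out = decoder(feats, low_level_feat=None)
    print(att_mask.shape, residual.shape, out.shape)
    # expected: (2, 2, 33, 33), (2, 256, 33, 33), (2, 21, 33, 33)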
| 37.78481
| 98
| 0.59665
| 401
| 2,985
| 4.25187
| 0.246883
| 0.051613
| 0.02346
| 0.031672
| 0.398827
| 0.323754
| 0.219941
| 0.219941
| 0.219941
| 0.185924
| 0
| 0.045092
| 0.286767
| 2,985
| 78
| 99
| 38.269231
| 0.755754
| 0.022111
| 0
| 0.151515
| 0
| 0
| 0.010981
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.121212
| 0.015152
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
275bb771b2d649e2d94b8d769d96499bc3dc8e16
| 2,505
|
py
|
Python
|
src/server/noize_reduction.py
|
kikuchiken-waseda/MRIVuewer
|
66cfe30d006b6491a093a9dfab5b538c5e49409e
|
[
"MIT"
] | null | null | null |
src/server/noize_reduction.py
|
kikuchiken-waseda/MRIVuewer
|
66cfe30d006b6491a093a9dfab5b538c5e49409e
|
[
"MIT"
] | 23
|
2020-02-11T19:13:24.000Z
|
2020-05-16T07:49:43.000Z
|
src/server/noize_reduction.py
|
kikuchiken-waseda/MRIVuewer
|
66cfe30d006b6491a093a9dfab5b538c5e49409e
|
[
"MIT"
] | null | null | null |
import scipy as sp
from pyssp.util import (
get_frame, add_signal, compute_avgpowerspectrum
)
def writeWav(param, signal, filename):
import wave
with wave.open(filename, 'wb') as wf:
wf.setparams(param)
s = sp.int16(signal * 32767.0).tostring()
wf.writeframes(s)
def jointMap(signal, params, **kwargs):
from pyssp.voice_enhancement import JointMap
# Setting for JM
ntime = kwargs.get('ntime', 300)
ratio = kwargs.get('ratio', 0.9)
winsize = kwargs.get('winsize', 256)
alpha = kwargs.get('alpha', 0.99)
constant = kwargs.get('constant', 0.001)
window = sp.hanning(winsize)
n_pow = compute_avgpowerspectrum(
signal[0:winsize * int(params[2] / float(winsize) / (1000.0/ntime))],
winsize, window
)
nf = int(len(signal) / (winsize / 2) - 1)
result = sp.zeros(len(signal), sp.float32)
ss = JointMap(
winsize, window,
alpha=alpha, ratio=ratio,
constant=constant
)
for no in range(nf):
s = get_frame(signal, winsize, no)
add_signal(result, ss.compute_by_noise_pow(s, n_pow), winsize, no)
return params, result
def videoRead(videoclip, winsize=256):
from wave import open
from os import remove
tmp = 'tmp.wav'
audioclip = videoclip.audio
audioclip.write_audiofile(tmp)
with open(tmp) as wf:
n = wf.getnframes()
frames = wf.readframes(n)
params = (
(
wf.getnchannels(), wf.getsampwidth(), wf.getframerate(),
wf.getnframes(), wf.getcomptype(), wf.getcompname()
)
)
siglen = ((int)(len(frames) / 2 / winsize) + 1) * winsize
signal = sp.zeros(siglen, sp.float32)
signal[0:int(len(frames) / 2)] = sp.float32(
sp.fromstring(frames, sp.int16)
) / 32767.0
remove(tmp)
return signal, params
def normalization_from_video(fname, outfile, **kwargs):
from glob import glob
from os import remove
from moviepy.editor import VideoFileClip, AudioFileClip
tmp = 'tmp.wav'
video = VideoFileClip(fname)
winsize = kwargs.get('winsize', 256)
signal, params = videoRead(video, winsize)
kwargs.update({'params': params})
params, result = jointMap(signal, **kwargs)
writeWav(params, result, tmp)
newAudio = AudioFileClip(tmp)
newVideo = video.set_audio(newAudio)
newVideo.write_videofile(outfile)
remove(tmp)
for t in glob('*.mp3'):
remove(t)
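# Hypothetical invocation (paths are placeholders, not from the original
# project): denoise a video's audio track with the JointMap settings above.
# normalization_from_video('input.mp4', 'denoised.mp4', winsize=256, ntime=300)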
| 30.54878
| 77
| 0.621158
| 308
| 2,505
| 4.996753
| 0.337662
| 0.035088
| 0.020793
| 0.02989
| 0.033788
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030514
| 0.254291
| 2,505
| 81
| 78
| 30.925926
| 0.793362
| 0.005589
| 0
| 0.111111
| 0
| 0
| 0.025713
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.125
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
275bf3b0ea75846995ad189f786825044efb445e
| 4,940
|
py
|
Python
|
chord_rec/harmalysis/classes/scale.py
|
TianxueHu/ChordSymbolRec
|
d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac
|
[
"Unlicense",
"MIT"
] | null | null | null |
chord_rec/harmalysis/classes/scale.py
|
TianxueHu/ChordSymbolRec
|
d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac
|
[
"Unlicense",
"MIT"
] | null | null | null |
chord_rec/harmalysis/classes/scale.py
|
TianxueHu/ChordSymbolRec
|
d64a5be4f4914e6f682cb6d4079d7ba8a6fc2eac
|
[
"Unlicense",
"MIT"
] | null | null | null |
'''
harmalysis - a language for harmonic analysis and roman numerals
Copyright (C) 2020 Nestor Napoles Lopez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import harmalysis.common
from harmalysis.classes import interval
class MajorScale(object):
def __init__(self):
self._qualities = [
# Starting from I
['P', 'M', 'M', 'P', 'P', 'M', 'M'],
# Starting from II
['P', 'M', 'm', 'P', 'P', 'M', 'm'],
# Starting from III
['P', 'm', 'm', 'P', 'P', 'm', 'm'],
# Starting from IV
['P', 'M', 'M', 'A', 'P', 'M', 'M'],
# Starting from V
['P', 'M', 'M', 'P', 'P', 'M', 'm'],
# Starting from VI
['P', 'M', 'm', 'P', 'P', 'm', 'm'],
# Starting from VII
['P', 'm', 'm', 'P', 'D', 'm', 'm'],
]
self._semitones = [
# Starting from I
[0, 2, 4, 5, 7, 9, 11],
# Starting from II
[0, 2, 3, 5, 7, 9, 10],
# Starting from III
[0, 1, 3, 5, 7, 8, 10],
# Starting from IV
[0, 2, 4, 6, 7, 9, 11],
# Starting from V
[0, 2, 4, 5, 7, 9, 10],
# Starting from VI
[0, 2, 3, 5, 7, 8, 10],
# Starting from VII
[0, 1, 3, 5, 6, 8, 10],
]
def step_to_interval_spelling(self, step, mode=1):
qualities = self._qualities[(mode - 1) % harmalysis.common.DIATONIC_CLASSES]
quality = qualities[(step - 1) % harmalysis.common.DIATONIC_CLASSES]
return interval.IntervalSpelling(quality, step)
def step_to_semitones(self, step, mode=1):
semitones = self._semitones[(mode - 1) % harmalysis.common.DIATONIC_CLASSES]
step_semitones = semitones[(step - 1) % harmalysis.common.DIATONIC_CLASSES]
octaves = (step - 1) // harmalysis.common.DIATONIC_CLASSES
distance = (12 * octaves) + step_semitones
return distance
class NaturalMinorScale(MajorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'm', 'm'],
['P', 'm', 'm', 'P', 'D', 'm', 'm'],
['P', 'M', 'M', 'P', 'P', 'M', 'M'],
['P', 'M', 'm', 'P', 'P', 'M', 'm'],
['P', 'm', 'm', 'P', 'P', 'm', 'm'],
['P', 'M', 'M', 'A', 'P', 'M', 'M'],
['P', 'M', 'M', 'P', 'P', 'M', 'm'],
]
self._semitones = [
[0, 2, 3, 5, 7, 8, 10],
[0, 1, 3, 5, 6, 8, 10],
[0, 2, 4, 5, 7, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[0, 1, 3, 5, 7, 8, 10],
[0, 2, 4, 6, 7, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
]
class HarmonicMinorScale(NaturalMinorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'm', 'M'],
['P', 'm', 'm', 'P', 'D', 'M', 'm'],
['P', 'M', 'M', 'P', 'A', 'M', 'M'],
['P', 'M', 'm', 'A', 'P', 'M', 'm'],
['P', 'm', 'M', 'P', 'P', 'm', 'm'],
['P', 'A', 'M', 'A', 'P', 'M', 'M'],
['P', 'm', 'm', 'D', 'D', 'm', 'D'],
]
self._semitones = [
[0, 2, 3, 5, 7, 8, 11],
[0, 1, 3, 5, 6, 9, 10],
[0, 2, 4, 5, 6, 9, 11],
[0, 2, 3, 6, 7, 9, 10],
[0, 1, 4, 5, 7, 8, 10],
[0, 3, 4, 6, 7, 9, 11],
[0, 1, 3, 4, 6, 8, 9],
]
class AscendingMelodicMinorScale(HarmonicMinorScale):
def __init__(self):
super().__init__()
self._qualities = [
['P', 'M', 'm', 'P', 'P', 'M', 'M'],
['P', 'm', 'm', 'P', 'P', 'M', 'm'],
['P', 'M', 'M', 'A', 'A', 'M', 'M'],
['P', 'M', 'M', 'A', 'P', 'M', 'm'],
['P', 'M', 'M', 'P', 'P', 'm', 'm'],
['P', 'M', 'm', 'P', 'D', 'm', 'm'],
['P', 'm', 'm', 'D', 'D', 'm', 'm'],
]
self._semitones = [
[0, 2, 3, 5, 7, 9, 11],
[0, 1, 3, 5, 7, 9, 10],
[0, 2, 4, 6, 8, 9, 11],
[0, 2, 4, 6, 7, 9, 10],
[0, 2, 4, 5, 7, 8, 10],
[0, 2, 3, 5, 6, 8, 10],
[0, 1, 3, 4, 6, 8, 10]
]
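if __name__ == "__main__":
    # Illustrative check (not part of the original module; assumes
    # harmalysis.common.DIATONIC_CLASSES == 7, matching the 7-entry tables
    # above): in a major scale, the third above the tonic is a major third,
    # i.e., 4 semitones.
    major = MajorScale()
    print(major.step_to_interval_spelling(3))  # a major third
    assert major.step_to_semitones(3) == 4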
| 35.285714
| 84
| 0.411943
| 678
| 4,940
| 2.927729
| 0.187316
| 0.054408
| 0.071033
| 0.066499
| 0.488161
| 0.438287
| 0.271537
| 0.198992
| 0.182368
| 0.115365
| 0
| 0.076059
| 0.369231
| 4,940
| 140
| 85
| 35.285714
| 0.560976
| 0.193927
| 0
| 0.452632
| 0
| 0
| 0.050077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063158
| false
| 0
| 0.021053
| 0
| 0.147368
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
275c77321e139727fd234a2be346f888681c1954
| 8,350
|
py
|
Python
|
idaes/apps/caprese/examples/cstr_rodrigo/nmpc_main.py
|
Robbybp/idaes-pse
|
8a41dbd05819f82806cf17a6e5f06aef79a775e3
|
[
"RSA-MD"
] | null | null | null |
idaes/apps/caprese/examples/cstr_rodrigo/nmpc_main.py
|
Robbybp/idaes-pse
|
8a41dbd05819f82806cf17a6e5f06aef79a775e3
|
[
"RSA-MD"
] | 2
|
2021-08-18T19:42:02.000Z
|
2021-10-22T04:44:31.000Z
|
idaes/apps/caprese/examples/cstr_rodrigo/nmpc_main.py
|
Robbybp/idaes-pse
|
8a41dbd05819f82806cf17a6e5f06aef79a775e3
|
[
"RSA-MD"
] | 1
|
2021-03-17T20:31:17.000Z
|
2021-03-17T20:31:17.000Z
|
##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
"""
Example for Caprese's NMPC module.
Main script for running the example.
"""
import random

from idaes.apps.caprese.dynamic_builder import DynamicSim
from idaes.apps.caprese.util import apply_noise_with_bounds
from pyomo.environ import SolverFactory, Reference
from pyomo.dae.initialization import solve_consistent_initial_conditions
import idaes.logger as idaeslog
from idaes.apps.caprese.examples.cstr_rodrigo.cstr_rodrigo_model import make_model
from idaes.apps.caprese.data_manager import PlantDataManager
from idaes.apps.caprese.data_manager import ControllerDataManager
from idaes.apps.caprese.plotlibrary import (
    plot_setpoint_tracking_results,
    plot_control_input)

__author__ = "Kuan-Han Lin"

# See if ipopt is available and set up the solver
if SolverFactory('ipopt').available():
    solver = SolverFactory('ipopt')
    solver.options = {
        'tol': 1e-6,
        'bound_push': 1e-8,
        'halt_on_ampl_error': 'yes',
        'linear_solver': 'ma57',
    }
else:
    solver = None


def main():
    m_controller = make_model(horizon=10, ntfe=5, ntcp=2, bounds=True)
    sample_time = 2.
    m_plant = make_model(horizon=sample_time, ntfe=2, ntcp=2, bounds=True)
    time_plant = m_plant.t

    simulation_horizon = 20
    n_samples_to_simulate = round(simulation_horizon/sample_time)

    samples_to_simulate = [time_plant.first() + i*sample_time
                           for i in range(1, n_samples_to_simulate)]

    # We must tell the controller which variables are our
    # inputs and measurements.
    inputs = [
        m_plant.Tjinb[0],
    ]
    measurements = [
        m_plant.Tall[0, "T"],
        m_plant.Tall[0, "Tj"],
        m_plant.Ca[0],
    ]

    # Construct the "NMPC simulator" object
    nmpc = DynamicSim(
        plant_model=m_plant,
        plant_time_set=m_plant.t,
        controller_model=m_controller,
        controller_time_set=m_controller.t,
        inputs_at_t0=inputs,
        measurements_at_t0=measurements,
        sample_time=sample_time,
    )

    plant = nmpc.plant
    controller = nmpc.controller

    p_t0 = nmpc.plant.time.first()
    c_t0 = nmpc.controller.time.first()
    p_ts = nmpc.plant.sample_points[1]
    c_ts = nmpc.controller.sample_points[1]

    # -------------------------------------------------------------------
    # Declare variables of interest for plotting. Declaring nothing is
    # fine; the data managers will still save the most important data.
    states_of_interest = [Reference(nmpc.plant.mod.Ca[:]),
                          Reference(nmpc.plant.mod.Tall[:, "T"])]
    plant_data = PlantDataManager(plant, states_of_interest)
    controller_data = ControllerDataManager(controller, states_of_interest)
    # -------------------------------------------------------------------

    solve_consistent_initial_conditions(plant, plant.time, solver)
    solve_consistent_initial_conditions(controller, controller.time, solver)

    # We now perform the "RTO" calculation: find the optimal steady state
    # that achieves the following setpoint.
    setpoint = [(controller.mod.Ca[0], 0.018)]
    setpoint_weights = [(controller.mod.Ca[0], 1.)]

    # nmpc.controller.add_setpoint_objective(setpoint, setpoint_weights)
    # nmpc.controller.solve_setpoint(solver)
    nmpc.controller.add_single_time_optimization_objective(setpoint,
                                                           setpoint_weights)
    nmpc.controller.solve_single_time_optimization(solver,
                                                   ic_type="measurement_var",
                                                   require_steady=True,
                                                   load_setpoints=True)

    # Now we are ready to construct the tracking NMPC problem
    tracking_weights = [
        *((v, 1.) for v in nmpc.controller.vectors.differential[:, 0]),
        *((v, 1.) for v in nmpc.controller.vectors.input[:, 0]),
    ]
    nmpc.controller.add_tracking_objective(tracking_weights)
    nmpc.controller.constrain_control_inputs_piecewise_constant()
    nmpc.controller.initialize_to_initial_conditions()

    # Solve the first control problem
    nmpc.controller.vectors.input[...].unfix()
    nmpc.controller.vectors.input[:, 0].fix()
    solver.solve(nmpc.controller, tee=True)
    controller_data.save_controller_data(iteration=0)

    # -------------------------------------------------------------------
    # Noise for measurements
    variance = [
        (nmpc.controller.mod.Tall[0, "T"], 0.05),
        (nmpc.controller.mod.Tall[0, "Tj"], 0.02),
        (nmpc.controller.mod.Ca[0], 1.0E-5),
    ]
    nmpc.controller.set_variance(variance)
    measurement_variance = [
        v.variance for v in controller.MEASUREMENT_BLOCK[:].var
    ]
    measurement_noise_bounds = [
        (var[c_t0].lb, var[c_t0].ub)
        for var in controller.MEASUREMENT_BLOCK[:].var
    ]

    # Noise for inputs
    variance = [
        (plant.mod.Tjinb[0], 0.01),
    ]
    nmpc.plant.set_variance(variance)
    input_variance = [v.variance for v in plant.INPUT_BLOCK[:].var]
    input_noise_bounds = [
        (var[p_t0].lb, var[p_t0].ub) for var in plant.INPUT_BLOCK[:].var
    ]

    random.seed(246)
    # -------------------------------------------------------------------

    # Extract inputs from the controller and inject them into the plant
    inputs = controller.generate_inputs_at_time(c_ts)
    plant.inject_inputs(inputs)

    plant_data.save_initial_plant_data()

    # This "initialization" really simulates the plant with the new inputs.
    nmpc.plant.initialize_by_solving_elements(solver)
    nmpc.plant.vectors.input[...].fix()  # Fix the inputs to solve the plant
    solver.solve(nmpc.plant, tee=True)
    plant_data.save_plant_data(iteration=0)

    for i in range(1, n_samples_to_simulate + 1):
        print('\nENTERING NMPC LOOP ITERATION %s\n' % i)

        measured = nmpc.plant.generate_measurements_at_time(p_ts)
        nmpc.plant.advance_one_sample()
        nmpc.plant.initialize_to_initial_conditions()
        measured = apply_noise_with_bounds(
            measured,
            measurement_variance,
            random.gauss,
            measurement_noise_bounds,
        )

        nmpc.controller.advance_one_sample()
        nmpc.controller.load_initial_conditions(measured)

        solver.solve(nmpc.controller, tee=True)
        controller_data.save_controller_data(iteration=i)

        inputs = controller.generate_inputs_at_time(c_ts)
        inputs = apply_noise_with_bounds(
            inputs,
            input_variance,
            random.gauss,
            input_noise_bounds,
        )
        plant.inject_inputs(inputs)

        nmpc.plant.initialize_by_solving_elements(solver)
        nmpc.plant.vectors.input[...].fix()  # Fix the inputs to solve the plant
        solver.solve(nmpc.plant, tee=True)
        plant_data.save_plant_data(iteration=i)

    plot_setpoint_tracking_results(states_of_interest,
                                   plant_data.plant_df,
                                   controller_data.setpoint_df)

    inputs_to_plot = [Reference(nmpc.plant.mod.Tjinb[:])]
    plot_control_input(inputs_to_plot, plant_data.plant_df)

    return nmpc, plant_data, controller_data


if __name__ == '__main__':
    nmpc, plant_data, controller_data = main()
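# ---------------------------------------------------------------------------
# Editor's sketch: the noise steps above sample around each measured/input
# value and clip the result back into the variable's bounds. The helper below
# is a hypothetical illustration of that idea with the same call pattern
# (values, variances, a sampler, and (lb, ub) pairs). It is NOT Caprese's
# actual apply_noise_with_bounds, and treating the variance as the square of
# the sampler's standard deviation is an assumption.
import random


def apply_noise_with_bounds_sketch(values, variances, sampler, bounds):
    noisy = []
    for val, var, (lb, ub) in zip(values, variances, bounds):
        x = sampler(val, var ** 0.5)  # e.g. random.gauss(mu, sigma)
        if lb is not None:  # None means unbounded on that side
            x = max(x, lb)
        if ub is not None:
            x = min(x, ub)
        noisy.append(x)
    return noisy


random.seed(246)
# One noisy jacket-inlet temperature, clipped to hypothetical bounds:
print(apply_noise_with_bounds_sketch([300.0], [0.01], random.gauss,
                                     [(280.0, 320.0)]))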
| 38.127854
| 82
| 0.623713
| 964
| 8,350
| 5.173237
| 0.274896
| 0.061761
| 0.015641
| 0.024063
| 0.232404
| 0.175657
| 0.166433
| 0.131141
| 0.10387
| 0.091839
| 0
| 0.011456
| 0.236886
| 8,350
| 218
| 83
| 38.302752
| 0.771186
| 0.216287
| 0
| 0.111111
| 0
| 0
| 0.021739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006944
| false
| 0
| 0.069444
| 0
| 0.083333
| 0.006944
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
275e8507ce35dccac0615b8d962c545d75b3823a
| 2,555
|
py
|
Python
|
connect_four/transposition/sqlite_transposition_table_test.py
|
rpachauri/connect4
|
6caf6965afaaff6883193ac295c6ac5b1f4e9c4a
|
[
"MIT"
] | null | null | null |
connect_four/transposition/sqlite_transposition_table_test.py
|
rpachauri/connect4
|
6caf6965afaaff6883193ac295c6ac5b1f4e9c4a
|
[
"MIT"
] | null | null | null |
connect_four/transposition/sqlite_transposition_table_test.py
|
rpachauri/connect4
|
6caf6965afaaff6883193ac295c6ac5b1f4e9c4a
|
[
"MIT"
] | null | null | null |
import unittest

import gym
import numpy as np

from connect_four.hashing import TicTacToeHasher
from connect_four.transposition.sqlite_transposition_table import SQLiteTranspositionTable


class TestSQLiteTranspositionTable(unittest.TestCase):
    def setUp(self) -> None:
        self.env = gym.make('tic_tac_toe-v0')

    def test_save_and_retrieve(self):
        # Empty board: neither player has placed a token yet.
        self.env.state = np.array([
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
        ])
        transposition = TicTacToeHasher(self.env).hash()

        tt = SQLiteTranspositionTable(database_file=":memory:")
        want_phi, want_delta = 1, 1
        tt.save(transposition=transposition, phi=want_phi, delta=want_delta)

        self.assertIn(transposition, tt)
        got_phi, got_delta = tt.retrieve(transposition=transposition)
        self.assertEqual(want_phi, got_phi)
        self.assertEqual(want_delta, got_delta)

    def test_overwrite_save(self):
        self.env.state = np.array([
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
        ])
        transposition = TicTacToeHasher(self.env).hash()

        tt = SQLiteTranspositionTable(database_file=":memory:")
        tt.save(transposition=transposition, phi=1, delta=1)
        # Saving again under the same key should overwrite, not duplicate.
        want_phi, want_delta = 2, 2
        tt.save(transposition=transposition, phi=want_phi, delta=want_delta)

        got_phi, got_delta = tt.retrieve(transposition=transposition)
        self.assertEqual(want_phi, got_phi)
        self.assertEqual(want_delta, got_delta)

    def test_close_and_reload(self):
        self.env.state = np.array([
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
            [
                [0, 0, 0, ],
                [0, 0, 0, ],
                [0, 0, 0, ],
            ],
        ])
        transposition = TicTacToeHasher(self.env).hash()

        # An on-disk database must survive a close/reopen cycle.
        tt = SQLiteTranspositionTable(database_file="sqlite_test.db")
        tt.save(transposition=transposition, phi=1, delta=1)
        tt.close()

        tt2 = SQLiteTranspositionTable(database_file="sqlite_test.db")
        self.assertIn(transposition, tt2)
        tt2.close()


if __name__ == '__main__':
    unittest.main()
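# ---------------------------------------------------------------------------
# Editor's sketch: the tests above exercise a save / retrieve / __contains__ /
# close protocol. A minimal way to satisfy it with the standard-library
# sqlite3 module is sketched below. The real SQLiteTranspositionTable is not
# shown in this file; the class name, schema, and types here are assumptions
# made purely for illustration.
import sqlite3


class SQLiteTranspositionTableSketch:
    def __init__(self, database_file=":memory:"):
        self.conn = sqlite3.connect(database_file)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS tt "
            "(transposition TEXT PRIMARY KEY, phi INTEGER, delta INTEGER)")

    def save(self, transposition, phi, delta):
        # INSERT OR REPLACE gives the overwrite semantics test_overwrite_save expects.
        self.conn.execute(
            "INSERT OR REPLACE INTO tt VALUES (?, ?, ?)",
            (transposition, phi, delta))
        self.conn.commit()

    def retrieve(self, transposition):
        row = self.conn.execute(
            "SELECT phi, delta FROM tt WHERE transposition = ?",
            (transposition,)).fetchone()
        return row  # (phi, delta) tuple, or None if absent

    def __contains__(self, transposition):
        return self.retrieve(transposition) is not None

    def close(self):
        self.conn.close()


tt = SQLiteTranspositionTableSketch()
tt.save("board-hash", phi=1, delta=1)
print("board-hash" in tt, tt.retrieve("board-hash"))
tt.close()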
| 30.416667
| 90
| 0.523288
| 269
| 2,555
| 4.791822
| 0.208178
| 0.079131
| 0.111715
| 0.139643
| 0.664081
| 0.664081
| 0.617533
| 0.617533
| 0.552366
| 0.552366
| 0
| 0.039879
| 0.35225
| 2,555
| 83
| 91
| 30.783133
| 0.738973
| 0
| 0
| 0.625
| 0
| 0
| 0.025832
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.055556
| false
| 0
| 0.069444
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
275ea88f14a647fe3701cbe45b6f30ea2d89fba6
| 1,226
|
py
|
Python
|
test/unittests/study/mri/test_mc.py
|
szho42/banana
|
131804803e6293716e9d11cbb6f3ca244b2931f9
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/study/mri/test_mc.py
|
szho42/banana
|
131804803e6293716e9d11cbb6f3ca244b2931f9
|
[
"Apache-2.0"
] | null | null | null |
test/unittests/study/mri/test_mc.py
|
szho42/banana
|
131804803e6293716e9d11cbb6f3ca244b2931f9
|
[
"Apache-2.0"
] | null | null | null |
from nipype import config
config.enable_debug_mode()
from banana.testing import BaseTestCase as TestCase  # @IgnorePep8 @Reimport
# from banana.study.multimodal.test_motion_detection import (  # @IgnorePep8 @Reimport
#     MotionDetection, inputs)
from banana.study.multimodal.mrpet import create_motion_correction_class  # @IgnorePep8 @Reimport

ref = 'ref'
ref_type = 't1'
t1s = ['ute']
t2s = ['t2']
epis = ['epi']
dwis = [['dwi_main', '0'], ['dwi_opposite', '-1']]


class TestMC(TestCase):

    # def test_epi_mc(self):
    #
    #     study = self.create_study(
    #         MotionDetection, 'MotionDetection', inputs=inputs,
    #         enforce_inputs=False)
    #     study.data('motion_detection_output')
    #     self.assertFilesetCreated('motion_detection_output', study.name)

    def test_motion_correction(self):
        MotionCorrection, inputs, out_data = create_motion_correction_class(
            'MotionCorrection', ref, ref_type, t1s=t1s, t2s=t2s, dwis=dwis,
            epis=epis)
        study = self.create_study(
            MotionCorrection, 'MotionCorrection', inputs=inputs,
            enforce_inputs=False)
        study.data(out_data)
        self.assertFilesetCreated(out_data, study.name)
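# ---------------------------------------------------------------------------
# Editor's sketch: create_motion_correction_class is a class factory -- a
# function that builds a study class at runtime and returns it along with its
# inputs and expected output name. The pattern itself can be sketched with
# plain Python's type(); everything below is illustrative and does not
# reproduce banana's actual API.
def create_study_class_sketch(name, ref, ref_type, **contrasts):
    inputs = {'ref': ref, 'ref_type': ref_type, **contrasts}
    study_cls = type(name, (object,), {'inputs': inputs})  # class built at runtime
    out_data = name.lower() + '_output'
    return study_cls, inputs, out_data


StudyCls, sketch_inputs, sketch_out = create_study_class_sketch(
    'MotionCorrection', 'ref', 't1', t1s=['ute'], t2s=['t2'])
print(StudyCls.__name__, sketch_out)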
| 32.263158
| 97
| 0.681892
| 136
| 1,226
| 5.933824
| 0.382353
| 0.037175
| 0.037175
| 0.061958
| 0.096654
| 0.096654
| 0.096654
| 0
| 0
| 0
| 0
| 0.013333
| 0.204731
| 1,226
| 37
| 98
| 33.135135
| 0.814359
| 0.355628
| 0
| 0
| 0
| 0
| 0.087516
| 0
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27616031c83ae4189c96219a4ca18b7e0e254aed
| 1,052
|
py
|
Python
|
tests/test_country_service.py
|
gothill/python-fedex
|
62dc8f554babd7066d6c6e7c478944f30fc2b75b
|
[
"BSD-3-Clause"
] | 100
|
2016-01-22T23:46:10.000Z
|
2022-03-26T05:00:53.000Z
|
tests/test_country_service.py
|
gothill/python-fedex
|
62dc8f554babd7066d6c6e7c478944f30fc2b75b
|
[
"BSD-3-Clause"
] | 77
|
2016-01-19T06:10:22.000Z
|
2022-03-26T06:04:14.000Z
|
tests/test_country_service.py
|
gothill/python-fedex
|
62dc8f554babd7066d6c6e7c478944f30fc2b75b
|
[
"BSD-3-Clause"
] | 102
|
2016-02-08T23:28:45.000Z
|
2022-02-28T11:37:27.000Z
|
"""
Test module for the Fedex CountryService WSDL.
"""
import unittest
import logging
import sys
sys.path.insert(0, '..')
from fedex.services.country_service import FedexValidatePostalRequest
# Common global config object for testing.
from tests.common import get_fedex_config
CONFIG_OBJ = get_fedex_config()
logging.getLogger('suds').setLevel(logging.ERROR)
logging.getLogger('fedex').setLevel(logging.INFO)
@unittest.skipIf(not CONFIG_OBJ.account_number, "No credentials provided.")
class PackageMovementServiceTests(unittest.TestCase):
"""
These tests verify that the country service WSDL is in good shape.
"""
def test_postal_inquiry(self):
inquiry = FedexValidatePostalRequest(CONFIG_OBJ)
inquiry.Address.PostalCode = '29631'
inquiry.Address.CountryCode = 'US'
inquiry.send_request()
assert inquiry.response
assert inquiry.response.HighestSeverity == 'SUCCESS'
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
unittest.main()
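# ---------------------------------------------------------------------------
# Editor's sketch: the credential-gated skip above is a general unittest
# pattern -- decide once at import time whether integration tests can run,
# then skip the whole class if not. A self-contained version, using a
# hypothetical environment variable instead of the fedex config object:
import os
import unittest


HAVE_CREDS = bool(os.environ.get("FEDEX_ACCOUNT_NUMBER"))  # hypothetical env var


@unittest.skipIf(not HAVE_CREDS, "No credentials provided.")
class CredentialGatedTests(unittest.TestCase):
    def test_requires_credentials(self):
        # Only reached when credentials were detected at import time.
        self.assertTrue(HAVE_CREDS)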
| 25.658537
| 75
| 0.742395
| 121
| 1,052
| 6.289256
| 0.586777
| 0.03548
| 0.036794
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00678
| 0.158745
| 1,052
| 40
| 76
| 26.3
| 0.853107
| 0.147338
| 0
| 0
| 0
| 0
| 0.065217
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.047619
| false
| 0
| 0.238095
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
27621f7f93d71cdc400cae5017e3588d01c3c114
| 3,730
|
py
|
Python
|
setup_tools/getkucore.py
|
dougmassay/kindleunpack-calibre-plugin
|
906fc3820a9b1c179fc754ae5774ebe689a61419
|
[
"Unlicense",
"MIT"
] | 101
|
2015-03-24T10:29:15.000Z
|
2022-03-25T07:15:45.000Z
|
setup_tools/getkucore.py
|
dougmassay/kindleunpack-calibre-plugin
|
906fc3820a9b1c179fc754ae5774ebe689a61419
|
[
"Unlicense",
"MIT"
] | 3
|
2016-09-14T10:47:02.000Z
|
2018-01-09T13:32:29.000Z
|
setup_tools/getkucore.py
|
dougmassay/kindleunpack-calibre-plugin
|
906fc3820a9b1c179fc754ae5774ebe689a61419
|
[
"Unlicense",
"MIT"
] | 13
|
2015-09-28T07:05:18.000Z
|
2022-02-13T15:16:13.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab

from __future__ import unicode_literals, division, absolute_import, print_function

import os
import sys
import shutil
import inspect
import glob
import zipfile
import pythonpatch

if sys.version_info >= (3,):
    import urllib.request
else:
    import urllib2

SCRIPT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
SOURCE_DIR = os.path.dirname(SCRIPT_DIR)

COMMIT_SHA = 'c8be31a196fd92803f78ad34a3f18d40319bbac5'
# REMOTE_URL = 'https://github.com/kevinhendricks/KindleUnpack/archive/master.zip'
REMOTE_URL = 'https://github.com/kevinhendricks/KindleUnpack/archive/{}.zip'.format(COMMIT_SHA)
# FILE_NAME = os.path.join(SCRIPT_DIR, REMOTE_URL.split('/')[-1])
FILE_NAME = os.path.join(SCRIPT_DIR, 'KindleUnpack-{}'.format(REMOTE_URL.split('/')[-1]))
# CORE_DIR = 'KindleUnpack-master/lib/'
CORE_DIR = 'KindleUnpack-{}/lib'.format(COMMIT_SHA)
CORE_EXCLUDES = ['askfolder_ed.py', 'mobiml2xhtml.py', 'prefs.py', 'scrolltextwidget.py']
TARGET_DIR = os.path.join(SOURCE_DIR, 'kindleunpackcore')


def retrieveKindleUnpack():
    if os.path.exists(FILE_NAME) and os.path.isfile(FILE_NAME):
        os.remove(FILE_NAME)
    if sys.version_info >= (3,):
        def reporthook(blocknum, blocksize, totalsize):
            readsofar = blocknum * blocksize
            if totalsize > 0:
                percent = readsofar * 1e2 / totalsize
                s = "\r%5.1f%% %*d / %d" % (
                    percent, len(str(totalsize)), readsofar, totalsize)
                sys.stderr.write(s)
                if readsofar >= totalsize:  # near the end
                    sys.stderr.write("\n")
            else:  # total size is unknown
                sys.stderr.write("read %d\n" % (readsofar,))
        urllib.request.urlretrieve(REMOTE_URL, FILE_NAME, reporthook)
    else:
        u = urllib2.urlopen(REMOTE_URL)
        meta = u.info()
        file_size = int(meta.getheaders("Content-Length")[0])
        with open(FILE_NAME, 'wb') as f:
            print('Downloading: %s Bytes: %s' % (FILE_NAME, file_size))
            file_size_dl = 0
            block_sz = 8192
            while True:
                buffer = u.read(block_sz)
                if not buffer:
                    break
                file_size_dl += len(buffer)
                f.write(buffer)
                status = r'%10d [%3.2f%%]' % (file_size_dl, file_size_dl * 100. / file_size)
                status = status + chr(8)*(len(status)+1)
                print(status, end='')  # no newline: the chr(8) backspaces rewind the cursor


retrieveKindleUnpack()

if os.path.exists(TARGET_DIR) and os.path.isdir(TARGET_DIR):
    shutil.rmtree(TARGET_DIR)
os.mkdir(TARGET_DIR)

with zipfile.ZipFile(FILE_NAME) as zip_file:
    for member in zip_file.namelist():
        if member.startswith(CORE_DIR):
            name = os.path.basename(member)
            if not name or name in CORE_EXCLUDES:
                continue
            source = zip_file.open(member)
            target = open(os.path.join(TARGET_DIR, name), "wb")
            with source, target:
                shutil.copyfileobj(source, target)

# Patch kindleunpack.py, mobi_nav.py
print('Attempting to patch KindleUnpack file(s) ...')
patchfiles = glob.glob('*.patch')
for patch in patchfiles:
    parsedPatchSet = pythonpatch.fromfile(patch)
    if parsedPatchSet is not False:
        if parsedPatchSet.apply():
            print(parsedPatchSet.diffstat())
        else:
            os.chdir('..')
            sys.exit('Cannot apply patch to KindleUnpack file(s)!')
    else:
        os.chdir('..')
        sys.exit('Cannot parse patch file(s)!')

if os.path.exists(FILE_NAME) and os.path.isfile(FILE_NAME):
    os.remove(FILE_NAME)
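# ---------------------------------------------------------------------------
# Editor's sketch: the selective extraction above generalizes to a small
# standard-library helper -- walk a zip's namelist(), filter by path prefix
# and basename, and copy each surviving member out flat (no subdirectories).
# A self-contained version under those assumptions:
import os
import shutil
import zipfile


def extract_flat(zip_path, prefix, dest_dir, excludes=()):
    os.makedirs(dest_dir, exist_ok=True)
    with zipfile.ZipFile(zip_path) as zf:
        for member in zf.namelist():
            if not member.startswith(prefix):
                continue
            name = os.path.basename(member)
            if not name or name in excludes:  # skip directories and excluded files
                continue
            with zf.open(member) as src, \
                    open(os.path.join(dest_dir, name), "wb") as dst:
                shutil.copyfileobj(src, dst)


# Equivalent to the inline loop above, e.g.:
# extract_flat(FILE_NAME, CORE_DIR, TARGET_DIR, CORE_EXCLUDES)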
| 36.930693
| 95
| 0.627346
| 464
| 3,730
| 4.903017
| 0.353448
| 0.036923
| 0.017582
| 0.018462
| 0.186374
| 0.147692
| 0.126593
| 0.102857
| 0.053626
| 0.053626
| 0
| 0.01951
| 0.244236
| 3,730
| 100
| 96
| 37.3
| 0.787513
| 0.091689
| 0
| 0.158537
| 0
| 0
| 0.124593
| 0.011838
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.121951
| 0
| 0.146341
| 0.060976
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2764c5afd2309a3e37c20e039e1d0182465889f2
| 5,696
|
py
|
Python
|
pymoku/_oscilloscope_data.py
|
liquidinstruments/pymoku
|
a10c2516e5953722a5f5b52aec7944bec22492c2
|
[
"MIT"
] | 11
|
2016-10-11T04:37:14.000Z
|
2021-09-10T02:34:03.000Z
|
pymoku/_oscilloscope_data.py
|
liquidinstruments/pymoku
|
a10c2516e5953722a5f5b52aec7944bec22492c2
|
[
"MIT"
] | 8
|
2017-06-02T18:19:49.000Z
|
2020-09-07T06:15:53.000Z
|
pymoku/_oscilloscope_data.py
|
liquidinstruments/pymoku
|
a10c2516e5953722a5f5b52aec7944bec22492c2
|
[
"MIT"
] | 11
|
2018-07-12T04:18:40.000Z
|
2022-03-04T10:10:48.000Z
|
import struct

from pymoku import _frame_instrument

_OSC_SCREEN_WIDTH = 1024


class VoltsData(_frame_instrument.InstrumentData):
    """
    Object representing a frame of dual-channel data in units of Volts, and
    time in units of seconds. This is the native output format of the
    :any:`Oscilloscope` instrument. The *waveformid* property enables
    identification of uniqueness of a frame of data, as it is possible to
    retrieve the same data more than once (i.e. if the instrument has been
    paused).

    This object should not be instantiated directly, but will be returned by a
    call to :any:`get_data <pymoku.instruments.Oscilloscope.get_data>` or
    :any:`get_realtime_data
    <pymoku.instruments.Oscilloscope.get_realtime_data>` on the associated
    :any:`Oscilloscope` instrument.

    .. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.ch1
        :annotation: = [CH1_DATA]

    .. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.ch2
        :annotation: = [CH2_DATA]

    .. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.time
        :annotation: = [TIME]

    .. autoinstanceattribute:: pymoku._frame_instrument.VoltsData.waveformid
        :annotation: = n
    """
    def __init__(self, instrument, scales):
        super(VoltsData, self).__init__(instrument)

        #: Channel 1 data array in units of Volts. Present whether or not the
        #: channel is enabled, but the contents are undefined in the latter
        #: case.
        self.ch1 = []

        #: Channel 2 data array in units of Volts.
        self.ch2 = []

        #: Timebase
        self.time = []

        self._scales = scales

    def __json__(self):
        return {'ch1': self.ch1,
                'ch2': self.ch2,
                'time': self.time,
                'waveform_id': self.waveformid}

    def process_complete(self):
        super(VoltsData, self).process_complete()

        if self._stateid not in self._scales:
            return

        scales = self._scales[self._stateid]
        scale_ch1 = scales['scale_ch1']
        scale_ch2 = scales['scale_ch2']
        t1 = scales['time_min']
        ts = scales['time_step']

        try:
            smpls = int(len(self._raw1) / 4)
            dat = struct.unpack('<' + 'i' * smpls, self._raw1)
            dat = [x if x != -0x80000000 else None for x in dat]

            self._ch1_bits = [float(x) if x is not None
                              else None for x in dat[:_OSC_SCREEN_WIDTH]]
            self.ch1 = [x * scale_ch1 if x is not None
                        else None for x in self._ch1_bits]

            smpls = int(len(self._raw2) / 4)
            dat = struct.unpack('<' + 'i' * smpls, self._raw2)
            dat = [x if x != -0x80000000 else None for x in dat]

            self._ch2_bits = [float(x) if x is not None
                              else None for x in dat[:_OSC_SCREEN_WIDTH]]
            self.ch2 = [x * scale_ch2 if x is not None
                        else None for x in self._ch2_bits]
        except (IndexError, TypeError, struct.error):
            # If the data is malformed, force a reinitialisation on the next
            # packet
            self._frameid = None
            self._complete = False

        self.time = [t1 + (x * ts) for x in range(_OSC_SCREEN_WIDTH)]

        return True

    def process_buffer(self):
        # Compute the x-axis of the buffer
        if self._stateid not in self._scales:
            return

        scales = self._scales[self._stateid]
        self.time = [scales['buff_time_min'] + (scales['buff_time_step'] * x)
                     for x in range(len(self.ch1))]

        return True

    def _get_timescale(self, tspan):
        # Returns a scaling factor and units for time 'T'
        if tspan < 1e-6:
            scale_str = 'ns'
            scale_const = 1e9
        elif tspan < 1e-3:
            scale_str = 'us'
            scale_const = 1e6
        elif tspan < 1:
            scale_str = 'ms'
            scale_const = 1e3
        else:
            scale_str = 's'
            scale_const = 1.0

        return [scale_str, scale_const]

    def _get_xaxis_fmt(self, x, pos):
        # Returns format strings for the x-axis ticks and x-coordinates along
        # the time scale. Use this to set an x-axis format during plotting of
        # Oscilloscope frames.
        if self._stateid not in self._scales:
            return

        scales = self._scales[self._stateid]
        ts = scales['time_step']
        tscale_str, tscale_const = self._get_timescale(ts * _OSC_SCREEN_WIDTH)

        return {'xaxis': '%.1f %s' % (x * tscale_const, tscale_str),
                'xcoord': '%.3f %s' % (x * tscale_const, tscale_str)}

    def get_xaxis_fmt(self, x, pos):
        """ Function suitable to use as argument to a matplotlib FuncFormatter
        for X (time) axis """
        return self._get_xaxis_fmt(x, pos)['xaxis']

    def get_xcoord_fmt(self, x):
        """ Function suitable to use as argument to a matplotlib FuncFormatter
        for X (time) coordinate """
        return self._get_xaxis_fmt(x, None)['xcoord']

    def _get_yaxis_fmt(self, y, pos):
        return {'yaxis': '%.1f %s' % (y, 'V'),
                'ycoord': '%.3f %s' % (y, 'V')}

    def get_yaxis_fmt(self, y, pos):
        """ Function suitable to use as argument to a matplotlib FuncFormatter
        for Y (voltage) axis """
        return self._get_yaxis_fmt(y, pos)['yaxis']

    def get_ycoord_fmt(self, y):
        """ Function suitable to use as argument to a matplotlib FuncFormatter
        for Y (voltage) coordinate """
        return self._get_yaxis_fmt(y, None)['ycoord']
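# ---------------------------------------------------------------------------
# Editor's sketch: the frame decode in process_complete() boils down to three
# steps -- unpack little-endian int32s, map the sentinel -0x80000000 to None
# (no sample at that point), and multiply by a per-channel scale to get Volts.
# A standalone illustration of that decode, with made-up raw bytes and scale:
import struct


def decode_frame(raw, scale, width=4):
    n = len(raw) // 4
    ints = struct.unpack('<' + 'i' * n, raw)  # little-endian signed 32-bit
    return [x * scale if x != -0x80000000 else None for x in ints[:width]]


raw = struct.pack('<4i', 100, -0x80000000, -250, 7)
print(decode_frame(raw, scale=0.001))  # [0.1, None, -0.25, 0.007]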
| 35.6
| 78
| 0.596208
| 735
| 5,696
| 4.434014
| 0.251701
| 0.012274
| 0.014728
| 0.022093
| 0.399202
| 0.345812
| 0.257441
| 0.216324
| 0.216324
| 0.216324
| 0
| 0.018219
| 0.30618
| 5,696
| 159
| 79
| 35.823899
| 0.806427
| 0.330758
| 0
| 0.204819
| 0
| 0
| 0.048613
| 0
| 0
| 0
| 0.005493
| 0
| 0
| 1
| 0.13253
| false
| 0
| 0.024096
| 0.024096
| 0.325301
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2764e8cf2af125cde1e1dea98f00be38d0e21369
| 6,205
|
py
|
Python
|
FlightRisingColiseum/Bot_FR.py
|
Eternal05/Flightrising-Coliseum-Bot
|
8f4895ff8a2d5533fe6a6546e09361738fd54910
|
[
"MIT"
] | 1
|
2021-05-17T02:52:40.000Z
|
2021-05-17T02:52:40.000Z
|
FlightRisingColiseum/Bot_FR.py
|
Eternal05/Flightrising-Coliseum-Bot
|
8f4895ff8a2d5533fe6a6546e09361738fd54910
|
[
"MIT"
] | null | null | null |
FlightRisingColiseum/Bot_FR.py
|
Eternal05/Flightrising-Coliseum-Bot
|
8f4895ff8a2d5533fe6a6546e09361738fd54910
|
[
"MIT"
] | null | null | null |
import os
from PIL import ImageGrab
import time
import win32api, win32con
from PIL import ImageOps
from numpy import *
import pyautogui
import random
from ctypes import windll

user32 = windll.user32
user32.SetProcessDPIAware()
# Some sort of DPI problem unrelated to the project: this stops the images
# from being cut off while using screen grabs.

# ------------------
x_pad = 475  # These pads make the bot work at different resolutions. Instead
y_pad = 699  # of changing all the coordinates, other users of the bot just
             # have to adjust the pads using screenGrab(), defined further below.


class Cord:  # All important coordinates that are checked often are stored here
    mainmenu = (835, 893)
    attack = (922, 806)
    scratch = (1106, 835)
    shred = (919, 950)
    attacker1 = (974, 177)
    hpattacker1 = (924, 13)
    attacker2 = (1091, 331)
    hpattacker2 = (1044, 147)
    attacker3 = (1223, 477)
    hpattacker3 = (1164, 305)
    attacker4 = (1031, 265)
    hpattacker4 = (984, 67)
    attacker5 = (1145, 433)
    hpattacker5 = (1104, 227)
    boss = (1007, 292)
    hpboss = (893, 67)


def screenGrab():
    # Originally used as a tool to get x_pad and y_pad. Currently used to scan
    # the screen for RGB values in startGame(). See previous versions in the journal.
    box = (x_pad + 1, y_pad + 1, x_pad + 1371, y_pad + 1220)
    im = ImageGrab.grab(box)
    hm = im.getpixel(Cord.hpboss)  # Put any coordinate you want here
    print(hm)
    return im


def leftClick():  # Just for clicking
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)  # Press left click
    time.sleep(.1)  # Delay
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)  # Release left click
    print('Click')


def mousePos(cord):
    # Moves the mouse to the given coordinates, taking the pads into account.
    # This changed a lot; see previous versions in the journal.
    # random.randint(0, 20) shifts the coordinates a bit to avoid bot detection.
    pyautogui.moveTo(x_pad + cord[0] + random.randint(0, 20),
                     y_pad + cord[1] + random.randint(0, 20), duration=0.25)


def get_cords():
    # Tool that was used to get the coordinates of all the buttons and
    # attackers in the game. No longer used now that the bot is complete.
    x, y = win32api.GetCursorPos()
    x = x - x_pad  # Takes the pads into account, like all the other functions
    y = y - y_pad
    print(x, y)


# ------------------
def startGame():  # Start of the main function
    wait = 0  # Used and explained further below
    while x_pad == 475:  # Just needed this to loop forever, so picked an always-true condition
        # Location of first menu
        mousePos((257, 559))
        leftClick()
        leftClick()
        time.sleep(1.5)
        # Location of second menu
        mousePos((489, 771))
        leftClick()
        time.sleep(3.5)
        while x_pad == 475:  # Loop for the actual game once past the menus
            x = round(random.uniform(0, 0.2), 2)  # Random float added to randomize wait intervals
            screenGrab()
            s = screenGrab()  # Takes a picture of the screen and assigns it to s
            if s.getpixel((205, 57)) == (93, 94, 134):
                # Checks if the bot got past the menu; good for checking 'camping' (explained in the journal)
                wait = 0  # Resets the counter for the number of times spent 'waiting', used farther below
            if s.getpixel(Cord.mainmenu) == (222, 214, 202):
                # Checks if the mainmenu coordinate matches the RGB value. If so, that
                # menu popped up and the level is complete. The coordinates & RGB values
                # come from using get_cords() & screenGrab() as tools; see the journal.
                print('level complete')
                mousePos((811, 822))  # Goes to the button that sends us back to the main menu
                leftClick()
                time.sleep(1.4 + x)  # Pauses after clicking for 1.4 + (randomized number) seconds
                break  # Breaks out of this loop to go back to the menu loop
            # All the other if statements follow the same idea as the one above
            if s.getpixel(Cord.attack) == (236, 234, 231):
                wait = 0
                print('attacking')
                mousePos(Cord.attack)
                leftClick()
                time.sleep(0.1 + x)
                screenGrab()
                s = screenGrab()  # Important screen change here; picture of the screen taken again
                if s.getpixel(Cord.shred) == (214, 172, 99):  # Special attack option
                    mousePos(Cord.shred)
                    leftClick()
                    time.sleep(0.4 + x)
                else:
                    mousePos(Cord.scratch)  # Normal attack option
                    leftClick()
                    time.sleep(0.4 + x)
            if s.getpixel(Cord.hpattacker1) == (49, 61, 48):
                mousePos(Cord.attacker1)
                leftClick()
                time.sleep(1.2 + x)
            elif s.getpixel(Cord.hpattacker2) == (49, 61, 48):
                mousePos(Cord.attacker2)
                leftClick()
                time.sleep(1.2 + x)
            elif s.getpixel(Cord.hpattacker3) == (49, 61, 48):
                mousePos(Cord.attacker3)
                leftClick()
                time.sleep(1.2 + x)
            elif s.getpixel(Cord.hpattacker4) == (49, 61, 48):
                mousePos(Cord.attacker4)
                leftClick()
                time.sleep(1.2 + x)
            elif s.getpixel(Cord.hpattacker5) == (49, 61, 48):
                mousePos(Cord.attacker5)
                leftClick()
                time.sleep(1.2 + x)
            elif s.getpixel(Cord.hpboss) == (10, 10, 13):
                mousePos(Cord.boss)
                leftClick()
                time.sleep(1.2 + x)
            else:
                # If no hp bars or attack buttons are detected, the page is
                # probably loading or the enemies are attacking.
                wait = wait + 1  # Wait counter goes up by 1 every loop
                print('waiting')
                if wait == 15:  # 15 consecutive waits means something has gone wrong, so exit
                    exit()
                time.sleep(2)  # Pause for 2 seconds, then loop back to recheck for hp bars or attack buttons
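# ---------------------------------------------------------------------------
# Editor's sketch: the bot's whole state machine rests on one primitive --
# grab the screen once, then compare a handful of known coordinates against
# expected RGB values to infer the game state. A tolerance-aware version of
# that probe is sketched below; the sample coordinate and color are taken
# from Cord above, the tolerance is an assumption, and ImageGrab.grab()
# needs a desktop session (Windows/macOS) to run.
from PIL import ImageGrab


def pixel_matches(image, cord, rgb, tolerance=0):
    """Return True if the pixel at the box-relative cord is within tolerance of rgb."""
    px = image.getpixel(cord)
    return all(abs(a - b) <= tolerance for a, b in zip(px[:3], rgb))


screen = ImageGrab.grab((x_pad + 1, y_pad + 1, x_pad + 1371, y_pad + 1220))
print(pixel_matches(screen, Cord.attack, (236, 234, 231), tolerance=5))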
| 34.859551
| 134
| 0.604674
| 833
| 6,205
| 4.480192
| 0.393758
| 0.033762
| 0.057878
| 0.040729
| 0.137996
| 0.067792
| 0.050911
| 0.050911
| 0.050911
| 0.050911
| 0
| 0.072878
| 0.301209
| 6,205
| 177
| 135
| 35.056497
| 0.787823
| 0.384367
| 0
| 0.275862
| 0
| 0
| 0.009725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043103
| false
| 0
| 0.077586
| 0
| 0.275862
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|