hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6edc5a13e61a1bdcdf25bd7cc6d12ff98125bfdd | 39 | py | Python | Python/Tests/TestData/SendToInteractiveWorkspace/PrintInterpreter.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 404 | 2019-05-07T02:21:57.000Z | 2022-03-31T17:03:04.000Z | Python/Tests/TestData/SendToInteractiveWorkspace/PrintInterpreter.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 1,672 | 2019-05-06T21:09:38.000Z | 2022-03-31T23:16:04.000Z | Python/Tests/TestData/SendToInteractiveWorkspace/PrintInterpreter.py | techkey/PTVS | 8355e67eedd8e915ca49bd38a2f36172696fd903 | [
"Apache-2.0"
] | 186 | 2019-05-13T03:17:37.000Z | 2022-03-31T16:24:05.000Z | import sys
print(sys.version_info[:2])
| 13 | 27 | 0.769231 |
6eddc54b10cac9432a4ae367a239a6cc76b7801e | 2,467 | py | Python | datename/filetime.py | tekemperor/datename | 6e2d27c6e64ecb8e07a673c2aa10ed1e661aad03 | [
"MIT"
] | null | null | null | datename/filetime.py | tekemperor/datename | 6e2d27c6e64ecb8e07a673c2aa10ed1e661aad03 | [
"MIT"
] | null | null | null | datename/filetime.py | tekemperor/datename | 6e2d27c6e64ecb8e07a673c2aa10ed1e661aad03 | [
"MIT"
] | null | null | null | """This module is designed to build timestrings from file metadata.
Everything is done in seconds since the unix epoch or UTC.
Author: Brian Lindsay
Author Email: tekemperor@gmail.com
"""
import sys
import os
import datetime
def filetime(file_path):
    """Return a formatted time string built from file metadata.

    The file's modification time is used, and the resulting string is in
    UTC (see __build_timestring for the format).

    file_path - full path of the file whose metadata time is read.
    """
    # Read the raw epoch timestamp first, then format it.
    return __build_timestring(__get_epochtime(file_path))
def __build_timestring(epoch_timestamp, format_string='%Y%m%dT%H%M%SZ'):
    """Builds formatted time string from seconds since the epoch.
    Currently only works for UTC (the default format appends a literal 'Z').
    epoch_timestamp - seconds since the unix epoch.
    format_string - strftime format string.
    TODO: support other timezones.
    """
    # utcfromtimestamp() interprets the epoch value as UTC wall-clock time.
    # NOTE(review): datetime.utcfromtimestamp is deprecated in Python 3.12+;
    # datetime.fromtimestamp(ts, tz=timezone.utc) is the modern equivalent,
    # but this module still targets Python 2 (see the __main__ block).
    time_object = datetime.datetime.utcfromtimestamp(epoch_timestamp)
    time_string = time_object.strftime(format_string)
    return time_string
def __get_epochtime(file_path, metadata_type="modified"):
    """Return a file's metadata time in seconds since the unix epoch.

    file_path - full file path.
    metadata_type - one of {'accessed', 'created', 'modified'}.
    Note: windows resets 'created' time on copy or move.
    """
    # Dispatch table instead of an if/elif chain; unknown keys raise the
    # same InvalidMetadataTimeTypeError as before.
    readers = {
        "accessed": os.path.getatime,
        "created": os.path.getctime,
        "modified": os.path.getmtime,
    }
    if metadata_type not in readers:
        raise InvalidMetadataTimeTypeError(metadata_type)
    return readers[metadata_type](file_path)
class InvalidMetadataTimeTypeError(Exception):
    """Raised for an unsupported metadata time type.

    Only {'accessed', 'created', 'modified'} are supported.
    """

    def __init__(self, value):
        # Keep the offending value so callers can inspect it.
        self.value = value

    def __str__(self):
        # Render the value with its repr so string types are quoted.
        return "%r" % (self.value,)
#In case this is run on the command line, which is useful for tests.
if __name__ == "__main__":
    # Call filetime() with all command line arguments, but not the script name.
    # NOTE: the previous bare `print filetime(...)` was Python 2 syntax and a
    # SyntaxError under Python 3; the call form works on both.
    print(filetime(*sys.argv[1:]))
| 34.746479 | 130 | 0.631131 |
6ee3a4796a1381dccf1edb9593a4f340f39bac8a | 9,750 | py | Python | main_cifar10.py | snu-ccl/approxCNN | 49cc0e6635682f678f8501424063102fe30d7dd6 | [
"CECILL-B"
] | 1 | 2022-01-16T03:45:43.000Z | 2022-01-16T03:45:43.000Z | main_cifar10.py | snu-ccl/approxCNN | 49cc0e6635682f678f8501424063102fe30d7dd6 | [
"CECILL-B"
] | null | null | null | main_cifar10.py | snu-ccl/approxCNN | 49cc0e6635682f678f8501424063102fe30d7dd6 | [
"CECILL-B"
] | null | null | null | from __future__ import print_function
from tqdm import *
import sys
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from models.resnet_cifar10 import *
from models.vgg_cifar10 import *
from models.utils_approx import rangeException
# ------------------------------------------------------------
# Command-line interface.
# NOTE: fixed the doubled "of of" and the missing space between the two
# concatenated title fragments in the original description string.
# ------------------------------------------------------------
parser = argparse.ArgumentParser(description='Implementation of Section V-A for `Precise Approximation of Convolutional Neural '
                                             'Networks for Homomorphically Encrypted Data.`')
parser.add_argument('--mode', default='inf', dest='mode', type=str,
                    help='Program mode. `train`: train randomly initialized model, '
                         '`inf`: inference the proposed approximate deep learning model')
parser.add_argument('--gpu', default=0, dest='gpuid', type=int,
                    help='ID of GPU that is used for training and inference.')
parser.add_argument('--backbone', default='resnet20', dest='backbone', type=str,
                    help='Backbone model.')
parser.add_argument('--approx_method', default='proposed', dest='approx_method', type=str,
                    help='Method of approximating non-arithmetic operations. `proposed`: proposed composition of minimax polynomials, '
                         '`square`: approximate ReLU as x^2, `relu_aq`: approximate ReLU as 2^-3*x^2+2^-1*x+2^-2. '
                         'For `square` and `relu_aq`, we use exact max-pooling function.')
parser.add_argument('--batch_inf', default=128, dest='batch_inf', type=int,
                    help='Batch size for inference.')
parser.add_argument('--alpha', default=14, dest='alpha', type=int,
                    help='The precision parameter. Integers from 4 to 14 can be used.')
parser.add_argument('--B_relu', default=50.0, dest='B_relu', type=float,
                    help='The bound of approximation range for the approximate ReLU function.')
parser.add_argument('--B_max', default=50.0, dest='B_max', type=float,
                    help='The bound of approximation range for the approximate max-pooling function.')
parser.add_argument('--B_search', default=5.0, dest='B_search', type=float,
                    help='The size of the interval to find B such that all input values fall within the approximate region.')
parser.add_argument('--dataset_path', default='../dataset/CIFAR10', dest='dataset_path', type=str,
                    help='The path which contains the CIFAR10.')
parser.add_argument('--params_name', default='ours', dest='params_name', type=str,
                    help='The pre-trained parameters file name. Please omit `.pt`.')
args = parser.parse_args()
# Bind all computation to the requested GPU.
torch.cuda.set_device(args.gpuid)
# Location of the (possibly pre-trained) parameter file for this run.
params_path = './pretrained/cifar10/{}_{}.pt'.format(args.backbone, args.params_name)

# Standard CIFAR10 pipelines: random crop + flip augmentation for training,
# plain normalization for testing (per-channel CIFAR10 mean/std).
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

# Datasets are downloaded on demand into args.dataset_path.
cifar10_train = datasets.CIFAR10(args.dataset_path, train=True, download=True,
                                 transform=transform_train)
loader_train = DataLoader(cifar10_train, batch_size=128)
cifar10_test = datasets.CIFAR10(args.dataset_path, train=False, download=True,
                                transform=transform_test)
loader_test = DataLoader(cifar10_test, batch_size=args.batch_inf)

dtype = torch.FloatTensor  # the CPU datatype
gpu_dtype = torch.cuda.FloatTensor
def train(model, loss_fn, optimizer, scheduler, num_epochs=1):
    """Train `model` on the module-level CIFAR10 `loader_train`.

    After each epoch the model is evaluated on `loader_test` and the
    learning-rate scheduler is stepped.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        model.train()
        print('Training...')
        for _, (inputs, targets) in tqdm(enumerate(loader_train)):
            torch.cuda.empty_cache()
            inputs_var = Variable(inputs.cuda())
            targets_var = Variable(targets.cuda().long())
            # Forward, loss, then the usual zero-grad / backward / step cycle.
            loss = loss_fn(model(inputs_var), targets_var)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('Evaluating...')
        # Loss printed below is that of the final batch of the epoch.
        test_acc = check_accuracy(model, loader_test) * 100
        print('Loss: %.4f, test accuracy: %.2f' % (loss.data, test_acc))
        scheduler.step()
        print('--------------------------')
def check_accuracy(model, loader, use_tqdm = False):
    """Return the classification accuracy of `model` over `loader`.

    The model is put into eval mode; the result is a float in [0, 1].
    Set `use_tqdm` to show a progress bar.
    """
    model.eval()
    torch.cuda.empty_cache()
    correct = 0
    total = 0
    iterable = tqdm(loader) if use_tqdm else loader
    with torch.no_grad():
        for inputs, labels in iterable:
            outputs = model(Variable(inputs.cuda()))
            # Predicted class = argmax over the class dimension.
            _, predictions = outputs.data.cpu().max(1)
            correct += (predictions == labels).sum()
            total += predictions.size(0)
    return float(correct) / total
def checking_batchsize_inference(model):
    """Run one test batch through `model` to sanity-check the batch size.

    Only the first batch of the module-level `loader_test` is evaluated
    (note the `break`). A `rangeException` means the approximation bound B
    is too small to even check; any other failure is assumed to be a GPU
    out-of-memory condition caused by the batch size. Both cases terminate
    the program with an explanatory message.
    """
    model.eval()
    with torch.no_grad():
        for x, _ in loader_test:
            x_var = Variable(x.cuda())
            try:
                _ = model(x_var)
            except rangeException as e:
                e.show()
                # Fixed typo in the original message ("is to small").
                print('The validity of the batch size cannot be checked since the given B is too small.')
                print('Please give larger B_relu or B_max.')
                sys.exit("Terminated.")
            except Exception:
                # Deliberately broad: any other failure here is almost
                # certainly CUDA OOM from an oversized batch.
                print('The batch size of INFERENCE seems to be large for your GPU.')
                print('Your current batch size is ' + str(args.batch_inf) + '. Try reducing `--batch_inf`.')
                sys.exit("Terminated.")
            break  # one batch is enough for the check
# Approximation hyper-parameters: entry [0] configures the approximate ReLU,
# entry [1] the approximate max-pooling.
approx_dict_list = [{'alpha': args.alpha, 'B': args.B_relu, 'type': args.approx_method},
                    {'alpha': args.alpha, 'B': args.B_max, 'type': args.approx_method}]

# Map backbone names to their constructors. This replaces a long if/elif
# chain and fails fast with a clear message on an unknown --backbone
# (previously an unknown name caused a NameError further down).
_BACKBONE_FACTORIES = {
    'resnet20': resnet20,
    'resnet32': resnet32,
    'resnet44': resnet44,
    'resnet56': resnet56,
    'resnet110': resnet110,
    'vgg11bn': vgg11_bn,
    'vgg13bn': vgg13_bn,
    'vgg16bn': vgg16_bn,
    'vgg19bn': vgg19_bn,
}
try:
    _build_backbone = _BACKBONE_FACTORIES[args.backbone]
except KeyError:
    sys.exit('Unknown backbone: ' + args.backbone)

# Exact network and its polynomial-approximation counterpart share weights.
original_model = _build_backbone()
approx_model = _build_backbone(approx_dict_list)

original_model.cuda()
approx_model.cuda()
if args.mode == 'train':
    # Refuse to train under the reserved name `ours` so the parameter file
    # shipped with the paper cannot be overwritten by accident.
    if args.params_name == 'ours':
        print('Please set your own name or use another name rather than `ours` '
              'to avoid overwriting our pre-trained parameters used in the paper.')
        sys.exit("Terminated.")
    print("Training random initialized", args.backbone, "for CIFAR10")
    print("")
    # SGD with Nesterov momentum; MultiStepLR decays the lr (default gamma)
    # at epochs 100 and 150 over a 200-epoch run.
    loss_fn = nn.CrossEntropyLoss().cuda()
    optimizer = optim.SGD(original_model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-3, nesterov=True)
    scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[100, 150], last_epoch=-1)
    train(original_model, loss_fn, optimizer, scheduler, num_epochs=200)
    # Persist the trained weights for later `inf` runs.
    torch.save(original_model.state_dict(), params_path)
    print("Saved pre-trained parameters. Path:", params_path)
if args.mode == 'inf':
    # Load the same pre-trained weights into both the exact model and its
    # polynomial approximation. (A redundant third load_state_dict call on
    # original_model was removed here.)
    original_model.load_state_dict(torch.load(params_path))
    approx_model.load_state_dict(torch.load(params_path))
    print("Used pre-trained parameter:", params_path)
    print('==========================')

    # --- Baseline accuracy of the exact (non-approximate) network --------
    print("Inference the pre-trained original", args.backbone, "for CIFAR10")
    original_acc = check_accuracy(original_model, loader_test, use_tqdm=True) * 100
    print("Test accuracy: %.2f" % original_acc)
    print('==========================')

    print("Inference the approximate", args.backbone, "with same pre-trained parameters for CIFAR10")
    print("Precision parameter:", args.alpha)
    print("")

    # Check if given batch size is valid.
    checking_batchsize_inference(approx_model)

    # Retry inference, widening the approximation range B (by --B_search)
    # until every intermediate activation falls inside the approximated
    # region for both ReLU and max-pooling.
    while True:
        try:
            print("Trying to approximate inference...")
            print("with B_ReLU = %.1f," % approx_dict_list[0]['B'])
            print("and B_max = %.1f," % approx_dict_list[1]['B'])
            approx_acc = check_accuracy(approx_model, loader_test, use_tqdm=True) * 100
            print("Approximation success!")
            break
        except rangeException as e:
            e.show()
            if e.type == 'relu':
                print("We increase B_ReLU", args.B_search, "and try inference again.")
                approx_dict_list[0]['B'] += args.B_search
            elif e.type == 'max':
                print("We increase B_maxpooling", args.B_search, "and try inference again.")
                approx_dict_list[1]['B'] += args.B_search
            print('--------------------------')
    print("")
    print("Test accuracy: %.2f" % approx_acc)
    # Relative accuracy change of the approximate model vs. the baseline.
    rate = (approx_acc - original_acc) / original_acc * 100
    print("Difference from the baseline: %.2f%%" % rate)
| 41.489362 | 136 | 0.643282 |
6ee540409b07f789b14091c60316fa04528be96a | 1,355 | py | Python | annotation_predictor/concat_detection_records.py | Inch4Tk/label_server | 3d0c39dd5a0c456794a1375051ca4f93a438ebf6 | [
"MIT"
] | null | null | null | annotation_predictor/concat_detection_records.py | Inch4Tk/label_server | 3d0c39dd5a0c456794a1375051ca4f93a438ebf6 | [
"MIT"
] | null | null | null | annotation_predictor/concat_detection_records.py | Inch4Tk/label_server | 3d0c39dd5a0c456794a1375051ca4f93a438ebf6 | [
"MIT"
] | null | null | null | import argparse
import json
import os
from datetime import datetime
from settings import annotation_predictor_metadata_dir
def concat_detection_record(record1: str, record2: str):
    """
    Concatenates two detection records and saves them in a new file.

    Keys present in both records take their value from record2.

    Args:
        record1: Path to first record, saved in a json-file
        record2: Path to second record, saved in a json-file

    Returns: path to new file (timestamp-named, in the metadata directory)
    """
    with open(record1, 'r') as first:
        merged = json.load(first)
    with open(record2, 'r') as second:
        merged.update(json.load(second))

    # Name the output after the current time to avoid collisions.
    stamp = datetime.now().strftime('%Y_%m_%d_%H%M%S')
    path_to_file = os.path.join(annotation_predictor_metadata_dir, '{}.json'.format(stamp))
    with open(path_to_file, 'w') as out:
        json.dump(merged, out)

    return path_to_file
if __name__ == '__main__':
    # Command-line entry point: merge two record files given as paths.
    parser = argparse.ArgumentParser(description='Concatenate two detection records')
    parser.add_argument('path_to_record_1', type=str, metavar='path_to_record_1',
                        help='path to first training record')
    parser.add_argument('path_to_record_2', type=str, metavar='path_to_record_2',
                        help='path to second training record')
    cli_args = parser.parse_args()
    concat_detection_record(cli_args.path_to_record_1, cli_args.path_to_record_2)
6ee63c9ba38d1ce17bcb6d0ca426d297c79f4912 | 2,579 | py | Python | OPTIMAQS/controller/controller/device.py | jeremyforest/whole_optic_gui | 7af257e739da90b8ce8b2531aa1c520959fa7bff | [
"MIT"
] | null | null | null | OPTIMAQS/controller/controller/device.py | jeremyforest/whole_optic_gui | 7af257e739da90b8ce8b2531aa1c520959fa7bff | [
"MIT"
] | 6 | 2021-02-02T23:09:14.000Z | 2022-03-12T00:52:01.000Z | OPTIMAQS/controller/controller/device.py | jeremyforest/whole_optic_gui | 7af257e739da90b8ce8b2531aa1c520959fa7bff | [
"MIT"
] | null | null | null | """
Generic Device class for manipulators.
To make a new device, one must implement at least:
* position
* absolute_move
"""
from numpy import array
import time
__all__ = ['Device']
class Device(object):
    """Base class for manipulator devices.

    Subclasses must implement at least `position` and `absolute_move`;
    every other movement helper here is expressed in terms of those two.
    All positions are in micrometers (um).
    """

    def __init__(self):
        pass

    def position(self, axis):
        """Return the current position of `axis` in um.

        This base implementation is a stub that always reports 0.
        """
        return 0.  # fake

    def absolute_move(self, x, axis):
        """Move `axis` to the absolute position `x` (um). Stub: no-op."""
        pass

    def relative_move(self, x, axis):
        """Shift `axis` by the relative amount `x` (um)."""
        target = self.position(axis) + x
        self.absolute_move(target, axis)

    def position_group(self, axes):
        """Return a 1 x len(axes) array of the current positions (um)."""
        readings = [self.position(axis) for axis in axes]
        return array([readings])

    def absolute_move_group(self, x, axes):
        """Move each axis in `axes` to the matching target position in `x` (um)."""
        for target, axis in zip(x, axes):
            self.absolute_move(target, axis)

    def relative_move_group(self, x, axes):
        """Shift the axes in `axes` by the relative amounts in `x` (um)."""
        targets = array(self.position_group(axes)) + array(x)
        self.absolute_move_group(targets, axes)

    def stop(self, axis):
        """Stop any ongoing movement on `axis`. Stub: no-op."""
        pass

    def wait_until_still(self, axis = None):
        """Block until two consecutive position readings of `axis` agree.

        Polls the position every 100 ms.
        """
        last_reading = self.position(axis)
        current_reading = None
        while array(last_reading != current_reading).any():
            last_reading = current_reading
            current_reading = self.position(axis)
            time.sleep(0.1)  # poll every 100 ms
6ee9460f4f92c18fd62442a8f5aa3304ac1ef00f | 9,068 | py | Python | phot/command_test.py | SilverRon/gppy | 0ee56ca270af62afe1702fce37bef30add14f12a | [
"MIT"
] | 4 | 2019-05-08T08:08:59.000Z | 2021-12-22T08:57:46.000Z | phot/command_test.py | SilverRon/gppy | 0ee56ca270af62afe1702fce37bef30add14f12a | [
"MIT"
] | null | null | null | phot/command_test.py | SilverRon/gppy | 0ee56ca270af62afe1702fce37bef30add14f12a | [
"MIT"
] | 2 | 2019-05-08T08:09:02.000Z | 2019-06-27T13:41:44.000Z | # PHOTOMETRY CODE (TEST) FOR PYTHON 3.X
# 2019.03.09
# GREGORY S.H. PAEK
#============================================================
import os, glob
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
from astropy.io import fits
#from imsng import zpcal
#============================================================
# USER SETTING
#============================================================
sharepath = '/home/sonic/Research/yourpy/config/'
configfile = sharepath+'targetphot.sex'
paramfile = sharepath+'targetphot.param'
nnwfile = sharepath+'targetphot.nnw'
convfile = sharepath+'targetphot.conv'
psfexconf_prese_conf = sharepath+'prepsfex.sex'
psfexconf_prese_param = sharepath+'prepsfex.param'
psfexconf_psfex_conf = sharepath+'default.psfex'
psfexconf_psfex_conv = sharepath+'default.conv'
obsinfo = ascii.read('/home/sonic/Research/table/obs.txt')
#------------------------------------------------------------
def puthdr(inim, hdrkey, hdrval, hdrcomment=''):
	"""Set (or update) a FITS header keyword on an image in place.

	inim - path to the FITS file to modify.
	hdrkey - header keyword to set.
	hdrval - value written for the keyword.
	hdrcomment - optional comment stored alongside the keyword.
	"""
	from astropy.io import fits
	# fits.setval opens the file, updates the keyword and writes it back.
	# (Removed an unused fits.getheader() read and a dead `comment` string
	# that was built but never printed.)
	fits.setval(inim, hdrkey, value=hdrval, comment=hdrcomment)
#------------------------------------------------------------
# INITIAL INPUT
#------------------------------------------------------------
# TARGET COORD.
# NOTE(review): the active target is hard-coded; previous targets are kept
# commented out for reference.
#ra1, de1 = 54.50492875, -26.94636444 # GRB 190114C
#ra1, de1 = 173.137, +27.699 # GRB 130427A
#ra1, de1 = 196.5942029, +20.35490083
#ra1, de1 = 223.3201092, 34.75006139 # AT2019ein
#ra1, de1 = 185.733875, 15.826 # SN2019ehk
ra1, de1 = 161.63775, 13.74194444
# IMAGES TO CALC.
# Interactive selection: list the FITS files, then read a glob pattern.
#imlist = glob.glob('Calib-*com.fits')
os.system('ls *.fits')
imlist = glob.glob(input('image to process\t: '))
imlist.sort()
for img in imlist: print(img)
# REF. CATALOG
refcatname = 'PS1' #PS1/SDSS/APASS/2MASS
# RESULT FILE
# Tab-separated photometry results accumulate in phot.dat.
f = open('phot.dat', 'w')
colline = '#obs\tdate-obs\taperture\tseeing\tzp\tzperr\tinstmag\tinstmagerr\tmag\tmagerr\n'
f.write(colline)
#============================================================
# MAIN COMMAND
#============================================================
imfail = []
for inim in imlist:
query_checklist = glob.glob('*.cat')
try:
hdr = fits.getheader(inim)
part = inim.split('-')
obs = part[1]
name = part[2]
exptime = part[6]
refmagkey = part[5]
refmagerkey = refmagkey+'err'
gain = obsinfo[obsinfo['obs']==obs]['gain'][0]
pixscale = obsinfo[obsinfo['obs']==obs]['pixelscale'][0]
# SourceEXtractor
intbl0, incat, fwhm_pix, fwhm_arcsec = secom(inim, gain=gain, pixscale=pixscale, det_sigma=3.0, backsize=str(64))
# APPROXIMATE CENTER POS. & DIST CUT
xim_cent, yim_cent = np.max(intbl0['X_IMAGE'])/2, np.max(intbl0['Y_IMAGE'])/2
im_dist = sqsum((xim_cent-intbl0['X_IMAGE']), (yim_cent-intbl0['Y_IMAGE']))
indx_dist = np.where( im_dist < 0.99*(xim_cent+yim_cent)/2. ) # 90% area
intbl = intbl0[indx_dist]
intbl.write(incat, format='ascii', overwrite=True)
# NEAR CENTER RA DEC
radeg = np.median(intbl['ALPHA_J2000'])
dedeg = np.median(intbl['DELTA_J2000'])
#------------------------------------------------------------
# REF. CATALOG QUERY
#------------------------------------------------------------
if refcatname == 'PS1':
if 'ps1-'+name+'.cat' not in query_checklist:
querytbl = ps1_query(name, radeg, dedeg, radius=0.65)
else:
querytbl = ascii.read('ps1-'+name+'.cat')
reftbl, refcat = ps1_Tonry(querytbl, name)
elif refcatname == 'SDSS':
if 'sdss-'+name+'.cat' not in query_checklist:
querytbl = sdss_query(name, radeg, dedeg)
else:
querytbl = ascii.read('sdss-'+name+'.cat')
reftbl, refcat = sdss_Blaton(querytbl, name)
elif refcatname == 'APASS':
if 'apass-'+name+'.cat' not in query_checklist:
querytbl = apass_query(name, radeg, dedeg)
else:
querytbl = ascii.read('apass-'+name+'.cat')
reftbl, refcat = apass_Blaton(querytbl, name)
elif refcatname == '2MASS':
if '2mass-'+name+'.cat' not in query_checklist:
querytbl = twomass_query(name, radeg, dedeg, band=refmagkey, radius=1.0)
else:
querytbl = ascii.read('2mass-'+name+'.cat')
reftbl, refcat = querytbl, '2mass-'+name+'.cat'
#------------------------------------------------------------
# MATCHING
#------------------------------------------------------------
merge_raw = matching(incat, refcat)
colnames = merge_raw.colnames
maglist = []
magerlist = []
for col in colnames:
if 'MAG_APER_7' in col:
#print(col)
maglist.append(col)
elif 'MAGERR_APER_7' in col:
#print(col)
magerlist.append(col)
#intbl = ascii.read(incat)
for i in range(0, len(maglist)):
mtbl = merge_raw
inmagkey = maglist[i]
inmagerkey = magerlist[i]
param_st4zp = dict( intbl=mtbl,
inmagerkey=inmagerkey,
refmagkey=refmagkey,
refmagerkey=refmagerkey,
refmaglower=13,
refmagupper=16.5,
refmagerupper=0.05,
inmagerupper=0.1,
class_star_cut=0.001)
stars_zp = star4zp(**param_st4zp)
#stars_zp = star4zp(mtbl, inmagerkey, refmagkey, refmagerkey, refmaglower=14, refmagupper=16.5, refmagerupper=0.05, inmagerupper=0.1, class_star_cut=0.001)
#stars_zp, stdnumb = star4zp(mtbl, inmagerkey, refmagkey, refmagerkey, refmaglower=14, refmagupper=18, refmagerupper=0.05, inmagerupper=0.1, class_star_cut=0.01)
zp, zper, intbl_alive, intbl_exile = zpcal(stars_zp, inmagkey, inmagerkey, refmagkey, refmagerkey)
zp_plot(inim, inmagkey, zp, zper, intbl_alive[inmagkey], intbl_alive[refmagkey], intbl_alive['zp'], intbl_exile[inmagkey], intbl_exile[refmagkey], intbl_exile['zp'])
intbl['REAL_'+inmagkey] = zp + intbl[inmagkey]
intbl['REAL_'+inmagerkey] = sqsum(zper, intbl[inmagerkey])
#------------------------------------------------------------
# TARGET PHOT
#------------------------------------------------------------
ra2, de2 = intbl['ALPHA_J2000'], intbl['DELTA_J2000']
indx_target = targetfind(ra1, de1, ra2, de2, sep=10)
skymean, skymed, skysig = bkgest_mask(inim)
if len(indx_target[0]) == 0:
aper = 2*fwhm_pix
ul = limitmag(3, zp, aper, skysig)
try:
comment = inim+'\t\t'+hdr['date-obs']+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t--\t\t\t'+'--\t' \
+'\t'+str(round(ul, 3))+'\t'+'0'+'\n'
except:
comment = inim+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t--\t\t\t'+'--\t' \
+'\t'+str(round(ul, 3))+'\t'+'0'+'\n'
print(comment)
f.write(comment)
else:
try:
comment = inim+'\t\t'+hdr['date-obs']+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t'+str(round(intbl[indx_target]['MAG_APER_7'][0], 3))+'\t\t'+str(round(intbl[indx_target]['MAGERR_APER_7'][0], 3)) \
+'\t'+str(round(intbl[indx_target]['REAL_MAG_APER_7'][0], 3))+'\t'+str(round(intbl[indx_target]['REAL_MAGERR_APER_7'][0], 3))+'\n'
except:
comment = inim+'\t\t'+'MAG_APER_7'+'\t\t'+str(round(fwhm_arcsec, 3))+'\t\t' \
+str(round(zp, 3))+'\t'+str(round(zper, 3)) \
+'\t'+str(round(intbl[indx_target]['MAG_APER_7'][0], 3))+'\t\t'+str(round(intbl[indx_target]['MAGERR_APER_7'][0], 3)) \
+'\t'+str(round(intbl[indx_target]['REAL_MAG_APER_7'][0], 3))+'\t'+str(round(intbl[indx_target]['REAL_MAGERR_APER_7'][0], 3))+'\n'
print(comment)
f.write(comment)
# PLOT IMAGE
numb_list = intbl_alive['NUMBER']
xim_list = intbl_alive['X_IMAGE']
yim_list = intbl_alive['Y_IMAGE']
numb_addlist= intbl_exile['NUMBER']
xim_addlist = intbl_exile['X_IMAGE']
yim_addlist = intbl_exile['Y_IMAGE']
plotshow(inim, numb_list, xim_list, yim_list, add=True, numb_addlist=numb_addlist, xim_addlist=xim_addlist, yim_addlist=yim_addlist)
puthdr(inim, 'SEEING', round(fwhm_arcsec, 3), hdrcomment='SEEING [arcsec]')
puthdr(inim, 'PEEING', round(fwhm_pix, 3), hdrcomment='SEEING [pixel]')
puthdr(inim, 'STDNUMB', len(intbl_alive), hdrcomment='# OF STD STARS')
puthdr(inim, 'OPTZP', round(zp, 3), hdrcomment='2*SEEING DIAMETER')
puthdr(inim, 'OPTZPERR', round(zper, 3), hdrcomment='2*SEEING DIAMETER')
puthdr(inim, 'SKYSIG', round(skysig, 3), hdrcomment='SKY SIGMA VALUE')
puthdr(inim, 'SKYVAL', round(skymed, 3), hdrcomment='SKY MEDIAN VALUE')
except:
imfail.append(inim)
pass
#-------------------------------------------------------------------------#
# Close the result file, reload it as a table, and tidy the working
# directory: diagnostic plots go into subfolders, intermediates are removed.
f.close()
photbl = ascii.read('phot.dat')
#photbl[photbl['mag']>20]
comment = '='*60;print(comment)
os.system('mkdir zpcal/;mv ./*zpcal.png ./zpcal/')
os.system('mkdir zpcal_test/;mv ./*zpcal_test.png ./zpcal_test/')
os.system('rm *aper.fits *xml snap*.fits psf-*.fits')
os.system('mkdir overview/;mv ./*png ./overview/')
os.system('cat phot.dat')
| 41.031674 | 168 | 0.585686 |
6eec71c6872297c8b70027ce91c50b56182bea2c | 5,440 | py | Python | LetsCook/recipes/views.py | ivo-bass/iCook | c45f97ac3d8da0c52ccd85ecac0bab51bc4c8048 | [
"MIT"
] | 1 | 2021-08-05T07:15:59.000Z | 2021-08-05T07:15:59.000Z | LetsCook/recipes/views.py | ivo-bass/iCook | c45f97ac3d8da0c52ccd85ecac0bab51bc4c8048 | [
"MIT"
] | null | null | null | LetsCook/recipes/views.py | ivo-bass/iCook | c45f97ac3d8da0c52ccd85ecac0bab51bc4c8048 | [
"MIT"
] | null | null | null | from django.db import transaction
from django.shortcuts import render, redirect
from django.urls import reverse_lazy, reverse
from django.views.generic import CreateView, ListView, DeleteView, UpdateView, DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from LetsCook.common.forms import CommentForm
from LetsCook.core.constants import CATEGORIES
from LetsCook.core.utils import save_suggestion, add_view_count, check_image_in_cloudinary
from LetsCook.recipes.forms import RecipeForm, IngredientFormSet, RecipeUpdateForm
from LetsCook.recipes.models import Recipe
class DetailsRecipe(DetailView):
    """
    This view shows the recipe for the given pk.
    Has a very basic view counter.
    """
    template_name = 'recipes/details.html'
    model = Recipe

    def post(self, request, *args, **kwargs):
        """Handle the suggestion form posted from the details page."""
        save_suggestion(self.request)
        return redirect('home')

    def get_context_data(self, **kwargs):
        """Build the template context: recipe data, comments, and flags
        telling the template whether the viewer owns/has liked the recipe."""
        context = super().get_context_data(**kwargs)
        # increase views count if not own recipe
        add_view_count(self.request, self.object)
        # check if image is in cloudinary
        check_image_in_cloudinary(self.object)
        # get other data
        # NOTE(review): ingredients appears to be stored as a ', '-joined
        # string on the model — confirm against the Recipe definition.
        ingredients = self.object.ingredients.split(', ')
        is_owner = self.object.author == self.request.user
        is_liked_by_user = self.object.like_set.filter(user_id=self.request.user.id).exists()
        context.update({
            'recipe': self.object,
            'ingredients': ingredients,
            'comments': self.object.comment_set.all(),
            'comment_form': CommentForm(
                initial={'recipe_pk': self.object.pk}
            ),
            'is_owner': is_owner,
            'is_liked': is_liked_by_user,
        })
        return context
class AllRecipesView(ListView):
    """
    This view shows all public recipes
    and provides filtering by meal_type/category
    """
    model = Recipe
    template_name = 'recipes/all-recipes.html'
    context_object_name = 'recipes'
    paginate_by = 6

    def get_context_data(self, **kwargs):
        # Pass **kwargs through to super() so the ListView context
        # (object_list, pagination, etc.) is built correctly.
        context = super().get_context_data(**kwargs)
        context['categories'] = CATEGORIES
        return context

    def get_queryset(self):
        """Return public recipes, optionally filtered by ?category=..."""
        public_recipes = Recipe.objects.filter(public=True)
        # avoid missing images: if missing in the cloud, set to None
        for recipe in public_recipes:
            check_image_in_cloudinary(recipe)
        category_name = self.request.GET.get('category')
        # Filter only when a concrete category was requested
        # (simplified from a triple-negative condition).
        if category_name not in (None, '', 'All'):
            public_recipes = public_recipes.filter(meal_type=category_name)
        return public_recipes
class RecipeCreate(LoginRequiredMixin, CreateView):
    """
    This view shows recipe creation form with ingredients inline formset.
    On success redirects to recipe details view
    """
    model = Recipe
    template_name = 'recipes/create.html'
    form_class = RecipeForm

    def get_context_data(self, **kwargs):
        # Bind the inline ingredient formset to POST data when present so
        # that validation errors re-render with the user's input.
        data = super(RecipeCreate, self).get_context_data(**kwargs)
        if self.request.POST:
            data['ingredients'] = IngredientFormSet(self.request.POST)
        else:
            data['ingredients'] = IngredientFormSet()
        return data

    def form_valid(self, form):
        """
        If recipe form is valid validates the ingredients formset
        """
        context = self.get_context_data()
        ingredients = context['ingredients']
        # .atomic() - If there is an exception, the changes are rolled back.
        with transaction.atomic():
            # Stamp the logged-in user as author before saving.
            form.instance.author = self.request.user
            self.object = form.save()
            if ingredients.is_valid():
                ingredients.instance = self.object
                ingredients.save()
        return super(RecipeCreate, self).form_valid(form)

    def get_success_url(self):
        # Redirect to the freshly created recipe's details page.
        return reverse_lazy('details-recipe', kwargs={'pk': self.object.pk})
class RecipeUpdate(LoginRequiredMixin, UpdateView):
    """
    Updates the recipe and all of the ingredients
    """
    model = Recipe
    template_name = 'recipes/update.html'
    form_class = RecipeUpdateForm

    def get_context_data(self, **kwargs):
        # Bind the inline ingredient formset to the recipe being edited;
        # on POST also bind the submitted data so errors re-render with it.
        data = super(RecipeUpdate, self).get_context_data(**kwargs)
        if self.request.POST:
            data['ingredients'] = IngredientFormSet(self.request.POST, instance=self.object)
        else:
            data['ingredients'] = IngredientFormSet(instance=self.object)
        return data

    def form_valid(self, form):
        # Save the valid ingredient formset atomically; the recipe form
        # itself is saved by super().form_valid() afterwards.
        context = self.get_context_data()
        ingredients = context['ingredients']
        with transaction.atomic():
            if ingredients.is_valid():
                ingredients.instance = self.object
                ingredients.save()
        return super(RecipeUpdate, self).form_valid(form)

    def get_success_url(self):
        return reverse_lazy('details-recipe', kwargs={'pk': self.object.pk})
class RecipeDelete(LoginRequiredMixin, DeleteView):
    """
    Confirmation view for recipe deletion.
    """
    model = Recipe
    template_name = 'recipes/delete.html'

    def get_success_url(self):
        # After deletion, go back to the user's own recipe list.
        return reverse('my-recipes')

    def post(self, request, *args, **kwargs):
        # A "cancel" button submits back here; return to the recipe
        # instead of deleting it.
        if "cancel" in request.POST:
            return redirect('details-recipe', kwargs['pk'])
        return super().post(request, *args, **kwargs)
| 35.096774 | 96 | 0.662316 |
6eeeabe42368fcdd5282d7697a76a1888678e252 | 4,540 | py | Python | myhoodApp/forms.py | MutuaFranklin/MyHood | 6ddd21c4a67936c8926d6f5a8665a06edf81f39e | [
"MIT"
] | null | null | null | myhoodApp/forms.py | MutuaFranklin/MyHood | 6ddd21c4a67936c8926d6f5a8665a06edf81f39e | [
"MIT"
] | null | null | null | myhoodApp/forms.py | MutuaFranklin/MyHood | 6ddd21c4a67936c8926d6f5a8665a06edf81f39e | [
"MIT"
] | null | null | null |
from . models import Profile, Business,Myhood, PolicePosts, HealthFacilities, UserPost
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import widgets
class UserRegistrationForm(UserCreationForm):
    """Sign-up form built on Django's UserCreationForm."""
    class Meta:
        model = User
    # NOTE(review): as indented here, `fields` and `widgets` below are local
    # variables inside __init__, not attributes of Meta — they would have no
    # effect on the rendered form. Verify against the running app.
    def __init__(self, *args, **kwargs):
        super(UserRegistrationForm, self).__init__(*args, **kwargs)
        # Strip the default password/username help text from the rendered form.
        for fieldname in ['username', 'password1', 'password2']:
            self.fields[fieldname].help_text = None
        fields = ('first_name', 'last_name', 'email','username', 'password1', 'password2')
        widgets = {
            'first_name':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"First Name", 'label': 'First Name'}),
            'last_name':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"Second Name", 'label': 'Second Name'}),
            'email':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"Email Address", 'label': 'Email Address'}),
            'username':forms.TextInput(attrs = {'class':'form-control names', 'placeholder':"Username", 'label': 'Username'}),
            'password1':forms.PasswordInput(attrs = {'class':'form-control ', 'placeholder':"Password", 'label': 'Password'}),
            'password2':forms.PasswordInput(attrs = {'class':'form-control', 'type':'password', 'placeholder':"Confirm Password", 'label': 'Confirm Password'}),
        }
class UpdateUserProfileForm(forms.ModelForm):
    """Edit a user's Profile (picture, bio, location, neighbourhood, ...)."""
    class Meta:
        model = Profile
        # 'mobile' has no widget entry below, so it renders with Django's
        # default widget for its model field.
        fields = [ 'profile_pic','family_name', 'bio','gender','general_location', 'mobile','hood' ]
        widgets = {
            'profile_pic': forms.FileInput(attrs={'class':'form-control'}),
            'family_name':forms.TextInput(attrs={'class': 'form-control'}),
            'bio':forms.Textarea(attrs={'class': 'form-control'}),
            'gender':forms.Select(attrs={'class': 'form-control'}),
            'general_location':forms.TextInput(attrs={'class': 'form-control'}),
            'hood':forms.Select(attrs={'class': 'form-control'}),
        }
class HoodMemberPostForm(forms.ModelForm):
    """Minimal create form for a neighbourhood member's post."""
    class Meta:
        model = UserPost
        # Only the title and body are collected here; no custom widgets.
        fields = ('title', 'post')
class UpdatePostForm(forms.ModelForm):
    """Edit an existing UserPost (picture, title and body)."""
    class Meta:
        model = UserPost
        fields = [ 'post_pic', 'title','post']
        widgets = {
            # Key must match the field name: this was 'project_pic', which
            # Django silently ignores, leaving the picture input unstyled.
            'post_pic':forms.FileInput(attrs={'class': 'form-control'}),
            'title': forms.TextInput(attrs={'class':'form-control'}),
            'post':forms.Textarea(attrs={'class': 'form-control'}),
        }
class RegisterBizForm(forms.ModelForm):
    """Register a Business with image, description and contact details."""
    class Meta:
        model = Business
        fields = [ 'biz_image', 'name', 'description','phone','email']
        widgets = {
            'biz_image':forms.FileInput(attrs={'class': 'form-control'}),
            'name': forms.TextInput(attrs={'class':'form-control'}),
            'description':forms.TextInput(attrs={'class': 'form-control'}),
            'phone':forms.TextInput(attrs={'class': 'form-control'}),
            'email':forms.TextInput(attrs={'class': 'form-control'}),
        }
class RegisterMyhoodForm(forms.ModelForm):
    """Register a neighbourhood (Myhood) with location and police contacts."""
    class Meta:
        model = Myhood
        fields = [ 'name','location','sample_hood_image', 'description','police_contacts']
        widgets = {
            'name': forms.TextInput(attrs={'class':'form-control'}),
            'location':forms.TextInput(attrs={'class': 'form-control'}),
            'sample_hood_image':forms.FileInput(attrs={'class': 'form-control'}),
            'description':forms.TextInput(attrs={'class': 'form-control'}),
            'police_contacts':forms.TextInput(attrs={'class': 'form-control'}),
        }
class RegisterHoodHospital(forms.ModelForm):
    """Register a health facility for a neighbourhood."""
    class Meta:
        model = HealthFacilities
        fields = [ 'hospital_image','name','phone','email', 'description']
        widgets = {
            'hospital_image':forms.FileInput(attrs={'class': 'form-control'}),
            'name': forms.TextInput(attrs={'class':'form-control'}),
            'phone':forms.TextInput(attrs={'class': 'form-control'}),
            'email':forms.TextInput(attrs={'class': 'form-control'}),
            'description':forms.TextInput(attrs={'class': 'form-control'}),
        }
| 35.46875 | 160 | 0.586344 |
6eef0375ed02fe1987ef09d32e826630ef2e2fef | 4,599 | py | Python | ponyo/train_vae_modules.py | ajlee21/ponyo | f68a461b2edd4ab3d9c0699a1e5e8dcd1308cc75 | [
"BSD-3-Clause"
] | 1 | 2020-12-17T17:34:53.000Z | 2020-12-17T17:34:53.000Z | ponyo/train_vae_modules.py | ajlee21/ponyo | f68a461b2edd4ab3d9c0699a1e5e8dcd1308cc75 | [
"BSD-3-Clause"
] | 37 | 2020-06-15T18:15:10.000Z | 2022-02-10T02:34:29.000Z | ponyo/train_vae_modules.py | ajlee21/ponyo | f68a461b2edd4ab3d9c0699a1e5e8dcd1308cc75 | [
"BSD-3-Clause"
] | 3 | 2020-06-12T19:56:16.000Z | 2021-04-21T15:22:33.000Z | """
Author: Alexandra Lee
Date Created: 11 March 2020
Scripts related to training the VAE including
1. Normalizing gene expression data
2. Wrapper function to input training parameters and run vae
training in `vae.tybalt_2layer_model`
"""
from ponyo import vae, utils
import os
import pickle
import pandas as pd
from sklearn import preprocessing
import tensorflow as tf
import numpy as np
import random
import warnings
def fxn():
    # Emits a DeprecationWarning; called once below just to exercise the
    # warning filter at import time.
    warnings.warn("deprecated", DeprecationWarning)
# Silence the deprecation warning raised by fxn() during module import.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    fxn()
def set_all_seeds(seed_val=42):
    """
    Set all random seeds (Python hash, NumPy, Python `random`, TensorFlow)
    to get reproducible VAE trained models.

    Args:
        seed_val (int): seed used for NumPy, `random` and TensorFlow.
    """
    # The below is necessary in Python 3.2.3 onwards to
    # have reproducible behavior for certain hash-based operations.
    # See these references for further details:
    # https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
    # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
    # NOTE(review): setting PYTHONHASHSEED after the interpreter has started
    # does not change hashing for the current process — confirm this has the
    # intended effect here.
    # https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
    os.environ["PYTHONHASHSEED"] = "0"
    # The below is necessary for starting Numpy generated random numbers
    # in a well-defined initial state.
    np.random.seed(seed_val)
    # The below is necessary for starting core Python generated random numbers
    # in a well-defined state.
    random.seed(seed_val)
    # The below tf.set_random_seed() will make random number generation
    # in the TensorFlow backend have a well-defined initial state.
    # NOTE(review): tf.set_random_seed is the TF 1.x API (TF 2.x uses
    # tf.random.set_seed) — consistent with the `import tensorflow as tf`
    # usage elsewhere in this module.
    tf.set_random_seed(seed_val)
def normalize_expression_data(
    base_dir, config_filename, raw_input_data_filename, normalized_data_filename
):
    """
    0-1 normalize the expression data (per gene, via MinMaxScaler).

    Arguments
    ----------
    base_dir: str
        Root directory containing analysis subdirectories.
        NOTE(review): currently unused inside this function; kept for
        interface compatibility with existing callers.

    config_filename: str
        File containing user defined parameters

    raw_input_data_filename: str
        File containing raw expression data (TSV, samples x genes)

    normalized_data_filename:
        Output file containing normalized expression data
    """
    # Read in config variables
    params = utils.read_config(config_filename)
    # Read data
    data = pd.read_csv(raw_input_data_filename, header=0, sep="\t", index_col=0)
    print(
        "input: dataset contains {} samples and {} genes".format(
            data.shape[0], data.shape[1]
        )
    )
    # 0-1 normalize per gene
    scaler = preprocessing.MinMaxScaler()
    data_scaled_df = scaler.fit_transform(data)
    data_scaled_df = pd.DataFrame(
        data_scaled_df, columns=data.columns, index=data.index
    )
    print(
        "Output: normalized dataset contains {} samples and {} genes".format(
            data_scaled_df.shape[0], data_scaled_df.shape[1]
        )
    )
    # Save scaler transform. A context manager guarantees the file handle is
    # closed even if pickling raises (the previous open/close pair leaked the
    # handle on failure).
    scaler_filename = params["scaler_transform_filename"]
    with open(scaler_filename, "wb") as outfile:
        pickle.dump(scaler, outfile)
    # Save scaled data
    data_scaled_df.to_csv(normalized_data_filename, sep="\t", compression="xz")
def train_vae(config_filename, input_data_filename):
    """
    Trains VAE model using parameters set in config file

    Arguments
    ----------
    config_filename: str
        File containing user defined parameters

    input_data_filename: str
        File path corresponding to input dataset to use
    """
    # Read in config variables
    params = utils.read_config(config_filename)
    # Load parameters
    # NOTE(review): base_dir is derived from the *current working directory*,
    # so this assumes the process was launched one level below the project
    # root — confirm for every entry point that calls this.
    base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
    dataset_name = params["dataset_name"]
    learning_rate = params["learning_rate"]
    batch_size = params["batch_size"]
    epochs = params["epochs"]
    kappa = params["kappa"]
    intermediate_dim = params["intermediate_dim"]
    latent_dim = params["latent_dim"]
    epsilon_std = params["epsilon_std"]
    train_architecture = params["NN_architecture"]
    validation_frac = params["validation_frac"]
    # Read data (TSV with samples as rows, genes as columns)
    normalized_data = pd.read_csv(input_data_filename, header=0, sep="\t", index_col=0)
    print(
        "input dataset contains {} samples and {} genes".format(
            normalized_data.shape[0], normalized_data.shape[1]
        )
    )
    # Train (VAE): all hyperparameters are forwarded positionally to the
    # 2-layer Tybalt model implemented in the `vae` module.
    vae.tybalt_2layer_model(
        learning_rate,
        batch_size,
        epochs,
        kappa,
        intermediate_dim,
        latent_dim,
        epsilon_std,
        normalized_data,
        base_dir,
        dataset_name,
        train_architecture,
        validation_frac,
    )
| 27.375 | 112 | 0.68928 |
42b27e1114addb6efa22983ea1b8536333e5b90e | 3,096 | py | Python | datar/forcats/misc.py | stjordanis/datar | 4e2b5db026ad35918954576badef9951928c0cb1 | [
"MIT"
] | 110 | 2021-03-09T04:10:40.000Z | 2022-03-13T10:28:20.000Z | datar/forcats/misc.py | sthagen/datar | 1218a549e2f0547c7b5a824ca6d9adf1bf96ba46 | [
"MIT"
] | 54 | 2021-06-20T18:53:44.000Z | 2022-03-29T22:13:07.000Z | datar/forcats/misc.py | sthagen/datar | 1218a549e2f0547c7b5a824ca6d9adf1bf96ba46 | [
"MIT"
] | 11 | 2021-06-18T03:03:14.000Z | 2022-02-25T11:48:26.000Z | """Provides other helper functions for factors"""
from typing import Any, Iterable
import numpy
from pandas import Categorical, DataFrame
from pipda import register_verb
from pipda.utils import CallingEnvs
from ..core.types import ForcatsRegType, ForcatsType, is_null, is_scalar
from ..core.utils import Array
from ..core.contexts import Context
from ..core.defaults import f
from ..base import (
factor,
tabulate,
prop_table,
nlevels,
levels,
NA,
setdiff,
is_ordered,
)
from ..dplyr import arrange, desc, mutate
from .utils import check_factor
from .lvl_order import fct_inorder
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_count(_f: ForcatsType, sort: bool = False, prop=False) -> DataFrame:
    """Count entries in a factor

    Args:
        _f: A factor
        sort: If True, sort the result so that the most common values float to
            the top
        prop: If True, compute the fraction of marginal table.

    Returns:
        A data frame with columns `f`, `n` and `p`, if prop is True
    """
    f2 = check_factor(_f)
    n_na = sum(is_null(f2))
    df = DataFrame(
        {
            "f": fct_inorder(
                levels(f2, __calling_env=CallingEnvs.REGULAR),
                __calling_env=CallingEnvs.REGULAR,
            ),
            "n": tabulate(
                f2,
                nlevels(f2, __calling_env=CallingEnvs.REGULAR),
                __calling_env=CallingEnvs.REGULAR,
            ),
        }
    )
    if n_na > 0:
        # DataFrame.append() was deprecated in pandas 1.4 and removed in
        # pandas 2.0; build a one-row frame for the NA count and concatenate.
        from pandas import concat

        df = concat([df, DataFrame([{"f": NA, "n": n_na}])], ignore_index=True)
    if sort:
        df = arrange(
            df,
            desc(f.n, __calling_env=CallingEnvs.PIPING),
            __calling_env=CallingEnvs.REGULAR,
        )
    if prop:
        df = mutate(
            df,
            p=prop_table(f.n, __calling_env=CallingEnvs.PIPING),
            __calling_env=CallingEnvs.REGULAR,
        )
    return df
@register_verb(ForcatsRegType, context=Context.EVAL)
def fct_match(_f: ForcatsType, lvls: Any) -> Iterable[bool]:
    """Test for presence of levels in a factor

    Do any of `lvls` occur in `_f`?

    Args:
        _f: A factor
        lvls: A vector specifying levels to look for.

    Returns:
        A logical factor

    Raises:
        ValueError: if any non-NA requested level is absent from `_f`.
    """
    _f = check_factor(_f)
    # Accept a scalar level as a convenience; normalize to a 1-element list.
    if is_scalar(lvls):
        lvls = [lvls]
    # Levels requested but not present in the factor.
    bad_lvls = setdiff(
        lvls,
        levels(_f, __calling_env=CallingEnvs.REGULAR),
        __calling_env=CallingEnvs.REGULAR,
    )
    if len(bad_lvls) > 0:
        # Drop NA entries before erroring: a missing value in `lvls` is not
        # treated as a bad level.
        bad_lvls = Array(bad_lvls)[~is_null(bad_lvls)]
    if len(bad_lvls) > 0:
        raise ValueError(f"Levels not present in factor: {bad_lvls}.")
    # Element-wise membership test of `_f` in the requested levels.
    return numpy.isin(_f, lvls)
@register_verb(ForcatsRegType)
def fct_unique(_f: ForcatsType) -> Categorical:
    """Unique values of a factor

    Args:
        _f: A factor

    Returns:
        The factor with the unique values in `_f`
    """
    # The levels of a factor are its unique values; rebuild a factor whose
    # data equals its levels, preserving orderedness of the input.
    lvls = levels(_f, __calling_env=CallingEnvs.REGULAR)
    is_ord = is_ordered(_f, __calling_env=CallingEnvs.REGULAR)
    return factor(lvls, lvls, exclude=None, ordered=is_ord)
| 25.377049 | 78 | 0.622739 |
42b41a2f0adeb62c8c1ac936436eafb38cf4632a | 71 | py | Python | gnn_fw/__init__.py | krzysztoffiok/gnn-classification-pipeline | b0bfb209bcf8856e68d2714b04bada9104e3c75b | [
"MIT"
] | 1 | 2021-12-14T20:14:58.000Z | 2021-12-14T20:14:58.000Z | gnn_fw/__init__.py | krzysztoffiok/gnn-classification-pipeline | b0bfb209bcf8856e68d2714b04bada9104e3c75b | [
"MIT"
] | null | null | null | gnn_fw/__init__.py | krzysztoffiok/gnn-classification-pipeline | b0bfb209bcf8856e68d2714b04bada9104e3c75b | [
"MIT"
] | null | null | null | from . import utils
from . import models
__all__ = ['utils', 'models']
| 17.75 | 29 | 0.690141 |
42b603082633608e2a31d1e0d368cdcfc8b30d98 | 6,585 | py | Python | qucumber/utils/training_statistics.py | silky/QuCumber | f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-06-27T11:26:29.000Z | 2019-06-27T11:26:29.000Z | qucumber/utils/training_statistics.py | silky/QuCumber | f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | qucumber/utils/training_statistics.py | silky/QuCumber | f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2018 PIQuIL - All Rights Reserved
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import torch
import qucumber.utils.cplx as cplx
import qucumber.utils.unitaries as unitaries
def fidelity(nn_state, target_psi, space, **kwargs):
    r"""Calculates the square of the overlap (fidelity) between the reconstructed
    wavefunction and the true wavefunction (both in the computational basis).

    :param nn_state: The neural network state (i.e. complex wavefunction or
                     positive wavefunction).
    :type nn_state: WaveFunction
    :param target_psi: The true wavefunction of the system.
    :type target_psi: torch.Tensor
    :param space: The hilbert space of the system.
    :type space: torch.Tensor
    :param \**kwargs: Extra keyword arguments that may be passed. Will be ignored.

    :returns: The fidelity.
    :rtype: torch.Tensor
    """
    Z = nn_state.compute_normalization(space)
    # F accumulates the complex inner product <target_psi|psi> as a 2-vector
    # [real, imag] (the cplx convention used throughout qucumber).
    F = torch.tensor([0.0, 0.0], dtype=torch.double, device=nn_state.device)
    target_psi = target_psi.to(nn_state.device)
    for i in range(len(space)):
        # Normalized amplitude of basis state i.
        psi = nn_state.psi(space[i]) / Z.sqrt()
        # Re(conj(target) * psi) and Im(conj(target) * psi), respectively.
        F[0] += target_psi[0, i] * psi[0] + target_psi[1, i] * psi[1]
        F[1] += target_psi[0, i] * psi[1] - target_psi[1, i] * psi[0]
    # Fidelity = |<target|psi>|^2.
    return cplx.norm_sqr(F)
def rotate_psi(nn_state, basis, space, unitaries, psi=None):
    r"""A function that rotates the reconstructed wavefunction to a different
    basis.

    :param nn_state: The neural network state (i.e. complex wavefunction or
                     positive wavefunction).
    :type nn_state: WaveFunction
    :param basis: The basis to rotate the wavefunction to (one letter per
                  site; "Z" means no rotation on that site).
    :type basis: str
    :param space: The hilbert space of the system.
    :type space: torch.Tensor
    :param unitaries: A dictionary of (2x2) unitary operators.
    :type unitaries: dict
    :param psi: A wavefunction that the user can input to override the neural
                network state's wavefunction.
    :type psi: torch.Tensor
    :returns: A wavefunction in a new basis.
    :rtype: torch.Tensor
    """
    N = nn_state.num_visible
    # Scratch visible-state vector, reused for every configuration.
    v = torch.zeros(N, dtype=torch.double, device=nn_state.device)
    # Rotated amplitudes, one column per computational basis state.
    psi_r = torch.zeros(2, 1 << N, dtype=torch.double, device=nn_state.device)
    for x in range(1 << N):
        # Upsi accumulates the rotated amplitude for basis state x as
        # [real, imag].
        Upsi = torch.zeros(2, dtype=torch.double, device=nn_state.device)
        # Collect the sites where the requested basis differs from Z; only
        # those sites are rotated (the unitary is non-trivial there).
        num_nontrivial_U = 0
        nontrivial_sites = []
        for jj in range(N):
            if basis[jj] != "Z":
                num_nontrivial_U += 1
                nontrivial_sites.append(jj)
        # Enumerate all configurations of the rotated sites.
        sub_state = nn_state.generate_hilbert_space(num_nontrivial_U)
        for xp in range(1 << num_nontrivial_U):
            # Build the full visible configuration: rotated sites take their
            # sub-state value, Z sites keep the value from basis state x.
            cnt = 0
            for j in range(N):
                if basis[j] != "Z":
                    v[j] = sub_state[xp][cnt]
                    cnt += 1
                else:
                    v[j] = space[x, j]
            # U = product of the relevant unitary matrix elements (complex
            # scalar stored as [real, imag], starting at 1 + 0j).
            U = torch.tensor([1.0, 0.0], dtype=torch.double, device=nn_state.device)
            for ii in range(num_nontrivial_U):
                tmp = unitaries[basis[nontrivial_sites[ii]]]
                tmp = tmp[
                    :, int(space[x][nontrivial_sites[ii]]), int(v[nontrivial_sites[ii]])
                ].to(nn_state.device)
                U = cplx.scalar_mult(U, tmp)
            if psi is None:
                Upsi += cplx.scalar_mult(U, nn_state.psi(v))
            else:
                # Interpret v as a big-endian bit string to index into the
                # user-supplied wavefunction.
                index = 0
                for k in range(len(v)):
                    index = (index << 1) | int(v[k].item())
                Upsi += cplx.scalar_mult(U, psi[:, index])
        psi_r[:, x] = Upsi
    return psi_r
def KL(nn_state, target_psi, space, bases=None, **kwargs):
    r"""A function for calculating the total KL divergence.

    :param nn_state: The neural network state (i.e. complex wavefunction or
                     positive wavefunction).
    :type nn_state: WaveFunction
    :param target_psi: The true wavefunction of the system.
    :type target_psi: torch.Tensor
    :param space: The hilbert space of the system.
    :type space: torch.Tensor
    :param bases: An array of unique bases.
    :type bases: np.array(dtype=str)
    :param \**kwargs: Extra keyword arguments that may be passed. Will be ignored.

    :returns: The KL divergence.
    :rtype: torch.Tensor
    """
    psi_r = torch.zeros(
        2, 1 << nn_state.num_visible, dtype=torch.double, device=nn_state.device
    )
    KL = 0.0
    unitary_dict = unitaries.create_dict()
    target_psi = target_psi.to(nn_state.device)
    Z = nn_state.compute_normalization(space)
    # Small offset to avoid log(0) in the computational-basis branch.
    eps = 0.000001
    if bases is None:
        # Single (computational) basis: KL(p_target || p_model) where the
        # probabilities are |amplitude|^2 and the model is normalized by Z.
        num_bases = 1
        for i in range(len(space)):
            KL += (
                cplx.norm_sqr(target_psi[:, i])
                * (cplx.norm_sqr(target_psi[:, i]) + eps).log()
            )
            KL -= (
                cplx.norm_sqr(target_psi[:, i])
                * (cplx.norm_sqr(nn_state.psi(space[i])) + eps).log()
            )
            KL += cplx.norm_sqr(target_psi[:, i]) * Z.log()
    else:
        num_bases = len(bases)
        # NOTE(review): iteration starts at index 1, so bases[0] never
        # contributes to the sum even though the result is divided by
        # len(bases) — confirm this is intentional (bases[0] may be the
        # reference Z basis handled implicitly).
        for b in range(1, len(bases)):
            # Rotate both the model and the target into basis b, then
            # accumulate the same KL terms as above.
            psi_r = rotate_psi(nn_state, bases[b], space, unitary_dict)
            target_psi_r = rotate_psi(
                nn_state, bases[b], space, unitary_dict, target_psi
            )
            for ii in range(len(space)):
                # Skip states with zero target probability (0 * log 0 -> 0).
                if cplx.norm_sqr(target_psi_r[:, ii]) > 0.0:
                    KL += (
                        cplx.norm_sqr(target_psi_r[:, ii])
                        * cplx.norm_sqr(target_psi_r[:, ii]).log()
                    )
                    KL -= (
                        cplx.norm_sqr(target_psi_r[:, ii])
                        * cplx.norm_sqr(psi_r[:, ii]).log().item()
                    )
                    KL += cplx.norm_sqr(target_psi_r[:, ii]) * Z.log()
    # Average over the number of bases.
    return KL / float(num_bases)
| 39.431138 | 88 | 0.603037 |
42b6226b2537a6c2f5c9d9708d383f584b883497 | 214 | py | Python | tests/test0.py | Rfys/ConcreteStructures | a6cf18dc4697643bc6acffe6d228c4f9fc55f127 | [
"MIT"
] | 1 | 2021-08-20T19:28:11.000Z | 2021-08-20T19:28:11.000Z | tests/test0.py | arifyunando/ConcreteStructures | a6cf18dc4697643bc6acffe6d228c4f9fc55f127 | [
"MIT"
] | null | null | null | tests/test0.py | arifyunando/ConcreteStructures | a6cf18dc4697643bc6acffe6d228c4f9fc55f127 | [
"MIT"
] | null | null | null | cond = True
while cond:
    x = int(input("Input X Value :"))
    # Clamp x into [5, 10]: values above 10 map to 10, below 5 map to 5.
    y = 10 if (x > 10) else 5 if (x < 5) else x
    print("{} is the Y value".format(y))
    # The comparison already yields a bool; the previous
    # `True if ... else False` ternary was redundant.
    cond = input("continue? y/n :") == "y"
| 26.75 | 61 | 0.546729 |
42b7e07ad45d9d0be2cad9161c36276cb3b1762f | 1,433 | py | Python | 14.py | niharikasingh/aoc2018 | 21d430d393321e6066eca22d7c6b49e5eb42d756 | [
"MIT"
] | null | null | null | 14.py | niharikasingh/aoc2018 | 21d430d393321e6066eca22d7c6b49e5eb42d756 | [
"MIT"
] | null | null | null | 14.py | niharikasingh/aoc2018 | 21d430d393321e6066eca22d7c6b49e5eb42d756 | [
"MIT"
] | null | null | null | import copy
def next10(i):
    """Advent of Code 2018, day 14, part 2.

    Grow the recipe scoreboard (starting [3, 7], two elves) until the digit
    sequence *i* appears at the end of the board, and return the number of
    recipes to the left of that first occurrence.

    Args:
        i (list[int]): digit sequence to search for.

    Returns:
        int: count of recipes preceding the first occurrence of *i*.
    """
    board = [3, 7]
    elves = [0, 1]
    tlen = len(i)
    while True:
        total = board[elves[0]] + board[elves[1]]
        if total < 10:
            board.append(total)
            # Returning immediately (instead of storing the position in a
            # truthiness-tested variable) also fixes the case where the
            # pattern ends at index 0: the old `while not found` loop treated
            # a position of 0 as "not found" and never terminated.
            if board[-tlen:] == i:
                return len(board) - tlen
        else:
            board.append(1)
            board.append(total % 10)
            # Two digits were appended, so the pattern may end at either of
            # the last two positions.
            if board[-tlen:] == i:
                return len(board) - tlen
            if board[-tlen - 1:-1] == i:
                return len(board) - tlen - 1
        # Each elf advances by (current recipe score + 1), wrapping around.
        elves[0] = (elves[0] + board[elves[0]] + 1) % len(board)
        elves[1] = (elves[1] + board[elves[1]] + 1) % len(board)
# Part-1 variants (the 10 recipe scores after N recipes), kept for reference:
# assert next10(5) == [0,1,2,4,5,1,5,8,9,1]
# assert next10(9) == [5,1,5,8,9,1,6,7,7,9]
# assert next10(18) == [9,2,5,1,0,7,1,0,8,5]
# assert next10(2018) == [5,9,4,1,4,2,9,8,8,2]
# print(next10(760221))
# Part 2: number of recipes to the left of the first occurrence of the pattern.
assert next10([0,1,2,4,5]) == 5
assert next10([5,1,5,8,9]) == 9
assert next10([9,2,5,1,0]) == 18
assert next10([5,9,4,1,4]) == 2018
# Puzzle input (takes noticeably longer than the examples above).
print(next10([7,6,0,2,2,1]))
| 31.844444 | 66 | 0.491975 |
42b83fe05de3f7690454c9ae7844d6d5c0896fb0 | 892 | py | Python | rpc inv matriz/ServerRPC.py | Aldair47x/DISTRIBUIDOS-UTP | 182f143b3a5d73744f78eb4fe1428cbca22387c2 | [
"MIT"
] | null | null | null | rpc inv matriz/ServerRPC.py | Aldair47x/DISTRIBUIDOS-UTP | 182f143b3a5d73744f78eb4fe1428cbca22387c2 | [
"MIT"
] | null | null | null | rpc inv matriz/ServerRPC.py | Aldair47x/DISTRIBUIDOS-UTP | 182f143b3a5d73744f78eb4fe1428cbca22387c2 | [
"MIT"
] | null | null | null | import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
import numpy as np
from io import StringIO
from numpy.linalg import inv
from scipy.linalg import *
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
    # Only requests to /RPC2 are dispatched; anything else gets a 404.
    rpc_paths = ('/RPC2',)
# Create server
# NOTE(review): SimpleXMLRPCServer / xmlrpclib are Python 2 module names
# (renamed xmlrpc.server / xmlrpc.client in Python 3) — this script targets
# Python 2.
server = SimpleXMLRPCServer(("localhost", 9000),
    requestHandler=RequestHandler)
server.register_introspection_functions()
# Client proxy pointing back at this same server.
s = xmlrpclib.ServerProxy('http://localhost:9000')
def operacion(name):
    """Read a whitespace-separated matrix from the file `name` and return
    its inverse as a string.

    Args:
        name: path to a text file, one matrix row per line.

    Returns:
        str: printable representation of the inverted matrix.
    """
    matriz = []
    print ("Franquito")
    # `with` closes the file even if parsing fails (the old open/close pair
    # leaked the handle on error).
    with open(name) as archivo:
        for linea in archivo:
            # split() yields strings; convert each token to float so the
            # linear-algebra routine receives a numeric matrix (inv() fails
            # with a type error on an array of strings).
            matriz.append([float(valor) for valor in linea.split()])
    matrizInv = inv(matriz)
    return str(matrizInv)
# Expose `operacion` to XML-RPC clients under the name 'operacion'.
server.register_function(operacion, 'operacion')
# Run the server's main loop
server.serve_forever() | 27.030303 | 59 | 0.734305 |
42b8718fafc7a5efe59718792e559a9ba4afb7ac | 38 | py | Python | jetbrains-academy/Zookeeper/Problems/Print an integer/task.py | robinpatra/ML-Study-3 | 6f401706a8da4cac5e63304ce09ff6ff62756d0b | [
"MIT"
] | null | null | null | jetbrains-academy/Zookeeper/Problems/Print an integer/task.py | robinpatra/ML-Study-3 | 6f401706a8da4cac5e63304ce09ff6ff62756d0b | [
"MIT"
] | null | null | null | jetbrains-academy/Zookeeper/Problems/Print an integer/task.py | robinpatra/ML-Study-3 | 6f401706a8da4cac5e63304ce09ff6ff62756d0b | [
"MIT"
] | null | null | null | # put your python code here
# Exercise solution: print a single integer.
print(10)
| 12.666667 | 27 | 0.736842 |
42ba676a4b1855f63fba242958ff64fc7b10d468 | 1,524 | py | Python | damq/api/management/commands/check_settings.py | zhanghui9700/clouddam | 18c7c7578fb727bcab50737b51b8fb5c09070b48 | [
"Apache-2.0"
] | null | null | null | damq/api/management/commands/check_settings.py | zhanghui9700/clouddam | 18c7c7578fb727bcab50737b51b8fb5c09070b48 | [
"Apache-2.0"
] | null | null | null | damq/api/management/commands/check_settings.py | zhanghui9700/clouddam | 18c7c7578fb727bcab50737b51b8fb5c09070b48 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
from smtplib import SMTPException
from django.conf import settings
from django.core.management import BaseCommand
from django.core.mail import send_mail
class Command(BaseCommand):
    """Management command that sanity-checks email and RPC settings."""
    def _log(self, tag, result):
        # One aligned line per check: ":-)" on success, "XXX" on failure.
        label = self.style.ERROR("XXX")
        if result:
            label = self.style.MIGRATE_SUCCESS(":-)")
        self.stdout.write("{:<30}{:<5}".format(tag, label))
    def _check_mail(self):
        # Sends a probe email to the first configured admin.
        if len(settings.ADMINS) < 1:
            self._log("CHECK_MAIL No Admin", False)
            return
        try:
            title = "%sCheck Settings" % settings.EMAIL_SUBJECT_PREFIX
            msg = "This message used for checking email settings."
            result = send_mail(title, msg,
                settings.EMAIL_FROM, [settings.ADMINS[0]])
        except SMTPException as e:
            # NOTE(review): `result = False` is dead — `raise e` propagates
            # immediately, so the failure label below is never printed.
            result = False
            raise e
        self._log("CHECK_MAIL", result)
    def _check_rpc_send(self):
        # Sends a probe message over the project's RPC layer; import is local
        # so the command still loads when the rpc package is unavailable at
        # import time.
        try:
            from rpc import notify
            msg = "{'test': 'This message used for checking email settings.'}"
            notify(msg, routing="transResponse")
        except Exception as e:
            # NOTE(review): same pattern as above — the exception is re-raised
            # before any failure label could be logged.
            raise e
        self._log("CHECK_RPC_SEND", True)
    def handle(self, *args, **kwargs):
        # Entry point: run both checks between banner lines.
        self.stdout.write(self.style.WARNING("************CHECK START************"))
        self._check_mail()
        self._check_rpc_send()
        self.stdout.write(self.style.WARNING("************CHECK END*************"))
| 30.48 | 84 | 0.57874 |
42ba9ea7e400e5ef293ccdc589dfbbce586a2405 | 4,113 | py | Python | sidomo/sidomo.py | noajshu/sdpm | b70825d9017eb0c2c6b6389345cccbcbd52cf669 | [
"Unlicense"
] | 358 | 2016-02-24T01:36:55.000Z | 2022-02-20T00:10:22.000Z | sidomo/sidomo.py | noajshu/sdpm | b70825d9017eb0c2c6b6389345cccbcbd52cf669 | [
"Unlicense"
] | 5 | 2016-02-24T22:50:25.000Z | 2017-01-30T07:58:00.000Z | sidomo/sidomo.py | noajshu/sdpm | b70825d9017eb0c2c6b6389345cccbcbd52cf669 | [
"Unlicense"
] | 27 | 2016-02-24T13:40:22.000Z | 2021-06-30T12:04:41.000Z | """Manages the lifecycle of a docker container.
Use via the with statement:
with Container(some_image) as c:
for line in c.run("some_command"):
print line
"""
import docker
import click, os
# sets the docker host from your environment variables
client = docker.Client(
**docker.utils.kwargs_from_env(assert_hostname=False))
class Container:
    """
    Represents a single docker container on the host.
    Volumes should be a list of mapped paths, e.g. ['/var/log/docker:/var/log/docker'].
    """
    def __init__(self, image, memory_limit_gb=4, stderr=True, stdout=True, volumes=None, cleanup=False, environment=None):
        # Mutable default arguments ([]) are shared across all calls; use
        # None sentinels instead. Passing explicit lists behaves exactly as
        # before.
        if volumes is None:
            volumes = []
        if environment is None:
            environment = []
        self.image = image
        self.memory_limit_bytes = int(memory_limit_gb * 1e9)
        self.stderr = stderr
        self.stdout = stdout
        # Container-side mount points: 'host:container' -> 'container'.
        self.volumes = [mapping.split(':')[1] for mapping in volumes]
        self.binds = volumes
        self.cleanup = cleanup
        self.environment = environment
    def __enter__(self):
        """Power on: create and start the container."""
        self.container_id = client.create_container(
            image=self.image,
            volumes=self.volumes,
            host_config=client.create_host_config(
                mem_limit=self.memory_limit_bytes,
                binds=self.binds,
            ),
            environment=self.environment,
            stdin_open=True
        )['Id']
        client.start(self.container_id)
        return self
    def __exit__(self, type, value, traceback):
        """Power off: stop (and, when cleanup is set, remove) the container."""
        client.stop(self.container_id)
        if self.cleanup:
            client.remove_container(self.container_id)
    def run(self, command):
        """Just like 'docker run CMD'.
        This is a generator that yields lines of container output.
        """
        exec_id = client.exec_create(
            container=self.container_id,
            cmd=command,
            stdout=self.stdout,
            stderr=self.stderr
        )['Id']
        for line in client.exec_start(exec_id, stream=True):
            yield line
@click.command()
@click.argument('do', nargs=-1)
@click.option('--image', '-i', help='Image name in which to run do', default=None)
# NOTE(review): this default is evaluated once, at import time, so it is the
# working directory of whoever imported the module — confirm that is intended.
@click.option('--sharedir', '-s', help='Directory on host machine to mount to docker.', default=os.path.abspath(os.getcwd()))
@click.option('--display', '-d', help='Display variable to set for X11 forwarding.', default=None)
def dodo(do, image, sharedir, display):
    """ dodo (like sudo but for docker) runs argument in a docker image.
    do is the command to run in the image.
    image taken from (1) command-line, (2) "DODOIMAGE" environment variable, or (3) first built image.
    sharedir (e.g., to pass data to command) is mounted (default: current directory). empty string does no mounting.
    display is environment variable to set in docker image that allows X11 forwarding.
    """
    # try to set image three ways
    if not image:
        if 'DODOIMAGE' in os.environ:
            image = os.environ['DODOIMAGE']
        else:
            ims = client.images()
            if len(ims) >= 1:
                # Fall back to the first locally-available image tag.
                image = [im['RepoTags'][0] for im in client.images()][0]
    assert image, 'No image given or found locally.'
    # get image if not available locally
    imnames = [im['RepoTags'][0] for im in client.images()]
    if (not any([image in imname for imname in imnames])) and client.search(image):
        print('Image {} not found locally. Pulling from docker hub.'.format(image))
        client.pull(image)
    # mount directory in docker: sharedir maps to /home inside the container;
    # an empty string disables mounting entirely.
    if sharedir:
        volumes = ['{}:/home'.format(sharedir)]
    else:
        volumes = []
    # set docker environment to display X11 locally (CLI flag wins over the
    # DODODISPLAY environment variable).
    if display:
        environment = ['DISPLAY={}'.format(display)]
    elif 'DODODISPLAY' in os.environ:
        environment = ['DISPLAY={}'.format(os.environ['DODODISPLAY'])]
    else:
        environment = []
    # Run the command and echo each output line prefixed with the image name.
    with Container(image, volumes=volumes, cleanup=True, environment=environment) as c:
        for output_line in c.run(do):
            print('{}:\t {}'.format(image, output_line.decode('utf-8')))
| 33.713115 | 125 | 0.623389 |
42bb4531b3deb62a4952ce2f40bb5fa396ce9810 | 4,321 | py | Python | scripts/utils/prepare_data.py | Harshs27/mGLAD | f85d5a7cb2091a4528c762dc550d8c9b35d190b1 | [
"MIT"
] | null | null | null | scripts/utils/prepare_data.py | Harshs27/mGLAD | f85d5a7cb2091a4528c762dc550d8c9b35d190b1 | [
"MIT"
] | null | null | null | scripts/utils/prepare_data.py | Harshs27/mGLAD | f85d5a7cb2091a4528c762dc550d8c9b35d190b1 | [
"MIT"
] | null | null | null | import networkx as nx
import numpy as np
from sklearn import covariance
import torch
def convertToTorch(data, req_grad=False, use_cuda=False):
    """Convert data from numpy to a torch tensor; if the req_grad flag is on
    then gradient tracking is turned on.

    Args:
        data: numpy array or an existing torch tensor (passed through).
        req_grad (bool): enable gradient tracking on the returned tensor.
        use_cuda (bool): place newly created tensors on the GPU.

    Returns:
        A float32 torch tensor with `requires_grad` set to `req_grad`.
    """
    if not torch.is_tensor(data):
        dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        # `np.float` was a deprecated alias of the builtin float and was
        # removed in NumPy 1.24; np.float64 is the same dtype.
        data = torch.from_numpy(data.astype(np.float64, copy=False)).type(dtype)
    data.requires_grad = req_grad
    return data
def eigVal_conditionNum(A):
    """Return the real parts of the eigenvalues of A together with its
    condition number, defined as max(|eig|) / min(|eig|).
    """
    real_parts = [value.real for value in np.linalg.eigvals(A)]
    magnitudes = np.abs(real_parts)
    return real_parts, magnitudes.max() / magnitudes.min()
def getCovariance(Xb, offset = 0.1):
    """Compute the empirical covariance matrix for each sample matrix in a
    batch, nudging near-singular results to a minimum eigenvalue of `offset`.

    Args:
        Xb (3D np array): input sample matrices (B x M x D).
        offset (float): eigenvalue floor applied when the smallest
            eigenvalue falls at or below 1e-3.

    Returns:
        3D np array of covariance matrices (B x D x D).
    """
    results = []
    for sample in Xb:
        cov = covariance.empirical_covariance(sample, assume_centered=False)
        # Inspect the spectrum of the covariance estimate.
        eig, con = eigVal_conditionNum(cov)
        min_eig = min(eig)
        if min_eig <= 1e-3:
            # Shift the whole spectrum up so the smallest eigenvalue
            # becomes `offset`.
            print(f'Adjust the eval: min {min(eig)}, con {con}')
            cov += np.eye(cov.shape[-1]) * (offset - min_eig)
        results.append(cov)
    return np.array(results)
def generateRandomGraph(num_nodes, sparsity, seed=None):
    """Generate a random Erdos-Renyi graph with a given
    sparsity.

    Args:
        num_nodes (int): The number of nodes in the graph
        sparsity (float): = #edges-present/#total-edges
        seed (int, optional): set the numpy random seed

    Returns:
        edge_connections (2D np array (float)): Adj matrix
    """
    # `if seed:` would silently skip seeding for the valid seed value 0;
    # compare against None instead.
    if seed is not None:
        np.random.seed(seed)
    G = nx.generators.random_graphs.gnp_random_graph(
        num_nodes,
        sparsity,
        seed=seed,
        directed=False
    )
    edge_connections = nx.adjacency_matrix(G).todense()
    return edge_connections
def simulateGaussianSamples(
    num_nodes,
    edge_connections,
    num_samples,
    seed=None,
    u=0.1,
    w_min=0.5,
    w_max=1.0,
):
    """Simulating num_samples from a Gaussian distribution. The
    precision matrix of the Gaussian is determined using the
    edge_connections

    Args:
        num_nodes (int): The number of nodes in the graph
        edge_connections (2D np array (float)): Adj matrix
        num_samples (int): The number of samples
        seed (int, optional): set the numpy random seed
        u (float): Min eigenvalue offset for the precision matrix
        w_min (float): Precision matrix entries ~Unif[w_min, w_max]
        w_max (float): Precision matrix entries ~Unif[w_min, w_max]

    Returns:
        data (2D np array (float)): num_samples x num_nodes
        precision_mat (2D np array (float)): num_nodes x num_nodes
    """
    # zero mean of Gaussian distribution
    mean_value = 0
    mean_normal = np.ones(num_nodes) * mean_value
    # Setting the random seed. `if seed:` would skip the valid seed 0, so
    # test against None explicitly (same fix applied below).
    if seed is not None:
        np.random.seed(seed)
    # uniform entry matrix [w_min, w_max]
    U = np.matrix(np.random.random((num_nodes, num_nodes))
                  * (w_max - w_min) + w_min)
    theta = np.multiply(edge_connections, U)
    # making it symmetric
    theta = (theta + theta.T)/2 + np.eye(num_nodes)
    smallest_eigval = np.min(np.linalg.eigvals(theta))
    # Just in case : to avoid numerical error in case an
    # epsilon complex component present
    smallest_eigval = smallest_eigval.real
    # making the min eigenvalue as u
    precision_mat = theta + np.eye(num_nodes)*(u - smallest_eigval)
    # getting the covariance matrix (avoid the use of pinv)
    cov = np.linalg.inv(precision_mat)
    # re-seed so the sampling below is reproducible independently of the
    # matrix construction above
    if seed is not None:
        np.random.seed(seed)
    # Sampling data from multivariate normal distribution
    data = np.random.multivariate_normal(
        mean=mean_normal,
        cov=cov,
        size=num_samples
    )
    return data, precision_mat  # MxD, DxD
42bc62f46cb6d0412a2527cc37f497de098a673f | 1,475 | py | Python | Exercicios/multplica_matriz.py | eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_2_Coursera | b5b9198e16b4b67894b85766eb521ae96010accf | [
"MIT"
] | 1 | 2020-08-28T20:29:23.000Z | 2020-08-28T20:29:23.000Z | Exercicios/multplica_matriz.py | eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_2_Coursera | b5b9198e16b4b67894b85766eb521ae96010accf | [
"MIT"
] | null | null | null | Exercicios/multplica_matriz.py | eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_2_Coursera | b5b9198e16b4b67894b85766eb521ae96010accf | [
"MIT"
def multiplica_matrizes(m1, m2):
    """Multiply two matrices given as lists of row lists.

    Args:
        m1: left matrix (n x k rows of numbers).
        m2: right matrix (k x m rows of numbers).

    Returns:
        The n x m product matrix as a list of lists.
    """
    produto = []
    for linha in range(len(m1)):
        nova_linha = []
        for coluna in range(len(m2[0])):
            # Dot product of row `linha` of m1 with column `coluna` of m2
            # (replaces the manual while-loop with counter/accumulator
            # variables and the dead commented-out code).
            nova_linha.append(
                sum(m1[linha][k] * m2[k][coluna] for k in range(len(m2)))
            )
        produto.append(nova_linha)
    return produto
def mat_mul(A, B):
    """Multiply matrices A and B; AssertionError if inner dimensions differ."""
    linhas_a, colunas_a = len(A), len(A[0])
    linhas_b, colunas_b = len(B), len(B[0])
    assert colunas_a == linhas_b
    # Build each result cell as the dot product of a row of A and a column of B.
    return [
        [sum(A[i][k] * B[k][j] for k in range(colunas_a)) for j in range(colunas_b)]
        for i in range(linhas_a)
    ]
# mat1 = [[2,3,1], [-1, 0, 2]]
# mat2 = [[1, -2], [0, 5],[4, 1]]
# mat1 = [[5, 8, -4], [6, 9, -5],[4, 7, -2]]
# mat2 = [[2], [-3], [1]]
# mat1 = [[2,5,9], [3, 6, 8]]
mat1 = [[1, 2, 3], [4, 5, 6]]
mat2 = [[1, 2],[3, 4],[5, 6]]
# mat2 = [[2,7],[4,3],[5,2]]
#https://brasilescola.uol.com.br/matematica/multiplicacao-matrizes.htm
#
print(multiplica_matrizes(mat1, mat2))
# print(mat_mul (mat1, mat2)) | 28.921569 | 70 | 0.492203 |
42be5593e5ad34672a3cc8c35480ab8277503b75 | 366 | py | Python | zoo_checks/migrations/0016_auto_20190609_2337.py | falkben/zoo-checks | b1d6cf980088664b1feded4a1e08a82c6f358c33 | [
"Apache-2.0"
] | null | null | null | zoo_checks/migrations/0016_auto_20190609_2337.py | falkben/zoo-checks | b1d6cf980088664b1feded4a1e08a82c6f358c33 | [
"Apache-2.0"
] | 3 | 2019-06-01T14:56:19.000Z | 2019-06-03T13:53:27.000Z | zoo_checks/migrations/0016_auto_20190609_2337.py | falkben/zoo-checks | b1d6cf980088664b1feded4a1e08a82c6f358c33 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.2 on 2019-06-10 03:37
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``user`` field on the ``enclosure`` model to ``users``."""

    # Must be applied after the previous zoo_checks migration.
    dependencies = [
        ('zoo_checks', '0015_auto_20190609_2303'),
    ]

    operations = [
        # Column rename only; no data transformation is performed.
        migrations.RenameField(
            model_name='enclosure',
            old_name='user',
            new_name='users',
        ),
    ]
| 19.263158 | 50 | 0.587432 |
42c012688f03cf2033f2ea77e4e8d937fb973de4 | 996 | py | Python | bifacialvf/tests/test_vf.py | shirubana/bifacialvf | 7cd1c4c658bb7a68f0815b2bd1a6d5c492ca7300 | [
"BSD-3-Clause"
] | 16 | 2018-01-17T06:03:23.000Z | 2021-11-08T18:54:20.000Z | bifacialvf/tests/test_vf.py | shirubana/bifacialvf | 7cd1c4c658bb7a68f0815b2bd1a6d5c492ca7300 | [
"BSD-3-Clause"
] | 36 | 2018-03-16T15:17:58.000Z | 2022-03-18T17:54:49.000Z | bifacialvf/tests/test_vf.py | shirubana/bifacialvf | 7cd1c4c658bb7a68f0815b2bd1a6d5c492ca7300 | [
"BSD-3-Clause"
] | 15 | 2018-01-11T09:11:13.000Z | 2022-03-21T11:37:42.000Z | """
Tests of the view factors module
"""
import pytest
import numpy as np
from bifacialvf.vf import getSkyConfigurationFactors
from bifacialvf.tests import (
SKY_BETA160_C05_D1, SKY_BETA20_C05_D1, SKY_BETA20_C0_D1, SKY_BETA160_C0_D1,
SKY_BETA160_C1_D1, SKY_BETA20_C1_D1, SKY_BETA20_C1_D0, SKY_BETA160_C1_D0,
SKY_BETA160_C05_D0, SKY_BETA20_C05_D0)
# Each case pairs (beta, C, D) with a pre-computed fixture array.
# NOTE(review): parameter semantics (surface tilt, clearance, row spacing)
# inferred from names — confirm against bifacialvf.vf documentation.
@pytest.mark.parametrize('beta, C, D, expected',
    [(160, 0.5, 1, SKY_BETA160_C05_D1), (20, 0.5, 1, SKY_BETA20_C05_D1),
     (20, 0, 1, SKY_BETA20_C0_D1), (160, 0, 1, SKY_BETA160_C0_D1),
     (160, 1, 1, SKY_BETA160_C1_D1), (20, 1, 1, SKY_BETA20_C1_D1),
     (20, 1, 0, SKY_BETA20_C1_D0), (160, 1, 0, SKY_BETA160_C1_D0),
     (160, 0.5, 0, SKY_BETA160_C05_D0), (20, 0.5, 0, SKY_BETA20_C05_D0)])
def test_getSkyConfigurationFactors(beta, C, D, expected):
    """
    Regression benchmark against the master branch on 2018-08-20 at 91e785d.
    """
    assert np.allclose(
        getSkyConfigurationFactors("interior", beta, C, D), expected)
| 39.84 | 79 | 0.715863 |
42c0646e767e46f559cbd944cee5d0ed57e7deeb | 732 | py | Python | test_horovod.py | lu-wang-dl/test-horovod | 0b1699057fe03f84bbea46c3e63197a6c9e21c14 | [
"Apache-2.0"
] | null | null | null | test_horovod.py | lu-wang-dl/test-horovod | 0b1699057fe03f84bbea46c3e63197a6c9e21c14 | [
"Apache-2.0"
] | null | null | null | test_horovod.py | lu-wang-dl/test-horovod | 0b1699057fe03f84bbea46c3e63197a6c9e21c14 | [
"Apache-2.0"
] | null | null | null | # Databricks notebook source
import horovod.tensorflow.keras as hvd
def run_training_horovod():
  """Smoke-test Horovod in each process: initialize and print rank/size.

  NOTE(review): despite the name, no training happens here — the function
  only verifies hvd.init() succeeds and reports the process topology.
  """
  # Horovod: initialize Horovod.
  hvd.init()
  import os
  # Print the worker-side interpreter environment for debugging.
  print(os.environ.get('PYTHONPATH'))
  print(os.environ.get('PYTHONHOME'))
  print(f"Rank is: {hvd.rank()}")
  print(f"Size is: {hvd.size()}")
# COMMAND ----------
from sparkdl import HorovodRunner
# NOTE(review): a negative np presumably runs Horovod locally on the driver
# (Databricks HorovodRunner convention) — confirm against the API docs.
hr = HorovodRunner(np=-spark.sparkContext.defaultParallelism, driver_log_verbosity="all")
hr.run(run_training_horovod)
# COMMAND ----------
from sparkdl import HorovodRunner
# Positive np: distribute the run across cluster worker processes.
hr = HorovodRunner(np=spark.sparkContext.defaultParallelism, driver_log_verbosity="all")
hr.run(run_training_horovod) # manually stopping b/c it's just hanging
# COMMAND ----------
| 24.4 | 89 | 0.715847 |
42c257f8e741fb7454d04ca5e731e44ef13939f5 | 1,973 | py | Python | scripts/upload_certificates_to_ssm.py | ministryofjustice/bichard7-next-shared-infrastructure | 97ca93e00494e0e7df37dbf03dc6639a60fef956 | [
"MIT"
] | 2 | 2021-12-17T10:42:44.000Z | 2022-03-02T11:05:57.000Z | scripts/upload_certificates_to_ssm.py | ministryofjustice/bichard7-next-shared-infrastructure | 97ca93e00494e0e7df37dbf03dc6639a60fef956 | [
"MIT"
] | 4 | 2022-01-24T17:25:29.000Z | 2022-03-07T10:20:52.000Z | scripts/upload_certificates_to_ssm.py | ministryofjustice/bichard7-next-shared-infrastructure | 97ca93e00494e0e7df37dbf03dc6639a60fef956 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import boto3
import os
import base64
"""
Uploads pre-generated certificates to SSM for the ci to consume
example
WORKSPACE=dev-next ENVIRONMENT=sandbox aws-vault exec sandbox-shared -- make upload-certificates
"""
class UploadCertificates(object):
    """Uploads pre-generated VPN certificates to AWS SSM Parameter Store.

    Certificate files are read from the terraform VpnCerts directory for the
    configured ENVIRONMENT/WORKSPACE and stored base64-encoded as
    SecureString parameters under /ci/certs/<workspace>/<file_name>.
    """

    ssm_client = None
    environment = None
    workspace = None

    def __init__(self):
        self.ssm_client = boto3.client('ssm')
        # Defaults mirror the sandbox / dev-next development setup.
        self.environment = os.getenv("ENVIRONMENT", "sandbox")
        self.workspace = os.getenv("WORKSPACE", "dev-next")

    def upload_file(self, file_name):
        """Base64-encode one certificate file and upsert it into SSM."""
        print("Uploading {} to /ci/certs/{}/{}".format(file_name, self.workspace, file_name))
        path = "terraform/shared_account_{}_infra/VpnCerts/{}/{}".format(
            self.environment,
            self.workspace,
            file_name
        )
        # Context manager closes the handle even if reading fails
        # (the original leaked the handle on exception).
        with open(path) as file:
            contents = file.read()
        contents = base64.b64encode(contents.encode())
        # Advanced tier: certificate payloads can exceed the 4KB standard limit.
        self.ssm_client.put_parameter(
            Name="/ci/certs/{}/{}".format(self.workspace, file_name),
            Value=contents.decode(),
            Type="SecureString",
            Overwrite=True,
            Tier="Advanced"
        )

    def run(self):
        """Upload every expected certificate file if the directory exists."""
        # The original re-invoked self.__init__() here; that was redundant —
        # __init__ already ran at construction time.
        path = "terraform/shared_account_{}_infra/VpnCerts/{}".format(
            self.environment,
            self.workspace
        )
        if os.path.isdir(path):
            for file_name in ("ca.crt",
                              "client1.domain.tld.crt",
                              "client1.domain.tld.key",
                              "server.crt",
                              "server.key"):
                self.upload_file(file_name=file_name)
        else:
            print(
                "No certificates exist in {}, please see make generate-certificates to create them!".format(
                    path
                )
            )
if __name__ == "__main__":
uploader = UploadCertificates()
uploader.run()
| 29.893939 | 108 | 0.598074 |
42c29f19d671bb8c233ec4e951db0332a942d9aa | 1,709 | py | Python | OPManager.py | eduze/GenerativeDetector | 4aa85e1977abe0e400962dff12b3d0a26900bfd3 | [
"Apache-2.0"
] | 1 | 2018-05-11T03:12:11.000Z | 2018-05-11T03:12:11.000Z | OPManager.py | eduze/GenerativeDetector | 4aa85e1977abe0e400962dff12b3d0a26900bfd3 | [
"Apache-2.0"
] | null | null | null | OPManager.py | eduze/GenerativeDetector | 4aa85e1977abe0e400962dff12b3d0a26900bfd3 | [
"Apache-2.0"
] | null | null | null | import multiprocessing
from multiprocessing import Queue
from OpenPersonDetector import OpenPersonDetector
from newgen.GenerativeDetector import AbstractDetectorGenerator
class ManagedOPDetector:
    """Proxy detector that forwards frames to a worker over a queue pair."""

    def __init__(self, input_queue, output_queue):
        self.input_queue = input_queue
        self.output_queue = output_queue

    def detectPersons(self, image, discardedGrayImage):
        """Ship the frame to the worker and block until its result arrives."""
        frame_queue, result_queue = self.input_queue, self.output_queue
        frame_queue.put(image)
        detections = result_queue.get()
        return detections
class ManagedOPDetectorGenerator(AbstractDetectorGenerator):
    """Detector factory whose detectors talk to the shared worker process
    through the queue pair supplied at construction time."""

    def __init__(self, input_queue, output_queue):
        super().__init__()
        self.input_queue = input_queue
        self.output_queue = output_queue

    def generate_detector(self):
        # Every detector from this generator shares the same queue pair.
        return ManagedOPDetector(self.input_queue, self.output_queue)
class ManagedOP:
    """Owns one OpenPose worker process multiplexed over many queue pairs.

    Usage: call obtainGenerator() once per consumer, then startAsync() to
    launch the worker. NOTE(review): the pair list is snapshotted when the
    child process starts, so generators obtained after startAsync() are
    never serviced — obtain them all first.
    """

    def __init__(self):
        self.queue_pairs = []   # (input_queue, output_queue) per consumer
        self.worker = None      # the detector child process, set by startAsync

    def obtainGenerator(self):
        """Create a fresh queue pair and wrap it in a detector generator."""
        input_queue = Queue()
        output_queue = Queue()
        self.queue_pairs.append((input_queue, output_queue))
        return ManagedOPDetectorGenerator(input_queue, output_queue)

    def _async_worker(self, queue_pairs):
        """Child-process loop: one detector instance serves all queue pairs."""
        person_detector = OpenPersonDetector(preview=False)
        # NOTE(review): busy-wait polling — burns CPU when all queues are
        # empty; consider a blocking get() with timeout instead.
        while True:
            for input_queue, output_queue in queue_pairs:
                if input_queue.qsize() > 0:
                    frame = input_queue.get()
                    person_detections = person_detector.detectPersons(frame, None)
                    output_queue.put(person_detections)

    def startAsync(self):
        """Launch the worker as a daemon process (dies with the parent)."""
        self.worker = multiprocessing.Process(target=self._async_worker, args=(self.queue_pairs,))
        self.worker.daemon = True
        self.worker.start()
| 32.245283 | 98 | 0.693973 |
42c3250899086a2d423b9d8448bed7aa2e3d35b4 | 1,832 | py | Python | datasets.py | Liuhongzhi2018/Car_detection | f32fea9c348c691ccc30b9804a4f3fa32732bbae | [
"MIT"
] | 1 | 2022-03-05T04:20:46.000Z | 2022-03-05T04:20:46.000Z | datasets.py | Liuhongzhi2018/Car_detection | f32fea9c348c691ccc30b9804a4f3fa32732bbae | [
"MIT"
] | null | null | null | datasets.py | Liuhongzhi2018/Car_detection | f32fea9c348c691ccc30b9804a4f3fa32732bbae | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 12 10:11:09 2020
@author: NAT
"""
import torch
from torch.utils.data import Dataset
import json
import os
from PIL import Image
from utils import transform
class VOCDataset(Dataset):
    """Detection dataset backed by pre-built JSON index files.

    Expects <SPLIT>_images.json (list of image paths) and
    <SPLIT>_objects.json (per-image boxes/labels/difficulties)
    inside DataFolder.
    """

    def __init__(self, DataFolder, split):
        """
        DataFolder: folder where data files are stored
        split: split {"TRAIN", "TEST"}
        """
        self.split = str(split.upper())
        if self.split not in {"TRAIN", "TEST"}:
            print("Param split not in {TRAIN, TEST}")
        assert self.split in {"TRAIN", "TEST"}
        self.DataFolder = DataFolder
        #read data file from json file
        with open(os.path.join(DataFolder, self.split+ '_images.json'), 'r') as j:
            self.images = json.load(j)
        with open(os.path.join(DataFolder, self.split+ '_objects.json'), 'r') as j:
            self.objects = json.load(j)
        # One objects entry is required per image.
        assert len(self.images) == len(self.objects)

    def __len__(self):
        """Number of samples in this split."""
        return len(self.images)

    def __getitem__(self, i):
        """Return the transformed (image, boxes, labels, difficulties) for sample i."""
        image = Image.open(self.images[i], mode= "r")
        image = image.convert("RGB")
        #Read objects in this image
        objects = self.objects[i]
        boxes = torch.FloatTensor(objects["boxes"])
        labels = torch.LongTensor(objects['labels'])
        difficulties = torch.ByteTensor(objects['difficulties'])
        #Apply transforms (project helper from utils; presumably resize/augment — confirm)
        new_image, new_boxes, new_labels, new_difficulties = transform(image, boxes,
                                                       labels, difficulties, self.split)
        return new_image, new_boxes, new_labels, new_difficulties
| 33.309091 | 105 | 0.543668 |
42c34955df6c0e7aa377ac0cc57e813a5826e6fa | 7,564 | py | Python | roles/gitlab_runner/filter_plugins/from_toml.py | wikimedia/operations-gitlab-ansible | f6433674ff812ea6e07ee192ff6fd848ba252aaa | [
"MIT"
] | 17 | 2019-03-08T15:33:46.000Z | 2021-11-02T18:22:47.000Z | roles/gitlab_runner/filter_plugins/from_toml.py | wikimedia/operations-gitlab-ansible | f6433674ff812ea6e07ee192ff6fd848ba252aaa | [
"MIT"
] | 8 | 2018-12-23T21:17:36.000Z | 2019-12-10T13:52:13.000Z | roles/gitlab_runner/filter_plugins/from_toml.py | wikimedia/operations-gitlab-ansible | f6433674ff812ea6e07ee192ff6fd848ba252aaa | [
"MIT"
] | 12 | 2019-01-26T15:00:32.000Z | 2022-03-15T08:04:17.000Z | #!/usr/bin/python
DOCUMENTATION = '''
---
module: to_toml, from_toml
version_added: "2.8"
short_description: Converts Python data to TOML and TOML to Python data.
author:
- "Samy Coenen (contact@samycoenen.be)"
'''
import datetime
import sys
from collections import OrderedDict
#pip3 install python-toml
def to_toml(data):
    ''' Convert the value to TOML '''
    # Delegates to the module-level dumps(); `data` must be a dict.
    return dumps(data)
def from_toml(data):
    ''' Convert TOML to Python data '''
    # Delegates to loads(); `data` must be a TOML string.
    return loads(data)
class FilterModule(object):
    ''' Ansible TOML jinja2 filters '''

    def filters(self):
        # Ansible plugin entry point: maps template filter names to callables.
        return {
            # toml
            'to_toml': to_toml,
            'from_toml': from_toml
        }
def loads(s, *args, **kwargs):
    """Parse a TOML string into nested (ordered) dicts; extra args are ignored."""
    if not isinstance(s, basestring):
        raise TypeError("It's not a string.")
    try:
        # Py2 bytes -> unicode; on Py3 str has no .decode, so this is a no-op.
        s = s.decode('utf-8')
    except AttributeError:
        pass
    s = _clear_r_n_t(s)
    return _loads(s)
def load(file, *args, **kwargs):
    """Read the file at path `file` and parse its contents as TOML."""
    return loads(_read(file, *args, **kwargs))
def dumps(s, *args, **kwargs):
    """Serialize dict `s` to a TOML string; TypeError for non-dicts."""
    if not isinstance(s, dict):
        raise TypeError("It's not a dict.")
    return un_utf_8(_json_transition_str(s))
def dump(file, s, *args, **kwargs):
    """Serialize dict `s` to TOML and write it to the path `file`.

    NOTE(review): extra *args/**kwargs are accepted but never forwarded.
    """
    _write(file, dumps(s))
def _clear_r_n_t(v):
    """Drop carriage returns and tabs, then split the text into lines."""
    cleaned = v.replace('\r', '')
    cleaned = cleaned.replace('\t', '')
    return cleaned.split('\n')
def _clear_empty_l_r(v):
    """Trim spaces from both ends (other whitespace is preserved)."""
    return v.strip(' ')
def _clear_empty(v):
    """Remove every space character from the string."""
    return ''.join(v.split(' '))
def _is_empty(v):
    """Return the first element of v, or v itself when it is empty/falsy."""
    if not v:
        return v
    return v[0]
def _get_key(v):
    """Extract a table name from a TOML header line.

    Returns (key_parts, is_array_table): ``[[name]]`` marks an
    array-of-tables, ``[name]`` a plain table; key_parts is the dotted
    name split into components by _re().
    """
    key = _re('\[\[(.*?)\]\]', v)
    if key:
        return key, True
    return _re('\[(.*?)\]', v), False
def _loads(s):
    """Core line-by-line TOML parser; `s` is a list of pre-cleaned lines."""
    # items: result tree; nd: dict for the current table;
    # it/fg: accumulator + flag for values spanning multiple lines.
    items, nd, it, fg = ordict(), ordict(), [], False
    key_status = False  # True while the current header was an array-of-tables
    for v in s:
        # Skip blank lines and full-line comments.
        if not v or _is_empty(_clear_empty(v)) == '#':
            continue
        # A line whose first non-space char is '[' and that contains ']'
        # starts a new table; everything else is a key/value line.
        if '[' == _is_empty(_clear_empty(v)) and ']' in v:
            key, key_status = _get_key(v)
            nd = ordict()
        else:
            _it = v.split('=')
            _it[0] = _clear_empty(_is_empty(_it))
            """
            arr_arr = [
                'zbc',
                'sdf',
            ]
            """
            # Detect multi-line arrays: an opening "key = [" without a
            # closing bracket sets fg; a bare "]" line terminates it.
            try:
                if '[' not in _it[0] and _it[0][-1] == ']':
                    it.append(_it[0])
                    fg = False
                elif _it[1].replace(' ', '')[0] == '[' and ']' not in _it[1]:
                    it.append(_it[0])
                    fg = True
            except Exception as e:
                pass
            if fg:
                # Still inside a multi-line array: keep accumulating raw text.
                it.append(_it[1] if len(_it) > 1 else _it[0])
            elif not fg and it:
                # Array just closed: rebuild a single key/value pair.
                _it = [it[0], ''.join(it[1:])]
                it = []
            nd.update(_str_transition_json(_it))
        # Merge the current table dict into the result tree at `key`.
        ite = items
        try:
            # [1][:-1] = []
            # Walk (creating as needed) all but the last key component.
            for k in key[:-1]:
                try:
                    ite = ite[k]
                except Exception as e:
                    ite[k] = ordict()
                    ite = ite[k]
                if isinstance(ite, list):
                    # Array-of-tables parent: descend into its newest entry.
                    ite = ite[-1]
            try:
                ite[key[-1]]
                if key_status:
                    ite[key[-1]].append(nd)
            except Exception as e:
                ite[key[-1]] = [nd] if key_status else nd
            finally:
                key_status = False
        except Exception as e:
            # No table header seen yet: top-level key/value pairs.
            ite.update(nd)
            pass
    return items
def _str_transition_json(v):
    """Turn a split ['key', 'raw value', ...] pair into a one-entry dict."""
    item = ordict()
    if not isinstance(v, (list, tuple)):
        raise TypeError("It's not a list/tuple.")
    if (len(v) == 2):
        item[v[0]] = _str_transition_obj(_clear_empty_l_r(v[1]))
    elif (len(v) > 2):
        # The value itself contained '=': re-join the split pieces.
        item[v[0]] = _str_transition_obj(_clear_empty_l_r('='.join(v[1:])))
    return item
def _str_transition_obj(v):
    """Coerce a raw TOML value string to bool/datetime/number/list/str."""
    if not isinstance(v, basestring):
        raise TypeError("It's not a string")
    if v.lower() == 'true':
        return True
    elif v.lower() == 'false':
        return False
    try:
        # RFC 3339 Zulu timestamps, e.g. 1979-05-27T07:32:00Z.
        if _re('\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z', v):
            return str_to_datetime(v)
    except Exception as e:
        raise e
    try:
        # NOTE(review): eval() on input text — unsafe for untrusted TOML;
        # consider ast.literal_eval instead.
        _veal = eval(v.replace(',', ', '))
        if isinstance(_veal, basestring):
            return escape(_veal)
        return _veal
    except SyntaxError as e:
        pass
    # Fall through: keep the raw string as-is.
    return v
def _json_transition_str(s, _k='', index=0):
    """Recursively render dict `s` as TOML text; `_k` is the dotted prefix."""
    _s = ''
    for k, v in s.items():
        _k = _k.rstrip('.') + '.' if _k else ''
        if isinstance(v, dict):
            # Nested dict -> [table] header, indented one tab per depth level.
            _s += '\n' + '\t' * index + '[{}]\n'.format(_k + k)
            _s += _json_transition_str(v, _k + k, index=index + 1)
        elif isinstance(v, list) and isinstance(v[0], dict):
            # List of dicts -> TOML array-of-tables [[name]].
            for _v in v:
                _s += '\n' + '\t' * index + '[[{}]]\n'.format(_k + k)
                _s += _json_transition_str(_v, _k + k, index=index + 1)
        elif not isinstance(v, dict):
            # Scalar or plain list -> "key = value" line.
            _s += '\t' * index + _key_equal_value(k, v)
        else:
            _s += '\n'
    return _s
def _key_equal_value(k, v):
    """Format one ``key = value`` TOML line, newline-terminated."""
    if isinstance(v, datetime.datetime):
        v = datetime_to_str(v)
    elif isinstance(v, bool):
        # bool must be checked before the generic branches: TOML wants true/false.
        v = str(v).lower()
    elif not isinstance(v, basestring):
        v = str(v)
    else:
        # Strings are double-quoted.
        v = '"' + str(v) + '"'
    return k + ' = ' + _utf_8(v) + '\n'
def _read(file, *args, **kwargs):
    """Read a whole text file; forces UTF-8 decoding on Python 3."""
    if PY3:
        with open(file, encoding='utf-8', *args, **kwargs) as fp:
            v = fp.read()
    else:
        with open(file, *args, **kwargs) as fp:
            v = fp.read()
    return v
def _write(file, text, model='w', *args, **kwargs):
    """Write `text` to `file`; `model` is the open() mode (default 'w')."""
    if PY3:
        with open(file, model, encoding='utf-8', *args, **kwargs) as fp:
            fp.write(text)
    else:
        with open(file, model, *args, **kwargs) as fp:
            fp.write(text)
def _re(reg, text):
    """Return the first match of `reg` in `text`, split on '.'; [] if no match.

    A TOML header pattern applied to '[a.b.c]' yields ['a', 'b', 'c'].
    """
    # Local import: the module header never imports `re`, so the original
    # raised NameError on every call.
    import re
    found = re.findall(re.compile(reg), text)
    return found[0].split('.') if found else []
def escape(v):
    """Backslash-escape control characters in a string; non-strings pass through.

    NOTE(review): the '\\/' pattern equals plain '/', so every forward slash
    gets escaped to '\\/' — confirm that is intended.
    """
    if not isinstance(v, basestring):
        return v
    return v.replace(
        '\\', '\\\\').replace(
        '\b', '\\b').replace(
        '\t', '\\t').replace(
        '\f', '\\f').replace(
        '\r', '\\r').replace(
        '\"', '\\"').replace(
        '\/', '\\/').replace(
        '\n', '\\n')
def escape_u(v):
    """Unicode-escape a string; non-strings are returned unchanged."""
    if not isinstance(v, basestring):
        return v
    # v = escape(v)
    v = v.encode('unicode-escape').decode()
    if PY2:
        # Normalize Py2 \xNN escapes to \u00NN form.
        return v.replace('\\x', '\\u00')
    return v
def unescape_u(v):
    """Reverse escape_u: decode unicode-escape sequences back to text."""
    if not isinstance(v, basestring):
        return v
    # NOTE(review): unescape() is not defined anywhere in this module, so
    # this call raises NameError for any string input — confirm intent.
    v = unescape(v)
    return v.encode().decode('unicode-escape')
def _utf_8(v):
    """Py2: decode UTF-8 bytes to unicode; Py3: identity."""
    if PY2:
        return v.decode('utf-8')
    return v


def un_utf_8(v):
    """Py2: encode unicode back to UTF-8 bytes; Py3: identity."""
    if PY2:
        return v.encode('utf-8')
    return v
def str_to_datetime(dtstr, strftime='%Y-%m-%dT%H:%M:%SZ'):
    """Parse an RFC 3339 Zulu timestamp string into a naive datetime."""
    if not isinstance(dtstr, basestring):
        raise TypeError("It's not a string.")
    return datetime.datetime.strptime(dtstr, strftime)
def datetime_to_str(dttime, strftime='%Y-%m-%dT%H:%M:%SZ'):
    """Render a datetime as an RFC 3339 Zulu string; TypeError otherwise."""
    if isinstance(dttime, datetime.datetime):
        return dttime.strftime(strftime)
    raise TypeError("It's not a datetime.")
# Interpreter-version flags used to branch Py2/Py3 behaviour above.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY35 = sys.version_info[:2] == (3, 5)
PY36 = sys.version_info[:2] == (3, 6)
if PY3:
    # Py3 shims so the Py2-only names used throughout keep resolving.
    basestring = str,
    integer_types = int,
    unicode = str
    unichr = chr
    _range = range
else:
    integer_types = (int, long)
    _range = xrange
def ordict():
    """Plain dict on 3.6 (insertion-ordered), OrderedDict otherwise.

    NOTE(review): on 3.7+ PY36 is False, so OrderedDict is still used even
    though plain dicts preserve order there.
    """
    return {} if PY36 else OrderedDict()
if __name__ == '__main__':
pass | 22.714715 | 77 | 0.504098 |
42c37f3f064078bde91e95903b77950bc9bd114f | 414 | py | Python | ABC190/D.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | ABC190/D.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | ABC190/D.py | shimomura314/AtcoderCodes | db1d62a7715f5f1b3c40eceff8d34f0f34839f41 | [
"MIT"
] | null | null | null | def divisor(n: int):
divisors = []
for integer in range(1, int(n**0.5)+1):
if not n % integer:
divisors.append(integer)
divisors.append(n//integer)
divisors.sort()
return divisors
# ABC190 D: count runs of consecutive integers summing to n.
n = int(input())
divisors = divisor(2*n)
answer = 0
for integer in divisors:
    # A run of length `integer` starting at a sums to n when
    # integer * (2a + integer - 1) / 2 == n, i.e. 2a == 2n/integer + 1 - integer.
    pair = 2*n // integer
    a2 = pair + 1 - integer
    # a2 is 2a, so it must be even for an integer starting value.
    if a2 % 2 == 0:
        answer += 1
print(answer)
42c55d5c799cf1af35cb63cb32b363a33a23a6ae | 862 | py | Python | TV/models/episode.py | viswas163/Parse-bot | 881df2767cc5bdf88ff5dcc451a97c2ed96fc073 | [
"MIT"
] | null | null | null | TV/models/episode.py | viswas163/Parse-bot | 881df2767cc5bdf88ff5dcc451a97c2ed96fc073 | [
"MIT"
] | null | null | null | TV/models/episode.py | viswas163/Parse-bot | 881df2767cc5bdf88ff5dcc451a97c2ed96fc073 | [
"MIT"
] | null | null | null | from mongoengine import Document, IntField, StringField, FloatField, connect
from pymongo import UpdateOne
class Episode(Document):
    """MongoEngine document for one TV episode, identified by (title, show)."""
    title = StringField(required=True)
    show = StringField(required=True)
    rating = FloatField(required=True)
    votes = IntField(required=True)
def bulk_upsert(episodes):
    """Upsert episodes into MongoDB with one bulk_write, skipping invalid docs.

    Each valid episode becomes an UpdateOne upsert matched on (title, show).
    """
    # ValidationError was referenced below but never imported at module
    # level, so any validation failure raised NameError instead of being
    # skipped; a local import keeps the except clause resolvable.
    from mongoengine import ValidationError

    bulk_operations = []
    for entity in episodes:
        try:
            entity.validate()
            # Renamed from `filter`, which shadowed the builtin.
            query = {
                'title': entity.title,
                'show': entity.show
            }
            bulk_operations.append(
                UpdateOne(query, {'$set': entity.to_mongo().to_dict()}, upsert=True)
            )
        except ValidationError:
            # Deliberately skip documents that fail schema validation.
            pass
    if bulk_operations:
        # NOTE(review): mongoengine.connect returns a client, not an obvious
        # context manager — confirm `with` is valid here.
        with connect("tvdb") as c:
            collection = Episode._get_collection().bulk_write(bulk_operations, ordered=False)
42c5815338a4f22f9d03e6f124f41c7f94882e58 | 5,702 | py | Python | tests/__init__.py | open-contracting/pelican-backend | dee9afb48f7485f94544bcfbb977558d638098cd | [
"BSD-3-Clause"
] | 1 | 2021-07-21T15:23:22.000Z | 2021-07-21T15:23:22.000Z | tests/__init__.py | open-contracting/pelican-backend | dee9afb48f7485f94544bcfbb977558d638098cd | [
"BSD-3-Clause"
] | 40 | 2021-06-29T23:53:14.000Z | 2022-02-23T20:14:11.000Z | tests/__init__.py | open-contracting/pelican-backend | dee9afb48f7485f94544bcfbb977558d638098cd | [
"BSD-3-Clause"
] | null | null | null | import json
import os
from contextlib import AbstractContextManager
from tools import settings
def is_subset_dict(subset, superset):
    """True if every key/value pair of `subset` also appears in `superset`."""
    return all(
        key in superset and superset[key] == value
        for key, value in subset.items()
    )
def read(basename):
    """Load tests/fixtures/<basename>.json and return the parsed object."""
    with open(os.path.join("tests", "fixtures", f"{basename}.json")) as f:
        return json.load(f)
class override_settings(AbstractContextManager):
    """Context manager that temporarily patches attributes on tools.settings.

    Previous values are captured at construction time and restored on exit.
    """

    def __init__(self, **kwargs):
        self.new = kwargs
        self.old = {}
        # Snapshot the current values so __exit__ can restore them.
        for key, value in self.new.items():
            self.old[key] = getattr(settings, key)

    def __enter__(self):
        for key, value in self.new.items():
            setattr(settings, key, value)

    def __exit__(self, *args):
        for key, value in self.old.items():
            setattr(settings, key, value)
# I'm not sure if the below can be accomplished with pytest, so using unittest with pytest-subtests. At first glance,
# we would need to auto-generate parametrized `test_` functions, with the module as a bound variable.
class FieldCoverageTests:
    """Mixin for TestCase subclasses exercising a field-coverage check module.

    Subclasses set `module` (the check under test), `passing` (items the
    check accepts) and `failing` ((item, reason[, value]) tuples).
    """

    def test_passing(self):
        # Ensure the child class is configured.
        assert self.passing
        for item in self.passing:
            with self.subTest(item=item):
                result = self.module.calculate(item, "key")
                self.assertEqual(
                    result,
                    {
                        "name": self.module.name,
                        "result": True,
                        "value": None,
                        "reason": None,
                        "version": 1.0,
                    },
                )

    def test_failing(self):
        # Ensure the child class is configured.
        assert self.failing
        for params in self.failing:
            item = params[0]
            reason = params[1]
            # Optional third element: the value reported for the failure.
            if len(params) > 2:
                return_value = params[2]
            else:
                return_value = None
            with self.subTest(item=item):
                result = self.module.calculate(item, "key")
                self.assertEqual(
                    result,
                    {
                        "name": self.module.name,
                        "result": False,
                        "value": return_value,
                        "reason": reason,
                        "version": 1.0,
                    },
                )
class FieldQualityTests:
    """Mixin for TestCase subclasses exercising a field-quality check module.

    Subclasses set `module`, `passing` (accepted field values) and `failing`
    ((value, reason[, return_value]) tuples); kwargs/method may be overridden.
    """

    passing_kwargs = {}
    failing_kwargs = {}
    method = "calculate"

    def setUp(self):
        # Resolve the method name to the callable on the module under test.
        self.method = getattr(self.module, self.method)

    def test_passing(self):
        for value in self.passing:
            with self.subTest(value=value):
                # The check is always run against a synthetic {"xxx": value} item.
                result = self.method({"xxx": value}, "xxx", **self.passing_kwargs)
                self.assertEqual(
                    result,
                    {
                        "name": self.module.name,
                        "result": True,
                        "value": None,
                        "reason": None,
                        "version": 1.0,
                    },
                )

    def test_failing(self):
        for params in self.failing:
            value = params[0]
            reason = params[1]
            # Optional third element overrides the reported value
            # (defaults to the input value itself).
            if len(params) > 2:
                return_value = params[2]
            else:
                return_value = value
            with self.subTest(value=value):
                result = self.method({"xxx": value}, "xxx", **self.failing_kwargs)
                self.assertEqual(
                    result,
                    {
                        "name": self.module.name,
                        "result": False,
                        "value": return_value,
                        "reason": reason,
                        "version": 1.0,
                    },
                )
class CompiledReleaseTests:
    """Mixin for TestCase subclasses checking whole-compiled-release modules.

    Subclasses set `module`, `skipping` ((item, reason) tuples), `passing`
    ((item, meta, count) tuples) and `failing` ((item, meta,
    application_count, pass_count) tuples).
    """

    maxDiff = None

    passing_kwargs = {}
    failing_kwargs = {}
    method = "calculate"

    def setUp(self):
        # Resolve the method name to the callable on the module under test.
        self.method = getattr(self.module, self.method)

    def test_skipping(self):
        # Copy before extending: the original appended directly to
        # self.skipping, permanently mutating the shared class attribute
        # on every test run.
        skipping = list(self.skipping)
        # An empty item must always be skipped with the same reason.
        skipping.append(({}, skipping[0][1]))
        for item, reason in skipping:
            with self.subTest(item=item):
                result = self.method(item, **self.passing_kwargs)
                self.assertEqual(
                    result,
                    {
                        "result": None,
                        "meta": {"reason": reason},
                        "application_count": None,
                        "pass_count": None,
                        "version": 1.0,
                    },
                )

    def test_passing(self):
        for item, meta, count in self.passing:
            with self.subTest(item=item):
                result = self.method(item, **self.passing_kwargs)
                self.assertEqual(
                    result,
                    {
                        "result": True,
                        "meta": meta,
                        "application_count": count,
                        "pass_count": count,
                        "version": 1.0,
                    },
                )

    def test_failing(self):
        for item, meta, application_count, pass_count in self.failing:
            with self.subTest(item=item):
                result = self.method(item, **self.failing_kwargs)
                self.assertEqual(
                    result,
                    {
                        "result": False,
                        "meta": meta,
                        "application_count": application_count,
                        "pass_count": pass_count,
                        "version": 1.0,
                    },
                )
| 29.853403 | 117 | 0.452999 |
42c7d640438be57526485791184f5fce55d31e34 | 276 | py | Python | setup.py | pystatgen/sgk | f39e1b1bc3b16d05c5043ab5d445076424dad229 | [
"Apache-2.0"
] | 74 | 2020-06-16T18:08:24.000Z | 2022-02-10T06:42:30.000Z | setup.py | pystatgen/sgk | f39e1b1bc3b16d05c5043ab5d445076424dad229 | [
"Apache-2.0"
] | 677 | 2020-06-18T15:57:33.000Z | 2022-03-31T16:20:50.000Z | setup.py | pystatgen/sgk | f39e1b1bc3b16d05c5043ab5d445076424dad229 | [
"Apache-2.0"
] | 20 | 2020-06-22T13:40:10.000Z | 2022-03-05T03:33:13.000Z | #!/usr/bin/env python
from setuptools import setup
setup(
    # The package name along with all the other metadata is specified in setup.cfg
    # However, GitHub's dependency graph can't see the package unless we put this here.
    name="sgkit",
    # Version is derived from git tags via setuptools-scm.
    use_scm_version=True,
)
| 27.6 | 87 | 0.728261 |
42c7fd5ab2f8f92a26fe730d8bb3070bbc988e18 | 238 | py | Python | Python/5. Math/exercise2.py | mukeshmithrakumar/HackerRankSolutions | cd9e71be5e8703287b9f4efc042df8827175af1b | [
"MIT"
] | 12 | 2019-10-29T09:49:26.000Z | 2022-02-21T09:43:41.000Z | Python/5. Math/exercise2.py | ozturkosu/HackerRankSolutions | cd9e71be5e8703287b9f4efc042df8827175af1b | [
"MIT"
] | null | null | null | Python/5. Math/exercise2.py | ozturkosu/HackerRankSolutions | cd9e71be5e8703287b9f4efc042df8827175af1b | [
"MIT"
] | 10 | 2019-12-22T03:18:50.000Z | 2021-09-23T16:55:25.000Z | # Mod Divmod "https://www.hackerrank.com/challenges/python-mod-divmod/problem"
# Enter your code here. Read input from STDIN. Print output to STDOUT
a, b = (int(input()) for _ in range(2))
print(a // b)
print(a % b)
print(divmod(a, b))
| 26.444444 | 78 | 0.693277 |
42caa75d97d78a5da176444b0c283b314888e8e5 | 4,161 | py | Python | BasicReport.py | nikneural/Report | 414b08c157ef14345372fd5b84f134eb7c911fe4 | [
"MIT"
] | null | null | null | BasicReport.py | nikneural/Report | 414b08c157ef14345372fd5b84f134eb7c911fe4 | [
"MIT"
] | null | null | null | BasicReport.py | nikneural/Report | 414b08c157ef14345372fd5b84f134eb7c911fe4 | [
"MIT"
] | null | null | null | import subprocess
import docx.table
import pandas as pd
from docx import Document
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
from docx.oxml import OxmlElement
from docx.oxml import ns
from docx.oxml.ns import qn
from docx.shared import Inches, Pt
from docx.table import _Cell
from docx2pdf import convert
class BasicReport:
    """Helpers for building python-docx reports and exporting them to PDF.

    Several helpers reach into python-docx private lxml internals (_tbl,
    _tc, _r) because the public API does not expose row height, text
    direction, column deletion or PAGE fields.
    """

    def __init__(self):
        pass

    @staticmethod
    def column_text_change(data: pd.DataFrame,
                           table: docx.table.Table,
                           size: int,
                           bold: bool = False):
        """Set font size/boldness on the header row (row 0) of `table`."""
        for i in range(len(data.columns)):
            run = table.cell(0, i).paragraphs[0].runs[0]
            run.font.size = Pt(size)
            run.font.bold = bold

    @staticmethod
    def cell_text_change(data: pd.DataFrame,
                         table: docx.table.Table,
                         size: int,
                         bold: bool = False):
        """Set font size/boldness on all body cells (rows 1..n) of `table`."""
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                run = table.cell(i + 1, j).paragraphs[0].runs[0]
                run.font.size = Pt(size)
                run.font.bold = bold

    @staticmethod
    def delete_columns(table, columns):
        """Remove the given column indices from a docx table.

        Works on a copy so the caller's list is no longer mutated
        (the original sorted `columns` in place).
        """
        # Delete from the right so earlier indices stay valid.
        columns = sorted(columns, reverse=True)
        grid = table._tbl.find("w:tblGrid", table._tbl.nsmap)
        for ci in columns:
            for cell in table.column_cells(ci):
                cell._tc.getparent().remove(cell._tc)
            # Delete column reference.
            col_elem = grid[ci]
            grid.remove(col_elem)

    @staticmethod
    def generate_pdf_windows(doc_path: str, out_path: str):
        """Generate pdf file on Windows via docx2pdf (requires MS Word)."""
        convert(doc_path, out_path)

    @staticmethod
    def generate_pdf_Linux(doc_path, out_path):
        """Generate pdf file on Linux via LibreOffice (soffice)."""
        subprocess.call(['soffice',
                         # '--headless',
                         '--convert-to',
                         'pdf',
                         '--outdir',
                         out_path,
                         doc_path])
        return doc_path

    @staticmethod
    def set_row_height(row, height):
        """Force a fixed height (in twips) on a table row via raw XML."""
        trPr = row.tr.get_or_add_trPr()
        trHeight = OxmlElement('w:trHeight')
        trHeight.set(qn('w:val'), str(height))
        trPr.append(trHeight)

    @staticmethod
    def set_vertical_cell_direction(cell: _Cell, direction: str):
        """Rotate cell text: tbRl = top-to-bottom, btLr = bottom-to-top."""
        assert direction in ("tbRl", "btLr")
        tc = cell._tc
        tcPr = tc.get_or_add_tcPr()
        textDirection = OxmlElement('w:textDirection')
        textDirection.set(qn('w:val'), direction)
        tcPr.append(textDirection)

    @staticmethod
    def create_element(name):
        """Create a raw OOXML element with the given qualified name."""
        return OxmlElement(name)

    @staticmethod
    def create_attribute(element, name, value):
        """Set a namespaced attribute on a raw OOXML element."""
        element.set(ns.qn(name), value)

    def create_document(self, header):
        """Create a Document titled `header`, with footer text + page number."""
        document = Document()
        section = document.sections[-1]
        # Fixed: the original assigned `left_martin`, a typo that silently
        # created a dead attribute instead of setting the margin.
        section.left_margin = Inches(0.1)
        paragraph_format = document.styles['Normal'].paragraph_format
        paragraph_format.space_before = 0
        paragraph_format.space_after = 0
        document.add_paragraph().add_run(header).bold = True
        document.add_paragraph(" ")
        section.footer.paragraphs[0].text = header
        section.footer.add_paragraph()
        self.add_page_number(section.footer.paragraphs[1].add_run())
        section.footer.paragraphs[1].alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT
        return document

    def add_page_number(self, run):
        """Append a Word PAGE field (auto page number) to the given run."""
        # A Word field is begin-marker, instruction text, end-marker.
        fldChar1 = self.create_element('w:fldChar')
        self.create_attribute(fldChar1, 'w:fldCharType', 'begin')
        instrText = self.create_element('w:instrText')
        self.create_attribute(instrText, 'xml:space', 'preserve')
        instrText.text = "PAGE"
        fldChar2 = self.create_element('w:fldChar')
        self.create_attribute(fldChar2, 'w:fldCharType', 'end')
        run._r.append(fldChar1)
        run._r.append(instrText)
        run._r.append(fldChar2)
| 32.76378 | 77 | 0.59553 |
42cafc4dfa6b35f9ab782a932c1fbfc6c8a9e175 | 357 | py | Python | src/tests/test_import.py | williamsardar/pythonnet | a97a7768e0289e1b53ffc1ab67d89343d15bca44 | [
"MIT"
] | 1 | 2019-03-09T21:03:07.000Z | 2019-03-09T21:03:07.000Z | src/tests/test_import.py | williamsardar/pythonnet | a97a7768e0289e1b53ffc1ab67d89343d15bca44 | [
"MIT"
] | null | null | null | src/tests/test_import.py | williamsardar/pythonnet | a97a7768e0289e1b53ffc1ab67d89343d15bca44 | [
"MIT"
] | 1 | 2018-06-27T10:04:11.000Z | 2018-06-27T10:04:11.000Z | # -*- coding: utf-8 -*-
"""Test the import statement."""
import pytest
def test_relative_missing_import():
    """Test that a relative missing import doesn't crash.

    Some modules use a relative import to probe whether an optional package
    is installed; importing a missing sibling from the site-packages folder
    must raise ImportError rather than crash the interpreter.
    """
    with pytest.raises(ImportError):
        from . import _missing_import
| 25.5 | 61 | 0.703081 |
42cb33c39eef7cfef0c1bffc6049d2b0b685ecd4 | 2,879 | py | Python | src/metrics/metrics.py | koonn/bunseki | deb397e40a02bb709825c70c9be81f54449ac195 | [
"BSD-3-Clause"
] | null | null | null | src/metrics/metrics.py | koonn/bunseki | deb397e40a02bb709825c70c9be81f54449ac195 | [
"BSD-3-Clause"
] | null | null | null | src/metrics/metrics.py | koonn/bunseki | deb397e40a02bb709825c70c9be81f54449ac195 | [
"BSD-3-Clause"
] | null | null | null | """精度評価指標を計算するモジュール
y_trueと、y_pred_probaから精度評価指標を計算するための関数群
"""
import sklearn.metrics as skm
import numpy as np
def auc(y_true, y_pred_proba):
    """Compute the ROC AUC.

    Args:
        y_true (1-D array-like, shape [n_samples]): binary labels (0 or 1).
        y_pred_proba (1-D array-like, shape [n_samples]): predicted
            probability of the positive class (label 1).

    Returns:
        float: area under the ROC curve.
    """
    return skm.roc_auc_score(y_true, y_pred_proba)
def average_precision(y_true, y_pred_proba):
    """Compute the average precision (area under the PR curve).

    Args:
        y_true (1-D array-like, shape [n_samples]): binary labels (0 or 1).
        y_pred_proba (1-D array-like, shape [n_samples]): predicted
            probability of the positive class (label 1).

    Returns:
        float: average precision score.
    """
    return skm.average_precision_score(y_true, y_pred_proba)
def acc(y_true, y_pred_proba, threshold=0.5):
    """Compute accuracy after thresholding predicted probabilities.

    Args:
        y_true (1-D array-like, shape [n_samples]): binary labels (0 or 1).
        y_pred_proba (1-D array-like, shape [n_samples]): predicted
            probability of the positive class (label 1).
        threshold (float, default=0.5): probabilities >= threshold are
            classified as positive (label 1).

    Returns:
        float: accuracy.
    """
    # Binarize the predicted probabilities at the threshold.
    y_pred_label = np.where(y_pred_proba >= threshold, 1, 0)
    return skm.accuracy_score(y_true, y_pred_label)
def precision(y_true, y_pred_proba, threshold=0.5):
    """Compute precision after thresholding predicted probabilities.

    (Original docstring mistakenly said "Accuracy".)

    Args:
        y_true (1-D array-like, shape [n_samples]): binary labels (0 or 1).
        y_pred_proba (1-D array-like, shape [n_samples]): predicted
            probability of the positive class (label 1).
        threshold (float, default=0.5): probabilities >= threshold are
            classified as positive (label 1).

    Returns:
        float: precision.
    """
    # Binarize the predicted probabilities at the threshold.
    y_pred_label = np.where(y_pred_proba >= threshold, 1, 0)
    return skm.precision_score(y_true, y_pred_label)
def recall(y_true, y_pred_proba, threshold=0.5):
    """Compute recall after thresholding predicted probabilities.

    (Original docstring mistakenly said "Accuracy".)

    Args:
        y_true (1-D array-like, shape [n_samples]): binary labels (0 or 1).
        y_pred_proba (1-D array-like, shape [n_samples]): predicted
            probability of the positive class (label 1).
        threshold (float, default=0.5): probabilities >= threshold are
            classified as positive (label 1).

    Returns:
        float: recall.
    """
    # Binarize the predicted probabilities at the threshold.
    y_pred_label = np.where(y_pred_proba >= threshold, 1, 0)
    return skm.recall_score(y_true, y_pred_label)
def f1_score(y_true, y_pred_proba, threshold=0.5):
    """Compute the F1 score at a given decision threshold.

    Args:
        y_true (1-D array-like, shape [n_samples]): Binary ground-truth labels
            (each label is 0 or 1).
        y_pred_proba (1-D array-like, shape [n_samples]): Predicted probability
            of the positive class (label 1) for each sample.
        threshold (float, default=0.5): Probabilities at or above this value
            are classified as the positive label 1, the rest as 0.

    Returns:
        float: The F1 score (harmonic mean of precision and recall).
    """
    # Threshold the probabilities into hard 0/1 labels, then score them.
    binarized = (y_pred_proba >= threshold).astype(int)
    return skm.f1_score(y_true, binarized)
| 27.951456 | 91 | 0.688086 |
42ccc44e8009b96f149e5a73b46e05451f065677 | 19,759 | py | Python | google/ads/google_ads/v2/proto/services/campaign_criterion_service_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v2/proto/services/campaign_criterion_service_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v2/proto/services/campaign_criterion_service_pb2.py | jiulongw/google-ads-python | 6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e | [
"Apache-2.0"
] | 1 | 2020-03-13T00:14:31.000Z | 2020-03-13T00:14:31.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v2/proto/services/campaign_criterion_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v2.proto.resources import campaign_criterion_pb2 as google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__criterion__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v2/proto/services/campaign_criterion_service.proto',
package='google.ads.googleads.v2.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v2.servicesB\035CampaignCriterionServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V2.Services\312\002 Google\\Ads\\GoogleAds\\V2\\Services\352\002$Google::Ads::GoogleAds::V2::Services'),
serialized_pb=_b('\nGgoogle/ads/googleads_v2/proto/services/campaign_criterion_service.proto\x12 google.ads.googleads.v2.services\x1a@google/ads/googleads_v2/proto/resources/campaign_criterion.proto\x1a\x1cgoogle/api/annotations.proto\x1a google/protobuf/field_mask.proto\x1a\x17google/rpc/status.proto\x1a\x17google/api/client.proto\"4\n\x1bGetCampaignCriterionRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"\xb6\x01\n\x1dMutateCampaignCriteriaRequest\x12\x13\n\x0b\x63ustomer_id\x18\x01 \x01(\t\x12P\n\noperations\x18\x02 \x03(\x0b\x32<.google.ads.googleads.v2.services.CampaignCriterionOperation\x12\x17\n\x0fpartial_failure\x18\x03 \x01(\x08\x12\x15\n\rvalidate_only\x18\x04 \x01(\x08\"\xfc\x01\n\x1a\x43\x61mpaignCriterionOperation\x12/\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x46\n\x06\x63reate\x18\x01 \x01(\x0b\x32\x34.google.ads.googleads.v2.resources.CampaignCriterionH\x00\x12\x46\n\x06update\x18\x02 \x01(\x0b\x32\x34.google.ads.googleads.v2.resources.CampaignCriterionH\x00\x12\x10\n\x06remove\x18\x03 \x01(\tH\x00\x42\x0b\n\toperation\"\xa5\x01\n\x1eMutateCampaignCriteriaResponse\x12\x31\n\x15partial_failure_error\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12P\n\x07results\x18\x02 \x03(\x0b\x32?.google.ads.googleads.v2.services.MutateCampaignCriterionResult\"6\n\x1dMutateCampaignCriterionResult\x12\x15\n\rresource_name\x18\x01 
\x01(\t2\xe1\x03\n\x18\x43\x61mpaignCriterionService\x12\xc7\x01\n\x14GetCampaignCriterion\x12=.google.ads.googleads.v2.services.GetCampaignCriterionRequest\x1a\x34.google.ads.googleads.v2.resources.CampaignCriterion\":\x82\xd3\xe4\x93\x02\x34\x12\x32/v2/{resource_name=customers/*/campaignCriteria/*}\x12\xdd\x01\n\x16MutateCampaignCriteria\x12?.google.ads.googleads.v2.services.MutateCampaignCriteriaRequest\x1a@.google.ads.googleads.v2.services.MutateCampaignCriteriaResponse\"@\x82\xd3\xe4\x93\x02:\"5/v2/customers/{customer_id=*}/campaignCriteria:mutate:\x01*\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x84\x02\n$com.google.ads.googleads.v2.servicesB\x1d\x43\x61mpaignCriterionServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v2/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V2.Services\xca\x02 Google\\Ads\\GoogleAds\\V2\\Services\xea\x02$Google::Ads::GoogleAds::V2::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__criterion__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,])
_GETCAMPAIGNCRITERIONREQUEST = _descriptor.Descriptor(
name='GetCampaignCriterionRequest',
full_name='google.ads.googleads.v2.services.GetCampaignCriterionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.services.GetCampaignCriterionRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=289,
serialized_end=341,
)
_MUTATECAMPAIGNCRITERIAREQUEST = _descriptor.Descriptor(
name='MutateCampaignCriteriaRequest',
full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partial_failure', full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaRequest.partial_failure', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validate_only', full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaRequest.validate_only', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=344,
serialized_end=526,
)
_CAMPAIGNCRITERIONOPERATION = _descriptor.Descriptor(
name='CampaignCriterionOperation',
full_name='google.ads.googleads.v2.services.CampaignCriterionOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='update_mask', full_name='google.ads.googleads.v2.services.CampaignCriterionOperation.update_mask', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v2.services.CampaignCriterionOperation.create', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='google.ads.googleads.v2.services.CampaignCriterionOperation.update', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove', full_name='google.ads.googleads.v2.services.CampaignCriterionOperation.remove', index=3,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v2.services.CampaignCriterionOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=529,
serialized_end=781,
)
_MUTATECAMPAIGNCRITERIARESPONSE = _descriptor.Descriptor(
name='MutateCampaignCriteriaResponse',
full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partial_failure_error', full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaResponse.partial_failure_error', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v2.services.MutateCampaignCriteriaResponse.results', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=784,
serialized_end=949,
)
_MUTATECAMPAIGNCRITERIONRESULT = _descriptor.Descriptor(
name='MutateCampaignCriterionResult',
full_name='google.ads.googleads.v2.services.MutateCampaignCriterionResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v2.services.MutateCampaignCriterionResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=951,
serialized_end=1005,
)
_MUTATECAMPAIGNCRITERIAREQUEST.fields_by_name['operations'].message_type = _CAMPAIGNCRITERIONOPERATION
_CAMPAIGNCRITERIONOPERATION.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_CAMPAIGNCRITERIONOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__criterion__pb2._CAMPAIGNCRITERION
_CAMPAIGNCRITERIONOPERATION.fields_by_name['update'].message_type = google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__criterion__pb2._CAMPAIGNCRITERION
_CAMPAIGNCRITERIONOPERATION.oneofs_by_name['operation'].fields.append(
_CAMPAIGNCRITERIONOPERATION.fields_by_name['create'])
_CAMPAIGNCRITERIONOPERATION.fields_by_name['create'].containing_oneof = _CAMPAIGNCRITERIONOPERATION.oneofs_by_name['operation']
_CAMPAIGNCRITERIONOPERATION.oneofs_by_name['operation'].fields.append(
_CAMPAIGNCRITERIONOPERATION.fields_by_name['update'])
_CAMPAIGNCRITERIONOPERATION.fields_by_name['update'].containing_oneof = _CAMPAIGNCRITERIONOPERATION.oneofs_by_name['operation']
_CAMPAIGNCRITERIONOPERATION.oneofs_by_name['operation'].fields.append(
_CAMPAIGNCRITERIONOPERATION.fields_by_name['remove'])
_CAMPAIGNCRITERIONOPERATION.fields_by_name['remove'].containing_oneof = _CAMPAIGNCRITERIONOPERATION.oneofs_by_name['operation']
_MUTATECAMPAIGNCRITERIARESPONSE.fields_by_name['partial_failure_error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_MUTATECAMPAIGNCRITERIARESPONSE.fields_by_name['results'].message_type = _MUTATECAMPAIGNCRITERIONRESULT
DESCRIPTOR.message_types_by_name['GetCampaignCriterionRequest'] = _GETCAMPAIGNCRITERIONREQUEST
DESCRIPTOR.message_types_by_name['MutateCampaignCriteriaRequest'] = _MUTATECAMPAIGNCRITERIAREQUEST
DESCRIPTOR.message_types_by_name['CampaignCriterionOperation'] = _CAMPAIGNCRITERIONOPERATION
DESCRIPTOR.message_types_by_name['MutateCampaignCriteriaResponse'] = _MUTATECAMPAIGNCRITERIARESPONSE
DESCRIPTOR.message_types_by_name['MutateCampaignCriterionResult'] = _MUTATECAMPAIGNCRITERIONRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetCampaignCriterionRequest = _reflection.GeneratedProtocolMessageType('GetCampaignCriterionRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCAMPAIGNCRITERIONREQUEST,
__module__ = 'google.ads.googleads_v2.proto.services.campaign_criterion_service_pb2'
,
__doc__ = """Request message for
[CampaignCriterionService.GetCampaignCriterion][google.ads.googleads.v2.services.CampaignCriterionService.GetCampaignCriterion].
Attributes:
resource_name:
The resource name of the criterion to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.GetCampaignCriterionRequest)
))
_sym_db.RegisterMessage(GetCampaignCriterionRequest)
MutateCampaignCriteriaRequest = _reflection.GeneratedProtocolMessageType('MutateCampaignCriteriaRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATECAMPAIGNCRITERIAREQUEST,
__module__ = 'google.ads.googleads_v2.proto.services.campaign_criterion_service_pb2'
,
__doc__ = """Request message for
[CampaignCriterionService.MutateCampaignCriteria][google.ads.googleads.v2.services.CampaignCriterionService.MutateCampaignCriteria].
Attributes:
customer_id:
The ID of the customer whose criteria are being modified.
operations:
The list of operations to perform on individual criteria.
partial_failure:
If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will
be carried out in one transaction if and only if they are all
valid. Default is false.
validate_only:
If true, the request is validated but not executed. Only
errors are returned, not results.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.MutateCampaignCriteriaRequest)
))
_sym_db.RegisterMessage(MutateCampaignCriteriaRequest)
CampaignCriterionOperation = _reflection.GeneratedProtocolMessageType('CampaignCriterionOperation', (_message.Message,), dict(
DESCRIPTOR = _CAMPAIGNCRITERIONOPERATION,
__module__ = 'google.ads.googleads_v2.proto.services.campaign_criterion_service_pb2'
,
__doc__ = """A single operation (create, update, remove) on a campaign criterion.
Attributes:
update_mask:
FieldMask that determines which resource fields are modified
in an update.
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
criterion.
update:
Update operation: The criterion is expected to have a valid
resource name.
remove:
Remove operation: A resource name for the removed criterion is
expected, in this format: ``customers/{customer_id}/campaignC
riteria/{campaign_id}~{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.CampaignCriterionOperation)
))
_sym_db.RegisterMessage(CampaignCriterionOperation)
MutateCampaignCriteriaResponse = _reflection.GeneratedProtocolMessageType('MutateCampaignCriteriaResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATECAMPAIGNCRITERIARESPONSE,
__module__ = 'google.ads.googleads_v2.proto.services.campaign_criterion_service_pb2'
,
__doc__ = """Response message for campaign criterion mutate.
Attributes:
partial_failure_error:
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial\_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results:
All results for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.MutateCampaignCriteriaResponse)
))
_sym_db.RegisterMessage(MutateCampaignCriteriaResponse)
MutateCampaignCriterionResult = _reflection.GeneratedProtocolMessageType('MutateCampaignCriterionResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATECAMPAIGNCRITERIONRESULT,
__module__ = 'google.ads.googleads_v2.proto.services.campaign_criterion_service_pb2'
,
__doc__ = """The result for the criterion mutate.
Attributes:
resource_name:
Returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v2.services.MutateCampaignCriterionResult)
))
_sym_db.RegisterMessage(MutateCampaignCriterionResult)
DESCRIPTOR._options = None
_CAMPAIGNCRITERIONSERVICE = _descriptor.ServiceDescriptor(
name='CampaignCriterionService',
full_name='google.ads.googleads.v2.services.CampaignCriterionService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=1008,
serialized_end=1489,
methods=[
_descriptor.MethodDescriptor(
name='GetCampaignCriterion',
full_name='google.ads.googleads.v2.services.CampaignCriterionService.GetCampaignCriterion',
index=0,
containing_service=None,
input_type=_GETCAMPAIGNCRITERIONREQUEST,
output_type=google_dot_ads_dot_googleads__v2_dot_proto_dot_resources_dot_campaign__criterion__pb2._CAMPAIGNCRITERION,
serialized_options=_b('\202\323\344\223\0024\0222/v2/{resource_name=customers/*/campaignCriteria/*}'),
),
_descriptor.MethodDescriptor(
name='MutateCampaignCriteria',
full_name='google.ads.googleads.v2.services.CampaignCriterionService.MutateCampaignCriteria',
index=1,
containing_service=None,
input_type=_MUTATECAMPAIGNCRITERIAREQUEST,
output_type=_MUTATECAMPAIGNCRITERIARESPONSE,
serialized_options=_b('\202\323\344\223\002:\"5/v2/customers/{customer_id=*}/campaignCriteria:mutate:\001*'),
),
])
_sym_db.RegisterServiceDescriptor(_CAMPAIGNCRITERIONSERVICE)
DESCRIPTOR.services_by_name['CampaignCriterionService'] = _CAMPAIGNCRITERIONSERVICE
# @@protoc_insertion_point(module_scope)
| 48.787654 | 2,321 | 0.78673 |
42cd0e4c33a465776d2f55cc4beb83f4edfc71a6 | 4,568 | py | Python | main.py | meaug/indoor_air_quality_dht22_sgp30 | a746a9955903de1f7ce5e5d84493f860c1fd0b16 | [
"MIT"
] | null | null | null | main.py | meaug/indoor_air_quality_dht22_sgp30 | a746a9955903de1f7ce5e5d84493f860c1fd0b16 | [
"MIT"
] | null | null | null | main.py | meaug/indoor_air_quality_dht22_sgp30 | a746a9955903de1f7ce5e5d84493f860c1fd0b16 | [
"MIT"
] | null | null | null | from network import WLAN
import urequests as requests # from ubidots tutorial https://help.ubidots.com/en/articles/961994-connect-any-pycom-board-to-ubidots-using-wi-fi-over-http
from machine import I2C
import adafruit_sgp30 # from https://github.com/alexmrqt/micropython-sgp30
from machine import Pin
from dht import DHT # from https://github.com/JurassicPork/DHT_PyCom
import machine
import time
# Ubidots REST API token (placeholder -- substitute a real token before deploying).
TOKEN = "INSERT UBIDOTS TOKEN HERE"

# --- Wi-Fi setup: Pycom WLAN in station mode on the internal antenna ---
wlan = WLAN(mode=WLAN.STA)
wlan.antenna(WLAN.INT_ANT)
# Wi-Fi credentials (placeholders -- substitute the real SSID and password).
wlan.connect("INSERT WIFI SSI", auth=(WLAN.WPA2, "INSERT WIFI PASSWORD"), timeout=5000)
while not wlan.isconnected ():
    machine.idle()  # low-power wait until the connection comes up
print("Connected to Wifi\n")

# --- Initialize the I2C bus for the SGP30 gas sensor ---
i2c = I2C(0, I2C.MASTER)
i2c.init(I2C.MASTER, baudrate=100000)
# Create library object on our I2C port.
sgp30 = adafruit_sgp30.Adafruit_SGP30(i2c)
print("SGP30 serial #", [hex(i) for i in sgp30.serial])
# Start the SGP30 internal drift-compensation (IAQ) algorithm.
sgp30.iaq_init()
# Give the SGP30 time to finish its power-on initialization.
print("Waiting 15 seconds for SGP30 initialization.")
time.sleep(15)

# Retrieve previously stored baselines, if any (lets the compensation
# algorithm resume without its long warm-up).
has_baseline = False
try:
    f_co2 = open('co2eq_baseline.txt', 'r')
    f_tvoc = open('tvoc_baseline.txt', 'r')
    co2_baseline = int(f_co2.read())
    tvoc_baseline = int(f_tvoc.read())
    # Use them to calibrate the sensor.
    sgp30.set_iaq_baseline(co2_baseline, tvoc_baseline)
    f_co2.close()
    f_tvoc.close()
    has_baseline = True
except:
    # Missing/corrupt baseline files are expected on first boot; start fresh.
    print('No SGP30 baselines found')
# Time at which a baseline was last saved (consumed by the main loop below).
baseline_time = time.time()

# --- Initialize the DHT22 temperature/humidity sensor on pin P23 ---
th = DHT(Pin('P23', mode=Pin.OPEN_DRAIN), 1) #1 because dht22, change to 0 if using a DHT11
print("Waiting 2 seconds for DHT22 initialization.")
time.sleep(2)
# Builds the json to send the post request to ubidots
def build_json(variable1, value1, variable2, value2, variable3, value3, variable4, value4):
    """Build the Ubidots payload for four variables.

    Args:
        variable1..variable4 (str): Ubidots variable labels.
        value1..value4: Measurement values for the corresponding labels.

    Returns:
        dict: {label: {"value": value}} for each of the four pairs, ready to
        be serialized as the JSON body of an Ubidots POST request.
    """
    # A dict literal cannot raise for any argument values, so the previous
    # bare try/except (which silently returned None and hid real bugs) and
    # the dead commented-out lat/lng lines were removed. Callers that check
    # for None keep working, because None is simply never produced.
    return {variable1: {"value": value1},
            variable2: {"value": value2},
            variable3: {"value": value3},
            variable4: {"value": value4}}
# Sends the post request to ubidots using the REST API
def post_var(device, value1, value2, value3, value4):
    """POST one temperature/humidity/CO2/TVOC sample to Ubidots via REST.

    Args:
        device (str): Ubidots device label (appended to the API URL).
        value1: Temperature reading.
        value2: Humidity reading.
        value3: CO2-equivalent reading.
        value4: TVOC reading.

    Returns:
        The decoded JSON response on success, otherwise None (implicitly).
        NOTE(review): the bare except makes every failure -- network error,
        bad token, malformed response -- silently return None; presumably a
        deliberate best-effort choice for an unattended device, but confirm.
    """
    try:
        url = "https://industrial.api.ubidots.com/"
        url = url + "api/v1.6/devices/" + device
        # Ubidots authenticates via the X-Auth-Token header (module-level TOKEN).
        headers = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}
        data = build_json("temperature", value1, "humidity", value2, "CO2", value3, "TVOC", value4)
        if data is not None:
            print(data)
            req = requests.post(url=url, headers=headers, json=data)
            return req.json()
        else:
            pass
    except:
        pass
while True:
    # Read temperature and humidity from the DHT22, retrying every 0.5 s
    # until a checksum-valid reading arrives (DHT sensors fail reads often).
    result = th.read()
    while not result.is_valid():
        time.sleep(.5)
        result = th.read()
    print('Temp.:', result.temperature)
    print('RH:', result.humidity)
    # Feed humidity/temperature into the SGP30 so its gas readings are
    # humidity-compensated (more accurate CO2eq/TVOC output).
    sgp30.set_iaq_rel_humidity(result.humidity, result.temperature)
    # Read the CO2-equivalent (ppm) and TVOC (ppb) measurements.
    co2_eq, tvoc = sgp30.iaq_measure()
    print('co2eq = ' + str(co2_eq) + ' ppm \t tvoc = ' + str(tvoc) + ' ppb')
    # Publish the four readings to Ubidots under the device label "pycom".
    temperature = result.temperature
    humidity = result.humidity
    post_var("pycom", temperature, humidity, co2_eq, tvoc)
    # Publish the same readings to Pycom Pybytes as signals 1-4.
    # NOTE(review): `pybytes` is not defined in this file -- presumably the
    # builtin injected by the Pycom firmware; confirm on the target device.
    pybytes.send_signal(1,result.temperature)
    pybytes.send_signal(2,result.humidity)
    pybytes.send_signal(3,co2_eq)
    pybytes.send_signal(4,tvoc)
    # Persist the SGP30 baseline: 12 h (43200 s) after a cold start, then
    # hourly (3600 s) once a baseline exists.
    if (has_baseline and (time.time() - baseline_time >= 3600)) \
            or ((not has_baseline) and (time.time() - baseline_time >= 43200)):
        print('Saving baseline')
        baseline_time = time.time()
        try:
            f_co2 = open('co2eq_baseline.txt', 'w')
            f_tvoc = open('tvoc_baseline.txt', 'w')
            bl_co2, bl_tvoc = sgp30.get_iaq_baseline()
            f_co2.write(str(bl_co2))
            f_tvoc.write(str(bl_tvoc))
            f_co2.close()
            f_tvoc.close()
            has_baseline = True
        except:
            # Best-effort persistence: a failed save must not stop sampling.
            print('Impossible to write SGP30 baselines!')
    # Measure every 5 minutes (300 seconds).
    time.sleep(300)
| 31.722222 | 153 | 0.668345 |
42cd2ea8378c0d8edecc4b1ef21bb286fd030c27 | 5,278 | py | Python | drivers/get_imu_data.py | ndkjing/usv | 132e021432a0344a22914aaf68da7d7955d7331f | [
"MIT"
] | null | null | null | drivers/get_imu_data.py | ndkjing/usv | 132e021432a0344a22914aaf68da7d7955d7331f | [
"MIT"
] | null | null | null | drivers/get_imu_data.py | ndkjing/usv | 132e021432a0344a22914aaf68da7d7955d7331f | [
"MIT"
] | 1 | 2021-09-04T10:27:30.000Z | 2021-09-04T10:27:30.000Z | # coding:UTF-8
import queue
import serial
import time
import threading
# Raw payload buffers for the three frame types (8 data bytes each).
ACCData = [0.0] * 8
GYROData = [0.0] * 8
AngleData = [0.0] * 8
FrameState = 0  # Which frame type is being parsed (set by the byte after 0x55).
Bytenum = 0  # Position within the current frame.
CheckSum = 0  # Running additive checksum of the current frame.
# Most recent decoded values: acceleration (g), angular rate (deg/s), angles (deg).
a = [0.0] * 3
w = [0.0] * 3
Angle = [0.0] * 3
count=0  # Number of complete samples published (for throughput logging).
start_time = time.time()
interval=0.01  # NOTE(review): unused in this chunk -- presumably a polling period.
def DueData(inputdata,q):  # Core parser: split the incoming bytes into the matching arrays.
    """Parse a chunk of raw WIT-IMU serial bytes and publish complete samples.

    Each frame is: 0x55 header, a type byte (0x51 acc / 0x52 gyro / 0x53
    angle), eight payload bytes and a low-byte additive checksum. Whenever an
    angle frame completes with a valid checksum, the latest concatenated
    (acc + gyro + angle) 9-tuple is pushed onto ``q``.

    Args:
        inputdata: Iterable of ints (e.g. ``bytes`` read from the serial port).
        q: queue.Queue that receives the parsed 9-tuples.
    """
    global FrameState  # Parser state lives in module globals so it survives across chunks.
    global Bytenum
    global CheckSum
    global a
    global w
    global Angle
    global count
    global start_time
    for data in inputdata:  # Walk the chunk byte by byte.
        # data = ord(data)
        if FrameState == 0:  # No frame type identified yet.
            if data == 0x55 and Bytenum == 0:  # Header byte: start a new frame.
                CheckSum = data
                Bytenum = 1
                continue
            elif data == 0x51 and Bytenum == 1:  # Acceleration frame follows.
                CheckSum += data
                FrameState = 1
                Bytenum = 2
            elif data == 0x52 and Bytenum == 1:  # Angular-velocity frame follows.
                CheckSum += data
                FrameState = 2
                Bytenum = 2
            elif data == 0x53 and Bytenum == 1:  # Angle frame follows.
                CheckSum += data
                FrameState = 3
                Bytenum = 2
        elif FrameState == 1:  # acc: collecting the acceleration payload.
            if Bytenum < 10:  # Eight payload bytes occupy positions 2-9.
                ACCData[Bytenum - 2] = data  # Payload index starts at 0.
                CheckSum += data
                Bytenum += 1
            else:
                if data == (CheckSum & 0xff):  # Checksum matched: decode.
                    a = get_acc(ACCData)
                CheckSum = 0  # Reset state for the next frame either way.
                Bytenum = 0
                FrameState = 0
        elif FrameState == 2:  # gyro: collecting the angular-rate payload.
            if Bytenum < 10:
                GYROData[Bytenum - 2] = data
                CheckSum += data
                Bytenum += 1
            else:
                if data == (CheckSum & 0xff):
                    w = get_gyro(GYROData)
                CheckSum = 0
                Bytenum = 0
                FrameState = 0
        elif FrameState == 3:  # angle: collecting the orientation payload.
            if Bytenum < 10:
                AngleData[Bytenum - 2] = data
                CheckSum += data
                Bytenum += 1
            else:
                if data == (CheckSum & 0xff):
                    Angle = get_angle(AngleData)
                    # Tuple concatenation: (ax, ay, az, wx, wy, wz, roll, pitch, yaw).
                    d = a + w + Angle
                    # print("a(g):%10.3f %10.3f %10.3f w(deg/s):%10.3f %10.3f %10.3f Angle(deg):%10.3f %10.3f %10.3f" % d)
                    q.put(d)
                    count+=1
                    if count%1000==0:
                        # Log the average wall-clock cost per published sample.
                        print('count 1 cost time',(time.time()-start_time)/count)
                    if count>100000000:
                        count=0  # Keep the throughput counter bounded.
                    # return d
                CheckSum = 0
                Bytenum = 0
                FrameState = 0
def get_acc(datahex):
    """Decode six raw accelerometer bytes into accelerations in g.

    Args:
        datahex: Sequence of at least six ints: (low, high) byte pairs for
            the X, Y and Z axes, taken from a WIT 0x51 frame payload.

    Returns:
        tuple: (acc_x, acc_y, acc_z) in g, each within [-16, 16).
    """
    k_acc = 16.0

    def decode(low, high):
        # Merge the byte pair into an unsigned 16-bit value scaled to +/-16 g,
        # then fold values at or above +16 g back into the negative range.
        value = (high << 8 | low) / 32768.0 * k_acc
        return value - 2 * k_acc if value >= k_acc else value

    return (decode(datahex[0], datahex[1]),
            decode(datahex[2], datahex[3]),
            decode(datahex[4], datahex[5]))
def get_gyro(datahex):
    """Decode six raw gyroscope bytes into angular rates in deg/s.

    Args:
        datahex: Sequence of at least six ints: (low, high) byte pairs for
            the X, Y and Z axes, taken from a WIT 0x52 frame payload.

    Returns:
        tuple: (gyro_x, gyro_y, gyro_z) in deg/s, each within [-2000, 2000).
    """
    k_gyro = 2000.0
    rates = []
    for axis in range(3):
        low = datahex[2 * axis]
        high = datahex[2 * axis + 1]
        # Unsigned 16-bit value scaled to +/-2000 deg/s, folded to signed range.
        rate = (high << 8 | low) / 32768.0 * k_gyro
        if rate >= k_gyro:
            rate -= 2 * k_gyro
        rates.append(rate)
    return rates[0], rates[1], rates[2]
def get_angle(datahex):
    """Decode six raw angle bytes into orientation angles in degrees.

    Args:
        datahex: Sequence of at least six ints: (low, high) byte pairs for
            the X, Y and Z axes, taken from a WIT 0x53 frame payload.

    Returns:
        tuple: (angle_x, angle_y, angle_z) in degrees, each within [-180, 180).
    """
    k_angle = 180.0
    # Unsigned 16-bit values scaled to +/-180 degrees ...
    raw = [(datahex[i + 1] << 8 | datahex[i]) / 32768.0 * k_angle for i in (0, 2, 4)]
    # ... then folded from [0, 360) into the signed range [-180, 180).
    angle_x, angle_y, angle_z = [v - 2 * k_angle if v >= k_angle else v for v in raw]
    return angle_x, angle_y, angle_z
class GetImuData:
    """Opens the IMU serial port and feeds the raw byte stream to the parser.

    Parsed (acc + gyro + angle) 9-tuples are published on ``self.q``.
    """

    def __init__(self, port, baud, timeout=0.5):
        # Queue of parsed 9-tuples, filled by get_data() and consumed elsewhere.
        self.q = queue.Queue()
        try:
            self.serial_obj = serial.Serial(port, baud, timeout=timeout)
        except Exception as e:
            print(e)
            # NOTE(review): terminates the whole process if the port cannot be
            # opened -- heavy-handed for a reusable class; confirm intended.
            exit(-1)
        # "串口打开" = "serial port opened" (runtime string kept verbatim).
        print('串口打开',self.serial_obj.is_open)

    def get_data(self):
        """Read the serial stream forever, 33 bytes at a time, and parse it."""
        while True:
            datahex = self.serial_obj.read(33)
            DueData(datahex,self.q)
def imu_integration(q):
    """Integrate IMU samples (stub -- not implemented yet).

    Args:
        q: Queue of parsed 9-tuples (acceleration, angular rate, angles)
            produced by GetImuData.get_data(). (The original docstring named
            this parameter ``d``; the signature actually takes ``q``.)
    """
if __name__ == '__main__':
    obj = GetImuData(port='com4', baud=115200)
    # Reader thread parses serial frames into obj.q; a worker consumes them.
    t1 = threading.Thread(target=obj.get_data)
    # BUG FIX: imu_integration is a module-level function, not a method of
    # GetImuData, so `obj.imu_integration` raised AttributeError at startup.
    # Target the module function and hand it the instance's queue instead.
    t2 = threading.Thread(target=imu_integration, args=(obj.q,))
    t1.start()
    t2.start()
    t1.join()
    t2.join()
42cdb0ad159342fdea9a675f50b583e29f8c7d2a | 3,082 | py | Python | test/test_utils.py | dilettacal/nmt_seq2seq_evo | 1de7647fb50445d17aa0eab8f300fdcbe6b8145e | [
"MIT"
] | null | null | null | test/test_utils.py | dilettacal/nmt_seq2seq_evo | 1de7647fb50445d17aa0eab8f300fdcbe6b8145e | [
"MIT"
] | null | null | null | test/test_utils.py | dilettacal/nmt_seq2seq_evo | 1de7647fb50445d17aa0eab8f300fdcbe6b8145e | [
"MIT"
] | null | null | null | import os
import unittest
from torchtext.data import Field, Iterator
from project.utils.utils_metrics import AverageMeter
from project.utils.utils_logging import Logger
from project.utils.datasets import Seq2SeqDataset
data_dir = os.path.join(".", "test", "test_data")
class TestIOUtils(unittest.TestCase):
    """Unit tests for the project's dataset loading, logging and metric utilities.

    File-producing tests read/write under the module-level ``data_dir``
    (./test/test_data) and clean up the artifacts they create.
    """

    def test_load_data(self):
        """Seq2SeqDataset.splits loads the sample corpus with src/trg fields."""
        src_vocab = Field(pad_token="<p>", unk_token="<u>", lower=True)
        trg_vocab = Field(init_token="<s>", eos_token="</s>",pad_token="<p>", unk_token="<u>", lower=True )
        exts = (".de", ".en")
        samples = Seq2SeqDataset.splits(root="", path=data_dir, exts=exts,
                                        train="samples", fields=(src_vocab, trg_vocab), validation="",test="")
        # Only a train split is requested, so the tuple holds one dataset.
        self.assertIsInstance(samples, tuple)
        samples = samples[0]
        self.assertIsInstance(samples, Seq2SeqDataset)
        self.assertIsNotNone(samples.examples)
        # The bundled sample corpus contains 15 sentence pairs.
        self.assertAlmostEqual(len(samples.examples), 15)
        self.assertEqual(list(samples.fields.keys()), ["src", "trg"])
        src_vocab.build_vocab(samples)
        trg_vocab.build_vocab(samples)
        self.assertIsNotNone(src_vocab.vocab.stoi)
        self.assertIsNotNone(trg_vocab.vocab.stoi)

    def test_logger(self):
        """Logger.log appends one line per call to log.log in its directory."""
        path = os.path.join(data_dir, "log.log")
        if os.path.exists(path):
            os.remove(path)
        logger = Logger(path=data_dir)
        self.assertIsNotNone(logger)
        logger.log("test_logging", stdout=False)
        logger.log("test_second_logging", stdout=False)
        with open(path, mode="r") as f:
            content = f.read().strip().split("\n")
            self.assertEqual(content[0], "test_logging")
            self.assertEqual(content[1], "test_second_logging")

    def test_save_model(self):
        """Logger.save_model pickles the given object as model.pkl."""
        path = os.path.join(data_dir, "log.log")
        if os.path.exists(path):
            os.remove(path)
        logger = Logger(path=data_dir)
        self.assertIsNotNone(logger)
        model = dict({"model": [1,2,3,4,2]})
        logger.save_model(model)
        files = os.listdir(data_dir)
        self.assertIn("model.pkl", files)
        os.remove(os.path.join(data_dir, "model.pkl"))

    def test_plot_metrics(self):
        """Logger.plot writes a <name>.png plot of the metric values."""
        path = os.path.join(data_dir, "log.log")
        if os.path.exists(path):
            os.remove(path)
        logger = Logger(path=data_dir)
        self.assertIsNotNone(logger)
        metric = [1,2,5,1,6,1]
        logger.plot(metric, "", "", "metric")
        files = os.listdir(data_dir)
        self.assertIn("metric.png", files)
        os.remove(os.path.join(data_dir, "metric.png"))

    def test_metric(self):
        """AverageMeter tracks count/val/avg/sum and reset() zeroes them."""
        metric = AverageMeter()
        for i in range(10):
            metric.update(i)
        # After updates 0..9: count=10, last value 9, mean 4.5, sum 45.
        self.assertEqual(metric.count, 10)
        self.assertEqual(metric.val, 9)
        self.assertEqual(metric.avg, 4.5)
        self.assertEqual(metric.sum, 45)
        metric.reset()
        self.assertEqual(metric.count, 0)
        self.assertEqual(metric.val, 0)
        self.assertEqual(metric.avg, 0)
        self.assertEqual(metric.sum, 0)
42d1b2020952de616b4d4ac7d2ca23c0bbc1bae9 | 144 | py | Python | tests/test_sass-director.py | Sass-Director/Sass-Director_Sublime | 57dff551213b4884c603cb69700fa2583f646202 | [
"MIT"
] | 4 | 2015-07-08T14:25:24.000Z | 2021-01-20T22:11:09.000Z | tests/test_sass-director.py | Sass-Director/Sass-Director_Sublime | 57dff551213b4884c603cb69700fa2583f646202 | [
"MIT"
] | 4 | 2015-06-16T19:48:59.000Z | 2020-06-23T17:17:38.000Z | tests/test_sass-director.py | Sass-Director/Sass-Director_Sublime | 57dff551213b4884c603cb69700fa2583f646202 | [
"MIT"
] | 2 | 2015-01-24T17:38:48.000Z | 2017-04-18T13:23:46.000Z | # Load in test framework
from sublime_plugin_tests import framework
class TestExample(framework.TestCase):
    """Minimal sublime_plugin_tests case used to exercise the harness."""
    # Bug fix: the method was missing ``self``; invoking it as a bound method
    # would raise ``TypeError: sampleTest() takes 0 positional arguments``.
    def sampleTest(self):
        pass
| 18 | 42 | 0.75 |
42d1f1c104a654530b6968dd6b6bff5cdf01c509 | 2,156 | py | Python | networks/cifar_net.py | DQle38/Fair-Feature-Distillation-for-Visual-Recognition | f0f98728f36528218bf19dce9a26d6ee1ba96e58 | [
"MIT"
] | 5 | 2021-09-07T13:33:45.000Z | 2022-02-12T18:56:45.000Z | networks/cifar_net.py | DQle38/Fair-Feature-Distillation-for-Visual-Recognition | f0f98728f36528218bf19dce9a26d6ee1ba96e58 | [
"MIT"
] | null | null | null | networks/cifar_net.py | DQle38/Fair-Feature-Distillation-for-Visual-Recognition | f0f98728f36528218bf19dce9a26d6ee1ba96e58 | [
"MIT"
] | 4 | 2021-09-25T06:56:38.000Z | 2022-03-24T18:06:08.000Z | import torch
import torch.nn as nn
import numpy as np
class Net(nn.Module):
    """CIFAR-style CNN: three conv-conv-pool stages (32/64/128 channels),
    a 256-unit fully connected layer and a linear classifier head.
    Attribute names are unchanged from the original definition so that
    previously saved state dicts keep loading.
    """
    def __init__(self, num_classes=10):
        super().__init__()
        # Stage 1: 3 -> 32 channels; spatial size 32 -> 16 after pooling.
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, padding=1)
        side = compute_conv_output_size(32, 3, padding=1)  # 32
        self.conv2 = nn.Conv2d(32, 32, kernel_size=3, padding=1)
        side = compute_conv_output_size(side, 3, padding=1)  # 32
        side = side // 2  # 16
        # Stage 2: 32 -> 64 channels; 16 -> 8.
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        side = compute_conv_output_size(side, 3, padding=1)  # 16
        self.conv4 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        side = compute_conv_output_size(side, 3, padding=1)  # 16
        side = side // 2  # 8
        # Stage 3: 64 -> 128 channels; 8 -> 4.
        self.conv5 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        side = compute_conv_output_size(side, 3, padding=1)  # 8
        self.conv6 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        side = compute_conv_output_size(side, 3, padding=1)  # 8
        side = side // 2  # 4
        self.fc1 = nn.Linear(side * side * 128, 256)  # 2048 -> 256
        self.drop1 = nn.Dropout(0.25)
        self.drop2 = nn.Dropout(0.5)  # defined but not used by forward()
        self.MaxPool = torch.nn.MaxPool2d(2)
        self.last = torch.nn.Linear(256, num_classes)
        self.relu = torch.nn.ReLU()
    def forward(self, x, get_inter=False, before_fc=False):
        """Run the network.
        With ``get_inter=True`` also returns an intermediate feature:
        the last conv activation when ``before_fc=True``, otherwise the
        256-d FC activation.
        """
        out = self.relu(self.conv1(x))
        out = self.relu(self.conv2(out))
        out = self.drop1(self.MaxPool(out))
        out = self.relu(self.conv3(out))
        out = self.relu(self.conv4(out))
        out = self.drop1(self.MaxPool(out))
        out = self.relu(self.conv5(out))
        conv_feat = self.relu(self.conv6(out))
        out = self.drop1(self.MaxPool(conv_feat))
        out = out.view(x.shape[0], -1)
        fc_feat = self.relu(self.fc1(out))
        logits = self.last(fc_feat)
        if not get_inter:
            return logits
        return (conv_feat, logits) if before_fc else (fc_feat, logits)
def compute_conv_output_size(l_in, kernel_size, stride=1, padding=0, dilation=1):
    """Return the spatial output size of a convolution layer.

    Implements the standard formula
    floor((l_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride) + 1
    using integer floor division instead of numpy.
    """
    effective_span = l_in + 2 * padding - dilation * (kernel_size - 1) - 1
    return int(effective_span // stride + 1)
| 35.933333 | 101 | 0.574212 |
42d24e6e9b4725307355ace50aeaa70550864c9b | 82 | py | Python | pypi_generation_files/build/lib/cab-dynamic-pricing/__init__.py | nayantaramohan/cab-dynamic-pricing | 803cfaf9d1ef1d8d20e90dfdfd741b1411750328 | [
"MIT"
] | 5 | 2022-03-08T02:06:16.000Z | 2022-03-17T22:04:30.000Z | pypi_generation_files/build/lib/cab-dynamic-pricing/__init__.py | nayantaramohan/cab-dynamic-pricing | 803cfaf9d1ef1d8d20e90dfdfd741b1411750328 | [
"MIT"
] | null | null | null | pypi_generation_files/build/lib/cab-dynamic-pricing/__init__.py | nayantaramohan/cab-dynamic-pricing | 803cfaf9d1ef1d8d20e90dfdfd741b1411750328 | [
"MIT"
] | 1 | 2022-03-01T02:55:45.000Z | 2022-03-01T02:55:45.000Z | # __init__.py
# Version of the cab-dynamic-pricing package
__version__ = "1.0.0"
| 16.4 | 44 | 0.731707 |
42d4aca626e7056c3cd312d444ec2606808efc07 | 1,207 | py | Python | solutions/python3/problem654.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | solutions/python3/problem654.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | solutions/python3/problem654.py | tjyiiuan/LeetCode | abd10944c6a1f7a7f36bd9b6218c511cf6c0f53e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
654. Maximum Binary Tree
Given an integer array with no duplicates. A maximum tree building on this array is defined as follow:
The root is the maximum number in the array.
The left subtree is the maximum tree constructed from left part subarray divided by the maximum number.
The right subtree is the maximum tree constructed from right part subarray divided by the maximum number.
Construct the maximum tree by the given array and output the root node of this tree.
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
    def __repr__(self):
        # Debug-friendly representation; children render recursively.
        return f"TreeNode({self.val!r}, left={self.left!r}, right={self.right!r})"
class Solution:
def constructMaximumBinaryTree(self, nums) -> TreeNode:
if not nums:
return None
max_val = nums[0]
max_ind = 0
for ind, val in enumerate(nums):
if val > max_val:
max_ind = ind
max_val = val
l_node = self.constructMaximumBinaryTree(nums[:max_ind])
r_node = self.constructMaximumBinaryTree(nums[max_ind + 1:])
root = TreeNode(val=max_val, left=l_node, right=r_node)
return root
| 30.948718 | 105 | 0.666114 |
42d54535865b205f51d1935bf40792c7ce95c829 | 5,189 | py | Python | pparser.py | deadsurgeon42/StarryPy3k | 9291e5a7ca97004675a4868165ce5690c111c492 | [
"WTFPL"
] | 44 | 2015-11-18T07:45:11.000Z | 2022-03-30T06:32:18.000Z | pparser.py | deadsurgeon42/StarryPy3k | 9291e5a7ca97004675a4868165ce5690c111c492 | [
"WTFPL"
] | 110 | 2016-08-01T06:45:13.000Z | 2021-11-30T18:45:36.000Z | pparser.py | deadsurgeon42/StarryPy3k | 9291e5a7ca97004675a4868165ce5690c111c492 | [
"WTFPL"
] | 32 | 2015-01-31T09:54:38.000Z | 2022-03-31T06:12:21.000Z | import asyncio
import traceback
from configuration_manager import ConfigurationManager
from data_parser import *
# Mapping of packet type id (0-68) to its parser class from data_parser.
# Only the ids below have an implemented parser; every other id up to 68
# is a known packet type with no parser and maps to None.
_IMPLEMENTED_PARSERS = {
    0: ProtocolRequest,
    1: ProtocolResponse,
    2: ServerDisconnect,
    3: ConnectSuccess,
    4: ConnectFailure,
    5: HandshakeChallenge,
    6: ChatReceived,
    9: PlayerWarpResult,
    13: ClientConnect,
    14: ClientDisconnectRequest,
    16: PlayerWarp,
    17: FlyShip,
    18: ChatSent,
    20: ClientContextUpdate,
    21: WorldStart,
    22: WorldStop,
    31: GiveItem,
    39: ModifyTileList,
    43: SpawnEntity,
    50: EntityCreate,
    53: EntityInteract,
    54: EntityInteractResult,
    56: DamageRequest,
    57: DamageNotification,
    58: EntityMessage,
    59: EntityMessageResponse,
    60: DictVariant,
    61: StepUpdate,
}
parse_map = {packet_id: _IMPLEMENTED_PARSERS.get(packet_id)
             for packet_id in range(69)}
class PacketParser:
    """
    Parses packets and caches the parsed form of large, frequently-seen
    packets (keyed by a hash of the raw bytes) to avoid re-parsing.
    A background reaper task ages cache entries out over time.
    """
    def __init__(self, config: ConfigurationManager):
        # hash(original_data) -> CachedPacket
        self._cache = {}
        self.config = config
        self.loop = asyncio.get_event_loop()
        # Background task that periodically prunes unused cache entries.
        self._reaper = self.loop.create_task(self._reap())
    @asyncio.coroutine
    def parse(self, packet):
        """
        Given a header-parsed packet from the stream, parse it down to its
        parts. First check if the packet is one we've seen before; if it is,
        pull its parsed form from the cache, and run with that. Otherwise,
        pass it to the appropriate parser for parsing.
        :param packet: Packet with header information parsed.
        :return: Fully parsed packet.
        """
        try:
            # Only packets at least min_cache_size bytes long are cached.
            if packet["size"] >= self.config.config["min_cache_size"]:
                packet["hash"] = hash(packet["original_data"])
                if packet["hash"] in self._cache:
                    # Cache hit: bump the usage counter and reuse the
                    # previously parsed payload.
                    self._cache[packet["hash"]].count += 1
                    packet["parsed"] = self._cache[packet["hash"]].packet[
                        "parsed"]
                else:
                    packet = yield from self._parse_and_cache_packet(packet)
            else:
                packet = yield from self._parse_packet(packet)
        except Exception as e:
            # NOTE(review): traceback.print_exc() prints the traceback itself
            # and returns None, so the second print emits "None".
            print("Error during parsing.")
            print(traceback.print_exc())
        finally:
            # The return inside ``finally`` guarantees the (possibly
            # unparsed) packet is handed back even after a parsing error.
            return packet
    @asyncio.coroutine
    def _reap(self):
        """
        Prune packets from the cache that are not being used, and that are
        older than the "packet_reap_time".
        Each pass decrements every entry's usage counter; entries whose
        counter reaches zero (no hits since the last pass kept them alive)
        are evicted.
        :return: None.
        """
        while True:
            yield from asyncio.sleep(self.config.config["packet_reap_time"])
            # Iterate over a copy so eviction doesn't mutate the dict
            # while it is being traversed.
            for h, cached_packet in self._cache.copy().items():
                cached_packet.count -= 1
                if cached_packet.count <= 0:
                    del (self._cache[h])
    @asyncio.coroutine
    def _parse_and_cache_packet(self, packet):
        """
        Take a new packet and pass it to the parser. Once we get it back,
        store a reference to it in the cache keyed by its hash.
        :param packet: Packet with header information parsed.
        :return: Fully parsed packet.
        """
        packet = yield from self._parse_packet(packet)
        self._cache[packet["hash"]] = CachedPacket(packet=packet)
        return packet
    @asyncio.coroutine
    def _parse_packet(self, packet):
        """
        Parse the packet by giving it to the appropriate parser looked up
        in ``parse_map``; unknown-to-us types get an empty parsed dict.
        :param packet: Packet with header information parsed.
        :return: Fully parsed packet.
        """
        res = parse_map[packet["type"]]
        if res is None:
            packet["parsed"] = {}
        else:
            #packet["parsed"] = yield from self.loop.run_in_executor(
            #    self.loop.executor, res.parse, packet["data"])
            # Removed due to issues with testers. Need to evaluate what's going
            # on.
            packet["parsed"] = res.parse(packet["data"])
        return packet
    # def __del__(self):
    #     self._reaper.cancel()
class CachedPacket:
    """A parsed packet held in the parser cache, paired with a usage
    counter. The counter starts at one, is bumped on every cache hit,
    and is aged down by the cache reaper until the entry is evicted."""
    def __init__(self, packet):
        self.packet = packet
        self.count = 1
def build_packet(packet_id, data, compressed=False):
    """
    Convenience wrapper that assembles a wire-ready packet.
    :param packet_id: ID value of packet.
    :param data: Contents of packet.
    :param compressed: Whether or not to compress the packet.
    :return: Built packet object.
    """
    fields = {"id": packet_id, "data": data, "compressed": compressed}
    return BasePacket.build(fields)
| 27.167539 | 79 | 0.578724 |
42d61bf690c775be557f3dd6decdfb665b54a7c4 | 4,224 | py | Python | template_capture_performance.py | Dino-s26/f5_web_gui_capture | 5dd00d6d84e9dd7c79f90446133c99190cbee1b0 | [
"MIT"
] | null | null | null | template_capture_performance.py | Dino-s26/f5_web_gui_capture | 5dd00d6d84e9dd7c79f90446133c99190cbee1b0 | [
"MIT"
] | null | null | null | template_capture_performance.py | Dino-s26/f5_web_gui_capture | 5dd00d6d84e9dd7c79f90446133c99190cbee1b0 | [
"MIT"
] | null | null | null | # Import Selenium Module
# NOTE: this file is a TEMPLATE, not runnable as-is.  Every
# "<Replace with ...>" placeholder (hostname, directory, driver path,
# credentials) must be substituted before the script is valid Python —
# the bare executable_path placeholder below is a syntax error until then.
# SECURITY(review): credentials end up hard-coded in this file; prefer
# environment variables or a secrets store.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Import Path
from pathlib import Path
# Import Base Module for Code
import os
import datetime
# Check Path
path = os.getcwd()
# Date, e.g. "17-03-24"; used in the folder name and screenshot names.
ddt = str(datetime.datetime.now().strftime("%d-%m-%y"))
# Site
site = "<Replace with your F5 Hostname>"
folder = path+"<Replace with your directory>"+site+"--"+ddt+"\/"
check_folder = Path(folder).mkdir(parents=True, exist_ok=True)
#print (check_folder)
# Import WebDriver
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
# If You need to utilize in background capture, you can uncomment this line. This will help to run the script in background without any interruption.
#options.add_argument('--headless')
# NOTE(review): find_element_by_* / chrome_options are Selenium 3 APIs,
# removed in Selenium 4 — pin selenium<4 or migrate to driver.find_element().
driver = webdriver.Chrome(chrome_options=options, executable_path=<Replace with path to your chrome driver>+"\driver\chromedriver.exe")
wait = WebDriverWait(driver, 60)
# Maximize Browser Window
driver.maximize_window()
# Begin to Access Web
driver.get("https://<Replace with your F5 IP>")
# Login & entering credential
driver.implicitly_wait(180)
user = driver.find_element_by_id("username")
user.click()
user.send_keys("<Replace with your F5 username>")
password = driver.find_element_by_id("passwd")
password.click()
password.send_keys("<Replace with your F5 password>")
login = driver.find_element_by_xpath("//*[@id='loginform']/button")
login.click()
driver.implicitly_wait(180)
# Check Device Health, Accessing Performance Menu
performance_menu = driver.find_element_by_xpath("//*[@id='mainmenu-statistics-performance']/a")
performance_menu.click()
performances = driver.find_element_by_xpath("//*[@id='mainmenu-statistics-performance-General']/a")
performances.click()
performances.is_selected()
# Handle iFrame Tag
driver.implicitly_wait(90)
driver.switch_to.frame(driver.find_element_by_name("contentframe"))
# This handle when to select the Data we want to generate, for this code we want to get data for Last 7 Days
driver.implicitly_wait(90)
graph_interval = driver.find_elements_by_xpath("//tr[@id='graph_interval_row']//select[@name='int_select']/option[@value='2']")
for option in graph_interval:
    option.click()
    wait.until(EC.staleness_of(option))
# This capture Memory Usage
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr:nth-of-type(2) img")))
memory = driver.find_element_by_css_selector("tr:nth-of-type(2) img").screenshot(folder+site+"-memory-"+ddt+".png")
# This capture CPU Usage
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr:nth-of-type(4) img")))
cpu = driver.find_element_by_css_selector("tr:nth-of-type(4) img").screenshot(folder+site+"-cpu-"+ddt+".png")
# This capture Active Connection
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr:nth-of-type(6) img")))
active_c = driver.find_element_by_css_selector("tr:nth-of-type(6) img").screenshot(folder+site+"-active_connection-"+ddt+".png")
# This capture Total Connection
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr:nth-of-type(8) img")))
tota_c = driver.find_element_by_css_selector("tr:nth-of-type(8) img").screenshot(folder+site+"-total_connection-"+ddt+".png")
# This capture Throughput (Bits)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr:nth-of-type(10) img")))
throughput_b = driver.find_element_by_css_selector("tr:nth-of-type(10) img").screenshot(folder+site+"-throughput_bit-"+ddt+".png")
# This capture HTTP Requests
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr:nth-of-type(18) img")))
http_r = driver.find_element_by_css_selector("tr:nth-of-type(18) img").screenshot(folder+site+"-http_request-"+ddt+".png")
# This capture SSL TPS
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "tr:nth-of-type(22) img")))
ssl = driver.find_element_by_css_selector("tr:nth-of-type(22) img").screenshot(folder+site+"-ssl-"+ddt+".png")
# Nothing to do here, the script has complete it is jobs :)
print("DONE!")
driver.close()
| 39.111111 | 149 | 0.766335 |
42d72c0c58e56c65e8f873c5b25c452eaaf9e7cc | 3,032 | py | Python | deploy/testData.py | yaoguxiao/insightface | 731f9ec7503cda3a5f3433525aa57709a78b2118 | [
"MIT"
] | null | null | null | deploy/testData.py | yaoguxiao/insightface | 731f9ec7503cda3a5f3433525aa57709a78b2118 | [
"MIT"
] | null | null | null | deploy/testData.py | yaoguxiao/insightface | 731f9ec7503cda3a5f3433525aa57709a78b2118 | [
"MIT"
] | null | null | null | import sys
import os
import mxnet as mx
import argparse
sys.path.append(os.path.join(os.getcwd(), "../src/common"))
sys.path.append(os.path.join(os.getcwd(), "../src/eval"))
import verification
def argParser():
    """Parse command-line arguments for the network evaluation script.

    Returns an argparse.Namespace with the model/data paths, the
    comma-separated test-set names, the output layer name and the
    batch size.
    """
    parser = argparse.ArgumentParser(description='test network')
    parser.add_argument('--model', default='../../insightface/models/model-res4-8-16-4-dim512/model,0', help='path of model')
    parser.add_argument('--data-dir', default='../../insightface/datasets/faces_ms1m_112x112/', help='path of test data')
    parser.add_argument('--target', default='lfw', help='name of test data')
    parser.add_argument('--output', default='fc1', help='output name')
    # Bug fix: without type=int a value supplied on the command line stayed a
    # string, which would break the shape tuples and batch maths downstream.
    parser.add_argument('--batch-size', default=50, type=int, help='batch size')
    return parser.parse_args()
def reaTestData():
    """Load every verification ``.bin`` dataset named in ``args.target``.

    Iteration stops at the first missing file (deliberate ``break``, not
    ``continue``).  Returns a dict mapping dataset name -> loaded data.
    """
    datasets = {}
    for name in args.target.split(','):
        print("============", name)
        bin_path = os.path.join(args.data_dir, name + ".bin")
        print(bin_path)
        if not os.path.exists(bin_path):
            break
        datasets[name] = verification.load_bin(bin_path, [112, 112])
        print('ver', name)
    return datasets
def verTest(model, nbatch):
    """Run the verification benchmark on every loaded dataset.

    :param model: bound mxnet Module producing embeddings.
    :param nbatch: batch size; only used as a label in the printed output.
    :return: list of flip-augmented accuracies (one per dataset), or None
             when the (never-taken, see note) guard fires.
    """
    results = []
    verList = reaTestData()
    # The "line:" prints below embed the literal source line number via
    # f_lineno — they are debug breadcrumbs.
    print("===============, line:", sys._getframe().f_lineno)
    # NOTE(review): reaTestData() always returns a dict, never None, so this
    # guard cannot trigger; an *empty* dict silently yields an empty result
    # list instead of the error message.
    if verList is None:
        print("read test data err")
        return
    print("===============, line:", sys._getframe().f_lineno)
    for i in verList:
        print("===============, line:", sys._getframe().f_lineno)
        acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(verList[i], model, args.batch_size, 10, None, None)
        print('[%s][%d]XNorm: %f' % (i, nbatch, xnorm))
        # print('[%s][%d]Accuracy: %1.5f+-%1.5f' % (i, nbatch, acc1, std1))
        print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (i, nbatch, acc2, std2))
        results.append(acc2)
    return results
# class faceMode:
# def __init__(self, args):
# self.arts = args
# modelid = args.model.split(',')
# print(modelid[0], modelid[1])
# sym, argParams, auxParams = mx.model.load_checkpoint(modelid[0], int(modelid[1]))#type:mx.symbol.symbol.Symbol
# sym = sym.get_internals()[args.output + '_output']
# self.model = mx.mod.Module(symbol=sym, label_names=None)
# self.model.bind(('data', (1, 3, 112,112)))
# self.model.set_params(argParams, auxParams)
# print(type(sym))
if __name__ == "__main__":
    args = argParser()
    # faceMode(args)
    # args.model is "<prefix>,<epoch>"; split into checkpoint prefix + epoch.
    modelid = args.model.split(',')
    print(modelid[0], modelid[1])
    sym, argParams, auxParams = mx.model.load_checkpoint(modelid[0], int(modelid[1])) # type:mx.symbol.symbol.Symbol
    # Truncate the graph at the requested output layer (default fc1).
    sym = sym.get_internals()[args.output + '_output']
    # NOTE(review): hard-coded to GPU 0; fails on CPU-only hosts.
    model = mx.mod.Module(symbol=sym, context=mx.gpu(0), label_names=None)
    # model.bind(data_shapes=('data', (args.batch_size, 3, 112, 112)))
    # Bind for batch_size RGB 112x112 inputs, then restore the weights.
    model.bind(data_shapes=[('data', (args.batch_size, 3, 112,112))])
    model.set_params(argParams, auxParams)
verTest(model, args.batch_size) | 41.534247 | 125 | 0.632256 |
42dc77f7900d79cb250ea17552132e0f738917bd | 4,482 | py | Python | test/test_plugin_spontit.py | NiNiyas/apprise | 8d96e95acd7cb89f082685ae161bd0e268203f0c | [
"MIT"
] | 1 | 2022-01-19T01:40:04.000Z | 2022-01-19T01:40:04.000Z | test/test_plugin_spontit.py | NiNiyas/apprise | 8d96e95acd7cb89f082685ae161bd0e268203f0c | [
"MIT"
] | null | null | null | test/test_plugin_spontit.py | NiNiyas/apprise | 8d96e95acd7cb89f082685ae161bd0e268203f0c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import requests
from apprise import plugins
from helpers import AppriseURLTester
# Disable logging for a cleaner testing output
import logging
logging.disable(logging.CRITICAL)
# Our Testing URLs
# Each entry is (url, expectations-dict) consumed by AppriseURLTester.
apprise_url_tests = (
    ('spontit://', {
        # invalid url
        'instance': TypeError,
    }),
    # Another bad url
    ('spontit://:@/', {
        'instance': TypeError,
    }),
    # No user specified
    ('spontit://%s' % ('a' * 100), {
        'instance': TypeError,
    }),
    # Invalid API Key specified
    ('spontit://user@%%20_', {
        'instance': TypeError,
    }),
    # Provide a valid user and API Key
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # Our expected url(privacy=True) startswith() response:
        'privacy_url': 'spontit://{}@b...b/'.format('u' * 11),
    }),
    # Provide a valid user and API Key, but provide an invalid channel
    ('spontit://%s@%s/#!!' % ('u' * 11, 'b' * 100), {
        # An instance is still created, but the channel won't be notified
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user, API Key and a valid channel
    ('spontit://%s@%s/#abcd' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user, API Key, and a subtitle
    ('spontit://%s@%s/?subtitle=Test' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user, API Key, and a lengthy subtitle
    ('spontit://%s@%s/?subtitle=%s' % ('u' * 11, 'b' * 100, 'c' * 300), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide a valid user and API Key, but provide a valid channel (that is
    # not ours).
    # Spontit uses a slash (/) to delimit the user from the channel id when
    # specifying channel entries. For Apprise we need to encode this
    # so we convert the slash (/) into %2F
    ('spontit://{}@{}/#1245%2Fabcd'.format('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide multiple channels
    ('spontit://{}@{}/#1245%2Fabcd/defg'.format('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    # Provide multiple channels through the use of the to= variable
    ('spontit://{}@{}/?to=#1245/abcd'.format('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
    }),
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # force a failure
        'response': False,
        'requests_response_code': requests.codes.internal_server_error,
    }),
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # throw a bizarre code forcing us to fail to look it up
        'response': False,
        'requests_response_code': 999,
    }),
    ('spontit://%s@%s' % ('u' * 11, 'b' * 100), {
        'instance': plugins.NotifySpontit,
        # Throws a series of connection and transfer exceptions when this flag
        # is set and tests that we gracefully handle them
        'test_requests_exceptions': True,
    }),
)
def test_plugin_spontit_urls():
    """
    NotifySpontit() Apprise URLs

    Drives every (url, expectations) pair in ``apprise_url_tests``
    through the shared URL test harness.
    """
    # Run our general tests
    AppriseURLTester(tests=apprise_url_tests).run_all()
| 37.663866 | 79 | 0.629407 |
42dc9cb1aa466dc4d81d1303416d9c0741104c68 | 2,751 | py | Python | img_striper.py | tacensi/image_striper | d361c5c4b7e9b8588b50d8f992b90d14fd64d4f0 | [
"MIT"
] | null | null | null | img_striper.py | tacensi/image_striper | d361c5c4b7e9b8588b50d8f992b90d14fd64d4f0 | [
"MIT"
] | null | null | null | img_striper.py | tacensi/image_striper | d361c5c4b7e9b8588b50d8f992b90d14fd64d4f0 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import argparse
import textwrap
import math
from PIL import Image
# Stage 0: CLI — requires an input image (-i) and an output path (-o).
parser = argparse.ArgumentParser(
    prog='img_striper.py',
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent('''\
        Image striper
        This is a simple program to make stripes out of images
        and join them together again. It was inspired by this great
        video: https://www.instagram.com/p/BhZU4XMgdYA/
        This script follows the WTFPL, so go ahead and do whatever
        the fuck you want with it.
        '''),
    epilog=textwrap.dedent('''\
        This is just a simple exercise.
        Please don't hate me for my noobiness.
        '''))
parser.add_argument('--i',
                    '-input',
                    help='File to be shifted',
                    type=argparse.FileType('rb', 0),
                    required=True
                    )
parser.add_argument('--o',
                    '-output',
                    help='Image to be saved',
                    type=argparse.FileType('wb', 0),
                    required=True
                    )
args = parser.parse_args()
# open image and create new one
# Stage 1: intermediate canvas is half as wide and twice as tall; even
# 15px vertical strips go to the top half, odd strips to the bottom half.
original_doggo = Image.open(args.i)
original_w, original_h = original_doggo.size
inter_w = int(math.floor(original_w / 2))
inter_h = original_h * 2
inter_doggo = Image.new('RGB', [inter_w, inter_h], 'white')
# calculate the number of strips
# NOTE(review): widths that are not a multiple of 30 leave a remainder;
# the last strip's paste box can extend past inter_w — confirm Pillow's
# clipping behaviour is acceptable here.
no_strips = int(math.floor(original_w / 15))
for n in range(0, no_strips):
    # calculate xs from the cropped strip
    x1 = n * 15
    x2 = x1 + 15
    # create crop box
    crop_box = (x1, 0, x2, original_h)
    # cropped section
    section = original_doggo.crop(crop_box)
    y1 = 0
    # calculate xs for the placement of the paste
    if n % 2:
        y1 = original_h
    y2 = y1 + original_h
    x3 = 15 * int(math.floor(n / 2))
    x4 = x3 + 15
    paste_box = (x3, y1, x4, y2)
    inter_doggo.paste(section, paste_box)
# Stage 2: the inverse pass over horizontal strips rebuilds a canvas of
# the original proportions (even strips left half, odd strips right half).
original_w, original_h = inter_doggo.size
new_h = int(math.floor(inter_h / 2))
new_w = inter_w * 2
new_doggo = Image.new('RGB', [new_w, new_h], 'white')
# calculate the number of strips
no_strips = int(math.floor(inter_h / 15))
for n in range(0, no_strips):
    # calculate xs from the cropped strip
    y1 = n * 15
    y2 = y1 + 15
    # create crop box
    crop_box = (0, y1, inter_w, y2)
    # cropped section
    section = inter_doggo.crop(crop_box)
    x1 = 0
    # calculate xs for the placement of the paste
    if n % 2:
        x1 = inter_w
    x2 = x1 + inter_w
    y3 = 15 * int(math.floor(n / 2))
    y4 = y3 + 15
    paste_box = (x1, y3, x2, y4)
    new_doggo.paste(section, paste_box)
new_doggo.save(args.o)
# print(original_w, original_h)
# parser.print_help()
| 25.472222 | 67 | 0.607779 |
42dcb97d77131e74ecfe71c62c27b3b22cca853a | 7,590 | py | Python | ds/web/views.py | brainmorsel/python-dhcp-sprout | c8da1b19558e404fdfef24304e1996c696fc13b1 | [
"MIT"
] | null | null | null | ds/web/views.py | brainmorsel/python-dhcp-sprout | c8da1b19558e404fdfef24304e1996c696fc13b1 | [
"MIT"
] | 1 | 2019-05-03T07:54:57.000Z | 2019-05-03T07:54:57.000Z | ds/web/views.py | brainmorsel/python-dhcp-sprout | c8da1b19558e404fdfef24304e1996c696fc13b1 | [
"MIT"
] | null | null | null | import datetime
from aiohttp import web
from aiohttp_jinja2 import template
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import psycopg2
from ds import db
from . import forms
@template('index.jinja2')
async def index(request):
    """Render ``index.jinja2``; the template needs no context data."""
    return {}
@template('profile_list.jinja2')
async def profile_list(request):
    """List profiles with their address-pool utilisation.

    ``ips_total`` is the usable host count of the profile's network
    (broadcast - network - 2); ``ips_used`` is the number of owner rows
    per profile, computed in the aggregated ``cnts`` subquery.
    NOTE(review): the inner join against ``cnts`` hides profiles that
    have no owner rows at all — confirm that is intended.
    """
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.profile, 'ips_used',
                (sa.func.broadcast(db.profile.c.network_addr) - db.profile.c.network_addr - 2).label('ips_total')
            ]).
            select_from(
                db.profile.
                join(sa.select([
                    db.owner.c.profile_id, sa.func.count(db.owner.c.id).label('ips_used')
                ]).group_by(db.owner.c.profile_id).alias('cnts'))
            ).
            order_by(db.profile.c.name)
        )).fetchall()
    return {'items': items}
def _cast_str_to_inet_arr(ip_list_str):
    """Turn the IP-list string (parsed via ``forms.str_to_ip_list``) into a
    SQL expression casting the addresses to a PostgreSQL ``INET[]``.

    Bug fix: ``map`` yields a lazy iterator, which SQLAlchemy cannot render
    as an ARRAY literal — materialise the addresses into a list first.
    """
    return sa.cast([str(ip) for ip in forms.str_to_ip_list(ip_list_str)],
                   pg.ARRAY(pg.INET))
@template('profile_edit.jinja2')
async def profile_edit(request):
    """Create (no ``id`` in the URL) or update a DHCP profile.

    On a valid POST the dns/ntp IP lists are cast to INET[] expressions,
    the row is inserted or updated inside one transaction, the DHCP
    daemon is told to reload via ``pg_notify``, and the client is
    redirected to the profile list; otherwise the edit form is rendered.
    """
    tbl = db.profile
    item_id = request.match_info.get('id')
    # Read the POST body once up front so the form below sees it.
    await request.post()
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            item = await (await conn.execute(
                tbl.select().where(tbl.c.id == item_id)
            )).fetchone()
            form = forms.ProfileEditForm(await request.post(), item)
            if request.method == 'POST' and form.validate():
                params = db.fit_params_dict(form.data, tbl.c.keys())
                # NOTE(review): leftover debug output — consider removing.
                print(params['dns_ips'])
                params['dns_ips'] = _cast_str_to_inet_arr(params['dns_ips'])
                params['ntp_ips'] = _cast_str_to_inet_arr(params['ntp_ips'])
                if item_id is None:
                    await conn.execute(tbl.insert().values(params))
                else:
                    await conn.execute(
                        tbl.update().values(params).where(tbl.c.id == item_id)
                    )
                # Wake the DHCP daemon so it reloads this profile.
                await conn.execute(
                    sa.select([sa.func.pg_notify('dhcp_control', 'RELOAD_PROFILE {}'.format(item_id))])
                )
                return web.HTTPFound('/profile/')
    return {'form': form}
async def profile_delete(request):
    """Delete the profile identified by the ``id`` path parameter and
    redirect back to the profile list."""
    profile_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        delete_query = db.profile.delete().where(db.profile.c.id == profile_id)
        await conn.execute(delete_query)
    return web.HTTPFound('/profile/')
@template('staging_list.jinja2')
async def staging_list(request):
    """List staging owners: rows joined to their profile that have no IP
    assigned yet, newest first.

    ``== None`` is intentional — SQLAlchemy renders it as ``IS NULL``.
    """
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            where(db.owner.c.ip_addr == None).
            order_by(sa.desc(db.owner.c.create_date))
        )).fetchall()
    return {'items': items}
async def staging_assign_ip(request):
    """Assign the lowest free IP of the owner's profile network.

    ``gen`` generates every host address from network+1 to broadcast-1 of
    the profile's network; ``sel`` collects the addresses already taken in
    that profile; their EXCEPT, ordered ascending and limited to one row,
    yields the first free address, which is written to the owner row in a
    single statement.  The DHCP daemon is then notified via ``pg_notify``.
    When ``?edit`` is present the client is sent on to the edit form.
    """
    item_id = int(request.match_info.get('id'))
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            profile_id = await conn.scalar(
                sa.select([db.owner.c.profile_id]).where(db.owner.c.id == item_id)
            )
            # All host addresses of the profile's network (INET arithmetic
            # relative to 0.0.0.0 feeds generate_series).
            gen = sa.select([
                (sa.cast('0.0.0.0', pg.INET) + sa.func.generate_series(
                    sa.cast(db.profile.c.network_addr, pg.INET) - '0.0.0.0' + 1,
                    sa.func.broadcast(db.profile.c.network_addr) - '0.0.0.0' - 1
                )).label('ip_addr')
            ]).\
            select_from(db.profile.join(db.owner)). \
            where(db.profile.c.id == profile_id)
            # Addresses already assigned within the same profile
            # (``!= None`` renders as IS NOT NULL).
            sel = sa.select([db.owner.c.ip_addr]). \
                where(db.owner.c.profile_id == profile_id). \
                where(db.owner.c.ip_addr != None)
            ip_addr = gen.except_(sel).order_by('ip_addr').limit(1)
            await conn.execute(
                db.owner.update().values(
                    ip_addr=ip_addr,
                    modify_date=sa.func.now()
                ).
                where(db.owner.c.id == item_id)
            )
            await conn.execute(
                sa.select([sa.func.pg_notify('dhcp_control', 'RELOAD_ITEM {}'.format(item_id))])
            )
    if 'edit' in request.rel_url.query:
        return web.HTTPFound('/assigned/{}/edit?redirect=/staging/'.format(item_id))
    return web.HTTPFound('/staging/')
async def staging_delete(request):
    """Delete a staging owner row and tell the DHCP daemon to drop it.

    The MAC address is read first, inside the same transaction, because
    the REMOVE_STAGING notification is keyed by MAC rather than row id.
    """
    tbl = db.owner
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            mac_addr = await conn.scalar(
                sa.select([tbl.c.mac_addr]).
                where(tbl.c.id == item_id)
            )
            await conn.execute(tbl.delete().where(tbl.c.id == item_id))
            await conn.execute(
                sa.select([sa.func.pg_notify('dhcp_control', 'REMOVE_STAGING {}'.format(mac_addr))])
            )
    return web.HTTPFound('/staging/')
@template('assigned_list.jinja2')
async def assigned_list(request):
    """List owners that already have an IP assigned, joined to their
    profile, ordered by most recent lease first.

    ``!= None`` is intentional — SQLAlchemy renders it as ``IS NOT NULL``.
    """
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            where(db.owner.c.ip_addr != None).
            order_by(sa.desc(db.owner.c.lease_date))
        )).fetchall()
    return {'items': items}
@template('assigned_edit.jinja2')
async def assigned_edit(request):
    """Edit an assigned owner row.

    GET renders the edit form populated from the owner/profile join;
    a valid POST writes the form fields back to the owner row and
    redirects either to ``?redirect=...`` or to the assigned list.
    """
    item_id = request.match_info.get('id')
    # Read the POST body once up front so the form below sees it.
    await request.post()
    async with request.app.db.acquire() as conn:
        item = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            where(db.owner.c.id == item_id)
        )).fetchone()
        form = forms.AssignedItemEditForm(await request.post(), item)
        if request.method == 'POST' and form.validate():
            params = db.fit_params_dict(form.data, db.owner.c.keys())
            await conn.execute(
                db.owner.update().values(params).where(db.owner.c.id == item_id)
            )
            if 'redirect' in request.rel_url.query:
                # SECURITY(review): the redirect target is taken verbatim
                # from the query string (open-redirect risk) — restrict it
                # to local paths before trusting it.
                return web.HTTPFound(request.rel_url.query['redirect'])
            return web.HTTPFound('/assigned/')
    return {'item': item, 'form': form}
async def assigned_delete(request):
    """Delete an assigned owner row and tell the DHCP daemon to drop it.

    The MAC address is read first, inside the same transaction, because
    the REMOVE_ACTIVE notification is keyed by MAC rather than row id.
    """
    tbl = db.owner
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            mac_addr = await conn.scalar(
                sa.select([tbl.c.mac_addr]).
                where(tbl.c.id == item_id)
            )
            await conn.execute(tbl.delete().where(tbl.c.id == item_id))
            await conn.execute(
                sa.select([sa.func.pg_notify('dhcp_control', 'REMOVE_ACTIVE {}'.format(mac_addr))])
            )
    return web.HTTPFound('/assigned/')
42dd96f3e39ccca7beaece3559fe4cb2d0d639e4 | 488 | py | Python | django_fastapi/api/migrations/0008_test1.py | ehddn5252/FastAPI_Django | a179aedb62c28d1700578882e681002a61576060 | [
"MIT"
] | null | null | null | django_fastapi/api/migrations/0008_test1.py | ehddn5252/FastAPI_Django | a179aedb62c28d1700578882e681002a61576060 | [
"MIT"
] | null | null | null | django_fastapi/api/migrations/0008_test1.py | ehddn5252/FastAPI_Django | a179aedb62c28d1700578882e681002a61576060 | [
"MIT"
] | 1 | 2021-11-26T08:22:57.000Z | 2021-11-26T08:22:57.000Z | # Generated by Django 3.2.9 on 2021-11-19 15:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration creating the ``Test1`` table.

    ``test1`` (CharField, max 50) is the primary key; ``test2`` is a
    plain CharField.  Depends on migration 0007 of the ``api`` app.
    """
    dependencies = [
        ('api', '0007_alter_login_owner'),
    ]
    operations = [
        migrations.CreateModel(
            name='Test1',
            fields=[
                ('test1', models.CharField(max_length=50, primary_key=True, serialize=False)),
                ('test2', models.CharField(max_length=50)),
            ],
        ),
    ]
| 23.238095 | 94 | 0.567623 |
42ddb2012f057cfc1204b7ac846fc3824c272f11 | 47,110 | py | Python | tb_paddle/proto/api_pb2.py | GT-AcerZhang/tb-paddle | a129520339f4d4e7a9bed05feb733f2565673960 | [
"MIT"
] | null | null | null | tb_paddle/proto/api_pb2.py | GT-AcerZhang/tb-paddle | a129520339f4d4e7a9bed05feb733f2565673960 | [
"MIT"
] | null | null | null | tb_paddle/proto/api_pb2.py | GT-AcerZhang/tb-paddle | a129520339f4d4e7a9bed05feb733f2565673960 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tb_paddle/proto/api.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tb_paddle/proto/api.proto',
package='tb_paddle',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x19tb_paddle/proto/api.proto\x12\ttb_paddle\x1a\x1cgoogle/protobuf/struct.proto\"\xb2\x01\n\nExperiment\x12\x0c\n\x04name\x18\x06 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\x12\x19\n\x11time_created_secs\x18\x03 \x01(\x01\x12+\n\x0chparam_infos\x18\x04 \x03(\x0b\x32\x15.tb_paddle.HParamInfo\x12+\n\x0cmetric_infos\x18\x05 \x03(\x0b\x32\x15.tb_paddle.MetricInfo\"\xdb\x01\n\nHParamInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12#\n\x04type\x18\x04 \x01(\x0e\x32\x15.tb_paddle.H_DataType\x12\x35\n\x0f\x64omain_discrete\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x00\x12.\n\x0f\x64omain_interval\x18\x06 \x01(\x0b\x32\x13.tb_paddle.IntervalH\x00\x42\x08\n\x06\x64omain\"0\n\x08Interval\x12\x11\n\tmin_value\x18\x01 \x01(\x01\x12\x11\n\tmax_value\x18\x02 \x01(\x01\"(\n\nMetricName\x12\r\n\x05group\x18\x01 \x01(\t\x12\x0b\n\x03tag\x18\x02 \x01(\t\"\x8a\x01\n\nMetricInfo\x12#\n\x04name\x18\x01 \x01(\x0b\x32\x15.tb_paddle.MetricName\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\x12,\n\x0c\x64\x61taset_type\x18\x05 \x01(\x0e\x32\x16.tb_paddle.DatasetType\"\x85\x02\n\x0cSessionGroup\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x07hparams\x18\x02 \x03(\x0b\x32$.tb_paddle.SessionGroup.HparamsEntry\x12-\n\rmetric_values\x18\x03 \x03(\x0b\x32\x16.tb_paddle.MetricValue\x12$\n\x08sessions\x18\x04 \x03(\x0b\x32\x12.tb_paddle.Session\x12\x13\n\x0bmonitor_url\x18\x05 \x01(\t\x1a\x46\n\x0cHparamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value:\x02\x38\x01\"p\n\x0bMetricValue\x12#\n\x04name\x18\x01 \x01(\x0b\x32\x15.tb_paddle.MetricName\x12\r\n\x05value\x18\x02 \x01(\x01\x12\x15\n\rtraining_step\x18\x03 \x01(\x05\x12\x16\n\x0ewall_time_secs\x18\x04 \x01(\x01\"\xc1\x01\n\x07Session\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\x17\n\x0fstart_time_secs\x18\x02 \x01(\x01\x12\x15\n\rend_time_secs\x18\x03 \x01(\x01\x12!\n\x06status\x18\x04 \x01(\x0e\x32\x11.tb_paddle.Status\x12\x11\n\tmodel_uri\x18\x05 \x01(\t\x12-\n\rmetric_values\x18\x06 \x03(\x0b\x32\x16.tb_paddle.MetricValue\x12\x13\n\x0bmonitor_url\x18\x07 \x01(\t\"/\n\x14GetExperimentRequest\x12\x17\n\x0f\x65xperiment_name\x18\x01 \x01(\t\"\x9c\x02\n\x18ListSessionGroupsRequest\x12\x17\n\x0f\x65xperiment_name\x18\x06 \x01(\t\x12+\n\x10\x61llowed_statuses\x18\x07 \x03(\x0e\x32\x11.tb_paddle.Status\x12(\n\ncol_params\x18\x01 \x03(\x0b\x32\x14.tb_paddle.ColParams\x12\x34\n\x10\x61ggregation_type\x18\x02 \x01(\x0e\x32\x1a.tb_paddle.AggregationType\x12\x31\n\x12\x61ggregation_metric\x18\x03 \x01(\x0b\x32\x15.tb_paddle.MetricName\x12\x13\n\x0bstart_index\x18\x04 \x01(\x05\x12\x12\n\nslice_size\x18\x05 \x01(\x05\"\xbb\x02\n\tColParams\x12\'\n\x06metric\x18\x01 \x01(\x0b\x32\x15.tb_paddle.MetricNameH\x00\x12\x10\n\x06hparam\x18\x02 \x01(\tH\x00\x12#\n\x05order\x18\x03 \x01(\x0e\x32\x14.tb_paddle.SortOrder\x12\x1c\n\x14missing_values_first\x18\x04 \x01(\x08\x12\x17\n\rfilter_regexp\x18\x05 \x01(\tH\x01\x12.\n\x0f\x66ilter_interval\x18\x06 \x01(\x0b\x32\x13.tb_paddle.IntervalH\x01\x12\x35\n\x0f\x66ilter_discrete\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.ListValueH\x01\x12\x1e\n\x16\x65xclude_missing_values\x18\x08 \x01(\x08\x42\x06\n\x04nameB\x08\n\x06\x66ilter\"`\n\x19ListSessionGroupsResponse\x12/\n\x0esession_groups\x18\x01 \x03(\x0b\x32\x17.tb_paddle.SessionGroup\x12\x12\n\ntotal_size\x18\x03 \x01(\x05\"s\n\x16ListMetricEvalsRequest\x12\x17\n\x0f\x65xperiment_name\x18\x03 \x01(\t\x12\x14\n\x0csession_name\x18\x01 \x01(\t\x12*\n\x0bmetric_name\x18\x02 
\x01(\x0b\x32\x15.tb_paddle.MetricName*b\n\nH_DataType\x12\x13\n\x0f\x44\x41TA_TYPE_UNSET\x10\x00\x12\x14\n\x10\x44\x41TA_TYPE_STRING\x10\x01\x12\x12\n\x0e\x44\x41TA_TYPE_BOOL\x10\x02\x12\x15\n\x11\x44\x41TA_TYPE_FLOAT64\x10\x03*P\n\x0b\x44\x61tasetType\x12\x13\n\x0f\x44\x41TASET_UNKNOWN\x10\x00\x12\x14\n\x10\x44\x41TASET_TRAINING\x10\x01\x12\x16\n\x12\x44\x41TASET_VALIDATION\x10\x02*X\n\x06Status\x12\x12\n\x0eSTATUS_UNKNOWN\x10\x00\x12\x12\n\x0eSTATUS_SUCCESS\x10\x01\x12\x12\n\x0eSTATUS_FAILURE\x10\x02\x12\x12\n\x0eSTATUS_RUNNING\x10\x03*A\n\tSortOrder\x12\x15\n\x11ORDER_UNSPECIFIED\x10\x00\x12\r\n\tORDER_ASC\x10\x01\x12\x0e\n\nORDER_DESC\x10\x02*\x7f\n\x0f\x41ggregationType\x12\x15\n\x11\x41GGREGATION_UNSET\x10\x00\x12\x13\n\x0f\x41GGREGATION_AVG\x10\x01\x12\x16\n\x12\x41GGREGATION_MEDIAN\x10\x02\x12\x13\n\x0f\x41GGREGATION_MIN\x10\x03\x12\x13\n\x0f\x41GGREGATION_MAX\x10\x04\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_H_DATATYPE = _descriptor.EnumDescriptor(
name='H_DataType',
full_name='tb_paddle.H_DataType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DATA_TYPE_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_TYPE_STRING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_TYPE_BOOL', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_TYPE_FLOAT64', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2149,
serialized_end=2247,
)
_sym_db.RegisterEnumDescriptor(_H_DATATYPE)
H_DataType = enum_type_wrapper.EnumTypeWrapper(_H_DATATYPE)
_DATASETTYPE = _descriptor.EnumDescriptor(
name='DatasetType',
full_name='tb_paddle.DatasetType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DATASET_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_TRAINING', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATASET_VALIDATION', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2249,
serialized_end=2329,
)
_sym_db.RegisterEnumDescriptor(_DATASETTYPE)
DatasetType = enum_type_wrapper.EnumTypeWrapper(_DATASETTYPE)
_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='tb_paddle.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNKNOWN', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATUS_SUCCESS', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATUS_FAILURE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STATUS_RUNNING', index=3, number=3,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2331,
serialized_end=2419,
)
_sym_db.RegisterEnumDescriptor(_STATUS)
Status = enum_type_wrapper.EnumTypeWrapper(_STATUS)
_SORTORDER = _descriptor.EnumDescriptor(
name='SortOrder',
full_name='tb_paddle.SortOrder',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ORDER_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ORDER_ASC', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ORDER_DESC', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2421,
serialized_end=2486,
)
_sym_db.RegisterEnumDescriptor(_SORTORDER)
SortOrder = enum_type_wrapper.EnumTypeWrapper(_SORTORDER)
_AGGREGATIONTYPE = _descriptor.EnumDescriptor(
name='AggregationType',
full_name='tb_paddle.AggregationType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='AGGREGATION_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AGGREGATION_AVG', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AGGREGATION_MEDIAN', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AGGREGATION_MIN', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AGGREGATION_MAX', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=2488,
serialized_end=2615,
)
_sym_db.RegisterEnumDescriptor(_AGGREGATIONTYPE)
AggregationType = enum_type_wrapper.EnumTypeWrapper(_AGGREGATIONTYPE)
DATA_TYPE_UNSET = 0
DATA_TYPE_STRING = 1
DATA_TYPE_BOOL = 2
DATA_TYPE_FLOAT64 = 3
DATASET_UNKNOWN = 0
DATASET_TRAINING = 1
DATASET_VALIDATION = 2
STATUS_UNKNOWN = 0
STATUS_SUCCESS = 1
STATUS_FAILURE = 2
STATUS_RUNNING = 3
ORDER_UNSPECIFIED = 0
ORDER_ASC = 1
ORDER_DESC = 2
AGGREGATION_UNSET = 0
AGGREGATION_AVG = 1
AGGREGATION_MEDIAN = 2
AGGREGATION_MIN = 3
AGGREGATION_MAX = 4
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='tb_paddle.Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tb_paddle.Experiment.name', index=0,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='tb_paddle.Experiment.description', index=1,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='user', full_name='tb_paddle.Experiment.user', index=2,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='time_created_secs', full_name='tb_paddle.Experiment.time_created_secs', index=3,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hparam_infos', full_name='tb_paddle.Experiment.hparam_infos', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metric_infos', full_name='tb_paddle.Experiment.metric_infos', index=5,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=71,
serialized_end=249,
)
_HPARAMINFO = _descriptor.Descriptor(
name='HParamInfo',
full_name='tb_paddle.HParamInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tb_paddle.HParamInfo.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_name', full_name='tb_paddle.HParamInfo.display_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='tb_paddle.HParamInfo.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='tb_paddle.HParamInfo.type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='domain_discrete', full_name='tb_paddle.HParamInfo.domain_discrete', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='domain_interval', full_name='tb_paddle.HParamInfo.domain_interval', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='domain', full_name='tb_paddle.HParamInfo.domain',
index=0, containing_type=None, fields=[]),
],
serialized_start=252,
serialized_end=471,
)
_INTERVAL = _descriptor.Descriptor(
name='Interval',
full_name='tb_paddle.Interval',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_value', full_name='tb_paddle.Interval.min_value', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_value', full_name='tb_paddle.Interval.max_value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=473,
serialized_end=521,
)
_METRICNAME = _descriptor.Descriptor(
name='MetricName',
full_name='tb_paddle.MetricName',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='group', full_name='tb_paddle.MetricName.group', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tag', full_name='tb_paddle.MetricName.tag', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=523,
serialized_end=563,
)
_METRICINFO = _descriptor.Descriptor(
name='MetricInfo',
full_name='tb_paddle.MetricInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tb_paddle.MetricInfo.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='display_name', full_name='tb_paddle.MetricInfo.display_name', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='tb_paddle.MetricInfo.description', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_type', full_name='tb_paddle.MetricInfo.dataset_type', index=3,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=566,
serialized_end=704,
)
_SESSIONGROUP_HPARAMSENTRY = _descriptor.Descriptor(
name='HparamsEntry',
full_name='tb_paddle.SessionGroup.HparamsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tb_paddle.SessionGroup.HparamsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tb_paddle.SessionGroup.HparamsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=898,
serialized_end=968,
)
_SESSIONGROUP = _descriptor.Descriptor(
name='SessionGroup',
full_name='tb_paddle.SessionGroup',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tb_paddle.SessionGroup.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hparams', full_name='tb_paddle.SessionGroup.hparams', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metric_values', full_name='tb_paddle.SessionGroup.metric_values', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sessions', full_name='tb_paddle.SessionGroup.sessions', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='monitor_url', full_name='tb_paddle.SessionGroup.monitor_url', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SESSIONGROUP_HPARAMSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=707,
serialized_end=968,
)
_METRICVALUE = _descriptor.Descriptor(
name='MetricValue',
full_name='tb_paddle.MetricValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tb_paddle.MetricValue.name', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tb_paddle.MetricValue.value', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_step', full_name='tb_paddle.MetricValue.training_step', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='wall_time_secs', full_name='tb_paddle.MetricValue.wall_time_secs', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=970,
serialized_end=1082,
)
_SESSION = _descriptor.Descriptor(
name='Session',
full_name='tb_paddle.Session',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tb_paddle.Session.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_time_secs', full_name='tb_paddle.Session.start_time_secs', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end_time_secs', full_name='tb_paddle.Session.end_time_secs', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='tb_paddle.Session.status', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_uri', full_name='tb_paddle.Session.model_uri', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metric_values', full_name='tb_paddle.Session.metric_values', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='monitor_url', full_name='tb_paddle.Session.monitor_url', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1085,
serialized_end=1278,
)
_GETEXPERIMENTREQUEST = _descriptor.Descriptor(
name='GetExperimentRequest',
full_name='tb_paddle.GetExperimentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='experiment_name', full_name='tb_paddle.GetExperimentRequest.experiment_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1280,
serialized_end=1327,
)
_LISTSESSIONGROUPSREQUEST = _descriptor.Descriptor(
name='ListSessionGroupsRequest',
full_name='tb_paddle.ListSessionGroupsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='experiment_name', full_name='tb_paddle.ListSessionGroupsRequest.experiment_name', index=0,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowed_statuses', full_name='tb_paddle.ListSessionGroupsRequest.allowed_statuses', index=1,
number=7, type=14, cpp_type=8, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='col_params', full_name='tb_paddle.ListSessionGroupsRequest.col_params', index=2,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aggregation_type', full_name='tb_paddle.ListSessionGroupsRequest.aggregation_type', index=3,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aggregation_metric', full_name='tb_paddle.ListSessionGroupsRequest.aggregation_metric', index=4,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_index', full_name='tb_paddle.ListSessionGroupsRequest.start_index', index=5,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='slice_size', full_name='tb_paddle.ListSessionGroupsRequest.slice_size', index=6,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1330,
serialized_end=1614,
)
_COLPARAMS = _descriptor.Descriptor(
name='ColParams',
full_name='tb_paddle.ColParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='metric', full_name='tb_paddle.ColParams.metric', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hparam', full_name='tb_paddle.ColParams.hparam', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='order', full_name='tb_paddle.ColParams.order', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='missing_values_first', full_name='tb_paddle.ColParams.missing_values_first', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter_regexp', full_name='tb_paddle.ColParams.filter_regexp', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter_interval', full_name='tb_paddle.ColParams.filter_interval', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter_discrete', full_name='tb_paddle.ColParams.filter_discrete', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exclude_missing_values', full_name='tb_paddle.ColParams.exclude_missing_values', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='name', full_name='tb_paddle.ColParams.name',
index=0, containing_type=None, fields=[]),
_descriptor.OneofDescriptor(
name='filter', full_name='tb_paddle.ColParams.filter',
index=1, containing_type=None, fields=[]),
],
serialized_start=1617,
serialized_end=1932,
)
# protoc-generated Descriptor for the ListSessionGroupsResponse message
# (repeated session_groups, int32 total_size). Machine-generated code:
# do not hand-edit; regenerate from the .proto file instead.
_LISTSESSIONGROUPSRESPONSE = _descriptor.Descriptor(
  name='ListSessionGroupsResponse',
  full_name='tb_paddle.ListSessionGroupsResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='session_groups', full_name='tb_paddle.ListSessionGroupsResponse.session_groups', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='total_size', full_name='tb_paddle.ListSessionGroupsResponse.total_size', index=1,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1934,
  serialized_end=2030,
)
# protoc-generated Descriptor for the ListMetricEvalsRequest message
# (experiment_name, session_name, message-typed metric_name).
# Machine-generated code: do not hand-edit; regenerate from the .proto.
_LISTMETRICEVALSREQUEST = _descriptor.Descriptor(
  name='ListMetricEvalsRequest',
  full_name='tb_paddle.ListMetricEvalsRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='experiment_name', full_name='tb_paddle.ListMetricEvalsRequest.experiment_name', index=0,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='session_name', full_name='tb_paddle.ListMetricEvalsRequest.session_name', index=1,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='metric_name', full_name='tb_paddle.ListMetricEvalsRequest.metric_name', index=2,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=2032,
  serialized_end=2147,
)
# protoc-generated wiring: resolve message/enum cross-references between
# the descriptors above and register them (plus the file-level enums) on
# the module DESCRIPTOR. Machine-generated; do not hand-edit.
_EXPERIMENT.fields_by_name['hparam_infos'].message_type = _HPARAMINFO
_EXPERIMENT.fields_by_name['metric_infos'].message_type = _METRICINFO
_HPARAMINFO.fields_by_name['type'].enum_type = _H_DATATYPE
_HPARAMINFO.fields_by_name['domain_discrete'].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE
_HPARAMINFO.fields_by_name['domain_interval'].message_type = _INTERVAL
_HPARAMINFO.oneofs_by_name['domain'].fields.append(
  _HPARAMINFO.fields_by_name['domain_discrete'])
_HPARAMINFO.fields_by_name['domain_discrete'].containing_oneof = _HPARAMINFO.oneofs_by_name['domain']
_HPARAMINFO.oneofs_by_name['domain'].fields.append(
  _HPARAMINFO.fields_by_name['domain_interval'])
_HPARAMINFO.fields_by_name['domain_interval'].containing_oneof = _HPARAMINFO.oneofs_by_name['domain']
_METRICINFO.fields_by_name['name'].message_type = _METRICNAME
_METRICINFO.fields_by_name['dataset_type'].enum_type = _DATASETTYPE
_SESSIONGROUP_HPARAMSENTRY.fields_by_name['value'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_SESSIONGROUP_HPARAMSENTRY.containing_type = _SESSIONGROUP
_SESSIONGROUP.fields_by_name['hparams'].message_type = _SESSIONGROUP_HPARAMSENTRY
_SESSIONGROUP.fields_by_name['metric_values'].message_type = _METRICVALUE
_SESSIONGROUP.fields_by_name['sessions'].message_type = _SESSION
_METRICVALUE.fields_by_name['name'].message_type = _METRICNAME
_SESSION.fields_by_name['status'].enum_type = _STATUS
_SESSION.fields_by_name['metric_values'].message_type = _METRICVALUE
_LISTSESSIONGROUPSREQUEST.fields_by_name['allowed_statuses'].enum_type = _STATUS
_LISTSESSIONGROUPSREQUEST.fields_by_name['col_params'].message_type = _COLPARAMS
_LISTSESSIONGROUPSREQUEST.fields_by_name['aggregation_type'].enum_type = _AGGREGATIONTYPE
_LISTSESSIONGROUPSREQUEST.fields_by_name['aggregation_metric'].message_type = _METRICNAME
_COLPARAMS.fields_by_name['metric'].message_type = _METRICNAME
_COLPARAMS.fields_by_name['order'].enum_type = _SORTORDER
_COLPARAMS.fields_by_name['filter_interval'].message_type = _INTERVAL
_COLPARAMS.fields_by_name['filter_discrete'].message_type = google_dot_protobuf_dot_struct__pb2._LISTVALUE
_COLPARAMS.oneofs_by_name['name'].fields.append(
  _COLPARAMS.fields_by_name['metric'])
_COLPARAMS.fields_by_name['metric'].containing_oneof = _COLPARAMS.oneofs_by_name['name']
_COLPARAMS.oneofs_by_name['name'].fields.append(
  _COLPARAMS.fields_by_name['hparam'])
_COLPARAMS.fields_by_name['hparam'].containing_oneof = _COLPARAMS.oneofs_by_name['name']
_COLPARAMS.oneofs_by_name['filter'].fields.append(
  _COLPARAMS.fields_by_name['filter_regexp'])
_COLPARAMS.fields_by_name['filter_regexp'].containing_oneof = _COLPARAMS.oneofs_by_name['filter']
_COLPARAMS.oneofs_by_name['filter'].fields.append(
  _COLPARAMS.fields_by_name['filter_interval'])
_COLPARAMS.fields_by_name['filter_interval'].containing_oneof = _COLPARAMS.oneofs_by_name['filter']
_COLPARAMS.oneofs_by_name['filter'].fields.append(
  _COLPARAMS.fields_by_name['filter_discrete'])
_COLPARAMS.fields_by_name['filter_discrete'].containing_oneof = _COLPARAMS.oneofs_by_name['filter']
_LISTSESSIONGROUPSRESPONSE.fields_by_name['session_groups'].message_type = _SESSIONGROUP
_LISTMETRICEVALSREQUEST.fields_by_name['metric_name'].message_type = _METRICNAME
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
DESCRIPTOR.message_types_by_name['HParamInfo'] = _HPARAMINFO
DESCRIPTOR.message_types_by_name['Interval'] = _INTERVAL
DESCRIPTOR.message_types_by_name['MetricName'] = _METRICNAME
DESCRIPTOR.message_types_by_name['MetricInfo'] = _METRICINFO
DESCRIPTOR.message_types_by_name['SessionGroup'] = _SESSIONGROUP
DESCRIPTOR.message_types_by_name['MetricValue'] = _METRICVALUE
DESCRIPTOR.message_types_by_name['Session'] = _SESSION
DESCRIPTOR.message_types_by_name['GetExperimentRequest'] = _GETEXPERIMENTREQUEST
DESCRIPTOR.message_types_by_name['ListSessionGroupsRequest'] = _LISTSESSIONGROUPSREQUEST
DESCRIPTOR.message_types_by_name['ColParams'] = _COLPARAMS
DESCRIPTOR.message_types_by_name['ListSessionGroupsResponse'] = _LISTSESSIONGROUPSRESPONSE
DESCRIPTOR.message_types_by_name['ListMetricEvalsRequest'] = _LISTMETRICEVALSREQUEST
DESCRIPTOR.enum_types_by_name['H_DataType'] = _H_DATATYPE
DESCRIPTOR.enum_types_by_name['DatasetType'] = _DATASETTYPE
DESCRIPTOR.enum_types_by_name['Status'] = _STATUS
DESCRIPTOR.enum_types_by_name['SortOrder'] = _SORTORDER
DESCRIPTOR.enum_types_by_name['AggregationType'] = _AGGREGATIONTYPE
# protoc-generated registration: build the concrete Python message classes
# from the descriptors via the reflection machinery and register them with
# the symbol database. Machine-generated; do not hand-edit.
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), {
  'DESCRIPTOR' : _EXPERIMENT,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.Experiment)
  })
_sym_db.RegisterMessage(Experiment)
HParamInfo = _reflection.GeneratedProtocolMessageType('HParamInfo', (_message.Message,), {
  'DESCRIPTOR' : _HPARAMINFO,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.HParamInfo)
  })
_sym_db.RegisterMessage(HParamInfo)
Interval = _reflection.GeneratedProtocolMessageType('Interval', (_message.Message,), {
  'DESCRIPTOR' : _INTERVAL,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.Interval)
  })
_sym_db.RegisterMessage(Interval)
MetricName = _reflection.GeneratedProtocolMessageType('MetricName', (_message.Message,), {
  'DESCRIPTOR' : _METRICNAME,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.MetricName)
  })
_sym_db.RegisterMessage(MetricName)
MetricInfo = _reflection.GeneratedProtocolMessageType('MetricInfo', (_message.Message,), {
  'DESCRIPTOR' : _METRICINFO,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.MetricInfo)
  })
_sym_db.RegisterMessage(MetricInfo)
SessionGroup = _reflection.GeneratedProtocolMessageType('SessionGroup', (_message.Message,), {
  'HparamsEntry' : _reflection.GeneratedProtocolMessageType('HparamsEntry', (_message.Message,), {
    'DESCRIPTOR' : _SESSIONGROUP_HPARAMSENTRY,
    '__module__' : 'tb_paddle.proto.api_pb2'
    # @@protoc_insertion_point(class_scope:tb_paddle.SessionGroup.HparamsEntry)
    })
  ,
  'DESCRIPTOR' : _SESSIONGROUP,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.SessionGroup)
  })
_sym_db.RegisterMessage(SessionGroup)
_sym_db.RegisterMessage(SessionGroup.HparamsEntry)
MetricValue = _reflection.GeneratedProtocolMessageType('MetricValue', (_message.Message,), {
  'DESCRIPTOR' : _METRICVALUE,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.MetricValue)
  })
_sym_db.RegisterMessage(MetricValue)
Session = _reflection.GeneratedProtocolMessageType('Session', (_message.Message,), {
  'DESCRIPTOR' : _SESSION,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.Session)
  })
_sym_db.RegisterMessage(Session)
GetExperimentRequest = _reflection.GeneratedProtocolMessageType('GetExperimentRequest', (_message.Message,), {
  'DESCRIPTOR' : _GETEXPERIMENTREQUEST,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.GetExperimentRequest)
  })
_sym_db.RegisterMessage(GetExperimentRequest)
ListSessionGroupsRequest = _reflection.GeneratedProtocolMessageType('ListSessionGroupsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTSESSIONGROUPSREQUEST,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.ListSessionGroupsRequest)
  })
_sym_db.RegisterMessage(ListSessionGroupsRequest)
ColParams = _reflection.GeneratedProtocolMessageType('ColParams', (_message.Message,), {
  'DESCRIPTOR' : _COLPARAMS,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.ColParams)
  })
_sym_db.RegisterMessage(ColParams)
ListSessionGroupsResponse = _reflection.GeneratedProtocolMessageType('ListSessionGroupsResponse', (_message.Message,), {
  'DESCRIPTOR' : _LISTSESSIONGROUPSRESPONSE,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.ListSessionGroupsResponse)
  })
_sym_db.RegisterMessage(ListSessionGroupsResponse)
ListMetricEvalsRequest = _reflection.GeneratedProtocolMessageType('ListMetricEvalsRequest', (_message.Message,), {
  'DESCRIPTOR' : _LISTMETRICEVALSREQUEST,
  '__module__' : 'tb_paddle.proto.api_pb2'
  # @@protoc_insertion_point(class_scope:tb_paddle.ListMetricEvalsRequest)
  })
_sym_db.RegisterMessage(ListMetricEvalsRequest)
_SESSIONGROUP_HPARAMSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 41.912811 | 4,591 | 0.751221 |
42e0044ddc8db8684b032fa92b309e589628c115 | 6,123 | py | Python | etsyapi/__init__.py | DempDemp/etsyapi | 995250d2f76dcac7edf3b2404bfbce1df732765e | [
"BSD-3-Clause"
] | 1 | 2021-02-19T01:45:49.000Z | 2021-02-19T01:45:49.000Z | etsyapi/__init__.py | DempDemp/etsyapi | 995250d2f76dcac7edf3b2404bfbce1df732765e | [
"BSD-3-Clause"
] | null | null | null | etsyapi/__init__.py | DempDemp/etsyapi | 995250d2f76dcac7edf3b2404bfbce1df732765e | [
"BSD-3-Clause"
] | 2 | 2016-04-10T21:28:05.000Z | 2019-09-20T19:51:37.000Z | import six
import json
import logging
import requests
from requests_oauthlib import OAuth1
if six.PY3:
from urllib.parse import parse_qs
from urllib.parse import urlencode
else:
from urlparse import parse_qs
from urllib import urlencode
log = logging.getLogger(__name__)
class EtsyError(Exception):
    """Raised when the Etsy API answers with an error status.

    The failing HTTP response object is attached as ``response`` so
    callers can inspect the status code and body.
    """
    def __init__(self, message, response):
        self.response = response
        Exception.__init__(self, message)
class Etsy(object):
    """
    Represents the etsy API.

    Unauthenticated calls are signed with the application's api_key;
    calls on behalf of a user go through the OAuth1 three-legged flow
    (get_auth_url -> get_auth_token -> execute_authed).
    """
    url_base = "https://openapi.etsy.com/v2"

    def __init__(self, consumer_key, consumer_secret, oauth_token=None, oauth_token_secret=None, sandbox=False):
        """
        consumer_key / consumer_secret: the application's credentials.
        oauth_token / oauth_token_secret: a user's permanent tokens (as
            returned by get_auth_token); when both are given,
            authenticated requests via execute_authed become available.
        sandbox: if True, talk to Etsy's sandbox host instead of production.
        """
        self.params = {'api_key': consumer_key}
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        if sandbox:
            self.url_base = "http://sandbox.openapi.etsy.com/v2"
        # generic authenticated oauth hook (app credentials only)
        self.simple_oauth = OAuth1(consumer_key, client_secret=consumer_secret)
        if oauth_token and oauth_token_secret:
            # full oauth hook for an authenticated user
            self.full_oauth = OAuth1(consumer_key, client_secret=consumer_secret,
                                     resource_owner_key=oauth_token,
                                     resource_owner_secret=oauth_token_secret)

    def show_listings(self, color=None, color_wiggle=5):
        """
        Show all listings on the site.
        color should be a RGB ('#00FF00') or a HSV ('360;100;100') value;
        color_wiggle is sent as the color_accuracy tolerance.
        """
        endpoint = '/listings/active'
        params = {}
        if color:
            params['color'] = color
            params['color_accuracy'] = color_wiggle
        return self.execute(endpoint, params=params)

    def get_user_info(self, user):
        """
        Get basic info about a user; pass in a username or a user_id.
        Passing '__SELF__' returns the authenticated user's own profile
        (requires the oauth tokens given to the constructor).
        """
        endpoint = '/users/%s' % user
        auth = {}
        if user == '__SELF__':
            auth = {'oauth': self.full_oauth}
        return self.execute(endpoint, **auth)

    def find_user(self, keywords):
        """
        Search for users whose profiles match the given keywords string.
        """
        endpoint = '/users'
        params = {'keywords': keywords}
        return self.execute(endpoint, params=params)

    def get_auth_url(self, permissions=None):
        """
        Returns a url that a user is redirected to in order to authenticate
        with the etsy API. This is step one in the authentication process.
        oauth_token and oauth_token_secret need to be saved for step two.

        permissions: optional iterable of Etsy permission scope strings.
        """
        endpoint = '/oauth/request_token'
        params = {}
        if permissions:
            params = {'scope': " ".join(permissions)}
        self.oauth = self.simple_oauth
        response = self.execute(endpoint, oauth=self.oauth, params=params)
        # The oauth endpoints answer with a querystring, not JSON.
        parsed = parse_qs(response)
        return {'oauth_token': parsed['oauth_token'][0],
                'url': parsed['login_url'][0],
                'oauth_token_secret': parsed['oauth_token_secret'][0]}

    def get_auth_token(self, verifier, oauth_token, oauth_token_secret):
        """
        Step two in the authentication process. oauth_token and
        oauth_token_secret are the same that came from the get_auth_url
        function call. Returned is the permanent oauth_token and
        oauth_token_secret that will be used in every subsequent api
        request that requires authentication.
        """
        endpoint = '/oauth/access_token'
        oauth = OAuth1(self.consumer_key, client_secret=self.consumer_secret,
                       resource_owner_key=oauth_token,
                       resource_owner_secret=oauth_token_secret,
                       verifier=verifier)
        response = requests.post(url="%s%s" % (self.url_base, endpoint), auth=oauth)
        parsed = parse_qs(response.text)
        return {'oauth_token': parsed['oauth_token'][0], 'oauth_token_secret': parsed['oauth_token_secret'][0]}

    def execute(self, endpoint, method='get', oauth=None, params=None, files=None, **hooks):
        """
        Actually do the request, and raise EtsyError if an error comes back.
        Returns parsed JSON when the body is JSON, otherwise the raw text
        (e.g. the oauth endpoints return querystrings).
        """
        if oauth:
            # making an authenticated request, add the oauth hook to the
            # request; the api_key param is not sent in this case
            hooks['auth'] = oauth
            params = {} if params is None else dict(params)
        else:
            # merge the api_key in without mutating the caller's dict
            if params is None:
                params = dict(self.params)
            else:
                params = dict(params)
                params.update(self.params)
        querystring = urlencode(params)
        url = "%s%s" % (self.url_base, endpoint)
        if querystring:
            url = "%s?%s" % (url, querystring)
        response = getattr(requests, method)(url, files=files, **hooks)
        if response.status_code > 201:
            # NOTE(review): any status above 201 is treated as an error,
            # including 2xx codes such as 204 -- confirm the Etsy API only
            # ever returns 200/201 on success.
            raise EtsyError('API returned %s response: %s'
                            % (response.status_code, response.text), response)
        try:
            return json.loads(response.text)
        except (TypeError, ValueError):
            return response.text

    def execute_authed(self, endpoint, method='get', params=None, **hooks):
        """Shortcut for execute() signed with the user's full oauth tokens."""
        return self.execute(endpoint, method, oauth=self.full_oauth, params=params, **hooks)

    def iterate_pages(self, f, *p, **d):
        '''
        Iterates through pages in a response.
        Use this method when the response is valid json and has pagination.
        Example:
        pages = e.iterate_pages('execute_authed', '/shops/GreenTurtleTshirts/receipts',
            params={'was_paid': True, 'was_shipped': False})
        for page in pages:
            print(page)
        '''
        f = getattr(self, f)
        r = f(*p, **d)
        yield r
        while r['pagination']['next_page'] is not None:
            if not d:
                d = {}
            if 'params' not in d:
                d['params'] = {}
            d['params']['page'] = r['pagination']['next_page']
            r = f(*p, **d)
            yield r
| 36.230769 | 112 | 0.594806 |
42e109eb76a25424069247c9b529582b0044ded2 | 2,996 | py | Python | SatTrack/tle.py | ed-ortizm/satellite-tracking | 9eb2b4a7f31b43035a425d8e2e51044f2e80712d | [
"MIT"
] | 2 | 2021-10-05T10:04:56.000Z | 2021-10-13T18:31:35.000Z | SatTrack/tle.py | ed-ortizm/satellite-tracking | 9eb2b4a7f31b43035a425d8e2e51044f2e80712d | [
"MIT"
] | 14 | 2021-09-01T12:30:59.000Z | 2022-02-14T18:53:44.000Z | SatTrack/tle.py | ed-ortizm/satellite-tracking | 9eb2b4a7f31b43035a425d8e2e51044f2e80712d | [
"MIT"
] | null | null | null | import datetime
import os
import re
import sys
import urllib
import urllib.request  # plain "import urllib" does not expose urllib.request

from SatTrack.superclasses import FileDirectory
###############################################################################
# CONSTANTS
# Base URL of Celestrak's supplemental TLE sets; per-constellation files
# live at f"{TLE_URL}/{satellite_brand}.txt". (No placeholders here, so no
# f-string prefix is needed.)
TLE_URL = "https://celestrak.com/NORAD/elements/supplemental"
###############################################################################
class TLE(FileDirectory):
    """Downloads Celestrak TLE files and extracts satellite names from them."""

    def __init__(self, satellite_brand: str, directory: str):
        """
        Handles TLE files.

        PARAMETERS
        satellite_brand: name of the satellite constellation, e.g. "oneweb"
        directory: the location where TLE files are stored
        """
        self.satellite_brand = satellite_brand
        self.directory = directory
    ###########################################################################
    def download(self) -> str:
        """
        Downloads the TLE file for the satellite brand passed to the
        constructor from TLE_URL.

        OUTPUTS
        string with the name of the tle file in the format
            "tle_{satellite_brand}_{time_stamp}.txt"
            time_stamp -> "%Y-%m-%d %H:%M:%S"
            example: "tle_oneweb_2021-10-09 16:18:16.txt"
        """
        tle_query = f"{TLE_URL}/{self.satellite_brand}.txt"
        time_stamp = self._get_time_stamp()
        tle_file_name = f"tle_{self.satellite_brand}_{time_stamp}.txt"
        # Make sure the destination directory exists before writing to it.
        super().check_directory(directory=self.directory, exit=False)
        urllib.request.urlretrieve(
            tle_query, f"{self.directory}/{tle_file_name}"
        )
        return tle_file_name
    ###########################################################################
    def get_satellites_from_tle(self, file_location: str) -> list:
        """
        Retrieves the names of satellites present in a tle file.
        The tle file must be stored locally.

        PARAMETERS
        file_location: path of the tle file

        RETURNS
        list with all the satellites available in the tle file
            example: [oneweb-000, ...]
        """
        super().file_exists(file_location, exit=True)
        # oneweb -> ONEWEB
        satellite = self.satellite_brand.upper()
        # Raw string: "\)" is not a valid Python string escape (it raises a
        # SyntaxWarning on modern interpreters); as a regex it matches a
        # literal ")". NOTE(review): the unescaped "." after {satellite} is
        # a regex wildcard matching any separator character -- confirm this
        # looseness is intentional.
        regular_expression = rf"{satellite}-[0-9]*.*\)|{satellite}.[0-9]*"
        pattern = re.compile(regular_expression)
        with open(f"{file_location}", "r") as tle:
            content = tle.read()
            satellites = pattern.findall(content)
        return satellites
    ###########################################################################
    def _get_time_stamp(self) -> str:
        """
        Returns a UTC time stamp for the tle file download: "2021-10-09 16:18:16"
        """
        now = datetime.datetime.now(tz=datetime.timezone.utc)
        time_stamp = f"{now:%Y-%m-%d %H:%M:%S}"
        return time_stamp
    ###########################################################################
    ###########################################################################
| 32.215054 | 79 | 0.496996 |
42e13e620ce8965d49cd0e6e2ae37165c0735674 | 21,970 | py | Python | vinfo/dataset.py | john-hewitt/conditional-probing | bebc90aa0c910395e2370910409076a945279fe0 | [
"Apache-2.0"
] | 13 | 2021-09-21T11:07:33.000Z | 2022-03-25T08:46:46.000Z | vinfo/dataset.py | john-hewitt/conditional-probing | bebc90aa0c910395e2370910409076a945279fe0 | [
"Apache-2.0"
] | 2 | 2021-09-25T15:45:19.000Z | 2021-12-10T15:57:35.000Z | vinfo/dataset.py | john-hewitt/conditional-probing | bebc90aa0c910395e2370910409076a945279fe0 | [
"Apache-2.0"
] | 2 | 2021-09-27T01:21:49.000Z | 2021-09-28T06:08:19.000Z | import os
import h5py
import torch
import torch.nn as nn
from torch.utils.data import Dataset, IterableDataset, DataLoader
import Levenshtein as levenshtein
from tqdm import tqdm
from yaml import YAMLObject
from transformers import AutoTokenizer, AutoModel
from allennlp.modules.elmo import batch_to_ids
from utils import TRAIN_STR, DEV_STR, TEST_STR, InitYAMLObject
BATCH_SIZE = 50
"""
Classes for loading, caching, and yielding text datasets
"""
#class Dataset(Dataset, InitYAMLObject):
# """
# Base class for objects that serve batches of
# tensors. For decoration/explanation only
# """
# yaml_tag = '!Dataset'
class IterableDatasetWrapper(Dataset):#(IterableDataset):
"""
Wrapper class to pass to a DataLoader so it doesn't
think the underlying generator should have a len() fn.
But I gave up on this for various reasons so it's just
a normal dataset, here in case I try again.
"""
def __init__(self, generator):
self.generator = generator #[x for x in generator]
def __iter__(self):
return iter(self.generator)
def __len__(self):
return len(self.generator)
def __getitem__(self, idx):
return self.generator[idx]
class ListDataset(Dataset, InitYAMLObject):
"""
Container class for collecting multiple annotation or
representation datasets and a single target task dataset
, and serving all of them
"""
yaml_tag = '!ListDataset'
def __init__(self, args, data_loader, output_dataset, input_datasets):
"""
Arguments:
output_datset:
"""
self.args = args
self.input_datasets = input_datasets
self.output_dataset = output_dataset
self.data_loader = data_loader
self.train_data = None
self.dev_data = None
self.test_data = None
def get_train_dataloader(self, shuffle=True):
"""Returns a PyTorch DataLoader object with the training data
"""
if self.train_data is None:
self.train_data = list(self.load_data(TRAIN_STR))
#generator = IterableDatasetWrapper(self.load_data(TRAIN_STR))
generator = IterableDatasetWrapper(self.train_data)
return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)
def get_dev_dataloader(self, shuffle=False):
"""Returns a PyTorch DataLoader object with the dev data
"""
if self.dev_data is None:
self.dev_data = list(self.load_data(DEV_STR))
#generator = IterableDatasetWrapper(self.load_data(DEV_STR))
generator = IterableDatasetWrapper(self.dev_data)
return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)
def get_test_dataloader(self, shuffle=False):
"""Returns a PyTorch DataLoader object with the test data
"""
if self.test_data is None:
self.test_data = list(self.load_data(TEST_STR))
#generator = IterableDatasetWrapper(self.load_data(TEST_STR))
generator = IterableDatasetWrapper(self.test_data)
return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)
def load_data(self, split_string):
"""Loads data from disk into RAM tensors for passing to a network on GPU
Iterates through the training set once, passing each sentence to each
input Dataset and the output Dataset
"""
for sentence in tqdm(self.data_loader.yield_dataset(split_string),desc='[loading]'):
input_tensors = []
for dataset in self.input_datasets:
input_tensors.append(dataset.tensor_of_sentence(sentence, split_string))
output_tensor = self.output_dataset.tensor_of_sentence(sentence, split_string)
yield (input_tensors, output_tensor, sentence)
def collate_fn(self, observation_list):
"""
Combines observations (input_tensors, output_tensor, sentence) tuples
input_tensors is of the form ((annotation, alignment), ..., (annotation, alignment))
output_tensor is of the form (annotation, alignment),
to batches of observations ((batches_input_1, batches_input_2), batches_output, sentences)
"""
sentences = (x[2] for x in observation_list)
max_corpus_token_len = max((len(x) for x in sentences))
input_annotation_tensors = []
input_alignment_tensors = []
input_tensor_count = len(observation_list[0][0])
for input_tensor_index in range(input_tensor_count):
max_annotation_token_len = max([x[0][input_tensor_index][0].shape[0] for x in observation_list])
intermediate_annotation_list = []
intermediate_alignment_list = []
for input_annotation, input_alignment in ((x[0][input_tensor_index][0],
x[0][input_tensor_index][1]) for x in observation_list):
if len(input_annotation.shape) == 1: # word-level ids
new_annotation_tensor = torch.zeros(max_annotation_token_len, dtype=torch.long)
new_annotation_tensor[:len(input_annotation)] = input_annotation
elif len(input_annotation.shape) == 2: # characeter-level ids
new_annotation_tensor = torch.zeros(max_annotation_token_len, input_annotation.shape[1]).long()
new_annotation_tensor[:len(input_annotation),:] = input_annotation
intermediate_annotation_list.append(new_annotation_tensor)
new_alignment_tensor = torch.zeros(max_annotation_token_len, max_corpus_token_len)
new_alignment_tensor[:input_alignment.shape[0], :input_alignment.shape[1]] = input_alignment
intermediate_alignment_list.append(new_alignment_tensor)
input_annotation_tensors.append(torch.stack(intermediate_annotation_list).to(self.args['device']))
input_alignment_tensors.append(torch.stack(intermediate_alignment_list).to(self.args['device']))
intermediate_annotation_list = []
intermediate_alignment_list = []
max_output_annotation_len = max([x[1][0].shape[0] for x in observation_list])
for output_annotation, output_alignment in (x[1] for x in observation_list):
new_annotation_tensor = torch.zeros(max_output_annotation_len, dtype=torch.long)
new_annotation_tensor[:len(output_annotation)] = output_annotation
intermediate_annotation_list.append(new_annotation_tensor)
output_annotation_tensor = torch.stack(intermediate_annotation_list).to(self.args['device'])
sentences = [x[2] for x in observation_list]
return ((input_annotation_tensors, input_alignment_tensors), output_annotation_tensor, sentences)
class ELMoData(InitYAMLObject):
  """
  Loading and serving minibatches of tokens to input to
  ELMo, as mediated by allennlp.
  """
  yaml_tag = '!ELMoData'
  def __init__(self, args):
    # args: global experiment config; only stored here.
    self.args = args
  def tensor_of_sentence(self, sentence, split_string):
    """
    Provides character indices for a single sentence.

    Returns (char_ids, alignment): char_ids is allennlp's character-id
    tensor for the sentence's tokens; alignment is the identity matrix,
    since ELMo yields exactly one representation per corpus token.
    split_string is unused, kept for interface parity with the other
    *Data classes.
    """
    # Field 1 of each corpus token tuple is (presumably) the word form
    # -- matches the usage in HuggingfaceData; confirm against the loader.
    words = [x[1] for x in sentence]
    alignment = torch.eye(len(words))
    return batch_to_ids([words])[0,:,:], alignment
    #for index, token in enumerate([x[1] for x in sentence]):
class HuggingfaceData(InitYAMLObject):
"""
Loading and serving minibatches of tokens to input
to a Huggingface-loaded model.
"""
yaml_tag = '!HuggingfaceData'
  def __init__(self, args, model_string, cache=None):
    """
    args: global experiment config; must provide args['device']
        (used by hface_ontonotes_alignment).
    model_string: Huggingface model identifier; used to load the tokenizer
        and to namespace this task's cache files.
    cache: optional disk-cache object; see _setup_cache.
    """
    print('Constructing HuggingfaceData of {}'.format(model_string))
    self.tokenizer = AutoTokenizer.from_pretrained(model_string) #, add_prefix_space=True)
    self.args = args
    self.cache = cache
    self.task_name = 'hfacetokens.{}'.format(model_string)
    # Cache readers/writers are built lazily on first tensor_of_sentence call.
    self.cache_is_setup = False
def levenshtein_matrix(self, string1, string2):
opcodes = levenshtein.opcodes(string1, string2)
mtx = torch.zeros(len(string1), len(string2))
cumulative = 0
for opcode in opcodes:
opcode_type, str1b, str1e, str2b, str2e = opcode
if opcode_type in {'equal', 'replace'}:
diff = str1e - str1b
for i in range(diff):
mtx[str1b+i,str2b+i] = 1
if opcode_type == 'delete':
diff = str1e - str1b
for i in range(diff):
mtx[str1b+i, str2b] = 1
if opcode_type == 'insert':
diff = str2e - str2b
for i in range(diff):
mtx[str1b, str2b+i] = 1
return mtx
  def token_to_character_alignment(self, tokens):
    """
    Aligns each token to the characters of the tokens' concatenation.

    tokens: list of strings, each possibly carrying a trailing space.

    Returns a (num_tokens, num_characters) tensor in which row t puts
    uniform weight on the non-space characters of token t (rows sum
    to 1; space characters get weight 0).
    """
    ptb_sentence_length = sum((len(tok) for tok in tokens))
    ptb_string_token_alignment = []
    cumulative = 0  # character offset of the current token in the string
    for token in tokens:
      new_alignment = torch.zeros(ptb_sentence_length)
      for i, char in enumerate(token):
        if char == ' ':
          continue
        new_alignment[i+cumulative] = 1
      # normalize so each token's row distributes unit mass
      new_alignment = new_alignment / sum(new_alignment)
      cumulative += len(token)
      ptb_string_token_alignment.append(new_alignment)
    return torch.stack(ptb_string_token_alignment)
  def de_ptb_tokenize(self, tokens):
    """
    Converts PTB-normalized tokens back to (approximately) raw text.

    Replaces PTB escapes (LaTeX-style quotes, -LRB-/-RRB- style brackets)
    with their literal characters and decides, per token, whether a space
    should follow it (no space before clitics, closing punctuation, etc.).

    tokens: list of PTB tokens, each possibly carrying a trailing space.

    Returns (new_tokens_with_spaces, token_alignments):
      new_tokens_with_spaces: de-PTB'd tokens with trailing spaces
        attached where they belong;
      token_alignments: a (num_raw_characters, num_ptb_characters)
        tensor; each character of a raw token is aligned to all the
        characters of the PTB token it came from.
    """
    tokens_with_spaces = []
    new_tokens_with_spaces = []
    ptb_sentence_length = sum((len(tok) for tok in tokens))
    token_alignments = []
    cumulative = 0
    for i, _ in enumerate(tokens):
      token = tokens[i]
      next_token = tokens[i+1] if i < len(tokens)-1 else '<EOS>'
      # Handle LaTeX-style quotes
      if token.strip() in {"``", "''"}:
        new_token = '"'
      elif token.strip() == '-LRB-':
        new_token = '('
      elif token.strip() == '-RRB-':
        new_token = ')'
      elif token.strip() == '-LSB-':
        new_token = '['
      elif token.strip() == '-RSB-':
        new_token = ']'
      elif token.strip() == '-LCB-':
        new_token = '{'
      elif token.strip() == '-RCB-':
        new_token = '}'
      else:
        new_token = token
      # No space after openers, before clitics/closers/punctuation, or
      # after the final token.
      use_space = (token.strip() not in {'(', '[', '{', '"', "'", '``', "''"} and
                   next_token.strip() not in {"'ll", "'re", "'ve", "n't",
                                              "'s", "'LL", "'RE", "'VE",
                                              "N'T", "'S", '"', "'", '``', "''", ')', '}', ']',
                                              '.', ';', ':', '!', '?'}
                   and i != len(tokens) - 1)
      new_token = new_token.strip() + (' ' if use_space else '')
      new_tokens_with_spaces.append(new_token)
      tokens_with_spaces.append(token)
      # One unnormalized row marking all PTB characters of this token...
      new_alignment = torch.zeros(ptb_sentence_length)
      for index, char in enumerate(token):
        new_alignment[index+cumulative] = 1
      #new_alignment = new_alignment / sum(new_alignment)
      # ...repeated once per character of the de-PTB'd token.
      for new_char in new_token:
        token_alignments.append(new_alignment)
      cumulative += len(token)
    return new_tokens_with_spaces, torch.stack(token_alignments)
  def hface_ontonotes_alignment(self, sentence):
    """
    Aligns Huggingface subword tokens to the corpus (PTB-style) tokens
    of a sentence.

    Composes subword->raw-characters, raw-characters->PTB-characters
    (via Levenshtein alignment of the two strings), and
    PTB-characters->PTB-tokens alignments into one matrix, then
    column-normalizes so each corpus token's mass sums to 1.

    Returns (alignment, hface_tokens, raw_string); the alignment is
    returned on CPU.
    """
    # Field 1 of each corpus token tuple is the word form; re-attach
    # inter-token spaces (none after the last token).
    tokens = [x[1] for x in sentence]
    tokens = [ x + (' ' if i !=len(tokens)-1 else '') for (i, x) in enumerate(tokens)]
    raw_tokens, ptb_to_deptb_alignment = self.de_ptb_tokenize(tokens)
    raw_string = ''.join(raw_tokens)
    ptb_token_to_ptb_string_alignment = self.token_to_character_alignment(tokens)
    #tokenizer = transformers.AutoTokenizer.from_pretrained('roberta-base')
    hface_tokens = self.tokenizer.tokenize(raw_string)
    hface_tokens_with_spaces = [x+ (' ' if i != len(hface_tokens)-1 else '')for (i, x) in enumerate(hface_tokens)]
    hface_token_to_hface_string_alignment = self.token_to_character_alignment(hface_tokens_with_spaces)
    hface_string = ' '.join(hface_tokens)
    # Bridge the subword string and the raw string at character level.
    hface_character_to_deptb_character_alignment = self.levenshtein_matrix(hface_string, raw_string)
    unnormalized_alignment = torch.matmul(torch.matmul(hface_token_to_hface_string_alignment.to(self.args['device']), hface_character_to_deptb_character_alignment.to(self.args['device'])),
                                          torch.matmul(ptb_token_to_ptb_string_alignment.to(self.args['device']), ptb_to_deptb_alignment.to(self.args['device']).t()).t())
    return (unnormalized_alignment / torch.sum(unnormalized_alignment, dim=0)).cpu(), hface_tokens, raw_string
  def _setup_cache(self):
    """
    Constructs readers for caches that exist
    and writers for caches that do not.

    Idempotent: returns immediately when no cache is configured or when
    setup already ran. If any split's cache is neither readable nor
    writeable, caching is disabled entirely (self.cache set to None).
    """
    if self.cache is None:
      return
    if self.cache_is_setup:
      return
    # Check cache readable/writeable
    train_cache_path, train_cache_readable, train_cache_writeable = \
        self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)
    dev_cache_path, dev_cache_readable, dev_cache_writeable = \
        self.cache.get_cache_path_and_check(DEV_STR, self.task_name)
    test_cache_path, test_cache_readable, test_cache_writeable = \
        self.cache.get_cache_path_and_check(TEST_STR, self.task_name)
    # If any of the train/dev/test are neither readable nor writeable, do not use cache.
    if ((not train_cache_readable and not train_cache_writeable) or
        (not dev_cache_readable and not dev_cache_writeable) or
        (not test_cache_readable and not test_cache_writeable)):
      self.cache = None
      print("Not using the cache at all, since at least of one "
            "of {train,dev,test} cache neither readable nor writable.")
      return
    # Load readers or writers
    self.train_cache_writer = None
    self.dev_cache_writer = None
    self.test_cache_writer = None
    # NOTE: the readers below are one-shot generators, consumed one
    # sentence at a time (in corpus order) by tensor_of_sentence.
    if train_cache_readable:
      f = h5py.File(train_cache_path, 'r')
      self.train_cache_tokens = (torch.tensor(f[str(i)+'tok'][()]) for i in range(len(f.keys())))
      self.train_cache_alignments = (torch.tensor(f[str(i)+'aln'][()]) for i in range(len(f.keys())))
    elif train_cache_writeable:
      #self.train_cache_writer = h5py.File(train_cache_path, 'w')
      self.train_cache_writer = self.cache.get_hdf5_cache_writer(train_cache_path)
      self.train_cache_tokens = None
      self.train_cache_alignments = None
    else:
      raise ValueError("Train cache neither readable nor writeable")
    if dev_cache_readable:
      f2 = h5py.File(dev_cache_path, 'r')
      self.dev_cache_tokens = (torch.tensor(f2[str(i)+'tok'][()]) for i in range(len(f2.keys())))
      self.dev_cache_alignments = (torch.tensor(f2[str(i)+'aln'][()]) for i in range(len(f2.keys())))
    elif dev_cache_writeable:
      #self.dev_cache_writer = h5py.File(dev_cache_path, 'w')
      self.dev_cache_writer = self.cache.get_hdf5_cache_writer(dev_cache_path)
      self.dev_cache_tokens = None
      self.dev_cache_alignments = None
    else:
      raise ValueError("Dev cache neither readable nor writeable")
    if test_cache_readable:
      f3 = h5py.File(test_cache_path, 'r')
      self.test_cache_tokens = (torch.tensor(f3[str(i)+'tok'][()]) for i in range(len(f3.keys())))
      self.test_cache_alignments = (torch.tensor(f3[str(i)+'aln'][()]) for i in range(len(f3.keys())))
    elif test_cache_writeable:
      #self.test_cache_writer = h5py.File(test_cache_path, 'w')
      self.test_cache_writer = self.cache.get_hdf5_cache_writer(test_cache_path)
      self.test_cache_tokens = None
      self.test_cache_alignments = None
    else:
      raise ValueError("Test cache neither readable nor writeable")
    self.cache_is_setup = True
def tensor_of_sentence(self, sentence, split):
    """
    Return (wordpiece_indices, alignments) for a sentence, reading from or
    writing to the per-split cache when one is configured.
    """
    self._setup_cache()
    if self.cache is None:
        return self._tensor_of_sentence(sentence, split)
    # Serve from a cache reader when one exists for this split.
    readers = {
        TRAIN_STR: (self.train_cache_tokens, self.train_cache_alignments),
        DEV_STR: (self.dev_cache_tokens, self.dev_cache_alignments),
        TEST_STR: (self.test_cache_tokens, self.test_cache_alignments),
    }
    tokens_reader, alignments_reader = readers.get(split, (None, None))
    if tokens_reader is not None:
        return next(tokens_reader), next(alignments_reader)
    # Otherwise compute fresh tensors and append them to the cache writer.
    writers = {TRAIN_STR: self.train_cache_writer,
               DEV_STR: self.dev_cache_writer,
               TEST_STR: self.test_cache_writer}
    cache_writer = writers.get(split)
    if cache_writer is None:
        raise ValueError("Unknown split: {}".format(split))
    wordpiece_indices, alignments = self._tensor_of_sentence(sentence, split)
    for suffix, tensor in (('tok', wordpiece_indices), ('aln', alignments)):
        # Datasets are keyed '<running index><suffix>'; the index is the
        # count of datasets already stored with this suffix.
        count = sum(1 for key in cache_writer.keys() if suffix in key)
        dset = cache_writer.create_dataset(str(count) + suffix, tensor.shape)
        dset[:] = tensor
    return wordpiece_indices, alignments
def _tensor_of_sentence(self, sentence, split):
    """
    Tokenize a sentence's raw string and pad its token-alignment matrix
    with zero rows for the special [CLS]/[SEP] positions.
    """
    alignment, _, raw_string = self.hface_ontonotes_alignment(sentence)
    # One all-zero row on each side accounts for [CLS] and [SEP].
    pad_row = torch.zeros(1, alignment.shape[1])
    padded_alignment = torch.cat((pad_row, alignment, pad_row))
    token_ids = torch.tensor(self.tokenizer(raw_string).input_ids)
    return token_ids, padded_alignment
def _naive_tensor_of_sentence(self, sentence, split_string):
    """
    Converts a tuple-formatted sentence (e.g. from conll-formatted data)
    to a Torch tensor of subword piece ids for input to a
    Huggingface-formatted neural model, plus a one-hot alignment matrix
    mapping each wordpiece row to its source token.
    """
    n_tokens = len(sentence)
    pieces = []
    # Zero row for the [CLS] token the tokenizer prepends during encode().
    alignment_rows = [torch.zeros(n_tokens)]
    for position, word in enumerate(token[1] for token in sentence):
        word_pieces = self.tokenizer.tokenize(word)
        one_hot = torch.zeros(n_tokens)
        one_hot[position] = 1
        # Every piece of this word aligns to the same source token.
        alignment_rows.extend(torch.clone(one_hot) for _ in word_pieces)
        pieces.extend(word_pieces)
    piece_ids = torch.tensor(self.tokenizer.encode(pieces))
    # Zero row for the trailing [SEP] token.
    alignment_rows.append(torch.zeros(n_tokens))
    return piece_ids, torch.stack(alignment_rows)
class AnnotationData(InitYAMLObject):
    """
    Loads and serves minibatches of label data computed from annotations.
    """
    yaml_tag = '!AnnotationDataset'

    def __init__(self, args, task):
        self.args = args
        self.task = task

    def tensor_of_sentence(self, sentence, split_string):
        """
        Converts a tuple-formatted sentence (e.g. conll-formatted data) to a
        Torch tensor of integers representing the annotation, paired with an
        identity alignment (each label maps to exactly one token).
        """
        identity_alignment = torch.eye(len(sentence))
        labels = self.task.labels_of_sentence(sentence, split_string)
        return labels, identity_alignment
class Loader(InitYAMLObject):
    """
    Abstract base for dataset readers that load data from disk and yield
    sentence buffers for downstream tokenization and labeling.

    Exists purely for description/YAML typing; concrete readers subclass it.
    """
    yaml_tag = '!Loader'
class OntonotesReader(Loader):
    """
    Reads the Ontonotes dataset, formatted as described in the readme,
    yielding one buffer of TSV field-lists per sentence.
    """
    yaml_tag = '!OntonotesReader'

    def __init__(self, args, train_path, dev_path, test_path, cache):
        print('Constructing OntoNotesReader')
        self.train_path = train_path
        self.dev_path = dev_path
        self.test_path = test_path
        self.cache = cache

    @staticmethod
    def sentence_lists_of_stream(ontonotes_stream):
        """
        Yield sentence buffers from a raw ontonotes stream.

        Arguments:
            ontonotes_stream: iterable of ontonotes file lines
        Yields:
            one buffer per sentence; each element of a buffer is the list
            of stripped TSV fields of a line
        """
        sentence = []
        for line in ontonotes_stream:
            # Comment lines are ignored entirely.
            if line.startswith('#'):
                continue
            if line.strip():
                sentence.append([field.strip() for field in line.split('\t')])
            else:
                # A blank line terminates the current sentence buffer.
                yield sentence
                sentence = []
        if sentence:
            yield sentence

    def yield_dataset(self, split_string):
        """
        Yield a list of attribute lines, given by ontonotes_fields,
        for each sentence in the requested split of ontonotes.
        """
        paths = {TRAIN_STR: self.train_path,
                 DEV_STR: self.dev_path,
                 TEST_STR: self.test_path}
        path = paths.get(split_string)
        if path is None:
            raise ValueError("Unknown split string: {}".format(split_string))
        with open(path) as fin:
            yield from OntonotesReader.sentence_lists_of_stream(fin)
class SST2Reader(Loader):
    """
    Minutae for reading the Stanford Sentiment (SST-2)
    dataset, as downloaded from the GLUE website.
    """
    yaml_tag = '!SST2Reader'

    def __init__(self, args, train_path, dev_path, test_path, cache):
        print('Constructing SST2Reader')
        self.train_path = train_path
        self.dev_path = dev_path
        self.test_path = test_path
        self.cache = cache

    @staticmethod
    def sentence_lists_of_stream(sst2_stream):
        """
        Yield sentences from a raw sst2 stream.

        Arguments:
            sst2_stream: iterable of sst2_stream lines (TSV with a header
                row of column labels)
        Yields:
            a buffer for each sentence in the stream; each buffer element
            is an (index, word, label) tuple, with the sentence-level label
            repeated for every word
        """
        # Skip the header row of column labels. BUGFIX: a bare
        # next(sst2_stream) raises StopIteration on an empty stream, which
        # PEP 479 turns into a RuntimeError inside a generator; supplying a
        # default makes an empty stream simply yield nothing.
        next(sst2_stream, None)
        for line in sst2_stream:
            word_string, label_string = [x.strip() for x in line.split('\t')]
            word_tokens = word_string.split(' ')
            indices = [str(i) for i, _ in enumerate(word_tokens)]
            label_tokens = [label_string for _ in word_tokens]
            yield list(zip(indices, word_tokens, label_tokens))

    def yield_dataset(self, split_string):
        """
        Yield a list of (index, word, label) tuples for each sentence in
        the requested split of SST-2.

        Raises:
            ValueError: if split_string names an unknown split.
        """
        path = (self.train_path if split_string == TRAIN_STR else
                (self.dev_path if split_string == DEV_STR else
                (self.test_path if split_string == TEST_STR else
                None)))
        if path is None:
            raise ValueError("Unknown split string: {}".format(split_string))
        with open(path) as fin:
            for sentence in SST2Reader.sentence_lists_of_stream(fin):
                yield sentence
| 40.238095 | 188 | 0.697679 |
42e5f4e73286a21248ce1ddfed9d74e5f150cd54 | 7,012 | py | Python | accelbyte_py_sdk/api/basic/models/a_dto_object_for_equ8_user_status.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/basic/models/a_dto_object_for_equ8_user_status.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/basic/models/a_dto_object_for_equ8_user_status.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# justice-basic-service (1.36.3)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
class ADTOObjectForEqu8UserStatus(Model):
    """A DTO object for equ8 user status (A DTO object for equ8 user status)

    Properties:
        action_comment: (actionComment) OPTIONAL str
        action_id: (actionId) OPTIONAL int
        expires: (expires) OPTIONAL str
        user_id: (userId) OPTIONAL str
        when: (when) OPTIONAL str
    """

    # region fields

    action_comment: str  # OPTIONAL
    action_id: int  # OPTIONAL
    expires: str  # OPTIONAL
    user_id: str  # OPTIONAL
    when: str  # OPTIONAL

    # endregion fields

    # region with_x methods

    def with_action_comment(self, value: str) -> ADTOObjectForEqu8UserStatus:
        self.action_comment = value
        return self

    def with_action_id(self, value: int) -> ADTOObjectForEqu8UserStatus:
        self.action_id = value
        return self

    def with_expires(self, value: str) -> ADTOObjectForEqu8UserStatus:
        self.expires = value
        return self

    def with_user_id(self, value: str) -> ADTOObjectForEqu8UserStatus:
        self.user_id = value
        return self

    def with_when(self, value: str) -> ADTOObjectForEqu8UserStatus:
        self.when = value
        return self

    # endregion with_x methods

    # region to methods

    def to_dict(self, include_empty: bool = False) -> dict:
        """Serialize the fields that have been set to their wire (camelCase) names."""
        result: dict = {}
        if hasattr(self, "action_comment"):
            result["actionComment"] = str(self.action_comment)
        elif include_empty:
            result["actionComment"] = ""
        if hasattr(self, "action_id"):
            result["actionId"] = int(self.action_id)
        elif include_empty:
            result["actionId"] = 0
        if hasattr(self, "expires"):
            result["expires"] = str(self.expires)
        elif include_empty:
            result["expires"] = ""
        if hasattr(self, "user_id"):
            result["userId"] = str(self.user_id)
        elif include_empty:
            result["userId"] = ""
        if hasattr(self, "when"):
            result["when"] = str(self.when)
        elif include_empty:
            result["when"] = ""
        return result

    # endregion to methods

    # region static methods

    @classmethod
    def create(
        cls,
        action_comment: Optional[str] = None,
        action_id: Optional[int] = None,
        expires: Optional[str] = None,
        user_id: Optional[str] = None,
        when: Optional[str] = None,
    ) -> ADTOObjectForEqu8UserStatus:
        instance = cls()
        if action_comment is not None:
            instance.action_comment = action_comment
        if action_id is not None:
            instance.action_id = action_id
        if expires is not None:
            instance.expires = expires
        if user_id is not None:
            instance.user_id = user_id
        if when is not None:
            instance.when = when
        return instance

    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ADTOObjectForEqu8UserStatus:
        instance = cls()
        if not dict_:
            return instance
        if "actionComment" in dict_ and dict_["actionComment"] is not None:
            instance.action_comment = str(dict_["actionComment"])
        elif include_empty:
            instance.action_comment = ""
        if "actionId" in dict_ and dict_["actionId"] is not None:
            instance.action_id = int(dict_["actionId"])
        elif include_empty:
            instance.action_id = 0
        if "expires" in dict_ and dict_["expires"] is not None:
            instance.expires = str(dict_["expires"])
        elif include_empty:
            instance.expires = ""
        if "userId" in dict_ and dict_["userId"] is not None:
            instance.user_id = str(dict_["userId"])
        elif include_empty:
            instance.user_id = ""
        if "when" in dict_ and dict_["when"] is not None:
            instance.when = str(dict_["when"])
        elif include_empty:
            instance.when = ""
        return instance

    @classmethod
    def create_many_from_dict(cls, dict_: dict, include_empty: bool = False) -> Dict[str, ADTOObjectForEqu8UserStatus]:
        # BUGFIX: iterate key/value pairs via .items(); iterating the dict
        # directly yields only its keys, so the `k, v` unpacking was wrong.
        return {k: cls.create_from_dict(v, include_empty=include_empty) for k, v in dict_.items()} if dict_ else {}

    @classmethod
    def create_many_from_list(cls, list_: list, include_empty: bool = False) -> List[ADTOObjectForEqu8UserStatus]:
        return [cls.create_from_dict(i, include_empty=include_empty) for i in list_] if list_ else []

    @classmethod
    def create_from_any(cls, any_: any, include_empty: bool = False, many: bool = False) -> Union[ADTOObjectForEqu8UserStatus, List[ADTOObjectForEqu8UserStatus], Dict[Any, ADTOObjectForEqu8UserStatus]]:
        if many:
            if isinstance(any_, dict):
                return cls.create_many_from_dict(any_, include_empty=include_empty)
            elif isinstance(any_, list):
                return cls.create_many_from_list(any_, include_empty=include_empty)
            else:
                raise ValueError()
        else:
            return cls.create_from_dict(any_, include_empty=include_empty)

    @staticmethod
    def get_field_info() -> Dict[str, str]:
        # Wire (camelCase) name -> attribute (snake_case) name.
        return {
            "actionComment": "action_comment",
            "actionId": "action_id",
            "expires": "expires",
            "userId": "user_id",
            "when": "when",
        }

    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        # All fields on this DTO are optional.
        return {
            "actionComment": False,
            "actionId": False,
            "expires": False,
            "userId": False,
            "when": False,
        }

    # endregion static methods
| 35.06 | 202 | 0.592698 |
42e6a0854dc4ea36c5a33692e83aa3d38c0f49cc | 2,505 | py | Python | function/python/brightics/function/statistics/test/correlation_test.py | parkjh80/studio | 6d8d8384272e5e1b2838b12e5557272a19408e89 | [
"Apache-2.0"
] | 202 | 2018-10-23T04:37:35.000Z | 2022-01-27T05:51:10.000Z | function/python/brightics/function/statistics/test/correlation_test.py | data-weirdo/studio | 48852c4f097f773ce3d408b59f79fda2e2d60470 | [
"Apache-2.0"
] | 444 | 2018-11-07T08:41:14.000Z | 2022-03-16T06:48:57.000Z | function/python/brightics/function/statistics/test/correlation_test.py | data-weirdo/studio | 48852c4f097f773ce3d408b59f79fda2e2d60470 | [
"Apache-2.0"
] | 99 | 2018-11-08T04:12:13.000Z | 2022-03-30T05:36:27.000Z | """
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from brightics.function.statistics import correlation
from brightics.common.datasets import load_iris
import HtmlTestRunner
import os
class CorrelationTest(unittest.TestCase):
    """Regression tests for the correlation statistics function on iris."""

    def setUp(self):
        print("*** Correlation UnitTest Start ***")
        self.testdata = load_iris()

    def tearDown(self):
        print("*** Correlation UnitTest End ***")

    def test_first(self):
        res = correlation(self.testdata, vars=['sepal_length', 'sepal_width'],
                          method='pearson', display_plt=True, height=2.5, corr_prec=2)
        row = res['result']['corr_table'].values[0]
        np.testing.assert_equal(row[0], 'sepal_width')
        np.testing.assert_equal(row[1], 'sepal_length')
        np.testing.assert_almost_equal(row[2], -0.10936924995064935, 10)
        np.testing.assert_almost_equal(row[3], 0.1827652152713665, 10)

    def test_second(self):
        res = correlation(self.testdata, vars=['sepal_width', 'petal_length', 'petal_width'],
                          method='spearman', display_plt=False, height=2.5, corr_prec=2)
        table = res['result']['corr_table'].values
        # (correlation coefficient, p-value) expected for each pair, in order.
        expected = [(-0.3034206463815157, 0.0001603809454660342),
                    (-0.2775110724763029, 0.0005856929405699988),
                    (0.9360033509355782, 5.383649646072797e-69)]
        for row, (corr_value, p_value) in zip(table, expected):
            np.testing.assert_almost_equal(row[2], corr_value, 10)
            np.testing.assert_almost_equal(row[3], p_value, 10)
if __name__ == '__main__':
    # Write the combined HTML report into the repository-level reports folder.
    this_dir = os.path.dirname(os.path.abspath(__file__))
    report_folder = this_dir + "/../../../../../../../reports"
    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True,
                                                           output=report_folder))
| 41.065574 | 155 | 0.683433 |
42e77bb6f8a615aa18b12b83385ee014877a332f | 340 | py | Python | fdp/__init__.py | cffbots/fairdatapoint | 6142b31408b5746d1a7e9f59e61735b7ad8bfde9 | [
"Apache-2.0"
] | 9 | 2020-03-27T12:58:51.000Z | 2021-01-21T16:22:46.000Z | fdp/__init__.py | MaastrichtU-IDS/fairdatapoint | f9f38903a629acbdb74a6a20014ac424cc3d3206 | [
"Apache-2.0"
] | 26 | 2016-05-26T22:22:34.000Z | 2020-02-13T07:12:37.000Z | fdp/__init__.py | MaastrichtU-IDS/fairdatapoint | f9f38903a629acbdb74a6a20014ac424cc3d3206 | [
"Apache-2.0"
] | 4 | 2020-06-09T18:37:33.000Z | 2020-12-16T08:05:01.000Z | # -*- coding: utf-8 -*-
import logging
from .__version__ import __version__
logging.getLogger(__name__).addHandler(logging.NullHandler())
__author__ = "Rajaram Kaliyaperumal, Arnold Kuzniar, Cunliang Geng, Carlos Martinez-Ortiz"
__email__ = 'c.martinez@esciencecenter.nl'
__status__ = 'beta'
__license__ = 'Apache License, Version 2.0'
| 26.153846 | 90 | 0.770588 |
42e78c22b92189ac4df049c1a2d85684f40079f9 | 358 | py | Python | my_site/blog/admin.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | my_site/blog/admin.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | my_site/blog/admin.py | sidharth-lucy/Blog | 33afd31faf5a1da44e050b13e3364b419f108c7f | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Post,Author,Tag
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for blog posts."""
    list_display = ('title', 'date', 'author')
    list_filter = ('author', 'tags', 'date')
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = {'slug': ('title',)}


admin.site.register(Author)
admin.site.register(Tag)
| 23.866667 | 43 | 0.72905 |
42e887c8fdf1e23d81a9463a69d52200b7a5826e | 67 | py | Python | pyrallest/__init__.py | ivancrneto/pyrallest | 158780c418ae276935fb155e82b18db242cd98e5 | [
"MIT"
] | null | null | null | pyrallest/__init__.py | ivancrneto/pyrallest | 158780c418ae276935fb155e82b18db242cd98e5 | [
"MIT"
] | null | null | null | pyrallest/__init__.py | ivancrneto/pyrallest | 158780c418ae276935fb155e82b18db242cd98e5 | [
"MIT"
] | null | null | null |
def main():
    """Print the package's hello-world banner."""
    banner = 'This is the very beginning of pyrallest'
    print(banner)
| 13.4 | 52 | 0.671642 |
42e8e15830841aa965ec225fd7e1715fe1c14fdd | 60,795 | py | Python | fluids/flow_meter.py | rddaz2013/fluids | acde6a6edc2110c152c59341574739b24a2f1bad | [
"MIT"
] | null | null | null | fluids/flow_meter.py | rddaz2013/fluids | acde6a6edc2110c152c59341574739b24a2f1bad | [
"MIT"
] | null | null | null | fluids/flow_meter.py | rddaz2013/fluids | acde6a6edc2110c152c59341574739b24a2f1bad | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2018 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import cos, sin, tan, atan, pi, radians, exp, acos, log10
import numpy as np
from fluids.friction import friction_factor
from fluids.core import Froude_densimetric
from scipy.optimize import newton, brenth
from scipy.constants import g, inch
__all__ = ['C_Reader_Harris_Gallagher',
'differential_pressure_meter_solver',
'differential_pressure_meter_dP',
'orifice_discharge', 'orifice_expansibility',
'Reader_Harris_Gallagher_discharge',
'discharge_coefficient_to_K', 'K_to_discharge_coefficient',
'dP_orifice', 'velocity_of_approach_factor',
'flow_coefficient', 'nozzle_expansibility',
'C_long_radius_nozzle', 'C_ISA_1932_nozzle', 'C_venturi_nozzle',
'orifice_expansibility_1989', 'dP_venturi_tube',
'diameter_ratio_cone_meter', 'diameter_ratio_wedge_meter',
'cone_meter_expansibility_Stewart', 'dP_cone_meter',
'C_wedge_meter_Miller',
'C_Reader_Harris_Gallagher_wet_venturi_tube',
'dP_Reader_Harris_Gallagher_wet_venturi_tube'
]
# Orifice plate hole geometries
CONCENTRIC_ORIFICE = 'concentric'
ECCENTRIC_ORIFICE = 'eccentric'
SEGMENTAL_ORIFICE = 'segmental'
CONDITIONING_4_HOLE_ORIFICE = 'Rosemount 4 hole self conditioing'
ORIFICE_HOLE_TYPES = [CONCENTRIC_ORIFICE, ECCENTRIC_ORIFICE, SEGMENTAL_ORIFICE,
                      CONDITIONING_4_HOLE_ORIFICE]

# Orifice pressure tap locations
ORIFICE_CORNER_TAPS = 'corner'
ORIFICE_FLANGE_TAPS = 'flange'
ORIFICE_D_AND_D_2_TAPS = 'D and D/2'

# Differential pressure meter types supported by the solver interface.
# NOTE(review): 'venuri' below looks like a typo for 'venturi', but the
# string is part of the public constant value and must not be changed here.
ISO_5167_ORIFICE = 'ISO 5167 orifice'
LONG_RADIUS_NOZZLE = 'long radius nozzle'
ISA_1932_NOZZLE = 'ISA 1932 nozzle'
VENTURI_NOZZLE = 'venuri nozzle'
AS_CAST_VENTURI_TUBE = 'as cast convergent venturi tube'
MACHINED_CONVERGENT_VENTURI_TUBE = 'machined convergent venturi tube'
ROUGH_WELDED_CONVERGENT_VENTURI_TUBE = 'rough welded convergent venturi tube'
CONE_METER = 'cone meter'
WEDGE_METER = 'wedge meter'
__all__.extend(['ISO_5167_ORIFICE', 'LONG_RADIUS_NOZZLE', 'ISA_1932_NOZZLE',
                'VENTURI_NOZZLE', 'AS_CAST_VENTURI_TUBE',
                'MACHINED_CONVERGENT_VENTURI_TUBE',
                'ROUGH_WELDED_CONVERGENT_VENTURI_TUBE', 'CONE_METER',
                'WEDGE_METER'])
def orifice_discharge(D, Do, P1, P2, rho, C, expansibility=1.0):
    r'''Calculate the mass flow rate through an orifice plate from the plate
    geometry, the measured tap pressures, and the upstream fluid density.

    .. math::
        m = \left(\frac{\pi D_o^2}{4}\right) C \frac{\sqrt{2\Delta P \rho_1}}
        {\sqrt{1 - \beta^4}}\cdot \epsilon

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    C : float
        Coefficient of discharge of the orifice, [-]
    expansibility : float, optional
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Returns
    -------
    m : float
        Mass flow rate of fluid, [kg/s]

    Notes
    -----
    This is formula 1-12 in ASME MFC-3M-2004 and appears in ISO 5167-2:2003.

    Examples
    --------
    >>> orifice_discharge(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, rho=1.1646,
    ... C=0.5988, expansibility=0.9975)
    0.01120390943807026

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta = Do/D
    orifice_area = 0.25*pi*Do*Do
    dP = P1 - P2
    velocity_of_approach = (1.0 - beta**4)**0.5
    return orifice_area*C*expansibility*(2.0*dP*rho)**0.5/velocity_of_approach
def orifice_expansibility(D, Do, P1, P2, k):
    r'''Calculate the expansibility factor for an orifice plate from its
    geometry, the measured tap pressures, and the isentropic exponent.

    .. math::
        \epsilon = 1 - (0.351 + 0.256\beta^4 + 0.93\beta^8)
        \left[1-\left(\frac{P_2}{P_1}\right)^{1/\kappa}\right]

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float, optional
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    The correlation was fitted for P2/P1 >= 0.80 with air, steam, and natural
    gas, but there is no objection to using it for other fluids.

    Examples
    --------
    >>> orifice_expansibility(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9974739057343425

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    '''
    beta = Do/D
    beta4 = beta*beta*beta*beta
    geometry_term = 0.351 + 0.256*beta4 + 0.93*beta4*beta4
    pressure_term = 1.0 - (P2/P1)**(1.0/k)
    return 1.0 - geometry_term*pressure_term
def orifice_expansibility_1989(D, Do, P1, P2, k):
    r'''Calculate the expansibility factor for an orifice plate using the
    older ASME MFC-3M-1989 correlation, from the plate geometry, measured
    tap pressures, and isentropic exponent of the fluid.

    .. math::
        \epsilon = 1- (0.41 + 0.35\beta^4)\Delta P/\kappa/P_1

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    Fitted for P2/P1 >= 0.75 with air, steam, and natural gas; there is no
    objection to using it for other fluids. The standard also gives an
    equivalent transformation in terms of the downstream pressure:

    .. math::
        \epsilon_2 = \sqrt{1 + \frac{\Delta P}{P_2}} - (0.41 + 0.35\beta^4)
        \frac{\Delta P}{\kappa P_2 \sqrt{1 + \frac{\Delta P}{P_2}}}

    Miller recommends this formulation for wedge meters as well.

    Examples
    --------
    >>> orifice_expansibility_1989(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9970510687411718

    References
    ----------
    .. [1] American Society of Mechanical Engineers. MFC-3M-1989 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2005.
    .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    '''
    beta4 = (Do/D)**4
    dP = P1 - P2
    return 1.0 - (0.41 + 0.35*beta4)*dP/(k*P1)
def C_Reader_Harris_Gallagher(D, Do, rho, mu, m, taps='corner'):
    r'''Calculates the coefficient of discharge of an orifice plate with the
    Reader-Harris/Gallagher correlation (ISO 5167-2:2003 / ASME MFC-3M) from
    the plate geometry, the mass flow rate, and the fluid density and
    viscosity.

    .. math::
        C = 0.5961 + 0.0261\beta^2 - 0.216\beta^8 + 0.000521\left(\frac{
        10^6\beta}{Re_D}\right)^{0.7}\\
        + (0.0188 + 0.0063A)\beta^{3.5} \left(\frac{10^6}{Re_D}\right)^{0.3} \\
        +(0.043 + 0.080\exp(-10L_1) -0.123\exp(-7L_1))(1-0.11A)\frac{\beta^4}
        {1-\beta^4} \\
        - 0.031(M_2' - 0.8M_2'^{1.1})\beta^{1.3}

    .. math::
        M_2' = \frac{2L_2'}{1-\beta}

        A = \left(\frac{19000\beta}{Re_{D}}\right)^{0.8}

        Re_D = \frac{\rho v D}{\mu}

    With tap-position parameters :math:`L_1 = L_2' = 0` for corner taps,
    :math:`L_1 = 1, L_2' = 0.47` for D and D/2 taps, and
    :math:`L_1 = L_2' = 0.0254/D` for flange taps. For D < 71.12 mm an
    additional small-diameter correction is applied.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the orifice, [kg/s]
    taps : str
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2',
        [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the orifice, [-]

    Raises
    ------
    ValueError
        If `taps` is not one of the supported tap locations.

    Notes
    -----
    Applicability limits from ISO 5167-2 include: measured pressure
    difference under 250 kPa; orifice bore over 12.5 mm; pipe diameter
    between 50 mm and 1 m; beta between 0.1 and 0.75; and minimum Reynolds
    numbers that depend on beta and the tap arrangement.

    Examples
    --------
    >>> C_Reader_Harris_Gallagher(D=0.07391, Do=0.0222, rho=1.165, mu=1.85E-5,
    ... m=0.12, taps='flange')
    0.5990326277163659

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits
       Running Full -- Part 2: Orifice Plates.
    .. [3] Reader-Harris, Michael. Orifice Plates and Venturi Tubes. Springer,
       2015.
    '''
    A_pipe = pi/4.*D*D
    v = m/(A_pipe*rho)
    Re_D = rho*v*D/mu

    beta = Do/D
    if taps == 'corner':
        L1, L2_prime = 0.0, 0.0
    elif taps == 'D' or taps == 'D/2':
        L1 = 1.0
        L2_prime = 0.47
    elif taps == 'flange':
        L1 = L2_prime = 0.0254/D
    else:
        # ValueError is the idiomatic exception for a bad argument value;
        # it is a subclass of Exception, so existing handlers still match.
        raise ValueError('Unsupported tap location')

    beta2 = beta*beta
    beta4 = beta2*beta2
    beta8 = beta4*beta4

    A = (19000.0*beta/Re_D)**0.8
    M2_prime = 2*L2_prime/(1.0 - beta)

    delta_C_upstream = ((0.043 + 0.080*exp(-1E1*L1) - 0.123*exp(-7.0*L1))
                        *(1.0 - 0.11*A)*beta4/(1.0 - beta4))

    # The max() term is not in the ISO standard; it limits the downstream
    # correction at very low Reynolds numbers.
    delta_C_downstream = (-0.031*(M2_prime - 0.8*M2_prime**1.1)*beta**1.3
                          *(1.0 + 8*max(log10(3700./Re_D), 0.0)))

    # C_inf is the discharge coefficient with corner taps at infinite Re;
    # the slope term raises C at lower Reynolds numbers. The max() term is
    # not in the ISO standard.
    C_inf_C_s = (0.5961 + 0.0261*beta2 - 0.216*beta8
                 + 0.000521*(1E6*beta/Re_D)**0.7
                 + (0.0188 + 0.0063*A)*beta**3.5*(
                 max((1E6/Re_D)**0.3, 22.7 - 4700.0*(Re_D/1E6))))

    C = (C_inf_C_s + delta_C_upstream + delta_C_downstream)
    if D < 0.07112:
        # Correction below 2.8 in. pipe diameter; needed not because of the
        # small diameter itself but because of edge radius differences.
        # The max() form is from Reader-Harris's book.
        delta_C_diameter = 0.011*(0.75 - beta)*max((2.8 - D/0.0254), 0.0)
        C += delta_C_diameter

    return C
def Reader_Harris_Gallagher_discharge(D, Do, P1, P2, rho, mu, k, taps='corner'):
    r'''Solve for the mass flow rate of fluid through an orifice plate, given
    the geometry of the plate, the measured tap pressures, and the density,
    viscosity, and isentropic exponent of the fluid. Because the discharge
    coefficient itself depends on the (unknown) flow rate, the answer is
    obtained iteratively.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    k : float
        Isentropic exponent of fluid, [-]
    taps : str
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2',
        [-]

    Returns
    -------
    m : float
        Mass flow rate of fluid through the orifice, [kg/s]

    Notes
    -----
    The expansibility factor is independent of the flow rate, but the
    Reader-Harris/Gallagher discharge coefficient is not (it depends on the
    pipe Reynolds number); a scalar root finder resolves the coupling.

    Examples
    --------
    >>> Reader_Harris_Gallagher_discharge(D=0.07366, Do=0.05, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33, taps='D')
    7.702338035732167

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    # Expansibility does not vary with the flow guess; compute it once.
    epsilon = orifice_expansibility(D=D, Do=Do, P1=P1, P2=P2, k=k)

    def _m_residual(m_guess):
        # Difference between the guessed flow rate and the flow rate implied
        # by the discharge coefficient evaluated at that guess
        C = C_Reader_Harris_Gallagher(D=D, Do=Do, rho=rho, mu=mu, m=m_guess,
                                      taps=taps)
        return m_guess - orifice_discharge(D=D, Do=Do, P1=P1, P2=P2, rho=rho,
                                           C=C, expansibility=epsilon)

    return newton(_m_residual, 2.81)
def discharge_coefficient_to_K(D, Do, C):
    r'''Converts a discharge coefficient to a standard loss coefficient,
    for use in computation of the actual pressure drop of an orifice or other
    device.

    .. math::
        K = \left[\frac{\sqrt{1-\beta^4(1-C^2)}}{C\beta^2} - 1\right]^2

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match with the specified pressure drop formula in [1]_; it can almost
    be matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    Examples
    --------
    >>> discharge_coefficient_to_K(D=0.07366, Do=0.05, C=0.61512)
    5.2314291729754

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    ratio = Do/D
    ratio2 = ratio*ratio
    ratio4 = ratio2*ratio2
    # Bracketed term of the formula above; K is its square
    core = (1.0 - ratio4*(1.0 - C*C))**0.5/(C*ratio2) - 1.0
    return core**2
def K_to_discharge_coefficient(D, Do, K):
    r'''Converts a standard loss coefficient to a discharge coefficient.

    .. math::
        C = \sqrt{\frac{1}{2 \sqrt{K} \beta^{4} + K \beta^{4}}
        - \frac{\beta^{4}}{2 \sqrt{K} \beta^{4} + K \beta^{4}} }

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    K : float
        Loss coefficient with respect to the velocity and density of the fluid
        just upstream of the orifice, [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the orifice, [-]

    Notes
    -----
    If expansibility is used in the orifice calculation, the result will not
    match with the specified pressure drop formula in [1]_; it can almost
    be matched by dividing the calculated mass flow by the expansibility factor
    and using that mass flow with the loss coefficient.

    This expression was derived with SymPy, and checked numerically. There were
    three other, incorrect roots.

    Examples
    --------
    >>> K_to_discharge_coefficient(D=0.07366, Do=0.05, K=5.2314291729754)
    0.6151200000000001

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    ratio = Do/D
    ratio2 = ratio*ratio
    ratio4 = ratio2*ratio2
    K_sqrt = K**0.5
    # Shared denominator of both fractions under the outer square root
    denom = 2.0*K_sqrt*ratio4 + K*ratio4
    return (1.0/denom - ratio4/denom)**0.5
def dP_orifice(D, Do, P1, P2, C):
    r'''Calculates the non-recoverable pressure drop of an orifice plate based
    on the pressure drop and the geometry of the plate and the discharge
    coefficient.

    .. math::
        \Delta\bar w = \frac{\sqrt{1-\beta^4(1-C^2)}-C\beta^2}
        {\sqrt{1-\beta^4(1-C^2)}+C\beta^2} (P_1 - P_2)

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    C : float
        Coefficient of discharge of the orifice, [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the orifice plate, [Pa]

    Notes
    -----
    This formula can be well approximated by:

    .. math::
        \Delta\bar w = \left(1 - \beta^{1.9}\right)(P_1 - P_2)

    The recoverable pressure drop should be recovered by 6 pipe diameters
    downstream of the orifice plate.

    Examples
    --------
    >>> dP_orifice(D=0.07366, Do=0.05, P1=200000.0, P2=183000.0, C=0.61512)
    9069.474705745388

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-2:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 2: Orifice Plates.
    '''
    ratio = Do/D
    ratio2 = ratio*ratio
    ratio4 = ratio2*ratio2
    # The square root appears in both numerator and denominator
    root = (1.0 - ratio4*(1.0 - C*C))**0.5
    C_ratio2 = C*ratio2
    return (root - C_ratio2)/(root + C_ratio2)*(P1 - P2)
def velocity_of_approach_factor(D, Do):
    r'''Calculates a factor for orifice plate design called the `velocity of
    approach`.

    .. math::
        \text{Velocity of approach} = \frac{1}{\sqrt{1 - \beta^4}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice at flow conditions, [m]

    Returns
    -------
    velocity_of_approach : float
        Coefficient of discharge of the orifice, [-]

    Examples
    --------
    >>> velocity_of_approach_factor(D=0.0739, Do=0.0222)
    1.0040970074165514

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    '''
    beta4 = (Do/D)**4
    return (1.0 - beta4)**-0.5
def flow_coefficient(D, Do, C):
    r'''Calculates a factor for differential pressure flow meter design called
    the `flow coefficient`. This should not be confused with the flow
    coefficient often used when discussing valves.

    .. math::
        \text{Flow coefficient} = \frac{C}{\sqrt{1 - \beta^4}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of flow meter characteristic dimension at flow conditions, [m]
    C : float
        Coefficient of discharge of the flow meter, [-]

    Returns
    -------
    flow_coefficient : float
        Differential pressure flow meter flow coefficient, [-]

    Notes
    -----
    This measure is used not just for orifices but for other differential
    pressure flow meters [2]_.

    It is sometimes given the symbol K. It is also equal to the product of the
    discharge coefficient and the velocity of approach factor [2]_.

    Examples
    --------
    >>> flow_coefficient(D=0.0739, Do=0.0222, C=0.6)
    0.6024582044499308

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    '''
    beta4 = (Do/D)**4
    # Discharge coefficient times the velocity of approach factor
    approach_factor = (1.0 - beta4)**-0.5
    return C*approach_factor
def nozzle_expansibility(D, Do, P1, P2, k):
    r'''Calculates the expansibility factor for a nozzle or venturi nozzle,
    based on the geometry of the plate, measured pressures of the orifice, and
    the isentropic exponent of the fluid.

    .. math::
        \epsilon = \left\{\left(\frac{\kappa \tau^{2/\kappa}}{\kappa-1}\right)
        \left(\frac{1 - \beta^4}{1 - \beta^4 \tau^{2/\kappa}}\right)
        \left[\frac{1 - \tau^{(\kappa-1)/\kappa}}{1 - \tau}
        \right] \right\}^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of orifice of the venturi or nozzle, [m]
    P1 : float
        Static pressure of fluid upstream of orifice at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of orifice at the cross-section of
        the pressure tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    This formula was determined for the range of P2/P1 >= 0.75.

    Examples
    --------
    >>> nozzle_expansibility(D=0.0739, Do=0.0222, P1=1E5, P2=9.9E4, k=1.4)
    0.9945702344566746

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    ratio = Do/D
    ratio2 = ratio*ratio
    ratio4 = ratio2*ratio2
    # tau is the pressure ratio P2/P1
    pressure_ratio = P2/P1
    # tau^(2/k) appears twice; evaluate it once
    pr_2k = pressure_ratio**(2.0/k)
    kinetic_term = k*pr_2k/(k - 1.0)
    geometric_term = (1.0 - ratio4)/(1.0 - ratio4*pr_2k)
    isentropic_term = (1.0 - pressure_ratio**((k - 1.0)/k))/(1.0 - pressure_ratio)
    return (kinetic_term*geometric_term*isentropic_term)**0.5
def C_long_radius_nozzle(D, Do, rho, mu, m):
    r'''Calculates the coefficient of discharge of a long radius nozzle used
    for measuring flow rate of fluid, based on the geometry of the nozzle,
    mass flow rate through the nozzle, and the density and viscosity of the
    fluid.

    .. math::
        C = 0.9965 - 0.00653\beta^{0.5} \left(\frac{10^6}{Re_D}\right)^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of long radius nozzle orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the nozzle, [kg/s]

    Returns
    -------
    C : float
        Coefficient of discharge of the long radius nozzle orifice, [-]

    Examples
    --------
    >>> C_long_radius_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
    0.9805503704679863

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    # Pipe Reynolds number from the mean velocity in the upstream pipe
    pipe_area = pi/4.*D*D
    mean_velocity = m/(pipe_area*rho)
    Re_pipe = rho*mean_velocity*D/mu
    diameter_ratio = Do/D
    return 0.9965 - 0.00653*diameter_ratio**0.5*(1E6/Re_pipe)**0.5
def C_ISA_1932_nozzle(D, Do, rho, mu, m):
    r'''Calculates the coefficient of discharge of an ISA 1932 style nozzle
    used for measuring flow rate of fluid, based on the geometry of the nozzle,
    mass flow rate through the nozzle, and the density and viscosity of the
    fluid.

    .. math::
        C = 0.9900 - 0.2262\beta^{4.1} - (0.00175\beta^2 - 0.0033\beta^{4.15})
        \left(\frac{10^6}{Re_D}\right)^{1.15}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of nozzle orifice at flow conditions, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    m : float
        Mass flow rate of fluid through the nozzle, [kg/s]

    Returns
    -------
    C : float
        Coefficient of discharge of the nozzle orifice, [-]

    Examples
    --------
    >>> C_ISA_1932_nozzle(D=0.07391, Do=0.0422, rho=1.2, mu=1.8E-5, m=0.1)
    0.9635849973250495

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    # Pipe Reynolds number from the mean velocity in the upstream pipe
    pipe_area = pi/4.*D*D
    mean_velocity = m/(pipe_area*rho)
    Re_pipe = rho*mean_velocity*D/mu
    diameter_ratio = Do/D
    # Reynolds-number-dependent correction term
    Re_correction = (0.00175*diameter_ratio**2
                     - 0.0033*diameter_ratio**4.15)*(1E6/Re_pipe)**1.15
    return 0.9900 - 0.2262*diameter_ratio**4.1 - Re_correction
def C_venturi_nozzle(D, Do):
    r'''Calculates the coefficient of discharge of a Venturi style nozzle
    used for measuring flow rate of fluid, based on the geometry of the
    nozzle.

    .. math::
        C = 0.9858 - 0.196\beta^{4.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of nozzle orifice at flow conditions, [m]

    Returns
    -------
    C : float
        Coefficient of discharge of the nozzle orifice, [-]

    Notes
    -----
    The coefficient 0.196 matches the equation given in ISO 5167-3 and the
    docstring formula above; a previous revision of this function used 0.198,
    which disagreed with both.

    Examples
    --------
    >>> round(C_venturi_nozzle(D=0.07391, Do=0.0422), 6)
    0.97006

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-3:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 3: Nozzles and Venturi Nozzles.
    '''
    beta = Do/D
    # 0.196 per ISO 5167-3; was 0.198 (inconsistent with the documented form)
    return 0.9858 - 0.196*beta**4.5
# Relative pressure loss as a function of beta ratio for venturi tubes.
# Venturi tubes should be between 65 mm and 500 mm in diameter; there are
# high- and low-loss curves, with the high losses corresponding to small
# diameters and the low losses corresponding to large diameters.
# Interpolation can be performed.

# Beta-ratio sample points shared by both loss-ratio curves below
venturi_tube_betas = np.array(
    [0.299160, 0.299470, 0.312390, 0.319010, 0.326580, 0.337290,
     0.342020, 0.347060, 0.359030, 0.365960, 0.372580, 0.384870,
     0.385810, 0.401250, 0.405350, 0.415740, 0.424250, 0.434010,
     0.447880, 0.452590, 0.471810, 0.473090, 0.493540, 0.499240,
     0.516530, 0.523800, 0.537630, 0.548060, 0.556840, 0.573890,
     0.582350, 0.597820, 0.601560, 0.622650, 0.626490, 0.649480,
     0.650990, 0.668700, 0.675870, 0.688550, 0.693180, 0.706180,
     0.713330, 0.723510, 0.749540, 0.749650])

# Pressure-loss ratio (dP_permanent/dP_measured) for small-diameter
# (high-loss) venturi tubes, at each beta in `venturi_tube_betas`
venturi_tube_dP_high = np.array(
    [0.164534, 0.164504, 0.163591, 0.163508, 0.163439,
     0.162652, 0.162224, 0.161866, 0.161238, 0.160786,
     0.160295, 0.159280, 0.159193, 0.157776, 0.157467,
     0.156517, 0.155323, 0.153835, 0.151862, 0.151154,
     0.147840, 0.147613, 0.144052, 0.143050, 0.140107,
     0.138981, 0.136794, 0.134737, 0.132847, 0.129303,
     0.127637, 0.124758, 0.124006, 0.119269, 0.118449,
     0.113605, 0.113269, 0.108995, 0.107109, 0.103688,
     0.102529, 0.099567, 0.097791, 0.095055, 0.087681,
     0.087648])

# Pressure-loss ratio for large-diameter (low-loss) venturi tubes,
# at each beta in `venturi_tube_betas`
venturi_tube_dP_low = np.array(
    [0.089232, 0.089218, 0.088671, 0.088435, 0.088206,
     0.087853, 0.087655, 0.087404, 0.086693, 0.086241,
     0.085813, 0.085142, 0.085102, 0.084446, 0.084202,
     0.083301, 0.082470, 0.081650, 0.080582, 0.080213,
     0.078509, 0.078378, 0.075989, 0.075226, 0.072700,
     0.071598, 0.069562, 0.068128, 0.066986, 0.064658,
     0.063298, 0.060872, 0.060378, 0.057879, 0.057403,
     0.054091, 0.053879, 0.051726, 0.050931, 0.049362,
     0.048675, 0.046522, 0.045381, 0.043840, 0.039913,
     0.039896])

#ratios_average = 0.5*(ratios_high + ratios_low)

# Pipe diameters (m) at which the high- and low-loss curves apply;
# used to blend between the two curves by interpolation on D
D_bound_venturi_tube = np.array([0.065, 0.5])
def dP_venturi_tube(D, Do, P1, P2):
    r'''Calculates the non-recoverable pressure drop of a venturi tube
    differential pressure meter based on the pressure drop and the geometry of
    the venturi meter.

    .. math::
        \epsilon = \frac{\Delta\bar w }{\Delta P}

    The :math:`\epsilon` value is looked up in a table of values as a function
    of beta ratio and upstream pipe diameter (roughness impact).

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of venturi tube at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of venturi tube at the
        cross-section of the pressure tap, [Pa]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the venturi tube, [Pa]

    Notes
    -----
    The recoverable pressure drop should be recovered by 6 pipe diameters
    downstream of the venturi tube.

    There is some information on the effect of Reynolds number in [1]_ and
    [2]_, with a multiplier decreasing from 1.75 to 1 over 1E5-6E5 and equal
    to 1 at higher Reynolds numbers; that effect is not included in this
    implementation.

    Examples
    --------
    >>> dP_venturi_tube(D=0.07366, Do=0.05, P1=200000.0, P2=183000.0)
    1788.5717754177406

    References
    ----------
    .. [1] American Society of Mechanical Engineers. Mfc-3M-2004 Measurement
       Of Fluid Flow In Pipes Using Orifice, Nozzle, And Venturi. ASME, 2001.
    .. [2] ISO 5167-4:2003 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 4: Venturi Tubes.
    '''
    diameter_ratio = Do/D
    # Loss ratios at the small-diameter (high-loss) and large-diameter
    # (low-loss) bounds of the tabulated data
    loss_high = np.interp(diameter_ratio, venturi_tube_betas, venturi_tube_dP_high)
    loss_low = np.interp(diameter_ratio, venturi_tube_betas, venturi_tube_dP_low)
    # Blend between the two curves on the actual pipe diameter
    loss_ratio = np.interp(D, D_bound_venturi_tube, [loss_high, loss_low])
    return loss_ratio*(P1 - P2)
def diameter_ratio_cone_meter(D, Dc):
    r'''Calculates the diameter ratio `beta` used to characterize a cone
    flow meter.

    .. math::
        \beta = \sqrt{1 - \frac{d_c^2}{D^2}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]

    Returns
    -------
    beta : float
        Cone meter diameter ratio, [-]

    Examples
    --------
    >>> diameter_ratio_cone_meter(D=0.2575, Dc=0.184)
    0.6995709873957624

    References
    ----------
    .. [1] Hollingshead, Colter. "Discharge Coefficient Performance of Venturi,
       Standard Concentric Orifice Plate, V-Cone, and Wedge Flow Meters at
       Small Reynolds Numbers." May 1, 2011.
       https://digitalcommons.usu.edu/etd/869.
    '''
    # beta is based on the open annular flow area around the cone
    cone_ratio = Dc/D
    return (1.0 - cone_ratio*cone_ratio)**0.5
def cone_meter_expansibility_Stewart(D, Dc, P1, P2, k):
    r'''Calculates the expansibility factor for a cone flow meter,
    based on the geometry of the cone meter, measured pressures of the orifice,
    and the isentropic exponent of the fluid. Developed in [1]_, also shown
    in [2]_.

    .. math::
        \epsilon = 1 - (0.649 + 0.696\beta^4) \frac{\Delta P}{\kappa P_1}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]
    P1 : float
        Static pressure of fluid upstream of cone meter at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid at the end of the center of the cone pressure
        tap, [Pa]
    k : float
        Isentropic exponent of fluid, [-]

    Returns
    -------
    expansibility : float
        Expansibility factor (1 for incompressible fluids, less than 1 for
        real fluids), [-]

    Notes
    -----
    This formula was determined for the range of P2/P1 >= 0.75; the only gas
    used to determine the formula is air.

    Examples
    --------
    >>> cone_meter_expansibility_Stewart(D=1, Dc=0.9, P1=1E6, P2=8.5E5, k=1.2)
    0.9157343

    References
    ----------
    .. [1] Stewart, D. G., M. Reader-Harris, and NEL Dr RJW Peters. "Derivation
       of an Expansibility Factor for the V-Cone Meter." In Flow Measurement
       International Conference, Peebles, Scotland, UK, 2001.
    .. [2] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 5: Cone meters.
    '''
    pressure_drop = P1 - P2
    beta = diameter_ratio_cone_meter(D, Dc)
    return 1.0 - (0.649 + 0.696*beta**4)*pressure_drop/(k*P1)
def dP_cone_meter(D, Dc, P1, P2):
    r'''Calculates the non-recoverable pressure drop of a cone meter
    based on the measured pressures before and at the cone end, and the
    geometry of the cone meter according to [1]_.

    .. math::
        \Delta \bar \omega = (1.09 - 0.813\beta)\Delta P

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Dc : float
        Diameter of the largest end of the cone meter, [m]
    P1 : float
        Static pressure of fluid upstream of cone meter at the cross-section of
        the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid at the end of the center of the cone pressure
        tap, [Pa]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the orifice plate, [Pa]

    Notes
    -----
    The recoverable pressure drop should be recovered by 6 pipe diameters
    downstream of the cone meter.

    Examples
    --------
    >>> dP_cone_meter(1, .7, 1E6, 9.5E5)
    25470.093437973323

    References
    ----------
    .. [1] ISO 5167-5:2016 - Measurement of Fluid Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits Running
       Full -- Part 5: Cone meters.
    '''
    beta = diameter_ratio_cone_meter(D, Dc)
    # Fraction of the measured drop that is not recovered downstream
    permanent_loss_fraction = 1.09 - 0.813*beta
    return permanent_loss_fraction*(P1 - P2)
def diameter_ratio_wedge_meter(D, H):
    r'''Calculates the diameter ratio `beta` used to characterize a wedge
    flow meter as given in [1]_ and [2]_.

    .. math::
        \beta = \left(\frac{1}{\pi}\left\{\arccos\left[1 - \frac{2H}{D}
        \right] - 2 \left[1 - \frac{2H}{D}
        \right]\left(\frac{H}{D} - \left[\frac{H}{D}\right]^2
        \right)^{0.5}\right\}\right)^{0.5}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]

    Returns
    -------
    beta : float
        Wedge meter diameter ratio, [-]

    Examples
    --------
    >>> diameter_ratio_wedge_meter(D=0.2027, H=0.0608)
    0.5022531424646643

    References
    ----------
    .. [1] Hollingshead, Colter. "Discharge Coefficient Performance of Venturi,
       Standard Concentric Orifice Plate, V-Cone, and Wedge Flow Meters at
       Small Reynolds Numbers." May 1, 2011.
       https://digitalcommons.usu.edu/etd/869.
    .. [2] IntraWedge WEDGE FLOW METER Type: IWM. January 2011.
       http://www.intra-automation.com/download.php?file=pdf/products/technical_information/en/ti_iwm_en.pdf
    '''
    # beta^2 is the fraction of the circle's area left open by the wedge
    height_ratio = H/D
    chord_term = 1.0 - 2.0*height_ratio
    half_chord = (height_ratio - height_ratio*height_ratio)**0.5
    # Circular-segment area divided by pi gives beta squared
    segment = acos(chord_term) - 2.0*chord_term*half_chord
    return (1./pi*segment)**0.5
def C_wedge_meter_Miller(D, H):
    r'''Calculates the coefficient of discharge of an wedge flow meter
    used for measuring flow rate of fluid, based on the geometry of the
    differential pressure flow meter.

    For half-inch lines:

    .. math::
        C = 0.7883 + 0.107(1 - \beta^2)

    For 1 to 1.5 inch lines:

    .. math::
        C = 0.6143 + 0.718(1 - \beta^2)

    For 1.5 to 24 inch lines:

    .. math::
        C = 0.5433 + 0.2453(1 - \beta^2)

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    H : float
        Portion of the diameter of the clear segment of the pipe up to the
        wedge blocking flow; the height of the pipe up to the wedge, [m]

    Returns
    -------
    C : float
        Coefficient of discharge of the wedge flow meter, [-]

    Notes
    -----
    There is an ISO standard being developed to cover wedge meters as of 2018.

    Wedge meters can have varying angles; 60 and 90 degree wedge meters have
    been reported. Tap locations 1 or 2 diameters (upstream and downstream),
    and 2D upstream/1D downstream have been used. Some wedges are sharp;
    some are smooth. [2]_ gives some experimental values.

    Examples
    --------
    >>> C_wedge_meter_Miller(D=0.1524, H=0.3*0.1524)
    0.7267069372687651

    References
    ----------
    .. [1] Miller, Richard W. Flow Measurement Engineering Handbook. 3rd
       edition. New York: McGraw-Hill Education, 1996.
    .. [2] Seshadri, V., S. N. Singh, and S. Bhargava. "Effect of Wedge Shape
       and Pressure Tap Locations on the Characteristics of a Wedge Flowmeter."
       IJEMS Vol.01(5), October 1994.
    '''
    beta = diameter_ratio_wedge_meter(D, H)
    # Every correlation below is linear in (1 - beta^2)
    open_fraction = 1.0 - beta*beta
    if D <= 0.7*inch:
        # Miller suggests a 0.5 inch limit for this correlation
        return 0.7883 + 0.107*open_fraction
    if D <= 1.4*inch:
        # Suggested limit is under 1.5 inches
        return 0.6143 + 0.718*open_fraction
    return 0.5433 + 0.2453*open_fraction
def C_Reader_Harris_Gallagher_wet_venturi_tube(mg, ml, rhog, rhol, D, Do, H=1):
    r'''Calculates the coefficient of discharge of the wet gas venturi tube
    based on the geometry of the tube, mass flow rates of liquid and vapor
    through the tube, the density of the liquid and gas phases, and an
    adjustable coefficient `H`.

    .. math::
        C = 1 - 0.0463\exp(-0.05Fr_{gas, th}) \cdot \min\left(1,
        \sqrt{\frac{X}{0.016}}\right)

        Fr_{gas, th} = \frac{Fr_{\text{gas, densionetric }}}{\beta^{2.5}}

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

        {Fr_{\text{gas, densiometric}}} = \frac{v_{gas}}{\sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}
        = \frac{4m_g}{\rho_{1,g} \pi D^2 \sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}

    Parameters
    ----------
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam); retained for backward compatibility, it does not enter the
        discharge coefficient itself, [-]

    Returns
    -------
    C : float
        Coefficient of discharge of the wet gas venturi tube flow meter
        (includes flow rate of gas ONLY), [-]

    Notes
    -----
    This model has more error than single phase differential pressure meters.
    The model was first published in [1]_, and became ISO 11583 later.

    The model's `n`, `C_{Ch}` and :math:`\phi` terms belong to the published
    over-reading correlation and do not appear in the discharge coefficient
    equation; a previous revision computed them here as dead code.

    The limits of this correlation according to [2]_ are as follows:

    .. math::
        0.4 \le \beta \le 0.75

        0 < X \le 0.3

        Fr_{gas, th} > 3

        \frac{\rho_g}{\rho_l} > 0.02

        D \ge 50 \text{ mm}

    Examples
    --------
    >>> C_Reader_Harris_Gallagher_wet_venturi_tube(mg=5.31926, ml=5.31926/2,
    ... rhog=50.0, rhol=800., D=.1, Do=.06, H=1)
    0.9754210845876333

    References
    ----------
    .. [1] Reader-harris, Michael, and Tuv Nel. An Improved Model for
       Venturi-Tube Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    # Superficial gas velocity in the upstream pipe
    V = 4*mg/(rhog*pi*D**2)
    # Densimetric gas Froude number; the gas is the light phase (heavy=False)
    Frg = Froude_densimetric(V, L=D, rho1=rhol, rho2=rhog, heavy=False)
    beta = Do/D
    # Throat Froude number
    Fr_gas_th = Frg*beta**-2.5
    # Lockhart-Martinelli parameter
    X = ml/mg*(rhog/rhol)**0.5
    C = 1.0 - 0.0463*exp(-0.05*Fr_gas_th)*min(1.0, (X/0.016)**0.5)
    return C
def dP_Reader_Harris_Gallagher_wet_venturi_tube(D, Do, P1, P2, ml, mg, rhol,
                                                rhog, H=1):
    r'''Calculates the non-recoverable pressure drop of a wet gas venturi
    nozzle based on the pressure drop and the geometry of the venturi nozzle,
    the mass flow rates of liquid and gas through it, the densities of the
    vapor and liquid phase, and an adjustable coefficient `H`.

    .. math::
        Y = \frac{\Delta \bar \omega}{\Delta P} - 0.0896 - 0.48\beta^9

        Y_{max} = 0.61\exp\left[-11\frac{\rho_{1,g}}{\rho_l}
        - 0.045 \frac{Fr_{gas}}{H}\right]

        \frac{Y}{Y_{max}} = 1 - \exp\left[-35 X^{0.75} \exp
        \left( \frac{-0.28Fr_{gas}}{H}\right)\right]

        X = \left(\frac{m_l}{m_g}\right) \sqrt{\frac{\rho_{1,g}}{\rho_l}}

        {Fr_{\text{gas, densiometric}}} = \frac{v_{gas}}{\sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}
        = \frac{4m_g}{\rho_{1,g} \pi D^2 \sqrt{gD}}
        \sqrt{\frac{\rho_{1,g}}{\rho_l - \rho_{1,g}}}

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    Do : float
        Diameter of venturi tube at flow conditions, [m]
    P1 : float
        Static pressure of fluid upstream of venturi tube at the cross-section
        of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of venturi tube at the cross-
        section of the pressure tap, [Pa]
    ml : float
        Mass flow rate of liquid through the venturi tube, [kg/s]
    mg : float
        Mass flow rate of gas through the venturi tube, [kg/s]
    rhol : float
        Density of liquid at `P1`, [kg/m^3]
    rhog : float
        Density of gas at `P1`, [kg/m^3]
    H : float, optional
        A surface-tension effect coefficient used to adjust for different
        fluids, (1 for a hydrocarbon liquid, 1.35 for water, 0.79 for water in
        steam) [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the wet gas venturi tube, [Pa]

    Notes
    -----
    The model was first published in [1]_, and became ISO 11583 later.

    Examples
    --------
    >>> dP_Reader_Harris_Gallagher_wet_venturi_tube(D=.1, Do=.06, H=1,
    ... P1=6E6, P2=6E6-5E4, ml=5.31926/2, mg=5.31926, rhog=50.0, rhol=800.,)
    16957.43843129572

    References
    ----------
    .. [1] Reader-harris, Michael, and Tuv Nel. An Improved Model for
       Venturi-Tube Over-Reading in Wet Gas, 2009.
    .. [2] ISO/TR 11583:2012 Measurement of Wet Gas Flow by Means of Pressure
       Differential Devices Inserted in Circular Cross-Section Conduits.
    '''
    dP = P1 - P2
    beta = Do/D
    # Lockhart-Martinelli parameter
    X = ml/mg*(rhog/rhol)**0.5
    # Superficial gas velocity in the upstream pipe
    V = 4*mg/(rhog*pi*D**2)
    # Densimetric gas Froude number; the gas is the light phase (heavy=False)
    Frg = Froude_densimetric(V, L=D, rho1=rhol, rho2=rhog, heavy=False)
    Y_ratio = 1.0 - exp(-35.0*X**0.75*exp(-0.28*Frg/H))
    Y_max = 0.61*exp(-11.0*rhog/rhol - 0.045*Frg/H)
    Y = Y_max*Y_ratio
    rhs = -0.0896 - 0.48*beta**9
    # Rearrangement of the first equation above for the permanent loss
    dw = dP*(Y - rhs)
    return dw
# Venturi tube discharge coefficients as a function of Re, used for
# interpolation; values from the tables for each convergent-section type
as_cast_convergent_venturi_Res = [4E5, 6E4, 1E5, 1.5E5]
# NOTE(review): the leading 4E5 above breaks the otherwise ascending
# Reynolds-number ordering of this table; it looks like a typo for 4E4 --
# confirm against the standard before relying on interpolation over it.
as_cast_convergent_venturi_Cs = [0.957, 0.966, 0.976, 0.982]

machined_convergent_venturi_Res = [5E4, 1E5, 2E5, 3E5,
                                   7.5E5, # 5E5 to 1E6
                                   1.5E6, # 1E6 to 2E6
                                   5E6] # 2E6 to 1E8
machined_convergent_venturi_Cs = [0.970, 0.977, 0.992, 0.998, 0.995, 1.000, 1.010]

rough_welded_convergent_venturi_Res = [4E4, 6E4, 1E5]
rough_welded_convergent_venturi_Cs = [0.96, 0.97, 0.98]

as_cast_convergent_entrance_machined_venturi_Res = [1E4, 6E4, 1E5, 1.5E5,
                                                    3.5E5, # 2E5 to 5E5
                                                    3.2E6] # 5E5 to 3.2E6
as_cast_convergent_entrance_machined_venturi_Cs = [0.963, 0.978, 0.98, 0.987, 0.992, 0.995]

# Constant discharge coefficients used by _differential_pressure_C_epsilon
# for meter types treated as Re-independent
CONE_METER_C = 0.82
ROUGH_WELDED_CONVERGENT_VENTURI_TUBE_C = 0.985
MACHINED_CONVERGENT_VENTURI_TUBE_C = 0.995
AS_CAST_VENTURI_TUBE_C = 0.984
def _differential_pressure_C_epsilon(D, D2, m, P1, P2, rho, mu, k, meter_type,
                                    taps=None):
    '''Helper function only; computes the expansibility factor and discharge
    coefficient for the specified meter type, returning ``(epsilon, C)``.
    Raises ValueError for an unrecognized `meter_type`.
    '''
    if meter_type == ISO_5167_ORIFICE:
        C = C_Reader_Harris_Gallagher(D=D, Do=D2, rho=rho, mu=mu, m=m, taps=taps)
        epsilon = orifice_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
    elif meter_type == LONG_RADIUS_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_long_radius_nozzle(D=D, Do=D2, rho=rho, mu=mu, m=m)
    elif meter_type == ISA_1932_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_ISA_1932_nozzle(D=D, Do=D2, rho=rho, mu=mu, m=m)
    elif meter_type == VENTURI_NOZZLE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_venturi_nozzle(D=D, Do=D2)
    elif meter_type == AS_CAST_VENTURI_TUBE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = AS_CAST_VENTURI_TUBE_C
    elif meter_type == MACHINED_CONVERGENT_VENTURI_TUBE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = MACHINED_CONVERGENT_VENTURI_TUBE_C
    elif meter_type == ROUGH_WELDED_CONVERGENT_VENTURI_TUBE:
        epsilon = nozzle_expansibility(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = ROUGH_WELDED_CONVERGENT_VENTURI_TUBE_C
    elif meter_type == CONE_METER:
        epsilon = cone_meter_expansibility_Stewart(D=D, Dc=D2, P1=P1, P2=P2, k=k)
        C = CONE_METER_C
    elif meter_type == WEDGE_METER:
        epsilon = orifice_expansibility_1989(D=D, Do=D2, P1=P1, P2=P2, k=k)
        C = C_wedge_meter_Miller(D=D, H=D2)
    else:
        # Previously an unknown meter_type fell through to an
        # UnboundLocalError at the return; fail with a clear message instead.
        raise ValueError('Unsupported meter_type: %s' % (meter_type,))
    return epsilon, C
def differential_pressure_meter_solver(D, rho, mu, k, D2=None, P1=None, P2=None,
                                       m=None, meter_type=ISO_5167_ORIFICE,
                                       taps=None):
    r'''Calculates either the mass flow rate, the upstream pressure, the second
    pressure value, or the orifice diameter for a differential
    pressure flow meter based on the geometry of the meter, measured pressures
    of the meter, and the density, viscosity, and isentropic exponent of the
    fluid. This solves an equation iteratively to obtain the correct flow rate.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    rho : float
        Density of fluid at `P1`, [kg/m^3]
    mu : float
        Viscosity of fluid at `P1`, [Pa*s]
    k : float
        Isentropic exponent of fluid, [-]
    D2 : float, optional
        Diameter of orifice, or venturi meter orifice, or flow tube orifice,
        or cone meter end diameter, or wedge meter fluid flow height, [m]
    P1 : float, optional
        Static pressure of fluid upstream of differential pressure meter at the
        cross-section of the pressure tap, [Pa]
    P2 : float, optional
        Static pressure of fluid downstream of differential pressure meter or
        at the prescribed location (varies by type of meter) [Pa]
    m : float, optional
        Mass flow rate of fluid through the flow meter, [kg/s]
    meter_type : str, optional
        One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
        'venturi nozzle', 'as cast convergent venturi tube',
        'machined convergent venturi tube',
        'rough welded convergent venturi tube', 'cone meter',
        'wedge meter'), [-]
    taps : str, optional
        The orientation of the taps; one of 'corner', 'flange', 'D', or 'D/2';
        applies for orifice meters only, [-]

    Returns
    -------
    ans : float
        One of `m`, the mass flow rate of the fluid; `P1`, the pressure
        upstream of the flow meter; `P2`, the second pressure
        tap's value; and `D2`, the diameter of the measuring device; units
        of respectively, [kg/s], [Pa], [Pa], or [m]

    Notes
    -----
    See the appropriate functions for the documentation for the formulas and
    references used in each method.

    The solvers make some assumptions about the range of values answers may be
    in.

    Note that the solver for the upstream pressure uses the provided values of
    density, viscosity and isentropic exponent; whereas these values all
    depend on pressure (albeit to a small extent). An outer loop should be
    added with pressure-dependent values calculated in it for maximum accuracy.

    It would be possible to solve for the upstream pipe diameter, but there is
    no use for that functionality.

    Examples
    --------
    >>> differential_pressure_meter_solver(D=0.07366, D2=0.05, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
    ... meter_type='ISO 5167 orifice', taps='D')
    7.702338035732168

    >>> differential_pressure_meter_solver(D=0.07366, m=7.702338, P1=200000.0,
    ... P2=183000.0, rho=999.1, mu=0.0011, k=1.33,
    ... meter_type='ISO 5167 orifice', taps='D')
    0.04999999990831885
    '''
    def _flow_error(m, D2, P1, P2):
        # Signed difference between the specified mass flow and the one
        # implied by the meter equation; its root is the consistent solution
        # for whichever variable is being solved for. Shared by all four
        # branches below (previously duplicated as four identical closures).
        C, epsilon = _differential_pressure_C_epsilon(D, D2, m, P1, P2, rho,
                                                      mu, k, meter_type,
                                                      taps=taps)
        m_calc = orifice_discharge(D=D, Do=D2, P1=P1, P2=P2, rho=rho,
                                   C=C, expansibility=epsilon)
        return m - m_calc

    if m is None:
        # Mass flow is unbracketed; Newton's method from a nominal guess.
        return newton(lambda m: _flow_error(m, D2, P1, P2), 2.81)
    elif D2 is None:
        # Bore diameter bracketed between (almost) the pipe bore and a tiny bore.
        return brenth(lambda D2: _flow_error(m, D2, P1, P2), D*(1-1E-9), D*5E-3)
    elif P2 is None:
        # Downstream pressure bracketed just below P1 down to a 30% drop.
        return brenth(lambda P2: _flow_error(m, D2, P1, P2), P1*(1-1E-9), P1*0.7)
    elif P1 is None:
        # Upstream pressure bracketed just above P2 up to a 40% rise.
        return brenth(lambda P1: _flow_error(m, D2, P1, P2), P2*(1+1E-9), P2*1.4)
    else:
        # Bug fix: the message previously omitted P1 even though a P1 branch
        # exists above.
        raise Exception('Solver is capable of solving for one of P1, P2, D2, or m only.')
def differential_pressure_meter_dP(D, D2, P1, P2, C=None,
                                   meter_type=ISO_5167_ORIFICE):
    r'''Calculates either the non-recoverable pressure drop of a differential
    pressure flow meter based on the geometry of the meter, measured pressures
    of the meter, and for most models the meter discharge coefficient.

    Parameters
    ----------
    D : float
        Upstream internal pipe diameter, [m]
    D2 : float
        Diameter of orifice, or venturi meter orifice, or flow tube orifice,
        or cone meter end diameter, or wedge meter fluid flow height, [m]
    P1 : float
        Static pressure of fluid upstream of differential pressure meter at the
        cross-section of the pressure tap, [Pa]
    P2 : float
        Static pressure of fluid downstream of differential pressure meter or
        at the prescribed location (varies by type of meter) [Pa]
    C : float, optional
        Coefficient of discharge of the meter; required for the orifice and
        nozzle types only, [-]
    meter_type : str, optional
        One of ('ISO 5167 orifice', 'long radius nozzle', 'ISA 1932 nozzle',
        'as cast convergent venturi tube',
        'machined convergent venturi tube',
        'rough welded convergent venturi tube', 'cone meter'), [-]

    Returns
    -------
    dP : float
        Non-recoverable pressure drop of the differential pressure flow
        meter, [Pa]

    Raises
    ------
    NotImplementedError
        For venturi nozzles and wedge meters, which do not have standard
        formulas available for pressure drop computation.
    ValueError
        If `meter_type` is not a recognized meter type constant (previously
        this fell through to an UnboundLocalError).

    Notes
    -----
    See the appropriate functions for the documentation for the formulas and
    references used in each method.

    Examples
    --------
    >>> differential_pressure_meter_dP(D=0.07366, D2=0.05, P1=200000.0,
    ... P2=183000.0, meter_type='as cast convergent venturi tube')
    1788.5717754177406
    '''
    if meter_type in (ISO_5167_ORIFICE, LONG_RADIUS_NOZZLE, ISA_1932_NOZZLE):
        # Orifice plates and nozzles share the orifice permanent-loss formula
        # and need the discharge coefficient C.
        dP = dP_orifice(D=D, Do=D2, P1=P1, P2=P2, C=C)
    elif meter_type in (AS_CAST_VENTURI_TUBE, MACHINED_CONVERGENT_VENTURI_TUBE,
                        ROUGH_WELDED_CONVERGENT_VENTURI_TUBE):
        # All venturi tube variants share one pressure-loss formula.
        dP = dP_venturi_tube(D=D, Do=D2, P1=P1, P2=P2)
    elif meter_type == CONE_METER:
        dP = dP_cone_meter(D=D, Dc=D2, P1=P1, P2=P2)
    elif meter_type in (VENTURI_NOZZLE, WEDGE_METER):
        # Bug fix: was `raise Exception(NotImplemented)`, which abused the
        # NotImplemented singleton; NotImplementedError is still caught by
        # any existing `except Exception` handlers.
        raise NotImplementedError('no standard dP formula for %s' % (meter_type,))
    else:
        raise ValueError('Unsupported meter type: %s' % (meter_type,))
    return dP
| 35.407688 | 108 | 0.607385 |
42ead0688f656228fb0df39a2d45d3c1dd001507 | 532 | py | Python | iaso/migrations/0115_auto_20220124_1120.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | null | null | null | iaso/migrations/0115_auto_20220124_1120.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | null | null | null | iaso/migrations/0115_auto_20220124_1120.py | BLSQ/iaso-copy | 85fb17f408c15e8c2d730416d1312f58f8db39b7 | [
"MIT"
] | 1 | 2022-03-23T16:44:12.000Z | 2022-03-23T16:44:12.000Z | # Generated by Django 3.1.14 on 2022-01-24 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Entity.instance to
    # Entity.attributes and adds an optional Entity.uuid text column.
    dependencies = [
        ("iaso", "0114_auto_20220117_1609"),
    ]
    operations = [
        # The field formerly named "instance" is kept; only its name changes.
        migrations.RenameField(
            model_name="entity",
            old_name="instance",
            new_name="attributes",
        ),
        # blank=True/null=True, so existing rows need no default value.
        migrations.AddField(
            model_name="entity",
            name="uuid",
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 22.166667 | 58 | 0.565789 |
42eb0db02ed2cdde4c36688526176ef0796f32f2 | 1,370 | py | Python | git_plan/cli/commands/delete.py | synek/git-plan | 4cf5429348a71fb5ea8110272fb89d20bfa38c38 | [
"MIT"
] | 163 | 2021-03-06T12:01:06.000Z | 2022-03-01T22:52:36.000Z | git_plan/cli/commands/delete.py | synek/git-plan | 4cf5429348a71fb5ea8110272fb89d20bfa38c38 | [
"MIT"
] | 61 | 2021-03-06T07:00:39.000Z | 2021-04-13T10:25:58.000Z | git_plan/cli/commands/delete.py | synek/git-plan | 4cf5429348a71fb5ea8110272fb89d20bfa38c38 | [
"MIT"
] | 9 | 2021-03-07T17:52:57.000Z | 2021-10-18T21:35:23.000Z | """Delete command
Author: Rory Byrne <rory@rory.bio>
"""
from typing import Any
from git_plan.cli.commands.command import Command
from git_plan.service.plan import PlanService
from git_plan.util.decorators import requires_initialized, requires_git_repository
@requires_initialized
@requires_git_repository
class Delete(Command):
    """Delete an existing commit"""

    subcommand = 'delete'

    def __init__(self, plan_service: PlanService, **kwargs):
        """Store the injected plan service after base-command initialisation."""
        super().__init__(**kwargs)
        assert plan_service, "Plan service not injected"
        self._plan_service = plan_service

    def command(self, **kwargs):
        """Interactively choose a planned commit and delete it after confirmation.

        (The previous docstring wrongly said "Create a new commit".)
        """
        available = self._plan_service.get_commits(self._repository)
        if not available:
            self._ui.bold('No commits found.')
            return
        selection = self._ui.choose_commit(available, 'Which plan do you want to delete?')
        self._ui.bold(f'{selection.message.headline}\n')
        if not self._ui.confirm('Are you sure you want to delete this commit?'):
            self._ui.bold("Stopped.")
            return
        self._plan_service.delete_commit(selection)
        self._ui.bold('Deleted.')

    def register_subparser(self, subparsers: Any):
        """Attach this command's subparser to the CLI argument parser."""
        subparsers.add_parser(Delete.subcommand, help='Delete a planned commit.')
| 31.136364 | 92 | 0.687591 |
42ebcdfbf6dd3a3f1a79b5af4ed661e3aa7d93c1 | 347 | py | Python | newspaper2/newspaper2/news/admin.py | luisfer85/newspaper2 | 8522bc29e5597113af9f9714e510548057e19315 | [
"Apache-2.0"
] | null | null | null | newspaper2/newspaper2/news/admin.py | luisfer85/newspaper2 | 8522bc29e5597113af9f9714e510548057e19315 | [
"Apache-2.0"
] | null | null | null | newspaper2/newspaper2/news/admin.py | luisfer85/newspaper2 | 8522bc29e5597113af9f9714e510548057e19315 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from newspaper2.news.models import News, Event
class NewsAdmin(admin.ModelAdmin):
    # News changelist shows the headline and publish date, filterable by
    # date and searchable by title.
    list_display = ('title', 'publish_date')
    list_filter = ('publish_date',)
    search_fields = ['title']
class EventAdmin(admin.ModelAdmin):
    # No customisation yet; events use the stock admin form.
    pass
admin.site.register(News, NewsAdmin)
# Bug fix: Event was previously registered with NewsAdmin, leaving the
# dedicated EventAdmin class defined but unused.
admin.site.register(Event, EventAdmin)
| 23.133333 | 46 | 0.752161 |
42ef38196b7af8975b40694b6eb1954f2a48845e | 1,926 | py | Python | vision_module.py | seongdong2/GRADUATION | c38b13a2dd82a58bdba7673916408daa0d9b471e | [
"Unlicense"
] | 2 | 2021-09-19T13:52:05.000Z | 2021-10-04T01:09:21.000Z | vision_module.py | seongdong2/graduation | c38b13a2dd82a58bdba7673916408daa0d9b471e | [
"Unlicense"
] | 1 | 2021-10-14T06:19:44.000Z | 2021-10-14T06:19:44.000Z | vision_module.py | seongdong2/graduation | c38b13a2dd82a58bdba7673916408daa0d9b471e | [
"Unlicense"
] | null | null | null | import numpy as np
import cv2
# Class labels in the exact index order emitted by the MobileNet-SSD Caffe
# model; "person" (index 15) is the only label detect() acts on.
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
# Pre-trained MobileNet-SSD detector, loaded once at import time; the model
# files are expected in the current working directory.
net = cv2.dnn.readNetFromCaffe(
    "MobileNetSSD_deploy.prototxt.txt", "MobileNetSSD_deploy.caffemodel")
# Mean-intensity threshold below which a detected person's centre crop is
# treated as "black" by detect(); empirically chosen -- TODO confirm units.
BLACK_CRITERIA = 60
def detect(frame):
    """Run MobileNet-SSD person detection on a BGR image `frame`.

    Returns ``(True, (x, y, w, h))`` for the highest-confidence detected
    person whose centre crop is darker than ``BLACK_CRITERIA``, or
    ``(False, None)`` when no such dark person is found.
    """
    # Bug fix: the frame dimensions were previously bound to (h, w) and then
    # clobbered by `x, y, w, h = ...` inside the loop, corrupting the box
    # scaling for every detection after the first. Distinct names are used
    # for the frame size now.
    (frame_h, frame_w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                 0.007843, (300, 300), 127.5)
    net.setInput(blob)
    detections = net.forward()
    result_all = []   # every person box found (currently unused; kept for debugging)
    result_black = []
    for i in np.arange(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.3:  # drop weak detections
            idx = int(detections[0, 0, i, 1])
            if CLASSES[idx] == "person":
                # Scale the normalized box back to frame coordinates.
                box = detections[0, 0, i, 3:7] * np.array([frame_w, frame_h,
                                                           frame_w, frame_h])
                startX, startY, endX, endY = box.astype("int")
                x, y, w, h = startX, startY, endX - startX, endY - startY
                result_all.append((confidence, (x, y, w, h)))
                # Average the intensity of the box centre, trimming a 1/6
                # margin on each side.
                cut_size = int(min(w, h) / 6)
                black_value = np.mean(frame[y + cut_size:y + h - cut_size, x + cut_size:x + w - cut_size])
                if black_value < BLACK_CRITERIA:
                    result_black.append((confidence, (x, y, w, h)))
    if result_black:
        # Pick the highest-confidence dark detection (stable sort keeps the
        # original tie-breaking of taking the last equal-confidence entry).
        result_black.sort(key=lambda entry: entry[0])
        return True, result_black[-1][1]
    else:
        return False, None
def find_template(template, full_img):
    """Locate `template` inside `full_img` by cross-correlation.

    Returns ``(crop, (x, y, w, h))`` where `crop` is the best-matching
    region of `full_img` and the tuple is its bounding box.
    """
    tpl_h, tpl_w, _ = template.shape
    scores = cv2.matchTemplate(full_img.copy(), template, cv2.TM_CCOEFF)
    _, _, _, best_loc = cv2.minMaxLoc(scores)
    left, top = best_loc
    return full_img[top:top + tpl_h, left:left + tpl_w], (left, top, tpl_w, tpl_h)
42efd3e55b344db382180d65f36b45d066baab96 | 618 | py | Python | riccipy/metrics/lewis_papapetrou.py | cjayross/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | 4 | 2019-08-17T04:28:06.000Z | 2021-01-02T15:19:18.000Z | riccipy/metrics/lewis_papapetrou.py | grdbii/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | 3 | 2019-08-02T04:07:43.000Z | 2020-06-18T07:49:38.000Z | riccipy/metrics/lewis_papapetrou.py | grdbii/riccipy | 2cc0ca5e1aa4af91b203b3ff2bb1effd7d2f4846 | [
"MIT"
] | null | null | null | """
Name: Lewis Papapetrou
References: Ernst, Phys. Rev., v167, p1175, (1968)
Coordinates: Cartesian
"""
from sympy import Function, Rational, exp, symbols, zeros
coords = symbols("t x y z", real=True)
variables = ()
functions = symbols("k r s w", cls=Function)
t, x, y, z = coords
k, r, s, w = functions
# All four metric potentials depend only on the (x, y) coordinates; bind the
# applied expressions once so the component formulas below stay readable.
K, R, S, W = k(x, y), r(x, y), s(x, y), w(x, y)
metric = zeros(4)
metric[0, 0] = -exp(2 * S)
metric[3, 3] = (exp(-S) * R - W * exp(S)) * (exp(-S) * R + W * exp(S))
metric[0, 3] = metric[3, 0] = -W * exp(2 * S)
metric[1, 2] = metric[2, 1] = Rational(1, 2) * exp(2 * K - 2 * S)
| 30.9 | 77 | 0.553398 |
42efdd1edf57c5e0230ae9edaa82d469b2ef9074 | 2,591 | py | Python | product/admin.py | NarminSH/e-commerce-sellshop-project | a753038c8265473021e21f75b6b095bdc25f43d6 | [
"MIT"
] | null | null | null | product/admin.py | NarminSH/e-commerce-sellshop-project | a753038c8265473021e21f75b6b095bdc25f43d6 | [
"MIT"
] | null | null | null | product/admin.py | NarminSH/e-commerce-sellshop-project | a753038c8265473021e21f75b6b095bdc25f43d6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from modeltranslation.admin import TranslationAdmin
from product.models import (Category, Discount, Review, Product, Properity, ProperityOption,
Image, ShoppingCart, Tag,Wishlist,Color)
class ReviewAdmin(admin.ModelAdmin):
list_display = ('name', 'product', 'created_at', 'email', 'description')
list_filter = ('name', 'product', 'created_at', 'email', 'description')
search_fields = ('name', 'product', 'created_at', 'email', 'description')
admin.site.register(Review, ReviewAdmin)
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ('title', 'description', 'content', 'price', 'category')
list_filter = ('category__title', 'description', 'content')
search_fields = ('title', 'category__title', 'description')
class CategoryAdmin(TranslationAdmin):
list_display = ('title', 'created_at', 'updated_at', 'parent_cat', )
list_filter = ('title', 'created_at', 'updated_at', 'parent_cat')
search_fields = ('title', 'created_at', 'updated_at', 'parent_cat')
admin.site.register(Category, CategoryAdmin)
@admin.register(Discount)
class DiscountAdmin(admin.ModelAdmin):
list_display = ('title','types','amount','is_active', 'created_at', 'updated_at')
list_filter = ('title','types','amount','is_active', 'created_at', 'updated_at')
search_fields = ('title','types','amount','is_active', 'created_at', 'updated_at')
@admin.register(Properity)
class ProperityAdmin(admin.ModelAdmin):
list_display = ('title', 'created_at', 'updated_at')
list_filter = ('title', 'created_at', 'updated_at')
search_fields = ('title', 'created_at', 'updated_at')
@admin.register(ProperityOption)
class ProperityOptionAdmin(admin.ModelAdmin):
list_display = ('title', 'created_at', 'updated_at')
list_filter = ('title', 'created_at', 'updated_at')
search_fields = ('title', 'created_at', 'updated_at')
@admin.register(Color)
class ColorAdmin(admin.ModelAdmin):
list_display = ('title', 'created_at')
list_filter = ('title', 'created_at')
search_fields = ('title', 'created_at')
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list = ('product__title')
search_fields = (['product__title'])
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = (['title'])
search_fields = (['title'])
@admin.register(ShoppingCart)
class ShoppingCartAdmin(admin.ModelAdmin):
list_display = (['product'])
@admin.register(Wishlist)
class WishlistAdmin(admin.ModelAdmin):
list_display = (['product'])
| 31.987654 | 93 | 0.703589 |
42eff7b73d4d9e9bde660bd60b5a65140cceb73c | 3,009 | py | Python | aikatsu_ranking.py | yokky21/aikatsu-ranking | 10d8e4d827414120e721640d42874c26f25c4811 | [
"MIT"
] | null | null | null | aikatsu_ranking.py | yokky21/aikatsu-ranking | 10d8e4d827414120e721640d42874c26f25c4811 | [
"MIT"
] | null | null | null | aikatsu_ranking.py | yokky21/aikatsu-ranking | 10d8e4d827414120e721640d42874c26f25c4811 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.6
# vim: ts=4 sw=4
import requests, lxml.html, json, sys, os, configparser, re
from datetime import datetime
from mastodon import *
## Initializing
host = 'https://bpnavi.jp'
ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1'
url_main = host + '/s/elec/aikatsu_p5/ranking'
url_ajax = host + '/s/elec/aikatsu_p5/item_rankings/more'
rank = []
name = []
post_summary = datetime.now().strftime("%Y-%m-%d %H:%M") + ' 現在のランキング'
post_data = post_summary + "\n"
conf_select = 'aikatsu8'
csvfile = 'aikatsu8.csv'
inifile = configparser.ConfigParser()
inifile.read(os.path.dirname(os.path.abspath(__file__)) + '/mastodon.ini', 'UTF-8')
## Getting main page (CSRF Token)
headers = {'User-Agent': ua}
resp = requests.get(url_main, timeout=30, headers=headers)
main_html = resp.text
cookies = resp.cookies
root = lxml.html.fromstring(main_html)
csrf_token_data = root.xpath('/html/head/meta[@name="csrf-token"]')
csrf_token = csrf_token_data[0].attrib['content']
## Getting ranking data
headers = {'User-Agent': ua,
'Accept': '*/*',
'Origin': host,
'Referer': host + '/s/elec/aikatsu_p5/item_rankings',
'X-CSRF-Token': csrf_token,
'X-Requested-With': 'XMLHttpRequest'}
for page in range(4):
obj = {'page': str(page+1)}
resp = requests.post(url_ajax, timeout=30,
headers=headers, cookies=cookies, data=obj)
if resp.status_code != 200:
sys.exit()
data = json.loads(resp.text)
rank_html = data['attachmentPartial']
root = lxml.html.fromstring(rank_html)
for row in range(3):
for col in range(3):
rank_data = root.xpath('//tr['+ str(row+1) +']/td['+ str(col+1) +']/p["rank"]/font[1]')
name_data = root.xpath('//tr['+ str(row+1) +']/td['+ str(col+1) +']/p["name_vote"]/a[1]')
try:
rank.append(rank_data[0].text.strip())
name.append(name_data[0].text.strip())
except IndexError:
break
else:
continue
break
for num in range(len(rank)):
post_data += rank[num] + name[num] + "\n"
## Create CSV file
# Flatten this run's post text ("summary\nrank+name\n...") into a single
# comma-separated row; re.sub strips the trailing commas left by the final
# newline before the row terminator is re-added.
csv = re.sub(',*$', '', post_data.replace('\n',',')) + "\n"
try:
    # Append the row next to this script. Logging is best-effort and must
    # never stop the bot from posting; the previous bare `except: pass` is
    # narrowed to file-system errors and the handle is closed via `with`.
    with open(os.path.dirname(os.path.abspath(__file__)) + '/' + csvfile, mode='a', encoding='utf-8') as f:
        f.write(csv)
except OSError:
    pass
# print(post_data)
# print(post_summary)
# sys.exit()
## Posting to Mastodon
# Credentials come from the [aikatsu8] section of mastodon.ini loaded above.
mastodon = Mastodon(client_id = inifile.get(conf_select, 'id'),
                    client_secret = inifile.get(conf_select, 'secret'),
                    access_token = inifile.get(conf_select, 'token'),
                    api_base_url = inifile.get(conf_select, 'url'))
# mastodon.toot(post_data)
# Post the ranking behind a summary/content-warning line with the timestamp.
mastodon.status_post(
    post_data,
    spoiler_text=post_summary)
| 33.065934 | 154 | 0.599535 |
42f0f632b463ffb1c555335ca23b1393342b2700 | 1,091 | py | Python | L11-LP-farm-example.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | 10 | 2018-12-23T02:59:06.000Z | 2021-12-07T11:55:21.000Z | L11-LP-farm-example.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | null | null | null | L11-LP-farm-example.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | 7 | 2018-12-21T02:06:51.000Z | 2021-12-11T02:36:47.000Z | import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
# Lecture 11 2-user water allocation example
# First approach: scipy.optimize.linprog
# need matrix form: minimize c^T * x, subject to Ax <= b
# Decision variables x = [xc, xb] (see the cvxpy version below); c is
# negated because linprog minimizes, and the last two rows of A/b encode
# the non-negativity bounds explicitly.
c = [-5, -3] # negative to maximize
A = [[10,5], [1,1.5], [2,2], [-1,0], [0,-1]]
b = [20, 3, 4.5, 0, 0]
sol = optimize.linprog(c, A, b)
print('Scipy Output:')
print(sol)
# Second approach: cxvpy
# this import is easy but also could be confusing
# because it overwrites common functions (sum, mean, etc) with cvxpy functions
# from cvxpy import *
# xc = Variable(name='xc')
# xb = Variable(name='xb')
# pc = 5
# pb = 3
# obj = Maximize(pc*xc + pb*xb)
# constraints = [10*xc + 5*xb <= 20,
#                xc + 1.5*xb <= 3,
#                2*xc + 2*xb <= 4.5,
#                xc >= 0,
#                xb >= 0]
# prob = Problem(obj, constraints)
# prob.solve()
# print('\ncvxpy Output:')
# print('Objective = %f' % obj.value)
# print('xc = %f' % xc.value)
# print('xb = %f' % xb.value)
# for c in constraints:
#     print('Dual (%s) = %f' % (c, c.dual_value))
| 23.717391 | 78 | 0.582035 |
42f12d3200ce4d7e07aaba09b537e0ff03fb831a | 1,471 | py | Python | prev_ob_models/exclude/GilraBhalla2015/synapses/synapseConstantsMinimal.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | prev_ob_models/exclude/GilraBhalla2015/synapses/synapseConstantsMinimal.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | prev_ob_models/exclude/GilraBhalla2015/synapses/synapseConstantsMinimal.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | [
"MIT"
] | null | null | null | ## This file used to be programmatically generated for converging to best fit Activity Dependent Inhibition curve.
## But that doesn't give decent result, so set by hand.
import sys
sys.path.extend(["../networks"])
## do not import networkConstants as that imports this file, and it's circular then!!!
from networkConstantsMinimal import *
## STRONG_SYNAPSES is defined in networkConstants, but can't import it due to reason above,
## so duplicating the directed and frac_directed check below again.
## For STRONG_SYNAPSES i.e differential connectivity set mitral -> granule base excitation to 0.2nS
## else, for random / uniform connectivity, set the base value to 0.3nS
## This is to get the same amount of activity dependent inhibition (Arevian et al)
## for the different network connectivities...
# Differential (directed) connectivity uses a weaker granule->mitral GABA
# Gbar than uniform/random connectivity so both produce a comparable amount
# of activity dependent inhibition (Arevian et al), per the notes above.
if directed and frac_directed>0.0:
    mitral_granule_AMPA_Gbar = 0.2e-9 # Siemens
    granule_mitral_GABA_Gbar = 1.0e-9#12.0e-09 # Siemens
else: #### confirm ADI for 0% frac_directed setting below
    ## 0.3e-9 for 3% frac_directed, _mod mitral,
    ## but 0.2e-9 for 1% frac_directed, _mod_spikeinit mitral
    mitral_granule_AMPA_Gbar = 0.2e-9#0.3e-9 # Siemens
    granule_mitral_GABA_Gbar = 1.5e-9#12.0e-09 # Siemens
## For the _mod mitral with _spikeinit,
## self Gbar below must be reduced to 5 pS, else huge self-inhibition
## For the _mod mitral, 50 pS is fine, it doesn't get affected much by inhibition!
self_mitral_GABA_Gbar = 5e-12#5e-12#50e-12 # Siemens
| 54.481481 | 114 | 0.755948 |
42f674ee12a896bdc6fefab4b830b689f09ef5e4 | 499 | py | Python | agoge/__init__.py | Nintorac/agoge | 0abe66e41e4fcd865854cc009374e2a52ef5671c | [
"MIT"
] | null | null | null | agoge/__init__.py | Nintorac/agoge | 0abe66e41e4fcd865854cc009374e2a52ef5671c | [
"MIT"
] | null | null | null | agoge/__init__.py | Nintorac/agoge | 0abe66e41e4fcd865854cc009374e2a52ef5671c | [
"MIT"
] | null | null | null | from .utils import defaults_f
DEFAULTS = defaults_f({
    # Fallback configuration values; presumably each key can be overridden
    # externally via defaults_f -- confirm against agoge.utils.defaults_f.
    'ARTIFACTS_ROOT': '~/agoge/artifacts',
    'TQDM_DISABLED': False,
    'TRIAL_ROOT': 'Worker',
    'BUCKET': 'nintorac_model_serving',
    'BASE_URL': 'https://github.com/Nintorac/NeuralDX7-weights/raw/master'
})
from .data_handler import DataHandler
from .model import AbstractModel
from .solver import AbstractSolver
from .train_worker import TrainWorker
from .inference_worker import InferenceWorker
from .lmdb_helper import LMDBDataset | 31.1875 | 74 | 0.771543 |
42f8e8791025cfd39e8878d6744a088d9902c8a3 | 1,206 | py | Python | test/variable_type.py | bourne7/demo-python | 0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f | [
"MIT"
] | null | null | null | test/variable_type.py | bourne7/demo-python | 0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f | [
"MIT"
] | null | null | null | test/variable_type.py | bourne7/demo-python | 0c4dd12475bcada4e5826b7117bd4c4bdcedfd9f | [
"MIT"
] | null | null | null | def do_loop():
print('Being Invoked.')
# `*` collects the positional arguments into a tuple.
def fun1(*args):
    """Print each positional argument on its own line.

    Calling fun1(1, 2, 3) binds args to the tuple (1, 2, 3).
    """
    for value in args:
        print(value)
# `**` collects the keyword arguments into a dict.
def fun2(**args):
    """Print each keyword argument as "key : value".

    Calling fun2(a=1, b=2) binds args to {'a': 1, 'b': 2}.

    Bug fix: iterating a dict yields only its keys, so the original
    ``for k, v in args`` raised ValueError for most key lengths (and
    silently mis-split two-character keys); ``.items()`` yields the
    (key, value) pairs as intended.
    """
    for k, v in args.items():
        print(k, ":", v)
# The six standard data types of Python 3.
def show_type():
    """Print a label and the concrete type of one sample of each kind.

    Note: isinstance(1, int) also matches parent classes; type() does not.
    """
    # Immutable samples first (number, string, tuple), then the mutable
    # containers (set, list, dict).
    samples = [
        ('Number 数字', 123),
        ('String 字符串', 'Hello'),
        ('Tuple 元组', ('Hi', 786, 2.23, 'john', 70.2)),
        ('Sets 集合', {1, 2, 3, 4, 5}),
        ('List 列表', [1, 2, 3, 4, 5, 6]),
        ('Dictionary 字典', {'a': 'apple', 'b': 'banana', 'z': 1000}),
    ]
    for label, sample in samples:
        print(label, type(sample))
def test_mutable():
    """Show that in-place list operations keep both names bound to one object."""
    shared = [1, 2, 3]
    alias = shared
    print(id(shared), id(alias))
    # None of these rebinds `alias`: append/extend and the augmented `+=`
    # all mutate the existing list in place, so the object ids are unchanged.
    alias.append(4)
    alias += [4]
    alias.extend([4])
    # `alias = alias + [4]` WOULD change the id: `+` creates a new list.
    print(id(shared), id(alias))
    print(shared)
    print(alias)
if __name__ == '__main__':
    # Demo entry point: exercise the type and mutability examples.
    print('Start test as main.')
    show_type()
    test_mutable()
| 19.451613 | 74 | 0.543118 |
42f979541235624972aa7beb6b4040036e613c33 | 951 | py | Python | scrapystsytem/spiders/doubanmoviespider.py | mezhou887/ScrapySystem2017 | 888ac42bba36b541845244596db1644e332bf291 | [
"Apache-2.0"
] | null | null | null | scrapystsytem/spiders/doubanmoviespider.py | mezhou887/ScrapySystem2017 | 888ac42bba36b541845244596db1644e332bf291 | [
"Apache-2.0"
] | null | null | null | scrapystsytem/spiders/doubanmoviespider.py | mezhou887/ScrapySystem2017 | 888ac42bba36b541845244596db1644e332bf291 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from scrapystsytem.misc.commonspider import CommonSpider
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor as sle
logger = logging.getLogger(__name__)
class DoubanMovieSpider(CommonSpider):
    """Crawl the Douban movie chart and scrape per-movie rating details."""

    name = "doubanmovie"
    allowed_domains = ["douban.com"]
    start_urls = ["https://movie.douban.com/chart"]

    # Follow every /subject/<id>/ link and hand the page to parse_subject.
    rules = [
        Rule(sle(allow=("/subject/[0-9]+/$")), callback='parse_subject', follow=True),
    ]

    # CSS selectors applied to a movie page by parse_with_rules.
    content_css_rules = {
        'rating_per': '.rating_per::text',
        'rating_num': '.rating_num::text',
        'title': 'h1 span:nth-child(1)::text',
        'rating_people': '.rating_people span::text',
    }

    def parse_subject(self, response):
        """Extract one movie's fields as a dict and log what was scraped."""
        item = self.parse_with_rules(response, self.content_css_rules, dict)
        logger.info('function: parse_subject, url: ' + response.url + ' , item: ' + str(item))
        return item
42faa478c98edc7e43520c1e76c93b612e769679 | 560 | py | Python | hurricane/base.py | ericflo/hurricane | c192b711b2b1c06a386d1a1a47f538b13a659cde | [
"BSD-3-Clause"
] | 8 | 2015-02-21T17:59:41.000Z | 2021-01-07T20:57:39.000Z | hurricane/base.py | ericflo/hurricane | c192b711b2b1c06a386d1a1a47f538b13a659cde | [
"BSD-3-Clause"
] | null | null | null | hurricane/base.py | ericflo/hurricane | c192b711b2b1c06a386d1a1a47f538b13a659cde | [
"BSD-3-Clause"
] | 2 | 2016-07-09T16:06:23.000Z | 2016-08-02T18:44:20.000Z | import uuid
class Message(object):
    """Value object for a single event, tagged with a fresh random UUID."""

    def __init__(self, kind, timestamp, raw_data):
        self.kind = kind
        self.timestamp = timestamp
        self.raw_data = raw_data
        # Unique identifier generated per instance.
        self.uuid = str(uuid.uuid4())

    def _asdict(self):
        """Return the message's fields as a plain dict (namedtuple-style API)."""
        return {field: getattr(self, field)
                for field in ('kind', 'timestamp', 'raw_data', 'uuid')}

    def __str__(self):
        return str(self._asdict())

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, str(self))
| 23.333333 | 59 | 0.542857 |
42fb56f78da3eca5f6dfd2e9de1258342401faa4 | 469 | py | Python | nbexchange/handlers/__init__.py | jgwerner/nbexchange | 510aa8fdff04b0873cec5dd75d3dfb0eac820c1b | [
"BSD-3-Clause"
] | 7 | 2020-04-30T20:16:18.000Z | 2021-09-11T20:31:51.000Z | nbexchange/handlers/__init__.py | jgwerner/nbexchange | 510aa8fdff04b0873cec5dd75d3dfb0eac820c1b | [
"BSD-3-Clause"
] | 86 | 2020-03-06T15:34:55.000Z | 2022-03-07T11:58:06.000Z | nbexchange/handlers/__init__.py | jgwerner/nbexchange | 510aa8fdff04b0873cec5dd75d3dfb0eac820c1b | [
"BSD-3-Clause"
] | 1 | 2020-07-25T23:04:51.000Z | 2020-07-25T23:04:51.000Z | from nbexchange.handlers.assignment import Assignment, Assignments
from nbexchange.handlers.collection import Collection, Collections
from nbexchange.handlers.feedback import FeedbackHandler
from nbexchange.handlers.pages import HomeHandler
from nbexchange.handlers.submission import Submission, Submissions
# All request handlers exported by this package; consumers register them in
# this order (assignment/collection/submission APIs, the home page, and the
# feedback handler).
default_handlers = [
    Assignment,
    Assignments,
    Collection,
    Collections,
    Submission,
    Submissions,
    HomeHandler,
    FeedbackHandler,
]
| 27.588235 | 66 | 0.803838 |
42fe26b4d9e2cf96a145d2ebd3a33d07d37ab54e | 2,476 | py | Python | 09/09b.py | thejoeejoee/aoc-2021 | 1ae7650aea42b5fbb60e891687cf7bc84c81bd66 | [
"MIT"
] | 1 | 2021-12-01T17:43:38.000Z | 2021-12-01T17:43:38.000Z | 09/09b.py | thejoeejoee/aoc-2021 | 1ae7650aea42b5fbb60e891687cf7bc84c81bd66 | [
"MIT"
] | null | null | null | 09/09b.py | thejoeejoee/aoc-2021 | 1ae7650aea42b5fbb60e891687cf7bc84c81bd66 | [
"MIT"
] | null | null | null | #!/bin/env python3
import operator
from _operator import attrgetter, itemgetter
from collections import defaultdict, Counter
from functools import reduce, partial
from itertools import chain
from aocd import get_data
EMPTY = type('EMPTY', (int,), dict(__repr__=(f := lambda s: 'EMPTY'), __str__=f))(10)
def windowed(seq, n):
    """Yield every length-`n` contiguous slice of `seq`, left to right."""
    start = 0
    while start + n <= len(seq):
        yield seq[start: start + n]
        start += 1
def compose(*fs):
    """Right-to-left function composition: compose(f, g)(x) == f(g(x))."""
    def composed(value):
        for fn in reversed(fs):
            value = fn(value)
        return value
    return composed
# Puzzle input as a list of digit strings; main() later pads the grid with a
# one-cell EMPTY border on every side, hence the +2 on both dimensions.
heights = get_data().strip().splitlines()
HEIGHT = len(heights) + 2
WIDTH = len(heights[0]) + 2
def get_neighbors(data, pos):
    """Yield ((r, c), value) for each in-bounds 4-neighbour of `pos`.

    `data` is the flattened padded grid, indexed as r * WIDTH + c.
    """
    row, col = pos
    candidates = (
        (row, col + 1),
        (row, col - 1),
        (row + 1, col),
        (row - 1, col),
    )
    for p in candidates:
        r, c = p
        if 0 <= r < HEIGHT and 0 <= c < WIDTH:
            yield p, data[r * WIDTH + c]
def find_low_points(levels):
    # Slide a 3-wide window over the flattened padded grid; each window's
    # middle element is a candidate cell whose left/right neighbours come
    # straight from the window and whose top/bottom neighbours are reached
    # with +-WIDTH offsets. Yields (row, col) of every strict local minimum.
    for triplet_i, triplet in filter(
        # turbo magic to get triples (with indexes) with center item which is NOT EMPTY
        compose(partial(operator.ne, EMPTY), itemgetter(1), itemgetter(1)),
        # start=1 makes triplet_i the flat index of the window's CENTRE
        # element (window i covers levels[i:i+3], centre at i+1).
        enumerate(windowed(levels, 3), start=1)  # wtf dunno why to start at 1
    ):
        row = triplet_i // WIDTH
        col = triplet_i % WIDTH
        left, center, right = triplet
        top = levels[(row - 1) * WIDTH + col]
        bottom = levels[(row + 1) * WIDTH + col]
        # Strictly lower than all four neighbours -> a basin's low point.
        if all(map(partial(operator.lt, center), (left, right, top, bottom))):
            yield row, col
def main():
    """Part two: return the product of the sizes of the three largest basins."""
    # Flattened (HEIGHT x WIDTH) grid with a one-cell EMPTY border so that
    # neighbour lookups never need edge special-casing.
    data = tuple(chain(
        (EMPTY for _ in range(WIDTH)),
        *(((EMPTY,) + tuple(int(c) for c in line) + (EMPTY,)) for line in heights),
        (EMPTY for _ in range(WIDTH)),
    ))
    basins = Counter()
    for low_point in find_low_points(data):
        # Flood-fill uphill from the low point; a basin is bounded by 9s and
        # the EMPTY border.
        known = set()
        to_explore = {low_point}
        # not BFS, dot DFS? just JoeFS
        while to_explore:
            exploring = to_explore.pop()
            known.add(exploring)
            r, c = exploring
            current = data[r * WIDTH + c]
            for neighbor, level in get_neighbors(data, exploring):
                # Bug fix: the visited check used to be `if level in known`,
                # comparing an int height against a set of (row, col) tuples,
                # so it never matched and cells were pointlessly re-explored.
                # The final counts were unaffected (known is a set), but the
                # position is what must be tested.
                if neighbor in known:
                    continue
                if level > current and level not in (EMPTY, 9):
                    to_explore.add(neighbor)
        basins[low_point] = len(known)
    return reduce(
        operator.mul,
        map(itemgetter(1), basins.most_common(3))
    )
if __name__ == '__main__':
    # Print the answer for part two.
    print(main())
| 26.340426 | 91 | 0.560582 |
42ff0390633d326bb027aa10d5b16efa20802940 | 1,343 | py | Python | tests/test_window.py | yogeshkumarpilli/detectron2 | f4f276dc8239b2c5a1bbbf6ed234acd25c75a522 | [
"Apache-2.0"
] | null | null | null | tests/test_window.py | yogeshkumarpilli/detectron2 | f4f276dc8239b2c5a1bbbf6ed234acd25c75a522 | [
"Apache-2.0"
] | null | null | null | tests/test_window.py | yogeshkumarpilli/detectron2 | f4f276dc8239b2c5a1bbbf6ed234acd25c75a522 | [
"Apache-2.0"
] | 3 | 2021-12-17T04:28:02.000Z | 2022-02-22T18:18:03.000Z | from detectron2.engine import DefaultPredictor
from detectron2.data import MetadataCatalog
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2 import model_zoo
import cv2
import numpy as np
import requests
# Detectron2 smoke test: download one image, run Faster R-CNN on it, and show
# the visualized detections in an OpenCV window.
# Load an image
res = requests.get("https://thumbor.forbes.com/thumbor/fit-in/1200x0/filters%3Aformat%28jpg%29/https%3A%2F%2Fspecials-images.forbesimg.com%2Fimageserve%2F5f15af31465263000625ce08%2F0x0.jpg")
# Decode the raw HTTP body into a BGR image array.
image = np.asarray(bytearray(res.content), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
# Model-zoo config for COCO Faster R-CNN (R-101 FPN, 3x schedule).
config_file = 'COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml'
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(config_file))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.75 # Threshold
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
cfg.MODEL.DEVICE = "cuda" # cpu or cuda
# Create predictor
predictor = DefaultPredictor(cfg)
# Make prediction
output = predictor(image)
print(output)
# Visualizer expects RGB, hence the [:, :, ::-1] channel flip both ways.
v = Visualizer(image[:, :, ::-1],
scale=0.8,
metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
instance_mode=ColorMode.IMAGE
)
v = v.draw_instance_predictions(output["instances"].to("cpu"))
cv2.imshow('images', v.get_image()[:, :, ::-1])
cv2.waitKey(0)
42ff644535c1107deafd0fab424dd9161db0897b | 9,920 | py | Python | hydra/cli.py | albertoa/hydra | 8161e75829e4e76cb91ce516bbf03c258a87ce9e | [
"Apache-2.0"
] | 28 | 2020-11-05T16:04:51.000Z | 2021-02-16T22:58:10.000Z | hydra/cli.py | albertoa/hydra | 8161e75829e4e76cb91ce516bbf03c258a87ce9e | [
"Apache-2.0"
] | 43 | 2020-11-06T19:21:39.000Z | 2021-02-25T19:04:42.000Z | hydra/cli.py | albertoa/hydra | 8161e75829e4e76cb91ce516bbf03c258a87ce9e | [
"Apache-2.0"
] | 4 | 2020-11-06T08:54:57.000Z | 2021-01-18T03:26:00.000Z | import os
import yaml
import json
import click
import hydra.utils.constants as const
from hydra.utils.git import check_repo
from hydra.utils.utils import dict_to_string, inflate_options
from hydra.cloud.local_platform import LocalPlatform
from hydra.cloud.fast_local_platform import FastLocalPlatform
from hydra.cloud.google_cloud_platform import GoogleCloudPlatform
from hydra.cloud.aws_platform import AWSPlatform
from hydra.version import __version__
# Root click command group; subcommands such as `run` attach to it.
# (Kept docstring-free on purpose: click would surface a docstring as help text.)
@click.group()
@click.version_option(__version__)
def cli():
    pass
# `hydra run`: resolve configuration (YAML file first, CLI flags as fallback),
# expand the experiment option grid, then dispatch each experiment to the
# selected platform backend (fast_local / local / gcp / aws).
@cli.command()
# Generic options
@click.option('-y', '--yaml_path', default='hydra.yaml', type=str)
@click.option('-p', '--project_name', default=None, type=str)
@click.option('-m', '--model_path', default=None, type=str)
@click.option('--cloud', default=None, type=click.Choice(['fast_local','local', 'aws', 'gcp', 'azure'], case_sensitive=False))
@click.option('--github_token', envvar='GITHUB_TOKEN') # Takes either an option or environment var
# Cloud specific options
@click.option('--cpu_count', default=None, type=click.IntRange(0, 96), help='Number of CPU cores required')
@click.option('--memory_size', default=None, type=click.IntRange(0, 624), help='GB of RAM required')
@click.option('--gpu_count', default=None, type=click.IntRange(0, 8), help="Number of accelerator GPUs")
@click.option('--gpu_type', default=None, type=str, help="Accelerator GPU type")
@click.option('--region', default=None, type=str, help="Region of cloud server location")
# AWS specific options
@click.option('--metadata_db_hostname', default=None, type=str, help="Hostname of the RDS instance storing job metadata")
@click.option('--metadata_db_username_secret', default=None, type=str, help="Secret name in AWS of the username of the RDS instance storing job metadata")
@click.option('--metadata_db_password_secret', default=None, type=str, help="Secret name in AWS of the password of the RDS instance storing job metadata")
@click.option('--metadata_db_name', default=None, type=str, help="Database name of the RDS instance storing job metadata")
# Docker Options
@click.option('-t', '--image_tag', default=None, type=str, help="Docker image tag name")
@click.option('-u', '--image_url', default=None, type=str, help="Url to the docker image on cloud")
# Env variable of model file
@click.option('-o', '--options', default=None, type=str, help='Environmental variables for the script')
def run(
yaml_path,
project_name,
model_path,
cloud,
github_token,
cpu_count,
memory_size,
gpu_count,
gpu_type,
region,
metadata_db_hostname,
metadata_db_username_secret,
metadata_db_password_secret,
metadata_db_name,
image_tag,
image_url,
options):
    # If YAML config file available to supplement the command line arguments
    if os.path.isfile(yaml_path):
        with open(yaml_path) as f:
            print("[Hydra Info]: Loading run info from {}...".format(yaml_path))
            data = yaml.load(f, Loader=yaml.FullLoader)
        run_data = data.get('run', '')
        # NOTE(review): project_name is only required on the YAML path; the
        # CLI-only path below lets it stay None — confirm AWSPlatform tolerates that.
        project_name = run_data.get('project_name')
        if project_name is None:
            raise ValueError("project_name option is required")
        # Each setting below: explicit CLI flag wins, otherwise the YAML value,
        # otherwise the package default constant.
        model_path = run_data.get('model_path', const.MODEL_PATH_DEFAULT) if model_path is None else model_path
        cloud = run_data.get('cloud', const.CLOUD_DEFAULT).lower() if cloud is None else cloud
        image_tag = run_data.get('image_tag', const.IMAGE_TAG_DEFAULT) if image_tag is None else image_tag
        image_url = run_data.get('image_url', const.IMAGE_URL_DEFAULT) if image_url is None else image_url
        if image_tag == '' and image_url != '':
            raise Exception("image_tag is required when passing a custom image_url")
        if cloud == 'gcp' or cloud == 'aws':
            region = run_data.get('region', const.REGION_DEFAULT) if region is None else region
            cpu_count = run_data.get('cpu_count', const.CPU_COUNT_DEFAULT) if cpu_count is None else cpu_count
            memory_size = run_data.get('memory_size', const.MEMORY_SIZE_DEFAULT) if memory_size is None else memory_size
            gpu_count = run_data.get('gpu_count', const.GPU_COUNT_DEFAULT) if gpu_count is None else gpu_count
            gpu_type = run_data.get('gpu_type', const.GPU_TYPE_DEFAULT) if gpu_type is None else gpu_type
            if cloud == 'aws':
                metadata_db_hostname = run_data.get('metadata_db_hostname', const.METADATA_DB_HOSTNAME) if metadata_db_hostname is None else metadata_db_hostname
                metadata_db_username_secret = run_data.get('metadata_db_username_secret', const.METADATA_DB_USERNAME_SECRET) if metadata_db_username_secret is None else metadata_db_username_secret
                metadata_db_password_secret = run_data.get('metadata_db_password_secret', const.METADATA_DB_PASSWORD_SECRET) if metadata_db_password_secret is None else metadata_db_password_secret
                metadata_db_name = run_data.get('metadata_db_name', const.METADATA_DB_NAME) if metadata_db_name is None else metadata_db_name
        elif cloud == 'local' or cloud == 'fast_local':
            pass
        else:
            raise RuntimeError("Reached parts of Hydra that are either not implemented or recognized.")
        options_list = run_data.get('options', const.OPTIONS_DEFAULT) if options is None else options
        if type(options_list) is str:
            options_list = json.loads(options_list)
    # Read the options for run from CIL
    else:
        model_path = const.MODEL_PATH_DEFAULT if model_path is None else model_path
        cloud = const.CLOUD_DEFAULT if cloud is None else cloud
        region = const.REGION_DEFAULT if region is None else region
        cpu_count = const.CPU_COUNT_DEFAULT if cpu_count is None else cpu_count
        memory_size = const.MEMORY_SIZE_DEFAULT if memory_size is None else memory_size
        gpu_count = const.GPU_COUNT_DEFAULT if gpu_count is None else gpu_count
        gpu_type = const.GPU_TYPE_DEFAULT if gpu_type is None else gpu_type
        image_tag = const.IMAGE_TAG_DEFAULT if image_tag is None else image_tag
        image_url = const.IMAGE_URL_DEFAULT if image_url is None else image_url
        options = str(const.OPTIONS_DEFAULT) if options is None else options
        options_list = json.loads(options)
        if cloud == 'aws':
            metadata_db_hostname = const.METADATA_DB_HOSTNAME if metadata_db_hostname is None else metadata_db_hostname
            metadata_db_username_secret = const.METADATA_DB_USERNAME_SECRET if metadata_db_username_secret is None else metadata_db_username_secret
            metadata_db_password_secret = const.METADATA_DB_PASSWORD_SECRET if metadata_db_password_secret is None else metadata_db_password_secret
            metadata_db_name = const.METADATA_DB_NAME if metadata_db_name is None else metadata_db_name
    # Normalize a single options dict into a one-element list, then expand the
    # option grid into one dict per experiment.
    if isinstance(options_list, dict):
        options_list = [options_list]
    options_list_inflated = inflate_options(options_list)
    # AWS resolves the repo server-side; everything else inspects the local repo.
    if cloud == 'aws':
        git_url, commit_sha = '', ''
    else:
        git_url, commit_sha = check_repo(github_token)
    # Environment passed to every experiment so the runner can locate the code.
    hydra_core_configs = {
'HYDRA_PLATFORM': cloud,
'HYDRA_GIT_URL': git_url or '',
'HYDRA_COMMIT_SHA': commit_sha or '',
'HYDRA_OAUTH_TOKEN': github_token,
'HYDRA_MODEL_PATH': model_path
    }
    print("\n[Hydra Info]: Executing experiments with the following options: \n {}\n".format(options_list_inflated))
    for i, options in enumerate(options_list_inflated):
        options_str = dict_to_string(options)
        hydra_core_configs_str = dict_to_string(hydra_core_configs)
        print("\n[Hydra Info]: Runnning experiment #{} with the following options: \n {}\n".format(i, options))
        if cloud == 'fast_local':
            platform = FastLocalPlatform(model_path,
f"{options_str} {hydra_core_configs_str}")
            platform.run()
            continue
        if cloud == 'local':
            platform = LocalPlatform(
model_path=model_path,
options=options_str,
git_url=git_url,
commit_sha=commit_sha,
github_token=github_token,
image_url=image_url,
image_tag=image_tag)
        elif cloud == 'gcp':
            platform = GoogleCloudPlatform(
model_path=model_path,
github_token=github_token,
cpu=cpu_count,
memory=memory_size,
gpu_count=gpu_count,
gpu_type=gpu_type,
region=region,
git_url=git_url,
commit_sha=commit_sha,
image_url=image_url,
image_tag=image_tag,
options=options_str)
        elif cloud == 'aws':
            # NOTE(review): AWS gets the raw options dict while other backends
            # get the stringified form — looks intentional, but worth confirming.
            platform = AWSPlatform(
model_path=model_path,
project_name=project_name,
github_token=github_token,
cpu=cpu_count,
memory=memory_size,
gpu_count=gpu_count,
region=region,
git_url=git_url,
commit_sha=commit_sha,
hydra_version=__version__,
metadata_db_hostname=metadata_db_hostname,
metadata_db_username_secret=metadata_db_username_secret,
metadata_db_password_secret=metadata_db_password_secret,
metadata_db_name=metadata_db_name,
image_url=image_url,
image_tag=image_tag,
options=options
)
        else:
            raise RuntimeError("Reached parts of Hydra that are not yet implemented.")
        platform.run()
    return 0
| 44.684685 | 200 | 0.674698 |
6e001fac10af046d03ee8754375ce8c560a47171 | 773 | py | Python | _estudoPython_solid/requests/request.py | carlos-freitas-gitHub/Python_Analise_De_Dados | 74a72772179f45684f4f12acd4ad607c99ed8107 | [
"Apache-2.0"
] | null | null | null | _estudoPython_solid/requests/request.py | carlos-freitas-gitHub/Python_Analise_De_Dados | 74a72772179f45684f4f12acd4ad607c99ed8107 | [
"Apache-2.0"
] | null | null | null | _estudoPython_solid/requests/request.py | carlos-freitas-gitHub/Python_Analise_De_Dados | 74a72772179f45684f4f12acd4ad607c99ed8107 | [
"Apache-2.0"
] | null | null | null | '''requests
biblioteca beaultiful solp para páginas web.
'''
from builtins import print
import requests
# Example request payloads (HTTP headers travel with the request); translated
# from the original Portuguese notes:
#   cabecalho = {'User-agent': 'Windows 12',
#                'Referer': 'https://google.com.br'}
#   meus_cookies = {'Ultima-visita': '10-10-2020'}
#   meus_dados = {'Username': 'Guigui',
#                 'Password': '12345'}
#   headers=cabecalho, cookies=meus_cookies, data=meus_dados
# BUG FIX: `status` and `text` were bound only inside the `try` block, so a
# failed request crashed with a NameError at the prints below. Give them
# safe defaults first.
status = None
text = ''
try:
    # these data should be sent via POST only
    requisicao = requests.post('http://uniesp.edu.br/sites/maua/')
    status = requisicao.status_code
    text = requisicao.text
except Exception as err:
    print('Erro', err)
print('+=' *30)
print('Status:', status)
print('+=' *30)
print(text)
6e01596134dc9f1610c5e8f76e5d30c43961114c | 23,738 | py | Python | Tac Tac Toe/ttt_mobile_1080p.py | promitbasak/TicTacToe-Pygame | 6114cee9498d70942f48a0b6eb360f02bcf72df0 | [
"MIT"
] | 3 | 2020-06-15T13:50:51.000Z | 2021-08-18T05:10:17.000Z | Tac Tac Toe/ttt_mobile_1080p.py | promitbasak/TicTacToe-Pygame | 6114cee9498d70942f48a0b6eb360f02bcf72df0 | [
"MIT"
] | null | null | null | Tac Tac Toe/ttt_mobile_1080p.py | promitbasak/TicTacToe-Pygame | 6114cee9498d70942f48a0b6eb360f02bcf72df0 | [
"MIT"
] | 1 | 2020-06-15T13:52:49.000Z | 2020-06-15T13:52:49.000Z | import pygame
import random
import sys
# Board geometry and game constants.
Xfactor = 1.35
Yfactor = 3.2
CELLS = 9
PLAYERS = 2
CORNERS = [1, 3, 7, 9]
NON_CORNERS = [2, 4, 6, 8]
# board maps cell number (1-9) -> 0 empty, 1 X, 2 O.
board = {}
for i in range(9):
    board[i + 1] = 0
signs = {0: " ", 1: "X", 2: "O"}
winner = None
# Top-left pixel of the board image on screen.
boardX = 10
boardY = 464
# NOTE(review): all images are loaded (and the icon set) before pygame.init()
# is called at the bottom of the file — appears to work, but confirm on the
# target pygame version.
icon = pygame.image.load("ttticon2.png")
pygame.display.set_icon(icon)
fpsClock = pygame.time.Clock()
boardimg = pygame.image.load("board3dr.png")
crossimg = pygame.image.load("cross3dr.png")
roundimg = pygame.image.load("cuber.png")
bannerimg = pygame.image.load("tttbannerr.png")
winimg = pygame.image.load("winsmallr.png")
loseimg = pygame.image.load("losesmallr.png")
drawimg = pygame.image.load("drawsmallr.png")
markerimg = pygame.image.load("markerr.png")
diffimg = pygame.image.load("difficultyr.png")
backimg = pygame.image.load("backr.png")
clickimg = pygame.image.load("clickr.png")
def rpermutation(a):
    """Lazily yield the elements of `a` in a uniformly random order.

    Works on a private copy, so the caller's sequence is untouched.
    """
    pool = a[:]
    while pool:
        yield pool.pop(random.randint(0, len(pool) - 1))
class player:
    """Base Tic-Tac-Toe player: holds a display name and a mark (1 = X, 2 = O)."""

    def __init__(self, name, mark):
        self.name = name
        self.mark = mark
        self.antimark = mark % 2 + 1  # the opponent's mark
        self.sign = "X" if mark == 1 else "O"
        self.playings = []

    def getturn(self):
        """Announce the turn on stdout; subclasses return the chosen cell."""
        print(f"\n{self.name}'s Turn:")
        print(f"\n{self.name} is giving his turn")
        print()
class user(player):
    """Human player: keeps polling the UI until a free cell is chosen."""

    def getturn(self):
        while True:
            cell = getinput()
            occupant = board[cell]
            if occupant == 0:
                print("Good turn")
                return cell
            if occupant == self.mark:
                print("You have already used that cell, please choose another!!!")
            else:
                print("Opponent has already used that cell, please choose another!!!")
class easy(player):
    """Easiest AI: plays a uniformly random empty cell."""

    def getturn(self):
        super().getturn()
        return random.choice(getemptycells())
class medium(player):
    """Medium AI: completes its own winning line if one exists, otherwise
    blocks the opponent's winning line, otherwise plays randomly."""
    def getturn(self):
        """Return the chosen cell (1-9)."""
        super().getturn()
        # First pass: look for a line where we already hold two cells
        # (sum == 2 * mark with our mark present) and take the empty one.
        for i in range(3):
            row = [board[i * 3 + 1], board[i * 3 + 2], board[i * 3 + 3]]
            if sum(row) == 2 * self.mark and (self.mark in row):
                try:
                    # print("1row")
                    return cellvalidator(i * 3 + row.index(0) + 1)
                except:
                    pass
            col = [board[i + 1], board[i + 4], board[i + 7]]
            if sum(col) == 2 * self.mark and (self.mark in col):
                try:
                    # print("1col")
                    return cellvalidator(i + col.index(0) * 3 + 1)
                except:
                    pass
            diag = [board[1], board[5], board[9]]
            if sum(diag) == 2 * self.mark and (self.mark in diag):
                try:
                    # print("1diag")
                    return cellvalidator(diag.index(0) * 4 + 1)
                except:
                    pass
            antidiag = [board[3], board[5], board[7]]
            if sum(antidiag) == 2 * self.mark and (self.mark in antidiag):
                try:
                    # print("1antidiag")
                    return cellvalidator(3 + antidiag.index(0) * 2)
                except:
                    pass
        # Second pass: same scan for the opponent's mark, to block their win.
        for i in range(3):
            row = [board[i * 3 + 1], board[i * 3 + 2], board[i * 3 + 3]]
            if sum(row) == 2 * self.antimark and (self.antimark in row):
                try:
                    # print("row")
                    return cellvalidator(i * 3 + row.index(0) + 1)
                except:
                    pass
            col = [board[i + 1], board[i + 4], board[i + 7]]
            if sum(col) == 2 * self.antimark and (self.antimark in col):
                try:
                    # print("col")
                    return cellvalidator(i + col.index(0) * 3 + 1)
                except:
                    pass
            diag = [board[1], board[5], board[9]]
            if sum(diag) == 2 * self.antimark and (self.antimark in diag):
                try:
                    # print("diag")
                    return cellvalidator(diag.index(0) * 4 + 1)
                except:
                    pass
            antidiag = [board[3], board[5], board[7]]
            if sum(antidiag) == 2 * self.antimark and (self.antimark in antidiag):
                try:
                    # print("antidiag")
                    return cellvalidator(3 + antidiag.index(0) * 2)
                except:
                    pass
        # Fallback: random empty cell.
        while True:
            turn = random.choice(getemptycells())
            return turn
class hard(player):
    """Hard AI: win/block like `medium`, then prefer corners adjacent to an
    occupied corner, then the center, then mirror non-corner moves."""
    def getturn(self):
        """Return the chosen cell (1-9)."""
        super().getturn()
        # Win: take the empty cell of any line where we hold two cells.
        for i in range(3):
            row = [board[i * 3 + 1], board[i * 3 + 2], board[i * 3 + 3]]
            if sum(row) == 2 * self.mark and (self.mark in row):
                try:
                    # print("1row")
                    return cellvalidator(i * 3 + row.index(0) + 1)
                except:
                    pass
            col = [board[i + 1], board[i + 4], board[i + 7]]
            if sum(col) == 2 * self.mark and (self.mark in col):
                try:
                    # print("1col")
                    return cellvalidator(i + col.index(0) * 3 + 1)
                except:
                    pass
            diag = [board[1], board[5], board[9]]
            if sum(diag) == 2 * self.mark and (self.mark in diag):
                try:
                    # print("1diag")
                    return cellvalidator(diag.index(0) * 4 + 1)
                except:
                    pass
            antidiag = [board[3], board[5], board[7]]
            if sum(antidiag) == 2 * self.mark and (self.mark in antidiag):
                try:
                    # print("1antidiag")
                    return cellvalidator(3 + antidiag.index(0) * 2)
                except:
                    pass
        # Block: same scan for the opponent's two-in-a-line.
        for i in range(3):
            row = [board[i * 3 + 1], board[i * 3 + 2], board[i * 3 + 3]]
            if sum(row) == 2 * self.antimark and (self.antimark in row):
                try:
                    # print("row")
                    return cellvalidator(i * 3 + row.index(0) + 1)
                except:
                    pass
            col = [board[i + 1], board[i + 4], board[i + 7]]
            if sum(col) == 2 * self.antimark and (self.antimark in col):
                try:
                    # print("col")
                    return cellvalidator(i + col.index(0) * 3 + 1)
                except:
                    pass
            diag = [board[1], board[5], board[9]]
            if sum(diag) == 2 * self.antimark and (self.antimark in diag):
                try:
                    # print("diag")
                    return cellvalidator(diag.index(0) * 4 + 1)
                except:
                    pass
            antidiag = [board[3], board[5], board[7]]
            if sum(antidiag) == 2 * self.antimark and (self.antimark in antidiag):
                try:
                    # print("antidiag")
                    return cellvalidator(3 + antidiag.index(0) * 2)
                except:
                    pass
        # Prefer an empty corner whose adjacent corners are already in play.
        for i in list(rpermutation(CORNERS)):
            if not board[i]:
                if sum([board[i] for i in getadjacentcorners(i)]):
                    try:
                        return cellvalidator(i)
                    except:
                        pass
        # Take the center if it is free.
        if not board[5]:
            return 5
        # Holding the center: mirror one of our edge moves across the board.
        if board[5] == self.mark:
            for i in list(rpermutation(NON_CORNERS)):
                if board[i] == self.mark:
                    # print("last but one")
                    try:
                        return cellvalidator(CELLS + 1 - i)
                    except:
                        pass
        # print("corner")
        try:
            return cellvalidator(random.choice([i for i in getemptycells() if i in CORNERS]))
        except:
            pass
        # print("last")
        return cellvalidator(random.choice(getemptycells()))
class deadly(player):
    """Strongest AI: win/block scans plus opening-book style heuristics keyed
    on the number of empty cells (move parity) and corner/center control."""
    def getturn(self):
        """Return the chosen cell (1-9)."""
        super().getturn()
        ################# Priority ##################
        # Aggressive
        for i in range(3):
            row = [board[i * 3 + 1], board[i * 3 + 2], board[i * 3 + 3]]
            if sum(row) == 2 * self.mark and (self.mark in row):
                try:
                    # print("1row")
                    return cellvalidator(i * 3 + row.index(0) + 1)
                except:
                    pass
            col = [board[i + 1], board[i + 4], board[i + 7]]
            if sum(col) == 2 * self.mark and (self.mark in col):
                try:
                    # print("1col")
                    return cellvalidator(i + col.index(0) * 3 + 1)
                except:
                    pass
            diag = [board[1], board[5], board[9]]
            if sum(diag) == 2 * self.mark and (self.mark in diag):
                try:
                    # print("1diag")
                    return cellvalidator(diag.index(0) * 4 + 1)
                except:
                    pass
            antidiag = [board[3], board[5], board[7]]
            if sum(antidiag) == 2 * self.mark and (self.mark in antidiag):
                try:
                    # print("1antidiag")
                    return cellvalidator(3 + antidiag.index(0) * 2)
                except:
                    pass
        # Defensive
        for i in range(3):
            row = [board[i * 3 + 1], board[i * 3 + 2], board[i * 3 + 3]]
            if sum(row) == 2 * self.antimark and (self.antimark in row):
                try:
                    # print("row")
                    return cellvalidator(i * 3 + row.index(0) + 1)
                except:
                    pass
            col = [board[i + 1], board[i + 4], board[i + 7]]
            if sum(col) == 2 * self.antimark and (self.antimark in col):
                try:
                    # print("col")
                    return cellvalidator(i + col.index(0) * 3 + 1)
                except:
                    pass
            diag = [board[1], board[5], board[9]]
            if sum(diag) == 2 * self.antimark and (self.antimark in diag):
                try:
                    # print("diag")
                    return cellvalidator(diag.index(0) * 4 + 1)
                except:
                    pass
            antidiag = [board[3], board[5], board[7]]
            if sum(antidiag) == 2 * self.antimark and (self.antimark in antidiag):
                try:
                    # print("antidiag")
                    return cellvalidator(3 + antidiag.index(0) * 2)
                except:
                    pass
        ########################################
        # Board summaries used by the opening heuristics below.
        emptycells = getemptycells()
        mycells = self.getmycells()
        oppenentcells = self.getoppenentcells()
        # Only move Defensive
        if len(emptycells) == 8:
            if sum([board[i] for i in CORNERS]) != 0:
                return 5
            elif 5 in oppenentcells:
                return random.choice(CORNERS)
        # Only move 2 Defensive
        if len(emptycells) % 2 == 0 and 5 in mycells:
            for i in list(rpermutation(NON_CORNERS)):
                try:
                    return cellvalidator(i)
                except:
                    pass
        # Aggressive
        if len(emptycells) == 9:
            return random.choice(CORNERS + [5] + [5])
        if len(emptycells) == 7 and (5 in mycells) and sum([board[i] for i in CORNERS]) != 0:
            for i in CORNERS:
                if i in oppenentcells:
                    try:
                        return cellvalidator(CELLS + 1 - i)
                    except:
                        pass
        if len(emptycells) % 2 != 0:
            if sum([board[i] for i in CORNERS]) != 0:
                for i in list(rpermutation(CORNERS)):
                    if not board[i] and sum([board[i] for i in getadjacentcorners(i)]):
                        adjcells = getadjacentcells(i)
                        if not (adjcells[0] in oppenentcells or adjcells[1] in oppenentcells):
                            try:
                                return cellvalidator(i)
                            except:
                                pass
            else:
                try:
                    # print("corner")
                    return cellvalidator(random.choice(CORNERS))
                except:
                    pass
            for i in list(rpermutation(CORNERS)):
                if not board[i]:
                    if sum([board[i] for i in getadjacentcorners(i)]):
                        try:
                            return cellvalidator(i)
                        except:
                            pass
            if not board[5]:
                return 5
        # Adjacent corners
        for i in list(rpermutation(CORNERS)):
            if not board[i]:
                if sum([board[i] for i in getadjacentcorners(i)]):
                    try:
                        return cellvalidator(i)
                    except:
                        pass
        # Non Corners for mid
        if len(emptycells) % 2 == 0:
            if board[5] == self.mark:
                for i in list(rpermutation(NON_CORNERS)):
                    if board[i] == self.mark:
                        # print("last but one")
                        try:
                            return cellvalidator(CELLS + 1 - i)
                        except:
                            pass
            # Corners
            try:
                # print("corner")
                return cellvalidator(random.choice([i for i in getemptycells() if i in CORNERS]))
            except:
                pass
        if board[5] == self.mark:
            for i in list(rpermutation(NON_CORNERS)):
                if board[i] == self.mark:
                    # print("last but one")
                    try:
                        return cellvalidator(CELLS + 1 - i)
                    except:
                        pass
        # print("last")
        return cellvalidator(random.choice(getemptycells()))
    def getmycells(self):
        # Cells currently holding our mark.
        return [i for i in range(1, 10) if board[i] == self.mark]
    def getoppenentcells(self):
        # Cells currently holding the opponent's mark.
        return [i for i in range(1, 10) if board[i] == self.antimark]
def getemptycells():
    """Return the list of unoccupied cell numbers (1-9), in ascending order."""
    return [cell for cell in range(1, 10) if not board[cell]]
def cellvalidator(cell):
    """Return `cell` if it is free on the board, otherwise raise Exception."""
    if board[cell] != 0:
        raise Exception()
    return cell
def getadjacentcorners(cell):
    """Return the two corners adjacent to corner `cell` (i.e. CORNERS minus
    the cell itself and its diagonal opposite)."""
    others = CORNERS[:]
    for taken in (cell, CELLS + 1 - cell):
        others.remove(taken)
    return others
def getadjacentcells(cell):
    """Return the two edge cells bordering the given corner cell."""
    return [cell * 2, 5 - cell] if cell < 5 else [15 - cell, cell - 1]
def solve():
    """Evaluate the board: return the winning mark (1 or 2), -1 for a draw,
    or None while the game is still in progress."""
    # Lines in the same order the original scanned them: row i, column i,
    # then the two diagonals.
    lines = []
    for i in range(3):
        lines.append((i * 3 + 1, i * 3 + 2, i * 3 + 3))
        lines.append((i + 1, i + 4, i + 7))
    lines.append((1, 5, 9))
    lines.append((3, 5, 7))
    for a, b, c in lines:
        if board[a] == board[b] == board[c] != 0:
            return board[a]
    if 0 in board.values():
        return None
    return -1
def marker(cell, mark):
    """Place `mark` into board cell `cell`.

    Raises Exception (after printing a message) when the cell number is out
    of range or the cell is already occupied.
    """
    # BUG FIX: the original guard was `if 1 > cell > 10:` — a chained
    # comparison requiring cell < 1 AND cell > 10, which can never be true,
    # so out-of-range cells fell through to a raw KeyError on `board`.
    # Valid cells are the board keys 1-9.
    if cell < 1 or cell > 9:
        print(f"Cell: {cell} not exist!!!")
        raise Exception()
    elif board[cell] != 0:
        print(f"Cell: {cell} is occupied!!!")
        raise Exception()
    else:
        board[cell] = mark
def getinput():
    # Block inside the pygame event loop until the user selects a FREE cell
    # via mouse click or number key; keytonum() returns None for occupied or
    # off-board input, which keeps the loop running.
    begin = True
    key = None
    while begin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                key = keytonum(pygame.mouse.get_pos())
                if key:
                    begin = False
            if event.type == pygame.KEYDOWN:
                key = keytonum(event.key)
                if key:
                    begin = False
        pygame.display.update()
        showboard()
    return key
def getwinner(winner):
    # Show the end-of-game banner (win/lose/draw relative to the human
    # player) until a click or key press, then hand back a fresh empty board
    # and a cleared winner flag. Note: the `board` assigned below is a LOCAL
    # dict; the caller rebinds the module-level board from the return value.
    begin = True
    showboard()
    while begin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                begin = False
            if event.type == pygame.KEYDOWN:
                begin = False
        if winner == -1:
            screen.blit(drawimg, (245, 15))
        elif winner == human.mark:
            screen.blit(winimg, (245, 15))
        else:
            screen.blit(loseimg, (245, 15))
        screen.blit(clickimg, (332, 1600))
        pygame.display.update()
        fpsClock.tick(30)
    # Reset state for the next round.
    board = {}
    for i in range(9):
        board[i + 1] = 0
    winner = None
    return board, winner
def headline():
    # Splash flow: show the banner until any input, then show the mark-picker
    # screen and return the human player's chosen mark (1 = X, 2 = O), picked
    # by clicking one of two button regions or pressing 1/2.
    begin1 = True
    begin2 = True
    mark = None
    while begin1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                begin1 = False
            if event.type == pygame.KEYDOWN:
                begin1 = False
        screen.blit(bannerimg, (0, 0))
        pygame.display.update()
        fpsClock.tick(30)
    while begin2:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                x, y = pygame.mouse.get_pos()
                # Pixel hitboxes of the X / O buttons on the 1080x1920 layout.
                if 212 <= x <= 552 and 924 <= y <= 1376:
                    mark = 1
                    begin2 = False
                if 584 <= x <= 916 and 924 <= y <= 1376:
                    mark = 2
                    begin2 = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_1:
                    mark = 1
                    begin2 = False
                if event.key == pygame.K_2:
                    mark = 2
                    begin2 = False
        screen.blit(markerimg, (0, 0))
        pygame.display.update()
        fpsClock.tick(30)
    return mark
def init():
    # Difficulty-selection screen: return 1 (easy) .. 4 (deadly), chosen by
    # clicking one of four button regions or pressing keys 1-4.
    begin = True
    diff = None
    while begin:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            if event.type == pygame.MOUSEBUTTONUP:
                x, y = pygame.mouse.get_pos()
                # Pixel hitboxes of the four difficulty buttons.
                if 148 <= x <= 548 and 1008 <= y <= 1288:
                    diff = 1
                    begin = False
                if 592 <= x <= 988 and 1008 <= y <= 1288:
                    diff = 2
                    begin = False
                if 148 <= x <= 548 and 1376 <= y <= 1648:
                    diff = 3
                    begin = False
                if 592 <= x <= 988 and 1376 <= y <= 1648:
                    diff = 4
                    begin = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_1:
                    diff = 1
                    begin = False
                if event.key == pygame.K_2:
                    diff = 2
                    begin = False
                if event.key == pygame.K_3:
                    diff = 3
                    begin = False
                if event.key == pygame.K_4:
                    diff = 4
                    begin = False
        screen.blit(diffimg, (0, 0))
        pygame.display.update()
        fpsClock.tick(30)
    return diff
def showboard():
    # Redraw the background, the board image, and every placed mark, then
    # flip the display.
    screen.blit(backimg, (0, 0))
    screen.blit(boardimg, (boardX, boardY))
    for i in range(1, 10):
        if board[i]:
            putmark(i, board[i])
    pygame.display.update()
def putmark(num, sign):
    """Blit the X (sign 1) or O (sign 2) sprite onto board cell `num` (1-9)."""
    # Cell pitch is 365 px horizontally and 500 px vertically, offset 30 px
    # inside the board image.
    pos = (boardX + 30 + (num - 1) % 3 * 365,
           boardY + 30 + (num - 1) // 3 * 500)
    if sign == 1:
        screen.blit(crossimg, pos)
    elif sign == 2:
        screen.blit(roundimg, pos)
    else:
        print("Invalid Sign!")
def keytonum(key):
    """Map an input event to a FREE board cell number (1-9).

    `key` is either a (x, y) mouse position or a pygame key code. Returns
    None when the input hits no cell or the matched cell is occupied.
    """
    if isinstance(key, tuple):
        x, y = key
        # Inclusive pixel bands of the three board rows / columns.
        row_bands = ((484, 900), (968, 1360), (1432, 1840))
        col_bands = ((24, 348), (400, 696), (748, 1044))
        for row, (y0, y1) in enumerate(row_bands):
            if not y0 <= y <= y1:
                continue
            for col, (x0, x1) in enumerate(col_bands):
                if x0 <= x <= x1:
                    cell = row * 3 + col + 1
                    if not board[cell]:
                        return cell
    else:
        digit_keys = (pygame.K_1, pygame.K_2, pygame.K_3,
                      pygame.K_4, pygame.K_5, pygame.K_6,
                      pygame.K_7, pygame.K_8, pygame.K_9)
        for cell, code in enumerate(digit_keys, start=1):
            if key == code and not board[cell]:
                return cell
# Game bootstrap and main loop: set up the window, ask for the human's mark,
# then repeatedly run rounds (difficulty pick -> alternating turns -> result
# screen) until the window is closed.
pygame.init()
screen = pygame.display.set_mode((1080, 1920))
pygame.display.set_caption("TicTacToe", "tic-tac-toe.png")
screen.fill((20, 50, 80))
key = None
running = True
mark = headline()
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
            running2 = False
            sys.exit()
    diff = init()
    # NOTE(review): redraws the board 100 times in a row — looks like a crude
    # way to flush stale frames after the difficulty screen.
    for i in range(100):
        showboard()
    running2 = True
    human = user("You", mark)
    # Pick the AI class matching the chosen difficulty; the AI gets the
    # opposite mark.
    if diff == 1:
        comp = easy("Computer", mark % 2 + 1)
    elif diff == 2:
        comp = medium("Computer", mark % 2 + 1)
    elif diff == 3:
        comp = hard("Computer", mark % 2 + 1)
    else:
        comp = deadly("Computer", mark % 2 + 1)
    # Randomize who moves first.
    if random.randint(0, 1):
        players = [human, comp]
    else:
        players = [comp, human]
    while running2:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
                running2 = False
                sys.exit()
        showboard()
        for p in players:
            pygame.display.update()
            showboard()
            marker(p.getturn(), p.mark)
            winner = solve()
            if winner:
                break
        if winner:
            running2 = False
            board, winner = getwinner(winner)
        pygame.display.update()
        showboard()
    pygame.display.update()
    fpsClock.tick(30)
6e0596f60ea2aacca4a2e542940c06bbc4f394b7 | 25,458 | py | Python | utils/dataset_utils.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | utils/dataset_utils.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | utils/dataset_utils.py | Daipuwei/YOLO-tf2 | 1b2e7133c99507573f419c8a367a8dba4abeae5b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 2021/9/18 下午11:23
# @Author : DaiPuWei
# @Email : 771830171@qq.com
# @File : dataset_utils.py
# @Software: PyCharm
"""
这是YOLO模型数据集
"""
import cv2
import numpy as np
from PIL import Image
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from utils.model_utils import get_classes
from utils.model_utils import get_anchors
def resize_keep_aspect_ratio(image_src, dst_size, value=[128, 128, 128]):
    '''
    Pad the source image with a constant border until it is square (preserving
    the aspect ratio of the content), then resize it to the destination size
    using OpenCV.
    Args:
        image_src: source image, an H x W x C ndarray
        dst_size: destination size as (height, width)
        value: padding pixel value (BGR)
    Returns:
        the padded and resized image
    '''
    # Get the source and destination sizes.
    src_h, src_w, _ = np.shape(image_src)
    dst_h, dst_w = dst_size
    # Decide which axis needs padding to make the image square,
    # splitting the extra pixels evenly between the two sides.
    if src_h < src_w:  # pad along h
        delta = src_w - src_h
        top = int(delta // 2)
        down = delta - top
        left = 0
        right = 0
    else:  # pad along w
        delta = src_h - src_w
        top = 0
        down = 0
        left = int(delta // 2)
        right = delta - left
    borderType = cv2.BORDER_CONSTANT
    image_dst = cv2.copyMakeBorder(image_src, top, down, left, right, borderType, None, value)
    # BUG FIX: cv2.resize expects dsize as (width, height); the original passed
    # dst_size straight through even though it was unpacked above as (h, w),
    # which transposes the output whenever dst_size is not square.
    image_dst = cv2.resize(image_dst, (dst_w, dst_h))
    return image_dst
def letterbox_image(image, size):
    '''
    Resize a PIL image to fit inside `size` while keeping its aspect ratio,
    pasting it centered on a gray (128, 128, 128) canvas of exactly `size`.
    Args:
        image: PIL image
        size: destination (width, height)
    Returns:
        the letterboxed PIL image
    '''
    src_w, src_h = image.size
    dst_w, dst_h = size
    ratio = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * ratio)
    new_h = int(src_h * ratio)
    resized = image.resize((new_w, new_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(resized, ((dst_w - new_w) // 2, (dst_h - new_h) // 2))
    return canvas
def rand(a=0, b=1):
    """Return a uniform random float in [a, b)."""
    return a + (b - a) * np.random.rand()
class Dataset(object):
    def __init__(self,dataset_path,classes_path,anchors_path,batch_size,target_size,
                 max_boxes_num=20,use_mosaic=False,random=True,model_name='yolov3'):
        '''
        Initialize the object-detection dataset wrapper.
        Args:
            dataset_path: path to the COCO-format annotation txt file
            classes_path: path to the class-names txt file
            anchors_path: path to the anchor-boxes txt file
            batch_size: mini-batch size
            target_size: target (network input) image size
            max_boxes_num: maximum number of boxes per image, defaults to 20
            use_mosaic: whether to apply mosaic data augmentation, defaults to False
            random: whether to apply random augmentation, defaults to True
            model_name: model name, defaults to 'yolov3'
        '''
        self.dataset_path = dataset_path
        self.classes_path = classes_path
        self.anchors_path = anchors_path
        self.target_size = target_size
        self.max_boxes_num = max_boxes_num
        self.use_mosaic = use_mosaic
        self.random = random
        self.model_name = model_name
        # Read and shuffle all annotation lines once up front.
        self.annotation_lines = []
        with open(self.dataset_path, 'r') as f:
            for line in f.readlines():
                self.annotation_lines.append(line)
        self.annotation_lines = np.array(self.annotation_lines)
        self.annotation_lines = np.random.permutation(self.annotation_lines)
        self.size = len(self.annotation_lines)
        self.batch_size = batch_size
        # Number of iterations per epoch, rounding the last partial batch up.
        self.iter_num = self.size // self.batch_size
        if self.size % self.batch_size != 0:
            self.iter_num += 1
        # Initialize anchors and class names.
        self.anchors = get_anchors(self.anchors_path)
        self.classes_names = get_classes(self.classes_path)
        self.num_anchors = len(self.anchors)
        self.num_classes = len(self.classes_names)
        # Data-augmentation hyper-parameters (jitter and HSV ranges).
        self.jitter = 0.3
        self.hue=.1
        self.sat=1.5
        self.val=1.5
    def get_batch_data_with_mosaic(self,batch_annotation_lines):
        '''
        Build a batch of images and labels using mosaic data augmentation:
        the annotation lines are consumed in groups of (up to) 4, each group
        producing one mosaic-composited image with its merged boxes.
        Args:
            batch_annotation_lines: a batch of YOLO-format annotation lines
        Returns:
            (batch_image_data, batch_boxes) as numpy arrays
        '''
        batch_image_data = []
        batch_boxes = []
        size = len(batch_annotation_lines)
        # One mosaic sample per group of 4 source lines (last group may be short).
        for start in np.arange(0,len(batch_annotation_lines),4):
            end = int(np.min([start+4,size]))
            _batch_annotation_lines = batch_annotation_lines[start:end]
            image_data,box_data = self.get_random_data_with_mosaic(_batch_annotation_lines)
            batch_image_data.append(image_data)
            batch_boxes.append(box_data)
        batch_image_data = np.array(batch_image_data)
        batch_boxes = np.array(batch_boxes)
        return batch_image_data,batch_boxes
    def get_random_data_with_mosaic(self,batch_lines):
        """
        Load 4 images and their box labels and combine them with mosaic
        data augmentation (flip, scale, HSV jitter, then paste into the
        four quadrants of one canvas split at a random cut point).
        :param batch_lines: up to 4 YOLO-format annotation lines
        :return: (mosaic image array in [0, 1], padded box array of shape
                  (max_boxes_num, 5))
        """
        h, w = self.target_size
        min_offset_x = 0.3
        min_offset_y = 0.3
        scale_low = 1 - min(min_offset_x, min_offset_y)
        scale_high = scale_low + 0.2
        image_datas = []
        box_datas = []
        index = 0
        # Top-left paste coordinate for each of the four quadrant images.
        place_x = [0, 0, int(w * min_offset_x), int(w * min_offset_x)]
        place_y = [0, int(h * min_offset_y), int(h * min_offset_y), 0]
        # The group may contain fewer than 4 images; pad by re-sampling
        # random lines from the group so a full mosaic can be built.
        size = len(batch_lines)
        if size < 4:
            dif = 4 - len(batch_lines)
            _batch_line = [line for line in batch_lines]
            for i in np.arange(dif):
                random_index = np.random.randint(0,size)
                _batch_line.append(batch_lines[random_index])
            batch_lines = np.array(_batch_line)
        # Iterate over all images and load their ground-truth boxes.
        for line in batch_lines:
            # Split the annotation line into tokens.
            line_content = line.split()
            # Open the image.
            image = Image.open(line_content[0])
            image = image.convert("RGB")
            # Original image size.
            iw, ih = image.size
            # Parse the box annotations: x1,y1,x2,y2,class per token.
            box = np.array([np.array(list(map(int, box.split(',')))) for box in line_content[1:]])
            # Randomly flip the image horizontally.
            flip = rand() < .5
            if flip and len(box) > 0:
                image = image.transpose(Image.FLIP_LEFT_RIGHT)
                box[:, [0, 2]] = iw - box[:, [2, 0]]
            # Rescale the image to a random fraction of the target size.
            new_ar = w / h
            scale = rand(scale_low, scale_high)
            if new_ar < 1:
                nh = int(scale * h)
                nw = int(nh * new_ar)
            else:
                nw = int(scale * w)
                nh = int(nw / new_ar)
            image = image.resize((nw, nh), Image.BICUBIC)
            # Apply HSV color jitter.
            # NOTE(review): OpenCV float HSV has H in [0, 360] while the
            # `> 1` / `< 0` wrap-around checks below assume H in [0, 1]
            # (matplotlib-style, as used in get_random_data) — this mix
            # looks inconsistent; verify intended hue handling.
            hue = rand(-self.hue, self.hue)
            sat = rand(1, self.sat) if rand() < .5 else 1 / rand(1, self.sat)
            val = rand(1, self.val) if rand() < .5 else 1 / rand(1, self.val)
            x = cv2.cvtColor(np.array(image, np.float32) / 255, cv2.COLOR_RGB2HSV)
            x[..., 0] += hue * 360
            x[..., 0][x[..., 0] > 1] -= 1
            x[..., 0][x[..., 0] < 0] += 1
            x[..., 1] *= sat
            x[..., 2] *= val
            x[x[:, :, 0] > 360, 0] = 360
            x[:, :, 1:][x[:, :, 1:] > 1] = 1
            x[x < 0] = 0
            image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)  # numpy array, 0 to 1
            image = Image.fromarray((image * 255).astype(np.uint8))
            # Paste the image onto a gray canvas at its quadrant position.
            dx = place_x[index]
            dy = place_y[index]
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255
            index = index + 1
            box_data = []
            # Remap boxes to the pasted location, clip to the canvas,
            # and discard boxes that collapsed to < 1 px in either dim.
            if len(box) > 0:
                np.random.shuffle(box)
                box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
                box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
                box[:, 0:2][box[:, 0:2] < 0] = 0
                box[:, 2][box[:, 2] > w] = w
                box[:, 3][box[:, 3] > h] = h
                box_w = box[:, 2] - box[:, 0]
                box_h = box[:, 3] - box[:, 1]
                box = box[np.logical_and(box_w > 1, box_h > 1)]
                box_data = np.zeros((len(box), 5))
                box_data[:len(box)] = box
            image_datas.append(image_data)
            box_datas.append(box_data)
        # Pick a random cut point and stitch the four quadrants together.
        cutx = np.random.randint(int(w * min_offset_x), int(w * (1 - min_offset_x)))
        cuty = np.random.randint(int(h * min_offset_y), int(h * (1 - min_offset_y)))
        new_image = np.zeros([h, w, 3])
        new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
        new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
        new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
        new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
        # Merge the per-quadrant boxes, clipping them at the cut lines.
        merge_bbox = self.merge_bboxes(box_datas,cutx,cuty)
        bbox = np.zeros((self.max_boxes_num, 5))
        if len(merge_bbox) != 0:
            if len(merge_bbox) > self.max_boxes_num:
                merge_bbox = merge_bbox[:self.max_boxes_num]
            bbox[:len(merge_bbox)] = merge_bbox
        return new_image,bbox
    def merge_bboxes(self,bbox_data,cutx,cuty):
        '''
        Merge the box labels of the 4 mosaic quadrant images into one list,
        clipping each box at the mosaic cut lines and dropping boxes that
        fall entirely outside their quadrant or become too thin after
        clipping.
        Quadrant index i: 0 = top-left, 1 = bottom-left, 2 = bottom-right,
        3 = top-right (matches the paste order in
        get_random_data_with_mosaic).
        Args:
            bbox_data: list of per-quadrant box arrays (x1, y1, x2, y2, cls)
            cutx: x coordinate of the vertical cut line
            cuty: y coordinate of the horizontal cut line
        Returns:
            numpy array of merged (x1, y1, x2, y2, cls) boxes
        '''
        merge_bbox = []
        for i,bboxes in enumerate(bbox_data):
            if bboxes is not None:
                for box in bboxes:
                    tmp_box = []
                    x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
                    if i == 0:
                        # Top-left quadrant: keep only boxes starting above/left
                        # of the cut; clip the far edges to the cut lines.
                        if y1 > cuty or x1 > cutx:
                            continue
                        if y2 >= cuty and y1 <= cuty:
                            y2 = cuty
                            if y2 - y1 < 5:  # discard boxes that became too thin
                                continue
                        if x2 >= cutx and x1 <= cutx:
                            x2 = cutx
                            if x2 - x1 < 5:  # discard boxes that became too thin
                                continue
                    if i == 1:
                        # Bottom-left quadrant.
                        if y2 < cuty or x1 > cutx:
                            continue
                        if y2 >= cuty and y1 <= cuty:
                            y1 = cuty
                            if y2 - y1 < 5:  # discard boxes that became too thin
                                continue
                        if x2 >= cutx and x1 <= cutx:
                            x2 = cutx
                            if x2 - x1 < 5:  # discard boxes that became too thin
                                continue
                    if i == 2:
                        # Bottom-right quadrant.
                        if y2 < cuty or x2 < cutx:
                            continue
                        if y2 >= cuty and y1 <= cuty:
                            y1 = cuty
                            if y2 - y1 < 5:  # discard boxes that became too thin
                                continue
                        if x2 >= cutx and x1 <= cutx:
                            x1 = cutx
                            if x2 - x1 < 5:  # discard boxes that became too thin
                                continue
                    if i == 3:
                        # Top-right quadrant.
                        if y1 > cuty or x2 < cutx:
                            continue
                        if y2 >= cuty and y1 <= cuty:
                            y2 = cuty
                            if y2 - y1 < 5:  # discard boxes that became too thin
                                continue
                        if x2 >= cutx and x1 <= cutx:
                            x1 = cutx
                            if x2 - x1 < 5:  # discard boxes that became too thin
                                continue
                    tmp_box.append(x1)
                    tmp_box.append(y1)
                    tmp_box.append(x2)
                    tmp_box.append(y2)
                    tmp_box.append(box[-1])
                    merge_bbox.append(tmp_box)
        del bbox_data
        return np.array(merge_bbox)
def get_batch_data(self,batch_annotation_lines):
'''
这是获取批量图像及其目标框标签的函数,不使用mosaic数据增强
Args:
batch_annotation_lines: 批量yolo数据集格式标注
Returns:
'''
batch_images = []
batch_boxes = []
for annotation_line in batch_annotation_lines:
image,box_data = self.get_random_data(annotation_line)
batch_images.append(image)
batch_boxes.append(box_data)
batch_images = np.array(batch_images)
batch_boxes = np.array(batch_boxes)
return batch_images,batch_boxes
    def get_random_data(self,line):
        '''
        Load one image and its box labels, optionally applying random
        data augmentation (jittered resize, random placement, flip, HSV
        distortion) when self.random is True; otherwise letterbox-resize
        only.
        Args:
            line: one YOLO-format annotation line
        Returns:
            (image array in [0, 1], box array of shape (max_boxes_num, 5))
        '''
        lines =line.split()
        image = Image.open(lines[0])
        iw, ih = image.size
        h, w = self.target_size
        # Parse box annotations: x1,y1,x2,y2,class per token.
        box = np.array([np.array(list(map(int, box.split(',')))) for box in lines[1:]])
        if not self.random:
            # Deterministic path: letterbox resize, preserving aspect ratio,
            # centered on a gray canvas.
            # resize image
            scale = min(w / iw, h / ih)
            nw = int(iw * scale)
            nh = int(ih * scale)
            dx = (w - nw) // 2
            dy = (h - nh) // 2
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image, np.float32) / 255
            # correct boxes
            box_data = np.zeros((self.max_boxes_num, 5))
            if len(box) > 0:
                np.random.shuffle(box)
                box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
                box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
                box[:, 0:2][box[:, 0:2] < 0] = 0
                box[:, 2][box[:, 2] > w] = w
                box[:, 3][box[:, 3] > h] = h
                box_w = box[:, 2] - box[:, 0]
                box_h = box[:, 3] - box[:, 1]
                box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
                if len(box) > self.max_boxes_num: box = box[:self.max_boxes_num]
                box_data[:len(box)] = box
            return image_data, box_data
        # Augmented path: resize image with jittered aspect ratio and scale.
        new_ar = w / h * rand(1 - self.jitter, 1 + self.jitter) / rand(1 - self.jitter, 1 + self.jitter)
        scale = rand(.25, 2)
        if new_ar < 1:
            nh = int(scale * h)
            nw = int(nh * new_ar)
        else:
            nw = int(scale * w)
            nh = int(nw / new_ar)
        image = image.resize((nw, nh), Image.BICUBIC)
        # place image at a random offset on a gray canvas
        dx = int(rand(0, w - nw))
        dy = int(rand(0, h - nh))
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_image.paste(image, (dx, dy))
        image = new_image
        # flip image or not
        flip = rand() < .5
        if flip:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
        # distort image in HSV space (hue here is in [0, 1], matplotlib-style)
        hue = rand(-self.hue, self.hue)
        sat = rand(1, self.sat) if rand() < .5 else 1 / rand(1, self.sat)
        val = rand(1, self.val) if rand() < .5 else 1 / rand(1, self.val)
        x = rgb_to_hsv(np.array(image) / 255.)
        x[..., 0] += hue
        x[..., 0][x[..., 0] > 1] -= 1
        x[..., 0][x[..., 0] < 0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x > 1] = 1
        x[x < 0] = 0
        image_data = hsv_to_rgb(x)  # numpy array, 0 to 1
        # correct boxes: remap to the placed/flipped image, clip, and drop
        # boxes smaller than 1 px in either dimension
        box_data = np.zeros((self.max_boxes_num, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
            box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
            if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
            box[:, 0:2][box[:, 0:2] < 0] = 0
            box[:, 2][box[:, 2] > w] = w
            box[:, 3][box[:, 3] > h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
            if len(box) > self.max_boxes_num:
                box = box[:self.max_boxes_num]
            box_data[:len(box)] = box
        return image_data, box_data
# ---------------------------------------------------#
# 读入xml文件,并输出y_true
# ---------------------------------------------------#
def preprocess_true_boxes(self,true_boxes):
'''
这是根据真实标签转换成不同yolo预测输出的函数
Args:
true_boxes: 真实目标框标签
Returns:
'''
assert (true_boxes[..., 4] < self.num_classes).all(), 'class id must be less than num_classes'
# -----------------------------------------------------------#
# 获得框的坐标和图片的大小
# -----------------------------------------------------------#
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(self.target_size, dtype='int32')
# 根据不同yolo模型初始化不同anchor掩膜、网格尺寸和输出层数
if self.model_name == 'yolov3': # yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov3-spp': # yolov3-spp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4': # yolov4
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-csp': # yolov4-csp
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-p5': # yolov4-p5
anchor_mask = [[8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-p6': # yolov4-p6
anchor_mask = [[12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 4
grid_shapes = [input_shape // {0: 64, 1: 32, 2: 16, 3: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-p7': # yolov4-p7
anchor_mask = [[16, 17, 18, 19], [12, 13, 14, 15], [8, 9, 10, 11], [4, 5, 6, 7], [0, 1, 2, 3]]
num_layers = 5
grid_shapes = [input_shape // {0:128, 1: 64, 2: 32, 3: 16, 4: 8}[l] for l in range(num_layers)]
elif self.model_name == 'poly-yolo': # poly-yolo(v3)
anchor_mask = [[0,1,2,3,4,5,6,7,8]]
num_layers = 1
grid_shapes = [input_shape // {0: 8}[l] for l in range(num_layers)]
elif self.model_name == 'yolov3-tiny': # yolov3-tiny
anchor_mask = [[3, 4, 5], [0, 1, 2]]
num_layers = 2
grid_shapes = [input_shape // {0: 32, 1: 16}[l] for l in range(num_layers)]
elif self.model_name == 'yolov4-tiny': # yolov4-tiny
anchor_mask = [ [3, 4, 5], [0, 1, 2]]
num_layers = 2
grid_shapes = [input_shape // {0: 32, 1: 16}[l] for l in range(num_layers)]
print(grid_shapes)
else: # 默认为yolov3
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
num_layers = 3
grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
# -----------------------------------------------------------#
# 通过计算获得真实框的中心和宽高
# 中心点(m,n,2) 宽高(m,n,2)
# -----------------------------------------------------------#
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
# -----------------------------------------------------------#
# 将真实框归一化到小数形式
# -----------------------------------------------------------#
true_boxes[..., 0:2] = boxes_xy / input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh / input_shape[::-1]
# m为图片数量,grid_shapes为网格的shape
m = true_boxes.shape[0]
#grid_shapes = [input_shape // {0: 32, 1: 16, 2: 8}[l] for l in range(num_layers)]
# -----------------------------------------------------------#
# y_true的格式为(m,13,13,3,85)(m,26,26,3,85)(m,52,52,3,85)
# -----------------------------------------------------------#
y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5 + self.num_classes),
dtype='float32') for l in range(num_layers)]
# -----------------------------------------------------------#
# [9,2] -> [1,9,2]
# -----------------------------------------------------------#
anchors = np.expand_dims(self.anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
# -----------------------------------------------------------#
# 长宽要大于0才有效
# -----------------------------------------------------------#
valid_mask = boxes_wh[..., 0] > 0
for b in range(m):
# 对每一张图进行处理
wh = boxes_wh[b, valid_mask[b]]
if len(wh) == 0: continue
# -----------------------------------------------------------#
# [n,2] -> [n,1,2]
# -----------------------------------------------------------#
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
# -----------------------------------------------------------#
# 计算所有真实框和先验框的交并比
# intersect_area [n,9]
# box_area [n,1]
# anchor_area [1,9]
# iou [n,9]
# -----------------------------------------------------------#
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# -----------------------------------------------------------#
# 维度是[n,] 感谢 消尽不死鸟 的提醒
# -----------------------------------------------------------#
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
# -----------------------------------------------------------#
# 找到每个真实框所属的特征层
# -----------------------------------------------------------#
for l in range(num_layers):
if n in anchor_mask[l]:
# -----------------------------------------------------------#
# floor用于向下取整,找到真实框所属的特征层对应的x、y轴坐标
# -----------------------------------------------------------#
i = np.floor(true_boxes[b, t, 0] * grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b, t, 1] * grid_shapes[l][0]).astype('int32')
# -----------------------------------------------------------#
# k指的的当前这个特征点的第k个先验框
# -----------------------------------------------------------#
k = anchor_mask[l].index(n)
# -----------------------------------------------------------#
# c指的是当前这个真实框的种类
# -----------------------------------------------------------#
c = true_boxes[b, t, 4].astype('int32')
# -----------------------------------------------------------#
# y_true的shape为(m,13,13,3,85)(m,26,26,3,85)(m,52,52,3,85)
# 最后的85可以拆分成4+1+80,4代表的是框的中心与宽高、
# 1代表的是置信度、80代表的是种类
# -----------------------------------------------------------#
y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5 + c] = 1
return y_true
def generator(self):
'''
这是数据生成器定义函数
Returns:
'''
while True:
# 随机打乱数据集
self.annotation_lines = np.random.permutation(self.annotation_lines)
for start in np.arange(0,self.size,self.batch_size):
end = int(np.min([start+self.batch_size,self.size]))
batch_annotation_lines = self.annotation_lines[start:end]
if self.use_mosaic:
batch_images,batch_boxes = self.get_batch_data_with_mosaic(batch_annotation_lines)
else:
batch_images, batch_boxes = self.get_batch_data(batch_annotation_lines)
# 对box数组进行处理,生成符合YOLO v4模型输出的标签
batch_y_true = self.preprocess_true_boxes(batch_boxes)
batch_loss = np.zeros(len(batch_images))
yield [batch_images,*batch_y_true],batch_loss | 39.902821 | 112 | 0.439351 |
6e080db2602e0c90c09249fc8d6eeaeabeabd005 | 750 | py | Python | caesar_cipher.py | DomirScire/Basic_Ciphers | 7425b306f8d0ce9ceb5ba3a59e73a52892bee5ca | [
"MIT"
] | 1 | 2021-03-31T23:29:00.000Z | 2021-03-31T23:29:00.000Z | caesar_cipher.py | DomirScire/Ciphers_Py | 127c82b14c9bd5595f924bc267b6bf238f654c22 | [
"MIT"
] | null | null | null | caesar_cipher.py | DomirScire/Ciphers_Py | 127c82b14c9bd5595f924bc267b6bf238f654c22 | [
"MIT"
] | null | null | null | import string
def caesar_cipher(text, shift, decrypt=False):
    """Apply a Caesar shift to *text*, preserving letter case.

    Args:
        text: ASCII alphabetic string (no digits, spaces or punctuation).
        shift: number of alphabet positions to rotate by.
        decrypt: if True, rotate in the opposite direction.

    Returns:
        The shifted string.

    Raises:
        ValueError: if *text* is not purely ASCII alphabetic.
    """
    if not text.isascii() or not text.isalpha():
        raise ValueError("Text must be ASCII and contain no numbers.")
    offset = -shift if decrypt else shift
    transformed = []
    for char in text:
        # Rotate within the 26-letter alphabet of the character's own case.
        base = ord("a") if char.islower() else ord("A")
        transformed.append(chr(base + (ord(char) - base + offset) % 26))
    return "".join(transformed)
if __name__ == "__main__":
print(caesar_cipher("meetMeAtOurHideOutAtTwo", 10))
print(caesar_cipher("woodWoKdYebRsnoYedKdDgy", 10, decrypt=True))
| 27.777778 | 70 | 0.630667 |
6e0977041deef6fa7bf74e2fadd3b0a89bcf73e3 | 6,953 | py | Python | hume/hume/app.py | megacorpincorporated/HOME | 0eb8009b028fabf64abb03acc0a081b2b8207eb0 | [
"MIT"
] | 1 | 2018-02-18T15:51:57.000Z | 2018-02-18T15:51:57.000Z | hume/hume/app.py | megacorpincorporated/HOME | 0eb8009b028fabf64abb03acc0a081b2b8207eb0 | [
"MIT"
] | null | null | null | hume/hume/app.py | megacorpincorporated/HOME | 0eb8009b028fabf64abb03acc0a081b2b8207eb0 | [
"MIT"
] | null | null | null | import json
import logging
from app.abc import StartError
from app.device import DeviceApp, DeviceMessage
from app.device.models import Device
from app.hint import HintApp
from app.hint.defs import HintMessage
from util.storage import DataStore
LOGGER = logging.getLogger(__name__)
class Hume:
    """Top-level application object wiring the Device and Hint apps together."""
    def __init__(self, cli_args):
        # Shared persistent storage handed to both sub-apps.
        self.storage = DataStore()
        self.device_app = DeviceApp(cli_args, self.storage)
        self.hint_app = HintApp(cli_args, self.storage)
    def start(self):
        """Starts the HUME.

        Raises:
            RuntimeError: if either sub-app fails to start (both are
                stopped first).
        """
        LOGGER.info("hume start")
        self.device_app.pre_start()
        self.hint_app.pre_start()
        # Register callbacks prior to starting Apps in case of any
        # confirmation-type messages happen on connection establishment, or in
        # case of queued up messages from HINT.
        self.device_app.register_callback(self._on_device_message)
        self.hint_app.register_callback(self._on_hint_message)
        try:
            self.device_app.start()
            self.hint_app.start()
        except StartError:
            self.stop()  # may or may not raise another exception
            # raise runtime error to ensure stop
            raise RuntimeError("failed to start an app")
        self.device_app.post_start()
        self.hint_app.post_start()
    def stop(self):
        """Stops the HUME."""
        LOGGER.info("hume stop")
        # Important to maintain same stop order as the start order!
        self.device_app.pre_stop()
        self.hint_app.pre_stop()
        self.device_app.stop()
        self.hint_app.stop()
        self.device_app.post_stop()
        self.hint_app.post_stop()
    """
    Private
    """
    def _on_device_message(self,
                           device: Device,
                           msg_type: int,
                           msg: bytearray):
        """
        Registered to be called by the Device app when a new message is
        received from a connected device.

        :param device: device the message originated from
        :param msg_type: DeviceMessage value identifying the message kind
        :param msg: raw message payload
        """
        LOGGER.debug("HUME handling device message")
        if msg_type == DeviceMessage.CAPABILITY.value:
            decoded_msg = json.loads(msg)
            LOGGER.info(f"device {device.uuid[:4]} sent capability response")
            capabilities = decoded_msg
            capabilities["identifier"] = device.uuid
            if self.hint_app.create_device(capabilities):
                LOGGER.info("device created in HINT successfully")
                # This is done since BLE devices cannot provide UUID before
                # capability response is gotten and are thus saved with their
                # address as their primary key prior to attach success.
                device = self.storage.get(Device, device.uuid)
                device.uuid = capabilities["uuid"]
                device.attached = True
                self.storage.set(device)
            else:
                LOGGER.error("failed to create device in HINT")
                # Detach device to clean up after unsuccessful attach.
                self.device_app.detach(device)
                self.hint_app.attach_failure(device.uuid)
        elif msg_type == DeviceMessage.ACTION_STATEFUL.value:
            # Payload layout: first char is group id, second is state id.
            decoded_msg = msg.decode()
            self.hint_app.action_response(device,
                                          HintMessage.ACTION_STATEFUL,
                                          {
                                              "group_id": int(decoded_msg[0]),
                                              "state_id": int(decoded_msg[1])
                                          })
        else:
            LOGGER.warning(f"got message from device {device.uuid[:4]} of an "
                           f"unknown type: {msg_type}, msg: {msg}")
    def _on_hint_message(self, msg_type, msg):
        """
        Registered to be called by the Hint app when a new message is received
        from HINT.

        :param msg_type: HintMessage value identifying the request kind
        :param msg: decoded message payload (dict)
        """
        LOGGER.debug("HUME handling HINT message")
        if msg_type == HintMessage.DISCOVER_DEVICES.value:
            LOGGER.info("HINT requested device discovery")
            self.device_app.discover(self._discovered_devices)
        elif msg_type == HintMessage.ATTACH.value:
            identifier = msg["identifier"]
            LOGGER.info(f"HINT requested device {identifier[:4]} to "
                        f"be attached")
            device = self.storage.get(Device, identifier)
            if device is not None:
                # Attach completes asynchronously via the CAPABILITY reply
                # handled in _on_device_message.
                if not self.device_app.request_capabilities(device):
                    LOGGER.error(f"failed to attach device {identifier[:4]}")
                    self.hint_app.attach_failure(identifier)
        elif msg_type == HintMessage.DETACH.value:
            device_uuid = msg["device_uuid"]
            LOGGER.info(f"HINT requested detaching device {device_uuid[:4]}")
            device = self.storage.get(Device, device_uuid)
            if device is not None:
                self.device_app.detach(device)
            else:
                LOGGER.error(f"can't detach device {device_uuid[:4]}, "
                             f"does not exist")
        elif msg_type == HintMessage.UNPAIR.value:
            LOGGER.info("HINT requested unpairing, factory resetting HUME")
            self.device_app.reset()
            self.storage.delete_all()
        elif msg_type == HintMessage.ACTION_STATEFUL.value:
            device_uuid = msg.pop("device_uuid")
            LOGGER.info(f"HINT requested stateful action for device "
                        f"{device_uuid[:4]}")
            # Remaining msg keys are forwarded verbatim as action kwargs.
            msg.pop("type")
            device = self.storage.get(Device, device_uuid)
            if device is not None:
                self.device_app.stateful_action(device, **msg)
            else:
                LOGGER.error("could not execute stateful action since device "
                             "does not exist")
        elif msg_type == HintMessage.ACTION_STATES.value:
            device_uuid = msg["device_uuid"]
            LOGGER.info(f"HINT requested all stateful action states for "
                        f"device {device_uuid[:4]}")
            device = self.storage.get(Device, device_uuid)
            if device is not None:
                self.device_app.action_states(device)
            else:
                LOGGER.error("could not fetch stateful action states since "
                             "the device did not exist")
        else:
            LOGGER.warning(f"got message from hint of an unknown type: "
                           f"{msg_type}, msg: {msg}")
    def _discovered_devices(self, devices: [Device]):
        """
        Callback provided to the device app when discovering devices.

        :param devices: list of devices reported by the discovery scan
        """
        for device in devices:
            # Store discovered devices to remember the transport type reported
            # by the individual connection types.
            self.storage.set(device)
        self.hint_app.discovered_devices(devices)
6e0c62be30176a8297c1bf84eb84e82bffd0d9ee | 3,281 | py | Python | scripts/generate_demo_requests.py | onedata/onezone-gui-plugin-ecrin | 2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b | [
"MIT"
] | null | null | null | scripts/generate_demo_requests.py | onedata/onezone-gui-plugin-ecrin | 2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b | [
"MIT"
] | null | null | null | scripts/generate_demo_requests.py | onedata/onezone-gui-plugin-ecrin | 2bf38b0994d1c0bf8148b1b8c5990bcf0aa4a62b | [
"MIT"
] | null | null | null | #
# Author: Michał Borzęcki
#
# This script creates empty files with study and data object metadata in
# specified space and Oneprovider. It uses JSON files located in directories
# `studies_dir` (= studies) and `data_object_dir` (= data_objects). Positional
# arguments:
# 1. Oneprovider location (IP address or domain).
# 2. Space name (it must be supported by passed Oneprovider).
# 3. Access token (can be obtained via Onezone).
# 4. Number of files metadata to upload ("100" means 100 studies and 100 data
# objects)
# 5. Name of a directory (in space), where files with metadata should be
# uploaded. Warning: if that directory already exists, it will be removed.
# Example of usage:
# python3 generate_demo_requests.py 172.17.0.16 s1 MDAzMvY...ZlOGCg 1000 ecrin1
#
# Example studies and data objects can be found at
# https://github.com/beatmix92/ct.gov_updated
#
import os
import sys
import subprocess
import json
from natsort import natsorted
# Positional CLI arguments (see header comment): Oneprovider host, space
# name, access token, per-directory file limit, target directory name.
provider = sys.argv[1]
space = sys.argv[2]
token = sys.argv[3]
files = int(sys.argv[4])
directory = sys.argv[5]
# Local source directories holding the JSON metadata files to upload.
studies_dir = 'studies'
data_object_dir = 'data_objects'
# NOTE(review): FNULL is never used (subprocess.DEVNULL is used instead)
# and is never closed — candidate for removal.
FNULL = open(os.devnull, 'w')
# Remove the target directory (if any) via a CDMI DELETE request.
curl = [
    'curl',
    '-k',
    '-H', 'X-Auth-Token: ' + token,
    '-H', 'X-CDMI-Specification-Version: 1.1.1',
    '-H', 'Content-Type: application/cdmi-container',
    '-X', 'DELETE',
    'https://' + provider + '/cdmi/' + space + '/' + directory + '/'
]
remove_dir_proc = subprocess.Popen(curl, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
remove_dir_proc.wait()
# Re-create the target directory via a CDMI PUT request.
curl = [
    'curl',
    '-k',
    '-H', 'X-Auth-Token: ' + token,
    '-H', 'X-CDMI-Specification-Version: 1.1.1',
    '-H', 'Content-Type: application/cdmi-container',
    '-X', 'PUT',
    'https://' + provider + '/cdmi/' + space + '/' + directory + '/'
]
create_dir_proc = subprocess.Popen(curl, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
create_dir_proc.wait()
# Upload each JSON metadata file as an empty file with CDMI JSON metadata,
# spawning all curl processes in parallel and waiting for them at the end.
processes = []
for source in [studies_dir, data_object_dir]:
    # NOTE(review): `index` is never used — candidate for removal.
    index = 0
    for (dirpath, _, filenames) in os.walk(source):
        # Natural sort so e.g. file2 sorts before file10.
        filenames = natsorted(filenames)
        for filename in filenames[:files]:
            path = dirpath + '/' + filename
            with open(path, 'r') as json_file:
                metadata = json_file.read()
                metadata_json = json.loads(metadata)
                # Pad the cross-reference lists with 19 extra sequential ids
                # (ascending for studies, descending for data objects).
                if metadata_json['object_type'] == 'study':
                    linked_data_objects = metadata_json['linked_data_objects']
                    start_id = linked_data_objects[0]['id']
                    for i in range(1, 20):
                        linked_data_objects.append({ 'id': start_id + i })
                else:
                    related_studies = metadata_json['related_studies']
                    start_id = related_studies[0]['id']
                    for i in range(1, 20):
                        related_studies.append({ 'id': start_id - i })
                curl = [
                    'curl',
                    '-k',
                    '-H', 'X-Auth-Token: ' + token,
                    '-H', 'X-CDMI-Specification-Version: 1.1.1',
                    '-H', 'Content-Type: application/cdmi-object',
                    '-X', 'PUT',
                    '-d', '{"metadata": {"onedata_json": ' + json.dumps(metadata_json) + '}}',
                    'https://' + provider + '/cdmi/' + space + '/' + directory + '/' + filename
                ]
                processes.append(subprocess.Popen(curl, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
for proc in processes:
    proc.wait()
| 33.824742 | 102 | 0.643401 |
6e0cbccdccc4307ec0cd8efe2c3cb65f9c612951 | 1,925 | py | Python | backend/routes/user.py | mradzikowski/flask-trackerproductivity | 029103b80e21b6c64801816fe8dc27585317cb02 | [
"MIT"
] | null | null | null | backend/routes/user.py | mradzikowski/flask-trackerproductivity | 029103b80e21b6c64801816fe8dc27585317cb02 | [
"MIT"
] | null | null | null | backend/routes/user.py | mradzikowski/flask-trackerproductivity | 029103b80e21b6c64801816fe8dc27585317cb02 | [
"MIT"
] | null | null | null | from flask import jsonify, request
import backend.services.user as user_services
from . import bp
@bp.route('/user', methods=['POST', 'GET'])
def create_user():
    """POST creates a user from the JSON payload; GET lists all users."""
    if request.method == "POST":
        body, status = user_services.create_user(request.json)
        return jsonify(body), status
    if request.method == "GET":
        body, status = user_services.get_all_users()
        return jsonify(body), status
    return jsonify(None), 405
@bp.route('/user/<pk>', methods=['GET', 'DELETE'])
def get_user(pk):
    """GET fetches a single user by primary key; DELETE removes it."""
    if request.method == "GET":
        body, status = user_services.get_user(pk)
        return jsonify(body), status
    if request.method == "DELETE":
        body, status = user_services.delete_user(pk)
        return jsonify(body), status
    return jsonify(None), 405
@bp.route('/user/<pk>/tasks', methods=['GET'])
def get_all_tasks_for_user(pk):
    """Return the user's tasks, optionally filtered by the ``active`` arg.

    ``?active=true``/``?active=false`` filters by activity; no argument
    returns all tasks; any other value yields a 400 response.

    Bug fix: the original fell through to ``active.upper()`` when the
    query argument was absent, raising AttributeError on ``None``; the
    no-argument case now short-circuits to "all tasks".
    """
    if request.method == "GET":
        active = request.args.get('active')
        if active is None:
            # No filter requested: return every task.
            body, status = user_services.get_all_tasks_for_user(pk)
        elif active.upper() == "TRUE":
            body, status = user_services.get_all_active_tasks_for_user(pk, True)
        elif active.upper() == "FALSE":
            body, status = user_services.get_all_active_tasks_for_user(pk, False)
        else:
            return {"success": False, "message": "Invalid argument key."}, 400
    else:
        body, status = None, 405
    return jsonify(body), status
@bp.route('/user/<pk>/tasks/productivity', methods=['GET'])
def get_productivity_for_user(pk):
    """Return productivity statistics computed over the user's tasks."""
    if request.method != "GET":
        return jsonify(None), 405
    body, status = user_services.get_all_tasks_and_calculate_productivity(pk)
    return jsonify(body), status
@bp.route('/user/get/all', methods=['GET'])
def get_all_users():
    """Return every user.

    Bug fix: the original checked ``request.methdod`` (typo), which raised
    AttributeError on every request; it now checks ``request.method``.
    """
    if request.method == "GET":
        body, status = user_services.get_all_users()
    else:
        body, status = None, 405
    return jsonify(body), status
| 27.112676 | 81 | 0.628052 |
6e0cf115db4bb95a08b1d4ece55fa11c8d6418e1 | 222 | py | Python | src/mot/motion_models/__init__.py | neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python | 886cd9e87283982381713dbf2e4ef695030f81de | [
"Apache-2.0"
] | 6 | 2021-11-21T10:47:01.000Z | 2022-03-17T01:14:53.000Z | src/mot/motion_models/__init__.py | neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python | 886cd9e87283982381713dbf2e4ef695030f81de | [
"Apache-2.0"
] | 3 | 2021-04-12T12:37:41.000Z | 2021-04-30T14:29:53.000Z | src/mot/motion_models/__init__.py | neer201/Multi-Object-Tracking-for-Automotive-Systems-in-python | 886cd9e87283982381713dbf2e4ef695030f81de | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from mot.motion_models.base_motion_model import MotionModel
from mot.motion_models.CT_motion_model import CoordinateTurnMotionModel
from mot.motion_models.CV_motion_model import ConstantVelocityMotionModel
| 37 | 73 | 0.891892 |
6e0db8ed1374b74b17dc4c64dad644332a33ce07 | 7,205 | py | Python | src/modu/editable/datatypes/date.py | philchristensen/modu | 795f3bc413956b98522ac514dafe35cbab0d57a3 | [
"MIT"
] | null | null | null | src/modu/editable/datatypes/date.py | philchristensen/modu | 795f3bc413956b98522ac514dafe35cbab0d57a3 | [
"MIT"
] | null | null | null | src/modu/editable/datatypes/date.py | philchristensen/modu | 795f3bc413956b98522ac514dafe35cbab0d57a3 | [
"MIT"
] | null | null | null | # modu
# Copyright (c) 2006-2010 Phil Christensen
# http://modu.bubblehouse.org
#
#
# See LICENSE for details
"""
Datatypes for managing stringlike data.
"""
import time, datetime
from zope.interface import implements
from modu.editable import IDatatype, define
from modu.util import form, tags, date
from modu.persist import sql
from modu import persist, assets
# Time-span constants in seconds; MONTH and YEAR are approximate
# (31 days and 365 days respectively).
DAY = 86400
MONTH = DAY * 31
YEAR = DAY * 365
class CurrentDateField(define.definition):
	"""
	Display a checkbox that allows updating a date field with the current date.
	"""
	def get_element(self, req, style, storable):
		"""
		@see: L{modu.editable.define.definition.get_element()}
		"""
		value = getattr(storable, self.get_column_name(), None)
		if(value):
			output = date.strftime(value, self.get('format_string', '%B %d, %Y at %I:%M%p'))
		else:
			output = ''
		if(style == 'search'):
			# Searching on this field is not supported; render an empty node.
			frm = form.FormNode(self.name)
			return frm
		elif(style == 'listing'):
			# Listing view: either the formatted date or a read-only checkbox.
			frm = form.FormNode(self.name)
			if(self.get('date_in_listing', True)):
				if(output == ''):
					output = '(none)'
				frm(type='label', value=output)
			else:
				frm(type='checkbox', disabled=True, checked=bool(output))
			return frm
		elif(style == 'detail' and self.get('read_only', False)):
			# Read-only detail view: just show the formatted date.
			if(output == ''):
				output = '(none)'
			frm = form.FormNode(self.name)
			frm(type='label', value=output)
			return frm
		checked = False
		if(storable.get_id() == 0 and self.get('default_checked', False)):
			checked = True
		frm = form.FormNode(self.name)(
			type = 'checkbox',
			# this is only True if default_checked is true and it's a new item
			checked = checked,
			suffix = ' ' + tags.small()[output],
		)
		if(bool(output)):
			# A value already exists: with 'one_time' set the checkbox is
			# disabled so the date cannot be overwritten.
			if(self.get('one_time', True)):
				frm(attributes=dict(disabled='disabled'))
			else:
				frm(
					text = ' ' + tags.small(_class='minor-help')['check to set current date']
				)
		return frm
	def update_storable(self, req, form, storable):
		"""
		Set the column to the current date/time when the checkbox was checked.

		@see: L{modu.editable.define.definition.update_storable()}
		"""
		if(form[self.name].attr('checked', False)):
			value = datetime.datetime.now()
			# 'timestamp' stores an integer epoch; anything else stores the
			# datetime object as-is.
			save_format = self.get('save_format', 'timestamp')
			if(save_format == 'timestamp'):
				setattr(storable, self.get_column_name(), date.convert_to_timestamp(value))
			else:
				setattr(storable, self.get_column_name(), value)
		return True
class DateField(define.definition):
    """
    Allow editing of date data via a multiple select interface or javascript popup calendar.
    """
    implements(IDatatype)

    def get_element(self, req, style, storable):
        """
        @see: L{modu.editable.define.definition.get_element()}
        """
        value = getattr(storable, self.get_column_name(), None)
        # Numeric stored values are treated as UNIX timestamps and converted
        # for display. NOTE(review): `long` makes this Python 2-only code.
        if(isinstance(value, (int, long, float))):
            value = datetime.datetime.utcfromtimestamp(value)
        if(style == 'search'):
            # Search style: a from/to range built out of two detail-style
            # sub-elements of this same field.
            frm = form.FormNode(self.name)
            frm['from'] = self.get_form_element(req, '_detail', storable)(
                prefix='<div>from date:',
                suffix=tags.br() + '</div>',
            )
            frm['to'] = self.get_form_element(req, '_detail', storable)(
                prefix='<div>to date:',
                suffix='</div>',
            )
            return frm
        elif(style == 'listing' or (style == 'detail' and self.get('read_only', False))):
            # Read-only rendering: a formatted label, or empty when unset.
            if(value):
                output = date.strftime(value, self.get('format_string', '%B %d, %Y at %I:%M%p'))
            else:
                output = ''
            frm = form.FormNode(self.name)
            frm(type='label', value=output)
            return frm
        # Editable detail style: date select widgets plus a "no value"
        # checkbox that enables/disables the inputs via inline javascript.
        current_year = datetime.datetime.now().year
        if(value is not None):
            current_year = getattr(value, 'year', current_year)
        start_year = self.get('start_year', current_year - 2)
        end_year = self.get('end_year', current_year + 5)
        months, days = date.get_date_arrays()
        frm = form.FormNode(self.name)
        frm(type='fieldset', style='brief')
        frm['null'](type='checkbox', text="no value", weight=-1, suffix=tags.br(),
            attributes=dict(onChange='enableDateField(this);'))
        assets.activate_jquery(req)
        # Helper script toggling the sibling date inputs when the "no value"
        # checkbox changes state.
        req.content.report('header', tags.script(type='text/javascript')["""
            function enableDateField(checkboxField){
                var formItem = $(checkboxField).parent().parent();
                if($(checkboxField).attr('checked')){
                    formItem.children(':enabled').attr('disabled', true);
                }
                else{
                    formItem.children(':disabled').attr('disabled', false);
                }
            }
        """])
        attribs = {}
        if(value is None):
            frm['null'](checked=True)
            #attribs['disabled'] = None
            if(self.get('default_now', False)):
                value = datetime.datetime.now()
                frm['null'](checked=False)
        frm['date'](
            type = self.get('style', 'datetime'),
            value = value,
            attributes = attribs,
            suffix = tags.script(type="text/javascript")["""
                enableDateField($('#form-item-%s input'));
            """ % self.name],
        )
        frm.validate = self.validate
        return frm

    def validate(self, req, frm):
        # Fail validation when the field is required but no date was entered.
        if(not frm[self.name]['date'].attr('value', '') and self.get('required', False)):
            frm.set_error(self.name, 'You must enter a value for this field.')
            return False
        return True

    def update_storable(self, req, form, storable):
        """
        @see: L{modu.editable.define.definition.update_storable()}
        """
        save_format = self.get('save_format', 'timestamp')
        if(self.get('read_only')):
            # Read-only fields are only ever written on first save, and only
            # when default_now is configured.
            if(self.get('default_now', False) and not storable.get_id()):
                if(save_format == 'timestamp'):
                    setattr(storable, self.get_column_name(), int(time.time()))
                else:
                    setattr(storable, self.get_column_name(), datetime.datetime.now())
            return True
        data = form[self.name]['date']
        # The "no value" checkbox wins: store NULL.
        if(data.attr('null', 0)):
            setattr(storable, self.get_column_name(), None)
            return True
        date_data = req.data[form.name][self.name].get('date', None)
        # if it's not a dict, it must be None, or broken
        if(isinstance(date_data, dict)):
            value = date.get_dateselect_value(date_data, self.get('style', 'datetime'))
        else:
            value = None
        if(save_format == 'timestamp'):
            setattr(storable, self.get_column_name(), date.convert_to_timestamp(value))
        else:
            setattr(storable, self.get_column_name(), value)
        return True

    def get_search_value(self, value, req, frm):
        # Build a SQL condition from the from/to range widgets; 0 means
        # "no bound supplied" for either end.
        form_data = frm[self.name]
        to_value = 0
        from_value = 0
        if not(value['to'].get('null')):
            start_year = form_data['to']['date'].start_year
            end_year = form_data['to']['date'].end_year
            date_data = value['to'].get('date', None)
            if(date_data):
                to_value = date.get_dateselect_value(date_data, self.get('style', 'datetime'), start_year, end_year)
                to_value = time.mktime(to_value.timetuple())
        if not(value['from'].get('null')):
            start_year = form_data['from']['date'].start_year
            end_year = form_data['from']['date'].end_year
            date_data = value['from'].get('date', None)
            if(date_data):
                from_value = date.get_dateselect_value(date_data, self.get('style', 'datetime'), start_year, end_year)
                from_value = time.mktime(from_value.timetuple())
        if(to_value and from_value):
            # Datetime columns must be converted to epoch seconds in SQL
            # before comparing against the epoch bounds computed above.
            if(self.get('save_format', 'timestamp') == 'datetime'):
                return sql.RAW('UNIX_TIMESTAMP(%%s) BETWEEN %s AND %s' % (from_value, to_value))
            else:
                return sql.RAW('%%s BETWEEN %s AND %s' % (from_value, to_value))
        elif(to_value):
            return sql.LT(to_value)
        elif(from_value):
            return sql.GT(from_value)
        else:
            return None
| 28.82 | 106 | 0.658015 |
6e0dc799717432679f99b12ed1cdbf0dbbf71f58 | 829 | py | Python | calculator.py | MateusLinharesDeAelencarLima/Calculator | 44e836aa92fd76d21b4c5f0edfcb5419886f1df6 | [
"CC0-1.0"
] | null | null | null | calculator.py | MateusLinharesDeAelencarLima/Calculator | 44e836aa92fd76d21b4c5f0edfcb5419886f1df6 | [
"CC0-1.0"
] | 1 | 2021-09-10T21:13:16.000Z | 2021-09-23T16:13:08.000Z | calculator.py | MateusLinharesDeAelencarLima/Calculator | 44e836aa92fd76d21b4c5f0edfcb5419886f1df6 | [
"CC0-1.0"
] | null | null | null | from functions.summation import summation
from functions.subtraction import subtraction
from functions.multiplication import multiplication
from functions.division import division
from functions.exponential import exponential
from functions.root import root

# Read the two operands (prompts kept in Portuguese, as originally emitted).
num1 = float(input('número 1: '))
num2 = float(input('número 2: '))

# Evaluate each operation in the original order, pairing it with its label.
_results = [
    ("A soma dos números é:", summation(num1, num2)),
    ("A diferença dos números é:", subtraction(num1, num2)),
    ("O produto dos números é:", multiplication(num1, num2)),
    ("O quociente dos números é:", division(num1, num2)),
    ("A potência dos números é:", exponential(num1, num2)),
    ("A raiz dos números é:", root(num1, num2)),
]

# Report every result in the same format as before.
for _label, _value in _results:
    print(_label, _value)
| 34.541667 | 51 | 0.784077 |
6e0f3ad7fb4aa74ebb70351b2ab8036b7bfa68b3 | 2,949 | py | Python | tests.py | suetAndTie/ekho | fbf8a19e1babc3fc0f11220ec9440a7f05f4bfcd | [
"MIT"
] | 1 | 2019-01-31T19:17:01.000Z | 2019-01-31T19:17:01.000Z | tests.py | suetAndTie/ekho | fbf8a19e1babc3fc0f11220ec9440a7f05f4bfcd | [
"MIT"
] | null | null | null | tests.py | suetAndTie/ekho | fbf8a19e1babc3fc0f11220ec9440a7f05f4bfcd | [
"MIT"
] | null | null | null | ! pip install -q librosa nltk
import torch
import numpy as np
import librosa
import librosa.display
import IPython
from IPython.display import Audio
# need this for English text processing frontend
import nltk
! python -m nltk.downloader cmudict
preset = "20180505_deepvoice3_ljspeech.json"
checkpoint_path = "20180505_deepvoice3_checkpoint_step000640000.pth"
if not exists(preset):
!curl -O -L "https://www.dropbox.com/s/0ck82unm0bo0rxd/20180505_deepvoice3_ljspeech.json"
if not exists(checkpoint_path):
!curl -O -L "https://www.dropbox.com/s/5ucl9remrwy5oeg/20180505_deepvoice3_checkpoint_step000640000.pth"
import hparams
import json
# Load parameters from preset
with open(preset) as f:
hparams.hparams.parse_json(f.read())
# Inject frontend text processor
import synthesis
import train
from deepvoice3_pytorch import frontend
synthesis._frontend = getattr(frontend, "en")
train._frontend = getattr(frontend, "en")
# alises
fs = hparams.hparams.sample_rate
hop_length = hparams.hparams.hop_size
def tts(model, text, p=0, speaker_id=None, fast=True, figures=True):
    """Synthesize *text* with *model*, optionally plot the attention
    alignment and spectrogram, and play the waveform inline.

    p -- forwarded to synthesis.tts; presumably a replace-pronunciation
         probability (TODO confirm against synthesis module)
    speaker_id -- speaker index for multi-speaker models (None otherwise)
    fast -- forwarded to synthesis.tts as its fast-inference flag
    figures -- when True, render alignment/spectrogram via visualize()
    """
    from synthesis import tts as _tts
    waveform, alignment, spectrogram, mel = _tts(model, text, p, speaker_id, fast)
    if figures:
        visualize(alignment, spectrogram)
    # `fs` is the sample rate pulled from hparams at module level.
    IPython.display.display(Audio(waveform, rate=fs))
def visualize(alignment, spectrogram):
    """Plot the attention alignment matrix (top) and the linear-scale
    spectrogram (bottom) in one figure.

    NOTE(review): relies on pylab-style names (figure, subplot, imshow,
    xlabel, ...) being in scope, presumably via `%pylab inline` in the
    original notebook -- this is not importable as a plain module.
    """
    label_fontsize = 16
    figure(figsize=(16,16))
    subplot(2,1,1)
    # Transpose so decoder steps run along x and encoder steps along y.
    imshow(alignment.T, aspect="auto", origin="lower", interpolation=None)
    xlabel("Decoder timestamp", fontsize=label_fontsize)
    ylabel("Encoder timestamp", fontsize=label_fontsize)
    colorbar()
    subplot(2,1,2)
    # `fs` and `hop_length` come from the hparams loaded at module level.
    librosa.display.specshow(spectrogram.T, sr=fs,
                             hop_length=hop_length, x_axis="time", y_axis="linear")
    xlabel("Time", fontsize=label_fontsize)
    ylabel("Hz", fontsize=label_fontsize)
    tight_layout()
    colorbar()
from train import build_model
from train import restore_parts, load_checkpoint
model = build_model()
model = load_checkpoint(checkpoint_path, model, None, True)
# Try your favorite senteneces:)
texts = [
"Scientists at the CERN laboratory say they have discovered a new particle.",
"There's a way to measure the acute emotional intelligence that has never gone out of style.",
"President Trump met with other leaders at the Group of 20 conference.",
"The Senate's bill to repeal and replace the Affordable Care Act is now imperiled.",
"Generative adversarial network or variational auto-encoder.",
"The buses aren't the problem, they actually provide a solution.",
"peter piper picked a peck of pickled peppers how many peppers did peter piper pick.",
"Some have accepted this as a miracle without any physical explanation.",
]
for idx, text in enumerate(texts):
print(idx, text)
tts(model, text, figures=False)
# With attention plot
text = "Generative adversarial network or variational auto-encoder."
tts(model, text, figures=True)
| 32.406593 | 106 | 0.758901 |
6e10c0ea90829d65558f7e100bd54ed82664fe76 | 405 | py | Python | lib/utils/checks.py | Matt-cloud/Discord.py-Template | 4b2ac9f0897bb44dfd799d821e536fc34ef3064e | [
"MIT"
] | null | null | null | lib/utils/checks.py | Matt-cloud/Discord.py-Template | 4b2ac9f0897bb44dfd799d821e536fc34ef3064e | [
"MIT"
] | null | null | null | lib/utils/checks.py | Matt-cloud/Discord.py-Template | 4b2ac9f0897bb44dfd799d821e536fc34ef3064e | [
"MIT"
] | null | null | null | from discord.ext import commands
from lib import exceptions
import os
import json
configFile = os.path.join(os.getcwd(), "data", "config.json")
with open(configFile, "rb") as f:
config = json.load(f)
def is_owner():
    """Command check that passes only for user ids listed under "owners"
    in config.json; anyone else triggers OwnerOnlyCommand."""
    async def _ensure_owner(ctx):
        # Reject first, then allow -- same outcome as the original check.
        if ctx.author.id not in config['owners']:
            raise exceptions.OwnerOnlyCommand
        return True
    return commands.check(_ensure_owner)
| 22.5 | 61 | 0.681481 |
6e11308aa80bc676e3ca2d21a4edcb18f890e752 | 1,649 | py | Python | envs/fetch/interval.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | 1 | 2020-09-16T06:15:17.000Z | 2020-09-16T06:15:17.000Z | envs/fetch/interval.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | 5 | 2020-09-26T01:30:01.000Z | 2022-01-13T03:15:42.000Z | envs/fetch/interval.py | malikasng/Bbox_HGG_with_CTR_and_RRTstarFND | 2b1aae6c347f544fefface0c9f26dc4ecde51108 | [
"MIT"
] | null | null | null | import gym
import numpy as np
from torchvision.utils import save_image
from .fixobj import FixedObjectGoalEnv
class IntervalGoalEnv(FixedObjectGoalEnv):
    """Goal-sampling environment: goals are drawn uniformly from an interval
    around either a configured goal center or the initial gripper position."""

    def __init__(self, args):
        self.img_size = args.img_size
        FixedObjectGoalEnv.__init__(self, args)

    def generate_goal(self):
        """Sample and return a 3-d goal position (copied, so callers may
        mutate it freely)."""
        if self.target_goal_center is not None:
            ndim = self.target_goal_center.ndim
            if ndim > 1:
                # Several candidate centers: pick one uniformly at random.
                ind = np.random.randint(len(self.target_goal_center))
                goal_center = self.target_goal_center[ind]
            else:
                goal_center = self.target_goal_center
            if isinstance(self.target_range, np.ndarray):
                if self.target_range.size == 2:
                    # 2-d range: pad with a zero so the z offset is 0.
                    range_to_use = np.concatenate([self.target_range, np.zeros(shape=1)])
                elif self.target_range.size == 3:
                    # BUG FIX: was `self.target_range, np.zeros(shape=1)`,
                    # which built a tuple and made `-range_to_use` raise
                    # TypeError. A 3-d range is used as-is.
                    range_to_use = self.target_range
                offset = np.random.uniform(-range_to_use, range_to_use)
            else:
                # Scalar range applies independently to every axis.
                offset = np.random.uniform(-self.target_range, self.target_range, size=3)
            goal = goal_center + offset
            # Keep the goal at the center's height regardless of the z offset.
            goal[2] = goal_center[2]
        else:
            if self.has_object:
                goal = self.initial_gripper_xpos[:3] + self.target_offset
                if self.args.env == 'FetchSlide-v1':
                    # Slide task: bias x forward and halve the jitter.
                    goal[0] += self.target_range * 0.5
                    goal[1] += np.random.uniform(-self.target_range, self.target_range) * 0.5
                else:
                    goal[0] += np.random.uniform(-self.target_range, self.target_range)
                    goal[1] += np.random.uniform(-self.target_range, self.target_range)
                # Raise the goal off the table when in-the-air goals are on.
                goal[2] = self.height_offset + int(self.target_in_the_air) * 0.45
            else:
                goal = self.initial_gripper_xpos[:3] + np.array([np.random.uniform(-self.target_range, self.target_range), self.target_range, self.target_range])
        return goal.copy()
6e11fb05adb494991b86d4b22a22f936a7c8a876 | 1,908 | py | Python | cactusbot/commands/magic/alias.py | CactusBot/CactusBot | 6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5 | [
"MIT"
] | 23 | 2016-02-16T05:09:11.000Z | 2016-09-20T14:22:51.000Z | cactusbot/commands/magic/alias.py | Alkali-Metal/CactusBot | 6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5 | [
"MIT"
] | 190 | 2016-09-30T05:31:59.000Z | 2018-12-22T08:46:49.000Z | cactusbot/commands/magic/alias.py | Alkali-Metal/CactusBot | 6d035bf74bdc8f7fb3ee1e79f8d443f5b17e7ea5 | [
"MIT"
] | 16 | 2016-10-09T16:51:48.000Z | 2017-10-25T05:29:10.000Z | """Alias command."""
from . import Command
from ...packets import MessagePacket
class Alias(Command):
"""Alias command."""
COMMAND = "alias"
@Command.command(role="moderator")
async def add(self, alias: "?command", command: "?command", *_: False,
raw: "packet"):
"""Add a new command alias."""
_, _, _, _, *args = raw.split()
if args:
packet_args = MessagePacket.join(
*args, separator=' ').json["message"]
else:
packet_args = None
response = await self.api.add_alias(command, alias, packet_args)
if response.status == 201:
return "Alias !{} for !{} created.".format(alias, command)
elif response.status == 200:
return "Alias !{} for command !{} updated.".format(alias, command)
elif response.status == 404:
return "Command !{} does not exist.".format(command)
@Command.command(role="moderator")
async def remove(self, alias: "?command"):
"""Remove a command alias."""
response = await self.api.remove_alias(alias)
if response.status == 200:
return "Alias !{} removed.".format(alias)
elif response.status == 404:
return "Alias !{} doesn't exist!".format(alias)
@Command.command("list", role="moderator")
async def list_aliases(self):
"""List all aliases."""
response = await self.api.get_command()
if response.status == 200:
commands = (await response.json())["data"]
return "Aliases: {}.".format(', '.join(sorted(
"{} ({})".format(
command["attributes"]["name"],
command["attributes"]["commandName"])
for command in commands
if command.get("type") == "aliases"
)))
return "No aliases added!"
| 32.338983 | 78 | 0.545597 |
6e13a8102a55ae649fda3dcfedbae946ebff32c0 | 2,828 | py | Python | explorer/util.py | brianhouse/rlab | 4d878abd2299fd340a645ebd8b92a68c2b48f41e | [
"MIT"
] | null | null | null | explorer/util.py | brianhouse/rlab | 4d878abd2299fd340a645ebd8b92a68c2b48f41e | [
"MIT"
] | null | null | null | explorer/util.py | brianhouse/rlab | 4d878abd2299fd340a645ebd8b92a68c2b48f41e | [
"MIT"
] | null | null | null | import numpy as np
def combine(signal_x, signal_y):
    """Pair two signals element-wise along a new trailing axis."""
    paired = np.stack((signal_x, signal_y), axis=-1)
    return paired
def normalize(signal, minimum=None, maximum=None):
    """Normalize a signal to the range 0, 1. Uses the minimum and maximum observed in the data unless explicitly passed."""
    # Always work on a fresh float copy so the caller's data is untouched.
    values = np.array(signal, dtype='float')
    lo = np.min(values) if minimum is None else minimum
    hi = np.max(values) if maximum is None else maximum
    # Shift-and-scale into [0, 1], then clamp anything outside the bounds.
    scaled = (values - lo) / (hi - lo)
    return np.clip(scaled, 0.0, 1.0)
def resample(ts, values, num_samples):
    """Convert a list of times and a list of values to evenly spaced samples with linear interpolation"""
    # Timestamps must be strictly increasing for interpolation to make sense.
    assert np.all(np.diff(ts) > 0)
    scaled_ts = normalize(ts)
    sample_grid = np.linspace(0.0, 1.0, num_samples)
    return np.interp(sample_grid, scaled_ts, values)
def smooth(signal, size=10, window='blackman'):
    """Apply weighted moving average (aka low-pass filter) via convolution function to a signal"""
    # NOTE(review): the `window` parameter is currently ignored -- the kernel
    # built below is always a flat (boxcar) window. Confirm whether a real
    # window function (e.g. np.blackman(size)) was intended before changing,
    # since fixing it would alter numeric output for existing callers.
    signal = np.array(signal)
    # Windows shorter than 3 samples cannot smooth anything; return as-is.
    if size < 3:
        return signal
    # Mirror-extend the signal at both ends to reduce edge artifacts.
    s = np.r_[2 * signal[0] - signal[size:1:-1], signal, 2 * signal[-1] - signal[-1:-size:-1]]
    w = np.ones(size,'d')
    # Normalized kernel -> moving average; then trim the mirrored padding.
    y = np.convolve(w / w.sum(), s, mode='same')
    return y[size - 1:-size + 1]
def detect_peaks(signal, lookahead=10, delta=0):
    """ Detect the local maximas and minimas in a signal
    lookahead -- samples to look ahead from a potential peak to see if a bigger one is coming
    delta -- minimum difference between a peak and surrounding points to be considered a peak (no hills) and makes things faster
    Note: careful if you have flat regions, may affect lookahead
    Returns (peaks, valleys), each a list of [index, value] pairs.
    """
    # Fixes vs. original: removed the unused drop_first_peak/drop_first_valley
    # locals, and replaced np.Inf (removed in NumPy 2.0) with np.inf.
    signal = np.array(signal)
    peaks = []
    valleys = []
    min_value, max_value = np.inf, -np.inf
    for index, value in enumerate(signal[:-lookahead]):
        # Track the running extremes since the last confirmed peak/valley.
        if value > max_value:
            max_value = value
            max_pos = index
        if value < min_value:
            min_value = value
            min_pos = index
        # Confirm a peak once the signal has dropped at least `delta` below
        # the running maximum and nothing larger follows within `lookahead`.
        if value < max_value - delta and max_value != np.inf:
            if signal[index:index + lookahead].max() < max_value:
                peaks.append([max_pos, max_value])
                # Reset both extremes; start hunting for the next valley.
                max_value = np.inf
                min_value = np.inf
                if index + lookahead >= signal.size:
                    break
                continue
        # Symmetric check for valleys.
        if value > min_value + delta and min_value != -np.inf:
            if signal[index:index + lookahead].min() > min_value:
                valleys.append([min_pos, min_value])
                min_value = -np.inf
                max_value = -np.inf
                if index + lookahead >= signal.size:
                    break
    return peaks, valleys
6e14c71363bc33135f20b63aec47306b9531737a | 2,839 | py | Python | dooly/converters/kobart_utils.py | jinmang2/DOOLY | 961c7b43b06dffa98dc8a39e72e417502e89470c | [
"Apache-2.0"
] | 17 | 2022-03-06T05:06:14.000Z | 2022-03-31T00:25:06.000Z | dooly/converters/kobart_utils.py | jinmang2/DOOLY | 961c7b43b06dffa98dc8a39e72e417502e89470c | [
"Apache-2.0"
] | 6 | 2022-03-27T18:18:40.000Z | 2022-03-31T17:35:34.000Z | dooly/converters/kobart_utils.py | jinmang2/DOOLY | 961c7b43b06dffa98dc8a39e72e417502e89470c | [
"Apache-2.0"
] | 1 | 2022-03-31T13:07:41.000Z | 2022-03-31T13:07:41.000Z | import os
import sys
import hashlib
import importlib
def is_available_boto3():
    """Return boto3's module spec when importable, else None (truthy probe)."""
    spec = importlib.util.find_spec("boto3")
    return spec
if is_available_boto3():
import boto3
from botocore import UNSIGNED
from botocore.client import Config
else:
raise ModuleNotFoundError("Please install boto3 with: `pip install boto3`.")
class AwsS3Downloader(object):
    """Thin boto3 wrapper that downloads S3 objects with a console
    progress bar. The client uses unsigned requests, so public buckets
    work without credentials."""

    def __init__(
        self,
        aws_access_key_id=None,
        aws_secret_access_key=None,
    ):
        # NOTE(review): self.resource is never used by download(); it may be
        # vestigial -- confirm before removing.
        self.resource = boto3.Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
        ).resource("s3")
        # Unsigned config -> anonymous access; credentials args are still
        # accepted but the UNSIGNED signature version bypasses signing.
        self.client = boto3.client(
            "s3",
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            config=Config(signature_version=UNSIGNED),
        )

    def __split_url(self, url: str):
        """Split 's3://bucket/key...' (or 'bucket/key...') into (bucket, key)."""
        if url.startswith("s3://"):
            url = url.replace("s3://", "")
        bucket, key = url.split("/", maxsplit=1)
        return bucket, key

    def download(self, url: str, local_dir: str):
        """Download the object at *url* into *local_dir*, drawing a progress
        bar on stdout. Returns the local path; failures are printed, not
        raised, so the returned file may not exist."""
        bucket, key = self.__split_url(url)
        filename = os.path.basename(key)
        file_path = os.path.join(local_dir, filename)
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        # HEAD the object first so the progress callback knows the total size.
        meta_data = self.client.head_object(Bucket=bucket, Key=key)
        total_length = int(meta_data.get("ContentLength", 0))
        downloaded = 0

        def progress(chunk):
            # Invoked by boto3 with the byte count of each transferred chunk.
            nonlocal downloaded
            downloaded += chunk
            done = int(50 * downloaded / total_length)
            sys.stdout.write(
                "\r{}[{}{}]".format(file_path, "█" * done, "." * (50 - done))
            )
            sys.stdout.flush()

        try:
            with open(file_path, "wb") as f:
                self.client.download_fileobj(bucket, key, f, Callback=progress)
            sys.stdout.write("\n")
            sys.stdout.flush()
        except Exception as e:  # E722 do not use bare 'except'
            print(f"Exception occured: {e}.\ndownloading file is failed. {url}")
        return file_path
def download(url, chksum=None, cachedir=".cache"):
    """Fetch *url* from S3 into *cachedir*, reusing a cached copy when its
    md5-prefix matches *chksum*. Returns (local_path, was_cached)."""
    cache_root = os.path.join(os.getcwd(), cachedir)
    os.makedirs(cache_root, exist_ok=True)
    target = os.path.join(cache_root, os.path.basename(url))

    # Cache hit: the first 10 hex digits of the md5 must match.
    if os.path.isfile(target):
        digest = hashlib.md5(open(target, "rb").read()).hexdigest()[:10]
        if digest == chksum:
            print(f"using cached model. {target}")
            return target, True

    # Cache miss: pull from S3, then verify the checksum when one was given.
    s3 = AwsS3Downloader()
    target = s3.download(url, cache_root)
    if chksum:
        digest = hashlib.md5(open(target, "rb").read()).hexdigest()[:10]
        assert chksum == digest, "corrupted file!"
    return target, False
6e154f31690fe2c1e126dc21483f4d1d4a667900 | 348 | py | Python | Python_Files/murach/book_apps/ch13/factorial_recursion.py | Interloper2448/BCGPortfolio | c4c160a835c64c8d099d44c0995197f806ccc824 | [
"MIT"
] | null | null | null | Python_Files/murach/book_apps/ch13/factorial_recursion.py | Interloper2448/BCGPortfolio | c4c160a835c64c8d099d44c0995197f806ccc824 | [
"MIT"
] | null | null | null | Python_Files/murach/book_apps/ch13/factorial_recursion.py | Interloper2448/BCGPortfolio | c4c160a835c64c8d099d44c0995197f806ccc824 | [
"MIT"
] | null | null | null | def factorial(num):
if num == 0:
return 1
else:
return num * factorial(num - 1)
def main():
    """Print 0! through 5!, one per line, in the original format."""
    for n in range(6):
        print(f"{n}! =", factorial(n))


if __name__ == "__main__":
    main()
| 20.470588 | 39 | 0.514368 |
6e15e9506e9a75c167124e23e066dc0069217190 | 1,565 | py | Python | tests/uv/util/test_env.py | hartikainen/uv-metrics | 7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21 | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2020-06-17T17:33:05.000Z | 2022-03-30T17:32:05.000Z | tests/uv/util/test_env.py | hartikainen/uv-metrics | 7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21 | [
"ECL-2.0",
"Apache-2.0"
] | 28 | 2020-06-16T18:32:08.000Z | 2020-11-12T17:51:20.000Z | tests/uv/util/test_env.py | hartikainen/uv-metrics | 7b47b8ce1dff5fc41cdd540f816ea41a0cd27c21 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-08-07T20:05:49.000Z | 2021-10-21T01:43:00.000Z | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uv.util.env as ue
def test_extract_params(monkeypatch):
    """extract_params strips the prefix from env var names and lowercases
    the remaining key, with or without a separating underscore."""

    def fake_env(prefix):
        # Note the third entry has no underscore between prefix and key.
        return {
            f"{prefix}_MY_KEY": "face",
            f"{prefix}_ANOTHER_KEY": "sandwich",
            f"{prefix}THIRD_KEY": "ham"
        }

    expected = {"my_key": "face", "another_key": "sandwich", "third_key": "ham"}

    # With a custom-supplied environment, parsing works for any prefix spelling.
    for prefix in ("ENVVAR", "funky"):
        assert expected == ue.extract_params(prefix=prefix, env=fake_env(prefix))

    k = f"{ue._ENV_VAR_PREFIX}_RANDOM_KEY"
    v = "better_not_be_set"

    # Make sure the variable is absent, confirm an empty result...
    if os.environ.get(k):
        monkeypatch.delenv(k)
    assert ue.extract_params() == {}

    # ...then set it and confirm it round-trips with a de-prefixed key.
    monkeypatch.setenv(k, v)
    assert ue.extract_params() == {"random_key": v}
| 29.528302 | 78 | 0.705431 |
6e1651dd40e1ae6c43644b4a77456f4eb701c53a | 1,054 | py | Python | models/fleet.py | gnydick/qairon | e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257 | [
"MIT"
] | null | null | null | models/fleet.py | gnydick/qairon | e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257 | [
"MIT"
] | null | null | null | models/fleet.py | gnydick/qairon | e67af1f88ac6c614ae33adc4f42ab2ec3cc5b257 | [
"MIT"
] | null | null | null | from sqlalchemy import *
from sqlalchemy.orm import relationship
from db import db
class Fleet(db.Model):
    """SQLAlchemy model for a fleet: a named grouping within a deployment
    target, keyed by a composite "target:type:name" string id."""
    __tablename__ = "fleet"
    # Composite string primary key; maintained by the before_insert /
    # before_update event listeners defined in this module.
    id = Column(String, primary_key=True)
    deployment_target_id = Column(String, ForeignKey('deployment_target.id'))
    fleet_type_id = Column(String, ForeignKey('fleet_type.id'))
    name = Column(String(256))
    # Free-form text payload -- presumably serialized defaults; confirm format.
    defaults = Column(Text)
    # Identifier in the external system -- presumably provider-assigned.
    native_id = Column(String)

    deployment_target = relationship("DeploymentTarget", back_populates="fleets")
    subnets = relationship("Subnet", secondary='subnets_fleets', back_populates="fleets")
    type = relationship("FleetType", back_populates="fleets")
    capacities = relationship("Capacity", back_populates="fleet")

    def __repr__(self):
        # The composite id doubles as the human-readable representation.
        return self.id
@db.event.listens_for(Fleet, 'before_update')
@db.event.listens_for(Fleet, 'before_insert')
def my_before_insert_listener(mapper, connection, fleet):
    """Recompute the Fleet's composite id before any insert or update."""
    __update_id__(fleet)
def __update_id__(fleet):
    """Derive the composite primary key from target, type, and name."""
    parts = [fleet.deployment_target_id, fleet.fleet_type_id, fleet.name]
    fleet.id = ':'.join(parts)
| 30.114286 | 89 | 0.736243 |
6e17097d88bd49914581f2dfe02ed8fa34bee9d4 | 254 | py | Python | backend/authentication/admin.py | jklewis99/hypertriviation | e12be87e978505fb3a73f4fc606173f41a3aee81 | [
"MIT"
] | 1 | 2022-03-27T19:39:07.000Z | 2022-03-27T19:39:07.000Z | backend/authentication/admin.py | jklewis99/hypertriviation | e12be87e978505fb3a73f4fc606173f41a3aee81 | [
"MIT"
] | 5 | 2022-03-27T19:32:54.000Z | 2022-03-31T23:25:44.000Z | backend/authentication/admin.py | jklewis99/hypertriviation | e12be87e978505fb3a73f4fc606173f41a3aee81 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import HypertriviationUser
class HypertriviationUserAdmin(admin.ModelAdmin):
    """Django admin configuration for the HypertriviationUser model."""
    model = HypertriviationUser


# Register your models here.
admin.site.register(HypertriviationUser, HypertriviationUserAdmin)
6e1773f3e2177f91fdf46e022af55af83edbbcb5 | 1,568 | py | Python | logs/followup_email.py | vreyespue/Movie_Bot | 192c74be62afcfda77a0984ff4da3014226c3432 | [
"Apache-2.0"
] | 26 | 2019-02-04T04:55:09.000Z | 2021-09-22T14:58:46.000Z | logs/followup_email.py | vreyespue/Movie_Bot | 192c74be62afcfda77a0984ff4da3014226c3432 | [
"Apache-2.0"
] | 2 | 2019-05-07T16:33:09.000Z | 2021-02-13T18:25:35.000Z | logs/followup_email.py | vreyespue/Movie_Bot | 192c74be62afcfda77a0984ff4da3014226c3432 | [
"Apache-2.0"
] | 27 | 2018-12-10T12:13:50.000Z | 2020-10-11T17:43:22.000Z | ###################################################################
######## Follow up email #############
###################################################################
"""
followup_email.py
This is special use case code written to assist bot developers. It consolidates topics that are not familiar to the bot
and sends it in a nicely formatted email to the developers team.
"""
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import smtplib
import os,string,sys

# Make the project root importable so `config` resolves when run as a script.
sys.path.append(os.path.normpath(os.getcwd()))
from config import location

# SMTP relay host -- placeholder, must be configured before this script works.
SERVER = " "
FROM = ["xxxx@gmail.com"]
TO = ["xxxx@gmail.com"] # must be a list
SUBJECT = "Follow up questions email"
TEXT = """Hello,
Here are the various questions users asked me today which I have no idea about. Could you help me learn these topics?
Regards,
Kelly
"""

# Assemble a multipart message: a plain-text body plus the follow-up log.
msg = MIMEMultipart()
msg['From'] = ", ".join(FROM)
msg['To'] = ", ".join(TO)
msg['Subject'] = SUBJECT
body = TEXT
msg.attach(MIMEText(body, 'plain'))

# Attach the consolidated follow-up questions file, base64-encoded.
filename = 'followup_file.TXT'
attachment = open(location + 'followup_file.TXT', "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(part)
message = msg.as_string()

# NOTE(review): smtplib's sendmail expects the from-address as a single
# string; passing the FROM list looks questionable -- confirm, and consider
# FROM[0] here.
server = smtplib.SMTP(SERVER)
server.sendmail(FROM, TO, message)
server.quit()
6e18dbf82c0ab208ca098975575465ec97248c7b | 269 | py | Python | backend/validators/authorization_val.py | NelsonM9/senaSoft | d72b5ed32b86a53aac962ec440d84ecce4555780 | [
"Apache-2.0"
] | null | null | null | backend/validators/authorization_val.py | NelsonM9/senaSoft | d72b5ed32b86a53aac962ec440d84ecce4555780 | [
"Apache-2.0"
] | null | null | null | backend/validators/authorization_val.py | NelsonM9/senaSoft | d72b5ed32b86a53aac962ec440d84ecce4555780 | [
"Apache-2.0"
] | null | null | null | from marshmallow import validate, fields, Schema
class AuthorizationVal(Schema):
    """Marshmallow schema validating authorization payloads: two string ids
    capped at 10 characters and a required raw file field."""
    # BUG FIX: marshmallow fields take `validate=`, not `validator=`. The
    # original keyword was silently absorbed as field metadata, so the
    # Length(max=10) limits were never enforced.
    id_auth = fields.Str(required=True, validate=validate.Length(max=10))
    id_o = fields.Str(required=True, validate=validate.Length(max=10))
    file_a = fields.Raw(required=True)
| 38.428571 | 74 | 0.758364 |
6e1b6e602b092d059fb5b4b96bb130aa002770f4 | 1,213 | py | Python | wiwo/sender.py | CoreSecurity/wiwo | 44bd44b8ebea7e33105a7f4dac6480493cbb9623 | [
"Apache-1.1"
] | 76 | 2015-08-01T23:24:43.000Z | 2018-07-02T11:13:16.000Z | wiwo/sender.py | 6e726d/wiwo | 44bd44b8ebea7e33105a7f4dac6480493cbb9623 | [
"Apache-1.1"
] | 1 | 2016-01-28T22:11:17.000Z | 2016-02-03T22:14:46.000Z | wiwo/sender.py | 6e726d/wiwo | 44bd44b8ebea7e33105a7f4dac6480493cbb9623 | [
"Apache-1.1"
] | 27 | 2015-08-11T07:24:42.000Z | 2018-10-05T11:09:54.000Z | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
#
# Copyright 2003-2015 CORE Security Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# Andres Blanco (6e726d)
# Andres Gazzoli
#
import ethernet
import pcapy
class Sender(object):
    """Injects raw Ethernet frames onto a network interface via pcapy."""

    @staticmethod
    def send(frame_obj, iface_name):
        """
        Method that inject/send a frame.
        Returns the frame bytes actually sent (including any padding).
        """
        frame = frame_obj.get_packet()
        # Pad undersized frames with zero bytes up to the Ethernet minimum.
        if len(frame) < ethernet.ETHERNET_MIN_SIZE:
            padding = "\x00" * (ethernet.ETHERNET_MIN_SIZE - len(frame))
            frame += padding
        # open_live args: snaplen=ETHERNET_MTU, promiscuous off, 100 ms timeout.
        pd = pcapy.open_live(iface_name, ethernet.ETHERNET_MTU, 0, 100)
        pd.sendpacket(frame)
        return frame
| 28.880952 | 74 | 0.678483 |