| id stringlengths 3-8 | content stringlengths 100-981k |
|---|---|
190546
|
import torch
import random
import numpy as np
import os
import pandas as pd
import config
import cv2
import matplotlib.pyplot as plt
from dataset import ImageFolder
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
import albumentations as A
from albumentations.pytorch import ToTensorV2
TRANSFORM = A.Compose([
#ToTensor --> Normalize(mean, std)
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
max_pixel_value = 255,
),
ToTensorV2()
])
def seed_all(SEED_VALUE= config.SEED):
random.seed(SEED_VALUE)
os.environ['PYTHONHASHSEED'] = str(SEED_VALUE)
np.random.seed(SEED_VALUE)
torch.manual_seed(SEED_VALUE)
torch.cuda.manual_seed(SEED_VALUE)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # keep benchmarking off so seeded runs stay reproducible
def get_data_loaders(data_path = config.DATAPATH):
df = pd.read_csv(data_path)
train_data, test_data = train_test_split(df, test_size = 0.2, random_state = config.SEED, stratify = df.hasTable)
train_dataset = ImageFolder(train_data, isTrain = True, transform = None)
test_dataset = ImageFolder(test_data, isTrain = False, transform = None)
train_loader = DataLoader(train_dataset, batch_size = config.BATCH_SIZE, shuffle=True, num_workers = 4, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size = 8, shuffle=False, num_workers = 4, pin_memory=True)
return train_loader, test_loader
#Checkpoint
def save_checkpoint(state, filename = "model_checkpoint.pth.tar"):
torch.save(state, filename)
print("Checkpoint Saved at ",filename)
def load_checkpoint(checkpoint, model, optimizer = None):
print("Loading checkpoint...")
model.load_state_dict(checkpoint['state_dict'])
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
last_epoch = checkpoint['epoch']
tr_metrics = checkpoint['train_metrics']
te_metrics = checkpoint['test_metrics']
return last_epoch, tr_metrics, te_metrics
def write_summary(writer, tr_metrics, te_metrics, epoch):
writer.add_scalar("Table loss/Train", tr_metrics['table_loss'], global_step=epoch)
writer.add_scalar("Table loss/Test", te_metrics['table_loss'], global_step=epoch)
writer.add_scalar("Table Acc/Train", tr_metrics['table_acc'], global_step=epoch)
writer.add_scalar("Table Acc/Test", te_metrics['table_acc'], global_step=epoch)
writer.add_scalar("Table F1/Train", tr_metrics['table_f1'], global_step=epoch)
writer.add_scalar("Table F1/Test", te_metrics['table_f1'], global_step=epoch)
writer.add_scalar("Table Precision/Train", tr_metrics['table_precision'], global_step=epoch)
writer.add_scalar("Table Precision/Test", te_metrics['table_precision'], global_step=epoch)
writer.add_scalar("Table Recall/Train", tr_metrics['table_recall'], global_step=epoch)
writer.add_scalar("Table Recall/Test", te_metrics['table_recall'], global_step=epoch)
writer.add_scalar("Column loss/Train", tr_metrics['column_loss'], global_step=epoch)
writer.add_scalar("Column loss/Test", te_metrics['column_loss'], global_step=epoch)
writer.add_scalar("Column Acc/Train", tr_metrics['col_acc'], global_step=epoch)
writer.add_scalar("Column Acc/Test", te_metrics['col_acc'], global_step=epoch)
writer.add_scalar("Column F1/Train", tr_metrics['col_f1'], global_step=epoch)
writer.add_scalar("Column F1/Test", te_metrics['col_f1'], global_step=epoch)
writer.add_scalar("Column Precision/Train", tr_metrics['col_precision'], global_step=epoch)
writer.add_scalar("Column Precision/Test", te_metrics['col_precision'], global_step=epoch)
writer.add_scalar("Column Recall/Train", tr_metrics['col_recall'], global_step=epoch)
writer.add_scalar("Column Recall/Test", te_metrics['col_recall'], global_step=epoch)
def display_metrics(epoch, tr_metrics,te_metrics):
nl = '\n'
print(f"Epoch: {epoch} {nl}\
Table Loss -- Train: {tr_metrics['table_loss']:.3f} Test: {te_metrics['table_loss']:.3f}{nl}\
Table Acc -- Train: {tr_metrics['table_acc']:.3f} Test: {te_metrics['table_acc']:.3f}{nl}\
Table F1 -- Train: {tr_metrics['table_f1']:.3f} Test: {te_metrics['table_f1']:.3f}{nl}\
Table Precision -- Train: {tr_metrics['table_precision']:.3f} Test: {te_metrics['table_precision']:.3f}{nl}\
Table Recall -- Train: {tr_metrics['table_recall']:.3f} Test: {te_metrics['table_recall']:.3f}{nl}\
{nl}\
Col Loss -- Train: {tr_metrics['column_loss']:.3f} Test: {te_metrics['column_loss']:.3f}{nl}\
Col Acc -- Train: {tr_metrics['col_acc']:.3f} Test: {te_metrics['col_acc']:.3f}{nl}\
Col F1 -- Train: {tr_metrics['col_f1']:.3f} Test: {te_metrics['col_f1']:.3f}{nl}\
Col Precision -- Train: {tr_metrics['col_precision']:.3f} Test: {te_metrics['col_precision']:.3f}{nl}\
Col Recall -- Train: {tr_metrics['col_recall']:.3f} Test: {te_metrics['col_recall']:.3f}{nl}\
")
def compute_metrics(ground_truth, prediction, threshold = 0.5):
#https://stackoverflow.com/a/56649983
ground_truth = ground_truth.int()
prediction = (torch.sigmoid(prediction) > threshold).int()
TP = torch.sum(prediction[ground_truth==1]==1)
TN = torch.sum(prediction[ground_truth==0]==0)
FP = torch.sum(prediction[ground_truth==0]==1)
FN = torch.sum(prediction[ground_truth==1]==0)
acc = (TP + TN)/(TP + TN + FP+ FN)
precision = TP /(FP + TP + 1e-4)
recall = TP /(FN + TP + 1e-4)
f1 = 2 * precision * recall / (precision + recall + 1e-4)
metrics = {
'acc': acc.item(),
'precision':precision.item(),
'recall': recall.item(),
'f1': f1.item()
}
return metrics
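# Illustrative sanity check (added sketch, not part of the original utilities):
# compute_metrics expects an integer ground-truth mask and raw logits; sigmoid and the
# 0.5 threshold are applied internally before the confusion-matrix counts.
#   gt = torch.tensor([[1, 0, 1, 0]])
#   logits = torch.tensor([[2.0, -1.5, -0.5, 0.7]])   # thresholds to predictions [1, 0, 0, 1]
#   compute_metrics(gt, logits)  # -> acc, precision, recall, f1 all approximately 0.5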
def display(img, table, column, title = 'Original'):
f, ax = plt.subplots(1,3, figsize = (15,8))
ax[0].imshow(img)
ax[0].set_title(f'{title} Image')
ax[1].imshow(table)
ax[1].set_title(f'{title} Table Mask')
ax[2].imshow(column)
ax[2].set_title(f'{title} Column Mask')
plt.show()
def get_TableMasks(test_img, model, transform = TRANSFORM, device = config.DEVICE):
image = transform(image = test_img)["image"]
#get predictions
model.eval()
with torch.no_grad():
image = image.to(device).unsqueeze(0)
#with torch.cuda.amp.autocast():
table_out, column_out = model(image)
table_out = torch.sigmoid(table_out)
column_out = torch.sigmoid(column_out)
#remove gradients
table_out = (table_out.cpu().detach().numpy().squeeze(0).transpose(1,2,0) > 0.5).astype(int)
column_out = (column_out.cpu().detach().numpy().squeeze(0).transpose(1,2,0) > 0.5).astype(int)
return table_out, column_out
def is_contour_bad(c):
#ref: https://www.pyimagesearch.com/2015/02/09/removing-contours-image-using-python-opencv/
# approximate the contour
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# the contour is 'bad' if it is not a rectangle
return not len(approx) == 4
def fixMasks(image, table_mask, column_mask):
"""
Fix Table Bounding Box to get better OCR predictions
"""
table_mask = table_mask.reshape(1024,1024).astype(np.uint8)
column_mask = column_mask.reshape(1024,1024).astype(np.uint8)
#get contours of the mask to get number of tables
contours, table_heirarchy = cv2.findContours(table_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
table_contours = []
#ref: https://www.pyimagesearch.com/2015/02/09/removing-contours-image-using-python-opencv/
#remove bad contours
#print(contours)
for c in contours:
# if the contour is bad, draw it on the mask
#if not is_contour_bad(c):
if cv2.contourArea(c) > 2000:
table_contours.append(c)
if len(table_contours) == 0:
return None
#ref : https://docs.opencv.org/4.5.2/da/d0c/tutorial_bounding_rects_circles.html
#get bounding box for the contour
table_boundRect = [None]*len(table_contours)
for i, c in enumerate(table_contours):
polygon = cv2.approxPolyDP(c, 3, True)
table_boundRect[i] = cv2.boundingRect(polygon)
#table bounding Box
table_boundRect.sort()
col_boundRects = []
for x,y,w,h in table_boundRect:
col_mask_crop = column_mask[y:y+h,x:x+w]
#get contours of the mask to get number of tables
contours, col_heirarchy = cv2.findContours(col_mask_crop, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#get bounding box for the contour
boundRect = [None]*len(contours)
for i, c in enumerate(contours):
polygon = cv2.approxPolyDP(c, 3, True)
boundRect[i] = cv2.boundingRect(polygon)
#adjusting columns as per table coordinates
boundRect[i] = (boundRect[i][0] + x ,
boundRect[i][1] + y ,
boundRect[i][2],
boundRect[i][3])
col_boundRects.append(boundRect)
image = image[...,0].reshape(1024, 1024).astype(np.uint8)
#draw bounding boxes
color = (0,255,0)
thickness = 4
for x,y,w,h in table_boundRect:
image = cv2.rectangle(image, (x,y),(x+w,y+h), color, thickness)
return image, table_boundRect, col_boundRects
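# End-to-end sketch (illustrative only; assumes a trained TableNet-style model whose weights
# were restored with load_checkpoint, and a 1024x1024 RGB page image; the file name is made up):
#   img = cv2.cvtColor(cv2.imread("page.png"), cv2.COLOR_BGR2RGB)
#   table_mask, column_mask = get_TableMasks(img, model)
#   fixed = fixMasks(img, table_mask, column_mask)
#   if fixed is not None:
#       boxed_image, table_boxes, column_boxes = fixed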
|
190557
|
import numpy as np
print('# [Numpy example 1] ----------')
templist = [1,2,3,4,5,6]
templist = range(1,7)
np_templist = np.array(templist)
print(np_templist)
print('# [Numpy example 2] --------')
np_templist_int = np.array(templist, dtype=np.int8)
print('# [Numpy example 3] -------')
|
190584
|
from .sorting_algorithms import *
class Policy:
context = None
def __init__(self, context):
self.context = context
def configure(self):
if len(self.context.numbers) > 10:
print('More than 10 numbers, choosing merge sort!')
self.context.sorting_algorithm = MergeSort()
else:
print('10 or fewer numbers, choosing bubble sort!')
self.context.sorting_algorithm = BubbleSort()
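# Illustrative usage sketch (the context object is assumed to expose `numbers` and a
# `sorting_algorithm` slot; such a class is not defined in this module):
#   context = SortingContext(numbers=[5, 3, 8, 1])   # hypothetical context class
#   Policy(context).configure()                      # <= 10 numbers, so BubbleSort is selected
#   context.sorting_algorithm.sort(context)          # exact sort() signature depends on sorting_algorithms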
|
190639
|
import gdal_array as gd
try:
import Image
except ImportError:
from PIL import Image
relief = "relief.asc"
dem = "dem.asc"
target = "hillshade.tif"
# Load the relief as the background image
bg = gd.numpy.loadtxt(relief, skiprows=6)
# Load the DEM into a numpy array as the foreground image
fg = gd.numpy.loadtxt(dem, skiprows=6)[:-2, :-2]
# Create a blank 3-band image to colorize the DEM
rgb = gd.numpy.zeros((3, len(fg), len(fg[0])), gd.numpy.uint8)
# Class list with DEM upper elevation range values.
classes = [356, 649, 942, 1235, 1528,
1821, 2114, 2300, 2700]
# Color look-up table (lut)
# The lut must match the number of classes.
# Specified as R, G, B tuples
lut = [[63, 159, 152], [96, 235, 155], [100, 246, 174],
[248, 251, 155], [246, 190, 39], [242, 155, 39],
[165, 84, 26], [236, 119, 83], [203, 203, 203]]
# Starting elevation value of the first class
start = 1
# Process all classes.
for i in range(len(classes)):
mask = gd.numpy.logical_and(start <= fg,
fg <= classes[i])
for j in range(len(lut[i])):
rgb[j] = gd.numpy.choose(mask, (rgb[j], lut[i][j]))
start = classes[i]+1
# Convert the shaded relief to a PIL image
im1 = Image.fromarray(bg).convert('RGB')
# Convert the colorized DEM to a PIL image.
# We must transpose it from the Numpy row, col order
# to the PIL col, row order (width, height).
im2 = Image.fromarray(rgb.transpose(1, 2, 0)).convert('RGB')
# Blend the two images with a 40% alpha
hillshade = Image.blend(im1, im2, .4)
# Save the hillshade
hillshade.save(target)
|
190673
|
import asyncio
import grp
import importlib.resources
import os
import pathlib
import pwd
import sys
import unittest
from ...Shell import Shell
from ...util import which, export
from ..test_util import register, TmpDirMixin
__all__ = []
async def build_and_wait(factory, *args, **kwargs):
obj = await factory.build(*args, **kwargs)
return await obj.wait()
@export
@register()
class TestRunCommand(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
input_file = "input.txt"
output_file = "output.txt"
message = "Hello World!"
with Shell(cwd=cwd) as sh:
cat = sh.command('cat')
with (cwd / input_file).open('w') as fout:
fout.write(message)
job = sh(
cat.stdin(str(input_file)).stdout(str(output_file)).stderr(None),
wait=False
)[0]
return_codes = job.wait()
self.assertEqual(return_codes, (0,))
self.assertTrue(os.path.exists(str(cwd / output_file)))
with (cwd / output_file).open('r') as fin:
observed = fin.read()
self.assertEqual(message, observed)
@export
@register()
class TestRunCommandAndWait(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
input_file = "input.txt"
output_file = "output.txt"
message = "Hello World!"
with Shell(cwd=cwd) as sh:
cat = sh.command('cat')
with (cwd / input_file).open('w') as fout:
fout.write(message)
job = sh(
cat.stdin(str(input_file)).stdout(str(output_file)).stderr(None)
)[0]
self.assertTrue(os.path.exists(str(cwd / output_file)))
with (cwd / output_file).open('r') as fin:
observed = fin.read()
self.assertEqual(message, observed)
@export
@register()
class TestCommandDoesntExist(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
with Shell(cwd=cwd) as sh:
with self.assertRaises(RuntimeError):
sh.command('./DOES NOT EXIST')
@export
@register()
class TestCommandNotExecutable(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
tmpfile = cwd / "notexecutable.sh"
tmpfile.touch(mode=0o600)
with Shell(cwd=cwd) as sh:
with self.assertRaises(RuntimeError):
sh.command(f"./{tmpfile.name}")
@export
@register()
class TestCommandNotReadable(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
tmpfile = cwd / "notreadable.sh"
tmpfile.touch(mode=0o300)
with Shell(cwd=cwd) as sh:
if os.getuid() != 0:
with self.assertRaises(RuntimeError):
sh.command(f"./{tmpfile.name}")
else:
sh.command(f"./{tmpfile.name}")
@export
@register()
class TestNoEnvironment(TmpDirMixin):
def runTest(self):
with Shell(env={}) as sh:
self.assertEqual(len(sh.environment), 0)
@export
@register()
class TestWithEnvironment(TmpDirMixin):
def runTest(self):
num_existing = len(os.environ)
message = 'Hello World!'
MESSAGE = 'MESSAGE'
os.environ[MESSAGE] = message
with Shell(env=os.environ, cwd=os.getcwd()) as sh:
self.assertEqual(message, sh.getenv(MESSAGE))
self.assertEqual(num_existing + 1, len(sh.environment))
@export
@register()
class TestChangeDirectory(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
input_file = "input.txt"
output_file = "output.txt"
message = "Hello World!"
with Shell(cwd=cwd) as sh:
cat = sh.command('cat')
with (cwd / input_file).open('w') as fout:
fout.write(message)
job = sh(
cat.stdin(str(input_file)).stdout(str(output_file)).stderr(None),
wait=False
)[0]
return_codes = job.wait()
self.assertEqual(return_codes, (0,))
self.assertTrue(os.path.exists(str(cwd / output_file)))
with (cwd / output_file).open('r') as fin:
observed = fin.read()
self.assertEqual(message, observed)
@export
@register()
class TestEvaluate(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
input_file = "input.txt"
output_file = "output.txt"
message = "Hello World!"
with Shell(cwd=cwd) as sh:
cat = sh.command('cat')
with (cwd / input_file).open('w') as fout:
fout.write(message)
observed = sh.evaluate(
cat.stdin(str(input_file)).stderr(None)
)
self.assertEqual(message, observed)
@export
@register()
class TestEnvironmentSet(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
message = "Hello World!"
from .. import test_util
with importlib.resources.path(test_util.__package__, 'echo_env.py') as echo_env:
with Shell(cwd=cwd) as sh:
sh.export(
MESSAGE = message
)
python3 = sh.command('python3')
observed = sh.evaluate(
python3(echo_env, "MESSAGE")
)
self.assertEqual(message, observed)
@export
@register()
class TestEnvironmentSetGet(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
message = "Hello World!"
from .. import test_util
with Shell(cwd=cwd) as sh:
sh.export(
MESSAGE = message
)
observed = sh.getenv('MESSAGE')
self.assertEqual(message, observed)
@export
@register()
class TestChangeDirectoryManager(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
input_file = cwd / "input.txt"
output_file = cwd / "output.txt"
message = "Hello World!"
os.chdir("/")
with Shell(cwd=os.getcwd()) as sh:
cat = sh.command(which('cat'))
with input_file.open('w') as fout:
fout.write(message)
old_cwd = sh.cwd
self.assertNotEqual(old_cwd, cwd)
with sh.cd_manager(cwd) as target_cwd:
self.assertEqual(cwd, target_cwd)
job = sh(
cat.stdin(input_file.name).stdout(output_file.name).stderr(None),
wait=False
)[0]
self.assertEqual(sh.cwd, cwd)
self.assertEqual(sh.pwd, old_cwd)
self.assertEqual(sh.cwd, old_cwd)
self.assertEqual(sh.pwd, cwd)
return_codes = job.wait()
self.assertEqual(return_codes, (0,))
self.assertTrue(os.path.exists(str(output_file)))
with output_file.open('r') as fin:
observed = fin.read()
self.assertEqual(message, observed)
@export
@register()
class TestRunCommandDefaultShell(TmpDirMixin):
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
input_file = cwd / "input.txt"
output_file = cwd / "output.txt"
message = "Hello World!"
sh = Shell.get_shell()
cat = sh.command('cat')
with input_file.open('w') as fout:
fout.write(message)
job = sh(
cat.stdin(str(input_file)).stdout(str(output_file)).stderr(None),
wait=False
)[0]
return_codes = job.wait()
self.assertEqual(return_codes, (0,))
self.assertTrue(os.path.exists(str(output_file)))
with output_file.open('r') as fin:
observed = fin.read()
self.assertEqual(message, observed)
@export
@register()
class TestRunAsDifferentUser(TmpDirMixin):
def setUp(self):
super().setUp()
if ((sys.version_info.major, sys.version_info.minor) < (3, 9)):
raise unittest.SkipTest("Python version is less than 3.9")
if os.getuid() != 0:
raise unittest.SkipTest("Not running as root")
def unless_key_error(fun):
try:
return fun()
except KeyError:
return None
self.uid = unless_key_error(lambda: pwd.getpwnam('nobody').pw_uid)
if self.uid is None:
raise unittest.SkipTest("No user exists with name 'nobody'")
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
outfile = cwd / "result.txt"
with Shell(cwd=cwd, user=self.uid) as sh:
cmd_id = sh.command(which('id'))
sh.run(cmd_id('-u').stdout(outfile))
with outfile.open('r') as fin:
observed_uid = fin.read()
self.assertEqual(observed_uid.strip(), str(self.uid))
self.assertEqual(outfile.owner(), "nobody")
@export
@register()
class TestRunAsDifferentGroup(TmpDirMixin):
def setUp(self):
super().setUp()
if ((sys.version_info.major, sys.version_info.minor) < (3, 9)):
raise unittest.SkipTest("Python version is less than 3.9")
if os.getuid() != 0:
raise unittest.SkipTest("Not running as root")
def unless_key_error(fun):
try:
return fun()
except KeyError:
return None
self.gid = unless_key_error(lambda: pwd.getpwnam('nobody').pw_gid)
if self.gid is None:
raise unittest.SkipTest("No group exists with name 'nobody'")
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
outfile = cwd / "result.txt"
with Shell(cwd=cwd, group=self.gid) as sh:
cmd_id = sh.command(which('id'))
sh.run(cmd_id('-g').stdout(outfile))
with outfile.open('r') as fin:
observed_gid = fin.read()
self.assertEqual(observed_gid.strip(), str(self.gid))
self.assertEqual(grp.getgrnam(outfile.group()).gr_gid, self.gid)
@export
@register()
class TestEvaluateAsDifferentUser(TmpDirMixin):
def setUp(self):
super().setUp()
if ((sys.version_info.major, sys.version_info.minor) < (3, 9)):
raise unittest.SkipTest("Python version is less than 3.9")
if os.getuid() != 0:
raise unittest.SkipTest("Not running as root")
def unless_key_error(fun):
try:
return fun()
except KeyError:
return None
self.uid = unless_key_error(lambda: pwd.getpwnam('nobody').pw_uid)
if self.uid is None:
raise unittest.SkipTest("No user exists with name 'nobody'")
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
with Shell(cwd=cwd, user=self.uid) as sh:
cmd_id = sh.command(which('id'))
observed_uid = sh.evaluate(cmd_id('-u'))
self.assertEqual(observed_uid.strip(), str(self.uid))
@export
@register()
class TestEvaluateAsDifferentGroup(TmpDirMixin):
def setUp(self):
super().setUp()
if ((sys.version_info.major, sys.version_info.minor) < (3, 9)):
raise unittest.SkipTest("Python version is less than 3.9")
if os.getuid() != 0:
raise unittest.SkipTest("Not running as root")
def unless_key_error(fun):
try:
return fun()
except KeyError:
return None
self.gid = unless_key_error(lambda: pwd.getpwnam('nobody').pw_gid)
if self.gid is None:
raise unittest.SkipTest("No group exists with name 'nobody'")
def runTest(self):
cwd = pathlib.Path(self.tmpdir.name)
with Shell(cwd=cwd, group=self.gid) as sh:
cmd_id = sh.command(which('id'))
observed_gid = sh.evaluate(cmd_id('-g'))
self.assertEqual(observed_gid.strip(), str(self.gid))
|
190680
|
from typing import Union
from flair.data import Corpus, FlairDataset
from torch.utils.data import Subset
from embeddings.transformation.flair_transformation.corpus_sampling_transformation import (
CorpusSamplingTransformation,
)
class DownsampleFlairCorpusTransformation(CorpusSamplingTransformation):
def __init__(
self,
percentage: float,
downsample_train: bool = True,
downsample_dev: bool = True,
downsample_test: bool = True,
stratify: bool = True,
seed: int = 441,
):
super().__init__(stratify, seed)
self.percentage = percentage
self.downsample_train = downsample_train
self.downsample_dev = downsample_dev
self.downsample_test = downsample_test
self.seed = seed
def _downsample_subset(
self, data: FlairDataset, downsample: bool
) -> Union[FlairDataset, Subset[FlairDataset]]:
if data and downsample:
data = self._downsample_to_proportion(data)
return data
def _downsample_to_proportion(self, dataset: FlairDataset) -> Subset[FlairDataset]:
_, downsampled_dataset = self.randomly_split_into_two_datasets(
dataset=dataset, fraction_size=self.percentage
)
return downsampled_dataset
def transform(self, data: Corpus) -> Corpus:
train: FlairDataset = self._downsample_subset(data.train, self.downsample_train)
dev: FlairDataset = self._downsample_subset(data.dev, self.downsample_dev)
test: FlairDataset = self._downsample_subset(data.test, self.downsample_test)
return Corpus(train=train, dev=dev, test=test, sample_missing_splits=False)
|
190740
|
class BloxplorerException(Exception):
def __init__(self, message, resource_url, request_method):
self.message = message
self.resource_url = resource_url
self.request_method = request_method
def __str__(self):
return f'{self.message} (URL: {self.resource_url}, Method: {self.request_method})'
class BlockstreamClientError(BloxplorerException):
pass
class BlockstreamClientTimeout(BloxplorerException):
pass
class BlockstreamClientNetworkError(BloxplorerException):
pass
class BlockstreamApiError(BloxplorerException):
def __init__(self, message, resource_url, request_method, status_code):
super().__init__(message, resource_url, request_method)
self.status_code = status_code
def __str__(self):
return f'{self.message} ' \
f'(URL: {self.resource_url}, Method: {self.request_method}, ' \
f'Status code: {self.status_code})'
|
190743
|
import random
import pytest
from tests.test_client import get_doc
@pytest.fixture(scope="function")
def test_data(client, user):
system_random = random.SystemRandom()
url_x_count = system_random.randint(2, 5)
url_x_type = url_x_count
url_x = "s3://awesome-x/bucket/key"
versioned_count = system_random.randint(5, 10)
for _ in range(versioned_count):
doc = get_doc(has_urls_metadata=True, has_version=True)
if url_x_type > 0:
doc["urls"].append(url_x)
doc["urls_metadata"][url_x] = {"state": "uploaded"}
url_x_type -= 1
print(doc)
res = client.post("/index/", json=doc, headers=user)
assert res.status_code == 200
rec = client.get("/index/", json=doc, headers=user)
assert rec.status_code == 200
url_x_type = url_x_count
unversioned_count = system_random.randint(6, 10)
for _ in range(unversioned_count):
doc = get_doc(has_urls_metadata=True)
if url_x_type > 0:
doc["urls"].append(url_x)
doc["urls_metadata"][url_x] = {"state": "uploaded"}
url_x_type -= 1
print(doc)
res = client.post("/index/", json=doc, headers=user)
assert res.status_code == 200
rec = client.get("/index/", json=doc, headers=user)
assert rec.status_code == 200
return url_x_count, versioned_count, unversioned_count
def test_query_urls(client, test_data):
"""
Args:
client (test fixture)
test_data (tuple[int, int, int]:
"""
url_x_count, versioned_count, unversioned_count = test_data
# test get all
res = client.get("/_query/urls/q")
assert res.status_code == 200
urls_list = res.json
print(urls_list)
assert len(urls_list) == versioned_count + unversioned_count
# test list versioned urls
res = client.get("/_query/urls/q?versioned=true")
assert res.status_code == 200
urls_list = res.json
print(urls_list)
assert len(urls_list) == versioned_count
# test list unversioned
res = client.get("/_query/urls/q?versioned=false")
assert res.status_code == 200
urls_list = res.json
print(urls_list)
assert len(urls_list) == unversioned_count
# test exclude url
res = client.get("/_query/urls/q?exclude=awesome-x")
assert res.status_code == 200
urls_list = res.json
print(urls_list)
assert len(urls_list) == versioned_count + unversioned_count - 2 * url_x_count
# test include
res = client.get("/_query/urls/q?include=awesome-x")
assert res.status_code == 200
urls_list = res.json
print(urls_list)
assert len(urls_list) == 2 * url_x_count
# test include and exclude
res = client.get("/_query/urls/q?include=endpointurl&exclude=awesome-x")
assert res.status_code == 200
urls_list = res.json
print(urls_list)
assert len(urls_list) == versioned_count + unversioned_count - 2 * url_x_count
def test_query_urls_metadata(client, test_data):
"""
Args:
client (test fixture)
test_data (tuple[int, int, int]:
"""
url_x_count, _, unversioned_count = test_data
# test get all
res = client.get("_query/urls/metadata/q?key=state&value=uploaded&url=awesome-x")
assert res.status_code == 200
urls_list = res.json
assert len(urls_list) == 2 * url_x_count
# test list versioned urls
res = client.get(
"_query/urls/metadata/q?key=state&value=uploaded&url=awesome-x&versioned=True"
)
assert res.status_code == 200
urls_list = res.json
assert len(urls_list) == url_x_count
# test list unversioned
res = client.get(
"_query/urls/metadata/q?key=state&value=uploaded&url=endpointurl&versioned=False"
)
assert res.status_code == 200
urls_list = res.json
assert len(urls_list) == unversioned_count
# test unknown state
res = client.get("_query/urls/metadata/q?key=state&value=uploadedx&url=awesome-x")
assert res.status_code == 200
urls_list = res.json
assert len(urls_list) == 0
|
190749
|
from errors import *
from nodes import *
from ifelse import *
from loop import *
from func import *
from trycatch import *
from strquot import unquote
STATEMENTS = set('func labda local if elseif else while for try catch repeat'.split())
STATEMENT_CLASS = {'func': FuncStatement, 'labda': LabdaStatement,
'local': LocalFuncStatement, 'while': WhileStatement,
'for': ForStatement,
'repeat': RepeatStatement
}
class Context(object):
pass
class LineContext(Context):
def __init__(self, text, filename, linenr):
self.text = text
self.origtext = text
self.filename = filename
self.linenr = linenr
def indent(self):
i = 0
while self.text.startswith(('\t', ' ')):
if self.text[0] == '\t':
self.text = self.text[1:]
else:
self.text = self.text[4:]
i += 1
self.indent = i
return self
def stringify(self):
self.stringwise = self.text.split('"')
return self
def decomment(self):
for i, s in enumerate(self.stringwise):
if i % 2 == 0:
if '#' in s: #found comment!
self.stringwise = self.stringwise[:i]
self.stringwise.append(s.split('#', 1)[0])
break
if len(self.stringwise) % 2 == 0:
raise DejaSyntaxError("Unclosed string", self, self.text.rindex('"'))
return self
def wordify(self):
self.tokens = []
for i, s in enumerate(self.stringwise):
if i % 2:
self.tokens.append('"' + unquote(s))
else:
tokens = s.split()
for i, token in enumerate(tokens):
if token.startswith('@') and len(token) > 1:
token = token[1:]
if '!' in token:
if token.startswith('!'):
token = 'eva' + token
if token.endswith('!'):
token = token[:-1]
args = token.split('!')
base = args.pop(0)
tokens[i:i+1] = ['get-from', base] + [":" + x for x in args]
else:
tokens[i] = ":%s" % token
tokens.insert(i, 'get')
self.tokens.extend(tokens)
return self
def statementize(self):
self.statement = None
if self.tokens and not self.tokens[-1].startswith('"') and self.tokens[-1].endswith(':'):
self.tokens[-1] = self.tokens[-1][:-1]
if not self.tokens[-1]: #remove last word if empty
self.tokens.pop()
if not self.tokens:
raise DejaSyntaxError("Empty statement", self, self.text.index(':'))
if self.tokens[0].startswith('"'):
raise DejaSyntaxError("Statement starting with a string", self, self.text.index('"'))
if self.tokens[0] not in STATEMENTS:
self.statement = 'func'
else:
self.statement = self.tokens.pop(0)
getattr(self, 'assert_' + self.statement)()
return self
def assert_labda(self):
for token in self.tokens:
if WordList.gettokentype(token) != 'word':
raise DejaSyntaxError("Function definition containing wrong type of word", self, self.text.index(token))
def assert_func(self):
if not self.tokens:
raise DejaSyntaxError("Missing function name", self, self.text.index(':'))
self.assert_labda()
assert_local = assert_func
def assert_for(self):
if not self.tokens:
raise DejaSyntaxError("Missing counter name", self, self.text.index(':'))
def assert_if(self):
pass
def assert_while(self):
pass
def assert_repeat(self):
pass
def assert_try(self):
if self.tokens:
raise DejaSyntaxError("A try statement cannot have other words", self, self.text.index('try') + 4)
def assert_catch(self):
if not self.tokens:
raise DejaSyntaxError("Missing exception name", self, self.text.index(':'))
for ex in self.tokens:
if WordList.gettokentype(ex) != 'word':
raise DejaSyntaxError("Error category list must consist of proper words", self, self.text.index(ex))
def assert_elseif(self):
pass #needs to follow an if or another elseif
def assert_else(self):
#needs to follow an if or elseif
if self.tokens:
raise DejaSyntaxError("An else statement cannot have other words", self, self.text.index(self.tokens[0]))
def process(self):
return self.indent().stringify().decomment().wordify().statementize()
class FileContext(Context):
def __init__(self, filenode):
self.filenode = filenode
self.last_node = filenode
self.last_indent = 0
self.has_statement = False
self.indentation_stack = [filenode]
def addline(self, linecontext):
if not linecontext.tokens and not linecontext.statement:
return #skip empty lines
if self.has_statement:
if linecontext.indent != self.last_indent + 1:
raise DejaSyntaxError("Expected a single extra indentation", linecontext, 0)
else:
if linecontext.indent > self.last_indent:
raise DejaSyntaxError("Expected no extra indentation", linecontext, 0)
self.last_indent = linecontext.indent
self.has_statement = linecontext.statement is not None
if self.has_statement:
st = linecontext.statement
if st == 'if':
self.last_node = IfClause(IfStatement(self.indentation_stack[self.last_indent], linecontext.linenr), linecontext.tokens)
elif st == 'elseif':
if len(self.indentation_stack) <= self.last_indent + 1 or not isinstance(self.indentation_stack[self.last_indent + 1], (IfClause, ElseIfClause)):
raise DejaSyntaxError("No if clause or elseif clause preceding elseif clause", linecontext, 0)
self.last_node = ElseIfClause(self.indentation_stack[self.last_indent + 1].parent, linecontext.tokens)
elif st == 'else':
if len(self.indentation_stack) <= self.last_indent + 1 or not isinstance(self.indentation_stack[self.last_indent + 1], (IfClause, ElseIfClause)):
raise DejaSyntaxError("No if clause or elseif clause preceding else clause", linecontext, 0)
self.last_node = ElseClause(self.indentation_stack[self.last_indent + 1].parent)
elif st == 'try':
self.last_node = TryClause(TryStatement(self.indentation_stack[self.last_indent], linecontext.linenr))
elif st == 'catch':
if len(self.indentation_stack) <= self.last_indent + 1 or not isinstance(self.indentation_stack[self.last_indent + 1], (TryClause, CatchClause)):
raise DejaSyntaxError("No try clause preceding catch clause", linecontext, 0)
self.last_node = CatchClause(self.indentation_stack[self.last_indent + 1].parent, linecontext.tokens)
else:
self.last_node = BodyClause(STATEMENT_CLASS[st](self.indentation_stack[self.last_indent], linecontext.tokens, linecontext.linenr))
self.indentation_stack = self.indentation_stack[:self.last_indent + 1]
self.indentation_stack.append(self.last_node)
else:
try:
self.last_node = Line(self.indentation_stack[self.last_indent], linecontext.tokens, linecontext.linenr)
except UnicodeDecodeError as e:
raise DejaSyntaxError("Encoding error: all strings need to be UTF-8", linecontext, linecontext.text.index(e.object))
|
190755
|
from pluginbase import get_plugin_source
def get_app():
rv = get_plugin_source(stacklevel=1)
if rv is not None:
return rv.app
def get_app_name():
return get_app().name
|
190772
|
import socket
import os
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
for n in range(1, 5):
server_ip="192.168.20.{0}".format(n)
rep = os.system('ping -c 1 ' + server_ip)  # single probe so the loop does not hang; use 'ping -n 1' on Windows
if rep == 0:
print ("server is up" ,server_ip)
else:
print ("server is down" ,server_ip)
|
190783
|
import FWCore.ParameterSet.Config as cms
from SimGeneral.MixingModule.mix_probFunction_25ns_PoissonOOTPU_cfi import *
mix.input.nbPileupEvents.probFunctionVariable = cms.vint32(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98
)
mix.input.nbPileupEvents.probValue = cms.vdouble(
0.00056178559316322207, 0.0007578881923109293, 0.00089840730652213097, 0.0010038191685453057, 0.0011915955692529678,
0.0016831225948408246, 0.0027497191913425922, 0.0045576253905892372, 0.0069810771383345127, 0.0095816180109977722,
0.011853505857288837, 0.013561941683292389, 0.014868909493088722, 0.016164621338248253, 0.017800241708755493,
0.019943427294492722, 0.022553266957402229, 0.02537936344742775, 0.027978429570794106, 0.029862202703952789,
0.030737783759832382, 0.030647184699773788, 0.029888983815908432, 0.028840301558375359, 0.027826078236103058,
0.027054192498326302, 0.026599425822496414, 0.02642328292131424, 0.026400813832879066, 0.026344424113631248,
0.026030091568827629, 0.025283832103013992, 0.024102954193949699, 0.022694429382681847, 0.021359216421842575,
0.020321305841207504, 0.019643651321530342, 0.019244605675339699, 0.018951877951622009, 0.018589355051517487,
0.018086889758706093, 0.017514361068606377, 0.016997335478663445, 0.01660793274641037, 0.016348866745829582,
0.016209937632083893, 0.016200525686144829, 0.016327474266290665, 0.016558451578021049, 0.016807874664664268,
0.016936073079705238, 0.016756467521190643, 0.01605769619345665, 0.014654850587248802, 0.012486033141613007,
0.0097413137555122375, 0.0068911332637071609, 0.0044789197854697704, 0.0028035130817443132, 0.0017981450073421001,
0.0011986247263848782, 0.00078225688776001334, 0.00046188471606001258, 0.0002335387107450515, 9.8074830020777881e-05,
3.36361990775913e-05, 9.325763130618725e-06, 2.0754421257151989e-06, 3.6864730645902455e-07, 5.1980304505150343e-08,
5.7291127397718355e-09, 4.2278064382728076e-10, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0)
|
190785
|
import pytest
def test_rogue():
# do this simple test since rogue has a rather complex boost dependency
# which we have gotten wrong in the past - valmar and cpo
import rogue
if __name__ == "__main__":
test_rogue()
|
190795
|
from OpenMatch.data.dataloader import DataLoader
from OpenMatch.data.datasets import *
from OpenMatch.data.tokenizers import *
|
190816
|
import bisect
import re
from typing import List, Dict
from securify.analyses.patterns.abstract_pattern import AbstractPattern, PatternMatch, Level, Severity, PatternMatchError, \
MatchComment, MatchSourceLocation
class RightToLeftOverridePattern(AbstractPattern):
regex_pattern = re.compile("\u202e".encode('utf-8'))
@property
def name(self) -> str:
return "Right-to-left-override pattern"
@property
def description(self) -> str:
return "Finds usages of the Right-To-Left-Override (U+202E) character in source code"
@property
def severity(self) -> Severity:
return Severity.CRITICAL
@property
def level(self) -> Level:
return Level.SOURCE
@property
def tags(self) -> Dict[str, str]:
return {}
def find_matches(self) -> List[PatternMatch]:
analysis_context = self.analysis_context
if analysis_context.source_code is None:
raise PatternMatchError("Source code is not available.")
encoding = analysis_context.config.encoding
source_code = analysis_context.source_code.encode(encoding)
rtlo_pattern = re.compile("\u202e".encode(encoding))
newl_pattern = re.compile("\n".encode(encoding))
newlines = sorted((m.start() for m in newl_pattern.finditer(source_code)))
def get_line(b):
return bisect.bisect(newlines, b) + 1
def get_contract(source_code, until_line):
source_code_lines = str(source_code).split("\n")[:until_line]
contract_lines = [c for c in source_code_lines if "contract" in c]
if not contract_lines:
return "no contract"
m = re.search(r'contract (\w+)', contract_lines[-1])
if m:
return m.group(1)
return "no contract"
matches = [
self.match_violation().with_info(
MatchComment(
"Found right-to-left-override character"
),
MatchSourceLocation(
encoding,
m.start(),
m.end(),
get_line(m.start()),
get_contract(source_code, get_line(m.start()))
)
) for m in rtlo_pattern.finditer(source_code)]
if not matches:
return [self.match_compliant()]
return matches
|
190831
|
from torchvision.datasets import VisionDataset
import warnings
import torch
from PIL import Image
import os
import os.path
import numpy as np
from torchvision import transforms
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
class ImageList(VisionDataset):
"""
Args:
root (string): Root directory of dataset
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
def __init__(self, root=None, transform=None, target_transform=None, empty=False):
super(ImageList, self).__init__(root, transform=transform, target_transform=target_transform)
self.empty = empty
if empty:
self.samples = np.empty((1, 2), dtype='<U1000')
else:
self.samples = np.loadtxt(root, dtype=np.dtype((np.unicode_, 1000)), delimiter=' ')
self.loader = pil_loader
self.identity = transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
def __getitem__(self, index):
path, label = self.samples[index]
label = int(label)
output = {
'label': label,
'path': path,
'index': index
}
img0 = self.loader(path)
# original image without transform
output['img'] = self.identity(img0)
if self.transform is not None:
output['img0'] = self.transform(img0)
return output
def __len__(self):
return len(self.samples)
def add_item(self, addition):
if self.empty:
self.samples = addition
self.empty = False
else:
self.samples = np.concatenate((self.samples, addition), axis=0)
return self.samples
def remove_item(self, reduced):
self.samples = np.delete(self.samples, reduced, axis=0)
return self.samples
|
190837
|
import pytest
from mapreader import classifier
from mapreader import loadAnnotations
from mapreader import patchTorchDataset
import numpy as np
import torch
from torch import nn
import torchvision
from torchvision import transforms
from torchvision import models
PATH2IMAGES = "./examples/non-geospatial/classification_plant_phenotype/dataset/open_access_plant/*.png"
PATH2ANNOTS = "./examples/non-geospatial/classification_plant_phenotype/annotations_phenotype_open_access/phenotype_test_#kasra#.csv"
def test_slice():
from mapreader import loader
myimgs = loader(PATH2IMAGES)
# len() shows the total number of images currently read (or sliced, see below)
print(f"Number of images: {len(myimgs)}")
# To get more information
print(myimgs)
all_imgs = myimgs.list_parents()
assert len(all_imgs) == 2, "Expected 2 parents"
# `method` can also be set to meters
myimgs.sliceAll(
path_save="./dataset/eg_slice_50_50",
slice_size=50, # in pixels
square_cuts=False,
verbose=False,
method="pixel",
)
# if parent_id="XXX", only compute pixel stats for that parent
myimgs.calc_pixel_stats()
imgs_pd, patches_pd = myimgs.convertImages(fmt="dataframe")
assert len(imgs_pd) == len(all_imgs), "Expected same number of images"
def test_load_annotation():
annotated_images = loadAnnotations()
annotated_images.load(PATH2ANNOTS, path2dir="./dataset/eg_slice_50_50")
annotated_images.annotations.columns.tolist()
print(annotated_images)
# We need to shift these labels so that they start from 0:
annotated_images.adjust_labels(shiftby=-1)
# ### Split annotations into train/val or train/val/test
# We use a stratified method for splitting the annotations, that is, each set contains approximately the same percentage of samples of each target label as the original set.
annotated_images.split_annotations(frac_train=0.8, frac_val=0.2, frac_test=0.0)
annotated_images.train["label"].value_counts()
return annotated_images
def test_classifier():
annotated_images = test_load_annotation()
# # Classifier
# ## Dataset
# Define transformations to be applied to images before being used in training or validation/inference.
# `patchTorchDataset` has some default transformations. However, it is possible to define your own transformations and pass them to `patchTorchDataset`:
# ------------------
# --- Transformation
# ------------------
# FOR INCEPTION
# resize2 = 299
# otherwise:
resize2 = 224
# mean and standard deviations of pixel intensities in
# all the patches in 6", second edition maps
normalize_mean = 1 - np.array([0.82860442, 0.82515008, 0.77019864])
normalize_std = 1 - np.array([0.1025585, 0.10527616, 0.10039222])
data_transforms = {
"train": transforms.Compose(
[
transforms.Resize(resize2),
transforms.RandomApply(
[
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomVerticalFlip(p=0.5),
],
p=0.5,
),
# transforms.RandomApply([
# transforms.GaussianBlur(21, sigma=(0.5, 5.0)),
# ], p=0.25),
transforms.RandomApply(
[
# transforms.RandomPerspective(distortion_scale=0.5, p=0.5),
transforms.Resize((50, 50)),
],
p=0.25,
),
# transforms.RandomApply([
# transforms.RandomAffine(180, translate=None, scale=None, shear=20),
# ], p=0.25),
transforms.Resize((resize2, resize2)),
transforms.ToTensor(),
transforms.Normalize(normalize_mean, normalize_std),
]
),
"val": transforms.Compose(
[
transforms.Resize((resize2, resize2)),
transforms.ToTensor(),
transforms.Normalize(normalize_mean, normalize_std),
]
),
}
train_dataset = patchTorchDataset(
annotated_images.train, transform=data_transforms["train"]
)
valid_dataset = patchTorchDataset(
annotated_images.val, transform=data_transforms["val"]
)
# test_dataset = patchTorchDataset(annotated_images.test,
# transform=data_transforms["val"])
# ## Sampler
# -----------
# --- Sampler
# -----------
# We define a sampler as we have a highly imbalanced dataset
label_counts_dict = annotated_images.train["label"].value_counts().to_dict()
class_sample_count = []
for i in range(0, len(label_counts_dict)):
class_sample_count.append(label_counts_dict[i])
weights = 1.0 / (torch.Tensor(class_sample_count) / 1.0)
weights = weights.double()
print(f"Weights: {weights}")
train_sampler = torch.utils.data.sampler.WeightedRandomSampler(
weights[train_dataset.patchframe["label"].to_list()],
num_samples=len(train_dataset.patchframe),
)
valid_sampler = torch.utils.data.sampler.WeightedRandomSampler(
weights[valid_dataset.patchframe["label"].to_list()],
num_samples=len(valid_dataset.patchframe),
)
# ## Dataloader
myclassifier = classifier(device="default")
# myclassifier.load("./checkpoint_12.pkl")
batch_size = 8
# Add training dataset
myclassifier.add2dataloader(
train_dataset,
set_name="train",
batch_size=batch_size,
# shuffle can be False as annotations have already been shuffled
shuffle=False,
num_workers=0,
sampler=train_sampler,
)
# Add validation dataset
myclassifier.add2dataloader(
valid_dataset,
set_name="val",
batch_size=batch_size,
shuffle=False,
num_workers=0,
# sampler=valid_sampler
)
myclassifier.print_classes_dl()
# set class names for plots
class_names = {0: "No", 1: "Plant"}
myclassifier.set_classnames(class_names)
myclassifier.print_classes_dl()
myclassifier.batch_info()
# ### Method 2: use `.initialize_model`
myclassifier.del_model()
myclassifier.initialize_model(
"resnet18", pretrained=True, last_layer_num_classes="default", add_model=True
)
myclassifier.model_summary(only_trainable=False)
list2optim = myclassifier.layerwise_lr(min_lr=1e-4, max_lr=1e-3)
# #list2optim = myclassifier.layerwise_lr(min_lr=1e-4, max_lr=1e-3, ltype="geomspace")
optim_param_dict = {
"lr": 1e-3,
"betas": (0.9, 0.999),
"eps": 1e-08,
"weight_decay": 0,
"amsgrad": False,
}
# --- if list2optim is defined, e.g., by using `.layerwise_lr` method (see the previous cell):
myclassifier.initialize_optimizer(
optim_type="adam",
params2optim=list2optim,
optim_param_dict=optim_param_dict,
add_optim=True,
)
scheduler_param_dict = {
"step_size": 10,
"gamma": 0.1,
"last_epoch": -1,
# "verbose": False
}
myclassifier.initialize_scheduler(
scheduler_type="steplr",
scheduler_param_dict=scheduler_param_dict,
add_scheduler=True,
)
# Add criterion
criterion = nn.CrossEntropyLoss()
myclassifier.add_criterion(criterion)
# ## Train/fine-tune a model
myclassifier.train_component_summary()
myclassifier.train(
num_epochs=3,
save_model_dir="./models_plant_open",
tensorboard_path=False,
verbosity_level=0,
tmp_file_save_freq=2,
remove_after_load=False,
print_info_batch_freq=5,
)
# ### Plot results
print(list(myclassifier.metrics.keys()))
|
190876
|
import numpy as np
import pandas as pd
from pycytominer.operations import sparse_random_projection
data_df = pd.DataFrame(
{
"Metadata_plate": ["a", "a", "a", "a", "b", "b", "b", "b"],
"Metadata_treatment": [
"drug",
"drug",
"control",
"control",
"drug",
"drug",
"control",
"control",
],
"Metadata_batch": [
"day1",
"day1",
"day1",
"day1",
"day1",
"day1",
"day1",
"day1",
],
"x": [1, 2, 8, 2, 5, 5, 5, 1],
"y": [3, 1, 7, 4, 5, 9, 6, 1],
"z": [1, 8, 2, 5, 6, 22, 2, 2],
"zz": [14, 46, 1, 6, 30, 100, 2, 2],
}
)
def test_sparse_random_projection():
"""
Testing the base covariance pycytominer function
"""
n_components = 2
cp_features = ["x", "y", "z"]
seed = 123
sparse_result = sparse_random_projection(
population_df=data_df,
variables=cp_features,
n_components=n_components,
seed=seed,
).round(2)
expected_result = pd.DataFrame(
{
0: [2.79, 1.86],
1: [0.93, -0.93],
2: [6.51, -0.93],
3: [3.72, 1.86],
4: [4.65, 0.00],
5: [8.38, 3.72],
6: [5.58, 0.93],
7: [0.93, 0.00],
}
).transpose()
expected_result.columns = ["sparse_comp_0", "sparse_comp_1"]
assert sparse_result.equals(expected_result)
def test_sparse_random_projection_allvar():
"""
Testing the base covariance pycytominer function
"""
n_components = 2
cp_features = "all"
seed = 123
input_data_df = data_df.loc[:, ["x", "y", "z", "zz"]]
sparse_result = sparse_random_projection(
population_df=input_data_df,
variables=cp_features,
n_components=n_components,
seed=seed,
).round(2)
expected_result = pd.DataFrame(
{
0: [16.0, -14.0],
1: [45.0, -40.0],
2: [0.0, -7.0],
3: [8.0, -3.0],
4: [30.0, -29.0],
5: [104.0, -83.0],
6: [3.0, -5.0],
7: [2.0, -1.0],
}
).transpose()
expected_result.columns = ["sparse_comp_0", "sparse_comp_1"]
assert sparse_result.equals(expected_result)
|
190942
|
from pbpstats.data_loader.abs_data_loader import check_file_directory
from pbpstats.data_loader.live.file_loader import LiveFileLoader
class LiveBoxscoreFileLoader(LiveFileLoader):
"""
A ``LiveBoxscoreFileLoader`` object should be instantiated and passed into ``LiveBoxscoreLoader`` when loading data from file
:param str file_directory:
Directory in which data should be loaded from.
The specific file location will be `live_<game_id>.json` in the `/game_details` subdirectory.
"""
def __init__(self, file_directory):
self.file_directory = file_directory
@check_file_directory
def load_data(self, game_id):
self.game_id = game_id
self.file_path = f"{self.file_directory}/game_details/live_{self.game_id}.json"
return self._load_data_from_file()
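# Illustrative usage sketch based on the docstring above (the LiveBoxscoreLoader import path
# and the game id are assumptions, not taken from this file):
#   from pbpstats.data_loader import LiveBoxscoreLoader
#   file_loader = LiveBoxscoreFileLoader("/data")
#   loader = LiveBoxscoreLoader("0022000001", file_loader)  # reads /data/game_details/live_0022000001.json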
|
190980
|
def post(settings):
data = {"dynaconf_merge": True}
if settings.get("ADD_BEATLES") is True:
data["BANDS"] = ["Beatles"]
return data
|
191019
|
import time
import sys
import traceback
try:
from support import exceptions
except ImportError:
sys.path.append('..')
from support import exceptions
def r(n=5, m=1, f=exceptions.current_code_list):
if n:
return r(n - 1, m)
for i in range(m):
return f()
def inline(n=5, m=10000):
if n:
return inline(n - 1, m)
for i in range(m):
f = sys._getframe().f_back
code_list = []
while f:
code_list.append(f.f_code)
code_list.append(f.f_lineno)
f = f.f_back
def test_current_code_list():
r()
def test_code_list2trace_list():
exceptions.code_list2trace_list(r())
if __name__ == "__main__":
s = time.time()
r(m=10000)
f = time.time()
print "current_code_list() took", (f - s) * 100, "microseconds"
s = time.time()
inline()
f = time.time()
print "inline code-list capture took", (f - s) * 100, "microseconds"
s = time.time()
r(m=10000, f=traceback.format_stack)
f = time.time()
print "traceback.format_stack() took", (f - s) * 100, "microseconds"
s = time.time()
r(m=10000, f=traceback.extract_stack)
f = time.time()
print "traceback.extract_stack() took", (f - s) * 100, "microseconds"
|
191035
|
import tensorflow as tf
import numpy as np
from utils import get_shape
try:
from tensorflow.contrib.rnn import LSTMStateTuple
except ImportError:
LSTMStateTuple = tf.nn.rnn_cell.LSTMStateTuple
def bidirectional_rnn(cell_fw, cell_bw, inputs, input_lengths,
initial_state_fw=None, initial_state_bw=None,
scope=None):
with tf.variable_scope(scope or 'bi_rnn') as scope:
(fw_outputs, bw_outputs), (fw_state, bw_state) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=inputs,
sequence_length=input_lengths,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
dtype=tf.float32,
scope=scope
)
outputs = tf.concat((fw_outputs, bw_outputs), axis=2)
def concatenate_state(fw_state, bw_state):
if isinstance(fw_state, LSTMStateTuple):
state_c = tf.concat(
(fw_state.c, bw_state.c), 1, name='bidirectional_concat_c')
state_h = tf.concat(
(fw_state.h, bw_state.h), 1, name='bidirectional_concat_h')
state = LSTMStateTuple(c=state_c, h=state_h)
return state
elif isinstance(fw_state, tf.Tensor):
state = tf.concat((fw_state, bw_state), 1,
name='bidirectional_concat')
return state
elif (isinstance(fw_state, tuple) and
isinstance(bw_state, tuple) and
len(fw_state) == len(bw_state)):
# multilayer
state = tuple(concatenate_state(fw, bw)
for fw, bw in zip(fw_state, bw_state))
return state
else:
raise ValueError(
'unknown state type: {}'.format((fw_state, bw_state)))
state = concatenate_state(fw_state, bw_state)
return outputs, state
def masking(scores, sequence_lengths, score_mask_value=tf.constant(-np.inf)):
score_mask = tf.sequence_mask(sequence_lengths, maxlen=tf.shape(scores)[1])
score_mask_values = score_mask_value * tf.ones_like(scores)
return tf.where(score_mask, scores, score_mask_values)
def attention(inputs, att_dim, sequence_lengths, scope=None):
assert len(inputs.get_shape()) == 3 and inputs.get_shape()[-1].value is not None
with tf.variable_scope(scope or 'attention'):
word_att_W = tf.get_variable(name='att_W', shape=[att_dim, 1])
projection = tf.layers.dense(inputs, att_dim, tf.nn.tanh, name='projection')
alpha = tf.matmul(tf.reshape(projection, shape=[-1, att_dim]), word_att_W)
alpha = tf.reshape(alpha, shape=[-1, get_shape(inputs)[1]])
alpha = masking(alpha, sequence_lengths, tf.constant(-1e15, dtype=tf.float32))
alpha = tf.nn.softmax(alpha)
outputs = tf.reduce_sum(inputs * tf.expand_dims(alpha, 2), axis=1)
return outputs, alpha
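# Illustrative usage sketch (TF1-style graph mode; max_len, emb_dim and the cell size are
# placeholders, not values from this module):
#   word_inputs = tf.placeholder(tf.float32, [None, max_len, emb_dim])
#   lengths = tf.placeholder(tf.int32, [None])
#   cell_fw = tf.nn.rnn_cell.GRUCell(50)
#   cell_bw = tf.nn.rnn_cell.GRUCell(50)
#   rnn_outputs, _ = bidirectional_rnn(cell_fw, cell_bw, word_inputs, lengths)
#   sentence_vec, alphas = attention(rnn_outputs, att_dim=100, sequence_lengths=lengths)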
|
191055
|
from django import forms
from .models import *
class RequestForm(forms.ModelForm):
class Meta:
model = Request
fields = ("reason_for_request",)
class ApproveForm(forms.ModelForm):
# approved = forms.BooleanField()
approved = forms.TypedChoiceField(
coerce=lambda x: bool(int(x)),
choices=((1, "Approved"), (0, "Denied")),
widget=forms.RadioSelect,
label="Approved?",
)
class Meta:
model = Request
fields = ("approved", "reason_for_approval")
class ComputerForm(forms.ModelForm):
class Meta:
model = Computer
fields = ("serial", "username", "computername")
class SecretForm(forms.ModelForm):
class Meta:
model = Secret
fields = ("secret_type", "secret", "computer")
widgets = {"computer": forms.HiddenInput()}
|
191107
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import tensorflow as tf
import utils.utils as utils
class Evaluator:
def __init__(self,
cmd_args, config, optimizer, learning_rate, loss, saver, onehot_labels=False):
self.cmd_args = cmd_args
self.config = config
self.optimizer = optimizer
self.learning_rate = learning_rate
self.loss = loss
self.onehot_labels = onehot_labels
self.saver = saver
def eval_in_batches(self, y_conv, x, keep_prob, data, sess, batch_size, num_classes):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < batch_size:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = np.ndarray(shape=(size, num_classes), dtype=np.float32)
for begin in range(0, size, batch_size):
end = begin + batch_size
if end <= size:
predictions[begin:end, :] = sess.run(
y_conv,
feed_dict={x: data[begin:end, ...], keep_prob: 1.0})
else:
batch_predictions = sess.run(
y_conv,
feed_dict={x: data[-batch_size:, ...], keep_prob: 1.0})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
def run(self, input_dict):
x = input_dict["x"]
y_ = input_dict["y_"]
y_conv = input_dict["y_conv"]
keep_prob = input_dict["keep_prob"]
train_data = input_dict["train_data"]
train_labels = input_dict["train_labels"]
test_data = input_dict["test_data"]
test_labels = input_dict["test_labels"]
validation_data = input_dict["validation_data"]
validation_labels = input_dict["validation_labels"]
num_epochs = input_dict["num_epochs"]
train_size = input_dict["train_size"]
batch_size = self.config.getint('main', 'batch_size')
checkpoint_path = self.config.get('main', 'checkpoint_path')
num_classes = self.config.getint('main', 'num_classes')
eval_frequency = self.config.getint('main', 'eval_frequency')
utils.ensure_dir(os.path.dirname(checkpoint_path))
start_time = time.time()
with tf.Session() as sess:
tf.initialize_all_variables().run()
print('Initialized!')
if not self.cmd_args.restore_checkpoint:
print('No checkpoint to load, training model from scratch...')
if self.cmd_args.test:
iter_range = range(1)
else:
iter_range = range(int(num_epochs * train_size) // batch_size)
for step in iter_range:
offset = (step * batch_size) % (train_size - batch_size)
batch_data = train_data[offset:(offset + batch_size), ...]
batch_labels = train_labels[offset:(offset + batch_size)]
feed_dict = {
x: batch_data,
y_: batch_labels,
keep_prob: 0.5
}
_, l, lr, predictions = sess.run(
[self.optimizer, self.loss, self.learning_rate, y_conv], feed_dict=feed_dict)
if step % eval_frequency == 0:
if not self.cmd_args.test:
path = self.saver.save(sess, checkpoint_path)
print("Saved model checkpoint to {}\n".format(path))
elapsed_time = time.time() - start_time
start_time = time.time()
print('Step %d (epoch %.2f), %.1f ms' %
(step, float(step) * batch_size / train_size,
1000 * elapsed_time / eval_frequency))
print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
print('Minibatch error: %.1f%%' % utils.error_rate(predictions,
batch_labels,
self.onehot_labels))
print('Validation error: %.1f%%' % utils.error_rate(
self.eval_in_batches(y_conv,
x,
keep_prob,
validation_data,
sess,
batch_size,
num_classes), validation_labels, self.onehot_labels))
sys.stdout.flush()
# Finally print the result!
test_error = utils.error_rate(self.eval_in_batches(y_conv,
x,
keep_prob,
test_data,
sess,
batch_size,
num_classes), test_labels, self.onehot_labels)
print('Test error: %.1f%%' % test_error)
|
191143
|
og=list(input("Enter number "))
new=set(og)
print("".join(og),"is a Unique Number") if len(new)==len(og) else print("".join(og),"is not a Unique Number")
|
191161
|
import json
import os
import tempfile
import unittest
from gripql import Connection
from gripql.util import BaseConnection
def headersOverlap(actual, expected):
for k, v in expected.items():
assert k in actual
assert actual[k] == v
class TestRequestHeaderFormat(unittest.TestCase):
mock_url = "http://fakehost:8000"
def test_connection(self):
b = BaseConnection(self.mock_url)
headersOverlap(b.session.headers, {'Content-type': 'application/json'})
b = BaseConnection(self.mock_url, user="test", password="password")
headersOverlap(b.session.headers, {'Content-type': 'application/json', 'Authorization': 'Basic dGVzdDpwYXNzd29yZA=='})
b = BaseConnection(self.mock_url, token="iamnotarealtoken")
headersOverlap(b.session.headers, {'Content-type': 'application/json', 'Authorization': 'Bearer iamnotarealtoken'})
creds = {"OauthEmail": "<EMAIL>",
"OauthAccessToken": "<PASSWORD>",
"OauthExpires": 1551985931}
tmp = tempfile.NamedTemporaryFile(mode="w", delete=False)
json.dump(creds, tmp)
tmp.close()
expected = creds.copy()
expected["OauthExpires"] = str(expected["OauthExpires"])
expected["Content-type"] = "application/json"
b = BaseConnection(self.mock_url, credential_file=tmp.name)
os.remove(tmp.name)
headersOverlap(b.session.headers, expected)
# test header propagation to Graph and Query classes
c = Connection(self.mock_url, token="<PASSWORD>")
self.assertEqual(c.session.headers, c.graph('test').session.headers)
self.assertEqual(c.session.headers, c.graph('test').query().session.headers)
|
191211
|
from nose.tools import ok_
from penn import Calendar
import datetime
class TestCalendar():
def setUp(self):
self.calendar = Calendar()
def test_pull(self):
cal = self.calendar.pull_3year()
ok_(len(cal) > 0)
for event in cal:
ok_(len(event) == 3)
def test_date(self):
cal = self.calendar.pull_3year()
ok_(len(cal) > 0)
for event in cal:
ok_(isinstance(event['name'], str))
if event['name'] == "Independence Day Observed (no classes)":
independence = event['start']
d = datetime.datetime.strptime(independence, '%Y-%m-%d').date()
ok_(d.month == 7)
def test_name(self):
cal = self.calendar.pull_3year()
ok_(len(cal) > 0)
for event in cal:
ok_(isinstance(event['name'], str))
start = datetime.datetime.strptime(event['start'], '%Y-%m-%d').date()
end = datetime.datetime.strptime(event['end'], '%Y-%m-%d').date()
ok_((end - start).total_seconds() >= 0)
def test_chrono(self):
cal = self.calendar.pull_3year()
ok_(len(cal) > 0)
for i, event in enumerate(cal[:-1]):
start = datetime.datetime.strptime(event['start'], '%Y-%m-%d').date()
nextstart = datetime.datetime.strptime(cal[i + 1]['start'], '%Y-%m-%d').date()
ok_((nextstart - start).total_seconds() >= 0)
|
191222
|
from copy import deepcopy
from .ObjectDict import ObjectDict
from . import mixins
class AssemblyPlanReport(
mixins.PlotsMixin,
mixins.FolderReportMixin,
mixins.GenbankExportMixin,
mixins.PdfReportMixin,
):
def __init__(self, plan, sources):
self.plan = ObjectDict.from_dict(plan)
self.sources = ObjectDict.from_dict(sources)
@staticmethod
def from_dnaweaver_quote(quote):
plan = quote.assembly_plan_as_dict()
sources = quote.source.dict_supply_graph()
return AssemblyPlanReport(plan, sources)
def to_steps_list(self):
plan = deepcopy(self.plan)
nodes = []
def rec(node, depth=0):
if node.get("_visited", False):
return
node["_visited"] = True
assembly_plan = node.get("assembly_plan", [])
node["children"] = [n["id"] for n in assembly_plan]
nodes.append(node)
for other in sorted(
assembly_plan, key=lambda n: n["segment_start"]
):
rec(other)
rec(plan)
return nodes
|
191238
|
import logging
import signal
import sys
import threading
import time
from threading import Thread
from dbnd import Task, parameter
from dbnd._core.current import get_databand_context, try_get_databand_run
from dbnd._core.task_build.task_context import try_get_current_task
logger = logging.getLogger(__name__)
stop_requested = False
def msg(s):
sys.stderr.write("%s: %s\n" % (threading.get_ident(), s))
sys.stderr.flush()
def stop():
msg("stopping!")
task = try_get_current_task()
msg("Current tasks looks like: %s" % (task))
run = try_get_databand_run()
if run:
run.kill()
return
def sig_handler(signum, frame):
msg("handling signal: %s\n" % (signum))
msg("handling frame: %s\n" % (frame))
#
# global stop_requested
# stop_requested = True
t = Thread(target=stop)
t.start()
t.join()
raise Exception()
class SleepTask(Task):
sleep_sec = parameter.value(100)
def run(self):
sleep_sec = self.sleep_sec
while sleep_sec > 0:
time.sleep(2)
sleep_sec -= 2
logger.info("%d left to sleep", sleep_sec)
return "Done sleeping"
def on_kill(self):
logger.info("On Kill for %s", self)
def run():
SleepTask(task_version="now").dbnd_run()
if __name__ == "__main__":
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGINT, sig_handler)
msg("starting\n")
main_thread = Thread(name="dbnd_main", target=run)
main_thread.start()
main_thread.join()
msg("join completed\n")
|
191250
|
from neurovault.apps.statmaps.tasks import save_resampled_transformation_single
from neurovault.apps.statmaps.tests.utils import (clearDB, save_statmap_form)
from neurovault.apps.statmaps.models import (Collection)
from django.contrib.auth.models import User
from django.test import TestCase, Client
import pandas as pd
import os.path
import json
class TestGeneDecoding(TestCase):
_map = None
@classmethod
def setUpClass(cls):
cls.test_path = os.path.abspath(os.path.dirname(__file__))
cls.user, _ = User.objects.get_or_create(username='neurovault')
cls.client = Client()
cls.client.login(username=cls.user)
cls.Collection1 = Collection(name='Collection1', owner=cls.user)
cls.Collection1.save()
nii_path = os.path.join(
cls.test_path, cls._map)
map = save_statmap_form(
image_path=nii_path, collection=cls.Collection1)
save_resampled_transformation_single(map.pk)
response = json.loads(cls.client.get("/images/%d/gene_expression/json?mask=full" % map.pk, follow=True).content)
cls.df = pd.DataFrame(response["data"], columns=response["columns"])
@classmethod
def tearDownClass(cls):
clearDB()
cls.user.delete()
def _assess_gene(self, gene_name, field='t'):
value = self.df.loc[self.df['gene_symbol'] == gene_name][field]
self.assertEquals(len(value), 1)
value = list(value)[0]
self.assertGreaterEqual(value, 0.0)
def _assess_gene_relation(self, gene_name1, gene_name2, field='variance explained (mean)'):
value1 = self.df.loc[self.df['gene_symbol'] == gene_name1][field]
self.assertEquals(len(value1), 1)
value2 = self.df.loc[self.df['gene_symbol'] == gene_name2][field]
self.assertEquals(len(value2), 1)
value1 = list(value1)[0]
value2 = list(value2)[0]
self.assertGreater(value1, value2)
class TestWAY1(TestGeneDecoding):
_map = 'test_data/gene_validation/WAY_HC36_mean.nii.gz'
def test_positive_HTR1A(self):
self._assess_gene("HTR1A")
# class TestCUM1(TestGeneDecoding):
# _map = 'test_data/gene_validation/CUMl_BP_MNI.nii.gz'
#
# def test_HTR1A_greater_DRD2(self):
# self._assess_gene_relation("HTR1A", "DRD2")
#
#
# class TestFDOPA(TestGeneDecoding):
# _map = 'test_data/gene_validation/18FDOPA.nii.gz'
#
# def test_positive_DDC(self):
# self._assess_gene("DDC")
#
#
# class TestMWC(TestGeneDecoding):
# _map = 'test_data/gene_validation/MNI152_WaterContent_figureAlignedForPaper_resliceForSTOLTUSanalysis.nii.gz'
#
# def test_MBP_greater_DDC(self):
# self._assess_gene_relation("MBP", "DDC")
#
# def test_MOG_greater_DDC(self):
# self._assess_gene_relation("MOG", "DDC")
#
# def test_MOBP_greater_DDC(self):
# self._assess_gene_relation("MOBP", "DDC")
#
#
# class TestRACLOPRIDE(TestGeneDecoding):
# _map = 'test_data/gene_validation/RACLOPRIDE_TEMPLATE_inMNI_181_217_181.nii.gz'
#
# def test_positive_DRD2(self):
# self._assess_gene("DRD2")
#
#
# class TestFP_CIT(TestGeneDecoding):
# _map = 'test_data/gene_validation/123I-FP-CIT.nii.gz'
#
# def test_positive_SLC6A3_greater_HTR1A(self):
# self._assess_gene_relation("SLC6A3", "HTR1A")
#
#
# class TestDASB(TestGeneDecoding):
# _map = 'test_data/gene_validation/DASB_HC30_mean.nii.gz'
#
# def test_positive_SLC6A4(self):
# self._assess_gene("SLC6A4")
#
#
# class TestWAY2(TestGeneDecoding):
# _map = 'test_data/gene_validation/WAY_VT_MNI.nii.gz'
#
# def test_positive_HTR1A(self):
# self._assess_gene("HTR1A")
#
#
# #class TestP943(TestGeneDecoding):
# # _map = 'test_data/gene_validation/P943_HC22_mean.nii.gz'
# #
# # def test_positive_HTR1B(self):
# # self._assess_gene("HTR1B")
#
#
# class TestALTANSERIN(TestGeneDecoding):
# _map = 'test_data/gene_validation/ALT_HC19_mean.nii.gz'
#
# def test_positive_HTR1B(self):
# self._assess_gene("HTR2A")
|
191269
|
from asr.data import datasets, loaders
from asr import samplers
from asr.exceptions import ConfigurationError
from asr.utils.checks import check_for_data_path
import logging
logger = logging.getLogger()
def datasets_from_params(params):
"""
Load all the datasets specified by the config.
"""
sets = {}
for split in ['train', 'val', 'test']:
dataset_params = params.pop(f'{split}_dataset', None)
if dataset_params is None:
if split == 'train':
raise ConfigurationError('Must provide train_dataset params.')
continue
data_path = dataset_params.get('manifest_filepath', None)
if data_path is not None:
check_for_data_path(data_path, 'manifest_filepath')
sets[split] = datasets.from_params(dataset_params)
return sets
def loaders_from_params(params,
distributed=False,
world_size=1,
first_epoch='asc'):
"""
Load all loaders specified by the config.
"""
sets = datasets_from_params(params)
data_loaders = {}
for split in ['train', 'val', 'test']:
if split not in sets:
continue
loader_params = params.pop(f'{split}_data_loader', None)
if not loader_params:
loader_params = params.get('data_loader')
# TODO: put it in a better place
if distributed:
logger.info('Using distributed bucketing sampler')
sampler = samplers.DistributedBucketingSampler
else:
logger.info('Using normal bucketing sampler')
sampler = samplers.BucketingSampler
batch_sampler = sampler(sets[split],
batch_size=params['trainer']['batch_size'],
first_epoch=first_epoch)
data_loaders[split] = loaders.from_params(loader_params,
dataset=sets[split],
batch_sampler=batch_sampler)
return data_loaders
|
191274
|
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DLS_MODELS_REL_PATH = 'data/models'
DATASETS_REL_PATH = 'data/datasets-v2'
class Config(object):
DEBUG = True
TESTING = False
CSRF_ENABLED = True
SECRET_KEY = '<PASSWORD>-really-needs-to-be-changed'
#
DLS_FILEMANAGER_BASE_PATH = os.path.join(BASE_DIR, 'dataset-import-test')
# Allowed waiting interval for job in queue to be run, seconds
MISFIRE_GRACE_TIME = 3600
# seconds
JOB_MONITOR_INTERVAL = 2
# number of threads per executor in APScheduler
EXECUTOR_THREADS_NUMBER = 5
# seconds
SYSTEM_MONITOR_INTERVAL = 2
# directory for application logs
LOG_DIR = "logs"
# directory for task logs
LOG_DIR_TASK = "logs/tasks"
# Allowed file extensions for File Manager uploading
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'json', 'xml', 'zip'])
# Cuda Version. This property is applied if there is no nvcc in PATH
CUDA_VERSION = "7.5"
DLS_MODELS_BASE_PATH = models_dir = os.path.join(BASE_DIR, DLS_MODELS_REL_PATH)
DATASETS_BASE_PATH = os.path.join(BASE_DIR, DATASETS_REL_PATH)
class ProductionConfig(Config):
pass
class DevelopmentConfig(Config):
DEBUG = True
class TestingConfig(Config):
TESTING = True
|
191290
|
import os
import urllib.parse as up
from werkzeug.middleware.proxy_fix import ProxyFix
from flask import url_for, render_template
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from src import api, db, ma, create_app, Config, bp, bcrypt, jwt, admin, login_manager
# config = configs.get(config)
config = Config
extensions = [api, db, ma, admin, jwt, bcrypt, login_manager]
bps = [bp]
app = create_app(__name__, config, extensions=extensions, blueprints=bps)
app.wsgi_app = ProxyFix(app.wsgi_app, num_proxies=2)
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.shell
def _shell_context():
return dict(
app=app,
db=db,
ma=ma,
config=config
)
@manager.command
def list_routes():
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
line = up.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url))
output.append(line)
for line in sorted(output):
print(line)
@manager.option('-A', '--application', dest='application', default='', required=True)
@manager.option('-n', '--name', dest='name')
@manager.option('-l', '--debug', dest='debug')
@manager.option('-f', '--logfile', dest='logfile')
@manager.option('-P', '--pool', dest='pool')
@manager.option('-Q', '--queue', dest='queue')
@manager.option('-c', '--concurrency', dest='concurrency', default=2)
def worker(application, concurrency, pool, debug, logfile, name, queue):
celery.start()
@app.route('/', methods=['GET'])
@app.route('/home', methods=['GET'])
def home():
return render_template('home.html')
if __name__ == "__main__":
manager.run()
|
191300
|
from AnyQt.QtGui import QPalette
from AnyQt.QtCore import Qt
import pyqtgraph as pg
pg.setConfigOption("background", "w")
pg.setConfigOption("foreground", "k")
pg.setConfigOptions(antialias=True)
def create_palette(colors):
p = QPalette()
for role, color in colors.items():
p.setColor(role, color)
return p
class OWPalette:
"""
These constants are defined here so that they can be changed without
extensive changes to the visualizations
"""
Canvas = QPalette.Base
Grid = QPalette.Button
Text = QPalette.Text
Data = QPalette.Text
Axis = QPalette.Text
System = QPalette()
Light = create_palette({Canvas: Qt.white, Grid: Qt.lightGray, Text: Qt.black})
Dark = create_palette({Canvas: Qt.black, Grid: Qt.darkGray, Text: Qt.white})
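# Illustrative usage sketch (hypothetical caller code, not part of this module):
# a visualization can look colors up by role instead of hard-coding them, e.g.
# OWPalette.Light.color(OWPalette.Canvas) returns white and
# OWPalette.Dark.color(OWPalette.Canvas) returns black.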
|
191338
|
import pandas as pd
import matplotlib.pyplot as plt
my_dataset1 = pd.read_excel('Smith_glass_post_NYT_data.xlsx', sheet_name='Supp_traces')
x = my_dataset1.Zr
y = my_dataset1.Th
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter(x, y, marker='s', color='#ff464a', edgecolor='#000000')
ax1.set_title("using scatter()")
ax1.set_xlabel("Zr [ppm]")
ax1.set_ylabel("Th [ppm]")
ax2 = fig.add_subplot(1, 2, 2)
ax2.plot(x, y, marker='s', linestyle='', color='#ff464a', markeredgecolor='#000000')
ax2.set_title("using plot()")
ax2.set_xlabel("Zr [ppm]")
ax2.set_ylabel("Th [ppm]")
fig.tight_layout()
|
191348
|
import os
import sys
import json
import webbrowser
import sublime
import sublime_plugin
from threading import Thread, Timer, Event
from .SoftwareUtil import *
from .CommonUtil import *
from .SoftwareUserStatus import *
from .SlackHttp import *
try:
#python2
from urllib import urlencode
except ImportError:
#python3
from urllib.parse import urlencode
pendingCallback = None
def getSlackWorkspaces():
integrations = getIntegrations()
workspaces = [x for x in integrations if (x['name'].lower() == 'slack' and x['status'].lower() == 'active')]
return workspaces if workspaces is not None and len(workspaces) > 0 else []
def hasSlackWorkspaces():
return len(getSlackWorkspaces()) > 0
def disconnectSlackWorkspace():
result = checkSlackConnection(False)
if (result is False):
# show a prompt that there are no slack workspaces to disconnect, then bail out
sublime.message_dialog("No Slack workspaces found to disconnect")
return
# set the pending callback
global pendingCallback
pendingCallback = disconnectSlackWorkspaceCallback
showSlackWorkspaceSelection()
def disconnectSlackWorkspaceCallback(workspace):
if (workspace is not None):
removeSlackIntegration(workspace['authId'])
else:
clearPendingCallback()
def connectSlackWorkspace():
is_registered = checkRegistration(True)
if (is_registered is False):
return
params = {}
params["plugin"] = getPluginType()
params["plugin_uuid"] = getPluginUuid()
params["pluginVersion"] = getVersion()
params["plugin_id"] = getPluginId()
params["auth_callback_state"] = getAuthCallbackState()
params["integrate"] = "slack"
params["plugin_token"] = getItem("jwt")
api_endpoint = getApiEndpoint()
scheme = "https"
if('localhost' in api_endpoint):
scheme = "http"
url = scheme + "://" + api_endpoint + "/auth/slack?" + urlencode(params)
webbrowser.open(url)
t = Timer(10, refetchSlackConnectStatusLazily, [40])
t.start()
def pauseSlackNotifications():
is_registered = checkRegistration(True)
if (is_registered is False):
return
is_connected = checkSlackConnection(True)
if (is_connected is False):
return
updated = False
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('dnd.setSnooze', {'num_minutes': 120, 'token': workspace["access_token"]})
if (resp['ok'] is True):
updated = True
if (updated is True):
sublime.message_dialog("Slack notifications are paused for 2 hours")
def enableSlackNotifications():
is_registered = checkRegistration(True)
if (is_registered is False):
return
is_connected = checkSlackConnection(True)
if (is_connected is False):
return
updated = False
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('dnd.endSnooze', {'token': workspace["access_token"]})
if (resp['ok'] is True):
updated = True
if (updated is True):
sublime.message_dialog("Slack notifications enabled")
def getSlackDnDInfo():
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('dnd.info', {'token': workspace["access_token"]})
if (resp['ok'] is True):
# return the 1st one
return resp
return None
def getSlackStatus():
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('users.profile.get', {'token': workspace["access_token"]})
if (resp['ok'] is True):
# return the 1st one
return resp
return None
def getSlackPresence():
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('users.getPresence', {'token': workspace["access_token"]})
if (resp['ok'] is True):
# return the 1st one
return resp
return None
# accepted states: "auto" or "away"
def toggleSlackPresence(state):
is_registered = checkRegistration(True)
if (is_registered is False):
return
is_connected = checkSlackConnection(True)
if (is_connected is False):
return
updated = False
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('users.setPresence', {'token': workspace["access_token"], 'presence': state})
if (resp['ok'] is True):
updated = True
if (updated is True):
sublime.message_dialog("Slack presence updated")
def updateSlackStatusText(message):
updated = False
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('users.profile.set', {'token': workspace["access_token"], 'profile': {'status_text': message, 'status_emoji': "", 'status_expiration': 0}})
if (resp['ok'] is True):
updated = True
if (updated is True):
sublime.message_dialog("Slack status message updated")
def clearSlackStatusText():
updated = False
workspaces = getSlackWorkspaces()
for i in range(len(workspaces)):
workspace = workspaces[i]
resp = api_call('users.profile.set', {'token': workspace["access_token"], 'profile': {'status_text': "", 'status_emoji': ""}})
if (resp['ok'] is True):
updated = True
if (updated is True):
sublime.message_dialog("Slack status message cleared")
#######################################################################################
# PRIVATE METHODS
#######################################################################################
# done
def showSlackWorkspaceSelection():
workspaces = getSlackWorkspaces()
# create the options
options = []
for i in range(len(workspaces)):
workspace = workspaces[i]
options.append(workspace['team_domain'] + " (" + workspace['team_name'] + ")")
# show a prompt of which workspace to get the access token from
sublime.active_window().show_quick_panel(options, showSlackWorkspaceSelectionHandler)
# done
def showSlackWorkspaceSelectionHandler(result_idx):
# -1 means nothing was selected
if (result_idx == -1):
global pendingCallback
pendingCallback = None
return
workspaces = getSlackWorkspaces()
if (len(workspaces) > result_idx):
# perform the waiting callback
pendingCallback(workspaces[result_idx])
else:
clearPendingCallback()
# done
def checkSlackConnection(show_connect=True):
if (hasSlackWorkspaces() is False):
clearPendingCallback()
if (show_connect is True):
# show the prompt
options = ['Connect a Slack workspace to continue', 'Not now']
sublime.active_window().show_quick_panel(options, connectSlackPromptHandler)
return False
else:
return True
# done
def connectSlackPromptHandler(result_idx):
# zero means they've selected to connect slack
if (result_idx != 0):
clearPendingCallback()
else:
# connect
connectSlackWorkspace()
# done
def removeSlackIntegration(auth_id):
new_workspaces = [x for x in getSlackWorkspaces() if (x['authId'] != auth_id)]
syncIntegrations(new_workspaces)
clearPendingCallback()
def clearPendingCallback():
global pendingCallback
pendingCallback = None
def checkRegistration(show_signup=True):
name = getItem("name")
if (name is None):
clearPendingCallback()
if (show_signup is True):
# show the signup confirm
options = ['Connecting Slack requires a registered account. Sign up or log in to continue.', 'Not now']
sublime.active_window().show_quick_panel(options, signupPromptHandler)
return False
return True
def signupPromptHandler(result_idx):
# zero means they've selected to sign up
if (result_idx == 0):
# show the sign up flow
signupOptions = ['Google', 'GitHub', 'Email']
sublime.active_window().show_quick_panel(signupOptions, authSelectionHandler)
def authSelectionHandler(result_idx):
if (result_idx == 0):
launchLoginUrl('google', False)
elif (result_idx == 1):
launchLoginUrl('github', False)
elif (result_idx == 2):
launchLoginUrl('software', False)
def refetchSlackConnectStatusLazily(try_count=40):
foundSlackAuth = getSlackAuth()
if (foundSlackAuth is False):
if (try_count > 0):
try_count -= 1
t = Timer(10, refetchSlackConnectStatusLazily, [try_count])
t.start()
else:
setAuthCallbackState(None)
else:
setAuthCallbackState(None)
sublime.message_dialog("Successfully connected to Slack")
def getSlackAuth():
foundNewIntegration = False
userState = getUserRegistrationState(True)
if (userState["user"] is not None):
foundNewIntegration = updateSlackIntegrationsFromUser(userState["user"])
return foundNewIntegration
|
191363
|
import re
from geom2d import Circle, Point, Rect, Size, Segment
from geom2d import make_polygon_from_coords
__NUM_RE = r'\d+(\.\d+)?'
__CIRC_RE = rf'circ (?P<cx>{__NUM_RE}) (?P<cy>{__NUM_RE}) ' \
rf'(?P<r>{__NUM_RE})'
__RECT_RE = rf'rect (?P<ox>{__NUM_RE}) (?P<oy>{__NUM_RE}) ' \
rf'(?P<w>{__NUM_RE}) (?P<h>{__NUM_RE})'
__POLY_RE = rf'poly (?P<coords>[\d\s\.]+)'
__SEGM_RE = rf'segm (?P<sx>{__NUM_RE}) (?P<sy>{__NUM_RE}) ' \
rf'(?P<ex>{__NUM_RE}) (?P<ey>{__NUM_RE})'
def can_parse_circle(line):
return re.match(__CIRC_RE, line)
def parse_circle(line):
match = re.match(__CIRC_RE, line)
return Circle(
center=Point(
float(match.group('cx')),
float(match.group('cy'))
),
radius=float(match.group('r'))
)
def can_parse_rect(line):
return re.match(__RECT_RE, line)
def parse_rect(line):
match = re.match(__RECT_RE, line)
return Rect(
origin=Point(
float(match.group('ox')),
float(match.group('oy'))
),
size=Size(
float(match.group('w')),
float(match.group('h'))
)
)
def can_parse_polygon(line):
return re.match(__POLY_RE, line)
def parse_polygon(line):
match = re.match(__POLY_RE, line)
coords = [float(n) for n in match.group('coords').split(' ')]
return make_polygon_from_coords(coords)
def can_parse_segment(line):
return re.match(__SEGM_RE, line)
def parse_segment(line):
match = re.match(__SEGM_RE, line)
return Segment(
start=Point(
float(match.group('sx')),
float(match.group('sy'))
),
end=Point(
float(match.group('ex')),
float(match.group('ey'))
)
)
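# Illustrative usage sketch (the sample line below is hypothetical; it assumes the
# geom2d package imported above is available):
# line = 'circ 1.5 2.0 3.25'
# if can_parse_circle(line):
#     circle = parse_circle(line)  # Circle(center=Point(1.5, 2.0), radius=3.25)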
|
191369
|
import json
DROPLETS_FILE = "droplets.json"
def get_droplets():
with open(DROPLETS_FILE, "r") as f:
data = f.read()
if not data:
return []
else:
return json.loads(data)
def main():
for droplet in get_droplets():
print droplet["name"]
print droplet["ip_address"]
print "%s:%s" % (droplet["username"], droplet["password"])
print
print
if __name__ == "__main__":
main()
|
191428
|
from urllib2 import HTTPError
class MockHTTPError(HTTPError):
def __init__(self, code):
super(MockHTTPError, self).__init__('https://thebluealliance.com', code, 'mock', {}, None)
|
191472
|
import os
import yaml
with open('neptune.yaml') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)
exp_name = config['name']
exp_root = config['parameters']['solution_dir']
data_dir = config['parameters']['data_dir']
os.makedirs(exp_root, exist_ok=True)
IMAGE_COLUMNS = ['Image']
SHAPE_COLUMNS = ['height', 'width']
LOCALIZER_TARGET_COLUMNS = ['bbox1_x', 'bbox1_y', 'bbox2_x', 'bbox2_y']
LOCALIZER_AUXILARY_COLUMNS = []
ALIGNER_TARGET_COLUMNS = ['bonnet_x', 'bonnet_y', 'blowhead_x', 'blowhead_y']
ALIGNER_AUXILARY_COLUMNS = ['callosity', 'whaleID']
CLASSIFIER_TARGET_COLUMNS = ['whaleID']
CLASSIFIER_AUXILARY_COLUMNS = ['callosity']
LOCALIZER_COLUMNS = LOCALIZER_TARGET_COLUMNS + LOCALIZER_AUXILARY_COLUMNS
ALIGNER_COLUMNS = ALIGNER_TARGET_COLUMNS + ALIGNER_AUXILARY_COLUMNS
CLASSIFIER_COLUMNS = CLASSIFIER_TARGET_COLUMNS + CLASSIFIER_AUXILARY_COLUMNS
TARGET_COLUMNS = {'localization': LOCALIZER_TARGET_COLUMNS,
'alignment': ALIGNER_TARGET_COLUMNS,
'classification': CLASSIFIER_TARGET_COLUMNS,
'end_to_end': CLASSIFIER_TARGET_COLUMNS
}
GLOBAL_CONFIG = {'exp_name': exp_name,
'exp_root': exp_root,
'num_workers': 6,
'callosity_classes': 3,
'num_classes': 447,
'img_H-W': (256, 256),
'img_C-H-W': (3, 256, 256),
'batch_size_train': 32,
'batch_size_inference': 32,
'localizer_bins': 128,
'aligner_bins': 128
}
SOLUTION_CONFIG = {
'global': {'cache_dirpath': GLOBAL_CONFIG['exp_root']},
'trainer': {'metadata': os.path.join(data_dir, 'metadata.csv'),
'train_csv': os.path.join(data_dir, 'annotations/train.csv'),
'bbox_train_json': os.path.join(data_dir, 'annotations/slot.json'),
'bonnet_tip_json': os.path.join(data_dir, 'annotations/bonnet_tip.json'),
'blowhead_json': os.path.join(data_dir, 'annotations/blowhead.json'),
'callosity_csv': os.path.join(data_dir, 'annotations/new_conn.csv'),
'imgs_dir': os.path.join(data_dir, 'imgs')
},
'localizer_dataloader': {'dataset_params': {'train': {'img_dirpath': os.path.join(data_dir, 'imgs'),
'augmentation': True,
'target_size': GLOBAL_CONFIG['img_H-W'],
'bins_nr': GLOBAL_CONFIG['localizer_bins']
},
'inference': {'img_dirpath': os.path.join(data_dir, 'imgs'),
'augmentation': False,
'target_size': GLOBAL_CONFIG['img_H-W'],
'bins_nr': GLOBAL_CONFIG['localizer_bins']
},
},
'loader_params': {'train': {'batch_size': GLOBAL_CONFIG['batch_size_train'],
'shuffle': True,
'num_workers': GLOBAL_CONFIG['num_workers']
},
'inference': {'batch_size': GLOBAL_CONFIG['batch_size_inference'],
'shuffle': False,
'num_workers': GLOBAL_CONFIG['num_workers']
},
},
},
'localizer_network': {'architecture_config': {'model_params': {'input_shape': GLOBAL_CONFIG['img_C-H-W'],
'classes': GLOBAL_CONFIG['localizer_bins']
},
'optimizer_params': {'lr': 0.0005,
'momentum': 0.9,
'nesterov': True
},
'regularizer_params': {'regularize': True,
'weight_decay_conv2d': 0.0005,
'weight_decay_linear': 0.01},
'weights_init': {'function': 'normal',
'params': {'mean': 0,
'std_conv2d': 0.01,
'std_linear': 0.001
},
},
},
'training_config': {'epochs': 150},
'callbacks_config': {'model_checkpoint': {
'checkpoint_dir': os.path.join(exp_root, 'checkpoints', 'localizer_network'),
'epoch_every': 1},
'lr_scheduler': {'gamma': 0.9955,
'epoch_every': 1},
'training_monitor': {'batch_every': 1,
'epoch_every': 1},
'validation_monitor': {'epoch_every': 1},
'bounding_box_predictions': {'img_dir': 'output/debugging',
'bins_nr': GLOBAL_CONFIG['localizer_bins'],
'epoch_every': 1
},
'neptune_monitor': {'bins_nr': GLOBAL_CONFIG['localizer_bins'],
'img_nr': 10}
},
},
'localizer_unbinner': {'bins_nr': GLOBAL_CONFIG['localizer_bins']},
'aligner_encoder': {'encode': ['callosity', 'whaleID'],
'no_encode': ['bonnet_x', 'bonnet_y', 'blowhead_x', 'blowhead_y', ],
},
'aligner_dataloader': {'dataset_params': {'train': {'img_dirpath': os.path.join(data_dir, 'imgs'),
'augmentation': True,
'target_size': GLOBAL_CONFIG['img_H-W'],
'bins_nr': GLOBAL_CONFIG['aligner_bins']
},
'inference': {'img_dirpath': os.path.join(data_dir, 'imgs'),
'augmentation': False,
'target_size': GLOBAL_CONFIG['img_H-W'],
'bins_nr': GLOBAL_CONFIG['aligner_bins']
},
},
'loader_params': {'train': {'batch_size': GLOBAL_CONFIG['batch_size_train'],
'shuffle': True,
'num_workers': GLOBAL_CONFIG['num_workers']
},
'inference': {'batch_size': GLOBAL_CONFIG['batch_size_inference'],
'shuffle': False,
'num_workers': GLOBAL_CONFIG['num_workers']
},
},
},
'aligner_network': {'architecture_config': {'model_params': {'input_shape': GLOBAL_CONFIG['img_C-H-W'],
'classes': {'points': GLOBAL_CONFIG['aligner_bins'],
'callosity': GLOBAL_CONFIG[
'callosity_classes'],
'whale_id': GLOBAL_CONFIG['num_classes']
}
},
'optimizer_params': {'lr': 0.0005,
'momentum': 0.9,
'nesterov': True
},
'regularizer_params': {'regularize': True,
'weight_decay_conv2d': 0.0005,
'weight_decay_linear': 0.01},
'weights_init': {'function': 'normal',
'params': {'mean': 0,
'std_conv2d': 0.01,
'std_linear': 0.001
},
},
},
'training_config': {'epochs': 120},
'callbacks_config': {'model_checkpoint': {
'checkpoint_dir': os.path.join(exp_root, 'checkpoints', 'aligner_network'),
'epoch_every': 1
},
'lr_scheduler': {'gamma': 0.9955,
'epoch_every': 1},
'training_monitor': {'batch_every': 1,
'epoch_every': 1},
'validation_monitor': {'epoch_every': 1},
'neptune_monitor': {'bins_nr': GLOBAL_CONFIG['aligner_bins'],
'img_nr': 10}
},
},
'aligner_unbinner': {'bins_nr': GLOBAL_CONFIG['aligner_bins'],
'shape': GLOBAL_CONFIG['img_H-W']},
'aligner_adjuster': {'shape': GLOBAL_CONFIG['img_H-W']},
'classifier_encoder': {'encode': ['whaleID', 'callosity'],
'no_encode': [],
},
'classifier_dataloader': {'dataset_params': {'train': {'img_dirpath': os.path.join(data_dir, 'imgs'),
'augmentation': True,
'target_size': GLOBAL_CONFIG['img_H-W'],
'num_classes': GLOBAL_CONFIG['num_classes']
},
'inference': {'img_dirpath': os.path.join(data_dir, 'imgs'),
'augmentation': False,
'target_size': GLOBAL_CONFIG['img_H-W'],
'num_classes': GLOBAL_CONFIG['num_classes']
},
},
'loader_params': {'train': {'batch_size': GLOBAL_CONFIG['batch_size_train'],
'shuffle': True,
'num_workers': GLOBAL_CONFIG['num_workers']
},
'inference': {'batch_size': GLOBAL_CONFIG['batch_size_inference'],
'shuffle': False,
'num_workers': GLOBAL_CONFIG['num_workers']
},
},
},
'classifier_network': {'architecture_config': {'model_params': {'input_shape': GLOBAL_CONFIG['img_C-H-W'],
'classes': {
'whale_id': GLOBAL_CONFIG['num_classes'],
'callosity': GLOBAL_CONFIG['callosity_classes'],
},
},
'weights_init': {'function': 'normal',
'params': {'mean': 0.0,
'std_conv2d': 0.01,
'std_linear': 0.001
},
},
'regularizer_params': {'regularize': True,
'weight_decay_conv2d': 0.0005,
'weight_decay_linear': 0.01
},
'optimizer_params': {'lr': 0.001,
'momentum': 0.9,
'nesterov': True
},
},
'training_config': {'epochs': 250},
'callbacks_config': {'model_checkpoint': {
'checkpoint_dir': os.path.join(exp_root, 'checkpoints', 'classifier_network'),
'epoch_every': 5,
'batch_every': 0
},
'lr_scheduler': {'gamma': 0.9955},
'validation_monitor': {'epoch_every': 1,
'batch_every': 30
},
'training_monitor': {'epoch_every': 1,
'batch_every': 30
},
},
},
'classifier_calibrator': {'power': 1.35},
}
|
191498
|
from colosseum.constants import AUTO, BLOCK, RTL, SOLID
from colosseum.declaration import CSS
from ...utils import LayoutTestCase, TestNode
class WidthTests(LayoutTestCase):
def test_no_horizontal_properties(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
def test_left_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10, margin_left=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
def test_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
def test_left_and_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, height=10, margin_left=AUTO, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (1024, 10)},
'padding_box': {'position': (0, 0), 'size': (1024, 10)},
'content': {'position': (0, 0), 'size': (1024, 10)},
}
)
def test_width(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (50, 10)},
'padding_box': {'position': (0, 0), 'size': (50, 10)},
'content': {'position': (0, 0), 'size': (50, 10)},
}
)
def test_width_auto_left_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_left=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (974, 0), 'size': (50, 10)},
'padding_box': {'position': (974, 0), 'size': (50, 10)},
'content': {'position': (974, 0), 'size': (50, 10)},
}
)
def test_width_auto_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (50, 10)},
'padding_box': {'position': (0, 0), 'size': (50, 10)},
'content': {'position': (0, 0), 'size': (50, 10)},
}
)
def test_width_auto_left_and_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_left=AUTO, margin_right=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (487, 0), 'size': (50, 10)},
'padding_box': {'position': (487, 0), 'size': (50, 10)},
'content': {'position': (487, 0), 'size': (50, 10)},
}
)
def test_width_fixed_left_and_right_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=50, height=10, margin_left=30, margin_right=40)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (30, 0), 'size': (50, 10)},
'padding_box': {'position': (30, 0), 'size': (50, 10)},
'content': {'position': (30, 0), 'size': (50, 10)},
}
)
def test_width_fixed_left_and_right_margin_rtl(self):
node = TestNode(
name='div', style=CSS(
display=BLOCK, width=50, height=10,
margin_left=30, margin_right=40, direction=RTL
)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (934, 0), 'size': (50, 10)},
'padding_box': {'position': (934, 0), 'size': (50, 10)},
'content': {'position': (934, 0), 'size': (50, 10)},
}
)
def test_width_exceeds_parent(self):
node = TestNode(
name='div', style=CSS(
display=BLOCK, width=500, height=20,
padding=50, border_width=60, border_style=SOLID,
margin=70
)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (70, 70), 'size': (720, 240)},
'padding_box': {'position': (130, 130), 'size': (600, 120)},
'content': {'position': (180, 180), 'size': (500, 20)},
}
)
def test_width_exceeds_parent_auto_left_and_right_margins(self):
node = TestNode(
name='div', style=CSS(
display=BLOCK, width=500, height=20,
padding=50, border_width=60, border_style=SOLID,
margin_left=AUTO, margin_right=AUTO
)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (152, 0), 'size': (720, 240)},
'padding_box': {'position': (212, 60), 'size': (600, 120)},
'content': {'position': (262, 110), 'size': (500, 20)},
}
)
class HeightTests(LayoutTestCase):
def test_no_vertical_properties(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 0)},
'padding_box': {'position': (0, 0), 'size': (10, 0)},
'content': {'position': (0, 0), 'size': (10, 0)},
}
)
def test_height(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10, height=50)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 50)},
'padding_box': {'position': (0, 0), 'size': (10, 50)},
'content': {'position': (0, 0), 'size': (10, 50)},
}
)
def test_height_auto_top_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10, height=50, margin_top=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 50)},
'padding_box': {'position': (0, 0), 'size': (10, 50)},
'content': {'position': (0, 0), 'size': (10, 50)},
}
)
def test_height_auto_bottom_margin(self):
node = TestNode(
name='div', style=CSS(display=BLOCK, width=10, height=50, margin_bottom=AUTO)
)
self.layout_node(node)
self.assertLayout(
node,
{
'tag': 'div',
'border_box': {'position': (0, 0), 'size': (10, 50)},
'padding_box': {'position': (0, 0), 'size': (10, 50)},
'content': {'position': (0, 0), 'size': (10, 50)},
}
)
|
191539
|
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
#parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#parser.add_argument("-f", "--file", type=str, help="Voxel file (.vxl)", required=True)
#args = parser.parse_args()
## Voxel-A lens
k1 = -0.1583968
k2 = 0.06113919
k3 = 0.09898978
p1 = 0.001591975
p2 = -0.0001962754
x = np.linspace(0, 0.9, 200)
y = 0
r2 = x*x
r4 = r2*r2
r6 = r4*r2
x1 = x*(1 + k1*r2 + k2*r4 + k3*r6) + 2*p1*x*y + p2*(r2 + 2*x*x)
y1 = y*(1 + k1*r2 + k2*r4 + k3*r6) + p1*(r2 + 2*y*y) + p2*x*y
## Tintin lens
k1 = 0.909882
k2 = -3.559455
k3 = 3.626591
p1 = 0.047604
p2 = -0.005546
x2 = x*(1 + k1*r2 + k2*r4 + k3*r6) + 2*p1*x*y + p2*(r2 + 2*x*x)
y2 = y*(1 + k1*r2 + k2*r4 + k3*r6) + p1*(r2 + 2*y*y) + p2*x*y
r2 = x1*x1 + y1*y1
r4 = r2*r2
r6 = r4*r2
x3 = x1*(1 + k1*r2 + k2*r4 + k3*r6) + 2*p1*x1*y1 + p2*(r2 + 2*x1*x1)
y3 = y1*(1 + k1*r2 + k2*r4 + k3*r6) + p1*(r2 + 2*y1*y1) + p2*x1*y1
plt.plot(x, x1, x, x2, 'r', x, x3, 'k')
plt.grid(True)
plt.legend(['Voxel-A', 'TintinCDK', 'Distorted to Corrected'])
plt.show()
|
191549
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='1'
from os import listdir
import sys
import time
import argparse
import tools.ops
import subprocess
import numpy as np
import tensorflow as tf
import scipy.misc as sm
from models.mfb_net_cross import *
from tools.utilities import *
from tools.ops import *
parser = argparse.ArgumentParser()
parser.add_argument('-lr', dest='lr', type=float, default='1e-4', help='original learning rate')
parser.add_argument('-batch_size', dest='batch_size', type=int, default='10', help='batch_size')
args = parser.parse_args()
flags = tf.app.flags
flags.DEFINE_float('lr', args.lr, 'Original learning rate.')
flags.DEFINE_integer('batch_size', args.batch_size, 'Batch size.')
flags.DEFINE_integer('num_epochs', 500, 'Number of epochs.')
flags.DEFINE_integer('num_gpus', 4, 'Number of GPUs.')
flags.DEFINE_integer('seq_length', 16, 'Length of each video clip.')
flags.DEFINE_integer('height', 128, 'Height of video frame.')
flags.DEFINE_integer('width', 128, 'Width of video frame.')
flags.DEFINE_integer('channel', 3, 'Number of channels for each frame.')
flags.DEFINE_integer('num_sample', 10060, 'Number of samples in this dataset.')
flags.DEFINE_float('wd', 0.001, 'Weight decay rate.')
FLAGS = flags.FLAGS
prefix = 'mfb_cross'
model_save_dir = './ckpt/' + prefix
logs_save_dir = './logs/' + prefix
pred_save_dir = './output/' + prefix
loss_save_dir = './loss'
train_list_path = './dataset/trainlist.txt'
dataset_path = './dataset/UCF-101-tf-records'
evaluation_job = './jobs/mfb_cross_val'
use_pretrained_model = True
save_predictions = True
def run_training():
# Create model directory
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
model_filename = "./mfb_baseline_ucf24.model"
# Consturct computational graph
tower_grads = []
tower_losses, tower_ffg_losses, tower_fbg_losses, tower_lfg_losses, tower_feat_losses, tower_wd_losses = [], [], [], [], [], []
tower_ffg_m_losses, tower_fbg_m_losses, tower_lfg_m_losses = [], [], []
global_step = tf.get_variable(
'global_step',
[],
initializer=tf.constant_initializer(0),
trainable=False
)
starter_learning_rate = FLAGS.lr
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
100000, 0.5, staircase=True)
opt = tf.train.AdamOptimizer(learning_rate)
# Create a session for running Ops on the Graph.
config = tf.ConfigProto(allow_soft_placement=True)
sess = tf.Session(config=config)
coord = tf.train.Coordinator()
threads = None
train_list_file = open(train_list_path, 'r')
train_list = train_list_file.read().splitlines()
for i, line in enumerate(train_list):
train_list[i] = os.path.join(dataset_path, train_list[i])
assert(len(train_list) % FLAGS.num_gpus == 0)
num_for_each_gpu = len(train_list) // FLAGS.num_gpus
clips_list, img_masks_list, loss_masks_list = [], [], []
with sess.as_default():
for i in range(FLAGS.num_gpus):
clips, img_masks, loss_masks = input_pipeline(train_list[i*num_for_each_gpu:(i+1)*num_for_each_gpu], \
FLAGS.batch_size, read_threads=4, num_epochs=FLAGS.num_epochs, is_training=True)
clips_list.append(clips)
img_masks_list.append(img_masks)
loss_masks_list.append(loss_masks)
mfb_list = []
with tf.variable_scope('vars') as var_scope:
for gpu_index in range(FLAGS.num_gpus):
with tf.device('/gpu:%d' % (gpu_index)):
with tf.name_scope('%s_%d' % ('tower', gpu_index)) as scope:
# construct model
mfb = mfb_net(clips_list[gpu_index], FLAGS.height, FLAGS.width, FLAGS.seq_length, FLAGS.channel, FLAGS.batch_size)
mfb_list.append(mfb)
loss, first_fg_loss, first_bg_loss, last_fg_loss, feat_loss, wd_loss = \
tower_loss(scope, mfb, clips_list[gpu_index], img_masks_list[gpu_index], loss_masks_list[gpu_index])
var_scope.reuse_variables()
vars_to_optimize = tf.trainable_variables()
grads = opt.compute_gradients(loss, var_list=vars_to_optimize)
tower_grads.append(grads)
tower_losses.append(loss)
tower_ffg_losses.append(first_fg_loss)
tower_fbg_losses.append(first_bg_loss)
tower_lfg_losses.append(last_fg_loss)
tower_feat_losses.append(feat_loss)
tower_wd_losses.append(wd_loss)
# concatenate the losses of all towers
loss_op = tf.reduce_mean(tower_losses)
ffg_loss_op = tf.reduce_mean(tower_ffg_losses)
fbg_loss_op = tf.reduce_mean(tower_fbg_losses)
lfg_loss_op = tf.reduce_mean(tower_lfg_losses)
feat_loss_op = tf.reduce_mean(tower_feat_losses)
wd_loss_op = tf.reduce_mean(tower_wd_losses)
tf.summary.scalar('loss', loss_op)
tf.summary.scalar('ffg_loss', ffg_loss_op)
tf.summary.scalar('fbg_loss', fbg_loss_op)
tf.summary.scalar('lfg_loss', lfg_loss_op)
tf.summary.scalar('feat_loss', feat_loss_op)
tf.summary.scalar('wd_loss', wd_loss_op)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
grads = average_gradients(tower_grads)
with tf.control_dependencies(update_ops):
train_op = opt.apply_gradients(grads, global_step=global_step)
# saver for saving checkpoints
saver = tf.train.Saver(max_to_keep=10)
init = tf.initialize_all_variables()
sess.run(init)
if not os.path.exists(model_save_dir):
os.makedirs(model_save_dir)
if use_pretrained_model:
print('[*] Loading checkpoint ...')
model = tf.train.latest_checkpoint(model_save_dir)
if model is not None:
saver.restore(sess, model)
print('[*] Loading success: %s!'%model)
else:
print('[*] Loading failed ...')
# Create summary writer
merged = tf.summary.merge_all()
if not os.path.exists(logs_save_dir):
os.makedirs(logs_save_dir)
sum_writer = tf.summary.FileWriter(logs_save_dir, sess.graph)
# Create prediction output folder
if not os.path.exists(pred_save_dir):
os.makedirs(pred_save_dir)
# Create loss output folder
if not os.path.exists(loss_save_dir):
os.makedirs(loss_save_dir)
loss_file = open(os.path.join(loss_save_dir, prefix+'.txt'), 'w')
total_steps = (FLAGS.num_sample / (FLAGS.num_gpus * FLAGS.batch_size)) * FLAGS.num_epochs
# start queue runner
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
with sess.as_default():
print('\n\n\n*********** start training ***********\n\n\n')
while not coord.should_stop():
# Run training steps
start_time = time.time()
sess.run(train_op)
duration = time.time() - start_time
step = global_step.eval()
if step == 1 or step % 10 == 0: # evaluate loss
loss, ffg_loss, fbg_loss, lfg_loss, feat_loss, wd_loss, lr = \
sess.run([loss_op, ffg_loss_op, fbg_loss_op, lfg_loss_op, feat_loss_op, wd_loss_op, learning_rate])
line = 'step %d/%d, loss=%.8f, ffg=%.8f, fbg=%.8f, lfg=%.8f, feat=%.8f, lwd=%.8f, dur=%.3f, lr=%.8f' \
%(step, total_steps, loss, ffg_loss, fbg_loss, lfg_loss, feat_loss, wd_loss, duration, lr)
print(line)
loss_file.write(line + '\n')
loss_file.flush()
if step == 1 or step % 10 == 0: # save summary
summary = sess.run(merged)
sum_writer.add_summary(summary, step)
if step % 100 == 0 and save_predictions: # save current predictions
mfb = mfb_list[0] # only visualize prediction in first tower
ffg, fbg, lfg, gt_ffg, gt_fbg, gt_lfg = sess.run([
mfb.first_fg_rec[0], mfb.first_bg_rec[0], mfb.last_fg_rec[0], \
mfb.gt_ffg[0], mfb.gt_fbg[0], mfb.gt_lfg[0]])
ffg, fbg, lfg = (ffg+1)/2*255.0, (fbg+1)/2*255.0, (lfg+1)/2*255.0
gt_ffg, gt_fbg, gt_lfg = (gt_ffg+1)/2*255.0, (gt_fbg+1)/2*255.0, (gt_lfg+1)/2*255.0
img = gen_pred_img(ffg, fbg, lfg)
gt = gen_pred_img(gt_ffg, gt_fbg, gt_lfg)
save_img = np.concatenate((img, gt))
sm.imsave(os.path.join(pred_save_dir, '%07d.jpg'%step), save_img)
if step % 500 == 0: # save checkpoint
saver.save(sess, os.path.join(model_save_dir, model_filename), global_step=global_step)
if step % 500 == 0:
pass
# launch a new script for validation (please modify it for your own script)
#subprocess.check_output(['python', evaluation_job])
except tf.errors.OutOfRangeError:
print('Done training -- epoch limit reached')
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
|
191569
|
from .loopback import Loopback # noqa
from .dataleakageprevention import DataLeakagePreventionFileSystem # noqa
|
191683
|
import torch
import numpy as np
import matplotlib.pyplot as plt
from operator import itemgetter
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
from sklearn.metrics import confusion_matrix, roc_curve, precision_recall_curve
plt.style.use('fivethirtyeight')
def odds(prob):
return prob / (1 - prob)
def log_odds(prob):
return np.log(odds(prob))
def sigmoid(z):
return 1 / (1 + np.exp(-z))
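# Note: sigmoid is the inverse of log_odds, e.g. sigmoid(log_odds(0.75)) == 0.75
# (up to floating-point error); the figures below visualize this relationship.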
def split_cm(cm):
# Actual negatives go in the top row,
# above the probability line
actual_negative = cm[0]
# Predicted negatives go in the first column
tn = actual_negative[0]
# Predicted positives go in the second column
fp = actual_negative[1]
# Actual positives go in the bottow row,
# below the probability line
actual_positive = cm[1]
# Predicted negatives go in the first column
fn = actual_positive[0]
# Predicted positives go in the second column
tp = actual_positive[1]
return tn, fp, fn, tp
def tpr_fpr(cm):
tn, fp, fn, tp = split_cm(cm)
tpr = tp / (tp + fn)
fpr = fp / (fp + tn)
return tpr, fpr
def precision_recall(cm):
tn, fp, fn, tp = split_cm(cm)
precision = tp / (tp + fp)
recall = tp / (tp + fn)
return precision, recall
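# Worked example (hypothetical 2x2 confusion matrix in sklearn layout, rows = actual,
# columns = predicted):
# cm = np.array([[50, 10],  # TN=50, FP=10
#                [ 5, 35]]) # FN=5,  TP=35
# tpr_fpr(cm)           # -> (0.875, 0.1666...) i.e. TP/(TP+FN), FP/(FP+TN)
# precision_recall(cm)  # -> (0.7777..., 0.875) i.e. TP/(TP+FP), TP/(TP+FN)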
def probability_line(ax, y, probs, threshold, shift=0.0, annot=False, colors=None):
if colors is None:
colors = ['r', 'b']
ax.grid(False)
ax.set_ylim([-.1, .1])
ax.axes.get_yaxis().set_visible(False)
ax.plot([0, 1], [0, 0], linewidth=2, c='k', zorder=1)
ax.plot([0, 0], [-.1, .1], c='k', zorder=1)
ax.plot([1, 1], [-.1, .1], c='k', zorder=1)
tn = (y == 0) & (probs < threshold)
fp = (y == 0) & (probs >= threshold)
tp = (y == 1) & (probs >= threshold)
fn = (y == 1) & (probs < threshold)
ax.plot([threshold, threshold], [-.1, .1], c='k', zorder=1, linestyle='--')
# actual negatives on the top row: true negatives left of the threshold, false positives right of it
ax.scatter(probs[tn], np.zeros(tn.sum()) + shift, c=colors[0], s=150, zorder=2, edgecolor=colors[0], linewidth=3)
ax.scatter(probs[fp], np.zeros(fp.sum()) + shift, c=colors[0], s=150, zorder=2, edgecolor=colors[1], linewidth=3)
# actual positives on the bottom row: true positives right of the threshold, false negatives left of it
ax.scatter(probs[tp], np.zeros(tp.sum()) - shift, c=colors[1], s=150, zorder=2, edgecolor=colors[1], linewidth=3)
ax.scatter(probs[fn], np.zeros(fn.sum()) - shift, c=colors[1], s=150, zorder=2, edgecolor=colors[0], linewidth=3)
ax.set_xlabel(r'$\sigma(z) = P(y=1)$')
ax.set_title('Threshold = {}'.format(threshold))
if annot:
ax.annotate('TN', xy=(.20, .03), c='k', weight='bold', fontsize=20)
ax.annotate('FN', xy=(.20, -.08), c='k', weight='bold', fontsize=20)
ax.annotate('FP', xy=(.70, .03), c='k', weight='bold', fontsize=20)
ax.annotate('TP', xy=(.70, -.08), c='k', weight='bold', fontsize=20)
return ax
def probability_contour(ax, model, device, X, y, threshold, cm=None, cm_bright=None):
if cm is None:
cm = plt.cm.RdBu
if cm_bright is None:
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
h = .02 # step size in the mesh
x_min, x_max = -2.25, 2.25
y_min, y_max = -2.25, 2.25
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
logits = model(torch.as_tensor(np.c_[xx.ravel(), yy.ravel()]).float().to(device))
logits = logits.detach().cpu().numpy().reshape(xx.shape)
yhat = sigmoid(logits)
ax.contour(xx, yy, yhat, levels=[threshold], cmap="Greys", vmin=0, vmax=1)
contour = ax.contourf(xx, yy, yhat, 25, cmap=cm, alpha=.8, vmin=0, vmax=1)
# Plot the training points
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright, edgecolors='k')
# Plot the testing points
#ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_title(r'$\sigma(z) = P(y=1)$')
ax.grid(False)
ax_c = plt.colorbar(contour)
ax_c.set_ticks([0, .25, .5, .75, 1])
return ax
def eval_curves_from_probs(y, probabilities, threshs, line=False, annot=False):
cms = [confusion_matrix(y, (probabilities >= threshold)) for threshold in threshs]
rates = np.array(list(map(tpr_fpr, cms)))
precrec = np.array(list(map(precision_recall, cms)))
return eval_curves(rates[:, 1], rates[:, 0], precrec[:, 1], precrec[:, 0], threshs, line=line, annot=annot)
def eval_curves(fprs, tprs, recalls, precisions, thresholds, thresholds2=None, line=False, annot=False):
fig, axs = plt.subplots(1, 2, figsize=(10, 5))
if thresholds2 is None:
thresholds2 = thresholds[:]
marker = '.r-' if line else '.r'
axs[0].plot(fprs, tprs, marker, markersize=12, linewidth=2)
axs[0].set_xlim([-.05, 1.05])
axs[0].set_ylim([-.05, 1.05])
axs[0].set_xlabel('False Positive Rate')
axs[0].set_ylabel('True Positive Rate')
axs[0].set_title('ROC Curve')
axs[1].plot(recalls, precisions, marker, markersize=12, linewidth=2)
axs[1].set_xlim([-.05, 1.05])
axs[1].set_ylim([-.05, 1.05])
axs[1].set_xlabel('Recall')
axs[1].set_ylabel('Precision')
axs[1].set_title('Precision-Recall Curve')
if annot:
for thresh, fpr, tpr, prec, rec in zip(thresholds, fprs, tprs, precisions, recalls):
axs[0].annotate(str(thresh), xy=(fpr - .03, tpr - .07))
for thresh, fpr, tpr, prec, rec in zip(thresholds2, fprs, tprs, precisions, recalls):
axs[1].annotate(str(thresh), xy=(rec - .03, prec - .07))
fig.tight_layout()
return fig
def figure1(X_train, y_train, X_val, y_val, cm_bright=None):
if cm_bright is None:
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
ax[0].scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)#, edgecolors='k')
ax[0].set_xlabel(r'$X_1$')
ax[0].set_ylabel(r'$X_2$')
ax[0].set_xlim([-2.3, 2.3])
ax[0].set_ylim([-2.3, 2.3])
ax[0].set_title('Generated Data - Train')
ax[1].scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright)#, edgecolors='k')
ax[1].set_xlabel(r'$X_1$')
ax[1].set_ylabel(r'$X_2$')
ax[1].set_xlim([-2.3, 2.3])
ax[1].set_ylim([-2.3, 2.3])
ax[1].set_title('Generated Data - Validation')
fig.tight_layout()
return fig
def figure2(prob1):
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
prob = np.linspace(.01, .99, 99)
for i in [0, 1]:
ax[i].plot(prob, odds(prob), linewidth=2)
ax[i].set_xlabel('Probability')
if i:
ax[i].set_yscale('log')
ax[i].set_ylabel('Odds Ratio (log scale)')
ax[i].set_title('Odds Ratio (log scale)')
else:
ax[i].set_ylabel('Odds Ratio')
ax[i].set_title('Odds Ratio')
ax[i].scatter([prob1, .5, (1-prob1)], [odds(prob1), odds(.5), odds(1-prob1)], c='r')
fig.tight_layout()
return fig
def figure3(prob1):
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
prob = np.linspace(.01, .99, 99)
ax[0].plot(prob, log_odds(prob), linewidth=2)
ax[0].set_xlabel('Probability')
ax[0].set_ylabel('Log Odds Ratio')
ax[0].set_title('Log Odds Ratio')
ax[0].scatter([prob1, .5, (1-prob1)], [log_odds(prob1), log_odds(.5), log_odds(1-prob1)], c='r')
ax[1].plot(log_odds(prob), prob, linewidth=2)
ax[1].set_ylabel('Probability')
ax[1].set_xlabel('Log Odds Ratio')
ax[1].set_title('Probability')
ax[1].scatter([log_odds(prob1), log_odds(.5), log_odds(1-prob1)], [prob1, .5, (1-prob1)], c='r')
fig.tight_layout()
return fig
def figure4(prob1):
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
prob = np.linspace(.01, .99, 99)
ax.plot(log_odds(prob), prob, linewidth=2, c='r')
ax.set_ylabel('Probability')
ax.set_xlabel('Log Odds Ratio')
ax.set_title('Sigmoid')
ax.scatter([log_odds(prob1), log_odds(.5), log_odds(1-prob1)], [prob1, .5, (1-prob1)], c='r')
fig.tight_layout()
return fig
def figure7(X, y, model, device, cm=None, cm_bright=None):
if cm is None:
cm = plt.cm.RdBu
if cm_bright is None:
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
fig = plt.figure(figsize=(15, 4.5))
h = .02 # step size in the mesh
# x_min, x_max = X_train[:, 0].min() - .5, X_train[:, 0].max() + .5
# y_min, y_max = X_train[:, 1].min() - .5, X_train[:, 1].max() + .5
x_min, x_max = -2.25, 2.25
y_min, y_max = -2.25, 2.25
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
logits = model(torch.as_tensor(np.c_[xx.ravel(), yy.ravel()]).float().to(device))
logits = logits.detach().cpu().numpy().reshape(xx.shape)
yhat = sigmoid(logits)
# 1st plot
ax = plt.subplot(1, 3, 1)
contour = ax.contourf(xx, yy, logits, 25, cmap=cm, alpha=.8)
# Plot the training points
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
# Plot the testing points
#ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_title(r'$z = b + w_1x_1 + w_2x_2$')
ax.grid(False)
ax_c = plt.colorbar(contour)
ax_c.set_label("$z$", rotation=0)
# 2nd plot
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(xx, yy, yhat, rstride=1, cstride=1, alpha=.5, cmap=cm, linewidth=0, antialiased=True, vmin=0, vmax=1)
# Plot the training points
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
# Plot the testing points
#ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_title(r'$\sigma(z) = P(y=1)$')
ax_c = plt.colorbar(surf)
ax_c.set_ticks([0, .25, .5, .75, 1])
ax.view_init(30, 220)
# 3rd plot
ax = plt.subplot(1, 3, 3)
ax.contour(xx, yy, yhat, levels=[.5], cmap="Greys", vmin=0, vmax=1)
contour = ax.contourf(xx, yy, yhat, 25, cmap=cm, alpha=.8, vmin=0, vmax=1)
# Plot the training points
ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cm_bright)
# Plot the testing points
#ax.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cm_bright, edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
ax.set_title(r'$\sigma(z) = P(y=1)$')
ax.grid(False)
ax_c = plt.colorbar(contour)
ax_c.set_ticks([0, .25, .5, .75, 1])
plt.tight_layout()
return fig
def one_dimension(x, y, colors=None):
if colors is None:
colors = ['r', 'b']
fig, ax = plt.subplots(1, 1, figsize=(10, 2))
ax.grid(False)
ax.set_ylim([-.1, .1])
ax.axes.get_yaxis().set_visible(False)
ax.plot([-3, 3], [0, 0], linewidth=2, c='k', zorder=1)
ax.plot([0, 0], [-.03, .03], c='k', zorder=1)
ax.scatter(x[y==1], np.zeros_like(x[y==1]), c=colors[1], s=150, zorder=2, linewidth=3)
ax.scatter(x[y==0], np.zeros_like(x[y==0]), c=colors[0], s=150, zorder=2, linewidth=3)
ax.set_xlabel(r'$X_1$')
ax.set_title('One Dimension')
fig.tight_layout()
return fig
def two_dimensions(x, y, colors=None):
if colors is None:
colors = ['r', 'b']
x2 = np.concatenate([x.reshape(-1, 1), (x ** 2).reshape(-1, 1)], axis=1)
fig = plt.figure(figsize=(10, 4.5))
gs = fig.add_gridspec(3, 2)
ax = fig.add_subplot(gs[2, 0])
ax.grid(False)
ax.set_ylim([-.1, .1])
ax.axes.get_yaxis().set_visible(False)
ax.plot([-3, 3], [0, 0], linewidth=2, c='k', zorder=1)
ax.plot([0, 0], [-.03, .03], c='k', zorder=1)
ax.scatter(x[y==1], np.zeros_like(x[y==1]), c=colors[1], s=150, zorder=2, linewidth=3)
ax.scatter(x[y==0], np.zeros_like(x[y==0]), c=colors[0], s=150, zorder=2, linewidth=3)
ax.set_xlabel(r'$X_1$')
ax.set_title('One Dimension')
ax = fig.add_subplot(gs[:, 1])
ax.scatter(*x2[y==1, :].T, c='b', s=150, zorder=2, linewidth=3)
ax.scatter(*x2[y==0, :].T, c='r', s=150, zorder=2, linewidth=3)
ax.plot([-2, 2], [1, 1], 'k--', linewidth=2)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2=X_1^2$')
ax.set_title('Two Dimensions')
fig.tight_layout()
return fig
def figure9(x, y, model, device, probabilities, threshold, shift=0.0, annot=False, cm=None, cm_bright=None):
fig = plt.figure(figsize=(15, 5))
gs = fig.add_gridspec(3, 3)
ax = fig.add_subplot(gs[:, 0])
probability_contour(ax, model, device, x, y, threshold, cm, cm_bright)
if cm_bright is None:
colors = ['r', 'b']
else:
colors = cm_bright.colors
ax = fig.add_subplot(gs[1, 1:])
probability_line(ax, y, probabilities, threshold, shift, annot, colors)
fig.tight_layout()
return fig
def figure10(y, probabilities, threshold, shift, annot, colors=None):
fig, ax = plt.subplots(1, 1, figsize=(10, 2))
probability_line(ax, y, probabilities, threshold, shift, annot, colors)
fig.tight_layout()
return fig
def figure17(y, probabilities, threshs):
cms = [confusion_matrix(y, (probabilities >= threshold)) for threshold in threshs]
rates = np.array(list(map(tpr_fpr, cms)))
precrec = np.array(list(map(precision_recall, cms)))
precrec = np.nan_to_num(precrec, nan=1.)
fig = eval_curves(rates[:, 1], rates[:, 0], precrec[:, 1], precrec[:, 0], threshs, line=True, annot=False)
return fig
def figure19(y, probabilities, threshs=(.4, .5, .57), colors=None):
fig, axs = plt.subplots(3, 1, figsize=(10, 6))
probability_line(axs[0], y, probabilities, threshs[0], 0.0, False, colors)
probability_line(axs[1], y, probabilities, threshs[1], 0.0, False, colors)
probability_line(axs[2], y, probabilities, threshs[2], 0.0, False, colors)
fig.tight_layout()
return fig
def figure20(y):
fpr_perfect, tpr_perfect, thresholds1_perfect = roc_curve(y, y)
prec_perfect, rec_perfect, thresholds2_perfect = precision_recall_curve(y, y)
fig = eval_curves(fpr_perfect, tpr_perfect, rec_perfect, prec_perfect, thresholds1_perfect, thresholds2_perfect, line=True)
return fig
def figure21(y, probabilities):
fpr_random, tpr_random, thresholds1_random = roc_curve(y, probabilities)
prec_random, rec_random, thresholds2_random = precision_recall_curve(y, probabilities)
fig = eval_curves(fpr_random, tpr_random, rec_random, prec_random, thresholds1_random, thresholds2_random, line=True)
axs = fig.axes
axs[0].plot([0, 1], [0, 1], 'k--', linewidth=2)
axs[1].plot([0, 1], [y.mean(), y.mean()], 'k--', linewidth=2)
return fig
|
191711
|
import json
import shutil
from typing import Any, Dict
import joblib
from sklearn.pipeline import Pipeline
def save_model(
pipe: Pipeline,
target_names_mapping: Dict[int, str],
config: Dict[str, Any],
) -> None:
"""Save:
- model pipeline (tf-idf + logreg)
- target names mapping
- config
- hyper-parameters grid (from config)
Args:
pipe (Pipeline): Model pipeline (tf-idf + logreg).
target_names_mapping (Dict[int, str]): Name for each class.
config (Dict[str, Any]): Config.
"""
# save pipe
joblib.dump(pipe, config["path_to_save_model"])
# save target names mapping
with open(config["path_to_save_target_names_mapping"], mode="w") as fp:
json.dump(target_names_mapping, fp)
# save config
shutil.copy2(config["path_to_config"], config["path_to_save_folder"])
# save hyperparams grid
if config["grid-search"]["do_grid_search"]:
shutil.copy2(
config["grid-search"]["grid_search_params_path"],
config["path_to_save_folder"],
)
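# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how save_model might be called. The pipeline steps, paths and
# target names below are assumptions for demonstration only; the config keys mirror
# the ones accessed above.
def _example_save_model_usage() -> None:
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression

    pipe = Pipeline(
        steps=[
            ("tf-idf", TfidfVectorizer()),
            ("logreg", LogisticRegression()),
        ]
    )
    config = {
        "path_to_save_model": "model.joblib",
        "path_to_save_target_names_mapping": "target_names.json",
        "path_to_config": "config.yaml",
        "path_to_save_folder": ".",
        "grid-search": {"do_grid_search": False},
    }
    save_model(pipe, target_names_mapping={0: "negative", 1: "positive"}, config=config)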
|
191744
|
import pytest
from pyDEA.core.models.envelopment_model_base import EnvelopmentModelBase
from pyDEA.core.models.envelopment_model import EnvelopmentModelOutputOriented
from pyDEA.core.models.envelopment_model_decorators import DefaultConstraintCreator
from pyDEA.core.models.envelopment_model import EnvelopmentModelOutputOrientedWithNonDiscVars
from pyDEA.core.data_processing.read_data_from_xls import read_data
from pyDEA.core.data_processing.read_data_from_xls import convert_to_dictionary
from pyDEA.core.data_processing.read_data_from_xls import construct_input_data_instance
from pyDEA.core.data_processing.read_data_from_xls import validate_data
from pyDEA.core.models.bound_generators import generate_lower_bound_for_efficiency_score
from pyDEA.core.utils.dea_utils import clean_up_pickled_files
from tests.test_CRS_env_input_oriented_model import data
import tests.utils_for_tests as utils_for_tests
@pytest.fixture
def model(request, data):
model = EnvelopmentModelBase(
data,
EnvelopmentModelOutputOriented(
generate_lower_bound_for_efficiency_score),
DefaultConstraintCreator())
return model
def test_CRS_env_output_oriented_small(model, data):
model_solution = model.run()
utils_for_tests.check_optimal_solution_status_and_sizes(
model_solution, data)
dmus = ['A', 'B', 'C', 'D', 'E']
utils_for_tests.check_efficiency_scores(dmus,
[0.5, 1, 1 / 1.2, 1 / 1.4, 1],
model_solution, data)
utils_for_tests.check_lambda_variables('A', 'B', 1, model_solution, data)
utils_for_tests.check_lambda_variables('B', 'B', 1, model_solution, data)
utils_for_tests.check_lambda_variables('C', 'B', 1.2, model_solution, data)
utils_for_tests.check_lambda_variables('C', 'E', 0.6, model_solution, data)
utils_for_tests.check_lambda_variables('D', 'B', 0.3, model_solution, data)
utils_for_tests.check_lambda_variables('D', 'E', 0.4, model_solution, data)
utils_for_tests.check_lambda_variables('E', 'E', 1, model_solution, data)
utils_for_tests.check_categories_for_dmu('A', ['x1', 'x2', 'q'],
[1, 0, 1], model_solution, data)
utils_for_tests.check_categories_for_dmu('B', ['x1', 'x2', 'q'],
[0.1, 0.2, 0.5], model_solution,
data)
utils_for_tests.check_categories_for_dmu('C', ['x1', 'x2', 'q'],
[0.066666667, 0.13333333,
0.33333333],
model_solution, data)
utils_for_tests.check_categories_for_dmu('D', ['x1', 'x2', 'q'],
[0.2, 0.4, 1], model_solution,
data)
utils_for_tests.check_categories_for_dmu('E', ['x1', 'x2', 'q'],
[0.1, 0.2, 0.5], model_solution,
data)
def test_CRS_env_output_oriented_non_disc_vars():
categories, xls_data, dmu_name, sheet_name = read_data(
'tests/DEA_example_data.xls')
coefficients, has_same_dmus = convert_to_dictionary(xls_data)
assert has_same_dmus is False
assert validate_data(categories, coefficients) is True
data = construct_input_data_instance(categories, coefficients)
data.add_input_category('I1')
data.add_input_category('I2')
data.add_output_category('ND1')
data.add_output_category('O1')
data.add_output_category('O2')
model = EnvelopmentModelBase(
data,
EnvelopmentModelOutputOrientedWithNonDiscVars(
set(['ND1']),
generate_lower_bound_for_efficiency_score),
DefaultConstraintCreator())
model_solution = model.run()
utils_for_tests.check_optimal_solution_status_and_sizes(model_solution,
data)
dmus = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
utils_for_tests.check_efficiency_scores(dmus, [1, 1, 1 / 1.1698685, 1,
1 / 2.3749162, 1,
1, 1 / 1.3198374,
1 / 1.2115385,
0.5, 1], model_solution,
data)
clean_up_pickled_files()
|
191753
|
from django import forms
from .models import Article
from .models import BoardType
from django_summernote.widgets import SummernoteWidget
# Article form
class ArticleForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
kwargs.setdefault('label_suffix','')
super(ArticleForm,self).__init__(*args,**kwargs)
class Meta:
model = Article
fields = ('title', 'text',) # allow the title and text fields to be edited
title = forms.CharField(required=True, label="제목")
text = forms.CharField(widget=SummernoteWidget, label="내용")
|
191764
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
name='Great Circle v2',
ext_modules=cythonize("great_circle_v2.pyx"),
)
|
191776
|
import sys
sys.path.append('..')
import re
import urllib
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
from newsplease import NewsPlease
def get_content_onlinekhabar(link):
"""This function extract the contants from onlinakhabar.com
Arguments:
link {string} -- [Link for onlinekhabar news]
Returns:
[string] -- [News content from the link]
"""
req = Request(link, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
soup = BeautifulSoup(webpage, 'html.parser')
text = ''
for tex_ in soup.find_all("p")[:-1]:
text = text + " " + tex_.getText()
return ''.join(text)
def get_content_ekantipur(link):
"""This function helps in extracting the news from ekantipur.com
Arguments:
link {string} -- [News link from ekantipur site.]
Raises:
ValueError: [If unable to extract news from given link]
Returns:
[string] -- [News content from the link]
"""
req = Request(link, headers={'User-Agent': 'mozilla/5.0'})
webpage = urlopen(req).read()
soup = BeautifulSoup(webpage, 'html.parser')
try:
for text_ in soup.find_all(attrs={'class': 'description'}):
return text_.text[:text_.text.find('प्रकाशित ')]
except:
raise ValueError('Unable to extract from the link given.')
def extract_news(link):
"""This function extract news from given link.
Arguments:
link {string} -- [Link of news article.]
Raises:
ValueError: [Raise error if link is not for ekantipur/onlinekhabar]
Returns:
[tuple(title, sample_text)] -- [Title: Title of the news, sample_text: news article that has been extracted from the link given.]
"""
if 'onlinekhabar.com' in link:
sample_text = get_content_onlinekhabar(link)
elif 'ekantipur.com' in link:
sample_text = get_content_ekantipur(link)
else:
raise ValueError('Currently we work with onlinekhabar and ekantipur only. Other sites will be added soon.')
article = NewsPlease.from_url(link)
title = article.title
return (title, sample_text)
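# Illustrative usage (not part of the original module). Requires network access and a
# real article URL from one of the two supported sites; the URL below is a placeholder:
#   title, body = extract_news('https://ekantipur.com/news/<article-slug>')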
|
191795
|
import unittest
import unittest.mock
import edifice._component as component
import edifice.engine as engine
import edifice.base_components as base_components
from edifice.qt import QT_VERSION
if QT_VERSION == "PyQt5":
from PyQt5 import QtWidgets
else:
from PySide2 import QtWidgets
if QtWidgets.QApplication.instance() is None:
app = QtWidgets.QApplication(["-platform", "offscreen"])
class TestReference(unittest.TestCase):
def test_reference(self):
class TestComp(component.Component):
def __init__(self):
super().__init__()
self.render_count = 0
self.ref1 = component.Reference()
self.ref2 = component.Reference()
def render(self):
self.render_count += 1
if self.render_count == 1:
return base_components.Label("Test").register_ref(self.ref1)
else:
return base_components.Label("Test").register_ref(self.ref2)
class TestCompWrapper(component.Component):
def __init__(self):
super().__init__()
self.render_count = 0
def render(self):
self.render_count += 1
if self.render_count == 3:
# We do this to force the dismount of TestComp
return base_components.Label("Test")
else:
return TestComp()
root = TestCompWrapper()
render_engine = engine.RenderEngine(root)
render_engine._request_rerender([root])
sub_comp = render_engine._component_tree[root]
label_comp = render_engine._component_tree[sub_comp]
self.assertEqual(sub_comp.ref1(), label_comp)
self.assertEqual(sub_comp.ref2(), None)
# Rerender so that ref2 should also point to label
render_engine._request_rerender([root])
new_sub_comp = render_engine._component_tree[root]
new_label = render_engine._component_tree[new_sub_comp]
self.assertEqual(new_sub_comp, sub_comp)
self.assertEqual(new_label, label_comp)
self.assertEqual(sub_comp.ref1(), label_comp)
self.assertEqual(sub_comp.ref2(), label_comp)
# Rerender to test dismount behavior
render_engine._request_rerender([root])
new_sub_comp = render_engine._component_tree[root]
assert sub_comp not in render_engine._component_tree
self.assertEqual(sub_comp.ref1(), None)
self.assertEqual(sub_comp.ref2(), None)
class _TestComponentInner(component.Component):
@component.register_props
def __init__(self, prop_a):
self.state_a = "A"
def render(self):
return base_components.View()(
base_components.Label(self.props.prop_a),
base_components.Label(self.state_a),
)
class _TestComponentOuter(component.Component):
"""
The rendered tree should be (with index address):
View( # []
View( # [0]
Label, # [0, 0]
Label) # [0, 1]
View( # [1]
Label, # [1, 0]
Label) # [1, 1]
Label # [2]
)
"""
@component.register_props
def __init__(self):
self.state_a = "A"
self.state_b = "B"
self.state_c = "C"
def render(self):
return base_components.View()(
_TestComponentInner(self.state_a),
_TestComponentInner(self.state_b),
base_components.Label(self.state_c),
)
class _TestComponentOuterList(component.Component):
"""
The rendered tree should be (with index address):
View( # []
View( # [0]
Label, # [0, 0]
Label) # [0, 1]
View( # [1]
Label, # [1, 0]
Label) # [1, 1]
...
)
"""
def __init__(self, use_keys, use_state_as_key):
super().__init__()
self.use_keys = use_keys
self.use_state_as_key = use_state_as_key
self.state = ["A", "B", "C"]
def render(self):
if self.use_keys:
if self.use_state_as_key:
return base_components.View()(
*[_TestComponentInner(text).set_key(text) for text in self.state]
)
else:
return base_components.View()(
*[_TestComponentInner(text).set_key(str(i)) for i, text in enumerate(self.state)]
)
return base_components.View()(
*[_TestComponentInner(text) for text in self.state]
)
def _commands_for_address(qt_tree, address):
qt_tree = qt_tree._dereference(address)
if isinstance(qt_tree.component, base_components.View):
return qt_tree.component._qt_stateless_commands(qt_tree.children, qt_tree.component.props, {})
return qt_tree.component._qt_update_commands(qt_tree.children, qt_tree.component.props, {})
class RenderTestCase(unittest.TestCase):
def test_basic_render(self):
component = _TestComponentOuter()
app = engine.RenderEngine(component)
render_result = app._request_rerender([component])
qt_tree = render_result.trees[0]
qt_commands = render_result.commands
def C(*args):
return _commands_for_address(qt_tree, args)
def V(*args):
view = qt_tree._dereference(args)
return [(view.component._add_child, i, child.component.underlying)
for (i, child) in enumerate(view.children)]
expected_commands = C(0, 0) + C(0, 1) + V(0) + C(0) + C(1, 0) + C(1, 1) + V(1) + C(1) + C(2) + V() + C()
self.assertEqual(qt_commands, expected_commands)
# After everything rendered, a rerender shouldn't involve any commands
# TODO: make sure this is actually true!
render_result = app._request_rerender([component])
qt_tree = render_result.trees[0]
qt_commands = render_result.commands
self.assertEqual(qt_commands, [])
def test_state_changes(self):
component = _TestComponentOuter()
app = engine.RenderEngine(component)
render_result = app._request_rerender([component])
qt_tree = render_result.trees[0]
qt_commands = render_result.commands
component.state_a = "AChanged"
render_result = app._request_rerender([component])
new_qt_tree = render_result.trees[0]
qt_commands = render_result.commands
# TODO: Make it so that only the label (0, 0) needs to update!
expected_commands = [(qt_tree._dereference([0, 0]).component.underlying.setText, "AChanged")]
self.assertEqual(qt_commands, expected_commands)
component.state_b = "BChanged"
render_result = app._request_rerender([component])
qt_commands = render_result.commands
expected_commands = [(qt_tree._dereference([1, 0]).component.underlying.setText, "BChanged")]
self.assertEqual(qt_commands, expected_commands)
component.state_c = "CChanged"
render_result = app._request_rerender([component])
new_qt_tree = render_result.trees[0]
qt_commands = render_result.commands
expected_commands = [(qt_tree._dereference([2]).component.underlying.setText, "CChanged")]
self.assertEqual(qt_commands, expected_commands)
def test_keyed_list_add(self):
component = _TestComponentOuterList(True, True)
app = engine.RenderEngine(component)
render_result = app._request_rerender([component])
qt_tree = render_result.trees[0]
qt_commands = render_result.commands
component.state = ["A", "B", "D", "C"]
render_result = app._request_rerender([component])
_new_qt_tree = render_result.trees[0]
qt_commands = render_result.commands
def new_V(*args):
view = _new_qt_tree._dereference(args)
return [(view.component._add_child, i, child.component.underlying)
for (i, child) in enumerate(view.children)]
self.assertEqual(_new_qt_tree._dereference([2, 0]).component.props.text, "D")
def new_C(*args):
return _commands_for_address(_new_qt_tree, args)
expected_commands = (new_C(2, 0) + new_C(2, 1) + new_V(2) + new_C(2) +
[(qt_tree.component._add_child, 2, _new_qt_tree.children[2].component.underlying)])
self.assertEqual(qt_commands, expected_commands)
def test_keyed_list_reshuffle(self):
component = _TestComponentOuterList(True, True)
app = engine.RenderEngine(component)
render_result = app._request_rerender([component])
qt_tree = render_result.trees[0]
qt_commands = render_result.commands
old_child0 = qt_tree.children[0].component
old_child2 = qt_tree.children[2].component
component.state = ["C", "B", "A"]
render_result = app._request_rerender([component])
_new_qt_tree = render_result.trees[0]
qt_commands = render_result.commands
expected_commands = (
[(qt_tree.component._add_child, 0, qt_tree.children[2].component.underlying)]
+ [(qt_tree.component._add_child, 1, qt_tree.children[1].component.underlying)])
self.assertEqual(qt_commands, expected_commands)
def test_keyed_list_nochange(self):
component = _TestComponentOuterList(True, False)
app = engine.RenderEngine(component)
render_result = app._request_rerender([component])
qt_tree = render_result.trees[0]
qt_commands = render_result.commands
component.state = ["C", "B", "A"]
render_result = app._request_rerender([component])
_new_qt_tree = render_result.trees[0]
qt_commands = render_result.commands
expected_commands = [(qt_tree._dereference([0, 0]).component.underlying.setText, "C"), (qt_tree._dereference([2, 0]).component.underlying.setText, "A")]
self.assertEqual(qt_commands, expected_commands)
def test_keyed_list_delete_child(self):
component = _TestComponentOuterList(True, True)
app = engine.RenderEngine(component)
render_result = app._request_rerender([component])
qt_tree = render_result.trees[0]
old_child = qt_tree.children[2].component
qt_commands = render_result.commands
component.state = ["A", "B"]
render_result = app._request_rerender([component])
_new_qt_tree = render_result.trees[0]
qt_commands = render_result.commands
expected_commands = [(qt_tree.component._delete_child, 2, old_child)]
self.assertEqual(qt_commands, expected_commands)
def test_one_child_rerender(self):
class TestCompInner(component.Component):
@component.register_props
def __init__(self, val):
self.count = 0
def render(self):
self.count += 1
return base_components.Label(self.props.val)
class TestCompOuter(component.Component):
def render(self):
return base_components.View()(TestCompInner(self.value))
test_comp = TestCompOuter()
test_comp.value = 2
app = engine.RenderEngine(test_comp)
render_result = app._request_rerender([test_comp])
inner_comp = app._component_tree[app._component_tree[test_comp]][0]
self.assertEqual(inner_comp.count, 1)
self.assertEqual(inner_comp.props.val, 2)
test_comp.value = 4
render_result = app._request_rerender([test_comp])
inner_comp = app._component_tree[app._component_tree[test_comp]][0]
self.assertEqual(inner_comp.count, 2)
self.assertEqual(inner_comp.props.val, 4)
def test_render_exception(self):
class TestCompInner1(component.Component):
@component.register_props
def __init__(self, val):
self.count = 0
self.success_count = 0
def render(self):
self.count += 1
self.success_count += 1
return base_components.Label(self.props.val)
class TestCompInner2(component.Component):
@component.register_props
def __init__(self, val):
self.count = 0
self.success_count = 0
def render(self):
self.count += 1
assert self.props.val == 8
self.success_count += 1
return base_components.Label(self.props.val)
class TestCompOuter(component.Component):
def render(self):
return base_components.View()(
TestCompInner1(self.value * 2),
TestCompInner2(self.value * 4),
)
test_comp = TestCompOuter()
test_comp.value = 2
app = engine.RenderEngine(test_comp)
render_result = app._request_rerender([test_comp])
inner_comp1, inner_comp2 = app._component_tree[app._component_tree[test_comp]]
self.assertEqual(inner_comp1.count, 1)
self.assertEqual(inner_comp1.props.val, 4)
self.assertEqual(inner_comp2.count, 1)
self.assertEqual(inner_comp2.props.val, 8)
test_comp.value = 3
try:
render_result = app._request_rerender([test_comp])
except AssertionError:
pass
inner_comp1, inner_comp2 = app._component_tree[app._component_tree[test_comp]]
self.assertEqual(inner_comp1.props.val, 4)
self.assertEqual(inner_comp2.count, 2)
self.assertEqual(inner_comp2.success_count, 1)
self.assertEqual(inner_comp2.props.val, 8)
class RefreshClassTestCase(unittest.TestCase):
def test_refresh_child(self):
class OldInnerClass(component.Component):
@component.register_props
def __init__(self, val):
self.count = 0
self.will_unmount = unittest.mock.MagicMock()
def render(self):
self.count += 1
return base_components.Label(self.props.val)
class NewInnerClass(component.Component):
@component.register_props
def __init__(self, val):
self.count = 0
def render(self):
self.count += 1
return base_components.Label(self.props.val * 2)
class OuterClass(component.Component):
@component.register_props
def __init__(self):
self.count = 0
def render(self):
self.count += 1
return base_components.View()(
OldInnerClass(5)
)
outer_comp = OuterClass()
app = engine.RenderEngine(outer_comp)
app._request_rerender([outer_comp])
old_inner_comp = app._component_tree[app._component_tree[outer_comp]][0]
assert isinstance(old_inner_comp, OldInnerClass)
app._refresh_by_class([(OldInnerClass, NewInnerClass)])
inner_comp = app._component_tree[app._component_tree[outer_comp]][0]
old_inner_comp.will_unmount.assert_called_once()
assert isinstance(inner_comp, NewInnerClass)
self.assertEqual(inner_comp.props.val, 5)
def test_refresh_child_error(self):
class OldInnerClass(component.Component):
@component.register_props
def __init__(self, val):
self.count = 0
self.will_unmount = unittest.mock.MagicMock()
def render(self):
self.count += 1
return base_components.Label(self.props.val)
class NewInnerClass(component.Component):
@component.register_props
def __init__(self, val):
self.count = 0
def render(self):
self.count += 1
assert False
return base_components.Label(self.props.val * 2)
class OuterClass(component.Component):
@component.register_props
def __init__(self):
self.count = 0
def render(self):
self.count += 1
return base_components.View()(
OldInnerClass(5)
)
outer_comp = OuterClass()
app = engine.RenderEngine(outer_comp)
app._request_rerender([outer_comp])
old_inner_comp = app._component_tree[app._component_tree[outer_comp]][0]
assert isinstance(old_inner_comp, OldInnerClass)
try:
app._refresh_by_class([(OldInnerClass, NewInnerClass)])
except AssertionError:
pass
inner_comp = app._component_tree[app._component_tree[outer_comp]][0]
old_inner_comp.will_unmount.assert_not_called()
assert isinstance(inner_comp, OldInnerClass)
self.assertEqual(inner_comp.props.val, 5)
if __name__ == "__main__":
unittest.main()
|
191806
|
from enum import Enum
class ModelWeightsStatus(Enum):
NO_INFO = 0
SUCCESS = 1
MODEL_NOT_FOUND = 2
WIP = 3
|
191822
|
import parl
from parl import layers
class Model(parl.Model):
def __init__(self, act_dim):
self.conv1 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.conv2 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.conv3 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.conv4 = layers.conv2d(num_filters=32, filter_size=3, stride=2, padding=1, act='relu')
self.fc = layers.fc(size=512, act='relu')
self.policy_fc = layers.fc(size=act_dim)
self.value_fc = layers.fc(size=1)
def policy(self, obs):
"""
Args:
obs: input image with shape [N, C, H, W]
Returns:
policy_logits: N * ACTION_DIM
"""
conv1 = self.conv1(obs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
flatten = layers.flatten(conv4, axis=1)
fc_output = self.fc(flatten)
policy_logits = self.policy_fc(fc_output)
return policy_logits
def value(self, obs):
"""
Args:
obs: input image with shape [N, C, H, W]
Returns:
values: N
"""
conv1 = self.conv1(obs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
flatten = layers.flatten(conv4, axis=1)
fc_output = self.fc(flatten)
values = self.value_fc(fc_output)
values = layers.squeeze(values, axes=[1])
return values
def policy_and_value(self, obs):
"""
Args:
obs: input image with shape [N, C, H, W]
Returns:
policy_logits: N * ACTION_DIM
values: N
"""
conv1 = self.conv1(obs)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
flatten = layers.flatten(conv4, axis=1)
fc_output = self.fc(flatten)
policy_logits = self.policy_fc(fc_output)
values = self.value_fc(fc_output)
values = layers.squeeze(values, axes=[1])
return policy_logits, values
|
191846
|
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
def send_job_tests_details(created, instance):
def data():
result = {}
# Test instance update for tests table
test_item = dict()
test_item['uuid'] = instance.uuid
if instance.start_time: # Initial Tests post_save signal will not have start_time for test item
test_item['start_time'] = instance.get_start_time()
if instance.stop_time:
test_item['stop_time'] = instance.get_stop_time()
if instance.time_taken:
test_item['time_taken'] = instance.get_time_taken()
else:
test_item['time_taken'] = None
try:
test_item['time_taken_eta'] = instance.test.get_time_taken_eta()
except:
test_item['time_taken_eta'] = None
test_item['status'] = instance.status
result['test'] = test_item
result['test_count'] = str(instance.job.tests.count())
result['not_started'] = str(instance.job.tests.filter(status=1).count())
result['passed'] = str(instance.job.tests.filter(status=3).count())
result['failed'] = str(instance.job.tests.filter(status=4).count())
result['skipped'] = str(instance.job.tests.filter(status=5).count())
result['aborted'] = str(instance.job.tests.filter(status=6).count())
return result
if created:
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
"job_tests_details" + "-" + instance.job.uuid,
{
"type": "message",
"message": data()
}
)
if instance:
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)(
"job_tests_details" + "-" + instance.job.uuid,
{
"type": "message",
"message": data()
}
)
|
191862
|
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Union
import havsfunc as haf
import lvsfunc as lvf
import vapoursynth as vs
import vardefunc as vdf
from vsutil import (depth, fallback, get_depth, get_w, get_y, insert_clip,
iterate, join, plane)
core = vs.core
def rescaler(clip: vs.VideoNode, height: int, shader_file: str) -> Tuple[vs.VideoNode, vs.VideoNode]:
"""
Basic rescaling and mask generating function using FSRCNNX/nnedi3.
:param clip: Source clip
:param height: Height to descale to
:param shader_file: Path to the FSRCNNX shader file used for the upscale
:return: Rescaled clip, descale detail mask
"""
descale = lvf.kernels.Bicubic().descale(get_y(clip), get_w(height), height)
rescale_fsrcnx = vdf.scale.fsrcnnx_upscale(descale, shader_file=shader_file)
rescale_nnedi3 = vdf.scale.nnedi3_upscale(descale, pscrn=1).resize.Bicubic(clip.width, clip.height)
rescale = core.std.Merge(rescale_fsrcnx, rescale_nnedi3)
scaled = join([rescale, plane(clip, 1), plane(clip, 2)])
upscale = lvf.kernels.Bicubic().scale(descale, 1920, 1080)
detail_mask = lvf.scale.descale_detail_mask(clip, upscale, threshold=0.045)
return depth(scaled, 16), core.std.Expr(detail_mask, 'x 65535 *', vs.GRAY16)
def detail_mask(clip: vs.VideoNode,
sigma: float = 1.0, rxsigma: List[int] = [50, 200, 350],
pf_sigma: Optional[float] = 1.0,
rad: int = 3, brz: Tuple[int, int] = (2500, 4500),
rg_mode: int = 17,
) -> vs.VideoNode:
"""
A detail mask aimed at preserving as much detail as possible
within darker areas, even if it contains mostly noise.
"""
import kagefunc as kgf
bits = get_depth(clip)
if bits != 16:
clip = depth(clip, 16)
clip_y = get_y(clip)
pf = core.bilateral.Gaussian(clip_y, sigma=pf_sigma) if pf_sigma else clip_y
ret = core.retinex.MSRCP(pf, sigma=rxsigma, upper_thr=0.005)
blur_ret = core.bilateral.Gaussian(ret, sigma=sigma)
blur_ret_diff = core.std.Expr([blur_ret, ret], "x y -")
blur_ret_dfl = core.std.Deflate(blur_ret_diff)
blur_ret_ifl = iterate(blur_ret_dfl, core.std.Inflate, 4)
blur_ret_brz = core.std.Binarize(blur_ret_ifl, brz[0])
blur_ret_brz = core.morpho.Close(blur_ret_brz, size=8)
kirsch = kgf.kirsch(clip_y).std.Binarize(brz[1])
kirsch_ifl = core.std.Deflate(kirsch).std.Inflate()
kirsch_brz = core.std.Binarize(kirsch_ifl, brz[1])
kirsch_brz = core.morpho.Close(kirsch_brz, size=4)
morpho_mask = morpho_mask_simple(clip, radius=rad)
merged = core.std.Expr([blur_ret_brz, kirsch_brz, morpho_mask], "x y + z +")
rm_grain = core.rgvs.RemoveGrain(merged, rg_mode)
return rm_grain if bits == 16 else depth(rm_grain, bits)
def morpho_mask_simple(clip: vs.VideoNode, radius: int = 3, **mode: str) -> vs.VideoNode:
clip_y = plane(clip, 0)
refa = haf.mt_inpand_multi(haf.mt_expand_multi(clip_y, sw=radius, sh=radius, **mode), sw=radius, sh=radius, **mode)
refb = haf.mt_expand_multi(haf.mt_inpand_multi(clip_y, sw=radius, sh=radius, **mode), sw=radius, sh=radius, **mode)
return core.std.Expr([clip_y, refa, refb], 'x y - abs x z - abs max')
def placebo_debander(clip: vs.VideoNode, grain: int = 4, **deband_args: Any) -> vs.VideoNode:
return join([ # Still not sure why splitting it up into planes is faster, but hey!
core.placebo.Deband(plane(clip, 0), grain=grain, **deband_args),
core.placebo.Deband(plane(clip, 1), grain=0, **deband_args),
core.placebo.Deband(plane(clip, 2), grain=0, **deband_args)
])
def mt_xxpand_multi(clip: vs.VideoNode, # noqa
sw: int = 1, sh: Optional[int] = None,
mode: str = 'square',
planes: Union[List[int], int, None] = None, start: int = 0,
M__imum: Any = core.std.Maximum,
**params: Any) -> List[vs.VideoNode]:
sh = fallback(sh, sw)
assert clip.format is not None
planes = fallback(planes, list(range(clip.format.num_planes)))
if mode == 'ellipse':
coordinates = [[1]*8, [0, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 0, 1, 0]]
elif mode == 'losange':
coordinates = [[0, 1, 0, 1, 1, 0, 1, 0]] * 3
else:
coordinates = [[1]*8] * 3
clips = [clip]
end = min(sw, sh) + start
for x in range(start, end):
clips += [M__imum(clips[-1], coordinates=coordinates[x % 3], planes=planes, **params)]
for x in range(end, end + sw - sh):
clips += [M__imum(clips[-1], coordinates=[0, 0, 0, 1, 1, 0, 0, 0], planes=planes, **params)]
for x in range(end, end + sh - sw):
clips += [M__imum(clips[-1], coordinates=[0, 1, 0, 0, 0, 0, 1, 0], planes=planes, **params)]
return clips
maxm = partial(mt_xxpand_multi, M__imum=core.std.Maximum)
minm = partial(mt_xxpand_multi, M__imum=core.std.Minimum)
def zzdeband(clip: vs.VideoNode, denoised: bool = False, mask: int = 0,
f3kdb_args: Dict[str, Any] = {}, placebo_args: Dict[str, Any] = {}
) -> Union[vs.VideoNode, Any]:
"""
Written by Zastin, *CAUTIOUSLY* modified by puny little me
This is all pure black magic to me,
so I'm just gonna pretend I didn't see anything.
"""
import zzfunc as zzf
plcbo_args: Dict[str, Any] = dict(iterations=3, threshold=5, radius=16, grain=0)
plcbo_args.update(placebo_args)
dumb3kdb_args: Dict[str, Any] = dict(radius=16, threshold=[30, 0], grain=0)
dumb3kdb_args.update(f3kdb_args)
brz = 256 if denoised else 384
clip_depth = get_depth(clip)
if clip_depth != 16:
clip = depth(clip, 16)
clip_y = plane(clip, 0)
ymax = maxm(clip_y, sw=30, mode='ellipse')
ymin = minm(clip_y, sw=30, mode='ellipse')
# edge detection
thr = 3.2 * 256
ypw0 = clip_y.std.Prewitt()
ypw = ypw0.std.Binarize(thr).rgvs.RemoveGrain(11)
if mask == 1:
return ypw
# range masks (neighborhood max - min)
rad, thr = 3, 2.5 * 256
yrangesml = core.std.Expr([ymax[3], ymin[3]], 'x y - abs')
yrangesml = yrangesml.std.Binarize(thr).std.BoxBlur(0, 2, 1, 2, 1)
if mask == 2:
return yrangesml
rad, thr = 14, 6.5 * 256
yrangebig0 = core.std.Expr([ymax[rad], ymin[rad]], 'x y - abs')
yrangebig = yrangebig0.std.Binarize(thr)
yrangebig = minm(yrangebig, sw=rad * 3 // 4, threshold=65536 // ((rad * 3 // 4) + 1), mode='ellipse')[-1]
yrangebig = yrangebig.std.BoxBlur(0, rad//4, 1, rad//4, 1)
if mask == 3:
return yrangebig
# morphological masks (shapes)
rad = 30
ymph = core.std.Expr([clip_y, maxm(ymin[rad], sw=rad, mode='ellipse')[rad],
minm(ymax[rad], sw=rad, mode='ellipse')[rad]], 'x y - z x - max')
ymph = ymph.std.Binarize(brz)
ymph = ymph.std.Minimum().std.Maximum()
ymph = ymph.std.BoxBlur(0, 4, 1, 4, 1)
if mask == 4:
return ymph
grad_mask = zzf.combine([ymph, yrangesml, ypw])
if mask == 5:
return grad_mask
ydebn_strong = clip_y.placebo.Deband(1, **plcbo_args)
ydebn_normal = vdf.deband.dumb3kdb(clip_y, **dumb3kdb_args)
ydebn = ydebn_strong.std.MaskedMerge(ydebn_normal, grad_mask)
ydebn = ydebn.std.MaskedMerge(clip_y, yrangebig)
merged = join([ydebn, plane(clip, 1), plane(clip, 2)])
return merged if clip_depth == 16 else depth(merged, clip_depth)
def panner_x(clip: vs.VideoNode, image: str, acc: float = 1.0) -> vs.VideoNode:
"""Written by Varde, stolen by yours truly"""
clip60 = haf.ChangeFPS(clip, 60000, 1001)
panlarge = core.imwri.Read(image)
step_x = (panlarge.width - 1920) / clip60.num_frames
newpan = core.std.BlankClip(panlarge, 1920, 1080, length=1)
for i in range(clip60.num_frames):
acc = (i / clip60.num_frames) ** acc
x_e, x_v = divmod(i * step_x, 1)
crop = core.std.CropAbs(panlarge, 1921, 1080, int(x_e), 0)
newpan += core.resize.Bicubic(crop, src_left=x_v).std.Crop(right=1)
return core.std.AssumeFPS(newpan[1:], clip60).resize.Bicubic(format=vs.YUV420P16, matrix=1)
_dfttest_args: Any = dict(smode=0, sosize=0, tbsize=1, tosize=0, tmode=0, planes=[0])
_slocation: List[float] = [0.0, 4, 0.35, 16, 0.4, 512, 1.0, 512]
def extractFrequency(clip: vs.VideoNode,
slocation: List[float] = _slocation
) -> vs.VideoNode:
"""
Denoise certain frequencies
"""
return core.dfttest.DFTTest(clip, sbsize=9, slocation=slocation, **_dfttest_args)
def mergeFrequency(extracted_clip: vs.VideoNode,
denoised_clip: vs.VideoNode,
slocation: List[float] = _slocation
) -> vs.VideoNode:
"""
Merge certain frequencies with a denoised clip
"""
den = core.dfttest.DFTTest(denoised_clip, sbsize=9, slocation=slocation, **_dfttest_args)
hif = core.std.MakeDiff(denoised_clip, den)
return core.std.MergeDiff(extracted_clip, hif)
def multi_denoise(clip: vs.VideoNode, mask: vs.VideoNode, rep: int = 17) -> vs.VideoNode:
"""Multiple denoising stages all done in one function"""
decs = vdf.noise.decsiz(clip, sigmaS=10, min_in=200 << 8, max_in=240 << 8)
denoise = haf.SMDegrain(decs, thSAD=200, tr=3, contrasharp=True, RefineMotion=True, pel=4, subpixel=3, prefilter=3)
efrq = extractFrequency(decs) # Found out post-encode that this result goes unused (oops); kept as-is, thanks flake8 # noqa
mfrq = mergeFrequency(denoise, denoise)
mrep = core.rgvs.Repair(mfrq, decs, rep)
return core.std.MaskedMerge(mrep, decs, mask)
def fader(clip: vs.VideoNode,
start_frame: int, end_frame: int,
duration: Optional[int] = None, input_frame: Optional[int] = None
) -> vs.VideoNode:
"""
A function to fade a part of a given clip into itself.
Hyper specific, and probably only useful for clip-to-black fades or something.
start_frame and end_frame are for trimming the clip. Exclusive.
duration determines how long the fade is.
input_frame determines where in the clip the faded clip gets inserted.
"""
import kagefunc as kgf
duration = duration or (end_frame - start_frame)
input_frame = input_frame or start_frame
fade = kgf.crossfade(clip[start_frame] * duration, clip[end_frame] * duration, duration - 1)
return insert_clip(clip, fade, input_frame)
def clamp_aa(clip: vs.VideoNode, strength: float = 1.0) -> vs.VideoNode:
aa_weak = lvf.aa.nneedi3_clamp(clip)
aa_strong = lvf.aa.upscaled_sraa(clip)
return lvf.aa.clamp_aa(clip, aa_weak, aa_strong, strength=strength)
def multi_debander(clip: vs.VideoNode, old_clip: vs.VideoNode) -> vs.VideoNode:
mask = detail_mask(old_clip, brz=(1000, 3500), pf_sigma=False)
deband = vdf.deband.dumb3kdb(get_y(clip), radius=17, threshold=40)
deband_chr = placebo_debander(clip, iterations=2, threshold=5, radius=12, grain=4)
deband = join([deband, plane(deband_chr, 1), plane(deband_chr, 2)])
return core.std.MaskedMerge(deband, clip, mask)
def grain(clip: vs.VideoNode, **kwargs: Any) -> vs.VideoNode:
"""Consistent grain output across files"""
from adptvgrnMod import adptvgrnMod
grain_args: Dict[str, Any] = dict(
strength=0.35, luma_scaling=10, static=True, size=1.25, sharp=80, grain_chroma=False, seed=42069
)
grain_args |= kwargs
return adptvgrnMod(clip, **grain_args)
|
191870
|
from parsl import python_app
import pytest
from parsl.tests.configs.htex_local import fresh_config
local_config = fresh_config()
@python_app
def compute_descript(size=1000):
import numpy as np
x = np.array(list(range(0, size)), dtype=complex).astype(np.float32)
return x
@pytest.mark.local
def test_1480(size=10**6):
x = compute_descript(size=size)
# Before PR#1841 this would have raised a TypeError
# Now, with the threshold increased this should not trigger any error
assert len(x.result()) == size, "Lengths do not match"
if __name__ == "__main__":
test_1480()
|
191876
|
from server.config import ParseConfig
from server.parsing.parse_class_file import ParseClassFile
from collections import namedtuple
# create parser instance
parser = ParseClassFile.from_object(ParseConfig)
# create a namedtuple type called "meta_data" - its fields will be attached to the Attendance class
AttendanceMetaData = namedtuple('meta_data', ['filter_modes', 'time_delta', 'start_sentence', 'zoom_names_to_ignore'])
|
191885
|
from startX.serivce.v1 import StartXHandler, get_m2m_display
from django.urls import reverse
from django.utils.safestring import mark_safe
from .base_promission import PermissionHandler
class HomeworkHandler(PermissionHandler, StartXHandler):
def display_outline(self, model=None, is_header=None, *args, **kwargs):
if is_header:
return '作业详情'
record_url = reverse('startX:generic_homeworkdetail_list', kwargs={'homework_id': model.pk})
return mark_safe('<a target="_blank" href="%s">作业详情</a>' % record_url)
list_display = [get_m2m_display('课程', 'courses'), 'title', get_m2m_display('章节', 'chapter'), 'content',
display_outline]
search_list = ['courses__contains']
|
191894
|
from social.pipeline.partial import save_status_to_session
save_status_to_session # placate pyflakes
|
191897
|
def is_k_anonymous(df, partition, sensitive_column, k=3):
"""
:param df: The dataframe on which to check the partition.
:param partition: The partition of the dataframe to check.
:param sensitive_column: The name of the sensitive column
:param k: The desired k
:returns : True if the partition is valid according to our k-anonymity criteria, False otherwise.
"""
if len(partition) < k:
# we cannot split this partition further...
return False
return True
def partition_dataset(df, feature_columns, sensitive_column, scale, is_valid):
"""
:param df: The dataframe to be partitioned.
:param feature_columns: A list of column names along which to partition the dataset.
:param sensitive_column: The name of the sensitive column (to be passed on to the `is_valid` function)
:param scale: The column spans as generated before.
:param is_valid: A function that takes a dataframe and a partition and returns True if the partition is valid.
:returns : A list of valid partitions that cover the entire dataframe.
"""
finished_partitions = []
partitions = [df.index]
while partitions:
partition = partitions.pop(0)
spans = get_spans(df[feature_columns], partition, scale)
for column, span in sorted(spans.items(), key=lambda x:-x[1]):
#we try to split this partition along a given column
lp, rp = split(df, partition, column)
if not is_valid(df, lp, sensitive_column) or not is_valid(df, rp, sensitive_column):
continue
# the split is valid, we put the new partitions on the list and continue
partitions.extend((lp, rp))
break
else:
# no split was possible, we add the partition to the finished partitions
finished_partitions.append(partition)
return finished_partitions
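# --- Illustrative usage sketch (not part of the original module) ---
# Shows how is_k_anonymous and partition_dataset might be combined. `get_spans` and
# `split` are assumed to be defined elsewhere in this module (they are already used
# above); the dataframe, column names and the get_spans call signature here are
# assumptions for demonstration only.
def _example_partitioning():
    import pandas as pd

    df = pd.DataFrame({
        "age": [23, 25, 31, 36, 41, 43, 51, 57],
        "zipcode": [11001, 11001, 11002, 11002, 11003, 11003, 11004, 11004],
        "income": [30, 32, 40, 45, 50, 52, 60, 65],
    })
    feature_columns = ["age", "zipcode"]
    # Assumed: get_spans(df, partition, scale=None) returns the span of each column.
    scale = get_spans(df[feature_columns], df.index)
    partitions = partition_dataset(df, feature_columns, "income", scale, is_k_anonymous)
    print(len(partitions), "partitions, each with at least k=3 rows")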
|
192052
|
from src.scripts.emr_test_steps import lambda_handler as emr_steps
def test_emr_steps(create_cluster, lambda_context):
cluster_id = create_cluster.get('JobFlowId')
assert 'j-' in cluster_id
cluster_name = create_cluster.get('Name')
steps_request = {
"api_request_id": "test_bootstrap_actions",
"cluster_id": cluster_id,
"cluster_name": cluster_name
}
try:
response = emr_steps(steps_request, lambda_context)
except Exception as error:
print(error)
else:
assert 2 == response.get('steps_count') and 'steps' in response
print("Step count", response.get('steps_count') )
print("Steps", response.get('steps'))
|
192065
|
class BaseDecoder(object):
def __init__(self, name="BaseDecoder"):
self.name = name
print(name)
def decode(self, **kwargs):
"""
Used for greedy decoding of a given input (optionally paired with a gold target);
returns the log-probability of the decoder steps.
"""
raise NotImplementedError
def score(self, **kwargs):
"""
Used for teacher-forcing training;
returns the log-probability of the <input, output> pair.
"""
raise NotImplementedError
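# --- Illustrative subclass sketch (not part of the original module) ---
# A minimal placeholder showing how the BaseDecoder interface above is meant to be
# filled in; the uniform log-probabilities are a stand-in, not a real decoding strategy.
class UniformDecoder(BaseDecoder):
    def __init__(self, vocab_size, name="UniformDecoder"):
        super().__init__(name=name)
        self.vocab_size = vocab_size

    def decode(self, **kwargs):
        # A real implementation would greedily pick the argmax token at each step;
        # here every step simply receives a uniform log-probability.
        import math
        steps = kwargs.get("max_steps", 1)
        return [math.log(1.0 / self.vocab_size)] * steps

    def score(self, **kwargs):
        # Teacher-forcing score of an <input, output> pair under the uniform model.
        import math
        target = kwargs.get("target", [])
        return len(target) * math.log(1.0 / self.vocab_size)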
|
192066
|
import random
import typing
from typing import List, Callable
from hearthstone.simulator.agent.actions import StandardAction, DiscoverChoiceAction, RearrangeCardsAction
from hearthstone.simulator.agent.agent import Agent
if typing.TYPE_CHECKING:
from hearthstone.simulator.core.cards import MonsterCard
from hearthstone.simulator.core.player import Player
class PriorityFunctionBot(Agent):
def __init__(self, authors: List[str], priority: Callable[['Player', 'MonsterCard'], float], seed: int):
if not authors:
authors = ["JB", "AS", "ES", "JS", "DVP"]
self.authors = authors
self.priority = priority
self.local_random = random.Random(seed)
async def discover_choice_action(self, player: 'Player') -> DiscoverChoiceAction:
discover_cards = player.discover_queue[0].items
discover_cards = sorted(discover_cards, key=lambda card: self.priority(player, card), reverse=True)
return DiscoverChoiceAction(player.discover_queue[0].items.index(discover_cards[0]))
async def rearrange_cards(self, player: 'Player') -> RearrangeCardsAction:
permutation = list(range(len(player.in_play)))
self.local_random.shuffle(permutation)
return RearrangeCardsAction(permutation)
async def buy_phase_action(self, player: 'Player') -> StandardAction:
pass
|
192078
|
from __future__ import unicode_literals
from binascii import Error as BinaryError
from base64 import b16encode, b16decode
from django.apps import apps
from django.core.exceptions import FieldDoesNotExist
from django.http import Http404
from django.shortcuts import get_object_or_404
from .exceptions import B16DecodingFail
def spammables():
# Lists all models that are marked flaggable
flaggables = []
for model in apps.get_models():
try:
model._meta.get_field_by_name('spam_flag')
except FieldDoesNotExist:
continue
flaggables.append(model)
return flaggables
def is_spammable(app, model):
model_class = apps.get_model("{}.{}".format(app, model))
return model_class in spammables()
def get_app_name(model_class_or_instance):
return model_class_or_instance._meta.app_config.name.split('.')[-1]
def b16_slug_to_arguments(b16_slug):
"""
Raises B16DecodingFail if the slug cannot be decoded into app/model/pk components.
"""
try:
url = b16decode(b16_slug.decode('utf-8'))
except BinaryError:
raise B16DecodingFail
except TypeError:
raise B16DecodingFail('Non-base16 digit found')
except AttributeError:
raise B16DecodingFail("b16_slug must have a 'decode' method.")
try:
app, model, pk = url.decode('utf-8').split('/')[0:3]
except UnicodeDecodeError:
raise B16DecodingFail("Invalid b16_slug passed")
return app, model, pk
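# Illustrative round trip (not part of the original module); the app/model/pk values
# are made up, and b16encode is already imported at the top of this file.
def _example_b16_slug_round_trip():
    slug = b16encode(b"articles/article/42")
    app, model, pk = b16_slug_to_arguments(slug)
    assert (app, model, pk) == ("articles", "article", "42")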
def get_spammable_or_404(app, model, pk):
# Does this view have the is_spammable mixin?
if is_spammable(app, model):
# convert app/model into the actual model class
model_class = apps.get_model(app, model)
# So we can call meta for details in the template
model_class.meta = model_class._meta
instance = get_object_or_404(model_class, pk=pk)
return model_class, instance
raise Http404
|
192118
|
import pservlet
def init(args):
return (pservlet.pipe_define("in", pservlet.PIPE_INPUT),
pservlet.pipe_define("out", pservlet.PIPE_OUTPUT))
def execute(s):
while True:
tmp = pservlet.pipe_read(s[0])
if not tmp:
if not pservlet.pipe_eof(s[0]):
pservlet.pipe_set_flag(s[0], pservlet.PIPE_PERSIST)
else:
pservlet.pipe_clr_flag(s[0], pservlet.PIPE_PERSIST)
return 0
else:
pservlet.pipe_write(s[1], tmp)
def cleanup(s):
return 0
|
192128
|
import torch.nn as nn
import torch
import wandb
class FlowLoss(nn.Module):
def __init__(self,):
super().__init__()
def forward(self, sample, logdet, logger, mode='eval'):
nll_loss = torch.mean(nll(sample))
assert len(logdet.shape) == 1
nlogdet_loss = -torch.mean(logdet)
loss = nll_loss + nlogdet_loss
reference_nll_loss = torch.mean(nll(torch.randn_like(sample)))
loss_dic = {
f"Loss": loss.item(),
f"reference_nll_loss": reference_nll_loss.item(),
f"nlogdet_loss": nlogdet_loss.item(),
f"nll_loss": nll_loss.item(),
}
logger.append(loss_dic)
## Add description to keys to be identified either as train or eval
loss_dic = {mode + '_' + key:val for key, val in loss_dic.items()}
wandb.log(loss_dic)
return loss
def nll(sample):
return 0.5 * torch.sum(torch.pow(sample, 2), dim=[1, 2, 3])
|
192203
|
import getpass
import os
import tempfile
import builtins
import yaml
LOCAL_DEFAULT_USERNAME = "paradrop"
LOCAL_DEFAULT_PASSWORD = ""
def format_result(data):
"""
Format a result from an API call for printing.
"""
if data is None or data == []:
return ""
return yaml.safe_dump(data, default_flow_style=False)
def open_text_editor(data):
if data is None:
data = ""
fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, 'w') as output:
output.write(data)
# Get modified time before calling editor.
orig_mtime = os.path.getmtime(path)
editor = os.environ.get("EDITOR", "vim")
os.spawnvpe(os.P_WAIT, editor, [editor, path], os.environ)
with open(path, 'r') as source:
data = source.read()
# Check if the file has been modified, and if it has, send the update.
new_mtime = os.path.getmtime(path)
if new_mtime == orig_mtime:
data = None
os.remove(path)
return data
def open_yaml_editor(data, description):
if data is None:
data = {}
fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, 'w') as output:
if len(data) > 0:
output.write(yaml.safe_dump(data, default_flow_style=False))
output.write("\n")
output.write("# You are editing the configuration for the {}.\n".format(description))
output.write("# Blank lines and lines starting with '#' will be ignored.\n")
output.write("# Save and exit to apply changes; exit without saving to discard.\n")
# Get modified time before calling editor.
orig_mtime = os.path.getmtime(path)
editor = os.environ.get("EDITOR", "vim")
os.spawnvpe(os.P_WAIT, editor, [editor, path], os.environ)
with open(path, 'r') as source:
data = source.read()
new_data = yaml.safe_load(data)
# If result is null, convert to an empty dict before sending to router.
if new_data is None:
new_data = {}
# Check if the file has been modified.
new_mtime = os.path.getmtime(path)
changed = (new_mtime != orig_mtime)
os.remove(path)
return new_data, changed
def update_object(obj, path, callback=None):
"""
Traverse a data structure ensuring all nodes exist.
obj: expected to be a dictionary
path: string with dot-separated path components
callback: optional callback function (described below)
When update_object reaches the parent of the leaf node, it calls the
optional callback function. The arguments to the callback function are:
- parent: dictionary containing the leaf node
- key: string key for the leaf node in parent
- created: boolean flag indicating whether any part of the path, including
the leaf node needed to be created.
If the callback function is None, update_object will still ensure that all
components along the path exist. If the leaf needs to be created, it will
be created as an empty dictionary.
Example:
update_object({}, 'foo.bar') -> {'foo': {'bar': {}}}
Return value: Returns either the return value of callback, or if callback
is None, returns the value of the leaf node.
"""
parts = path.split(".")
current = obj
parent = obj
created = False
for part in parts:
if len(part) == 0:
raise Exception("Path ({}) is invalid".format(path))
if not isinstance(current, dict):
raise Exception("Cannot set {}, not a dictionary".format(path))
# Create dictionaries along the way if path nodes do not exist,
# but make note of the fact that the previous value did not exist.
if part not in current:
current[part] = {}
created = True
parent = current
current = parent[part]
if callback is not None:
return callback(parent, parts[-1], created)
else:
return current
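# --- Illustrative usage sketch (not part of the original module) ---
# Shows the callback form of update_object; the configuration path and value below
# are made up for demonstration only.
def _example_update_object():
    def set_value(parent, key, created):
        parent[key] = 42
        return parent[key]

    config = {}
    result = update_object(config, "system.onMissingWiFi", callback=set_value)
    # config is now {'system': {'onMissingWiFi': 42}} and result == 42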
class LoginGatherer(object):
"""
LoginGatherer is an iterator that produces username/password tuples.
On the first iteration, it returns a default username/password combination
for convenience. On subsequent iterations, it prompts the user for input.
The class method prompt() can be used directly in a loop for situations
where the default is not desired.
Usage examples:
for username, password in LoginGatherer(netloc):
...
while True:
username, password = LoginGatherer.prompt(netloc)
"""
def __init__(self, netloc):
self.first = True
self.netloc = netloc
def __iter__(self):
self.first = True
return self
def __next__(self):
if self.first:
self.first = False
return (LOCAL_DEFAULT_USERNAME, LOCAL_DEFAULT_PASSWORD)
else:
return LoginGatherer.prompt(self.netloc)
def next(self):
"""
Get the next username and password pair.
"""
return self.__next__()
@classmethod
def prompt(cls, netloc):
"""
Prompt the user to enter a username and password.
The netloc argument is presented in the prompt, so that the user knows
the relevant authentication domain.
"""
print("Please enter your username and password for {}."
.format(netloc))
username = builtins.input("Username: ")
password = getpass.getpass("Password: ")
return (username, password)
|
192224
|
import abc
from typing import Callable, TypeVar, Generic, Union, cast, Any
from amino.logging import Logging
from amino import LazyList, Boolean, __, _, Either, Right, Maybe, Left, L, Map, curried
from amino.boolean import false, true
from amino.tc.base import Implicits
from amino.tc.flat_map import FlatMap
from amino.func import call_by_name
from amino.lazy_list import LazyLists
def indent(strings: LazyList[str]) -> LazyList[str]:
return strings.map(' ' + _)
Data = TypeVar('Data')
Data1 = TypeVar('Data1')
Sub = TypeVar('Sub')
Sub1 = TypeVar('Sub1')
A = TypeVar('A')
B = TypeVar('B')
Z = TypeVar('Z')
Key = Union[str, int]
class Node(Generic[Data, Sub], Logging, abc.ABC, Implicits, implicits=True, auto=True):
@abc.abstractproperty
def sub(self) -> Sub:
...
@abc.abstractproperty
def sub_l(self) -> LazyList['Node[Data, Any]']:
...
@abc.abstractmethod
def _strings(self) -> LazyList[str]:
...
@property
def strings(self) -> LazyList[str]:
return self._strings()
def _show(self) -> str:
return self._strings().mk_string('\n')
@property
def show(self) -> str:
return self._show()
@abc.abstractmethod
def foreach(self, f: Callable[['Node'], None]) -> None:
...
@abc.abstractmethod
def filter(self, pred: Callable[['Node'], bool]) -> 'Node':
...
def filter_not(self, pred: Callable[['Node'], bool]) -> 'Node':
return self.filter(lambda a: not pred(a))
@abc.abstractproperty
def flatten(self) -> 'LazyList[Any]':
...
@abc.abstractmethod
def contains(self, target: 'Node') -> Boolean:
...
@abc.abstractmethod
def lift(self, key: Key) -> 'SubTree':
...
def __getitem__(self, key: Key) -> 'SubTree':
return self.lift(key)
@abc.abstractproperty
def s(self) -> 'SubTree':
...
@abc.abstractproperty
def empty(self) -> Boolean:
...
@curried
def fold_left(self, z: Z, f: Callable[[Z, 'Node'], Z]) -> Z:
z1 = f(z, self)
return self.sub_l.fold_left(z1)(lambda z2, a: a.fold_left(z2)(f))
@abc.abstractmethod
def replace(self, data: LazyList['Node[Data1, Sub1]']) -> 'Node[Data1, Sub1]':
...
@abc.abstractmethod
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
...
class Inode(Generic[Data, Sub], Node[Data, Sub]):
@abc.abstractproperty
def sub(self) -> LazyList[Any]:
...
def foreach(self, f: Callable[[Node], None]) -> None:
f(self)
self.sub_l.foreach(__.foreach(f))
@property
def flatten(self) -> LazyList[Any]:
return self.sub_l.flat_map(_.flatten).cons(self)
def contains(self, target: Node) -> Boolean:
return self.sub_l.contains(target)
@property
def empty(self) -> Boolean:
return self.data.empty
class ListNode(Generic[Data], Inode[Data, LazyList[Node[Data, Any]]]):
def __init__(self, sub: LazyList[Node[Data, Any]]) -> None:
self.data = sub
@property
def sub(self) -> LazyList[Node[Data, Any]]:
return self.data
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return self.sub
@property
def _desc(self) -> str:
return '[]'
def _strings(self) -> LazyList[str]:
return indent(self.sub // (lambda a: a._strings())).cons(self._desc)
@property
def head(self) -> 'SubTree':
return self.lift(0)
@property
def last(self) -> 'SubTree':
return self.lift(-1)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.sub.map(str).mk_string(','))
def __repr__(self) -> str:
return str(self)
def lift(self, key: Key) -> 'SubTree':
return (
SubTreeInvalid(key, 'ListNode index must be int')
if isinstance(key, str) else
self.sub.lift(key) / L(SubTree.cons)(_, key) | (lambda: SubTreeInvalid(key, 'ListNode index oob'))
)
def replace(self, sub: LazyList[Any]) -> Node:
return ListNode(sub)
def filter(self, pred: Callable[[Node], bool]) -> Node:
def filt(n: Node) -> bool:
return (
pred(n)
if isinstance(n, LeafNode) else
not n.empty
)
return self.replace(self.sub.map(__.filter(pred)).filter(filt))
@property
def s(self) -> 'SubTree':
return SubTreeList(self, 'root')
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(ListNode(self.sub.map(lambda a: a.map_nodes(f))))
class MapNode(Generic[Data], Inode[Data, Map[str, Node[Data, Any]]]):
def __init__(self, data: Map[str, Node[Data, Any]]) -> None:
self.data = data
@property
def sub(self) -> Map[str, Node[Data, Any]]:
return self.data
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return LazyList(self.data.v)
@property
def _desc(self) -> str:
return '{}'
def _strings(self) -> LazyList[str]:
return indent(self.sub_l // (lambda a: a._strings())).cons(self._desc)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.sub_l)
def __repr__(self) -> str:
return str(self)
# TODO allow int indexes into sub_l
def lift(self, key: Key) -> 'SubTree':
def err() -> 'SubTree':
keys = ', '.join(self.data.keys())
return SubTreeInvalid(key, f'MapNode({self.rule}) invalid key ({keys})')
return (
self.data.lift(key) /
L(SubTree.cons)(_, key) |
err
)
def replace(self, sub: Map[str, Node]) -> Node:
return MapNode(sub)
def filter(self, pred: Callable[[Node], bool]) -> Node:
def filt(n: Node) -> bool:
return (
pred(n)
if isinstance(n, LeafNode) else
not n.empty
)
return self.replace(self.data.valmap(__.filter(pred)).valfilter(filt))
@property
def s(self) -> 'SubTree':
return SubTreeMap(self, 'root')
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(MapNode(self.sub.valmap(lambda a: a.map_nodes(f))))
class LeafNode(Generic[Data], Node[Data, None]):
def __init__(self, data: Data) -> None:
self.data = data
def _strings(self) -> LazyList[Data]:
return LazyLists.cons(self.data)
@property
def sub(self) -> None:
pass
@property
def sub_l(self) -> LazyList[Node[Data, Any]]:
return LazyList([])
def foreach(self, f: Callable[[Node], None]) -> None:
f(self)
def filter(self, pred: Callable[[Node], bool]) -> Node:
return self
@property
def flatten(self) -> LazyList[Any]:
return LazyLists.cons(self)
def contains(self, target: Node) -> Boolean:
return false
def lift(self, key: Key) -> 'SubTree':
return SubTreeInvalid(key, 'LeafNode cannot be indexed')
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data)
def __repr__(self) -> str:
return str(self)
@property
def empty(self) -> Boolean:
return false
@property
def s(self) -> 'SubTree':
return SubTreeLeaf(self, 'root')
def replace(self, sub: Data) -> Node:
return LeafNode(sub)
def map_nodes(self, f: Callable[['Node[Data, Sub]'], 'Node[Data, Sub]']) -> 'Node[Data, Sub]':
return f(self)
class TreeFlatMap(FlatMap, tpe=Node):
def flat_map(self, fa: Node[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return (
self.flat_map_inode(fa, f)
if isinstance(fa, Inode) else
self.flat_map_leaf(fa, f)
)
def flat_map_inode(self, fa: Inode[A, Any], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
def err() -> Inode[A, Any]:
raise Exception(f'invalid sub for `TreeFlatMap.flat_map_inode`: {fa}')
return (
self.flat_map_map(fa, f)
if isinstance(fa, MapNode) else
self.flat_map_list(fa, f)
if isinstance(fa, ListNode) else
err()
)
def flat_map_map(self, fa: MapNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return MapNode(fa.sub.valmap(lambda a: self.flat_map(a, f)))
def flat_map_list(self, fa: ListNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return ListNode(fa.sub.map(lambda a: self.flat_map(a, f)))
def flat_map_leaf(self, fa: LeafNode[A], f: Callable[[A], Node[B, Any]]) -> Node[B, Any]:
return f(fa.data)
def map(self, fa: Node[A, Any], f: Callable[[A], B]) -> Node[B, Any]:
return (
self.map_inode(fa, f)
if isinstance(fa, Inode) else
self.map_leaf(fa, f)
)
def map_inode(self, fa: Inode[A, Any], f: Callable[[A], B]) -> Node[B, Any]:
def err() -> Inode[A, Any]:
raise Exception(f'invalid sub for `TreeFlatMap.map_inode`: {fa}')
return (
self.map_map(fa, f)
if isinstance(fa, MapNode) else
self.map_list(fa, f)
if isinstance(fa, ListNode) else
err()
)
def map_map(self, fa: MapNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return MapNode(fa.data.valmap(lambda a: self.map(a, f)))
def map_list(self, fa: ListNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return ListNode(fa.sub.map(lambda a: self.map(a, f)))
def map_leaf(self, fa: LeafNode[A], f: Callable[[A], B]) -> Node[B, Any]:
return LeafNode(f(fa.data))
class SubTree(Implicits, implicits=True, auto=True):
@staticmethod
def cons(fa: Node, key: Key) -> 'SubTree':
return ( # type: ignore
cast(SubTree, SubTreeList(fa, key))
if isinstance(fa, ListNode) else
SubTreeLeaf(fa, key)
if isinstance(fa, LeafNode) else
SubTreeMap(fa, key)
)
@staticmethod
def from_maybe(data: Maybe[Node], key: Key, err: str) -> 'SubTree':
return data.cata(SubTree.cons, SubTreeInvalid(key, err))
def __getattr__(self, key: Key) -> 'SubTree':
try:
return super().__getattr__(key)
except AttributeError:
return self._getattr(key)
@abc.abstractmethod
def _getattr(self, key: Key) -> 'SubTree':
...
def __getitem__(self, key: Key) -> 'SubTree':
return self._getitem(key)
@abc.abstractmethod
def _getitem(self, key: Key) -> 'SubTree':
...
def cata(self, f: Callable[[Node], A], b: Union[A, Callable[[], A]]) -> A:
return (
f(self.data)
if isinstance(self, SubTreeValid)
else call_by_name(b)
)
@abc.abstractproperty
def e(self) -> Either[str, Node]:
...
@abc.abstractproperty
def valid(self) -> Boolean:
...
@abc.abstractproperty
def strings(self) -> LazyList[str]:
...
@abc.abstractproperty
def show(self) -> LazyList[str]:
...
@property
def rule(self) -> Either[str, str]:
return self.e.map(_.rule)
class SubTreeValid(SubTree):
def __init__(self, data: Node, key: Key) -> None:
self.data = data
self._key = key
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data)
@property
def e(self) -> Either[str, Node]:
return Right(self.data)
@property
def valid(self) -> Boolean:
return true
@property
def strings(self) -> LazyList[str]:
return self.data.strings
@property
def show(self) -> str:
return self.data.show
class SubTreeList(SubTreeValid):
@property
def head(self) -> SubTree:
return self[0]
@property
def last(self) -> SubTree:
return self[-1]
def _getattr(self, key: Key) -> SubTree:
return SubTreeInvalid(key, 'cannot access attrs in SubTreeList')
def _getitem(self, key: Key) -> SubTree:
return self.data.lift(key)
def __str__(self) -> str:
return '{}({})'.format(self.__class__.__name__, self.data.sub_l.drain.join_comma)
@property
def _keys(self) -> LazyList[str]:
return self.data.k
class SubTreeLeaf(SubTreeValid):
def err(self, key: Key) -> SubTree:
return SubTreeInvalid(key, 'cannot access attrs in SubTreeLeaf')
def _getattr(self, key: Key) -> SubTree:
return self.err(key)
def _getitem(self, key: Key) -> SubTree:
return self.err(key)
class SubTreeMap(SubTreeValid):
def _getattr(self, key: Key) -> SubTree:
return self.data.lift(key)
def _getitem(self, key: Key) -> SubTree:
return self.data.lift(key)
@property
def _keys(self) -> LazyList[str]:
return self.data.k
class SubTreeInvalid(SubTree):
def __init__(self, key: Key, reason: str) -> None:
self.key = key
self.reason = reason
def __str__(self) -> str:
s = 'SubTreeInvalid({}, {})'
return s.format(self.key, self.reason)
def __repr__(self) -> str:
return str(self)
@property
def valid(self) -> Boolean:
return false
@property
def _error(self) -> str:
return 'no subtree `{}`: {}'.format(self.key, self.reason)
def _getattr(self, key: Key) -> SubTree:
return self
def _getitem(self, key: Key) -> SubTree:
return self
@property
def e(self) -> Either[str, Node]:
return Left(self._error)
@property
def strings(self) -> LazyList[str]:
return LazyList([])
@property
def show(self) -> LazyList[str]:
return str(self)
__all__ = ('Node', 'Inode', 'LeafNode', 'MapNode', 'ListNode')
|
192252
|
import joblib
import numpy as np
from flask import Flask
from flask import jsonify  # tool for working with JSON responses
app = Flask(__name__)
#<NAME>
@app.route('/predict', methods=['GET'])
def predict():
"""Funcion que se expondra en la direccion 8080/predict y que muestra la prediccion hecha
por nuestro modelo que exportamos al archivo best_model.pkl"""
X_test = np.array([7.594444821,7.479555538,1.616463184,1.53352356,0.796666503,0.635422587,0.362012237,0.315963835,2.277026653])
prediction = model.predict(X_test.reshape(1, -1))
return jsonify({'prediccion': list(prediction)})
if __name__ == "__main__":
model = joblib.load('./project/models/best_model.pkl')
app.run(port=8080)
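# Once the server is running, the endpoint can be queried, for example:
#   curl http://localhost:8080/predict
# which returns a JSON body of the form {"prediccion": [...]}.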
|
192260
|
import torch
import torch.nn
import torch.nn.functional
import torch.optim
import torch.utils.data
import numpy
import matplotlib
import matplotlib.pyplot
import time
import torchvision
from torchvision import datasets, transforms
import matplotlib.colors
import socket
device = torch.device("cuda:0")
class DrNet(torch.nn.Module):
def __init__(self):
super(DrNet, self).__init__()
self.model = torch.nn.Sequential(
torch.nn.Linear(784, 256),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(256, 64),
torch.nn.ReLU(inplace=True),
torch.nn.Linear(64, 10),
)
def forward(self, x):
x = self.model(x)
return x
net = DrNet()
net = net.to(device)
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('mnist_data',
train=True,
download=True,
transform=transforms.Compose([transforms.ToTensor()])))
train_images = torch.zeros(60000, 784)
train_number = torch.zeros(60000, dtype=torch.int64)
train_images = train_images.to(device)
train_number = train_number.to(device)
for index, data in enumerate(train_loader):
image = data[0].squeeze()
number = data[1]
if index < 3:
matplotlib.pyplot.figure()
matplotlib.pyplot.imshow(image, cmap='gist_gray')
matplotlib.pyplot.show()
train_images[index] = image.view(-1)
train_number[index] = number
print(train_images.shape)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('mnist_data',
train=False,
download=True,
transform=transforms.Compose([transforms.ToTensor()])))
test_images = torch.zeros(10000, 784)
test_number = torch.zeros(10000, dtype=torch.int64)
test_images = test_images.to(device)
test_number = test_number.to(device)
for index, data in enumerate(test_loader):
image = data[0].squeeze()
number = data[1]
if index < 3:
matplotlib.pyplot.figure()
matplotlib.pyplot.imshow(image, cmap='gist_gray')
matplotlib.pyplot.show()
test_images[index] = image.view(-1)
test_number[index] = number
print(test_images.shape)
loss_array = []
def compute(source):
result = net(source)
return result
loop = 5
def train():
global net
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
for i in range(0, loop):
r = compute(train_images)
loss = torch.nn.functional.cross_entropy(r, train_number)
loss_array.append(loss.item())  # store a plain float so the loss curve can be plotted later
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % 10 == 0:
print(f"loss={loss} time={i}")
t = time.time()
train()
print(time.time() - t)
matplotlib.pyplot.figure()
matplotlib.pyplot.plot(range(0, len(loss_array)), loss_array)
matplotlib.pyplot.show()
test_result = compute(test_images)
test_index = test_result.argmax(1)
test_eq = test_index.eq(test_number)
test_total = test_eq.sum()
print('accuracy:')
print(test_total * 100.0/10000)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(('localhost', 6666))
while True:
data, address = s.recvfrom(8192)
temp = torch.zeros(1, 784)
for i in range(0, 784):
temp[0, i] = data[i] / 255.0
temp_image = temp.view(28, 28)
temp = temp.to(device)
temp_result = compute(temp)
temp_index = temp_result.argmax(1)
s.sendto(bytes([int(temp_index.item())]), address)  # reply with the predicted digit as a single byte
matplotlib.pyplot.figure()
matplotlib.pyplot.imshow(temp_image, cmap='gist_gray')
matplotlib.pyplot.show()
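# Minimal client sketch (an assumption, not part of this file): the server above
# expects 784 raw pixel bytes of a 28x28 grayscale digit and replies with a
# single byte holding the predicted class index.
#   import socket
#   client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   client.sendto(bytes(pixels_784), ('localhost', 6666))
#   reply, _ = client.recvfrom(1)
#   print(reply[0])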
|
192263
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def longestZigZag(self, root: TreeNode) -> int:
self.result = 0
self.dfs(root)
return self.result
def dfs(self, root):
if root == None: return 0, 0
if root.left == None and root.right == None: return 0, 0
rootL, rootR = 0, 0
if root.left == None:
rootL = 0
else:
l, r = self.dfs(root.left)
rootL = r + 1
if root.right == None:
rootR = 0
else:
l, r = self.dfs(root.right)
rootR = l + 1
self.result = max(self.result, rootL, rootR)
return rootL, rootR
node1 = TreeNode(1)
node2 = TreeNode(2)
node3 = TreeNode(3)
node4 = TreeNode(4)
node5 = TreeNode(5)
node6 = TreeNode(6)
node7 = TreeNode(7)
node8 = TreeNode(8)
node1.right = node2
node2.left = node3
node2.right = node4
node4.left = node5
node4.right = node6
node5.right = node7
node7.right = node8
s = Solution()
result = s.longestZigZag(node1)
print(result)
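# For the tree constructed above, the expected output is 3
# (one longest zigzag path is 2 -> 4 -> 5 -> 7).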
|
192303
|
from http import HTTPStatus
import pytest
import requests
from rotkehlchen.constants.assets import A_ETH, A_EUR, A_KRW, A_USD
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.api import (
api_url_for,
assert_error_response,
assert_proper_response,
assert_proper_response_with_result,
)
@pytest.mark.parametrize('start_with_logged_in_user', [False])
def test_querying_exchange_rates(rotkehlchen_api_server):
"""Make sure that querying exchange rates works also without logging in"""
# Test with empty list of currencies
data = {'currencies': []}
response = requests.get(
api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
)
assert_error_response(
response=response,
contained_in_msg='Empty list of currencies provided',
status_code=HTTPStatus.BAD_REQUEST,
)
def assert_okay(response):
"""Helper function for DRY checking below assertions"""
assert_proper_response(response)
json_data = response.json()
assert json_data['message'] == ''
result = json_data['result']
assert len(result) == 4
assert FVal(result['EUR']) > 0
assert FVal(result['USD']) > 0
assert FVal(result['KRW']) > 0
assert FVal(result['ETH']) > 0
# Test with some currencies, both JSON body and query parameters
data = {'currencies': ['EUR', 'USD', 'KRW', 'ETH']}
response = requests.get(
api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
)
assert_okay(response)
# This serves as a test that a list of parameters works with query args too
response = requests.get(
api_url_for(rotkehlchen_api_server, 'exchangeratesresource') + '?currencies=' +
','.join(data['currencies']),
)
result = assert_proper_response_with_result(response)
expected_currencies = [A_EUR, A_USD, A_KRW, A_ETH]
assert len(result) == len(expected_currencies)
for currency in expected_currencies:
assert FVal(result[currency.identifier]) > 0
@pytest.mark.parametrize('start_with_logged_in_user', [False])
def test_querying_exchange_rates_errors(rotkehlchen_api_server):
"""Make sure that querying exchange rates with wrong input is handled"""
# Test with invalid type for currency
data = {'currencies': [4234324.21], 'async_query': False}
response = requests.get(
api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
)
assert_error_response(
response=response,
contained_in_msg='Tried to initialize an asset out of a non-string identifier',
status_code=HTTPStatus.BAD_REQUEST,
)
# Test with unknown assets
data = {'currencies': ['DDSAS', 'EUR'], 'async_query': False}
response = requests.get(
api_url_for(rotkehlchen_api_server, 'exchangeratesresource'), json=data,
)
assert_proper_response_with_result(response)
|
192332
|
import re
import itertools
from .dict_changed import DictChanged
class UnicodeMap(DictChanged):
'''Represents PDF "ToUnicode" optional data structure that controls character encoding'''
NotParsed = object() #: for internal use
def __init__(self, text):
self._text = text
super().__init__(_parse(text))
def format(self):
'''Generates string representing the PDF stream, ready to be inserted into the PDF file'''
if not self.changed:
return self._text
return _format(self, self._text)
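# A minimal sketch of the kind of CMap fragment this module parses:
#   2 beginbfchar
#   <0041> <0041>
#   <0042> <FB01>
#   endbfchar
# _parse() turns this into {0x41: 'A', 0x42: '\ufb01'}.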
_RE_CODE = r'<([\dA-Fa-f]{2,4})\s*>'
_RE_TEXT = r'<([\dA-Fa-f ]+)\s*>'
def _parse(text):
umap = {}
for mtc in re.finditer(br'\s*\d+\sbeginbfchar(.*?)endbfchar\s*', text, flags=re.DOTALL):
for key, val in _parse_bfchar(mtc.group(1).decode()):
umap[key] = val
for mtc in re.finditer(br'\s*\d+\sbeginbfrange(.*?)endbfrange\s*', text, flags=re.DOTALL):
for key, val in _parse_bfrange(mtc.group(1).decode()):
umap[key] = val
return umap
def _parse_bfchar(text):
text = text.strip()
if text == '':
return
off = 0
for mtc in re.finditer(_RE_CODE + r'\s*' + _RE_TEXT + r'\s*', text):
assert mtc.start() == off, text
off = mtc.end()
key = int(mtc.group(1), 16)
val = ''.join(_parse_text(mtc.group(2)))
yield key, val
assert off == len(text), text
def _parse_text(text):
text = text.replace(' ', '')
if len(text) == 2:
return chr(int(text, 16))
tx = []
for off in range(0, len(text), 4):
x = text[off:off+4]
assert len(x) == 4, text
tx.append(chr(int(x, 16)))
assert len(tx) >= 1, text
return tx
def _parse_bfrange(text):
text = text.strip()
if text == '':
return
for line in text.split('\n'):
line = line.strip()
mtc = re.match(_RE_CODE + r'\s*' + _RE_CODE + r'\s*' + _RE_TEXT + r'$', line)
if mtc is not None:
yield from _parse_implicit_range(mtc.group(1), mtc.group(2), mtc.group(3))
else:
mtc = re.match(_RE_CODE + r'\s*' + _RE_CODE + r'\s*\[(.*)\]$', line)
assert mtc, text
yield from _parse_explicit_range(mtc.group(1), mtc.group(2), mtc.group(3))
def _parse_implicit_range(start_code, end_code, start_text):
start_code = int(start_code, 16)
end_code = int(end_code, 16) + 1
start_text = _parse_text(start_text)
assert start_code < end_code
prefix = ''.join(start_text[:-1])
last_text_ord = ord(start_text[-1])
for i,code in enumerate(range(start_code, end_code)):
yield code, prefix + chr(i + last_text_ord)
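# Worked example: the bfrange line "<0041> <0043> <0061>" expands to
# (0x41, 'a'), (0x42, 'b'), (0x43, 'c').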
def _parse_explicit_range(start_code, end_code, text_list):
start_code = int(start_code, 16)
end_code = int(end_code, 16) + 1
off = 0
texts = []
for mtc in re.finditer(_RE_TEXT + r'\s*', text_list):
assert mtc.start() == off, text_list
off = mtc.end()
val = ''.join(_parse_text(mtc.group(1)))
texts.append(val)
assert off == len(text_list), text_list
assert start_code < end_code
assert len(texts) == end_code - start_code
yield from zip(range(start_code, end_code), texts)
def _format(mapping, text):
bfchars, bfranges = _format_mapping(mapping)
preamble, postamble = _clear_bfchars_bfranges(text)
text = []
if preamble:
text.append(preamble)
for bfchar in bfchars:
if bfchar:
text.append(f'{len(bfchar)} beginbfchar'.encode())
for x, y in bfchar:
text.append(f'<{_format_hex_code(x)}> <{_format_hex_text(y)}>'.encode())
text.append(b'endbfchar')
for bfrange in bfranges:
if bfrange:
text.append(f'{len(bfrange)} beginbfrange'.encode())
for x, y, z in bfrange:
text.append(f'<{_format_hex_code(x)}> <{_format_hex_code(y)}> <{_format_hex_text(z)}>'.encode())
text.append(b'endbfrange')
if postamble:
text.append(postamble)
return b'\n'.join(text)
def _format_mapping(mapping):
code_groups = []
# The PDF spec states that a bfchar block cannot contain more than 100 items and cannot span codes that differ in their high-order byte
for _, codes in itertools.groupby(sorted(mapping.keys()), lambda code: code // 256):
codes = list(codes)
for off in range(0, len(codes), 100):
code_groups.append(codes[off:off+100])
bfchars = []
bfgroups = []
for g in code_groups:
bfchar, bfrange = _try_compress([ (x, mapping[x]) for x in g ])
bfchars.append(bfchar)
bfgroups.append(bfrange)
# FIXME: generate groups too!
return bfchars, bfgroups
def _clear_bfchars_bfranges(text):
mtc = re.search(br'\s*\d+\s+beginbfchar', text)
if mtc is None:
mtc = re.search(br'\s*\d+\s+beginbfrange', text)
assert mtc, text
preamble = text[:mtc.start()]
rest = text[mtc.start():]
rest = re.sub(br'\s*\d+\s+beginbfchar.*?endbfchar\s*', b'', rest, flags=re.DOTALL)
assert not re.search(br'beginbfchar', rest), rest
rest = re.sub(br'\s*\d+\s+beginbfrange.*?endbfrange\s*', b'', rest, flags=re.DOTALL)
assert not re.search(br'beginbfrange', rest), rest
return preamble, rest
def _format_hex_code(code):
text = hex(code)[2:].upper()
assert len(text) <= 4, (code, hex(code).upper())
return '0' * (4-len(text)) + text # leading zeroes, if needed
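# Example: _format_hex_code(0x41) == '0041'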
def _format_hex_text(text):
if text == '':
text = ' '
out = []
for c in text:
out.append(_format_hex_code(ord(c)))
return ''.join(out)
def _sequential_blocks(chars):
old_code, old_text = chars[0]
block = [ (old_code, old_text) ]
for code, text in chars[1:]:
if old_code+1 != code or not _is_sequential_text(old_text, text):
yield block[:]
block.clear()
block.append((code, text))
old_code, old_text = code, text
yield block
def _try_compress(chars):
bfchars = []
bfranges = []
for block in _sequential_blocks(chars):
if len(block) <= 2:
bfchars.extend(block)
else:
code, text = block[0]
bfranges.append((code, code+len(block)-1, text))
return bfchars, bfranges
def _is_sequential_text(text0, text1):
if len(text0) != len(text1):
return False
if text0[:-1] != text1[:-1]:
return False
return ord(text0[-1]) +1 == ord(text1[-1])
|
192348
|
from opnsense_cli.api.base import ApiBase
class Export(ApiBase):
MODULE = "haproxy"
CONTROLLER = "export"
"""
Haproxy ExportController
"""
@ApiBase._api_call
def config(self, *args):
self.method = "get"
self.command = "config"
@ApiBase._api_call
def diff(self, *args):
self.method = "get"
self.command = "diff"
@ApiBase._api_call
def download(self, *args):
self.method = "get"
self.command = "download"
class Service(ApiBase):
MODULE = "haproxy"
CONTROLLER = "service"
"""
Haproxy ServiceController
"""
@ApiBase._api_call
def configtest(self, *args):
self.method = "get"
self.command = "configtest"
@ApiBase._api_call
def reconfigure(self, *args):
self.method = "post"
self.command = "reconfigure"
class Settings(ApiBase):
MODULE = "haproxy"
CONTROLLER = "settings"
"""
Haproxy SettingsController
"""
@ApiBase._api_call
def addAcl(self, *args):
self.method = "post"
self.command = "addAcl"
@ApiBase._api_call
def addAction(self, *args):
self.method = "post"
self.command = "addAction"
@ApiBase._api_call
def addBackend(self, *args, json=None):
self.method = "post"
self.command = "addBackend"
@ApiBase._api_call
def addCpu(self, *args):
self.method = "post"
self.command = "addCpu"
@ApiBase._api_call
def addErrorfile(self, *args):
self.method = "post"
self.command = "addErrorfile"
@ApiBase._api_call
def addFrontend(self, *args, json=None):
self.method = "post"
self.command = "addFrontend"
@ApiBase._api_call
def addGroup(self, *args):
self.method = "post"
self.command = "addGroup"
@ApiBase._api_call
def addHealthcheck(self, *args):
self.method = "post"
self.command = "addHealthcheck"
@ApiBase._api_call
def addLua(self, *args):
self.method = "post"
self.command = "addLua"
@ApiBase._api_call
def addMapfile(self, *args):
self.method = "post"
self.command = "addMapfile"
@ApiBase._api_call
def addServer(self, *args, json=None):
self.method = "post"
self.command = "addServer"
@ApiBase._api_call
def addUser(self, *args):
self.method = "post"
self.command = "addUser"
@ApiBase._api_call
def addmailer(self, *args):
self.method = "post"
self.command = "addmailer"
@ApiBase._api_call
def addresolver(self, *args):
self.method = "post"
self.command = "addresolver"
@ApiBase._api_call
def delAcl(self, *args):
self.method = "post"
self.command = "delAcl"
@ApiBase._api_call
def delAction(self, *args):
self.method = "post"
self.command = "delAction"
@ApiBase._api_call
def delBackend(self, *args, json=None):
self.method = "post"
self.command = "delBackend"
@ApiBase._api_call
def delCpu(self, *args):
self.method = "post"
self.command = "delCpu"
@ApiBase._api_call
def delErrorfile(self, *args):
self.method = "post"
self.command = "delErrorfile"
@ApiBase._api_call
def delFrontend(self, *args, json=None):
self.method = "post"
self.command = "delFrontend"
@ApiBase._api_call
def delGroup(self, *args):
self.method = "post"
self.command = "delGroup"
@ApiBase._api_call
def delHealthcheck(self, *args):
self.method = "post"
self.command = "delHealthcheck"
@ApiBase._api_call
def delLua(self, *args):
self.method = "post"
self.command = "delLua"
@ApiBase._api_call
def delMapfile(self, *args):
self.method = "post"
self.command = "delMapfile"
@ApiBase._api_call
def delServer(self, *args):
self.method = "post"
self.command = "delServer"
@ApiBase._api_call
def delUser(self, *args):
self.method = "post"
self.command = "delUser"
@ApiBase._api_call
def delmailer(self, *args):
self.method = "post"
self.command = "delmailer"
@ApiBase._api_call
def delresolver(self, *args):
self.method = "post"
self.command = "delresolver"
@ApiBase._api_call
def get(self, *args):
self.method = "get"
self.command = "get"
@ApiBase._api_call
def setAcl(self, *args):
self.method = "post"
self.command = "setAcl"
@ApiBase._api_call
def setAction(self, *args):
self.method = "post"
self.command = "setAction"
@ApiBase._api_call
def setBackend(self, *args, json=None):
self.method = "post"
self.command = "setBackend"
@ApiBase._api_call
def setCpu(self, *args):
self.method = "post"
self.command = "setCpu"
@ApiBase._api_call
def setErrorfile(self, *args):
self.method = "post"
self.command = "setErrorfile"
@ApiBase._api_call
def setFrontend(self, *args, json=None):
self.method = "post"
self.command = "setFrontend"
@ApiBase._api_call
def setGroup(self, *args):
self.method = "post"
self.command = "setGroup"
@ApiBase._api_call
def setHealthcheck(self, *args):
self.method = "post"
self.command = "setHealthcheck"
@ApiBase._api_call
def setLua(self, *args):
self.method = "post"
self.command = "setLua"
@ApiBase._api_call
def setMapfile(self, *args):
self.method = "post"
self.command = "setMapfile"
@ApiBase._api_call
def setServer(self, *args, json=None):
self.method = "post"
self.command = "setServer"
@ApiBase._api_call
def setUser(self, *args):
self.method = "post"
self.command = "setUser"
@ApiBase._api_call
def setmailer(self, *args):
self.method = "post"
self.command = "setmailer"
@ApiBase._api_call
def setresolver(self, *args):
self.method = "post"
self.command = "setresolver"
|
192381
|
import filestack.models
from filestack.utils import requests
class AudioVisual:
def __init__(self, url, uuid, timestamp, apikey=None, security=None):
"""
AudioVisual instances provide a bridge between transform and filelinks, and allow
you to check the status of a conversion and convert to a Filelink once completed
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='path/to/file/doom.mp4')
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
filelink = av_convert.to_filelink()
print(filelink.url)
```
"""
self.url = url
self.apikey = apikey
self.security = security
self.uuid = uuid
self.timestamp = timestamp
def to_filelink(self):
"""
Checks if the status of the conversion is complete and, if so, converts to a Filelink
*returns* [Filestack.Filelink]
```python
filelink = av_convert.to_filelink()
```
"""
if self.status != 'completed':
raise Exception('Audio/video conversion not complete!')
response = requests.get(self.url).json()
handle = response['data']['url'].split('/')[-1]
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
@property
def status(self):
"""
Returns the status of the AV conversion (makes a GET request)
*returns* [String]
```python
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
```
"""
return requests.get(self.url).json()['status']
|
192407
|
import pytest
@pytest.mark.parametrize(
"server_options,port",
[("--debug", 8090), ("--debug --mathjax", 8090), ("--debug", 9090)],
)
@pytest.mark.parametrize("method", ["curl", "stdin"])
def test_math(browser, Server, server_options, port, method):
with Server(server_options, port) as srv:
srv.send(method, "tests/test_math.md")
result = browser.get(srv.port)
assert '<span class="test-case"></span>' in result, "No text was rendered"
latex_envs = (r"\begin", r"\end", "$$")
if "--mathjax" in server_options:
assert all(env not in result for env in latex_envs), (
"LaTeX equations should have been rendered as MathJax, "
"but that did not happen"
)
else:
assert all(
env in result for env in latex_envs
), "LaTeX equations are not left as it is"
|
192410
|
from matplotlib import pyplot as plt
def init_histogram(axis, sub_dataframe):
centroids = sub_dataframe['bin_centroid']
bins = len(sub_dataframe)
weights = sub_dataframe['bin_count']
min_bin = sub_dataframe['bin_lower_bound'].min()
max_bin = sub_dataframe['bin_upper_bound'].max()
counts_, bins_, _ = axis.hist(centroids,
bins=bins,
weights=weights,
range=(min_bin, max_bin))
def get_measurement_concept_ids(df):
"""
Retrieve a unique set of measurement_concept_ids from the given df
:param df: dataframe
:return: a unique set of measurement_concept_ids
"""
return df['measurement_concept_id'].unique()
def get_unit_concept_ids(df, measurement_concept_id=None):
"""
Retrieve a unique set of unit concept ids for a given df
:param df: dataframe
:param measurement_concept_id: an option measurement_concept_id
:return: a unique set of unit_concept_ids
"""
if measurement_concept_id is None:
unit_concept_ids = df['unit_concept_id'].unique()
else:
unit_concept_ids = df.loc[df['measurement_concept_id'] ==
measurement_concept_id,
'unit_concept_id'].unique()
return unit_concept_ids
def get_sub_dataframe(df, measurement_concept_id, unit_concept_id):
"""
Retrieve subset of the dataframe given a measurement_concept_id and unit_concept_id
:param df: dataframe
:param measurement_concept_id: measurement_concept_id for which the subset is extracted
:param unit_concept_id: the unit_concept_id for which the subset is extracted
:return: a subset of the dataframe
"""
indexes = (df['measurement_concept_id'] == measurement_concept_id) \
& (df['unit_concept_id'] == unit_concept_id)
return df[indexes]
def get_measurement_concept_dict(df):
"""
Retrieve dictionary containing the measurement_concept_id and its corresponding measurement_concept_name
:param df: dataframe
:return: a dictionary containing the measurement_concept_id and its corresponding measurement_concept_name
"""
return dict(zip(df.measurement_concept_id, df.measurement_concept_name))
def get_unit_concept_id_dict(df):
"""
Retrieve dictionary containing the unit_concept_id and its corresponding unit_concept_name
:param df: dataframe
:return: a dictionary containing the unit_concept_id and its corresponding unit_concept_name
"""
return dict(zip(df.unit_concept_id, df.unit_concept_name))
def generate_plot(measurement_concept_id,
measurement_concept_dict,
value_dists_1,
value_dists_2,
unit_dict_1,
unit_dict_2,
sharex=False,
sharey=False):
"""
Generate an n-by-2 grid (n = the number of source units being transformed)
to compare the value distributions before and after unit transformation.
:param measurement_concept_id: the measurement_concept_id for which the distributions are displayed
:param measurement_concept_dict: the dictionary containing the measurement name
:param value_dists_1 dataframe containing the distribution for dataset 1
:param value_dists_2 dataframe containing the distribution for dataset 2
:param unit_dict_1 dictionary containing the unit names for dataset 1
:param unit_dict_2 dictionary containing the unit names for dataset 2
:param sharex a boolean indicating whether subplots share the x-axis
:param sharey a boolean indicating whether subplots share the y-axis
:return: None; the before/after histograms are rendered side by side for each unit
"""
measurement_concept_id = str(measurement_concept_id)
units_before = get_unit_concept_ids(value_dists_1, measurement_concept_id)
units_after = get_unit_concept_ids(value_dists_2, measurement_concept_id)
# Automatically adjusting the height of the plot
plt.rcParams['figure.figsize'] = [18, 4 * len(units_before)]
for unit_after in units_after:
unit_after_name = unit_dict_2[unit_after]
# Generate the n * 2 grid to display the side by side distributions
fig, axs = plt.subplots(len(units_before),
2,
sharex=sharex,
sharey=sharey)
measurement_concept_name = measurement_concept_dict[
measurement_concept_id]
unit_concept_after = unit_dict_2[unit_after]
fig.suptitle(
'Measurement: {measurement}\n standard unit: {unit}'.format(
measurement=measurement_concept_name, unit=unit_concept_after))
counter = 0
sub_df_after = get_sub_dataframe(value_dists_2, measurement_concept_id,
unit_after)
for unit_before in units_before:
sub_df_before = get_sub_dataframe(value_dists_1,
measurement_concept_id,
unit_before)
unit_before_name = unit_dict_1[unit_before]
if len(units_before) == 1:
axs_before = axs[0]
axs_after = axs[1]
else:
axs_before = axs[counter][0]
axs_after = axs[counter][1]
init_histogram(axs_before, sub_df_before)
axs_before.set_title('before unit: {}'.format(unit_before_name))
init_histogram(axs_after, sub_df_after)
axs_after.set_title('after unit: {}'.format(unit_after_name))
counter += 1
def convert_to_sql_list(concept_ids):
return f'({",".join(map(str, concept_ids))})'
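# Example: convert_to_sql_list([1, 2, 3]) == '(1,2,3)', ready to be spliced into
# an SQL IN (...) clause.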
|
192429
|
from torch import Tensor
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
from typing import Callable
from typing import Optional
from functools import partial
from torchvision.datasets import MNIST
from ....data import Transforms
from ....types import tensor_dict_type
from ....types import sample_weights_type
from ....constants import INPUT_KEY
from ....constants import LABEL_KEY
from ....constants import DATA_CACHE_DIR
from ....constants import ORIGINAL_LABEL_KEY
from ....data import CVDataset
from ....data import CVLoader
from ....data import DataLoader
from ....data import DLDataModule
from ....data.interface import CVDataModule
def batch_callback(
label_callback: Optional[Callable[[Tuple[Tensor, Tensor]], Tensor]],
batch: Tuple[Tensor, Tensor],
) -> tensor_dict_type:
img, labels = batch
if label_callback is None:
actual_labels = labels.view(-1, 1)
else:
actual_labels = label_callback(batch)
return {
INPUT_KEY: img,
LABEL_KEY: actual_labels,
ORIGINAL_LABEL_KEY: labels,
}
@DLDataModule.register("mnist")
class MNISTData(CVDataModule):
def __init__(
self,
*,
root: str = DATA_CACHE_DIR,
shuffle: bool = True,
batch_size: int = 64,
num_workers: int = 0,
drop_train_last: bool = True,
transform: Optional[Union[str, List[str], Transforms, Callable]],
transform_config: Optional[Dict[str, Any]] = None,
label_callback: Optional[Callable[[Tuple[Tensor, Tensor]], Tensor]] = None,
):
self.root = root
self.shuffle = shuffle
self.batch_size = batch_size
self.num_workers = num_workers
self.drop_train_last = drop_train_last
self.transform = Transforms.convert(transform, transform_config)
self.test_transform = self.transform
self.label_callback = label_callback
@property
def info(self) -> Dict[str, Any]:
return dict(root=self.root, shuffle=self.shuffle, batch_size=self.batch_size)
# TODO : support sample weights
def prepare(self, sample_weights: sample_weights_type) -> None:
self.train_data = CVDataset(
MNIST(
self.root,
transform=self.transform,
download=True,
)
)
self.valid_data = CVDataset(
MNIST(
self.root,
train=False,
transform=self.transform,
download=True,
)
)
def initialize(self) -> Tuple[CVLoader, Optional[CVLoader]]:
train_loader = CVLoader(
DataLoader(
self.train_data,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
drop_last=self.drop_train_last,
),
partial(batch_callback, self.label_callback),
)
valid_loader = CVLoader(
DataLoader(
self.valid_data,
batch_size=self.batch_size,
shuffle=self.shuffle,
num_workers=self.num_workers,
),
partial(batch_callback, self.label_callback),
)
return train_loader, valid_loader
__all__ = [
"MNISTData",
]
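# Hypothetical usage sketch (the "to_tensor" transform name is an assumption,
# not defined in this file):
#   data = MNISTData(batch_size=16, transform="to_tensor")
#   data.prepare(None)
#   train_loader, valid_loader = data.initialize()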
|
192433
|
import voluptuous
from pg_discuss import ext
#: Minimum allowed comment length
MIN_COMMENT_LENGTH = 3
#: Maximum allowed comment length
MAX_COMMENT_LENGTH = 65535
class ValidateCommentLen(ext.ValidateComment):
"""Extension to add a validation rule that enforces a minimum and
maximum comment length.
"""
def __init__(self, app):
app.config.setdefault('MIN_COMMENT_LENGTH', MIN_COMMENT_LENGTH)
app.config.setdefault('MAX_COMMENT_LENGTH', MAX_COMMENT_LENGTH)
super(ext.ValidateComment, self).__init__(app)
def validate_comment(self, comment, action, **extras):
text = comment['text']
min_comment_length = self.app.config['MIN_COMMENT_LENGTH']
max_comment_length = self.app.config['MAX_COMMENT_LENGTH']
voluptuous.Length(min=min_comment_length, max=max_comment_length)(
text.rstrip())
return comment
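# Note: voluptuous.Length raises an Invalid error when the right-stripped text
# falls outside [MIN_COMMENT_LENGTH, MAX_COMMENT_LENGTH]; otherwise the comment
# is returned unchanged.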
|
192481
|
import random
from indy import ledger, did
import json
from perf_load.perf_req_gen import RequestGenerator
from perf_load.perf_utils import random_string
class RGPoolNewDemotedNode(RequestGenerator):
_req_types = ["0"]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._steward_did = None
self._node_alias = None
self._node_did = None
def _rand_data(self):
ret = "0.{}.{}.{}".format(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
return ret
async def on_pool_create(self, pool_handle, wallet_handle, submitter_did, sign_req_f, send_req_f, *args, **kwargs):
self._node_alias = random_string(7)
self._node_did, node_ver = await did.create_and_store_my_did(wallet_handle,
json.dumps({'seed': random_string(32)}))
self._steward_did, verk = await did.create_and_store_my_did(wallet_handle,
json.dumps({'seed': random_string(32)}))
nym_req = await ledger.build_nym_request(submitter_did, self._steward_did, verk, None, "STEWARD")
await ledger.sign_and_submit_request(pool_handle, wallet_handle, submitter_did, nym_req)
async def _gen_req(self, submitter_did, req_data):
data = {'alias': self._node_alias, 'client_port': 50001, 'node_port': 50002, 'node_ip': req_data,
'client_ip': req_data, 'services': []}
return await ledger.build_node_request(self._steward_did, self._node_did, json.dumps(data))
def req_did(self):
return self._steward_did
|
192532
|
import numpy as np
import argparse
def patch_generator(img, patch_size, stride):
h, w, _ = img.shape
patch_t = []
for i in range(0, h-patch_size+1, stride):
for j in range(0, w-patch_size+1, stride):
patch_t.append(img[i: i + patch_size, j: j + patch_size, :])
return patch_t
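# Example: a 64x64x3 image with patch_size=32 and stride=16 yields 3 x 3 = 9
# overlapping 32x32 patches.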
def data_aug(img, mode=0): # img: W*H
if mode == 0:
return img
elif mode == 1:
return np.flipud(img)
elif mode == 2:
return np.rot90(img, axes=(0, 1))
elif mode == 3:
return np.flipud(np.rot90(img, axes=(0, 1)))
elif mode == 4:
return np.rot90(img, k=2, axes=(0, 1))
elif mode == 5:
return np.flipud(np.rot90(img, k=2, axes=(0, 1)))
elif mode == 6:
return np.rot90(img, k=3, axes=(0, 1))
elif mode == 7:
return np.flipud(np.rot90(img, k=3, axes=(0, 1)))
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
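# Typical usage sketch with argparse (hypothetical flag name):
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--augment', type=str2bool, default=True)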
|
192540
|
from django.conf.urls import url
from drugs import views
urlpatterns = [
url(r'^drugbrowser', views.drugbrowser, name='drugbrowser'),
url(r'^drugstatistics', views.drugstatistics, name='drugstatistics'),
url(r'^drugmapping', views.drugmapping, name='drugmapping'),
url(r'^nhs/section/(?P<slug>[\w|\W]+)/$', views.nhs_section, name='nhs_section'),
url(r'^nhs/(?P<slug>[\w|\W]+)/$', views.nhs_drug, name='nhs_drug'),
]
|
192542
|
import urllib.parse
from functools import wraps
def sanitize_field(field):
return urllib.parse.quote(field.encode("UTF-8"), safe="")
def sanitize_string_args(function):
"""Helper decorator that ensures that all arguments passed are url-safe."""
@wraps(function)
def sanitized_function(*args, **kwargs):
sanitized_args = []
sanitized_kwargs = {}
for arg in args:
if isinstance(arg, str):
arg = sanitize_field(arg)
sanitized_args.append(arg)
for key, value in kwargs.items():
if isinstance(value, str):
value = sanitize_field(value)
sanitized_kwargs[key] = value
return function(*sanitized_args, **sanitized_kwargs)
return sanitized_function
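# Usage sketch (hypothetical function): every string argument is percent-encoded
# before the wrapped function sees it, e.g. sanitize_field("a b/c") == "a%20b%2Fc".
#   @sanitize_string_args
#   def build_path(segment):
#       return "/files/" + segment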
|
192550
|
import os
from future import standard_library
with standard_library.hooks():
import configparser
def read_config(paths=()):
config = {}
fs_config = configparser.ConfigParser()
fs_config.read(paths)
config['cups_uri'] = fs_config.get('main', 'cups_uri')
config['ipptool_path'] = fs_config.get('main', 'ipptool_path')
try:
config['login'] = fs_config.get('main', 'login')
except configparser.NoOptionError:
pass
try:
config['password'] = fs_config.get('main', 'password')
except configparser.NoOptionError:
pass
try:
config['graceful_shutdown_time'] = fs_config.getint(
'main',
'graceful_shutdown_time')
except configparser.NoOptionError:
config['graceful_shutdown_time'] = 2
try:
config['timeout'] = fs_config.getint('main', 'timeout')
except configparser.NoOptionError:
config['timeout'] = 10
return config
class LazyConfig(dict):
def __init__(self, paths):
self.paths = paths
self.loaded = False
def __getitem__(self, key):
if not self.loaded:
self.update(read_config(self.paths))
self.loaded = True
return super(LazyConfig, self).__getitem__(key)
def get_config(paths=('/etc/opt/pyipptool/pyipptool.cfg',
os.path.join(os.path.expanduser('~'),
'.pyipptool.cfg'))):
return LazyConfig(paths)
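# A minimal config sketch that read_config() understands (values are examples only):
#   [main]
#   cups_uri = http://localhost:631/
#   ipptool_path = /usr/bin/ipptool
#   login = admin
#   password = secret
#   graceful_shutdown_time = 2
#   timeout = 10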
|
192552
|
import torch
import torch.nn as nn
import torchvision
class Conv2dCReLU(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size,stride,padding):
super(Conv2dCReLU, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=kernel_size,stride=stride,padding=padding)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU6(inplace=True)
def forward(self, x):
x = self.bn(self.conv(x))
out = torch.cat([x, -x], dim=1)
return self.relu(out)
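# Note: concatenating x and -x doubles the channel count, so a Conv2dCReLU with
# out_channels=24 actually emits 48 feature maps (hence in_channels=48 below).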
class InceptionModules(nn.Module):
def __init__(self):
super(InceptionModules, self).__init__()
self.branch1_conv1 = nn.Conv2d(in_channels=128,out_channels=32,kernel_size=1,stride=1)
self.branch1_conv1_bn = nn.BatchNorm2d(32)
self.branch2_pool = nn.MaxPool2d(kernel_size=3,stride=1,padding=1)
self.branch2_conv1 = nn.Conv2d(in_channels=128, out_channels=32, kernel_size=1, stride=1)
self.branch2_conv1_bn = nn.BatchNorm2d(32)
self.branch3_conv1 = nn.Conv2d(in_channels=128, out_channels=24, kernel_size=1, stride=1)
self.branch3_conv1_bn = nn.BatchNorm2d(24)
self.branch3_conv2 = nn.Conv2d(in_channels=24, out_channels=32, kernel_size=3, stride=1, padding=1)
self.branch3_conv2_bn = nn.BatchNorm2d(32)
self.branch4_conv1 = nn.Conv2d(in_channels=128, out_channels=24, kernel_size=1, stride=1)
self.branch4_conv1_bn = nn.BatchNorm2d(24)
self.branch4_conv2 = nn.Conv2d(in_channels=24, out_channels=32, kernel_size=3, stride=1, padding=1)
self.branch4_conv2_bn = nn.BatchNorm2d(32)
self.branch4_conv3 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1)
self.branch4_conv3_bn = nn.BatchNorm2d(32)
def forward(self, x):
x1 = self.branch1_conv1_bn(self.branch1_conv1(x))
x2 = self.branch2_conv1_bn(self.branch2_conv1(self.branch2_pool(x)))
x3 = self.branch3_conv2_bn(self.branch3_conv2(self.branch3_conv1_bn(self.branch3_conv1(x))))
x4 = self.branch4_conv3_bn(self.branch4_conv3(self.branch4_conv2_bn(self.branch4_conv2(self.branch4_conv1_bn(self.branch4_conv1(x))))))
out = torch.cat([x1, x2, x3, x4],dim=1)
return out
class FaceBoxes(nn.Module):
def __init__(self, num_classes, phase):
super(FaceBoxes, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.RapidlyDigestedConvolutionalLayers = nn.Sequential(
Conv2dCReLU(in_channels=3,out_channels=24,kernel_size=7,stride=4,padding=3),
nn.MaxPool2d(kernel_size=3,stride=2,padding=1),
Conv2dCReLU(in_channels=48,out_channels=64,kernel_size=5,stride=2,padding=2),
nn.MaxPool2d(kernel_size=3, stride=2,padding=1)
)
self.MultipleScaleConvolutionalLayers = nn.Sequential(
InceptionModules(),
InceptionModules(),
InceptionModules(),
)
self.conv3_1 = nn.Conv2d(in_channels=128,out_channels=128,kernel_size=1,stride=1)
self.conv3_2 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)
self.conv4_1 = nn.Conv2d(in_channels=256, out_channels=128, kernel_size=1, stride=1)
self.conv4_2 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)
self.loc_layer1 = nn.Conv2d(in_channels=128, out_channels=21*4, kernel_size=3, stride=1, padding=1)
self.conf_layer1 = nn.Conv2d(in_channels=128, out_channels=21*num_classes, kernel_size=3, stride=1, padding=1)
self.loc_layer2 = nn.Conv2d(in_channels=256, out_channels=4, kernel_size=3, stride=1, padding=1)
self.conf_layer2 = nn.Conv2d(in_channels=256, out_channels=num_classes, kernel_size=3, stride=1, padding=1)
self.loc_layer3 = nn.Conv2d(in_channels=256, out_channels=4, kernel_size=3, stride=1, padding=1)
self.conf_layer3 = nn.Conv2d(in_channels=256, out_channels=num_classes, kernel_size=3, stride=1, padding=1)
if self.phase == 'test':
self.softmax = nn.Softmax(dim=-1)
elif self.phase == 'train':
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.xavier_normal_(m.weight.data)
nn.init.constant_(m.bias, 0)
else:
nn.init.xavier_normal_(m.weight.data)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.RapidlyDigestedConvolutionalLayers(x)
out1 = self.MultipleScaleConvolutionalLayers(x)
out2 = self.conv3_2(self.conv3_1(out1))
out3 = self.conv4_2(self.conv4_1(out2))
loc1 = self.loc_layer1(out1)
conf1 = self.conf_layer1(out1)
loc2 = self.loc_layer2(out2)
conf2 = self.conf_layer2(out2)
loc3 = self.loc_layer3(out3)
conf3 = self.conf_layer3(out3)
locs = torch.cat([loc1.permute(0, 2, 3, 1).contiguous().view(loc1.size(0), -1),
loc2.permute(0, 2, 3, 1).contiguous().view(loc2.size(0), -1),
loc3.permute(0, 2, 3, 1).contiguous().view(loc3.size(0), -1)], dim=1)
confs = torch.cat([conf1.permute(0, 2, 3, 1).contiguous().view(conf1.size(0), -1),
conf2.permute(0, 2, 3, 1).contiguous().view(conf2.size(0), -1),
conf3.permute(0, 2, 3, 1).contiguous().view(conf3.size(0), -1)], dim=1)
if self.phase == 'test':
out = (locs.view(locs.size(0), -1, 4),
self.softmax(confs.view(-1, self.num_classes)))
else:
out = (locs.view(locs.size(0), -1, 4),
confs.view(-1, self.num_classes))
return out
if __name__ == '__main__':
model = FaceBoxes(num_classes=2, phase='train')
print(model)
input = torch.randn(1, 3, 1024, 1024)
out = model(input)
print(out[0].shape)
print(out[1].shape)
|
192606
|
import sys
import matplotlib as mpl
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch import nn
from torch.autograd import Variable
from tqdm import trange
import gaussian
import util
from util import sparsemm
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
import networkx as nx
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
"""
Graph convolution experiment: learn a similarity graph for a given dataset end-to-end using simplified graph
convolutions.
Abandoned. Let me know if you get it to work.
"""
def clean(axes=None):
if axes is None:
axes = plt.gca()
[s.set_visible(False) for s in axes.spines.values()]
axes.tick_params(top=False, bottom=False, left=False, right=False, labelbottom=False, labelleft=False)
def densities(points, means, sigmas):
"""
Compute the unnormalized PDFs of the points under the given MVNs
(with sigma a diagonal matrix per MVN)
:param means:
:param sigmas:
:param points:
:return:
"""
# n: number of MVNs
# d: number of points per MVN
# rank: dim of points
batchsize, n, rank = points.size()
batchsize, k, rank = means.size()
# batchsize, k, rank = sigmas.size()
points = points.unsqueeze(2).expand(batchsize, n, k, rank)
means = means.unsqueeze(1).expand_as(points)
sigmas = sigmas.unsqueeze(1).expand_as(points)
sigmas_squared = torch.sqrt(1.0/(gaussian.EPSILON + sigmas))
points = points - means
points = points * sigmas_squared
# Compute dot products for all points
# -- unroll the batch/n dimensions
points = points.view(-1, 1, rank)
# -- dot prod
products = torch.bmm(points, points.transpose(1,2))
# -- reconstruct shape
products = products.view(batchsize, n, k)
num = torch.exp(- 0.5 * products)
return num
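# Shape sketch: with points of size (batch, n, rank) and means/sigmas of size
# (batch, k, rank), the returned (unnormalized) density tensor has size (batch, n, k).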
class MatrixHyperlayer(nn.Module):
"""
Constrained version of the matrix hyperlayer. Each output gets exactly k inputs
"""
def duplicates(self, tuples):
"""
Takes a list of tuples, and for each tuple that occurs multiple times
marks all but one of the occurrences (in the mask that is returned).
:param tuples: A size (batch, k, rank) tensor of integer tuples
:return: A size (batch, k) mask indicating the duplicates
"""
b, k, r = tuples.size()
primes = self.primes[:r]
primes = primes.unsqueeze(0).unsqueeze(0).expand(b, k, r)
unique = ((tuples+1) ** primes).prod(dim=2) # unique identifier for each tuple
sorted, sort_idx = torch.sort(unique, dim=1)
_, unsort_idx = torch.sort(sort_idx, dim=1)
mask = sorted[:, 1:] == sorted[:, :-1]
zs = torch.zeros(b, 1, dtype=torch.uint8, device='cuda' if self.use_cuda else 'cpu')
mask = torch.cat([zs, mask], dim=1)
return torch.gather(mask, 1, unsort_idx)
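# Example: for tuples [[(3,), (5,), (3,)]] exactly one of the two occurrences
# of (3,) is marked in the returned mask, e.g. [[0, 0, 1]].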
def cuda(self, device_id=None):
self.use_cuda = True
super().cuda(device_id)
def __init__(self, in_num, out_num, k, radditional=0, gadditional=0, region=(128,),
sigma_scale=0.2, min_sigma=0.0, fix_value=False):
super().__init__()
self.min_sigma = min_sigma
self.use_cuda = False
self.in_num = in_num
self.out_num = out_num
self.k = k
self.radditional = radditional
self.region = region
self.gadditional = gadditional
self.sigma_scale = sigma_scale
self.fix_value = fix_value
self.weights_rank = 2 # implied rank of W
self.params = Parameter(torch.randn(k * out_num, 3))
outs = torch.arange(out_num).unsqueeze(1).expand(out_num, k * (2 + radditional + gadditional)).contiguous().view(-1, 1)
self.register_buffer('outs', outs.long())
outs_inf = torch.arange(out_num).unsqueeze(1).expand(out_num, k).contiguous().view(-1, 1)
self.register_buffer('outs_inf', outs_inf.long())
self.register_buffer('primes', torch.tensor(util.PRIMES))
def size(self):
return (self.out_num, self.in_num)
def generate_integer_tuples(self, means,rng=None, use_cuda=False):
dv = 'cuda' if use_cuda else 'cpu'
c, k, rank = means.size()
assert rank == 1
# In the following, we cut the first dimension up into chunks of size self.k (for which the row index
# is the same). This then functions as a kind of 'batch' dimension, allowing us to use the code from
# globalsampling without much adaptation
"""
Sample the 2 nearest points
"""
floor_mask = torch.tensor([1, 0], device=dv, dtype=torch.uint8)
fm = floor_mask.unsqueeze(0).unsqueeze(2).expand(c, k, 2, 1)
neighbor_ints = means.data.unsqueeze(2).expand(c, k, 2, 1).contiguous()
neighbor_ints[fm] = neighbor_ints[fm].floor()
neighbor_ints[~fm] = neighbor_ints[~fm].ceil()
neighbor_ints = neighbor_ints.long()
"""
Sample uniformly from a small range around the given index tuple
"""
rr_ints = torch.cuda.FloatTensor(c, k, self.radditional, 1) if use_cuda else torch.FloatTensor(c, k, self.radditional, 1)
rr_ints.uniform_()
rr_ints *= (1.0 - gaussian.EPSILON)
rng = torch.cuda.FloatTensor(rng) if use_cuda else torch.FloatTensor(rng)
rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints) # bounds of the tensor
rrng = torch.cuda.FloatTensor(self.region) if use_cuda else torch.FloatTensor(self.region) # bounds of the range from which to sample
rrng = rrng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints)
mns_expand = means.round().unsqueeze(2).expand_as(rr_ints)
# upper and lower bounds
lower = mns_expand - rrng * 0.5
upper = mns_expand + rrng * 0.5
# check for any ranges that are out of bounds
idxs = lower < 0.0
lower[idxs] = 0.0
idxs = upper > rngxp
lower[idxs] = rngxp[idxs] - rrng[idxs]
rr_ints = (rr_ints * rrng + lower).long()
"""
Sample uniformly from all index tuples
"""
g_ints = torch.cuda.FloatTensor(c, k, self.gadditional, 1) if use_cuda else torch.FloatTensor(c, k, self.gadditional, 1)
rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(g_ints) # bounds of the tensor
g_ints.uniform_()
g_ints *= (1.0 - gaussian.EPSILON) * rngxp
g_ints = g_ints.long()
ints = torch.cat([neighbor_ints, rr_ints, g_ints], dim=2)
return ints.view(c, -1, rank)
def forward(self, input, train=True):
### Compute and unpack output of hypernetwork
means, sigmas, values = self.hyper(input)
nm = means.size(0)
c = nm // self.k
means = means.view(c, self.k, 1)
sigmas = sigmas.view(c, self.k, 1)
values = values.view(c, self.k)
rng = (self.in_num, )
assert input.size(0) == self.in_num
if train:
indices = self.generate_integer_tuples(means, rng=rng, use_cuda=self.use_cuda)
indfl = indices.float()
# Mask for duplicate indices
dups = self.duplicates(indices)
props = densities(indfl, means, sigmas).clone() # result has size (c, indices.size(1), means.size(1))
props[dups] = 0
props = props / props.sum(dim=1, keepdim=True)
values = values.unsqueeze(1).expand(c, indices.size(1), means.size(1))
values = props * values
values = values.sum(dim=2)
# unroll the batch dimension
indices = indices.view(-1, 1)
values = values.view(-1)
indices = torch.cat([self.outs, indices.long()], dim=1)
else:
indices = means.round().long().view(-1, 1)
values = values.squeeze().view(-1)
indices = torch.cat([self.outs_inf, indices.long()], dim=1)
if self.use_cuda:
indices = indices.cuda()
# Kill anything on the diagonal
values[indices[:, 0] == indices[:, 1]] = 0.0
# if self.symmetric:
# # Add reverse direction automatically
# flipped_indices = torch.cat([indices[:, 1].unsqueeze(1), indices[:, 0].unsqueeze(1)], dim=1)
# indices = torch.cat([indices, flipped_indices], dim=0)
# values = torch.cat([values, values], dim=0)
### Create the sparse weight tensor
# Prevent segfault
assert not util.contains_nan(values.data)
vindices = Variable(indices.t())
sz = Variable(torch.tensor((self.out_num, self.in_num)))
spmm = sparsemm(self.use_cuda)
output = spmm(vindices, values, sz, input)
return output
def hyper(self, input=None):
"""
Evaluates hypernetwork.
"""
k, width = self.params.size()
means = F.sigmoid(self.params[:, 0:1])
# Limits for each of the w_rank indices
# and scales for the sigmas
s = torch.cuda.FloatTensor((self.in_num,)) if self.use_cuda else torch.FloatTensor((self.in_num,))
s = Variable(s.contiguous())
ss = s.unsqueeze(0)
sm = s - 1
sm = sm.unsqueeze(0)
means = means * sm.expand_as(means)
sigmas = nn.functional.softplus(self.params[:, 1:2] + gaussian.SIGMA_BOOST) + gaussian.EPSILON
values = self.params[:, 2:] # * 0.0 + 1.0
sigmas = sigmas.expand_as(means)
sigmas = sigmas * ss.expand_as(sigmas)
sigmas = sigmas * self.sigma_scale + self.min_sigma
return means, sigmas, values * 0.0 + 1.0/self.k if self.fix_value else values
class GraphConvolution(Module):
"""
Code adapted from pyGCN, see https://github.com/tkipf/pygcn
Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
"""
def __init__(self, in_features, out_features, bias=True, has_weight=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features)) if has_weight else None
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
if self.weight is not None:
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.zero_() # different from the default implementation
def forward(self, input, adj, train=True):
if input is None: # The input is the identity matrix
support = self.weight
elif self.weight is not None:
support = torch.mm(input, self.weight)
else:
support = input
output = adj(support, train=train)
if self.bias is not None:
return output + self.bias
else:
return output
class ConvModel(nn.Module):
def __init__(self, data_size, k, emb_size = 16, radd=32, gadd=32, range=128, min_sigma=0.0, directed=True, fix_value=False, encoder=False):
super().__init__()
self.data_shape = data_size
n, c, h, w = data_size
# - channel sizes
c1, c2, c3 = 16, 32, 64
h1, h2, h3 = 256, 128, 64
# upmode = 'bilinear'
# self.decoder_conv = nn.Sequential(
# nn.Linear(h3, 4 * 4 * c3), nn.ReLU(),
# util.Reshape((c3, 4, 4)),
# nn.ConvTranspose2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
# nn.ConvTranspose2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
# nn.ConvTranspose2d(c3, c2, (3, 3), padding=1), nn.ReLU(),
# nn.Upsample(scale_factor=3, mode=upmode),
# nn.ConvTranspose2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
# nn.ConvTranspose2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
# nn.ConvTranspose2d(c2, c1, (3, 3), padding=1), nn.ReLU(),
# nn.Upsample(scale_factor=2, mode=upmode),
# nn.ConvTranspose2d(c1, c1, (5, 5), padding=0), nn.ReLU(),
# nn.ConvTranspose2d(c1, c1, (3, 3), padding=1), nn.ReLU(),
# nn.ConvTranspose2d(c1, 1, (3, 3), padding=1), nn.Sigmoid(),
# # util.Debug(lambda x : print(x.size()))
# )
#
# self.decoder_lin = nn.Sequential(
# nn.Linear(emb_size, h3), nn.ReLU(),
# nn.Linear(h3, h2), nn.ReLU(),
# nn.Linear(h2, h3),
# )
#
# self.decoder = nn.Sequential(
# self.decoder_lin,
# self.decoder_conv
# )
# Encoder is only used during pretraining
self.encoder = nn.Sequential(
util.Flatten(),
nn.Linear(28*28, h2), nn.ReLU(),
nn.Linear(h2, h3), nn.ReLU(),
nn.Linear(h3, emb_size * 2)
)
self.decoder = nn.Sequential(
nn.Linear(emb_size, h3), nn.ReLU(),
nn.Linear(h3, h2), nn.ReLU(),
nn.Linear(h2, h3), nn.ReLU(),
nn.Linear(h3, 28*28), nn.Sigmoid(),
util.Reshape((1, 28, 28))
)
# self.encoder = None
# if encoder:
# self.encoder_conv = nn.Sequential(
# nn.Conv2d(1, c1, (3, 3), padding=1), nn.ReLU(),
# nn.Conv2d(c1, c1, (3, 3), padding=1), nn.ReLU(),
# nn.Conv2d(c1, c1, (3, 3), padding=1), nn.ReLU(),
# nn.MaxPool2d((2, 2)),
# nn.Conv2d(c1, c2, (3, 3), padding=1), nn.ReLU(),
# nn.Conv2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
# nn.Conv2d(c2, c2, (3, 3), padding=1), nn.ReLU(),
# nn.MaxPool2d((2, 2)),
# nn.Conv2d(c2, c3, (3, 3), padding=1), nn.ReLU(),
# nn.Conv2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
# nn.Conv2d(c3, c3, (3, 3), padding=1), nn.ReLU(),
# nn.MaxPool2d((2, 2)),
# util.Flatten(),
# nn.Linear(9 * c3, h1)
# )
#
# self.encoder_lin = nn.Sequential(
# util.Flatten(),
# nn.Linear(h1, h2), nn.ReLU(),
# nn.Linear(h2, h3), nn.ReLU(),
# nn.Linear(h3, emb_size * 2),
# )
#
# self.encoder = nn.Sequential(
# self.encoder_conv,
# self.encoder_lin
# )
#
self.adj = MatrixHyperlayer(n, n, k, radditional=radd, gadditional=gadd, region=(range,),
min_sigma=min_sigma, fix_value=fix_value)
self.embedding = Parameter(torch.randn(n, emb_size))
self.emb_size = emb_size
# self.embedding_conv = GraphConvolution(n, emb_size, bias=False)
# self.weightless_conv = GraphConvolution(emb_size, emb_size, has_weight=False, bias=False)
def freeze(self):
for param in self.encoder_conv.parameters():
param.requires_grad = False
for param in self.decoder_conv.parameters():
param.requires_grad = False
def forward(self, depth=1, train=True, data=None): #, reg=util.kl_batch
# x0 = self.embedding_conv.weight
# x = self.embedding_conv(input=None, adj=self.adj, train=train) # identity matrix input
# results = [x0, x]
# for i in range(1, depth):
# x = self.weightless_conv(input=x, adj=self.adj, train=train)
# results.append(x)
reg_losses = []
if self.encoder is None:
x = self.embedding
else:
xraw = self.encoder(data)
xmean, xsig = xraw[:, :self.emb_size], xraw[:, self.emb_size:]
reg_losses.append(util.kl_loss(xmean, xsig)[:, None])
x = util.vae_sample(xmean, xsig)
n, e = x.size()
results = [x]
# reg_losses.append(reg(x))
for _ in range(1, depth):
x = self.adj(x, train=train)
results.append(x)
# reg_losses.append(reg(x))
# if self.encoder is None:
# return [self.decoder(r) for r in results]
# else:
return [self.decoder(r) for r in results], None #, reg_losses
def cuda(self):
super().cuda()
self.adj.apply(lambda t: t.cuda())
def train_decoder(self, data, epochs=1000, lr=0.0001,
batch_size=256, cuda=torch.cuda.is_available()):
n, c, h, w = data.size()
params = list(self.encoder.parameters()) + list(self.decoder.parameters())
opt = torch.optim.Adam(params, lr=lr)
for e in trange(epochs):
for fr in range(0, n, batch_size):
to = min(fr + batch_size, n)
batch, b = data[fr:to], to - fr
if cuda:
batch = batch.cuda()
batch = Variable(batch)
opt.zero_grad()
# forward
z = self.encoder(batch)
kl = util.kl_loss(z[:, :self.emb_size], z[:, self.emb_size:])
z = util.vae_sample(z[:, :self.emb_size], z[:, self.emb_size:])
rec = self.decoder(z)
# backward
loss = F.binary_cross_entropy(rec, batch, reduce=False).view(b, -1).sum(dim=1) + kl
loss.mean().backward()
opt.step()
self.embedding.data = self.encoder(data).data[:, :self.emb_size]
self.embedding.requires_grad = False
for p in self.decoder.parameters():
p.requires_grad = False
#
class ConvModelFlat(nn.Module):
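    """Variant without an encoder/decoder: the learned adjacency is applied directly
    to the flattened pixel vectors and the result is reshaped back into images."""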
def __init__(self, data_size, k, radd=128, gadd=128, range=16, min_sigma=0.0, fix_value=False):
super().__init__()
n, c, h, w = data_size
self.adj = MatrixHyperlayer(n, n, k, radditional=radd, gadditional=gadd, min_sigma=min_sigma,
region=(range,), fix_value=fix_value)
def forward(self, data, depth=1, train=True):
n = data.size(0)
x = data.view(n, -1)
results =[]
for _ in range(depth):
x = self.adj(x, train=train)
results.append(x)
return [r.view(data.size()).clamp(0, 1) for r in results]
def cuda(self):
super().cuda()
self.adj.apply(lambda t: t.cuda())
PLOT_MAX = 2000 # max number of data points for the latent space plot
def go(arg):
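    """Train on MNIST: pretrain the VAE encoder/decoder, then learn the sparse
    adjacency matrix by minimizing the reconstruction loss at every depth (plus a
    sigma regularizer), periodically saving reconstructions, the latent space and,
    optionally, the adjacency matrix and graph."""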
MARGIN = 0.1
util.makedirs('./conv/')
torch.manual_seed(arg.seed)
writer = SummaryWriter()
mnist = torchvision.datasets.MNIST(root=arg.data, train=True, download=True, transform=transforms.ToTensor())
data = util.totensor(mnist, shuffle=True, maxclass=None)
assert data.min() == 0 and data.max() == 1.0
if arg.limit is not None:
data = data[:arg.limit]
model = ConvModel(data.size(), k=arg.k, emb_size=arg.emb_size,
gadd=arg.gadditional, radd=arg.radditional, range=arg.range,
min_sigma=arg.min_sigma, fix_value=arg.fix_value, encoder=arg.encoder)
# model = ConvModelFlat(data.size(), k=arg.k,
# gadd=arg.gadditional, radd=arg.radditional, range=arg.range,
# min_sigma=arg.min_sigma, fix_value=arg.fix_value)
if arg.cuda:
model.cuda()
data = data.cuda()
data, target = Variable(data), Variable(data)
optimizer = optim.Adam(
list(model.parameters()), lr=arg.lr)
n, c, h, w = data.size()
print('pretraining')
model.train_decoder(data, epochs=arg.pretrain_epochs, cuda=arg.cuda)
print('training')
for epoch in trange(arg.epochs):
optimizer.zero_grad()
outputs, _ = model(depth=arg.depth, data=data)
# reg=lambda x: util.kl_batch(x)[None, None].expand(n, 1) * 10000
# reg = lambda x: x.norm(dim=1, keepdim=True)
# reg=lambda x: F.relu(x.norm(dim=1, keepdim=True) - 1.0) * 1000
# reg=lambda x : torch.zeros(x.size(0), 1)
rec_losses = []
for i, o in enumerate(outputs):
rec_losses.append( F.binary_cross_entropy(o, target, reduce=False).view(n, -1).sum(dim=1, keepdim=True) )
#
# losses = torch.cat(rec_losses + reg_losses, dim=1).mean(dim=0)
# regularize sigmas
_, sigmas, _ = model.adj.hyper()
reg = sigmas.norm().mean()
loss = torch.cat(rec_losses, dim=1).sum()
# print(loss, reg)
# sys.exit()
tloss = loss + arg.regweight * reg
tloss.backward()
optimizer.step()
writer.add_scalar('conv/train-loss', loss.item(), epoch)
if epoch % arg.plot_every == 0:
plt.figure(figsize=(8, 2))
# print(losses)
# if arg.depth > 1:
# print(' adj', model.adj.params.grad.mean().item())
# # print(' lin', next(model.decoder.parameters()).grad.mean().item())
plt.cla()
plt.imshow(np.transpose(torchvision.utils.make_grid(data.data[:16, :]).cpu().numpy(), (1, 2, 0)),
interpolation='nearest')
plt.savefig('./conv/inp.{:05d}.png'.format(epoch))
# Plot the results
with torch.no_grad():
outputs, _ = model(depth=arg.depth, train=False, data=data)
for d, o in enumerate(outputs):
plt.cla()
plt.imshow(np.transpose(torchvision.utils.make_grid(o.data[:16, :]).cpu().numpy(), (1, 2, 0)),
interpolation='nearest')
plt.savefig('./conv/rec.{:05d}.{:02d}.png'.format(epoch, d))
plt.figure(figsize=(8, 8))
means, sigmas, values = model.adj.hyper()
means, sigmas, values = means.data, sigmas.data, values.data
means = torch.cat([model.adj.outs_inf.data.float(), means], dim=1)
if arg.draw_matrix:
plt.cla()
s = model.adj.size()
util.plot1d(means, sigmas, values.squeeze(), shape=s)
plt.xlim((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))
plt.ylim((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))
plt.savefig('./conv/means.{:05}.pdf'.format(epoch))
graph = np.concatenate([means.round().long().cpu().numpy(), values.cpu().numpy()], axis=1)
np.savetxt('graph.{:05}.csv'.format(epoch), graph)
"""
Plot the data, reconstructions and components
"""
w, h = 24, 1 + arg.depth + arg.k
mround = means.round().long()
plt.figure(figsize=(w, h))
norm = mpl.colors.Normalize(vmin=-1.0,
vmax=1.0) # doing this manually, the nx code produces very strange results
map = mpl.cm.ScalarMappable(norm=norm, cmap=plt.cm.RdYlBu)
for i in range(w):
# plot the image
ax = plt.subplot(h, w, i + 1)
im = np.transpose(data[i, :, :, :].cpu().numpy(), (1, 2, 0))
im = np.squeeze(im)
ax.imshow(im, interpolation='nearest', origin='upper', cmap='gray_r')
if i == 0:
ax.set_ylabel('image')
clean(ax)
# plot the reconstructions
for r, output in enumerate(outputs):
ax = plt.subplot(h, w, w*(r+1) +(i + 1))
im = np.transpose(output[i, :, :, :].cpu().numpy(), (1, 2, 0))
im = np.squeeze(im)
ax.imshow(im, interpolation='nearest', origin='upper', cmap='gray_r')
if i == 0:
ax.set_ylabel('rec. {}'.format(r))
clean(ax)
# plot the components
for c in range(arg.k):
ax = plt.subplot(h, w, w*(c+1+len(outputs)) +(i + 1))
comp = mround.view(-1, arg.k, 2)[i, c, 1]
mult = values.view(-1, arg.k)[i, c]
color = np.asarray(map.to_rgba(mult))[:3]
im = np.transpose(data[comp, :, :, :].cpu().numpy(), (1, 2, 0))
im = im * (1.0 - color)
im = 1.0 - im
ax.imshow(im,
interpolation='nearest',
origin='upper')
clean(ax)
if i == 0:
ax.set_ylabel('comp. {}'.format(c+1))
plt.subplots_adjust(wspace=None, hspace=None)
#fig.tight_layout()
plt.savefig('./conv/examples{:03}.pdf'.format(epoch), dpi=72)
"""
Plot the embeddings
"""
if arg.depth == 2:
map = None
else:
norm = mpl.colors.Normalize(vmin=1.0, vmax=arg.depth)
map = mpl.cm.ScalarMappable(norm=norm, cmap=plt.cm.tab10)
latents = model.encoder(data) if arg.encoder else model.embedding.data
images = data.data.cpu().permute(0, 2, 3, 1).numpy()[:PLOT_MAX, :]
ax = None
size = None
for d in range(arg.depth):
if d == 0:
color = None
elif map is None:
color = np.asarray([0.0, 0.0, 1.0, 1.0])
else:
color = map.to_rgba(d)
l2 = latents[:PLOT_MAX, :2]
ax, size = util.scatter_imgs(l2.cpu().numpy(), images, ax=ax, color=color, size=size)
if d < arg.depth - 1:
latents = model.adj(latents, train=False)
util.clean(ax)
plt.savefig('./conv/latent-space.{:05}.pdf'.format(epoch), dpi=600)
"""
Plot the graph (reasonable results for small datasets)
"""
if arg.draw_graph:
# Plot the graph
                outputs, _ = model(depth=arg.depth, train=False, data=data)
g = nx.MultiDiGraph()
g.add_nodes_from(range(data.size(0)))
print('Drawing graph at ', epoch, 'epochs')
for i in range(means.size(0)):
m = means[i, :].round().long()
v = values[i]
g.add_edge(m[1].item(), m[0].item(), weight=v.item() )
# print(m[1].item(), m[0].item(), v.item())
plt.figure(figsize=(8,8))
ax = plt.subplot(111)
pos = nx.spring_layout(g, iterations=100, k=5/math.sqrt(data.size(0)))
# pos = nx.circular_layout(g)
nx.draw_networkx_nodes(g, pos, node_size=30, node_color='w', node_shape='s', axes=ax)
# edges = nx.draw_networkx_edges(g, pos, edge_color=values.data.view(-1), edge_vmin=0.0, edge_vmax=1.0, cmap='bone')
weights = [d['weight'] for (_, _, d) in g.edges(data=True)]
colors = map.to_rgba(weights)
nx.draw_networkx_edges(g, pos, width=1.0, edge_color=colors, axes=ax)
ims = 0.03
xmin, xmax = float('inf'), float('-inf')
ymin, ymax = float('inf'), float('-inf')
out0 = outputs[1].data
# out1 = outputs[1].data
for i, coords in pos.items():
extent = (coords[0] - ims, coords[0] + ims, coords[1] - ims, coords[1] + ims)
# extent0 = (coords[0] - ims, coords[0] + ims, coords[1] + ims, coords[1] + 3 * ims)
# extent1 = (coords[0] - ims, coords[0] + ims, coords[1] + 3 * ims, coords[1] + 5 * ims)
ax.imshow(data[i].cpu().squeeze(), cmap='gray_r', extent=extent, zorder=100, alpha=1)
# ax.imshow(out0[i].cpu().squeeze(), cmap='pink_r', extent=extent0, zorder=100, alpha=0.85)
# ax.imshow(out1[i].cpu().squeeze(), cmap='pink_r', extent=extent1, zorder=100)
xmin, xmax = min(coords[0], xmin), max(coords[0], xmax)
ymin, ymax = min(coords[1], ymin), max(coords[1], ymax)
MARGIN = 0.3
ax.set_xlim(xmin-MARGIN, xmax+MARGIN)
ax.set_ylim(ymin-MARGIN, ymax+MARGIN)
plt.axis('off')
plt.savefig('./conv/graph.{:05}.pdf'.format(epoch), dpi=300)
print('Finished Training.')
def test():
"""
Poor man's unit test
"""
    indices = Variable(torch.tensor([[0, 1], [1, 0], [2, 1]]))  # integer indices cannot require gradients
values = Variable(torch.tensor([1.0, 2.0, 3.0]), requires_grad=True)
size = Variable(torch.tensor([3, 2]))
wsparse = torch.sparse.FloatTensor(indices.t(), values, (3,2))
wdense = Variable(torch.tensor([[0.0,1.0],[2.0,0.0],[0.0, 3.0]]), requires_grad=True)
x = Variable(torch.randn(2, 4), requires_grad=True)
#
# print(wsparse)
# print(wdense)
# print(x)
# dense version
mul = torch.mm(wdense, x)
loss = mul.norm()
loss.backward()
print('dw', wdense.grad)
print('dx', x.grad)
del loss
# spmm version
# mul = torch.mm(wsparse, x)
# loss = mul.norm()
# loss.backward()
#
# print('dw', values.grad)
# print('dx', x.grad)
x.grad = None
values.grad = None
mul = util.SparseMultCPU.apply(indices.t(), values, size, x)
loss = mul.norm()
loss.backward()
print('dw', values.grad)
print('dx', x.grad)
    # Finite-difference gradient check for w
for h in [1e-4, 1e-5, 1e-6]:
grad = torch.zeros(values.size(0))
for i in range(values.size(0)):
nvalues = values.clone()
nvalues[i] = nvalues[i] + h
mul = util.SparseMultCPU.apply(indices.t(), values, size, x)
loss0 = mul.norm()
mul = util.SparseMultCPU.apply(indices.t(), nvalues, size, x)
loss1 = mul.norm()
grad[i] = (loss1-loss0)/h
print('hw', h, grad)
    # Finite-difference gradient check for x
for h in [1e-4, 1e-5, 1e-6]:
grad = torch.zeros(x.size())
for i in range(x.size(0)):
for j in range(x.size(1)):
nx = x.clone()
nx[i, j] = x[i, j] + h
mul = util.SparseMultCPU.apply(indices.t(), values, size, x)
loss0 = mul.norm()
mul = util.SparseMultCPU.apply(indices.t(), values, size, nx)
loss1 = mul.norm()
grad[i, j] = (loss1-loss0)/h
print('hx', h, grad)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--test", dest="test",
help="Run the unit tests.",
action="store_true")
parser.add_argument("-e", "--epochs",
dest="epochs",
help="Number of epochs",
default=250, type=int)
parser.add_argument("-P", "--pretrain-epochs",
dest="pretrain_epochs",
help="Number of epochs spent optimizing the embeddings. After this cutoff, the embeddings are frozen, and the adjacency matrix is trained.",
default=1000, type=int)
parser.add_argument("-E", "--emb_size",
dest="emb_size",
help="Size of the node embeddings.",
default=16, type=int)
parser.add_argument("-k", "--num-points",
dest="k",
help="Number of index tuples",
default=3, type=int)
parser.add_argument("-L", "--limit",
dest="limit",
help="Number of data points",
default=None, type=int)
parser.add_argument("-a", "--gadditional",
dest="gadditional",
help="Number of additional points sampled globally per index-tuple",
default=32, type=int)
parser.add_argument("-A", "--radditional",
dest="radditional",
help="Number of additional points sampled locally per index-tuple",
default=16, type=int)
parser.add_argument("-R", "--range",
dest="range",
help="Range in which the local points are sampled",
default=128, type=int)
parser.add_argument("-d", "--depth",
dest="depth",
help="Number of graph convolutions",
default=5, type=int)
parser.add_argument("-p", "--plot-every",
dest="plot_every",
help="Numer of epochs to wait between plotting",
default=100, type=int)
parser.add_argument("-l", "--learn-rate",
dest="lr",
help="Learning rate",
default=0.01, type=float)
parser.add_argument("-r", "--seed",
dest="seed",
help="Random seed",
default=4, type=int)
parser.add_argument("-c", "--cuda", dest="cuda",
help="Whether to use cuda.",
action="store_true")
#
# parser.add_argument("-S", "--undirected", dest="undirected",
# help="Use an undirected graph",
# action="store_true")
parser.add_argument("-J", "--draw-matrix", dest="draw_matrix",
help="Draw the adjacency matrix",
action="store_true")
parser.add_argument("-G", "--draw-graph", dest="draw_graph",
help="Draw the graph",
action="store_true")
parser.add_argument("-F", "--fix-value", dest="fix_value",
help="Fix the values of the matrix to 1/k",
action="store_true")
parser.add_argument("-N", "--use-encoder", dest="encoder",
help="Whether to use an encoder",
action="store_true")
parser.add_argument("-D", "--data", dest="data",
help="Data directory",
default='./data')
parser.add_argument("-M", "--min-sigma",
dest="min_sigma",
help="Minimal sigma value",
default=0.0, type=float)
parser.add_argument("-Q", "--regularization-weight",
dest="regweight",
help="Regularization weight (the bigger this is, the faster the sigma's converge).",
default=0.0, type=float)
args = parser.parse_args()
if args.test:
test()
        print('Tests completed successfully.')
sys.exit()
print('OPTIONS', args)
go(args)
|
192665
|
import setuptools #enables develop
import os
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
from edgeml_pytorch.utils import findCUDA
if findCUDA() is not None:
setuptools.setup(
name='fastgrnn_cuda',
ext_modules=[
CUDAExtension('fastgrnn_cuda', [
'fastgrnn_cuda.cpp',
'fastgrnn_cuda_kernel.cu',
]),
],
cmdclass={
'build_ext': BuildExtension
}
)
|
192668
|
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from armulator.armv6.bits_ops import add, sub
from bitstring import BitArray
from armulator.armv6.arm_exceptions import UndefinedInstructionException
from armulator.armv6.enums import InstrSet
class Rfe(AbstractOpcode):
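    """RFE (Return From Exception): loads a new PC value and the SPSR from two
    consecutive words at the computed address, optionally writing the updated
    address back to the base register."""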
def __init__(self, increment, word_higher, wback, n):
super(Rfe, self).__init__()
self.increment = increment
self.word_higher = word_higher
self.wback = wback
self.n = n
def execute(self, processor):
if processor.condition_passed():
if processor.registers.current_mode_is_hyp():
raise UndefinedInstructionException()
elif (not processor.registers.current_mode_is_not_user() or
processor.registers.current_instr_set() == InstrSet.InstrSet_ThumbEE):
print "unpredictable"
else:
address = (processor.registers.get(self.n)
if self.increment
else sub(processor.registers.get(self.n), BitArray(bin="1000"), 32))
if self.word_higher:
address = add(address, BitArray(bin="100"), 32)
new_pc_value = processor.mem_a_get(address, 4)
spsr_value = processor.mem_a_get(add(address, BitArray(bin="100"), 32), 4)
if self.wback:
processor.registers.set(
self.n,
(add(processor.registers.get(self.n), BitArray(bin="1000"), 32)
if self.increment
else sub(processor.registers.get(self.n), BitArray(bin="1000"), 32))
)
processor.registers.cpsr_write_by_instr(spsr_value, BitArray(bin="1111"), True)
if (processor.registers.cpsr.get_m() == "0b11010" and
processor.registers.cpsr.get_j() and
processor.registers.cpsr.get_t()):
print "unpredictable"
else:
processor.branch_write_pc(new_pc_value)
|
192670
|
import aio_pika
import asyncio
import logging
import json
import os
from tornado.httpclient import AsyncHTTPClient
from tornado.iostream import StreamClosedError
from tornado.web import HTTPError, RequestHandler
from urllib.parse import urlencode
import zipfile
from datamart_core.common import log_future
from datamart_geo import GeoData
from datamart_materialize import get_writer
from .graceful_shutdown import GracefulApplication
logger = logging.getLogger(__name__)
BUCKETS = [
0.5, 1.0, 5.0, 10.0, 20.0, 30.0, 60.0, 120.0, 300.0, 600.0,
float('inf'),
]
class BaseHandler(RequestHandler):
"""Base class for all request handlers.
"""
application: 'Application'
def set_default_headers(self):
self.set_header('Server', 'Auctus/%s' % os.environ['DATAMART_VERSION'])
def get_json(self):
type_ = self.request.headers.get('Content-Type', '')
if not type_.startswith('application/json'):
self.send_error_json(400, "Expected JSON")
raise HTTPError(400)
try:
return json.loads(self.request.body.decode('utf-8'))
except UnicodeDecodeError:
self.send_error_json(400, "Invalid character encoding")
raise HTTPError(400)
except json.JSONDecodeError:
self.send_error_json(400, "Invalid JSON")
raise HTTPError(400)
def send_json(self, obj):
if isinstance(obj, list):
obj = {'results': obj}
elif not isinstance(obj, dict):
raise ValueError("Can't encode %r to JSON" % type(obj))
self.set_header('Content-Type', 'application/json; charset=utf-8')
return self.finish(json.dumps(obj))
def send_error_json(self, status, message):
logger.info("Sending error %s JSON: %s", status, message)
self.set_status(status)
return self.send_json({'error': message})
async def send_file(self, path, name):
if zipfile.is_zipfile(path):
type_ = 'application/zip'
name += '.zip'
else:
type_ = 'application/octet-stream'
self.set_header('Content-Type', type_)
self.set_header('X-Content-Type-Options', 'nosniff')
self.set_header('Content-Disposition',
'attachment; filename="%s"' % name)
logger.info("Sending file...")
with open(path, 'rb') as fp:
self.set_header('Content-Length', fp.seek(0, 2))
fp.seek(0, 0)
BUFSIZE = 40960
buf = fp.read(BUFSIZE)
try:
while buf:
self.write(buf)
if len(buf) != BUFSIZE:
break
buf = fp.read(BUFSIZE)
await self.flush()
return await self.finish()
except StreamClosedError:
return
def prepare(self):
super(BaseHandler, self).prepare()
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Methods', 'POST')
self.set_header('Access-Control-Allow-Headers', 'Content-Type')
self.set_header(
'Access-Control-Expose-Headers',
'Content-Type, Content-Length, Content-Disposition',
)
def options(self):
# CORS pre-flight
self.set_status(204)
return self.finish()
def validate_format(self, format, format_options):
writer_cls = get_writer(format)
format_ext = None
if hasattr(writer_cls, 'parse_options'):
format_options = writer_cls.parse_options(format_options)
elif format_options:
self.send_error_json(400, "Invalid output options")
raise HTTPError(400)
if hasattr(writer_cls, 'extension'):
format_ext = writer_cls.extension
return format, format_options, format_ext
def read_format(self, default_format='csv'):
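        """Read the requested output format from the query string (or the legacy
        multipart 'format' file) together with any 'format_*' options, and validate
        them against the writer for that format."""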
if 'format' in self.request.files:
# Legacy
format = self.request.files['format'][0].body.decode('utf-8')
format_options = {}
else:
format = self.get_query_argument('format', default_format)
format_options = {}
for n, v in self.request.query_arguments.items():
if n.startswith('format_'):
if len(v) != 1:
self.send_error_json(
400,
"Multiple occurrences of format option %r" % n[7:],
)
raise HTTPError(400)
format_options[n[7:]] = self.decode_argument(v[0])
return self.validate_format(format, format_options)
@staticmethod
def serialize_format(format, format_options):
dct = {'format': format}
for k, v in format_options.items():
dct['format_' + k] = v
return urlencode(dct)
class Application(GracefulApplication):
def __init__(self, *args, es, redis_client, lazo, **kwargs):
super(Application, self).__init__(*args, **kwargs)
self.is_closing = False
self.frontend_url = os.environ['FRONTEND_URL'].rstrip('/')
self.api_url = os.environ['API_URL'].rstrip('/')
self.elasticsearch = es
self.redis = redis_client
self.lazo_client = lazo
if os.environ.get('NOMINATIM_URL'):
self.nominatim = os.environ['NOMINATIM_URL']
else:
self.nominatim = None
logger.warning(
"$NOMINATIM_URL is not set, not resolving addresses"
)
self.geo_data = GeoData.from_local_cache()
self.channel = None
self.custom_fields = {}
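        # $CUSTOM_FIELDS, if set, is a JSON object mapping a field name to
        # {label, type, required}; missing options get defaults and anything other
        # than the 'integer', 'text' or 'keyword' types is rejected below.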
custom_fields = os.environ.get('CUSTOM_FIELDS', None)
if custom_fields:
custom_fields = json.loads(custom_fields)
if custom_fields:
for field, opts in custom_fields.items():
opts.setdefault('label', field)
opts.setdefault('required', False)
opts.setdefault('type', 'text')
if (
not opts.keys() <= {'label', 'type', 'required'}
or not isinstance(opts['label'], str)
or not isinstance(opts['required'], bool)
or not isinstance(opts['type'], str)
or opts['type'] not in ('integer', 'text', 'keyword')
):
raise ValueError("Invalid custom field %s" % field)
self.custom_fields = custom_fields
logger.info(
"Custom fields: %s",
", ".join(self.custom_fields.keys()),
)
self.sources_counts = {}
self.recent_discoveries = []
asyncio.get_event_loop().run_until_complete(
asyncio.get_event_loop().create_task(self._amqp())
)
async def _amqp(self):
connection = await aio_pika.connect_robust(
host=os.environ['AMQP_HOST'],
port=int(os.environ['AMQP_PORT']),
login=os.environ['AMQP_USER'],
password=os.environ['AMQP_PASSWORD'],
)
self.channel = await connection.channel()
await self.channel.set_qos(prefetch_count=1)
# Declare profiling exchange (to publish datasets via upload)
self.profile_exchange = await self.channel.declare_exchange(
'profile',
aio_pika.ExchangeType.FANOUT,
)
# Start statistics-fetching coroutine
log_future(
asyncio.get_event_loop().create_task(self.update_statistics()),
logger,
should_never_exit=True,
)
async def update_statistics(self):
http_client = AsyncHTTPClient()
while True:
try:
# Get counts from coordinator
response = await http_client.fetch(
'http://coordinator:8003/api/statistics',
)
statistics = json.loads(response.body.decode('utf-8'))
except Exception:
logger.exception("Can't get statistics from coordinator")
else:
self.sources_counts = statistics['sources_counts']
self.recent_discoveries = statistics['recent_discoveries']
await asyncio.sleep(60)
def log_request(self, handler):
if handler.request.path == '/health':
return
super(Application, self).log_request(handler)
|
192687
|
import flask
import os
app = flask.Flask(__name__)
FILE_PATH = 'data/files/'
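# Resolve the requested path under FILE_PATH and reject anything that escapes it
# (a basic path-traversal guard).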
def check_path(path):
path = os.path.normpath(os.path.join(FILE_PATH, path))
if not path.startswith(FILE_PATH):
return flask.abort(400)
return path
def get_tmp_path(path):
dirpath, basename = os.path.split(path)
basename = f".{basename}.tmp"
return os.path.join(dirpath, basename)
def make_dir(path):
try:
os.makedirs(os.path.dirname(path))
except Exception:
pass
@app.route("/files/<path:path>", methods=['PUT'])
def file_put(path):
path = check_path(path)
request_range = flask.request.headers.get('range')
start, end = map(int, request_range.split('-', 1))
body = flask.request.data
assert len(body) == end - start + 1
tmp_path = get_tmp_path(path)
if start == 0 and os.path.exists(tmp_path):
print(f"first data of {path}")
os.unlink(tmp_path)
make_dir(tmp_path)
with open(tmp_path, "a+b") as fh:
fh.seek(start, os.SEEK_SET)
fh.write(body)
print(f"write {path} {len(body)}B from {start}")
return ""
@app.route("/files/<path:path>", methods=['POST'])
def file_post(path):
path = check_path(path)
args = flask.request.json
# commit file?
if args['action'] == 'finish':
tmp_path = get_tmp_path(path)
make_dir(path)
os.rename(tmp_path, path)
print(f"commit {path}")
ret = {"status": "ok"}
else:
return flask.abort(400)
return flask.jsonify(ret)
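# Hypothetical client flow (sketch only; host, port and file name are assumptions):
# upload a file in byte ranges via PUT, then commit it with a JSON POST.
#   curl -X PUT  -H "range: 0-4"  --data-binary "hello"  http://localhost:5000/files/demo.txt
#   curl -X PUT  -H "range: 5-10" --data-binary " world" http://localhost:5000/files/demo.txt
#   curl -X POST -H "Content-Type: application/json" -d '{"action": "finish"}' http://localhost:5000/files/demo.txt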
|
192714
|
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from SVC_Utils import *
from sklearn.metrics import roc_curve, auc
# determine device to run network on (runs on gpu if available)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def eval_target_net(net, testloader, classes=None):
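    """Evaluate classification accuracy of `net` on `testloader`; if `classes` is
    given, per-class accuracy is printed as well."""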
if classes is not None:
class_correct = np.zeros(10)
class_total = np.zeros(10)
total = 0
correct = 0
with torch.no_grad():
net.eval()
for i, (imgs, lbls) in enumerate(testloader):
imgs, lbls = imgs.to(device), lbls.to(device)
output = net(imgs)
predicted = output.argmax(dim=1)
total += imgs.size(0)
correct += predicted.eq(lbls).sum().item()
if classes is not None:
for prediction, lbl in zip(predicted, lbls):
class_correct[lbl] += prediction == lbl
class_total[lbl] += 1
accuracy = 100*(correct/total)
if classes is not None:
for i in range(len(classes)):
print('Accuracy of %s : %.2f %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
print("\nAccuracy = %.2f %%\n\n" % (accuracy) )
return accuracy
def eval_attack_net(attack_net, target, target_train, target_out, k):
"""Assess accuracy, precision, and recall of attack model for in training set/out of training set classification.
Edited for use with SVCs."""
in_predicts=[]
out_predicts=[]
losses = []
if type(target) is not Pipeline:
target_net=target
target_net.eval()
attack_net.eval()
precisions = []
recalls = []
accuracies = []
#for threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
train_top = np.empty((0,2))
out_top = np.empty((0,2))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
mini_batch_size = train_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
#[mini_batch_size x num_classes] tensors, (0,1) probabilities for each class for each sample)
if type(target) is Pipeline:
traininputs=train_imgs.view(train_imgs.shape[0], -1)
outinputs=out_imgs.view(out_imgs.shape[0], -1)
train_posteriors=torch.from_numpy(target.predict_proba(traininputs)).float()
out_posteriors=torch.from_numpy(target.predict_proba(outinputs)).float()
else:
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
#[k x mini_batch_size] tensors, (0,1) probabilities for top k probable classes
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
#Collects probabilities for predicted class.
for p in train_top_k:
in_predicts.append((p.max()).item())
for p in out_top_k:
out_predicts.append((p.max()).item())
if type(target) is not Pipeline:
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
#print("train_top_k = ",train_top_k)
#print("out_top_k = ",out_top_k)
#print(train_top.shape)
train_lbl = torch.ones(mini_batch_size).to(device)
out_lbl = torch.zeros(mini_batch_size).to(device)
#Takes in probabilities for top k most likely classes, outputs ~1 (in training set) or ~0 (out of training set)
train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
for j, t in enumerate(thresholds):
true_positives[j] += (train_predictions >= t).sum().item()
false_positives[j] += (out_predictions >= t).sum().item()
false_negatives[j] += (train_predictions < t).sum().item()
#print(train_top >= threshold)
#print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
correct[j] += (train_predictions >= t).sum().item()
correct[j] += (out_predictions < t).sum().item()
total[j] += train_predictions.size(0) + out_predictions.size(0)
#print(true_positives,',',false_positives,',',false_negatives)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
plt.plot(recalls, precisions)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
def eval_attack_roc(attack_net, target_net, target_train, target_out, k):
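    """Run the attack model over member (target_train) and non-member (target_out)
    batches, report accuracy/precision/recall at a fixed 0.5 threshold, and return
    the ROC curve (fpr, tpr, auc) over the attack scores."""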
losses = []
target_net.eval()
attack_net.eval()
total = 0
correct = 0
train_top = np.empty((0,2))
out_top = np.empty((0,2))
true_positives = 0
false_positives = 0
false_negatives = 0
predictions = np.array([])
labels = np.array([])
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
train_size = train_imgs.shape[0]
out_size = out_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top_k = train_sort[:,:k].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top_k = out_sort[:,:k].clone().to(device)
train_top = np.vstack((train_top,train_top_k[:,:2].cpu().detach().numpy()))
out_top = np.vstack((out_top, out_top_k[:,:2].cpu().detach().numpy()))
#print("train_top_k = ",train_top_k)
#print("out_top_k = ",out_top_k)
train_lbl = torch.ones(train_size).to(device)
out_lbl = torch.zeros(out_size).to(device)
train_predictions = F.sigmoid(torch.squeeze(attack_net(train_top_k)))
out_predictions = F.sigmoid(torch.squeeze(attack_net(out_top_k)))
predictions = np.concatenate((predictions, train_predictions.detach().cpu().numpy()), axis=0)
labels = np.concatenate((labels, np.ones(train_size)), axis=0)
predictions = np.concatenate((predictions, out_predictions.detach().cpu().numpy()), axis=0)
labels = np.concatenate((labels, np.zeros(out_size)), axis=0)
#print("train_predictions = ",train_predictions)
#print("out_predictions = ",out_predictions)
true_positives += (train_predictions >= 0.5).sum().item()
false_positives += (out_predictions >= 0.5).sum().item()
false_negatives += (train_predictions < 0.5).sum().item()
correct += (train_predictions>=0.5).sum().item()
correct += (out_predictions<0.5).sum().item()
total += train_predictions.size(0) + out_predictions.size(0)
accuracy = 100 * correct / total
precision = true_positives / (true_positives + false_positives) if true_positives + false_positives != 0 else 0
recall = true_positives / (true_positives + false_negatives) if true_positives + false_negatives !=0 else 0
print("Membership Inference Performance")
print("Accuracy = %.2f%%, Precision = %.2f, Recall = %.2f" % (accuracy, precision, recall))
fpr, tpr, thresholds = roc_curve(labels, predictions, pos_label=1)
roc_auc = auc(fpr, tpr)
return fpr, tpr, roc_auc
def eval_membership_inference(target_net, target_train, target_out):
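    """Threshold-only membership inference (no attack model): the target network's
    maximum softmax probability is swept over a range of thresholds and
    accuracy/precision/recall are reported for each."""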
target_net.eval()
precisions = []
recalls = []
accuracies = []
#for threshold in np.arange(0.5, 1, 0.005):
thresholds = np.arange(0.5, 1, 0.005)
total = np.zeros(len(thresholds))
correct = np.zeros(len(thresholds))
true_positives = np.zeros(len(thresholds))
false_positives = np.zeros(len(thresholds))
false_negatives = np.zeros(len(thresholds))
for i, ((train_imgs, _), (out_imgs, _)) in enumerate(zip(target_train, target_out)):
mini_batch_size = train_imgs.shape[0]
train_imgs, out_imgs = train_imgs.to(device), out_imgs.to(device)
train_posteriors = F.softmax(target_net(train_imgs.detach()), dim=1)
out_posteriors = F.softmax(target_net(out_imgs.detach()), dim=1)
train_sort, _ = torch.sort(train_posteriors, descending=True)
train_top = train_sort[:,0].clone().to(device)
out_sort, _ = torch.sort(out_posteriors, descending=True)
out_top = out_sort[:,0].clone().to(device)
#print(train_top.shape)
for j, t in enumerate(thresholds):
true_positives[j] += (train_top >= t).sum().item()
false_positives[j] += (out_top >= t).sum().item()
false_negatives[j] += (train_top < t).sum().item()
#print(train_top >= threshold)
#print((train_top >= threshold).sum().item(),',',(out_top >= threshold).sum().item())
correct[j] += (train_top >= t).sum().item()
correct[j] += (out_top < t).sum().item()
total[j] += train_top.size(0) + out_top.size(0)
#print(true_positives,',',false_positives,',',false_negatives)
for j, t in enumerate(thresholds):
accuracy = 100 * correct[j] / total[j]
precision = true_positives[j] / (true_positives[j] + false_positives[j]) if true_positives[j] + false_positives[j] != 0 else 0
recall = true_positives[j] / (true_positives[j] + false_negatives[j]) if true_positives[j] + false_negatives[j] !=0 else 0
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
print("threshold = %.4f, accuracy = %.2f, precision = %.2f, recall = %.2f" % (t, accuracy, precision, recall))
plt.plot(recalls, precisions)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.show()
|
192728
|
from pip import get_installed_distributions
from pip.commands import install
install_cmd = install.InstallCommand()
options, args = install_cmd.parse_args([package.project_name
for package in
get_installed_distributions()])
options.upgrade = True
install_cmd.run(options, args) # Chuck this in a try/except and print as wanted
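# Note: this snippet relies on pip's internal, pre-10.x API (pip.get_installed_distributions
# and pip.commands); later pip releases moved these internals, so the imports above
# will fail there.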
|
192768
|
from abc import ABCMeta, abstractmethod
import logging
from raco.expression.visitor import ExpressionVisitor
LOG = logging.getLogger(__name__)
class Algebra(object):
__metaclass__ = ABCMeta
@abstractmethod
def opt_rules(self, **kwargs):
raise NotImplementedError("{op}.opt_rules()".format(op=type(self)))
class Language(object):
__metaclass__ = ABCMeta
EQ = "=="
NEQ = "!="
PLUS = "+"
MINUS = "-"
DIVIDE = "/"
TIMES = "*"
IDIVIDE = "//"
GT = ">"
LT = "<"
GTEQ = ">="
LTEQ = "<="
MOD = "%"
LIKE = "like"
# By default, reuse scans
reusescans = True
@staticmethod
def preamble(query=None, plan=None):
return ""
@staticmethod
def postamble(query=None, plan=None):
return ""
@staticmethod
def body(compileResult):
return compileResult
@classmethod
def compile_stringliteral(cls, value):
return '"%s"' % value
@staticmethod
def log(txt):
"""Emit code that will generate a log message at runtime. Defaults to
nothing."""
return ""
@classmethod
def compile_numericliteral(cls, value):
return '%s' % value
@classmethod
def compile_attribute(cls, attr, **kwargs):
return attr.compile()
@classmethod
def conjunction(cls, *args):
return cls.expression_combine(args, operator="and")
@classmethod
def disjunction(cls, *args):
return cls.expression_combine(args, operator="or")
@classmethod
def compile_expression(cls, expr, **kwargs):
compilevisitor = CompileExpressionVisitor(cls, **kwargs)
expr.accept(compilevisitor)
return compilevisitor.getresult()
@classmethod
def expression_combine(cls, args, operator="and"):
"""Combine the given arguments using the specified infix operator"""
delim = " %s " % operator
return delim.join(args)
class CompileExpressionVisitor(ExpressionVisitor):
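    """Compiles an expression tree bottom-up: each visit_* method pops its operands'
    already-compiled strings from the stack and pushes its own compiled string, so
    getresult() returns the single remaining string for the whole expression."""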
def __init__(self, language, **kwargs):
self.language = language
self.combine = language.expression_combine
self.stack = []
self.kwargs = kwargs
def getresult(self):
assert len(self.stack) == 1, \
"stack is size {0} != 1".format(len(self.stack))
return self.stack.pop()
def __visit_BinaryOperator__(self, binaryexpr):
right = self.stack.pop()
left = self.stack.pop()
return left, right
def appendbinop(self, binaryexpr, languageop):
left, right = self.__visit_BinaryOperator__(binaryexpr)
val = self.combine([left, right], operator=languageop)
self.stack.append(val)
def visit_NOT(self, unaryexpr):
inputexpr = self.stack.pop()
self.stack.append(self.language.negation(inputexpr))
def visit_AND(self, binaryexpr):
left, right = self.__visit_BinaryOperator__(binaryexpr)
self.stack.append(self.language.conjunction(left, right))
def visit_OR(self, binaryexpr):
left, right = self.__visit_BinaryOperator__(binaryexpr)
self.stack.append(self.language.disjunction(left, right))
def visit_EQ(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.EQ)
def visit_NEQ(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.NEQ)
def visit_GT(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.GT)
def visit_LT(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.LT)
def visit_GTEQ(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.GTEQ)
def visit_LTEQ(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.LTEQ)
def visit_LIKE(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.LIKE)
def visit_NamedAttributeRef(self, named):
self.stack.append(
self.language.compile_attribute(
named,
**self.kwargs))
def visit_UnnamedAttributeRef(self, unnamed):
LOG.debug("expr %s is UnnamedAttributeRef", unnamed)
self.stack.append(
self.language.compile_attribute(
unnamed,
**self.kwargs))
def visit_NumericLiteral(self, numericliteral):
self.stack.append(self.language.compile_numericliteral(numericliteral))
def visit_StringLiteral(self, stringliteral):
self.stack.append(self.language.compile_stringliteral(stringliteral))
def visit_DIVIDE(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.DIVIDE)
def visit_PLUS(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.PLUS)
def visit_MINUS(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.MINUS)
def visit_IDIVIDE(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.IDIVIDE)
def visit_MOD(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.MOD)
def visit_TIMES(self, binaryexpr):
self.appendbinop(binaryexpr, self.language.TIMES)
def visit_NEG(self, unaryexpr):
inputexpr = self.stack.pop()
self.stack.append(self.language.negative(inputexpr))
def visit_Case(self, caseexpr):
if caseexpr.else_expr is not None:
else_compiled = self.stack.pop()
when_compiled = []
for _ in range(len(caseexpr.when_tuples)):
thenexpr, ifexpr = self.stack.pop(), self.stack.pop()
when_compiled.insert(0, (ifexpr, thenexpr))
self.stack.append(
self.language.conditional(
when_compiled,
else_compiled))
def visit_NamedStateAttributeRef(self, attr):
self.stack.append(self.language.compile_attribute(attr, **self.kwargs))
def visit_UnaryFunction(self, expr):
inputexpr = self.stack.pop()
self.stack.append(
self.language.function_call(
type(expr).__name__,
inputexpr))
def visit_BinaryFunction(self, expr):
left, right = self.__visit_BinaryOperator__(expr)
self.stack.append(
self.language.function_call(
type(expr).__name__,
left,
right))
def visit_CustomBinaryFunction(self, expr):
left, right = self.__visit_BinaryOperator__(expr)
self.stack.append(
self.language.function_call(
expr.name,
left,
right, custom=True))
def visit_NaryFunction(self, expr):
arglist = []
for _ in range(len(expr.operands)):
arglist.insert(0, self.stack.pop())
self.stack.append(
self.language.function_call(
type(expr).__name__,
*arglist))
def visit_CustomZeroaryFunction(self, expr):
self.stack.append(
self.language.function_call(expr.name, custom=True))
def visit_CAST(self, expr):
inputexpr = self.stack.pop()
self.stack.append(self.language.cast(expr._type, inputexpr))
|
192775
|
from __future__ import with_statement # this is to work with python2.5
from validation import vworkspace
with vworkspace() as w:
w.props.PRETTYPRINT_FINAL_RETURN = True
w.props.PRETTYPRINT_ALL_LABELS = True
w.props.PRETTYPRINT_EMPTY_BLOCKS = True
w.props.PRETTYPRINT_UNSTRUCTURED = True
w.props.memory_effects_only = False
w.all_functions.validate_phases("unspaghettify","suppress_dead_code","dead_code_elimination",display_initial=True)
|
192846
|
import os
import sys
import json
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class WindowClassificationTrainUpdateSchedulerParam(QtWidgets.QWidget):
forward_loss_param = QtCore.pyqtSignal();
backward_optimizer_param = QtCore.pyqtSignal();
def __init__(self):
super().__init__()
self.cfg_setup()
self.title = 'Experiment {} - Update Scheduler Params'.format(self.system["experiment"])
self.left = 10
self.top = 10
self.width = 900
self.height = 600
self.scheduler_ui_mxnet = [];
self.scheduler_ui_keras = [];
self.scheduler_ui_pytorch = [];
self.current_scheduler = {};
self.current_scheduler["name"] = "";
self.current_scheduler["params"] = {};
self.initUI()
def cfg_setup(self):
with open('base_classification.json') as json_file:
self.system = json.load(json_file)
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height);
# Backward
self.b1 = QPushButton('Back', self)
self.b1.move(600,550)
self.b1.clicked.connect(self.backward)
# Forward
self.b2 = QPushButton('Next', self)
self.b2.move(700,550)
self.b2.clicked.connect(self.forward)
# Quit
self.b3 = QPushButton('Quit', self)
self.b3.move(800,550)
self.b3.clicked.connect(self.close)
self.cb1 = QComboBox(self);
self.cb1.move(20, 20);
self.cb1.activated.connect(self.select_scheduler);
self.cb2 = QComboBox(self);
self.cb2.move(20, 20);
self.cb2.activated.connect(self.select_scheduler);
self.cb3 = QComboBox(self);
self.cb3.move(20, 20);
self.cb3.activated.connect(self.select_scheduler);
self.mxnet_schedulers_list = ["select", "lr_fixed", "lr_step_decrease", "lr_multistep_decrease"];
self.keras_schedulers_list = ["select", "lr_fixed", "lr_step_decrease", "lr_exponential_decrease", "lr_plateau_decrease"];
self.pytorch_schedulers_list = ["select", "lr_fixed", "lr_step_decrease", "lr_multistep_decrease", "lr_exponential_decrease",
"lr_plateau_decrease"];
if(self.system["backend"] == "Mxnet-1.5.1"):
self.cb1.addItems(self.mxnet_schedulers_list);
self.cb1.show();
self.cb2.hide();
self.cb3.hide();
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
self.cb2.addItems(self.keras_schedulers_list);
self.cb2.show();
self.cb1.hide();
self.cb3.hide();
elif(self.system["backend"] == "Pytorch-1.3.1"):
self.cb3.addItems(self.pytorch_schedulers_list);
self.cb3.show();
self.cb1.hide();
self.cb2.hide();
tmp = [];
self.mx_sc1_l1 = QLabel(self);
self.mx_sc1_l1.setText("No parameters (Arguments) to set.");
self.mx_sc1_l1.move(20, 100);
tmp.append(self.mx_sc1_l1);
self.scheduler_ui_mxnet.append(tmp)
tmp = [];
self.mx_sc2_l1 = QLabel(self);
self.mx_sc2_l1.setText("1. Step Size: ");
self.mx_sc2_l1.move(20, 100);
tmp.append(self.mx_sc2_l1);
self.mx_sc2_e1 = QLineEdit(self)
self.mx_sc2_e1.move(150, 100);
self.mx_sc2_e1.setText("5");
tmp.append(self.mx_sc2_e1);
self.mx_sc2_l2 = QLabel(self);
self.mx_sc2_l2.setText("2. Learning rate multiplicative factor: ");
self.mx_sc2_l2.move(20, 150);
tmp.append(self.mx_sc2_l2);
self.mx_sc2_e2 = QLineEdit(self)
self.mx_sc2_e2.move(290, 150);
self.mx_sc2_e2.setText("0.1");
tmp.append(self.mx_sc2_e2);
self.scheduler_ui_mxnet.append(tmp)
tmp = [];
self.mx_sc3_l1 = QLabel(self);
self.mx_sc3_l1.setText("1. Milestones: ");
self.mx_sc3_l1.move(20, 100);
tmp.append(self.mx_sc3_l1);
self.mx_sc3_e1 = QLineEdit(self)
self.mx_sc3_e1.move(150, 100);
self.mx_sc3_e1.setText("5, 10, 15");
tmp.append(self.mx_sc3_e1);
self.mx_sc3_l2 = QLabel(self);
self.mx_sc3_l2.setText("2. Learning rate multiplicative factor: ");
self.mx_sc3_l2.move(20, 150);
tmp.append(self.mx_sc3_l2);
self.mx_sc3_e2 = QLineEdit(self)
self.mx_sc3_e2.move(290, 150);
self.mx_sc3_e2.setText("0.1");
tmp.append(self.mx_sc3_e2);
self.scheduler_ui_mxnet.append(tmp)
tmp = [];
self.ke_sc1_l1 = QLabel(self);
self.ke_sc1_l1.setText("No parameters (Arguments) to set.");
self.ke_sc1_l1.move(20, 100);
tmp.append(self.ke_sc1_l1);
self.scheduler_ui_keras.append(tmp)
tmp = [];
self.ke_sc2_l1 = QLabel(self);
self.ke_sc2_l1.setText("1. Step Size: ");
self.ke_sc2_l1.move(20, 100);
tmp.append(self.ke_sc2_l1);
self.ke_sc2_e1 = QLineEdit(self)
self.ke_sc2_e1.move(150, 100);
self.ke_sc2_e1.setText("5");
tmp.append(self.ke_sc2_e1);
self.ke_sc2_l2 = QLabel(self);
self.ke_sc2_l2.setText("2. Learning rate multiplicative factor: ");
self.ke_sc2_l2.move(20, 150);
tmp.append(self.ke_sc2_l2);
self.ke_sc2_e2 = QLineEdit(self)
self.ke_sc2_e2.move(290, 150);
self.ke_sc2_e2.setText("0.1");
tmp.append(self.ke_sc2_e2);
self.scheduler_ui_keras.append(tmp)
tmp = [];
self.ke_sc3_l1 = QLabel(self);
self.ke_sc3_l1.setText("1. Learning rate multiplicative factor: ");
self.ke_sc3_l1.move(20, 100);
tmp.append(self.ke_sc3_l1);
self.ke_sc3_e1 = QLineEdit(self)
self.ke_sc3_e1.move(290, 100);
self.ke_sc3_e1.setText("0.9");
tmp.append(self.ke_sc3_e1);
self.scheduler_ui_keras.append(tmp)
tmp = [];
self.ke_sc4_l1 = QLabel(self);
self.ke_sc4_l1.setText("1. Mode: ");
self.ke_sc4_l1.move(20, 100);
tmp.append(self.ke_sc4_l1);
self.ke_sc4_cb1 = QComboBox(self);
self.ke_sc4_cb1.move(200, 100);
self.ke_sc4_cb1.addItems(["Min", "Max"]);
tmp.append(self.ke_sc4_cb1);
self.ke_sc4_l2 = QLabel(self);
self.ke_sc4_l2.setText("2. Learning rate multiplicative factor: ");
self.ke_sc4_l2.move(20, 150);
tmp.append(self.ke_sc4_l2);
self.ke_sc4_e2 = QLineEdit(self)
self.ke_sc4_e2.move(290, 150);
self.ke_sc4_e2.setText("0.1");
tmp.append(self.ke_sc4_e2);
self.ke_sc4_l3 = QLabel(self);
self.ke_sc4_l3.setText("3. Number of epochs to wait: ");
self.ke_sc4_l3.move(20, 200);
tmp.append(self.ke_sc4_l3);
self.ke_sc4_e3 = QLineEdit(self)
self.ke_sc4_e3.move(290, 200);
self.ke_sc4_e3.setText("10");
tmp.append(self.ke_sc4_e3);
self.ke_sc4_l4 = QLabel(self);
self.ke_sc4_l4.setText("4. Threshold: ");
self.ke_sc4_l4.move(20, 250);
tmp.append(self.ke_sc4_l4);
self.ke_sc4_e4 = QLineEdit(self)
self.ke_sc4_e4.move(290, 250);
self.ke_sc4_e4.setText("0.0001");
tmp.append(self.ke_sc4_e4);
self.ke_sc4_l5 = QLabel(self);
self.ke_sc4_l5.setText("5. Minimum learning rate: ");
self.ke_sc4_l5.move(20, 300);
tmp.append(self.ke_sc4_l5);
self.ke_sc4_e5 = QLineEdit(self)
self.ke_sc4_e5.move(290, 300);
self.ke_sc4_e5.setText("0.0");
tmp.append(self.ke_sc4_e5);
self.scheduler_ui_keras.append(tmp)
tmp = [];
self.py_sc1_l1 = QLabel(self);
self.py_sc1_l1.setText("No parameters (Arguments) to set.");
self.py_sc1_l1.move(20, 100);
tmp.append(self.py_sc1_l1);
self.scheduler_ui_pytorch.append(tmp)
tmp = [];
self.py_sc2_l1 = QLabel(self);
self.py_sc2_l1.setText("1. Step Size: ");
self.py_sc2_l1.move(20, 100);
tmp.append(self.py_sc2_l1);
self.py_sc2_e1 = QLineEdit(self)
self.py_sc2_e1.move(150, 100);
self.py_sc2_e1.setText("5");
tmp.append(self.py_sc2_e1);
self.py_sc2_l2 = QLabel(self);
self.py_sc2_l2.setText("2. Learning rate multiplicative factor: ");
self.py_sc2_l2.move(20, 150);
tmp.append(self.py_sc2_l2);
self.py_sc2_e2 = QLineEdit(self)
self.py_sc2_e2.move(290, 150);
self.py_sc2_e2.setText("0.1");
tmp.append(self.py_sc2_e2);
self.scheduler_ui_pytorch.append(tmp)
tmp = [];
self.py_sc3_l1 = QLabel(self);
self.py_sc3_l1.setText("1. Milestones: ");
self.py_sc3_l1.move(20, 100);
tmp.append(self.py_sc3_l1);
self.py_sc3_e1 = QLineEdit(self)
self.py_sc3_e1.move(150, 100);
self.py_sc3_e1.setText("5, 10, 15");
tmp.append(self.py_sc3_e1);
self.py_sc3_l2 = QLabel(self);
self.py_sc3_l2.setText("2. Learning rate multiplicative factor: ");
self.py_sc3_l2.move(20, 150);
tmp.append(self.py_sc3_l2);
self.py_sc3_e2 = QLineEdit(self)
self.py_sc3_e2.move(290, 150);
self.py_sc3_e2.setText("0.1");
tmp.append(self.py_sc3_e2);
self.scheduler_ui_pytorch.append(tmp)
tmp = [];
self.py_sc4_l1 = QLabel(self);
self.py_sc4_l1.setText("1. Learning rate multiplicative factor: ");
self.py_sc4_l1.move(20, 100);
tmp.append(self.py_sc4_l1);
self.py_sc4_e1 = QLineEdit(self)
self.py_sc4_e1.move(290, 100);
self.py_sc4_e1.setText("0.9");
tmp.append(self.py_sc4_e1);
self.scheduler_ui_pytorch.append(tmp)
tmp = [];
self.py_sc5_l1 = QLabel(self);
self.py_sc5_l1.setText("1. Mode: ");
self.py_sc5_l1.move(20, 100);
tmp.append(self.py_sc5_l1);
self.py_sc5_cb1 = QComboBox(self);
self.py_sc5_cb1.move(200, 100);
self.py_sc5_cb1.addItems(["Min", "Max"]);
tmp.append(self.py_sc5_cb1);
self.py_sc5_l2 = QLabel(self);
self.py_sc5_l2.setText("2. Learning rate multiplicative factor: ");
self.py_sc5_l2.move(20, 150);
tmp.append(self.py_sc5_l2);
self.py_sc5_e2 = QLineEdit(self)
self.py_sc5_e2.move(290, 150);
self.py_sc5_e2.setText("0.1");
tmp.append(self.py_sc5_e2);
self.py_sc5_l3 = QLabel(self);
self.py_sc5_l3.setText("3. Number of epochs to wait: ");
self.py_sc5_l3.move(20, 200);
tmp.append(self.py_sc5_l3);
self.py_sc5_e3 = QLineEdit(self)
self.py_sc5_e3.move(290, 200);
self.py_sc5_e3.setText("10");
tmp.append(self.py_sc5_e3);
self.py_sc5_l4 = QLabel(self);
self.py_sc5_l4.setText("4. Threshold: ");
self.py_sc5_l4.move(20, 250);
tmp.append(self.py_sc5_l4);
self.py_sc5_e4 = QLineEdit(self)
self.py_sc5_e4.move(290, 250);
self.py_sc5_e4.setText("0.0001");
tmp.append(self.py_sc5_e4);
self.py_sc5_l5 = QLabel(self);
self.py_sc5_l5.setText("5. Minimum learning rate: ");
self.py_sc5_l5.move(20, 300);
tmp.append(self.py_sc5_l5);
self.py_sc5_e5 = QLineEdit(self)
self.py_sc5_e5.move(290, 300);
self.py_sc5_e5.setText("0.0");
tmp.append(self.py_sc5_e5);
self.scheduler_ui_pytorch.append(tmp)
self.select_scheduler();
self.tb1 = QTextEdit(self)
self.tb1.move(550, 20)
self.tb1.resize(300, 500)
if(self.system["update"]["schedulers"]["active"]):
wr = "";
wr = json.dumps(self.system["update"]["schedulers"]["value"], indent=4)
self.tb1.setText(wr);
else:
self.tb1.setText("Using Default Scheduler.")
self.b4 = QPushButton('Select Scheduler', self)
self.b4.move(400,400)
self.b4.clicked.connect(self.add_scheduler)
self.b6 = QPushButton('Clear ', self)
self.b6.move(400,500)
self.b6.clicked.connect(self.clear_scheduler)
def select_scheduler(self):
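        """Show only the parameter widgets belonging to the scheduler selected in the
        active backend's combo box, hide all others, and reset current_scheduler."""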
self.current_scheduler = {};
self.current_scheduler["name"] = "";
self.current_scheduler["params"] = {};
if(self.system["backend"] == "Mxnet-1.5.1"):
self.current_scheduler["name"] = self.cb1.currentText();
index = self.mxnet_schedulers_list.index(self.cb1.currentText());
for i in range(len(self.scheduler_ui_mxnet)):
for j in range(len(self.scheduler_ui_mxnet[i])):
if((index-1)==i):
self.scheduler_ui_mxnet[i][j].show();
else:
self.scheduler_ui_mxnet[i][j].hide();
for i in range(len(self.scheduler_ui_keras)):
for j in range(len(self.scheduler_ui_keras[i])):
self.scheduler_ui_keras[i][j].hide();
for i in range(len(self.scheduler_ui_pytorch)):
for j in range(len(self.scheduler_ui_pytorch[i])):
self.scheduler_ui_pytorch[i][j].hide();
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
self.current_scheduler["name"] = self.cb2.currentText();
index = self.keras_schedulers_list.index(self.cb2.currentText());
for i in range(len(self.scheduler_ui_keras)):
for j in range(len(self.scheduler_ui_keras[i])):
if((index-1)==i):
self.scheduler_ui_keras[i][j].show();
else:
self.scheduler_ui_keras[i][j].hide();
for i in range(len(self.scheduler_ui_mxnet)):
for j in range(len(self.scheduler_ui_mxnet[i])):
self.scheduler_ui_mxnet[i][j].hide();
for i in range(len(self.scheduler_ui_pytorch)):
for j in range(len(self.scheduler_ui_pytorch[i])):
self.scheduler_ui_pytorch[i][j].hide();
elif(self.system["backend"] == "Pytorch-1.3.1"):
self.current_scheduler["name"] = self.cb3.currentText();
index = self.pytorch_schedulers_list.index(self.cb3.currentText());
for i in range(len(self.scheduler_ui_pytorch)):
for j in range(len(self.scheduler_ui_pytorch[i])):
if((index-1)==i):
self.scheduler_ui_pytorch[i][j].show();
else:
self.scheduler_ui_pytorch[i][j].hide();
for i in range(len(self.scheduler_ui_keras)):
for j in range(len(self.scheduler_ui_keras[i])):
self.scheduler_ui_keras[i][j].hide();
for i in range(len(self.scheduler_ui_mxnet)):
for j in range(len(self.scheduler_ui_mxnet[i])):
self.scheduler_ui_mxnet[i][j].hide();
def add_scheduler(self):
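        """Collect the parameter fields of the selected scheduler into
        current_scheduler, store it under system['update']['schedulers'], and echo
        the resulting JSON in the text box."""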
self.system["update"]["schedulers"]["active"] = True;
if(self.system["backend"] == "Mxnet-1.5.1"):
if(self.current_scheduler["name"] == self.mxnet_schedulers_list[1]):
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
elif(self.current_scheduler["name"] == self.mxnet_schedulers_list[2]):
self.current_scheduler["params"]["step_size"] = self.mx_sc2_e1.text();
self.current_scheduler["params"]["gamma"] = self.mx_sc2_e2.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
elif(self.current_scheduler["name"] == self.mxnet_schedulers_list[3]):
self.current_scheduler["params"]["milestones"] = self.mx_sc3_e1.text();
self.current_scheduler["params"]["gamma"] = self.mx_sc3_e2.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
elif(self.system["backend"] == "Keras-2.2.5_Tensorflow-1"):
if(self.current_scheduler["name"] == self.keras_schedulers_list[1]):
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
elif(self.current_scheduler["name"] == self.keras_schedulers_list[2]):
self.current_scheduler["params"]["step_size"] = self.ke_sc2_e1.text();
self.current_scheduler["params"]["gamma"] = self.ke_sc2_e2.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
elif(self.current_scheduler["name"] == self.keras_schedulers_list[3]):
self.current_scheduler["params"]["gamma"] = self.ke_sc3_e1.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
elif(self.current_scheduler["name"] == self.keras_schedulers_list[4]):
self.current_scheduler["params"]["mode"] = self.ke_sc4_cb1.currentText();
self.current_scheduler["params"]["factor"] = self.ke_sc4_e2.text();
self.current_scheduler["params"]["patience"] = self.ke_sc4_e3.text();
self.current_scheduler["params"]["threshold"] = self.ke_sc4_e4.text();
self.current_scheduler["params"]["min_lr"] = self.ke_sc4_e5.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
elif(self.system["backend"] == "Pytorch-1.3.1"):
if(self.current_scheduler["name"] == self.pytorch_schedulers_list[1]):
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
            elif(self.current_scheduler["name"] == self.pytorch_schedulers_list[2]):
self.current_scheduler["params"]["step_size"] = self.py_sc2_e1.text();
self.current_scheduler["params"]["gamma"] = self.py_sc2_e2.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
            elif(self.current_scheduler["name"] == self.pytorch_schedulers_list[3]):
self.current_scheduler["params"]["milestones"] = self.py_sc3_e1.text();
self.current_scheduler["params"]["gamma"] = self.py_sc3_e2.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
            elif(self.current_scheduler["name"] == self.pytorch_schedulers_list[4]):
self.current_scheduler["params"]["gamma"] = self.py_sc4_e1.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
            elif(self.current_scheduler["name"] == self.pytorch_schedulers_list[5]):
self.current_scheduler["params"]["mode"] = self.py_sc5_cb1.currentText();
self.current_scheduler["params"]["factor"] = self.py_sc5_e2.text();
self.current_scheduler["params"]["patience"] = self.py_sc5_e3.text();
self.current_scheduler["params"]["threshold"] = self.py_sc5_e4.text();
self.current_scheduler["params"]["min_lr"] = self.py_sc5_e5.text();
self.system["update"]["schedulers"]["value"] = self.current_scheduler;
wr = "";
wr = json.dumps(self.system["update"]["schedulers"]["value"], indent=4)
self.tb1.setText(wr);
def clear_scheduler(self):
self.system["update"]["schedulers"]["value"] = "";
self.system["update"]["schedulers"]["active"] = False;
wr = "";
self.tb1.setText(wr);
def forward(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.forward_loss_param.emit();
def backward(self):
with open('base_classification.json', 'w') as outfile:
json.dump(self.system, outfile)
self.backward_optimizer_param.emit();
'''
app = QApplication(sys.argv)
screen = WindowClassificationTrainUpdateSchedulerParam()
screen.show()
sys.exit(app.exec_())
'''
|
192853
|
from collections import namedtuple
from xml.dom import minidom
BOOKMARK_COLOR_NONE = "-1"
BOOKMARK_COLOR_BLUE = "0"
BOOKMARK_COLOR_GREEN = "1"
BOOKMARK_COLOR_YELLOW = "2"
BOOKMARK_COLOR_ORANGE = "3"
BOOKMARK_COLOR_RED = "4"
LINK_STYLE_NORMAL = "0"
LINK_STYLE_DASHED = "1"
LINK_STYLE_DOTTED = "2"
LINK_STYLE_DASHDOT = "3"
UIM_FATAL = 'FatalError'
UIM_PARTIAL = 'PartialError'
UIM_INFORM = 'Inform'
UIM_DEBUG = 'Debug'
UIMessage = namedtuple('UIMessage', 'message, messageType')
Property = namedtuple('Property', 'displayName, matchingRule, value')
class MaltegoEntity(object):
def __init__(self, eT=None, v=None):
"""Create a Maltego entity of type eT and value v."""
if (eT is not None):
self.entityType = eT
else:
self.entityType = "Phrase"
if (v is not None):
self.value = v
else:
self.value = ""
self.additionalFields = {}
self.displayInformation = {}
self.weight = 100
self.iconURL = ""
def setType(self, eT=None):
"""Sets the type of the entity to eT.
See list of Entity definitions in TRX documentation for possible values.
"""
if (eT is not None):
self.entityType = eT
def setValue(self, eV=None):
"""Sets the value of Maltego entity to eV."""
if (eV is not None):
self.value = eV
def setWeight(self, w=None):
"""Sets weight of Maltego entity to w."""
if (w is not None):
self.weight = w
def addDisplayInformation(self, di=None, dl='Info'):
"""Adds display information to entity.
This field is rendered as HTML within Maltego. See pages 29 & 50 in
TRX documentation.
"""
if (di is not None):
self.displayInformation[dl] = di
def addProperty(self, fieldName=None, displayName=None, matchingRule=False, value=None):
"""Add a property to the entity.
Each property has a name, value and a display name. The display name is
how it will be represented within Maltego. The matching rule determines
how entities will be matched and could be 'strict' (default) or 'loose'.
See pages 30 & 50 in TRX documentation.
"""
self.additionalFields[fieldName] = Property(displayName, matchingRule, value)
def setIconURL(self, iU=None):
"""Define a URL pointing to a PNG or JPG for the icon.
Maltego will size to fit but lots of large files will drain
resources.
"""
if (iU is not None):
self.iconURL = iU
def setLinkColor(self, color):
"""Sets the color of the link to the node. Colors are in hex, for example '0xff00ff'."""
self.addProperty('link#maltego.link.color', 'LinkColor', '', color)
def setLinkStyle(self, style):
"""Set the style of a link to an entity using LINK_STYLE_* constants."""
self.addProperty('link#maltego.link.style', 'LinkStyle', '', style)
def setLinkThickness(self, thick):
"""Set the thickiness of a link to an entity in pixels."""
self.addProperty('link#maltego.link.thickness', 'Thickness', '', str(thick))
def setLinkLabel(self, label):
"""Sets the label of the link to the node."""
self.addProperty('link#maltego.link.label', 'Label', '', label)
def setBookmark(self, bookmark):
"""Sets the bookmark color of the node.
Keep in mind that these are chosen from a set number of colors. Use the
BOOKMARK_COLOR_* constants.
"""
self.addProperty('bookmark#', 'Bookmark', '', bookmark)
def setNote(self, note):
"""Creates an annotation to the node.
If a subsequent transform sets an annotation on the node it will be
appended to the note.
"""
self.addProperty('notes#', 'Notes', '', note)
def returnEntity(self):
"""Generate XML snippet for returning to Maltego."""
r = ''
r += "<Entity Type=\"" + unicode(self.entityType) + "\">"
r += "<Value>" + unicode(self.value) + "</Value>"
r += "<Weight>" + str(self.weight) + "</Weight>"
if (len(self.displayInformation) > 0):
r += "<DisplayInformation>"
for label in self.displayInformation:
r += '<Label Name=\"' + label + '\" Type=\"text/html\"><![CDATA[' + unicode(self.displayInformation[label]) + ']]></Label>'
r += '</DisplayInformation>'
if (len(self.additionalFields) > 0):
r += "<AdditionalFields>"
for field in self.additionalFields:
if (self.additionalFields[field].matchingRule != "strict"):
r += "<Field Name=\"" + field + "\" DisplayName=\"" + unicode(self.additionalFields[field].displayName) + "\">" + unicode(self.additionalFields[field].value) + "</Field>"
else:
r += "<Field MatchingRule=\"strict\" Name=\"" + field + "\" DisplayName=\"" + unicode(self.additionalFields[field].displayName) + "\">" + unicode(self.additionalFields[field].value) + u"</Field>"
r += "</AdditionalFields>"
if (len(self.iconURL) > 0):
r += "<IconURL>" + self.iconURL + "</IconURL>"
r += "</Entity>"
return r
class MaltegoTransform(object):
"""This is used to construct the reply to the TDS.
All values are strings.
"""
def __init__(self):
"""Create Maltego transform to hold entities, exceptions, and messages.
See page 49 in TRX documentation.
"""
self.entities = []
self.exceptions = []
self.UIMessages = []
def addEntity(self, enType=None, enValue=None):
"""Adds an entity to the return vessel with type 'enType' and
value 'enValue'.
"""
me = MaltegoEntity(enType, enValue)
self.entities.append(me)
return me
def addUIMessage(self, msg, msgType=UIM_INFORM):
"""Shows a message 'msg' in the Maltego GUI.
Use UIM_* constants for message type.
"""
self.UIMessages.append(UIMessage(messageType=msgType, message=msg))
def addException(self, exceptionString):
"""Throws a transform exception."""
self.exceptions.append(exceptionString)
def throwExceptions(self):
"""Returns the XML of the exception(s)."""
r = ''
r += "<MaltegoMessage>"
r += "<MaltegoTransformExceptionMessage>"
r += "<Exceptions>"
for i in range(len(self.exceptions)):
r += "<Exception>" + self.exceptions[i] + "</Exception>"
r += "</Exceptions>"
r += "</MaltegoTransformExceptionMessage>"
r += "</MaltegoMessage>"
return r
def returnOutput(self):
"""Returns the XML of the vessel."""
r = ''
r += "<MaltegoMessage>"
r += "<MaltegoTransformResponseMessage>"
r += "<Entities>"
for i in range(len(self.entities)):
r += self.entities[i].returnEntity()
r += "</Entities>"
r += "<UIMessages>"
for msg in self.UIMessages:
r += "<UIMessage MessageType=\"" + msg.messageType + "\">" + msg.message + "</UIMessage>"
r += "</UIMessages>"
r += "</MaltegoTransformResponseMessage>"
r += "</MaltegoMessage>"
return r
class MaltegoMsg:
"""This reads the Maltego request and is passed along to each transform.
See page 49 in TRX documentation.
"""
def __init__(self, MaltegoXML=""):
"""Parse XML received from Maltego."""
xmldoc = minidom.parseString(MaltegoXML)
# read the easy stuff like value, limits etc
self.Value = self._getNodeValue(xmldoc, "Value")
self.Weight = self._getNodeValue(xmldoc, "Weight")
self.Slider = int(
self._getNodeAttributeValue(xmldoc, "Limits", "SoftLimit"))
self.Type = self._getNodeAttributeValue(xmldoc, "Entity", "Type")
# read additional fields
Properties = {}
AFNodes = xmldoc.getElementsByTagName("AdditionalFields")[0]
Settings = AFNodes.getElementsByTagName("Field")
for node in Settings:
AFName = node.attributes["Name"].value
AFValue = self._getText(node.childNodes)
Properties[AFName] = AFValue
# parse transform settings
TransformSettings = {}
TSNodes = xmldoc.getElementsByTagName("TransformFields")[0]
Settings = TSNodes.getElementsByTagName("Field")
for node in Settings:
TSName = node.attributes["Name"].value
TSValue = self._getText(node.childNodes)
TransformSettings[TSName] = TSValue
# load back into object
self.Properties = Properties
self.TransformSettings = TransformSettings
def _getText(self, nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def _getNodeValue(self, node, Tag):
return self._getText(node.getElementsByTagName(Tag)[0].childNodes)
def _getNodeAttributeValue(self, node, Tag, Attribute):
return node.getElementsByTagName(Tag)[0].attributes[Attribute].value
def getProperty(self, skey):
"""Returns the value of the key, or None if not defined."""
if skey in self.Properties:
return self.Properties[skey]
else:
return None
def getTransformSetting(self, skey):
"""Returns the value of the key, or None if not defined."""
if skey in self.TransformSettings:
return self.TransformSettings[skey]
else:
return None
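# Hedged usage sketch, not part of the original library: the classes above can be
# combined roughly as below to build a transform response. The entity type
# "maltego.Domain" and the property/field names are illustrative values only.
'''
xform = MaltegoTransform()
ent = xform.addEntity("maltego.Domain", "example.com")
ent.setWeight(100)
ent.setLinkLabel("resolved from")
ent.addProperty("whois-info", "Whois Info", "strict", "registrar: example registrar")
xform.addUIMessage("Lookup finished", UIM_INFORM)
print(xform.returnOutput())
'''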
|
192856
|
from graphish.connector import GraphConnector
from graphish.search import Search
from graphish.delete import Delete
from graphish.mailfolder import MailFolder
|
192866
|
import requests
from bs4 import BeautifulSoup
import os
from tools import jsonl
base_url = 'https://www.supremecourt.gov/oral_arguments'
with jsonl.JWZ('corpus_staging/manifest.jsonl.gz') as manifest:
year = 2010
while True:
page = requests.get('{}/argument_audio/{}'.format(base_url, year))
soup = BeautifulSoup(page.text, 'html.parser')
counter = 0
for link in soup.find_all('a'):
if 'href' not in link.attrs or not link.attrs['href'].startswith('../audio/'):
continue
os.system('mkdir -p ./downloads/{}'.format(year))
counter += 1
case_title = link.parent.parent.find_all('span')[-1].text
docket_number = link.text
print('found Docket Number: {} ({})'.format(docket_number, case_title))
docket_page = requests.get('{}/audio/{}'.format(
base_url,
link.attrs['href'][len('../audio/'):]
))
docket_soup = BeautifulSoup(docket_page.text, 'html.parser')
mp3_urls = [
link.attrs['href'] for link in docket_soup.find_all('a')
if 'href' in link.attrs
and link.attrs['href'].endswith('mp3')
]
assert len(mp3_urls) == 1
audio_mp3_path = './downloads/{}/{}.mp3'.format(year, docket_number)
mp3_url = mp3_urls[0]
pdf_urls = [
link.attrs['href'] for link in docket_soup.find_all('a')
if 'href' in link.attrs
and link.attrs['href'].endswith('pdf')
and 'argument_transcripts' in link.attrs['href']
]
assert len(pdf_urls) == 1
transcript_pdf_path = './downloads/{}/{}.pdf'.format(year, docket_number)
pdf_url = '{}/{}'.format(base_url, pdf_urls[0][len('/oral_arguments/'):])
description = {
'docket_number': docket_number,
'case_title': case_title,
'audio_mp3_url': mp3_url,
'audio_mp3_path': audio_mp3_path,
'transcript_pdf_url': pdf_url,
'transcript_pdf_path': transcript_pdf_path
}
manifest.dump(description)
if counter == 0:
# caught up to the most recent records
break
year += 1
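# Hedged follow-up sketch, not part of the original script: each manifest row
# already carries the source URLs and target paths, so the actual downloads could
# be driven later with the standard library, e.g. for a single `description` dict:
#
#     import urllib.request
#     urllib.request.urlretrieve(description['audio_mp3_url'], description['audio_mp3_path'])
#     urllib.request.urlretrieve(description['transcript_pdf_url'], description['transcript_pdf_path'])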
|
192915
|
import errno
import logging
import os
import string
import time
import urllib.request
from collections import namedtuple
from errno import ENOENT
from urllib.parse import urlparse
from markupsafe import escape
from galaxy.datatypes import sniff
from galaxy.exceptions import (
ConfigurationError,
MessageException,
ObjectNotFound,
)
from galaxy.tool_util.deps import (
build_dependency_manager,
NullDependencyManager
)
from galaxy.tool_util.loader_directory import looks_like_a_tool
from galaxy.util import (
etree,
ExecutionTimer,
get_charset_from_http_headers,
listify,
parse_xml,
string_as_bool,
unicodify,
)
from galaxy.util.bunch import Bunch
from galaxy.util.dictifiable import Dictifiable
from .filters import FilterFactory
from .integrated_panel import ManagesIntegratedToolPanelMixin
from .lineages import LineageMap
from .panel import (
panel_item_types,
ToolPanelElements,
ToolSection,
ToolSectionLabel
)
from .parser import (
ensure_tool_conf_item,
get_toolbox_parser
)
from .tags import tool_tag_manager
log = logging.getLogger(__name__)
SHED_TOOL_CONF_XML = """<?xml version="1.0"?>
<toolbox tool_path="{shed_tools_dir}">
</toolbox>
"""
# A fake ToolShedRepository constructed from a shed tool conf
_ToolConfRepository = namedtuple(
'_ToolConfRepository',
(
'tool_shed', 'name', 'owner', 'installed_changeset_revision', 'changeset_revision',
'tool_dependencies_installed_or_in_error', 'repository_path', 'tool_path',
)
)
class ToolConfRepository(_ToolConfRepository):
def get_tool_relative_path(self, *args, **kwargs):
# This is a somewhat public function, used by data_manager_manual for instance
return self.tool_path, self.repository_path
class AbstractToolBox(Dictifiable, ManagesIntegratedToolPanelMixin):
"""
Abstract container for managing a ToolPanel - containing tools and
workflows optionally in labelled sections.
"""
def __init__(self, config_filenames, tool_root_dir, app, save_integrated_tool_panel=True):
"""
Create a toolbox from the config files named by `config_filenames`, using
`tool_root_dir` as the base directory for finding individual tool config files.
"""
# The _dynamic_tool_confs list contains dictionaries storing
# information about the tools defined in each shed-related
# shed_tool_conf.xml file.
self._dynamic_tool_confs = []
self._tools_by_id = {}
self._tools_by_uuid = {}
self._integrated_section_by_tool = {}
# Tool lineages can contain chains of related tools with different ids
# so each will be present once in the above dictionary. The following
# dictionary can instead hold multiple tools with different versions.
self._tool_versions_by_id = {}
self._workflows_by_id = {}
# Cache for tool's to_dict calls specific to toolbox. Invalidates on toolbox reload.
self._tool_to_dict_cache = {}
self._tool_to_dict_cache_admin = {}
# In-memory dictionary that defines the layout of the tool panel.
self._tool_panel = ToolPanelElements()
self._index = 0
self.data_manager_tools = {}
self._lineage_map = LineageMap(app)
# Sets self._integrated_tool_panel and self._integrated_tool_panel_config_has_contents
self._init_integrated_tool_panel(app.config)
# The following refers to the tool_path config setting for backward compatibility. The shed-related
# (e.g., shed_tool_conf.xml) files include the tool_path attribute within the <toolbox> tag.
self._tool_root_dir = tool_root_dir
self.app = app
self._tool_watcher = self.app.watchers.tool_watcher
self._tool_config_watcher = self.app.watchers.tool_config_watcher
self._filter_factory = FilterFactory(self)
self._tool_tag_manager = tool_tag_manager(app)
self._init_tools_from_configs(config_filenames)
self._load_edam()
if self.app.name == 'galaxy' and self._integrated_tool_panel_config_has_contents:
# Load self._tool_panel based on the order in self._integrated_tool_panel.
if self.app.config.enable_beta_edam_toolbox:
self._load_tool_panel_edam()
else:
self._load_tool_panel()
if save_integrated_tool_panel:
self._save_integrated_tool_panel()
def _recurse_edam_parents(self, term, path=None):
log.debug(f"term:{term} path:{path} parents:{self.edam[term]['parents']}")
if self.edam[term]['parents'] and len(self.edam[term]['parents']) > 0:
for parent in self.edam[term]['parents']:
yield from self._recurse_edam_parents(parent, path + [parent])
else:
yield path
def _load_edam(self):
if not self.app.config.enable_beta_edam_toolbox:
return
log.debug('Loading EDAM Terms')
if not os.path.exists(self.app.config.beta_edam_toolbox_ontology_path):
log.debug('EDAM ontology file not present, downloading')
page = urllib.request.urlopen('https://edamontology.org/EDAM.tsv')
sniff.stream_to_open_named_file(page, os.open(self.app.config.beta_edam_toolbox_ontology_path, os.O_WRONLY | os.O_CREAT), None, source_encoding=get_charset_from_http_headers(page.headers))
log.debug('Processing EDAM Terms')
with open(self.app.config.beta_edam_toolbox_ontology_path, 'r') as handle:
log.debug(f'Processing {handle}')
self.edam = {}
for line in handle.readlines():
fields = line.split('\t')
if not fields[0].startswith('http://edamontology.org/'):
continue
term_id = fields[0][len('http://edamontology.org/'):]
# Only care about formats and operations
if not (term_id.startswith('operation_') or term_id.startswith('topic_')):
continue
parents = fields[7].split('|')
self.edam[term_id] = {
'label': fields[1], # preferred label
'parents': [x[len('http://edamontology.org/'):] for x in parents if x.startswith('http://edamontology.org/')],
}
log.debug(f'Loaded term {term_id} => {fields[1]}')
for term in sorted(self.edam.keys()):
tails = []
for x in self._recurse_edam_parents(term, path=[]):
if x[-2:] not in tails:
tails.append(x[-2:])
self.edam[term]['path'] = tails
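    # Hedged illustration, not part of the Galaxy source: based on the parsing in
    # _load_edam above, each relevant EDAM.tsv row is expected to look roughly like
    #
    #     http://edamontology.org/operation_XXXX<TAB>Preferred label<TAB>...<TAB>parent1|parent2<TAB>...
    #
    # where column index 0 is the term URI, index 1 the preferred label, and index 7
    # a '|'-separated list of parent URIs; all other columns are ignored.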
def create_tool(self, config_file, tool_shed_repository=None, guid=None, **kwds):
raise NotImplementedError()
def create_dynamic_tool(self, dynamic_tool):
raise NotImplementedError()
def can_load_config_file(self, config_filename):
return True
def _init_tools_from_configs(self, config_filenames):
""" Read through all tool config files and initialize tools in each
with init_tools_from_config below.
"""
execution_timer = ExecutionTimer()
self._tool_tag_manager.reset_tags()
config_filenames = listify(config_filenames)
for config_filename in config_filenames:
if os.path.isdir(config_filename):
directory_contents = sorted(os.listdir(config_filename))
directory_config_files = [config_file for config_file in directory_contents if config_file.endswith(".xml")]
config_filenames.remove(config_filename)
config_filenames.extend(directory_config_files)
for config_filename in config_filenames:
if not self.can_load_config_file(config_filename):
continue
try:
self._init_tools_from_config(config_filename)
except etree.ParseError:
# Occasionally we experience "Missing required parameter 'shed_tool_conf'."
# This happens if parsing the shed_tool_conf fails, so we just sleep a second and try again.
# TODO: figure out why this fails occasionally (try installing hundreds of tools in batch ...).
time.sleep(1)
try:
self._init_tools_from_config(config_filename)
except Exception:
raise
except Exception:
log.exception("Error loading tools defined in config %s", config_filename)
log.debug("Reading tools from config files finished %s", execution_timer)
def _init_tools_from_config(self, config_filename):
"""
Read the configuration file and load each tool. The following tags are currently supported:
.. raw:: xml
<toolbox>
<tool file="data_source/upload.xml"/> # tools outside sections
<label text="Basic Tools" id="basic_tools" /> # labels outside sections
<workflow id="529fd61ab1c6cc36" /> # workflows outside sections
<section name="Get Data" id="getext"> # sections
<tool file="data_source/biomart.xml" /> # tools inside sections
<label text="In Section" id="in_section" /> # labels inside sections
<workflow id="adb5f5c93f827949" /> # workflows inside sections
<tool file="data_source/foo.xml" labels="beta" /> # label for a single tool
</section>
</toolbox>
"""
log.info(f"Parsing the tool configuration {config_filename}")
try:
tool_conf_source = get_toolbox_parser(config_filename)
except OSError as exc:
dynamic_confs = (self.app.config.shed_tool_config_file, self.app.config.migrated_tools_config)
if config_filename in dynamic_confs and exc.errno == errno.ENOENT:
log.info("Shed-enabled tool configuration file does not exist, but will be created on demand: %s",
config_filename)
stcd = dict(config_filename=config_filename,
tool_path=self.app.config.shed_tools_dir,
config_elems=[],
create=SHED_TOOL_CONF_XML.format(shed_tools_dir=self.app.config.shed_tools_dir))
self._dynamic_tool_confs.append(stcd)
return
raise
tool_path = tool_conf_source.parse_tool_path()
tool_cache_data_dir = tool_conf_source.parse_tool_cache_data_dir()
parsing_shed_tool_conf = tool_conf_source.is_shed_tool_conf()
if parsing_shed_tool_conf:
# Keep an in-memory list of xml elements to enable persistence of the changing tool config.
config_elems = []
tool_conf_type = 'shed tool' if parsing_shed_tool_conf else 'tool'
log.debug("Tool path for %s configuration %s is %s", tool_conf_type, config_filename, tool_path)
tool_path = self.__resolve_tool_path(tool_path, config_filename)
# Only load the panel_dict under certain conditions.
load_panel_dict = not self._integrated_tool_panel_config_has_contents
for item in tool_conf_source.parse_items():
index = self._index
self._index += 1
if parsing_shed_tool_conf:
config_elems.append(item.elem)
self.load_item(
item,
tool_path=tool_path,
tool_cache_data_dir=tool_cache_data_dir,
load_panel_dict=load_panel_dict,
guid=item.get('guid'),
index=index,
)
if parsing_shed_tool_conf:
# if read_only mode, (CVMFS consumer) don't add to dynamic_confs
if os.access(config_filename, os.W_OK):
shed_tool_conf_dict = dict(config_filename=config_filename,
tool_path=tool_path,
tool_cache_data_dir=tool_cache_data_dir,
config_elems=config_elems)
self._dynamic_tool_confs.append(shed_tool_conf_dict)
def _get_tool_by_uuid(self, tool_uuid):
if tool_uuid in self._tools_by_uuid:
return self._tools_by_uuid[tool_uuid]
dynamic_tool = self.app.dynamic_tool_manager.get_tool_by_uuid(tool_uuid)
if dynamic_tool:
return self.load_dynamic_tool(dynamic_tool)
return None
def load_dynamic_tool(self, dynamic_tool):
if not dynamic_tool.active:
return None
tool = self.create_dynamic_tool(dynamic_tool)
self.register_tool(tool)
self._tools_by_uuid[dynamic_tool.uuid] = tool
return tool
def load_item(self, item, tool_path, panel_dict=None, integrated_panel_dict=None, load_panel_dict=True, guid=None, index=None, tool_cache_data_dir=None):
with self.app._toolbox_lock:
item = ensure_tool_conf_item(item)
item_type = item.type
if panel_dict is None:
panel_dict = self._tool_panel
if integrated_panel_dict is None:
integrated_panel_dict = self._integrated_tool_panel
if item_type == 'tool':
self._load_tool_tag_set(item, panel_dict=panel_dict, integrated_panel_dict=integrated_panel_dict, tool_path=tool_path, load_panel_dict=load_panel_dict, guid=guid, index=index, tool_cache_data_dir=tool_cache_data_dir)
elif item_type == 'workflow':
self._load_workflow_tag_set(item, panel_dict=panel_dict, integrated_panel_dict=integrated_panel_dict, load_panel_dict=load_panel_dict, index=index)
elif item_type == 'section':
self._load_section_tag_set(item, tool_path=tool_path, load_panel_dict=load_panel_dict, index=index, tool_cache_data_dir=tool_cache_data_dir)
elif item_type == 'label':
self._load_label_tag_set(item, panel_dict=panel_dict, integrated_panel_dict=integrated_panel_dict, load_panel_dict=load_panel_dict, index=index)
elif item_type == 'tool_dir':
self._load_tooldir_tag_set(item, panel_dict, tool_path, integrated_panel_dict, load_panel_dict=load_panel_dict, tool_cache_data_dir=tool_cache_data_dir)
def get_shed_config_dict_by_filename(self, filename):
filename = os.path.abspath(filename)
dynamic_tool_conf_paths = []
for shed_config_dict in self._dynamic_tool_confs:
dynamic_tool_conf_path = os.path.abspath(shed_config_dict['config_filename'])
dynamic_tool_conf_paths.append(dynamic_tool_conf_path)
if dynamic_tool_conf_path == filename:
return shed_config_dict
log.warning(f"'{filename}' not among installable tool config files ({', '.join(dynamic_tool_conf_paths)})")
return None
def update_shed_config(self, shed_conf):
""" Update the in-memory descriptions of tools and write out the changes
to integrated tool panel unless we are just deactivating a tool (since
that doesn't affect that file).
"""
for index, my_shed_tool_conf in enumerate(self._dynamic_tool_confs):
if shed_conf['config_filename'] == my_shed_tool_conf['config_filename']:
self._dynamic_tool_confs[index] = shed_conf
self._save_integrated_tool_panel()
def get_section(self, section_id, new_label=None, create_if_needed=False):
tool_panel_section_key = str(section_id)
if tool_panel_section_key in self._tool_panel:
# Appending a tool to an existing section in toolbox._tool_panel
tool_section = self._tool_panel[tool_panel_section_key]
log.debug(f"Appending to tool panel section: {str(tool_section.name)}")
elif new_label and self._tool_panel.get_label(new_label):
tool_section = self._tool_panel.get_label(new_label)
tool_panel_section_key = tool_section.id
elif create_if_needed:
# Appending a new section to toolbox._tool_panel
if new_label is None:
# This might add an ugly section label to the tool panel, but, oh well...
new_label = section_id
section_dict = {
'name': new_label,
'id': section_id,
'version': '',
}
self.create_section(section_dict)
tool_section = self._tool_panel[tool_panel_section_key]
self._save_integrated_tool_panel()
else:
tool_section = None
return tool_panel_section_key, tool_section
def create_section(self, section_dict):
tool_section = ToolSection(section_dict)
self._tool_panel.append_section(tool_section.id, tool_section)
log.debug(f"Loading new tool panel section: {str(tool_section.name)}")
return tool_section
def get_integrated_section_for_tool(self, tool):
tool_id = tool.id
if tool_id in self._integrated_section_by_tool:
return self._integrated_section_by_tool[tool_id]
return None, None
def __resolve_tool_path(self, tool_path, config_filename):
if not tool_path:
# Default to backward compatible config setting.
tool_path = self._tool_root_dir
else:
# Allow use of __tool_conf_dir__ in toolbox config files.
tool_conf_dir = os.path.dirname(config_filename)
tool_path_vars = {"tool_conf_dir": tool_conf_dir}
tool_path = string.Template(tool_path).safe_substitute(tool_path_vars)
return tool_path
def __add_tool_to_tool_panel(self, tool, panel_component, section=False):
# See if a version of this tool is already loaded into the tool panel.
# The value of panel_component will be a ToolSection (if the value of
# section=True) or self._tool_panel (if section=False).
if tool.hidden:
log.debug("Skipping tool panel addition of hidden tool: %s, version: %s", tool.id, tool.version)
return
tool_id = str(tool.id)
tool = self._tools_by_id[tool_id]
log_msg = ""
if section:
panel_dict = panel_component.elems
else:
panel_dict = panel_component
related_tool = self._lineage_in_panel(panel_dict, tool=tool)
if related_tool:
if self._newer_tool(tool, related_tool):
panel_dict.replace_tool(
previous_tool_id=related_tool.id,
new_tool_id=tool_id,
tool=tool,
)
log_msg = f"Loaded tool id: {tool.id}, version: {tool.version} into tool panel."
else:
inserted = False
index = self._integrated_tool_panel.index_of_tool_id(tool_id)
if index:
panel_dict.insert_tool(index, tool)
inserted = True
if not inserted:
# Check the tool's installed versions.
if tool.lineage is not None:
versions = tool.lineage.get_versions()
for tool_lineage_version in versions:
lineage_id = tool_lineage_version.id
index = self._integrated_tool_panel.index_of_tool_id(lineage_id)
if index:
panel_dict.insert_tool(index, tool)
inserted = True
else:
log.warning("Could not find lineage for tool '%s'", tool.id)
if not inserted:
if (
tool.guid is None
or tool.tool_shed is None
or tool.repository_name is None
or tool.repository_owner is None
or tool.installed_changeset_revision is None
):
# We have a tool that was not installed from the Tool
# Shed, but is also not yet defined in
# integrated_tool_panel.xml, so append it to the tool
# panel.
panel_dict.append_tool(tool)
log_msg = f"Loaded tool id: {tool.id}, version: {tool.version} into tool panel.."
else:
# We are in the process of installing the tool or we are reloading the whole toolbox.
tool_lineage = self._lineage_map.get(tool_id)
already_loaded = self._lineage_in_panel(panel_dict, tool_lineage=tool_lineage) is not None
if not already_loaded:
# If the tool is not defined in integrated_tool_panel.xml, append it to the tool panel.
panel_dict.append_tool(tool)
log_msg = f"Loaded tool id: {tool.id}, version: {tool.version} into tool panel...."
if log_msg and (not hasattr(self.app, 'tool_cache') or tool_id in self.app.tool_cache._new_tool_ids):
log.debug(log_msg)
def _get_edam_sec(self, tool):
edam = tool.edam_operations + tool.edam_topics
if len(edam) > 0:
for term in edam:
yield term
else:
yield 'uncategorized'
def _get_section(self, sec_id, sec_nm):
if sec_id not in self._tool_panel:
section = ToolSection({'id': sec_id, 'name': sec_nm, 'version': ''})
self._tool_panel[sec_id] = section
else:
section = self._tool_panel[sec_id]
return section
def _edam_children_of(self, parentTerm):
for term in self.edam.keys():
if parentTerm in self.edam[term]['parents']:
yield term
def _load_tool_panel_edam(self):
execution_timer = ExecutionTimer()
# Find the children of the top level topics
operations = ['operation_0004'] + list(self._edam_children_of('operation_0004'))
topics = ['topic_0003'] + list(self._edam_children_of('topic_0003'))
# Sort them (by english label)
# operations = sorted(operations, key=lambda x: self.edam[x]['label'])
# topics = sorted(topics, key=lambda x: self.edam[x]['label'])
# Convert these to list of dicts, wherein we'll add our tools/etc.
operations = {
x: {}
for x in operations
}
topics = {
x: {}
for x in topics
}
uncategorized = []
for key, item_type, val in self._integrated_tool_panel.panel_items_iter():
if item_type == panel_item_types.TOOL:
tool_id = key.replace('tool_', '', 1)
if tool_id in self._tools_by_id:
if tool_id in self._tools_by_id:
for term in self._get_edam_sec(val):
if term == 'uncategorized':
uncategorized.append((tool_id, key, val, val.name))
else:
for path in self.edam[term]['path']:
if len(path) == 1:
t = term
else:
t = path[0]
if path[0].startswith('operation_'):
operations[t][tool_id] = (term, tool_id, key, val, val.name)
elif path[0].startswith('topic_'):
topics[t][tool_id] = (term, tool_id, key, val, val.name)
# elif item_type == panel_item_types.WORKFLOW:
# workflow_id = key.replace('workflow_', '', 1)
# if workflow_id in self._workflows_by_id:
# workflow = self._workflows_by_id[workflow_id]
# self._tool_panel[key] = workflow
# log.debug(f"Loaded workflow: {workflow_id} {workflow.name}")
elif item_type == panel_item_types.SECTION:
for section_key, section_item_type, section_val in val.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
tool_id = section_key.replace('tool_', '', 1)
if tool_id in self._tools_by_id:
for term in self._get_edam_sec(section_val):
if term == 'uncategorized':
uncategorized.append((tool_id, key, section_val, val.name))
else:
for path in self.edam[term]['path']:
if len(path) == 1:
t = term
else:
t = path[0]
if path[0].startswith('operation_'):
operations[t][tool_id] = (term, tool_id, key, section_val, val.name)
if path[0].startswith('topic_'):
topics[t][tool_id] = (term, tool_id, key, section_val, val.name)
# elif section_item_type == panel_item_types.WORKFLOW:
# workflow_id = section_key.replace('workflow_', '', 1)
# if workflow_id in self._workflows_by_id:
# workflow = self._workflows_by_id[workflow_id]
# section.elems[section_key] = workflow
# log.debug(f"Loaded workflow: {workflow_id} {workflow.name}")
log.debug("Loading tool panel finished %s", execution_timer)
for term in sorted(operations.keys(), key=lambda x: self._sort_edam_key(x)):
if len(operations[term].keys()) == 0:
continue
elem = etree.Element('label')
elem.attrib['text'] = self.edam[term]['label']
elem.attrib['id'] = term
self._tool_panel[f"label_{term}"] = ToolSectionLabel(elem)
for (term, tool_id, key, val, val_name) in operations[term].values():
section = self._get_section(term, self.edam[term]['label'])
self.__add_tool_to_tool_panel(val, section, section=True)
self._integrated_section_by_tool[tool_id] = key, val_name
for term in sorted(topics.keys(), key=lambda x: self._sort_edam_key(x)):
if len(topics[term].keys()) == 0:
continue
elem = etree.Element('label')
elem.attrib['text'] = self.edam[term]['label']
elem.attrib['id'] = term
self._tool_panel[f"label_{term}"] = ToolSectionLabel(elem)
for (term, tool_id, key, val, val_name) in topics[term].values():
section = self._get_section(term, self.edam[term]['label'])
self.__add_tool_to_tool_panel(val, section, section=True)
self._integrated_section_by_tool[tool_id] = key, val_name
section = self._get_section('uncategorized', 'Uncategorized')
for (tool_id, key, val, val_name) in uncategorized:
            log.debug("%s %s %s %s", tool_id, key, val, val_name)
self.__add_tool_to_tool_panel(val, section, section=True)
self._integrated_section_by_tool[tool_id] = key, val_name
def _sort_edam_key(self, x):
if x in ('operation_0004', 'topic_0003'):
return f"!{x}"
else:
return self.edam[x]['label']
def _load_tool_panel(self):
execution_timer = ExecutionTimer()
for key, item_type, val in self._integrated_tool_panel.panel_items_iter():
if item_type == panel_item_types.TOOL:
tool_id = key.replace('tool_', '', 1)
if tool_id in self._tools_by_id:
self.__add_tool_to_tool_panel(val, self._tool_panel, section=False)
self._integrated_section_by_tool[tool_id] = '', ''
elif item_type == panel_item_types.WORKFLOW:
workflow_id = key.replace('workflow_', '', 1)
if workflow_id in self._workflows_by_id:
workflow = self._workflows_by_id[workflow_id]
self._tool_panel[key] = workflow
log.debug(f"Loaded workflow: {workflow_id} {workflow.name}")
elif item_type == panel_item_types.LABEL:
self._tool_panel[key] = val
elif item_type == panel_item_types.SECTION:
section_dict = {
'id': val.id or '',
'name': val.name or '',
'version': val.version or '',
}
section = ToolSection(section_dict)
log.debug(f"Loading section: {section_dict.get('name')}")
for section_key, section_item_type, section_val in val.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
tool_id = section_key.replace('tool_', '', 1)
if tool_id in self._tools_by_id:
self.__add_tool_to_tool_panel(section_val, section, section=True)
self._integrated_section_by_tool[tool_id] = key, val.name
elif section_item_type == panel_item_types.WORKFLOW:
workflow_id = section_key.replace('workflow_', '', 1)
if workflow_id in self._workflows_by_id:
workflow = self._workflows_by_id[workflow_id]
section.elems[section_key] = workflow
log.debug(f"Loaded workflow: {workflow_id} {workflow.name}")
elif section_item_type == panel_item_types.LABEL:
if section_val:
section.elems[section_key] = section_val
log.debug(f"Loaded label: {section_val.text}")
self._tool_panel[key] = section
log.debug("Loading tool panel finished %s", execution_timer)
def _load_integrated_tool_panel_keys(self):
"""
Load the integrated tool panel keys, setting values for tools and
workflows to None. The values will be reset when the various tool
panel config files are parsed, at which time the tools and workflows
are loaded.
"""
tree = parse_xml(self._integrated_tool_panel_config)
root = tree.getroot()
for elem in root:
key = elem.get('id')
if elem.tag == 'tool':
self._integrated_tool_panel.stub_tool(key)
elif elem.tag == 'workflow':
self._integrated_tool_panel.stub_workflow(key)
elif elem.tag == 'section':
section = ToolSection(elem)
for section_elem in elem:
section_id = section_elem.get('id')
if section_elem.tag == 'tool':
section.elems.stub_tool(section_id)
elif section_elem.tag == 'workflow':
section.elems.stub_workflow(section_id)
elif section_elem.tag == 'label':
section.elems.stub_label(section_id)
self._integrated_tool_panel.append_section(key, section)
elif elem.tag == 'label':
self._integrated_tool_panel.stub_label(key)
def get_tool(self, tool_id, tool_version=None, get_all_versions=False, exact=False, tool_uuid=None):
"""Attempt to locate a tool in the tool box. Note that `exact` only refers to the `tool_id`, not the `tool_version`."""
if tool_version:
tool_version = str(tool_version)
if get_all_versions and exact:
raise AssertionError("Cannot specify get_tool with both get_all_versions and exact as True")
if tool_id is None:
if tool_uuid is not None:
tool_from_uuid = self._get_tool_by_uuid(tool_uuid)
if tool_from_uuid is None:
raise ObjectNotFound(f"Failed to find a tool with uuid [{tool_uuid}]")
tool_id = tool_from_uuid.id
if tool_id is None:
raise AssertionError("get_tool called with tool_id as None")
if "/repos/" in tool_id: # test if tool came from a toolshed
tool_id_without_tool_shed = tool_id.split("/repos/")[1]
available_tool_sheds = [urlparse(_) for _ in self.app.tool_shed_registry.tool_sheds.values()]
available_tool_sheds = [url.geturl().replace(f"{url.scheme}://", '', 1) for url in available_tool_sheds]
tool_ids = [f"{tool_shed}repos/{tool_id_without_tool_shed}" for tool_shed in available_tool_sheds]
if tool_id in tool_ids: # move original tool_id to the top of tool_ids
tool_ids.remove(tool_id)
tool_ids.insert(0, tool_id)
else:
tool_ids = [tool_id]
for tool_id in tool_ids:
if tool_id in self._tools_by_id and not get_all_versions:
# tool_id exactly matches an available tool by id (which is 'old' tool_id or guid)
if not tool_version:
return self._tools_by_id[tool_id]
elif tool_version in self._tool_versions_by_id[tool_id]:
return self._tool_versions_by_id[tool_id][tool_version]
elif exact:
# We're looking for an exact match, so we skip lineage and
# versionless mapping, though we may want to check duplicate
# toolsheds
continue
# exact tool id match not found, or all versions requested, search for other options, e.g. migrated tools or different versions
rval = []
tool_lineage = self._lineage_map.get(tool_id)
if tool_lineage:
lineage_tool_versions = tool_lineage.get_versions()
for lineage_tool_version in lineage_tool_versions:
lineage_tool = self._tool_from_lineage_version(lineage_tool_version)
if lineage_tool:
rval.append(lineage_tool)
if not rval:
# still no tool, do a deeper search and try to match by old ids
for tool in self._tools_by_id.values():
if tool.old_id == tool_id:
rval.append(tool)
if get_all_versions and tool_id in self._tool_versions_by_id:
for tool in self._tool_versions_by_id[tool_id].values():
if tool not in rval:
rval.append(tool)
# if we don't have a lineage_map for this tool we need to sort by version,
# so that the last tool in rval is the newest tool.
rval.sort(key=lambda t: t.version)
if rval:
if get_all_versions:
return rval
else:
if tool_version:
# return first tool with matching version
for tool in rval:
if tool.version == tool_version:
return tool
# No tool matches by version, simply return the newest matching tool
return rval[-1]
# We now likely have a Toolshed guid passed in, but no supporting database entries
# If the tool exists by exact id and is loaded then provide exact match within a list
if tool_id in self._tools_by_id:
if get_all_versions:
return [self._tools_by_id[tool_id]]
else:
return self._tools_by_id[tool_id]
return None
def has_tool(self, tool_id, tool_version=None, exact=False):
return self.get_tool(tool_id, tool_version=tool_version, exact=exact) is not None
def is_missing_shed_tool(self, tool_id):
"""Confirm that the tool ID does reference a shed tool and is not installed."""
if tool_id is None:
# This is not a tool ID.
return False
if 'repos' not in tool_id:
# This is not a shed tool.
return False
# This is a valid tool, and it is from a toolshed. Check if it's
# missing from the toolbox.
if tool_id not in self._tools_by_id:
return True
return False
def get_loaded_tools_by_lineage(self, tool_id):
"""Get all loaded tools associated by lineage to the tool whose id is tool_id."""
tool_lineage = self._lineage_map.get(tool_id)
if tool_lineage:
lineage_tool_versions = tool_lineage.get_versions()
available_tool_versions = []
for lineage_tool_version in lineage_tool_versions:
tool = self._tool_from_lineage_version(lineage_tool_version)
if tool:
available_tool_versions.append(tool)
return available_tool_versions
else:
if tool_id in self._tools_by_id:
tool = self._tools_by_id[tool_id]
return [tool]
return []
def tools(self):
return self._tools_by_id.copy().items()
def dynamic_confs(self, include_migrated_tool_conf=False):
confs = []
for dynamic_tool_conf_dict in self._dynamic_tool_confs:
dynamic_tool_conf_filename = dynamic_tool_conf_dict['config_filename']
if include_migrated_tool_conf or (dynamic_tool_conf_filename != self.app.config.migrated_tools_config):
confs.append(dynamic_tool_conf_dict)
return confs
def default_shed_tool_conf_dict(self):
"""If set, returns the first shed_tool_conf_dict corresponding to shed_tool_config_file, else the first dynamic conf."""
dynamic_confs = self.dynamic_confs(include_migrated_tool_conf=False)
# Pick the first tool config that doesn't set `is_shed_conf="false"` and that is not a migrated_tool_conf
try:
shed_config_dict = dynamic_confs[0]
except IndexError:
raise ConfigurationError("No shed_tool_conf file active")
if self.app.config.shed_tool_config_file in self.app.config.tool_configs:
# Use shed_tool_config_file if loaded
for shed_config_dict in dynamic_confs:
if shed_config_dict.get('config_filename') == self.app.config.shed_tool_config_file:
break
return shed_config_dict
def dynamic_conf_filenames(self, include_migrated_tool_conf=False):
""" Return list of dynamic tool configuration filenames (shed_tools).
These must be used with various dynamic tool configuration update
operations (e.g. with update_shed_config).
"""
for dynamic_tool_conf_dict in self.dynamic_confs(include_migrated_tool_conf=include_migrated_tool_conf):
yield dynamic_tool_conf_dict['config_filename']
def _path_template_kwds(self):
return {}
def _load_tool_tag_set(self, item, panel_dict, integrated_panel_dict, tool_path, load_panel_dict, guid=None, index=None, tool_cache_data_dir=None):
try:
path_template = item.get("file")
template_kwds = self._path_template_kwds()
path = string.Template(path_template).safe_substitute(**template_kwds)
concrete_path = os.path.join(tool_path, path)
if not os.path.exists(concrete_path):
# This is a lot faster than attempting to load a non-existing tool
raise OSError(ENOENT, os.strerror(ENOENT))
tool_shed_repository = None
can_load_into_panel_dict = True
tool = self.load_tool_from_cache(concrete_path)
from_cache = tool
if from_cache:
if guid and tool.id != guid:
# In rare cases a tool shed tool is loaded into the cache without guid.
# In that case recreating the tool will correct the cached version.
from_cache = False
if guid and not from_cache: # tool was not in cache and is a tool shed tool
tool_shed_repository = self.get_tool_repository_from_xml_item(item.elem, concrete_path)
if tool_shed_repository:
if hasattr(tool_shed_repository, 'deleted'):
# The shed tool is in the install database
# Only load tools if the repository is not deactivated or uninstalled.
can_load_into_panel_dict = not tool_shed_repository.deleted
tool = self.load_tool(concrete_path, guid=guid, tool_shed_repository=tool_shed_repository, use_cached=False, tool_cache_data_dir=tool_cache_data_dir)
if not tool: # tool was not in cache and is not a tool shed tool.
tool = self.load_tool(concrete_path, use_cached=False, tool_cache_data_dir=tool_cache_data_dir)
if string_as_bool(item.get('hidden', False)):
tool.hidden = True
key = f'tool_{str(tool.id)}'
if can_load_into_panel_dict:
if guid and not from_cache:
tool.tool_shed = tool_shed_repository.tool_shed
tool.repository_name = tool_shed_repository.name
tool.repository_owner = tool_shed_repository.owner
tool.installed_changeset_revision = tool_shed_repository.installed_changeset_revision
tool.guid = guid
tool.version = item.elem.find("version").text
if item.has_elem:
self._tool_tag_manager.handle_tags(tool.id, item.elem)
self.__add_tool(tool, load_panel_dict, panel_dict)
# Always load the tool into the integrated_panel_dict, or it will not be included in the integrated_tool_panel.xml file.
integrated_panel_dict.update_or_append(index, key, tool)
# If labels were specified in the toolbox config, attach them to
# the tool.
labels = item.labels
if labels is not None:
tool.labels = labels
except OSError as exc:
msg = "Error reading tool configuration file from path '%s': %s", path, unicodify(exc)
if exc.errno == ENOENT:
log.error(msg)
else:
log.exception(msg)
except Exception:
log.exception("Error reading tool from path: %s", path)
def get_tool_repository_from_xml_item(self, elem, path):
tool_shed = elem.find("tool_shed").text
repository_name = elem.find("repository_name").text
repository_owner = elem.find("repository_owner").text
# The definition of `installed_changeset_revision` for a repository is that it has been cloned at <tool_path/toolshed/repos/owner/name/installed_changeset_revision>
# so if we load a tool it needs to be at a path that contains `installed_changeset_revision`.
path_to_installed_changeset_revision = os.path.join(tool_shed, 'repos', repository_owner, repository_name)
if path_to_installed_changeset_revision in path:
installed_changeset_revision = path[path.index(path_to_installed_changeset_revision) + len(path_to_installed_changeset_revision):].split(os.path.sep)[1]
else:
installed_changeset_revision_elem = elem.find("installed_changeset_revision")
if installed_changeset_revision_elem is None:
# Backward compatibility issue - the tag used to be named 'changeset_revision'.
installed_changeset_revision_elem = elem.find("changeset_revision")
installed_changeset_revision = installed_changeset_revision_elem.text
repository = self._get_tool_shed_repository(tool_shed=tool_shed,
name=repository_name,
owner=repository_owner,
installed_changeset_revision=installed_changeset_revision)
if not repository:
msg = "Attempted to load tool shed tool, but the repository with name '%s' from owner '%s' was not found " \
"in database. Tool will be loaded without install database."
log.warning(msg, repository_name, repository_owner)
# Figure out path to repository on disk given the tool shed info and the path to the tool contained in the repo
repository_path = os.path.join(tool_shed, 'repos', repository_owner, repository_name, installed_changeset_revision)
tool_path = path[:path.index(repository_path)]
repository = ToolConfRepository(
tool_shed,
repository_name,
repository_owner,
installed_changeset_revision,
installed_changeset_revision,
None,
repository_path,
tool_path
)
self.app.tool_shed_repository_cache.add_local_repository(repository)
return repository
def _get_tool_shed_repository(self, tool_shed, name, owner, installed_changeset_revision):
# Abstract class doesn't have a dependency on the database, for full Tool Shed
# support the actual Galaxy ToolBox implements this method and returns a Tool Shed repository.
return None
def __add_tool(self, tool, load_panel_dict, panel_dict):
# Allow for the same tool to be loaded into multiple places in the
# tool panel. We have to handle the case where the tool is contained
# in a repository installed from the tool shed, and the Galaxy
# administrator has retrieved updates to the installed repository. In
# this case, the tool may have been updated, but the version was not
# changed, so the tool should always be reloaded here. We used to
# only load the tool if it was not found in self._tools_by_id, but
# performing that check did not enable this scenario.
tool._lineage = self._lineage_map.register(tool)
self.register_tool(tool)
if load_panel_dict:
self.__add_tool_to_tool_panel(tool, panel_dict, section=isinstance(panel_dict, ToolSection))
def _load_workflow_tag_set(self, item, panel_dict, integrated_panel_dict, load_panel_dict, index=None):
try:
# TODO: should id be encoded?
workflow_id = item.get('id')
workflow = self._load_workflow(workflow_id)
self._workflows_by_id[workflow_id] = workflow
key = f"workflow_{workflow_id}"
if load_panel_dict:
panel_dict[key] = workflow
# Always load workflows into the integrated_panel_dict.
integrated_panel_dict.update_or_append(index, key, workflow)
except Exception:
log.exception("Error loading workflow: %s", workflow_id)
def _load_label_tag_set(self, item, panel_dict, integrated_panel_dict, load_panel_dict, index=None):
label = ToolSectionLabel(item)
key = f"label_{label.id}"
if load_panel_dict:
panel_dict[key] = label
integrated_panel_dict.update_or_append(index, key, label)
def _load_section_tag_set(self, item, tool_path, load_panel_dict, index=None, tool_cache_data_dir=None):
key = item.get("id")
if key in self._tool_panel:
section = self._tool_panel[key]
elems = section.elems
else:
section = ToolSection(item)
elems = section.elems
if key in self._integrated_tool_panel:
integrated_section = self._integrated_tool_panel[key]
integrated_elems = integrated_section.elems
else:
integrated_section = ToolSection(item)
integrated_elems = integrated_section.elems
for sub_index, sub_item in enumerate(item.items):
self.load_item(
sub_item,
tool_path=tool_path,
panel_dict=elems,
integrated_panel_dict=integrated_elems,
load_panel_dict=load_panel_dict,
guid=sub_item.get('guid'),
index=sub_index,
tool_cache_data_dir=tool_cache_data_dir,
)
# Ensure each tool's section is stored
for section_key, section_item_type, section_item in integrated_elems.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
if section_item:
tool_id = section_key.replace('tool_', '', 1)
self._integrated_section_by_tool[tool_id] = integrated_section.id, integrated_section.name
if load_panel_dict:
self._tool_panel[key] = section
# Always load sections into the integrated_tool_panel.
self._integrated_tool_panel.update_or_append(index, key, integrated_section)
def _load_tooldir_tag_set(self, item, elems, tool_path, integrated_elems, load_panel_dict, tool_cache_data_dir=None):
directory = os.path.join(tool_path, item.get("dir"))
recursive = string_as_bool(item.get("recursive", True))
self.__watch_directory(directory, elems, integrated_elems, load_panel_dict, recursive, force_watch=True, tool_cache_data_dir=tool_cache_data_dir)
def __watch_directory(self, directory, elems, integrated_elems, load_panel_dict, recursive, force_watch=False, tool_cache_data_dir=None):
def quick_load(tool_file, async_load=True):
try:
tool = self.load_tool(tool_file, tool_cache_data_dir)
self.__add_tool(tool, load_panel_dict, elems)
# Always load the tool into the integrated_panel_dict, or it will not be included in the integrated_tool_panel.xml file.
key = f'tool_{str(tool.id)}'
integrated_elems[key] = tool
if async_load:
if self.app.config.enable_beta_edam_toolbox:
self._load_tool_panel_edam()
else:
self._load_tool_panel()
self._save_integrated_tool_panel()
return tool.id
except Exception:
log.exception("Failed to load potential tool %s.", tool_file)
return None
tool_loaded = False
if not os.path.isdir(directory):
log.error("Failed to read tool directory %s.", directory)
return
for name in os.listdir(directory):
if name.startswith(('.', '_')):
# Very unlikely that we want to load tools from a hidden or private folder
continue
child_path = os.path.join(directory, name)
if os.path.isdir(child_path) and recursive:
self.__watch_directory(child_path, elems, integrated_elems, load_panel_dict, recursive)
elif self._looks_like_a_tool(child_path):
tool_id = quick_load(child_path, async_load=False)
tool_loaded = bool(tool_id)
if (tool_loaded or force_watch) and self._tool_watcher:
self._tool_watcher.watch_directory(directory, quick_load)
def load_tool(self, config_file, guid=None, tool_shed_repository=None, use_cached=False, tool_cache_data_dir=None, **kwds):
"""Load a single tool from the file named by `config_file` and return an instance of `Tool`."""
# Parse XML configuration file and get the root element
tool = None
if use_cached:
tool = self.load_tool_from_cache(config_file)
if not tool or guid and guid != tool.guid:
try:
tool = self.create_tool(config_file=config_file, tool_shed_repository=tool_shed_repository, guid=guid, tool_cache_data_dir=tool_cache_data_dir, **kwds)
except Exception:
# If the tool is broken but still exists we can load it from the cache
tool = self.load_tool_from_cache(config_file, recover_tool=True)
if tool:
log.exception(f"Tool '{config_file}' is not valid:")
tool.tool_errors = 'Current on-disk tool is not valid'
else:
raise
if tool.tool_shed_repository or not guid:
self.add_tool_to_cache(tool, config_file)
self.watch_tool(tool)
return tool
def watch_tool(self, tool):
if not tool.id.startswith("__"):
# do not monitor special tools written to tmp directory - no reason
# to monitor such a large directory.
if self._tool_config_watcher:
[self._tool_config_watcher.watch_file(macro_path) for macro_path in tool._macro_paths]
def add_tool_to_cache(self, tool, config_file):
tool_cache = getattr(self.app, 'tool_cache', None)
if tool_cache:
self.app.tool_cache.cache_tool(config_file, tool)
def load_tool_from_cache(self, config_file, recover_tool=False):
tool_cache = getattr(self.app, 'tool_cache', None)
tool = None
if tool_cache:
if recover_tool:
tool = tool_cache.get_removed_tool(config_file)
else:
tool = tool_cache.get_tool(config_file)
return tool
def load_hidden_lib_tool(self, path):
tool_xml = os.path.join(os.getcwd(), "lib", path)
return self.load_hidden_tool(tool_xml)
def load_hidden_tool(self, config_file, **kwds):
""" Load a hidden tool (in this context meaning one that does not
appear in the tool panel) and register it in _tools_by_id.
"""
tool = self.load_tool(config_file, **kwds)
self.register_tool(tool)
return tool
def register_tool(self, tool):
tool_id = tool.id
version = tool.version or None
if tool_id not in self._tool_versions_by_id:
self._tool_versions_by_id[tool_id] = {version: tool}
else:
self._tool_versions_by_id[tool_id][version] = tool
if tool_id in self._tools_by_id:
related_tool = self._tools_by_id[tool_id]
# This one becomes the default un-versioned tool
# if newer.
if self._newer_tool(tool, related_tool):
self._tools_by_id[tool_id] = tool
else:
self._tools_by_id[tool_id] = tool
def package_tool(self, trans, tool_id):
"""
Create a tarball with the tool's xml, help images, and test data.
:param trans: the web transaction
:param tool_id: the tool ID from app.toolbox
:returns: tuple of tarball filename, success True/False, message/None
"""
# Make sure the tool is actually loaded.
if tool_id not in self._tools_by_id:
raise ObjectNotFound(f"No tool found with id '{escape(tool_id)}'.")
else:
tool = self._tools_by_id[tool_id]
return tool.to_archive()
def reload_tool_by_id(self, tool_id):
"""
Attempt to reload the tool identified by 'tool_id', if successful
replace the old tool.
"""
if tool_id not in self._tools_by_id:
message = f"No tool with id '{escape(tool_id)}'."
status = 'error'
else:
old_tool = self._tools_by_id[tool_id]
new_tool = self.load_tool(old_tool.config_file, use_cached=False)
# The tool may have been installed from a tool shed, so set the tool shed attributes.
# Since the tool version may have changed, we don't override it here.
new_tool.id = old_tool.id
new_tool.guid = old_tool.guid
new_tool.tool_shed = old_tool.tool_shed
new_tool.repository_name = old_tool.repository_name
new_tool.repository_owner = old_tool.repository_owner
new_tool.installed_changeset_revision = old_tool.installed_changeset_revision
new_tool.old_id = old_tool.old_id
# Replace old_tool with new_tool in self._tool_panel
tool_key = f"tool_{tool_id}"
for key, val in self._tool_panel.items():
if key == tool_key:
self._tool_panel[key] = new_tool
break
elif key.startswith('section'):
if tool_key in val.elems:
self._tool_panel[key].elems[tool_key] = new_tool
break
# (Re-)Register the reloaded tool, this will handle
# _tools_by_id and _tool_versions_by_id
self.register_tool(new_tool)
message = {'name': old_tool.name, 'id': old_tool.id, 'version': old_tool.version}
status = 'done'
return message, status
def remove_tool_by_id(self, tool_id, remove_from_panel=True):
"""
Attempt to remove the tool identified by 'tool_id'. Ignores
tool lineage - so to remove a tool with potentially multiple
versions send remove_from_panel=False and handle the logic of
promoting the next newest version of the tool into the panel
if needed.
"""
if tool_id not in self._tools_by_id:
message = f"No tool with id {escape(tool_id)}"
status = 'error'
else:
tool = self._tools_by_id[tool_id]
del self._tools_by_id[tool_id]
tool_cache = getattr(self.app, 'tool_cache', None)
if tool_cache:
tool_cache.expire_tool(tool_id)
if remove_from_panel:
tool_key = f"tool_{tool_id}"
for key, val in self._tool_panel.items():
if key == tool_key:
del self._tool_panel[key]
break
elif key.startswith('section'):
if tool_key in val.elems:
del self._tool_panel[key].elems[tool_key]
break
if tool_id in self.data_manager_tools:
del self.data_manager_tools[tool_id]
# TODO: do we need to manually remove from the integrated panel here?
message = "Removed the tool:<br/>"
message += f"<b>name:</b> {escape(tool.name)}<br/>"
message += f"<b>id:</b> {escape(tool.id)}<br/>"
message += f"<b>version:</b> {escape(tool.version)}"
status = 'done'
return message, status
def get_sections(self):
for v in self._tool_panel.values():
if isinstance(v, ToolSection):
yield (v.id, v.name)
def find_section_id(self, tool_panel_section_id):
"""
Find the section ID referenced by the key or return '' indicating
no such section id.
"""
if not tool_panel_section_id:
tool_panel_section_id = ''
else:
if tool_panel_section_id not in self._tool_panel:
# Hack introduced without comment in a29d54619813d5da992b897557162a360b8d610c-
# not sure why it is needed.
fixed_tool_panel_section_id = f'section_{tool_panel_section_id}'
if fixed_tool_panel_section_id in self._tool_panel:
tool_panel_section_id = fixed_tool_panel_section_id
else:
tool_panel_section_id = ''
return tool_panel_section_id
def _load_workflow(self, workflow_id):
"""
Return an instance of 'Workflow' identified by `id`,
which is encoded in the tool panel.
"""
id = self.app.security.decode_id(workflow_id)
stored = self.app.model.context.query(self.app.model.StoredWorkflow).get(id)
return stored.latest_workflow
def tool_panel_contents(self, trans, **kwds):
""" Filter tool_panel contents for displaying for user.
"""
filter_method = self._build_filter_method(trans)
for _, item_type, elt in self._tool_panel.panel_items_iter():
elt = filter_method(elt, item_type)
if elt:
yield elt
def get_tool_to_dict(self, trans, tool, tool_help=False):
"""Return tool's to_dict.
Use cache if present, store to cache otherwise.
Note: The cached tool's to_dict is specific to the calls from toolbox.
"""
to_dict = None
if not trans.user_is_admin:
if not tool_help:
to_dict = self._tool_to_dict_cache.get(tool.id, None)
if not to_dict:
to_dict = tool.to_dict(trans, link_details=True, tool_help=tool_help)
if not tool_help:
self._tool_to_dict_cache[tool.id] = to_dict
else:
if not tool_help:
to_dict = self._tool_to_dict_cache_admin.get(tool.id, None)
if not to_dict:
to_dict = tool.to_dict(trans, link_details=True, tool_help=tool_help)
if not tool_help:
self._tool_to_dict_cache_admin[tool.id] = to_dict
return to_dict
def to_dict(self, trans, in_panel=True, tool_help=False, **kwds):
"""
Create a dictionary representation of the toolbox.
Uses primitive cache for toolbox-specific tool 'to_dict's.
"""
rval = []
if in_panel:
panel_elts = list(self.tool_panel_contents(trans, **kwds))
for elt in panel_elts:
# Only use cache for objects that are Tools.
if hasattr(elt, "tool_type"):
rval.append(self.get_tool_to_dict(trans, elt, tool_help=tool_help))
else:
kwargs = dict(trans=trans, link_details=True, tool_help=tool_help, toolbox=self)
rval.append(elt.to_dict(**kwargs))
else:
filter_method = self._build_filter_method(trans)
for tool in self._tools_by_id.values():
tool = filter_method(tool, panel_item_types.TOOL)
if not tool:
continue
rval.append(self.get_tool_to_dict(trans, tool, tool_help=tool_help))
return rval
def _lineage_in_panel(self, panel_dict, tool=None, tool_lineage=None):
""" If tool with same lineage already in panel (or section) - find
and return it. Otherwise return None.
"""
if tool_lineage is None:
assert tool is not None
tool_lineage = tool.lineage
if tool_lineage is not None:
for lineage_tool_version in reversed(tool_lineage.get_versions()):
lineage_tool = self._tool_from_lineage_version(lineage_tool_version)
if lineage_tool:
lineage_id = lineage_tool.id
if panel_dict.has_tool_with_id(lineage_id):
return panel_dict.get_tool_with_id(lineage_id)
else:
log.warning("Could not find lineage for tool '%s'", tool.id)
return None
def _newer_tool(self, tool1, tool2):
""" Return True if tool1 is considered "newer" given its own lineage
description.
"""
return tool1.version_object > tool2.version_object
def _tool_from_lineage_version(self, lineage_tool_version):
if lineage_tool_version.id_based:
return self._tools_by_id.get(lineage_tool_version.id, None)
else:
return self._tool_versions_by_id.get(lineage_tool_version.id, {}).get(lineage_tool_version.version, None)
def _build_filter_method(self, trans):
context = Bunch(toolbox=self, trans=trans)
filters = self._filter_factory.build_filters(trans)
return lambda element, item_type: _filter_for_panel(element, item_type, filters, context)
def _filter_for_panel(item, item_type, filters, context):
"""
Filters tool panel elements so that only those that are compatible
with provided filters are kept.
"""
def _apply_filter(filter_item, filter_list):
for filter_method in filter_list:
try:
if not filter_method(context, filter_item):
return False
except Exception as e:
raise MessageException(f"Toolbox filter exception from '{filter_method.__name__}': {unicodify(e)}.")
return True
if item_type == panel_item_types.TOOL:
if _apply_filter(item, filters['tool']):
return item
elif item_type == panel_item_types.LABEL:
if _apply_filter(item, filters['label']):
return item
elif item_type == panel_item_types.SECTION:
# Filter section item-by-item. Only show a label if there are
# non-filtered tools below it.
if _apply_filter(item, filters['section']):
cur_label_key = None
tools_under_label = False
filtered_elems = item.elems.copy()
for key, section_item_type, section_item in item.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
# Filter tool.
if _apply_filter(section_item, filters['tool']):
tools_under_label = True
else:
del filtered_elems[key]
elif section_item_type == panel_item_types.LABEL:
# If there is a label and it does not have tools,
# remove it.
if cur_label_key and (not tools_under_label or not _apply_filter(section_item, filters['label'])):
del filtered_elems[cur_label_key]
# Reset attributes for new label.
cur_label_key = key
tools_under_label = False
# Handle last label.
if cur_label_key and not tools_under_label:
del filtered_elems[cur_label_key]
# Only return section if there are elements.
if len(filtered_elems) != 0:
copy = item.copy()
copy.elems = filtered_elems
return copy
return None
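# Illustrative sketch (added, not part of the original source): how the filtering
# pieces above are typically combined. `toolbox` is assumed to be an AbstractToolBox
# instance and `trans` a Galaxy transaction; this mirrors tool_panel_contents().
def _example_visible_panel_elements(toolbox, trans):
    """Yield only the panel elements that survive the user's toolbox filters."""
    filter_method = toolbox._build_filter_method(trans)
    for _, item_type, elt in toolbox._tool_panel.panel_items_iter():
        kept = filter_method(elt, item_type)
        if kept:
            yield kept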
class BaseGalaxyToolBox(AbstractToolBox):
"""
Extend the AbstractToolBox with more Galaxy tooling-specific
functionality. Adds dependencies on dependency resolution and
tool loading modules, that an abstract description of panels
shouldn't really depend on.
"""
def __init__(self, config_filenames, tool_root_dir, app, save_integrated_tool_panel=True):
super().__init__(config_filenames, tool_root_dir, app, save_integrated_tool_panel)
old_toolbox = getattr(app, 'toolbox', None)
if old_toolbox:
self.dependency_manager = old_toolbox.dependency_manager
else:
self._init_dependency_manager()
@property
def sa_session(self):
"""
Returns a SQLAlchemy session
"""
return self.app.model.context
def _looks_like_a_tool(self, path):
return looks_like_a_tool(path, enable_beta_formats=getattr(self.app.config, "enable_beta_tool_formats", False))
def _init_dependency_manager(self):
use_tool_dependency_resolution = getattr(self.app, "use_tool_dependency_resolution", True)
if not use_tool_dependency_resolution:
self.dependency_manager = NullDependencyManager()
return
app_config_dict = self.app.config.config_dict
conf_file = app_config_dict.get("dependency_resolvers_config_file")
default_tool_dependency_dir = os.path.join(
self.app.config.data_dir, self.app.config.schema.defaults['tool_dependency_dir'])
self.dependency_manager = build_dependency_manager(
app_config_dict=app_config_dict, conf_file=conf_file, default_tool_dependency_dir=default_tool_dependency_dir)
def reload_dependency_manager(self):
self._init_dependency_manager()
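# Minimal usage sketch (added, not part of the original file): building a Galaxy
# toolbox from a tool panel configuration. `app` is assumed to be a Galaxy
# application object exposing `config`, `model` and `security`; the file paths
# below are placeholders.
def _example_build_toolbox(app):
    toolbox = BaseGalaxyToolBox(
        config_filenames=["config/tool_conf.xml"],
        tool_root_dir="tools",
        app=app,
    )
    # __init__ either inherits the dependency manager from a previous app.toolbox
    # or initializes a fresh one from the application configuration.
    return toolbox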
192948
from setuptools import setup
setup(
name='vesseg',
version='0.2.0',
author='<NAME>',
author_email='<EMAIL>',
install_requires=[
'click',
'tensorflow-gpu',
'niftynet',
'SimpleITK',
'vtk',
'tqdm',
],
entry_points={
'console_scripts': [
'vesseg=segment:main',
'segmerge=merge_segs:main',
'bin2mesh=bin2mesh:main',
],
},
)
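# Added commentary (not part of the original setup.py): after installation,
# setuptools generates three console commands from the entry points above,
# assuming the top-level modules `segment`, `merge_segs` and `bin2mesh` each
# expose a `main()` callable:
#   vesseg   -> segment.main()
#   segmerge -> merge_segs.main()
#   bin2mesh -> bin2mesh.main()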