id stringlengths 3-8 | content stringlengths 100-981k |
|---|---|
11522234
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
VERSION = (0, 1)
backendapp = 'django_wysiwyg'
# Do some settings checks.
if backendapp not in settings.INSTALLED_APPS:
raise ImproperlyConfigured("The '{0}' application is required to use the '{1}' plugin.".format(backendapp, 'text'))
try:
import django_wysiwyg
except ImportError:
raise ImportError("The 'django-wysiwyg' package is required to use the 'text' plugin.")
|
11522242
|
import tensorflow as tf
import unittest
from context import fastISM
from fastISM.models.bpnet import bpnet_model
class TestCustomStopLayer(unittest.TestCase):
# Tests introducing stop layers at intermediate nodes (early_stop_layers)
# that are not necessarily in STOP_LAYERS (i.e. not dense/flatten layers);
# they can be conv/add layers as well. This is useful when the perturbed
# width grows quickly and spans a large fraction of the intermediate conv
# layers.
def test_two_conv_addstop_fc(self):
# inp --> C -> Add (stop) -> D -> y
# |-> C ----^
inp = tf.keras.Input((100, 4))
x1 = tf.keras.layers.Conv1D(20, 3)(inp)
x2 = tf.keras.layers.Conv1D(20, 3)(inp)
stop_add = tf.keras.layers.Add()
x = stop_add([x1, x2])
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model,
early_stop_layers=stop_add.name,
test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_two_conv_addstop_skip_fc(self):
# inp --> C -> Add (stop) -> D --> Add -> y
# |-> C -> ^ ----------> D -----^
inp = tf.keras.Input((100, 4))
x1 = tf.keras.layers.Conv1D(20, 3)(inp)
x2 = tf.keras.layers.Conv1D(20, 3)(inp)
stop_add = tf.keras.layers.Add()
x = stop_add([x1, x2])
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(10)(x)
x2 = tf.keras.layers.Flatten()(x2)
x2 = tf.keras.layers.Dense(10)(x2)
x = tf.keras.layers.Add()([x, x2])
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model,
early_stop_layers=stop_add.name,
test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_into_stop_segment(self):
# inp --> C -> C (stop) -> Add -> D --> y
# |--> C -----------^
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(20, 3)(inp)
x1 = tf.keras.layers.Conv1D(20, 3)(x)
stop_conv = tf.keras.layers.Conv1D(20, 3)
x = stop_conv(x)
x = tf.keras.layers.Add()([x, x1])
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model,
early_stop_layers=stop_conv.name,
test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_two_conv_maxpool_fc(self):
# inp -> C -> MXP -> C -> MXP -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(10, 7, padding='same')(inp)
x = tf.keras.layers.MaxPooling1D(3)(x)
x = tf.keras.layers.Conv1D(10, 3)(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(2)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
for layer in model.layers[1:5]:
fast_ism_model = fastISM.FastISM(
model,
early_stop_layers=layer.name,
test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_mini_dense_net(self):
# early stops added at layers with "x"
# _________ _____________ __________________ _____________
# ^ | ^ | ^ | ^ |
# inp -> C -> C-> Add1 (x)-> C -> Add2(x) -> MXP (x) -> C1 (x) -> C2 -> Max1 (x) -> C -> Add -> D -> y
# |_______________________^ |___________________________________^
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(20, 3)(inp)
x1 = tf.keras.layers.Conv1D(20, 3, padding='same')(x)
add1 = tf.keras.layers.Add()
x1 = add1([x, x1])
x2 = tf.keras.layers.Conv1D(20, 5, padding='same')(x1)
add2 = tf.keras.layers.Add()
x2 = add2([x, x1, x2])
mxp = tf.keras.layers.MaxPooling1D(3)
x2 = mxp(x2)
c1 = tf.keras.layers.Conv1D(10, 2)
x2 = c1(x2)
x3 = tf.keras.layers.Conv1D(10, 7, padding='same')(x2)
max1 = tf.keras.layers.Maximum()
x3 = max1([x2, x3])
x4 = tf.keras.layers.Conv1D(10, 4, padding='same')(x3)
x4 = tf.keras.layers.Add()([x2, x3, x4])
x4 = tf.keras.layers.Flatten()(x4)
y = tf.keras.layers.Dense(1)(x4)
model = tf.keras.Model(inputs=inp, outputs=y)
for layer in [add1, add2, mxp, c1, max1]:
fast_ism_model = fastISM.FastISM(
model,
early_stop_layers=layer.name,
test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_bpnet_5_dilated_100(self):
model = bpnet_model(seqlen=100, num_dilated_convs=5)
conv_layers = [x.name for x in model.layers if 'conv1d' in x.name]
for conv_layer in conv_layers:
# try with an early stop at each of conv layers
fast_ism_model = fastISM.FastISM(
model,
early_stop_layers=conv_layer,
test_correctness=False)
# seems to need a looser numerical tolerance to always pass
self.assertTrue(fast_ism_model.test_correctness(atol=1e-5))
def test_bpnet_9_dilated_100(self):
model = bpnet_model(seqlen=100, num_dilated_convs=9)
conv_layers = [x.name for x in model.layers if 'conv1d' in x.name]
for conv_layer in conv_layers[-4:]:
# try with an early stop at each of the last 4 conv layers
fast_ism_model = fastISM.FastISM(
model,
early_stop_layers=conv_layer,
test_correctness=False)
# seems to need a looser numerical tolerance to always pass
self.assertTrue(fast_ism_model.test_correctness(atol=1e-5))
if __name__ == '__main__':
unittest.main()
|
11522285
|
from __future__ import annotations
from rich.console import Console
from typing import ItemsView, KeysView, ValuesView, NamedTuple
from . import log
from .geometry import Region, Size
from .widget import Widget
class RenderRegion(NamedTuple):
region: Region
order: tuple[int, ...]
clip: Region
class LayoutMap:
def __init__(self, size: Size) -> None:
self.size = size
self.widgets: dict[Widget, RenderRegion] = {}
def __getitem__(self, widget: Widget) -> RenderRegion:
return self.widgets[widget]
def items(self) -> ItemsView[Widget, RenderRegion]:
return self.widgets.items()
def keys(self) -> KeysView[Widget]:
return self.widgets.keys()
def values(self) -> ValuesView[RenderRegion]:
return self.widgets.values()
def clear(self) -> None:
self.widgets.clear()
def add_widget(
self,
widget: Widget,
region: Region,
order: tuple[int, ...],
clip: Region,
) -> None:
from .view import View
if widget in self.widgets:
return
self.widgets[widget] = RenderRegion(region + widget.layout_offset, order, clip)
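# Views are containers: recurse into their arrangement so every child widget
# is registered with a region translated into screen space (offset by the
# view's origin and scroll) and clipped to the view's visible area.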
if isinstance(widget, View):
view: View = widget
scroll = view.scroll
total_region = region.size.region
sub_clip = clip.intersection(region)
arrangement = view.get_arrangement(region.size, scroll)
for sub_region, sub_widget, sub_order in arrangement:
total_region = total_region.union(sub_region)
if sub_widget is not None:
self.add_widget(
sub_widget,
sub_region + region.origin - scroll,
sub_order,
sub_clip,
)
view.virtual_size = total_region.size
|
11522323
|
from django.apps import AppConfig
class BeatServerConfig(AppConfig):
name = "beatserver"
verbose_name = "Beat Server"
|
11522325
|
import paddlehub as hub
love_module = hub.Module(name="ernie_gen_lover_words")
if __name__ == '__main__':
test_texts = ['情人节', '故乡', '小编带大家了解一下程序员情人节']
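# The prompts are Chinese: 'Valentine's Day', 'hometown',
# 'the editor walks you through a programmer's Valentine's Day'.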
results = love_module.generate(texts=test_texts, beam_width=1)
for result in results:
print(result)
|
11522336
|
import os
import matplotlib
#Allow headless use
if os.environ.get('DISPLAY') is None:
print("Falling back to Agg engine")
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib.image
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from numpy import mean
from datetime import datetime
def plot_data(data, filename='plot.png', maxups=None, maxcoms=None, maxage=None, show=False, timeline=None):
'''
Where the plotting magic happens.
plot_data works on a single data point. It can be used directly after get_data.
After collect_data, plot_collec should be preferred, so that the max... args are handled correctly.
If the max... args are None, plot_data freely adapts the chart scale and colors.
timeline contains aggregate data from plot_collec and is required to plot the upper timeline.
Plotting the timeline requires full knowledge of the data_collec and therefore
doesn't work on a single data point.
'''
def format_title(title,post_sub,analysed_sub,limit_title_len):
'''reformat the title if too long and add the sub name if different from
the analysed sub'''
if post_sub != 'r/'+analysed_sub:
f_title = post_sub + ' - ' + title
else:
f_title = title
if len(f_title) > limit_title_len:
f_title = f_title[:limit_title_len-3]+'...'
return f_title
def crop_image(img, imgheight):
'''cut the thumbnail in the middle of the height'''
topcut = round(len(img)/2) - round(imgheight/2)
bottomcut = round(len(img)/2) + round(imgheight/2)
img = img[topcut:bottomcut, :, :]
return img
def make_colormap_age(maxage,ages,cmapname='GnBu'):
'''prepare the colormap'''
cmap = plt.cm.get_cmap(cmapname)
norm = matplotlib.colors.Normalize(vmin=0, vmax=1.05*maxage)
#avoid that vmax matches the max of the color map, otherwise
#it could be white with certain cmaps
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
cmapage = []
for age in ages:
cmapage.append(cmap(norm(age)))
return cmapage, sm
def rm_frames(ax):
'''shortcut to remove all frames of a subplot'''
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
imgheight = 48 #crop images to 48px high (108px wide by default)
limit_title_len = 60 #max nbr of characters in displayed title
figsize = (18,10)
ups = data['ups']
coms = data['coms']
thumbs = data['thumbs']
ages = [age/60 for age in data['ages']]
titles = data['titles']
subs = data['subs']
if not maxage:
maxage = max(data['ages'])  # keep the raw unit so the conversion to hours below is applied once
maxage = maxage/60
cmapage, sm = make_colormap_age(maxage=maxage,ages=ages)
maxposts = len(ups)
list_yticks = list(range(1,maxposts+1))
#initiate plot
plt.rcdefaults()
matplotlib.rcParams.update({'font.size': 15})
plt.yticks(list_yticks)
fig = plt.figure(figsize = figsize)
gs = gridspec.GridSpec(2, 5, width_ratios=[0.2,1.5,0.3,1.5,0.05], height_ratios=[1,5])
#Grid is 2 rows * 5 columns
#Top row is for the timeline
#Bottom row is for karma bars / thumbnails / comments bars / colormap legend
#top of the plot, where the timeline is plotted
if timeline:
color_ups = '#549ED6'
color_coms = '#33cc33'
ax_tl = plt.subplot(gs[0,1:5])
rm_frames(ax_tl)
ax_tl.spines['bottom'].set_visible(True)
tl_ups = timeline['ups']
tl_coms = timeline['coms']
tl_ages = [age/60 for age in timeline['ages']]
tl_dates = timeline['dates']
curr_date = datetime.strptime(data['timestamp'],"%b %d %Y %H:%M:%S")
idx_curr_date = tl_dates.index(curr_date)
ax_tl.plot(tl_dates[:idx_curr_date+1],tl_ups[:idx_curr_date+1],color=color_ups)
ax_tl.plot(tl_dates[idx_curr_date:],tl_ups[idx_curr_date:],color=color_ups, alpha=0.1)
ax_tl.set_ylabel('mean(Karma)', color=color_ups)
ax_tl.tick_params('y', colors=color_ups)
ax_tl.yaxis.set_major_locator(mticker.LinearLocator(3))
ax_tl.yaxis.grid(color='grey', linestyle='-', linewidth=0.5, alpha=0.5)
ax_tltwin = ax_tl.twinx()
rm_frames(ax_tltwin)
ax_tltwin.plot(tl_dates[:idx_curr_date+1],tl_ages[:idx_curr_date+1],color=color_coms)
ax_tltwin.plot(tl_dates[idx_curr_date:],tl_ages[idx_curr_date:],color=color_coms, alpha=0.1)
ax_tltwin.set_ylabel('mean(age in hours)', color=color_coms)
ax_tltwin.yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f'))
ax_tltwin.tick_params('y', colors=color_coms)
ax_tltwin.yaxis.set_major_locator(mticker.LinearLocator(3))
#ax_tltwin.xaxis.set_major_locator(mdates.HourLocator())
ax_tltwin.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
else: #if no timeline, when plot_data is used on a single data point, fill
#the empty space with a title.
#TODO: improve that part to make better use of the space...
ax_tl = plt.subplot(gs[0,1:])
ax_tl.axis('off')
text_title = 'r/' + data['sub'] + ' - ' + data['timestamp']
ax_tl.text(0.4,0.5,text_title,fontsize=16, fontweight='bold')
#left side of the plot, where the karma is plotted
ax_ups = plt.subplot(gs[1,:2])
rm_frames(ax_ups)
if maxups:
ax_ups.set_xlim(0, maxups)
ax_ups.barh(list_yticks,ups, color = cmapage)
ax_ups.invert_xaxis()
ax_ups.invert_yaxis()
ax_ups.set_xlabel('Karma')
ax_ups.xaxis.set_label_position('bottom')
ax_ups.xaxis.tick_bottom()
ax_ups.set_yticks([])
plt.setp(ax_ups.get_yticklabels(), visible=False)
ax_ups.xaxis.grid(color='grey', linestyle='-', linewidth=0.5, alpha=0.5)
for n in range(len(titles)):
title = format_title(titles[n],subs[n],data['sub'],limit_title_len)
title_pos = ax_ups.get_xlim()[0]
ax_ups.text(title_pos,n+1,' '+title, verticalalignment='center')
#center of the plot, for pictures
ax_thumbs = plt.subplot(gs[1,2], sharey=ax_ups)
ax_thumbs.set_yticklabels([])
ax_thumbs.axis('off')
for n in range(len(thumbs)):
arr_img = crop_image(img=matplotlib.image.imread(thumbs[n]),imgheight=imgheight)
imagebox = OffsetImage(arr_img, 0.7)
ab = AnnotationBbox(imagebox, (0.5, n+1), frameon=False)
ax_thumbs.add_artist(ab)
#right side of the plot, where the comments are plotted
ax_coms = plt.subplot(gs[1,3], sharey=ax_ups)
rm_frames(ax_coms)
if maxcoms:
ax_coms.set_xlim(0,maxcoms)
ax_coms.barh(list_yticks,coms, color = cmapage)
ax_coms.set_xlabel('Comments')
plt.setp(ax_coms.get_yticklabels(), visible=False)
ax_coms.xaxis.set_label_position('bottom')
ax_coms.xaxis.tick_bottom()
ax_coms.xaxis.grid(color='grey', linestyle='-', linewidth=0.5, alpha=0.5)
#colormap legend
ax_cbar = plt.subplot(gs[1,4])
cbar = plt.colorbar(sm, cax = ax_cbar)
cbar.set_label('age in hours')
plt.subplots_adjust(wspace=0.05, hspace=0.2)
#Check existence of plots directory
if not os.path.exists("plots/"):
os.makedirs("plots/")
plt.savefig('plots/'+filename, bbox_inches='tight')
if show: plt.show()
plt.close()
return
def plot_collec(data_collec,maxups=None, maxage=None,maxcoms=None):
'''
Prepares the max... args to make sure that the highest value of the axis
and the max color of the color map covers all the data_collec.
Prepares the timeline_data required for plotting the timeline.
Then, launch the loop to plot each data point.
'''
if not maxups:
maxups = max([max(d['ups']) for d in data_collec])
if not maxcoms:
maxcoms = max([max(d['coms']) for d in data_collec])
if not maxage:
maxage = max([max(d['ages']) for d in data_collec])
nbr_zfill = len(str(len(data_collec)))
timeline = {
'ups':[mean(data['ups']) for data in data_collec],
'coms':[mean(data['coms']) for data in data_collec],
'dates':[datetime.strptime(data['timestamp'],"%b %d %Y %H:%M:%S") for data in data_collec],
'ages': [mean(data['ages']) for data in data_collec]
}
n=1
for data in data_collec:
filename = str(n).zfill(nbr_zfill)+'.png'
plot_data(data,filename=filename,maxups=maxups, maxage=maxage,maxcoms=maxcoms,show=False, timeline = timeline)
n+=1
if __name__ == '__main__':
pass
|
11522367
|
import random
import tempfile
from os.path import exists, join
from arbol import aprint, asection
from tifffile import imread
from dexp.datasets import ZDataset
from dexp.datasets.operations.tiff import dataset_tiff
from dexp.datasets.synthetic_datasets import generate_nuclei_background_data
from dexp.utils.backends import Backend, CupyBackend, NumpyBackend
def demo_tiff_numpy():
with NumpyBackend():
_demo_tiff()
def demo_tiff_cupy():
try:
with CupyBackend():
_demo_tiff(length_xy=128, zoom=2)
return True
except ModuleNotFoundError:
aprint("Cupy module not found! demo ignored")
return False
def _demo_tiff(length_xy=96, zoom=1, n=16, display=True):
xp = Backend.get_xp_module()
sp = Backend.get_sp_module()
# generate nuclei image:
_, _, image = generate_nuclei_background_data(
add_noise=False,
length_xy=length_xy,
length_z_factor=1,
independent_haze=True,
sphere=True,
zoom=zoom,
dtype=xp.float32,
)
with asection("prepare simulated timelapse:"):
# move to backend:
image = Backend.to_backend(image)
# generate reference 'ground truth' timelapse
images = [image.copy() for _ in range(n)]
# modify each image:
images = [
sp.ndimage.shift(
image, shift=(random.uniform(-1.0, 1.0), random.uniform(-1.0, 1.0), random.uniform(-1.0, 1.0))
)
for image in images
]
images = [image + random.uniform(-10, 10) for image in images]
# turn into array:
images = xp.stack(images)
with tempfile.TemporaryDirectory() as tmpdir:
aprint("created temporary directory", tmpdir)
with asection("Prepare dataset..."):
input_path = join(tmpdir, "dataset.zarr")
dataset = ZDataset(path=input_path, mode="w", store="dir")
dataset.add_channel(name="channel", shape=images.shape, chunks=(1, 64, 64, 64), dtype=images.dtype)
dataset.write_array(channel="channel", array=Backend.to_numpy(images))
source_array = dataset.get_array("channel")
with asection("Export to tiff..."):
# output_folder:
output_path = join(tmpdir, "file")
# Do actual tiff export:
dataset_tiff(dataset=dataset, dest_path=output_path, channels=("channel",), slicing=(slice(0, 4), ...))
output_file = output_path + ".tiff"
assert exists(output_file)
tiff_image = imread(output_file)
assert tiff_image.shape[0] == 4
assert tiff_image.shape[1:] == source_array.shape[1:]
# compute mean absolute errors:
source_array = Backend.to_backend(source_array)
tiff_image = Backend.to_backend(tiff_image)
error = xp.mean(xp.absolute(source_array[0:4] - tiff_image))
aprint(f"error = {error}")
# Asserts that check if things behave as expected:
assert error < 1e-6
if display:
def _c(array):
return Backend.to_numpy(array)
import napari
viewer = napari.Viewer(ndisplay=3)
viewer.add_image(_c(source_array), name="source_array")
viewer.add_image(_c(tiff_image), name="tiff_image")
viewer.grid.enabled = True
napari.run()
with asection("Export to tiff, one file per timepoint..."):
# output_folder:
output_path = join(tmpdir, "projection")
# Do actual tiff export:
dataset_tiff(
dataset=dataset, dest_path=output_path, channels=("channel",), slicing=(slice(0, 4), ...), project=0
)
output_file = output_path + ".tiff"
assert exists(output_file)
tiff_image = imread(output_file)
assert tiff_image.shape[0] == 4
assert tiff_image.ndim == 3
assert tiff_image.shape[1:] == source_array.shape[2:]
if display:
def _c(array):
return Backend.to_numpy(array)
import napari
viewer = napari.Viewer(ndisplay=3)
viewer.add_image(_c(source_array), name="source_array")
viewer.add_image(_c(tiff_image), name="tiff_image")
viewer.grid.enabled = True
napari.run()
if __name__ == "__main__":
if not demo_tiff_cupy():
demo_tiff_numpy()
|
11522376
|
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(path_files):
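"""Return (image paths, count), reading either a .txt list of paths or a directory tree."""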
if path_files.find('.txt') != -1:
paths, size = make_dataset_txt(path_files)
else:
paths, size = make_dataset_dir(path_files)
return paths, size
def make_dataset_txt(files):
"""
:param files: path of the txt file that stores the image paths
:return: image paths and the number of images
"""
img_paths = []
with open(files) as f:
paths = f.readlines()
for path in paths:
path = path.strip()
if is_image_file(path) and os.path.exists(path):
img_paths.append(path)
return img_paths, len(img_paths)
def make_dataset_dir(dir):
"""
:param dir: directory that stores the images
:return: image paths and the number of images
"""
img_paths = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in os.walk(dir):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
img_paths.append(path)
return img_paths, len(img_paths)
|
11522382
|
from .ISAM.ISAM import Indice
from .ISAM.Cilindro import Cilindro, Registro
from .ISAM import BinWriter as bi
import os
import pickle
import shutil
class TabsStruct:
def __init__(self, name, cols, ruta):
# number of columns
self.countCol = cols
# table name
self.name = name
# primary keys
self.pks = [0]
# tuples stored using the ISAM index structure
self.tuplas = Indice(self.pks, ruta)
# flag: True until primary keys are explicitly set
self.llaves = True
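# Return-code convention used by the Tables methods below (inferred from their bodies):
# 0 = success, 1 = unexpected error, 3 = table missing (or already exists),
# 4 = key/constraint problem, 5 = column count or index out of range.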
class Tables:
def __init__(self, ruta):
self.Tabs = {}
self.load(ruta)
def createTable(self, table, numberColumns, ruta):
if not table in self.Tabs:
self.initCheck(str(ruta)+"/"+str(table))
tab = TabsStruct(table, numberColumns,
'data/databases/'+ruta+"/"+str(table))
self.Tabs[table] = tab
self.grabar(ruta)
return 0
else:
return 3
def showTables(self):
# this operates at the database level
names = []
for tabs in self.Tabs:
names.append(self.Tabs[tabs].name)
return names
def dropTable(self, table, ruta):
try:
if table in self.Tabs:
del self.Tabs[table]
shutil.rmtree("data/databases/"+str(ruta)+"/"+str(table))
self.grabar(ruta)
return 0
else:
return 3
except:
return 1
def extractTable(self, table):
try:
return self.Tabs[table].tuplas.readAll()
except:
return None
def extractRangeTable(self, table, column, lower, upper):
try:
return self.Tabs[table].tuplas.readRange(column, lower, upper)
except:
return None
def alterAddPK(self, table, columns, ruta):
try:
if table in self.Tabs:
bool = True
for i in columns:
if i >= self.Tabs[table].countCol:
bool = False
if bool:
if self.Tabs[table].llaves:
self.Tabs[table].pks = columns
self.Tabs[table].llaves = False
else:
for x in columns:
if not x in self.Tabs[table].pks:
self.Tabs[table].pks.append(x)
# self.Tabs[table].tuplas.pkey=columns
# self.Tabs[table].tuplas.refreshMem()
else:
return 4
tup = self.Tabs[table].tuplas.readAll()
self.truncate(table, ruta)
for x in tup:
self.insert(ruta, table, x)
self.grabar(ruta)
return 0
else:
return 5
else:
return 3
except:
return 1
def alterDropPK(self, table, ruta):
try:
if table in self.Tabs:
if len(self.Tabs[table].pks) != 0:
self.Tabs[table].pks = []
return 0
else:
return 4
else:
return 3
except:
return 1
def alterTable(self, tableOld, tableNew, ruta):
try:
if tableOld in self.Tabs:
if not tableNew in self.Tabs:
temp = self.Tabs[tableOld]
self.dropTable(tableOld, ruta)
self.createTable(tableNew, temp.countCol, ruta)
self.Tabs[tableNew].pks = temp.pks
self.Tabs[tableNew].tuplas = temp.tuplas
self.grabar(ruta)
return 0
else:
return 4
else:
return 3
except:
return 1
def alterAddColumn(self, db, table, default):
try:
if table in self.Tabs:
self.Tabs[table].countCol += 1
if os.path.exists("data/databases/"+str(db)+"/"+str(table)):
contenido = os.listdir(
"data/databases/"+str(db)+"/"+str(table))
contenido.remove('indx.b')
for x in range(0, 30):
var = 'CS'+str(x)+'.b'
if var in contenido:
indx = bi.read(
"data/databases/"+str(db)+"/"+str(table)+"/CS"+str(x)+".b")
v = Registro(indx.valores)
v.alterAddColumn()
sobre = bi.write(
v, "data/databases/"+str(db)+"/"+str(table)+"/CS"+str(x)+".b")
self.Tabs[table].tuplas.refreshMem()
self.grabar(db)
return 0
else:
return 3
except:
return 1
def alterDropColumn(self, db, table, columnNumber):
try:
if table in self.Tabs:
if self.Tabs[table].countCol > 0:
if not columnNumber in self.Tabs[table].pks:
if columnNumber <= int(self.Tabs[table].countCol):
self.Tabs[table].countCol -= 1
if os.path.exists("data/databases/"+str(db)+"/"+str(table)):
contenido = os.listdir(
"data/databases/"+str(db)+"/"+str(table))
if 'indx.b' in contenido:
contenido.remove('indx.b')
for x in range(0, 30):
var = 'CS'+str(x)+'.b'
if var in contenido:
indx = bi.read(
"data/databases/"+str(db)+"/"+str(table)+"/CS"+str(x)+".b")
v = Registro(indx.valores)
v.alterDropColumn()
sobre = bi.write(
v, "data/databases/"+str(db)+"/"+str(table)+"/CS"+str(x)+".b")
self.Tabs[table].tuplas.refreshMem()
self.grabar(db)
return 0
else:
return 5
else:
return 4
else:
return 4
else:
return 3
except:
return 1
def insert(self, db,table, register):
try:
if table in self.Tabs:
if len(register) == self.Tabs[table].countCol:
ins = self.Tabs[table].tuplas.insert(register)
self.grabar(db)
return ins
else:
return 5
else:
return 3
except:
return 1
def extractRow(self, table, columns):
try:
if table in self.Tabs:
return self.Tabs[table].tuplas.extractRow(columns)
else:
return []
except:
return []
def update(self,db, table, register, columns):
try:
if table in self.Tabs:
upd = self.Tabs[table].tuplas.update(register, columns)
self.grabar(db)
return upd
else:
return 3
except:
return 1
def delete(self,db, table, columns):
try:
if table in self.Tabs:
d = self.Tabs[table].tuplas.delete(columns)
self.grabar(db)
return d
else:
return 3
except:
return 1
def truncate(self,table,ruta):
try:
if table in self.Tabs:
shutil.rmtree("data/databases/"+str(ruta)+"/"+str(table))
self.initCheck(str(ruta)+"/"+str(table))
self.Tabs[table].tuplas = Indice(
self.Tabs[table].pks, 'data/databases/'+ruta+"/"+str(table))
self.grabar(ruta)
return 0
else:
return 3
except:
return 1
def loadCSV(self, db,filepath, table):
try:
res = []
import csv
with open(filepath, 'r') as file:
reader = csv.reader(file, delimiter=',')
for row in reader:
res.append(self.insert(db,table, row))
return res
except:
return []
def initCheck(self, name):
if not os.path.exists('data/databases/'+name):
os.makedirs('data/databases/'+name)
def load(self, ruta):
if os.path.isfile('data/databases/'+str(ruta)+"/tab.b"):
self.Tabs = bi.read('data/databases/'+str(ruta)+"/tab.b")
def grabar(self, ruta):
if os.path.exists('data/databases/'+str(ruta)):
bi.write(self.Tabs, 'data/databases/'+str(ruta)+"/tab.b")
|
11522399
|
from random import shuffle
def rota(rooms):
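'''Build a 7-slot rota: repeatedly shuffle the rooms, chain the shuffles together and keep the first 7 assignments.'''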
result = []
for _ in range(0, 7, len(rooms)):
shuffle(rooms)
result.extend(rooms)
return result[:7]
|
11522401
|
import torch
import os
import math
from torch.autograd import Variable
import numpy as np
from PIL import Image
from util import util
def color_map(i):
colors = [
[255,0,0],
[0,255,0],
[0,0,255],
[128,128,0],
[0,128,128]
]
if i < 5:
return colors[i]
else:
return np.random.randint(0,256,3)
def draw_square(image, center, color, radius = 2):
d = 2*radius + 1
image_p = np.pad(image, ((radius,radius),(radius,radius),(0,0)),'constant')
center_p = [center[0]+radius, center[1]+radius]
image_p[center_p[0]-radius, (center_p[1]-radius):(center_p[1]-radius+d), :] = np.tile(color,[d,1])
image_p[(center_p[0]-radius):(center_p[0]-radius+d), center_p[1]-radius, :] = np.tile(color,[d,1])
image_p[center_p[0]+radius, (center_p[1]-radius):(center_p[1]-radius+d), :] = np.tile(color,[d,1])
image_p[(center_p[0]-radius):(center_p[0]-radius+d), center_p[1]+radius, :] = np.tile(color,[d,1])
return image_p[radius:image_p.shape[0]-radius, radius:image_p.shape[1]-radius, :]
def draw_dots(image, center, color):
image[center[0], center[1], :] = color
return image
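# draw_circle fills a disc of 'color' with a 'border_color' outline by padding
# the image, writing symmetric rows around the center, then trimming the padding.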
def draw_circle(image, center, color, radius = 4, border_color = [255,255,255]):
image_p = np.pad(image, ((radius,radius),(radius,radius),(0,0)),'constant')
center_p = [center[0]+radius, center[1]+radius]
edge_d = math.floor((2*radius + 1)/6)
image_p[center_p[0]-radius, (center_p[1]-edge_d):(center_p[1]+edge_d+1), :] = np.tile(border_color,[3,1])
image_p[center_p[0]+radius, (center_p[1]-edge_d):(center_p[1]+edge_d+1), :] = np.tile(border_color,[3,1])
for i in range(1,radius):
image_p[center_p[0]+i, center_p[1]-radius+i-1, :] = border_color
image_p[center_p[0]-i, center_p[1]-radius+i-1, :] = border_color
image_p[center_p[0]+i, (center_p[1]-radius+i):(center_p[1]+radius-i+1), :] = np.tile(color, [2*(radius-i)+1,1])
image_p[center_p[0]-i, (center_p[1]-radius+i):(center_p[1]+radius-i+1), :] = np.tile(color, [2*(radius-i)+1,1])
image_p[center_p[0]+i, center_p[1]+radius+1-i, :] = border_color
image_p[center_p[0]-i, center_p[1]+radius+1-i, :] = border_color
image_p[center_p[0], center_p[1]-radius, :] = border_color
image_p[center_p[0], (center_p[1]-radius+1):(center_p[1]+radius), :] = np.tile(color, [2*(radius-1)+1,1])
image_p[center_p[0], center_p[1]+radius, :] = border_color
return image_p[radius:image_p.shape[0]-radius, radius:image_p.shape[1]-radius, :]
def draw_points(self, A, points, radius, name, save_dir, unicolor = False, level = 0):
A_marked = util.tensor2im(A)
for i in range(len(points)):
center = [points[i][0], points[i][1]]
if unicolor == True:
color = color_map(0)
else:
color = color_map(i)
if level > 2 :
A_marked = draw_square(A_marked, center, color, radius=radius)
elif level == 2 or level == 1:
A_marked = draw_circle(A_marked, center, color)
else:
A_marked = draw_dots(A_marked, center, color)
util.save_image(A_marked, os.path.join(save_dir, name + '.png'))
def draw_correspondence(A, B, correspondence, radius, save_dir, level = 0, name=''):
A_marked = util.tensor2im(A)
B_marked = util.tensor2im(B)
for i in range(len(correspondence[0])):
color = color_map(i)
center_1 = [correspondence[0][i][0], correspondence[0][i][1]]
center_2 = [correspondence[1][i][0], correspondence[1][i][1]]
if level < 3 :
A_marked = draw_circle(A_marked, center_1, color)
B_marked = draw_circle(B_marked, center_2, color)
else:
A_marked = draw_square(A_marked, [center_1[0]+radius, center_1[1]+radius], color, radius=radius)
B_marked = draw_square(B_marked, [center_2[0]+radius, center_2[1]+radius], color, radius=radius)
util.save_image(A_marked, os.path.join(save_dir, 'A_level_'+str(level)+name+'.png'))
util.save_image(B_marked, os.path.join(save_dir, 'B_level_'+str(level)+name+'.png'))
|
11522436
|
import unittest
from conans.test.tools import TestClient
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import CONANFILE, CONANINFO
import os
from conans.model.info import ConanInfo
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.paths import CONANFILE_TXT
import platform
from conans.client.detect import detected_os
from conans.test.utils.test_files import temp_folder
from conans.util.files import load
class InstallTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
self.settings = ("-s os=Windows -s compiler='Visual Studio' -s compiler.version=12 "
"-s arch=x86 -s compiler.runtime=MD")
def _create(self, number, version, deps=None, export=True, no_config=False):
files = cpp_hello_conan_files(number, version, deps, build=False, config=not no_config)
self.client.save(files, clean_first=True)
if export:
self.client.run("export lasote/stable")
def imports_test(self):
""" Ensure that when importing files in a global path, outside the package build,
they are not deleted
"""
dst_global_folder = temp_folder().replace("\\", "/")
conanfile = '''
from conans import ConanFile
class ConanLib(ConanFile):
name = "Hello"
version = "0.1"
exports = "*"
def package(self):
self.copy("*", dst="files")
'''
conanfile2 = '''
from conans import ConanFile
class ConanLib(ConanFile):
name = "Say"
version = "0.1"
requires = "Hello/0.1@lasote/stable"
def imports(self):
self.copy("*file.txt", dst="%s", src="files")
''' % dst_global_folder
self.client.save({CONANFILE: conanfile, "other/folder/file.txt": "My file content"})
self.client.run("export lasote/stable")
self.client.save({CONANFILE: conanfile2}, clean_first=True)
self.client.run("export lasote/stable")
self.client.current_folder = temp_folder()
self.client.run("install Say/0.1@lasote/stable --build=missing")
content = load(os.path.join(dst_global_folder, "other/folder/file.txt"))
self.assertEqual(content, "My file content")
def reuse_test(self):
self._create("Hello0", "0.1")
self._create("Hello1", "0.1", ["Hello0/0.1@lasote/stable"])
self._create("Hello2", "0.1", ["Hello1/0.1@lasote/stable"], export=False)
for lang, id0, id1 in [(0, "2e38bbc2c3ef1425197c8e2ffa8532894c347d26",
"44671ecdd9c606eb7166f2197ab50be8d36a3c3b"),
(1, "8b964e421a5b7e48b7bc19b94782672be126be8b",
"3eeab577a3134fa3afdcd82881751789ec48e08f")]:
self.client.run("install -o language=%d %s --build missing" % (lang, self.settings))
info_path = os.path.join(self.client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
self.assertEqual("arch=x86\n"
"compiler=Visual Studio\n"
"compiler.runtime=MD\n"
"compiler.version=12\n"
"os=Windows",
conan_info.settings.dumps())
self.assertEqual("language=%s\nstatic=True" % lang, conan_info.options.dumps())
conan_ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
hello0 = self.client.paths.package(PackageReference(conan_ref, id0))
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
self.assertEqual(lang, hello0_conan_info.options.language)
package_ref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"),
id1)
hello1 = self.client.paths.package(package_ref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
self.assertEqual(lang, hello1_conan_info.options.language)
def upper_option_test(self):
self._create("Hello0", "0.1", no_config=True)
self._create("Hello1", "0.1", ["Hello0/0.1@lasote/stable"], no_config=True)
self._create("Hello2", "0.1", ["Hello1/0.1@lasote/stable"], export=False, no_config=True)
self.client.run("install -o Hello2:language=1 -o Hello1:language=0 -o Hello0:language=1 %s"
" --build missing" % self.settings)
info_path = os.path.join(self.client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
self.assertEqual("language=1\nstatic=True", conan_info.options.dumps())
conan_ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
hello0 = self.client.paths.package(PackageReference(conan_ref,
"8b964e421a5b7e48b7bc19b94782672be126be8b"))
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
self.assertEqual(1, hello0_conan_info.options.language)
package_ref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"),
"44671ecdd9c606eb7166f2197ab50be8d36a3c3b")
hello1 = self.client.paths.package(package_ref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
self.assertEqual(0, hello1_conan_info.options.language)
def inverse_upper_option_test(self):
self._create("Hello0", "0.1", no_config=True)
self._create("Hello1", "0.1", ["Hello0/0.1@lasote/stable"], no_config=True)
self._create("Hello2", "0.1", ["Hello1/0.1@lasote/stable"], export=False, no_config=True)
self.client.run("install -o language=0 -o Hello1:language=1 -o Hello0:language=0 %s "
"--build missing" % self.settings)
info_path = os.path.join(self.client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
self.assertEqual("language=0\nstatic=True", conan_info.options.dumps())
conan_ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
hello0 = self.client.paths.package(PackageReference(conan_ref,
"2e38bbc2c3ef1425197c8e2ffa8532894c347d26"))
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
self.assertEqual("language=0\nstatic=True", hello0_conan_info.options.dumps())
package_ref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"),
"3eeab577a3134fa3afdcd82881751789ec48e08f")
hello1 = self.client.paths.package(package_ref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
self.assertEqual("language=1\nstatic=True", hello1_conan_info.options.dumps())
def upper_option_txt_test(self):
self._create("Hello0", "0.1", no_config=True)
self._create("Hello1", "0.1", ["Hello0/0.1@lasote/stable"], no_config=True)
files = cpp_hello_conan_files("Hello2", "0.1", ["Hello1/0.1@lasote/stable"])
files.pop(CONANFILE)
files[CONANFILE_TXT] = """[requires]
Hello1/0.1@lasote/stable
[options]
Hello0:language=1
Hello1:language=0
"""
self.client.save(files, clean_first=True)
self.client.run("install %s --build missing" % self.settings)
info_path = os.path.join(self.client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
self.assertEqual("", conan_info.options.dumps())
conan_ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
hello0 = self.client.paths.package(PackageReference(conan_ref,
"8b964e421a5b7e48b7bc19b94782672be126be8b"))
hello0_info = os.path.join(hello0, CONANINFO)
hello0_conan_info = ConanInfo.load_file(hello0_info)
self.assertEqual(1, hello0_conan_info.options.language)
package_ref1 = PackageReference(ConanFileReference.loads("Hello1/0.1@lasote/stable"),
"44671ecdd9c606eb7166f2197ab50be8d36a3c3b")
hello1 = self.client.paths.package(package_ref1)
hello1_info = os.path.join(hello1, CONANINFO)
hello1_conan_info = ConanInfo.load_file(hello1_info)
self.assertEqual(0, hello1_conan_info.options.language)
def change_option_txt_test(self):
self._create("Hello0", "0.1")
client = TestClient(base_folder=self.client.base_folder)
files = {CONANFILE_TXT: """[requires]
Hello0/0.1@lasote/stable
[options]
Hello0:language=1
"""}
client.save(files)
client.run("install %s --build missing" % self.settings)
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
self.assertEqual("", conan_info.options.dumps())
self.assertIn("Hello0:language=1", conan_info.full_options.dumps())
self.assertIn("Hello0/0.1@lasote/stable:8b964e421a5b7e48b7bc19b94782672be126be8b",
conan_info.full_requires.dumps())
files = {CONANFILE_TXT: """[requires]
Hello0/0.1@lasote/stable
[options]
Hello0:language=0
"""}
client.save(files)
client.run("install %s --build missing" % self.settings)
info_path = os.path.join(client.current_folder, CONANINFO)
conan_info = ConanInfo.load_file(info_path)
self.assertEqual("", conan_info.options.dumps())
self.assertIn("Hello0:language=0", conan_info.full_options.dumps())
self.assertIn("Hello0/0.1@lasote/stable:2e38bbc2c3ef1425197c8e2ffa8532894c347d26",
conan_info.full_requires.dumps())
def warn_bad_os_test(self):
bad_os = "Linux" if platform.system() != "Linux" else "Macos"
message = "You are building this package with settings.os='%s" % bad_os
self._create("Hello0", "0.1")
self.client.run("install Hello0/0.1@lasote/stable -s os=%s" % bad_os, ignore_error=True)
self.assertIn(message, self.client.user_io.out)
self.client.run("install Hello0/0.1@lasote/stable -s os=%s" % detected_os(),
ignore_error=True)
self.assertNotIn("You are building this package with settings.os", self.client.user_io.out)
|
11522442
|
import torch
import numpy as np
from collections import OrderedDict
from copy import deepcopy
from robopose.utils.logging import get_logger
from robopose.lib3d.robopose_ops import loss_refiner_CO_disentangled_reference_point
from robopose.lib3d.transform_ops import add_noise, invert_T, transform_pts
from robopose.lib3d.robopose_ops import (
TCO_init_from_boxes_zup_autodepth,
)
from robopose.lib3d.robot_ops import add_noise_joints
from robopose.lib3d.articulated_mesh_database import Meshes
logger = get_logger(__name__)
def cast(obj, dtype=None):
if isinstance(obj, (dict, OrderedDict)):
for k, v in obj.items():
obj[k] = cast(torch.as_tensor(v))
if dtype is not None:
obj[k] = obj[k].to(dtype)
return obj
else:
return obj.cuda(non_blocking=True)
def obj_infos_to_tensor(urdf_layer, obj_infos):
q = []
for n in range(len(obj_infos)):
q.append(urdf_layer.to_tensor(obj_infos[n]['joints']))
q = torch.cat(q, dim=0)
return q
def h_pose(model, data, meters, cfg,
n_iterations=1, mesh_db=None, train=True):
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
else:
model_without_ddp = model
dtype, device = torch.float32, 'cuda'
images = cast(data.images).float() / 255.
batch_size, _, h, w = images.shape
TCO_gt = cast(data.TCO).float()
K = cast(data.K).float()
bboxes = cast(data.bboxes).float()
# Convert joints dict to tensor
obj_infos_gt = data.objects
for n in range(batch_size):
name = obj_infos_gt[n]['name']
urdf_layer = mesh_db.urdf_layers[mesh_db.label_to_id[name]]
obj_infos_gt[n]['joints'] = {k: torch.as_tensor(obj_infos_gt[n]['joints'][k]).view(1, -1).to(dtype) for k in urdf_layer.joint_names}
# Compute input pose/joints by adding noise to the ground truth
## Joint initialization
obj_infos_init = deepcopy(obj_infos_gt)
if cfg.predict_joints:
for n in range(batch_size):
name = obj_infos_gt[n]['name']
urdf_layer = mesh_db.urdf_layers[mesh_db.label_to_id[name]]
if cfg.input_generator == 'gt+noise':
q_limits = urdf_layer.joint_limits
q0 = cast(obj_infos_gt[n]['joints'], dtype=dtype)
q0_tensor = urdf_layer.to_tensor(q0)
q0_tensor = add_noise_joints(q0_tensor,
std_interval_ratio=cfg.joints_std_interval_ratio,
q_limits=q_limits)
elif cfg.input_generator == 'fixed':
q_default = urdf_layer.joints_default.unsqueeze(0).to(dtype)
q0_tensor = urdf_layer.to_tensor(q_default)
else:
raise ValueError
obj_infos_init[n]['joints'] = urdf_layer.from_tensor(q0_tensor)
# Pose initialization
meshes = model_without_ddp.mesh_db.select(obj_infos_init)
_, T_O_CENTROID = meshes.center_meshes()
T_C_CENTROID_gt = TCO_gt @ T_O_CENTROID
if cfg.input_generator == 'gt+noise':
T_C_CENTROID_init = add_noise(T_C_CENTROID_gt,
euler_deg_std=[60, 60, 60],
trans_std=[0.1, 0.1, 0.1])
elif cfg.input_generator == 'fixed':
centered_meshes = Meshes(meshes.labels, transform_pts(invert_T(T_O_CENTROID), meshes.points))
centered_points = centered_meshes.sample_points(2000, deterministic=True)
T_C_CENTROID_init = TCO_init_from_boxes_zup_autodepth(bboxes, centered_points, K)
else:
raise ValueError
TCO_init = T_C_CENTROID_init @ invert_T(T_O_CENTROID)
# Cast joints to gpu
for n in range(batch_size):
name = obj_infos_gt[n]['name']
urdf_layer = mesh_db.urdf_layers[mesh_db.label_to_id[name]]
obj_infos_gt[n]['joints'] = cast(obj_infos_gt[n]['joints'], dtype=dtype)
obj_infos_init[n]['joints'] = cast(obj_infos_init[n]['joints'], dtype=dtype)
# Forward pass
outputs = model(images=images, K=K, obj_infos=obj_infos_init,
TCO=TCO_init, n_iterations=n_iterations,
update_obj_infos=cfg.predict_joints)
losses_TCO_iter = []
losses_q_iter = []
losses_iter = []
q_gt = obj_infos_to_tensor(urdf_layer, obj_infos_gt)
for n in range(n_iterations):
iter_outputs = outputs[f'iteration={n+1}']
K_crop = iter_outputs['K_crop']
refiner_outputs = iter_outputs['refiner_outputs']
obj_infos_input = iter_outputs['obj_infos_input']
# Pose loss
anchor_link_names = iter_outputs['anchor_link_names']
if cfg.points_for_pose_loss == 'anchor_link':
link_meshes = mesh_db.select(obj_infos_input, link_names=iter_outputs['anchor_link_names'], apply_fk=False)
anchor_loss_pts = link_meshes.sample_points(min(cfg.n_points_loss, link_meshes.points.shape[1]), deterministic=False)
elif cfg.points_for_pose_loss == 'whole_robot':
robot_meshes = mesh_db.select(obj_infos_input, apply_fk=True)
anchor_loss_pts = robot_meshes.sample_points(min(cfg.n_points_loss, robot_meshes.points.shape[1]), deterministic=False)
assert all([anchor_link_names[n] == urdf_layer.robot.base_link.name for n in range(batch_size)])
else:
raise ValueError(cfg.points_for_pose_loss)
TOA_gt = urdf_layer.compute_link_pose(anchor_link_names, q_gt)
TCA_gt = TCO_gt @ TOA_gt
TCA_input = iter_outputs['TCA_input']
t_C_REF = iter_outputs['t_C_REF']
refiner_pose_update = refiner_outputs['pose']
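# Disentangled pose loss: the predicted pose update is supervised on points sampled
# from the anchor link (or the whole robot), expressed relative to the reference
# point t_C_REF (see loss_refiner_CO_disentangled_reference_point).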
loss_TCO_iter = loss_refiner_CO_disentangled_reference_point(
TCO_possible_gt=TCA_gt.unsqueeze(1),
TCO_input=TCA_input,
refiner_outputs=refiner_pose_update,
K_crop=K_crop,
points=anchor_loss_pts,
tCR=t_C_REF,
)
# Joints loss
q_output = obj_infos_to_tensor(urdf_layer, iter_outputs['obj_infos_output_no_clamp'])
if cfg.predict_joints:
loss_q_iter = ((q_output - q_gt) ** 2).mean(dim=-1)
meters[f'loss_q-iter={n+1}'].add(loss_q_iter.mean().item())
losses_q_iter.append(loss_q_iter)
if model_without_ddp.debug:
from robopose.lib3d.camera_geometry import project_points
model_without_ddp.tmp_debug['pts_proj_gt'] = project_points(anchor_loss_pts, K_crop, TCA_gt)
model_without_ddp.tmp_debug['pts_proj_input'] = project_points(anchor_loss_pts, K_crop, TCA_input)
meters[f'loss_TCO-iter={n+1}'].add(loss_TCO_iter.mean().item())
losses_TCO_iter.append(loss_TCO_iter)
losses_TCO_iter = torch.cat(losses_TCO_iter)
loss_TCO = losses_TCO_iter.mean()
if cfg.predict_joints:
loss_q = torch.cat(losses_q_iter).mean()
loss_q_scaled = loss_q * cfg.loss_q_lambda
meters['loss_q'].add(loss_q.item())
meters['loss_q_scaled'].add(loss_q_scaled.item())
loss = loss_TCO + loss_q_scaled
else:
loss = loss_TCO
meters['loss_TCO'].add(loss_TCO.item())
meters['loss_total'].add(loss.item())
return loss
|
11522447
|
import argparse
import numpy as np
from keras.layers import (LSTM, BatchNormalization, Convolution3D, Dense, Dropout, Flatten, Input,
MaxPooling3D, TimeDistributed, ZeroPadding3D)
from keras.models import Model, Sequential
from src.data import import_labels
from src.io_data import get_duration, get_num_frames, video_to_array
from src.processing import activity_localization, get_classification, smoothing
def run_all_pipeline(input_video, smoothing_k, activity_threshold):
input_size = (112, 112)
length = 16
# Load labels
with open('dataset/labels.txt', 'r') as f:
labels = import_labels(f)
print('Reading Video...')
video_array = video_to_array(input_video, resize=input_size)
if video_array is None:
raise Exception('The video could not be read')
nb_frames = get_num_frames(input_video)
duration = get_duration(input_video)
fps = nb_frames / duration
print('Duration: {:.1f}s'.format(duration))
print('FPS: {:.1f}'.format(fps))
print('Number of frames: {}'.format(nb_frames))
nb_clips = nb_frames // length
video_array = video_array.transpose(1, 0, 2, 3)
video_array = video_array[:nb_clips * length, :, :, :]
video_array = video_array.reshape((nb_clips, length, 3, 112, 112))
video_array = video_array.transpose(0, 2, 1, 3, 4)
# Load C3D model and mean
print('Loading C3D network...')
model = C3D_conv_features(True)
model.compile(optimizer='sgd', loss='mse')
mean_total = np.load('data/models/c3d-sports1M_mean.npy')
mean = np.mean(mean_total, axis=(0, 2, 3, 4), keepdims=True)
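# Collapse the Sports1M mean volume to one value per colour channel (shape (1, 3, 1, 1, 1))
# so it broadcasts over every clip and frame when subtracted below.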
# Extract features
print('Extracting features...')
X = video_array - mean
Y = model.predict(X, batch_size=1, verbose=1)
# Load the temporal localization network
print('Loading temporal localization network...')
model_localization = temporal_localization_network(True)
model_localization.compile(
optimizer='rmsprop', loss='categorical_crossentropy')
# Predict with the temporal localization network
print('Predicting...')
Y = Y.reshape(nb_clips, 1, 4096)
prediction = model_localization.predict(Y, batch_size=1, verbose=1)
prediction = prediction.reshape(nb_clips, 201)
# Post-process the predicted output
print('Post-processing output...')
labels_idx, scores = get_classification(prediction, k=5)
print('Video: {}\n'.format(input_video))
print('Classification:')
for idx, score in zip(labels_idx, scores):
label = labels[idx]
print('{:.4f}\t{}'.format(score, label))
prediction_smoothed = smoothing(prediction, k=smoothing_k)
activities_idx, startings, endings, scores = activity_localization(
prediction_smoothed, activity_threshold)
print('\nDetection:')
print('Score\tInterval\t\tActivity')
for idx, s, e, score in zip(activities_idx, startings, endings, scores):
start = s * float(length) / fps
end = e * float(length) / fps
label = labels[idx]
print(
'{:.4f}\t{:.1f}s - {:.1f}s\t\t{}'.format(score, start, end, label))
def C3D_conv_features(summary=False):
""" Return the Keras model of the network until the fc6 layer where the
convolutional features can be extracted.
"""
from keras.layers.convolutional import Convolution3D, MaxPooling3D, ZeroPadding3D
from keras.layers.core import Dense, Dropout, Flatten
from keras.models import Sequential
model = Sequential()
# 1st layer group
model.add(
Convolution3D(
64,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv1',
subsample=(1, 1, 1),
input_shape=(3, 16, 112, 112),
trainable=False))
model.add(
MaxPooling3D(
pool_size=(1, 2, 2),
strides=(1, 2, 2),
border_mode='valid',
name='pool1'))
# 2nd layer group
model.add(
Convolution3D(
128,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv2',
subsample=(1, 1, 1),
trainable=False))
model.add(
MaxPooling3D(
pool_size=(2, 2, 2),
strides=(2, 2, 2),
border_mode='valid',
name='pool2'))
# 3rd layer group
model.add(
Convolution3D(
256,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv3a',
subsample=(1, 1, 1),
trainable=False))
model.add(
Convolution3D(
256,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv3b',
subsample=(1, 1, 1),
trainable=False))
model.add(
MaxPooling3D(
pool_size=(2, 2, 2),
strides=(2, 2, 2),
border_mode='valid',
name='pool3'))
# 4th layer group
model.add(
Convolution3D(
512,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv4a',
subsample=(1, 1, 1),
trainable=False))
model.add(
Convolution3D(
512,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv4b',
subsample=(1, 1, 1),
trainable=False))
model.add(
MaxPooling3D(
pool_size=(2, 2, 2),
strides=(2, 2, 2),
border_mode='valid',
name='pool4'))
# 5th layer group
model.add(
Convolution3D(
512,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv5a',
subsample=(1, 1, 1),
trainable=False))
model.add(
Convolution3D(
512,
3,
3,
3,
activation='relu',
border_mode='same',
name='conv5b',
subsample=(1, 1, 1),
trainable=False))
model.add(ZeroPadding3D(padding=(0, 1, 1), name='zeropadding'))
model.add(
MaxPooling3D(
pool_size=(2, 2, 2),
strides=(2, 2, 2),
border_mode='valid',
name='pool5'))
model.add(Flatten(name='flatten'))
# FC layers group
model.add(Dense(4096, activation='relu', name='fc6', trainable=False))
model.add(Dropout(.5, name='do1'))
model.add(Dense(4096, activation='relu', name='fc7'))
model.add(Dropout(.5, name='do2'))
model.add(Dense(487, activation='softmax', name='fc8'))
# Load weights
model.load_weights('data/models/c3d-sports1M_weights.h5')
for _ in range(4):
model.pop_layer()
if summary:
print(model.summary())
return model
def temporal_localization_network(summary=False):
input_features = Input(batch_shape=(1, 1, 4096, ), name='features')
input_normalized = BatchNormalization(name='normalization')(input_features)
input_dropout = Dropout(p=.5)(input_normalized)
lstm = LSTM(
512, return_sequences=True, stateful=True, name='lsmt1')(input_dropout)
output_dropout = Dropout(p=.5)(lstm)
output = TimeDistributed(
Dense(201, activation='softmax'), name='fc')(output_dropout)
model = Model(input=input_features, output=output)
model.load_weights('data/models/temporal-location_weights.hdf5')
if summary:
model.summary()
return model
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=
'Run the full pipeline: given a video, classify it and temporally localize the activity in it'
)
parser.add_argument(
'-i',
'--input-video',
type=str,
dest='input_video',
help='Path to the input video')
parser.add_argument(
'-k',
type=int,
dest='smoothing_k',
default=5,
help='Smoothing factor at post-processing (default: %(default)s)')
parser.add_argument(
'-t',
type=float,
dest='activity_threshold',
default=.2,
help='Activity threshold at post-processing (default: %(default)s)')
args = parser.parse_args()
run_all_pipeline(args.input_video, args.smoothing_k,
args.activity_threshold)
|
11522460
|
import dcase_util
audio_container = dcase_util.containers.AudioContainer().load(
filename=dcase_util.utils.Example.audio_filename()
)
audio_container.filename = None
audio_container.plot_wave()
|
11522498
|
import os
from torchvision import datasets
from torch.utils.data import Dataset
import numpy as np
import tqdm
from enum import Enum
import PIL.Image
__all__ = ['DatasetType', 'DATASETS', 'CIFAR10Dataset', 'NIHDataset', 'SVHNDataset']
class CIFAR10Dataset(Dataset):
def __init__(self, root, split, transform=None, target_classes=None, target_indexes_path=None):
super().__init__()
if split == 'train':
self._dataset = datasets.CIFAR10(root=root, train=True, transform=transform, download=True)
else:
self._dataset = datasets.CIFAR10(root=root, train=False, transform=transform, download=True)
if (target_classes is not None) and (target_indexes_path is not None):
raise ValueError("You must specify either 'target_classes' or 'target_indexes_path',"
"but not both")
if target_classes is not None:
self._target_indexes = []
for index, label in enumerate(self._dataset.targets):
if label in target_classes:
self._target_indexes.append(index)
elif target_indexes_path is not None:
self._target_indexes = np.load(target_indexes_path)
else:
self._target_indexes = list(range(len(self._dataset)))
def __getitem__(self, index):
image, _ = self._dataset[self._target_indexes[index]]
return image
def __len__(self):
return len(self._target_indexes)
class SVHNDataset(Dataset):
def __init__(self, root, split, transform=None, target_classes=None, target_indexes_path=None):
super().__init__()
self._dataset = datasets.SVHN(root=root, split=split, transform=transform, download=True)
if (target_classes is not None) and (target_indexes_path is not None):
raise ValueError("You must specify either 'target_classes' or 'target_indexes_path',"
"but not both")
if target_classes is not None:
self._target_indexes = []
for index, (_, label) in enumerate(self._dataset):
if label in target_classes:
self._target_indexes.append(index)
elif target_indexes_path is not None:
self._target_indexes = np.load(target_indexes_path)
else:
self._target_indexes = list(range(len(self._dataset)))
def __getitem__(self, index):
image, _ = self._dataset[self._target_indexes[index]]
return image
def __len__(self):
return len(self._target_indexes)
class Camelyon16Dataset(Dataset):
def __init__(self, image_root, split_root, split, transform=None, cache_data=False):
super().__init__()
self._image_root = image_root
self._transform = transform
split_info_path = os.path.join(split_root, split)
with open(split_info_path) as f_in:
self._image_filenames = [filename.strip() for filename in f_in.readlines()]
self._cached_images = {}
if cache_data:
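# Temporarily disable the cache flag so the __getitem__ calls below read from
# disk while the cache is being filled, then restore it afterwards.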
self._cache_data = False
print('Loading dataset ... ')
for index in tqdm.tqdm(range(len(self))):
self._cached_images[index] = self[index]
self._cache_data = cache_data
def __getitem__(self, index):
if self._cache_data:
return self._cached_images[index]
else:
image_path = os.path.join(self._image_root, self._image_filenames[index])
image = PIL.Image.open(image_path)
if self._transform is not None:
image = self._transform(image)
return image
def __len__(self):
return len(self._image_filenames)
class NIHDataset(Dataset):
def __init__(self, image_root, split_root, split, transform=None, cache_data=False):
super().__init__()
self._image_root = image_root
self._transform = transform
split_info_path = os.path.join(split_root, split)
with open(split_info_path) as f_in:
self._image_filenames = [filename.strip() for filename in f_in.readlines()]
self._cached_images = {}
if cache_data:
self._cache_data = False
print('Loading dataset ... ')
for index in tqdm.tqdm(range(len(self))):
self._cached_images[index] = self[index]
self._cache_data = cache_data
def __getitem__(self, index):
if self._cache_data:
return self._cached_images[index]
else:
image_path = os.path.join(self._image_root, self._image_filenames[index])
image = PIL.Image.open(image_path)
if self._transform is not None:
image = self._transform(image)
return image
def __len__(self):
return len(self._image_filenames)
class DatasetType(Enum):
cifar10 = 'cifar10'
camelyon16 = 'camelyon16'
nih = 'nih'
svhn = 'svhn'
DATASETS = {
DatasetType.cifar10: CIFAR10Dataset,
DatasetType.camelyon16: Camelyon16Dataset,
DatasetType.nih: NIHDataset,
DatasetType.svhn: SVHNDataset,
}
|
11522525
|
from rest_framework.generics import ListAPIView
from search.views.paper import PaperDocumentView
from search.views.post import PostDocumentView
from search.views.person import PersonDocumentView
from search.views.hub import HubDocumentView
from utils.permissions import ReadOnly
from rest_framework.response import Response
from elasticsearch_dsl import Search
from search.serializers.combined import CombinedSerializer
from django_elasticsearch_dsl_drf.filter_backends import (
CompoundSearchFilterBackend,
DefaultOrderingFilterBackend,
HighlightBackend,
FilteringFilterBackend,
NestedFilteringFilterBackend,
IdsFilterBackend,
OrderingFilterBackend,
SuggesterFilterBackend,
PostFilterFilteringFilterBackend,
FacetedSearchFilterBackend,
SearchFilterBackend,
)
from search.backends.multi_match_filter import MultiMatchSearchFilterBackend
class CombinedView(ListAPIView):
max_results_per_entity = 2
permission_classes = [ReadOnly]
serializer_class = CombinedSerializer
filter_backends = [
MultiMatchSearchFilterBackend,
CompoundSearchFilterBackend,
FacetedSearchFilterBackend,
FilteringFilterBackend,
PostFilterFilteringFilterBackend,
DefaultOrderingFilterBackend,
OrderingFilterBackend,
HighlightBackend,
]
def __init__(self, *args, **kwargs):
self.paper_view = PaperDocumentView(*args, **kwargs)
self.post_view = PostDocumentView(*args, **kwargs)
self.person_view = PersonDocumentView(*args, **kwargs)
self.hub_view = HubDocumentView(*args, **kwargs)
super(CombinedView, self).__init__(*args, **kwargs)
def get_queryset(self):
return
def list(self, request, *args, **kwargs):
response = {
"paper": [],
"post": [],
"person": [],
"hub": [],
}
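# Run the same request through each entity view's Elasticsearch filter pipeline
# and keep only the top hits per entity type (max_results_per_entity).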
papers_es_res = self.paper_view._filter_queryset(
request,
)
post_es_res = self.post_view._filter_queryset(
request,
)
person_es_res = self.person_view._filter_queryset(
request,
)
hub_es_res = self.hub_view._filter_queryset(
request,
)
response['paper'] = self.get_serializer(papers_es_res, many=True).data[0:self.max_results_per_entity]
response['post'] = self.get_serializer(post_es_res, many=True).data[0:self.max_results_per_entity]
response['person'] = self.get_serializer(person_es_res, many=True).data[0:self.max_results_per_entity]
response['hub'] = self.get_serializer(hub_es_res, many=True).data[0:self.max_results_per_entity]
return Response(response)
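# Example URL wiring (a hedged sketch; the module path and the URL prefix
# below are assumptions, not taken from this project's urls.py):
#
#   from django.urls import path
#   from search.views.combined import CombinedView
#
#   urlpatterns = [
#       path('search/combined/', CombinedView.as_view(), name='combined-search'),
#   ]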
|
11522539
|
import logging
import numpy as np
import re
import time
from copy import deepcopy
from os import getenv
import ner_model
import sentry_sdk
from flask import Flask, jsonify, request
from nltk.tokenize import word_tokenize
sentry_sdk.init(getenv("SENTRY_DSN"))
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO)
logger = logging.getLogger(__name__)
app = Flask(__name__)
ner = ner_model.load_model()
logger.info("ner model is loaded.")
nltk_stopwords_file = "nltk_stopwords.txt"
with open(nltk_stopwords_file, "r") as f_stopwords:
    NLTK_STOPWORDS = set(line.strip() for line in f_stopwords)
BANNED_ENTITIES = re.compile(
r"\b(okay|ok|name|ocean|hey|cool|corona|pop|rap|bo+"
r"|hmph|oops|ouch|sh+|hush|whew|whoa|uhu|huh|wow|ya+y|yip+e+|yahoo|hurray"
r"|[aeou]+[mhrw]+[aeou]*|[mhrw]+[aeou]+[mhrw]+|[mhrw]+|nowhere|nice|good"
r"|somewhere|anywhere|honey)\b",
re.IGNORECASE,
)
EVERYTHING_EXCEPT_LETTERS_DIGITALS_AND_SPACE = re.compile(r"[^a-zA-Z0-9 \-]")
DOUBLE_SPACES = re.compile(r"\s+")
with open("./google-english-no-swears.txt", "r") as f:
UNIGRAMS = set(f.read().splitlines()[:500])
def extract_good_entities(preds, sentences):
good_preds = []
for sent, entities_for_sent in zip(sentences, preds):
good_entities_for_sent = []
for ent in entities_for_sent:
ent_text = ent["text"].lower()
# remove everything except letters, digits, spaces and -
ent_text = EVERYTHING_EXCEPT_LETTERS_DIGITALS_AND_SPACE.sub(" ", ent_text)
ent_text = DOUBLE_SPACES.sub(" ", ent_text).strip()
if ent_text == "alexa" and re.match("^alexa .+", sent):
# skip alexa if alexa is a first word of uttr
continue
is_not_stopword = ent_text not in NLTK_STOPWORDS and ent_text not in UNIGRAMS
is_long_enough = len(ent_text) > 2
is_not_banned = not re.match(BANNED_ENTITIES, ent_text)
if is_not_stopword and is_not_banned and is_long_enough:
good_entities_for_sent.append(deepcopy(ent))
good_preds.append(good_entities_for_sent)
return good_preds
def get_predictions_for_list_sentences(sentences):
sents = [word_tokenize(sent.lower()) for sent in sentences]
preds = ner.predict(sents)
# each sample is a list of sentences of current utterance
# so, preds is a list of length = number of sents in utterances
# each element of preds is a list of entities.
# EXAMPLE:
# one sample = ["i have been in london and greece.", "my name is valentine and beatrice."]
# preds = [[{'confidence': 1, 'end_pos': 5, 'start_pos': 4, 'text': 'london', 'type': 'LOC'},
# {'confidence': 1, 'end_pos': 7, 'start_pos': 6, 'text': 'greece', 'type': 'LOC'}],
# [{'confidence': 1, 'end_pos': 4, 'start_pos': 3, 'text': 'valentine', 'type': 'PER'},
# {'confidence': 1, 'end_pos': 6, 'start_pos': 5, 'text': 'beatrice', 'type': 'PER'}]]
good_preds = extract_good_entities(preds, sentences)
return good_preds
def get_result(request):
st_time = time.time()
last_utterances = request.json["last_utterances"]
logger.info(f"input (the last utterances): {last_utterances}")
samples = []
dialog_ids = []
for i, utterance_sents in enumerate(last_utterances):
for sent in utterance_sents:
samples.append(sent)
dialog_ids.append(i)
good_preds = get_predictions_for_list_sentences(samples)
dialog_ids = np.array(dialog_ids)
ret = []
for i, utterance_sents in enumerate(last_utterances):
curr_ids = np.where(dialog_ids == i)[0]
curr_preds = [good_preds[curr_id] for curr_id in curr_ids]
ret.append(curr_preds)
logger.info(f"NER output: {ret}")
total_time = time.time() - st_time
logger.info(f"NER exec time: {total_time: .3f}s")
return ret
@app.route("/ner", methods=["POST"])
def respond():
result = get_result(request)
return jsonify(result)
@app.route("/ner_batch", methods=["POST"])
def respond_batch():
result = get_result(request)
return jsonify([{"batch": result}])
if __name__ == "__main__":
app.run(debug=False, host="0.0.0.0", port=8021)
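# Example client call (a minimal sketch; host and port mirror app.run above,
# and the payload shape follows get_result(): a list of utterances, each a
# list of sentences):
#
#   import requests
#   payload = {"last_utterances": [["i have been in london and greece."]]}
#   resp = requests.post("http://0.0.0.0:8021/ner", json=payload)
#   print(resp.json())  # nested lists of entity dicts per sentence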
|
11522554
|
import json
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
font = {'size' : 25}
matplotlib.rc('font', **font)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
colors = [[0, 0.447, 0.7410], [0.85, 0.325, 0.098], [0.466, 0.674, 0.188], [0.929, 0.694, 0.125],
[0.494, 0.1844, 0.556], [0, 0.447, 0.7410], [0.3010, 0.745, 0.933], [0.85, 0.325, 0.098],
[0.466, 0.674, 0.188], [0.929, 0.694, 0.125],
[0.3010, 0.745, 0.933], [0.635, 0.078, 0.184]]
folder_path = 'path_to_folder_with_trial_ids_folders/'
track_time = True
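# Expected layout of each trial directory (inferred from the reads below):
#   <trial>/params.json            - experiment parameters and goal descriptions
#   <trial>/progress.csv           - per-epoch logs (success rates, timings)
#   <trial>/goal_info/info_<ep>.pk - pickled exploration metrics per episode step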
def plot_all(path, trial):
print('Plotting trial', trial)
plt.close('all')
# extract params from json
with open(os.path.join(path, 'params.json')) as json_file:
params = json.load(json_file)
instructions = params['train_descriptions']
nb_instr = len(instructions)
n_cycles = params['experiment_params']['n_cycles']
rollout_batch_size = params['experiment_params']['rollout_batch_size']
n_cpu = params['experiment_params']['n_cpus']
# extract logged training data from progress.csv
data = pd.read_csv(os.path.join(path, 'progress.csv'))
n_points = data['eval/success_goal_0'].shape[0]
episodes = data['episode']
n_epochs = len(episodes)
n_eps = n_cpu * rollout_batch_size * n_cycles
episodes = np.arange(n_eps, n_epochs * n_eps + 1, n_eps)
episodes = episodes / 1000
task_success_rates = np.zeros([n_points, nb_instr])
goals_reached = np.zeros([n_points, nb_instr])
for i in range(nb_instr):
task_success_rates[:, i] = data['eval/success_goal_' + str(i)]
zero_success_rates = task_success_rates.copy()
# replace NaN success rates with 0
zero_success_rates[np.isnan(zero_success_rates)] = 0
np.savetxt(path + 'sr_train_set.txt', zero_success_rates.transpose())
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# plot success_rate
fig = plt.figure(figsize=(22, 15), frameon=False)
ax = fig.add_subplot(111)
ax.spines['top'].set_linewidth(6)
ax.spines['right'].set_linewidth(6)
ax.spines['bottom'].set_linewidth(6)
ax.spines['left'].set_linewidth(6)
ax.tick_params(width=4, direction='in', length=10, labelsize='small')
p = plt.plot(episodes, zero_success_rates.mean(axis=1), linewidth=10) # , c=colors[i])
# leg = plt.legend(['task ' + str(i) for i in range(nb_instr)], frameon=False)
lab = plt.xlabel('Episodes (x$10^3$)')
plt.ylim([-0.01, 1.01])
plt.yticks([0.25, 0.50, 0.75, 1])
lab2 = plt.ylabel('Average success rate')
plt.savefig(os.path.join(path, 'plot_av_success_rate.png'), bbox_extra_artists=(lab, lab2), bbox_inches='tight',
dpi=50) # add leg
plt.close('all')
if track_time:
computation_time = data['epoch_duration (s)']
time_batch = data['time_batch']
time_epoch = data['time_epoch']
time_train = data['time_train']
time_update = data['time_update']
time_replay = data['time_replay']
time_reward_func_replay = data['time_reward_func_replay']
time_reward_func_update = data['time_reward_func_update']
# check training timings
time_stuff = [time_epoch, time_train - time_update, time_update, time_batch, time_replay, time_reward_func_replay, time_reward_func_update]
legends = ['time_epoch', 'time_train', 'time_update', 'time_batch', 'time_replay', 'time_reward_func_replay', 'time_reward_func_update']
discoveries = np.zeros([nb_instr])
discoveries.fill(np.nan)
for i in range(nb_instr):
ind_zero = np.argwhere(goals_reached[:, i] == 0)
if ind_zero.size == 0:
discoveries[i] = 0
else:
discoveries[i] = ind_zero[-1][0]
np.savetxt(os.path.join(path, 'discoveries.txt'), discoveries)
# plot computation time per epoch
fig = plt.figure(figsize=(22, 15), frameon=False)
plt.plot(episodes, computation_time, linewidth=3)
for d in discoveries:
plt.axvline(x=episodes[int(d)])
plt.savefig(os.path.join(path, 'time.png'), bbox_extra_artists=(lab, lab2), bbox_inches='tight',
dpi=50) # add leg
plt.close('all')
fig = plt.figure(figsize=(22, 15), frameon=False)
for i in range(len(time_stuff)):
plt.plot(episodes, time_stuff[i], linewidth=3)
plt.legend(legends)
for d in discoveries:
plt.axvline(x=episodes[int(d)])
plt.savefig(os.path.join(path, 'time2.png'), bbox_extra_artists=(lab, lab2), bbox_inches='tight',
dpi=50) # add leg
plt.close('all')
# Extract exploration scores
first = 600
step = 600
last = int(np.array(episodes)[-1]) * 1000
steps = np.arange(first, last + 1, step)
n_steps = steps.size
var_obj_pos = np.zeros([n_steps])
count_reward_extra_set = np.zeros([n_steps])
count_reward_train_set = np.zeros([n_steps])
count_reward_test_set = np.zeros([n_steps])
dist_per_obj = np.zeros([n_steps])
exploration_score_all = np.zeros([n_steps])
exploration_score_test = np.zeros([n_steps])
exploration_score_train = np.zeros([n_steps])
exploration_score_extra = np.zeros([n_steps])
var_states = np.zeros([n_steps])
# counter_special = np.zeros([n_steps])
counter_rew_train_test = np.zeros([n_steps])
for k, ep in enumerate(steps):
with open(path + 'goal_info/info_' + str(ep) + '.pk', 'rb') as f:
data = pickle.load(f)
metrics = data['exploration_metrics']
# track number of reward and exploration score
explo_score_train = 0
explo_score_all = 0
explo_score_test = 0
explo_score_extra = 0
prev_counters = dict()
rarities = []
for d in metrics['counter_since_begininng'].keys():
prev_counters[d] = metrics['counter_since_begininng'][d] - metrics['rewards_last_state'][d]
rarities.append(1 / (1 + prev_counters[d]))
assert prev_counters[d] >= 0
for d in params['train_descriptions']:
explo_score_train += metrics['rewards_last_state'][d] * (1 / (prev_counters[d] + 1))
for d in params['test_descriptions']:
explo_score_test += metrics['rewards_last_state'][d] * (1 / (prev_counters[d] + 1))
for d in params['extra_descriptions']:
explo_score_extra += metrics['rewards_last_state'][d] * (1 / (prev_counters[d] + 1))
for d in params['train_descriptions'] + params['test_descriptions'] + params['extra_descriptions']:
explo_score_all += metrics['rewards_last_state'][d] * (1 / (prev_counters[d] + 1))
# for d in specialial[t_i, k] += metrics['rewards_last_state'][d]
explo_score_train /= np.mean(rarities)
explo_score_test /= np.mean(rarities)
explo_score_extra /= np.mean(rarities)
explo_score_all /= np.mean(rarities)
exploration_score_train[k] = explo_score_train
exploration_score_test[k] = explo_score_test
exploration_score_extra[k] = explo_score_extra
exploration_score_all[k] = explo_score_all
dist_per_obj[k] = metrics['dist_per_obj']
count_reward_test_set[k] = metrics['count_reward_test_set']
count_reward_train_set[k] = metrics['count_reward_train_set']
count_reward_extra_set[k] = metrics['count_reward_extra_set']
counter_rew_train_test[k] = metrics['count_reward_test_set'] + metrics['count_reward_train_set']
var_obj_pos[k] = metrics['var_obj_pos']
var_states[k] = metrics['var_states']
exploration_metrics = dict(var_obj_pos=var_obj_pos,
count_reward_extra_set=count_reward_extra_set,
count_reward_train_set=count_reward_train_set,
count_reward_test_set=count_reward_test_set,
dist_per_obj=dist_per_obj,
exploration_score_all=exploration_score_all,
exploration_score_test=exploration_score_test,
exploration_score_train=exploration_score_train,
exploration_score_extra=exploration_score_extra,
var_states=var_states,
counter_rew_train_test=counter_rew_train_test
)
with open(path + 'exploration_metrics.pk', 'wb') as f:
pickle.dump(exploration_metrics, f)
if __name__=="__main__":
for trial in os.listdir(folder_path + '/'):
path = folder_path + '/' + trial + '/'
plot_all(path, trial)
|
11522559
|
import argparse
import sys
import yaml
from six import StringIO
from django.utils.text import camel_case_to_spaces
from lepo.router import Router
HANDLER_TEMPLATE = '''
def {func_name}(request, {parameters}):
raise NotImplementedError('Handler {operation_id} not implemented')
'''.strip()
def generate_handler_stub(router, handler_template=HANDLER_TEMPLATE):
output = StringIO()
func_name_to_operation = {}
for path in router.get_paths():
for operation in path.get_operations():
snake_operation_id = camel_case_to_spaces(operation.id).replace(' ', '_')
func_name_to_operation[snake_operation_id] = operation
for func_name, operation in sorted(func_name_to_operation.items()):
parameter_names = [p['name'] for p in operation.parameters]
handler = handler_template.format(
func_name=func_name,
operation_id=operation.id,
parameters=', '.join(parameter_names),
)
output.write(handler)
output.write('\n\n\n')
return output.getvalue()
def cmdline():
ap = argparse.ArgumentParser()
ap.add_argument('input', default=None, nargs='?')
args = ap.parse_args()
input = (open(args.input) if args.input else sys.stdin)
api = yaml.safe_load(input)
print(generate_handler_stub(Router(api)))
if __name__ == '__main__':
cmdline()
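# Example invocation (a sketch; 'swagger.yml' and the script/output names are
# hypothetical):
#
#   python generate_handlers.py swagger.yml > handlers.py
#   # or read the API spec from stdin:
#   cat swagger.yml | python generate_handlers.py > handlers.py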
|
11522570
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.water_heaters_and_thermal_storage import WaterHeaterHeatPumpPumpedCondenser
log = logging.getLogger(__name__)
class TestWaterHeaterHeatPumpPumpedCondenser(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_waterheaterheatpumppumpedcondenser(self):
pyidf.validation_level = ValidationLevel.error
obj = WaterHeaterHeatPumpPumpedCondenser()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# object-list
var_compressor_setpoint_temperature_schedule_name = "object-list|Compressor Setpoint Temperature Schedule Name"
obj.compressor_setpoint_temperature_schedule_name = var_compressor_setpoint_temperature_schedule_name
# real
var_dead_band_temperature_difference = 10.00005
obj.dead_band_temperature_difference = var_dead_band_temperature_difference
# node
var_condenser_water_inlet_node_name = "node|Condenser Water Inlet Node Name"
obj.condenser_water_inlet_node_name = var_condenser_water_inlet_node_name
# node
var_condenser_water_outlet_node_name = "node|Condenser Water Outlet Node Name"
obj.condenser_water_outlet_node_name = var_condenser_water_outlet_node_name
# real
var_condenser_water_flow_rate = 0.0001
obj.condenser_water_flow_rate = var_condenser_water_flow_rate
# real
var_evaporator_air_flow_rate = 0.0001
obj.evaporator_air_flow_rate = var_evaporator_air_flow_rate
# alpha
var_inlet_air_configuration = "Schedule"
obj.inlet_air_configuration = var_inlet_air_configuration
# node
var_air_inlet_node_name = "node|Air Inlet Node Name"
obj.air_inlet_node_name = var_air_inlet_node_name
# node
var_air_outlet_node_name = "node|Air Outlet Node Name"
obj.air_outlet_node_name = var_air_outlet_node_name
# node
var_outdoor_air_node_name = "node|Outdoor Air Node Name"
obj.outdoor_air_node_name = var_outdoor_air_node_name
# node
var_exhaust_air_node_name = "node|Exhaust Air Node Name"
obj.exhaust_air_node_name = var_exhaust_air_node_name
# object-list
var_inlet_air_temperature_schedule_name = "object-list|Inlet Air Temperature Schedule Name"
obj.inlet_air_temperature_schedule_name = var_inlet_air_temperature_schedule_name
# object-list
var_inlet_air_humidity_schedule_name = "object-list|Inlet Air Humidity Schedule Name"
obj.inlet_air_humidity_schedule_name = var_inlet_air_humidity_schedule_name
# object-list
var_inlet_air_zone_name = "object-list|Inlet Air Zone Name"
obj.inlet_air_zone_name = var_inlet_air_zone_name
# alpha
var_tank_object_type = "WaterHeater:Mixed"
obj.tank_object_type = var_tank_object_type
# object-list
var_tank_name = "object-list|Tank Name"
obj.tank_name = var_tank_name
# node
var_tank_use_side_inlet_node_name = "node|Tank Use Side Inlet Node Name"
obj.tank_use_side_inlet_node_name = var_tank_use_side_inlet_node_name
# node
var_tank_use_side_outlet_node_name = "node|Tank Use Side Outlet Node Name"
obj.tank_use_side_outlet_node_name = var_tank_use_side_outlet_node_name
# alpha
var_dx_coil_object_type = "Coil:WaterHeating:AirToWaterHeatPump:Pumped"
obj.dx_coil_object_type = var_dx_coil_object_type
# object-list
var_dx_coil_name = "object-list|DX Coil Name"
obj.dx_coil_name = var_dx_coil_name
# real
var_minimum_inlet_air_temperature_for_compressor_operation = -5.0
obj.minimum_inlet_air_temperature_for_compressor_operation = var_minimum_inlet_air_temperature_for_compressor_operation
# real
var_maximum_inlet_air_temperature_for_compressor_operation = 60.0
obj.maximum_inlet_air_temperature_for_compressor_operation = var_maximum_inlet_air_temperature_for_compressor_operation
# alpha
var_compressor_location = "Schedule"
obj.compressor_location = var_compressor_location
# object-list
var_compressor_ambient_temperature_schedule_name = "object-list|Compressor Ambient Temperature Schedule Name"
obj.compressor_ambient_temperature_schedule_name = var_compressor_ambient_temperature_schedule_name
# alpha
var_fan_object_type = "Fan:OnOff"
obj.fan_object_type = var_fan_object_type
# object-list
var_fan_name = "object-list|Fan Name"
obj.fan_name = var_fan_name
# alpha
var_fan_placement = "BlowThrough"
obj.fan_placement = var_fan_placement
# real
var_on_cycle_parasitic_electric_load = 0.0
obj.on_cycle_parasitic_electric_load = var_on_cycle_parasitic_electric_load
# real
var_off_cycle_parasitic_electric_load = 0.0
obj.off_cycle_parasitic_electric_load = var_off_cycle_parasitic_electric_load
# alpha
var_parasitic_heat_rejection_location = "Zone"
obj.parasitic_heat_rejection_location = var_parasitic_heat_rejection_location
# node
var_inlet_air_mixer_node_name = "node|Inlet Air Mixer Node Name"
obj.inlet_air_mixer_node_name = var_inlet_air_mixer_node_name
# node
var_outlet_air_splitter_node_name = "node|Outlet Air Splitter Node Name"
obj.outlet_air_splitter_node_name = var_outlet_air_splitter_node_name
# object-list
var_inlet_air_mixer_schedule_name = "object-list|Inlet Air Mixer Schedule Name"
obj.inlet_air_mixer_schedule_name = var_inlet_air_mixer_schedule_name
# alpha
var_tank_element_control_logic = "MutuallyExlcusive"
obj.tank_element_control_logic = var_tank_element_control_logic
# real
var_control_sensor_1_height_in_stratified_tank = 0.0
obj.control_sensor_1_height_in_stratified_tank = var_control_sensor_1_height_in_stratified_tank
# real
var_control_sensor_1_weight = 0.5
obj.control_sensor_1_weight = var_control_sensor_1_weight
# real
var_control_sensor_2_height_in_stratified_tank = 0.0
obj.control_sensor_2_height_in_stratified_tank = var_control_sensor_2_height_in_stratified_tank
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].name, var_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].availability_schedule_name, var_availability_schedule_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].compressor_setpoint_temperature_schedule_name, var_compressor_setpoint_temperature_schedule_name)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].dead_band_temperature_difference, var_dead_band_temperature_difference)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].condenser_water_inlet_node_name, var_condenser_water_inlet_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].condenser_water_outlet_node_name, var_condenser_water_outlet_node_name)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].condenser_water_flow_rate, var_condenser_water_flow_rate)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].evaporator_air_flow_rate, var_evaporator_air_flow_rate)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].inlet_air_configuration, var_inlet_air_configuration)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].air_inlet_node_name, var_air_inlet_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].air_outlet_node_name, var_air_outlet_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].outdoor_air_node_name, var_outdoor_air_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].exhaust_air_node_name, var_exhaust_air_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].inlet_air_temperature_schedule_name, var_inlet_air_temperature_schedule_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].inlet_air_humidity_schedule_name, var_inlet_air_humidity_schedule_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].inlet_air_zone_name, var_inlet_air_zone_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].tank_object_type, var_tank_object_type)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].tank_name, var_tank_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].tank_use_side_inlet_node_name, var_tank_use_side_inlet_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].tank_use_side_outlet_node_name, var_tank_use_side_outlet_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].dx_coil_object_type, var_dx_coil_object_type)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].dx_coil_name, var_dx_coil_name)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].minimum_inlet_air_temperature_for_compressor_operation, var_minimum_inlet_air_temperature_for_compressor_operation)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].maximum_inlet_air_temperature_for_compressor_operation, var_maximum_inlet_air_temperature_for_compressor_operation)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].compressor_location, var_compressor_location)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].compressor_ambient_temperature_schedule_name, var_compressor_ambient_temperature_schedule_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].fan_object_type, var_fan_object_type)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].fan_name, var_fan_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].fan_placement, var_fan_placement)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].on_cycle_parasitic_electric_load, var_on_cycle_parasitic_electric_load)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].off_cycle_parasitic_electric_load, var_off_cycle_parasitic_electric_load)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].parasitic_heat_rejection_location, var_parasitic_heat_rejection_location)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].inlet_air_mixer_node_name, var_inlet_air_mixer_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].outlet_air_splitter_node_name, var_outlet_air_splitter_node_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].inlet_air_mixer_schedule_name, var_inlet_air_mixer_schedule_name)
self.assertEqual(idf2.waterheaterheatpumppumpedcondensers[0].tank_element_control_logic, var_tank_element_control_logic)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].control_sensor_1_height_in_stratified_tank, var_control_sensor_1_height_in_stratified_tank)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].control_sensor_1_weight, var_control_sensor_1_weight)
self.assertAlmostEqual(idf2.waterheaterheatpumppumpedcondensers[0].control_sensor_2_height_in_stratified_tank, var_control_sensor_2_height_in_stratified_tank)
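# Allow running this test case directly (optional; unittest/pytest discovery
# also works without it):
if __name__ == "__main__":
    unittest.main()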
|
11522577
|
from .seq2seq import Seq2SeqModel
from .nary_tree2seq import NaryTree2SeqModel
from .mm2seq import MM2SeqModel
from .code2seq import Code2Seq
from .hi_transformer_summarization import HiTransformerSummarizationModel
from .transformer import TransformerModel
# from .transformer_summarization_ft import TransformerFtModel
from .neural_transformer import NeuralTransformerModel
from .transformer_from_roberta import TransformerFromRobertaModel
from .codenn import CodeNNModel
from .deepcom import DeepComModel
from .debug import DebugModel
__all__ = [
'Seq2SeqModel',
'NaryTree2SeqModel',
'MM2SeqModel',
'HiTransformerSummarizationModel',
'TransformerModel',
'NeuralTransformerModel',
'TransformerFromRobertaModel',
'Code2Seq',
'CodeNNModel',
'DeepComModel',
'DebugModel'
]
|
11522585
|
import math
import time
import docker
from flask import jsonify, current_app
from flask_socketio import emit
from app import socket_io
from app.libs.certification import get_certification, cert_file_path
from app.libs.error_code import Success, StopFail, StartFail, RemoveFail
from app.validators.client_forms import ListForm, ContainerForm
from app.libs.redprint import Redprint
api = Redprint('container')
@api.route('/list', methods=['GET'])
def list():
form = ListForm().validate_for_api()
host = form.host.data
page = int(form.page.data)
search = form.search.data
certification = get_certification(host)
# check whether TLS client authentication is enabled for this host
if certification:
cert, key = cert_file_path(host)
tls_config = docker.tls.TLSConfig(client_cert=(cert, key), verify=False)
client = docker.DockerClient(base_url='tcp://' + host + ':2376', tls=tls_config)
else:
client = docker.DockerClient(base_url='tcp://' + host + ':2375')
container_list = client.containers.list(all=True)
containers = []
for c in container_list:
container = {}
container['name'] = c.name
container['id'] = c.short_id
container['status'] = c.status
container['ports'] = str(c.ports).replace('{', '').replace('}', '').replace(': None', '').replace("'HostIp': ",
'').replace(
", 'HostPort'", '')
container['created'] = c.attrs['Created'].split('.')[0].replace('T', ' ')
container['restartCount'] = c.attrs['RestartCount']
container['cmd'] = str(c.attrs['Config']['Cmd'])
container['image'] = c.attrs['Config']['Image']
if c.attrs['HostConfig']['Binds']:
container['volumes'] = str(c.attrs['HostConfig']['Binds']).replace(':rw', '')
else:
container['volumes'] = ''
container['network'] = c.attrs['HostConfig']['NetworkMode']
# container['portsSet'] = str(c.attrs['HostConfig']['PortBindings']).replace('{', '').replace('}', '').replace("'HostIp': '', 'HostPort': ", '')
containers.append(container)
# filter by search term
if search:
container_list_new = []
for c in containers:
if search in c['name'] or search in c['id']:
container_list_new.append(c)
containers = container_list_new
# pagination
PAGE_NUM = current_app.config['PAGE_NUM']
start = (page - 1) * PAGE_NUM
end = page * PAGE_NUM
data = containers[start:end]
paginate_result = {
'data': data,
'page': page,
'pages': math.ceil(len(containers) / PAGE_NUM),
'total': len(containers)
}
client.close()
return jsonify(paginate_result)
@api.route('/stop', methods=['GET'])
def stop():
form = ContainerForm().validate_for_api()
host = form.host.data
name_or_Id = form.nameOrId.data
certification = get_certification(host)
# check whether TLS client authentication is enabled for this host
if certification:
cert, key = cert_file_path(host)
tls_config = docker.tls.TLSConfig(client_cert=(cert, key), verify=False)
client = docker.DockerClient(base_url='tcp://' + host + ':2376', tls=tls_config)
else:
client = docker.DockerClient(base_url='tcp://' + host + ':2375')
try:
c = client.containers.get(name_or_Id)
c.stop()
except Exception:
client.close()
return StopFail()
client.close()
return Success(msg='Container stopped successfully')
@api.route('/start', methods=['GET'])
def start():
form = ContainerForm().validate_for_api()
host = form.host.data
name_or_Id = form.nameOrId.data
certification = get_certification(host)
# check whether TLS client authentication is enabled for this host
if certification:
cert, key = cert_file_path(host)
tls_config = docker.tls.TLSConfig(client_cert=(cert, key), verify=False)
client = docker.DockerClient(base_url='tcp://' + host + ':2376', tls=tls_config)
else:
client = docker.DockerClient(base_url='tcp://' + host + ':2375')
try:
c = client.containers.get(name_or_Id)
c.start()
except Exception:
client.close()
return StartFail()
client.close()
return Success(msg='Container started successfully')
@api.route('/remove', methods=['GET'])
def remove():
form = ContainerForm().validate_for_api()
host = form.host.data
name_or_Id = form.nameOrId.data
volume = form.volume.data
certification = get_certification(host)
# check whether TLS client authentication is enabled for this host
if certification:
cert, key = cert_file_path(host)
tls_config = docker.tls.TLSConfig(client_cert=(cert, key), verify=False)
client = docker.DockerClient(base_url='tcp://' + host + ':2376', tls=tls_config)
else:
client = docker.DockerClient(base_url='tcp://' + host + ':2375')
try:
c = client.containers.get(name_or_Id)
c.remove(v=volume)
except Exception:
client.close()
return RemoveFail()
client.close()
return Success(msg='Container removed successfully')
@socket_io.on('logs')
def logs(data):
host = data.get('host')
name = data.get('name')
certification = get_certification(host)
# check whether TLS client authentication is enabled for this host
if certification:
cert, key = cert_file_path(host)
tls_config = docker.tls.TLSConfig(client_cert=(cert, key), verify=False)
client = docker.DockerClient(base_url='tcp://' + host + ':2376', tls=tls_config)
else:
client = docker.DockerClient(base_url='tcp://' + host + ':2375')
c = client.containers.get(name)
for line in c.logs(stream=True, tail=20, follow=True):
print(line.decode('utf-8').strip())
emit(host + name, {'name': name, 'msg': line.decode('utf-8').strip()})
@socket_io.on('shell')
def shell(ws):
form = ContainerForm().validate_for_api()
host = ''
name_or_Id = 'f5ca53929c1d'
certification = get_certification(host)
# check whether TLS client authentication is enabled for this host
if certification:
cert, key = cert_file_path(host)
tls_config = docker.tls.TLSConfig(client_cert=(cert, key), verify=False)
client = docker.APIClient(base_url='tcp://' + host + ':2376', tls=tls_config)
else:
client = docker.APIClient(base_url='tcp://' + host + ':2375')
exec_id = client.exec_create(name_or_Id, '/bin/sh', stdin=True, tty=True)['Id']
client.exec_resize(exec_id, height=100, width=118)
client.exec_start(exec_id, detach=False, tty=True, stream=True)
client.close()
@socket_io.on_error() # Handles the default namespace
def error_handler(e):
raise RuntimeError()
@socket_io.on('connect')
def test_connect():
emit('my response', {'data': 'Connected'})
@socket_io.on('disconnect')
def test_disconnect():
print('Client disconnected')
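# Example request (a sketch; the URL prefix depends on how the 'container'
# Redprint is registered with the Flask app, so '/v1/container' below is an
# assumption, and the host value is a placeholder):
#
#   import requests
#   resp = requests.get(
#       'http://localhost:5000/v1/container/list',
#       params={'host': '192.168.1.10', 'page': 1, 'search': ''},
#   )
#   print(resp.json())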
|
11522607
|
from tir.technologies.webapp_internal import WebappInternal
from tir.technologies.apw_internal import ApwInternal
from tir.technologies.core.config import ConfigLoader
from tir.technologies.core.base_database import BaseDatabase
"""
This file must contain the definition of all User Classes.
These classes will contain only calls to the Internal classes.
"""
class Webapp():
"""
Instantiates the Webapp automated interface testing class.
:param config_path: The path to the config file. - **Default:** "" (empty string)
:type config_path: str
:param autostart: Sets whether TIR should open browser and execute from the start. - **Default:** True
:type autostart: bool
"""
def __init__(self, config_path="", autostart=True):
self.__webapp = WebappInternal(config_path, autostart)
self.__database = BaseDatabase(config_path, autostart=False)
self.config = ConfigLoader()
self.coverage = self.config.coverage
def AddParameter(self, parameter, branch, portuguese_value="", english_value="", spanish_value=""):
"""
Adds a parameter to the queue of parameters to be set by SetParameters method.
:param parameter: The parameter name.
:type parameter: str
:param branch: The branch to be filled in parameter edit screen.
:type branch: str
:param portuguese_value: The value for a portuguese repository.
:type portuguese_value: str
:param english_value: The value for an english repository.
:type english_value: str
:param spanish_value: The value for a spanish repository.
:type spanish_value: str
Usage:
>>> # Calling the method:
>>> oHelper.AddParameter("MV_MVCSA1", "", ".F.", ".F.", ".F.")
"""
self.__webapp.AddParameter(parameter, branch, portuguese_value, english_value, spanish_value)
def AssertFalse(self, expected=False, scritp_message=''):
"""
Defines that the test case expects a False response to pass
Usage:
>>> #Instantiating the class
>>> inst.oHelper = Webapp()
>>> #Calling the method
>>> inst.oHelper.AssertFalse()
"""
self.__webapp.AssertFalse(expected, scritp_message)
def AssertTrue(self, expected=True, scritp_message=''):
"""
Defines that the test case expects a True response to pass
Usage:
>>> #Instantiating the class
>>> inst.oHelper = Webapp()
>>> #Calling the method
>>> inst.oHelper.AssertTrue()
"""
self.__webapp.AssertTrue(expected, scritp_message)
def ChangeEnvironment(self, date="", group="", branch="", module=""):
"""
Clicks on the change environment area of Protheus Webapp and
fills the environment screen.
:param date: The date to fill on the environment screen. - **Default:** "" (empty string)
:type date: str
:param group: The group to fill on the environment screen. - **Default:** "" (empty string)
:type group: str
:param branch: The branch to fill on the environment screen. - **Default:** "" (empty string)
:type branch: str
:param module: The module to fill on the environment screen. - **Default:** "" (empty string)
:type module: str
Usage:
>>> # Calling the method:
>>> oHelper.ChangeEnvironment(date="13/11/2018", group="T1", branch="D MG 01 ")
"""
self.__webapp.ChangeEnvironment(date, group, branch, module)
def ChangeUser(self, user, password, initial_program = "", date='', group='99', branch='01'):
"""
Changes the user.
:param user: The user name.
:type user: str
:param password: The user password.
:type password: str
:param initial_program: The initial program to load. - **Default:** "" (previous initial_program)
:type initial_program: str
:param date: The date to fill on the environment screen. - **Default:** "" (previous date)
:type date: str
:param group: The group to fill on the environment screen. - **Default:** "previous date group"
:type group: str
:param branch: The branch to fill on the environment screen. - **Default:** "previous branch"
:type branch: str
Usage:
>>> # Calling the method:
>>> oHelper.ChangeUser("userTest", "a", "SIGAFAT", "18/08/2018", "T1", "D MG 01 ")
>>> #------------------------------------------------------------------------
>>> # Calling the method:
>>> oHelper.ChangeUser(user="user08", password="8" )
>>> #------------------------------------------------------------------------
"""
self.__webapp.ChangeUser(user, password, initial_program, date, group, branch)
def CheckResult(self, field, user_value, grid=False, line=1, grid_number=1, name_attr=False, input_field=True, direction=None, grid_memo_field=False):
"""
Checks if a field has the value the user expects.
:param field: The field or label of a field that must be checked.
:type field: str
:param user_value: The value that the field is expected to contain.
:type user_value: str
:param grid: Boolean if this is a grid field or not. - **Default:** False
:type grid: bool
:param line: Grid line that contains the column field to be checked.- **Default:** 1
:type line: int
:param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
:param name_attr: Boolean if search by Name attribute must be forced. - **Default:** False
:type name_attr: bool
:param input_field: False if the desired field is not an input type .
:type input_field: bool
:param direction: Desired direction to search for the element, currently accepts right and down.
:type direction: str
:param grid_memo_field: Boolean if this is a memo grid field. - **Default:** False
:type grid_memo_field: bool
Usage:
>>> # Calling method to check a value of a field:
>>> oHelper.CheckResult("A1_COD", "000001")
>>> #-----------------------------------------
>>> # Calling method to check a field that is on the second line of a grid:
>>> oHelper.CheckResult("Client", "000001", grid=True, line=2)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Calling method to check a field that is on the second grid of the screen:
>>> oHelper.CheckResult("Order", "000001", grid=True, line=1, grid_number=2)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Call method to check a field value that is not an input field and is on the right:
>>> oHelper.CheckResult("Saldo Titulo", "100.000,00", input_field=False, direction='right')
>>> oHelper.LoadGrid()
"""
self.__webapp.CheckResult(field, user_value, grid, line, grid_number, name_attr, input_field, direction, grid_memo_field)
def CheckView(self, text, element_type="help"):
"""
Checks if a certain text is present in the screen at the time and takes an action.
"help" - closes element.
:param text: Text to be checked.
:type text: str
:param element_type: Type of element. - **Default:** "help"
:type element_type: str
Usage:
>>> # Calling the method.
>>> oHelper.CheckView("Processing")
"""
self.__webapp.CheckView(text, element_type)
def ClickBox(self, fields, contents_list="", select_all=False, grid_number=1, itens=False):
"""
Clicks on Checkbox elements of a grid.
:param fields: Comma-divided string with the column fields that must be checked, combined with contents_list.
:type fields: str
:param contents_list: Comma-divided string with the values that must be checked. - **Default:** "" (empty string)
:type contents_list: str
:param select_all: Boolean if all options should be selected. - **Default:** False
:type select_all: bool
:param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
:param itens: Boolean that, when True, clicks on all items matching the field and content reference.
:type itens: bool
Usage:
>>> # Calling the method to select a specific checkbox:
>>> oHelper.ClickBox("Branch", "D MG 01 ")
>>> #--------------------------------------------------
>>> # Calling the method to select multiple checkboxes:
>>> oHelper.ClickBox("Branch", "D MG 01 , D RJ 02")
>>> #--------------------------------------------------
>>> # Calling the method to select all checkboxes:
>>> oHelper.ClickBox("Branch", select_all=True)
>>> #--------------------------------------------------
>>> # Calling the method to perform a click based on 2 fields and contents:
>>> test_helper.ClickBox('Numero da SC, Item da SC', 'COM068, 0001')
>>> #--------------------------------------------------
>>> # Calling the method to click on all items with this reference:
>>> test_helper.ClickBox('Numero da SC', 'COM068', itens=True)
"""
self.__webapp.ClickBox(fields, contents_list, select_all, grid_number, itens)
def ClickFolder(self, item, position=1):
"""
Clicks on folder elements on the screen.
:param item: Which folder item should be clicked.
:type item: str
:param position: In case of two or more folders with the same name on the screen, choose which one by its position (order of appearance).
:type position: int
Usage:
>>> # Calling the method:
>>> oHelper.ClickFolder("Folder1")
>>> # Second folder named as Folder1 in the same screen
>>> oHelper.ClickFolder("Folder1", position=2)
"""
self.__webapp.ClickFolder(item, position)
def ClickGridCell(self, column, row=1, grid_number=1):
"""
Clicks on a Cell of a Grid.
:param column: The column that should be clicked.
:type column: str
:param row: Grid line that contains the column field to be checked. - **Default:** 1
:type row: int
:param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
Usage:
>>> # Calling the method:
>>> oHelper.ClickGridCell("Product", 1)
"""
self.__webapp.ClickGridCell(column, row, grid_number)
def ClickGridHeader( self, column = 1, column_name = '', grid_number = 1):
"""
Clicks on a Cell of a Grid Header.
:param column: The column index that should be clicked.
:type column: int
:param column_name: The column name that should be clicked.
:type column_name: str
:param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
Usage:
>>> # Calling the method:
>>> oHelper.ClickGridHeader(column = 1 , grid_number = 1)
>>> oHelper.ClickGridHeader(column_name = 'Código' , grid_number = 1)
>>> oHelper.ClickGridHeader(column = 1 , grid_number = 2)
"""
self.__webapp.ClickGridHeader(column, column_name, grid_number)
def ClickIcon(self, icon_text, position=1):
"""
Clicks on an Icon button based on its tooltip text.
:param icon_text: The tooltip text.
:type icon_text: str
:param position: Position which element is located. - **Default:** 1
:type position: int
Usage:
>>> # Call the method:
>>> oHelper.ClickIcon("Add")
>>> oHelper.ClickIcon("Edit")
"""
self.__webapp.ClickIcon(icon_text, position)
def ClickCheckBox(self, label_box_name, position=1):
"""
Clicks on a Label in box on the screen.
:param label_box_name: The label box name
:type label_box_name: str
:param position: index label box on interface
:type position: int
Usage:
>>> # Call the method:
>>> oHelper.ClickCheckBox("Search",1)
"""
self.__webapp.ClickCheckBox(label_box_name,position)
def ClickLabel(self, label_name):
"""
Clicks on a Label on the screen.
:param label_name: The label name
:type label_name: str
Usage:
>>> # Call the method:
>>> oHelper.ClickLabel("Search")
"""
self.__webapp.ClickLabel(label_name)
def GetValue(self, field, grid=False, line=1, grid_number=1, grid_memo_field=False):
"""
Gets the current value or text of element.
:param field: The field or label of a field that must be checked.
:type field: str
:param grid: Boolean if this is a grid field or not. - **Default:** False
:type grid: bool
:param line: Grid line that contains the column field to be checked.- **Default:** 1
:type line: int
:param grid_number: Grid number of which grid should be checked when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
:param grid_memo_field: Boolean if this is a memo grid field. - **Default:** False
:type grid_memo_field: bool
Usage:
>>> # Calling the method:
>>> current_value = oHelper.GetValue("A1_COD")
"""
return self.__webapp.GetValue(field, grid, line, grid_number, grid_memo_field)
def LoadGrid(self):
"""
This method is responsible for running all actions of the input and check queues
of a grid. After running, the queues would be empty.
Must be called after SetValue and CheckResult calls that has the grid parameter set to True.
Usage:
>>> # After SetValue:
>>> oHelper.SetValue("A1_COD", "000001", grid=True)
>>> oHelper.LoadGrid()
>>> #--------------------------------------
>>> # After CheckResult:
>>> oHelper.CheckResult("A1_COD", "000001", grid=True, line=1)
>>> oHelper.LoadGrid()
"""
self.__webapp.LoadGrid()
def LogOff(self):
"""
Logs out of the Protheus Webapp.
.. note::
Do not use this method in any routine. Use on home screen.
Usage:
>>> # Calling the method.
>>> oHelper.LogOff()
"""
self.__webapp.LogOff()
def Finish(self):
"""
Exit the Protheus Webapp.
Usage:
>>> # Calling the method.
>>> oHelper.Finish()
"""
self.__webapp.Finish()
def MessageBoxClick(self, button_text):
"""
Clicks on desired button inside a Messagebox element.
:param button_text: Desired button to click.
:type button_text: str
Usage:
>>> # Calling the method:
>>> oHelper.MessageBoxClick("Ok")
"""
self.__webapp.MessageBoxClick(button_text)
def Program(self, program_name):
"""
Method that sets the program in the initial menu search field.
.. note::
Only used when the Initial Program is the module, e.g. SIGAFAT.
:param program_name: The program name
:type program_name: str
Usage:
>>> # Calling the method:
>>> oHelper.Program("MATA020")
"""
self.__webapp.Program(program_name)
def RestoreParameters(self):
"""
Restores parameters to previous value in CFG screen. Should be used after a **SetParameters** call.
Usage:
>>> # Adding Parameter:
>>> oHelper.AddParameter("MV_MVCSA1", "", ".F.", ".F.", ".F.")
>>> # Setting the parameters:
>>> oHelper.SetParameters()
>>> # Calling the method:
>>> oHelper.RestoreParameters()
"""
self.__webapp.RestoreParameters()
def ScrollGrid(self, column, match_value, grid_number=1):
"""
Scrolls Grid until a matching column is found.
:param field: The column to be matched.
:type field: str
:param match_value: The value to be matched in defined column.
:type match_value: str
:param grid_number: Which grid should be used when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
Usage:
>>> # Calling the method to scroll to a column match:
>>> oHelper.ScrollGrid(column="Branch",match_value="D MG 01 ")
>>> #--------------------------------------------------
>>> # Calling the method to scroll to a column match of the second grid:
>>> oHelper.ScrollGrid(column="Branch", match_value="D MG 01 ", grid_number=2)
"""
self.__webapp.ScrollGrid(column, match_value, grid_number)
def Screenshot(self, filename):
"""
Takes a screenshot and saves on the screenshot folder defined in config.
:param filename: The name of the screenshot file.
:type: str
Usage:
>>> # Calling the method:
>>> oHelper.Screenshot(filename="myscreenshot")
"""
self.__webapp.take_screenshot(filename)
def F3(self, field, name_attr=False,send_key=False):
"""
Does the standard query (F3). This method:
1. Searches for the field;
2. Searches for the "lookup" icon;
3. Clicks it.
:param field: The field whose standard search should be opened.
:type field: str
:param name_attr: True: searches for the element by its name attribute.
:type name_attr: bool
:param send_key: True: tries to open the standard search field by sending the F3 key.
:type send_key: bool
Usage:
>>> # To search using a label name:
>>> oHelper.F3("Cód")
>>> #------------------------------------------------------------------------
>>> # To search using the name of input:
>>> oHelper.F3(field='A1_EST',name_attr=True)
>>> #------------------------------------------------------------------------
>>> # To search using the name of input and do action with a key:
>>> oHelper.F3(field='A1_EST',name_attr=True,send_key=True)
"""
self.__webapp.standard_search_field( field, name_attr, send_key )
def SetupTSS(self, initial_program="", environment=""):
"""
Prepare the Protheus Webapp TSS for the test case, filling the needed information to access the environment.
.. note::
This method use the user and password from config.json.
:param initial_program: The initial program to load.
:type initial_program: str
:param environment: The initial environment to load.
:type environment: str
Usage:
>>> # Calling the method:
>>> oHelper.SetupTSS("TSSMANAGER", "SPED")
"""
self.__webapp.SetupTSS(initial_program, environment)
def SearchBrowse(self, term, key=None, identifier=None, index=False, column=None):
"""
Searchs a term on Protheus Webapp.
It will search using the default search key, but if a **key** is provided
it will search using the chosen key.
It will search using the first search box on the screen, but if an **identifier**
is provided, it will search on the chosen search box.
:param term: The term that must be searched.
:type term: str
:param key: The search key to be chosen on the search dropdown. - **Default:** None
:type key: str
:param identifier: The identifier of the search box. If none is provided, it defaults to the first of the screen. - **Default:** None
:type identifier: str
:param index: Whether the key is an index or not. - **Default:** False
:type index: bool
:param column: The search column to be chosen on the search dropdown. - **Default:** None
:type column: str
Usage:
>>> # To search using the first search box and default search key:
>>> oHelper.SearchBrowse("D MG 001")
>>> #------------------------------------------------------------------------
>>> # To search using the first search box and a chosen key:
>>> oHelper.SearchBrowse("D MG 001", key="Branch+id")
>>> #------------------------------------------------------------------------
>>> # To search using a chosen search box and the default search key:
>>> oHelper.SearchBrowse("D MG 001", identifier="Products")
>>> #------------------------------------------------------------------------
>>> # To search using a chosen search box and a chosen search key:
>>> oHelper.SearchBrowse("D MG 001", key="Branch+id", identifier="Products")
>>> oHelper.SearchBrowse("D MG 001", identifier="Products")
>>> #------------------------------------------------------------------------
>>> # To search using an index instead of name for the search key:
>>> oHelper.SearchBrowse("D MG 001", key=2, index=True)
>>> #------------------------------------------------------------------------
>>> # To search using the first search box and a chosen column:
>>> oHelper.SearchBrowse("D MG 001", column="Nome")
>>> #------------------------------------------------------------------------
>>> #------------------------------------------------------------------------
>>> # To search using the first search box and a chosen columns:
>>> oHelper.SearchBrowse("D MG 001", column="Nome, Filial*, ColumnX, AnotherColumnY")
>>> #------------------------------------------------------------------------
"""
self.__webapp.SearchBrowse(term, key, identifier, index, column)
def SetBranch(self, branch):
"""
Chooses the branch on the branch selection screen.
:param branch: The branch that would be chosen.
:type branch: str
Usage:
>>> # Calling the method:
>>> oHelper.SetBranch("D MG 01 ")
"""
self.__webapp.SetBranch(branch)
def SetButton(self, button, sub_item="", position=1, check_error=True):
"""
Method that clicks on a button on the screen.
:param button: Button to be clicked.
:type button: str
:param sub_item: Sub item to be clicked inside the first button. - **Default:** "" (empty string)
:type sub_item: str
:param position: Position which element is located. - **Default:** 1
:type position: int
Usage:
>>> # Calling the method to click on a regular button:
>>> oHelper.SetButton("Add")
>>> #-------------------------------------------------
>>> # Calling the method to click on a sub item inside a button.
>>> oHelper.SetButton("Other Actions", "Process")
"""
self.__webapp.SetButton(button, sub_item, position, check_error=check_error)
def SetFilePath(self, value, button = ""):
"""
Fills the path screen with the desired path
.. warning::
It is necessary to inform the button name, otherwise the program will select the current button name.
:param value: Path to be inputted.
:type value: str
:param button: Button name on the path screen.
:type button: str
Usage:
>>> # Calling the method:
>>> oHelper.SetFilePath(r"C:\\folder")
>>> oHelper.SetFilePath(r"C:\\folder","save")
"""
self.__webapp.SetFilePath(value, button)
def SetFocus(self, field, grid_cell=False, row_number=1, position=1):
"""
Sets the current focus on the desired field.
:param field: The field that must receive the focus.
:type field: str
:param grid_cell: Indicates if the element that deserves focus is on a grid.
:type grid_cell: bool
:param row_number: Row number in case of multiple rows.
:type row_number: int
:param position: Position which element is located. - **Default:** 1
:type position: int
Usage:
>>> # Calling the method:
>>> oHelper.SetFocus("A1_COD")
"""
self.__webapp.SetFocus(field, grid_cell, row_number, position)
def SetKey(self, key, grid=False, grid_number=1,additional_key="", wait_show = "", step = 3):
"""
Press the desired key on the keyboard on the focused element.
.. warning::
If this method is the first to be called, we strongly recommend using some wait method like WaitShow().
.. warning::
Before using this method, set focus on any element.
Supported keys: F1 to F12, CTRL+Key, ALT+Key, Up, Down, Left, Right, ESC, Enter and Delete ...
:param key: Key that would be pressed
:type key: str
:param grid: Boolean if action must be applied on a grid. (Usually with DOWN key)
:type grid: bool
:param grid_number: Grid number of which grid should be used when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
:param additional_key: Key additional that would be pressed.
:type additional_key: str
:param wait_show: String that will hold the wait after pressing a key.
:type wait_show: str
:param step: The amount of time each step should wait. - **Default:** 3
:type step: float
Usage:
>>> # Calling the method:
>>> oHelper.SetKey("ENTER")
>>> #--------------------------------------
>>> # Calling the method on a grid:
>>> oHelper.SetKey("DOWN", grid=True)
>>> #--------------------------------------
>>> # Calling the method on the second grid on the screen:
>>> oHelper.SetKey("DOWN", grid=True, grid_number=2)
>>> #--------------------------------------
>>> # Calling the method when you expected new window or text appears on the screen:
>>> oHelper.SetKey( key = "F12", wait_show="Parametros", step = 3 )
>>> #--------------------------------------
>>> # Calling the method with special keys (using parameter additional_key):
>>> oHelper.SetKey(key="CTRL", additional_key="A")
"""
self.__webapp.SetKey(key, grid, grid_number,additional_key, wait_show, step)
def SetLateralMenu(self, menuitens):
"""
Navigates through the lateral menu using provided menu path.
e.g. "MenuItem1 > MenuItem2 > MenuItem3"
:param menu_itens: String with the path to the menu.
:type menu_itens: str
Usage:
>>> # Calling the method:
>>> oHelper.SetLateralMenu("Updates > Registers > Products > Groups")
"""
self.__webapp.SetLateralMenu(menuitens)
def SetParameters(self):
"""
Sets the parameters in CFG screen. The parameters must be passed with calls for **AddParameter** method.
Usage:
>>> # Adding Parameter:
>>> oHelper.AddParameter("MV_MVCSA1", "", ".F.", ".F.", ".F.")
>>> # Calling the method:
>>> oHelper.SetParameters()
"""
self.__webapp.SetParameters()
def SetTabEDAPP(self, table_name):
"""
Chooses the table on the generic query (EDAPP).
:param table: The table that would be chosen.
:type table: str
Usage:
>>> # Calling the method:
>>> oHelper.SetTabEDAPP("AAB")
"""
self.__webapp.SetTabEDAPP(table_name)
def SetValue(self, field, value, grid=False, grid_number=1, ignore_case=True, row=None, name_attr=False, position = 1, check_value=None, grid_memo_field=False, range_multiplier=None, direction=None, duplicate_fields=[]):
"""
Sets value of an input element.
.. note::
Do not use the position parameter together with grid=True.
:param field: The field name or label to receive the value
:type field: str
:param value: The value to be inputted on the element.
:type value: str
:param grid: Boolean if this is a grid field or not. - **Default:** False
:type grid: bool
:param grid_number: Grid number of which grid should be inputted when there are multiple grids on the same screen. - **Default:** 1
:type grid_number: int
:param ignore_case: Boolean if case should be ignored or not. - **Default:** True
:type ignore_case: bool
:param row: Row number that will be filled
:type row: int
:param name_attr: Boolean if search by Name attribute must be forced. - **Default:** False
:type name_attr: bool
:param check_value: Boolean that, when False, skips the input value check. - **Default:** True
:type check_value: bool
:param position: Position which element is located. - **Default:** 1
:type position: int
:param grid_memo_field: Boolean if this is a memo grid field. - **Default:** False
:type grid_memo_field: bool
:param range_multiplier: Integer value that refers to the distance of the label from the input object. The safe value must be between 1 and 10.
:type range_multiplier: int
:param direction: Desired direction to search for the element from a label, currently accepts right and down.
:type direction: str
Usage:
>>> # Calling method to input value on a field:
>>> oHelper.SetValue("A1_COD", "000001")
>>> #-----------------------------------------
>>> # Calling method to input value on a field from a label text and looking an input field for a specific direction:
>>> oHelper.SetValue("Codigo", "000001", direction='right')
>>> #-----------------------------------------
>>> # Calling method to input value on a field using by label name:
>>> oHelper.SetValue("Codigo", "000001")
>>> #-----------------------------------------
>>> # Calling method to input value on a field using by an existing label name:
>>> oHelper.SetValue(field = "Codigo", value = "000002", position = 2)
>>> #-----------------------------------------
>>> # Calling method to input value on a field that is a grid:
>>> oHelper.SetValue("Client", "000001", grid=True)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Calling method to checkbox value on a field that is a grid:
>>> oHelper.SetValue('Confirmado?', True, grid=True)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Calling method to input value on a field that is on the second grid of the screen:
>>> oHelper.SetValue("Order", "000001", grid=True, grid_number=2)
>>> oHelper.LoadGrid()
>>> #-----------------------------------------
>>> # Calling method to input value on a field that is a grid (2) *Will not attempt to verify the entered value. Run only once.* :
>>> oHelper.SetValue("Order", "000001", grid=True, grid_number=2, check_value = False)
>>> oHelper.LoadGrid()
>>> #--------------------------------------
>>> # Calling method to input value in cases that have duplicate fields:
>>> oHelper.SetValue('Tipo Entrada' , '073', grid=True, grid_number=2, name_attr=True)
>>> self.oHelper.SetValue('Tipo Entrada' , '073', grid=True, grid_number=2, name_attr=True, duplicate_fields=['tipo entrada', 10])
>>> oHelper.LoadGrid()
"""
self.__webapp.SetValue(field, value, grid, grid_number, ignore_case, row, name_attr, position, check_value, grid_memo_field, range_multiplier, direction, duplicate_fields)
def Setup(self, initial_program, date="", group="99", branch="01", module=""):
"""
Prepare the Protheus Webapp for the test case, filling the needed information to access the environment.
:param initial_program: The initial program to load.
:type initial_program: str
:param date: The date to fill on the environment screen. - **Default:** "" (empty string)
:type date: str
:param group: The group to fill on the environment screen. - **Default:** "99"
:type group: str
:param branch: The branch to fill on the environment screen. - **Default:** "01"
:type branch: str
:param module: The module to fill on the environment screen. - **Default:** "" (empty string)
:type module: str
Usage:
>>> # Calling the method:
>>> oHelper.Setup("SIGAFAT", "18/08/2018", "T1", "D MG 01 ")
"""
self.__webapp.Setup(initial_program, date, group, branch, module)
def SetTIRConfig(self, config_name, value):
"""
Changes a value of a TIR internal config during runtime.
This could be useful for TestCases that must use a different set of configs
than the ones defined at **config.json**
Available configs:
- Url
- Environment
- User
- Password
- Language
- DebugLog
- TimeOut
- InitialProgram
- Routine
- Date
- Group
- Branch
- Module
:param config_name: The config to be changed.
:type config_name: str
:param value: The value that would be set.
:type value: str
Usage:
>>> # Calling the method:
>>> oHelper.SetTIRConfig(config_name="date", value="30/10/2018")
"""
self.__webapp.SetTIRConfig(config_name, value)
def Start(self):
"""
Opens the browser maximized and goes to defined URL.
Usage:
>>> # Calling the method:
>>> oHelper.Start()
"""
self.__webapp.Start()
def TearDown(self):
"""
Closes the webdriver and ends the test case.
Usage:
>>> #Calling the method
>>> inst.oHelper.TearDown()
"""
self.__webapp.TearDown()
def WaitFieldValue(self, field, expected_value):
"""
Wait until field has expected value.
Recommended for Trigger fields.
:param field: The desired field.
:type field: str
:param expected_value: The expected value.
:type expected_value: str
Usage:
>>> # Calling method:
>>> oHelper.WaitFieldValue("CN0_DESCRI", "MY DESCRIPTION")
"""
self.__webapp.WaitFieldValue(field, expected_value)
def WaitHide(self, string):
"""
Search string that was sent and wait hide the elements.
:param string: String that will hold the wait.
:type string: str
Usage:
>>> # Calling the method:
>>> oHelper.WaitHide("Processing")
"""
self.__webapp.WaitHide(string)
def WaitProcessing(self, string):
"""
Uses WaitShow and WaitHide to wait for a processing screen.
:param string: String that will hold the wait.
:type string: str
Usage:
>>> # Calling the method:
>>> oHelper.WaitProcessing("Processing")
"""
self.__webapp.WaitProcessing(string)
def WaitShow(self, string):
"""
Search string that was sent and wait show the elements.
:param string: String that will hold the wait.
:type string: str
Usage:
>>> # Calling the method:
>>> oHelper.WaitShow("Processing")
"""
self.__webapp.WaitShow(string)
def ClickTree(self, treepath, right_click=False, position=1):
"""
Clicks on TreeView component.
:param treepath: String that contains the access path for the item, separated by ">".
:type treepath: str
:param right_click: Clicks with the right button of the mouse on the last element of the tree.
:type right_click: bool
Usage:
>>> # Calling the method:
>>> oHelper.ClickTree("element 1 > element 2 > element 3")
>>> # Right Click example:
>>> oHelper.ClickTree("element 1 > element 2 > element 3", right_click=True)
"""
self.__webapp.ClickTree(treepath=treepath, right_click=right_click, position=position)
def GridTree(self, column, treepath, right_click=False):
"""
Clicks on Grid TreeView component.
:param treepath: String that contains the access path for the item, separated by ">".
:type treepath: str
:param right_click: Clicks with the right button of the mouse on the last element of the tree.
:type right_click: bool
Usage:
>>> # Calling the method:
>>> oHelper.GridTree("element 1 > element 2 > element 3")
>>> # Right GridTree example:
>>> oHelper.GridTree("element 1 > element 2 > element 3", right_click=True)
"""
self.__webapp.GridTree(column, treepath, right_click)
def GetText(self, string_left="", string_right=""):
"""
This method returns a string from the modal based on the string you pass in the left or right position parameter.
If string_left is filled, the content to its right is returned.
If string_right is filled, the content to its left is returned.
If neither parameter is filled, the full content is returned.
:param string_left: String of the left side of content.
:type string_left: str
:param string_right: String of the right side of content.
:type string_right: str
Usage:
>>> # Calling the method:
>>> oHelper.GetText(string_left="Left Text", string_right="Right Text")
>>> oHelper.GetText(string_left="Left Text")
>>> oHelper.GetText()
"""
return self.__webapp.GetText(string_left, string_right)
def CheckHelp(self, text="", button="", text_help="", text_problem="", text_solution="", verbosity=False):
"""
Checks if some help screen is present in the screen at the time and takes an action.
:param text: Text to be checked.
:type text: str
:param text_help: Only the help text will be checked.
:type text_help: str
:param text_problem: Only the problem text will be checked.
:type text_problem: str
:param text_solution: Only the solution text will be checked.
:type text_solution: str
:param button: Button to be clicked.
:type button: str
:param verbosity: Check the text with high accuracy.
:type verbosity: bool
Usage:
>>> # Calling method to check all window text.
>>> oHelper.CheckHelp("TK250CADRE Problema: Essa reclamação já foi informada anteriormente. Solução: Informe uma reclamação que ainda não tenha sido cadastrada nessa tabela.", "Fechar")
>>> # Calling method to check help text only.
>>> oHelper.CheckHelp(text_help="TK250CADRE", button="Fechar")
>>> # Calling method to check problem text only.
>>> oHelper.CheckHelp(text_problem="Problema: Essa reclamação já foi informada anteriormente.", button="Fechar")
>>> # Calling method to check solution text only.
>>> oHelper.CheckHelp(text_solution="Solução: Informe uma reclamação que ainda não tenha sido cadastrada nessa tabela.", button="Fechar")
>>> # Calling the method to check only the problem text with high precision.
>>> oHelper.CheckHelp(text_problem="Problema: Essa reclamação já foi informada anteriormente.", button="Fechar", verbosity=True)
"""
return self.__webapp.CheckHelp(text, button, text_help, text_problem, text_solution, verbosity)
def ClickMenuPopUpItem(self, text, right_click=False, position = 1):
"""
Clicks on MenuPopUp Item based in a text
:param text: Text in MenuPopUp to be clicked.
:type text: str
:param right_click: Clicks with the right button of the mouse on the item.
:type right_click: bool
:param position: Position of the item when the text matches more than one item. - **Default:** 1
:type position: int
Usage:
>>> # Calling the method.
>>> oHelper.ClickMenuPopUpItem("Label")
>>> # Calling the method using position.
>>> oHelper.ClickMenuPopUpItem("Label", position = 2)
"""
return self.__webapp.ClickMenuPopUpItem(text, right_click, position = position)
def GetRelease(self):
"""
Get the current release from Protheus.
:return: The current release from Protheus.
:rtype: str
Usage:
>>> # Calling the method:
>>> oHelper.GetRelease()
>>> # Conditional with method:
>>> # Situation: there is an input that only appears in releases greater than or equal to 12.1.027
>>> if self.oHelper.GetRelease() >= '12.1.027':
>>> self.oHelper.SetValue('AK1_CODIGO', 'codigo_CT001')
"""
return self.__webapp.get_release()
def ClickListBox(self, text):
"""
Clicks on Item based in a text in a window tlistbox
:param text: Text in windows to be clicked.
:type text: str
Usage:
>>> # Calling the method.
>>> oHelper.ClickListBox("text")
"""
return self.__webapp.ClickListBox(text)
def ClickImage(self, img_name, double_click=False):
"""
Clicks on an image button. It should be used only in cases that 'ClickIcon' does not support.
:param img_name: Image to be clicked.
:type img_name: str
Usage:
>>> # Call the method:
>>> oHelper.ClickImage("img_name")
>>> oHelper.ClickImage("img_name",double_click=True)
"""
self.__webapp.ClickImage(img_name,double_click)
def ProgramScreen(self, initial_program=""):
"""
Fills the first screen of Protheus with the first program to run.
:param initial_program: The initial program to load
:type initial_program: str
Usage:
>>> # Calling the method:
>>> self.ProgramScreen("SIGAADV")
"""
self.__webapp.program_screen(initial_program, coverage=self.coverage)
def OpenCSV(self, csv_file='', delimiter=';', column=None, header=None, filter_column=None, filter_value=''):
"""
Returns a dictionary when the file has a header; otherwise returns a list.
The folder must be entered in the CSVPath parameter in the config.json.
.. note::
This method return data as a string if necessary use some method to convert data like int().
>>> config.json
>>> "CSVPath" : "C:\\temp"
:param csv_file: .csv file name
:type csv_file: str
:param delimiter: Delimiter option such like ';' or ',' or '|'
:type delimiter: str
:param column: For files with a header, it is possible to return only one column by its header name; use an int index for files without a header.
:type column: str
:param header: Indicates whether the file contains a header or not. - **Default:** None
:type header: bool
:param filter_column: Column used to filter a specific value together with filter_value; if an int is used, column numbering starts at 1.
:type filter_column: str or int
:param filter_value: Value used in pair with filter_column parameter
:type filter_value: str
:param filter_data: If you want filter a value by column, this parameter need to be a True value
:type filter_data: bool
>>> # Call the method:
>>> file_csv = self.oHelper.OpenCSV(delimiter=";", csv_file="no_header.csv")
>>> file_csv_no_header_column = self.oHelper.OpenCSV(column=0, delimiter=";", csv_file="no_header_column.csv")
>>> file_csv_column = self.oHelper.OpenCSV(column='CAMPO', delimiter=";", csv_file="header_column.csv", header=True)
>>> file_csv_pipe = self.oHelper.OpenCSV(delimiter="|", csv_file="pipe_no_header.csv")
>>> file_csv_header = self.oHelper.OpenCSV(delimiter=";", csv_file="header.csv", header=True)
>>> file_csv_header_column = self.oHelper.OpenCSV(delimiter=";", csv_file="header.csv", header=True)
>>> file_csv_header_pipe = self.oHelper.OpenCSV(delimiter="|", csv_file="pipe_header.csv", header=True)
>>> file_csv_header_filter = self.oHelper.OpenCSV(delimiter=";", csv_file="header.csv", header=True, filter_column='CAMPO', filter_value='A00_FILIAL')
>>> file_csv_no_header_filter = self.oHelper.OpenCSV(delimiter=";", csv_file="no_header.csv", filter_column=0, filter_value='A00_FILIAL')
"""
return self.__webapp.open_csv(csv_file, delimiter, column, header, filter_column, filter_value)
def StartDB(self):
"""
:return: connection object
Usage:
>>> # Call the method:
>>> self.oHelper.StartDB()
"""
return self.__database.connect_database()
def StopDB(self, connection):
"""
:param connection: connection object
:type connection: object
Usage:
>>> # Call the method:
>>> self.oHelper.StopDB(connection)
"""
# Note: assumes the database helper exposes disconnect_database() as the counterpart to connect_database().
self.__database.disconnect_database(connection)
def QueryExecute(self, query, database_driver="", dbq_oracle_server="", database_server="", database_port=1521, database_name="", database_user="", database_password=""):
"""
Returns a dictionary if the query statement is a SELECT; otherwise prints the number of rows
affected in case of an INSERT|UPDATE|DELETE statement.
.. note::
Default Database information is in config.json another way is possible put this in the QueryExecute method parameters:
Parameters:
"DBDriver": "",
"DBServer": "",
"DBName": "",
"DBUser": "",
"DBPassword": ""
.. note::
Must be used an ANSI default SQL statement.
.. note::
dbq_oracle_server parameter is necessary only for Oracle connection.
:param query: ANSI SQL statement query
:type query: str
:param database_driver: ODBC Driver database name
:type database_driver: str
:param dbq_oracle_server: Only for Oracle: DBQ format:Host:Port/oracle instance
:type dbq_oracle_server: str
:param database_server: Database Server Name
:type database_server: str
:param database_port: Database port default port=1521
:type database_port: int
:param database_name: Database Name
:type database_name: str
:param database_user: User Database Name
:type database_user: str
:param database_password: Database password
:type database_password: str
Usage:
>>> # Call the method:
>>> self.oHelper.QueryExecute("SELECT * FROM SA1T10")
>>> self.oHelper.QueryExecute("SELECT * FROM SA1T10", database_driver="DRIVER_ODBC_NAME", database_server="SERVER_NAME", database_name="DATABASE_NAME", database_user="sa", database_password="<PASSWORD>")
>>> # Oracle Example:
>>> self.oHelper.QueryExecute("SELECT * FROM SA1T10", database_driver="Oracle in OraClient19Home1", dbq_oracle_server="Host:Port/oracle instance", database_server="SERVER_NAME", database_name="DATABASE_NAME", database_user="sa", database_password="<PASSWORD>")
"""
return self.__database.query_execute(query, database_driver, dbq_oracle_server, database_server, database_port, database_name, database_user, database_password)
def GetConfigValue(self, json_key):
"""
:param json_key: Json Key in config.json
:type json_key: str
:return: Json Key item in config.json
"""
return self.__webapp.get_config_value(json_key)
def ReportComparison(self, base_file="", current_file=""):
"""
Compares two report files and shows the differences between them, if any.
.. warning::
Important to use the BaseLine_Spool key in config.json to work appropriately. Baseline_Spool is the path of the report spool in your environment.
.. warning::
Some words are changed to this pattern below:
'Emissão: 01-01-2015'
'Emision: 01-01-2015'
'DT.Ref.: 01-01-2015'
'Fc.Ref.: 01-01-2015'
'Hora...: 00:00:00'
'Hora Término: 00:00:00'
'/' to '@'
Only .xml
'encoding=""'
'"DateTime">2015-01-01T00:00:00'
'ss:Width="100"'
:param base_file: Base file that reflects the expected result. If it doesn't exist, make a copy of the auto file and rename it to base.
:type base_file: str
:param current_file: Current file recently printed; this file is used to generate the auto file automatically.
:type current_file: str
Usage:
>>> # File example:
>>> # acda080rbase.##r
>>> # acda080rauto.##r
>>> # Calling the method:
>>> self.oHelper.ReportComparison(base_file="acda080rbase.##r", current_file="acda080rauto.##r")
"""
return self.__webapp.report_comparison(base_file, current_file)
def GetGrid(self, grid_number=1, grid_element = None):
"""
Gets a grid BeautifulSoup object from the screen.
:param grid_number: The number of the grid on the screen.
:type: int
:param grid_element: Grid class name in HTML ex: ".tgrid".
:type: str
:return: Grid BeautifulSoup object
:rtype: BeautifulSoup object
Usage:
>>> # Calling the method:
>>> my_grid = oHelper.GetGrid()
"""
return self.__webapp.get_grid_content(grid_number, grid_element)
def LengthGridLines(self, grid):
"""
Returns the length of the grid.
:return: Number of lines in the grid.
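Usage:
>>> # Calling the method:
>>> grid = oHelper.GetGrid()
>>> oHelper.LengthGridLines(grid)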
"""
return self.__webapp.LengthGridLines(grid)
class Apw():
def __init__(self, config_path=""):
self.__Apw = ApwInternal()
def CheckBrowse(self, valores):
self.__Apw.CheckBrowse(valores)
def CheckLink(self, Link):
self.__Apw.CheckLink(Link)
def ClickLink(self, Link):
self.__Apw.ClickLink(Link)
def ClickMenu(self, caminho):
self.__Apw.ClickMenu(caminho)
def CloseAlert(self):
self.__Apw.CloseAlert()
def CloseWindow(self):
self.__Apw.CloseWindow()
def EndCase(self):
self.__Apw.EndCase()
def SetButton(self, button, type=''):
self.__Apw.SetButton(button, type)
def SetGrid(self, btnFunc="Incluir"):
self.__Apw.SetGrid(btnFunc)
def SelectBrowse(self, valores, opcao='', duplo=True):
self.__Apw.SelectBrowse(valores, opcao, duplo)
def Setup(self, lblUser="Usuário", lblPassword="<PASSWORD>", btnAccess="Acessar Portal"):
self.__Apw.Setup(lblUser, lblPassword, btnAccess)
def SwitchModal(self, opcao, frame=''):
self.__Apw.SwitchModal(opcao, frame)
def SwitchWindow(self, exit=False):
self.__Apw.SwitchWindow(exit)
def SearchValue(self, busca, valor, grid=False, btnOk='ok', btnFind='buscar', searchparam='Pesquisar'):
self.__Apw.SearchValue(busca, valor, grid, btnOk, btnFind, searchparam)
def SetValue(self, campo, valor, grid=False, linha=0, chknewline=False, disabled=False):
self.__Apw.SetValue(campo, valor, grid, linha, chknewline, disabled)
def WaitModal(self, text, opcao="title"):
self.__Apw.WaitModal(text, opcao)
|
11522631
|
from spacy.symbols import IS_PUNCT
import re
import string
from spacy.tokenizer import Tokenizer
from spacy.util import compile_prefix_regex, compile_infix_regex, compile_suffix_regex
def create_medspacy_tokenizer(nlp):
"""Generates a custom tokenizer to augment the default spacy tokenizer
for situations commonly seen in clinical text.
This includes:
* Punctuation infixes.
For example, this allows the following examples to be more aggressively tokenized as:
"Patient complains of c/o" -> [..., 'c', '/', 'o']
"chf+cp" -> ['chf', '+', 'cp']
@param nlp: Spacy language model
"""
# augment the defaults
# this is not quite correct. We do not want to break on uppercase and we do not
# want to break on all punctuation (periods)
# infixes = nlp.Defaults.infixes + (r'''[^a-z0-9]''',)
# escape all the punctuation we want to allow to break up tokens
# get all python punctuation
punctuation_chars = string.punctuation
# remove periods so that we do not break up '1.5 mg' into '1 . 5 mg'
punctuation_chars = punctuation_chars.replace(".", "")
infixes = nlp.Defaults.infixes + [
r"""[{}]""".format(re.escape(punctuation_chars)),
]
prefixes = nlp.Defaults.prefixes
suffixes = nlp.Defaults.suffixes
# compile
infix_re = compile_infix_regex(infixes)
prefix_re = compile_prefix_regex(prefixes)
suffix_re = compile_suffix_regex(suffixes)
# Default exceptions could be extended later
tokenizer_exceptions = nlp.Defaults.tokenizer_exceptions.copy()
# now create this
tokenizer = Tokenizer(
nlp.vocab,
tokenizer_exceptions,
prefix_search=prefix_re.search,
suffix_search=suffix_re.search,
infix_finditer=infix_re.finditer,
token_match=nlp.tokenizer.token_match,
)
return tokenizer
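# Minimal usage sketch (the pipeline name "en_core_web_sm" is an illustrative assumption;
# any loaded spacy Language object should work):
#
#   import spacy
#   nlp = spacy.load("en_core_web_sm")
#   nlp.tokenizer = create_medspacy_tokenizer(nlp)
#   doc = nlp("Patient c/o chf+cp on 1.5 mg daily")
#   print([token.text for token in doc])
#   # 'c', '/', 'o' and 'chf', '+', 'cp' are split apart, while '1.5' stays intact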
|
11522647
|
import os
import sys
import torch
from ncc import LOGGER
from ncc import tasks
from ncc.utils import checkpoint_utils
from ncc.utils import utils
from ncc.utils.file_ops.yaml_io import load_yaml
from ncc.utils.logging import progress_bar
def main(args):
return _main(args, sys.stdout)
def _main(args, output_file):
if args['dataset']['max_tokens'] is None and args['dataset']['max_sentences'] is None:
args['dataset']['max_tokens'] = 12000
use_cuda = torch.cuda.is_available() and not args['common']['cpu']
if use_cuda:
device = os.environ.get('CUDA_VISIBLE_DEVICES', [0])[0]  # get first device as default
torch.cuda.set_device(f'cuda:{device}')
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args['dataset']['gen_subset'], shuffle=False)
# Load ensemble
LOGGER.info('loading model(s) from {}'.format(args['eval']['path']))
models, _model_args = checkpoint_utils.load_model_ensemble(
utils.split_paths(args['eval']['path']),
arg_overrides=eval(args['eval']['model_overrides']),
task=task,
)
# Optimize ensemble for generation
for model in models:
if _model_args['common']['fp16']:
model.half()
if use_cuda:
model.cuda()
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args['dataset']['gen_subset']),
max_tokens=args['dataset']['max_tokens'],
max_sentences=args['eval']['max_sentences_eval'],
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=_model_args['dataset']['skip_invalid_size_inputs_valid_test'],
required_batch_size_multiple=_model_args['dataset']['required_batch_size_multiple'],
num_shards=_model_args['dataset']['num_shards'],
shard_id=_model_args['dataset']['shard_id'],
num_workers=_model_args['dataset']['num_workers'],
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=_model_args['common']['log_format'],
log_interval=_model_args['common']['log_interval'],
default_log_format=('tqdm' if not _model_args['common']['no_progress_bar'] else 'none'),
)
sequence_completor = task.build_completor([model], args)
accuracy = {'all': 0.}
mrr = {'all': 0.}
sample_num = {'all': 0.}
if task.dataset('test').attrs is not None:
for attr in task.dataset('test').attrs:
accuracy[attr] = 0.
mrr[attr] = 0.
sample_num[attr] = 0
def _eval(lprobs, target, idx, num):
with torch.no_grad():
lprobs = lprobs[idx]
target = target[idx]
accuracy = (torch.argmax(lprobs, dim=-1) == target).sum().float().item()
# Ref: Code Prediction by Feeding Trees to Transformers
# With this practical perspective and for ease of computation, we only consider rank_i <= 10 for each
# location i (all rank_i > 10 will have a score of 0).
ranks = (lprobs >= lprobs[:, target].diag().unsqueeze(dim=-1)).sum(-1)
mrr = 1. / ranks
mrr[ranks > 10] = 0.
mrr = mrr.sum().float().item()
return accuracy, mrr, num
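    # Worked example (assuming no ties): if the ground-truth token has the 3rd-highest
    # probability at some position, ranks == 3 there and that position contributes 1/3 to
    # the MRR sum; positions ranked worse than 10 contribute 0.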
for sample in progress:
torch.cuda.empty_cache()
sample = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in sample:
continue
with torch.no_grad():
net_output = sequence_completor.generate([model], sample, prefix_tokens=None)
# lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = torch.softmax(net_output[0], dim=-1)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1)
# all
# ignore pad and unk
idx = sample['net_input']['src_tokens'].view(-1) != task.target_dictionary.pad()
idx[sample['target'].view(-1) == task.target_dictionary.unk()] = 0
# ignore overlapping tokens
max_len = sample['target'].size(-1)
for i, ext_i in enumerate(sample['extends']):
idx[i * max_len:i * max_len + ext_i] = 0
batch_acc, batch_mrr, batch_num = _eval(lprobs, target, idx, num=idx.sum().item())
accuracy['all'] += batch_acc
mrr['all'] += batch_mrr
sample_num['all'] += batch_num
# other attrs
if sample['attr_masks'] is not None:
for attr, attr_idx in sample['attr_masks'].items():
# pick out attr_idx who are not unk/pad
attr_idx = attr_idx[idx[attr_idx].tolist()]
if len(attr_idx) > 0:
batch_acc, batch_mrr, batch_num = _eval(lprobs, target, attr_idx, num=attr_idx.size)
accuracy[attr] += batch_acc
mrr[attr] += batch_mrr
sample_num[attr] += batch_num
for attr in accuracy.keys():
avg_acc = round(accuracy[attr] / sample_num[attr], 6) if sample_num[attr] > 0. else None
avg_mrr = round(mrr[attr] / sample_num[attr], 6) if sample_num[attr] > 0. else None
print('[{}] tokens, accuracy: {}, MRR: {}'.format(attr, avg_acc, avg_mrr))
def cli_main():
import argparse
parser = argparse.ArgumentParser(
description="Downloading/Decompressing CodeSearchNet dataset(s) or Tree-Sitter Library(ies)")
parser.add_argument(
"--yaml_file", "-f", type=str, help="load {yaml_file}.yml for train",
default='config/raw_py150/python'
)
args = parser.parse_args()
yaml_file = os.path.join(os.path.dirname(__file__), '{}.yml'.format(args.yaml_file))
LOGGER.info('Load arguments in {}'.format(yaml_file))
args = load_yaml(yaml_file)
LOGGER.info(args)
main(args)
if __name__ == '__main__':
cli_main()
|
11522770
|
import numpy as np
import subprocess
import os
import uuid
import cloudpickle
import multiprocessing
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
LINE_WIDTH = 0.25
SIAY = 60 * 60 * 24 * 365.25
USE_N_CORES = 30
# Read Ben's data for Cascadia
pts, _tris, t, slip_all, state_all = np.load("data_for_brendan.npy")
dt_vec = np.diff(t)
slip_magnitude_all = np.sqrt(
slip_all[:, :, 0] ** 2 + slip_all[:, :, 1] ** 2 + slip_all[:, :, 2] ** 2
)
slip_diff_all = np.diff(slip_magnitude_all, axis=0)
slip_rate_all = slip_diff_all / dt_vec[:, np.newaxis]
slip_rate_log_all = np.log10(np.abs(slip_rate_all))
def f(idx, filepath):
# idx = idx + 500
plt.figure(figsize=(6, 6))
# Show histories for each particle
for i in range(0, slip_rate_log_all.shape[1]):
x_history = slip_rate_log_all[0 : idx + 1, i]
y_history = state_all[0 : idx + 1, i]
plt.plot(
x_history,
y_history,
"-k",
linewidth=0.25,
color=[0.90, 0.90, 0.90],
alpha=0.1,
)
plt.scatter(
slip_rate_log_all[idx, :],
state_all[idx, :],
c=np.abs(pts[:, 2]),
s=10,
alpha=1.0,
edgecolors="k",
linewidths=LINE_WIDTH,
zorder=30,
cmap=plt.get_cmap("plasma"),
)
tt = t[idx] / t[-1] * 14
x_fill = np.array([-14, -14 + tt, -14 + tt, -14])
y_fill = np.array([0.79, 0.79, 0.8, 0.8])
plt.fill(x_fill, y_fill, "grey")
plt.xlabel("log(v)")
plt.ylabel("state")
plt.xlim([-14, 0])
plt.ylim([0.4, 0.8])
plt.xticks([-14, -7, 0])
plt.yticks([0.4, 0.6, 0.8])
plt.title("t = " + "{:010.9}".format(t[idx] / SIAY), fontsize=10)
plt.savefig(filepath, dpi=300)
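# NOTE: make_video() is called in main() below but is not defined in this file. The sketch
# that follows is an assumption, not the original implementation: it renders every frame
# with the supplied plotting function (in parallel, which is presumably what the otherwise
# unused multiprocessing import and USE_N_CORES constant are for) and stitches the PNGs
# with ffmpeg. The frame directory layout and ffmpeg flags are illustrative.
def make_video(name, n_frames, frame_fn):
    frame_dir = name + "_frames"
    os.makedirs(frame_dir, exist_ok=True)
    # frame_fn(idx, filepath) draws and saves a single frame.
    jobs = [
        (i, os.path.join(frame_dir, "frame_{:05d}.png".format(i))) for i in range(n_frames)
    ]
    with multiprocessing.Pool(USE_N_CORES) as pool:
        pool.starmap(frame_fn, jobs)
    # Assumes ffmpeg is available on PATH.
    subprocess.run(
        [
            "ffmpeg", "-y", "-framerate", "30",
            "-i", os.path.join(frame_dir, "frame_%05d.png"),
            "-pix_fmt", "yuv420p", name + ".mp4",
        ],
        check=True,
    )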
def main():
make_video("vid_" + uuid.uuid4().hex, 1998, f)
if __name__ == "__main__":
main()
|
11522771
|
from __future__ import annotations
import ast
from functools import partial
from flake8_pie.base import Error
from flake8_pie.utils import is_if_test_func_call
def pie788_no_bool_condition(node: ast.If | ast.IfExp, errors: list[Error]) -> None:
if is_if_test_func_call(node=node, func_name="bool"):
errors.append(PIE788(lineno=node.test.lineno, col_offset=node.test.col_offset))
PIE788 = partial(
Error, message="PIE788: no-bool-condition: Remove unnecessary bool() call."
)
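# Illustrative snippets this check is meant to flag (examples are assumptions, not taken
# from the project's test suite):
#
#   if bool(user.is_active):          # PIE788: the bool() call is unnecessary
#       ...
#   status = 1 if bool(flag) else 0   # an ast.IfExp with a bool() test is flagged too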
|
11522813
|
l=int(input('Enter size of the array: '))
a=[]
print("Enter",l,"elements")
for i in range(l):
a.append(int(input()))
x=[]
i=0
n=1
while(i<=l and n<l+1):
if sum(a[i:i+n])==0:
x.append(a[i:i+n])
i+=1
if(i+n==l+1):
n+=1
i=0
print("Output:",len(x))
|
11522880
|
import pytest
import lemoncheesecake.api as lcc
def test_log_invalid_argument():
with pytest.raises(TypeError, match="got int"):
lcc.log_debug(1)
with pytest.raises(TypeError, match="got int"):
lcc.log_info(1)
with pytest.raises(TypeError, match="got int"):
lcc.log_warning(1)
with pytest.raises(TypeError, match="got int"):
lcc.log_error(1)
def test_log_check_invalid_arguments():
with pytest.raises(TypeError, match="got int"):
lcc.log_check(1, True)
with pytest.raises(TypeError, match="got str"):
lcc.log_check("foo", "bar")
with pytest.raises(TypeError, match="got int"):
lcc.log_check("foo", True, 1)
def test_prepare_attachment_invalid_arguments():
with pytest.raises(TypeError, match="got int"):
lcc.save_attachment_content("foo", "foo.txt", 1)
def test_set_step_invalid_argument():
with pytest.raises(TypeError, match="got int"):
lcc.set_step(1)
def test_log_url_invalid_argument():
with pytest.raises(TypeError, match="got int"):
lcc.log_url(1, "foo")
with pytest.raises(TypeError, match="got int"):
lcc.log_url("http://www.example.com", 1)
def test_add_report_info_invalid_argument():
with pytest.raises(TypeError, match="got int"):
lcc.add_report_info(1, "bar")
with pytest.raises(TypeError, match="got int"):
lcc.add_report_info("foo", 1)
|
11522897
|
import os
import warnings
from typing import Optional, Union
import cv2
import imageio
import numpy as np
import torch
from ..geometry.geometryutils import relative_transformation
from torch.utils import data
from . import datautils
__all__ = ["ICL"]
class ICL(data.Dataset):
r"""A torch Dataset for loading in `the ICL-NUIM dataset <https://www.doc.ic.ac.uk/~ahanda/VaFRIC/iclnuim.html>`_.
Will fetch sequences of rgb images, depth maps, intrinsics matrix, poses, frame to frame relative transformations
(with first frame's pose as the reference transformation), names of frames. Uses the
`TUM RGB-D Compatible PNGs` files and `Global_RT_Trajectory_GT` from
`here <https://www.doc.ic.ac.uk/~ahanda/VaFRIC/iclnuim.html>`_. Expects the following folder
structure for the ICL dataset:
.. code-block::
| ├── ICL
| │ ├── living_room_traj0_frei_png
| │ │ ├── depth/
| │ │ ├── rgb/
| │ │ ├── associations.txt
| │ │ └── livingRoom0n.gt.sim
| │ ├── living_room_traj1_frei_png
| │ │ ├── depth/
| │ │ ├── rgb/
| │ │ ├── associations.txt
| │ │ └── livingRoom1n.gt.sim
| │ ├── living_room_traj2_frei_png
| │ │ ├── depth/
| │ │ ├── rgb/
| │ │ ├── associations.txt
| │ │ └── livingRoom2n.gt.sim
| │ ├── living_room_traj3_frei_png
| │ │ ├── depth/
| │ │ ├── rgb/
| │ │ ├── associations.txt
| │ │ └── livingRoom3n.gt.sim
| │ ├── living_room_trajX_frei_png
| │ │ ├── depth/
| │ │ ├── rgb/
| │ │ ├── associations.txt
| │ │ └── livingRoomXn.gt.sim
|
Example of sequence creation from frames with `seqlen=4`, `dilation=1`, `stride=3`, and `start=2`:
.. code-block::
sequence0
┎───────────────┲───────────────┲───────────────┒
| | | |
frame0 frame1 frame2 frame3 frame4 frame5 frame6 frame7 frame8 frame9 frame10 frame11 ...
| | | |
└───────────────┵───────────────┵────────────────┚
sequence1
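With these settings, sequence0 would consist of frames 2, 4, 6 and 8, and sequence1 of frames 5, 7, 9 and 11.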
Args:
basedir (str): Path to the base directory containing the `living_room_trajX_frei_png/` directories from
ICL-NUIM. Each trajectory subdirectory is assumed to contain `depth/`, `rgb/`, `associations.txt` and
`livingRoom0n.gt.sim`.
.. code-block::
├── living_room_trajX_frei_png
│ ├── depth/
│ ├── rgb/
│ ├── associations.txt
│ └── livingRoomXn.gt.sim
trajectories (str or tuple of str or None): Trajectories to use from "living_room_traj0_frei_png",
"living_room_traj1_frei_png", "living_room_traj2_frei_png" or "living_room_traj3_frei_png".
Can be path to a `.txt` file where each line is a trajectory name (`living_room_traj0_frei_png`),
a tuple of trajectory names, or None to use all trajectories. Default: None
seqlen (int): Number of frames to use for each sequence of frames. Default: 4
dilation (int or None): Number of (original trajectory's) frames to skip between two consecutive
frames in the extracted sequence. See above example if unsure.
If None, will set `dilation = 0`. Default: None
stride (int or None): Number of frames between the first frames of two consecutive extracted sequences.
See above example if unsure. If None, will set `stride = seqlen * (dilation + 1)`
(non-overlapping sequences). Default: None
start (int or None): Index of the frame from which to start extracting sequences for every trajectory.
If None, will start from the first frame. Default: None
end (int): Index of the frame at which to stop extracting sequences for every trajectory.
If None, will continue extracting frames until the end of the trajectory. Default: None
height (int): Spatial height to resize frames to. Default: 480
width (int): Spatial width to resize frames to. Default: 640
channels_first (bool): If True, will use channels first representation :math:`(B, L, C, H, W)` for images
`(batchsize, sequencelength, channels, height, width)`. If False, will use channels last representation
:math:`(B, L, H, W, C)`. Default: False
normalize_color (bool): Normalize color to range :math:`[0, 1]` or leave it at range :math:`[0, 255]`.
Default: False
return_depth (bool): Determines whether to return depths. Default: True
return_intrinsics (bool): Determines whether to return intrinsics. Default: True
return_pose (bool): Determines whether to return poses. Default: True
return_transform (bool): Determines whether to return transforms w.r.t. initial pose being transformed to be
identity. Default: True
return_names (bool): Determines whether to return sequence names. Default: True
Examples::
>>> dataset = ICL(
basedir="ICL-data/",
trajectories=("living_room_traj0_frei_png", "living_room_traj1_frei_png")
)
>>> loader = data.DataLoader(dataset=dataset, batch_size=4)
>>> colors, depths, intrinsics, poses, transforms, names = next(iter(loader))
"""
def __init__(
self,
basedir: str,
trajectories: Union[tuple, str, None] = None,
seqlen: int = 4,
dilation: Optional[int] = None,
stride: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
height: int = 480,
width: int = 640,
channels_first: bool = False,
normalize_color: bool = False,
*,
return_depth: bool = True,
return_intrinsics: bool = True,
return_pose: bool = True,
return_transform: bool = True,
return_names: bool = True,
):
super(ICL, self).__init__()
basedir = os.path.normpath(basedir)
self.height = height
self.width = width
self.height_downsample_ratio = float(height) / 480
self.width_downsample_ratio = float(width) / 640
self.channels_first = channels_first
self.normalize_color = normalize_color
self.return_depth = return_depth
self.return_intrinsics = return_intrinsics
self.return_pose = return_pose
self.return_transform = return_transform
self.return_names = return_names
self.load_poses = self.return_pose or self.return_transform
if not isinstance(seqlen, int):
raise TypeError("seqlen must be int. Got {0}.".format(type(seqlen)))
if not (isinstance(stride, int) or stride is None):
raise TypeError("stride must be int or None. Got {0}.".format(type(stride)))
if not (isinstance(dilation, int) or dilation is None):
raise TypeError(
"dilation must be int or None. Got {0}.".format(type(dilation))
)
dilation = dilation if dilation is not None else 0
stride = stride if stride is not None else seqlen * (dilation + 1)
self.seqlen = seqlen
self.stride = stride
self.dilation = dilation
if seqlen < 0:
raise ValueError("seqlen must be positive. Got {0}.".format(seqlen))
if dilation < 0:
raise ValueError('"dilation" must be positive. Got {0}.'.format(dilation))
if stride < 0:
raise ValueError("stride must be positive. Got {0}.".format(stride))
if not (isinstance(start, int) or start is None):
raise TypeError("start must be int or None. Got {0}.".format(type(start)))
if not (isinstance(end, int) or end is None):
raise TypeError("end must be int or None. Got {0}.".format(type(end)))
start = start if start is not None else 0
self.start = start
self.end = end
if start < 0:
raise ValueError("start must be positive. Got {0}.".format(stride))
if not (end is None or end > start):
raise ValueError(
"end ({0}) must be None or greater than start ({1})".format(end, start)
)
# preprocess trajectories to be a tuple or None
valid_trajectory_dirs = [
f
for f in os.listdir(basedir)
if os.path.isdir(os.path.join(basedir, f))
and f[:16] == "living_room_traj"
and f[-9:] == "_frei_png"
]
if len(valid_trajectory_dirs) == 0:
msg = "basedir ({0}) should contain trajectory folders with the following naming ".format(
basedir
)
msg += 'convention: "living_room_trajX_frei_png". Found 0 folders with this naming convention.'
raise ValueError(msg)
if isinstance(trajectories, str):
if os.path.isfile(trajectories):
with open(trajectories, "r") as f:
trajectories = tuple(f.read().split("\n"))
valid_trajectory_dirs = list(trajectories)
else:
raise ValueError(
"incorrect filename: {} doesn't exist".format(trajectories)
)
elif not (trajectories is None or isinstance(trajectories, tuple)):
msg = '"trajectories" should either be path to .txt file or tuple of trajectory names or None, '
msg += " but was of type {0} instead"
raise TypeError(msg.format(type(trajectories)))
if isinstance(trajectories, tuple):
if len(trajectories) == 0:
raise ValueError(
'"trajectories" must have atleast one element. Got len(trajectories)=0'
)
msg = '"trajectories" should only contain trajectory folder names of the following convention: '
msg += '"living_room_trajX_frei_png". It contained: {0}.'
for t in trajectories:
if not (t[:16] == "living_room_traj" and t[-9:] == "_frei_png"):
raise ValueError(msg.format(t))
valid_trajectory_dirs = list(trajectories)
# Check if the ICL folder structure is correct: if trajectories is not None, basedir should contain all trajectory paths.
# It should also contain at least one trajectory path.
trajectory_paths = []
dirmsg = "ICL folder should look something like:\n\n| ├── basedir\n"
for i in range(4):
dirmsg += (
"| │ ├── living_room_traj{0}_frei_png\n| │ │ ├── depth/\n".format(
str(i)
)
)
dirmsg += "| │ │ ├── rgb/\n| │ │ ├── associations.txt\n"
dirmsg += "| │ │ └── livingRoom{0}n.gt.sim\n".format(str(i))
for item in os.listdir(basedir):
if (
os.path.isdir(os.path.join(basedir, item))
and item in valid_trajectory_dirs
):
trajectory_paths.append(os.path.join(basedir, item))
if len(trajectory_paths) == 0:
raise ValueError(
'Incorrect folder structure in basedir ("{0}"). '.format(basedir)
+ dirmsg
)
if trajectories is not None and len(trajectory_paths) != len(trajectories):
msg = '"trajectories" contains trajectories not available in basedir:\n'
msg += "trajectories contains: " + ", ".join(trajectories) + "\n"
msg += (
"basedir contains: "
+ ", ".join(list(map(os.path.basename, trajectory_paths)))
+ "\n"
)
raise ValueError(msg.format(basedir) + dirmsg)
# get association and pose file paths
associationsfiles, posesfiles = [], []
for trajectory_path in trajectory_paths:
associationsfile = os.path.join(trajectory_path, "associations.txt")
if not os.path.isfile(associationsfile):
msg = 'Missing associations file ("associations.txt") in {0}. '.format(
trajectory_path
)
raise ValueError(msg + dirmsg)
associationsfiles.append(associationsfile)
if self.load_poses:
trajectory_num = trajectory_path[
trajectory_path.index("living_room_traj") + 16 :
].split("_")[0]
posesfile = os.path.join(
trajectory_path, "livingRoom{0}n.gt.sim".format(trajectory_num)
)
if not os.path.isfile(posesfile):
msg = 'Missing ground truth poses file ("{0}") in {1}. '.format(
posesfile, basedir
)
raise ValueError(msg + dirmsg)
posesfiles.append(posesfile)
# Get a list of all color, depth, pose, label and intrinsics files.
colorfiles, depthfiles, posemetas, framenames = [], [], [], []
idx = np.arange(seqlen) * (dilation + 1)
for file_num, associationsfile in enumerate(associationsfiles):
parentdir = os.path.dirname(associationsfile)
splitpath = associationsfile.split(os.sep)
trajectory_name = splitpath[-2]
if trajectories is not None:
if trajectory_name not in trajectories:
continue
traj_colorfiles, traj_depthfiles = [], []
traj_poselinenums, traj_framenames = [], []
with open(associationsfile, "r") as f:
lines = f.readlines()
if self.end is None:
end = len(lines)
if end > len(lines):
msg = "end was larger than number of frames in trajectory: {0} > {1} (trajectory: {2})"
warnings.warn(msg.format(end, len(lines), trajectory_name))
# traj0 is missing a pose in livingRoom0n.gt.sim, thus we remove one of the frames
if trajectory_name == "living_room_traj0_frei_png":
lines = lines[:-1]
lines = lines[start:end]
if self.load_poses:
posesfile = posesfiles[file_num]
with open(posesfile, "r") as f:
len_posesfile = sum(1 for line in f)
for line_num, line in enumerate(lines):
line = line.strip().split()
msg = "incorrect reading from ICL associations"
if line[3][:3] != "rgb":
raise ValueError(msg)
traj_colorfiles.append(
os.path.normpath(os.path.join(parentdir, line[3]))
)
if line[1][:5] != "depth":
raise ValueError(msg)
traj_depthfiles.append(
os.path.normpath(os.path.join(parentdir, line[1]))
)
if self.load_poses:
if line_num * 4 > len_posesfile:
msg = '{0}th pose should start from line {1} of file "{2}", but said file has only {3} lines.'
raise ValueError(
msg.format(
line_num,
line_num * 4,
os.path.join(*posesfile.split(os.sep)[-2:]),
len_posesfile,
)
)
traj_poselinenums.append(line_num * 4)
traj_framenames.append(
os.path.join(trajectory_name, line[1][6:].split(".")[0])
)
traj_len = len(traj_colorfiles)
for start_ind in range(0, traj_len, stride):
if (start_ind + idx[-1]) >= traj_len:
break
inds = start_ind + idx
colorfiles.append([traj_colorfiles[i] for i in inds])
depthfiles.append([traj_depthfiles[i] for i in inds])
framenames.append(", ".join([traj_framenames[i] for i in inds]))
if self.load_poses:
posemetas.append(
{
"file": posesfile,
"line_nums": [traj_poselinenums[i] for i in inds],
}
)
self.num_sequences = len(colorfiles)
# Class members to store the list of valid filepaths.
self.colorfiles = colorfiles
self.depthfiles = depthfiles
self.posemetas = posemetas
self.framenames = framenames
# Camera intrinsics matrix for ICL dataset
intrinsics = torch.tensor(
[[481.20, 0, 319.5, 0], [0, -480.0, 239.5, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
).float()
self.intrinsics = datautils.scale_intrinsics(
intrinsics, self.height_downsample_ratio, self.width_downsample_ratio
).unsqueeze(0)
# Scaling factor for depth images
self.scaling_factor = 5000.0
def __len__(self):
r"""Returns the length of the dataset."""
return self.num_sequences
def __getitem__(self, idx: int):
r"""Returns the data from the sequence at index idx.
Returns:
color_seq (torch.Tensor): Sequence of rgb images of each frame
depth_seq (torch.Tensor): Sequence of depths of each frame
pose_seq (torch.Tensor): Sequence of poses of each frame
transform_seq (torch.Tensor): Sequence of transformations between each frame in the sequence and the
previous frame. Transformations are w.r.t. the first frame in the sequence having identity pose
(relative transformations with first frame's pose as the reference transformation). First
transformation in the sequence will always be `torch.eye(4)`.
intrinsics (torch.Tensor): Intrinsics for the current sequence
framename (str): Name of the frame
Shape:
- color_seq: :math:`(L, H, W, 3)` if `channels_first` is False, else :math:`(L, 3, H, W)`. `L` denotes
sequence length.
- depth_seq: :math:`(L, H, W, 1)` if `channels_first` is False, else :math:`(L, 1, H, W)`. `L` denotes
sequence length.
- pose_seq: :math:`(L, 4, 4)` where `L` denotes sequence length.
- transform_seq: :math:`(L, 4, 4)` where `L` denotes sequence length.
- intrinsics: :math:`(1, 4, 4)`
"""
# Read in the color, depth, pose, label and intrinstics info.
color_seq_path = self.colorfiles[idx]
depth_seq_path = self.depthfiles[idx]
pose_seq_meta = self.posemetas[idx] if self.load_poses else None
framename = self.framenames[idx]
color_seq, depth_seq, pose_seq, label_seq = [], [], [], []
for i in range(self.seqlen):
color = np.asarray(imageio.imread(color_seq_path[i]), dtype=float)
color = self._preprocess_color(color)
color = torch.from_numpy(color)
color_seq.append(color)
if self.return_depth:
depth = np.asarray(imageio.imread(depth_seq_path[i]), dtype=np.int64)
depth = self._preprocess_depth(depth)
depth = torch.from_numpy(depth)
depth_seq.append(depth)
if self.load_poses:
poses = self._loadPoses(pose_seq_meta["file"], pose_seq_meta["line_nums"])
pose_seq = [torch.from_numpy(pose) for pose in poses]
output = []
color_seq = torch.stack(color_seq, 0).float()
output.append(color_seq)
if self.return_depth:
depth_seq = torch.stack(depth_seq, 0).float()
output.append(depth_seq)
if self.return_intrinsics:
intrinsics = self.intrinsics
output.append(intrinsics)
if self.return_pose:
pose_seq = torch.stack(pose_seq, 0).float()
pose_seq = self._preprocess_poses(pose_seq)
output.append(pose_seq)
if self.return_transform:
transform_seq = datautils.poses_to_transforms(poses)
transform_seq = [torch.from_numpy(x).float() for x in transform_seq]
transform_seq = torch.stack(transform_seq, 0).float()
output.append(transform_seq)
if self.return_names:
output.append(framename)
return tuple(output)
def _preprocess_color(self, color: np.ndarray):
r"""Preprocesses the color image by resizing to :math:`(H, W, C)`, (optionally) normalizing values to
:math:`[0, 1]`, and (optionally) using channels first :math:`(C, H, W)` representation.
Args:
color (np.ndarray): Raw input rgb image
Returns:
np.ndarray: Preprocessed rgb image
Shape:
- Input: :math:`(H_\text{old}, W_\text{old}, C)`
- Output: :math:`(H, W, C)` if `self.channels_first == False`, else :math:`(C, H, W)`.
"""
color = cv2.resize(
color, (self.width, self.height), interpolation=cv2.INTER_LINEAR
)
if self.normalize_color:
color = datautils.normalize_image(color)
if self.channels_first:
color = datautils.channels_first(color)
return color
def _preprocess_depth(self, depth: np.ndarray):
r"""Preprocesses the depth image by resizing, adding channel dimension, and scaling values to meters. Optionally
converts depth from channels last :math:`(H, W, 1)` to channels first :math:`(1, H, W)` representation.
Args:
depth (np.ndarray): Raw depth image
Returns:
np.ndarray: Preprocessed depth
Shape:
- depth: :math:`(H_\text{old}, W_\text{old})`
- Output: :math:`(H, W, 1)` if `self.channels_first == False`, else :math:`(1, H, W)`.
"""
depth = cv2.resize(
depth.astype(float),
(self.width, self.height),
interpolation=cv2.INTER_NEAREST,
)
depth = np.expand_dims(depth, -1)
if self.channels_first:
depth = datautils.channels_first(depth)
return depth / self.scaling_factor
def _preprocess_poses(self, poses: torch.Tensor):
r"""Preprocesses the poses by setting first pose in a sequence to identity and computing the relative
homogenous transformation for all other poses.
Args:
poses (torch.Tensor): Pose matrices to be preprocessed
Returns:
Output (torch.Tensor): Preprocessed poses
Shape:
- poses: :math:`(L, 4, 4)` where :math:`L` denotes sequence length.
- Output: :math:`(L, 4, 4)` where :math:`L` denotes sequence length.
"""
return relative_transformation(
poses[0].unsqueeze(0).repeat(poses.shape[0], 1, 1),
poses,
orthogonal_rotations=False,
)
def _loadPoses(self, pose_path, start_lines):
r"""Loads poses from groundtruth pose text files and returns the poses
as a list of numpy arrays.
Args:
pose_path (str): The path to groundtruth pose text file.
start_lines (list of ints):
Returns:
poses (list of np.array): List of ground truth poses in
np.array format. Each np.array has a shape of [4, 4] if
homogen_coord is True, or a shape of [3, 4] otherwise.
"""
pose = []
poses = []
parsing_pose = False
with open(pose_path, "r") as f:
lines = f.readlines()
for i, line in enumerate(lines):
if not (i in start_lines or parsing_pose):
continue
parsing_pose = True
line = line.strip().split()
if len(line) != 4:
msg = "Faulty poses file: Expected line {0} of the poses file {1} to contain pose matrix values, "
msg += 'but it didn\'t. You can download "Global_RT_Trajectory_GT" from here:\n'
msg += "https://www.doc.ic.ac.uk/~ahanda/VaFRIC/iclnuim.html"
raise ValueError(msg.format(i, pose_path))
pose.append(line)
if len(pose) == 3:
pose.append([0.0, 0.0, 0.0, 1.0])
poses.append(np.array(pose, dtype=np.float32))
pose = []
parsing_pose = False
return poses
|
11522920
|
import json
import requests
from fabric.colors import red
def publish_deploy_event(name, component, environment):
url = environment.fab_settings_config.deploy_event_url
if not url:
return
token = environment.get_secret("deploy_event_token")
if not token:
print(red(f"skipping {name} event: deploy_event_token secret not set"))
return
headers = {
"Authorization": f"token {token}",
"Accept": "application/vnd.github.v3+json",
}
data = json.dumps({
"event_type": name,
"client_payload": {
"component": component,
"environment": environment.meta_config.deploy_env,
},
})
response = requests.post(url, data=data, headers=headers)
if 200 <= response.status_code < 300:
print(f"triggered {name} event")
else:
print(red(f"{name} event status: {response.status_code}"))
|
11522951
|
import kitten
def test_chunks():
assert list(kitten.chunks([1, 2, 3, 4, 5, 6, 7, 8], 3)) == [
[1, 2, 3],
[4, 5, 6],
[7, 8],
]
|
11522957
|
import pytest
import ptf.testutils as testutils
import logging
import pprint
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_rand_selected_tor_m # lgtm[py/unused-import]
from tests.common.fixtures.duthost_utils import ports_list, utils_vlan_ports_list
from tests.common.utilities import wait_until
from tests.common.helpers.snmp_helpers import get_snmp_facts
logger = logging.getLogger(__name__)
pytestmark = [
pytest.mark.topology('t0')
]
# Use original ports instead of sub interfaces for ptfadapter if it's t0-backend
PTF_PORT_MAPPING_MODE = "use_orig_interface"
DUMMY_MAC_PREFIX = "02:11:22:33"
def get_fdb_dynamic_mac_count(duthost):
res = duthost.command('show mac')
logger.info('"show mac" output on DUT:\n{}'.format(pprint.pformat(res['stdout_lines'])))
total_mac_count = 0
for l in res['stdout_lines']:
if "dynamic" in l.lower() and DUMMY_MAC_PREFIX in l.lower():
total_mac_count += 1
return total_mac_count
def fdb_table_has_no_dynamic_macs(duthost):
return (get_fdb_dynamic_mac_count(duthost) == 0)
@pytest.fixture(scope="module", autouse=True)
def fdb_cleanup(duthost):
""" cleanup FDB before test run """
if fdb_table_has_no_dynamic_macs(duthost):
return
else:
duthost.command('sonic-clear fdb all')
assert wait_until(20, 2, 0, fdb_table_has_no_dynamic_macs, duthost), "FDB Table Cleanup failed"
def build_icmp_packet(vlan_id, src_mac="00:22:00:00:00:02", dst_mac="ff:ff:ff:ff:ff:ff",
src_ip="192.168.0.1", dst_ip="192.168.0.2", ttl=64):
pkt = testutils.simple_icmp_packet(pktlen=100 if vlan_id == 0 else 104,
eth_dst=dst_mac,
eth_src=src_mac,
dl_vlan_enable=False if vlan_id == 0 else True,
vlan_vid=vlan_id,
vlan_pcp=0,
ip_src=src_ip,
ip_dst=dst_ip,
ip_ttl=ttl)
return pkt
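# Illustrative call (values are examples): build_icmp_packet(1000, "02:11:22:33:00:05")
# returns a 104-byte broadcast ICMP packet tagged with VLAN ID 1000; with vlan_id=0 the
# packet is untagged and 100 bytes long.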
@pytest.mark.bsl
def test_snmp_fdb_send_tagged(ptfadapter, utils_vlan_ports_list, toggle_all_simulator_ports_to_rand_selected_tor_m, duthost, localhost, creds_all_duts):
"""
Send tagged packets from each port.
Verify SNMP FDB entry
"""
cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts']
config_portchannels = cfg_facts.get('PORTCHANNEL', {})
send_cnt = 0
send_portchannels_cnt = 0
for vlan_port in utils_vlan_ports_list:
port_index = vlan_port["port_index"][0]
for permit_vlanid in map(int, vlan_port["permit_vlanid"]):
dummy_mac = '{}:{:02x}:{:02x}'.format(DUMMY_MAC_PREFIX, (port_index>>8)&0xFF, port_index&0xFF)
pkt = build_icmp_packet(permit_vlanid, dummy_mac)
logger.info("Send tagged({}) packet from {} ...".format(permit_vlanid, port_index))
logger.info(pkt.sprintf("%Ether.src% %IP.src% -> %Ether.dst% %IP.dst%"))
testutils.send(ptfadapter, port_index, pkt)
send_cnt += 1
if vlan_port['dev'] in config_portchannels:
send_portchannels_cnt += 1
# Flush dataplane
ptfadapter.dataplane.flush()
hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
snmp_facts = get_snmp_facts(localhost, host=hostip, version="v2c", community=creds_all_duts[duthost]["snmp_rocommunity"], wait=True)['ansible_facts']
assert 'snmp_fdb' in snmp_facts
assert 'snmp_interfaces' in snmp_facts
dummy_mac_cnt = 0
recv_portchannels_cnt = 0
for key in snmp_facts['snmp_fdb']:
# key is string: vlan.mac
items = key.split('.')
if len(items) != 2:
continue
logger.info("FDB entry: {}".format(items))
if DUMMY_MAC_PREFIX in items[1]:
dummy_mac_cnt += 1
idx = str(snmp_facts['snmp_fdb'][key])
assert idx in snmp_facts['snmp_interfaces']
assert 'name' in snmp_facts['snmp_interfaces'][idx]
if snmp_facts['snmp_interfaces'][idx]['name'] in config_portchannels:
recv_portchannels_cnt += 1
assert send_cnt == dummy_mac_cnt, "Dummy MAC count does not match"
assert send_portchannels_cnt == recv_portchannels_cnt, "Portchannels count does not match"
|
11522966
|
from PyResis import propulsion_power
from D3HRE.simulation import Task, Reactive_simulation
from D3HRE.core.mission_utility import Mission
from D3HRE.core.file_reading_utility import read_route_from_gpx
from D3HRE.optimization import Constraint_mixed_objective_optimisation
from D3HRE.core.battery_models import Battery_managed
import numpy as np
import cloudpickle
ship = propulsion_power.Ship()
ship.dimension(2.8, 0.25, 0.8, 1.2, 2.8 / (0.47) ** (1 / 3), 0.613)
power_consumption_list = {'single_board_computer': {'power': [2, 10], 'duty_cycle': 0.5},
'webcam': {'power': [0.6], 'duty_cycle': 1},
'gps': {'power': [0.04, 0.4], 'duty_cycle': 0.9},
'imu': {'power': [0.67, 1.1], 'duty_cycle': 0.9},
'sonar': {'power': [0.5, 50, 0.2], 'duty_cycle': 0.5},
'ph_sensor': {'power': [0.08, 0.1], 'duty_cycle': 0.95},
'temp_sensor': {'power': [0.04], 'duty_cycle': 1},
'wind_sensor': {'power': [0.67, 1.1], 'duty_cycle': 0.5},
'servo_motors': {'power': [0.4, 1.35], 'duty_cycle': 0.5},
'radio_transmitter': {'power': [0.5, 20], 'duty_cycle': 0.2}}
config = {'load': {'prop_load': {'prop_eff': 0.7,
'sea_margin': 0.2,
'temperature': 25}},
'optimization': {'constraints': {'turbine_diameter_ratio': 1.2,
'volume_factor': 0.1,
'water_plane_coff': 0.88},
'cost': {'battery': 1, 'lpsp': 10000, 'solar': 210, 'wind': 320},
'method': {'nsga': {'cr': 0.95, 'eta_c': 10, 'eta_m': 50, 'm': 0.01},
'pso': {'eta1': 2.05,
'eta2': 2.05,
'generation': 100,
'max_vel': 0.5,
'neighb_param': 4,
'neighb_type': 2,
'omega': 0.7298,
'population': 100,
'variant': 5}},
'safe_factor': 0.2},
'simulation': {'battery': {'B0': 1,
'DOD': 0.9,
'SED': 500,
'eta_in': 0.9,
'eta_out': 0.8,
'sigma': 0.005},
'coupling': {'eff': 0.05}},
'source': {'solar': {'brl_parameters': {'a0': -5.32,
'a1': 7.28,
'b1': -0.03,
'b2': -0.0047,
'b3': 1.72,
'b4': 1.08}}},
'transducer': {'solar': {'azim': 0,
'eff': {'k_1': -0.017162,
'k_2': -0.040289,
'k_3': -0.004681,
'k_4': 0.000148,
'k_5': 0.000169,
'k_6': 5e-06},
'pitch': 0.1,
'roll': 0.1,
'stationary': False,
'loss': 0.1,
'power_density': 140,
'tacking': 0,
'tech': 'csi',
'tilt': 0},
'wind': {'power_coef': 0.3,
'thurse_coef': 0.6,
'v_in': 2,
'v_off': 45,
'v_rate': 15}}}
route_index = 3
START_DATE = '2013-01-01'
SPEED = 3
def prepare(route_index, START_DATE, SPEED):
all_routes = read_route_from_gpx('/home/tony/D3HRE_notebooks/Example data/routes.gpx')
ROUTE = np.array(all_routes[route_index])
mission = Mission(START_DATE, ROUTE, SPEED)
study_task = Task(mission, ship, power_consumption_list)
con_mix_opt = Constraint_mixed_objective_optimisation(study_task, config=config)
champion, champion_x = con_mix_opt.run()
solar_area, wind_area, battery_capacity = champion_x
battery = Battery_managed(battery_capacity, config=config)
rea_sim = Reactive_simulation(study_task, config=config)
result_df = rea_sim.result(solar_area, wind_area, battery_capacity)
resource = (result_df.wind_power + result_df.solar_power)
name = 'Route-{r}-{t}-{s}.pkl'.format(r=route_index, t=START_DATE, s=SPEED)
with open(name, 'wb') as f:
cloudpickle.dump([battery, result_df, resource], f)
if __name__ == '__main__':
route_index = 3
START_DATE = '2013-01-01'
SPEED = 3
prepare(route_index, START_DATE, SPEED)
|
11522990
|
from copy import deepcopy
from globals import *
import zones as zns
import life as lfe
import render_los
import bad_numbers
import zones
import alife
import numpy
import tiles
import maps
import logging
import time
import sys
def astar(life, start, end, zones, chunk_mode=False, terraform=None, avoid_tiles=[], avoid_chunk_types=[], map_size=MAP_SIZE):
_stime = time.time()
_path = {'start': tuple(start),
'end': tuple(end),
'olist': [tuple(start)],
'clist': [],
'segments': [],
'map': [],
'map_size': map_size,
'chunk_mode': chunk_mode}
if terraform:
_path['map_size'] = terraform['size']
else:
maps.load_cluster_at_position_if_needed(end)
if chunk_mode:
_path['start'] = (_path['start'][0]/WORLD_INFO['chunk_size'], _path['start'][1]/WORLD_INFO['chunk_size'])
_path['end'] = (_path['end'][0]/WORLD_INFO['chunk_size'], _path['end'][1]/WORLD_INFO['chunk_size'])
_path['olist'][0] = (_path['start'][0], _path['start'][1])
_path['fmap'] = numpy.zeros((_path['map_size'][1], _path['map_size'][0]), dtype=numpy.int16)
_path['gmap'] = numpy.zeros((_path['map_size'][1], _path['map_size'][0]), dtype=numpy.int16)
_path['hmap'] = numpy.zeros((_path['map_size'][1], _path['map_size'][0]), dtype=numpy.int16)
_path['pmap'] = []
_path['tmap'] = numpy.zeros((_path['map_size'][0], _path['map_size'][1]), dtype=numpy.int16)
for x in range(_path['map_size'][0]):
_path['pmap'].append([0] * _path['map_size'][1])
_path['map'] = numpy.zeros((_path['map_size'][1], _path['map_size'][0]))
_path['map'] -= 2
#KEY:
#0: Unwalkable
#1: Walkable
if terraform:
_start_chunk_key = '%s,%s' % ((start[0]/terraform['chunk_size'])*terraform['chunk_size'],
(start[1]/terraform['chunk_size'])*terraform['chunk_size'])
_end_chunk_key = '%s,%s' % ((end[0]/terraform['chunk_size'])*terraform['chunk_size'],
(end[1]/terraform['chunk_size'])*terraform['chunk_size'])
if chunk_mode:
_increment = terraform['chunk_size']
else:
_increment = 1
for y in range(0, terraform['size'][1], _increment):
for x in range(0, terraform['size'][0], _increment):
if chunk_mode:
_chunk_key = '%s,%s' % ((x/terraform['chunk_size'])*terraform['chunk_size'],
(y/terraform['chunk_size'])*terraform['chunk_size'])
if not _chunk_key in [_start_chunk_key, _end_chunk_key]:
if terraform['chunk_map'][_chunk_key]['type'] in avoid_chunk_types:
continue
_path['map'][y/terraform['chunk_size'], x/terraform['chunk_size']] = 1
else:
_map_pos = terraform['map'][x][y][2]
if _map_pos['id'] in avoid_tiles:
continue
if not (x, y) in [_path['start'], _path['end']]:
_path['map'][y, x] = 1
else:
if chunk_mode:
for y in range(MAP_SIZE[1]/WORLD_INFO['chunk_size']):
for x in range(MAP_SIZE[0]/WORLD_INFO['chunk_size']):
_chunk_key = '%s,%s' % (x*WORLD_INFO['chunk_size'],
y*WORLD_INFO['chunk_size'])
_path['map'][y, x] = 1
else:
_path['map'] = numpy.zeros((_path['map_size'][1], _path['map_size'][0]))
for z in zones:
_slice_map = WORLD_INFO['path_map'][str(z)]
_path['map'] += _slice_map
_path['map'] = _path['map'].clip(-2, 1)
_path['hmap'][_path['start'][1], _path['start'][0]] = (abs(_path['start'][0]-_path['end'][0])+abs(_path['start'][1]-_path['end'][1]))*10
_path['fmap'][_path['start'][1], _path['start'][0]] = _path['hmap'][_path['start'][1],_path['start'][0]]
return walk_path({}, _path)
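# Note on the A* bookkeeping used by walk_path/getadj/find_path below:
# olist/clist are the usual open and closed lists, gmap/hmap/fmap hold the
# per-cell g, h and f = g + h scores, and pmap stores the parent ("came from")
# cell used to rebuild the route. Straight moves cost 10 and diagonal moves 14,
# the common integer approximation of 1 and sqrt(2), and the heuristic computed
# in walk_path is the matching octile distance.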
def walk_path(life, path):
if path['map'][path['end'][1], path['end'][0]] == -2:
logging.warning('Pathfinding: Attempted to create path ending in an unpathable area.')
return False
node = path['olist'][0]
_clist = path['clist']
_olist = path['olist']
_gmap = path['gmap']
_hmap = path['hmap']
_fmap = path['fmap']
_pmap = path['pmap']
_stime = time.time()
while len(_olist):
_olist.remove(node)
if tuple(node) == path['end'][:2]:
_olist = []
break
_clist.append(node)
_lowest = {'pos': None, 'f': 9000}
for adj in getadj(path, node):
if not adj in _olist:
if abs(node[0]-adj[0])+abs(node[1]-adj[1]) == 1:
_gmap[adj[1],adj[0]] = _gmap[node[1],node[0]]+10
else:
_gmap[adj[1],adj[0]] = _gmap[node[1],node[0]]+14
xDistance = abs(adj[0]-path['end'][0])
yDistance = abs(adj[1]-path['end'][1])
if xDistance > yDistance:
_hmap[adj[1],adj[0]] = 14*yDistance + 10*(xDistance-yDistance)
else:
_hmap[adj[1],adj[0]] = 14*xDistance + 10*(yDistance-xDistance)
_fmap[adj[1],adj[0]] = _gmap[adj[1],adj[0]]+_hmap[adj[1],adj[0]]
_pmap[adj[0]][adj[1]] = node
_olist.append(adj)
for o in _olist:
if _fmap[o[1],o[0]] < _lowest['f']:
_lowest['pos'] = o
_lowest['f'] = _fmap[o[1],o[0]]
if _lowest['pos']:
node = _lowest['pos']
return find_path(path)
def getadj(path, pos, checkclist=True):
adj = []
for r in [(-1,-1),(0,-1),(1,-1),(-1,0),(1,0),(-1,1),(0,1),(1,1)]:
_x = pos[0]+r[0]
_y = pos[1]+r[1]
if _x<0 or _x>=path['map_size'][0] or _y<0 or _y>=path['map_size'][1] or path['map'][_y,_x]==-2:
continue
if (_x,_y) in path['clist'] and checkclist:
continue
adj.append((_x,_y))
return adj
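# find_path rebuilds the route by following the pmap backpointers from the end
# cell towards the start cell, prepending each step, so the returned list is
# ordered towards the end. Each entry is [x, y, map_value].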
def find_path(path):
if path['map'][path['end'][1], path['end'][0]] == -2:
return [[path['start'][0], path['start'][1], 0]]
node = path['pmap'][path['end'][0]][path['end'][1]]
_path = [[path['end'][0],path['end'][1],int(path['map'][path['end'][1],path['end'][0]])]]
_broken = False
while not tuple(node) == tuple(path['start']):
if not node:
_broken = True
break
else:
_path.insert(0,(node[0], node[1], int(path['map'][node[1], node[0]])))
path['tmap'][node[0]][node[1]] = 1
node = path['pmap'][node[0]][node[1]]
return _path
def short_path(life, start, end):
_s = time.time()
_line = render_los.draw_line(start[0], start[1], end[0], end[1])
if bad_numbers.distance(start, end)>30:
return False
if not _line:
return [start]
_line.pop(0)
for pos in _line:
if not lfe.can_traverse(life, pos):
return False
return _line
def chunk_path(life, start, end, zones):
return astar(life, start, end, zones, map_size=(MAP_SIZE[0]/WORLD_INFO['chunk_size'], MAP_SIZE[1]/WORLD_INFO['chunk_size']), chunk_mode=True)
def walk_chunk_path(life):
_existing_chunk_path = alife.brain.get_flag(life, 'chunk_path')
if _existing_chunk_path['path']:
_next_chunk = _existing_chunk_path['path'].pop(0)
_next_pos = WORLD_INFO['chunk_map']['%s,%s' % (_next_chunk[0]*WORLD_INFO['chunk_size'], _next_chunk[1]*WORLD_INFO['chunk_size'])]['pos']
return create_path(life, life['pos'], _next_pos, _existing_chunk_path['zones'], ignore_chunk_path=True)
else:
alife.brain.unflag(life, 'chunk_path')
def create_path(life, start, end, zones, ignore_chunk_path=False):
if not ignore_chunk_path:
_existing_chunk_path = alife.brain.get_flag(life, 'chunk_path')
if _existing_chunk_path:
return walk_chunk_path(life)
_shortpath = short_path(life, start, end)
if _shortpath:
return _shortpath
if len(zones) == 1 and (bad_numbers.distance(start, end) >= 100 and not ignore_chunk_path):
_chunk_path = {'path': chunk_path(life, start, end, zones),
'start': start,
'end': end,
'zones': zones}
alife.brain.flag(life, 'chunk_path', _chunk_path)
_next_pos = _chunk_path['path'][0]
_next_pos = (_next_pos[0]*WORLD_INFO['chunk_size'], _next_pos[1]*WORLD_INFO['chunk_size'])
return astar(life, start, _next_pos, zones)
return astar(life, start, end, zones)
|
11522994
|
import os
from os.path import dirname, join, realpath
import pytest
import vcr
from pygitguardian import GGClient
base_uri = os.environ.get("TEST_LIVE_SERVER_URL", "https://api.gitguardian.com")
my_vcr = vcr.VCR(
cassette_library_dir=join(dirname(realpath(__file__)), "cassettes"),
path_transformer=vcr.VCR.ensure_suffix(".yaml"),
decode_compressed_response=True,
ignore_localhost=True,
match_on=["method", "url"],
serializer="yaml",
record_mode="once",
filter_headers=["Authorization"],
)
if os.environ.get("TEST_LIVE_SERVER", "false").lower() == "true":
my_vcr.record_mode = "all"
@pytest.fixture
def client():
api_key = os.environ.get("TEST_LIVE_SERVER_TOKEN", "sample_api_key")
return GGClient(base_uri=base_uri, api_key=api_key)
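# Hedged usage sketch (not part of the original fixtures): a cassette-backed
# test could combine my_vcr with the client fixture roughly like this. The
# health_check() call and the .success attribute are assumptions made for
# illustration only.
#
# @my_vcr.use_cassette
# def test_health_check(client):
#     assert client.health_check().success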
|
11523002
|
from handler.base_plugin import CommandPlugin
from utils import parse_user_id
from utils import plural_form
import asyncio, re
# Requirements:
# ChatMetaPlugin
# StoragePlugin
class VoterPlugin(CommandPlugin):
__slots__ = ("command_groups", "votes")
def __init__(self, vote_commands=None, vote_undo_commands=None,
votekick_commands=None, prefixes=None, strict=False):
"""This plugin allows users to do votes in chats with ability to kick someone with votekick"""
if not vote_commands:
vote_commands = ("vote", "+")
if not vote_undo_commands:
vote_undo_commands = ("unvote", "-")
if not votekick_commands:
votekick_commands = ("votekick", "выгоняем")
super().__init__(*(vote_commands + votekick_commands + vote_undo_commands), prefixes=prefixes, strict=strict)
self.command_groups = vote_commands, vote_undo_commands, votekick_commands
self.votes = {}
p = self.prefixes[-1]
self.description = [
f"Голосование",
f"Устраивайте голосование или выкидывайте людей из чата голосованием ;)",
f"{p}{vote_commands[0]} - голосовать за.",
f"{p}{vote_undo_commands[0]} - отменить свой голос.",
f"{p}{votekick_commands[0]} [кого кикать] - начать голосование за изгнание.",
f"{p}{vote_commands[0]} (нужно голосов, длительность голосования в секундах) тема голосования - начать голосование.",
f"Примеры:",
f"{p}{vote_commands[0]} (4, 30) Тут тема - начать голосование с темой \"Тут тема\", которое будет длиться 30 секунд и для положительного результата необходимо набрать 4 голоса.",
f"{p}{vote_commands[0]} Тут тема - начать голосование с темой \"Тут тема\", которое будет длиться 180 секунд и для положительного результата необходимо набрать 6, 8, 10 или все голоса (в зависимость от численности чата).",
]
async def do_vote(self, msg, title, maximum=None, votetime=180, kick=None):
unvoters = 2 if kick else 1
maximum = min(len(msg.meta["data_chat"]["chat_info"]["users"]) - \
unvoters, maximum if maximum else float("inf"))
await msg.answer(
f"Начало голосования с темой \"{title}\". Максимальное кол-во проголосовавших: {maximum}. "
f"Время голосования: {round(votetime/60, 2)} мин. Голосовать - {self.prefixes[-1]}{self.command_groups[0][0]}"
)
async def tick(timeleft):
await msg.answer(f"До конца голосования {plural_form(timeleft, ('секунда', 'секунды', 'секунд'))} ({len(self.votes[msg.chat_id])}/{maximum}).")
if votetime == 180:
times = [60, 60, 30, 15, 10, 5]
else:
times = []
temp = votetime
while temp > 0:
step = max(1, min(60, temp // 2))  # max(1, ...) guards against a zero step that would never finish
while step % 5 != 0 and step < temp:
step += 1
times.append(step)
temp -= step
await tick(votetime)
for delta in times:
await asyncio.sleep(delta)
votetime -= delta
if votetime > 0: await tick(votetime)
result = len(self.votes[msg.chat_id])
if result >= maximum:
text = f"Голосование закончено c положительным результатом"
else:
text = "Голосование закончено с негативным результатом"
text += f" ({result}/{maximum})!"
await msg.answer(text)
if kick and result >= maximum:
await self.api.messages.removeChatUser(chat_id=msg.chat_id, user_id=kick)
del self.votes[msg.chat_id]
async def process_message(self, msg):
if msg.chat_id == 0:
return await msg.answer("Эта команда доступна только в беседах.")
if not msg.meta.get("data_chat", {}).get("chat_info"):
raise ValueError("This plugin requires `ChatMetaPlugin`.")
command, text = self.parse_message(msg, True)
if command in self.command_groups[0]:
if msg.chat_id not in self.votes:
if text:
match = re.match(r"\((\d+?)(, ?\d+?)?\)", text)
if match:
maximum = int(match.group(1)) if match.group(1) else None
votetime = int(match.group(2)[1:].strip()) if match.group(2) else 180
title = text[match.end():].strip()
else:
maximum = None
votetime = 180
title = text.strip()
self.votes[msg.chat_id] = set()
return asyncio.ensure_future(self.do_vote(msg, title, maximum=maximum, votetime=votetime))
return await msg.answer("Голосований не идёт в данный момент.")
if text:
return await msg.answer("Голосование уже идёт. Подождите его завершения.")
if msg.user_id in self.votes[msg.chat_id]:
return await msg.answer("Вы уже голосовали.")
self.votes[msg.chat_id].add(msg.user_id)
return await msg.answer("ОК+")
if command in self.command_groups[1]:
if msg.chat_id not in self.votes:
return await msg.answer("Голосований не идёт в данный момент.")
if msg.user_id not in self.votes[msg.chat_id]:
return await msg.answer("Вы не голосовали.")
self.votes[msg.chat_id].remove(msg.user_id)
return await msg.answer("ОК-")
if command in self.command_groups[2]:
if msg.chat_id in self.votes:
return await msg.answer("Голосование уже идёт. Подождите его завершения.")
puid = await parse_user_id(msg)
if not puid:
return await msg.answer(
"Введите пользователя, которого хотите выкинуть голосованием.\nПример: " +
self.prefixes[-1] + self.command_groups[2][0] + " 87641997"
)
user = None
for u in msg.meta["data_chat"]["chat_info"]["users"]:
if u["id"] == puid:
user = u
break
if not user:
return await msg.answer("Вы пытаетесь выгнать пользователя, которого нет в беседе.")
self.votes[msg.chat_id] = set()
members = len(msg.meta["data_chat"]["chat_info"]["users"])
if members < 10:
maximum = members - 1
elif members < 16:
maximum = 6
elif members < 22:
maximum = 8
else:
maximum = 10
return asyncio.ensure_future(self.do_vote(msg, f"Кик пользователя: {user['first_name']} {user['last_name']}", maximum, kick=puid))
|
11523023
|
from datetime import datetime, timedelta
import pytest
import eventlet
from detox.proc import Resources
class TestResources:
def test_getresources(self):
x = []
class Provider:
def provide_abc(self):
x.append(1)
return 42
resources = Resources(Provider())
res, = resources.getresources("abc")
assert res == 42
assert len(x) == 1
res, = resources.getresources("abc")
assert len(x) == 1
assert res == 42
def test_getresources_param(self):
class Provider:
def provide_abc(self, param):
return param
resources = Resources(Provider())
res, = resources.getresources("abc:123")
return res == "123"
def test_getresources_parallel(self):
x = []
class Provider:
def provide_abc(self):
x.append(1)
return 42
resources = Resources(Provider())
pool = eventlet.GreenPool(2)
pool.spawn(lambda: resources.getresources("abc"))
pool.spawn(lambda: resources.getresources("abc"))
pool.waitall()
assert len(x) == 1
def test_getresources_multi(self):
x = []
class Provider:
def provide_abc(self):
x.append(1)
return 42
def provide_def(self):
x.append(1)
return 23
resources = Resources(Provider())
a, d = resources.getresources("abc", "def")
assert a == 42
assert d == 23
class TestDetoxExample1:
pytestmark = [pytest.mark.example1, pytest.mark.timeout(20)]
def test_createsdist(self, detox):
sdists, = detox.getresources("sdist")
for sdist in sdists:
assert sdist.check()
def test_getvenv(self, detox):
venv, = detox.getresources("venv:py")
assert venv.envconfig.envdir.check()
venv2, = detox.getresources("venv:py")
assert venv == venv2
def test_test(self, detox):
detox.runtests("py")
class TestDetoxExample2:
pytestmark = [pytest.mark.example2, pytest.mark.timeout(20)]
def test_test(self, detox):
detox.runtests("py")
def test_developpkg(self, detox):
detox.getresources("venv:py")
developpkg, = detox.getresources("developpkg:py")
assert developpkg is False
class TestCmdline:
pytestmark = [pytest.mark.example1]
@pytest.mark.timeout(20)
def test_runtests(self, cmd):
result = cmd.rundetox("-e", "py", "-v", "-v")
result.stdout.fnmatch_lines(["py*getenv*", "py*create:*"])
class TestProcLimitOption:
pytestmark = [pytest.mark.example3]
def test_runtestmulti(self):
class MyConfig:
class MyOption:
numproc = 7
option = MyOption()
x = []
def MyGreenPool(**kw):
x.append(kw)
# Building a Detox object will already call GreenPool(),
# so we have to let MyGreenPool be called twice before raising
if len(x) == 2:
raise ValueError
from detox import proc
setattr(proc, "GreenPool", MyGreenPool)
with pytest.raises(ValueError):
d = proc.Detox(MyConfig())
d.runtestsmulti(["env1", "env2", "env3"]) # Fake env list
assert x[0] == {} # When building Detox object
assert x[1] == {"size": 7} # When calling runtestsmulti
@pytest.mark.timeout(60)
def test_runtests(self, cmd):
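# Two runs of the same env list: with -n 1 the envs run serially, with
# --num 2 they run in parallel, so the serial run is expected to take at
# least as long as the parallel one. The exact per-env durations come from
# the example3 project and are only inferred from the assertions below.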
now1 = datetime.now()
cmd.rundetox("-n", "1", "-epy1,py2")
then1 = datetime.now()
delta1 = then1 - now1
assert delta1 >= timedelta(seconds=2)
now2 = datetime.now()
cmd.rundetox("--num", "2", "-epy1,py2")
then2 = datetime.now()
delta2 = then2 - now2
assert delta2 >= timedelta(seconds=1)
assert delta1 >= delta2, "pool size=2 took more time than pool size=1"
|
11523025
|
from fastapi import APIRouter, Request
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
import json
from pydantic import BaseModel
import s3fs
from typing import Optional, TypedDict, List
router = APIRouter()
s3 = s3fs.S3FileSystem(anon=False, client_kwargs={"region_name": "us-east-1"})
with open("app/catalog.json", "r") as fp:
master_catalog = json.load(fp)
datasets = master_catalog.get("datasets")
class CatalogSearchResponseTypedDict(TypedDict):
filepath: str
dap_url: str
timestamp: str
class CatalogSearchResponse(BaseModel):
matches: List[CatalogSearchResponseTypedDict]
@router.get("/search", tags=["catalog"], description="Query the catalog for matching files",
summary="search", response_model=CatalogSearchResponse,
responses={200: {"content": {"application/json": {}}, "description": "Successful Response"}})
async def search(
request: Request,
keyword: Optional[str] = None,
filetype: Optional[str] = None
) -> JSONResponse:
folders = []
matching_files = []
for d in datasets:
query = d
if keyword:
query += f"*{keyword}"
if filetype:
query += f"*.{filetype}"
else:
query += "*"
print(query)
all_files = s3.glob(query)
matching_files.extend([x for x in all_files if any([s3.isfile(x), x.endswith(".zarr")])])
match_dict = [{"filepath": x,
"dap_url": request.url_for("opendap_dods", path=x)[:-5].replace("http", "https"),
"timestamp": s3.modified(x) if s3.isfile(x) else ""}
for x in matching_files]
results = {"matches": match_dict}
return JSONResponse(content=jsonable_encoder(results))
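# Hedged usage sketch (not part of the original router): mounted in an app,
# the endpoint could be exercised with FastAPI's test client roughly like
# this. It assumes the "opendap_dods" route referenced above is registered on
# the same app and that S3 credentials are available; illustration only.
#
# from fastapi import FastAPI
# from fastapi.testclient import TestClient
# app = FastAPI()
# app.include_router(router)
# print(TestClient(app).get("/search", params={"keyword": "temperature"}).json())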
|
11523079
|
import networkx as nx
import numpy.testing as npt
from GraphRicciCurvature.OllivierRicci import OllivierRicci
def test_compute_ricci_curvature_edges():
G = nx.karate_club_graph()
orc = OllivierRicci(G, method="OTD", alpha=0.5)
output = orc.compute_ricci_curvature_edges([(0, 1)])
npt.assert_almost_equal(output[0, 1], 0.111111)
def test_compute_ricci_curvature():
G = nx.karate_club_graph()
orc = OllivierRicci(G, method="OTD", alpha=0.5)
Gout = orc.compute_ricci_curvature()
rc = list(nx.get_edge_attributes(Gout, "ricciCurvature").values())
ans = [0.111111, -0.143750, 0.041667, -0.114583, -0.281250, -0.281250, 0.062500, -0.200000, -0.114583, 0.062500,
-0.000000, 0.062500, 0.062500, -0.031250, 0.062500, -0.427083, 0.044444, 0.166667, 0.194444, 0.244444,
0.166667, 0.111111, 0.166667, -0.041667, 0.050000, 0.125000, 0.100000, 0.100000, 0.200000, -0.175000,
0.033333, -0.233333, 0.416667, 0.250000, 0.216667, 0.291667, 0.500000, 0.500000, 0.291667, 0.375000,
0.375000, 0.375000, -0.025000, 0.011765, -0.044118, -0.288235, 0.125000, 0.088235, 0.125000, 0.088235,
0.125000, 0.088235, -0.254902, 0.125000, 0.088235, 0.125000, 0.088235, 0.100000, 0.225000, 0.200000,
-0.066667, -0.076471, 0.500000, 0.125000, 0.083333, 0.166667, 0.375000, -0.073529, -0.147059, 0.166667,
-0.068627, -0.041667, -0.014706, -0.041667, -0.044118, -0.166667, -0.122549, 0.267157]
npt.assert_array_almost_equal(rc, ans)
def test_compute_ricci_curvature_directed():
Gd = nx.DiGraph()
Gd.add_edges_from([(0, 1), (1, 2), (2, 3), (1, 3), (3, 1)])
orc = OllivierRicci(Gd, method="OTD", alpha=0.5)
Gout = orc.compute_ricci_curvature()
rc = list(nx.get_edge_attributes(Gout, "ricciCurvature").values())
ans = [-0.49999999999999956,
-3.842615114990622e-11,
0.49999999996158007,
0.49999999992677135,
0.7499999999364129]
npt.assert_array_almost_equal(rc, ans)
def test_compute_ricci_curvature_ATD():
G = nx.karate_club_graph()
orc = OllivierRicci(G, alpha=0.5, method="ATD", verbose="INFO")
orc.compute_ricci_curvature()
Gout = orc.compute_ricci_curvature()
rc = list(nx.get_edge_attributes(Gout, "ricciCurvature").values())
ans = [-0.343750, -0.437500, -0.265625, -0.250000, -0.390625, -0.390625, -0.195312, -0.443750, -0.250000,
0.000000, -0.140625, -0.287500, -0.109375, -0.291667, -0.109375, -0.640625, -0.311111, -0.175926,
-0.083333, -0.166667, 0.000000, -0.166667, 0.000000, -0.333333, -0.241667, -0.137500, -0.220000,
-0.125000, -0.160000, -0.400000, -0.200000, -0.479167, 0.020833, 0.041667, -0.100000, -0.041667,
0.055556, -0.062500, -0.041667, 0.000000, 0.000000, -0.075000, -0.275000, -0.300000, -0.176471,
-0.464706, 0.000000, -0.073529, 0.000000, -0.073529, 0.000000, -0.073529, -0.421569, 0.000000,
-0.073529, 0.000000, -0.073529, -0.200000, -0.200000, -0.125000, -0.291667, -0.335294, -0.055556,
-0.208333, -0.194444, -0.194444, 0.062500, -0.176471, -0.375000, -0.166667, -0.245098, -0.197917,
-0.227941, -0.250000, -0.294118, -0.430556, -0.455882, -0.355392]
npt.assert_array_almost_equal(rc, ans)
def test_compute_ricci_flow():
G = nx.karate_club_graph()
orc = OllivierRicci(G, method="OTD", alpha=0.5)
Gout = orc.compute_ricci_flow(iterations=3)
rf = list(nx.get_edge_attributes(Gout, "weight").values())
ans = [0.584642, 1.222957, 0.828566, 1.893597, 2.179315, 2.179315, 0.814135, 1.647656, 1.893597, 0.906430,
0.916791, 0.798319, 0.760511, 0.829311, 0.760511, 2.477847, 0.937765, 0.681481, 0.612859, 0.568307,
0.675702, 0.702774, 0.675702, 1.484889, 0.843498, 0.753397, 1.098413, 0.868616, 0.646627, 2.061065,
1.425968, 1.924123, 0.292387, 0.487378, 0.446435, 0.509673, 0.101477, 0.108645, 0.509673, 0.246037,
0.246037, 0.228701, 1.309931, 1.213249, 1.317511, 2.149341, 0.712759, 0.811386, 0.712759, 0.811386,
0.712759, 0.811386, 2.245314, 0.712759, 0.811386, 0.712759, 0.811386, 0.947310, 0.518039, 0.857636,
1.525740, 1.429449, 0.180896, 0.692919, 0.724545, 0.639637, 0.281116, 1.427853, 1.622385, 0.807457,
1.386869, 1.372091, 1.320579, 1.324087, 1.276729, 1.843012, 1.721982, 0.412472]
npt.assert_array_almost_equal(rf, ans)
def test_ricci_community_all_possible_clusterings():
G = nx.karate_club_graph()
orc = OllivierRicci(G, exp_power=1, alpha=0.5)
orc.compute_ricci_flow(iterations=40)
cc = orc.ricci_community_all_possible_clusterings()
cuts = [x[0] for x in cc]
clusterings = [x[1] for x in cc]
cuts_ans = [1.8364944935528884, 1.6114944935528852, 1.461494493552883, 1.2614944935528802, 1.1864944935528792,
1.111494493552878, 1.036494493552877]
clusterings_ans = [{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2,
25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2},
{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2,
25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2},
{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2,
25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2},
{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2,
25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2},
{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2,
25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2},
{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2,
25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2},
{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 30: 2, 23: 3,
24: 3, 25: 3, 26: 3, 27: 3, 28: 3, 29: 3, 31: 3}]
npt.assert_array_almost_equal(cuts, cuts_ans)
assert clusterings == clusterings_ans
def test_ricci_community():
G = nx.karate_club_graph()
orc = OllivierRicci(G, exp_power=1, alpha=0.5)
cut, clustering = orc.ricci_community()
cut_ans = 1.2613588421005884
clustering_ans = {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1,
6: 1, 10: 1, 16: 1, 8: 2, 30: 2, 32: 3, 33: 3, 14: 3, 15: 3, 18: 3, 20: 3, 22: 3, 23: 4,
24: 4, 25: 4, 26: 4, 27: 4, 28: 4, 29: 4, 31: 4}
npt.assert_array_almost_equal(cut, cut_ans)
assert clustering == clustering_ans
|
11523085
|
import rospy
from simulation_brain_link.msg import MissionMode
from simulation_evaluation.msg import Referee as RefereeMsg
from simulation_groundtruth.msg import GroundtruthStatus
from std_msgs.msg import String as StringMsg
from simulation.utils.ros_base.node_base import NodeBase
class DriveTestNode(NodeBase):
"""Complete an automatic test using this node.
The gazebo simulation, KITcar_brain and the CarStateNode also need to be running.
"""
def __init__(self, run: bool = True):
"""Initialize the node and optionally start it as well.
Args:
run: Indicate whether to call :py:func:`run`.
In a test it can be useful to do this manually.
"""
super().__init__(name="drive_test_node", log_level=rospy.DEBUG)
self._groundtruth_status = GroundtruthStatus.READY
self._previous_state_machine_msg = None
self._state_machine_msg = None
self.state = -1
self.parking_successes = -1
if run:
self.run()
def start(self):
self.mission_mode_publisher = rospy.Publisher(
self.param.topics.mission_mode, MissionMode, queue_size=1
)
self.groundtruth_status_subscriber = rospy.Subscriber(
self.param.topics.groundtruth.status,
GroundtruthStatus,
callback=self.receive_groundtruth_update,
)
# 1. Ensure that evaluation pipeline is started up
# Otherwise it can happen that the groundtruth is reloaded
# while the speaker is starting up.
# Then the speakers will operate on a false groundtruth!
rospy.wait_for_message(self.param.topics.referee.info, RefereeMsg)
# For some reason this is necessary for the mission mode publisher to work correctly
rospy.sleep(0.1)
self.referee_subscriber = rospy.Subscriber(
self.param.topics.referee.info,
RefereeMsg,
callback=self.referee_cb,
queue_size=1,
) # Subscribe to referee
self.sm_info_subscriber = rospy.Subscriber(
self.param.topics.state_machine.info,
StringMsg,
callback=self.receive_state_machine_info,
queue_size=1,
) # Subscribe to sm updates
rospy.wait_for_message(self.param.topics.referee.info, RefereeMsg)
# Goooo
self.update_mission_mode(self.param.mission_mode)
def stop(self):
self.referee_subscriber.unregister()
self.sm_info_subscriber.unregister()
self.groundtruth_status_subscriber.unregister()
self.mission_mode_publisher.unregister()
def receive_groundtruth_update(self, msg: GroundtruthStatus):
"""Receive GroundtruthStatus message.
Args:
msg: New GroundtruthStatus message
"""
self._groundtruth_status = msg.status
def receive_state_machine_info(self, msg: StringMsg):
"""Receive info message when state machines change.
Args:
msg: New string message
"""
if msg.data == self._state_machine_msg:
return
if self._state_machine_msg is not None:
self._previous_state_machine_msg = self._state_machine_msg
self._state_machine_msg = msg.data
def update_mission_mode(self, mission_mode: int):
"""Change the car's mission mode.
Args:
mission_mode: Desired mission mode.
"""
rospy.loginfo(f"Updating the mission mode to {mission_mode}.")
msg = MissionMode()
msg.header.stamp = rospy.Time.now()
msg.mission_mode = mission_mode
self.mission_mode_publisher.publish(msg)
def referee_cb(self, msg: RefereeMsg):
"""Listen for changes in the referee status."""
rospy.logdebug(f"Referee state: {msg.state}")
if msg.state == RefereeMsg.FAILED or msg.state == RefereeMsg.COMPLETED:
# Drive is over: save result and shutdown!
self.state = msg.state
self.parking_successes = msg.parking_successes
self.last_state_machine_transition = (
self._previous_state_machine_msg,
self._state_machine_msg,
)
# Shutdown node...
rospy.signal_shutdown("Drive is finished...")
|
11523093
|
from doajtest.fixtures import ArticleFixtureFactory
from portality.bll import exceptions
from portality.models import Article
from portality.bll.exceptions import ArticleMergeConflict
from datetime import datetime
class BLLArticleMockFactory(object):
@classmethod
def merge_mock(cls, article):
pass
@classmethod
def pull_mock(cls, id):
return Article
@classmethod
def doi_or_fulltext_updated(cls, doi_updated, ft_updated):
result = doi_updated and ft_updated
def mock(new_article, update_id):
return result
return mock
@classmethod
def discover_duplicates(cls, doi_duplicates=0, fulltext_duplicates=0, overlap=0):
if overlap > doi_duplicates or overlap > fulltext_duplicates:
raise Exception("overlap must be the same as or less than either of doi_duplicates or fulltext_duplicates")
idents = []
# first make duplicate records for the total number of desired dois
for i in range(doi_duplicates):
idents.append({"doi_domain" : True, "doi" : "10.1234/abc/1", "fulltext" : "http://example.com/unique/" + str(i)})
for i in range(overlap):
idents[i]["fulltext"] = "http://example.com/1"
idents[i]["fulltext_domain"] = True
remaining_fulltexts = fulltext_duplicates - overlap
for i in range(remaining_fulltexts):
idents.append({"fulltext_domain" : True, "doi" : "10.1234/unique/" + str(i), "fulltext" : "http://example.com/1"})
possible_duplicates = {"doi" : [], "fulltext" : []}
for i, ident in enumerate(idents):
source = ArticleFixtureFactory.make_article_source(eissn="1234-5678", pissn="9876-5432", doi=ident["doi"], fulltext=ident["fulltext"])
article = Article(**source)
article.set_id()
article.data["last_updated"] = datetime.fromtimestamp(i * 100000).strftime("%Y-%m-%dT%H:%M:%SZ")
if "doi_domain" in ident:
possible_duplicates["doi"].append(article)
if "fulltext_domain" in ident:
possible_duplicates["fulltext"].append(article)
if len(possible_duplicates["doi"]) == 0:
del possible_duplicates["doi"]
if len(possible_duplicates["fulltext"]) == 0:
del possible_duplicates["fulltext"]
def mock(article, owner=None, results_per_match_type=10):
return possible_duplicates
return mock
@classmethod
def is_legitimate_owner(cls, legit=None, legit_on_issn=None):
def mock(*arg, **kwarg):
if legit is not None:
return legit
if legit_on_issn is not None:
article = arg[0]
issns = article.bibjson().issns()
for issn in issns:
if issn in legit_on_issn:
return True
return False
return False
return mock
@classmethod
def issn_ownership_status(cls, owned, shared, unowned, unmatched):
def mock(*arg, **kwarg):
return owned, shared, unowned, unmatched
return mock
@classmethod
def get_duplicate(cls, given_article_id = None, return_none=False, eissn=None, pissn=None, doi=None, fulltext=None, merge_duplicate=False):
article = None
if not merge_duplicate:
if given_article_id == "exception":
raise exceptions.ArticleMergeConflict()
if not return_none:
source = ArticleFixtureFactory.make_article_source(eissn=eissn, pissn=pissn, doi=doi, fulltext=fulltext)
article = Article(**source)
article.set_id(given_article_id)
def mock(*args, **kwargs):
if merge_duplicate:
raise exceptions.ArticleMergeConflict()
supplied_article = args[0]
if given_article_id is not None:
if given_article_id == supplied_article.id:
return article
else:
return article
return mock
@classmethod
def batch_create(cls, *args, **kwargs):
raise RuntimeError("Batch create unsuccessful.")
@classmethod
def has_permissions(cls, has_permission):
def mock(*args, **kwargs):
return has_permission
return mock
@classmethod
def _prepare_update_admin(cls, duplicate_result, update_article_id):
if update_article_id is None:
result = -1
elif duplicate_result == "itself" or duplicate_result == "none":
result = 1
else:
result = 0
def mock(*args, **kwargs):
print(duplicate_result, update_article_id)
if result == -1:
raise exceptions.ConfigurationException
if result == 0:
raise exceptions.DuplicateArticleException("duplicate result is 'different'")
return 1
return mock
@classmethod
def _prepare_update_publisher(cls, duplicate_result, has_ft_doi_changed):
if duplicate_result is None:
result = 0
elif duplicate_result == "itself":
if has_ft_doi_changed:
result = -1
else:
result = 1
elif duplicate_result == "different":
result = -1
def mock(*args, **kwargs):
if result == -1:
raise exceptions.DuplicateArticleException()
return result
return mock
|
11523122
|
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as base_auth_views
from django.conf import settings
from django.conf.urls.static import static
from django.urls import reverse_lazy
from django.views.generic import RedirectView
urlpatterns = [
# Admin redirections/views
url(r'^admin/login/$', RedirectView.as_view(url=reverse_lazy(settings.LOGIN_URL),
query_string=True)),
url(r'^staff/login/$', RedirectView.as_view(url=reverse_lazy(settings.LOGIN_URL),
query_string=True)),
url(r'^admin/$', RedirectView.as_view(url=reverse_lazy('admin:app_list',
args=('huntserver',)))),
url(r'^staff/$', RedirectView.as_view(url=reverse_lazy('admin:app_list',
args=('huntserver',)))),
url(r'^staff/', admin.site.urls),
url(r'^admin/', admin.site.urls),
# All of the huntserver URLs
url(r'^', include('huntserver.urls', namespace="huntserver")),
# User auth/password reset
url(r'^accounts/logout/$', base_auth_views.LogoutView.as_view(),
name='logout', kwargs={'next_page': '/'}),
url(r'^accounts/login/$', base_auth_views.LoginView.as_view()),
url(r'^password_reset/$', base_auth_views.PasswordResetView.as_view(), name='password_reset'),
url(r'^password_reset/done/$', base_auth_views.PasswordResetDoneView.as_view(),
name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
base_auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
url(r'^reset/done/$', base_auth_views.PasswordResetCompleteView.as_view(),
name='password_reset_complete'),
]
# Use silk if enabled
if 'silk' in settings.INSTALLED_APPS:
urlpatterns.append(url(r'^silk/', include('silk.urls', namespace='silk')))
# Hack for using development server
if(settings.DEBUG):
import debug_toolbar
urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
|
11523126
|
import pytest
from lucid.misc.io.scoping import io_scope, scope_url
def test_empty_io_scope():
path = "./some/file.ext"
scoped = scope_url(path)
assert scoped == path
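# Hedged sketch of a companion test (not part of the original module): io_scope
# is imported above, so a natural follow-up would check that scope_url joins a
# relative path onto the active scope. The exact join format is an assumption.
#
# def test_io_scope_prefixes_relative_paths():
#     with io_scope("gs://some-bucket/dir"):
#         assert scope_url("file.ext") == "gs://some-bucket/dir/file.ext"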
|
11523240
|
from instructions import to_stack_registers
from opcodes import INTERNAL_CALL_OPCODE
from expressionblock import ExpressionBlock
import os
def get_prefix(depth):
return " " * depth
class ExternalFunction(object):
def __init__(self, signature, graph, tracker, entry_exit):
self.signature = signature
self.graph = graph
self.tracker = tracker
self.entry_id, self.exit_id = entry_exit
self.resolver = None
self.ins_outs = None
def get_begin_address(self):
# print(self.entry_id)
entry_block = self.graph[self.entry_id]
return entry_block.get_entry_address()
def extract_intcall(self, callee_pair, caller_pair, opcode):
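# Collapse an inlined callee back into a single internal-call opcode: the
# intcall instruction is inserted into the caller's entry block, the edges
# into and out of the callee are removed, and the caller's entry is wired
# directly to its continuation block.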
callee_begin, callee_end = callee_pair
caller_begin, caller_end = caller_pair
caller_begin_block = self.graph[caller_begin]
caller_end_block = self.graph[caller_end]
new_block = \
caller_begin_block.insert_intcall(opcode, caller_end_block)
self.graph.replace_block(new_block)
self.graph.remove_edge(caller_begin, callee_begin)
self.graph.remove_edge(callee_end, caller_end)
self.graph.add_edge(caller_begin, caller_end)
# def convert_to_ssa(self):
# self.__create_work_lists()
# self.__insert_phi_functions()
# self.__rename_registers()
#
# def __create_work_lists(self):
# self.__work_lists = dict()
# for basic_block in self.graph:
# cur_id = basic_block.get_str_id()
# for register in basic_block.exit_registers:
# if register not in self.__work_lists:
# self.__work_lists[register] = set()
# self.__work_lists[register].add(cur_id)
# return
#
# def __insert_phi_functions(self):
# frontiers = self.graph.get_dominance_frontiers(self.entry_id)
# for register, work_list in self.__work_lists.items():
# while len(work_list) != 0:
# cur_id_1 = work_list.pop()
# frontier = frontiers[cur_id_1]
# for cur_id_2 in frontier:
# basic_block = self.graph[cur_id_2]
# if basic_block.has_phi_function(register):
# continue
# pre_ids = self.graph.get_predecessor_ids(cur_id_2)
# # (pre_id -> register) trivial phi function
# basic_block.insert_phi_function(register, pre_ids)
# work_list.add(cur_id_2)
# return
#
# def __rename_registers(self):
# self.__name_counter = {"$s0": 1, "$t": 0}
# self.__name_stack = {"$s0": ["$s0_0"], "$t": []}
#
# for i in range(1, 40):
# register = "$s" + str(i)
# self.__name_counter[register] = 0
# self.__name_stack[register] = []
#
# self.__dominator_tree = \
# self.graph.get_dominator_tree(self.entry_id)
# # print(self.__dominator_tree)
# self.__rename_block_registers(self.entry_id)
# return
#
# def __rename_block_registers(self, cur_id):
# cur_block = self.graph[cur_id]
# phi_functions = cur_block.phi_functions
# to_pop = dict()
#
# for register in phi_functions.keys():
# if register not in to_pop:
# to_pop[register] = 0
# to_pop[register] += 1
# new_register = self.__get_new_name(register)
# phi_functions[new_register] = phi_functions.pop(register)
#
# for instruction in cur_block:
# for register in instruction.reads:
# new_register = self.__get_top_name(register)
# instruction.rename_read_register(register, new_register)
# for register in instruction.writes:
# if register not in to_pop:
# to_pop[register] = 0
# to_pop[register] += 1
# new_register = self.__get_new_name(register)
# instruction.rename_write_register(register, new_register)
# # print(str(instruction).lower())
# # self.debug_function_instructions(cur_id)
# # TODO: is this even right?
# for index, register in enumerate(cur_block.exit_registers):
# new_register = self.__get_top_name(register)
# cur_block.exit_registers[index] = new_register
#
# for suc_id in self.graph.get_successor_ids(cur_id):
# suc_block = self.graph[suc_id]
# for phi_function in suc_block.phi_functions.values():
# for pre_id, register in phi_function.items():
# if pre_id == cur_id:
# new_register = self.__get_top_name(register)
# # print(pre_id, register, new_register)
# phi_function[pre_id] = new_register
#
# if cur_id not in self.__dominator_tree:
# return
#
# for child_id in self.__dominator_tree[cur_id]:
# self.__rename_block_registers(child_id)
#
# for register, count in to_pop.items():
# for i in range(count):
# self.__name_stack[register].pop()
# return
#
# def __get_new_name(self, register):
# count = self.__name_counter[register]
# self.__name_counter[register] = count + 1
# new_name = register + "_%d" % count
# self.__name_stack[register].append(new_name)
# return new_name
#
# def __get_top_name(self, register):
# return self.__name_stack[register][-1]
# def convert_to_expressions(self):
# for basic_block in self.graph:
# basic_block = ExpressionBlock(basic_block)
#
# basic_block.aggregate_expressions()
# # basic_block.collapse_expressions()
# basic_block.fold_expressions()
# self.graph.replace_block(basic_block)
def get_block_hashes(self):
block_hashes = list()
for basic_block in self.graph:
block_hashes.append(basic_block.get_block_hash())
return block_hashes
def visualize_function(self):
if not os.path.exists("temp/"):
os.makedirs("temp/")
self.graph.visualize("temp/temp.dot")
os.system("dot -Tpdf temp/temp.dot -o temp/0x%x.pdf" % self.signature)
def debug_function(self, block_id=None):
if block_id:
self.graph[block_id].debug_block()
return
print("\nfunction_" + hex(self.signature))
for basic_block in self.graph:
basic_block.debug_block(0)
def __str__(self):
return "function_%x" % self.signature
class InternalFunction(ExternalFunction):
def __init__(self, signature, graph, tracker, entry_exit, action):
ExternalFunction.__init__(self, signature, graph, tracker, entry_exit)
self.action = action
entry_size = self.tracker.get_observed_image(self.entry_id).top
alpha, delta = self.action
self.reads = to_stack_registers(range(entry_size - alpha, entry_size))
self.reads.reverse()
self.writes = \
to_stack_registers(range(entry_size - alpha, entry_size - alpha + delta))
self.writes.reverse()
return
def get_intcall_opcode(self):
return INTERNAL_CALL_OPCODE + "%d" % self.signature
def insert_intreturn(self):
exit_block = self.graph[self.exit_id]
new_block = exit_block.insert_intreturn()
self.graph.replace_block(new_block)
def __str__(self):
prefix = "function_%x\n" % self.signature
return prefix + ", ".join(self.reads) + "\n" + ", ".join(self.writes)
def visualize_function(self):
if not os.path.exists("temp/"):
os.makedirs("temp/")
self.graph.visualize("temp/temp.dot", (self.reads, self.writes))
os.system("dot -Tpdf temp/temp.dot -o temp/0x%x.pdf" % self.signature)
class Structure:
def __init__(self, block_id, suc_address, blocks):
self.__block_id = block_id
self.__exit_address = suc_address
self.blocks = blocks
def get_exit_address(self):
return self.__exit_address
def get_id(self):
return self.__block_id
def get_entry_address(self):
return self.blocks[0].get_entry_address()
def get_block(self, block_id):
if self.__block_id == block_id:
return self
for block in self.blocks:
result = block.get_block(block_id)
if result is not None:
return result
def get_blocks(self):
return self.blocks
def get_nth_block(self, index):
return self.blocks[index]
class Seq(Structure):
def __init__(self, block_id, suc_address, blocks):
Structure.__init__(self, block_id, suc_address, blocks)
for b in blocks[:-1]:
if isinstance(b, ExpressionBlock):
b.remove_end_jump()
def debug_block(self, depth):
# print(prefix + "SEQ")
for block in self.blocks:
block.debug_block(depth)
# print(prefix + "QES")
def dot_format_block(self, depth):
results = []
for block in self.blocks:
results.append(block.dot_format_block(depth))
return "".join(results)
def remove_end_jump(self):
self.blocks[-1].remove_end_jump()
class IfThen(Structure):
def __init__(self, block_id, suc_address, a0, a1):
Structure.__init__(self, block_id, suc_address, [a0, a1])
if isinstance(a1, ExpressionBlock):
a1.remove_end_jump()
def debug_block(self, depth):
prefix = get_prefix(depth)
print(prefix + "IF")
self.blocks[0].debug_block(depth + 1)
print(prefix + "THEN")
self.blocks[1].debug_block(depth + 1)
print(prefix + "FI")
def dot_format_block(self, depth):
prefix = get_prefix(depth)
results = [
self.blocks[0].dot_format_if_header(depth),
self.blocks[1].dot_format_block(depth + 1),
prefix + "}\l"]
return "".join(results)
class IfThenElse(Structure):
def __init__(self, block_id, suc_address, a0, a1, a2):
Structure.__init__(self, block_id, suc_address, [a0, a1, a2])
if isinstance(a1, ExpressionBlock):
a1.remove_end_jump()
if isinstance(a2, ExpressionBlock):
a2.remove_end_jump()
def debug_block(self, depth):
prefix = get_prefix(depth)
print(prefix + "IF")
self.blocks[0].debug_block(depth + 1)
print(prefix + "THEN")
self.blocks[1].debug_block(depth + 1)
print(prefix + "ELSE")
self.blocks[2].debug_block(depth + 1)
print(prefix + "FI")
def dot_format_block(self, depth):
prefix = get_prefix(depth)
results = [
self.blocks[0].dot_format_if_header(depth),
self.blocks[1].dot_format_block(depth + 1),
prefix + "} else {\l",
self.blocks[2].dot_format_block(depth + 1),
prefix + "}\l"]
return "".join(results)
class Loop(Structure):
def __init__(self, block_id, suc_address, a0, a1):
Structure.__init__(self, block_id, suc_address, [a0, a1])
# print(a1)
if isinstance(a1, ExpressionBlock) or isinstance(a1, Seq):
a1.remove_end_jump()
def debug_block(self, depth):
prefix = get_prefix(depth)
print(prefix + "WHILE")
self.blocks[0].debug_block(depth + 1)
print(prefix + "DO")
self.blocks[1].debug_block(depth + 1)
print(prefix + "OD")
def dot_format_block(self, depth):
prefix = get_prefix(depth)
results = [
prefix + "while (0x1) {\l",
self.blocks[0].dot_format_while_header(depth + 1),
self.blocks[1].dot_format_block(depth + 1),
prefix + "}\l",
]
return "".join(results)
|
11523256
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
from scipy.misc import imresize
# this allows the example to be run in-repo
# (or can be removed if lfw_fuel is installed)
import sys
sys.path.append('.')
from lfw_fuel import lfw
'''
Train a simple convnet on the LFW dataset.
'''
batch_size = 128
nb_epoch = 12
feature_width = 32
feature_height = 32
downsample_size = 32
def crop_and_downsample(originalX):
"""
Starts with a 250 x 250 image.
Crops to 128 x 128 around the center.
Downsamples the image to (downsample_size) x (downsample_size).
Returns an image with dimensions (width, height, channel).
"""
current_dim = 250
target_dim = 128
margin = int((current_dim - target_dim)/2)
left_margin = margin
right_margin = current_dim - margin
# newim is shape (6, 128, 128)
newim = originalX[:, left_margin:right_margin, left_margin:right_margin]
# resized are shape (feature_width, feature_height, 3)
feature_width = feature_height = downsample_size
resized1 = imresize(newim[0:3,:,:], (feature_width, feature_height), interp="bicubic", mode="RGB")
resized2 = imresize(newim[3:6,:,:], (feature_width, feature_height), interp="bicubic", mode="RGB")
# re-package into a new X entry
newX = np.concatenate([resized1,resized2], axis=2)
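# newX is (feature_width, feature_height, 6): the two 3-channel face crops are
# stacked along the channel axis so the pair can be fed to the network together.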
# the next line is important.
# if you don't normalize your data, all predictions will be 0 forever.
newX = newX/255.0
return newX
(X_train, y_train), (X_test, y_test) = lfw.load_data("deepfunneled")
# the data, shuffled and split between train and test sets
X_train = np.asarray([crop_and_downsample(x) for x in X_train])
X_test = np.asarray([crop_and_downsample(x) for x in X_test])
# print shape of data while model is building
print("{1} train samples, {2} channel{0}, {3}x{4}".format("" if X_train.shape[1] == 1 else "s", *X_train.shape))
print("{1} test samples, {2} channel{0}, {3}x{4}".format("" if X_test.shape[1] == 1 else "s", *X_test.shape))
model = Sequential()
model.add(Conv2D(32, (5,5), input_shape=(downsample_size,downsample_size,6), padding='same', data_format='channels_last', activation='relu'))
model.add(Conv2D(32, (5,5), padding='same', data_format='channels_last', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), data_format='channels_last'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', metrics=['binary_accuracy'], optimizer='adadelta')
model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epoch, verbose=1, validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
11523283
|
import pdb
import h5py
import torch
import numpy as np
from base.base_data_loader import BaseDataLoader
class DataLoaderSimple(BaseDataLoader):
"""
Simple DataLoader without expert rewards or trajectories.
"""
def __init__(self, opts):
super(DataLoaderSimple, self).__init__(opts)
# Dataset statistics for unseen classes
if opts.h5_path_unseen != '':
self.test_unseen_count = self.data['test_unseen'].shape[0]
self.test_unseen_idx = 0
def _load_data(self, opts):
# ---- Load the dataset ----
h5_file = h5py.File(opts.h5_path, 'r')
self.data = {split: np.array(h5_file[split]) for split in ['train', 'val', 'test']}
h5_file.close()
# ---- Load the unseen classes (if available)----
if opts.h5_path_unseen != '':
h5_file_unseen = h5py.File(opts.h5_path_unseen, 'r')
self.data['test_unseen'] = np.array(h5_file_unseen['test'])
h5_file_unseen.close()
def _get_data(self, split, idxes):
out_imgs = np.copy(self.data[split][idxes])
return (out_imgs, )
class DataLoaderExpert(DataLoaderSimple):
"""
DataLoader that additionally returns expert rewards
"""
def __init__(self, opts):
super(DataLoaderExpert, self).__init__(opts)
def _load_data(self, opts):
# ---- Loads images ----
super(DataLoaderExpert, self)._load_data(opts)
# ---- Load rewards ----
rewards_file = h5py.File(opts.rewards_h5_path)
self.rewards = {}
# These are KxNxM arrays containing rewards corresponding to each view of
# all panoramas in the train and val splits
self.rewards['train'] = np.array(rewards_file['train/nms'])
self.rewards['val'] = np.array(rewards_file['val/nms'])
def _get_data(self, split, idxes):
out_imgs = np.copy(self.data[split][idxes])
# test, test_unseen data do not have expert rewards
out_rewards = self.rewards[split][idxes] if split in self.rewards else None
return out_imgs, out_rewards
class DataLoaderExpertPolicy(DataLoaderSimple):
"""
DataLoader that additionally returns expert trajectories
"""
def __init__(self, opts):
super(DataLoaderExpertPolicy, self).__init__(opts)
def _load_data(self, opts):
# ---- Load the images, labels ----
super(DataLoaderExpertPolicy, self)._load_data(opts)
# ---- Load the expert trajectories ----
self.trajectories_type = opts.trajectories_type
if opts.trajectories_type == 'utility_maps':
# Load the utility maps
utility_file = h5py.File(opts.utility_h5_path)
self.utility_maps = {}
# These are KxNxMxNxM arrays
for split in utility_file.keys():
self.utility_maps[split] = np.array(utility_file[split]['utility_maps'])
elif opts.trajectories_type == 'expert_trajectories':
# Load the trajectories
# {split: #split_samples x T-1 numpy array}
self.trajectories = torch.load(opts.utility_h5_path)
elif opts.trajectories_type == 'saliency_scores':
# Load the saliency scores
h5_file = h5py.File(opts.utility_h5_path)
self.saliency_scores = {}
# These are MxNxB arrays (transposed due to matlab)
for split in h5_file.keys():
split_data = np.array(h5_file[split])
ndim = len(split_data)
self.saliency_scores[split] = split_data.transpose(*reversed(range(ndim)))
else:
raise ValueError('Wrong trajectories_type!')
def _get_data(self, split, idxes):
out_imgs = np.copy(self.data[split][idxes])
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps[split][idxes]
elif self.trajectories_type == 'expert_trajectories':
out_maps = {(i, j): self.trajectories[split][(i, j)][idxes] for i in range(self.N)
for j in range(self.M)}
elif self.trajectories_type == 'saliency_scores':
out_maps = self.saliency_scores[split][idxes]
return out_imgs, out_maps
|
11523331
|
import logging
import traci
import random
class Platoon():
def __init__(self, startingVehicles):
"""Create a platoon, setting default values for all variables"""
logging.info("Creating a new platoon with: %s", startingVehicles)
self._vehicles = list(startingVehicles)
self._active = True
self._color = (random.randint(0, 255), random.randint(
0, 255), random.randint(0, 255))
self._currentSpeed = self.getLeadVehicle().getSpeed()
self._disbandReason = None
self._eligibleForMerging = False
self._lane = self.getLeadVehicle().getLane()
self._lanePosition = self.getLeadVehicle().getLanePosition()
self._controlledLanes = set()
self._targetSpeed = -1
self.getLeadVehicle().setColor(self._color)
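# The lead vehicle keeps its default driving parameters (it only gets the
# platoon colour); the tight following parameters set in startBehaviour are
# applied to the follower vehicles only.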
self.startBehaviour(startingVehicles[1:])
def addControlledLanes(self, lanes):
for lane in lanes:
self._controlledLanes.add(lane)
def addVehicle(self, vehicle):
"""Adds a single vehicle to this platoon"""
self._vehicles.append(vehicle)
self.startBehaviour([vehicle, ])
logging.info("Adding %s to platoon %s, New length: %s",
vehicle.getName(), self.getID(), len(self._vehicles))
def canMerge(self):
"""
Returns True if this platoon can currently merge with another
"""
return self._eligibleForMerging
def checkVehiclePathsConverge(self, vehicles):
# Check that the given vehicles are going to follow the lead
# vehicle into the next edge
leadVehicleRoute = self.getLeadVehicle().getRemainingRoute()
if len(leadVehicleRoute) > 1:
leadVehicleNextEdge = leadVehicleRoute[1]
for vehicle in vehicles:
if leadVehicleNextEdge not in vehicle.getRoute():
return False
return True
def disband(self):
"""Marks a platoon as dead and returns vehicles to normal"""
self.stopBehaviour()
self._active = False
logging.info("Disbanding platoon: %s", self.getID())
def getAcceleration(self):
return max([v.getAcceleration() for v in self.getAllVehicles()])
def getAllVehicles(self):
"""Retrieve the list of all the vehicles in this platoon"""
return self._vehicles
def getAllVehiclesByName(self):
"""Retrieve the list of all the vehicles in this platoon by name"""
return [v.getName() for v in self.getAllVehicles()]
def getSpeed(self):
return self._currentSpeed
def getID(self):
"""Generates and returns a unique ID for this platoon"""
return "%s" % (self.getLeadVehicle().getName())
def getLane(self):
return self._lane
def getLanesOfAllVehicles(self):
return [v.getLane() for v in self.getAllVehicles() if v.isActive()]
def getLanePositionFromFront(self, lane=None):
if lane:
vehiclesInLane = [v for v in self.getAllVehicles() if v.getLane() == lane]
if vehiclesInLane:
return traci.lane.getLength(lane) - vehiclesInLane[0].getLanePositionFromFront()
else:
return traci.lane.getLength(self._lane) - self._lanePosition
def getLeadVehicle(self):
return self._vehicles[0]
def getLength(self):
""" Gets the total length of the platoon
Done by taking the distance between the vehicle's front
bumper and the end of the lane
"""
laneLen = traci.lane.getLength(self._lane)
front = laneLen - self.getLeadVehicle().getLanePosition()
rear = laneLen - self._vehicles[-1].getLanePosition()
rearVehicleLength = self._vehicles[-1].getLength() * 2
return rear - front + rearVehicleLength
def getLengthOfSingleVehicle(self):
return self.getLeadVehicle().getLength()
def getMaxSpeed(self):
""" Gets the maximum speed of the platoon
"""
return max([v.getMaxSpeed() for v in self.getAllVehicles()])
def getNumberOfVehicles(self):
return len(self._vehicles)
def getTargetSpeed(self):
return self._targetSpeed
def isActive(self):
"""Is the platoon currently active within the scenario"""
return self._active
def mergePlatoon(self, platoon):
"""Merges the given platoon into the current platoon"""
if self.checkVehiclePathsConverge(platoon.getAllVehicles()) and platoon.getLane() == self.getLane():
platoon.disband()
platoon._disbandReason = "Merged"
for vehicle in platoon.getAllVehicles():
self.addVehicle(vehicle)
self._eligibleForMerging = False
platoon._eligibleForMerging = False
def removeControlledLanes(self, lanes):
"""
Removes the lanes from the platoon that were previously being controlled by an
intersection controller.
"""
for lane in lanes:
self._controlledLanes.remove(lane)
def removeTargetSpeed(self):
"""
Removes the target speed from this platoon
"""
self._targetSpeed = -1
def setTargetSpeed(self, speed):
"""
Sets a manual target speed for this platoon (normally determined by the lead
vehicle but this will override it). This will remain until removeTargetSpeed
is called.
"""
self._targetSpeed = speed
def setGap(self, gap):
"""
Set the gap between vehicles in the platoon
"""
for veh in self.getAllVehicles():
veh.setTau(gap)
def setSpeedMode(self, speedMode):
"""
Set the speed mode for every vehicle in the platoon.
Speed mode is a SUMO function that allows different behaviour once the setSpeed
function has been called, including ignoring safe speed thresholds and junctions.
Used here so that vehicles don't brake unnecessarily when closing distances are tight
during CIM.
"""
for v in self.getAllVehicles():
v.setSpeedMode(speedMode)
def startBehaviour(self, vehicles):
"""A function to start platooning a specific set of vehicles"""
if self.isActive():
for v in vehicles:
v.setColor(self._color)
v.setImperfection(0)
v.setMinGap(0)
v.setTau(0.05)
self.update()
def stopBehaviour(self):
"""Stops vehicles exhibiting platoon behaviour, if they are
still present within the map"""
for v in self._vehicles:
if v.isActive():
v.setColor((255, 255, 255))
v.setImperfection(0.5)
v.setMinGap(2.5)
v.setTau(1)
# Take speed back to default behaviour
v.setSpeed(-1)
def updateIsActive(self):
"""
Check whether every vehicle in the platoon is still active; if not, disband.
"""
if not all([v.isActive() for v in self._vehicles]):
self._disbandReason = "One vehicle not active"
self.disband()
return True
def update(self):
"""
Performs updates to maintain the platoon
1. set platoon location information using lead vehicle
2. set the speed of all vehicles in the convoy,
using the lead vehicle's current speed
3. is this platoon still alive (in the map),
should it be labelled as inactive?
"""
self.updateIsActive()
if self.isActive():
potentialNewLeader = self.getLeadVehicle().getLeader()
if potentialNewLeader and potentialNewLeader[0] in self.getAllVehiclesByName():
# Something has gone wrong disband the platoon
self._disbandReason = "Reform required due to new leader"
self.disband()
# Location Info Update
self._lane = self.getLeadVehicle().getLane()
self._lanePosition = self.getLeadVehicle().getLanePosition()
# Speed Update
leadVehicleSpeed = self.getLeadVehicle().getSpeed()
if self._currentSpeed != 0 and leadVehicleSpeed == 0:
self._eligibleForMerging = True
self._currentSpeed = leadVehicleSpeed
if self._targetSpeed != -1:
self._updateSpeed(self._targetSpeed)
else:
if self.getLeadVehicle().getLane() not in self._controlledLanes:
self.getLeadVehicle().setSpeed(-1)
self._updateSpeed(self._currentSpeed, False)
# Route updates
# Check that all cars still want to continue onto the
# next edge, otherwise disband the platoon
if not self._currentSpeed == 0:
if not self.checkVehiclePathsConverge(self.getAllVehicles()):
self._disbandReason = "Platoon paths now diverge"
self.disband()
def _updateSpeed(self, speed, inclLeadingVeh=True):
""" Sets the speed of all vehicles in the platoon
If inclLeadingVeh set to false, then the leading vehicle is
excluded from the speed change.
Also checks that the platoon is bunched together; this allows
vehicles to "catch up".
"""
if inclLeadingVeh and self.getLeadVehicle().getLane() not in self._controlledLanes:
self.getLeadVehicle().setSpeed(speed)
leadVehEdge = self.getLeadVehicle().getEdge()
targetLane = self.getLeadVehicle().getLaneIndex()
# Non leading vehicles should follow the speed of the vehicle in front
vehicles = self._vehicles[1:]
for veh in vehicles:
try:
if veh.getEdge() == leadVehEdge:
veh.setTargetLane(targetLane)
except traci.TraCIException:
logging.error("Could not change lane of %s", veh.getName())
# Only set the speed if the vehicle is not in a lane controlled by a third party.
if veh.getLane() not in self._controlledLanes:
# If we're in range of the leader and they are moving
# follow their speed
# Otherwise follow vehicle speed limit rules to catch up
leadVeh = veh.getLeader()
if leadVeh and leadVeh[1] <= 5 and self._currentSpeed != 0:
veh.setSpeed(speed)
else:
veh.setSpeed(-1)
|
11523340
|
from visual_mpc.video_prediction.setup_predictor import setup_predictor
from visual_mpc.video_prediction.vpred_model_interface import VPred_Model_Interface
from video_prediction.models.indep_multi_savp_model import IndepMultiSAVPVideoPredictionModel
import video_prediction
base_dir = video_prediction.__file__
base_dir = '/'.join(str.split(base_dir, '/')[:-2])
modeldir = base_dir + '/pretrained_models/mixed_datasets/towel_hard_objects/'
configuration = {
'pred_model': VPred_Model_Interface,
'pred_model_class':IndepMultiSAVPVideoPredictionModel,
'setup_predictor':setup_predictor,
'json_dir': modeldir + '/view0/model.savp.None/',
'pretrained_model':[modeldir + '/view0/model.savp.None/model-300000', modeldir + '/view1/model.savp.None/model-300000'], # 'filepath of a pretrained model to resume training from.' ,
'sequence_length': 15, # 'sequence length to load, including context frames.' ,
'context_frames': 2, # of frames before predictions.' ,
'model': 'appflow', #'model architecture to use - CDNA, DNA, or STP' ,
'batch_size': 150,
'sdim':8,
'adim':4,
'orig_size':[48,64],
'ndesig':2,
'ncam':2,
}
|
11523370
|
from aimi import app
import sys
if __name__ == '__main__':
if sys.argv[-1] == "-t":
app.run(True)
else:
app.run(False)
|
11523381
|
from basehandler import BaseHandler
from oracle.oracle_db import KeyValue
import json
import logging
import time
from shared.liburl_wrapper import safe_pushtx
TURN_LENGTH_TIME = 60 * 1
class TransactionVerificationError(Exception):
pass
class TransactionSigner(BaseHandler):
def __init__(self, oracle):
self.oracle = oracle
self.btc = oracle.btc
self.kv = KeyValue(self.oracle.db)
def includes_me(self, prevtx):
for tx in prevtx:
if not 'redeemScript' in tx:
return False
my_turn = self.get_my_turn(tx['redeemScript'])
if my_turn < 0:
return False
return True
def get_my_turn(self, redeem_script):
# oracles sign transactions based on the order of their signatures
addresses = sorted(self.btc.decode_script(redeem_script)['addresses'])
for idx, addr in enumerate(addresses):
if self.btc.address_is_mine(addr):
return idx
return -1
def is_proper_transaction(self, tx, prevtxs):
logging.info('testing tx: %r' % tx)
logging.info('with prevtxs: %r' % prevtxs)
if not self.oracle.btc.is_valid_transaction(tx):
logging.debug("transaction invalid")
return False
inputs, outputs = self.btc.get_inputs_outputs(tx)
if not self.includes_me(prevtxs):
logging.debug("transaction does not include me")
return False
if self.oracle.btc.transaction_already_signed(tx, prevtxs):
logging.debug("transaction already signed")
return False
return True
def sign(self, tx, pwtxid, inputs, req_sigs):
# sign is being called by external contracts to initiate signing procedure
# it marks the transaction as being ready to be signed if received from fastcast
# and schedules signing -- in case oracles previous in line didn't want to sign it
logging.debug("tx: %r" % tx)
tx_inputs, tx_outputs = self.btc.get_inputs_outputs(tx)
#todo: shouldn't all the input scripts be guaranteed to be exactly the same by now?
turns = [self.get_my_turn(vin['redeemScript']) for vin in inputs if 'redeemScript' in vin]
my_turn = max(turns)
add_time = (my_turn - 1) * TURN_LENGTH_TIME
rq_hash = self.get_tx_hash(tx)
logging.info("sign -> rq_hash: {}".format(rq_hash))
try:
self.kv.store( 'signable', rq_hash, { 'inputs':inputs, 'sigs_so_far':0, 'req_sigs': req_sigs , 'pwtxid' : pwtxid } )
except:
logging.warning('duplicate sign task? this try..except should be removed ultimately!')
self.oracle.task_queue.save({
"operation": 'sign',
"json_data": json.dumps({"transaction": tx}),
"next_check": time.time() + add_time,
"done": 0,
})
def sign_now(self, tx):
# sign now signs the transaction and broadcasts it over the network
inputs, outputs = self.btc.get_inputs_outputs(tx)
rq_hash = self.get_tx_hash(tx)
rq_data = self.kv.get_by_section_key('signable', rq_hash)
if rq_data is None:
logging.debug("not scheduled to sign this")
return
inputs = rq_data['inputs']
sigs_so_far = rq_data['sigs_so_far']
req_sigs = rq_data['req_sigs']
assert( self.is_proper_transaction(tx, inputs) )
tx_sigs_count = self.btc.signatures_count(
tx,
inputs)
logging.debug("sigs count so far: %r; req_sigs: %r" % (tx_sigs_count, req_sigs))
if sigs_so_far > tx_sigs_count: # or > not >=? TODO
logging.debug('already signed a transaction with more sigs')
return
rq_data['sigs_so_far'] = tx_sigs_count
self.kv.update('signable', rq_hash, rq_data)
# ^ let's remember the tx with most sigs that we've seen.
if tx_sigs_count >= req_sigs:
logging.debug('already signed with enough keys')
return
pwtxid = rq_data['pwtxid']
signed_transaction = self.btc.sign_transaction(tx, inputs)
tx_new_sigs_count = self.btc.signatures_count(signed_transaction, inputs)
if (tx_new_sigs_count == tx_sigs_count):
logging.debug('failed signing transaction. already signed by me? aborting')
return
tx_sigs_count += 1
body = { 'pwtxid': pwtxid, 'operation':'sign', 'transaction': signed_transaction, 'sigs': tx_sigs_count, 'req_sigs': req_sigs }
logging.debug('broadcasting: %r' % body)
self.oracle.broadcast_with_fastcast(json.dumps(body))
if tx_sigs_count == req_sigs:
safe_pushtx(signed_transaction)
self.oracle.btc.send_transaction(signed_transaction)
rq_data['sigs_so_far'] = tx_sigs_count
self.kv.update('signable', rq_hash, rq_data)
def handle_request(self, request):
body = request.message
# if the oracle received a transaction from fastcast, it attempts to sign it
# all the validity checks are being handled by sign_now
tx = body['transaction']
self.sign_now(tx)
def handle_task(self, task):
# handles scheduled signing
# in a perfect world only the first oracle would have to call this
# and all the others would sign through handle_request
self.oracle.task_queue.done(task)
message = json.loads(task['json_data'])
tx = message['transaction']
rq_hash = self.get_tx_hash(tx)
rq_data = self.kv.get_by_section_key('signable', rq_hash)
assert(rq_data is not None)
logging.info("rq_data: %r" % rq_data)
if rq_data['sigs_so_far'] > 0:
logging.debug('I already signed more popular txs')
return
self.sign_now(tx)
|
11523384
|
import re
import unittest
from python_aternos import atjsparse
class TestJs2Py(unittest.TestCase):
def setUp(self) -> None:
self.tests = []
with open('token.txt', 'rt') as f:
lines = re.split(r'[\r\n]', f.read())
del lines[len(lines)-1] # Remove empty string
self.tests = lines
self.results = [
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2rKOA1IFdBcHhEM616cb',
'2iXh5W5uEYq5fWJIazQ6',
'<KEY>',
'<KEY>',
'UfLlemvKEE16ltk0hZNM',
'S1Oban9UGRXVIepREw9q',
'S1Oban9UGRXVIepREw9q',
'<KEY>',
'<KEY>',
'KbxzYCJUrFjWzbeZcAmE',
'K<KEY>'
]
def test_base64(self) -> None:
encoded = 'QEhlbGxvIFdvcmxkIQ=='
decoded = atjsparse.atob(encoded)
self.assertEqual(decoded, '@Hello World!')
def test_conv(self) -> None:
token = '(() => {window["AJAX_TOKEN"]=("<KEY>");})();'
f = atjsparse.to_ecma5_function(token)
self.assertEqual(f, '(function(){window["AJAX_TOKEN"]=("2r" + "KO" + "A1" + "IFdBcHhEM" + "61" + "6cb");})()')
def test_ecma6parse(self) -> None:
code = '''
window.t0 =
window['document']&&
!window[["p","Ma"].reverse().join('')]||
!window[["ut","meo","i","etT","s"].reverse().join('')];'''
part1 = '''window.t1 = Boolean(window['document']);'''
part2 = '''window.t2 = Boolean(!window[["p","Ma"].reverse().join('')]);'''
part3 = '''window.t3 = Boolean(!window[["ut","meo","i","etT","s"].reverse().join('')]);'''
ctx0 = atjsparse.exec(code)
ctx1 = atjsparse.exec(part1)
ctx2 = atjsparse.exec(part2)
ctx3 = atjsparse.exec(part3)
self.assertEqual(ctx1.window['t1'], True)
self.assertEqual(ctx2.window['t2'], False)
self.assertEqual(ctx3.window['t3'], False)
def test_exec(self) -> None:
for i, f in enumerate(self.tests):
ctx = atjsparse.exec(f)
res = ctx.window['AJAX_TOKEN']
self.assertEqual(res, self.results[i])
def tearDown(self) -> None:
del self.tests
del self.results
|
11523478
|
from records_mover.records.records_format import DelimitedRecordsFormat
import unittest
import json
class TestDelimitedRecordsFormat(unittest.TestCase):
def test_dumb(self):
records_format = DelimitedRecordsFormat(variant='dumb')
# Should match up with
# https://github.com/bluelabsio/records-mover/blob/master/docs/RECORDS_SPEC.md#dumb-variant
expected_hints = {
'compression': 'GZIP',
'dateformat': 'YYYY-MM-DD',
'datetimeformat': 'YYYY-MM-DD HH:MI:SS',
'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
'doublequote': False,
'encoding': 'UTF8',
'escape': None,
'field-delimiter': ',',
'quotechar': '"',
'quoting': None,
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS',
'header-row': False,
}
self.assertEqual(expected_hints, records_format.hints)
def test_csv(self):
records_format = DelimitedRecordsFormat(variant='csv')
# Should match up with
# https://github.com/bluelabsio/records-mover/blob/master/docs/RECORDS_SPEC.md#csv-variant
expected_hints = {
'compression': 'GZIP',
'dateformat': 'MM/DD/YY',
'datetimeformat': 'MM/DD/YY HH24:MI',
'datetimeformattz': 'MM/DD/YY HH24:MI',
'doublequote': True,
'encoding': 'UTF8',
'escape': None,
'field-delimiter': ',',
'quotechar': '"',
'quoting': 'minimal',
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS',
'header-row': True,
}
self.assertEqual(expected_hints, records_format.hints)
def test_with_altered_hints(self):
records_format = DelimitedRecordsFormat(variant='csv').alter_hints({'quotechar': 'A'})
# Should match up with
# https://github.com/bluelabsio/records-mover/blob/master/docs/RECORDS_SPEC.md#csv-variant
expected_hints = {
'compression': 'GZIP',
'dateformat': 'MM/DD/YY',
'datetimeformat': 'MM/DD/YY HH24:MI',
'datetimeformattz': 'MM/DD/YY HH24:MI',
'doublequote': True,
'encoding': 'UTF8',
'escape': None,
'field-delimiter': ',',
'quotechar': 'A',
'quoting': 'minimal',
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS',
'header-row': True,
}
self.assertEqual(expected_hints, records_format.hints)
self.assertEqual({'quotechar': 'A'}, records_format.custom_hints)
def test_eq(self):
records_format_1 = DelimitedRecordsFormat()
records_format_2 = DelimitedRecordsFormat()
self.assertTrue(records_format_1 == records_format_2)
def test_eq_error(self):
records_format_1 = DelimitedRecordsFormat()
records_format_2 = "wrong type"
self.assertTrue(records_format_1 != records_format_2)
def test_unsupported_variant(self):
with self.assertRaises(NotImplementedError):
DelimitedRecordsFormat(variant='fake_thing_i_just_made_up')
def test_json(self):
records_format = DelimitedRecordsFormat()
self.assertEqual({
'hints': {
'compression': 'GZIP',
'dateformat': 'YYYY-MM-DD',
'datetimeformat': 'YYYY-MM-DD HH24:MI:SS',
'datetimeformattz': 'YYYY-MM-DD HH:MI:SSOF',
'doublequote': False,
'encoding': 'UTF8',
'escape': '\\',
'field-delimiter': ',',
'header-row': False,
'quotechar': '"',
'quoting': None,
'record-terminator': '\n',
'timeonlyformat': 'HH24:MI:SS'},
'type': 'delimited',
'variant': 'bluelabs'
}, json.loads(records_format.json()))
def test_repr(self):
records_format = DelimitedRecordsFormat()
self.assertEqual('DelimitedRecordsFormat(bluelabs)', repr(records_format))
def test_generate_filename_gzip(self):
records_format = DelimitedRecordsFormat(hints={'compression': 'GZIP'})
self.assertEqual('foo.csv.gz', records_format.generate_filename('foo'))
def test_generate_filename_bzip(self):
records_format = DelimitedRecordsFormat(hints={'compression': 'BZIP'})
self.assertEqual('foo.csv.bz2', records_format.generate_filename('foo'))
def test_generate_filename_no_compression(self):
records_format = DelimitedRecordsFormat(hints={'compression': None})
self.assertEqual('foo.csv', records_format.generate_filename('foo'))
def test_alter_variant(self):
records_format = DelimitedRecordsFormat(variant='csv', hints={'compression': 'BZIP'})
new_records_format = records_format.alter_variant('bigquery')
self.assertEqual(records_format.variant, 'csv')
self.assertEqual(new_records_format.variant, 'bigquery')
|
11523513
|
package = "s3cmd"
version = "1.0.1"
url = "http://s3tools.org"
license = "GPL version 2"
short_description = "Command line tool for managing Amazon S3 and CloudFront services"
long_description = """
S3cmd lets you copy files from/to Amazon S3
(Simple Storage Service) using a simple to use
command line client. Supports rsync-like backup,
GPG encryption, and more. Also supports management
of Amazon's CloudFront content delivery network.
"""
|
11523551
|
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import \
absolute_sum_of_changes
class AbsoluteSumOfChanges(AggregationPrimitive):
"""Returns the sum over the absolute value
of consecutive changes in the series x.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.absolute_sum_of_changes
"""
name = "absolute_sum_of_changes"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def get_function(self):
return absolute_sum_of_changes
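
# Minimal sanity-check sketch (illustrative only, not part of the primitive):
# the primitive simply wraps tsfresh's absolute_sum_of_changes, so the wrapped
# function can be exercised directly on a small pandas Series.
if __name__ == "__main__":
    import pandas as pd

    func = AbsoluteSumOfChanges().get_function()
    # |3-1| + |2-3| + |5-2| = 6.0
    print(func(pd.Series([1.0, 3.0, 2.0, 5.0])))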
|
11523554
|
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
### Classification ###
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_decision_boundary(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
plot_decision_boundary(ax, clf, xx, yy, **params)
def plot_classification(ax, X, y, clf):
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
scatter = ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k', alpha=1.0)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_title('Bank Notes Classification')
handles, labels = scatter.legend_elements()
ax.legend(handles=handles, labels=['genuine', 'fake'])
def compare_classification(X, y, clfs, titles):
fig = plt.figure(figsize=(14, 4), dpi=100)
for i, clf in enumerate(clfs):
ax = fig.add_subplot(1, len(clfs), i+1)
plot_classification(ax, X, y, clf)
ax.set_title(titles[i])
### Regression ###
def plot_regression(ax, X, y, reg, scaler=None):
# plot the examples
ax.scatter(X, y, alpha=0.6)
# create feature matrix
xmin, xmax = ax.get_xlim()
x_line = np.linspace(xmin, xmax, 30).reshape(-1, 1)
x_line_predict = scaler.transform(x_line) if scaler else x_line
# predict
y_line = reg.predict(x_line_predict)
# plot the hypothesis
ax.plot(x_line, y_line, c='g', linewidth=3)
# formatting
ax.set_xlim(xmin, xmax)
ax.set_xlabel('planned online time (min)')
ax.set_ylabel('time spent online (min)')
ax.set_title('Online Procrastination');
def compare_regression(X, y, regs, titles, scaler=None):
fig = plt.figure(figsize=(14, 4), dpi=100)
for i, reg in enumerate(regs):
ax = fig.add_subplot(1, len(regs), i+1)
plot_regression(ax, X, y, reg, scaler)
ax.set_title(titles[i])
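
# Minimal usage sketch (illustrative only): assumes scikit-learn is installed;
# the synthetic data set and the two classifiers below are stand-ins chosen for
# the demo, not part of this module.
if __name__ == "__main__":
    from sklearn.datasets import make_blobs
    from sklearn.linear_model import LogisticRegression
    from sklearn.svm import SVC

    X, y = make_blobs(n_samples=200, centers=2, n_features=2, random_state=0)
    clfs = [LogisticRegression().fit(X, y), SVC(kernel='rbf').fit(X, y)]
    compare_classification(X, y, clfs, ['Logistic Regression', 'RBF SVM'])
    plt.show()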
|
11523572
|
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
from typing import Dict, List
def intable(value):
try:
int(value)
return True
except:
return False
def create_slice(index, dim):
slice_list = []
for i in range(0, dim):
slice_list.append(slice(None, None, None))
slice_list.append(index)
    return tuple(slice_list)  # return a tuple so it is a valid multi-dimensional index for torch/numpy
def neg_index(index, size):
n_index = np.arange(0, size)
mask = np.ones(size) > 0
mask[index] = False
n_index = n_index[mask]
return n_index
def set_cut_tensor(tensor, cut_dims):
data = tensor
param_list = []
for i in range(0, len(cut_dims)):
if len(cut_dims[i]) == 0:
param_list.append(None)
continue
data_slice = create_slice(cut_dims[i], i)
param_list.append(data[data_slice].cpu().detach().numpy())
data_slice = create_slice(neg_index(cut_dims[i], data.size(i)), i)
if data.requires_grad:
with torch.no_grad():
data.set_(data[data_slice])
else:
if isinstance(tensor, nn.Parameter):
data.set_(data[data_slice])
else:
data = data[data_slice]
data.grad = None
return data, param_list
def recovery_cut_tensor(tensor, cut_dims, param_list):
data = tensor
tensor_size = list(data.size())
current_params = data.detach().cpu().numpy()
for i in range(len(cut_dims) - 1, -1, -1):
if len(cut_dims[i]) == 0:
continue
tensor_size[i] += len(cut_dims[i])
array = np.zeros(tensor_size).astype("float32")
index = neg_index(cut_dims[i], tensor_size[i])
array[create_slice(index, i)] = current_params
if param_list is not None:
array[create_slice(cut_dims[i], i)] = param_list[i]
else:
array[create_slice(cut_dims[i], i)] = 0
current_params = array
new_data = torch.tensor(current_params, device=data.device)
with torch.no_grad():
data.set_(new_data)
data.grad = None
return data
def set_zero_tensor(tensor, cut_dims):
data = tensor
if isinstance(tensor, nn.Parameter):
data = tensor.data
param_list = []
for i in range(0, len(cut_dims)):
if len(cut_dims[i]) == 0:
param_list.append(None)
continue
data_slice = create_slice(cut_dims[i], i)
param_list.append(data[data_slice].detach().cpu().numpy())
with torch.no_grad():
data[data_slice] = 0
if isinstance(tensor, nn.Parameter):
tensor.data = data
return tensor, param_list
def recovery_zero_tensor(tensor, cut_dims, param_list):
data = tensor
if isinstance(tensor, nn.Parameter):
data = tensor.data
for i in range(len(cut_dims) - 1, -1, -1):
if len(cut_dims[i]) == 0:
continue
data_slice = create_slice(cut_dims[i], i)
if param_list is not None:
with torch.no_grad():
data[data_slice] = torch.tensor(param_list[i], device=data.device)
else:
with torch.no_grad():
data[data_slice] = 0
if isinstance(tensor, nn.Parameter):
tensor.data = data
return tensor
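
# Minimal round-trip sketch (illustrative only): removes row 1 of a small
# parameter with set_cut_tensor, then restores it from the saved values with
# recovery_cut_tensor. The shapes and indices here are made up for the demo.
if __name__ == "__main__":
    param = nn.Parameter(torch.arange(12, dtype=torch.float32).reshape(3, 4))
    cut_dims = [np.array([1]), np.array([], dtype=np.int64)]
    param, saved = set_cut_tensor(param, cut_dims)
    print(param.shape)  # torch.Size([2, 4]) -- row 1 removed
    param = recovery_cut_tensor(param, cut_dims, saved)
    print(param.shape)  # torch.Size([3, 4]) -- row 1 restored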
|
11523600
|
from panda3d.core import *
from direct.showbase.DirectObject import DirectObject
from direct.interval.MetaInterval import Sequence, Parallel
from direct.interval.LerpInterval import LerpHprInterval, LerpPosInterval
from direct.interval.FunctionInterval import Func
__all__ = ['CameraControler']
class CameraControler (DirectObject):
def __init__(self, pos=(0,0,3.0), offset=(0, 12, 12), speed=1.0,
zoom_speed=1.0, limits=(3.0, 500.0, -110, 110.0), relative_pan=True):
self.node = render.attach_new_node("node")
self.node.set_pos(pos)
self.gimbal = self.node.attach_new_node("gimbal")
base.camera.set_pos(render, offset)
base.camera.look_at(self.node)
base.camera.wrt_reparent_to(self.gimbal)
self.initial_pos=pos
self.initial_offset=offset
self.relative_pan=relative_pan
self.last_mouse_pos=Vec2(0,0)
self.speed=speed*100.0
self.move_speed=speed
self.zoom_speed=zoom_speed
self.zoom = 0.0
self.last_delta=Point2(0,0)
self.limits=limits[:2]
self.max_p=limits[2:4]
self.key_map = {'rotate': False, 'forward':False, 'back':False,
'left':False, 'right':False, 'pan':False,
'up':False, 'down':False}
taskMgr.add(self.update, "camcon_update")
self.bind_keys()
def set_view(self, view='front', num=0):
views={'front':(Vec3(90, 0, 0),Vec3(90, 0, 0)),
'back': (Vec3(-90, 0, 0),Vec3(-90, 0, 0)),
'left':(Vec3(0, 0, 0),Vec3(0, 0, 0)),
'right':(Vec3(180, 0, 0),Vec3(180, 0, 0)),
'top':(Vec3(90, 0, 0),Vec3(90, 90, 0)),
'bottom':(Vec3(90, 0, 0),Vec3(90, -90, 0))}
if view in views:
if self.node.get_hpr(render).almost_equal(views[view][0]) and self.gimbal.get_hpr(render).almost_equal(views[view][1]):
return
else:
if num>3:
self.node.set_hpr(render, views[view][0])
self.gimbal.set_hpr(render, views[view][1])
self.node.set_pos(render,(0,0,0))
return
Sequence(Parallel(LerpHprInterval(self.gimbal, 0.1, views[view][1], other=render),
LerpHprInterval(self.node, 0.1, views[view][0], other=render),
LerpPosInterval(self.node, 0.1, Vec3(0,0,0), other=render)),
Func(self.set_view, view, 1+num)
).start()
def bind_keys(self, rotate='mouse3', zoom_in='wheel_up', zoom_out='wheel_down',
left='a', right='d', forward='w', back='s', pan='shift-mouse1',
up='q', down='z', view_left=['4','arrow_left'], view_right=['6', 'arrow_right'],
view_front=['7', 'home'], view_back=['1','end'], view_top=['8', 'arrow_up'],
view_bottom=['2', 'arrow_down']):
''' Make the camera respond to key press/mouse move'''
self.ignoreAll()
self.accept(rotate, self.key_map.__setitem__, ['rotate', True])
self.accept(rotate+'-up', self.key_map.__setitem__, ['rotate', False])
self.accept(pan, self.key_map.__setitem__, ['pan', True])
self.accept(pan.split('-')[0]+'-up', self.key_map.__setitem__, ['pan', False])
self.accept(pan.split('-')[1]+'-up', self.key_map.__setitem__, ['pan', False])
self.accept(zoom_in, self.zoom_control,[1.0])
self.accept(zoom_out,self.zoom_control,[-1.0])
self.accept(view_left[0], self.set_view, ['left'])
self.accept(view_left[1], self.set_view, ['left'])
self.accept(view_right[0], self.set_view, ['right'])
self.accept(view_right[1], self.set_view, ['right'])
self.accept(view_top[0], self.set_view, ['top'])
self.accept(view_top[1], self.set_view, ['top'])
self.accept(view_bottom[0], self.set_view,[ 'bottom'])
self.accept(view_bottom[1], self.set_view,[ 'bottom'])
self.accept(view_front[0], self.set_view, ['front'])
self.accept(view_front[1], self.set_view, ['front'])
self.accept(view_back[0], self.set_view, ['back'])
self.accept(view_back[1], self.set_view, ['back'])
self.accept(left, self.key_map.__setitem__, ['left', True])
self.accept(left+'-up', self.key_map.__setitem__, ['left', False])
self.accept(right, self.key_map.__setitem__, ['right', True])
self.accept(right+'-up', self.key_map.__setitem__, ['right', False])
self.accept(forward, self.key_map.__setitem__, ['forward', True])
self.accept(forward+'-up', self.key_map.__setitem__, ['forward', False])
self.accept(back, self.key_map.__setitem__, ['back', True])
self.accept(back+'-up', self.key_map.__setitem__, ['back', False])
self.accept(up, self.key_map.__setitem__, ['up', True])
self.accept(up+'-up', self.key_map.__setitem__, ['up', False])
self.accept(down, self.key_map.__setitem__, ['down', True])
self.accept(down+'-up', self.key_map.__setitem__, ['down', False])
self.accept('space', self.debug)
def debug(self):
print('node', self.node.get_hpr(render))
print('gimbal',self.gimbal.get_hpr(render))
def reset(self):
self.node.set_pos(self.initial_pos)
self.node.set_hpr(0,0,0)
self.gimbal.set_hpr(0,0,0)
base.camera.set_pos(render, self.initial_offset)
base.camera.look_at(self.node)
def set_speed(self, speed):
self.speed=speed*6000.0
def set_zoom_speed(self, speed):
self.zoom_speed=speed*5.0
def zoom_control(self, amount):
self.zoom+=amount*self.zoom_speed
self.zoom=min(max(self.zoom, -6.0*self.zoom_speed), 6.0*self.zoom_speed)
def update(self, task):
dt = globalClock.getDt()
if dt > 1.0/12.0:
self.last_delta=Point2(0,0)
return task.cont
if self.key_map['forward']:
self.node.set_y(self.node,-self.move_speed*dt)
elif self.key_map['back']:
self.node.set_y(self.node,self.move_speed*dt)
if self.key_map['left']:
self.node.set_x(self.node,self.move_speed*dt)
elif self.key_map['right']:
self.node.set_x(self.node,-self.move_speed*dt)
if self.key_map['up']:
self.node.set_z(self.node,self.move_speed*dt)
elif self.key_map['down']:
self.node.set_z(self.node,-self.move_speed*dt)
if base.mouseWatcherNode.has_mouse():
if self.zoom != 0.0:
distance=base.camera.get_distance(self.node)
if (distance > self.limits[0] and self.zoom >0.0) or (distance < self.limits[1] and self.zoom < 0.0):
zoom_speed=self.zoom*dt
base.camera.set_y(base.camera, zoom_speed)
if base.camLens.is_orthographic():
film_size=Vec2(base.camLens.get_film_size())
film_size[0]-=film_size[0]*zoom_speed*0.1
film_size[1]-=film_size[1]*zoom_speed*0.1
base.camLens.set_film_size(film_size)
zoom_speed*=4.0
if self.zoom > 0.1:
self.zoom-=zoom_speed
elif self.zoom < -0.1:
self.zoom-=zoom_speed
else:
self.zoom=0.0
m_pos=base.mouseWatcherNode.get_mouse()
delta = m_pos- self.last_mouse_pos
self.last_mouse_pos = Vec2(m_pos)
#pan
if self.key_map['pan']:
if self.relative_pan:
pos=Vec3(delta.x, 0, delta.y)
self.node.set_pos(base.cam, self.node.get_pos(base.cam)-pos*self.move_speed*5.0)
else:
pos=Vec3(delta.x, delta.y, 0)
self.node.set_pos(self.node, pos*self.move_speed*5.0)
#rotate
if self.key_map['rotate']:
p=self.gimbal.get_p()- delta[1]*self.speed
self.gimbal.set_p(min(max(p, self.max_p[0]), self.max_p[1]))
self.node.set_h(self.node.get_h()- delta[0]*self.speed)
return task.again
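
# Minimal usage sketch (illustrative only): CameraControler relies on the
# builtins installed by ShowBase (base, render, loader, taskMgr), so it must be
# created after ShowBase. "smiley" is just the stock Panda3D sample model.
if __name__ == "__main__":
    from direct.showbase.ShowBase import ShowBase

    app = ShowBase()
    app.disableMouse()  # hand camera control over to CameraControler
    model = loader.loadModel("smiley")
    model.reparent_to(render)
    cam_control = CameraControler()
    app.run()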
|
11523646
|
import base64
import io
import logging
import os
from heapq import nsmallest
import numpy as np
from PIL import Image
from dotenv import load_dotenv
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
from objdict import ObjDict
from powerskill.timer import timefunc
from scipy.spatial import distance
load_dotenv()
def find_most_similar(image_vectors, all_image_features):
"""
Parameters
----------
image_vectors: Vectors of our input image
all_image_features: All vectorised images
    Returns: The cosine distance score per comparison (lower means more similar)
-------
"""
scorescos = {}
for key, vector in all_image_features.items():
scorecos = findDifference(image_vectors, vector)
        scorescos[key] = scorecos  # cosine distance (lower = more similar)
return scorescos
def predict(img64: str, model: Model):
"""
Parameters
----------
img64: The base64 encoded representation of the image
model: The ResNet model
Returns: The extracted features
-------
"""
# Load the image
temp_image = Image.open(io.BytesIO(img64))
newsize = (224, 224)
im = temp_image.resize(newsize)
x = image.img_to_array(im)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return model.predict(x)
def findDifference(image_vector1, image_vector2):
"""
Parameters
----------
image_vector1: Our source image vector
image_vector2: The target image vector
Returns: Cosine distance score
-------
"""
dist = distance.cdist(
image_vector1.reshape(1, -1),
image_vector2.reshape(1, -1),
metric="cosine")
return dist[0][0]
def extract_image_features(resnet_model, image64):
"""
Parameters
----------
resnet_model: The ResNet model for feature extraction
image64: The base64 encoded representation of the image
Returns: Extracted features
-------
"""
# Here we extract the features of the image
image_vectors = predict(image64, resnet_model)[0]
return image_vectors
def set_log_level(debug):
"""
:param debug: Boolean value
:return: None
"""
if bool(debug):
logging.basicConfig(level=logging.DEBUG)
set_log_level(bool(os.environ.get('DEBUG', '')))
def build_output_response(inputs, outputs, topncos, error=None):
"""
:param inputs: The inputs gathered from the extraction process
:param outputs: The outputs object - power skill output
:return: The json response object
"""
values = ObjDict()
values.values = []
entities = []
entities.append(topncos)
entity_values = {}
entity_values['most_similar'] = topncos
errors = ''
values.values.append({'recordId': inputs['values'][0]['recordId'], \
"errors": errors,
"data": entity_values,
"warnings": ""})
return values
@timefunc
def go_extract(inputs, all_image_features, resnet_model, topn):
"""
:param args:
:return:
"""
try:
outputs = {}
output_response = {}
topncos = {}
record_id = inputs['values'][0]['recordId']
# Get the base64 encoded image
encoded_image = inputs['values'][0]['data']['images']['data']
img = base64.b64decode(str(encoded_image).strip())
logging.info((f"Base64Encoded string {img[:100]}"))
image_vectors = extract_image_features(resnet_model, img)
compared_vectorscos = find_most_similar(image_vectors, all_image_features)
topncos = nsmallest(topn, compared_vectorscos, key=compared_vectorscos.get)
    except Exception as ProcessingError:
        logging.exception(ProcessingError)
        error = str(ProcessingError)
        output_response = build_output_response(inputs, outputs, topncos, error)
        logging.info(output_response)
        return output_response
    output_response = build_output_response(inputs, outputs, topncos)
    logging.info(output_response)
    return output_response
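
# Minimal sanity-check sketch (illustrative only): scores a random query
# vector against a tiny in-memory dictionary of vectors using the helpers
# above; real usage feeds ResNet feature vectors extracted from images.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    query_vector = rng.random(2048)
    catalogue = {"image_%d.jpg" % i: rng.random(2048) for i in range(5)}
    scores = find_most_similar(query_vector, catalogue)
    print(nsmallest(2, scores, key=scores.get))  # two closest keys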
|
11523677
|
import efel
import numpy as np
import math
import matplotlib.pyplot as plt
stim_start = 100  # ms
stim_end = 400  # ms
decay_start_after_stim = 1
decay_end_after_stim = 10
#feature_list = ['Spikecount','time_to_first_spike', 'min_AHP_indices','peak_voltage','mean_AP_amplitude', 'AHP_depth','AP_begin_indices', 'spike_half_width', 'min_AHP_indices','AP_amplitude','min_AHP_values','voltage_base','steady_state_voltage_stimend']
#feature_list = ['voltage_base','steady_state_voltage_stimend','decay_time_constant_after_stim','sag_amplitude','ohmic_input_resistance','voltage_after_stim','voltage_deflection','voltage_deflection_vb_ssse','sag_ratio1','sag_ratio2']
#feature_list = ['voltage_base','steady_state_voltage_stimend','voltage_after_stim','ohmic_input_resistance']
all_feature_list = ['voltage_base','AP_amplitude','voltage_after_stim','peak_time','spike_half_width','AHP_depth','chi']
feature_list = ['voltage_base','AP_amplitude','voltage_after_stim','peak_time','spike_half_width','AHP_depth']
feature_number = len(feature_list)
efel.api.setDoubleSetting('Threshold', -30)
def get_chi(orig_volts,volts,times):
chi = []
for curr_volts in volts:
chi.append(np.sum(np.sqrt(np.square(np.subtract(orig_volts,curr_volts))))/len(times))
return chi
def eval(target_volts_list, data_volts_list,times):
def diff_lists(lis1, lis2):
if lis1 is None or lis2 is None:
return 1000
len1, len2 = len(lis1), len(lis2)
if len1 > len2:
lis2 = np.concatenate((lis2, np.zeros(len1 - len2)), axis=0)
if len2 > len1:
lis1 = np.concatenate((lis1, np.zeros(len2 - len1)), axis=0)
# print(np.sqrt(((lis1 - lis2)**2).mean()))
# print('\n')
return np.sqrt(((lis1 - lis2)**2).mean())
all_features = []
curr_trace_target = {}
curr_trace_target['T'] = times
curr_trace_target['V'] = target_volts_list[0]
curr_trace_target['stim_start'] = [stim_start]
curr_trace_target['stim_end'] = [stim_end]
# curr_trace_target['stimulus_current'] = [-0.40]
curr_trace_target['decay_start_after_stim'] = [decay_start_after_stim]
curr_trace_target['decay_end_after_stim'] = [decay_end_after_stim]
#feature = efel.getFeatureValues([curr_trace_target], feature_list)
traces = [curr_trace_target]
nan_inds_bol = np.isnan(data_volts_list).any(axis=1)
nan_inds = [i for i, x in enumerate(nan_inds_bol) if x]
data_volts_list = np.delete(data_volts_list,nan_inds,axis=0)
for i in range(len(data_volts_list)):
curr_trace_data = {}
curr_trace_data['T'] = times
curr_trace_data['V'] = data_volts_list[i]
curr_trace_data['stim_start'] = [stim_start]
curr_trace_data['stim_end'] = [stim_end]
# curr_trace_data['stimulus_current'] = [-0.40]
curr_trace_data['decay_start_after_stim'] = [decay_start_after_stim]
curr_trace_data['decay_end_after_stim'] = [decay_end_after_stim]
traces.append(curr_trace_data)
print('in efel before getting features')
traces_results = efel.getFeatureValues(traces, feature_list)
print('in efel after getting features')
if 'chi' in all_feature_list:
all_chis = get_chi(target_volts_list[0],data_volts_list,times)
for i in range(len(data_volts_list)):
curr_feature_list=[]
f_counter = 0
for feature_name in all_feature_list:
            if feature_name != 'chi':
diff_feature = diff_lists(traces_results[0][feature_name], traces_results[i+1][feature_name])
# diff_feature = diff_feature * weights[f_counter]
if math.isnan(diff_feature):
diff_feature = 10000
# if diff_feature == 0:
# print('i is' +str(i) + 'feature is ' + feature_name)
# plt.plot(data_volts_list[i],'r')
# plt.plot(target_volts_list[0],'b')
# plt.show()
else:
diff_feature = all_chis[i]
if math.isnan(diff_feature):
diff_feature = 10000
curr_feature_list.append(diff_feature)
f_counter +=1
all_features.append(tuple(curr_feature_list))
all_features = np.array(all_features)
res = []
counter = 0
for ind in nan_inds_bol:
if ind:
res.append(np.zeros(len(all_feature_list))+100000)
else:
res.append(all_features[counter])
counter +=1
print(['best indvs ',res[0]])
return res
|
11523701
|
import json
from unittest.mock import ANY
from purpleserver.graph.tests.base import GraphTestCase
class TestSystemConnections(GraphTestCase):
def test_query_system_connections(self):
response = self.query(
"""
query get_system_connections {
system_connections {
id
carrier_id
carrier_name
test
active
}
}
""",
op_name="get_system_connections",
)
response_data = json.loads(response.content)
self.assertResponseNoErrors(response)
self.assertDictEqual(response_data, SYSTEM_CONNECTIONS)
class TestUserConnections(GraphTestCase):
def test_query_user_connections(self):
response = self.query(
"""
query get_user_connections {
user_connections {
__typename
... on CanadaPostSettings {
id
carrier_id
carrier_name
test
active
username
password
}
... on UPSSettings {
id
carrier_id
carrier_name
test
active
username
password
access_license_number
account_number
}
}
}
""",
op_name="get_user_connections",
)
response_data = json.loads(response.content)
self.assertResponseNoErrors(response)
self.assertDictEqual(response_data, USER_CONNECTIONS)
def test_create_user_connection(self):
response = self.query(
"""
mutation create_connection($data: CreateConnectionInput!) {
create_connection(input: $data) {
sendlesettings {
id
test
active
carrier_id
sendle_id
api_key
}
}
}
""",
op_name="create_connection",
variables=CONNECTION_DATA,
)
response_data = json.loads(response.content)
self.assertResponseNoErrors(response)
self.assertDictEqual(response_data, CONNECTION_RESPONSE)
def test_update_user_connection(self):
response = self.query(
"""
mutation update_connection($data: UpdateConnectionInput!) {
update_connection(input: $data) {
canadapostsettings {
carrier_id
username
customer_number
contract_id
password
}
}
}
""",
op_name="update_connection",
variables={
**CONNECTION_UPDATE_DATA,
"data": {**CONNECTION_UPDATE_DATA["data"], "id": self.carrier.id},
},
)
response_data = json.loads(response.content)
self.assertResponseNoErrors(response)
self.assertDictEqual(response_data, CONNECTION_UPDATE_RESPONSE)
SYSTEM_CONNECTIONS = {
"data": {
"system_connections": [
{
"id": ANY,
"carrier_id": "dhl_universal",
"carrier_name": "dhl_universal",
"test": True,
"active": True,
},
{
"id": ANY,
"carrier_id": "fedex_express",
"carrier_name": "fedex",
"test": True,
"active": True,
},
]
}
}
USER_CONNECTIONS = {
"data": {
"user_connections": [
{
"__typename": "UPSSettings",
"id": ANY,
"carrier_id": "ups_package",
"carrier_name": "ups",
"test": True,
"active": True,
"username": "test",
"password": "<PASSWORD>",
"access_license_number": "000000",
"account_number": "000000",
},
{
"__typename": "CanadaPostSettings",
"id": ANY,
"carrier_id": "canadapost",
"carrier_name": "canadapost",
"test": True,
"active": True,
"username": "6e93d53968881714",
"password": "<PASSWORD>",
},
]
}
}
CONNECTION_DATA = {
"data": {
"sendlesettings": {
"test": True,
"carrier_id": "sendle",
"sendle_id": "test_sendle_id",
"api_key": "test_api_key",
}
}
}
CONNECTION_RESPONSE = {
"data": {
"create_connection": {
"sendlesettings": {
"active": True,
"api_key": "test_api_key",
"carrier_id": "sendle",
"id": ANY,
"sendle_id": "test_sendle_id",
"test": True,
}
}
}
}
CONNECTION_UPDATE_DATA = {
"data": {
"canadapostsettings": {
"carrier_id": "canadapost_updated",
"username": "6e93d53968881714_updated",
"customer_number": "2004381_updated",
"contract_id": "42708517_updated",
"password": "<PASSWORD>",
}
}
}
CONNECTION_UPDATE_RESPONSE = {
"data": {
"update_connection": {
"canadapostsettings": {
"carrier_id": "canadapost_updated",
"contract_id": "42708517_updated",
"customer_number": "2004381_updated",
"password": "<PASSWORD>",
"username": "6e93d53968881714_updated",
}
}
}
}
|
11523804
|
if __name__ == "__main__":
## setup the Django with public settings for using the database
from main.tools import set_up
set_up.set_up_django_environment('main.tools.settings_for_script')
from law.models import Document
# prints the relative number of laws that have no text
import datetime
date = datetime.date(1986, 1, 1)
documents = Document.objects.filter(date__gt=date)
total = documents.count()
actual = documents.filter(text=None).count()
print(actual*1./total*100)
|
11523959
|
import gfapy
class Validator:
@staticmethod
def _validate_gfa_field(obj, datatype, fieldname = None):
"""Validate the content of a field of a Line instance.
Parameters:
obj: the value to be validated. It can be either a string (in which case
the encoded validation method is used) or any other kind of Python
object (in which case the decoded validation method is used).
datatype (str) : the name of the datatype to be used for the validation.
        The datatype name is used for the lookup in the FIELD_MODULE dictionary
and the validation method of the returned class is used.
fieldname (str) : optional, for error messages
Raises:
gfapy.error.FormatError : if the format of the string representation is
invalid; or the object contains strings with an invalid format
gfapy.error.ValueError : if the value of the decoded field is invalid
gfapy.error.TypeError : if the specified datatype is not defined or
if the type of the decoded field is invalid
gfapy.error.VersionError : if the value is invalid for the GFA version
for which the datatype is specified
"""
if isinstance(obj, str):
Validator.__validate_encoded_gfa_field( obj, datatype, fieldname)
else:
Validator.__validate_decoded_gfa_field( obj, datatype, fieldname)
@staticmethod
def __validate_decoded_gfa_field(obj, datatype, fieldname = None):
"""Validate a non-string field content.
Parameters:
obj : the field content to validate
datatype (str) : the datatype identifier
fieldname (str) : for error messages
Raises:
gfapy.error.TypeError: if the specified datatype is invalid or the
object is of a class which is not compatible with the datatype
gfapy.error.FormatError: if the format of a string in the object
is not compatible with the datatype; or if the object encoded into
a GFA string is incompatible with the specification
gfapy.error.VersionError: if the object value is invalid
for the specific GFA version for which this datatype is used
gfapy.error.ValueError: if the value of the object is invalid
"""
if isinstance(obj, gfapy.FieldArray):
return obj._validate_gfa_field(datatype, fieldname=fieldname)
mod = gfapy.Field.FIELD_MODULE.get(datatype)
if not mod:
raise gfapy.TypeError(
"Datatype unknown: {}".format(repr(datatype)))
return mod.validate_decoded(obj)
@staticmethod
def __validate_encoded_gfa_field(obj, datatype, fieldname = None):
"""Validate a string field content.
Parameters:
obj (str): the field content to validate
datatype (str) : the datatype identifier
fieldname (str) : for error messages
Raises:
gfapy.error.TypeError: if the specified datatype is invalid
gfapy.error.FormatError: if the format of the string is invalid
for the specified datatype
gfapy.error.VersionError: if the format of the string is invalid
for the specific GFA version for which this datatype is used
gfapy.error.ValueError: if the format of the string is valid,
but the value encoded by the string is invalid
"""
mod = gfapy.Field.FIELD_MODULE.get(datatype)
if not mod:
raise gfapy.TypeError(
"Datatype unknown: {}".format(repr(datatype)))
return mod.validate_encoded(obj)
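
# Minimal usage sketch (illustrative only; assumes the standard GFA tag
# datatypes registered in gfapy.Field.FIELD_MODULE, e.g. "i" for a signed
# integer): a well-formed string validates silently, a malformed one raises.
if __name__ == "__main__":
    Validator._validate_gfa_field("42", "i")  # passes, no exception
    try:
        Validator._validate_gfa_field("forty-two", "i", fieldname="xx")
    except gfapy.FormatError as err:
        print("rejected:", err)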
|
11523963
|
import pickle
import os
import random
import heapq
import operator
import hashlib
try:
import ujson as json
except:
import json
from copy import copy
from functools import partial
from collections import defaultdict, OrderedDict
from aser.eventuality import Eventuality
from aser.relation import Relation, relation_senses
from aser.concept import ASERConcept, ASERConceptInstancePair
from aser.database.base import SqliteConnection, MongoDBConnection
CHUNKSIZE = 32768
CONCEPT_TABLE_NAME = "Concepts"
CONCEPT_COLUMNS = ["_id", "pattern", "info"]
CONCEPT_COLUMN_TYPES = ["PRIMARY KEY", "TEXT", "BLOB"]
RELATION_TABLE_NAME = "Relations"
RELATION_COLUMNS = ["_id", "hid", "tid"] + relation_senses
RELATION_COLUMN_TYPES = ["PRIMARY KEY", "TEXT", "TEXT"] + ["REAL"] * len(relation_senses)
CONCEPTINSTANCEPAIR_TABLE_NAME = "ConceptInstancePairs"
CONCEPTINSTANCEPAIR_COLUMNS = ["_id", "cid", "eid", "pattern", "score"]
CONCEPTINSTANCEPAIR_COLUMN_TYPES = ["PRIMARY KEY", "TEXT", "TEXT", "TEXT", "REAL"]
class ASERConceptConnection(object):
def __init__(self, db_path, db="sqlite", mode='cache', chunksize=-1):
if db == "sqlite":
self._conn = SqliteConnection(db_path, chunksize if chunksize > 0 else CHUNKSIZE)
elif db == "mongoDB":
self._conn = MongoDBConnection(db_path, chunksize if chunksize > 0 else CHUNKSIZE)
else:
raise NotImplementedError("Error: %s database is not supported!" % (db))
self.mode = mode
if self.mode not in ["insert", "cache", "memory"]:
raise NotImplementedError("Error: only support insert/cache/memory modes.")
self.concept_table_name = CONCEPT_TABLE_NAME
self.concept_columns = CONCEPT_COLUMNS
self.concept_column_types = CONCEPT_COLUMN_TYPES
self.relation_table_name = RELATION_TABLE_NAME
self.relation_columns = RELATION_COLUMNS
self.relation_column_types = RELATION_COLUMN_TYPES
self.concept_instance_pair_table_name = CONCEPTINSTANCEPAIR_TABLE_NAME
self.concept_instance_pair_columns = CONCEPTINSTANCEPAIR_COLUMNS
self.concept_instance_pair_column_types = CONCEPTINSTANCEPAIR_COLUMN_TYPES
self.cids = set()
self.rids = set()
self.eids = set()
self.cid2concept_cache = dict()
self.rid2relation_cache = dict()
self.cid2eid_pattern_scores = dict()
self.eid2cid_scores = dict()
self.partial2cids_cache = dict()
self.partial2rids_cache = {"hid": dict()}
self.init()
def init(self):
"""
create tables
load id sets
load cache
"""
for table_name, columns, column_types in zip(
[self.concept_table_name, self.relation_table_name, self.concept_instance_pair_table_name],
[self.concept_columns, self.relation_columns, self.concept_instance_pair_columns],
[self.concept_column_types, self.relation_column_types, self.concept_instance_pair_column_types]):
if len(columns) == 0 or len(column_types) == 0:
raise NotImplementedError("Error: %s_columns and %s_column_types must be defined" % (table_name, table_name))
try:
self._conn.create_table(table_name, columns, column_types)
except:
pass
if self.mode == 'memory':
for c in map(self._convert_row_to_concept, self._conn.get_columns(self.concept_table_name, self.concept_columns)):
self.cids.add(c.cid)
self.cid2concept_cache[c.cid] = c
# handle another cache
for k, v in self.partial2cids_cache.items():
if getattr(c, k) not in v:
v[getattr(c, k)] = [c.cid]
else:
v[getattr(c, k)].append(c.cid)
for r in map(self._convert_row_to_relation, self._conn.get_columns(self.relation_table_name, self.relation_columns)):
self.rids.add(r.rid)
self.rid2relation_cache[r.rid] = r
# handle another cache
for k, v in self.partial2rids_cache.items():
if getattr(r, k) not in v:
v[getattr(r, k)] = [r.rid]
else:
v[getattr(r, k)].append(r.rid)
for p in map(self._convert_row_to_concept_instance_pair, self._conn.get_columns(self.concept_instance_pair_table_name, self.concept_instance_pair_columns)):
self.eids.add(p.eid)
# handle another cache
if p.cid not in self.cid2eid_pattern_scores:
self.cid2eid_pattern_scores[p.cid] = [(p.eid, p.pattern, p.score)]
else:
self.cid2eid_pattern_scores[p.cid].append((p.eid, p.pattern, p.score))
if p.eid not in self.eid2cid_scores:
self.eid2cid_scores[p.eid] = [(p.cid, p.score)]
else:
self.eid2cid_scores[p.eid].append((p.cid, p.score))
else:
for x in self._conn.get_columns(self.concept_table_name, ["_id"]):
self.cids.add(x["_id"])
for x in self._conn.get_columns(self.relation_table_name, ["_id"]):
self.rids.add(x["_id"])
for x in self._conn.get_columns(self.concept_instance_pair_table_name, ["eid"]):
self.eids.add(x["eid"])
def close(self):
self._conn.close()
self.eids.clear()
self.rids.clear()
        self.cids.clear()
self.cid2concept_cache.clear()
self.rid2relation_cache.clear()
self.cid2eid_pattern_scores.clear()
self.eid2cid_scores.clear()
for k in self.partial2cids_cache:
self.partial2cids_cache[k].clear()
for k in self.partial2rids_cache:
self.partial2rids_cache[k].clear()
"""
KG (Concepts)
"""
def _convert_concept_to_row(self, concept):
row = OrderedDict({"_id": concept.cid})
for c in self.concept_columns[1:-1]:
d = getattr(concept, c)
if isinstance(d, list):
row[c] = " ".join(d)
else:
row[c] = d
row["info"] = concept.encode()
return row
def _convert_row_to_concept(self, row):
concept = ASERConcept().decode(row["info"])
concept.cid = row["_id"]
return concept
def _insert_concept(self, concept):
row = self._convert_concept_to_row(concept)
self._conn.insert_row(self.concept_table_name, row)
if self.mode == "insert":
self.cids.add(concept.cid)
elif self.mode == "cache":
self.cids.add(concept.cid)
self.cid2concept_cache[concept.cid] = concept
for k, v in self.partial2cids_cache.items():
if concept.get(k) not in v:
v[concept.get(k)] = [concept.cid]
else:
v[concept.get(k)].append(concept.cid)
return concept
def _insert_concepts(self, concepts):
rows = list(map(self._convert_concept_to_row, concepts))
self._conn.insert_rows(self.concept_table_name, rows)
if self.mode == "insert":
for concept in concepts:
self.cids.add(concept.cid)
elif self.mode == "cache":
for concept in concepts:
self.cids.add(concept.cid)
self.cid2concept_cache[concept.cid] = concept
for k, v in self.partial2cids_cache.items():
if concept.get(k) in v:
v[concept.get(k)].append(concept.cid)
elif self.mode == "memory":
for concept in concepts:
self.cids.add(concept.cid)
self.cid2concept_cache[concept.cid] = concept
for k, v in self.partial2cids_cache.items():
if concept.get(k) not in v:
v[concept.get(k)] = [concept.cid]
else:
v[concept.get(k)].append(concept.cid)
return concepts
def _get_concept_and_store_in_cache(self, cid):
return self._get_concepts_and_store_in_cache([cid])[0]
def _get_concepts_and_store_in_cache(self, cids):
concepts = list(map(self._convert_row_to_concept, self._conn.select_rows(self.concept_table_name, cids, self.concept_columns)))
for concept in concepts:
if concept:
self.cid2concept_cache[concept.cid] = concept
cached_eid_pattern_scores = self.cid2eid_pattern_scores.get(concept.cid, None)
if not cached_eid_pattern_scores:
eid_pattern_scores = self._conn.get_rows_by_keys(self.concept_instance_pair_table_name, bys=["cid"], keys=[concept.cid], columns=["eid", "pattern", "score"])
cached_eid_pattern_scores = [
(x["eid"], x["pattern"], x["score"]) for x in eid_pattern_scores]
self.cid2eid_pattern_scores[concept.cid] = cached_eid_pattern_scores
concept.instances = cached_eid_pattern_scores
return concepts
def _update_concept(self, concept):
# TODO: Add frequency
if self.mode == "insert":
return None # don"t care
return concept
def _update_concepts(self, concepts):
# TODO: Add frequency
if self.mode == "insert":
return [None] * len(concepts) # don"t care
return concepts
def insert_concept(self, concept):
if concept.cid not in self.cids:
concept = self._insert_concept(concept)
else:
concept = self._update_concept(concept)
return concept
def insert_concepts(self, concepts):
results = []
new_indices = []
existing_indices = []
for idx, concept in enumerate(concepts):
if concept.cid not in self.cids:
new_indices.append(idx)
results.append(concept)
else:
existing_indices.append(idx)
results.append(None)
if len(new_indices):
new_concepts = [concepts[idx] for idx in new_indices]
self._insert_concepts(new_concepts)
if len(existing_indices):
existing_concepts = [concepts[idx] for idx in existing_indices]
for idx, updated_concept in enumerate(self._update_concepts(existing_concepts)):
results[existing_indices[idx]] = updated_concept
return results
def get_exact_match_concept(self, concept):
"""
concept can be ASERConcept, Dictionary, str
"""
if isinstance(concept, ASERConcept):
cid = concept.cid
elif isinstance(concept, dict):
cid = concept["cid"]
elif isinstance(concept, str):
cid = concept
else:
raise ValueError("Error: concept should be an instance of ASERConcept, a dictionary, or a cid.")
if cid not in self.cids:
return None
exact_match_concept = self.cid2concept_cache.get(cid, None)
if not exact_match_concept:
exact_match_concept = self._get_concept_and_store_in_cache(cid)
return exact_match_concept
def get_exact_match_concepts(self, concepts):
"""
concepts can be ASERConcepts, Dictionaries, strs
"""
exact_match_concepts = []
if len(concepts):
if isinstance(concepts[0], ASERConcept):
cids = [concept.cid for concept in concepts]
elif isinstance(concepts[0], dict):
cids = [concept["cid"] for concept in concepts]
elif isinstance(concepts[0], str):
cids = concepts
else:
raise ValueError("Error: concepts should instances of ASERConcept, dictionaries, or cids.")
missed_indices = []
missed_cids = []
for idx, cid in enumerate(cids):
if cid not in self.cids:
                    exact_match_concepts.append(None)
                    continue
exact_match_concept = self.cid2concept_cache.get(cid, None)
exact_match_concepts.append(exact_match_concept)
if not exact_match_concept:
missed_indices.append(idx)
missed_cids.append(cid)
for idx, exact_match_concept in enumerate(self._get_concepts_and_store_in_cache(missed_cids)):
exact_match_concepts[missed_indices[idx]] = exact_match_concept
return exact_match_concepts
def get_concepts_by_keys(self, bys, keys, order_bys=None, reverse=False, top_n=None):
assert len(bys) == len(keys)
for i in range(len(bys)-1, -1, -1):
if bys[i] not in self.concept_columns:
bys.pop(i)
keys.pop(i)
if len(bys) == 0:
return []
return list(map(self._convert_row_to_concept,
self._conn.get_rows_by_keys(self.concept_table_name, bys, keys, self.concept_columns, order_bys=order_bys, reverse=reverse, top_n=top_n)))
def get_concept_given_str(self, concept_str):
cid = ASERConcept.generate_cid(concept_str)
return self.get_exact_match_concept(cid)
def get_concepts_given_strs(self, concept_strs):
cids = list(map(ASERConcept.generate_cid, concept_strs))
return self.get_exact_match_concepts(cids)
"""
KG (Relations)
"""
def _convert_relation_to_row(self, relation):
row = OrderedDict({"_id": relation.rid})
for c in self.relation_columns[1:-len(relation_senses)]:
row[c] = getattr(relation, c)
for r in relation_senses:
row[r] = relation.relations.get(r, 0.0)
return row
def _convert_row_to_relation(self, row):
return Relation(row["hid"], row["tid"], {r: cnt for r, cnt in row.items() if isinstance(cnt, float) and cnt > 0.0})
def _insert_relation(self, relation):
row = self._convert_relation_to_row(relation)
self._conn.insert_row(self.relation_table_name, row)
if self.mode == "insert":
self.rids.add(relation.rid)
elif self.mode == "cache":
self.rids.add(relation.rid)
self.rid2relation_cache[relation.rid] = relation
for k, v in self.partial2rids_cache.items():
if getattr(relation, k) in v:
v[getattr(relation, k)].append(relation.rid)
elif self.mode == "memory":
self.rids.add(relation.rid)
self.rid2relation_cache[relation.rid] = relation
for k, v in self.partial2rids_cache.items():
if getattr(relation, k) not in v:
v[getattr(relation, k)] = [relation.rid]
else:
v[getattr(relation, k)].append(relation.rid)
return relation
def _insert_relations(self, relations):
rows = list(map(self._convert_relation_to_row, relations))
self._conn.insert_rows(self.relation_table_name, rows)
if self.mode == "insert":
for relation in relations:
self.rids.add(relation.rid)
elif self.mode == "cache":
for relation in relations:
self.rids.add(relation.rid)
self.rid2relation_cache[relation.rid] = relation
for k, v in self.partial2rids_cache.items():
if getattr(relation, k) in v:
v[getattr(relation, k)].append(relation.rid)
elif self.mode == "memory":
for relation in relations:
self.rids.add(relation.rid)
self.rid2relation_cache[relation.rid] = relation
for k, v in self.partial2rids_cache.items():
if getattr(relation, k) not in v:
v[getattr(relation, k)] = [relation.rid]
else:
v[getattr(relation, k)].append(relation.rid)
return relations
def _get_relation_and_store_in_cache(self, rid):
        return self._get_relations_and_store_in_cache([rid])[0]
def _get_relations_and_store_in_cache(self, rids):
relations = list(map(self._convert_row_to_relation, self._conn.select_rows(self.relation_table_name, rids, self.relation_columns)))
for relation in relations:
if relation:
self.rid2relation_cache[relation.rid] = relation
return relations
def _update_relation(self, relation):
# find new relation frequencies
update_columns = []
for r in relation_senses:
if relation.relations.get(r, 0.0) > 0.0:
update_columns.append(r)
# update db
update_op = self._conn.get_update_op(update_columns, "+")
row = self._convert_relation_to_row(relation)
self._conn.update_row(self.relation_table_name, row, update_op, update_columns)
# update cache
updated_relation = self.rid2relation_cache.get(relation.rid, None)
if updated_relation:
for r in update_columns:
                updated_relation.relations[r] = updated_relation.relations.get(r, 0.0) + relation.relations[r]
else:
updated_relation = self._get_relation_and_store_in_cache(relation.rid)
return updated_relation
def _update_relations(self, relations):
# update db
update_op = self._conn.get_update_op(relation_senses, "+")
rows = list(map(self._convert_relation_to_row, relations))
self._conn.update_rows(self.relation_table_name, rows, update_op, relation_senses)
# update cache
updated_relations = []
missed_indices = []
missed_rids = []
for idx, relation in enumerate(relations):
if relation.rid not in self.rids:
                updated_relations.append(None)
                continue
            updated_relation = self.rid2relation_cache.get(relation.rid, None)
            updated_relations.append(updated_relation)
if updated_relation:
for r in relation_senses:
                    if relation.relations.get(r, 0.0) > 0.0:
                        updated_relation.relations[r] = updated_relation.relations.get(r, 0.0) + relation.relations[r]
else:
missed_indices.append(idx)
missed_rids.append(relation.rid)
for idx, updated_relation in enumerate(self._get_relations_and_store_in_cache(missed_rids)):
updated_relations[missed_indices[idx]] = updated_relation
return updated_relations
def insert_relation(self, relation):
if relation.rid not in self.rid2relation_cache:
return self._insert_relation(relation)
else:
return self._update_relation(relation)
def insert_relations(self, relations):
results = []
new_relations = []
existing_indices = []
existing_relations = []
for idx, relation in enumerate(relations):
if relation.rid not in self.rids:
new_relations.append(relation)
results.append(relation)
else:
existing_indices.append(idx)
existing_relations.append(relation)
results.append(None)
if len(new_relations):
self._insert_relations(new_relations)
if len(existing_relations):
for idx, updated_relation in enumerate(self._update_relations(existing_relations)):
results[existing_indices[idx]] = updated_relation
return results
def get_exact_match_relation(self, relation):
"""
relation can be Relation, Dictionary, str, (ASERConcept, ASERConcept), (str, str)
"""
if isinstance(relation, Relation):
rid = relation.rid
elif isinstance(relation, dict):
rid = relation["rid"]
elif isinstance(relation, str):
rid = relation
elif isinstance(relation, (tuple, list)) and len(relation) == 2:
if isinstance(relation[0], ASERConcept) and isinstance(relation[1], ASERConcept):
rid = Relation.generate_rid(relation[0].cid, relation[1].cid)
elif isinstance(relation[0], str) and isinstance(relation[1], str):
rid = Relation.generate_rid(relation[0], relation[1])
else:
raise ValueError("Error: relation should be (an instance of ASERConcept, an instance of ASERConcept) or (hid, tid).")
else:
raise ValueError("Error: relation should be an instance of Relation, a dictionary, rid, (an instance of ASERConcept, an instance of ASERConcept), or (hid, tid).")
if rid not in self.rids:
return None
exact_match_relation = self.rid2relation_cache.get(rid, None)
if not exact_match_relation:
exact_match_relation = self._get_relation_and_store_in_cache(rid)
return exact_match_relation
def get_exact_match_relations(self, relations):
"""
relations can be Relations, Dictionaries, strs, [(ASERConcept, ASERConcept), ...], [(str, str), ...]
"""
exact_match_relations = []
if len(relations):
if isinstance(relations[0], Relation):
rids = [relation.rid for relation in relations]
elif isinstance(relations[0], dict):
rids = [relation["rid"] for relation in relations]
elif isinstance(relations[0], str):
rids = relations
elif isinstance(relations[0], (tuple, list)) and len(relations[0]) == 2:
if isinstance(relations[0][0], ASERConcept) and isinstance(relations[0][1], ASERConcept):
rids = [Relation.generate_rid(relation[0].cid, relation[1].cid) for relation in relations]
elif isinstance(relations[0][0], str) and isinstance(relations[0][1], str):
rids = [Relation.generate_rid(relation[0], relation[1]) for relation in relations]
else:
raise ValueError("Error: relations should be [(an instance of ASERConcept, an instance of ASERConcept), ...] or [(hid, tid), ...].")
else:
raise ValueError("Error: relations should be instances of Relation, dictionaries, rids, [(an instance of ASERConcept, an instance of ASERConcept), ...], or [(hid, tid), ...].")
missed_indices = []
missed_rids = []
for idx, rid in enumerate(rids):
if rid not in self.rids:
                    exact_match_relations.append(None)
                    continue
exact_match_relation = self.rid2relation_cache.get(rid, None)
exact_match_relations.append(exact_match_relation)
if not exact_match_relation:
missed_indices.append(idx)
missed_rids.append(rid)
for idx, exact_match_relation in enumerate(self._get_relations_and_store_in_cache(missed_rids)):
exact_match_relations[missed_indices[idx]] = exact_match_relation
return exact_match_relations
def get_relations_by_keys(self, bys, keys, order_bys=None, reverse=False, top_n=None):
assert len(bys) == len(keys)
for i in range(len(bys)-1, -1, -1):
if bys[i] not in self.relation_columns:
bys.pop(i)
keys.pop(i)
if len(bys) == 0:
return []
cache = None
by_index = -1
for k in ["hid", "tid"]:
if k in bys and k in self.partial2rids_cache:
cache = self.partial2rids_cache[k]
by_index = bys.index(k)
break
if cache:
if keys[by_index] in cache:
key_match_relations = [self.rid2relation_cache[rid] for rid in cache[keys[by_index]]]
else:
if self.mode == "memory":
return []
key_cache = []
key_match_relations = list(map(self._convert_row_to_relation,
self._conn.get_rows_by_keys(self.relation_table_name, [bys[by_index]], [keys[by_index]], self.relation_columns)))
for key_match_relation in key_match_relations:
if key_match_relation.rid not in self.rid2relation_cache:
self.rid2relation_cache[key_match_relation.rid] = key_match_relation
key_cache.append(key_match_relation.rid)
cache[keys[by_index]] = key_cache
for i in range(len(bys)):
if i == by_index:
continue
key_match_relations = list(filter(lambda x: x[bys[i]] == keys[i], key_match_relations))
if order_bys:
key_match_relations.sort(key=operator.itemgetter(*order_bys), reverse=reverse)
if top_n:
key_match_relations = key_match_relations[:top_n]
return key_match_relations
return list(map(self._convert_row_to_relation,
self._conn.get_rows_by_keys(self.relation_table_name, bys, keys, self.relation_columns, order_bys=order_bys, reverse=reverse, top_n=top_n)))
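    # Hedged usage sketch (illustrative only; `kg_conn`, `hid`, and `tid` are not defined in
    # this module): assuming `kg_conn` is an instance of this connection class, relations can
    # be fetched either exactly or by partial key.
    #
    #     rel = kg_conn.get_exact_match_relation((hid, tid))
    #     outgoing = kg_conn.get_relations_by_keys(bys=["hid"], keys=[hid], top_n=10)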
"""
KG (ConceptInstancePairs)
"""
def _convert_concept_instance_pair_to_row(self, concept_instance_pair):
if isinstance(concept_instance_pair, ASERConceptInstancePair):
row = OrderedDict({"_id": concept_instance_pair.pid,
"cid": concept_instance_pair.cid, "eid": concept_instance_pair.eid,
"pattern": concept_instance_pair.pattern, "score": concept_instance_pair.score})
elif isinstance(concept_instance_pair, (list, tuple)) and len(concept_instance_pair) == 3:
pid = ASERConceptInstancePair.generate_pid(concept_instance_pair[0].cid, concept_instance_pair[1].eid)
row = OrderedDict({"_id": pid,
"cid": concept_instance_pair[0].cid, "eid": concept_instance_pair[1].eid,
"pattern": concept_instance_pair[1].pattern, "score": concept_instance_pair[2]})
        else:
            raise ValueError("Error: concept_instance_pair should be an instance of ASERConceptInstancePair or a (concept, eventuality, score) triple.")
        return row
def _convert_row_to_concept_instance_pair(self, row):
return ASERConceptInstancePair(row["cid"], row["eid"], row["pattern"], row["score"])
def _insert_concept_instance_pair(self, concept_instance_pair):
concept, eventuality, score = concept_instance_pair
        row = self._convert_concept_instance_pair_to_row(concept_instance_pair)
self._conn.insert_row(self.concept_instance_pair_table_name, row)
if self.mode == "insert":
self.eids.add(eventuality.eid)
elif self.mode == "cache":
self.eids.add(eventuality.eid)
if concept.cid in self.cid2eid_pattern_scores:
self.cid2eid_pattern_scores[concept.cid].append((eventuality.eid, eventuality.pattern, score))
if eventuality.eid in self.eid2cid_scores:
self.eid2cid_scores[eventuality.eid].append((concept.cid, score))
        elif self.mode == "memory":
self.eids.add(eventuality.eid)
if concept.cid not in self.cid2eid_pattern_scores:
self.cid2eid_pattern_scores[concept.cid] = [(eventuality.eid, eventuality.pattern, score)]
else:
self.cid2eid_pattern_scores[concept.cid].append((eventuality.eid, eventuality.pattern, score))
if eventuality.eid not in self.eid2cid_scores:
self.eid2cid_scores[eventuality.eid] = [(concept.cid, score)]
else:
self.eid2cid_scores[eventuality.eid].append((concept.cid, score))
return self._convert_row_to_concept_instance_pair(row)
def _insert_concept_instance_pairs(self, concept_instance_pairs):
rows = list(map(self._convert_concept_instance_pair_to_row, concept_instance_pairs))
self._conn.insert_rows(self.concept_instance_pair_table_name, rows)
if self.mode == "insert":
for _, eventuality, _ in concept_instance_pairs:
self.eids.add(eventuality.eid)
elif self.mode == "cache":
for concept, eventuality, score in concept_instance_pairs:
self.eids.add(eventuality.eid)
if concept.cid in self.cid2eid_pattern_scores:
self.cid2eid_pattern_scores[concept.cid].append((eventuality.eid, eventuality.pattern, score))
if eventuality.eid in self.eid2cid_scores:
self.eid2cid_scores[eventuality.eid].append((concept.cid, score))
elif self.mode == "memory":
for concept, eventuality, score in concept_instance_pairs:
self.eids.add(eventuality.eid)
if concept.cid not in self.cid2eid_pattern_scores:
self.cid2eid_pattern_scores[concept.cid] = [(eventuality.eid, eventuality.pattern, score)]
else:
self.cid2eid_pattern_scores[concept.cid].append((eventuality.eid, eventuality.pattern, score))
if eventuality.eid not in self.eid2cid_scores:
self.eid2cid_scores[eventuality.eid] = [(concept.cid, score)]
else:
self.eid2cid_scores[eventuality.eid].append((concept.cid, score))
return [self._convert_row_to_concept_instance_pair(row) for row in rows]
def _update_concept_instance_pair(self, concept_instance_pair):
concept, eventuality, score = concept_instance_pair
# update db
update_op = self._conn.get_update_op(["score"], "+")
row = self._convert_concept_instance_pair_to_row(concept_instance_pair)
self._conn.update_row(self.concept_instance_pair_table_name, row, update_op, ["score"])
        # update cache
updated_score = None
if self.mode == "insert":
            return None  # don't care
cached_cid_scores = self.eid2cid_scores.get(eventuality.eid, None)
if cached_cid_scores:
for idx, cid_score in enumerate(cached_cid_scores):
if concept.cid == cid_score[0]:
updated_score = cid_score[1]+score
cached_cid_scores[idx] = (cid_score[0], updated_score)
break
cached_eid_pattern_scores = self.cid2eid_pattern_scores.get(concept.cid, None)
if cached_eid_pattern_scores:
for idx, eid_pattern_score in enumerate(cached_eid_pattern_scores):
if eventuality.eid == eid_pattern_score[0]:
updated_score = eid_pattern_score[2]+score
cached_eid_pattern_scores[idx] = (eid_pattern_score[0], eid_pattern_score[1], updated_score)
break
if updated_score is None:
updated_score = self._conn.select_row(self.concept_instance_pair_table_name, row["_id"], ["score"])["score"]
return ASERConceptInstancePair(concept.cid, eventuality.eid, eventuality.pattern, updated_score)
def _update_concept_instance_pairs(self, concept_instance_pairs):
# update db
update_op = self._conn.get_update_op(["score"], "+")
rows = list(map(self._convert_concept_instance_pair_to_row, concept_instance_pairs))
self._conn.update_rows(self.concept_instance_pair_table_name, rows, update_op, ["score"])
# update cache
if self.mode == "insert":
            return [None] * len(concept_instance_pairs)  # don't care
        updated_scores = []
        missed_indices = []
        missed_ids = []
        for idx, (concept, eventuality, score) in enumerate(concept_instance_pairs):
            updated_score = None
            cached_cid_scores = self.eid2cid_scores.get(eventuality.eid, None)
            if cached_cid_scores:
                for i, cid_score in enumerate(cached_cid_scores):
                    if concept.cid == cid_score[0]:
                        updated_score = cid_score[1] + score
                        cached_cid_scores[i] = (cid_score[0], updated_score)
                        break
            cached_eid_pattern_scores = self.cid2eid_pattern_scores.get(concept.cid, None)
            if cached_eid_pattern_scores:
                for i, eid_pattern_score in enumerate(cached_eid_pattern_scores):
                    if eventuality.eid == eid_pattern_score[0]:
                        updated_score = eid_pattern_score[2] + score
                        cached_eid_pattern_scores[i] = (eid_pattern_score[0], eid_pattern_score[1], updated_score)
                        break
            if updated_score is None:
                missed_indices.append(idx)
                missed_ids.append(rows[idx]["_id"])
                updated_scores.append(None)
            else:
                updated_scores.append(updated_score)
        if len(missed_indices):
            for idx, updated_row in enumerate(self._conn.select_rows(self.concept_instance_pair_table_name, missed_ids, ["score"])):
                updated_scores[missed_indices[idx]] = updated_row["score"]
        return [
            ASERConceptInstancePair(c.cid, e.eid, e.pattern, s)
            for (c, e, _), s in zip(concept_instance_pairs, updated_scores)
        ]
def insert_concept_instance_pair(self, concept_instance_pair):
concept, eventuality, score = concept_instance_pair
if concept.cid in self.cids and eventuality.eid in self.eids:
            return self._update_concept_instance_pair(concept_instance_pair)
        else:
            return self._insert_concept_instance_pair(concept_instance_pair)
def insert_concept_instance_pairs(self, concept_instance_pairs):
results = [None] * len(concept_instance_pairs)
new_indices = []
existing_indices = []
for idx, (concept, eventuality, score) in enumerate(concept_instance_pairs):
if concept.cid in self.cids and eventuality.eid in self.eids:
existing_indices.append(idx)
else:
new_indices.append(idx)
if len(new_indices):
for idx, new_pair in enumerate(self._insert_concept_instance_pairs([concept_instance_pairs[i] for i in new_indices])):
results[new_indices[idx]] = new_pair
if len(existing_indices):
for idx, updated_pair in enumerate(self._update_concept_instance_pairs([concept_instance_pairs[i] for i in existing_indices])):
results[existing_indices[idx]] = updated_pair
return results
def get_eventualities_given_concept(self, concept):
"""
concept can be ASERConcept, Dictionary, or cid
"""
if self.mode == "insert":
return []
if isinstance(concept, ASERConcept):
cid = concept.cid
elif isinstance(concept, dict):
cid = concept["cid"]
elif isinstance(concept, str):
cid = concept
else:
raise ValueError("Error: concept should be an instance of ASERConcept, a dictionary, or a cid.")
cached_eid_pattern_scores = self.cid2eid_pattern_scores.get(cid, None)
if cached_eid_pattern_scores:
return cached_eid_pattern_scores
        else:
            rows = self._conn.get_rows_by_keys(self.concept_instance_pair_table_name, bys=["cid"], keys=[cid], columns=["eid", "pattern", "score"])
            eid_pattern_scores = [(row["eid"], row["pattern"], row["score"]) for row in rows]
            return eid_pattern_scores
def get_concepts_given_eventuality(self, eventuality):
"""
eventuality can be Eventuality, Dictionary, or eid
"""
if self.mode == "insert":
return []
if isinstance(eventuality, Eventuality):
eid = eventuality.eid
elif isinstance(eventuality, dict):
eid = eventuality["eid"]
elif isinstance(eventuality, str):
eid = eventuality
else:
raise ValueError("Error: concept should be an instance of Eventuality, a dictionary, or a eid.")
cached_cid_scores = self.eid2cid_scores.get(eid, None)
if cached_cid_scores:
cids = [cid_score[0] for cid_score in cached_cid_scores]
scores = [cid_score[1] for cid_score in cached_cid_scores]
else:
cid_scores = self._conn.get_rows_by_keys(self.concept_instance_pair_table_name, bys=["eid"], keys=[eid], columns=["cid", "score"])
cids = [cid_score["cid"] for cid_score in cid_scores]
scores = [cid_score["score"] for cid_score in cid_scores]
concepts = self.get_exact_match_concepts(cids)
return list(zip(concepts, scores))
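    # Hedged usage sketch: the two lookups above map between the concept view and the
    # eventuality (instance) view. `kg_conn`, `concept`, and `eventuality` stand for objects
    # obtained elsewhere; they are not defined in this module.
    #
    #     eid_pattern_scores = kg_conn.get_eventualities_given_concept(concept)   # [(eid, pattern, score), ...]
    #     concept_scores = kg_conn.get_concepts_given_eventuality(eventuality)    # [(ASERConcept, score), ...]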
"""
Additional apis
"""
def get_related_concepts(self, concept):
"""
concept can be ASERConcept, Dictionary, or cid
"""
if self.mode == "insert":
return []
if isinstance(concept, ASERConcept):
cid = concept.cid
elif isinstance(concept, dict):
cid = concept["cid"]
elif isinstance(concept, str):
cid = concept
else:
raise ValueError("Error: concept should be an instance of ASERConcept, a dictionary, or a cid.")
# cid == hid
results = []
if self.mode == "memory":
if "hid" in self.partial2rids_cache:
related_rids = self.partial2rids_cache["hid"].get(cid, list())
related_relations = self.get_exact_match_relations(related_rids)
else:
related_relations = self.get_relations_by_keys(bys=["hid"], keys=[cid])
tids = [x.tid for x in related_relations]
t_concepts = self.get_exact_match_concepts(tids)
elif self.mode == "cache":
if "hid" in self.partial2rids_cache:
if cid in self.partial2rids_cache["hid"]: # hit
related_rids = self.partial2rids_cache["hid"].get(cid, list())
related_relations = self.get_exact_match_relations(related_rids)
tids = [x.tid for x in related_relations]
t_concepts = self.get_exact_match_concepts(tids)
else: # miss
related_relations = self.get_relations_by_keys(bys=["hid"], keys=[cid])
tids = [x.tid for x in related_relations]
t_concepts = self.get_exact_match_concepts(tids)
# update cache
self.partial2rids_cache["hid"][cid] = [relation.rid for relation in related_relations]
else:
related_relations = self.get_relations_by_keys(bys=["hid"], keys=[cid])
tids = [x.tid for x in related_relations]
t_concepts = self.get_exact_match_concepts(tids)
return sorted(zip(t_concepts, related_relations), key=lambda x: sum(x[1].relations.values()))
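# Hedged end-to-end sketch (illustrative only; `kg_conn` is assumed to be an instance of the
# connection class above, and `concept`/`eventuality` were obtained elsewhere):
#
#     kg_conn.insert_concept_instance_pair((concept, eventuality, 1.0))
#     for t_concept, relation in kg_conn.get_related_concepts(concept):
#         print(t_concept.cid, relation.rid)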
|
11524016
|
from hyperparameter_hunter import Environment, CVExperiment
from hyperparameter_hunter.callbacks.bases import lambda_callback
from hyperparameter_hunter.utils.learning_utils import get_toy_classification_data
from hyperparameter_hunter.callbacks.recipes import confusion_matrix_oof
from sklearn.model_selection import RepeatedStratifiedKFold
from xgboost import XGBClassifier
def printer_callback():
"""This is a simple callback example that will print out :attr:`CVExperiment.last_evaluation_results` at all
available time intervals, along with the repetition, fold, and run number. Of course, printing evaluations at the beginning
of each of the intervals, as is shown below, is pretty much useless. However, this shows that if you want to, you can do it
anyways and create your own replacement for the default logger... Or make anything else you might want"""
def printer_helper(_rep, _fold, _run, last_evaluation_results):
print(f"{_rep}.{_fold}.{_run} {last_evaluation_results}")
return lambda_callback(
on_exp_start=printer_helper,
on_exp_end=printer_helper,
on_rep_start=printer_helper,
on_rep_end=printer_helper,
on_fold_start=printer_helper,
on_fold_end=printer_helper,
on_run_start=printer_helper,
on_run_end=printer_helper,
)
def execute():
env = Environment(
train_dataset=get_toy_classification_data(),
results_path="HyperparameterHunterAssets",
metrics=["roc_auc_score"],
cv_type=RepeatedStratifiedKFold,
cv_params=dict(n_splits=5, n_repeats=2, random_state=32),
runs=2,
# Just instantiate `Environment` with your list of callbacks, and go about business as usual
experiment_callbacks=[printer_callback(), confusion_matrix_oof()],
# In addition to `printer_callback` made above, we're also adding the `confusion_matrix_oof` callback
# This, and other callbacks, can be found in `hyperparameter_hunter.callbacks.recipes`
)
experiment = CVExperiment(
model_initializer=XGBClassifier,
model_init_params={},
model_extra_params=dict(fit=dict(verbose=False)),
)
if __name__ == "__main__":
execute()
|
11524018
|
import tomsup as ts
def test_tutorial():
# Get the competitive penny game payoff matrix
penny = ts.PayoffMatrix("penny_competitive")
tom_1 = ts.TOM(level=1)
init_states = tom_1.get_internal_states()
init_states["own_states"]["p_k"] = [0.3, 0.7]
tom_1.set_internal_states(init_states)
# print the changed states
tom_1.print_internal()
|
11524037
|
import pytest
from eth_tester.exceptions import TransactionFailed
from plasma_core.constants import NULL_ADDRESS, NULL_ADDRESS_HEX
from tests_utils.constants import PAYMENT_TX_MAX_INPUT_SIZE
ETH_ADDRESS_HEX = NULL_ADDRESS_HEX
@pytest.mark.parametrize("num_inputs", range(1, PAYMENT_TX_MAX_INPUT_SIZE))
def test_start_in_flight_exit_should_succeed(testlang, num_inputs):
amount = 100
owners = []
deposit_ids = []
for i in range(0, num_inputs):
owners.append(testlang.accounts[i])
deposit_ids.append(testlang.deposit(owners[i], amount))
spend_id = testlang.spend_utxo(deposit_ids, owners, outputs=[(owners[0].address, NULL_ADDRESS, amount)])
testlang.start_in_flight_exit(spend_id)
# Exit was created
in_flight_exit = testlang.get_in_flight_exit(spend_id)
assert in_flight_exit.exit_start_timestamp == testlang.timestamp
assert in_flight_exit.exit_map == 0
assert in_flight_exit.bond_owner == owners[0].address
assert in_flight_exit.oldest_competitor == 0
# Inputs are correctly set
for i in range(0, num_inputs):
input_info = in_flight_exit.get_input(i)
assert input_info.exit_target == owners[i].address
assert input_info.token == ETH_ADDRESS_HEX
assert input_info.amount == amount
# Remaining inputs are still unset
for i in range(num_inputs, PAYMENT_TX_MAX_INPUT_SIZE):
input_info = in_flight_exit.get_input(i)
assert input_info.exit_target == NULL_ADDRESS_HEX
assert input_info.amount == 0
@pytest.mark.parametrize("num_inputs", range(1, PAYMENT_TX_MAX_INPUT_SIZE))
def test_start_in_flight_exit_with_erc20_tokens_should_succeed(testlang, token, num_inputs):
amount = 100
owners = []
deposit_ids = []
for i in range(0, num_inputs):
owners.append(testlang.accounts[i])
deposit_ids.append(testlang.deposit_token(owners[i], token, amount))
spend_id = testlang.spend_utxo(deposit_ids, owners, outputs=[(owners[0].address, token.address, amount)])
testlang.start_in_flight_exit(spend_id)
# Exit was created
in_flight_exit = testlang.get_in_flight_exit(spend_id)
assert in_flight_exit.exit_start_timestamp == testlang.timestamp
assert in_flight_exit.exit_map == 0
assert in_flight_exit.bond_owner == owners[0].address
assert in_flight_exit.oldest_competitor == 0
# Inputs are correctly set
for i in range(0, num_inputs):
input_info = in_flight_exit.get_input(i)
assert input_info.exit_target == owners[i].address
assert input_info.token == token.address
assert input_info.amount == amount
# Remaining inputs are still unset
for i in range(num_inputs, PAYMENT_TX_MAX_INPUT_SIZE):
input_info = in_flight_exit.get_input(i)
assert input_info.exit_target == NULL_ADDRESS_HEX
assert input_info.amount == 0
def test_start_in_flight_exit_with_erc20_token_and_eth_should_succeed(testlang, token):
owner = testlang.accounts[0]
deposit_eth_id = testlang.deposit(owner, 100)
deposit_token_id = testlang.deposit_token(owner, token, 110)
spend_id = testlang.spend_utxo(
[deposit_eth_id, deposit_token_id],
[owner, owner],
[(owner.address, NULL_ADDRESS, 100), (owner.address, token.address, 110)]
)
testlang.start_in_flight_exit(spend_id)
# Exit was created
in_flight_exit = testlang.get_in_flight_exit(spend_id)
assert in_flight_exit.exit_start_timestamp == testlang.timestamp
assert in_flight_exit.exit_map == 0
assert in_flight_exit.bond_owner == owner.address
assert in_flight_exit.oldest_competitor == 0
# Inputs are correctly set
input_info = in_flight_exit.get_input(0)
assert input_info.exit_target == owner.address
assert input_info.token == ETH_ADDRESS_HEX
assert input_info.amount == 100
input_info = in_flight_exit.get_input(1)
assert input_info.exit_target == owner.address
assert input_info.token == token.address
assert input_info.amount == 110
# Remaining inputs are still unset
for i in range(2, PAYMENT_TX_MAX_INPUT_SIZE):
input_info = in_flight_exit.get_input(i)
assert input_info.exit_target == NULL_ADDRESS_HEX
assert input_info.amount == 0
def test_start_in_flight_exit_with_an_output_with_a_token_not_from_inputs_should_fail(testlang, token):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner], [(owner.address, token.address, 100)])
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id)
@pytest.mark.parametrize(
"output_values", [
[501], [500, 1], [100, 100, 301], [100, 100, 100, 201], [100, 100, 100, 100, 101]
]
)
def test_start_in_flight_exit_that_spends_more_than_value_of_inputs_should_fail(testlang, token, output_values):
owner, amount = testlang.accounts[0], 100
outputs = [(owner.address, token.address, value) for value in output_values]
deposits = [testlang.deposit_token(owner, token, amount) for _ in range(PAYMENT_TX_MAX_INPUT_SIZE)]
spend_id = testlang.spend_utxo(deposits, [owner] * PAYMENT_TX_MAX_INPUT_SIZE, outputs, force_invalid=True)
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id)
def test_start_in_flight_exit_invalid_signature_should_fail(testlang, token):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit_token(owner_1, token, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_2], outputs=[(owner_1.address, token.address, amount)], force_invalid=True)
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id)
def test_start_in_flight_exit_invalid_bond_should_fail(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner], outputs=[(owner.address, NULL_ADDRESS, amount)])
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id, bond=0)
def test_start_in_flight_exit_invalid_spend_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner_2], outputs=[(owner_1.address, NULL_ADDRESS, amount)], force_invalid=True)
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id)
def test_start_in_flight_exit_invalid_proof_should_fail(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner], outputs=[(owner.address, NULL_ADDRESS, amount)])
proofs = [b'']
(encoded_spend, encoded_inputs, input_pos, _, signatures) = testlang.get_in_flight_exit_info(spend_id)
bond = testlang.root_chain.inFlightExitBond()
with pytest.raises(TransactionFailed):
testlang.root_chain.startInFlightExit(encoded_spend, encoded_inputs, proofs, signatures, input_pos, value=bond)
def test_start_in_flight_exit_twice_should_fail(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id], [owner], outputs=[(owner.address, NULL_ADDRESS, amount)])
# First time should succeed
testlang.start_in_flight_exit(spend_id)
# Second time should fail
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id)
def test_start_in_flight_exit_invalid_outputs_should_fail(testlang):
owner_1, owner_2, amount = testlang.accounts[0], testlang.accounts[1], 100
deposit_id = testlang.deposit(owner_1, amount)
# Create a transaction with outputs greater than inputs
output = (owner_2.address, NULL_ADDRESS, amount * 2)
spend_id = testlang.spend_utxo([deposit_id], [owner_1], [output], force_invalid=True)
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id)
def test_start_in_flight_exit_spending_the_same_input_twice_should_fail(testlang):
owner, amount = testlang.accounts[0], 100
deposit_id = testlang.deposit(owner, amount)
spend_id = testlang.spend_utxo([deposit_id] * 2,
[owner] * 2,
[(owner.address, NULL_ADDRESS, amount)],
force_invalid=True)
with pytest.raises(TransactionFailed):
testlang.start_in_flight_exit(spend_id)
def test_start_in_flight_exit_with_max_num_different_tokens_should_succeed(testlang, get_contract):
owner, amount, tokens_no = testlang.accounts[0], 100, PAYMENT_TX_MAX_INPUT_SIZE
tokens = [get_contract('ERC20Mintable') for _ in range(tokens_no)]
deposits = [testlang.deposit_token(owner, tokens[i], amount) for i in range(tokens_no)]
outputs = [(owner.address, tokens[i].address, amount) for i in range(PAYMENT_TX_MAX_INPUT_SIZE)]
spend_id = testlang.spend_utxo(deposits, [owner] * tokens_no, outputs)
testlang.start_in_flight_exit(spend_id, sender=owner)
in_flight_exit = testlang.get_in_flight_exit(spend_id)
assert in_flight_exit.bond_owner == owner.address
# TODO: add test_start_in_flight_exit_with_holes_in_inputs_should_fail
|
11524045
|
from btchip.btchip import getDongle # noqa
from binance_chain.ledger.client import LedgerApp # noqa
from binance_chain.ledger.wallet import LedgerWallet # noqa
|
11524051
|
from haystack.file_converter.base import FileTypeClassifier
from haystack.file_converter.docx import DocxToTextConverter
from haystack.file_converter.markdown import MarkdownConverter
from haystack.file_converter.pdf import PDFToTextConverter
from haystack.file_converter.tika import TikaConverter
from haystack.file_converter.txt import TextConverter
from haystack.file_converter.image import ImageToTextConverter
from haystack.file_converter.pdf import PDFToTextOCRConverter
|
11524055
|
from __future__ import with_statement
import numpy
from .._common import to_output
__all__ = [
"read",
"write",
]
header_to_unit = {
"ELEM": "",
"X": "(M)",
"Y": "(M)",
"Z": "(M)",
"PRES": "(PA)",
"P": "(PA)",
"TEMP": "(DEC-C)",
"T": "(DEC-C)",
"PCAP_GL": "(PA)",
"PCAP": "(PA)",
"DEN_G": "(KG/M**3)",
"DG": "(KG/M**3)",
"DEN_L": "(KG/M**3)",
"DW": "(KG/M**3)",
"ELEM1": "",
"ELEM2": "",
"HEAT": "(W)",
"FLOW": "(KG/S)",
"FLOW_G": "(KG/S)",
"FLOW_L": "(KG/S)",
}
def read(filename, file_type, file_format, labels_order):
"""Read OUTPUT_{ELEME, CONNE}.csv."""
with open(filename, "r") as f:
headers, times, variables = _read_csv(f, file_type)
ilab = 1 if file_type == "element" else 2
headers = headers[ilab:]
labels = [[v[:ilab] for v in variable] for variable in variables]
labels = (
[[l[0] for l in label] for label in labels]
if file_type == "element"
else labels
)
variables = numpy.array(
[[v[ilab:] for v in variable] for variable in variables]
)
return to_output(
file_type, file_format, labels_order, headers, times, labels, variables
)
def _read_csv(f, file_type):
"""Read CSV table."""
# Read header
line = f.readline().replace('"', "")
headers = [l.strip() for l in line.split(",")]
# Skip second line (unit)
line = f.readline()
# Check third line (does it start with TIME?)
line = f.readline()
single = not line.startswith('"TIME')
# Read data
if single:
times, variables = [None], [[]]
else:
times, variables = [], []
line = line.replace('"', "").strip()
ilab = 1 if file_type == "element" else 2
while line:
line = line.split(",")
# Time step
if line[0].startswith("TIME"):
line = line[0].split()
times.append(float(line[-1]))
variables.append([])
# Output
else:
tmp = [l.strip() for l in line[:ilab]]
tmp += [float(l.strip()) for l in line[ilab:]]
variables[-1].append(tmp)
line = f.readline().strip().replace('"', "")
return headers, times, variables
def write(filename, output):
"""Write OUTPUT_{ELEME, CONNE}.csv."""
out = output[-1]
headers = ["ELEM"] if out.type == "element" else ["ELEM1", "ELEM2"]
headers += ["X"] if "X" in out.data.keys() else []
headers += ["Y"] if "Y" in out.data.keys() else []
headers += ["Z"] if "Z" in out.data.keys() else []
headers += [k for k in out.data.keys() if k not in {"X", "Y", "Z"}]
with open(filename, "w") as f:
_write_csv(f, output, headers)
def _write_csv(f, output, headers):
"""Write CSV table."""
# Headers
units = [
header_to_unit[header] if header in header_to_unit.keys() else " (-)"
for header in headers
]
f.write(",".join('"{:>18}"'.format(header) for header in headers) + "\n")
f.write(",".join('"{:>18}"'.format(unit) for unit in units) + "\n")
# Data
formats = [
'"{:>18}"' if header.startswith("ELEM") else "{:20.12e}" for header in headers
]
for out in output:
# Time step
f.write('"TIME [sec] {:.8e}"\n'.format(out.time))
# Table
for i, label in enumerate(out.labels):
record = [label] if isinstance(label, str) else [l for l in label]
record += [out.data[k][i] for k in headers if not k.startswith("ELEM")]
record = (
",".join(
fmt.format(rec) if rec is not None else fmt.format(0.0)
for fmt, rec in zip(formats, record)
)
+ "\n"
)
f.write(record)
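# Hedged usage sketch: round-trip an element output table with the helpers above. The file
# names and the "csv" file_format string passed through to `to_output` are assumptions made
# only for illustration.
#
#     output = read("OUTPUT_ELEME.csv", "element", "csv", labels_order=None)
#     write("OUTPUT_ELEME_copy.csv", output)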
|
11524069
|
class Context:
def iter_roots(self):
c = self
while c:
if isinstance(c, ContextRoot):
yield c
c = c.get_context_parent()
def get_context_parent(self):
raise NotImplementedError
def get_context(self):
for r in self.iter_roots():
return r
def set_label(self, name, obj):
self.get_context().context[name] = obj
def get_label(self, name):
for r in self.iter_roots():
ret = r.context.get(name)
if ret:
return ret
    def del_label(self, name):
        context = self.get_context().context
        if name in context:
            del context[name]
            return True
        return False
class ContextRoot(Context):
def __init__(self):
super().__init__()
self.context = {}
def destroy_context(self):
self.context.clear()
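# Hedged usage sketch: `Context` leaves get_context_parent abstract, so a minimal concrete
# node is defined here purely for illustration.
#
#     class Node(Context):
#         def __init__(self, parent):
#             self.parent = parent
#         def get_context_parent(self):
#             return self.parent
#
#     root = ContextRoot()
#     node = Node(root)
#     node.set_label("x", 42)
#     assert node.get_label("x") == 42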
|
11524079
|
import unittest
from unittest.mock import MagicMock
from sanic_prometheus.endpoint import fn_by_type, get_from_url
def mk_request(url):
req = MagicMock()
req.path = url
return req
def az_url():
return "/".join('abcdefghijklmnopqrstuvwxyz')
class TestUrlEndpoint(unittest.TestCase):
def test_just_url(self):
fn = fn_by_type('url', None)
url = '/check/this/url'
self.assertEqual(fn(mk_request(url)), url)
def test_url_with_limit(self):
fn = fn_by_type('url:3', None)
self.assertEqual(fn(mk_request('/check/this/awesome/url')),
'/check/this/awesome')
self.assertEqual(fn(mk_request('/check/this')), '/check/this')
def test_url_bad_limit(self):
self.assertRaises(ValueError, fn_by_type, 'url:lakdh', None)
def test_get_endpoint_fn_argument_is_ignored(self):
should_be_ignored = lambda x: x
self.assertNotEqual(fn_by_type('url', should_be_ignored),
should_be_ignored)
class TestGetFromUrl(unittest.TestCase):
def test_no_limit_set_by_default(self):
test_url = az_url()
self.assertEqual(get_from_url(mk_request(test_url)), test_url)
def test_zero_or_negative_limit_eq_default_behaviour(self):
test_url = az_url()
self.assertEqual(get_from_url(mk_request(test_url), lim=0), test_url)
def test_lim_works_as_expected(self):
self.assertEqual(get_from_url(mk_request('/a'), lim=1), '/a')
self.assertEqual(get_from_url(mk_request('/a/b/c'), lim=1), '/a')
self.assertEqual(get_from_url(mk_request('/a'), lim=2), '/a')
self.assertEqual(get_from_url(mk_request('/a/'), lim=1), '/a')
|
11524082
|
import torch
import numpy as np
import copy
import shelve
import collections
from collections import OrderedDict
import matplotlib as mpl
import matplotlib.pyplot as plt
import random
import math
import os
import enum
import yaml
from termcolor import colored
from . import Distribution
from .. import util
class EmpiricalType(enum.Enum):
MEMORY = 1
FILE = 2
CONCAT_MEMORY = 3
CONCAT_FILE = 4
class Empirical(Distribution):
def __init__(self, values=None, log_weights=None, weights=None, file_name=None, file_read_only=False, file_sync_timeout=25, file_writeback=False, concat_empiricals=None, concat_empirical_file_names=None, name='Empirical'):
super().__init__(name)
self._finalized = False
self._read_only = file_read_only
if self._read_only:
if not os.path.exists(file_name):
raise ValueError('File not found: {}'.format(file_name))
shelf_flag = 'r'
else:
shelf_flag = 'c'
self._file_name = file_name
self._closed = False
self._categorical = None
self._log_weights = []
self._length = 0
self._uniform_weights = False
self._type = None
self._metadata = OrderedDict()
if concat_empiricals is not None or concat_empirical_file_names is not None:
if concat_empiricals is not None:
if type(concat_empiricals) == list:
if type(concat_empiricals[0]) == Empirical:
self._concat_empiricals = concat_empiricals
else:
raise TypeError('Expecting concat_empiricals to be a list of Empiricals.')
else:
raise TypeError('Expecting concat_empiricals to be a list of Empiricals.')
else:
if type(concat_empirical_file_names) == list:
if type(concat_empirical_file_names[0]) == str:
concat_empirical_file_names = list(map(os.path.abspath, concat_empirical_file_names))
self._concat_empiricals = [Empirical(file_name=f, file_read_only=True) for f in concat_empirical_file_names]
else:
raise TypeError('Expecting concat_empirical_file_names to be a list of file names.')
else:
raise TypeError('Expecting concat_empirical_file_names to be a list of file names.')
self._concat_cum_sizes = np.cumsum([emp.length for emp in self._concat_empiricals])
self._length = self._concat_cum_sizes[-1]
self._log_weights = torch.cat([util.to_tensor(emp._log_weights) for emp in self._concat_empiricals])
self._categorical = torch.distributions.Categorical(logits=util.to_tensor(self._log_weights, dtype=torch.float64))
weights = self._categorical.probs
self._effective_sample_size = 1. / weights.pow(2).sum()
name = 'Concatenated empirical, length: {:,}, ESS: {:,.2f}'.format(self._length, self._effective_sample_size)
# self._metadata.append('Begin concatenate empiricals ({})'.format(len(self._concat_empiricals)))
# for i, emp in enumerate(self._concat_empiricals):
# self._metadata.append('Begin source empirical ({}/{})'.format(i+1, len(self._concat_empiricals)))
# self._metadata.extend(emp._metadata)
# self._metadata.append('End source empirical ({}/{})'.format(i+1, len(self._concat_empiricals)))
# self._metadata.append('End concatenate empiricals ({})'.format(len(self._concat_empiricals)))
self.add_metadata(op='concat', num_empiricals=len(self._concat_empiricals), metadata_empiricals=[emp._metadata for emp in self._concat_empiricals])
self.rename(name)
self._finalized = True
self._read_only = True
if file_name is None:
self._type = EmpiricalType.CONCAT_MEMORY
else:
if concat_empirical_file_names is None:
raise ValueError('Expecting concat_empirical_file_names to write a concatenated empirical file.')
if shelf_flag == 'r':
raise RuntimeError('Empirical file already exists, cannot write new concatenated Empirical: {}'.format(self._file_name))
else:
self._type = EmpiricalType.CONCAT_FILE
self._shelf = shelve.open(self._file_name, flag=shelf_flag, writeback=False)
self._shelf['concat_empirical_file_names'] = concat_empirical_file_names
self._shelf['name'] = self.name
self._shelf['metadata'] = self._metadata
self._shelf.close()
else:
if file_name is None:
self._type = EmpiricalType.MEMORY
self._values = []
else:
self._shelf = shelve.open(self._file_name, flag=shelf_flag, writeback=file_writeback)
if 'concat_empirical_file_names' in self._shelf:
self._type = EmpiricalType.CONCAT_FILE
concat_empirical_file_names = self._shelf['concat_empirical_file_names']
self._concat_empiricals = [Empirical(file_name=f, file_read_only=True) for f in concat_empirical_file_names]
self._concat_cum_sizes = np.cumsum([emp.length for emp in self._concat_empiricals])
self._length = self._concat_cum_sizes[-1]
self._log_weights = torch.cat([util.to_tensor(emp._log_weights) for emp in self._concat_empiricals])
self._categorical = torch.distributions.Categorical(logits=util.to_tensor(self._log_weights, dtype=torch.float64))
self.name = self._shelf['name']
if 'metadata' in self._shelf:
self._metadata = self._shelf['metadata']
self._finalized = True
self._read_only = True
else:
self._type = EmpiricalType.FILE
if 'name' in self._shelf:
self.name = self._shelf['name']
if 'metadata' in self._shelf:
self._metadata = self._shelf['metadata']
if 'log_weights' in self._shelf:
self._log_weights = self._shelf['log_weights']
self._file_last_key = self._shelf['last_key']
self._length = len(self._log_weights)
else:
self._file_last_key = -1
self._file_sync_timeout = file_sync_timeout
self._file_sync_countdown = self._file_sync_timeout
self.finalize()
self._mean = None
self._variance = None
self._mode = None
self._min = None
self._max = None
self._effective_sample_size = None
self.add_metadata(name=self.name)
if values is not None:
if len(values) > 0:
self.add_sequence(values, log_weights, weights)
self.finalize()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if not self._closed:
self.close()
def __del__(self):
if not self._closed:
self.close()
def __len__(self):
return self._length
@property
def length(self):
return self._length
@property
def metadata(self):
return self._metadata
def add_metadata(self, **kwargs):
self._metadata['{}'.format(len(self._metadata))] = kwargs
def close(self):
if self._type == EmpiricalType.FILE:
self.finalize()
if not self._closed:
self._shelf.close()
self._closed = True
def copy(self, file_name=None):
self._check_finalized()
if self._type == EmpiricalType.FILE:
if file_name is None:
status = 'Copy Empirical(file_name: {}) to Empirical(memory)'.format(self._file_name)
print(status)
ret = Empirical(values=self.get_values(), log_weights=self._log_weights, name=self.name)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='copy', source='Empirical(file_name: {})'.format(self._file_name), target='Empirical(memory)')
return ret
else:
status = 'Copy Empirical(file_name: {}) to Empirical(file_name: {})'.format(self._file_name, file_name)
print(status)
ret = Empirical(file_name=file_name, name=self.name)
for i in range(self._length):
ret.add(value=self._get_value(i), log_weight=self._log_weights[i])
ret.finalize()
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='copy', source='Empirical(file_name: {})'.format(self._file_name), target='Empirical(file_name: {})'.format(file_name))
return ret
elif self._type == EmpiricalType.MEMORY:
if file_name is None:
status = 'Copy Empirical(memory) to Empirical(memory)'
print(status)
ret = copy.copy(self)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='copy', source='Empirical(memory)', target='Empirical(memory)')
return ret
else:
status = 'Copy Empirical(memory) to Empirical(file_name: {})'.format(file_name)
print(status)
ret = Empirical(values=self._values, log_weights=self._log_weights, file_name=file_name, name=self.name)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='copy', source='Empirical(memory)', target='Empirical(file_name: {})'.format(file_name))
return ret
else:
raise NotImplementedError('Not implemented for type: {}'.format(str(self._type)))
def finalize(self):
self._length = len(self._log_weights)
self._categorical = torch.distributions.Categorical(logits=util.to_tensor(self._log_weights, dtype=torch.float64))
self.add_metadata(op='finalize', length=self._length)
if self._length > 0:
self._uniform_weights = torch.eq(self._categorical.logits, self._categorical.logits[0]).all()
else:
self._uniform_weights = False
if self._type == EmpiricalType.FILE and not self._read_only:
self._shelf['name'] = self.name
self._shelf['metadata'] = self._metadata
self._shelf['log_weights'] = self._log_weights
self._shelf['last_key'] = self._file_last_key
self._shelf.sync()
self._finalized = True
def _check_finalized(self):
if not self._finalized:
raise RuntimeError('Empirical not finalized. Call finalize first.')
def add(self, value, log_weight=None, weight=None):
if self._read_only:
raise RuntimeError('Empirical is read-only.')
self._finalized = False
self._mean = None
self._variance = None
self._mode = None
self._min = None
self._max = None
self._effective_sample_size = None
if log_weight is not None:
self._log_weights.append(util.to_tensor(log_weight))
elif weight is not None:
self._log_weights.append(torch.log(util.to_tensor(weight)))
else:
self._log_weights.append(util.to_tensor(0.))
if self._type == EmpiricalType.FILE:
self._file_last_key += 1
self._shelf[str(self._file_last_key)] = value
self._file_sync_countdown -= 1
if self._file_sync_countdown == 0:
self.finalize()
self._file_sync_countdown = self._file_sync_timeout
else:
self._values.append(value)
def add_sequence(self, values, log_weights=None, weights=None):
if self._read_only:
raise RuntimeError('Empirical is read-only.')
if log_weights is not None:
for i in range(len(values)):
self.add(values[i], log_weight=log_weights[i])
elif weights is not None:
for i in range(len(values)):
self.add(values[i], weight=weights[i])
else:
for i in range(len(values)):
self.add(values[i])
def rename(self, name):
self.add_metadata(op='rename', name=name)
self.name = name
if self._type == EmpiricalType.FILE:
self._shelf['name'] = self.name
return self
def _get_value(self, index):
if self._type == EmpiricalType.MEMORY:
return self._values[index]
elif self._type == EmpiricalType.FILE:
if index < 0:
index = self._length + index
return self._shelf[str(index)]
else: # CONCAT_MEMORY or CONCAT_FILE
emp_index = self._concat_cum_sizes.searchsorted(index, 'right')
if emp_index > 0:
index = index - self._concat_cum_sizes[emp_index - 1]
return self._concat_empiricals[emp_index]._get_value(index)
def _get_log_weight(self, index):
return self._categorical.logits[index]
def _get_weight(self, index):
return self._categorical.probs[index]
def get_values(self):
self._check_finalized()
if self._type == EmpiricalType.MEMORY:
return self._values
elif self._type == EmpiricalType.FILE:
return [self._shelf[str(i)] for i in range(self._length)]
else:
raise NotImplementedError('Not implemented for type: {}'.format(str(self._type)))
def sample(self, min_index=None, max_index=None):
self._check_finalized()
if self._uniform_weights:
if min_index is None:
min_index = 0
if max_index is None:
max_index = self._length - 1
index = random.randint(min_index, max_index)
else:
if min_index is not None or max_index is not None:
raise NotImplementedError('Sample with min_index and/or max_index not implemented for Empirical with non-uniform weights.')
index = int(self._categorical.sample())
return self._get_value(index)
def __iter__(self):
self._check_finalized()
for i in range(self._length):
yield self._get_value(i)
def __getitem__(self, index):
self._check_finalized()
if isinstance(index, slice):
if self._type == EmpiricalType.MEMORY:
ret = Empirical(values=self._values[index], log_weights=self._log_weights[index], name=self.name)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='slice', index='{}'.format(index))
return ret
else:
raise NotImplementedError('Not implemented for type: {}'.format(str(self._type)))
else:
return self._get_value(index)
def expectation(self, func):
self._check_finalized()
ret = 0.
if self._type == EmpiricalType.MEMORY:
if self._uniform_weights:
ret = sum(map(func, self._values)) / self._length
else:
for i in range(self._length):
ret += util.to_tensor(func(self._values[i]), dtype=torch.float64) * self._categorical.probs[i]
elif self._type == EmpiricalType.FILE:
for i in range(self._length):
ret += util.to_tensor(func(self._shelf[str(i)]), dtype=torch.float64) * self._categorical.probs[i]
else: # CONCAT_MEMORY or CONCAT_FILE
for i in range(self._length):
ret += util.to_tensor(func(self._get_value(i)), dtype=torch.float64) * self._categorical.probs[i]
return util.to_tensor(ret)
def map(self, func, *args, **kwargs):
self._check_finalized()
values = []
for i in range(self._length):
values.append(func(self._get_value(i)))
ret = Empirical(values=values, log_weights=self._log_weights, name=self.name, *args, **kwargs)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='map', length=len(self), func=util.get_source(func))
return ret
def filter(self, func, *args, **kwargs):
self._check_finalized()
if self.length == 0:
return self
filtered_values = []
filtered_log_weights = []
for i in range(self._length):
value = self._get_value(i)
if func(value):
filtered_values.append(value)
filtered_log_weights.append(self._get_log_weight(i))
ret = Empirical(filtered_values, log_weights=filtered_log_weights, name=self.name, *args, **kwargs)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='filter', length=len(self), length_after=len(filtered_values), func=util.get_source(func))
return ret
def resample(self, num_samples, map_func=None, min_index=None, max_index=None, *args, **kwargs):
self._check_finalized()
# TODO: improve this with a better resampling algorithm
if map_func is None:
map_func = lambda x: x
if min_index is None:
min_index = 0
if max_index is None:
max_index = self.length
values = []
ess_before_resample = float(self.effective_sample_size)
status = 'Resample, num_samples: {}, min_index: {}, max_index: {}, ess_before_resample: {}'.format(num_samples, min_index, max_index, ess_before_resample)
util.progress_bar_init(status, num_samples, 'Samples')
for i in range(num_samples):
util.progress_bar_update(i)
values.append(map_func(self.sample(min_index=None, max_index=None)))
util.progress_bar_end()
ret = Empirical(values=values, name=self.name, *args, **kwargs)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='resample', length=len(self), num_samples=int(num_samples), min_index=int(min_index), max_index=int(max_index), ess_before=ess_before_resample)
return ret
def thin(self, num_samples, map_func=None, min_index=None, max_index=None, *args, **kwargs):
self._check_finalized()
if map_func is None:
map_func = lambda x: x
if min_index is None:
min_index = 0
if max_index is None:
max_index = self.length
step = max(1, math.floor((max_index - min_index) / num_samples))
indices = range(min_index, max_index, step)
values = []
log_weights = []
status = 'Thin, num_samples: {}, step: {}, min_index: {}, max_index: {}'.format(num_samples, step, min_index, max_index)
util.progress_bar_init(status, len(indices), 'Samples')
for i in range(len(indices)):
util.progress_bar_update(i)
values.append(map_func(self._get_value(indices[i])))
log_weights.append(self._get_log_weight(indices[i]))
util.progress_bar_end()
ret = Empirical(values=values, log_weights=log_weights, name=self.name, *args, **kwargs)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='thin', length=len(self), num_samples=int(num_samples), step=int(step), min_index=int(min_index), max_index=int(max_index))
return ret
@property
def mean(self):
if self._mean is None:
self._mean = self.expectation(lambda x: x)
return self._mean
@property
def variance(self):
if self._variance is None:
mean = self.mean
self._variance = self.expectation(lambda x: (x - mean)**2)
return self._variance
@property
def mode(self):
self._check_finalized()
if self._mode is None:
if self._uniform_weights:
counts = {}
util.progress_bar_init('Computing mode...', self._length, 'Values')
print(colored('Warning: weights are uniform and mode is correct only if values in Empirical are hashable', 'red', attrs=['bold']))
for i in range(self._length):
util.progress_bar_update(i)
value = self._get_value(i)
if value in counts:
counts[value] += 1
else:
counts[value] = 1
util.progress_bar_end()
self._mode = sorted(counts.items(), key=lambda x: x[1], reverse=True)[0][0]
else:
_, max_index = util.to_tensor(self._log_weights).max(-1)
self._mode = self._get_value(int(max_index))
return self._mode
def arg_max(self, map_func):
self._check_finalized()
max_val = map_func(self._get_value(0))
max_i = 0
util.progress_bar_init('Computing arg_max...', self._length, 'Values')
for i in range(self._length):
util.progress_bar_update(i)
val = map_func(self._get_value(i))
if val >= max_val:
max_val = val
max_i = i
util.progress_bar_end()
return self._get_value(max_i)
def arg_min(self, map_func):
self._check_finalized()
min_val = map_func(self._get_value(0))
min_i = 0
util.progress_bar_init('Computing arg_min...', self._length, 'Values')
for i in range(self._length):
util.progress_bar_update(i)
val = map_func(self._get_value(i))
if val <= min_val:
min_val = val
min_i = i
util.progress_bar_end()
return self._get_value(min_i)
@property
def effective_sample_size(self):
self._check_finalized()
if self._effective_sample_size is None:
weights = self._categorical.probs
self._effective_sample_size = 1. / weights.pow(2).sum()
# log_weights = self._categorical.logits
# self._effective_sample_size = torch.exp(2. * torch.logsumexp(log_weights, dim=0) - torch.logsumexp(2. * log_weights, dim=0))
return self._effective_sample_size
def unweighted(self, *args, **kwargs):
self._check_finalized()
ret = Empirical(values=self.get_values(), name=self.name, *args, **kwargs)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='discard_weights')
return ret
def _find_min_max(self):
try:
sorted_values = sorted(map(float, self.get_values()))
self._min = sorted_values[0]
self._max = sorted_values[-1]
except:
raise RuntimeError('Cannot compute the minimum and maximum of values in this Empirical. Make sure the distribution is over values that are scalar or castable to scalar, e.g., a PyTorch tensor of one element.')
@property
def min(self):
if self._min is None:
self._find_min_max()
return self._min
@property
def max(self):
if self._max is None:
self._find_min_max()
return self._max
def combine_duplicates(self, *args, **kwargs):
self._check_finalized()
if self._type == EmpiricalType.MEMORY:
distribution = collections.defaultdict(float)
# This can be simplified once PyTorch supports content-based hashing of tensors. See: https://github.com/pytorch/pytorch/issues/2569
hashable = util.is_hashable(self._values[0])
if hashable:
for i in range(self.length):
found = False
for key, value in distribution.items():
if torch.equal(util.to_tensor(key), util.to_tensor(self._values[i])):
# Differentiability warning: values[i] is discarded here. If we need to differentiate through all values, the gradients of values[i] and key should be tied here.
distribution[key] = torch.logsumexp(torch.stack((value, self._log_weights[i])), dim=0)
found = True
if not found:
distribution[self._values[i]] = self._log_weights[i]
values = list(distribution.keys())
log_weights = list(distribution.values())
ret = Empirical(values=values, log_weights=log_weights, name=self.name, *args, **kwargs)
ret._metadata = copy.deepcopy(self._metadata)
ret.add_metadata(op='combine_duplicates')
return ret
else:
                raise RuntimeError('The values in this Empirical are not hashable. Combining of duplicates is not currently supported.')
else:
raise NotImplementedError('Not implemented for type: {}'.format(str(self._type)))
# Deprecated in favor of concat_empiricals in constructor
# @staticmethod
# def combine(empirical_distributions, file_name=None):
# empirical_type = empirical_distributions[0]._type
# for dist in empirical_distributions:
# if dist._type != empirical_type:
# raise RuntimeError('Expecting all Empirical distributions to be of the same type. Encountered: {} and {}'.format(empirical_type, dist._type))
# if not isinstance(dist, Empirical):
# raise TypeError('Combination is only supported between Empirical distributions.')
#
# if empirical_type == EmpiricalType.FILE:
# if file_name is None:
# raise RuntimeError('Expecting a target file_name for the combined Empirical.')
# ret = Empirical(file_name=file_name)
# for dist in empirical_distributions:
# for i in range(dist._length):
# ret.add(value=dist._shelf[str(i)], log_weight=dist._log_weights[i])
# ret.finalize()
# return ret
# elif empirical_type == EmpiricalType.MEMORY:
# values = []
# log_weights = []
# length = empirical_distributions[0].length
# for dist in empirical_distributions:
# if dist.length != length:
# raise RuntimeError('Combination is only supported between Empirical distributions of equal length.')
# values += dist._values
# log_weights += dist._log_weights
# return Empirical(values=values, log_weights=log_weights, file_name=file_name)
# else:
# raise NotImplementedError('Not implemented for type: {}'.format(str(empirical_type)))
def values_numpy(self):
self._check_finalized()
try: # This can fail in the case values are an iterable collection of non-numeric types (strings, etc.)
return torch.stack(self.get_values()).cpu().numpy()
except:
try:
return np.array(self.get_values())
except:
raise RuntimeError('Cannot convert values to numpy.')
def weights_numpy(self):
self._check_finalized()
return util.to_numpy(self._categorical.probs)
def log_weights_numpy(self):
self._check_finalized()
return util.to_numpy(self._categorical.logits)
def plot_histogram(self, figsize=(10, 5), xlabel=None, ylabel='Frequency', xticks=None, yticks=None, log_xscale=False, log_yscale=False, file_name=None, show=True, density=1, fig=None, *args, **kwargs):
if fig is None:
if not show:
mpl.rcParams['axes.unicode_minus'] = False
plt.switch_backend('agg')
fig = plt.figure(figsize=figsize)
fig.tight_layout()
values = self.values_numpy()
weights = self.weights_numpy()
plt.hist(values, weights=weights, density=density, *args, **kwargs)
if log_xscale:
plt.xscale('log')
if log_yscale:
plt.yscale('log', nonposy='clip')
if xticks is not None:
plt.xticks(xticks)
        if yticks is not None:
            plt.yticks(yticks)
if xlabel is None:
xlabel = self.name
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if file_name is not None:
plt.savefig(file_name)
if show:
plt.show()
def save_metadata(self, file_name):
with open(file_name, 'w') as file:
file.write(yaml.dump(self._metadata))
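# Hedged usage sketch: build an in-memory Empirical from weighted values and query its
# summary statistics (values and weights below are arbitrary).
#
#     emp = Empirical(values=[1., 2., 3.], weights=[0.2, 0.3, 0.5])
#     print(emp.mean, emp.variance)
#     x = emp.sample()
#     unweighted = emp.unweighted()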
|
11524102
|
import os
import numpy as np
import cv2
import math
def DCmakedir(path):
if os.path.exists(path):
return
else:
os.mkdir(path)
def imageRotate(img,theta):
rows,cols = img.shape[0:2]
angle = -theta*math.pi/180
a = math.sin(angle)
b = math.cos(angle)
width = int(cols * math.fabs(b) + rows * math.fabs(a))
heigth = int(rows * math.fabs(b) + cols * math.fabs(a))
M = cv2.getRotationMatrix2D((width/2,heigth/2),theta,1)
rot_move = np.dot(M,np.array([(width-cols)*0.5,(heigth-rows)*0.5,0]))
M[0,2] += rot_move[0]
M[1, 2] += rot_move[1]
    imgout_xuanzhuan = cv2.warpAffine(img, M, (width, heigth), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT, borderValue=0)
return imgout_xuanzhuan
def rotate_bound(image, angle):
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
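# Hedged usage sketch: rotate an image by 30 degrees while expanding the canvas so nothing is
# clipped (`img` here is any BGR numpy array loaded elsewhere, not a value from this module).
#
#     rotated = rotate_bound(img, 30)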
def perspective_trans(img):
y1,x1 = img.shape[0:2]
pts1 = np.float32([[0,0],[x1,0],[0,y1],[x1,y1]])
x2_1 = np.random.randint(0, int(x1 / 6))
y2_1 = np.random.randint(0, int(y1 / 6))
x2_2 = np.random.randint(0, int(x1 / 6))
y2_2 = np.random.randint(0, int(y1 / 6))
x2_3 = np.random.randint(0, int(x1 / 6))
y2_3 = np.random.randint(0, int(y1 / 6))
x2_4 = np.random.randint(0, int(x1 / 6))
y2_4 = np.random.randint(0, int(y1 / 6))
pts2 = np.float32([[x2_1, y2_1], [x1-x2_2, y2_2],
[x2_3, y1-y2_3], [x1-x2_4, y1-y2_4]])
MM = cv2.getPerspectiveTransform(pts1,pts2)
#print(MM)
imgout_p = cv2.warpPerspective(img,MM,(x1,y1))#,cv2.INTER_LINEAR,0,0
imgout = imgout_p[min(y2_1,y2_2):y1 -min(y2_3,y2_4),min(x2_1,x2_3):x1-min(x2_2,x2_4)]
    imgout = cv2.resize(imgout, (x1, y1))  # cv2.resize takes (width, height); restore the original size
return imgout
def online_data_generate(fgpath, bgpath, fgImglist, bgImglist, fgImgNum, bgImgNum):
    """Composite a randomly chosen foreground onto a randomly chosen background.

    The foreground is optionally perspective-warped and rotated, scaled relative to the
    background, and pasted at a random position. Unreadable or unsuitable images are skipped
    and another pair is drawn. Returns the composited BGR image and an edge mask of the paste.
    """
returnImg = False
while(not returnImg):
randomfgIndex = np.random.randint(0,fgImgNum)
randombgIndex = np.random.randint(0,bgImgNum)
fgImg = cv2.imdecode(np.fromfile(os.path.join(fgpath,fgImglist[randomfgIndex]), dtype=np.uint8), -1)
#cv2.imread(os.path.join(fgpath,fgImglist[randomfgIndex]),cv2.IMREAD_COLOR)
bgImg = cv2.imdecode(np.fromfile(os.path.join(bgpath,bgImglist[randombgIndex]), dtype=np.uint8), -1)
#cv2.imread(os.path.join(bgpath,bgImglist[randombgIndex]),cv2.IMREAD_COLOR)
try:
fgImg.shape
bgImg.shape
#print("fgImg.shape",fgImg.shape)
#print("bgImg.shape",bgImg.shape)
except:
continue
try:
if (bgImg.shape[0]<520 or bgImg.shape[1]<520):
#print("bgImg:",bgImg.shape)
if(bgImg.shape[0]< bgImg.shape[1]) :
#print("fx=800./bgImg.shape[0], fy=800./bgImg.shape[0]",800./bgImg.shape[0], 800./bgImg.shape[0])
bgImg = cv2.resize(bgImg,(0, 0), fx=800./bgImg.shape[0], fy=800./bgImg.shape[0])
else:
# print("fx=800./bgImg.shape[1], fy=800./bgImg.shape[1]",800./bgImg.shape[1], 800./bgImg.shape[1])
bgImg = cv2.resize(bgImg,(0, 0), fx=800./bgImg.shape[1], fy=800./bgImg.shape[1])
#print(bgImg.shape)
bg_cut_r = np.random.randint(0,int(bgImg.shape[1]-512))
bg_cut_c = np.random.randint(0,int(bgImg.shape[0]-512))
bgImg = bgImg[bg_cut_c:bg_cut_c+512,bg_cut_r:bg_cut_r+512,:]
M = np.ones(fgImg.shape,dtype="uint8")*5
fgImg = cv2.add(fgImg,M)
need_per = np.random.randint(1,5)
if need_per%2==0:
#print("perspective_trans fgImg.shape",fgImg.shape)
img = perspective_trans(fgImg)
else:
img = fgImg
need_rotata = np.random.randint(1,5)
if need_rotata%2==0:
theta = np.random.randint(1,30)
img = rotate_bound(img,theta)
else:
theta = 1
img = rotate_bound(img,theta)
fgcols,fgrows = img.shape[0:2]
#print("fgrows,fgcols",fgrows,fgcols)
bgcols,bgrows= bgImg.shape[0:2]
#print("bgrows,bgcols",bgrows,bgcols)
fgLongSideRatio = 0.7+(1.0-0.7)*np.random.random()
if float(fgrows)/bgrows >float(fgcols)/bgcols:
#print("fgrows/bgrows ,fgcols/bgcols)",float(fgrows)/bgrows ,float(fgcols)/bgcols)
fg_r = int(bgrows*fgLongSideRatio)
fg_c = int(float(fg_r)/fgrows *fgcols)
else:
#print("fgrows/bgrows ,fgcols/bgcols)",float(fgrows)/bgrows ,float(fgcols)/bgcols)
fg_c = int(bgcols*fgLongSideRatio)
fg_r = int(float(fg_c)/fgcols *fgrows)
#print("fg_r,fg_c",fg_r,fg_c)
fgimg = cv2.resize(img,(fg_r,fg_c))
gray = cv2.cvtColor(fgimg,cv2.COLOR_BGR2GRAY)
_,mask1 = cv2.threshold(gray,1,255,cv2.THRESH_BINARY)
mask1[0:8,:] = 0
mask1[:,0:8] = 0
mask1[mask1.shape[0]-9:mask1.shape[0],:] = 0
mask1[:,mask1.shape[1]-9:mask1.shape[1]] = 0
kernel = np.ones((5,5),np.uint8)
mask = cv2.erode(mask1,kernel,iterations = 3)
edge = cv2.absdiff(mask,mask1)
mask_inv = cv2.bitwise_not(mask)
bord_r = np.random.randint(0,bgrows-fg_r)
bord_c = np.random.randint(0,bgcols-fg_c)
bord_r2 = bord_r+fg_r
bord_c2 = bord_c+fg_c
roi = bgImg[bord_c:bord_c2, bord_r:bord_r2]
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
img2_fg = cv2.bitwise_and(fgimg,fgimg,mask = mask)
dst = cv2.add(img1_bg,img2_fg)
edgeOut = np.zeros(bgImg.shape[0:2],dtype="uint8")
edgeOut[bord_c:bord_c2,bord_r:bord_r2 ]= edge
bgImg[bord_c:bord_c2,bord_r:bord_r2 ]= dst
if bgImg.shape[2]!=3:
continue
returnImg = True
except:
continue
return bgImg,edgeOut
# cv2.imwrite(os.path.join(saveImgpath,"img_"+str(index))+".jpg",bgImg)
# edgeOut = np.zeros(bgImg.shape[0:2],dtype="uint8")
# edgeOut[bord_c:bord_c2,bord_r:bord_r2 ]= edge
# cv2.imwrite(os.path.join(saveEdgepath,"img_"+str(index)+".bmp"),edgeOut)
# print("image",index,"is saved to",saveImgpath+"/img_"+str(index)+".bmp")
|
11524105
|
import unittest
from rastervision.pipeline.utils import split_into_groups
class TestUtils(unittest.TestCase):
def test_split_into_groups(self):
lst = [1, 2, 3, 4, 5, 6]
g1 = split_into_groups(lst[:5], 3)
self.assertEqual(g1, [[1, 2], [3, 4], [5]])
g2 = split_into_groups(lst, 7)
self.assertEqual(g2, [[1], [2], [3], [4], [5], [6]])
g3 = split_into_groups(lst[0:1], 7)
self.assertEqual(g3, [[1]])
g4 = split_into_groups(lst, 3)
self.assertEqual(g4, [[1, 2], [3, 4], [5, 6]])
if __name__ == '__main__':
unittest.main()
|
11524112
|
from flask import g, request, jsonify
from flask_restx import Resource, fields
from pyinfraboxutils.ibflask import OK
from pyinfraboxutils.ibrestplus import api
cluster_setting_model = api.model('ClusterSetting', {
'name': fields.String(required=True),
'enabled': fields.Boolean(required=True),
})
@api.route('/api/v1/admin/clusters', doc=False)
class Clusters(Resource):
def get(self):
clusters = g.db.execute_many_dict('''
SELECT name, active, enabled, last_update::text, last_active::text
FROM "cluster"
ORDER BY name
''')
return jsonify(clusters)
@api.expect(cluster_setting_model, validate=True)
def post(self):
body = request.get_json()
g.db.execute('''
UPDATE cluster
SET enabled=%s
WHERE name=%s
''', [body['enabled'], body['name']])
g.db.commit()
return OK("OK")
|
11524114
|
from collections import namedtuple
import enum
import unicodedata
class Align(enum.Enum):
LEFT = 0
CENTER = 1
RIGHT = 2
# from https://bugs.python.org/msg145523
W = {
'F': 2, # full-width, width 2, compatibility character for a narrow char
'H': 1, # half-width, width 1, compatibility character for a narrow char
'W': 2, # wide, width 2
'Na': 1, # narrow, width 1
'A': 1, # ambiguous; width 2 in Asian context, width 1 in non-Asian context
    'N': 1,  # neutral; not used in Asian text, so has no width. Practically, width can be considered as 1
}
def get_width(s):
# TODO:
# - Handle embedded attributes
return sum(W[unicodedata.east_asian_width(ch)] for ch in s)
def crop(s, width):
'''Crop a string to a certain visual length.'''
# TODO:
# - Handle embedded attributes
# - Ellipsis … ?
o = 0
l = 0
for ch in s:
w = W[unicodedata.east_asian_width(ch)]
if l + w > width:
break
l += w
o += 1
return (s[0:o], l)
def pad(s, width, align):
# TODO:
# - Handle embedded attributes
s, swidth = crop(s, width)
padding = width - swidth
if align == Align.LEFT:
return s + (' ' * padding)
elif align == Align.RIGHT:
return (' ' * padding) + s
elif align == Align.CENTER:
return (' ' * (padding // 2)) + s + (' ' * ((padding + 1) // 2))
class Column:
def __init__(self, title, width, align=Align.LEFT):
self.title = title
self.width = width
self.align = align
ColumnInfo = namedtuple('ColumnInfo', ['x', 'width'])
class TablePrinter:
def __init__(self, out, attr, columns):
self.out = out
self.attr = attr
self.columns = columns
def format_row(self, rec):
return ' '.join((entry[0] + pad(str(entry[1]), col.width, col.align) + self.attr.close(entry[0]) for entry, col in zip(rec, self.columns)))
def print_row(self, rec):
self.out.write(f'{self.format_row(rec)}\n')
def print_header(self, hdr_attr):
titles = [(hdr_attr, t.title) for t in self.columns]
self.out.write(f'{self.format_row(titles)}\n')
def column_info(self, idx):
x = 0
for i in range(0, idx):
x += self.columns[i].width + 1
return ColumnInfo(x=x, width=self.columns[idx].width)
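# Illustrative usage sketch (not part of the original module): `PlainAttr` and
# the sample rows below are assumptions; `attr` only needs the `close(prefix)`
# method that format_row calls.
if __name__ == '__main__':
    import sys

    class PlainAttr:
        def close(self, prefix):
            # No terminal attributes in this sketch, so nothing to reset.
            return ''

    columns = [Column('Name', 10), Column('Width', 5, Align.RIGHT)]
    printer = TablePrinter(sys.stdout, PlainAttr(), columns)
    printer.print_header('')
    printer.print_row([('', 'ascii'), ('', 5)])
    printer.print_row([('', '日本語'), ('', 6)])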
|
11524198
|
import torch.nn as nn
from torch.nn import init
class c3d(nn.Module):
def __init__(self,num_class,init_weights=True):
super(c3d, self).__init__()
self.conv1a = nn.Conv3d(3, 64, kernel_size=3, padding=1)
self.conv2a = nn.Conv3d(64, 128, kernel_size=3, padding=1)
self.conv3a = nn.Conv3d(128, 256, kernel_size=3, padding=1)
self.conv3b = nn.Conv3d(256, 256, kernel_size=3, padding=1)
self.conv4a = nn.Conv3d(256, 512, kernel_size=3, padding=1)
self.conv4b = nn.Conv3d(512, 512, kernel_size=3, padding=1)
self.conv5a = nn.Conv3d(512, 512, kernel_size=3, padding=1)
self.conv5b = nn.Conv3d(512, 512, kernel_size=3, padding=1)
self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))
self.pool2 = nn.MaxPool3d(kernel_size=2, stride=2)
self.pool3 = nn.MaxPool3d(kernel_size=2, stride=2)
self.pool4 = nn.MaxPool3d(kernel_size=2, stride=2)
self.pool5 = nn.MaxPool3d(kernel_size=2, stride=2)#, padding=(0, 1, 1)
self.fc6 = nn.Linear(4608, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.out = nn.Linear(4096, num_class)
self.relu = nn.ReLU()
self.softmax = nn.Softmax()
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.conv1a(x)
x = self.relu(x)
x = self.pool1(x)
x = self.conv2a(x)
x = self.relu(x)
x = self.pool2(x)
x = self.conv3a(x)
x = self.relu(x)
x = self.conv3b(x)
x = self.relu(x)
x = self.pool3(x)
x = self.conv4a(x)
x = self.relu(x)
x = self.conv4b(x)
x = self.relu(x)
x = self.pool4(x)
x = self.conv5a(x)
x = self.relu(x)
x = self.conv5b(x)
x = self.relu(x)
x = self.pool5(x)
x = x.view(x.size(0), -1)
x = self.fc6(x)
x = self.relu(x)
x = self.fc7(x)
x = self.relu(x)
res = self.out(x)
# if you use CrossEntropyLoss, you don't need to add softmax in network
# res = self.softmax(x)
return res
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv3d):
init.xavier_uniform_(m.weight)
if m.bias is not None:
init.constant_(m.bias,0)
elif isinstance(m, nn.Linear):
init.xavier_uniform_(m.weight)
init.constant_(m.bias, 0)
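# Quick shape sanity check (an illustrative sketch, not part of the original
# file; num_class=101 is an arbitrary choice): fc6 expects 4608 = 512*1*3*3
# inputs, which corresponds to clips of shape (batch, 3, 16, 112, 112).
if __name__ == '__main__':
    import torch
    model = c3d(num_class=101)
    clip = torch.rand(2, 3, 16, 112, 112)
    logits = model(clip)
    print(logits.shape)  # torch.Size([2, 101])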
|
11524201
|
import random
def tamper(payload, **kwargs):
possible_spaces = [2, 3, 4]
retval = ""
encoder = "/**/"
    for char in payload:
if char == " ":
retval += encoder * random.choice(possible_spaces)
else:
retval += char
return retval
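# Illustrative usage (an assumption-based sketch; the payload string is made
# up). Each space is replaced by a random number of inline-comment tokens.
if __name__ == '__main__':
    print(tamper("UNION SELECT password FROM users"))
    # e.g. UNION/**//**/SELECT/**//**//**/password/**//**/FROM/**//**/users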
|
11524210
|
from unittest import TestCase
from excel4lib.macro import *
from excel4lib.lang import *
class TestExcel4Instruction(TestCase):
def test_excel4instruction_str(self):
formula = Excel4Formula(1, 2, "test", "test", 1, 2, 3)
self.assertEqual(str(formula), '=test("test",1,2,3)', 'Should be =test("test",1,2,3)')
def test_excel4instruction_args(self):
argument = Excel4FormulaArgument("ISNUMBER", 1)
formula = Excel4Formula(1, 2, "test", "test", 1, 2, argument)
self.assertEqual(str(formula), '=test("test",1,2,ISNUMBER(1))', 'Should be =test("test",1,2,ISNUMBER(1))')
def test_excel4instruction_if(self):
argument = Excel4FormulaArgument("ISNUMBER", 1)
formula = Excel4ConditionFormula(1, 1, "IF", Excel4LogicalTest(argument, "=", 1),
Excel4Formula(1, 2, "GOTO", "A"), "B")
self.assertEqual(str(formula), '=IF(ISNUMBER(1)=1,R2C1,"B")', 'Should be =IF(ISNUMBER(1)=1,R2C1,"B")')
def test_get_reference(self):
instruction = Excel4Instruction(1,1)
c = "{}".format(instruction.get_reference("pl_PL"))
        self.assertEqual(c, "W1K1", "Should be W1K1")
        c = "{}".format(instruction.get_reference("en_US"))
        self.assertEqual(c, "R1C1", "Should be R1C1")
def test_translate_address(self):
instruction = Excel4Instruction(1,1)
instruction.set_language("pl_PL")
c = "{}".format(instruction.get_address())
        self.assertEqual(c, "W1K1", "Should be W1K1")
        instruction.set_language("en_US")
        c = "{}".format(instruction.get_address())
        self.assertEqual(c, "R1C1", "Should be R1C1")
def test_revert_address_translation(self):
Excel4Translator.native_language = "en_US"
instruction = Excel4Instruction(1,1)
instruction.set_language("pl_PL")
c = "{}".format(instruction.get_address())
        self.assertEqual(c, "W1K1", "Should be W1K1")
        instruction.revert_address_translation()
        c = "{}".format(instruction.get_address())
        self.assertEqual(c, "R1C1", "Should be R1C1")
|
11524276
|
from collections import defaultdict
def repeat_sum(lst):
count = defaultdict(int)
for a in lst:
for b in set(a):
count[b] += 1
    return sum(k for k, v in count.items() if v > 1)
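# Illustrative usage (not in the original file): values appearing in more than
# one sub-list are summed once each; here 1 and 3 repeat, so the result is 4.
if __name__ == '__main__':
    print(repeat_sum([[1, 2, 3], [3, 4], [1, 5]]))  # 4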
|
11524297
|
import os
import random
import time
import pytest
from ufo2ft.fontInfoData import (
dateStringToTimeValue,
getAttrWithFallback,
normalizeStringForPostscript,
)
@pytest.fixture
def info(InfoClass):
self = InfoClass()
self.familyName = "Family Name"
self.styleName = "Style Name"
self.unitsPerEm = 1000
self.descender = -250
self.xHeight = 450
self.capHeight = 600
self.ascender = 650
self.italicAngle = 0
return self
class GetAttrWithFallbackTest:
@pytest.mark.parametrize(
"infoDict,expected",
[
# no styleMapFamilyName, no styleMapStyleName
(
{},
{
"familyName": "Family Name",
"styleName": "Style Name",
"styleMapFamilyName": "Family Name Style Name",
"styleMapStyleName": "regular",
"openTypeNamePreferredFamilyName": "Family Name",
"openTypeNamePreferredSubfamilyName": "Style Name",
},
),
# no styleMapStyleName
(
{"styleMapFamilyName": "Style Map Family Name"},
{
"styleMapFamilyName": "Style Map Family Name",
"styleMapStyleName": "regular",
"openTypeNamePreferredFamilyName": "Family Name",
"openTypeNamePreferredSubfamilyName": "Style Name",
},
),
# no styleMapFamilyName, no styleMapStyleName but styleName="Regular"
(
{"styleName": "Regular"},
{
"familyName": "Family Name",
"styleName": "Regular",
"styleMapFamilyName": "Family Name",
"styleMapStyleName": "regular",
"openTypeNamePreferredFamilyName": "Family Name",
"openTypeNamePreferredSubfamilyName": "Regular",
},
),
# no styleMapFamilyName but styleName="Regular"
(
{"styleName": "Regular", "styleMapStyleName": "regular"},
{
"styleMapFamilyName": "Family Name",
"styleMapStyleName": "regular",
"openTypeNamePreferredFamilyName": "Family Name",
"openTypeNamePreferredSubfamilyName": "Regular",
},
),
# no styleMapStyleName but styleName="Regular"
(
{"styleName": "Regular", "styleMapFamilyName": "Style Map Family Name"},
{
"styleMapFamilyName": "Style Map Family Name",
"styleMapStyleName": "regular",
"openTypeNamePreferredFamilyName": "Family Name",
"openTypeNamePreferredSubfamilyName": "Regular",
},
),
# no styleMapFamilyName, no styleMapStyleName but styleName="Bold"
(
{"styleName": "Bold"},
{
"familyName": "Family Name",
"styleName": "Bold",
"styleMapFamilyName": "Family Name",
"styleMapStyleName": "bold",
"openTypeNamePreferredFamilyName": "Family Name",
"openTypeNamePreferredSubfamilyName": "Bold",
},
),
],
)
def test_family_and_style_names(self, info, infoDict, expected):
for key, value in infoDict.items():
setattr(info, key, value)
for key, value in expected.items():
assert getAttrWithFallback(info, key) == value
def test_redundant_metadata(self, info):
assert getAttrWithFallback(info, "openTypeNameVersion") == "Version 0.000"
info.versionMinor = 1
info.versionMajor = 1
assert getAttrWithFallback(info, "openTypeNameVersion") == "Version 1.001"
assert (
getAttrWithFallback(info, "openTypeNameUniqueID")
== "1.001;NONE;FamilyName-StyleName"
)
assert getAttrWithFallback(info, "postscriptSlantAngle") == 0
def test_unecessary_metadata(self, info):
assert getAttrWithFallback(info, "postscriptWeightName") is None
info.postscriptWeightName = "Normal"
assert getAttrWithFallback(info, "postscriptWeightName") == "Normal"
def test_vertical_metrics(self, info):
assert getAttrWithFallback(info, "openTypeHheaAscender") == 950
assert getAttrWithFallback(info, "openTypeHheaDescender") == -250
assert getAttrWithFallback(info, "openTypeOS2TypoAscender") == 650
assert getAttrWithFallback(info, "openTypeOS2TypoDescender") == -250
assert getAttrWithFallback(info, "openTypeOS2WinAscent") == 950
assert getAttrWithFallback(info, "openTypeOS2WinDescent") == 250
def test_caret_slope(self, info):
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRise") == 1
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRun") == 0
info.italicAngle = -12
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRise") == 1000
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRun") == 213
info.italicAngle = 12
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRise") == 1000
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRun") == -213
info.openTypeHheaCaretSlopeRise = 2048
assert info.openTypeHheaCaretSlopeRun is None
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRise") == 2048
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRun") == -435
info.openTypeHheaCaretSlopeRise = None
info.openTypeHheaCaretSlopeRun = 200
assert info.openTypeHheaCaretSlopeRise is None
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRise") == -941
assert getAttrWithFallback(info, "openTypeHheaCaretSlopeRun") == 200
def test_head_created(self, info):
os.environ["SOURCE_DATE_EPOCH"] = "1514485183"
try:
assert (
getAttrWithFallback(info, "openTypeHeadCreated")
== "2017/12/28 18:19:43"
)
finally:
del os.environ["SOURCE_DATE_EPOCH"]
assert getAttrWithFallback(info, "openTypeHeadCreated") != "2017/12/28 18:19:43"
def test_empty_info(self, InfoClass):
info = InfoClass()
assert getAttrWithFallback(info, "familyName") == "New Font"
assert getAttrWithFallback(info, "styleName") == "Regular"
assert getAttrWithFallback(info, "unitsPerEm") == 1000
assert getAttrWithFallback(info, "ascender") == 800
assert getAttrWithFallback(info, "capHeight") == 700
assert getAttrWithFallback(info, "xHeight") == 500
assert getAttrWithFallback(info, "descender") == -200
def test_empty_info_2048(self, InfoClass):
info = InfoClass()
info.unitsPerEm = 2048
assert getAttrWithFallback(info, "unitsPerEm") == 2048
assert getAttrWithFallback(info, "ascender") == 1638
assert getAttrWithFallback(info, "capHeight") == 1434
assert getAttrWithFallback(info, "xHeight") == 1024
assert getAttrWithFallback(info, "descender") == -410
class PostscriptBlueScaleFallbackTest:
def test_without_blue_zones(self, info):
postscriptBlueScale = getAttrWithFallback(info, "postscriptBlueScale")
assert postscriptBlueScale == 0.039625
def test_with_blue_zones(self, info):
info.postscriptBlueValues = [
-13,
0,
470,
483,
534,
547,
556,
569,
654,
667,
677,
690,
738,
758,
]
info.postscriptOtherBlues = [-255, -245]
postscriptBlueScale = getAttrWithFallback(info, "postscriptBlueScale")
assert postscriptBlueScale == 0.0375
class NormalizeStringForPostscriptTest:
def test_no_change(self):
assert (
normalizeStringForPostscript("Sample copyright notice.")
== "Sample copyright notice."
)
class DateStringToTimeValueTest:
def test_roundtrip_random_timestamp(self):
timestamp = random.randint(0, 10 ** 9)
ds = time.strftime("%Y/%m/%d %H:%M:%S", time.gmtime(timestamp))
assert dateStringToTimeValue(ds) == timestamp
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
11524316
|
import threading
import time
import typing
import uuid
from cauldron import environ
from cauldron.environ.response import Response
from cauldron.cli.sync import comm
def send_remote_command(
command: str,
raw_args: str = '',
asynchronous: bool = True,
remote_connection: 'environ.RemoteConnection' = None,
show_logs: bool = True
) -> 'AsyncCommandThread':
"""..."""
thread = AsyncCommandThread(
command=command,
arguments=raw_args,
is_async=asynchronous,
remote_connection=remote_connection,
is_logging=show_logs
)
thread.start()
return thread
class AsyncCommandThread(threading.Thread):
def __init__(
self,
command: str,
arguments: str = '',
is_async: bool = True,
remote_connection: 'environ.RemoteConnection' = None,
is_logging: bool = True,
**kwargs
):
super(AsyncCommandThread, self).__init__(**kwargs)
self.daemon = True
self.abort = False
self.uid = str(uuid.uuid4())
self.responses = [] # type: typing.List[Response]
self.exception = None
self.is_executing = False
self.command = command
self.args = arguments
self.is_async = is_async
self.is_logging = is_logging
self._remote_connection = remote_connection
@property
def remote_connection(self) -> 'environ.RemoteConnection':
return (
self._remote_connection
if self._remote_connection else
environ.remote_connection
)
@property
def is_finished(self) -> bool:
"""..."""
has_response = len(self.responses) > 0
has_finished_response = has_response and (
self.responses[-1].response.failed or
self.responses[-1].response.data['run_status'] != 'running'
)
return has_finished_response
def check_status(self) -> Response:
"""..."""
run_uid = self.responses[-1].data.get('run_uid', '')
endpoint = '/abort' if self.abort else '/run-status/{}'.format(run_uid)
return comm.send_request(
endpoint=endpoint,
remote_connection=self.remote_connection,
method='GET'
)
def add_response(self, response: Response) -> Response:
"""..."""
run_log = response.data.get('run_log', [])
previous = self.responses + []
self.responses.append(response)
if not self.is_logging:
return response
log_lengths = [len(r.data.get('run_log', [])) for r in previous]
start_index = max(log_lengths + [0])
new_log = run_log[start_index:]
for message in new_log:
environ.log_raw(message)
return response
def run(self):
"""..."""
self.is_executing = True
endpoint = '/command-async' if self.is_async else '/command-sync'
try:
self.add_response(comm.send_request(
endpoint=endpoint,
remote_connection=self.remote_connection,
data=dict(
command=self.command,
args=self.args
)
))
except Exception as error:
self.exception = error
self.add_response(Response().fail(
code='COMM_EXECUTION_ERROR',
error=error,
message='Communication execution error'
).response)
while not self.is_finished:
time.sleep(1)
self.add_response(self.check_status())
self.is_executing = False
|
11524380
|
import numpy as np
# We lazy-load torch the first time one of the functions that requires it is called
torch = None
def _check_torch_import():
global torch
if torch is not None:
return
import importlib
torch = importlib.import_module('torch')
def tonp(tensor):
    """Takes any PyTorch tensor and converts it to a numpy array or scalar as appropriate.
When given something that isn't a PyTorch tensor, it will attempt to convert to a NumPy array or scalar anyway.
Not heavily optimized."""
_check_torch_import()
if isinstance(tensor, torch.Tensor):
arr = tensor.data.detach().cpu().numpy()
else: # It's not a tensor! We'll handle it anyway
arr = np.array(tensor)
if arr.shape == ():
        return arr.item()  # np.asscalar was removed from NumPy; .item() is the equivalent
else:
return arr
def totensor(arr, device=None, type='float32'):
"""Converts any array-like or scalar to a PyTorch tensor, and checks that the array is in the correct type (defaults to float32) and on the correct device.
Equivalent to calling `torch.from_array(np.array(arr, dtype=type)).to(device)` but more efficient.
NOTE: If the input is a torch tensor, the type will not be checked.
Keyword arguments:
    arr -- Any array-like object (e.g. numpy array, list, numpy variable)
device (optional) -- Move the tensor to this device after creation
type -- the numpy data type of the tensor. Defaults to 'float32' (regardless of the input)
Returns:
tensor - A torch tensor"""
_check_torch_import()
# If we're given a tensor, send it right back.
if isinstance(arr, torch.Tensor):
if device:
return arr.to(device) # If tensor is already on the specified device, this doesn't copy the tensor.
else:
return arr
# Only call np.array() if it is not already an array, otherwise numpy will waste time copying the array
if not isinstance(arr, np.ndarray):
arr = np.array(arr)
# Likewise with type conversion
if arr.dtype != type:
arr = arr.astype(type, copy=False)
tensor = torch.from_numpy(arr)
if device:
tensor = tensor.to(device)
return tensor
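# Minimal round-trip sketch (illustrative only; assumes torch is installed and
# simply exercises the two helpers above):
def _example_roundtrip():
    t = totensor([1.0, 2.0, 3.0])   # float32 tensor on the CPU
    a = tonp(t)                     # back to a numpy array
    s = tonp(totensor(4.2))         # 0-d input comes back as a Python scalar
    return t, a, s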
class ShapeInferer:
def __init__(self, input_shape, batch_size=1):
self.input_shape = input_shape
self.batch_size = batch_size
self.batch = torch.rand((self.batch_size, *self.input_shape))
# Alias
self.prod = self.product
@property
def output_shape(self):
return self.batch.shape[1:]
def product(self):
        # Multiply the dimensions together (this is a product, not a sum).
        prod = 1
        for dim in self.output_shape:
            prod *= dim
return prod
def add_layer(self, layer):
if layer == 'flatten':
self.batch = self.batch.reshape(self.batch.size(0), -1)
else:
device = next(layer.parameters()).device
self.batch = layer(self.batch.to(device))
def accumulate_layers(self, *layers):
if len(layers) == 1 and hasattr(layers[0], '__iter__'):
layers = layers[0]
for layer in layers:
self.add_layer(layer)
# We define the class only at call-time to allow for lazy torch import
# TODO: Find a less hacky way of doing this. If you have any ideas let me know :)
def MLP(dim, dropout=0.3, hidden_activation='relu', output_activation=None):
"""A fully connected MLP network with the specified sizes at each layer.
Arguments:
    dim -- A list of the layer sizes of the network. The first item is the input size and the last item is the output size.
Eg. [128, 512, 512, 1] defines a network with 128 inputs, 1 output and two hidden layers of size 512.
dropout (default: 0.3) -- Proportion for dropout inbetween layers
hidden_activation (default: torch.nn.ReLU) -- Activation function between layers.
output_activation (optional) -- Activation function at the end of the network.
"""
_check_torch_import()
if hidden_activation == 'relu':
hidden_activation = torch.nn.ReLU
class mlc_MLP(torch.nn.Module):
def __init__(self, dim, dropout, hidden_activation, output_activation):
            super(mlc_MLP, self).__init__()
self.layers = []
for f_in, f_out in zip(dim[:-2], dim[1:-1]):
self.layers += [
torch.nn.Linear(f_in, f_out),
hidden_activation(),
torch.nn.Dropout(dropout),
]
self.layers += [torch.nn.Linear(dim[-2], dim[-1])]
if output_activation:
self.layers += [output_activation()]
self.model = torch.nn.Sequential(*self.layers)
def forward(self, x):
return self.model(x)
return mlc_MLP(dim, dropout, hidden_activation, output_activation)
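# Illustrative usage (a sketch, not part of the original API): a network with
# 128 inputs, two hidden layers of 64 units, and 10 outputs.
def _example_mlp():
    net = MLP([128, 64, 64, 10], dropout=0.1)
    x = totensor(np.random.rand(4, 128))
    return net(x).shape  # torch.Size([4, 10])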
|
11524415
|
from django.http import HttpResponse
from django.core.serializers import serialize
from .models import Point, Voivodeship
from django.views.generic import TemplateView
from django.core.cache import cache
def points_view(request):
points_as_geojson = serialize('geojson', Point.objects.all())
return HttpResponse(points_as_geojson, content_type='json')
def voivodeships_view(request):
redis_key = 'voivodeships'
voivodeships = cache.get(redis_key)
if not voivodeships:
voivodeships = serialize('geojson', Voivodeship.objects.all())
cache.set(redis_key, voivodeships)
return HttpResponse(voivodeships, content_type='json')
class MainPageView(TemplateView):
template_name = 'voivodeships/index.html'
|
11524424
|
from .circle import Circle
from .circlemarker import CircleMarker
from .polygon import Polygon
from .polyline import Polyline
from .rectangle import Rectangle
|
11524434
|
from unittest import TestCase
from unittest.mock import MagicMock, patch
from signalflowgrapher.model.model import Model
from signalflowgrapher.commands.command_handler import CommandHandler
from signalflowgrapher.controllers.main_controller import MainController
from signalflowgrapher.commands.create_node_command import CreateNodeCommand
from signalflowgrapher.commands.transform_branch_command \
import TransformBranchCommand
from signalflowgrapher.commands.move_node_command import MoveNodeCommand
from signalflowgrapher.commands.move_label_command import MoveLabelCommand
from signalflowgrapher.commands.command_handler import ScriptCommand
from signalflowgrapher.commands.change_branch_weight_command \
import ChangeBranchWeightCommand
from signalflowgrapher.commands.change_node_name_command \
import ChangeNodeNameCommand
from signalflowgrapher.commands.create_branch_command \
import CreateBranchCommand
from signalflowgrapher.model.model \
import PositionedNode, CurvedBranch, LabeledObject
class TestMainController(TestCase):
def setUp(self):
self.model = MagicMock(Model)
self.command_handler = MagicMock(CommandHandler)
self.controller = MainController(self.model, self.command_handler)
def test_create_node(self):
node = MagicMock(PositionedNode)
command = MagicMock(CreateNodeCommand)
with patch("signalflowgrapher.controllers.main_controller."
"PositionedNode",
node):
with patch("signalflowgrapher.controllers.main_controller."
"CreateNodeCommand",
command):
self.controller.create_node(10, 20)
node.assert_called_once_with(self.model.graph, 10, 20, 0, 30)
command.assert_called_once_with(self.model.graph, node())
self.command_handler.add_command.assert_called_once_with(command())
def test_remove_nodes_and_branches(self):
node1 = MagicMock(PositionedNode)
node2 = MagicMock(PositionedNode)
node3 = MagicMock(PositionedNode)
branch1 = MagicMock(CurvedBranch)
branch2 = MagicMock(CurvedBranch)
branch3 = MagicMock(CurvedBranch)
branch_in = MagicMock(CurvedBranch)
branch_out = MagicMock(CurvedBranch)
node3.outgoing = {branch_in}
node3.ingoing = {branch_out}
script_command = MagicMock(ScriptCommand)
nodes_and_branches = [
node1,
node2,
node3,
branch1,
branch2,
branch3
]
with patch("signalflowgrapher.controllers.main_controller."
"ScriptCommand",
script_command):
self.controller.remove_nodes_and_branches(nodes_and_branches)
branch1.remove.assert_called_once()
branch2.remove.assert_called_once()
branch3.remove.assert_called_once()
node1.remove.assert_called_once()
node2.remove.assert_called_once()
node3.remove.assert_called_once()
branch_in.remove.assert_called_once()
branch_out.remove.assert_called_once()
script_command.assert_called_once()
def test_move_node(self):
node = MagicMock(PositionedNode)
branch1 = MagicMock(CurvedBranch)
branch2 = MagicMock(CurvedBranch)
node.ingoing = {branch1}
node.outgoing = {branch2}
transform_command = MagicMock(TransformBranchCommand)
move_command = MagicMock(MoveNodeCommand)
with patch("signalflowgrapher.controllers.main_controller."
"TransformBranchCommand",
transform_command):
with patch("signalflowgrapher.controllers.main_controller."
"MoveNodeCommand",
move_command):
self.controller.move_node(node, 51, 22)
self.model.graph.move_node_relative.assert_called_once_with(
node, 51, 22)
self.model.graph.transform_branch.assert_any_call(branch1,
0,
0,
51,
22)
self.model.graph.transform_branch.assert_any_call(branch2,
51,
22,
0,
0)
move_command.assert_called_once_with(node, 51, 22, self.model.graph)
transform_command.assert_any_call(self.model.graph,
branch1,
0,
0,
51,
22)
transform_command.assert_any_call(self.model.graph,
branch2,
51,
22,
0,
0)
self.assertEqual(3, self.command_handler.add_command.call_count)
def test_move_label_relative(self):
labeled_object = MagicMock(LabeledObject)
command = MagicMock()
r_command = MagicMock(MoveLabelCommand)
command.return_value = r_command
with patch("signalflowgrapher.controllers.main_controller."
"MoveLabelCommand",
command):
self.controller.move_label_relative(labeled_object, 99, -22)
command.assert_called_once_with(labeled_object,
99,
-22,
self.model.graph)
r_command.redo.assert_called_once()
self.command_handler.add_command.assert_called_once_with(command())
def test_set_branch_weight(self):
branch = MagicMock(CurvedBranch)
command = MagicMock()
r_command = MagicMock(ChangeBranchWeightCommand)
command.return_value = r_command
with patch("signalflowgrapher.controllers.main_controller."
"ChangeBranchWeightCommand",
command):
self.controller.set_branch_weight(branch, "New Branch Weight")
command.assert_called_once_with(branch,
"New Branch Weight",
self.model.graph)
r_command.redo.assert_called_once()
self.command_handler.add_command.assert_called_once_with(command())
def test_set_node_name(self):
node = MagicMock(PositionedNode)
command = MagicMock()
r_command = MagicMock(ChangeNodeNameCommand)
command.return_value = r_command
with patch("signalflowgrapher.controllers.main_controller."
"ChangeNodeNameCommand",
command):
self.controller.set_node_name(node, "New Node Name")
command.assert_called_once_with(node,
"New Node Name",
self.model.graph)
r_command.redo.assert_called_once()
self.command_handler.add_command.assert_called_once_with(command())
def test_transform_branch(self):
branch = MagicMock(CurvedBranch)
command = MagicMock(TransformBranchCommand)
with patch("signalflowgrapher.controllers.main_controller."
"TransformBranchCommand",
command):
self.controller.transform_branch(branch,
1,
2,
3,
4)
command.assert_called_once_with(self.model.graph,
branch,
1,
2,
3,
4)
self.command_handler.add_command.assert_called_once_with(command())
self.model.graph.transform_branch.assert_called_once_with(branch,
1,
2,
3,
4)
def test_create_branch(self):
start_node = MagicMock(PositionedNode)
end_node = MagicMock(PositionedNode)
command = MagicMock(CreateBranchCommand)
branch = MagicMock(CurvedBranch)
with patch("signalflowgrapher.controllers.main_controller."
"CreateBranchCommand",
command):
with patch("signalflowgrapher.controllers.main_controller."
"CurvedBranch",
branch):
self.controller.create_branch(start_node,
end_node,
10,
20,
30,
40,
11,
12,
"Weight Test")
branch.assert_called_once_with(start_node,
end_node,
10,
20,
30,
40,
11,
12,
"Weight Test")
command.assert_called_once_with(self.model.graph,
branch())
def test_create_branch_auto_pos(self):
start_node = MagicMock(PositionedNode)
start_node.x = 10
start_node.y = 10
end_node = MagicMock(PositionedNode)
end_node.x = 100
end_node.y = 10
command = MagicMock(CreateBranchCommand)
branch = MagicMock(CurvedBranch)
with patch("signalflowgrapher.controllers.main_controller."
"CreateBranchCommand",
command):
with patch("signalflowgrapher.controllers.main_controller."
"CurvedBranch",
branch):
self.controller.create_branch_auto_pos(start_node,
end_node,
"Weight Test")
branch.assert_called_once_with(start_node,
end_node,
55,
10,
55,
10,
0,
30,
"Weight Test")
command.assert_called_once_with(self.model.graph,
branch())
def test_create_branch_auto_pos_near_branch(self):
existing_branch = MagicMock(CurvedBranch)
existing_branch.spline1_x = 55
existing_branch.spline1_y = 10
existing_branch.spline2_x = 55
existing_branch.spline2_y = 10
existing_branch.start.x = 10
existing_branch.start.y = 10
existing_branch.end.x = 100
existing_branch.end.y = 10
self.model.graph.branches = {existing_branch}
start_node = MagicMock(PositionedNode)
start_node.x = 10
start_node.y = 10
end_node = MagicMock(PositionedNode)
end_node.x = 100
end_node.y = 10
command = MagicMock(CreateBranchCommand)
branch = MagicMock(CurvedBranch)
with patch("signalflowgrapher.controllers.main_controller."
"CreateBranchCommand",
command):
with patch("signalflowgrapher.controllers.main_controller."
"CurvedBranch",
branch):
self.controller.create_branch_auto_pos(start_node,
end_node,
"Weight Test")
branch.assert_called_once_with(start_node,
end_node,
9.999999999999995,
-35,
100,
-35,
0,
30,
"Weight Test")
command.assert_called_once_with(self.model.graph,
branch())
def test_create_branch_auto_pos_same_line(self):
existing_branch = MagicMock(CurvedBranch)
existing_branch.spline1_x = 55
existing_branch.spline1_y = 10
existing_branch.spline2_x = 55
existing_branch.spline2_y = 10
existing_branch.start.x = 10
existing_branch.start.y = 10
existing_branch.end.x = 100
existing_branch.end.y = 10
self.model.graph.branches = {existing_branch}
start_node = MagicMock(PositionedNode)
start_node.x = 20
start_node.y = 10
end_node = MagicMock(PositionedNode)
end_node.x = 50
end_node.y = 10
command = MagicMock(CreateBranchCommand)
branch = MagicMock(CurvedBranch)
with patch("signalflowgrapher.controllers.main_controller."
"CreateBranchCommand",
command):
with patch("signalflowgrapher.controllers.main_controller."
"CurvedBranch",
branch):
self.controller.create_branch_auto_pos(start_node,
end_node,
"Weight Test")
branch.assert_called_once_with(start_node,
end_node,
19.999999999999996,
-5,
50,
-5,
0,
30,
"Weight Test")
command.assert_called_once_with(self.model.graph,
branch())
def test_create_self_loop(self):
start_node = MagicMock(PositionedNode)
start_node.x = 10
start_node.y = 10
existing_branch = MagicMock(CurvedBranch)
existing_branch.spline1_x = 80.71067811865476
existing_branch.spline1_y = -60.710678118654755
existing_branch.spline2_x = -60.710678118654755
existing_branch.spline2_y = -60.710678118654755
existing_branch.start = start_node
existing_branch.end = start_node
self.model.graph.branches = {existing_branch}
command = MagicMock(CreateBranchCommand)
branch = MagicMock(CurvedBranch)
with patch("signalflowgrapher.controllers.main_controller."
"CreateBranchCommand",
command):
with patch("signalflowgrapher.controllers.main_controller."
"CurvedBranch",
branch):
self.controller.create_self_loop(start_node,
"Weight Test")
branch.assert_called_once()
call = branch.call_args[0]
self.assertEqual(start_node, call[0])
self.assertEqual(start_node, call[1])
self.assertAlmostEqual(110.71067811865476, call[2], delta=0.00001)
self.assertAlmostEqual(-90.71067811865476, call[3], delta=0.00001)
self.assertAlmostEqual(-90.71067811865476, call[4], delta=0.00001)
self.assertAlmostEqual(-90.71067811865476, call[5], delta=0.00001)
self.assertEqual(0, call[6])
self.assertEqual(-40, call[7])
self.assertEqual("Weight Test", call[8])
command.assert_called_once_with(self.model.graph,
branch())
def test_create_self_loop_existing_loop(self):
start_node = MagicMock(PositionedNode)
start_node.x = 10
start_node.y = 10
command = MagicMock(CreateBranchCommand)
branch = MagicMock(CurvedBranch)
with patch("signalflowgrapher.controllers.main_controller."
"CreateBranchCommand",
command):
with patch("signalflowgrapher.controllers.main_controller."
"CurvedBranch",
branch):
self.controller.create_self_loop(start_node,
"Weight Test")
branch.assert_called_once()
call = branch.call_args[0]
self.assertEqual(start_node, call[0])
self.assertEqual(start_node, call[1])
self.assertAlmostEqual(80.710678118654, call[2], delta=0.00001)
self.assertAlmostEqual(-60.71067811865, call[3], delta=0.00001)
self.assertAlmostEqual(-60.71067811865, call[4], delta=0.00001)
self.assertAlmostEqual(-60.71067811865, call[5], delta=0.00001)
self.assertEqual(0, call[6])
self.assertEqual(-40, call[7])
self.assertEqual("Weight Test", call[8])
command.assert_called_once_with(self.model.graph,
branch())
|
11524439
|
from openprocurement.tender.core.procedure.views.award_document import BaseAwardDocumentResource
from openprocurement.tender.core.procedure.models.document import PostDocument, PatchDocument, Document
from openprocurement.tender.core.procedure.validation import (
validate_input_data,
validate_patch_data,
validate_item_owner,
unless_bots,
update_doc_fields_on_put_document,
validate_upload_document,
validate_data_model,
)
from openprocurement.tender.limited.procedure.validation import (
validate_award_document_add_not_in_pending,
validate_document_operation_not_in_active,
)
from openprocurement.api.utils import json_view
from cornice.resource import resource
@resource(
name="reporting:Tender Award Documents",
collection_path="/tenders/{tender_id}/awards/{award_id}/documents",
path="/tenders/{tender_id}/awards/{award_id}/documents/{document_id}",
procurementMethodType="reporting",
description="Tender award documents",
)
class ReportingAwardDocumentResource(BaseAwardDocumentResource):
@json_view(
validators=(
unless_bots(validate_item_owner("tender")),
validate_input_data(PostDocument, allow_bulk=True),
validate_award_document_add_not_in_pending,
validate_document_operation_not_in_active,
),
permission="upload_award_documents",
)
def collection_post(self):
return super().collection_post()
@json_view(
validators=(
validate_item_owner("tender"),
validate_input_data(PostDocument),
validate_document_operation_not_in_active,
update_doc_fields_on_put_document,
validate_upload_document,
validate_data_model(Document),
),
permission="upload_award_documents",
)
def put(self):
return super().put()
@json_view(
content_type="application/json",
validators=(
validate_item_owner("tender"),
validate_input_data(PatchDocument, none_means_remove=True),
validate_patch_data(Document, item_name="document"),
validate_document_operation_not_in_active,
),
permission="edit_award_documents",
)
def patch(self):
return super().patch()
@resource(
name="negotiation:Tender Award Documents",
collection_path="/tenders/{tender_id}/awards/{award_id}/documents",
path="/tenders/{tender_id}/awards/{award_id}/documents/{document_id}",
procurementMethodType="negotiation",
description="Tender award documents",
)
class NegotiationAwardDocumentResource(ReportingAwardDocumentResource):
pass
@resource(
name="negotiation.quick:Tender Award Documents",
collection_path="/tenders/{tender_id}/awards/{award_id}/documents",
path="/tenders/{tender_id}/awards/{award_id}/documents/{document_id}",
procurementMethodType="negotiation.quick",
description="Tender award documents",
)
class NegotiationQuickAwardDocumentResource(ReportingAwardDocumentResource):
pass
|
11524446
|
from itertools import chain
import re
WORDS_TO_ROLLUP = {
'rollup-': 0,
'rollup': 1,
'rollup=maybe': 0,
'rollup=never': -2,
'rollup=iffy': -1,
'rollup=always': 1,
}
class IssueCommentCommand:
"""
A command that has been parsed out of a GitHub issue comment.
E.g., `@bors r+` => an issue command with action == 'approve'
"""
def __init__(self, action):
self.action = action
@classmethod
def approve(cls, approver, commit):
command = cls('approve')
command.commit = commit
command.actor = approver.lstrip('@')
return command
@classmethod
def unapprove(cls):
return cls('unapprove')
@classmethod
def prioritize(cls, priority):
command = cls('prioritize')
command.priority = priority
return command
@classmethod
def delegate_author(cls):
return cls('delegate-author')
@classmethod
def delegate(cls, delegate_to):
command = cls('delegate')
command.delegate_to = delegate_to.lstrip('@')
return command
@classmethod
def undelegate(cls):
return cls('undelegate')
@classmethod
def retry(cls):
return cls('retry')
@classmethod
def try_(cls):
return cls('try')
@classmethod
def untry(cls):
return cls('untry')
@classmethod
def rollup(cls, rollup_value):
command = cls('rollup')
command.rollup_value = rollup_value
return command
@classmethod
def squash(cls):
return cls('squash')
@classmethod
def unsquash(cls):
return cls('unsquash')
@classmethod
def force(cls):
return cls('force')
@classmethod
def clean(cls):
return cls('clean')
@classmethod
def ping(cls, ping_type='standard'):
command = cls('ping')
command.ping_type = ping_type
return command
@classmethod
def treeclosed(cls, treeclosed_value):
command = cls('treeclosed')
command.treeclosed_value = treeclosed_value
return command
@classmethod
def untreeclosed(cls):
return cls('untreeclosed')
@classmethod
def hook(cls, hook_name, hook_extra=None):
command = cls('hook')
command.hook_name = hook_name
command.hook_extra = hook_extra
return command
def is_sha(sha):
"""
Try to determine if the input is a git sha
"""
return re.match(r'^[0-9a-f]{4,}$', sha)
def hook_with_extra_is_in_hooks(word, hooks):
"""
Determine if the word given is the name of a valid hook, with extra data
hanging off of it (e.g., `validhookname=extradata`).
hook_with_extra_is_in_hooks(
'validhookname=stuff',
['validhookname', 'other'])
#=> True
hook_with_extra_is_in_hooks(
'invalidhookname=stuff',
['validhookname', 'other'])
#=> False
hook_with_extra_is_in_hooks(
'validhookname',
['validhookname', 'other'])
#=> False
"""
for hook in hooks:
if word.startswith('{}='.format(hook)):
return True
return False
def parse_issue_comment(username, body, sha, botname, hooks=[]):
"""
Parse an issue comment looking for commands that Homu should handle
Parameters:
username: the username of the user that created the issue comment.
This is without the leading @
body: the full body of the comment (markdown)
sha: the commit that the comment applies to
botname: the name of bot. This is without the leading @.
So if we should respond to `@bors {command}`, botname will be `bors`
hooks: a list of strings that are valid hook names.
E.g. `['hook1', 'hook2', 'hook3']`
"""
botname_regex = re.compile(r'^.*(?=@' + botname + ')')
# All of the 'words' after and including the botname
words = list(chain.from_iterable(
re.findall(r'\S+', re.sub(botname_regex, '', x))
for x
in body.splitlines()
if '@' + botname in x and not x.lstrip().startswith('>'))) # noqa
commands = []
if words[1:] == ["are", "you", "still", "there?"]:
commands.append(IssueCommentCommand.ping('portal'))
for i, word in enumerate(words):
if word is None:
# We already parsed the next word, and we set it to an empty string
# to signify that we did.
continue
if word == '@' + botname:
continue
if word == '@' + botname + ':':
continue
if word == 'r+' or word.startswith('r='):
approved_sha = sha
if i + 1 < len(words) and is_sha(words[i + 1]):
approved_sha = words[i + 1]
words[i + 1] = None
approver = word[len('r='):] if word.startswith('r=') else username
# Ignore "r=me"
if approver == 'me':
continue
commands.append(
IssueCommentCommand.approve(approver, approved_sha))
elif word == 'r-':
commands.append(IssueCommentCommand.unapprove())
elif word.startswith('p='):
try:
pvalue = int(word[len('p='):])
except ValueError:
continue
commands.append(IssueCommentCommand.prioritize(pvalue))
elif word.startswith('delegate='):
delegate = word[len('delegate='):]
commands.append(IssueCommentCommand.delegate(delegate))
elif word == 'delegate-':
commands.append(IssueCommentCommand.undelegate())
elif word == 'delegate+':
commands.append(IssueCommentCommand.delegate_author())
elif word == 'retry':
commands.append(IssueCommentCommand.retry())
elif word == 'try':
commands.append(IssueCommentCommand.try_())
elif word == 'try-':
commands.append(IssueCommentCommand.untry())
elif word in WORDS_TO_ROLLUP:
rollup_value = WORDS_TO_ROLLUP[word]
commands.append(IssueCommentCommand.rollup(rollup_value))
elif word == 'squash':
commands.append(IssueCommentCommand.squash())
elif word == 'squash-':
commands.append(IssueCommentCommand.unsquash())
elif word == 'force':
commands.append(IssueCommentCommand.force())
elif word == 'clean':
commands.append(IssueCommentCommand.clean())
elif (word == 'hello?' or word == 'ping'):
commands.append(IssueCommentCommand.ping())
elif word.startswith('treeclosed='):
try:
treeclosed = int(word[len('treeclosed='):])
commands.append(IssueCommentCommand.treeclosed(treeclosed))
except ValueError:
pass
elif word == 'treeclosed-':
commands.append(IssueCommentCommand.untreeclosed())
elif word in hooks:
commands.append(IssueCommentCommand.hook(word))
elif hook_with_extra_is_in_hooks(word, hooks):
# word is like `somehook=data` and `somehook` is in our list of
# potential hooks
(hook_name, hook_extra) = word.split('=', 2)
commands.append(IssueCommentCommand.hook(hook_name, hook_extra))
else:
# First time we reach an unknown word, stop parsing.
break
return commands
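# Illustrative example (an assumption-based sketch; the username, body and sha
# below are made up):
if __name__ == '__main__':
    cmds = parse_issue_comment(
        username='octocat',
        body='@bors r+ p=2 rollup',
        sha='abcdef1234',
        botname='bors',
    )
    print([c.action for c in cmds])  # ['approve', 'prioritize', 'rollup']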
|
11524461
|
import gym
from gym_extensions.continuous import mujoco
from gym.wrappers import Monitor
# available env list: https://github.com/Rowing0914/gym-extensions/blob/mujoco200/tests/all_tests.py
HalfCheetah_Env_list = [
"HalfCheetahGravityHalf-v1",
"HalfCheetahGravityThreeQuarters-v1",
"HalfCheetahGravityOneAndHalf-v1",
"HalfCheetahGravityOneAndQuarter-v1",
"HalfCheetahWall-v1",
"HalfCheetahWithSensor-v1",
"HalfCheetahBigTorso-v1",
"HalfCheetahBigThigh-v1",
"HalfCheetahBigLeg-v1",
"HalfCheetahBigFoot-v1",
"HalfCheetahSmallTorso-v1",
"HalfCheetahSmallThigh-v1",
"HalfCheetahSmallLeg-v1",
"HalfCheetahSmallFoot-v1",
"HalfCheetahSmallHead-v1",
"HalfCheetahBigHead-v1"
]
for env_name in HalfCheetah_Env_list:
print(env_name)
env = gym.make(env_name)
env = Monitor(env, "./video/video_{}".format(env_name), force=True)
n_trial = 10
all_rewards = list()
env.reset()
done = False
for _ in range(100):
action = env.action_space.sample()
s, r, done, info = env.step(action) # take a random action
env.close()
|
11524469
|
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
h = {}
for c in s:
if c not in h:
h[c] = 0
h[c] += 1
for c in t:
if c not in h:
h[c] = 0
h[c] -= 1
for k in h:
if h[k] != 0:
return False
return True
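# Quick sanity check (illustrative only):
if __name__ == '__main__':
    s = Solution()
    print(s.isAnagram("anagram", "nagaram"))  # True
    print(s.isAnagram("rat", "car"))          # False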
|
11524474
|
import collections
import logging
logger = logging.getLogger(__name__)
VERY_EARLY = -100
EARLY = -50
NORMAL = 0
LATE = 50
VERY_LATE = 100
Handler = collections.namedtuple("Handler", "handler priority")
class ServiceLocator:
def __init__(self):
self.providers = collections.defaultdict(lambda: [])
def register(self, service, handler, priority=NORMAL):
self.providers[service].append(Handler(handler, priority))
self.providers[service] = sorted(self.providers[service], key=lambda x: x.priority)
def provide(self, service, priority=NORMAL):
def decorator(f):
self.register(service, f, priority)
return f
return decorator
async def first_value(self, service, *args, **kwargs):
for handler in self.providers[service]:
value = await handler.handler(*args, **kwargs)
if value:
return value
locator = ServiceLocator()
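# Illustrative usage (a sketch; the "greeting" service and both handlers are
# made up). Handlers run in priority order and the first truthy value wins.
if __name__ == '__main__':
    import asyncio

    @locator.provide('greeting', priority=EARLY)
    async def empty_greeting(name):
        return None  # falsy, so first_value keeps looking

    @locator.provide('greeting', priority=LATE)
    async def fallback_greeting(name):
        return f'Hello, {name}'

    print(asyncio.run(locator.first_value('greeting', 'world')))  # Hello, world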
|
11524527
|
from __future__ import unicode_literals
from django.conf.urls import include, url
from braces.views import FormMessagesMixin
from envelope.views import ContactView
class MessagesContactView(FormMessagesMixin, ContactView):
    form_invalid_message = "There was an error in the contact form."
form_valid_message = "Thank you for your message."
template_name = "envelope/messages_contact.html"
urlpatterns = [
url(r'', include('envelope.urls')),
url(r'^crispy/', ContactView.as_view(template_name='envelope/crispy_contact.html'), name='crispy-contact'),
url(r'^messages/', MessagesContactView.as_view(), name='messages-contact'),
]
|
11524529
|
from genedom import BarcodesCollection
barcodes_collection = BarcodesCollection.from_specs(
n_barcodes=96, barcode_length=20, spacer='AA',
forbidden_enzymes=('BsaI', 'BsmBI', 'BbsI'),
barcode_tmin=55, barcode_tmax=70,
other_primer_sequences=(), heterodim_tmax=5,
max_homology_length=10, include_spacers=True,
names_template="B_%03d")
barcodes_collection.to_fasta('example_barcodes_collection.fa')
|
11524549
|
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), "r") as fh:
long_description = fh.read()
setup(name='google-search-results-serpwow',
version='1.1.11',
description='PIP package to scrape and parse Google, Bing, Yahoo, Yandex, Amazon, Ebay and Naver Search Results using SerpWow. Visit https://serpwow.com to get a free API key.',
url='https://github.com/serpwow/google-search-results-python',
author='SerpWow',
author_email='<EMAIL>',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*',
install_requires = ["requests"],
packages=['serpwow'],
long_description=long_description,
long_description_content_type="text/markdown"
)
|
11524582
|
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--num_runs', type=int, default=3)
args = parser.parse_args()
eg = ExperimentGrid(name='ppo-pyt-bench')
eg.add('env_name', 'CartPole-v0', '', True)
eg.add('seed', [10*i for i in range(args.num_runs)])
eg.add('epochs', 10)
eg.add('steps_per_epoch', 4000)
eg.add('ac_kwargs:hidden_sizes', [(32,), (64,64)], 'hid')
eg.add('ac_kwargs:activation', [torch.nn.Tanh, torch.nn.ReLU], '')
eg.run(ppo_pytorch, num_cpu=args.cpu)
|
11524592
|
class Digitizing:
def dig(self, node1="", node2="", ninc="", **kwargs):
"""Digitizes nodes to a surface.
APDL Command: DIG
Parameters
----------
node1, node2, ninc
Digitize nodes NODE1 through NODE2 in steps of NINC. NODE2
defaults to NODE1 and NINC defaults to 1.
Notes
-----
Digitizes nodes to the surface defined by the DSURF command. The nodes
indicated must be digitized from the tablet after this command is
given. The program must be in the interactive mode and the graphics
terminal show option [/SHOW] must be active. The global Cartesian
coordinates of the nodes are stored.
"""
command = f"DIG,{node1},{node2},{ninc}"
return self.run(command, **kwargs)
def dmove(self, node1="", node2="", ninc="", **kwargs):
"""Digitizes nodes on surfaces and along intersections.
APDL Command: DMOVE
Parameters
----------
node1, node2, ninc
Digitize nodes NODE1through NODE2 in steps of NINC. NODE2 defaults
to NODE1 and NINC defaults to 1.
Notes
-----
Digitizes nodes on undefined surfaces, warped surfaces, and along
intersection lines. Two orthogonal views showing the nodes on a plane
in each view are required. No surfaces need be specified. Two
coordinates are determined from the second view and the other
coordinate is retained from the first view. Use the DIG command to
first define nodes in one view (as determined from the DSET command).
Then reset the view and use this command to move the nodes to the
proper location.
"""
command = f"DMOVE,{node1},{node2},{ninc}"
return self.run(command, **kwargs)
def dset(self, node1="", node2="", node3="", ddev="", **kwargs):
"""Sets the scale and drawing plane orientation for a digitizing tablet.
APDL Command: DSET
Parameters
----------
node1, node2, node3
Any three (noncolinear) nodes defining a plane parallel to the
drawing. Nodes and actual locations (in any coordinate system)
must have been previously defined.
ddev
Digitizing device type number (device dependent).
Notes
-----
Sets drawing scale size and defines the drawing plane orientation for
use with a digitizing tablet. Drawings must be to scale. Views must
represent standard orthogonal parallel projections. The three nodes
indicated must be digitized [DIG] from the tablet after this command is
issued.
"""
command = f"DSET,{node1},{node2},{node3},{ddev}"
return self.run(command, **kwargs)
def dsurf(self, kcn="", xsurf="", ysurf="", zsurf="", **kwargs):
"""Defines the surface upon which digitized nodes lie.
APDL Command: DSURF
Parameters
----------
kcn
Surface is located in coordinate system KCN. KCN may be 0,1,2 or
any previously defined local coordinate system number.
xsurf, ysurf, zsurf
Input one value to define the surface constant. Input 999 in the
other two fields. Interpret fields as R, θ, Z for cylindrical or
R, θ, Φ for spherical or toroidal coordinate systems. XSURF and
YSURF default to 999 if KCN = 0.
Notes
-----
Defines the surface upon which the nodes to be digitized (with the DIG
command) actually lie. Surfaces are defined by a coordinate system
number and a coordinate constant [MOVE]. Two coordinates are
determined from the drawing and converted to surface coordinates. The
third coordinate is defined from the input surface constant. If nodes
lie on warped or undefined surfaces, use the DMOVE command.
"""
command = f"DSURF,{kcn},{xsurf},{ysurf},{zsurf}"
return self.run(command, **kwargs)
|
11524595
|
import os
import typing
from argparse import ArgumentParser
from cauldron import cli
from cauldron import environ
from cauldron.cli.commands.create import actions as create_actions
from cauldron.cli.commands.open import opener as project_opener
from cauldron.cli.commands.open import remote as remote_project_opener
from cauldron.cli.interaction import autocompletion
NAME = 'create'
DESCRIPTION = 'Create a new Cauldron project'
def populate(
parser: ArgumentParser,
raw_args: typing.List[str],
assigned_args: dict
):
"""..."""
parser.add_argument(
'project_name',
type=str,
default=None,
help=cli.reformat(
"""
The name of the project you want to create. A folder with this
name will be created and the cauldron project will be stored
within
"""
)
)
parser.add_argument(
'directory',
type=str,
default=None,
help=cli.reformat(
"""
The parent directory where the cauldron project directory will be
created.
"""
)
)
parser.add_argument(
'-t', '--title',
dest='title',
type=str,
default='',
help=cli.reformat('The title for the new project')
)
parser.add_argument(
'-s', '--summary',
dest='summary',
type=str,
default='',
help=cli.reformat('A short summary description of the new project')
)
parser.add_argument(
'-a', '--author',
dest='author',
type=str,
default='',
help=cli.reformat('Names of the author or authors of the project')
)
parser.add_argument(
'--no-naming-scheme',
dest='no_naming_scheme',
default=False,
action='store_true',
help=cli.reformat('Disables the auto naming scheme for the project')
)
parser.add_argument(
'--forget',
dest='forget',
default=False,
action='store_true',
help=cli.reformat('Forget that this project was opened')
)
parser.add_argument(
'--libs',
dest='library_folder',
default=None,
type=str,
help=cli.reformat('The name of the internal library root folder')
)
parser.add_argument(
'--assets',
dest='assets_folder',
default=None,
type=str,
help=cli.reformat('The name of the internal assets folder')
)
def execute(
context: cli.CommandContext,
project_name: str,
directory: str,
title: str = '',
summary: str = '',
author: str = '',
forget: bool = False,
no_naming_scheme: bool = False,
library_folder: str = None,
assets_folder: str = None
) -> environ.Response:
"""..."""
response = context.response
response.consume(create_actions.create_project_directories(
project_name,
directory,
assets_folder=assets_folder,
library_folder=library_folder
))
if response.failed:
return response
definition = create_actions.create_definition(
name=project_name,
title=title,
summary=summary,
author=author,
no_naming_scheme=no_naming_scheme,
library_folder=library_folder,
assets_folder=assets_folder
)
source_directory = response.data['source_directory']
response.consume(create_actions.create_first_step(
source_directory,
project_name
))
if response.failed:
return response
definition['steps'].append(response.data['step_name'])
response.consume(create_actions.write_project_data(
source_directory,
definition
))
if response.failed:
return response
if context.remote_connection.active:
opened = remote_project_opener.sync_open(
context=context,
path=source_directory,
forget=forget
)
else:
opened = project_opener.open_project(
source_directory,
forget=forget
)
return response.consume(opened)
def autocomplete(segment: str, line: str, parts: typing.List[str]):
"""..."""
if len(parts) < 2:
return []
value = parts[-1]
if value.startswith('@home:'):
segment = value.split(':', 1)[-1]
return autocompletion.match_path(
segment,
environ.paths.clean(os.path.join('~', 'cauldron', segment)),
include_files=False
)
environ.configs.load()
aliases = environ.configs.fetch('folder_aliases', {})
matches = ['@{}:'.format(x) for x in aliases.keys()]
for m in matches:
if value.startswith(m):
return autocompletion.match_path(
segment,
environ.paths.clean(os.path.join(
aliases[m[1:-1]]['path'],
value.split(':', 1)[-1]
)),
include_files=False
)
matches.append('@home:')
if value.startswith('@'):
return autocompletion.matches(segment, value, matches)
return autocompletion.match_path(segment, parts[-1])
|
11524619
|
from aws_cdk import (
aws_lambda as _lambda,
aws_apigatewayv2 as api_gw,
aws_apigatewayv2_integrations as integrations,
aws_ec2 as ec2,
aws_rds as rds,
aws_secretsmanager as secrets,
aws_ssm as ssm,
core
)
class TheRdsProxyStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# RDS needs to be setup in a VPC
vpc = ec2.Vpc(self, 'Vpc', max_azs=2)
# We need this security group to add an ingress rule and allow our lambda to query the proxy
lambda_to_proxy_group = ec2.SecurityGroup(self, 'Lambda to RDS Proxy Connection', vpc=vpc)
# We need this security group to allow our proxy to query our MySQL Instance
db_connection_group = ec2.SecurityGroup(self, 'Proxy to DB Connection', vpc=vpc)
db_connection_group.add_ingress_rule(db_connection_group,ec2.Port.tcp(3306), 'allow db connection')
db_connection_group.add_ingress_rule(lambda_to_proxy_group, ec2.Port.tcp(3306), 'allow lambda connection')
db_credentials_secret = secrets.Secret(self, 'DBCredentialsSecret',
secret_name=id+'-rds-credentials',
generate_secret_string=secrets.SecretStringGenerator(
secret_string_template="{\"username\":\"syscdk\"}",
exclude_punctuation=True,
include_space=False,
generate_string_key="password"
))
ssm.StringParameter(self, 'DBCredentialsArn',
parameter_name='rds-credentials-arn',
string_value=db_credentials_secret.secret_arn)
# MySQL DB Instance (delete protection turned off because pattern is for learning.)
# re-enable delete protection for a real implementation
rds_instance = rds.DatabaseInstance(self,
'DBInstance',
engine=rds.DatabaseInstanceEngine.mysql(
version=rds.MysqlEngineVersion.VER_5_7_30),
credentials=rds.Credentials.from_secret(db_credentials_secret),
instance_type=
ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
vpc=vpc,
removal_policy=core.RemovalPolicy.DESTROY,
deletion_protection=False,
security_groups=[db_connection_group])
# Create an RDS proxy
proxy = rds_instance.add_proxy(id+'-proxy',
secrets=[db_credentials_secret],
debug_logging=True,
vpc=vpc,
security_groups=[db_connection_group])
# Workaround for bug where TargetGroupName is not set but required
target_group = proxy.node.find_child('ProxyTargetGroup')
target_group.add_property_override('TargetGroupName', 'default')
rds_lambda = _lambda.Function(self, 'rdsProxyHandler',
runtime=_lambda.Runtime.NODEJS_12_X,
code=_lambda.Code.asset('lambda_fns/rds'),
handler='rdsLambda.handler',
vpc=vpc,
security_groups=[lambda_to_proxy_group],
environment={
"PROXY_ENDPOINT": proxy.endpoint,
"RDS_SECRET_NAME": id+'-rds-credentials'
})
db_credentials_secret.grant_read(rds_lambda)
# defines an API Gateway Http API resource backed by our "dynamoLambda" function.
api = api_gw.HttpApi(self, 'Endpoint',
            default_integration=integrations.LambdaProxyIntegration(handler=rds_lambda))
        core.CfnOutput(self, 'HTTP API Url', value=api.url)
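# Illustrative entry point (a sketch, not part of the original stack module;
# the stack id 'the-rds-proxy' is an assumption):
if __name__ == '__main__':
    app = core.App()
    TheRdsProxyStack(app, 'the-rds-proxy')
    app.synth()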
|