text stringlengths 957 885k |
|---|
"""The graphics module implements a simple GUI library."""
import sys
import math
try:
import tkinter
except Exception as e:
print('Could not load tkinter: ' + str(e))
# Seconds between animation frames (targets roughly 30 frames per second).
FRAME_TIME = 1/30
class Canvas:
    """A Canvas object supports drawing and animation primitives.

    draw_* methods return the id number of a shape object in the underlying Tk
    object. This id can be passed to move_* and edit_* methods.

    Canvas is a singleton; only one Canvas instance can be created.
    """
    _instance = None

    def __init__(self, width=1100, height=768, title='', color='White', tk=None):
        """Create the root window and canvas widget.

        width, height -- canvas size in pixels
        title -- window title (defaults to 'Graphics Window')
        color -- background color name
        tk -- optional existing tkinter.Tk root to reuse
        """
        # Singleton enforcement
        if Canvas._instance is not None:
            raise Exception('Only one canvas can be instantiated.')
        Canvas._instance = self
        # Attributes
        self.color = color
        self.width = width
        self.height = height
        # Root window; closing the window terminates the program.
        self._tk = tk or tkinter.Tk()
        self._tk.protocol('WM_DELETE_WINDOW', sys.exit)
        self._tk.title(title or 'Graphics Window')
        self._tk.bind('<Button-1>', self._click)
        self._click_pos = None
        # Canvas widget
        self._canvas = tkinter.Canvas(self._tk, width=width, height=height)
        self._canvas.pack()
        self._draw_background()
        self._canvas.update()
        # Cache of loaded images keyed by (file, scale). Holding the
        # PhotoImage references also prevents Tk from dropping them.
        self._images = dict()

    @staticmethod
    def _fill_color(color, fill_color, filled):
        """Return the effective fill color for a shape.

        Defaults to the outline color; an unfilled shape gets the empty
        string, which Tk treats as transparent.
        """
        if fill_color is None:
            fill_color = color
        if filled == 0:
            fill_color = ""
        return fill_color

    def clear(self, shape='all'):
        """Clear all shapes, text, and images."""
        self._canvas.delete(shape)
        if shape == 'all':
            self._draw_background()
        self._canvas.update()

    def draw_polygon(self, points, color='Black', fill_color=None, filled=1, smooth=0, width=1):
        """Draw a polygon and return its tkinter id.

        points -- a list of (x, y) pairs encoding pixel positions
        """
        fill_color = self._fill_color(color, fill_color, filled)
        return self._canvas.create_polygon(flattened(points), outline=color, fill=fill_color,
                                           smooth=smooth, width=width)

    def draw_circle(self, center, radius, color='Black', fill_color=None, filled=1, width=1):
        """Draw a circle and return its tkinter id.

        center -- an (x, y) pair encoding a pixel position
        """
        fill_color = self._fill_color(color, fill_color, filled)
        x0, y0 = [c - radius for c in center]
        x1, y1 = [c + radius for c in center]
        return self._canvas.create_oval(x0, y0, x1, y1, outline=color, fill=fill_color, width=width)

    def draw_line(self, start, end, color='Blue', width=1):
        """Draw a line and return its tkinter id.

        start, end -- (x, y) pairs encoding a pixel position
        """
        x0, y0 = start
        x1, y1 = end
        return self._canvas.create_line(x0, y0, x1, y1, fill=color, width=width)

    def draw_image(self, pos, image_file=None, scale=1, anchor=tkinter.NW, behind=0):
        """Draw an image from a file and return its tkinter id.

        scale -- zoom factor when >= 1, otherwise an inverse subsample factor
        behind -- if positive, lower the image beneath that tkinter id
        """
        key = (image_file, scale)
        if key not in self._images:
            image = tkinter.PhotoImage(file=image_file)
            if scale >= 1:
                image = image.zoom(int(scale))
            else:
                image = image.subsample(int(1 / scale))
            self._images[key] = image
        image = self._images[key]
        x, y = pos
        image_id = self._canvas.create_image(x, y, image=image, anchor=anchor)
        if behind > 0:
            self._canvas.tag_lower(image_id, behind)
        return image_id

    def draw_text(self, text, pos, color='Black', font='Arial',
                  size=12, style='normal', anchor=tkinter.NW):
        """Draw text and return its tkinter id."""
        x, y = pos
        font = (font, str(size), style)
        return self._canvas.create_text(x, y, fill=color, text=text, font=font, anchor=anchor)

    def edit_text(self, id, text=None, color=None, font=None, size=12,
                  style='normal'):
        """Edit the text, color, or font of an existing text object."""
        if color is not None:
            self._canvas.itemconfigure(id, fill=color)
        if text is not None:
            self._canvas.itemconfigure(id, text=text)
        if font is not None:
            self._canvas.itemconfigure(id, font=(font, str(size), style))

    def animate_shape(self, id, duration, points_fn, frame_count=0):
        """Animate an existing shape over points.

        duration -- total animation time in seconds
        points_fn -- maps a frame number to the shape's point list
        """
        max_frames = duration // FRAME_TIME
        points = points_fn(frame_count)
        self._canvas.coords(id, flattened(points))
        if frame_count < max_frames:
            def tail():
                """Continues the animation at the next frame."""
                self.animate_shape(id, duration, points_fn, frame_count + 1)
            self._tk.after(int(FRAME_TIME * 1000), tail)

    def slide_shape(self, id, end_pos, duration, elapsed=0):
        """Slide an existing shape to end_pos over duration seconds."""
        points = paired(self._canvas.coords(id))
        start_pos = points[0]
        max_frames = duration // FRAME_TIME
        def points_fn(frame_count):
            # Linear interpolation: offset grows with the fraction completed.
            completed = frame_count / max_frames
            offset = [(e - s) * completed for s, e in zip(start_pos, end_pos)]
            return [shift_point(p, offset) for p in points]
        self.animate_shape(id, duration, points_fn)

    def wait_for_click(self, seconds=0):
        """Return (position, elapsed) pair of click position and elapsed time.

        position: (x,y) pixel position of click
        elapsed: seconds elapsed since call
        seconds: maximum number of seconds to wait for a click; 0 waits forever
        If there is still no click after the given time, return (None, elapsed).
        """
        elapsed = 0
        while elapsed < seconds or seconds == 0:
            if self._click_pos is not None:
                pos = self._click_pos
                self._click_pos = None
                return pos, elapsed
            self._sleep(FRAME_TIME)
            elapsed += FRAME_TIME
        return None, elapsed

    def _draw_background(self):
        """Fill the whole canvas with the background color."""
        w, h = self.width - 1, self.height - 1
        corners = [(0, 0), (0, h), (w, h), (w, 0)]
        self.draw_polygon(corners, self.color, fill_color=self.color, filled=True, smooth=False)

    def _click(self, event):
        """Record the last left-click position (consumed by wait_for_click)."""
        self._click_pos = (event.x, event.y)

    def _sleep(self, seconds):
        """Block for the given time while keeping the Tk event loop alive."""
        self._tk.update_idletasks()
        self._tk.after(int(1000 * seconds), self._tk.quit)
        self._tk.mainloop()
def flattened(points):
    """Return a flat tuple of coordinates from a list of (x, y) pairs.

    Note: returns a tuple (suitable for passing to tkinter coords calls).
    """
    # Avoids the original's list-comprehension-for-side-effects idiom,
    # which built a throwaway list of None values.
    return tuple(coord for point in points for coord in point)
def paired(coords):
    """Return a list of (x, y) pairs from a flat list of coordinates."""
    assert len(coords) % 2 == 0, 'Coordinates are not paired.'
    # zip over the same iterator consumes two elements per pair.
    it = iter(coords)
    return list(zip(it, it))
def translate_point(point, angle, distance):
    """Translate a point a distance in a direction (angle, in radians)."""
    x, y = point
    dx = distance * math.cos(angle)
    dy = distance * math.sin(angle)
    return (x + dx, y + dy)
def shift_point(point, offset):
    """Shift a point by an (dx, dy) offset."""
    (x, y), (dx, dy) = point, offset
    return (x + dx, y + dy)
def rectangle_points(pos, width, height):
    """Return the corner points of a rectangle starting at pos."""
    left, top = pos
    right = left + width
    bottom = top + height
    return [(left, top), (left, bottom), (right, bottom), (right, top)]
def format_color(r, g, b):
    """Format a color as a '#rrggbb' hex string.

    r, g, b -- floats from 0 to 1 (each channel is scaled to 0-255)

    The original docstring claimed 0-255 integer inputs, but the code
    multiplies each channel by 255, so inputs are fractional intensities.
    """
    return '#{0:02x}{1:02x}{2:02x}'.format(int(r * 255), int(g * 255), int(b * 255))
<reponame>aviadlevis/bhnerf<gh_stars>0
import numpy as np
import xarray as xr
import functools
import math
import jax.numpy as jnp
import matplotlib.pyplot as plt
def mse(true, est):
    """Return the mean squared error between two arrays as a float."""
    return float(np.mean((true - est) ** 2))


def psnr(true, est):
    """Return the peak signal-to-noise ratio in dB (peak taken as max(true))."""
    return float(10.0 * np.log10(np.max(true) ** 2 / mse(true, est)))


def normalize(vector):
    """Return vector scaled to unit Euclidean norm."""
    return vector / np.sqrt(np.dot(vector, vector))
def linspace_xr(num, start=-0.5, stop=0.5, endpoint=True, units='unitless'):
    """
    Return a Dataset with coordinates spaced over a specified interval in N-dimensions.

    Parameters
    ----------
    num: int or tuple
        Number of grid points in 1D (x) or 2D (x, y) or 3D (x, y, z).
    start: float
        First grid point (included in the grid).
    stop: float
        Last grid point (included only when endpoint=True).
    endpoint: bool
        Optionally include the stop point in the grid.
    units: str, default='unitless'
        Units attribute stored on every dimension.

    Returns
    -------
    grid: xr.Dataset
        Coordinates linearly spaced over the desired interval.
    """
    axis_names = ('x', 'y', 'z')
    sizes = np.atleast_1d(num)
    coords = {
        axis_names[axis]: np.linspace(start, stop, size, endpoint=endpoint)
        for axis, size in enumerate(sizes)
    }
    grid = xr.Dataset(coords=coords)
    for dim in grid.dims:
        grid[dim].attrs.update(units=units)
    return grid
def gaussian_xr(resolution, center, std, fov=(1.0, 'unitless'), std_clip=np.inf):
    """
    Generate a Gaussian image as xarray.DataArray.
    Parameters
    ----------
    resolution: int or nd-array,
        Number of (x,y,z)-axis grid points.
    center: int or nd-array,
        Center of the gaussian in coordinates ('x', 'y', 'z')
    std: (stdx, stdy, stdz), or float,
        Gaussian standard deviation in x,y,z directions. If scalar specified isotropic std is used.
    fov: (float, str), default=(1.0, 'unitless')
        Field of view and units. Default is unitless 1.0.
    std_clip: float, default=np.inf
        Clip after this number of standard deviations
    Returns
    -------
    emission: xr.DataArray,
        A DataArray with Gaussian emission.
    """
    # Broadcast a scalar std to an isotropic (x, y, z) tuple.
    if np.isscalar(std): std = (std, std, std)
    if len(resolution) != len(center): raise AttributeError('resolution and center should have same length {} != {}'.format(
        len(resolution), len(center)))
    # Build a symmetric grid spanning the field of view.
    grid = linspace_xr(resolution, start=-fov[0]/2.0, stop=fov[0]/2.0, units=fov[1])
    # 3D case: unnormalized anisotropic Gaussian over (x, y, z).
    if 'x' in grid.dims and 'y' in grid.dims and 'z' in grid.dims:
        data = np.exp(-0.5*( ((grid.x - center[0])/std[0])**2 + ((grid.y - center[1])/std[1])**2 + ((grid.z - center[2])/std[2])**2 ))
        dims = ['x', 'y', 'z']
    # 2D case: note the output dims are ordered ('y', 'x').
    elif 'x' in grid.dims and 'y' in grid.dims:
        data = np.exp(-0.5*( ((grid.y - center[1])/std[1])**2 + ((grid.x - center[0])/std[0])**2 ))
        dims = ['y', 'x']
    else:
        # 1D grids are not supported.
        raise AttributeError
    # Values further than std_clip standard deviations are zeroed out.
    threshold = np.exp(-0.5 * std_clip ** 2)
    emission = xr.DataArray(
        name='emission',
        data=data.where(data > threshold).fillna(0.0),
        coords=grid.coords,
        dims=dims,
        attrs={
            'fov': fov,
            'std': std,
            'center': center,
            'std_clip': std_clip
        })
    return emission
def rotation_matrix(axis, angle, use_jax=False):
    """
    Return the rotation matrix associated with counterclockwise rotation about
    the given axis.

    Parameters
    ----------
    axis: list or np.array, dim=3
        Axis of rotation (need not be normalized).
    angle: float or numpy array of floats,
        Angle of rotation in radians.
    use_jax: bool, default=False
        Perform computations with jax.numpy instead of numpy.

    Returns
    -------
    rotation_matrix: np.array(shape=(3,3,...)),
        A rotation matrix. If angle is a numpy array additional dimensions
        are stacked at the end.

    References
    ----------
    [1] https://en.wikipedia.org/wiki/Euler%E2%80%93Rodrigues_formula
    [2] https://stackoverflow.com/questions/6802577/rotation-of-3d-vector
    """
    xp = jnp if use_jax else np
    unit_axis = xp.array(axis)
    unit_axis = unit_axis / xp.sqrt(xp.dot(unit_axis, unit_axis))
    # Euler-Rodrigues parameters: a is the scalar part, (b, c, d) the vector part.
    a = xp.cos(angle / 2.0)
    b, c, d = xp.stack([-component * xp.sin(angle / 2.0) for component in unit_axis])
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return xp.array([
        [aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
        [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
        [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc],
    ])
def spherical_coords_to_rotation_axis(theta, phi):
    """
    Transform spherical coordinates into a rotation axis and angle.

    Parameters
    ----------
    theta: float,
        Zenith angle (rad).
    phi: float,
        Azimuth angle (rad).

    Returns
    -------
    rot_axis: 3-vector,
        Rotation axis.
    rot_angle: float,
        Rotation angle about rot_axis (equals phi).
    """
    z_hat = np.array([0, 0, 1])
    radial = np.array([
        np.cos(phi) * np.sin(theta),
        np.sin(phi) * np.sin(theta),
        np.cos(theta),
    ])
    # Axis perpendicular to both the radial direction and z.
    in_plane_axis = np.cross(radial, z_hat)
    # Rotate the radial vector a quarter turn about that perpendicular axis.
    quarter_turn = rotation_matrix(in_plane_axis, np.pi / 2)
    rot_axis = np.matmul(quarter_turn, radial)
    return rot_axis, phi
def world_to_image_coords(coords, fov, npix, use_jax=False):
    """
    Map world coordinates to (fractional) image-pixel coordinates.

    Parameters
    ----------
    coords: array(..., ndim)
        World coordinates; the last axis indexes the dimension.
    fov: sequence of floats,
        Field of view along each dimension.
    npix: sequence of ints,
        Number of pixels along each dimension.
    use_jax: bool, default=False
        Perform computations with jax.numpy instead of numpy.

    Returns
    -------
    image_coords: array(..., ndim)
        Coordinates mapped so that -fov/2 -> 0 and +fov/2 -> npix - 1.
    """
    _np = jnp if use_jax else np
    image_coords = []
    for i in range(coords.shape[-1]):
        image_coords.append((coords[..., i] + fov[i] / 2.0) / fov[i] * (npix[i] - 1))
    return _np.stack(image_coords, axis=-1)
def intensity_to_nchw(intensity, cmap='viridis', gamma=0.5):
    """
    Convert a grayscale image to an NCHW color image (for tensorboard logging).

    N: number of images in the batch
    C: number of channels of the image (3 for RGB)
    H: height of the image
    W: width of the image

    Parameters
    ----------
    intensity: array,
        Grayscale intensity image.
    cmap : str, default='viridis'
        A registered colormap name used to map scalar data to colors.
    gamma: float, default=0.5
        Gamma correction term.

    Returns
    -------
    nchw_images: array,
        Array of images.
    """
    colormap = plt.get_cmap(cmap)
    lo = np.min(intensity)
    hi = np.max(intensity)
    # Normalize to [0, 1], then gamma-correct before colormapping.
    normalized = ((intensity - lo) / (hi - lo)) ** gamma
    rgb = colormap(normalized)[..., :3]
    return np.moveaxis(rgb, (0, 1, 2, 3), (3, 2, 0, 1))
def anti_aliasing_filter(image_plane, window):
    """
    Anti-aliasing filtering / blurring via FFT-based convolution.

    Parameters
    ----------
    image_plane: np.array,
        2D image or 3D movie (frames are in the first index).
    window: np.array
        2D image used for anti-aliasing filtering.

    Returns
    -------
    image_plane: np.array,
        2D image or 3D movie (frames are in the first index).
    """
    # Multiply spectra (convolution theorem); shifts center the window kernel.
    image_spectrum = jnp.fft.fft2(jnp.fft.ifftshift(image_plane, axes=(-2, -1)))
    window_spectrum = jnp.fft.fft2(jnp.fft.ifftshift(window))
    filtered = jnp.fft.ifft2(image_spectrum * window_spectrum)
    return jnp.fft.ifftshift(filtered, axes=(-2, -1)).real
"""Passive BLE monitor sensor platform."""
import asyncio
from datetime import timedelta
import logging
import queue
import statistics as sts
import struct
from threading import Thread
from Cryptodome.Cipher import AES
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_POWER,
BinarySensorEntity,
)
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
CONDUCTIVITY,
EVENT_HOMEASSISTANT_STOP,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
ATTR_BATTERY_LEVEL,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.dt as dt_util
# It was decided to temporarily include this file in the integration bundle
# until the issue with checking the adapter's capabilities is resolved in the official aioblescan repo
# see https://github.com/frawau/aioblescan/pull/30, thanks to @vicamo
from . import aioblescan_ext as aiobs
from . import (
CONF_DEVICES,
CONF_DISCOVERY,
CONF_ROUNDING,
CONF_DECIMALS,
CONF_PERIOD,
CONF_LOG_SPIKES,
CONF_USE_MEDIAN,
CONF_ACTIVE_SCAN,
CONF_HCI_INTERFACE,
CONF_BATT_ENTITIES,
CONF_REPORT_UNKNOWN,
CONF_RESTORE_STATE,
DEFAULT_HCI_INTERFACE,
)
from .const import (
CONF_TMIN,
CONF_TMAX,
CONF_HMIN,
CONF_HMAX,
XIAOMI_TYPE_DICT,
MMTS_DICT,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
# Pre-compiled little-endian structs for sensor payload conversions.
TH_STRUCT = struct.Struct("<hH")   # temperature (int16) + humidity (uint16)
H_STRUCT = struct.Struct("<H")     # humidity (uint16)
T_STRUCT = struct.Struct("<h")     # temperature (int16)
CND_STRUCT = struct.Struct("<H")   # conductivity (uint16)
ILL_STRUCT = struct.Struct("<I")   # illuminance (3 bytes zero-padded to uint32)
FMDH_STRUCT = struct.Struct("<H")  # formaldehyde (uint16)
async def async_setup_platform(hass, conf, add_entities, discovery_info=None):
    """Set up the sensor platform."""
    _LOGGER.debug("Platform startup")
    ble_config = hass.data[DOMAIN]
    ble_monitor = BLEmonitor(ble_config, add_entities)
    ble_monitor.start()
    # Stop the monitor thread cleanly when Home Assistant shuts down.
    hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, ble_monitor.shutdown_handler)
    _LOGGER.debug("Platform setup finished")
    # Return successful setup
    return True
async def async_setup_entry(hass, config_entry, add_entities):
    """Set up the sensor platform from a config entry."""
    _LOGGER.debug("Platform startup")
    # Copy the entry data into a plain mutable dict (replaces the
    # original key-by-key loop) and fill in missing defaults.
    config = dict(config_entry.data)
    config.setdefault(CONF_HCI_INTERFACE, [DEFAULT_HCI_INTERFACE])
    config.setdefault(CONF_DEVICES, [])
    monitor = BLEmonitor(config, add_entities)
    monitor.start()
    # Stop the monitor thread cleanly when Home Assistant shuts down.
    hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, monitor.shutdown_handler)
    _LOGGER.debug("Platform setup finished")
    # Return successful setup
    return True
class BLEmonitor(Thread):
    """BLE ADV messages parser and entities updater.

    A daemon-style worker thread: raw HCI events arrive on self.dataqueue
    from BLEScanner/HCIdump, are parsed into MiBeacon readings, and pushed
    into Home Assistant entities via add_entities / schedule_update_ha_state.
    """

    def __init__(self, config, add_entities):
        """Initiate BLE monitor."""
        def reverse_mac(rmac):
            """Change LE order to BE."""
            if len(rmac) != 12:
                return None
            return rmac[10:12] + rmac[8:10] + rmac[6:8] + rmac[4:6] + rmac[2:4] + rmac[0:2]
        Thread.__init__(self)
        _LOGGER.debug("BLE monitor initialization")
        # Queue of raw HCI events produced by the scanner thread(s).
        self.dataqueue = queue.Queue()
        self.scanner = None
        self.config = config
        # reversed-MAC bytes -> AES key, for encrypted MiBeacon payloads
        self.aeskeys = {}
        self.whitelist = []
        self.discovery = True
        self.period = config[CONF_PERIOD]
        self.log_spikes = config[CONF_LOG_SPIKES]
        self.batt_entities = config[CONF_BATT_ENTITIES]
        self.report_unknown = False
        if config[CONF_REPORT_UNKNOWN]:
            self.report_unknown = True
            _LOGGER.info(
                "Attention! Option report_unknown is enabled, be ready for a huge output..."
            )
        # prepare device:key lists to speedup parser
        if config[CONF_DEVICES]:
            for device in config[CONF_DEVICES]:
                if "encryption_key" in device:
                    p_mac = bytes.fromhex(
                        reverse_mac(device["mac"].replace(":", "")).lower()
                    )
                    p_key = bytes.fromhex(device["encryption_key"].lower())
                    self.aeskeys[p_mac] = p_key
                else:
                    continue
        _LOGGER.debug("%s encryptors mac:key pairs loaded.", len(self.aeskeys))
        # Discovery off means only whitelisted (configured) devices are parsed.
        if isinstance(config[CONF_DISCOVERY], bool):
            if config[CONF_DISCOVERY] is False:
                self.discovery = False
                if config[CONF_DEVICES]:
                    for device in config[CONF_DEVICES]:
                        self.whitelist.append(device["mac"])
        # remove duplicates from whitelist
        self.whitelist = list(dict.fromkeys(self.whitelist))
        _LOGGER.debug("whitelist: [%s]", ", ".join(self.whitelist).upper())
        # Store whitelist entries as reversed-MAC bytes to match raw ADV data.
        for i, mac in enumerate(self.whitelist):
            self.whitelist[i] = bytes.fromhex(reverse_mac(mac.replace(":", "")).lower())
        _LOGGER.debug("%s whitelist item(s) loaded.", len(self.whitelist))
        self.add_entities = add_entities
        _LOGGER.debug("BLE monitor initialized")

    def shutdown_handler(self, event):
        """Run homeassistant_stop event handler."""
        _LOGGER.debug("Running homeassistant_stop event handler: %s", event)
        self.join()

    def join(self, timeout=10):
        """Join BLEmonitor thread."""
        _LOGGER.debug("BLE monitor thread: joining")
        if isinstance(self.scanner, BLEScanner):
            self.scanner.stop()
        # A sentinel None wakes the run() loop so it can exit.
        self.dataqueue.put(None)
        Thread.join(self, timeout)
        _LOGGER.debug("BLE monitor thread: joined")

    def run(self):
        """Parser and entity update loop."""
        def parse_raw_message(data):
            """Parse the raw data.

            Returns a dict of readings for a recognized MiBeacon ADV,
            or None when the packet is invalid, filtered, or a duplicate.
            """
            # check if packet is Extended scan result
            is_ext_packet = True if data[3] == 0x0d else False
            # check for Xiaomi service data
            xiaomi_index = data.find(b'\x16\x95\xFE', 15 + 15 if is_ext_packet else 0)
            if xiaomi_index == -1:
                return None
            # check for no BR/EDR + LE General discoverable mode flags
            advert_start = 29 if is_ext_packet else 14
            adv_index = data.find(b"\x02\x01\x06", advert_start, 3 + advert_start)
            adv_index2 = data.find(b"\x15\x16\x95", advert_start, 3 + advert_start)
            if adv_index == -1 and adv_index2 == -1:
                return None
            if adv_index2 != -1:
                adv_index = adv_index2
            # check for BTLE msg size
            msg_length = data[2] + 3
            if msg_length != len(data):
                return None
            # check for MAC presence in message and in service data
            xiaomi_mac_reversed = data[xiaomi_index + 8:xiaomi_index + 14]
            mac_index = adv_index - 14 if is_ext_packet else adv_index
            source_mac_reversed = data[mac_index - 7:mac_index - 1]
            if xiaomi_mac_reversed != source_mac_reversed:
                return None
            # check for MAC presence in whitelist, if needed
            if self.discovery is False:
                if xiaomi_mac_reversed not in self.whitelist:
                    return None
            # skip duplicated packets (same packet id as the previous one)
            packet_id = data[xiaomi_index + 7]
            try:
                prev_packet = parse_raw_message.lpacket_id[xiaomi_mac_reversed]
            except KeyError:
                prev_packet = None
            if prev_packet == packet_id:
                return None
            parse_raw_message.lpacket_id[xiaomi_mac_reversed] = packet_id
            # extract RSSI byte
            rssi_index = 18 if is_ext_packet else msg_length - 1
            (rssi,) = struct.unpack("<b", data[rssi_index:rssi_index + 1])
            # strange positive RSSI workaround
            if rssi > 0:
                rssi = -rssi
            try:
                sensor_type = XIAOMI_TYPE_DICT[
                    data[xiaomi_index + 5:xiaomi_index + 7]
                ]
            except KeyError:
                if self.report_unknown:
                    _LOGGER.info(
                        "BLE ADV from UNKNOWN: RSSI: %s, MAC: %s, ADV: %s",
                        rssi,
                        ''.join('{:02X}'.format(x) for x in xiaomi_mac_reversed[::-1]),
                        data.hex()
                    )
                return None
            # frame control bits
            framectrl, = struct.unpack('>H', data[xiaomi_index + 3:xiaomi_index + 5])
            # check data is present
            if not (framectrl & 0x4000):
                return {
                    "rssi": rssi,
                    "mac": ''.join('{:02X}'.format(x) for x in xiaomi_mac_reversed[::-1]),
                    "type": sensor_type,
                    "packet": packet_id,
                    "data": False,
                }
            xdata_length = 0
            xdata_point = 0
            # check capability byte present
            if framectrl & 0x2000:
                xdata_length = -1
                xdata_point = 1
            # xiaomi data length = message length
            # -all bytes before XiaomiUUID
            # -3 bytes Xiaomi UUID + ADtype
            # -1 byte rssi
            # -3+1 bytes sensor type
            # -1 byte packet_id
            # -6 bytes MAC
            # - capability byte offset
            xdata_length += msg_length - xiaomi_index - 15
            if xdata_length < 3:
                return None
            xdata_point += xiaomi_index + 14
            # check if xiaomi data start and length is valid
            if xdata_length != len(data[xdata_point:-1]):
                return None
            # check encrypted data flags
            if framectrl & 0x0800:
                # try to find encryption key for current device
                try:
                    key = self.aeskeys[xiaomi_mac_reversed]
                except KeyError:
                    # no encryption key found
                    return None
                # AES-CCM nonce: reversed MAC + device type + packet id + counter
                nonce = b"".join(
                    [
                        xiaomi_mac_reversed,
                        data[xiaomi_index + 5:xiaomi_index + 7],
                        data[xiaomi_index + 7:xiaomi_index + 8]
                    ]
                )
                encrypted_payload = data[xdata_point:msg_length - 1]
                aad = b"\x11"
                # trailing 4 bytes: MIC token; 3 bytes before it: payload counter
                token = encrypted_payload[-4:]
                payload_counter = encrypted_payload[-7:-4]
                nonce = b"".join([nonce, payload_counter])
                cipherpayload = encrypted_payload[:-7]
                cipher = AES.new(key, AES.MODE_CCM, nonce=nonce, mac_len=4)
                cipher.update(aad)
                decrypted_payload = None
                try:
                    decrypted_payload = cipher.decrypt_and_verify(cipherpayload, token)
                except ValueError as error:
                    _LOGGER.error("Decryption failed: %s", error)
                    _LOGGER.error("token: %s", token.hex())
                    _LOGGER.error("nonce: %s", nonce.hex())
                    _LOGGER.error("encrypted_payload: %s", encrypted_payload.hex())
                    _LOGGER.error("cipherpayload: %s", cipherpayload.hex())
                    return None
                if decrypted_payload is None:
                    _LOGGER.error(
                        "Decryption failed for %s, decrypted payload is None",
                        "".join("{:02X}".format(x) for x in xiaomi_mac_reversed[::-1]),
                    )
                    return None
                # replace cipher with decrypted data
                msg_length -= len(data[xdata_point:msg_length - 1])
                data = b"".join((data[:xdata_point], decrypted_payload, data[-1:]))
                msg_length += len(decrypted_payload)
            result = {
                "rssi": rssi,
                "mac": ''.join('{:02X}'.format(x) for x in xiaomi_mac_reversed[::-1]),
                "type": sensor_type,
                "packet": packet_id,
                "data": True,
            }
            # loop through xiaomi payload
            # assume that the data may have several values of different types,
            # although I did not notice this behavior with my LYWSDCGQ sensors
            res = None
            while True:
                # TLV-style records: 2-byte type code, 1-byte length, value
                xvalue_typecode = data[xdata_point:xdata_point + 2]
                try:
                    xvalue_length = data[xdata_point + 2]
                except ValueError as error:
                    _LOGGER.error("xvalue_length conv. error: %s", error)
                    _LOGGER.error("xdata_point: %s", xdata_point)
                    _LOGGER.error("data: %s", data.hex())
                    result = {}
                    break
                except IndexError as error:
                    _LOGGER.error("Wrong xdata_point: %s", error)
                    _LOGGER.error("xdata_point: %s", xdata_point)
                    _LOGGER.error("data: %s", data.hex())
                    result = {}
                    break
                xnext_point = xdata_point + 3 + xvalue_length
                xvalue = data[xdata_point + 3:xnext_point]
                vlength = len(xvalue)
                if vlength == 4:
                    if xvalue_typecode == b'\x0D\x10':
                        (temp, humi) = TH_STRUCT.unpack(xvalue)
                        res = {"temperature": temp / 10, "humidity": humi / 10}
                if vlength == 2:
                    if xvalue_typecode == b'\x06\x10':
                        (humi,) = H_STRUCT.unpack(xvalue)
                        res = {"humidity": humi / 10}
                    if xvalue_typecode == b'\x04\x10':
                        (temp,) = T_STRUCT.unpack(xvalue)
                        res = {"temperature": temp / 10}
                    if xvalue_typecode == b'\x09\x10':
                        (cond,) = CND_STRUCT.unpack(xvalue)
                        res = {"conductivity": cond}
                    if xvalue_typecode == b'\x10\x10':
                        (fmdh,) = FMDH_STRUCT.unpack(xvalue)
                        res = {"formaldehyde": fmdh / 100}
                if vlength == 1:
                    if xvalue_typecode == b'\x0A\x10':
                        res = {"battery": xvalue[0]}
                    if xvalue_typecode == b'\x08\x10':
                        res = {"moisture": xvalue[0]}
                    if xvalue_typecode == b'\x12\x10':
                        res = {"switch": xvalue[0]}
                    if xvalue_typecode == b'\x18\x10':
                        res = {"light": xvalue[0]}
                    if xvalue_typecode == b'\x19\x10':
                        res = {"opening": xvalue[0]}
                    if xvalue_typecode == b'\x13\x10':
                        res = {"consumable": xvalue[0]}
                if vlength == 3:
                    if xvalue_typecode == b'\x07\x10':
                        # 3-byte little-endian value, zero-padded to uint32
                        (illum,) = ILL_STRUCT.unpack(xvalue + b'\x00')
                        res = {"illuminance": illum}
                if res:
                    result.update(res)
                else:
                    if self.report_unknown:
                        _LOGGER.info(
                            "UNKNOWN data from DEVICE: %s, MAC: %s, ADV: %s",
                            sensor_type,
                            ''.join('{:02X}'.format(x) for x in xiaomi_mac_reversed[::-1]),
                            data.hex()
                        )
                if xnext_point > msg_length - 3:
                    break
                xdata_point = xnext_point
            return result

        def temperature_limit(config, mac, temp):
            """Set limits for temperature measurement in °C or °F."""
            fmac = ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
            if config[CONF_DEVICES]:
                for device in config[CONF_DEVICES]:
                    if fmac in device["mac"].upper():
                        if "temperature_unit" in device:
                            if device["temperature_unit"] == TEMP_FAHRENHEIT:
                                temp_fahrenheit = temp * 9 / 5 + 32
                                return temp_fahrenheit
                        break
            return temp

        _LOGGER.debug("Dataparser loop started!")
        self.scanner = BLEScanner(self.config, self.dataqueue)
        self.scanner.start()
        # per-device last-seen packet id, stored as a function attribute
        parse_raw_message.lpacket_id = {}
        sensors_by_mac = {}
        batt = {}  # batteries
        rssi = {}
        hcievent_cnt = 0
        mibeacon_cnt = 0
        # binary sensors waiting to push an update once they are ready
        hpriority = []
        ts_last = dt_util.now()
        ts_now = ts_last
        data = None
        while True:
            try:
                advevent = self.dataqueue.get(block=True, timeout=1)
                if advevent is None:
                    # sentinel pushed by join(): exit the loop
                    _LOGGER.debug("Dataparser loop stopped")
                    return True
                data = parse_raw_message(advevent)
                hcievent_cnt += 1
            except queue.Empty:
                pass
            if len(hpriority) > 0:
                for entity in hpriority:
                    if entity.ready_for_update is True:
                        hpriority.remove(entity)
                        entity.schedule_update_ha_state(True)
            if data:
                mibeacon_cnt += 1
                mac = data["mac"]
                # the RSSI value will be averaged for all valuable packets
                if mac not in rssi:
                    rssi[mac] = []
                rssi[mac].append(int(data["rssi"]))
                batt_attr = None
                sensortype = data["type"]
                # per-type indices into the sensors list; 9 means "not present"
                t_i, h_i, m_i, c_i, i_i, f_i, cn_i, sw_i, op_i, l_i, b_i = MMTS_DICT[sensortype]
                if mac not in sensors_by_mac:
                    # first packet from this device: create its entities
                    sensors = []
                    if t_i != 9:
                        sensors.insert(t_i, TemperatureSensor(self.config, mac, sensortype))
                    if h_i != 9:
                        sensors.insert(h_i, HumiditySensor(self.config, mac, sensortype))
                    if m_i != 9:
                        sensors.insert(m_i, MoistureSensor(self.config, mac, sensortype))
                    if c_i != 9:
                        sensors.insert(c_i, ConductivitySensor(self.config, mac, sensortype))
                    if i_i != 9:
                        sensors.insert(i_i, IlluminanceSensor(self.config, mac, sensortype))
                    if f_i != 9:
                        sensors.insert(f_i, FormaldehydeSensor(self.config, mac, sensortype))
                    if cn_i != 9:
                        sensors.insert(cn_i, ConsumableSensor(self.config, mac, sensortype))
                    if sw_i != 9:
                        sensors.insert(sw_i, PowerBinarySensor(self.config, mac, sensortype))
                    if op_i != 9:
                        sensors.insert(op_i, OpeningBinarySensor(self.config, mac, sensortype))
                    if l_i != 9:
                        sensors.insert(l_i, LightBinarySensor(self.config, mac, sensortype))
                    if self.batt_entities and (b_i != 9):
                        sensors.insert(b_i, BatterySensor(self.config, mac, sensortype))
                    sensors_by_mac[mac] = sensors
                    self.add_entities(sensors)
                else:
                    sensors = sensors_by_mac[mac]
                if data["data"] is False:
                    data = None
                    continue
                # store found readings per device
                if (b_i != 9):
                    if "battery" in data:
                        batt[mac] = int(data["battery"])
                        batt_attr = batt[mac]
                        if self.batt_entities:
                            sensors[b_i].collect(data)
                    else:
                        try:
                            batt_attr = batt[mac]
                        except KeyError:
                            batt_attr = None
                # schedule an immediate update of binary sensors
                if "switch" in data:
                    switch = sensors[sw_i]
                    switch.collect(data, batt_attr)
                    if switch.ready_for_update is True:
                        switch.schedule_update_ha_state(True)
                    else:
                        hpriority.append(switch)
                if "opening" in data:
                    opening = sensors[op_i]
                    opening.collect(data, batt_attr)
                    if opening.ready_for_update is True:
                        opening.schedule_update_ha_state(True)
                    else:
                        hpriority.append(opening)
                if "light" in data:
                    light = sensors[l_i]
                    light.collect(data, batt_attr)
                    if light.ready_for_update is True:
                        light.schedule_update_ha_state(True)
                    else:
                        hpriority.append(light)
                # measuring sensors
                if "temperature" in data:
                    if (
                        temperature_limit(self.config, mac, CONF_TMAX)
                        >= data["temperature"]
                        >= temperature_limit(self.config, mac, CONF_TMIN)
                    ):
                        sensors[t_i].collect(data, batt_attr)
                    elif self.log_spikes:
                        _LOGGER.error(
                            "Temperature spike: %s (%s)",
                            data["temperature"],
                            mac,
                        )
                if "humidity" in data:
                    if CONF_HMAX >= data["humidity"] >= CONF_HMIN:
                        sensors[h_i].collect(data, batt_attr)
                    elif self.log_spikes:
                        _LOGGER.error(
                            "Humidity spike: %s (%s)",
                            data["humidity"],
                            mac,
                        )
                if "conductivity" in data:
                    sensors[c_i].collect(data, batt_attr)
                if "moisture" in data:
                    sensors[m_i].collect(data, batt_attr)
                if "illuminance" in data:
                    sensors[i_i].collect(data, batt_attr)
                if "formaldehyde" in data:
                    sensors[f_i].collect(data, batt_attr)
                if "consumable" in data:
                    sensors[cn_i].collect(data, batt_attr)
                data = None
            # once per period: restart the scanner and flush entity updates
            ts_now = dt_util.now()
            if ts_now - ts_last < timedelta(seconds=self.period):
                continue
            ts_last = ts_now
            # restarting scanner
            jres = self.scanner.stop()
            if jres is False:
                _LOGGER.error("HCIdump thread(s) is not completed, interrupting data processing!")
                continue
            self.scanner.start()
            # for every updated device
            upd_evt = False
            for mac, elist in sensors_by_mac.items():
                for entity in elist:
                    if entity.pending_update is True:
                        if entity.ready_for_update is True:
                            entity.rssi_values = rssi[mac].copy()
                            entity.schedule_update_ha_state(True)
                            upd_evt = True
                if upd_evt:
                    rssi[mac].clear()
                upd_evt = False
            rssi.clear()
            _LOGGER.debug(
                "%i HCI Events parsed, %i valuable MiBeacon BLE ADV messages. Found %i known device(s) total. Priority queue = %i",
                hcievent_cnt,
                mibeacon_cnt,
                len(sensors_by_mac),
                len(hpriority),
            )
            hcievent_cnt = 0
            mibeacon_cnt = 0
class HCIdump(Thread):
    """Mimic deprecated hcidump tool.

    Opens a raw Bluetooth socket per configured HCI interface, runs a
    private asyncio event loop, and forwards every received HCI event
    into the shared dataqueue.
    """

    def __init__(self, config, dataqueue):
        """Initiate HCIdump thread."""
        Thread.__init__(self)
        _LOGGER.debug("HCIdump thread: Init")
        self._interfaces = config[CONF_HCI_INTERFACE]
        # 1 = active scan, 0 = passive scan
        self._active = int(config[CONF_ACTIVE_SCAN] is True)
        self.dataqueue = dataqueue
        self._event_loop = None

    def process_hci_events(self, data):
        """Collect HCI events."""
        self.dataqueue.put(data)

    def run(self):
        """Run HCIdump thread."""
        _LOGGER.debug("HCIdump thread: Run")
        # per-interface socket/transport/controller maps, keyed by hci number
        mysocket = {}
        fac = {}
        conn = {}
        btctrl = {}
        # This thread owns its own event loop (it is not the HA loop).
        self._event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self._event_loop)
        for hci in self._interfaces:
            try:
                mysocket[hci] = aiobs.create_bt_socket(hci)
            except OSError as error:
                _LOGGER.error("HCIdump thread: OS error (hci%i): %s", hci, error)
            else:
                # NOTE(review): relies on a private asyncio API
                # (_create_connection_transport) to wrap the raw BT socket.
                fac[hci] = getattr(self._event_loop, "_create_connection_transport")(
                    mysocket[hci], aiobs.BLEScanRequester, None, None
                )
                conn[hci], btctrl[hci] = self._event_loop.run_until_complete(fac[hci])
                _LOGGER.debug("HCIdump thread: connected to hci%i", hci)
                btctrl[hci].process = self.process_hci_events
                self._event_loop.run_until_complete(btctrl[hci].send_scan_request(self._active))
        _LOGGER.debug("HCIdump thread: start main event_loop")
        try:
            # Blocks until join() stops the loop from another thread.
            self._event_loop.run_forever()
        finally:
            _LOGGER.debug("HCIdump thread: main event_loop stopped, finishing")
            for hci in self._interfaces:
                self._event_loop.run_until_complete(btctrl[hci].stop_scan_request())
                conn[hci].close()
            self._event_loop.run_until_complete(asyncio.sleep(0))
            self._event_loop.close()
        _LOGGER.debug("HCIdump thread: Run finished")

    def join(self, timeout=10):
        """Join HCIdump thread."""
        _LOGGER.debug("HCIdump thread: joining")
        try:
            # Thread-safe request to stop the loop owned by run().
            self._event_loop.call_soon_threadsafe(self._event_loop.stop)
        except AttributeError as error:
            # _event_loop may still be None if run() never started.
            _LOGGER.debug("%s", error)
        finally:
            Thread.join(self, timeout)
        _LOGGER.debug("HCIdump thread: joined")
class BLEScanner:
    """BLE scanner: owns the HCIdump worker thread's lifecycle."""

    def __init__(self, config, dataqueue):
        """Init."""
        self.dataqueue = dataqueue
        self.dumpthread = None
        self.config = config

    def start(self):
        """Start receiving broadcasts."""
        _LOGGER.debug("Spawning HCIdump thread")
        self.dumpthread = HCIdump(
            config=self.config,
            dataqueue=self.dataqueue,
        )
        self.dumpthread.start()

    def stop(self):
        """Stop HCIdump thread(s); return True when fully stopped."""
        thread = self.dumpthread
        if thread is None:
            return True
        if not thread.is_alive():
            # Nothing running; consider the scanner stopped.
            return True
        thread.join()
        if not thread.is_alive():
            return True
        _LOGGER.error(
            "Waiting for the HCIdump thread to finish took too long! (>10s)"
        )
        return False
class MeasuringSensor(RestoreEntity):
    """Base class for measuring sensor entity.

    Subclasses set `_measurement` to the parser dict key they consume and
    override naming/units in their `__init__`.
    """

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        self.ready_for_update = False
        self._config = config
        self._mac = mac
        self._name = ""
        self._state = None
        self._unit_of_measurement = ""
        self._device_class = None
        self._device_type = devtype
        self._device_state_attributes = {}
        self._device_state_attributes["sensor type"] = devtype
        # Re-insert colons into the stripped MAC ("AABBCC..." -> "AA:BB:CC:...").
        self._device_state_attributes["mac address"] = (
            ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
        )
        self._unique_id = ""
        # Overridden by subclasses with the parser dict key to collect.
        self._measurement = "measurement"
        self._measurements = []
        self.rssi_values = []
        self.pending_update = False
        self._rdecimals = config[CONF_DECIMALS]
        # When True, collected values are truncated to int before averaging
        # (workaround for devices that report "jagged" fractional values).
        self._jagged = False
        # Formaldehyde needs more decimals; 0 means "use _rdecimals".
        self._fmdh_dec = 0
        self._rounding = config[CONF_ROUNDING]
        self._use_median = config[CONF_USE_MEDIAN]
        self._restore_state = config[CONF_RESTORE_STATE]
        self._err = None

    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        _LOGGER.debug("async_added_to_hass called for %s", self.name)
        await super().async_added_to_hass()
        # Restore the old state if available
        if self._restore_state is False:
            self.ready_for_update = True
            return
        old_state = await self.async_get_last_state()
        if not old_state:
            self.ready_for_update = True
            return
        self._state = old_state.state
        # Copy over any previously stored attributes that survive a restart.
        for attr in (
            "median",
            "mean",
            "last median of",
            "last mean of",
            "rssi",
            "last packet id",
        ):
            if attr in old_state.attributes:
                self._device_state_attributes[attr] = old_state.attributes[attr]
        if ATTR_BATTERY_LEVEL in old_state.attributes:
            self._device_state_attributes[ATTR_BATTERY_LEVEL] = old_state.attributes[ATTR_BATTERY_LEVEL]
        self.ready_for_update = True

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    @property
    def device_info(self):
        """Return device registry information for this sensor."""
        return {
            "identifiers": {
                # Serial numbers are unique identifiers within a specific domain
                (DOMAIN, self.get_sensorname())
            },
            "name": self.get_sensorname(),
            "model": self._device_state_attributes["sensor type"],
        }

    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._device_state_attributes

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id

    @property
    def force_update(self):
        """Force update."""
        return True

    def collect(self, data, batt_attr=None):
        """Measurements collector."""
        if self._jagged is True:
            # Truncate to int for devices with jagged fractional readings.
            self._measurements.append(int(data[self._measurement]))
        else:
            self._measurements.append(data[self._measurement])
        self._device_state_attributes["last packet id"] = data["packet"]
        if batt_attr is not None:
            self._device_state_attributes[ATTR_BATTERY_LEVEL] = batt_attr
        self.pending_update = True

    def update(self):
        """Updates sensor state and attributes."""
        # BUGFIX: reset the stored error each cycle. Previously a single
        # failed update left _err set forever, so every later update
        # re-logged the same stale error even after recovery.
        self._err = None
        textattr = ""
        rdecimals = self._rdecimals
        # formaldehyde decimals workaround
        if self._fmdh_dec > 0:
            rdecimals = self._fmdh_dec
        try:
            measurements = self._measurements
            if self._rounding:
                state_median = round(sts.median(measurements), rdecimals)
                state_mean = round(sts.mean(measurements), rdecimals)
            else:
                state_median = sts.median(measurements)
                state_mean = sts.mean(measurements)
            if self._use_median:
                textattr = "last median of"
                self._state = state_median
            else:
                textattr = "last mean of"
                self._state = state_mean
            self._device_state_attributes[textattr] = len(measurements)
            self._measurements.clear()
            self._device_state_attributes["median"] = state_median
            self._device_state_attributes["mean"] = state_mean
            self._device_state_attributes["rssi"] = round(sts.mean(self.rssi_values))
            self.rssi_values.clear()
        except (AttributeError, AssertionError):
            # Not enough data yet (e.g. restored state without measurements).
            _LOGGER.debug("Sensor %s not yet ready for update", self._name)
        except (ZeroDivisionError, IndexError, RuntimeError) as err:
            self._err = err
        if self._err:
            _LOGGER.error("Sensor %s (%s) update error: %s", self._name, self._device_type, self._err)
        self.pending_update = False

    def get_sensorname(self):
        """Return the configured custom name for this MAC, or the MAC itself."""
        fmac = ":".join(self._mac[i:i + 2] for i in range(0, len(self._mac), 2))
        if self._config[CONF_DEVICES]:
            for device in self._config[CONF_DEVICES]:
                if fmac in device["mac"].upper():
                    if "name" in device:
                        custom_name = device["name"]
                        _LOGGER.debug(
                            "Name of %s sensor with mac adress %s is set to: %s",
                            self._measurement,
                            fmac,
                            custom_name,
                        )
                        return custom_name
                    break
        return self._mac
class TemperatureSensor(MeasuringSensor):
    """Temperature sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "temperature"
        self._sensor_name = self.get_sensorname()
        self._name = "ble temperature " + self._sensor_name
        self._unique_id = "t_" + self._sensor_name
        self._unit_of_measurement = self.get_temperature_unit()
        self._device_class = DEVICE_CLASS_TEMPERATURE

    def get_temperature_unit(self):
        """Return the per-device configured unit (°C by default)."""
        fmac = ":".join(self._mac[i:i + 2] for i in range(0, len(self._mac), 2))
        devices = self._config[CONF_DEVICES]
        if devices:
            for device in devices:
                if fmac not in device["mac"].upper():
                    continue
                if "temperature_unit" in device:
                    _LOGGER.debug(
                        "Temperature sensor with mac address %s is set to receive data in %s",
                        fmac,
                        device["temperature_unit"],
                    )
                    return device["temperature_unit"]
                # Matching device without an explicit unit: stop searching.
                break
        _LOGGER.debug(
            "Temperature sensor with mac address %s is set to receive data in °C",
            fmac,
        )
        return TEMP_CELSIUS
class HumiditySensor(MeasuringSensor):
    """Humidity sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "humidity"
        self._sensor_name = self.get_sensorname()
        self._name = "ble humidity " + self._sensor_name
        self._unique_id = "h_" + self._sensor_name
        self._unit_of_measurement = PERCENTAGE
        self._device_class = DEVICE_CLASS_HUMIDITY
        # LYWSD03MMC / MHO-C401 report "jagged" fractional humidity;
        # truncate their readings to whole numbers (see base class).
        self._jagged = devtype in ('LYWSD03MMC', 'MHO-C401')
class MoistureSensor(MeasuringSensor):
    """Soil moisture sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "moisture"
        self._sensor_name = self.get_sensorname()
        self._name = "ble moisture " + self._sensor_name
        self._unique_id = "m_" + self._sensor_name
        self._unit_of_measurement = PERCENTAGE
        # NOTE(review): humidity device class is reused for moisture here —
        # presumably because no dedicated moisture class was available.
        self._device_class = DEVICE_CLASS_HUMIDITY
class ConductivitySensor(MeasuringSensor):
    """Soil conductivity sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "conductivity"
        self._sensor_name = self.get_sensorname()
        self._name = "ble conductivity " + self._sensor_name
        self._unique_id = "c_" + self._sensor_name
        self._unit_of_measurement = CONDUCTIVITY
        # No standard device class exists for conductivity.
        self._device_class = None

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return "mdi:flash-circle"
class IlluminanceSensor(MeasuringSensor):
    """Illuminance (light level) sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "illuminance"
        self._sensor_name = self.get_sensorname()
        self._name = "ble illuminance " + self._sensor_name
        self._unique_id = "l_" + self._sensor_name
        self._unit_of_measurement = "lx"
        self._device_class = DEVICE_CLASS_ILLUMINANCE
class FormaldehydeSensor(MeasuringSensor):
    """Formaldehyde concentration sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "formaldehyde"
        self._sensor_name = self.get_sensorname()
        self._name = "ble formaldehyde " + self._sensor_name
        self._unique_id = "f_" + self._sensor_name
        self._unit_of_measurement = "mg/m³"
        # No standard device class exists for formaldehyde.
        self._device_class = None
        # Override rounding: formaldehyde values need 3 decimals.
        self._fmdh_dec = 3

    @property
    def icon(self):
        """Return the icon of the sensor."""
        return "mdi:chemical-weapon"
class BatterySensor(MeasuringSensor):
    """Battery level sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "battery"
        self._sensor_name = self.get_sensorname()
        self._name = "ble battery " + self._sensor_name
        self._unique_id = "batt_" + self._sensor_name
        self._unit_of_measurement = PERCENTAGE
        self._device_class = DEVICE_CLASS_BATTERY

    def collect(self, data, batt_attr=None):
        """Battery measurements collector."""
        # Battery level is taken directly as state; no averaging is applied.
        self._state = data[self._measurement]
        self._device_state_attributes["last packet id"] = data["packet"]
        self.pending_update = True

    def update(self):
        """Update sensor state and attributes."""
        self._device_state_attributes["rssi"] = round(sts.mean(self.rssi_values))
        self.rssi_values.clear()
        self.pending_update = False
class ConsumableSensor(MeasuringSensor):
    """Consumable (remaining supply percentage) sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "consumable"
        self._sensor_name = self.get_sensorname()
        self._name = "ble consumable {}".format(self._sensor_name)
        self._unique_id = "cn_" + self._sensor_name
        self._unit_of_measurement = PERCENTAGE
        self._device_class = None

    @property
    def icon(self):
        """Return the icon of the sensor."""
        # BUGFIX: Material Design Icons names use a single "mdi:" prefix;
        # the previous value "mdi:mdi-recycle-variant" was invalid.
        return "mdi:recycle-variant"

    def collect(self, data, batt_attr=None):
        """Measurements collector."""
        # Consumable level is taken directly as state; no averaging is applied.
        self._state = data[self._measurement]
        self._device_state_attributes["last packet id"] = data["packet"]
        if batt_attr is not None:
            self._device_state_attributes[ATTR_BATTERY_LEVEL] = batt_attr
        self.pending_update = True

    def update(self):
        """Update sensor state and attributes."""
        self._device_state_attributes["rssi"] = round(sts.mean(self.rssi_values))
        self.rssi_values.clear()
        self.pending_update = False
class SwitchingSensor(RestoreEntity, BinarySensorEntity):
    """Base class for binary (on/off) BLE sensor entities."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        self.ready_for_update = False
        self._sensor_name = ""
        self._mac = mac
        self._config = config
        self._restore_state = config[CONF_RESTORE_STATE]
        self._name = ""
        self._state = None
        self._unique_id = ""
        self._device_type = devtype
        self._device_state_attributes = {}
        self._device_state_attributes["sensor type"] = devtype
        # Re-insert colons into the stripped MAC ("AABBCC..." -> "AA:BB:CC:...").
        self._device_state_attributes["mac address"] = (
            ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
        )
        self._device_class = None
        # Most recent raw value received from the parser; applied in update().
        self._newstate = None
        # Overridden by subclasses with the parser dict key to consume.
        self._measurement = "measurement"
        self.pending_update = False

    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        _LOGGER.debug("async_added_to_hass called for %s", self.name)
        await super().async_added_to_hass()
        # Restore the old state if available
        if self._restore_state is False:
            self.ready_for_update = True
            return
        old_state = await self.async_get_last_state()
        # BUGFIX: was `_LOGGER.info(old_state)` — a debug leftover that logged
        # the raw state object at INFO with no context; demoted to debug.
        _LOGGER.debug("Restored state for %s: %s", self.name, old_state)
        if not old_state:
            self.ready_for_update = True
            return
        self._state = old_state.state == STATE_ON
        # Copy over any previously stored attributes that survive a restart.
        for attr in ("ext_state", "rssi", "last packet id"):
            if attr in old_state.attributes:
                self._device_state_attributes[attr] = old_state.attributes[attr]
        if ATTR_BATTERY_LEVEL in old_state.attributes:
            self._device_state_attributes[ATTR_BATTERY_LEVEL] = old_state.attributes[ATTR_BATTERY_LEVEL]
        self.ready_for_update = True

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return bool(self._state) if self._state is not None else None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the binary sensor."""
        if self.is_on is None:
            return None
        return STATE_ON if self.is_on else STATE_OFF

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._device_state_attributes

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id

    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class

    @property
    def force_update(self):
        """Force update."""
        return True

    def get_sensorname(self):
        """Return the configured custom name for this MAC, or the MAC itself."""
        fmac = ":".join(self._mac[i:i + 2] for i in range(0, len(self._mac), 2))
        if self._config[CONF_DEVICES]:
            for device in self._config[CONF_DEVICES]:
                if fmac in device["mac"].upper():
                    if "name" in device:
                        custom_name = device["name"]
                        _LOGGER.debug(
                            "Name of %s sensor with mac adress %s is set to: %s",
                            self._measurement,
                            fmac,
                            custom_name,
                        )
                        return custom_name
                    break
        return self._mac

    def collect(self, data, batt_attr=None):
        """Measurements collector."""
        # NOTE(review): unlike MeasuringSensor.collect, this does not set
        # pending_update — presumably intentional for binary sensors; verify.
        self._newstate = data[self._measurement]
        self._device_state_attributes["last packet id"] = data["packet"]
        self._device_state_attributes["rssi"] = data["rssi"]
        if batt_attr is not None:
            self._device_state_attributes[ATTR_BATTERY_LEVEL] = batt_attr

    def update(self):
        """Update sensor state and attribute."""
        self._state = self._newstate
class PowerBinarySensor(SwitchingSensor):
    """Power switch binary sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "switch"
        self._sensor_name = self.get_sensorname()
        self._name = "ble switch " + self._sensor_name
        self._unique_id = "sw_" + self._sensor_name
        self._device_class = DEVICE_CLASS_POWER
class LightBinarySensor(SwitchingSensor):
    """Light detection binary sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "light"
        self._sensor_name = self.get_sensorname()
        self._name = "ble light " + self._sensor_name
        self._unique_id = "lt_" + self._sensor_name
        self._device_class = DEVICE_CLASS_LIGHT
class OpeningBinarySensor(SwitchingSensor):
    """Opening (door/window) binary sensor entity."""

    def __init__(self, config, mac, devtype):
        """Initialize the sensor."""
        super().__init__(config, mac, devtype)
        self._measurement = "opening"
        self._sensor_name = self.get_sensorname()
        self._name = "ble opening " + self._sensor_name
        self._unique_id = "op_" + self._sensor_name
        self._ext_state = None
        self._device_class = DEVICE_CLASS_OPENING

    def update(self):
        """Update sensor state and attributes."""
        self._ext_state = self._newstate
        # Raw values 0/1 are inverted to on/off; extended values (>= 2,
        # presumably special device states) are reported truthy as-is.
        if self._ext_state < 2:
            self._state = not bool(self._newstate)
        else:
            self._state = bool(self._newstate)
        self._device_state_attributes["ext_state"] = self._ext_state
|
# Generated by Django 2.2 on 2019-07-09 05:45
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ANSS app.

    Creates Feed (one row per archived pull of a USGS real-time GeoJSON feed)
    and FeedEarthquake (one row per earthquake event inside a pulled feed).
    Auto-generated by Django 2.2; do not hand-edit field definitions.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Snapshot of a USGS feed pull, ordered newest-first.
        migrations.CreateModel(
            name='Feed',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('archived_datetime', models.DateTimeField(help_text='The time the feed was pulled.', null=True)),
                ('type', models.CharField(choices=[('m1', 'Magnitude > 1.0')], max_length=500)),
                ('format', models.CharField(choices=[('geojson', 'GeoJSON')], max_length=500)),
                ('timeframe', models.CharField(choices=[('one-hour', 'One hour')], max_length=500)),
                ('content', models.FileField(upload_to='')),
                ('generated', models.BigIntegerField(null=True)),
                ('url', models.CharField(blank=True, max_length=5000)),
                ('title', models.CharField(blank=True, max_length=5000)),
                ('api', models.CharField(blank=True, max_length=5000)),
                ('count', models.IntegerField(null=True)),
                ('status', models.IntegerField(null=True)),
            ],
            options={
                'ordering': ('-archived_datetime',),
                'get_latest_by': 'archived_datetime',
            },
        ),
        # Individual earthquake record parsed from a Feed; field names mirror
        # the USGS GeoJSON event properties, plus a PostGIS point geometry.
        migrations.CreateModel(
            name='FeedEarthquake',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mag', models.FloatField(null=True)),
                ('place', models.CharField(blank=True, max_length=5000)),
                ('time', models.BigIntegerField(null=True)),
                ('updated', models.BigIntegerField(null=True)),
                ('tz', models.IntegerField(null=True)),
                ('url', models.CharField(blank=True, max_length=5000)),
                ('detail', models.CharField(blank=True, max_length=5000)),
                ('felt', models.IntegerField(null=True)),
                ('cdi', models.FloatField(null=True)),
                ('mmi', models.FloatField(null=True)),
                ('alert', models.CharField(blank=True, max_length=5000)),
                ('status', models.CharField(blank=True, max_length=5000)),
                ('tsunami', models.IntegerField(null=True)),
                ('sig', models.IntegerField(null=True)),
                ('net', models.CharField(blank=True, max_length=5000)),
                ('code', models.CharField(blank=True, max_length=5000)),
                ('ids', models.CharField(blank=True, max_length=5000)),
                ('sources', models.CharField(blank=True, max_length=5000)),
                ('types', models.CharField(blank=True, max_length=5000)),
                ('nst', models.IntegerField(null=True)),
                ('dmin', models.FloatField(null=True)),
                ('rms', models.FloatField(null=True)),
                ('gap', models.FloatField(null=True)),
                ('magType', models.CharField(blank=True, max_length=5000)),
                ('type', models.CharField(blank=True, max_length=5000)),
                ('title', models.CharField(blank=True, max_length=5000)),
                ('depth', models.FloatField(null=True)),
                ('point', django.contrib.gis.db.models.fields.PointField(null=True, srid=4326)),
                ('usgs_id', models.CharField(blank=True, max_length=5000)),
                ('feed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='anss.Feed')),
            ],
            options={
                'ordering': ('-feed_id', '-time'),
            },
        ),
    ]
|
<gh_stars>10-100
import boto3
import datetime
import time
import json
from botocore.vendored import requests
import tweepy
import logging
from PIL import Image
from io import BytesIO
#from aws_xray_sdk.core import xray_recorder
#from aws_xray_sdk.core import patch_all
# Get the service resource
#sqs = boto3.resource('sqs', region_name='us-west-2')
# Shared AWS clients, created once per Lambda container (reused across invocations).
ssm = boto3.client('ssm', region_name='us-west-2')
reko = boto3.client('rekognition', region_name='us-west-2')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Get the image to transform original one
# Loaded once at cold start; converted to RGBA so it can be alpha-pasted.
MASK = Image.open("mask.png")
if MASK.mode != 'RGBA':
    MASK = MASK.convert('RGBA')
def authenticate_twitter():
    """Build a tweepy OAuth handler from credentials stored in SSM."""
    def _param(name):
        # Each credential is stored as an encrypted SSM parameter.
        return ssm.get_parameter(Name=name, WithDecryption=True)['Parameter']['Value']

    auth = tweepy.OAuthHandler(_param('twitter-consumer-key'), _param('twitter-consumer-secret'))
    auth.set_access_token(_param('twitter-access-token'), _param('twitter-access-token-secret'))
    return auth
def get_faces(image):
    """Run Rekognition face detection on raw image bytes.

    Returns the FaceDetails list, or an empty list when none is present.
    """
    response = reko.detect_faces(Image={'Bytes': image})
    return response.get('FaceDetails', [])
def get_face_boxes(faces, source_size):
    """Convert Rekognition face details into pixel-space bounding boxes.

    Each returned tuple is (left, top, right, bottom, roll): the first four
    values are pixel coordinates computed from the relative BoundingBox, and
    the fifth is the face roll angle, used later to rotate the mask.
    """
    width, height = source_size
    boxes = []
    for face in faces:
        bbox = face['BoundingBox']
        left = int(bbox['Left'] * width)
        top = int(bbox['Top'] * height)
        right = int((bbox['Left'] + bbox['Width']) * width)
        bottom = int((bbox['Top'] + bbox['Height']) * height)
        boxes.append((left, top, right, bottom, face['Pose']['Roll']))
    return boxes
def build_masked_image(source, mask, boxes):
    """Paste a rotated, scaled copy of *mask* over each face box in *source*.

    source -- PIL image, modified in place
    mask   -- RGBA PIL image used as the overlay (also its own alpha channel)
    boxes  -- (left, top, right, bottom, roll) tuples from get_face_boxes()
    """
    # BUGFIX: Python-2-only `print "PutMask"` statement was a SyntaxError on
    # Python 3 runtimes; converted to the print() function.
    print("PutMask")
    for box in boxes:
        size = (box[2] - box[0], box[3] - box[1])
        # Rotate by the face roll, then scale the mask to the face box.
        # NOTE(review): Image.ANTIALIAS is removed in Pillow 10; switch to
        # Image.LANCZOS when Pillow is upgraded.
        scaled_mask = mask.rotate(-box[4], expand=1).resize(size, Image.ANTIALIAS)
        # we cut off the final element of the box because it's the rotation
        source.paste(scaled_mask, box[:4], scaled_mask)
def updateImage(faceDetails, imageSource):
    """Overlay the ninja mask on every detected face.

    Returns the edited image, or None when no face boxes could be computed.
    """
    # BUGFIX: Python-2-only `print` statement converted to print() function.
    print("UpdateImage")
    boxes = get_face_boxes(faceDetails, imageSource.size)
    if not boxes:
        return None
    build_masked_image(imageSource, MASK, boxes)
    return imageSource
def tweetImage(authent, imageProcessed):
    """Save the processed image as an optimized JPEG and tweet it."""
    # BUGFIX: Python-2-only `print` statement converted to print() function.
    print("tweetImage")
    api = tweepy.API(authent)
    destination = '/tmp/image_out.jpeg'
    imageProcessed.save(destination, "JPEG", quality=80, optimize=True, progressive=True)
    upload_result = api.media_upload(destination)
    api.update_status(status="Image updated with Ninja faces", media_ids=[upload_result.media_id_string])
def addidtolist(tweet_list, tweet_id):
    """Append *tweet_id* to the comma-separated processed-tweet list in SSM.

    The string 'null' is the sentinel for an empty list. Returns the updated
    list string after persisting it to the 'day-tweet-processed' parameter.
    """
    if tweet_list == 'null':
        tweet_list = tweet_id
    else:
        tweet_list = tweet_list + ',' + tweet_id
    # Persist once after updating (previously duplicated in both branches).
    ssm.put_parameter(Name='day-tweet-processed', Type='StringList', Value=tweet_list, Overwrite=True)
    # BUGFIX: Python-2-only `print` statement converted to print() function.
    print('New list: ' + tweet_list)
    return tweet_list
def idinlist(t_list, idcheck):
    """Return True if *idcheck* equals one of the comma-separated ids in *t_list*."""
    words = t_list.split(',')
    for word in words:
        if idcheck == word:
            return True
        # BUGFIX: Python-2-only `print word` converted to print() function.
        print(word)
    return False
def process_messages(auth, eventReceived):
    """Process SQS records: mask faces in each new tweet image and retweet.

    auth          -- tweepy auth handler for posting
    eventReceived -- Lambda event containing SQS 'Records'
    Returns the number of processed images that contained faces.
    """
    # BUGFIX: all Python-2-only `print` statements in this function were
    # SyntaxErrors on Python 3 runtimes; converted to print() calls.
    count = 0
    imageWithFaces = 0
    for message in eventReceived['Records']:
        # Print out the body and tweet id
        body = message['body']
        print('message body {}'.format(body))
        # Body format (from upstream): "...@<tweet_id>:... <media_url>"
        step_0 = body.split(' ')
        tweet_id = step_0[0]
        mediaURL = step_0[1]
        print("url: " + mediaURL)
        t_id = tweet_id.split('@')[1].split(':')[0]
        print("tweetID: " + t_id)
        # Check if tweet has been already processed
        t_list = (ssm.get_parameter(Name='day-tweet-processed'))['Parameter']['Value']
        print("Liste: " + t_list)
        check = idinlist(t_list, t_id)
        print("Check value: " + str(check))
        if not check:
            print("Go to tweet if faces present")
            # Gather the image and check if faces are present
            count = count + 1
            logger.info("Count: %i", count)
            respImage = requests.get(mediaURL + ":large")
            faceDetails = get_faces(respImage.content)
            logger.info(faceDetails)
            img = Image.open(BytesIO(respImage.content))
            # Mark as processed regardless of whether faces were found.
            addidtolist(t_list, t_id)
            if len(faceDetails) == 0:
                print("No faces in the image")
            else:
                imageWithFaces += 1
                processed = updateImage(faceDetails, img)
                tweetImage(auth, processed)
    return imageWithFaces
def lambda_handler(event, context):
    """Lambda entry point: mask faces on tweeted images, best-effort.

    Always returns a 200 response; failures are printed and swallowed so a
    bad message does not poison the queue.
    """
    try:
        #patch_all()
        auth = authenticate_twitter()
        faces_count = process_messages(auth, event)
        logger.info("Images with Faces: " + str(faces_count))
    except Exception as e:  # take care of all those ugly errors if there are some
        print(e)
    return {
        "statusCode": 200,
        "body": json.dumps({"message": "Update Image on twitter"}),
    }
"""
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
__author__ = "EUROCONTROL (SWIM)"
import json
import re
from datetime import datetime, timezone
import dateutil.parser
import geog
import numpy as np
import shapely.geometry
# Default number of edges used when approximating a circle with a polygon.
POLYGON_TO_CIRCLE_EDGES = 10
# ISO 8601 duration grammar (https://en.wikipedia.org/wiki/ISO_8601#Durations);
# named groups are unused beyond validation.
_ISO_8601_CHECK_PATTERN = r'^P(?!$)((?P<years>\d+)Y)?((?P<months>\d+)M)?(\d+W)?(\d+D)?(T(?=\d)(\d+H)?(\d+M)?(\d+S)?)?$'
_iso_8601_check_regex = re.compile(_ISO_8601_CHECK_PATTERN)
def time_str_from_datetime_str(date_string: str) -> str:
    """
    Return the time portion of an ISO datetime string.

    Example:
        '2019-12-03T09:00:00.12345' becomes '09:00:00.12345'

    :param date_string: ISO 8601 datetime containing a 'T' separator
    :return: everything after the 'T'
    """
    return date_string.split('T', 1)[1]
def datetime_str_from_time_str(time_str: str) -> str:
    """
    Prefix a time string with a dummy date (2000-01-01) so it can be stored
    as a full datetime.

    :param time_str: time portion, e.g. '09:00:00'
    :return: '2000-01-01T' followed by the given time
    """
    return "2000-01-01T" + time_str
def make_datetime_string_aware(dt: str) -> str:
    """
    Parse a datetime string, attach the UTC timezone, and return it in ISO format.

    :param dt: datetime string
    :return: ISO-formatted, UTC-aware datetime string
    """
    parsed = dateutil.parser.parse(dt)
    return make_datetime_aware(parsed).isoformat()
def make_datetime_aware(dt: datetime) -> datetime:
    """
    Return a copy of *dt* with its tzinfo set to UTC.

    Note: this only attaches the timezone; it does not convert the clock time.

    :param dt: naive or aware datetime
    :return: UTC-aware datetime with the same wall-clock fields
    """
    return dt.replace(tzinfo=timezone.utc)
def is_valid_duration_format(iso_duration: str) -> bool:
    """
    Check whether *iso_duration* is a valid ISO 8601 duration string as
    described at https://en.wikipedia.org/wiki/ISO_8601#Durations

    :param iso_duration: candidate duration string, e.g. 'P1DT2H'
    :return: True when the string matches the duration grammar
    """
    return bool(_iso_8601_check_regex.match(iso_duration))
def inscribed_polygon_from_circle(lon: float, lat: float, radius_in_m: float, n_edges: int):
    """
    Approximate a circle with an inscribed polygon, returned as a
    GeoJSON-style mapping of plain lists/dicts.

    :param lon: center longitude
    :param lat: center latitude
    :param radius_in_m: circle radius in meters
    :param n_edges: how many edges should the polygon have
    :return: GeoJSON-style dict describing the polygon
    """
    center = shapely.geometry.Point([lon, lat])
    # linspace accepts a point count, so n_edges + 1 yields n_edges edges
    bearings = np.linspace(0, 360, n_edges + 1)
    vertices = geog.propagate(center, bearings, radius_in_m)
    mapping = shapely.geometry.mapping(shapely.geometry.Polygon(vertices))
    # Round-trip through JSON to normalize tuples into plain lists.
    return json.loads(json.dumps(mapping))
def circumscribed_polygon_from_circle(lon: float,
                                      lat: float,
                                      radius_in_m: float,
                                      n_edges: int = POLYGON_TO_CIRCLE_EDGES):
    """
    Approximate the polygon circumscribing a circle.

    Increasing the radius by 5% and using 10 edges gives a good enough
    approximation of the desired circumscribed polygon.

    :param lon: center longitude
    :param lat: center latitude
    :param radius_in_m: circle radius in meters
    :param n_edges: how many edges should the polygon have
    :return: GeoJSON-style dict describing the polygon
    """
    enlarged_radius = radius_in_m * 1.05
    return inscribed_polygon_from_circle(lon, lat, enlarged_radius, n_edges)
|
<reponame>flipson/dd3d
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright 2021 Toyota Research Institute. All rights reserved.
import copy
import numpy as np
import torch
from fvcore.transforms import NoOpTransform
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import RandomFlip, ResizeShortestEdge, ResizeTransform, apply_augmentations
from detectron2.layers import batched_nms
from detectron2.structures import Boxes, Instances
from detectron2.utils.comm import get_world_size
from tridet.layers import bev_nms
from tridet.modeling.dd3d.core import DD3D
from tridet.structures.boxes3d import Boxes3D
__all__ = ["DatasetMapperTTA", "DD3DWithTTA"]
class DatasetMapperTTA:
    """
    Implement test-time augmentation for detection data.
    It is a callable which takes a dataset dict from a detection dataset,
    and returns a list of dataset dicts where the images
    are augmented from the input image by the transformations defined in the config.
    This is used for test-time augmentation.
    """
    def __init__(self, cfg):
        # Shortest-edge sizes to try; each yields one augmented copy
        # (two when flipping is enabled).
        self.min_sizes = cfg.TEST.AUG.MIN_SIZES
        self.max_size = cfg.TEST.AUG.MAX_SIZE
        self.flip = cfg.TEST.AUG.FLIP
        self.image_format = cfg.INPUT.FORMAT
    def __call__(self, dataset_dict):
        """
        Args:
            dict: a dict in standard model input format. See tutorials for details.
        Returns:
            list[dict]:
                a list of dicts, which contain augmented version of the input image.
                The total number of dicts is ``len(min_sizes) * (2 if flip else 1)``.
                Each dict has field "transforms" which is a TransformList,
                containing the transforms that are used to generate this image.
        """
        # CHW tensor -> HWC numpy array, as expected by the augmentation API.
        numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy()
        shape = numpy_image.shape
        orig_shape = (dataset_dict["height"], dataset_dict["width"])
        if shape[:2] != orig_shape:
            # It transforms the "original" image in the dataset to the input image
            pre_tfm = ResizeTransform(orig_shape[0], orig_shape[1], shape[0], shape[1])
        else:
            pre_tfm = NoOpTransform()
        # Create all combinations of augmentations to use
        aug_candidates = []  # each element is a list[Augmentation]
        for min_size in self.min_sizes:
            resize = ResizeShortestEdge(min_size, self.max_size)
            aug_candidates.append([resize])  # resize only
            if self.flip:
                flip = RandomFlip(prob=1.0)
                aug_candidates.append([resize, flip])  # resize + flip
        # Apply all the augmentations
        ret = []
        for aug in aug_candidates:
            new_image, tfms = apply_augmentations(aug, np.copy(numpy_image))
            # HWC numpy -> contiguous CHW tensor expected by the model.
            torch_image = torch.from_numpy(np.ascontiguousarray(new_image.transpose(2, 0, 1)))
            dic = copy.deepcopy(dataset_dict)
            # Record the full chain (pre-resize + augmentations) so that box
            # predictions can be inverse-mapped back to the original image.
            dic["transforms"] = pre_tfm + tfms
            dic["image"] = torch_image
            if "intrinsics" in dic:
                # Keep camera intrinsics consistent with the geometric transforms.
                intrinsics = dic['intrinsics'].cpu().numpy().astype(np.float32)
                intrinsics = tfms.apply_intrinsics(intrinsics)
                dic["intrinsics"] = torch.as_tensor(intrinsics)
                dic["inv_intrinsics"] = torch.as_tensor(np.linalg.inv(intrinsics))
            ret.append(dic)
        return ret
class DD3DWithTTA(nn.Module):
"""
A GeneralizedRCNN with test-time augmentation enabled.
Its :meth:`__call__` method has the same interface as :meth:`GeneralizedRCNN.forward`.
"""
def __init__(self, cfg, model, tta_mapper=None):
"""
Args:
cfg (CfgNode):
model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
tta_mapper (callable): takes a dataset dict and returns a list of
augmented versions of the dataset dict. Defaults to
`DatasetMapperTTA(cfg)`.
batch_size (int): batch the augmented images into this batch size for inference.
"""
super().__init__()
if isinstance(model, DistributedDataParallel):
model = model.module
assert isinstance(model, DD3D), "DD3DwithTTA only supports on DD3D. Got a model of type {}".format(type(model))
assert not model.postprocess_in_inference, "To use test-time augmentation, `postprocess_in_inference` must be False."
self.cfg = cfg.copy()
self.model = model
self.nms_thresh = cfg.DD3D.FCOS2D.INFERENCE.NMS_THRESH
if tta_mapper is None:
tta_mapper = DatasetMapperTTA(cfg)
self.tta_mapper = tta_mapper
self.batch_size = cfg.TEST.IMS_PER_BATCH // get_world_size()
def _batch_inference(self, batched_inputs):
"""
Execute inference on a list of inputs,
using batch size = self.batch_size, instead of the length of the list.
Inputs & outputs have the same format as :meth:`GeneralizedRCNN.inference`
"""
outputs = []
inputs = []
for idx, input in enumerate(batched_inputs):
inputs.append(input)
if len(inputs) == self.batch_size or idx == len(batched_inputs) - 1:
# This runs NMS per each augmented image.
outputs.extend([res['instances'] for res in self.model(inputs)])
inputs = []
return outputs
def __call__(self, batched_inputs):
"""
Same input/output format as :meth:`DD3D`
"""
def _maybe_read_image(dataset_dict):
ret = copy.copy(dataset_dict)
if "image" not in ret:
image = read_image(ret.pop("file_name"), self.tta_mapper.image_format)
image = torch.from_numpy(np.ascontiguousarray(image.transpose(2, 0, 1))) # CHW
ret["image"] = image
if "height" not in ret and "width" not in ret:
ret["height"] = image.shape[1]
ret["width"] = image.shape[2]
return ret
return [self._inference_one_image(_maybe_read_image(x)) for x in batched_inputs]
def _inference_one_image(self, x):
"""
Args:
x (dict): one dataset dict with "image" field being a CHW tensor
Returns:
dict: one output dict
"""
orig_shape = (x["height"], x["width"])
augmented_inputs, tfms = self._get_augmented_inputs(x)
merged_instances = self._get_augmented_instances(augmented_inputs, tfms, orig_shape)
if len(merged_instances) > 0:
if self.model.do_nms:
# Multiclass NMS.
keep = batched_nms(
merged_instances.pred_boxes.tensor, merged_instances.scores_3d, merged_instances.pred_classes,
self.nms_thresh
)
merged_instances = merged_instances[keep]
if not self.model.only_box2d and self.model.do_bev_nms:
# Bird-eye-view NMS.
keep = bev_nms(
merged_instances.pred_boxes3d,
merged_instances.scores_3d,
self.model.bev_nms_iou_thresh,
class_idxs=merged_instances.pred_classes,
class_agnostic=False
)
merged_instances = merged_instances[keep]
return {"instances": merged_instances}
def _get_augmented_inputs(self, input):
augmented_inputs = self.tta_mapper(input)
tfms = [x.pop("transforms") for x in augmented_inputs]
return augmented_inputs, tfms
def _get_augmented_instances(self, augmented_inputs, tfms, orig_shape):
    """
    Run batched inference on the augmented images and union the detections,
    mapped back into the original (un-augmented) image frame.

    Args:
        augmented_inputs (list[dict]): augmented dataset dicts.
        tfms (list): transform applied to each augmented input.
        orig_shape (tuple): (height, width) of the original image.

    Returns:
        Instances: merged detections from all augmentations.
    """
    # 1: forward with all augmented images.
    outputs = self._batch_inference(augmented_inputs)
    # 2: union the results.
    all_boxes = []
    all_boxes3d = []
    for input, output, tfm in zip(augmented_inputs, outputs, tfms):
        # Need to invert the transforms on boxes, to obtain results on the
        # original image.
        inv_tfm = tfm.inverse()

        # 2D boxes.
        pred_boxes = output.pred_boxes.tensor
        orig_pred_boxes = inv_tfm.apply_box(pred_boxes.cpu().numpy())
        orig_pred_boxes = torch.from_numpy(orig_pred_boxes).to(pred_boxes.device)
        all_boxes.append(Boxes(orig_pred_boxes))

        # 3D boxes.
        pred_boxes_3d = output.pred_boxes3d
        vectorized_boxes_3d = pred_boxes_3d.vectorize().cpu().numpy()
        orig_vec_pred_boxes_3d = [inv_tfm.apply_box3d(box3d_as_vec) for box3d_as_vec in vectorized_boxes_3d]

        # Intrinsics must be inverted consistently with the 3D boxes.
        orig_intrinsics = inv_tfm.apply_intrinsics(input['intrinsics'].cpu().numpy())
        orig_pred_boxes_3d = Boxes3D.from_vectors(
            orig_vec_pred_boxes_3d, orig_intrinsics, device=pred_boxes_3d.device
        )
        all_boxes3d.append(orig_pred_boxes_3d)
    all_boxes = Boxes.cat(all_boxes)
    all_boxes3d = Boxes3D.cat(all_boxes3d)
    # Scores and classes need no geometric inverse; concatenate directly.
    # (The original also accumulated these into per-loop Python lists that
    # were immediately overwritten by these torch.cat calls — dead code,
    # removed.)
    all_scores = torch.cat([x.scores for x in outputs])
    all_scores_3d = torch.cat([x.scores_3d for x in outputs])
    all_classes = torch.cat([x.pred_classes for x in outputs])
    return Instances(
        image_size=orig_shape,
        pred_boxes=all_boxes,
        pred_boxes3d=all_boxes3d,
        pred_classes=all_classes,
        scores=all_scores,
        scores_3d=all_scores_3d,
    )
|
import os
import sys
import json
from os.path import expanduser, join, abspath
import subprocess
import datetime
import shutil
import paramiko
class ExpRunner:
def __init__(self, config) -> None:
    """Build a runner from a parsed JSON config and open SSH connections
    to all host nodes (network I/O happens here)."""
    self.config = config
    self._config_parser(self.config)
    self._init_host_ssh()
def _config_parser(self, config):
    """Parse the JSON config object into instance attributes.

    Expects keys: host_user_dir, docker_user_dir, docker_user,
    docker_ssh_port, script_path, script_args, nodes, nGPU, eth, bw_limit,
    default_bw, log_folder, host_ssh_key, docker_ssh_key.
    """
    self.host_user_dir = config["host_user_dir"]
    self.docker_user_dir = config["docker_user_dir"]
    self.docker_user = config["docker_user"]
    self.docker_ssh_port = config["docker_ssh_port"]
    self.script_path = self._trans_docker_path(config["script_path"])
    self.script_args = config["script_args"]
    self.nodes = config['nodes']
    self.nGPU = config['nGPU']  # for each machine
    self.eth = config['eth']  # name of NIC
    self.bw_limit = config['bw_limit']
    self.default_bw = config['default_bw']
    self.log_folder = config['log_folder']
    # expanduser on BOTH key paths so "~/..." works for either entry
    # (previously only host_ssh_key was expanded — inconsistent).
    self.host_key = paramiko.RSAKey.from_private_key_file(
        expanduser(config["host_ssh_key"]))
    self.docker_key = paramiko.RSAKey.from_private_key_file(
        expanduser(config["docker_ssh_key"]))
def _trans_docker_path(self, path):
    """Map '~' in `path` to the docker user's home directory.

    NOTE(review): replaces every '~' occurrence, not only a leading one.
    """
    return path.replace('~', self.docker_user_dir)
def _init_host_ssh(self):
    """Open an SSH connection to every configured node.

    Populates self.host_nodes with (ip, paramiko.SSHClient) pairs.
    NOTE(review): the username "ubuntu" is hard-coded here.
    """
    print('='*10, 'initializing ssh connections')
    self.host_nodes = []
    for node in self.nodes:
        client = paramiko.SSHClient()
        # Auto-accept unknown host keys (convenient, but trusts the network).
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(hostname=node, username="ubuntu", pkey=self.host_key)
        self.host_nodes.append((node, client))
        print('IP', node, 'DONE')
    print('='*10, 'initialization for ssh host node DONE')
def _init_host_env(self):
    """Create the expected ~/autorun directory tree on every host and clone
    (or update) the training-scripts repository."""
    for ip, cli in self.host_nodes:
        # Plain `mkdir` (no -p): failures on existing dirs only surface as
        # printed stderr from _exec_cli_cmd and are otherwise ignored.
        check_cmd = "mkdir ~/autorun; mkdir ~/autorun/horovod_logs; " \
            "mkdir ~/autorun/horovod_logs/hooks; "\
            "mkdir ~/autorun/horovod_logs/model_log; "\
            "mkdir ~/autorun/horovod_logs/mpi_events; "\
            "mkdir ~/autorun/logs/; "\
            "mkdir ~/autorun/logs/net; mkdir ~/autorun/logs/cpu; mkdir ~/data "
        self._exec_cli_cmd(cli, check_cmd)
        # Detect whether the repo is already cloned.
        check_cmd = "cd ~/autorun; ls|grep distributed-training"
        _, stdout, stderr = cli.exec_command(check_cmd)
        if stdout.read() != b"":
            git_pull = "cd ~/autorun/distributed-training; git pull"
            self._exec_cli_cmd(cli, git_pull, '{}: git pull'.format(ip))
        else:
            cmd = "cd ~/autorun;"\
                "git clone https://github.com/zarzen/distributed-training.git"
            self._exec_cli_cmd(cli, cmd, "{}: clone training scripts".format(ip))
def _exec_cli_cmd(self, cli, cmd, msg=None):
if msg:
print('>'*10, msg, '<'*10)
_, stdout, stderr = cli.exec_command(cmd)
print('cmd stdout: ', stdout.read().decode('utf-8'),
"cmd stderr: ", stderr.read().decode('utf-8'))
if msg:
print('>'*10, 'DONE', msg, '<'*10)
def _start_containers(self):
    """Stop any running containers on each host, pull the training image,
    and start one container per host; records ids in self.docker_ids.
    """
    stop_cmd = "docker kill $(docker ps -q)"
    pull_cmd = "docker pull zarzen/horovod-mod:1.0"
    # NOTE(review): start uses `sudo docker` while stop/pull do not — verify
    # whether the remote user needs sudo for docker at all.
    start_cmd = "sudo docker run --gpus all --network=host --detach --ipc=host "\
        "-v {}/autorun/distributed-training:{}/distributed-training "\
        "-v {}/autorun/horovod_logs:{}/horovod_logs "\
        "-v {}/data:{}/data "\
        "zarzen/horovod-mod:1.0".format(self.host_user_dir, self.docker_user_dir,
                                        self.host_user_dir, self.docker_user_dir,
                                        self.host_user_dir, self.docker_user_dir)
    self.docker_ids = {}
    for (ip, cli) in self.host_nodes:
        print('>'*10, ip, '<'*10)
        self._exec_cli_cmd(cli, stop_cmd, "{}: stop all containers".format(ip))
        self._exec_cli_cmd(cli, pull_cmd, "{}: pull docker image".format(ip))
        _, stdout, stderr = cli.exec_command(start_cmd)
        # `docker run --detach` prints the new container id on stdout.
        _docker_id = stdout.read().decode('utf-8')
        self.docker_ids[ip] = _docker_id
        print('docker_id', _docker_id)
        print('Start Errors:', stderr.read().decode('utf-8'))
        print('='*10, ip, 'start container DONE', '='*10)
def _kill_containers(self):
""" after experiments done"""
print('*'*10, 'killing docker containers')
kill_cmd = "docker container kill {}"
for ip, cli in self.host_nodes:
if ip in self.docker_ids:
self._exec_cli_cmd(cli, kill_cmd.format(self.docker_ids[ip]), ip)
print('*'*10, 'kill containers done')
def bandwith_control(self):
    """Reset, then optionally apply, a `tc tbf` egress rate limit on
    self.eth of every host. (Method name typo kept: callers use it.)
    """
    del_cmd = "sudo tc qdisc del dev {} root tbf rate 40Gbit latency 400ms burst 3000kbit".format(self.eth)
    # if self.bw_limit = "" then we don't execute the add_cmd
    add_cmd = "sudo tc qdisc add dev {} root tbf rate {} latency 400ms burst 3000kbit".format(self.eth, self.bw_limit)
    for (ip, cli) in self.host_nodes:
        # try to delete rate limit
        self._exec_cli_cmd(cli, del_cmd, "{}: delete bandwidth limit".format(ip))
        # Issued twice on purpose: make sure no stale qdisc remains.
        self._exec_cli_cmd(cli, del_cmd, "{}: delete bandwidth limit".format(ip))
        if self.bw_limit:
            self._exec_cli_cmd(cli, add_cmd, "{}: add bandwidth limit {}".format(ip, self.bw_limit))
def exec_dist_train(self):
    """SSH into the rank-0 container and run the horovod training command,
    streaming its output; blocks until the remote command exits.
    Returns None.
    """
    train_cmd = self.build_train_cmd()
    print("Exec:", " ".join(train_cmd))
    # ssh into rank0 container
    ip, _ = self.host_nodes[0]
    rank0 = paramiko.SSHClient()
    rank0.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    rank0.connect(hostname=ip, port=self.docker_ssh_port,
                  username=self.docker_user,
                  pkey=self.docker_key)

    def line_buffered(f):
        # Yield complete lines while the remote command is still running.
        # NOTE(review): a partial line pending when the exit status becomes
        # ready is dropped here; the trailing read() below drains the rest.
        line_buf = ""
        while not f.channel.exit_status_ready():
            c = f.read(1).decode('utf-8')
            if c != '\n':
                line_buf += c
            else:
                yield line_buf
                line_buf = ''

    _, stdout, stderr = rank0.exec_command(" ".join(train_cmd), bufsize=100)
    print("-"*10, 'training log')
    for line in line_buffered(stdout):
        print(line)
    # Drain whatever output remains after the command finished.
    print(stdout.read().decode('utf-8'))
    print(stderr.read().decode('utf-8'))
    print('-'*10, 'training log end')
def build_train_cmd(self):
    """Compose the horovodrun command line executed on the rank-0 node."""
    total_procs = len(self.nodes) * int(self.nGPU)
    host_spec = ",".join("{}:{}".format(ip, self.nGPU) for ip in self.nodes)
    return [
        "NCCL_DEBUG=INFO",
        "HOROVOD_NUM_NCCL_STREAMS=4",
        "horovodrun",
        "-np", str(total_procs),
        "-H", host_spec,
        "python3",
        self.script_path,
        self.script_args,
        "|& grep -v \"Read -1\"",
    ]
def _get_logs(self):
cpu_logs, net_logs = self._get_cpu_net_log()
hook_logs, model_logs, mpi_logs = self._get_horovod_logs()
return cpu_logs, net_logs, hook_logs, model_logs, mpi_logs
def run(self):
    """Top-level experiment driver: prepare hosts, snapshot existing logs,
    limit bandwidth, start monitors and containers, run training, then
    archive only the newly created logs."""
    print('initiating host env')
    self._init_host_env()
    # Snapshot pre-existing logs so move_log() archives only new files.
    self.exist_logs = self._get_logs()
    print('='*10, "working on bandwidth control")
    self.bandwith_control()
    print('='*10, "bandwidth control DONE")
    cpu_p, net_p = self._exe_res_monitor()
    print(">"*10, 'launched CPU & Network monitoring')
    print('='*10, 'Start containers', )
    self._start_containers()
    print('*'*10, 'Start working on experiment script')
    self.exec_dist_train()
    print('*'*10, 'Experiment finished')
    # Stop the local monitoring subprocesses.
    cpu_p.terminate()
    net_p.terminate()
    print('End experiment')
    self.move_log()
def _exe_res_monitor(self):
    """Launch the local CPU and network-bandwidth monitor scripts.

    Returns the two subprocess.Popen handles (cpu_p, net_p); the caller is
    responsible for terminating them.
    """
    cpu_monitor_script = expanduser("~/autorun/monitor_cpu.py")
    net_monitor_script = expanduser("~/autorun/monitor_net.py")
    cpu_p = subprocess.Popen(["python3", cpu_monitor_script],
                             stdout=subprocess.DEVNULL)
    net_p = subprocess.Popen(["python3", net_monitor_script],
                             stdout=subprocess.DEVNULL)
    return cpu_p, net_p
def move_log(self):
    """Archive the logs created during this run.

    Despite the name, files are COPIED (shutil.copy2), not moved, into
    ./log_archives/<timestamp>-<bw_limit>-<default_bw>[-<log_folder>],
    together with a readme.txt and the config used.
    """
    # cpu, net, hook, model, mpi — current files vs. the pre-run snapshot;
    # the set difference below selects only files created by this run.
    n_cpu, n_net, n_hook, n_model, n_mpi = self._get_logs()
    e_cpu, e_net, e_hook, e_model, e_mpi = self.exist_logs

    def _moving(src, dst, files):
        # Copy each new file, preserving metadata.
        for _f in files:
            shutil.copy2(join(src, _f), join(dst, _f))

    dst_folder = "./log_archives/{}-{}-{}".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"),
                                                  self.bw_limit, self.default_bw)
    if self.log_folder:
        dst_folder += '-' + self.log_folder
    os.makedirs(dst_folder)
    _moving("./logs/cpu", dst_folder, n_cpu - e_cpu)
    _moving("./logs/net", dst_folder, n_net - e_net)
    _moving("./horovod_logs/hooks", dst_folder, n_hook-e_hook)
    _moving("./horovod_logs/model_log/", dst_folder, n_model-e_model)
    _moving("./horovod_logs/mpi_events", dst_folder, n_mpi-e_mpi)
    with open(join(dst_folder, "readme.txt"), 'w+') as ofile:
        ofile.write("bandwidth limit: {}\n".format(self.bw_limit))
        train_cmd = self.build_train_cmd()
        ofile.write("execute cmd: {}\n".format(" ".join(train_cmd)))
    with open(join(dst_folder, "config.json"), 'w') as ofile:
        json.dump(self.config, ofile, indent=4)
def _get_cpu_net_log(self):
    """
    Record the currently existing cpu/net log filenames.

    Returns (set_of_cpu_logs, set_of_net_logs). Relies on ./logs/{cpu,net}
    existing under the current working directory.
    """
    log_path = "./logs"
    log_path = expanduser(log_path)  # no-op for a relative path; kept as-is
    net_logs = os.listdir(join(log_path, 'net'))
    cpu_logs = os.listdir(join(log_path, 'cpu'))
    return set(cpu_logs), set(net_logs)
def _create_horovod_logs_folder(self):
base_dir = "./horovod_logs"
if not os.path.exists(base_dir):
os.makedirs('./horovod_logs')
if not os.path.exists(join(base_dir, "hooks")):
os.makedirs(join(base_dir, "hooks"))
if not os.path.exists(join(base_dir, "model_log")):
os.makedirs(join(base_dir, "model_log"))
if not os.path.exists(join(base_dir, "mpi_events")):
os.makedirs(join(base_dir, "mpi_events"))
def _get_horovod_logs(self):
base_dir = "./horovod_logs"
hook_logs = os.listdir(join(base_dir, "hooks"))
model_logs = os.listdir(join(base_dir, "model_log"))
mpi_logs = os.listdir(join(base_dir, "mpi_events"))
return set(hook_logs), set(model_logs), set(mpi_logs)
def __del__(self):
    # Best-effort cleanup of remote containers. NOTE(review): __del__ is not
    # guaranteed to run at interpreter shutdown and needs live SSH clients —
    # an explicit close()/context-manager would be more reliable.
    self._kill_containers()
def main():
    """Load the JSON config named on the command line and run the experiment.

    Exits with a nonzero status when no config file is given.
    """
    if len(sys.argv) < 2:
        # Fixed: message typo, nonzero exit status, and the unreachable
        # `return` that followed sys.exit() in the original.
        print("Please specify a config file")
        sys.exit(1)
    with open(sys.argv[1]) as config_file:
        config = json.load(config_file)
    exp = ExpRunner(config)
    exp.run()
if __name__ == "__main__":
main() |
<filename>temporalis/__init__.py
from temporalis.time import int_to_weekday
import pendulum
from pprint import pprint
class DataPoint:
    """A single named measurement/prediction with optional range, timing and
    probability metadata."""

    def __init__(self, name, value, units,
                 min_val=None, max_val=None,
                 low_val=None, high_val=None,
                 time=None, min_time=None, max_time=None,
                 low_time=None, high_time=None,
                 prob=None, prob_min=None, prob_max=None):
        self.name = name
        self.units = units
        self.value = value
        # Range values fall back to the point value when not provided.
        self.min_val = min_val or value
        self.max_val = max_val or value
        self.low_val = low_val or self.min_val
        self.high_val = high_val or self.max_val
        self.prob = prob  # probability of value
        self.prob_min = prob_min or prob  # probability of min_val
        self.prob_max = prob_max or prob  # probability of max_val
        self.time = time  # ts of prediction
        self.min_time = min_time or time  # predicted ts for min
        self.max_time = max_time or time  # predicted ts for max
        self.low_time = low_time or time
        self.high_time = high_time or time

    @staticmethod
    def _stamp_to_datetime(stamp, tz=None):
        """Convert a unix timestamp to a pendulum datetime (UTC by default)."""
        tz = tz or "UTC"
        return pendulum.from_timestamp(stamp, tz=tz)

    def as_dict(self):
        """Return the point as a dict with falsy entries removed.

        NOTE(review): "falsy" includes 0/0.0 values, which are dropped too —
        pre-existing behaviour, kept for compatibility.
        """
        # Fixed: operate on a copy so popping keys no longer deletes
        # attributes from the instance itself (the original popped from
        # self.__dict__ directly).
        data = dict(self.__dict__)
        for k in dict(data):
            if not data[k]:
                data.pop(k)
        return data

    @staticmethod
    def from_dict(data):
        """Build a DataPoint from a dict; pass through DataPoint instances
        and return None for falsy non-int input."""
        if not isinstance(data, int) and not data:
            return None
        if isinstance(data, DataPoint):
            return data
        assert isinstance(data, dict)
        name = data["name"]
        units = data.get("units") or data.get("unit")
        tz = data.get("timezone")
        dt = None
        if data.get("time"):
            dt = data["time"]
            try:
                dt = DataPoint._stamp_to_datetime(data["time"], tz)
            except Exception:
                # Keep the raw value if conversion is impossible.
                pass
        if data.get("datetime"):
            dt = data["datetime"]
            try:
                dt = DataPoint._stamp_to_datetime(data["datetime"], tz)
            except Exception:
                pass
        time = min_time = max_time = dt
        value = min_val = max_val = data.get("value")
        prob = prob_min = prob_max = data.get("prob")
        min_val = data.get("min_val") or data.get("min_value") or min_val
        max_val = data.get("max_val") or data.get("max_value") or max_val
        prob_min = data.get("min_prob") or data.get("prob_min") or prob_min
        prob_max = data.get("max_prob") or data.get("prob_max") or prob_max
        # Fixed: pass everything after max_val by keyword. The original
        # positional call shifted time/min_time/max_time/prob/... into the
        # low_val/high_val/low_time/... parameters of __init__.
        return DataPoint(name, value, units,
                         min_val=min_val, max_val=max_val,
                         time=time, min_time=min_time, max_time=max_time,
                         prob=prob, prob_min=prob_min, prob_max=prob_max)

    def __repr__(self):
        return str(self.value) + " " + self.units
# timestamped weather data
class WeatherData:
    """Timestamped weather data: one observation/forecast slot whose fields
    are DataPoint instances (or None when unknown)."""

    def __init__(self):
        self.datetime = None
        self.apparentTemperature = None
        self.cloudCover = None
        self.dewPoint = None
        self.humidity = None
        self.icon = None
        self.ozone = None
        self.precipitation = None
        self.pressure = None
        self.summary = None
        self.temperature = None
        self.uvIndex = None
        self.visibility = None
        self.windBearing = None
        self.windGust = None
        self.windSpeed = None
        self.snow = None

    def __repr__(self):
        return str(self.datetime) + ":" + self.summary

    def pprint(self):
        pprint(self.as_dict())

    def print(self):
        print(self.weekday, self.datetime.date(), self.datetime.time(),
              self.timezone, ":", self.summary)
        print("temperature:", self.temperature)
        print("humidity:", self.humidity)
        print("cloudCover:", self.cloudCover)
        print("windSpeed:", self.windSpeed)
        print("precipitation:", self.precipitation)
        print("visibility:", self.visibility)

    @property
    def timezone(self):
        """Timezone name of self.datetime; UTC timezone object if unset."""
        if not self.datetime:
            return pendulum.timezone("UTC")
        return self.datetime.timezone_name

    @property
    def weekday(self):
        """Weekday name for self.datetime, or -1 when no datetime is set."""
        if self.datetime is None:
            return -1
        return int_to_weekday(self.datetime.weekday())

    def as_dict(self):
        """Return the record as a dict, nested DataPoints expanded and falsy
        entries removed."""
        # Fixed: operate on a copy so popping keys no longer deletes
        # attributes from the instance itself (the original popped from
        # self.__dict__ directly).
        data = dict(self.__dict__)
        for k in dict(data):
            try:
                data[k] = data[k].as_dict()
            except Exception:
                # Plain values (str, datetime, None) have no as_dict().
                pass
            if not data[k]:
                data.pop(k)
        return data

    def _stamp_to_datetime(self, stamp, tz=None):
        tz = tz or self.timezone
        return pendulum.from_timestamp(stamp, tz=tz)

    @staticmethod
    def from_dict(data):
        """Build a WeatherData from a dict of raw values / DataPoint dicts."""
        assert isinstance(data, dict)
        point = WeatherData()
        point.icon = data.get("icon")
        point.summary = data.get("summary")
        if data.get("datetime"):
            dt = data["datetime"]
            try:
                dt = point._stamp_to_datetime(data["datetime"])
            except Exception:
                pass
            point.datetime = dt
        point.temperature = DataPoint.from_dict(data.get("temperature"))
        point.apparentTemperature = DataPoint.from_dict(
            data.get("apparentTemperature")) or point.temperature
        point.cloudCover = DataPoint.from_dict(data.get("cloudCover"))
        point.dewPoint = DataPoint.from_dict(data.get("dewPoint"))
        point.humidity = DataPoint.from_dict(data.get("humidity"))
        point.ozone = DataPoint.from_dict(data.get("ozone"))
        point.pressure = DataPoint.from_dict(data.get("pressure"))
        point.uvIndex = DataPoint.from_dict(data.get("uvIndex"))
        point.visibility = DataPoint.from_dict(data.get("visibility"))
        point.windBearing = DataPoint.from_dict(data.get("windBearing"))
        point.windGust = DataPoint.from_dict(data.get("windGust"))
        point.windSpeed = DataPoint.from_dict(data.get("windSpeed"))
        point.precipitation = DataPoint.from_dict(data.get("precipitation"))
        point.snow = DataPoint.from_dict(data.get("snow"))
        return point
# Collection of forecasts
class HourlyForecast:
    """A day's weather summary together with its per-hour forecasts."""

    def __init__(self, date, hours, weather):
        self.datetime = date
        self.hours = hours
        self.weather = weather

    def __getitem__(self, item):
        return self.hours[item]

    def __iter__(self):
        return iter(self.hours)

    @property
    def summary(self):
        """Day-level summary, delegated to the weather record."""
        return self.weather.summary

    @property
    def icon(self):
        """Day-level icon, delegated to the weather record."""
        return self.weather.icon

    def print(self):
        print(self.datetime, ":", self.summary)

    def pprint(self):
        pprint(self.as_dict())

    def as_dict(self):
        return {
            "datetime": self.datetime,
            "hours": [hour.as_dict() for hour in self.hours],
            "weather": self.weather.as_dict(),
        }
class DailyForecast:
    """A period's weather summary together with its per-day forecasts."""

    def __init__(self, date, days, weather):
        self.datetime = date
        self.days = days
        self.weather = weather

    def __getitem__(self, item):
        return self.days[item]

    def __iter__(self):
        return iter(self.days)

    def print(self):
        print(self.datetime, ":", self.summary)

    def pprint(self):
        pprint(self.as_dict())

    @property
    def summary(self):
        """Period-level summary, delegated to the weather record."""
        return self.weather.summary

    @property
    def icon(self):
        """Period-level icon, delegated to the weather record."""
        return self.weather.icon

    def as_dict(self):
        return {
            "datetime": self.datetime,
            "days": [day.as_dict() for day in self.days],
            "weather": self.weather.as_dict(),
        }
|
# A program to analyze data of bladder cancer in humans
# Author: <NAME>
# Date: 04/16/2017
import matplotlib.pyplot as pyplot
import numpy as np
def readData_gender(filename):
'''
The function reads the Clincal data of Bladder Cancer patients
data file from cbioportal
Parameter:
filename = a name of file (in strings)
Return value: number of male and female patients
'''
bladder_cancer = open(filename, 'r', encoding = 'utf-8')
line = bladder_cancer.readline()
while line[0] == ('#'):
line = bladder_cancer.readline()
female = 0
male = 0
for line in bladder_cancer:
row = line.split(',')
if row[6] != '[Not Available]' and row[6] == 'MALE':
male = male +1
if row[6] != '[Not Available]' and row[6] == 'FEMALE':
female = female +1
return male, female
def readData_stage(filename):
    '''
    Read the clinical data of bladder cancer patients (cbioportal CSV).

    Parameter:
        filename = name of the CSV file (string)
    Return value: (stageI, stageII, stageIII, stageIV) frequencies
    '''
    stageI = 0
    stageII = 0
    stageIII = 0
    stageIV = 0
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename, 'r', encoding='utf-8') as bladder_cancer:
        line = bladder_cancer.readline()
        # Skip '#' comment lines; the first non-comment line is the header.
        while line.startswith('#'):
            line = bladder_cancer.readline()
        for line in bladder_cancer:
            row = line.split(',')
            # Column 57 holds the cancer stage; '[Not Available]' matches no
            # stage value, so the original extra checks were redundant.
            if row[57] == 'Stage I':
                stageI += 1
            elif row[57] == 'Stage II':
                stageII += 1
            elif row[57] == 'Stage III':
                stageIII += 1
            elif row[57] == 'Stage IV':
                stageIV += 1
    return stageI, stageII, stageIII, stageIV
def readData_Minnesota(filename2):
    '''
    Read the bladder cancer data for the population of Minnesota, 1988-2013.

    Parameter:
        filename2 = name of the CSV file (string)
    Return value: (year, incidence_rate_male, incidence_rate_female) lists.
        NOTE: years are taken from the 'Male' rows only; female rates are
        assumed to appear in the same year order (pre-existing behaviour).
    '''
    year = []
    incidence_rate_male = []
    incidence_rate_female = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename2, 'r', encoding='utf-8') as bladder_minnesota:
        line = bladder_minnesota.readline()
        # Skip '#' comment lines; the first non-comment line is the header.
        while line.startswith('#'):
            line = bladder_minnesota.readline()
        for line in bladder_minnesota:
            row = line.split(',')
            # row[0]=year, row[2]=gender, row[6]=incidence rate per 100,000.
            if row[2] == 'Male':
                year.append(int(row[0]))
                incidence_rate_male.append(float(row[6]))
            if row[2] == 'Female':
                incidence_rate_female.append(float(row[6]))
    return year, incidence_rate_male, incidence_rate_female
def readData_Minnesota2(filename2):
    '''
    Read the bladder cancer data for the population of Minnesota, 1988-2013.

    Parameter:
        filename2 = name of the CSV file (string)
    Return value: (new_cancer, population) lists for the 'Both combined' rows
    '''
    new_cancer = []
    population = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(filename2, 'r', encoding='utf-8') as bladder_minnesota:
        line = bladder_minnesota.readline()
        # Skip '#' comment lines; the first non-comment line is the header.
        while line.startswith('#'):
            line = bladder_minnesota.readline()
        for line in bladder_minnesota:
            row = line.split(',')
            # row[2]=gender group, row[4]=new cancer count, row[5]=population.
            if row[2] == 'Both combined':
                new_cancer.append(int(row[4]))
                population.append(int(row[5]))
    return new_cancer, population
def drawHist_gender(male, female):
    '''
    A function to plot the histogram of bladder cancer by gender
    Parameters:
        male: number of male patients
        female: number of female patients
    Return value: None (pyplot.show() blocks until the window is closed)
    '''
    count = []
    count.append(male)
    count.append(female)
    status = ['Male', 'Female']
    pos = np.arange(len(status))
    width = 0.5
    ax = pyplot.axes()
    # Center the category labels under the bars.
    ax.set_xticks(pos + (width/2))
    ax.set_xticklabels(status)
    pyplot.bar(pos, count, width, color = 'blue')
    pyplot.xlim([min(pos) - 0.5, max(pos) + 1.0])
    pyplot.title('Frequency of diagnosed patients by gender')
    pyplot.show()
# Loaded at import time (module-level side effect); consumed by main() below.
male, female = readData_gender('data_bcr_clinical_data_patient.csv')
def drawHist_stage(stageI, stageII, stageIII, stageIV):
    '''
    A function to plot the frequency of cancer stages among patients
    Parameters:
        stageI: number of patients in stage I
        stageII: number of patients in stage II
        stageIII: number of patients in stage III
        stageIV: number of patients in stage IV
    Return value: None (pyplot.show() blocks until the window is closed)
    '''
    count_stage = []
    count_stage.append(stageI)
    count_stage.append(stageII)
    count_stage.append(stageIII)
    count_stage.append(stageIV)
    status = ['Stage I', 'Stage II', 'Stage III', 'Stage IV']
    pos = np.arange(len(status))
    width = 0.5
    ax = pyplot.axes()
    # Center the category labels under the bars.
    ax.set_xticks(pos + (width/2))
    ax.set_xticklabels(status)
    pyplot.bar(pos, count_stage, width, color = 'blue')
    pyplot.xlim([min(pos) - 0.5, max(pos) + 1.0])
    pyplot.title('Frequency of cancer stages among diagnosed patients')
    pyplot.show()
# Loaded at import time (module-level side effect); consumed by main() below.
stageI, stageII, stageIII, stageIV = readData_stage('data_bcr_clinical_data_patient.csv')
def draw_minnesota(year, incidence_rate_male, incidence_rate_female):
    '''
    A function to plot incidence rate of bladder cancer by gender in Minnesota
    Parameters:
        year: list of recorded years
        incidence_rate_male: list of incidence rates in the male population
        incidence_rate_female: list of incidence rates in the female population
    Return value: None (pyplot.show() blocks until the window is closed)
    '''
    pyplot.plot(year, incidence_rate_male, label = 'Male')
    pyplot.plot(year, incidence_rate_female, label = 'Female')
    pyplot.legend(loc = 'center')
    pyplot.title('Incidence rate of bladder cancer in Minnesota from 1988-2013')
    pyplot.xlabel('Year')
    pyplot.ylabel('Incidence Rate (per 100,000)')
    pyplot.show()
# Loaded at import time (module-level side effect); consumed by main() below.
year, incidence_rate_male, incidence_rate_female = readData_Minnesota('Bladder - Minnesota.csv')
def linearRegression(x, y):
    '''
    Compute the slope and y-intercept of the least-squares regression line.

    Parameters:
        x = a list of x coordinates
        y = a list of y coordinates
    Return value:
        (m, b) — the slope and y-intercept of the linear regression.
    '''
    n = len(x)  # number of points
    sumx = sum(x)
    sumy = sum(y)
    sumxy = sum(xi * yi for xi, yi in zip(x, y))
    sumx_square = sum(xi ** 2 for xi in x)
    m = (n*sumxy - sumx*sumy)/(n*sumx_square - (sumx)**2)
    b = (sumy - m*sumx)/n
    return m, b
def draw_minnesota2(new_cancer, population):
    '''
    Plot the linear correlation between the number of new cancers and
    the total population in Minnesota from 1988-2013.
    Parameters:
        new_cancer: number of new cancers per year
        population: total population per year
    Return value: None (pyplot.show() blocks until the window is closed)
    '''
    pyplot.scatter(new_cancer, population)
    # Overlay the least-squares fit between the data extremes.
    m, b = linearRegression(new_cancer, population)
    minX = min(new_cancer)
    maxX = max(new_cancer)
    pyplot.plot([minX, maxX], [m * minX + b, m * maxX + b], color = 'red')
    pyplot.xlabel('Number of new cancer')
    pyplot.ylabel('Population')
    pyplot.title('Number of new cancer vs Total population in Minnesota')
    pyplot.show()
# Loaded at import time (module-level side effect); consumed by main() below.
new_cancer, population = readData_Minnesota2('Bladder - Minnesota.csv')
def main():
    """Render all four plots from the module-level data loaded above."""
    drawHist_gender(male, female)
    drawHist_stage(stageI, stageII, stageIII, stageIV)
    draw_minnesota(year, incidence_rate_male, incidence_rate_female)
    draw_minnesota2(new_cancer, population)

# Runs on import as well as direct execution (no __main__ guard in the
# original script; behaviour preserved).
main()
|
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from django.http import HttpResponse
from django.http import JsonResponse
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.template.loader import render_to_string
from django.template.loader import get_template
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles import finders
from django.template.defaultfilters import date as _date
import datetime
import uuid
import tempfile
import os
import pathlib
import zipfile
from shutil import copyfile
import jinja2
from .models import CourseType
from .models import CourseEvent
from .models import Attendee
from .models import CourseAttendee
from .models import InvoiceDetail
from .forms import RegistrationForm
def courses_json(request):
    """Return all future courses, ordered by date, as JSON."""
    upcoming = [
        course.json
        for course in CourseEvent.objects.order_by('date')
        if course.date > datetime.date.today()
    ]
    return JsonResponse({"courses": upcoming})
def courses_atom(request):
    """Render the Atom feed of all courses ordered by date.

    NOTE(review): `courses[0].date` raises IndexError when no courses exist —
    verify that callers guarantee a non-empty queryset. uuid1() embeds
    host/time information; presumably used as the feed id.
    """
    latest_courses_list = CourseEvent.objects.order_by('date')
    courses = latest_courses_list
    context = {
        "courses": courses,
        "date": courses[0].date,
        "uuid": uuid.uuid1(),
    }
    return render(request, "atom.xml", context)
def courses(request):
    """List upcoming published courses.

    Test-environment requests (?env=<TEST_KEY>) see only the test courses;
    everyone else sees everything except the test courses.
    """
    latest_courses_list = CourseEvent.objects.exclude(status=CourseEvent.CREATED).filter(date__gt=datetime.date.today()).order_by('date')
    if request.GET.get("env") == settings.TEST_KEY:
        latest_courses_list = latest_courses_list.filter(course_type__title__contains=settings.TEST_TITLE)
    else:
        latest_courses_list = latest_courses_list.exclude(course_type__title__contains=settings.TEST_TITLE)
    context = {
        "latest_courses_list": latest_courses_list,
        "level_choices": CourseType.level_choices,
        "google_tag": settings.GOOGLE_TAG,
    }
    return render(request, "courses.html", context)
def _empty_form(request, course_id):
    """Render an empty registration form for the given course.

    NOTE(review): indexing level_choices by the level value assumes the level
    values equal their positions in the choices list — verify against the
    model definition.
    """
    course = get_object_or_404(CourseEvent, pk=course_id)
    test_env = False
    if request.GET.get("env") == settings.TEST_KEY:
        test_env = settings.TEST_KEY
    if course.course_type.level is not None:
        level = course.course_type.level_choices[course.course_type.level][1]
    else:
        level = None
    context = {
        "course": course,
        "level": level,
        "form": RegistrationForm(),
        "test_env": test_env,
        "google_tag": settings.GOOGLE_TAG,
    }
    return render(request, "course-forms.html", context)
def _create_new_attende(name, email, gdpr, marketing):
    """Register new generic attendee.

    :return: new_attendee
    """
    new_attendee = Attendee.objects.create(
        name=name,
        email=email,
        gdpr=gdpr,
        marketing=marketing,
        token=<KEY>(),  # NOTE(review): `<KEY>` appears to be a redacted token generator — restore before use.
        date_signed=datetime.date.today()
    )
    # NOTE(review): objects.create() already persists the row; this save()
    # is redundant (harmless extra UPDATE).
    new_attendee.save()
    return new_attendee
def _update_attendee_by_email(email, marketing, gdpr, name=None):
    """Update attendee marketing information and GDPR of user identified by
    e-mail.

    Also refreshes date_signed to today and optionally updates the name.
    Raises Attendee.DoesNotExist (ObjectDoesNotExist) for unknown e-mails.
    """
    attendee = Attendee.objects.get(email=email)
    attendee.date_signed = datetime.date.today()
    attendee.marketing = marketing
    attendee.gdpr = gdpr
    if name:
        attendee.name = name
    attendee.save()
    return attendee
def _register_new_attendee(request, course_id):
    """Register a new attendee for course `course_id` from the POSTed form.

    Creates/updates the Attendee, creates a CourseAttendee with the computed
    price, attaches invoice details, sends notification mails and renders the
    confirmation page (or "already registered" for duplicates).
    """
    form = RegistrationForm(request.POST)
    is_test = (request.GET.get("env") == settings.TEST_KEY)
    # Validate the form: the captcha field will automatically
    # check the input
    if not form.is_valid():
        # NOTE(review): the validation result is ignored — registration
        # proceeds even for invalid forms (bad_request is commented out).
        # return defaults.bad_request(request,
        #                             SuspiciousOperation("Form not valid"))
        pass
    name = request.POST["name"]
    email = request.POST["email_attendee"]
    attendee = None
    course_attendee = None
    course_event = get_object_or_404(CourseEvent, pk=course_id)
    gdpr = False
    marketing = False
    student = False
    amount = 0
    # Human-readable level label for the course (None when level unset).
    if course_event.course_type.level is not None:
        level = list(filter(lambda c: c[0] == course_event.course_type.level,
                            CourseType.level_choices))[0][1]
    else:
        level = None
    # Checkbox fields arrive as "on" when ticked.
    if "gdpr" in request.POST and request.POST["gdpr"] == "on":
        gdpr = True
    if "marketing" in request.POST and request.POST["marketing"] == "on":
        marketing = True
    if "student" in request.POST and request.POST["student"] == "on":
        student = True
    else:
        student = False  # NOTE(review): redundant — `student` is already False here.
    existing_attendees = course_event.courseattendee_set.filter(
        attendee__email=email
    )
    if level is not None:
        title = "{} - {}".format(course_event.course_type.title, level)
    else:
        title = course_event.course_type.title
    # Duplicate registration: show the "already registered" page instead.
    if len(existing_attendees) > 0:
        context = {
            "name": existing_attendees[0].attendee.name,
            "email": existing_attendees[0].attendee.email,
            "title": title,
            "google_tag": settings.GOOGLE_TAG,
        }
        return render(request, "already_registered.html", context)
    # save attendee details only if attendee was registered
    attendee = None
    try:
        attendee = Attendee.objects.get(email=email)
        _update_attendee_by_email(email, marketing, gdpr, name)
    except ObjectDoesNotExist as e:
        attendee = _create_new_attende(name, email, gdpr, marketing)
    # Pricing: early-bird (with student discount) until early_date, late
    # price afterwards.
    amount = 0
    if datetime.date.today() <= course_event.early_date:
        if student:
            amount = course_event.price_student
        else:
            amount = course_event.price_regular
    else:
        amount = course_event.price_late
    course_attendee = CourseAttendee(
        attendee=attendee,
        course=course_event,
        student=student,
        registration_date=datetime.date.today(),
        level=request.POST["level"],
        note=request.POST["note"],
        topics=request.POST["topics"],
        next_topics=request.POST["next_topics"],
        attended=False,
        amount=amount,
        token=<KEY>()
    )
    # NOTE(review): `<KEY>` above appears to be a redacted token generator.
    attendee.courses.add(course_event)
    # Invoice details fall back to the attendee's own name/e-mail.
    organisation = request.POST["organisation"]
    if not organisation:
        organisation = attendee.name
    invoicemail = request.POST["invoicemail"]
    if not invoicemail:
        invoicemail = request.POST["email_attendee"]
    order = request.POST["order"]
    invoice_detail = None
    # Reuse an existing invoice record for the same order + organisation.
    if order:
        invoice_details = InvoiceDetail.objects.filter(
            order=order,
            name=organisation
        )
        if len(invoice_details) > 0:
            invoice_detail = invoice_details[0]
    if not invoice_detail:
        invoice_detail = InvoiceDetail(
            address="{street}\n{zipcode} {city}".format(
                street=request.POST["street"],
                zipcode=request.POST["zip_code"],
                city=request.POST["city"]),
            name=organisation,
            ico=request.POST["ico"],
            dic=request.POST["dic"],
            order=request.POST["order"],
            email=invoicemail
        )
        invoice_detail.save()
    course_attendee.invoice_detail = invoice_detail
    course_attendee.save()
    _send_mails(course_event, attendee, title, organisation, amount, is_test)
    context = {
        "course_name": title,
        "course_date": course_event.date,
        "attendee": attendee.name,
        "mail": attendee.email,
        "course_id": course_event.id,
        "google_tag": settings.GOOGLE_TAG,
    }
    return render(request, "submitted.html", context)
def _send_mails(course_event, attendee, title,
                organisation, amount, is_test=False):
    """Send e-mails to info at gismentors and to the new course attendee.

    In test mode the organiser summary goes to settings.TEST_MAIL and no
    confirmation is sent to the attendee; otherwise the summary goes to
    settings.INFO_MAIL and the attendee receives a confirmation. The summary
    body was previously duplicated verbatim in both branches; it is built
    once here.
    """
    summary_body = """
Kurz: {}
Účastník: {}
E-mail: {}
Organizace: {}
Celkem registrovaných účastníků: {}
Celkem peněz (bez DPH): {}
""".format(
        title,
        attendee.name,
        attendee.email,
        organisation,
        len(course_event.courseattendee_set.all()),
        course_event.suma_netto
    )
    recipient = settings.TEST_MAIL if is_test else settings.INFO_MAIL
    send_mail(
        '[GISMentors-kurzy] {} {}'.format(title, course_event.date),
        summary_body,
        '<EMAIL>',
        [recipient],
        fail_silently=True,
    )
    if not is_test:
        # Attendee confirmation is deliberately skipped in test mode
        # (mirrors the original behaviour).
        send_mail(
            '[GISMentors-kurzy] Potvrzení přihlášky',
            render_to_string('potvrzeni.txt', {
                'name': attendee.name,
                "title": title,
                "date": course_event.date,
                "amount": int(amount)
            }),
            '<EMAIL>',
            [attendee.email],
            fail_silently=True,
        )
def course(request, course_id):
    """Course detail view: registration form on GET, submission on POST.

    Raises Http404 for courses whose date has already passed.
    """
    course_event = get_object_or_404(CourseEvent, pk=course_id)
    if course_event.date <= datetime.date.today():
        raise Http404("Kurz již proběhl. | The course took already place.")
    if request.POST:
        return _register_new_attendee(request, course_id)
    return _empty_form(request, course_id)
def get_certificates_zip(course_id):
    """Render a LaTeX certificate per attendee of the course and pack them,
    with the build assets, into a ZIP in a fresh temp directory.

    Returns (temp_file, temp_dir); temp_file is relative to temp_dir because
    of the os.chdir below. NOTE(review): os.chdir changes the process-wide
    working directory and is never restored — side effect for the caller.
    """
    course_event = get_object_or_404(CourseEvent, pk=course_id)
    attendees = course_event.courseattendee_set.all()
    temp_dir = tempfile.mkdtemp(prefix="gismentors-certificates-")
    # NOTE(review): __str2__ is presumably a custom model method — confirm.
    temp_file = "{}-certifikaty.zip".format(course_event.__str2__())
    os.mkdir(os.path.join(temp_dir, "certs"))
    os.mkdir(os.path.join(temp_dir, "images"))
    template = get_template("certificate.tex")
    mydir = str(pathlib.Path(template.origin.name).parent)
    # Jinja2 environment with LaTeX-friendly delimiters so TeX braces don't
    # clash with Jinja's defaults.
    # NOTE(review): '\B' and '\V' are invalid escape sequences in non-raw
    # strings (DeprecationWarning); raw strings would be safer.
    latex_jinja_env = jinja2.Environment(
        block_start_string='\BLOCK{',
        block_end_string='}',
        variable_start_string='\VAR{',
        variable_end_string='}',
        comment_start_string='\#{',
        comment_end_string='}',
        line_statement_prefix='%%',
        line_comment_prefix='%#',
        trim_blocks=True,
        autoescape=False,
        loader=jinja2.FileSystemLoader(mydir)
    )
    certificate_template = latex_jinja_env.get_template("certificate.tex")
    # Copy the course logo into the work directory.
    copyfile(
        course_event.course_type.image.path,
        os.path.join(temp_dir,
                     os.path.basename(course_event.course_type.image.name))
    )
    os.chdir(temp_dir)
    copyfile(finders.find("logo_labels.png"),
             "logo_labels.png")
    copyfile(finders.find("Makefile"),
             "Makefile")
    content = [l.strip() for l in
               course_event.course_type.certificate_content.split("\n")]
    with zipfile.ZipFile(temp_file, 'w') as myzip:
        myzip.write(os.path.basename(course_event.course_type.image.name))
        # One rendered .tex file per attendee.
        for attendee in attendees:
            context = {
                "name": attendee.attendee.name,
                "course_title": course_event.course_type.long_str,
                "logo": course_event.course_type.image.name,
                "place": course_event.location.postal_code,
                "date": _date(course_event.date, "j. E Y"),
                "content": content,
                "lectors": [lector.name for lector in course_event.lectors.all()],
                "detail": course_event.course_type.detail,
            }
            file_name = "{}-{}-{}.tex".format(
                course_event.date.strftime("%Y-%m-%d"),
                course_event.course_type.title,
                str(attendee.id)
            )
            with open(file_name, "w") as out:
                out.write(certificate_template.render(context))
            myzip.write(os.path.basename(file_name))
        myzip.write("logo_labels.png")
        myzip.write("Makefile")
    return (temp_file, temp_dir)
@login_required(login_url='/admin/login/')
def certificates(request, course_id):
    """Generate certificates for given course, save them to ZIP file and
    return the ZIP as an HTTP attachment.
    """
    course_event = get_object_or_404(CourseEvent, pk=course_id)
    # BUG FIX: get_certificates_zip() returns a (zip_file_name, temp_dir)
    # tuple; the original code passed the whole tuple to open(), which
    # raises TypeError.  Unpack it and open the file inside temp_dir.
    zip_name, temp_dir = get_certificates_zip(course_id)
    with open(os.path.join(temp_dir, zip_name), 'rb') as myzip:
        response = HttpResponse(myzip.read())
    response['Content-Disposition'] = 'attachment; filename=certifikaty-{}-{}.zip'.format(
        course_event.date.strftime("%Y-%m-%d"),
        course_event.course_type.title
    )
    response['Content-Type'] = 'application/x-zip'
    return response
|
import torch
from .num_nodes import maybe_num_nodes
def contains_self_loops(edge_index):
    r"""Returns :obj:`True` if the graph given by :attr:`edge_index`
    contains self-loops.

    (The previous docstring claimed the opposite — "does not contain" —
    contradicting the code, which counts edges with ``row == col``.)

    Args:
        edge_index (LongTensor): The edge indices.
    :rtype: bool
    """
    row, col = edge_index
    # A self-loop is an edge whose source and target node coincide.
    mask = row == col
    return mask.sum().item() > 0
def remove_self_loops(edge_index, edge_attr=None):
    r"""Removes every self-loop in the graph given by :attr:`edge_index`, so
    that :math:`(i,i) \not\in \mathcal{E}` for every :math:`i \in \mathcal{V}`.

    Args:
        edge_index (LongTensor): The edge indices.
        edge_attr (Tensor, optional): Edge weights or multi-dimensional
            edge features. (default: :obj:`None`)
    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """
    source, target = edge_index
    # Keep only the edges whose endpoints differ.
    keep = source != target
    filtered_index = edge_index[:, keep]
    if edge_attr is not None:
        edge_attr = edge_attr[keep]
    return filtered_index, edge_attr
def add_self_loops(edge_index, edge_weight=None, fill_value=1, num_nodes=None):
    r"""Adds a self-loop :math:`(i,i) \in \mathcal{E}` to every node
    :math:`i \in \mathcal{V}` in the graph given by :attr:`edge_index`.
    In case the graph is weighted, all existent self-loops will be removed and
    replaced by weights denoted by :obj:`fill_value`.

    Args:
        edge_index (LongTensor): The edge indices.
        edge_weight (Tensor, optional): One-dimensional edge weights.
            (default: :obj:`None`)
        fill_value (int, optional): If :obj:`edge_weight` is not :obj:`None`,
            will add self-loops with edge weights of :obj:`fill_value` to the
            graph. (default: :obj:`1`)
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """
    n = maybe_num_nodes(edge_index, num_nodes)
    # One (i, i) edge per node, stacked as a 2 x n index tensor.
    loop = torch.arange(n, dtype=torch.long, device=edge_index.device)
    loop = loop.unsqueeze(0).repeat(2, 1)
    if edge_weight is not None:
        assert edge_weight.numel() == edge_index.size(1)
        loop_w = edge_weight.new_full((n, ), fill_value)
        edge_weight = torch.cat([edge_weight, loop_w], dim=0)
    return torch.cat([edge_index, loop], dim=1), edge_weight
def add_remaining_self_loops(edge_index,
                             edge_weight=None,
                             fill_value=1,
                             num_nodes=None):
    r"""Adds remaining self-loop :math:`(i,i) \in \mathcal{E}` to every node
    :math:`i \in \mathcal{V}` in the graph given by :attr:`edge_index`.
    In case the graph is weighted and already contains a few self-loops, only
    non-existent self-loops will be added with weights denoted by
    :obj:`fill_value`.

    Args:
        edge_index (LongTensor): The edge indices.
        edge_weight (Tensor, optional): One-dimensional edge weights.
            (default: :obj:`None`)
        fill_value (int, optional): If :obj:`edge_weight` is not :obj:`None`,
            will add self-loops with edge weights of :obj:`fill_value` to the
            graph. (default: :obj:`1`)
        num_nodes (int, optional): The number of nodes, *i.e.*
            :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
    :rtype: (:class:`LongTensor`, :class:`Tensor`)
    """
    num_nodes = maybe_num_nodes(edge_index, num_nodes)
    row, col = edge_index
    mask = row != col
    # BUG FIX: the original ``1 - mask`` performs arithmetic on a bool
    # tensor, which raises in modern PyTorch; and an integer result would
    # *index* ``edge_weight`` below instead of masking it.  Use logical
    # negation to select the existing self-loop edges.
    inv_mask = ~mask
    loop_weight = torch.full(
        (num_nodes, ),
        fill_value,
        dtype=None if edge_weight is None else edge_weight.dtype,
        device=edge_index.device)
    if edge_weight is not None:
        assert edge_weight.numel() == edge_index.size(1)
        # Preserve the weights of self-loops that already exist.
        remaining_edge_weight = edge_weight[inv_mask]
        if remaining_edge_weight.numel() > 0:
            loop_weight[row[inv_mask]] = remaining_edge_weight
        edge_weight = torch.cat([edge_weight[mask], loop_weight], dim=0)
    loop_index = torch.arange(0, num_nodes, dtype=row.dtype, device=row.device)
    loop_index = loop_index.unsqueeze(0).repeat(2, 1)
    edge_index = torch.cat([edge_index[:, mask], loop_index], dim=1)
    return edge_index, edge_weight
|
"""The RISC-V CPU"""
from baremetal import *
from chips_v.decode import decode
from chips_v.execute import execute
from chips_v.m_extension import m_extension
from chips_v.utils import *
def cpu(instruction, clk, bus, march="rv32im"):
    """Assemble a three-stage (fetch / decode / execute) RISC-V pipeline.

    Parameters:
        instruction: value read from instruction memory at the current PC.
        clk: clock signal driving every pipeline register.
        bus: system bus object; a master port is added for loads/stores.
        march: architecture string; multiply/divide hardware is generated
            only when it contains "m".

    Returns:
        (pc, instruction_en, debug) -- the program counter register (to
        drive instruction fetch), the fetch-enable strobe, and a Debug
        object exposing internal signals for simulation/tracing.

    NOTE(review): this builds hardware with the ``baremetal`` DSL — each
    call constructs wires/registers rather than executing sequentially, so
    statement order reflects circuit wiring, not control flow.
    """
    master = bus.add_master()
    debug = Debug()
    # generate a global enable signal
    # When stall is asserted, every pipeline register holds its value.
    stall = Boolean().wire()
    global_enable = ~stall
    # simple state machine to control pipeline
    # flush invalidates the instructions in flight after a taken branch.
    flush = Boolean().wire()
    fetch_en = Boolean().constant(1)
    decode_en = Boolean().register(clk, en=global_enable, init=0, d=fetch_en & ~flush)
    execute_en = Boolean().register(clk, en=global_enable, init=0, d=decode_en & ~flush)
    #########################################################################
    # Fetch - pipeline stage 0
    #########################################################################
    pc = Unsigned(32).register(clk, init=0, en=fetch_en & global_enable)
    instruction_en = fetch_en & global_enable
    # this_pc tracks the PC of the instruction currently in the pipeline.
    (this_pc,) = register(clk, instruction_en, pc)
    fetched_instruction = instruction
    #########################################################################
    # Decode - pipeline stage 1
    #########################################################################
    # read registers
    # rs1/rs2 fields per the RISC-V base instruction encoding.
    decoder_rs1 = instruction[19:15]
    decoder_rs2 = instruction[24:20]
    # Two single-read-port RAMs hold identical copies of the register file,
    # giving two simultaneous read ports (both are written identically below).
    registersa = Signed(32).ram(clk=clk, depth=32)
    registersb = Signed(32).ram(clk=clk, depth=32)
    src1 = registersa.read(decoder_rs1)
    src2 = registersb.read(decoder_rs2)
    # decode instruction
    # Forwarding wires: driven from the execute stage, consumed in decode.
    fwd1 = Boolean().wire()
    fwd2 = Boolean().wire()
    fwd_val = Signed(32).wire()
    decode_outputs = decode(instruction, src1, src2, fwd1, fwd2, fwd_val, this_pc)
    decode_outputs += (instruction, this_pc)
    # register outputs
    decode_outputs = register(clk, global_enable & decode_en, *decode_outputs)
    (
        src1,
        src2,
        A,
        B,
        operation,
        shift_amount,
        add_sub,
        signed,
        instruction,
        this_pc,
    ) = decode_outputs
    #########################################################################
    # Execute - pipeline stage 2
    #########################################################################
    # execute instruction
    execute_outputs = execute(
        instruction,
        src1,
        src2,
        A,
        B,
        operation,
        add_sub,
        shift_amount,
        signed,
        master.s2m,
        this_pc,
    )
    (
        write_data,
        write_enable,
        data_out,
        address,
        byte_enable,
        data_valid,
        write_read,
        take_branch,
        branch_address,
    ) = execute_outputs
    # Drive the bus master port; transactions are only valid while the
    # execute stage itself is valid.
    master.address.drive(address)
    master.m2s.drive(data_out)
    master.byte_enable.drive(byte_enable)
    master.write_read.drive(write_read)
    master.valid.drive(data_valid & execute_en)
    # optionally enable multiply/divide/modulo logic for m extension
    if "m" in march:
        (m_write_data, m_write_enable, m_wait) = m_extension(clk, A, B, instruction)
        # The M unit overrides the ALU result when it has a result to write.
        write_data = write_data.subtype.select(m_write_enable, write_data, m_write_data)
        write_enable = write_enable | m_write_enable
        debug.m_write_data = m_write_data
        debug.m_write_enable = m_write_enable
        debug.m_wait = m_wait
    else:
        m_wait = Boolean().constant(0)
    # write registers
    # Both register-file copies are written with the same data so their
    # contents stay identical.
    rd = instruction[11:7]
    registersa.write(rd, write_data, write_enable & execute_en & global_enable)
    registersb.write(rd, write_data, write_enable & execute_en & global_enable)
    # register forwarding
    # Forward the execute-stage result to decode when it reads the same
    # register that execute is about to write.
    fwd1.drive((decoder_rs1 == rd) & write_enable & execute_en & global_enable)
    fwd2.drive((decoder_rs2 == rd) & write_enable & execute_en & global_enable)
    fwd_val.drive(write_data)
    #########################################################################
    # Increment Program Counter
    # Sequential fetch (pc + 4) unless a branch is taken in execute.
    pc.d(Unsigned(32).select(take_branch & execute_en, pc + 4, branch_address))
    flush.drive(execute_en & take_branch)
    # stall the whole pipeline if we are waiting for some event before we
    # continue
    stall.drive(execute_en & ((master.valid & ~master.ready) | m_wait))
    # Expose internal signals for simulation/tracing.
    debug.valid = master.valid
    debug.ready = master.ready
    debug.write_read = master.write_read
    debug.address = master.address
    debug.byte_enable = master.byte_enable
    debug.data_out = master.m2s
    debug.data_in = master.s2m
    debug.fetch_en = fetch_en
    debug.decode_en = decode_en
    debug.execute_en = execute_en
    debug.this_pc = this_pc
    debug.fetched_instruction = fetched_instruction
    debug.instruction = instruction
    debug.flush = flush
    debug.stall = stall
    debug.take_branch = take_branch
    debug.src1 = src1
    debug.src2 = src2
    debug.fwd1 = fwd1
    debug.fwd2 = fwd2
    debug.fwd_val = fwd_val
    debug.write_data = write_data
    debug.write_enable = write_enable
    debug.rd = rd
    debug.global_enable = global_enable
    debug.branch_address = branch_address
    debug.pc = pc
    # NOTE(review): debug.rd is assigned twice (here and above) — harmless,
    # kept as-is.
    debug.rd = rd
    debug.decoder_rs1 = decoder_rs1
    debug.decoder_rs2 = decoder_rs2
    return pc, instruction_en, debug
|
<filename>CDN_Networking/cs5700_project5-master/dnsserver.py
#!/usr/bin/env python3
import sys
import socket
import dns.query
import dns.message
import dns.rrset
import threading
import geoip2.database
import math
import http.client
import time
import signal
# maxmind key: <KEY>
# key 2 : <KEY>
# get database commands
# wget -O ip_db.tar.gz 'https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=yihZe7a2Exa3ArYt&suffix=tar.gz'
# tar -zxf ip_db.tar.gz --wildcards */GeoLite2-ASN.mmdb --strip-components=1
# rm -r ip_db.tar.gz
# https://programtalk.com/python-examples/dns.message.make_response/
# https://dnspython.readthedocs.io/en/latest/query.html
# -p 40020 -n cs5700cdn.example.com
# Raw IP addresses of the EC2 replica (HTTP cache) instances.
EC2_IPS = [
    '172.16.17.32', # N. Virginia
    '172.16.31.10', # Tokyo
    '192.168.127.12', # Sydney
    '172.16.31.10', # Ireland
    '172.16.31.10', # Sao Paulo
    '192.168.3.11' # N. California
]
# Geolocations for the EC2 replicas.
# Each entry is a tuple (latitude, longitude, ip address).
EC2_GEOLOCATIONS = [
    (39.0481, -77.4728, '172.16.17.32'), # N. Virginia
    (35.685, 139.7514, '172.16.31.10'), # Tokyo
    (-33.8612, 151.1982, '192.168.127.12'), # Sydney
    (53.3331, -6.2489, '172.16.31.10'), # Ireland
    (-23.5733, -46.6417, '172.16.31.10'), # Sao Paulo
    (37.33053, -121.83823, '192.168.3.11') # N. California
]
# Maps each client to the replica chosen for it by geolocation.
# key=client_ip, value=replica_ip
CLIENT_IP_GEO = {}
# Maps each client to the replica chosen by active RTT measurement
# (takes precedence over the geolocation choice when present).
# key=client_ip, value=replica_ip with the best measured RTT
CLIENT_IP_RTT = {}
# Set of client IPs that have queried this DNS server.
CLIENT_IP = set()
# Shared-secret path used so the EC2 replicas can identify this DNS server.
DNS_KEY = '<KEY>'
# Module-level handle on the UDP socket so the signal handler can close it.
SOCK = None
# Flag polled by the measurement thread; set True to stop it.
STOP_MEAS = False
def handle_kill(*args):
    """Signal handler: request measurement-thread shutdown and close the
    listening socket (if one has been opened)."""
    global STOP_MEAS
    STOP_MEAS = True
    sock = SOCK
    if sock is not None:
        sock.close()
def listen():
    '''
    Main DNS server loop.

    Parses the command line (-p PORT -n NAME, in either order), binds a UDP
    socket, installs SIGTERM/SIGINT handlers, starts the background
    active-measurement thread, then answers A-record queries for NAME with
    the best replica IP for each client: measured RTT first, cached
    geolocation second, fresh geolocation lookup last.

    Raises ValueError on missing/invalid command line arguments.
    '''
    global SOCK
    # Checks to ensure the correct amount of arguments are included in the command call
    if len(sys.argv) < 5:
        raise ValueError('Insufficient command line arguments provided')
    # Check to make sure commands arguments are in correct order
    if sys.argv[1] == '-p' and sys.argv[3] == '-n':
        # assign port and name of dns server
        port = sys.argv[2]
        name = sys.argv[4]
    elif sys.argv[3] == '-p' and sys.argv[1] == '-n':
        # assign port and name of dns server
        port = sys.argv[4]
        name = sys.argv[2]
    else:
        raise ValueError("Bad command line arguments provided")
    # Error check and make sure port is an integer
    try:
        port = int(port)
    except ValueError:
        raise ValueError('Port must be an integer')
    # get the hostname
    hostname = socket.gethostname()
    # get the ip address of for host
    name_ip = socket.gethostbyname(hostname)
    # create socket connection
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # bind socket connection to the host ip address at the specified port #
    sock.bind((name_ip, port))
    # collect sock in global variable so it can be closed on kill pid
    SOCK = sock
    # now that SOCK is the socket, set handle_kill to close it on signal
    signal.signal(signal.SIGTERM, handle_kill)
    signal.signal(signal.SIGINT, handle_kill)
    # Locks shared with the measurement thread; each guards one global dict/set.
    rtt_lock = threading.Lock()
    client_lock = threading.Lock()
    geo_ip_lock = threading.Lock()
    active_meas_thread = threading.Thread(target=doActiveMeasurements, args=(port, rtt_lock, client_lock, geo_ip_lock))
    active_meas_thread.start()
    # doActiveMeasurements(port, rtt_lock, client_lock)
    while True:
        # get query
        val = dns.query.receive_udp(sock)
        # pull the Question from the Message
        question = val[0].question[0]
        # get requested server hostname
        q_name = question.name
        str_q_name = str(q_name)
        # if requested name is not name handed to server,
        # bad name, not for our dns ([:-1] strips the trailing dot)
        if str_q_name[:-1] != name:
            continue
        # get the closest server to client
        # acquire active lock in case its being written to in active measurements
        has_rtt_val = False
        rtt_lock.acquire()
        if val[2][0] in CLIENT_IP_RTT:
            closest_ip = CLIENT_IP_RTT[val[2][0]]
            has_rtt_val = True
        rtt_lock.release()
        # if no rtt val was found, check for a cached geolocation choice
        # will allow for locking mechanism later on
        has_geo_val = False
        # lock geo_ip in case of concurrent reading in active measurement thread
        geo_ip_lock.acquire()
        if not has_rtt_val and val[2][0] in CLIENT_IP_GEO:
            closest_ip = CLIENT_IP_GEO[val[2][0]]
            has_geo_val = True
        geo_ip_lock.release()
        # if neither an rtt val nor a cached geo val was found, look it up
        if not has_rtt_val and not has_geo_val:
            closest_ip = getClosestServer(val[2][0])
            # lock geo_ip in case of concurrent reading in active measurement thread
            geo_ip_lock.acquire()
            CLIENT_IP_GEO[val[2][0]] = closest_ip
            geo_ip_lock.release()
        # lock CLIENT_IPS in case active measurement thread is reading from CLIENT_IP
        client_lock.acquire()
        if val[2][0] not in CLIENT_IP:
            CLIENT_IP.add(val[2][0])
        client_lock.release()
        # Create Answer -->
        # (Question name (queried hostname), 128 ttl, 'IN' rdclass, 'A' rdtype,
        # replica ip (where to go))
        answer = dns.rrset.from_text(q_name, 128, 'IN', 'A', closest_ip)
        # make response from init message (adds RQ flag to response)
        res = dns.message.make_response(val[0])
        # add Answer to response
        res.answer.append(answer)
        # send answer back to requesting client (addr tuple in val[2])
        dns.query.send_udp(sock, res, val[2])
#-------- GEO LOCATION ---------------#
def findGeoLocation(ip):
    '''
    Look up the geographic coordinates of an IP address.
    :param ip: ip address of client
    :return: (latitude, longitude) tuple
    '''
    # Query the local MaxMind city database for the address.
    reader = geoip2.database.Reader('./GeoLite2-City.mmdb')
    city = reader.city(ip)
    location = city.location
    return location.latitude, location.longitude
def getClosestServer(ip):
    '''
    Pick the replica server geographically closest to the client.
    :param ip: ip address of client
    :return: ip address of the closest replica server
    '''
    # Geolocate the client first.
    lat, long = findGeoLocation(ip)

    def _distance(entry):
        # Plain Euclidean distance in (latitude, longitude) space — the
        # same metric the original linear scan used.
        return math.sqrt(math.pow(abs(lat - entry[0]), 2) +
                         math.pow(abs(long - entry[1]), 2))

    # min() returns the first entry on ties, matching the strict '<' scan.
    return min(EC2_GEOLOCATIONS, key=_distance)[2]
#------------ACTIVE MEASUREMENT JUNK--------------
def doActiveMeasurements(port, rtt_lock: threading.Lock, client_lock: threading.Lock, geo_ip_lock: threading.Lock):
    '''
    Runs active measurements to gather RTT information about client ips.
    Loops (roughly every 30 seconds) until STOP_MEAS is set: asks every
    replica for its RTT to each known client, then records the replica
    with the best RTT per client in CLIENT_IP_RTT.
    :param port: port to connect to http server on
    :param rtt_lock: locks information regarding to client_rtts for multithread read/write
    :param client_lock: locks information regarding client IPs for multithread read/write
    :param geo_ip_lock: locks the CLIENT_IP_GEO mapping for multithread read/write
    :return: None
    '''
    # initially, wait for digs to populate client addresses - wait 10 seconds
    time.sleep(10)
    # repeat this action for duration of program
    while not STOP_MEAS:
        # store start time of current probe set
        start_time = time.time()
        # generate collection of client IPS
        # lock for CLIENT_IP -- parent thread writes to this, get state as currently is
        client_lock.acquire()
        client_ip_data = ''
        for each in CLIENT_IP:
            client_ip_data += '*' + each
        client_lock.release()
        # replica lock is used to write to client_rtts in each thread for each replica
        replica_lock = threading.Lock()
        client_rtts = {}
        # collect all threads in list to wait for them to finish
        threads = []
        for ip in EC2_IPS:
            client_rtts[ip] = []
            # send to http and get response (one worker thread per replica)
            meas_thread = threading.Thread(target=doActiveMeasurement, args=(ip, client_ip_data, port, client_rtts, replica_lock))
            # add thread to list of active threads
            threads.append(meas_thread)
            # start thread
            meas_thread.start()
        # wait for all threads to finish before writing
        for thread in threads:
            thread.join()
        # holds best times
        # key: client ip, value: tuple(rtt, ec2 ip)
        best_times = {}
        # iterate through ec2 ip results
        for ec2_ip in client_rtts.keys():
            # get results for all clients for current ec2
            ec2_results = client_rtts[ec2_ip]
            # iterate through results
            # results: tuple(client ip, rtt)
            for results in ec2_results:
                # if client ip has no record in best times, make one
                # assign rtt as first val of tuple, ec2 ip as second val
                if results[0] not in best_times:
                    best_times[results[0]] = (results[1], ec2_ip)
                else:
                    # otherwise, get the current best time for the client ip
                    curr_best = best_times[results[0]][0]
                    # get the rtt for the current result
                    curr_rtt = results[1]
                    # if rtt for current result is lower than best
                    if float(curr_rtt) < float(curr_best):
                        # replace entry for client ip with rtt for result and associated ec2 address
                        best_times[results[0]] = (results[1], ec2_ip)
        # iterate through client ip results (keys in best_times)
        for client_ip in best_times:
            # if the best time is over 999ms ping, problem contacting server from all replicas
            # set to closest geo IP instead
            if float(best_times[client_ip][0]) >= 999:
                geo_ip_lock.acquire()
                CLIENT_IP_RTT[client_ip] = CLIENT_IP_GEO[client_ip]
                geo_ip_lock.release()
            # set client ip to correspond to the ec2 instance with the best time
            # lock global CLIENT_IP_RTT to prevent data race with requesting clients in main thread
            else:
                rtt_lock.acquire()
                CLIENT_IP_RTT[client_ip] = best_times[client_ip][1]
                rtt_lock.release()
        # calculate time to wait -- run every 30 seconds at minimum
        end_time = time.time()
        wait_val = 30 - (end_time - start_time)
        if wait_val < 0:
            wait_val = 0
        # wait given time seconds in between probes to re-measure network
        time.sleep(wait_val)
def doActiveMeasurement(ec2_ip, client_ip_data, port, client_rtts, replica_lock: threading.Lock):
    '''
    Requests active measurement data for a specific ec2 http server.
    :param ec2_ip: ip address of ec2 http server
    :param client_ip_data: ips to request RTTs for
    :param port: port to connect to http server on
    :param client_rtts: holds information about rtts for client
    :param replica_lock: locks client_rtts for multithread read/write
    :return: None
    '''
    # connect to http server at ec2 ip address
    conn = http.client.HTTPConnection(ec2_ip, port)
    try:
        # request active measurement data (DNS_KEY identifies this server)
        conn.request('GET', DNS_KEY, client_ip_data)
        # get the response from the server
        res = conn.getresponse()
        # if 200 status, good response
        if res.status == 200:
            # response format: '--client_ip::rtt--client_ip::rtt--...'
            data = res.read().decode()
            ip_rtts = list(filter(None, data.split('--')))
            for rtts in ip_rtts:
                info = rtts.split('::')
                # append (client ip, rtt) to list at entry for ec2 ip;
                # lock because the parent thread owns client_rtts
                with replica_lock:
                    client_rtts[ec2_ip].append((info[0], info[1]))
    finally:
        # BUG FIX: the connection was previously never closed, leaking a
        # socket per replica per measurement round.
        conn.close()
# Run the DNS server only when executed as a script, so importing this
# module (e.g. for testing) does not start the server.
if __name__ == '__main__':
    listen()
|
<filename>test/test_policy.py
import hashlib
import pytest
from datetime import date
from ssh_audit.policy import Policy
from ssh_audit.ssh2_kex import SSH2_Kex
from ssh_audit.writebuf import WriteBuf
class TestPolicy:
    """Unit tests for the ssh-audit Policy parser and evaluator.

    The invalid-policy tests use ``pytest.raises`` (pytest is already
    imported by this module) instead of the previous hand-rolled
    try/except-and-flag pattern; the assertion semantics are identical.
    """

    @pytest.fixture(autouse=True)
    def init(self, ssh_audit):
        # Bind the classes under test to the instance for convenience.
        self.Policy = Policy
        self.wbuf = WriteBuf
        self.ssh2_kex = SSH2_Kex

    def _get_kex(self):
        '''Returns an SSH2.Kex object to simulate a server connection.'''
        w = self.wbuf()
        w.write(b'\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff')
        w.write_list(['kex_alg1', 'kex_alg2'])
        w.write_list(['key_alg1', 'key_alg2'])
        w.write_list(['cipher_alg1', 'cipher_alg2', 'cipher_alg3'])
        w.write_list(['cipher_alg1', 'cipher_alg2', 'cipher_alg3'])
        w.write_list(['mac_alg1', 'mac_alg2', 'mac_alg3'])
        w.write_list(['mac_alg1', 'mac_alg2', 'mac_alg3'])
        w.write_list(['comp_alg1', 'comp_alg2'])
        w.write_list(['comp_alg1', 'comp_alg2'])
        w.write_list([''])
        w.write_list([''])
        w.write_byte(False)
        w.write_int(0)
        return self.ssh2_kex.parse(w.write_flush())

    def test_builtin_policy_consistency(self):
        '''Ensure that the BUILTIN_POLICIES struct is consistent.'''
        for policy_name in Policy.BUILTIN_POLICIES:
            # Ensure that the policy name ends with " (version X)", where X is the 'version' field.
            version_str = " (version %s)" % Policy.BUILTIN_POLICIES[policy_name]['version']
            assert(policy_name.endswith(version_str))
            # Ensure that each built-in policy can be loaded with Policy.load_builtin_policy().
            assert(Policy.load_builtin_policy(policy_name) is not None)
        # Ensure that both server and client policy names are returned.
        server_policy_names, client_policy_names = Policy.list_builtin_policies()
        assert(len(server_policy_names) > 0)
        assert(len(client_policy_names) > 0)

    def test_policy_basic(self):
        '''Ensure that a basic policy can be parsed correctly.'''
        policy_data = '''# This is a comment
name = "Test Policy"
version = 1
compressions = comp_alg1
host keys = key_alg1
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        assert str(policy) == "Name: [Test Policy]\nVersion: [1]\nBanner: {undefined}\nCompressions: comp_alg1\nHost Keys: key_alg1\nOptional Host Keys: {undefined}\nKey Exchanges: kex_alg1, kex_alg2\nCiphers: cipher_alg1, cipher_alg2, cipher_alg3\nMACs: mac_alg1, mac_alg2, mac_alg3\nHost Key Sizes: {undefined}\nCA Key Sizes: {undefined}\nDH Modulus Sizes: {undefined}\nServer Policy: True"

    def test_policy_invalid_1(self):
        '''Basic policy, but with 'ciphersx' instead of 'ciphers'.'''
        policy_data = '''# This is a comment
name = "Test Policy"
version = 1
compressions = comp_alg1
host keys = key_alg1
key exchanges = kex_alg1, kex_alg2
ciphersx = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        with pytest.raises(ValueError):
            self.Policy(policy_data=policy_data)

    def test_policy_invalid_2(self):
        '''Basic policy, but is missing the required name field.'''
        policy_data = '''# This is a comment
#name = "Test Policy"
version = 1
compressions = comp_alg1
host keys = key_alg1
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        with pytest.raises(ValueError):
            self.Policy(policy_data=policy_data)

    def test_policy_invalid_3(self):
        '''Basic policy, but is missing the required version field.'''
        policy_data = '''# This is a comment
name = "<NAME>"
#version = 1
compressions = comp_alg1
host keys = key_alg1
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        with pytest.raises(ValueError):
            self.Policy(policy_data=policy_data)

    def test_policy_invalid_4(self):
        '''Basic policy, but is missing quotes in the name field.'''
        policy_data = '''# This is a comment
name = <NAME>
version = 1
compressions = comp_alg1
host keys = key_alg1
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        with pytest.raises(ValueError):
            self.Policy(policy_data=policy_data)

    def test_policy_invalid_5(self):
        '''Basic policy, but is missing quotes in the banner field.'''
        policy_data = '''# This is a comment
name = "<NAME>"
version = 1
banner = 0mg
compressions = comp_alg1
host keys = key_alg1
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        with pytest.raises(ValueError):
            self.Policy(policy_data=policy_data)

    def test_policy_invalid_6(self):
        '''Basic policy, but is missing quotes in the header field.'''
        policy_data = '''# This is a comment
name = "<NAME>"
version = 1
header = 0mg
compressions = comp_alg1
host keys = key_alg1
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        with pytest.raises(ValueError):
            self.Policy(policy_data=policy_data)

    def test_policy_create_1(self):
        '''Creates a policy from a kex and ensures it is generated exactly as expected.'''
        kex = self._get_kex()
        pol_data = self.Policy.create('www.l0l.com', 'bannerX', kex, False)
        # Today's date is embedded in the policy, so filter it out to get repeatable results.
        pol_data = pol_data.replace(date.today().strftime('%Y/%m/%d'), '[todays date]')
        # Instead of writing out the entire expected policy--line by line--just check that it has the expected hash.
        assert hashlib.sha256(pol_data.encode('ascii')).hexdigest() == '4af7777fb57a1dad0cf438c899a11d4f625fd9276ea3bb5ef5c9fe8806cb47dc'

    def test_policy_evaluate_passing_1(self):
        '''Creates a policy and evaluates it against the same server'''
        kex = self._get_kex()
        policy_data = self.Policy.create('www.l0l.com', None, kex, False)
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', kex)
        assert ret is True
        assert len(errors) == 0
        print(error_str)
        assert len(error_str) == 0

    def test_policy_evaluate_failing_1(self):
        '''Ensure that a policy with a specified banner fails against a server with a different banner'''
        policy_data = '''name = "Test Policy"
version = 1
banner = "XXX mismatched banner XXX"
compressions = comp_alg1, comp_alg2
host keys = key_alg1, key_alg2
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', self._get_kex())
        assert ret is False
        assert len(errors) == 1
        assert error_str.find('Banner did not match.') != -1

    def test_policy_evaluate_failing_2(self):
        '''Ensure that a mismatched compressions list results in a failure'''
        policy_data = '''name = "Test Policy"
version = 1
compressions = XXXmismatchedXXX, comp_alg1, comp_alg2
host keys = key_alg1, key_alg2
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', self._get_kex())
        assert ret is False
        assert len(errors) == 1
        assert error_str.find('Compression did not match.') != -1

    def test_policy_evaluate_failing_3(self):
        '''Ensure that a mismatched host keys results in a failure'''
        policy_data = '''name = "Test Policy"
version = 1
compressions = comp_alg1, comp_alg2
host keys = <KEY>, key_alg1, key_alg2
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', self._get_kex())
        assert ret is False
        assert len(errors) == 1
        assert error_str.find('Host keys did not match.') != -1

    def test_policy_evaluate_failing_4(self):
        '''Ensure that a mismatched key exchange list results in a failure'''
        policy_data = '''name = "Test Policy"
version = 1
compressions = comp_alg1, comp_alg2
host keys = key_alg1, key_alg2
key exchanges = <KEY>, kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', self._get_kex())
        assert ret is False
        assert len(errors) == 1
        assert error_str.find('Key exchanges did not match.') != -1

    def test_policy_evaluate_failing_5(self):
        '''Ensure that a mismatched cipher list results in a failure'''
        policy_data = '''name = "Test Policy"
version = 1
compressions = comp_alg1, comp_alg2
host keys = key_alg1, key_alg2
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, XXXmismatched, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', self._get_kex())
        assert ret is False
        assert len(errors) == 1
        assert error_str.find('Ciphers did not match.') != -1

    def test_policy_evaluate_failing_6(self):
        '''Ensure that a mismatched MAC list results in a failure'''
        policy_data = '''name = "Test Policy"
version = 1
compressions = comp_alg1, comp_alg2
host keys = key_alg1, key_alg2
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, XXXmismatched, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', self._get_kex())
        assert ret is False
        assert len(errors) == 1
        assert error_str.find('MACs did not match.') != -1

    def test_policy_evaluate_failing_7(self):
        '''Ensure that a mismatched host keys and MACs results in a failure'''
        policy_data = '''name = "Test Policy"
version = 1
compressions = comp_alg1, comp_alg2
host keys = key_alg1, key_alg2, XXXmismatchedXXX
key exchanges = kex_alg1, kex_alg2
ciphers = cipher_alg1, cipher_alg2, cipher_alg3
macs = mac_alg1, mac_alg2, XXXmismatchedXXX, mac_alg3'''
        policy = self.Policy(policy_data=policy_data)
        ret, errors, error_str = policy.evaluate('SSH Server 1.0', self._get_kex())
        assert ret is False
        assert len(errors) == 2
        assert error_str.find('Host keys did not match.') != -1
        assert error_str.find('MACs did not match.') != -1
|
<reponame>thomasjpfan/sk_typing<gh_stars>1-10
from typing import Optional
from typing import Union
from collections.abc import Callable
import numpy as np
from .typing import RandomStateType
from .typing import Literal
class DictionaryLearning:
    """Typed signature stub (mirrors sklearn.decomposition.DictionaryLearning)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    error_: np.ndarray
    n_iter_: int

    def __init__(
        self,
        n_components: Optional[int] = None,
        alpha: float = 1,
        max_iter: int = 1000,
        tol: float = 1e-08,
        fit_algorithm: Literal["lars", "cd"] = "lars",
        transform_algorithm: Literal[
            "lasso_lars", "lasso_cd", "lars", "omp", "threshold"
        ] = "omp",
        transform_n_nonzero_coefs: Optional[int] = None,
        transform_alpha: Optional[float] = None,
        n_jobs: Optional[int] = None,
        code_init: Optional[np.ndarray] = None,
        dict_init: Optional[np.ndarray] = None,
        verbose: bool = False,
        split_sign: bool = False,
        random_state: RandomStateType = None,
        positive_code: bool = False,
        positive_dict: bool = False,
        transform_max_iter: int = 1000,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class FactorAnalysis:
    """Typed signature stub (mirrors sklearn.decomposition.FactorAnalysis)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    loglike_: list
    noise_variance_: np.ndarray
    n_iter_: int
    mean_: np.ndarray

    def __init__(
        self,
        n_components: Optional[int] = None,
        tol: float = 0.01,
        copy: bool = True,
        max_iter: int = 1000,
        noise_variance_init: Optional[np.ndarray] = None,
        svd_method: Literal["lapack", "randomized"] = "randomized",
        iterated_power: int = 3,
        random_state: RandomStateType = 0,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class FastICA:
    """Typed signature stub (mirrors sklearn.decomposition.FastICA)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    mixing_: np.ndarray
    mean_: np.ndarray
    n_iter_: int
    whitening_: np.ndarray

    def __init__(
        self,
        n_components: Optional[int] = None,
        algorithm: Literal["parallel", "deflation"] = "parallel",
        whiten: bool = True,
        # fun may also be a user-supplied callable, hence the Union.
        fun: Union[Literal["logcosh", "exp", "cube"], Callable] = "logcosh",
        fun_args: Optional[dict] = None,
        max_iter: int = 200,
        tol: float = 0.0001,
        w_init: Optional[np.ndarray] = None,
        random_state: RandomStateType = None,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class IncrementalPCA:
    """Typed signature stub (mirrors sklearn.decomposition.IncrementalPCA)."""

    # Fitted attributes (set after calling fit/partial_fit).
    components_: np.ndarray
    explained_variance_: np.ndarray
    explained_variance_ratio_: np.ndarray
    singular_values_: np.ndarray
    mean_: np.ndarray
    var_: np.ndarray
    noise_variance_: float
    n_components_: int
    n_samples_seen_: int

    def __init__(
        self,
        n_components: Optional[int] = None,
        whiten: bool = False,
        copy: bool = True,
        batch_size: Optional[int] = None,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class KernelPCA:
    """Typed signature stub (mirrors sklearn.decomposition.KernelPCA)."""

    # Fitted attributes (set after calling fit on the real estimator).
    lambdas_: np.ndarray
    alphas_: np.ndarray
    dual_coef_: np.ndarray
    X_transformed_fit_: np.ndarray
    X_fit_: np.ndarray

    def __init__(
        self,
        # Fixed: Optional[None] collapses to plain NoneType and would reject
        # every real value; n_components and max_iter accept integers.
        n_components: Optional[int] = None,
        kernel: Literal[
            "linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"
        ] = "linear",
        gamma: Optional[float] = None,
        degree: int = 3,
        coef0: float = 1,
        kernel_params: Optional[dict] = None,
        alpha: float = 1.0,
        fit_inverse_transform: bool = False,
        eigen_solver: Literal["auto", "dense", "arpack"] = "auto",
        tol: float = 0,
        max_iter: Optional[int] = None,
        remove_zero_eig: bool = False,
        random_state: RandomStateType = None,
        copy_X: bool = True,
        n_jobs: Optional[int] = None,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class LatentDirichletAllocation:
    """Typed signature stub (mirrors sklearn.decomposition.LatentDirichletAllocation)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    n_batch_iter_: int
    n_iter_: int
    bound_: float
    doc_topic_prior_: float
    topic_word_prior_: float

    def __init__(
        self,
        n_components: int = 10,
        doc_topic_prior: Optional[float] = None,
        topic_word_prior: Optional[float] = None,
        learning_method: Literal["batch", "online"] = "batch",
        learning_decay: float = 0.7,
        learning_offset: float = 10.0,
        max_iter: int = 10,
        batch_size: int = 128,
        evaluate_every: int = -1,
        total_samples: int = 1_000_000,
        perp_tol: float = 0.1,
        mean_change_tol: float = 0.001,
        max_doc_update_iter: int = 100,
        n_jobs: Optional[int] = None,
        verbose: int = 0,
        random_state: RandomStateType = None,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class MiniBatchDictionaryLearning:
    """Typed signature stub (mirrors sklearn.decomposition.MiniBatchDictionaryLearning)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    inner_stats_: tuple
    n_iter_: int
    iter_offset_: int
    random_state_: np.random.RandomState

    def __init__(
        self,
        # Fixed: Optional[None] collapses to plain NoneType and would reject
        # every real value; n_components accepts integers.
        n_components: Optional[int] = None,
        alpha: float = 1,
        n_iter: int = 1000,
        fit_algorithm: Literal["lars", "cd"] = "lars",
        n_jobs: Optional[int] = None,
        batch_size: int = 3,
        shuffle: bool = True,
        dict_init: Optional[np.ndarray] = None,
        transform_algorithm: Literal[
            "lasso_lars", "lasso_cd", "lars", "omp", "threshold"
        ] = "omp",
        transform_n_nonzero_coefs: Optional[int] = None,
        transform_alpha: Optional[float] = None,
        verbose: bool = False,
        split_sign: bool = False,
        random_state: RandomStateType = None,
        positive_code: bool = False,
        positive_dict: bool = False,
        transform_max_iter: int = 1000,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class MiniBatchSparsePCA:
    """Typed signature stub (mirrors sklearn.decomposition.MiniBatchSparsePCA)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    n_iter_: int
    mean_: np.ndarray

    def __init__(
        self,
        n_components: Optional[int] = None,
        alpha: int = 1,
        ridge_alpha: float = 0.01,
        n_iter: int = 100,
        callback: Optional[Callable] = None,
        batch_size: int = 3,
        verbose: Union[int, bool] = False,
        shuffle: bool = True,
        n_jobs: Optional[int] = None,
        method: Literal["lars", "cd"] = "lars",
        random_state: RandomStateType = None,
        # "deprecated" sentinel kept as in the upstream signature.
        normalize_components: str = "deprecated",
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class NMF:
    """Typed signature stub (mirrors sklearn.decomposition.NMF)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    n_components_: int
    reconstruction_err_: float
    n_iter_: int

    def __init__(
        self,
        n_components: Optional[int] = None,
        init: Optional[
            Literal["random", "nndsvd", "nndsvda", "nndsvdar", "custom", "warn"]
        ] = None,
        solver: Literal["cd", "mu"] = "cd",
        # beta_loss may be given numerically or by name.
        beta_loss: Union[
            float, Literal["frobenius", "kullback-leibler", "itakura-saito"]
        ] = "frobenius",
        tol: float = 0.0001,
        max_iter: int = 200,
        random_state: RandomStateType = None,
        alpha: float = 0.0,
        l1_ratio: float = 0.0,
        verbose: int = 0,
        shuffle: bool = False,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class PCA:
    """Typed signature stub (mirrors sklearn.decomposition.PCA)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    explained_variance_: np.ndarray
    explained_variance_ratio_: np.ndarray
    singular_values_: np.ndarray
    mean_: np.ndarray
    n_components_: np.ndarray
    n_features_: int
    n_samples_: int
    noise_variance_: float

    def __init__(
        self,
        # int count, float variance fraction, None, or the "mle" estimator.
        n_components: Union[int, float, None, Literal["mle"]] = None,
        copy: bool = True,
        whiten: bool = False,
        svd_solver: Literal["auto", "full", "arpack", "randomized"] = "auto",
        tol: float = 0.0,
        iterated_power: Union[int, Literal["auto"]] = "auto",
        random_state: RandomStateType = None,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class SparseCoder:
    """Typed signature stub (mirrors sklearn.decomposition.SparseCoder)."""

    # Fitted attribute: the (fixed) dictionary atoms.
    components_: np.ndarray

    def __init__(
        self,
        # The dictionary is required; all other parameters have defaults.
        dictionary: np.ndarray,
        transform_algorithm: Literal[
            "lasso_lars", "lasso_cd", "lars", "omp", "threshold"
        ] = "omp",
        transform_n_nonzero_coefs: Optional[int] = None,
        transform_alpha: Optional[float] = None,
        split_sign: bool = False,
        n_jobs: Optional[int] = None,
        positive_code: bool = False,
        transform_max_iter: int = 1000,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class SparsePCA:
    """Typed signature stub (mirrors sklearn.decomposition.SparsePCA)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    error_: np.ndarray
    n_iter_: int
    mean_: np.ndarray

    def __init__(
        self,
        n_components: Optional[int] = None,
        alpha: float = 1,
        ridge_alpha: float = 0.01,
        max_iter: int = 1000,
        tol: float = 1e-08,
        method: Literal["lars", "cd"] = "lars",
        n_jobs: Optional[int] = None,
        U_init: Optional[np.ndarray] = None,
        V_init: Optional[np.ndarray] = None,
        verbose: Union[int, bool] = False,
        random_state: RandomStateType = None,
        # "deprecated" sentinel kept as in the upstream signature.
        normalize_components: str = "deprecated",
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
class TruncatedSVD:
    """Typed signature stub (mirrors sklearn.decomposition.TruncatedSVD)."""

    # Fitted attributes (set after calling fit on the real estimator).
    components_: np.ndarray
    explained_variance_: np.ndarray
    explained_variance_ratio_: np.ndarray
    singular_values_: np.ndarray

    def __init__(
        self,
        n_components: int = 2,
        algorithm: Literal["arpack", "randomized"] = "randomized",
        n_iter: int = 5,
        random_state: RandomStateType = None,
        tol: float = 0.0,
    ):
        # Stub only: no behavior, just the annotated signature.
        ...
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 27 20:54:30 2019
@author: AR
"""
import cv2
import numpy as np
def get_color(image):
    """Print and return the dominant color of *image*.

    The original body called non-existent APIs (``cv2.kmeans(n_clusters=1)``
    is sklearn's KMeans signature, and ``cv2.Counter`` / ``fit_predict`` /
    ``cluster_centers_`` do not exist on cv2), so it crashed on first use.
    K-means with a single cluster reduces to the arithmetic mean of all
    pixels, so the dominant color is computed directly with NumPy.

    Parameters
    ----------
    image : numpy.ndarray of shape (H, W, 3)
        Input image; channel order follows the input (BGR when loaded
        via ``cv2.imread``).

    Returns
    -------
    list of one numpy.ndarray of shape (3,)
        Per-channel mean in the value range of the input (e.g. 0-255).

    Raises
    ------
    ValueError
        If the image contains no pixels.
    """
    # Flatten to (H*W, 3); float64 avoids uint8 overflow in the sum.
    pixels = np.asarray(image, dtype=np.float64).reshape(-1, 3)
    if pixels.size == 0:
        raise ValueError("image contains no pixels")
    # Single-cluster k-means center == mean color over all pixels.
    rgb_colors = [pixels.mean(axis=0)]
    print(rgb_colors)
    return rgb_colors
# --- Script body: detect colored squares in a screenshot and classify the
# --- color of each detected green square via its mean YUV-like channels.
# NOTE(review): hard-coded absolute path — the script only works on the
# original author's machine; consider a CLI argument.
img = cv2.imread('/Users/AR/Desktop/Screenshot 2019-04-27 at 9.45.14 PM.png', cv2.IMREAD_COLOR)
#img = cv2.resize(img, (1280,800))
font = cv2.FONT_HERSHEY_SIMPLEX
#roi = img[110:340, 95:300]
#
#print(roi)
# Work in HSV: hue-based thresholds are far more robust than raw BGR.
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Per-color HSV ranges (hue, saturation, value lower/upper bounds).
green_lower = np.array([40,40,40])
green_upper = np.array([80,255,255])
green = cv2.inRange(hsv, green_lower, green_upper)
blue_lower = np.array([97, 100, 117])
blue_upper = np.array([117,255,255])
blue = cv2.inRange(hsv, blue_lower, blue_upper)
white_lower = np.array([0,0,230])
white_upper = np.array([255,15,255])
white = cv2.inRange(hsv, white_lower, white_upper)
# NOTE(review): the 3-value unpacking matches OpenCV 3.x; OpenCV 4.x
# findContours returns only (contours, hierarchy) — confirm the cv2 version.
_,contours_green,_=cv2.findContours(green,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
_,contours_blue,_=cv2.findContours(blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#_,contours,_=cv2.findContours(white,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#for contour in contours:
#    area = cv2.contourArea(contour)
#    if area>300 and area<3000:
#        x,y,w,h = cv2.boundingRect(contour)
#        img = cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),5)
#for contour in contours_blue:
#    area = cv2.contourArea(contour)
#    if area>300:
#        approx = cv2.approxPolyDP(contour, 0.05*cv2.arcLength(contour, True), True)
#        cv2.drawContours(img, [approx], -1, (0,0,255), 5)
#        x = approx.ravel()[0]
#        y = approx.ravel()[1]
#        if len(approx) == 4:
#            cv2.putText(img, "Rectangle", (x, y), font, 1, (0))
for contour in contours_green:
    # Ignore tiny specks; only sizable contours are candidate squares.
    area = cv2.contourArea(contour)
    if area>300:
        # Polygon simplification: 4 vertices => quadrilateral.
        approx = cv2.approxPolyDP(contour, 0.1*cv2.arcLength(contour, True), True)
        if len(approx) == 4:
            (x, y, w, h) = cv2.boundingRect(approx)
            # Aspect ratio close to 1 => (roughly) a square.
            ar = w/float(h)
            if ar >= 0.8 and ar <= 1.2:
                cv2.putText(img, "Green", (x, y), font, 1, (255,255,255))
                cv2.drawContours(img, [approx], 0, (0,255,0), 60)
                x = int(x)
                y = int(y)
                # 40x40 patch centered near the square's top-left corner.
                # NOTE(review): y-20/x-20 can go negative and silently yield
                # an empty slice near image borders — verify.
                cube = img[y-20:y+20, x-20:x+20]
#                cube = cv2.resize(cube, (10,10))
#                cube = cv2.cvtColor(cube, cv2.COLOR_BGR2YUV)
                cv2.imshow('cube', cube)
                # cv2.mean returns 4 values (one per channel + alpha slot);
                # the names suggest YUV but the patch is still BGR here —
                # the cvtColor above is commented out. TODO confirm intent.
                (Y,U,V,DA)= cv2.mean(cube)
                print((Y,U,V,DA))
                # Heuristic channel-ratio thresholds to classify the cube
                # face color (W/B/G/O/R/Y).
                if Y > 120 and float(U/V)>0.9 :
                    print('W')
                elif U > 130 and U > V and float (U/Y)> 1.15:
                    print("B")
                elif float(U/V) > 1.1 and float(U/V) < 2:
                    print("G")
                elif V > 120 and float (U/Y) > 0.7:
                    if float(U/Y) < 1.9:
                        print("O")
                    else:
                        print('R')
                elif Y > 110 and float(V/U)>0.95:
                    print('Y')
# Show the annotated image until a key is pressed, then clean up windows.
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<reponame>ovinc/imgbasics
"""Cropping image tools and related functions."""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from drapo import Cursor, rinput
# ======================= IMCROP and related functions =======================
def _cropzone_draw(ax, cropzone, c='r', linewidth=2):
    """Draw the crop rectangle on *ax* and return the matplotlib patch."""
    left, top, width, height = cropzone
    # The half-pixel shift makes the rectangle run along pixel edges,
    # since imshow centers pixel (x, y) at coordinate (x, y).
    rect = patches.Rectangle(
        (left - 0.5, top - 0.5), width, height,
        linewidth=linewidth, edgecolor=c, facecolor='none',
    )
    ax.add_patch(rect)
    ax.figure.canvas.draw()
    return rect
def imcrop(*args, cmap=None, c='r', closefig=True, cursor=None,
           draggable=False, message='Crop Image', ax=None):
    """Interactive (or not)image cropping function using Numpy and Matplotlib.
    The *args allow to use the function in the two following ways:
    Main parameters (*args)
    -----------------------
    Depending on how the function is called, the cropping is interactive
    (manual selection on image) or imperative (crop zone (x, y, w, h) as input):
    *INTERACTIVE*
    `img_crop, cropzone = imcrop(img)`
    Input --> image (numpy array or equivalent)
    Output --> tuple (cropped image, crop rectangle (x, y, w, h))
    *IMPERATIVE*
    `img_crop = imcrop(img, cropzone)`
    Input --> image (numpy array or equivalent), crop zone (x, y, w, h)
    Output --> cropped image
    Other optional parameters
    -------------------------
    - cmap: colormap to display image in matplotlib imshow
    - c: color of lines / cursors in interactive mode
    - closefig: if True (default), close figure at end of interactive selection
    - cursor: appears to help selection by default but not in draggable mode
    (but can be forced in draggable mode by setting it to true, or can be
    completely suppressed by setting it to False). Default: None.
    - draggable: if True, use a draggable rectangle instead of clicks
    (only in interactive mode, see above)
    - message: message to show as title of the matplotlib window
    (only in interactive mode, see above)
    - ax: if not None, image shown in the ax matplotlib axes
    (only in interactive mode, see above)
    Note: when selecting, the pixels taken into account are those which have
    their centers closest to the click, not their edges closest to the click.
    For example, to crop to a single pixel, one needs to click two times in
    this pixel (possibly at the same location). For images with few pixels,
    this results in a visible offset between the dotted lines plotted after the
    clicks (running through the centers of the pixels clicked) and the final
    rectangle which runs along the edges of all pixels selected.
    Contrary to the Matlab imcrop function, the cropped rectangle is really of
    the width and height requested (w*h), not w+1 and h+1 as in Matlab.
    """
    img = args[0]  # load image
    sy, sx, *_ = img.shape  # size of image in pixels
    # Two positional args => the caller supplied the crop zone (imperative).
    if len(args) == 2:
        interactive = False
        xmin, ymin, w, h = args[1]
    else:
        interactive = True
    if interactive:  # Interactive Drawing of Crop Rectangle -----------------
        if ax is None:
            fig, ax = plt.subplots()
        else:
            fig = ax.figure
        if img.ndim == 2:  # grayscale image, use grayscale colormap
            cmap = 'gray' if cmap is None else cmap
        ax.imshow(img, cmap=cmap)
        ax.set_title(message)
        ax.set_xlabel('Click 2 pts to define crop (opposite corners of rectangle)')
        # Manage cursor visibility depending on mode -------------------------
        # cursor=None means "auto": shown for click mode, hidden for drag mode.
        if cursor is None:
            cursor = False if draggable else True
        if cursor:
            Cursor()
        # --------------------------------------------------------------------
        if draggable:
            # rinput returns the dragged rectangle as (x, y, width, height).
            x_min, y_min, _w, _h = rinput(c=c)
            x_max = x_min + _w
            y_max = y_min + _h
        else:
            clicks = []
            for i in range(2):  # two clicks for two corners
                [(x_click, y_click)] = plt.ginput(1)
                clicks.append((x_click, y_click))
                # now, draw for visual clues ---------------------------------
                x_draw, y_draw = round(x_click), round(y_click)
                # draw lines corresponding to click (the -1/2 are used so that
                # the lines extend to the edges of the pixels)
                ax.plot([-1 / 2, sx - 1 / 2], [y_draw, y_draw], ':', color=c)
                ax.plot([x_draw, x_draw], [-1 / 2, sy - 1 / 2], ':', color=c)
                fig.canvas.draw()
            # Corners may be clicked in any order; sort each axis.
            [(x1, y1), (x2, y2)] = clicks
            x_min, x_max = sorted((x1, x2))
            y_min, y_max = sorted((y1, y2))
        # Now, get pixels correspongind to clicks (center of a pixel is a
        # round number)
        xmin, xmax, ymin, ymax = [int(round(z)) for z in (x_min, x_max,
                                                          y_min, y_max)]
        # Calculate witdh and height in pixels
        w = xmax - xmin + 1
        h = ymax - ymin + 1
        cropzone = xmin, ymin, w, h
        _cropzone_draw(ax, cropzone, c)
        if closefig:
            # Short pause so the final rectangle is visible before closing.
            plt.pause(0.1)
            plt.close(fig)
    # Now, in all cases, crop image to desired dimensions --------------------
    img_crop = img[ymin: ymin + h, xmin: xmin + w]
    if not interactive:
        return img_crop
    else:
        return img_crop, cropzone
|
# -*- coding: utf-8 -*-
"""
mslib.mscolab.seed
~~~~~~~~~~~~~~~~~~~~
Seeder utility for database
This file is part of mss.
:copyright: Copyright 2019 <NAME>
:copyright: Copyright 2019-2021 by the mss team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import fs
from flask import Flask
import git
from sqlalchemy.exc import IntegrityError
from mslib.mscolab.conf import mscolab_settings
from mslib.mscolab.models import User, db, Permission, Project
# Flask app used by the helpers below solely to configure and bind the
# SQLAlchemy database (each helper sets app.config and calls db.init_app).
app = Flask(__name__, static_url_path='')
def add_all_users_to_all_projects(access_level='collaborator'):
    """Add all users to every project at the given access level.

    The TEMPLATE project always receives 'admin'; every other project uses
    *access_level*. (Previously the parameter was silently overwritten with
    'collaborator' inside the loop, so callers could never choose a level.)

    Parameters
    ----------
    access_level : str
        Permission level to grant on non-TEMPLATE projects
        (default 'collaborator').
    """
    app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    with app.app_context():
        # Collect the project paths first, then close the session so each
        # add_all_users_default_project call manages its own context.
        all_projects = Project.query.all()
        all_path = [project.path for project in all_projects]
        db.session.close()
    for path in all_path:
        # TEMPLATE is the special user-handling project and needs admin.
        level = 'admin' if path == "TEMPLATE" else access_level
        add_all_users_default_project(path=path, access_level=level)
def add_all_users_default_project(path='TEMPLATE', description="Project to keep all users", access_level='admin'):
    """ on db level we add all users to the project TEMPLATE for user handling"""
    app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    with app.app_context():
        # Create the project (DB row + backing file store + git repo) if it
        # does not exist yet.
        project_available = Project.query.filter_by(path=path).first()
        if not project_available:
            project = Project(path, description)
            db.session.add(project)
            db.session.commit()
            with fs.open_fs(mscolab_settings.MSCOLAB_DATA_DIR) as file_dir:
                if not file_dir.exists(path):
                    file_dir.makedir(path)
                    file_dir.writetext(f'{path}/main.ftml', mscolab_settings.STUB_CODE)
                    # initiate git
                    r = git.Repo.init(fs.path.join(mscolab_settings.DATA_DIR, 'filedata', path))
                    r.git.clear_cache()
                    r.index.add(['main.ftml'])
                    r.index.commit("initial commit")
        project = Project.query.filter_by(path=path).first()
        p_id = project.id
        # Outer join on Permission and keep only users with NO permission on
        # this project yet, so existing grants are never overwritten.
        user_list = User.query \
            .join(Permission, (User.id == Permission.u_id) & (Permission.p_id == p_id), isouter=True) \
            .add_columns(User.id, User.username) \
            .filter(Permission.u_id.is_(None))
        new_u_ids = [user.id for user in user_list]
        new_permissions = []
        for u_id in new_u_ids:
            new_permissions.append(Permission(u_id, project.id, access_level))
        db.session.add_all(new_permissions)
        try:
            db.session.commit()
            # Success: permissions granted for all previously-unlinked users.
            return True
        except IntegrityError as err:
            # Likely a concurrent grant; roll back and log (best effort).
            db.session.rollback()
            logging.debug(f"Error writing to db: {err}")
        db.session.close()
def delete_user(email):
    """Delete the user with the given e-mail address, if present."""
    app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    with app.app_context():
        matching_user = User.query.filter_by(emailid=str(email)).first()
        if matching_user:
            print(f"User: {email} deleted from db")
            db.session.delete(matching_user)
            db.session.commit()
        db.session.close()
def add_user(email, username, password):
    """Insert a new user unless the e-mail or username is already taken."""
    app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    template = f"""
    "MSCOLAB_mailid": "{email}",
    "MSCOLAB_password": "{password}",
    """
    with app.app_context():
        user_email_exists = User.query.filter_by(emailid=str(email)).first()
        user_name_exists = User.query.filter_by(username=str(username)).first()
        # Guard clause: refuse duplicates by either e-mail or username.
        if user_email_exists or user_name_exists:
            print(f"{user_name_exists} already in db")
            return
        db.session.add(User(email, username, password))
        db.session.commit()
        db.session.close()
        print(f"Userdata: {email} {username} {password}")
        print(template)
def seed_data():
    """Populate the database with fixed test users, projects and permissions,
    then create the matching file-store directories and git repositories."""
    app.config['SQLALCHEMY_DATABASE_URI'] = mscolab_settings.SQLALCHEMY_DB_URI
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    db.init_app(app)
    with app.app_context():
        # create users
        # Fixed ids (8-17) so the permission records below can reference them.
        users = [{
            'username': 'a',
            'id': 8,
            'password': 'a',
            'emailid': 'a'
        }, {
            'username': 'b',
            'id': 9,
            'password': 'b',
            'emailid': 'b'
        }, {
            'username': 'c',
            'id': 10,
            'password': 'c',
            'emailid': 'c'
        }, {
            'username': 'd',
            'id': 11,
            'password': 'd',
            'emailid': 'd'
        }, {
            'username': 'test1',
            'id': 12,
            'password': '<PASSWORD>',
            'emailid': 'test1'
        }, {
            'username': 'test2',
            'id': 13,
            'password': '<PASSWORD>',
            'emailid': 'test2'
        }, {
            'username': 'test3',
            'id': 14,
            'password': '<PASSWORD>',
            'emailid': 'test3'
        }, {
            'username': 'test4',
            'id': 15,
            'password': '<PASSWORD>',
            'emailid': 'test4'
        }, {
            'username': 'mscolab_user',
            'id': 16,
            'password': 'password',
            'emailid': 'mscolab_user'
        }, {
            'username': 'merge_waypoints_user',
            'id': 17,
            'password': 'password',
            'emailid': 'merge_waypoints_user'
        }]
        for user in users:
            # User(...) takes (emailid, username, password); the id is forced
            # afterwards to keep the fixture ids stable.
            db_user = User(user['emailid'], user['username'], user['password'])
            db_user.id = user['id']
            db.session.add(db_user)
        # create projects
        projects = [{
            'id': 1,
            'path': 'one',
            'description': 'a, b'
        }, {
            'id': 2,
            'path': 'two',
            'description': 'b, c'
        }, {
            'id': 3,
            'path': 'three',
            'description': 'a, c'
        }, {
            'id': 4,
            'path': 'four',
            'description': 'd'
        }, {
            'id': 5,
            'path': 'Admin_Test',
            'description': 'Project for testing admin window'
        }, {
            'id': 6,
            'path': 'test_mscolab',
            'description': 'Project for testing mscolab main window'
        }]
        for project in projects:
            db_project = Project(project['path'], project['description'])
            db_project.id = project['id']
            db.session.add(db_project)
        # create permissions
        # Links users (u_id) to projects (p_id) with an access level.
        permissions = [{
            'u_id': 8,
            'p_id': 1,
            'access_level': "creator"
        }, {
            'u_id': 9,
            'p_id': 1,
            'access_level': "collaborator"
        }, {
            'u_id': 9,
            'p_id': 2,
            'access_level': "creator"
        }, {
            'u_id': 10,
            'p_id': 2,
            'access_level': "collaborator"
        }, {
            'u_id': 10,
            'p_id': 3,
            'access_level': "creator"
        }, {
            'u_id': 8,
            'p_id': 3,
            'access_level': "collaborator"
        }, {
            'u_id': 10,
            'p_id': 1,
            'access_level': "viewer"
        }, {
            'u_id': 11,
            'p_id': 4,
            'access_level': 'creator'
        }, {
            'u_id': 8,
            'p_id': 4,
            'access_level': 'admin'
        }, {
            'u_id': 13,
            'p_id': 3,
            'access_level': 'viewer'
        }, {
            'u_id': 12,
            'p_id': 5,
            'access_level': 'creator'
        }, {
            'u_id': 12,
            'p_id': 3,
            'access_level': 'collaborator'
        }, {
            'u_id': 15,
            'p_id': 5,
            'access_level': 'viewer'
        }, {
            'u_id': 14,
            'p_id': 3,
            'access_level': 'collaborator'
        }, {
            'u_id': 15,
            'p_id': 3,
            'access_level': 'collaborator'
        }, {
            'u_id': 16,
            'p_id': 6,
            'access_level': 'creator'
        }, {
            'u_id': 17,
            'p_id': 6,
            'access_level': 'admin'
        }]
        for perm in permissions:
            db_perm = Permission(perm['u_id'], perm['p_id'], perm['access_level'])
            db.session.add(db_perm)
        db.session.commit()
        db.session.close()
    # Create the on-disk file store and a git repository for each project,
    # each seeded with a stub main.ftml as the initial commit.
    with fs.open_fs(mscolab_settings.MSCOLAB_DATA_DIR) as file_dir:
        file_paths = ['one', 'two', 'three', 'four', 'Admin_Test', 'test_mscolab']
        for file_path in file_paths:
            file_dir.makedir(file_path)
            file_dir.writetext(f'{file_path}/main.ftml', mscolab_settings.STUB_CODE)
            # initiate git
            r = git.Repo.init(fs.path.join(mscolab_settings.DATA_DIR, 'filedata', file_path))
            r.git.clear_cache()
            r.index.add(['main.ftml'])
            r.index.commit("initial commit")
|
from pathlib import Path
from fastjsonschema import compile as compile_schema, JsonSchemaException
import jsonref as json
import pytest
# Location of the registration JSON schemas, resolved to an absolute path so
# it can be turned into a file:// base URI for $ref resolution.
# NOTE(review): unused in this chunk — the tests receive the folder via the
# registration_schema_folder fixture; confirm where the fixture is defined.
SCHEMA_FOLDER = Path("../schema.igsn.org/json/registration/0.1/").absolute()
def get_validator(base_folder, schema_file, defn):
    """Compile a validator for one named definition inside *schema_file*."""
    # Wrap the definition in a $ref document; jsonref resolves it against
    # the schema file's file:// URI.
    ref_document = f'{{ "$ref": "#/definitions/{defn}" }}'
    schema_uri = (base_folder / schema_file).as_uri()
    resolved = json.loads(ref_document, base_uri=schema_uri)
    return compile_schema(resolved)
def check(validator, obj, expected):
    """ Test a definition in our JSON schema using some examples
    Examples take the form of a Python object and the expected
    validation outcome (i.e. True or False)
    Parameters:
        validator - A validation instance
        obj - the object to check
        expected - True if the object should validate, False
            otherwise
    Raises:
        AssertionError - when the validation outcome differs from *expected*
    """
    try:
        validator(obj)
    except JsonSchemaException as err:
        if expected:
            # Fixed: fastjsonschema exposes `message` as a string attribute,
            # not a method — `err.message()` raised TypeError and hid the
            # real validation report.
            raise AssertionError(
                f"Object {obj} failed to validate. Error is {err.message}"
            )
    else:
        # Moved out of the try body so only validator(obj) is guarded.
        if not expected:
            raise AssertionError(f"Object {obj} unexpectedly validated")
# Examples cover bare ORCID iDs (dashed and spaced) plus http/https URL
# forms; malformed digits and over-long ids must be rejected.
@pytest.mark.parametrize(
    "obj,expected",
    [
        ({"kind": "orcid", "id": "0234-4568-7895-1655"}, True),
        ({"kind": "orcid", "id": "0234 4568 7895 1655"}, True),
        ({"kind": "orcid", "id": "0234-XXXXD-7895-1655"}, False),
        ({"kind": "orcid", "id": "0234-4581-7895-1655-4561"}, False),
        ({"kind": "orcid", "id": "http://orcid.com/0234-4568-7895-1655"}, True),
        ({"kind": "orcid", "id": "http://orcid.com/0234 4568 7895 1655"}, False),
        ({"kind": "orcid", "id": "http://orcid.com/0234-XXXX-7895-1655"}, False),
        ({"kind": "orcid", "id": "http://orcid.com/0234-4581-7895-1655-4561"}, False),
        ({"kind": "orcid", "id": "https://orcid.com/0234-4568-7895-1655"}, True),
        ({"kind": "orcid", "id": "https://orcid.com/0234 4568 7895 1655"}, False),
        ({"kind": "orcid", "id": "https://orcid.com/0234-XXXX-7895-1655"}, False),
        ({"kind": "orcid", "id": "https://orcid.com/0234-4581-7895-1655-4561"}, False),
    ],
)
def test_orcids(registration_schema_folder, obj, expected):
    """Validate the 'orcid' definition of identifiers.json against examples."""
    validator = get_validator(registration_schema_folder, "identifiers.json", "orcid")
    check(validator, obj, expected)
# ResearcherId format: letter, then two dash-separated 4-digit groups;
# note the capital-D "researcherID" kind string must be rejected.
@pytest.mark.parametrize(
    "obj,expected",
    [
        ({"kind": "researcherId", "id": "X-1238-2347"}, True),
        ({"kind": "researcherId", "id": "S-4816-2540"}, True),
        ({"kind": "researcherID", "id": "S-4816-2540"}, False),
        ({"kind": "researcherId", "id": "S-XXXX-7895-1655"}, False),
        ({"kind": "researcherId", "id": "S-4581-7895-1655"}, False),
        ({"kind": "researcherId", "id": "lorem ipsum"}, False)
    ]
)
def test_researcherId(registration_schema_folder, obj, expected):
    """Validate the 'researcherId' definition of identifiers.json against examples."""
    validator = get_validator(
        registration_schema_folder, "identifiers.json", "researcherId"
    )
    check(validator, obj, expected)
|
<filename>app/models.py
from sqlalchemy import Column, Integer, String, Table, ForeignKey, CHAR
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
# Declarative base class shared by every ORM model in this module.
Base = declarative_base()
class LexicalEntry(Base):
    """A dictionary headword entry; parent of lemmas, word forms and
    form representations via backrefs defined on those models.

    Field names are Slovene lexicographic terms — presumably: kljuc=key,
    zapis=spelling, besedna_vrsta=part of speech, vrsta=type, norma=norm,
    vid=aspect, spol/stevilo_svojine=possessive gender/number. TODO confirm.
    """
    __tablename__ = "lexical_entries"

    # 35-char external identifier (not auto-generated).
    id = Column(CHAR(35), primary_key=True)
    kljuc = Column(String(64), index=True)
    zapis = Column(String(32), nullable=True)
    besedna_vrsta = Column(String(32), nullable=True)
    vrsta = Column(String(32), nullable=True)
    norma = Column(String(32), nullable=True)
    SPSP = Column(String(32), nullable=True)
    SP2001 = Column(String(32), nullable=True)
    tip = Column(String(32), nullable=True)
    vid = Column(String(32), nullable=True)
    spol_svojine = Column(String(32), nullable=True)
    stevilo_svojine = Column(String(32), nullable=True)

    def __repr__(self):
        return f"<LexicalEntry {self.id}>"
class Lemma(Base):
    """Canonical (dictionary) form of a lexical entry, with up to four
    stressed-word variants."""
    __tablename__ = "lemmas"

    id = Column(Integer, primary_key=True)
    # Written form of the lemma.
    zapis_oblike = Column(String(255), index=True)
    # Up to four accented spellings (presumably stress-mark variants — TODO confirm).
    naglasena_beseda_1 = Column(String(255), nullable=True)
    naglasena_beseda_2 = Column(String(255), nullable=True)
    naglasena_beseda_3 = Column(String(255), nullable=True)
    naglasena_beseda_4 = Column(String(255), nullable=True)
    lexical_entry_id = Column(
        CHAR(35), ForeignKey("lexical_entries.id"), nullable=False, index=True
    )
    # Deleting a LexicalEntry cascades to its lemmas (delete-orphan).
    lexical_entry = relationship(
        "LexicalEntry",
        backref=backref("lemmas", cascade="all, delete-orphan", lazy="dynamic"),
    )

    def __repr__(self):
        return f"<Lemma {self.zapis_oblike}/>"
class WordForm(Base):
    """One inflected form of a lexical entry, described by its
    morphosyntactic features (MSD tag plus individual categories)."""
    __tablename__ = "word_forms"

    id = Column(Integer, primary_key=True)
    # Morphosyntactic description tag.
    msd = Column(String(32), nullable=True)
    # Grammatical categories (Slovene terms — presumably: stevilo=number,
    # stopnja=degree, sklon=case, dolocnost=definiteness, nikalnost=negation,
    # zivost=animacy, spol=gender, oblika=form, oseba=person. TODO confirm).
    stevilo = Column(String(32), nullable=True)
    stopnja = Column(String(32), nullable=True)
    sklon = Column(String(32), nullable=True)
    dolocnost = Column(String(32), nullable=True)
    nikalnost = Column(String(32), nullable=True)
    zivost = Column(String(32), nullable=True)
    spol = Column(String(32), nullable=True)
    spol_svojine = Column(String(32), nullable=True)
    stevilo_svojine = Column(String(32), nullable=True)
    oblika = Column(String(32), nullable=True)
    oseba = Column(String(32), nullable=True)
    lexical_entry_id = Column(
        CHAR(35), ForeignKey("lexical_entries.id"), nullable=False, index=True
    )
    # Deleting a LexicalEntry cascades to its word forms (delete-orphan).
    lexical_entry = relationship(
        "LexicalEntry",
        backref=backref("word_forms", cascade="all, delete-orphan", lazy="dynamic"),
    )

    def __repr__(self):
        return f"<WordForm {self.msd}/>"
class FormRepresentation(Base):
    """Concrete written/phonetic realisation of a word form, with accented
    spellings and SAMPA/IPA transcriptions (up to four variants each)."""
    __tablename__ = "form_representations"

    id = Column(Integer, primary_key=True)
    # Written form.
    zapis_oblike = Column(String(255), index=True)
    SPSP = Column(String(32), nullable=True)
    norma = Column(String(32), nullable=True)
    tip = Column(String(32), nullable=True)
    # Corpus frequency (presumably — TODO confirm); defaults to 0.
    pogostnost = Column(Integer, default=0)
    naglasena_beseda_1 = Column(String(255), nullable=True)
    naglasena_beseda_2 = Column(String(255), nullable=True)
    naglasena_beseda_3 = Column(String(255), nullable=True)
    naglasena_beseda_4 = Column(String(255), nullable=True)
    # Phonetic transcriptions in SAMPA and IPA notation.
    SAMPA_1 = Column(String(255), nullable=True)
    SAMPA_2 = Column(String(255), nullable=True)
    SAMPA_3 = Column(String(255), nullable=True)
    SAMPA_4 = Column(String(255), nullable=True)
    IPA_1 = Column(String(255), nullable=True)
    IPA_2 = Column(String(255), nullable=True)
    IPA_3 = Column(String(255), nullable=True)
    IPA_4 = Column(String(255), nullable=True)
    word_form_id = Column(
        Integer, ForeignKey("word_forms.id"), nullable=False, index=True
    )
    word_form = relationship(
        "WordForm",
        backref=backref(
            "form_representations", cascade="all, delete-orphan", lazy="dynamic"
        ),
    )
    # Direct link to the entry as well as via word_form (denormalized).
    lexical_entry_id = Column(
        CHAR(35), ForeignKey("lexical_entries.id"), nullable=False, index=True
    )
    lexical_entry = relationship(
        "LexicalEntry",
        backref=backref(
            "form_representations", cascade="all, delete-orphan", lazy="dynamic"
        ),
    )
|
<gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmdet.ops import arb_batched_nms
from mmdet.core import obb2hbb
from mmdet.models.builder import HEADS
from .obb_anchor_head import OBBAnchorHead
from ..rpn_test_mixin import RPNTestMixin
@HEADS.register_module()
class OrientedRPNHead(RPNTestMixin, OBBAnchorHead):
"""RPN head.
Args:
in_channels (int): Number of channels in the input feature map.
""" # noqa: W605
    def __init__(self, in_channels, **kwargs):
        """Initialize with a single foreground class and oriented boxes.

        Args:
            in_channels (int): Number of channels in the input feature map.
            **kwargs: Forwarded to OBBAnchorHead.
        """
        # num_classes=1 (RPN is class-agnostic), obb boxes regressed with
        # 6 parameters; background label is 0 (RPN convention, see the
        # softmax comment in _get_bboxes_single).
        super(OrientedRPNHead, self).__init__(
            1,
            in_channels,
            bbox_type='obb',
            reg_dim=6,
            background_label=0,
            **kwargs)
    def _init_layers(self):
        """Initialize layers of the head.

        One shared 3x3 conv, then two 1x1 heads: classification
        (num_anchors * cls_out_channels) and oriented-box regression
        (num_anchors * 6, matching reg_dim=6 from __init__).
        """
        self.rpn_conv = nn.Conv2d(
            self.in_channels, self.feat_channels, 3, padding=1)
        self.rpn_cls = nn.Conv2d(self.feat_channels,
                                 self.num_anchors * self.cls_out_channels, 1)
        self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 6, 1)
def init_weights(self):
"""Initialize weights of the head."""
normal_init(self.rpn_conv, std=0.01)
normal_init(self.rpn_cls, std=0.01)
normal_init(self.rpn_reg, std=0.01)
def forward_single(self, x):
"""Forward feature map of a single scale level."""
x = self.rpn_conv(x)
x = F.relu(x, inplace=True)
rpn_cls_score = self.rpn_cls(x)
rpn_bbox_pred = self.rpn_reg(x)
return rpn_cls_score, rpn_bbox_pred
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute losses of the head.
        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level with shape (N, num_anchors * 4, H, W)
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        # gt_labels is passed as None: the RPN is class-agnostic.
        losses = super(OrientedRPNHead, self).loss(
            cls_scores,
            bbox_preds,
            gt_bboxes,
            None,
            img_metas,
            gt_bboxes_ignore=gt_bboxes_ignore)
        # Rename keys so RPN losses are distinguishable in the total loss dict.
        return dict(
            loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
"""Transform outputs for a single batch item into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (num_anchors * 4, H, W).
mlvl_anchors (list[Tensor]): Box reference for each scale level
with shape (num_total_anchors, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
scale_factor (ndarray): Scale factor of the image arange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Returns:
Tensor: Labeled boxes in shape (n, 5), where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1.
"""
cfg = self.test_cfg if cfg is None else cfg
# bboxes from different level should be independent during NMS,
# level_ids are used as labels for batched NMS to separate them
level_ids = []
mlvl_scores = []
mlvl_bbox_preds = []
mlvl_valid_anchors = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
# we set FG labels to [0, num_class-1] and BG label to
# num_class in other heads since mmdet v2.0, However we
# keep BG label as 0 and FG label as 1 in rpn head
scores = rpn_cls_score.softmax(dim=1)[:, 1]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, self.reg_dim)
anchors = mlvl_anchors[idx]
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
# sort is faster than topk
# _, topk_inds = scores.topk(cfg.nms_pre)
ranked_scores, rank_inds = scores.sort(descending=True)
topk_inds = rank_inds[:cfg.nms_pre]
scores = ranked_scores[:cfg.nms_pre]
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
mlvl_scores.append(scores)
mlvl_bbox_preds.append(rpn_bbox_pred)
mlvl_valid_anchors.append(anchors)
level_ids.append(
scores.new_full((scores.size(0), ), idx, dtype=torch.long))
scores = torch.cat(mlvl_scores)
anchors = torch.cat(mlvl_valid_anchors)
rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
proposals = self.bbox_coder.decode(
anchors, rpn_bbox_pred, max_shape=img_shape)
ids = torch.cat(level_ids)
if cfg.min_bbox_size > 0:
w, h = proposals[:, 2], proposals[:, 3]
valid_inds = torch.nonzero(
(w >= cfg.min_bbox_size)
& (h >= cfg.min_bbox_size),
as_tuple=False).squeeze()
if valid_inds.sum().item() != len(proposals):
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
ids = ids[valid_inds]
# TODO: remove the hard coded nms type
hproposals = obb2hbb(proposals)
nms_cfg = dict(type='nms', iou_thr=cfg.nms_thr)
_, keep = arb_batched_nms(hproposals, scores, ids, nms_cfg)
dets = torch.cat([proposals, scores[:, None]], dim=1)
dets = dets[keep]
return dets[:cfg.nms_post]
|
import logging
import random
import numpy as np
import torchvision.transforms.functional as tf
from PIL import Image, ImageOps, ImageEnhance
logger = logging.getLogger("Logger")
def get_augmentation(cfg_aug):
    """Build the augmentation pipeline described by ``cfg_aug``.

    Args:
        cfg_aug: Augmentation config mapping. Either a flat mapping of
            ``{op_key: params}``, or a mapping with ``"operations"``,
            ``"max_operations_per_instance"`` and ``"augment_p"`` keys for
            stochastic (sampled-subset) augmentation. ``None`` disables
            augmentation entirely.

    Returns:
        Compose or None: The composed pipeline, or ``None`` when
        ``cfg_aug`` is ``None``.
    """
    if cfg_aug is None:
        logger.info(f'[{"DATA".center(9)}] [augmentation] No Augmentations')
        return None
    # Membership test directly on the mapping; no need to materialize keys.
    if "operations" in cfg_aug:
        max_operations_per_instance = cfg_aug["max_operations_per_instance"]
        augment_p = cfg_aug["augment_p"]
        operations = cfg_aug["operations"]
        logger.info(
            f'[{"DATA".center(9)}] [augmentation] Using Stochastic Augmentation: Max Op {max_operations_per_instance}'
        )
    else:
        max_operations_per_instance = None
        augment_p = None
        operations = cfg_aug
    augmentations = []
    for aug_key, aug_param in operations.items():
        if aug_param:
            # Dict params are forwarded as keyword args; scalars positionally.
            if isinstance(aug_param, dict):
                augmentations.append(key2aug[aug_key](**aug_param))
            else:
                augmentations.append(key2aug[aug_key](aug_param))
            logger.info(
                f'[{"DATA".center(9)}] [augmentation] [operation] {aug_key} [params] {aug_param}'
            )
        else:
            # Falsy param (None/0/False/{}) means the op is disabled.
            logger.info(
                f'[{"DATA".center(9)}] [augmentation] [operation] {aug_key} [NOT ACTIVATED]'
            )
    return Compose(augmentations, max_operations_per_instance, augment_p)
class Compose(object):
    """Apply a randomly sampled subset of augmentation ops to an image
    (and optionally a mask).

    Numpy-array inputs are round-tripped through PIL and returned as numpy
    arrays; PIL inputs are returned as PIL images.

    Args:
        augmentations (list): Augmentation callables.
        max_operations_per_instance (int, optional): Max number of ops
            sampled per call. Defaults to all ops.
        augment_p (float, optional): Per-op application probability.
            Defaults to 1.0 (always apply).
    """

    def __init__(self, augmentations, max_operations_per_instance=None, augment_p=None):
        self.augmentations = augmentations
        self.max_operations_per_instance = (
            max_operations_per_instance if max_operations_per_instance else len(augmentations)
        )
        self.augment_p = augment_p if augment_p else 1.0
        self.PIL2Numpy = False

    def __call__(self, img, mask=None):
        """Augment ``img`` (and ``mask``) and return them in the input's
        array/PIL flavor."""
        # BUGFIX: reset the conversion flag on every call. Previously it was
        # sticky instance state — after one numpy call, every later PIL call
        # would also be converted to numpy on return.
        self.PIL2Numpy = False
        # Clamp so an oversized configured max cannot make random.sample
        # raise ValueError.
        n_ops = min(self.max_operations_per_instance, len(self.augmentations))
        augmentations = random.sample(self.augmentations, n_ops)
        if mask is None:
            if isinstance(img, np.ndarray):
                img = Image.fromarray(img, mode="RGB")
                self.PIL2Numpy = True
            for a in augmentations:
                if random.random() < self.augment_p:
                    img = a(img)
            if self.PIL2Numpy:
                img = np.array(img)
            return img
        else:
            if isinstance(img, np.ndarray):
                img = Image.fromarray(img, mode="RGB")
                mask = Image.fromarray(mask, mode="L")
                self.PIL2Numpy = True
            # Dict "masks" (e.g. multi-target annotations) skip the size check.
            if not isinstance(mask, dict):
                assert img.size == mask.size
            for a in augmentations:
                if random.random() < self.augment_p:
                    img, mask = a(img, mask)
            if self.PIL2Numpy:
                img, mask = np.array(img), np.array(mask, dtype=np.uint8)
            return img, mask
class RandomHorizontallyFlip(object):
    """Flip image and mask left-right together with probability ``p``."""

    def __init__(self, p):
        self.p = p

    def __call__(self, img, mask):
        # Guard clause: most of the time (1 - p) nothing happens.
        if random.random() >= self.p:
            return img, mask
        flipped_img = img.transpose(Image.FLIP_LEFT_RIGHT)
        flipped_mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        return flipped_img, flipped_mask
class RandomHorizontallyFlipOnlyImg(object):
    """Flip only the image left-right with probability ``p`` (mask-free)."""

    def __init__(self, p):
        self.p = p

    def __call__(self, img):
        should_flip = random.random() < self.p
        return img.transpose(Image.FLIP_LEFT_RIGHT) if should_flip else img
class RandomVerticallyFlip(object):
    """Flip image and mask top-bottom together with probability ``p``."""

    def __init__(self, p):
        self.p = p

    def __call__(self, img, mask):
        # Guard clause: skip the transpose in the common (1 - p) case.
        if random.random() >= self.p:
            return img, mask
        flipped_img = img.transpose(Image.FLIP_TOP_BOTTOM)
        flipped_mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
        return flipped_img, flipped_mask
class RandomVerticallyFlipOnlyImg(object):
    """Flip only the image top-bottom with probability ``p`` (mask-free)."""

    def __init__(self, p):
        self.p = p

    def __call__(self, img):
        should_flip = random.random() < self.p
        return img.transpose(Image.FLIP_TOP_BOTTOM) if should_flip else img
class RandomRotate(object):
    """Rotate image (bilinear) and mask (nearest) by a uniformly random
    angle in [-degree, degree].

    The image's empty corners are filled with its per-channel mean color;
    the mask's with 0 (background).
    """

    def __init__(self, degree):
        self.degree = degree

    def __call__(self, img, mask):
        rotate_degree = random.random() * 2 * self.degree - self.degree
        pixels = np.array(img)
        # Per-channel mean fill keeps the padded corners visually neutral.
        mean_fill = tuple(int(np.mean(pixels[..., c])) for c in range(3))
        rotated_img = tf.affine(
            img,
            translate=(0, 0),
            scale=1.0,
            angle=rotate_degree,
            resample=Image.BILINEAR,
            fillcolor=mean_fill,
            shear=0.0,
        )
        rotated_mask = tf.affine(
            mask,
            translate=(0, 0),
            scale=1.0,
            angle=rotate_degree,
            resample=Image.NEAREST,
            fillcolor=0,
            shear=0.0,
        )
        return rotated_img, rotated_mask
class RandomRotate90(object):
    """Rotate both image and mask by a random multiple of 90 degrees.

    ``dummy`` exists only so the op matches the registry's uniform
    single-parameter constructor signature.
    """

    def __init__(self, dummy):
        self.dummy = dummy

    def __call__(self, img, mask):
        angle = 90 * random.choice([0, 1, 2, 3])
        rotated_img = tf.rotate(img, angle, False, True, None)
        rotated_mask = tf.rotate(mask, angle, False, True, None)
        return rotated_img, rotated_mask
class RandomRotate90OnlyImg(object):
    """Rotate only the image by a random multiple of 90 degrees.

    ``dummy`` exists only so the op matches the registry's uniform
    single-parameter constructor signature.
    """

    def __init__(self, dummy):
        self.dummy = dummy

    def __call__(self, img):
        angle = 90 * random.choice([0, 1, 2, 3])
        return tf.rotate(img, angle, False, True, None)
class Indentity(object):
    """No-op augmentation: returns its inputs unchanged.

    NOTE: class name is intentionally kept misspelled — the ``key2aug``
    registry (config key ``"identity"``) references it by this name.
    ``magnitude`` is accepted and ignored for registry-signature uniformity.
    """

    def __init__(self, magnitude):
        pass

    def __call__(self, img, mask):
        return (img, mask)
# Registry mapping config keys (consumed by ``get_augmentation``) to
# augmentation-op classes. Each class takes a single config value (or a
# dict of keyword args) in its constructor.
# Note: "identity" maps to the (misspelled) ``Indentity`` class on purpose.
key2aug = {
    "hflip": RandomHorizontallyFlip,
    "hflip_onlyimg": RandomHorizontallyFlipOnlyImg,
    "vflip": RandomVerticallyFlip,
    "vflip_onlyimg": RandomVerticallyFlipOnlyImg,
    "rotate": RandomRotate,
    "rotate90": RandomRotate90,
    "rotate90_onlyimg": RandomRotate90OnlyImg,
    "identity": Indentity,
}
|
# repo: broadinstitute/scp-ingest-service
from bson.objectid import ObjectId
nineteen_genes_100k_cell_models = {
"data_arrays": {
"dense_matrix_19_genes_1000_cells.txt Cells": {
"name": "dense_matrix_19_genes_1000_cells.txt Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1",
"Granuloma1_10",
"Granuloma1_100",
"Granuloma1_1000",
"Granuloma1_1001",
"Granuloma1_1002",
"Granuloma1_1003",
"Granuloma1_1004",
"Granuloma1_1005",
"Granuloma1_1006",
"Granuloma1_1007",
"Granuloma1_1008",
"Granuloma1_1009",
"Granuloma1_101",
"Granuloma1_1010",
"Granuloma1_1011",
"Granuloma1_1012",
"Granuloma1_1013",
"Granuloma1_1014",
"Granuloma1_1015",
"Granuloma1_1016",
"Granuloma1_1017",
"Granuloma1_1018",
"Granuloma1_1019",
"Granuloma1_102",
"Granuloma1_1020",
"Granuloma1_1021",
"Granuloma1_1022",
"Granuloma1_1023",
"Granuloma1_1024",
"Granuloma1_1025",
"Granuloma1_1026",
"Granuloma1_1027",
"Granuloma1_1028",
"Granuloma1_1029",
"Granuloma1_103",
"Granuloma1_1030",
"Granuloma1_1031",
"Granuloma1_1032",
"Granuloma1_1033",
"Granuloma1_1034",
"Granuloma1_1035",
"Granuloma1_1036",
"Granuloma1_1037",
"Granuloma1_1038",
"Granuloma1_1039",
"Granuloma1_104",
"Granuloma1_1040",
"Granuloma1_1041",
"Granuloma1_1042",
"Granuloma1_1043",
"Granuloma1_1044",
"Granuloma1_1045",
"Granuloma1_1046",
"Granuloma1_1047",
"Granuloma1_1048",
"Granuloma1_1049",
"Granuloma1_105",
"Granuloma1_1050",
"Granuloma1_1051",
"Granuloma1_1052",
"Granuloma1_1053",
"Granuloma1_1054",
"Granuloma1_1055",
"Granuloma1_1056",
"Granuloma1_1057",
"Granuloma1_1058",
"Granuloma1_1059",
"Granuloma1_106",
"Granuloma1_1060",
"Granuloma1_1061",
"Granuloma1_1062",
"Granuloma1_1063",
"Granuloma1_1064",
"Granuloma1_1065",
"Granuloma1_1066",
"Granuloma1_1067",
"Granuloma1_1068",
"Granuloma1_1069",
"Granuloma1_107",
"Granuloma1_1070",
"Granuloma1_1071",
"Granuloma1_1072",
"Granuloma1_1073",
"Granuloma1_1074",
"Granuloma1_1075",
"Granuloma1_1076",
"Granuloma1_1077",
"Granuloma1_1078",
"Granuloma1_1079",
"Granuloma1_108",
"Granuloma1_1080",
"Granuloma1_1083",
"Granuloma1_1084",
"Granuloma1_1085",
"Granuloma1_1086",
"Granuloma1_1087",
"Granuloma1_1088",
"Granuloma1_1089",
"Granuloma1_109",
"Granuloma1_1090",
"Granuloma1_1091",
"Granuloma1_1092",
"Granuloma1_1093",
"Granuloma1_1094",
"Granuloma1_1095",
"Granuloma1_1096",
"Granuloma1_1097",
"Granuloma1_1098",
"Granuloma1_1099",
"Granuloma1_11",
"Granuloma1_110",
"Granuloma1_1100",
"Granuloma1_1101",
"Granuloma1_1102",
"Granuloma1_1103",
"Granuloma1_1104",
"Granuloma1_1105",
"Granuloma1_1106",
"Granuloma1_1107",
"Granuloma1_1108",
"Granuloma1_1109",
"Granuloma1_111",
"Granuloma1_1110",
"Granuloma1_1111",
"Granuloma1_1112",
"Granuloma1_1113",
"Granuloma1_1114",
"Granuloma1_1115",
"Granuloma1_1116",
"Granuloma1_1117",
"Granuloma1_1118",
"Granuloma1_1119",
"Granuloma1_112",
"Granuloma1_1120",
"Granuloma1_1121",
"Granuloma1_1122",
"Granuloma1_1123",
"Granuloma1_1124",
"Granuloma1_1125",
"Granuloma1_1126",
"Granuloma1_1127",
"Granuloma1_1128",
"Granuloma1_1129",
"Granuloma1_113",
"Granuloma1_1130",
"Granuloma1_1131",
"Granuloma1_1132",
"Granuloma1_1133",
"Granuloma1_1134",
"Granuloma1_1135",
"Granuloma1_1136",
"Granuloma1_1137",
"Granuloma1_1138",
"Granuloma1_1139",
"Granuloma1_114",
"Granuloma1_1140",
"Granuloma1_1141",
"Granuloma1_1142",
"Granuloma1_1143",
"Granuloma1_1144",
"Granuloma1_1145",
"Granuloma1_1146",
"Granuloma1_1147",
"Granuloma1_1148",
"Granuloma1_1149",
"Granuloma1_115",
"Granuloma1_1150",
"Granuloma1_1151",
"Granuloma1_1152",
"Granuloma1_1153",
"Granuloma1_1154",
"Granuloma1_1155",
"Granuloma1_1156",
"Granuloma1_1157",
"Granuloma1_1158",
"Granuloma1_1159",
"Granuloma1_116",
"Granuloma1_1160",
"Granuloma1_1161",
"Granuloma1_1162",
"Granuloma1_1163",
"Granuloma1_1164",
"Granuloma1_1165",
"Granuloma1_1166",
"Granuloma1_1167",
"Granuloma1_1168",
"Granuloma1_1169",
"Granuloma1_117",
"Granuloma1_1170",
"Granuloma1_1171",
"Granuloma1_1172",
"Granuloma1_1173",
"Granuloma1_1174",
"Granuloma1_1175",
"Granuloma1_1177",
"Granuloma1_1178",
"Granuloma1_1179",
"Granuloma1_118",
"Granuloma1_1180",
"Granuloma1_1182",
"Granuloma1_1183",
"Granuloma1_1184",
"Granuloma1_1185",
"Granuloma1_1186",
"Granuloma1_1187",
"Granuloma1_1188",
"Granuloma1_1189",
"Granuloma1_119",
"Granuloma1_1190",
"Granuloma1_1191",
"Granuloma1_1193",
"Granuloma1_1194",
"Granuloma1_1195",
"Granuloma1_1196",
"Granuloma1_1197",
"Granuloma1_1198",
"Granuloma1_1199",
"Granuloma1_12",
"Granuloma1_120",
"Granuloma1_1200",
"Granuloma1_1201",
"Granuloma1_1202",
"Granuloma1_1203",
"Granuloma1_1204",
"Granuloma1_1205",
"Granuloma1_1206",
"Granuloma1_1207",
"Granuloma1_1208",
"Granuloma1_1209",
"Granuloma1_121",
"Granuloma1_1210",
"Granuloma1_1211",
"Granuloma1_1212",
"Granuloma1_1214",
"Granuloma1_1215",
"Granuloma1_1216",
"Granuloma1_1217",
"Granuloma1_1218",
"Granuloma1_1219",
"Granuloma1_122",
"Granuloma1_1220",
"Granuloma1_1221",
"Granuloma1_1222",
"Granuloma1_1224",
"Granuloma1_1225",
"Granuloma1_1226",
"Granuloma1_1227",
"Granuloma1_1228",
"Granuloma1_1229",
"Granuloma1_123",
"Granuloma1_1230",
"Granuloma1_1231",
"Granuloma1_1232",
"Granuloma1_1233",
"Granuloma1_1234",
"Granuloma1_1235",
"Granuloma1_1236",
"Granuloma1_1237",
"Granuloma1_1238",
"Granuloma1_1239",
"Granuloma1_124",
"Granuloma1_1240",
"Granuloma1_1241",
"Granuloma1_1243",
"Granuloma1_1244",
"Granuloma1_1245",
"Granuloma1_1246",
"Granuloma1_1248",
"Granuloma1_1249",
"Granuloma1_125",
"Granuloma1_1250",
"Granuloma1_1251",
"Granuloma1_1252",
"Granuloma1_1253",
"Granuloma1_1254",
"Granuloma1_1255",
"Granuloma1_1256",
"Granuloma1_1257",
"Granuloma1_1258",
"Granuloma1_1259",
"Granuloma1_126",
"Granuloma1_1260",
"Granuloma1_1261",
"Granuloma1_1262",
"Granuloma1_1263",
"Granuloma1_1264",
"Granuloma1_1266",
"Granuloma1_1267",
"Granuloma1_1268",
"Granuloma1_1269",
"Granuloma1_127",
"Granuloma1_1270",
"Granuloma1_1271",
"Granuloma1_1272",
"Granuloma1_1273",
"Granuloma1_1274",
"Granuloma1_1275",
"Granuloma1_1276",
"Granuloma1_1277",
"Granuloma1_1278",
"Granuloma1_1279",
"Granuloma1_128",
"Granuloma1_1280",
"Granuloma1_1282",
"Granuloma1_1283",
"Granuloma1_1284",
"Granuloma1_1286",
"Granuloma1_1287",
"Granuloma1_1288",
"Granuloma1_1289",
"Granuloma1_129",
"Granuloma1_1290",
"Granuloma1_1291",
"Granuloma1_1292",
"Granuloma1_1293",
"Granuloma1_1294",
"Granuloma1_1295",
"Granuloma1_1297",
"Granuloma1_1299",
"Granuloma1_13",
"Granuloma1_130",
"Granuloma1_1300",
"Granuloma1_1301",
"Granuloma1_1302",
"Granuloma1_1304",
"Granuloma1_1305",
"Granuloma1_1306",
"Granuloma1_1307",
"Granuloma1_1308",
"Granuloma1_1309",
"Granuloma1_131",
"Granuloma1_1310",
"Granuloma1_1311",
"Granuloma1_1312",
"Granuloma1_1313",
"Granuloma1_1315",
"Granuloma1_1316",
"Granuloma1_1317",
"Granuloma1_1318",
"Granuloma1_132",
"Granuloma1_1320",
"Granuloma1_1321",
"Granuloma1_1322",
"Granuloma1_1323",
"Granuloma1_1325",
"Granuloma1_1326",
"Granuloma1_1327",
"Granuloma1_1328",
"Granuloma1_1329",
"Granuloma1_133",
"Granuloma1_1330",
"Granuloma1_1332",
"Granuloma1_1334",
"Granuloma1_1335",
"Granuloma1_1336",
"Granuloma1_1337",
"Granuloma1_1338",
"Granuloma1_1339",
"Granuloma1_134",
"Granuloma1_1340",
"Granuloma1_1341",
"Granuloma1_1342",
"Granuloma1_1343",
"Granuloma1_1344",
"Granuloma1_1345",
"Granuloma1_1346",
"Granuloma1_1347",
"Granuloma1_1348",
"Granuloma1_1349",
"Granuloma1_135",
"Granuloma1_1350",
"Granuloma1_1351",
"Granuloma1_1352",
"Granuloma1_1353",
"Granuloma1_1355",
"Granuloma1_1356",
"Granuloma1_1357",
"Granuloma1_1358",
"Granuloma1_1359",
"Granuloma1_136",
"Granuloma1_1361",
"Granuloma1_1362",
"Granuloma1_1363",
"Granuloma1_1364",
"Granuloma1_1365",
"Granuloma1_1366",
"Granuloma1_1367",
"Granuloma1_1368",
"Granuloma1_1369",
"Granuloma1_137",
"Granuloma1_1370",
"Granuloma1_1371",
"Granuloma1_1373",
"Granuloma1_1374",
"Granuloma1_1375",
"Granuloma1_1376",
"Granuloma1_1377",
"Granuloma1_1378",
"Granuloma1_138",
"Granuloma1_1381",
"Granuloma1_1382",
"Granuloma1_1383",
"Granuloma1_1384",
"Granuloma1_1385",
"Granuloma1_1387",
"Granuloma1_1388",
"Granuloma1_1389",
"Granuloma1_139",
"Granuloma1_1391",
"Granuloma1_1392",
"Granuloma1_1393",
"Granuloma1_1394",
"Granuloma1_1395",
"Granuloma1_1396",
"Granuloma1_1397",
"Granuloma1_1398",
"Granuloma1_1399",
"Granuloma1_14",
"Granuloma1_140",
"Granuloma1_1400",
"Granuloma1_1401",
"Granuloma1_1402",
"Granuloma1_1403",
"Granuloma1_1404",
"Granuloma1_1405",
"Granuloma1_1406",
"Granuloma1_1407",
"Granuloma1_1408",
"Granuloma1_1409",
"Granuloma1_141",
"Granuloma1_1410",
"Granuloma1_1411",
"Granuloma1_1412",
"Granuloma1_1413",
"Granuloma1_1414",
"Granuloma1_1415",
"Granuloma1_1416",
"Granuloma1_1417",
"Granuloma1_1418",
"Granuloma1_142",
"Granuloma1_1420",
"Granuloma1_1421",
"Granuloma1_1422",
"Granuloma1_1423",
"Granuloma1_1424",
"Granuloma1_1425",
"Granuloma1_1426",
"Granuloma1_1427",
"Granuloma1_1428",
"Granuloma1_1429",
"Granuloma1_143",
"Granuloma1_1430",
"Granuloma1_1431",
"Granuloma1_1432",
"Granuloma1_1433",
"Granuloma1_1434",
"Granuloma1_1435",
"Granuloma1_1436",
"Granuloma1_1437",
"Granuloma1_1438",
"Granuloma1_1439",
"Granuloma1_144",
"Granuloma1_1440",
"Granuloma1_1441",
"Granuloma1_1443",
"Granuloma1_1444",
"Granuloma1_1445",
"Granuloma1_1446",
"Granuloma1_1447",
"Granuloma1_1448",
"Granuloma1_1449",
"Granuloma1_145",
"Granuloma1_1450",
"Granuloma1_1451",
"Granuloma1_1453",
"Granuloma1_1454",
"Granuloma1_1455",
"Granuloma1_1456",
"Granuloma1_1457",
"Granuloma1_1459",
"Granuloma1_146",
"Granuloma1_1460",
"Granuloma1_1461",
"Granuloma1_1463",
"Granuloma1_1466",
"Granuloma1_1467",
"Granuloma1_1468",
"Granuloma1_1469",
"Granuloma1_147",
"Granuloma1_1470",
"Granuloma1_1472",
"Granuloma1_1473",
"Granuloma1_1474",
"Granuloma1_1476",
"Granuloma1_1477",
"Granuloma1_1478",
"Granuloma1_1479",
"Granuloma1_148",
"Granuloma1_1480",
"Granuloma1_1481",
"Granuloma1_1482",
"Granuloma1_1483",
"Granuloma1_1484",
"Granuloma1_1485",
"Granuloma1_1486",
"Granuloma1_1487",
"Granuloma1_1489",
"Granuloma1_149",
"Granuloma1_1490",
"Granuloma1_1491",
"Granuloma1_1492",
"Granuloma1_1493",
"Granuloma1_1494",
"Granuloma1_1495",
"Granuloma1_1496",
"Granuloma1_1497",
"Granuloma1_1498",
"Granuloma1_1499",
"Granuloma1_15",
"Granuloma1_150",
"Granuloma1_1500",
"Granuloma1_1501",
"Granuloma1_1502",
"Granuloma1_1503",
"Granuloma1_1504",
"Granuloma1_1505",
"Granuloma1_1507",
"Granuloma1_1508",
"Granuloma1_1509",
"Granuloma1_151",
"Granuloma1_1510",
"Granuloma1_1511",
"Granuloma1_1512",
"Granuloma1_1513",
"Granuloma1_1514",
"Granuloma1_1515",
"Granuloma1_1516",
"Granuloma1_1517",
"Granuloma1_1518",
"Granuloma1_1519",
"Granuloma1_152",
"Granuloma1_1520",
"Granuloma1_1521",
"Granuloma1_1522",
"Granuloma1_1523",
"Granuloma1_1524",
"Granuloma1_1525",
"Granuloma1_1526",
"Granuloma1_1527",
"Granuloma1_1528",
"Granuloma1_1529",
"Granuloma1_153",
"Granuloma1_1530",
"Granuloma1_1531",
"Granuloma1_1532",
"Granuloma1_1533",
"Granuloma1_1534",
"Granuloma1_1535",
"Granuloma1_1536",
"Granuloma1_1537",
"Granuloma1_1538",
"Granuloma1_1539",
"Granuloma1_154",
"Granuloma1_1540",
"Granuloma1_1541",
"Granuloma1_1542",
"Granuloma1_1543",
"Granuloma1_1545",
"Granuloma1_1546",
"Granuloma1_1547",
"Granuloma1_1548",
"Granuloma1_1549",
"Granuloma1_155",
"Granuloma1_1550",
"Granuloma1_1551",
"Granuloma1_1552",
"Granuloma1_1553",
"Granuloma1_1554",
"Granuloma1_1555",
"Granuloma1_1556",
"Granuloma1_1557",
"Granuloma1_1558",
"Granuloma1_1559",
"Granuloma1_156",
"Granuloma1_1560",
"Granuloma1_1562",
"Granuloma1_1563",
"Granuloma1_1564",
"Granuloma1_1565",
"Granuloma1_1566",
"Granuloma1_1567",
"Granuloma1_1568",
"Granuloma1_1569",
"Granuloma1_157",
"Granuloma1_1570",
"Granuloma1_1571",
"Granuloma1_1572",
"Granuloma1_1573",
"Granuloma1_1574",
"Granuloma1_1575",
"Granuloma1_1576",
"Granuloma1_1577",
"Granuloma1_1578",
"Granuloma1_1579",
"Granuloma1_158",
"Granuloma1_1580",
"Granuloma1_1581",
"Granuloma1_1582",
"Granuloma1_1583",
"Granuloma1_1584",
"Granuloma1_1585",
"Granuloma1_1586",
"Granuloma1_1587",
"Granuloma1_1588",
"Granuloma1_1589",
"Granuloma1_1590",
"Granuloma1_1591",
"Granuloma1_1592",
"Granuloma1_1593",
"Granuloma1_1594",
"Granuloma1_1595",
"Granuloma1_1596",
"Granuloma1_1597",
"Granuloma1_1598",
"Granuloma1_1599",
"Granuloma1_16",
"Granuloma1_160",
"Granuloma1_1600",
"Granuloma1_1601",
"Granuloma1_1602",
"Granuloma1_1603",
"Granuloma1_1604",
"Granuloma1_1605",
"Granuloma1_1606",
"Granuloma1_1607",
"Granuloma1_1608",
"Granuloma1_1609",
"Granuloma1_161",
"Granuloma1_1610",
"Granuloma1_1611",
"Granuloma1_1612",
"Granuloma1_1613",
"Granuloma1_1614",
"Granuloma1_1615",
"Granuloma1_1616",
"Granuloma1_1617",
"Granuloma1_1618",
"Granuloma1_1619",
"Granuloma1_162",
"Granuloma1_1620",
"Granuloma1_1621",
"Granuloma1_1622",
"Granuloma1_1623",
"Granuloma1_1625",
"Granuloma1_1626",
"Granuloma1_1627",
"Granuloma1_1628",
"Granuloma1_1629",
"Granuloma1_163",
"Granuloma1_1630",
"Granuloma1_1631",
"Granuloma1_1632",
"Granuloma1_1633",
"Granuloma1_1635",
"Granuloma1_1636",
"Granuloma1_1637",
"Granuloma1_1638",
"Granuloma1_1639",
"Granuloma1_164",
"Granuloma1_1641",
"Granuloma1_1642",
"Granuloma1_1643",
"Granuloma1_1644",
"Granuloma1_1645",
"Granuloma1_1646",
"Granuloma1_1647",
"Granuloma1_1648",
"Granuloma1_1649",
"Granuloma1_165",
"Granuloma1_1650",
"Granuloma1_1651",
"Granuloma1_1652",
"Granuloma1_1653",
"Granuloma1_1654",
"Granuloma1_1656",
"Granuloma1_1657",
"Granuloma1_1658",
"Granuloma1_1659",
"Granuloma1_166",
"Granuloma1_1660",
"Granuloma1_1661",
"Granuloma1_1662",
"Granuloma1_1663",
"Granuloma1_1664",
"Granuloma1_1665",
"Granuloma1_1666",
"Granuloma1_1667",
"Granuloma1_1668",
"Granuloma1_1669",
"Granuloma1_167",
"Granuloma1_1670",
"Granuloma1_1671",
"Granuloma1_1672",
"Granuloma1_1673",
"Granuloma1_1674",
"Granuloma1_1675",
"Granuloma1_1676",
"Granuloma1_1678",
"Granuloma1_1679",
"Granuloma1_168",
"Granuloma1_1680",
"Granuloma1_1681",
"Granuloma1_1682",
"Granuloma1_1683",
"Granuloma1_1684",
"Granuloma1_1685",
"Granuloma1_1686",
"Granuloma1_1687",
"Granuloma1_1688",
"Granuloma1_1689",
"Granuloma1_169",
"Granuloma1_1690",
"Granuloma1_1691",
"Granuloma1_1692",
"Granuloma1_1693",
"Granuloma1_1694",
"Granuloma1_1695",
"Granuloma1_1696",
"Granuloma1_1697",
"Granuloma1_1698",
"Granuloma1_1699",
"Granuloma1_17",
"Granuloma1_170",
"Granuloma1_1701",
"Granuloma1_1702",
"Granuloma1_1703",
"Granuloma1_1704",
"Granuloma1_1705",
"Granuloma1_1706",
"Granuloma1_1708",
"Granuloma1_1709",
"Granuloma1_171",
"Granuloma1_1710",
"Granuloma1_1711",
"Granuloma1_1712",
"Granuloma1_1713",
"Granuloma1_1714",
"Granuloma1_1715",
"Granuloma1_1716",
"Granuloma1_1717",
"Granuloma1_1718",
"Granuloma1_1719",
"Granuloma1_172",
"Granuloma1_1720",
"Granuloma1_1721",
"Granuloma1_1722",
"Granuloma1_1723",
"Granuloma1_1724",
"Granuloma1_1725",
"Granuloma1_1726",
"Granuloma1_1727",
"Granuloma1_1728",
"Granuloma1_1729",
"Granuloma1_173",
"Granuloma1_1730",
"Granuloma1_1731",
"Granuloma1_1732",
"Granuloma1_1733",
"Granuloma1_1734",
"Granuloma1_1735",
"Granuloma1_1736",
"Granuloma1_1737",
"Granuloma1_1738",
"Granuloma1_1739",
"Granuloma1_174",
"Granuloma1_1740",
"Granuloma1_1741",
"Granuloma1_1742",
"Granuloma1_1743",
"Granuloma1_1744",
"Granuloma1_1745",
"Granuloma1_1746",
"Granuloma1_1747",
"Granuloma1_1748",
"Granuloma1_1749",
"Granuloma1_175",
"Granuloma1_1750",
"Granuloma1_1751",
"Granuloma1_1752",
"Granuloma1_1753",
"Granuloma1_1754",
"Granuloma1_1755",
"Granuloma1_1756",
"Granuloma1_1757",
"Granuloma1_1758",
"Granuloma1_1759",
"Granuloma1_176",
"Granuloma1_1760",
"Granuloma1_1761",
"Granuloma1_1762",
"Granuloma1_1763",
"Granuloma1_1765",
"Granuloma1_1766",
"Granuloma1_1767",
"Granuloma1_1768",
"Granuloma1_1769",
"Granuloma1_177",
"Granuloma1_1770",
"Granuloma1_1771",
"Granuloma1_1772",
"Granuloma1_1773",
"Granuloma1_1774",
"Granuloma1_1775",
"Granuloma1_1776",
"Granuloma1_1777",
"Granuloma1_1778",
"Granuloma1_1779",
"Granuloma1_178",
"Granuloma1_1780",
"Granuloma1_1781",
"Granuloma1_1782",
"Granuloma1_1783",
"Granuloma1_1784",
"Granuloma1_1787",
"Granuloma1_1788",
"Granuloma1_1789",
"Granuloma1_179",
"Granuloma1_1790",
"Granuloma1_1791",
"Granuloma1_1793",
"Granuloma1_1794",
"Granuloma1_1795",
"Granuloma1_1796",
"Granuloma1_1797",
"Granuloma1_1798",
"Granuloma1_1799",
"Granuloma1_18",
"Granuloma1_180",
"Granuloma1_1800",
"Granuloma1_1801",
"Granuloma1_1802",
"Granuloma1_1803",
"Granuloma1_1804",
"Granuloma1_1805",
"Granuloma1_1806",
"Granuloma1_1807",
"Granuloma1_1808",
"Granuloma1_1809",
"Granuloma1_181",
"Granuloma1_1810",
"Granuloma1_1811",
"Granuloma1_1812",
"Granuloma1_1813",
"Granuloma1_1814",
"Granuloma1_1815",
"Granuloma1_1816",
"Granuloma1_1817",
"Granuloma1_1818",
"Granuloma1_1819",
"Granuloma1_182",
"Granuloma1_1820",
"Granuloma1_1821",
"Granuloma1_1822",
"Granuloma1_1823",
"Granuloma1_1825",
"Granuloma1_1826",
"Granuloma1_1827",
"Granuloma1_1828",
"Granuloma1_1829",
"Granuloma1_183",
"Granuloma1_1830",
"Granuloma1_1831",
"Granuloma1_1832",
"Granuloma1_1833",
"Granuloma1_1834",
"Granuloma1_1835",
"Granuloma1_1836",
"Granuloma1_1837",
"Granuloma1_1838",
"Granuloma1_1839",
"Granuloma1_184",
"Granuloma1_1840",
"Granuloma1_1841",
"Granuloma1_1842",
"Granuloma1_1843",
"Granuloma1_1844",
"Granuloma1_1845",
"Granuloma1_1846",
"Granuloma1_1847",
"Granuloma1_1848",
"Granuloma1_1849",
"Granuloma1_185",
"Granuloma1_1850",
"Granuloma1_1851",
"Granuloma1_1852",
"Granuloma1_1853",
"Granuloma1_1854",
"Granuloma1_1855",
"Granuloma1_1856",
"Granuloma1_1857",
"Granuloma1_1858",
"Granuloma1_1859",
"Granuloma1_186",
"Granuloma1_1860",
"Granuloma1_1861",
"Granuloma1_1862",
"Granuloma1_1863",
"Granuloma1_1864",
"Granuloma1_1865",
"Granuloma1_1866",
"Granuloma1_1868",
"Granuloma1_1869",
"Granuloma1_187",
"Granuloma1_1870",
"Granuloma1_1871",
"Granuloma1_1872",
"Granuloma1_1874",
"Granuloma1_1877",
"Granuloma1_1878",
"Granuloma1_1879",
"Granuloma1_188",
"Granuloma1_1880",
"Granuloma1_1882",
"Granuloma1_1883",
"Granuloma1_1884",
"Granuloma1_1885",
"Granuloma1_1886",
"Granuloma1_1887",
"Granuloma1_1888",
"Granuloma1_189",
"Granuloma1_1890",
"Granuloma1_1891",
"Granuloma1_1893",
"Granuloma1_1894",
"Granuloma1_1895",
"Granuloma1_1896",
"Granuloma1_1898",
"Granuloma1_1899",
"Granuloma1_19",
"Granuloma1_190",
"Granuloma1_1900",
"Granuloma1_1901",
"Granuloma1_1902",
"Granuloma1_1904",
"Granuloma1_1905",
"Granuloma1_1906",
"Granuloma1_1907",
"Granuloma1_1908",
"Granuloma1_191",
"Granuloma1_1910",
"Granuloma1_1911",
"Granuloma1_1912",
"Granuloma1_1913",
"Granuloma1_1914",
"Granuloma1_1915",
"Granuloma1_1916",
"Granuloma1_1917",
"Granuloma1_1918",
"Granuloma1_1919",
"Granuloma1_192",
"Granuloma1_1920",
"Granuloma1_1921",
"Granuloma1_1922",
"Granuloma1_1923",
"Granuloma1_1924",
"Granuloma1_1925",
"Granuloma1_1926",
"Granuloma1_1927",
"Granuloma1_1928",
"Granuloma1_1929",
"Granuloma1_193",
"Granuloma1_1930",
"Granuloma1_1931",
"Granuloma1_1932",
"Granuloma1_1933",
"Granuloma1_1934",
"Granuloma1_1935",
"Granuloma1_1936",
"Granuloma1_1937",
"Granuloma1_1938",
"Granuloma1_1939",
"Granuloma1_194",
"Granuloma1_1940",
"Granuloma1_1941",
"Granuloma1_1942",
"Granuloma1_1943",
"Granuloma1_1944",
"Granuloma1_1945",
"Granuloma1_1946",
"Granuloma1_1947",
"Granuloma1_1948",
"Granuloma1_1949",
"Granuloma1_195",
"Granuloma1_1950",
"Granuloma1_1951",
"Granuloma1_1952",
"Granuloma1_1953",
"Granuloma1_1954",
"Granuloma1_1955",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Study",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"A1BG Cells": {
"name": "<NAME>",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": ["Granuloma1_1"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"A1BG Expression": {
"name": "A1BG Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [1.0],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"A1CF Cells": {
"name": "A1CF Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": ["Granuloma1_1"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"A1CF Expression": {
"name": "A1CF Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [1.0],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"A4GALT Cells": {
"name": "A4GALT Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1",
"Granuloma1_1021",
"Granuloma1_1025",
"Granuloma1_1045",
"Granuloma1_1050",
"Granuloma1_1062",
"Granuloma1_1063",
"Granuloma1_1067",
"Granuloma1_1070",
"Granuloma1_1096",
"Granuloma1_11",
"Granuloma1_1102",
"Granuloma1_1106",
"Granuloma1_111",
"Granuloma1_1189",
"Granuloma1_119",
"Granuloma1_1194",
"Granuloma1_1195",
"Granuloma1_1196",
"Granuloma1_1197",
"Granuloma1_12",
"Granuloma1_1200",
"Granuloma1_1201",
"Granuloma1_1208",
"Granuloma1_1211",
"Granuloma1_1214",
"Granuloma1_1216",
"Granuloma1_1217",
"Granuloma1_1221",
"Granuloma1_1228",
"Granuloma1_123",
"Granuloma1_1235",
"Granuloma1_1239",
"Granuloma1_1240",
"Granuloma1_1244",
"Granuloma1_1248",
"Granuloma1_1252",
"Granuloma1_1259",
"Granuloma1_1262",
"Granuloma1_1275",
"Granuloma1_1278",
"Granuloma1_1283",
"Granuloma1_1288",
"Granuloma1_1289",
"Granuloma1_1292",
"Granuloma1_1304",
"Granuloma1_1311",
"Granuloma1_1330",
"Granuloma1_1332",
"Granuloma1_1343",
"Granuloma1_1367",
"Granuloma1_137",
"Granuloma1_1374",
"Granuloma1_1375",
"Granuloma1_138",
"Granuloma1_1391",
"Granuloma1_1395",
"Granuloma1_1400",
"Granuloma1_1401",
"Granuloma1_1402",
"Granuloma1_1420",
"Granuloma1_143",
"Granuloma1_1433",
"Granuloma1_1438",
"Granuloma1_1451",
"Granuloma1_1459",
"Granuloma1_146",
"Granuloma1_1481",
"Granuloma1_1493",
"Granuloma1_1498",
"Granuloma1_15",
"Granuloma1_1504",
"Granuloma1_1521",
"Granuloma1_1525",
"Granuloma1_1529",
"Granuloma1_1540",
"Granuloma1_1558",
"Granuloma1_156",
"Granuloma1_1565",
"Granuloma1_1569",
"Granuloma1_16",
"Granuloma1_1601",
"Granuloma1_1620",
"Granuloma1_163",
"Granuloma1_1652",
"Granuloma1_1662",
"Granuloma1_1673",
"Granuloma1_1675",
"Granuloma1_1678",
"Granuloma1_168",
"Granuloma1_1687",
"Granuloma1_169",
"Granuloma1_17",
"Granuloma1_174",
"Granuloma1_1775",
"Granuloma1_1776",
"Granuloma1_179",
"Granuloma1_1793",
"Granuloma1_1795",
"Granuloma1_1812",
"Granuloma1_1818",
"Granuloma1_1863",
"Granuloma1_189",
"Granuloma1_1898",
"Granuloma1_1916",
"Granuloma1_1918",
"Granuloma1_1931",
"Granuloma1_1941",
"Granuloma1_1944",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"A4GALT Expression": {
"name": "A4GALT Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.319,
1.065,
1.12,
1.314,
1.384,
0.443,
0.454,
0.654,
0.764,
1.051,
0.533,
1.065,
1.65,
1.504,
1.769,
1.047,
1.299,
0.975,
0.645,
1.044,
0.552,
1.475,
0.769,
0.905,
0.952,
0.987,
0.986,
1.473,
1.03,
1.097,
1.531,
1.138,
1.188,
1.195,
1.223,
2.588,
1.249,
1.284,
1.285,
1.915,
2.548,
1.428,
1.451,
1.464,
1.479,
1.566,
1.577,
1.658,
1.646,
1.686,
1.803,
1.118,
1.85,
1.869,
1.123,
1.605,
1.055,
1.156,
1.179,
1.221,
1.968,
1.136,
1.497,
1.535,
1.601,
1.644,
1.165,
0.517,
0.713,
1.162,
1.184,
1.503,
1.87,
1.335,
0.892,
0.908,
0.957,
1.227,
0.975,
1.485,
0.571,
1.102,
1.133,
1.259,
1.769,
1.225,
1.247,
1.252,
1.259,
1.267,
1.293,
1.808,
0.584,
1.275,
1.498,
1.501,
1.3,
1.541,
2.122,
1.579,
1.601,
1.717,
1.322,
1.905,
0.67,
0.683,
0.822,
0.885,
0.904,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAAS Cells": {
"name": "AAAS Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1",
"Granuloma1_10",
"Granuloma1_1000",
"Granuloma1_1009",
"Granuloma1_1016",
"Granuloma1_105",
"Granuloma1_1053",
"Granuloma1_1063",
"Granuloma1_1068",
"Granuloma1_1072",
"Granuloma1_1074",
"Granuloma1_1079",
"Granuloma1_1089",
"Granuloma1_1097",
"Granuloma1_1101",
"Granuloma1_1102",
"Granuloma1_1110",
"Granuloma1_1129",
"Granuloma1_1136",
"Granuloma1_1199",
"Granuloma1_120",
"Granuloma1_1204",
"Granuloma1_1206",
"Granuloma1_1235",
"Granuloma1_1240",
"Granuloma1_127",
"Granuloma1_1329",
"Granuloma1_1338",
"Granuloma1_1388",
"Granuloma1_1389",
"Granuloma1_1406",
"Granuloma1_1414",
"Granuloma1_1428",
"Granuloma1_1439",
"Granuloma1_144",
"Granuloma1_146",
"Granuloma1_1481",
"Granuloma1_1482",
"Granuloma1_1484",
"Granuloma1_1489",
"Granuloma1_1538",
"Granuloma1_1551",
"Granuloma1_1552",
"Granuloma1_1554",
"Granuloma1_1560",
"Granuloma1_1576",
"Granuloma1_1584",
"Granuloma1_1587",
"Granuloma1_1592",
"Granuloma1_16",
"Granuloma1_161",
"Granuloma1_1613",
"Granuloma1_1629",
"Granuloma1_165",
"Granuloma1_1692",
"Granuloma1_1696",
"Granuloma1_1746",
"Granuloma1_1837",
"Granuloma1_1912",
"Granuloma1_1917",
"Granuloma1_1925",
"Granuloma1_193",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAAS Expression": {
"name": "AAAS Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.319,
0.528,
0.885,
0.987,
1.024,
0.989,
1.396,
0.454,
1.331,
0.768,
0.778,
0.845,
0.953,
1.051,
1.065,
1.065,
1.21,
1.392,
1.435,
0.692,
1.039,
0.79,
0.828,
1.138,
1.195,
1.079,
1.625,
1.685,
0.761,
0.767,
1.782,
1.35,
2.033,
1.535,
1.175,
1.165,
0.517,
0.871,
0.583,
0.657,
0.914,
0.929,
0.932,
0.942,
0.969,
1.032,
1.058,
1.073,
1.08,
0.571,
1.257,
1.127,
1.156,
1.253,
1.31,
1.314,
1.443,
1.645,
0.607,
0.679,
0.742,
1.347,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AACS Cells": {
"name": "<NAME>",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1",
"Granuloma1_1005",
"Granuloma1_1006",
"Granuloma1_1022",
"Granuloma1_1036",
"Granuloma1_1044",
"Granuloma1_1067",
"Granuloma1_1072",
"Granuloma1_1073",
"Granuloma1_1075",
"Granuloma1_1083",
"Granuloma1_1086",
"Granuloma1_1095",
"Granuloma1_1104",
"Granuloma1_1117",
"Granuloma1_1122",
"Granuloma1_1140",
"Granuloma1_1143",
"Granuloma1_1162",
"Granuloma1_1182",
"Granuloma1_1205",
"Granuloma1_1215",
"Granuloma1_1220",
"Granuloma1_1245",
"Granuloma1_1251",
"Granuloma1_1270",
"Granuloma1_128",
"Granuloma1_1325",
"Granuloma1_1336",
"Granuloma1_1347",
"Granuloma1_1428",
"Granuloma1_145",
"Granuloma1_1461",
"Granuloma1_1483",
"Granuloma1_1486",
"Granuloma1_1487",
"Granuloma1_1499",
"Granuloma1_1503",
"Granuloma1_1505",
"Granuloma1_1512",
"Granuloma1_152",
"Granuloma1_1536",
"Granuloma1_1550",
"Granuloma1_1552",
"Granuloma1_1567",
"Granuloma1_1576",
"Granuloma1_1599",
"Granuloma1_160",
"Granuloma1_1600",
"Granuloma1_1626",
"Granuloma1_1666",
"Granuloma1_1672",
"Granuloma1_1675",
"Granuloma1_169",
"Granuloma1_1696",
"Granuloma1_1709",
"Granuloma1_1718",
"Granuloma1_1759",
"Granuloma1_1768",
"Granuloma1_1779",
"Granuloma1_18",
"Granuloma1_183",
"Granuloma1_1833",
"Granuloma1_19",
"Granuloma1_190",
"Granuloma1_1913",
"Granuloma1_1914",
"Granuloma1_1919",
"Granuloma1_1928",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AACS Expression": {
"name": "AACS Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.561,
0.942,
0.945,
1.081,
1.227,
1.313,
0.654,
0.768,
0.778,
0.786,
0.884,
0.899,
1.038,
1.114,
1.313,
1.337,
1.464,
1.475,
1.552,
1.684,
0.807,
0.982,
1.004,
1.227,
1.245,
1.322,
1.578,
1.609,
1.666,
1.715,
1.463,
1.162,
1.649,
0.54,
1.254,
0.608,
0.739,
0.777,
0.77,
0.822,
1.206,
0.906,
1.404,
0.932,
1.478,
1.032,
1.101,
1.244,
1.1,
1.152,
1.233,
1.251,
1.252,
1.267,
1.314,
1.339,
1.372,
1.46,
1.481,
1.505,
0.592,
1.317,
1.641,
0.62,
1.338,
0.608,
0.617,
0.695,
0.778,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AADACL3 Cells": {
"name": "AADACL3 Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": ["Granuloma1_1499"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AADACL3 Expression": {
"name": "AADACL3 Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [0.739],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AADAT Cells": {
"name": "AADAT Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1",
"Granuloma1_100",
"Granuloma1_1001",
"Granuloma1_1012",
"Granuloma1_1019",
"Granuloma1_102",
"Granuloma1_1026",
"Granuloma1_103",
"Granuloma1_1031",
"Granuloma1_1038",
"Granuloma1_107",
"Granuloma1_108",
"Granuloma1_11",
"Granuloma1_111",
"Granuloma1_113",
"Granuloma1_114",
"Granuloma1_115",
"Granuloma1_1158",
"Granuloma1_118",
"Granuloma1_12",
"Granuloma1_120",
"Granuloma1_1206",
"Granuloma1_122",
"Granuloma1_1224",
"Granuloma1_124",
"Granuloma1_125",
"Granuloma1_127",
"Granuloma1_1277",
"Granuloma1_13",
"Granuloma1_1315",
"Granuloma1_135",
"Granuloma1_137",
"Granuloma1_139",
"Granuloma1_14",
"Granuloma1_1404",
"Granuloma1_141",
"Granuloma1_1433",
"Granuloma1_144",
"Granuloma1_145",
"Granuloma1_146",
"Granuloma1_1468",
"Granuloma1_148",
"Granuloma1_150",
"Granuloma1_1509",
"Granuloma1_152",
"Granuloma1_153",
"Granuloma1_154",
"Granuloma1_1540",
"Granuloma1_157",
"Granuloma1_158",
"Granuloma1_16",
"Granuloma1_1615",
"Granuloma1_1643",
"Granuloma1_166",
"Granuloma1_167",
"Granuloma1_168",
"Granuloma1_1688",
"Granuloma1_17",
"Granuloma1_1705",
"Granuloma1_171",
"Granuloma1_1716",
"Granuloma1_173",
"Granuloma1_1735",
"Granuloma1_175",
"Granuloma1_176",
"Granuloma1_179",
"Granuloma1_18",
"Granuloma1_181",
"Granuloma1_1814",
"Granuloma1_182",
"Granuloma1_1822",
"Granuloma1_183",
"Granuloma1_184",
"Granuloma1_1842",
"Granuloma1_186",
"Granuloma1_187",
"Granuloma1_19",
"Granuloma1_190",
"Granuloma1_191",
"Granuloma1_1914",
"Granuloma1_1934",
"Granuloma1_1937",
"Granuloma1_194",
"Granuloma1_1954",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AADAT Expression": {
"name": "AADAT Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.918,
1.773,
0.911,
0.983,
1.064,
2.41,
1.105,
1.794,
1.156,
1.243,
1.483,
1.487,
1.339,
1.504,
2.293,
2.102,
1.028,
1.545,
2.297,
0.905,
1.538,
0.828,
1.873,
1.058,
1.05,
1.878,
1.586,
1.938,
0.903,
1.584,
1.092,
2.86,
1.126,
0.925,
1.227,
1.129,
1.497,
1.7,
2.484,
1.165,
1.725,
2.059,
2.074,
1.542,
1.206,
2.727,
2.893,
0.908,
1.777,
1.779,
1.854,
1.128,
1.208,
1.271,
2.159,
1.267,
1.841,
1.88,
1.324,
2.169,
1.365,
1.817,
1.428,
1.286,
2.187,
1.3,
1.896,
1.297,
1.582,
1.862,
1.611,
1.866,
1.329,
1.66,
1.314,
1.325,
2.064,
1.338,
2.257,
0.617,
0.838,
0.843,
2.249,
1.438,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAED1 Cells": {
"name": "AAED1 Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1000",
"Granuloma1_1001",
"Granuloma1_1004",
"Granuloma1_1005",
"Granuloma1_1007",
"Granuloma1_1008",
"Granuloma1_1010",
"Granuloma1_1018",
"Granuloma1_1026",
"Granuloma1_1030",
"Granuloma1_1038",
"Granuloma1_104",
"Granuloma1_1041",
"Granuloma1_1049",
"Granuloma1_105",
"Granuloma1_1052",
"Granuloma1_106",
"Granuloma1_1062",
"Granuloma1_1063",
"Granuloma1_1064",
"Granuloma1_1067",
"Granuloma1_1069",
"Granuloma1_1070",
"Granuloma1_1071",
"Granuloma1_1072",
"Granuloma1_1074",
"Granuloma1_1077",
"Granuloma1_1080",
"Granuloma1_1087",
"Granuloma1_1097",
"Granuloma1_1098",
"Granuloma1_1099",
"Granuloma1_11",
"Granuloma1_1101",
"Granuloma1_1107",
"Granuloma1_1119",
"Granuloma1_1120",
"Granuloma1_1124",
"Granuloma1_1125",
"Granuloma1_1126",
"Granuloma1_1128",
"Granuloma1_1129",
"Granuloma1_1138",
"Granuloma1_1141",
"Granuloma1_115",
"Granuloma1_1150",
"Granuloma1_1151",
"Granuloma1_1152",
"Granuloma1_1153",
"Granuloma1_1163",
"Granuloma1_1169",
"Granuloma1_1173",
"Granuloma1_119",
"Granuloma1_1193",
"Granuloma1_1198",
"Granuloma1_12",
"Granuloma1_1200",
"Granuloma1_1202",
"Granuloma1_1203",
"Granuloma1_1204",
"Granuloma1_1206",
"Granuloma1_1208",
"Granuloma1_1211",
"Granuloma1_1216",
"Granuloma1_1217",
"Granuloma1_1219",
"Granuloma1_1222",
"Granuloma1_1227",
"Granuloma1_1228",
"Granuloma1_1229",
"Granuloma1_1249",
"Granuloma1_1261",
"Granuloma1_1262",
"Granuloma1_1266",
"Granuloma1_1269",
"Granuloma1_127",
"Granuloma1_1273",
"Granuloma1_129",
"Granuloma1_1293",
"Granuloma1_1294",
"Granuloma1_1305",
"Granuloma1_1308",
"Granuloma1_1315",
"Granuloma1_1322",
"Granuloma1_1328",
"Granuloma1_135",
"Granuloma1_1365",
"Granuloma1_1368",
"Granuloma1_137",
"Granuloma1_1387",
"Granuloma1_1388",
"Granuloma1_1389",
"Granuloma1_1393",
"Granuloma1_1395",
"Granuloma1_1397",
"Granuloma1_1402",
"Granuloma1_1403",
"Granuloma1_1413",
"Granuloma1_1416",
"Granuloma1_1427",
"Granuloma1_143",
"Granuloma1_1430",
"Granuloma1_1432",
"Granuloma1_1439",
"Granuloma1_1448",
"Granuloma1_145",
"Granuloma1_1453",
"Granuloma1_1469",
"Granuloma1_1478",
"Granuloma1_1481",
"Granuloma1_1485",
"Granuloma1_1487",
"Granuloma1_1489",
"Granuloma1_1490",
"Granuloma1_1491",
"Granuloma1_1492",
"Granuloma1_1494",
"Granuloma1_1495",
"Granuloma1_1496",
"Granuloma1_1497",
"Granuloma1_1499",
"Granuloma1_1502",
"Granuloma1_1505",
"Granuloma1_1508",
"Granuloma1_1509",
"Granuloma1_1512",
"Granuloma1_152",
"Granuloma1_1521",
"Granuloma1_1522",
"Granuloma1_1530",
"Granuloma1_1531",
"Granuloma1_1532",
"Granuloma1_1534",
"Granuloma1_1535",
"Granuloma1_1536",
"Granuloma1_1537",
"Granuloma1_1540",
"Granuloma1_1546",
"Granuloma1_1547",
"Granuloma1_1548",
"Granuloma1_155",
"Granuloma1_1555",
"Granuloma1_1556",
"Granuloma1_1563",
"Granuloma1_1566",
"Granuloma1_1571",
"Granuloma1_1574",
"Granuloma1_1578",
"Granuloma1_1581",
"Granuloma1_1583",
"Granuloma1_1585",
"Granuloma1_1592",
"Granuloma1_1594",
"Granuloma1_1595",
"Granuloma1_1597",
"Granuloma1_16",
"Granuloma1_1605",
"Granuloma1_1607",
"Granuloma1_1608",
"Granuloma1_1612",
"Granuloma1_1615",
"Granuloma1_1623",
"Granuloma1_1626",
"Granuloma1_1628",
"Granuloma1_163",
"Granuloma1_1630",
"Granuloma1_1633",
"Granuloma1_1635",
"Granuloma1_1646",
"Granuloma1_165",
"Granuloma1_1650",
"Granuloma1_1653",
"Granuloma1_1665",
"Granuloma1_1666",
"Granuloma1_167",
"Granuloma1_1670",
"Granuloma1_1671",
"Granuloma1_1673",
"Granuloma1_1685",
"Granuloma1_1686",
"Granuloma1_1688",
"Granuloma1_1690",
"Granuloma1_1691",
"Granuloma1_1694",
"Granuloma1_1695",
"Granuloma1_1696",
"Granuloma1_1699",
"Granuloma1_17",
"Granuloma1_1706",
"Granuloma1_1710",
"Granuloma1_1714",
"Granuloma1_1717",
"Granuloma1_1721",
"Granuloma1_1738",
"Granuloma1_1751",
"Granuloma1_1757",
"Granuloma1_176",
"Granuloma1_1776",
"Granuloma1_1781",
"Granuloma1_1797",
"Granuloma1_18",
"Granuloma1_1807",
"Granuloma1_1808",
"Granuloma1_181",
"Granuloma1_1817",
"Granuloma1_1828",
"Granuloma1_1833",
"Granuloma1_1838",
"Granuloma1_1842",
"Granuloma1_1846",
"Granuloma1_1865",
"Granuloma1_1879",
"Granuloma1_1888",
"Granuloma1_1891",
"Granuloma1_1896",
"Granuloma1_1900",
"Granuloma1_1912",
"Granuloma1_1913",
"Granuloma1_1914",
"Granuloma1_1917",
"Granuloma1_1918",
"Granuloma1_192",
"Granuloma1_1920",
"Granuloma1_1922",
"Granuloma1_1924",
"Granuloma1_1925",
"Granuloma1_1926",
"Granuloma1_1929",
"Granuloma1_193",
"Granuloma1_1930",
"Granuloma1_1936",
"Granuloma1_1937",
"Granuloma1_1939",
"Granuloma1_1940",
"Granuloma1_1944",
"Granuloma1_1954",
"Granuloma1_1955",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAED1 Expression": {
"name": "AAED1 Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.885,
0.911,
0.935,
0.942,
0.958,
0.973,
1.468,
1.534,
1.105,
1.153,
2.128,
0.991,
2.135,
1.343,
0.989,
1.383,
0.991,
0.749,
1.193,
0.517,
0.654,
0.667,
0.764,
1.194,
0.768,
0.778,
1.263,
0.878,
0.933,
1.051,
1.061,
1.064,
0.879,
1.569,
1.143,
1.323,
1.329,
1.929,
1.369,
1.397,
1.386,
1.952,
2.387,
2.039,
1.028,
2.072,
1.501,
1.502,
2.086,
1.563,
1.578,
1.635,
1.047,
0.553,
0.687,
1.166,
0.753,
1.199,
0.784,
1.226,
1.275,
1.372,
0.952,
0.986,
0.986,
0.991,
1.535,
1.089,
1.097,
1.106,
1.24,
1.282,
1.285,
1.302,
1.865,
1.079,
1.333,
1.084,
2.049,
1.491,
1.582,
1.558,
1.584,
1.614,
1.622,
1.092,
1.798,
1.804,
1.118,
0.498,
1.189,
1.196,
1.003,
1.055,
1.625,
1.221,
1.23,
1.322,
1.366,
1.466,
1.136,
2.045,
1.483,
1.535,
1.578,
1.685,
1.617,
2.333,
1.82,
0.517,
0.956,
0.608,
0.657,
0.688,
0.697,
0.703,
1.139,
0.719,
0.739,
0.743,
0.739,
0.769,
1.2,
1.236,
0.8,
1.267,
1.206,
0.864,
0.87,
1.363,
0.897,
0.907,
1.371,
0.909,
1.373,
0.905,
0.908,
0.927,
1.718,
0.936,
1.224,
0.948,
0.955,
1.454,
0.984,
1.004,
1.016,
1.531,
1.054,
1.061,
1.069,
1.08,
1.084,
1.087,
1.1,
0.932,
1.104,
1.12,
1.12,
1.129,
1.128,
1.146,
1.152,
1.154,
1.259,
1.161,
1.166,
1.176,
2.083,
1.253,
1.211,
1.217,
1.234,
1.233,
1.268,
1.238,
1.244,
1.247,
1.289,
1.835,
1.295,
1.302,
1.31,
1.305,
1.863,
1.314,
1.315,
0.584,
1.329,
1.343,
1.916,
1.368,
1.939,
1.998,
1.445,
1.451,
1.291,
1.501,
1.515,
2.124,
0.962,
1.563,
1.567,
1.297,
1.595,
1.623,
1.641,
1.648,
1.66,
1.655,
1.735,
1.791,
1.826,
1.854,
1.874,
1.955,
0.982,
0.984,
0.996,
0.679,
0.683,
1.891,
0.689,
1.12,
0.712,
0.742,
0.774,
0.787,
1.901,
0.799,
1.293,
0.843,
0.851,
0.869,
0.904,
0.958,
0.962,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAGAB Cells": {
"name": "AAGAB Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_101",
"Granuloma1_1013",
"Granuloma1_1015",
"Granuloma1_1064",
"Granuloma1_1065",
"Granuloma1_1068",
"Granuloma1_1071",
"Granuloma1_1078",
"Granuloma1_1099",
"Granuloma1_1101",
"Granuloma1_1105",
"Granuloma1_114",
"Granuloma1_1152",
"Granuloma1_1170",
"Granuloma1_1184",
"Granuloma1_1193",
"Granuloma1_1196",
"Granuloma1_120",
"Granuloma1_1202",
"Granuloma1_1226",
"Granuloma1_125",
"Granuloma1_1272",
"Granuloma1_1278",
"Granuloma1_1286",
"Granuloma1_1311",
"Granuloma1_134",
"Granuloma1_1383",
"Granuloma1_1388",
"Granuloma1_1395",
"Granuloma1_1412",
"Granuloma1_1414",
"Granuloma1_1427",
"Granuloma1_1457",
"Granuloma1_1460",
"Granuloma1_1483",
"Granuloma1_1491",
"Granuloma1_1494",
"Granuloma1_1498",
"Granuloma1_1501",
"Granuloma1_1504",
"Granuloma1_1529",
"Granuloma1_1533",
"Granuloma1_1534",
"Granuloma1_1538",
"Granuloma1_1539",
"Granuloma1_156",
"Granuloma1_1567",
"Granuloma1_1568",
"Granuloma1_1572",
"Granuloma1_1579",
"Granuloma1_1607",
"Granuloma1_1614",
"Granuloma1_1626",
"Granuloma1_1627",
"Granuloma1_1657",
"Granuloma1_1673",
"Granuloma1_170",
"Granuloma1_1735",
"Granuloma1_1740",
"Granuloma1_1751",
"Granuloma1_1756",
"Granuloma1_1779",
"Granuloma1_1783",
"Granuloma1_1805",
"Granuloma1_1816",
"Granuloma1_1846",
"Granuloma1_1848",
"Granuloma1_1891",
"Granuloma1_1914",
"Granuloma1_1915",
"Granuloma1_1946",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAGAB Expression": {
"name": "AAGAB Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.968,
0.997,
1.007,
0.517,
0.61,
1.05,
1.194,
0.832,
1.064,
1.065,
1.136,
1.028,
1.502,
1.585,
1.711,
0.553,
1.034,
1.039,
0.769,
1.079,
1.046,
1.407,
1.372,
1.448,
1.577,
1.091,
1.982,
0.761,
1.055,
1.314,
1.35,
1.466,
1.634,
1.659,
0.54,
0.697,
0.724,
0.741,
0.767,
0.772,
0.892,
1.369,
0.904,
1.384,
0.907,
1.227,
0.99,
0.99,
1.009,
1.037,
1.12,
1.129,
1.152,
1.163,
1.221,
1.247,
1.272,
1.993,
1.432,
2.013,
1.46,
1.505,
1.515,
1.553,
1.588,
1.655,
1.664,
1.854,
0.617,
0.688,
0.902,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAK1 Cells": {
"name": "AAK1 Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_10",
"Granuloma1_1007",
"Granuloma1_1014",
"Granuloma1_103",
"Granuloma1_1049",
"Granuloma1_1051",
"Granuloma1_1062",
"Granuloma1_1065",
"Granuloma1_1067",
"Granuloma1_1079",
"Granuloma1_1080",
"Granuloma1_1084",
"Granuloma1_1093",
"Granuloma1_1096",
"Granuloma1_1103",
"Granuloma1_1104",
"Granuloma1_1107",
"Granuloma1_1108",
"Granuloma1_1116",
"Granuloma1_1121",
"Granuloma1_1128",
"Granuloma1_1131",
"Granuloma1_114",
"Granuloma1_1144",
"Granuloma1_1148",
"Granuloma1_115",
"Granuloma1_1153",
"Granuloma1_1154",
"Granuloma1_1158",
"Granuloma1_1168",
"Granuloma1_117",
"Granuloma1_1172",
"Granuloma1_1189",
"Granuloma1_1190",
"Granuloma1_1193",
"Granuloma1_1197",
"Granuloma1_1206",
"Granuloma1_1210",
"Granuloma1_1211",
"Granuloma1_1219",
"Granuloma1_1221",
"Granuloma1_1230",
"Granuloma1_1232",
"Granuloma1_1234",
"Granuloma1_125",
"Granuloma1_1259",
"Granuloma1_1262",
"Granuloma1_1263",
"Granuloma1_127",
"Granuloma1_1271",
"Granuloma1_1282",
"Granuloma1_1284",
"Granuloma1_1297",
"Granuloma1_1299",
"Granuloma1_1326",
"Granuloma1_1344",
"Granuloma1_1374",
"Granuloma1_1376",
"Granuloma1_1393",
"Granuloma1_1403",
"Granuloma1_1405",
"Granuloma1_1406",
"Granuloma1_1407",
"Granuloma1_1412",
"Granuloma1_1413",
"Granuloma1_1434",
"Granuloma1_1440",
"Granuloma1_1443",
"Granuloma1_1482",
"Granuloma1_1483",
"Granuloma1_1486",
"Granuloma1_1489",
"Granuloma1_1491",
"Granuloma1_1500",
"Granuloma1_1501",
"Granuloma1_1504",
"Granuloma1_1513",
"Granuloma1_1536",
"Granuloma1_1538",
"Granuloma1_1557",
"Granuloma1_1559",
"Granuloma1_1562",
"Granuloma1_1564",
"Granuloma1_1566",
"Granuloma1_1581",
"Granuloma1_1587",
"Granuloma1_1608",
"Granuloma1_1617",
"Granuloma1_163",
"Granuloma1_1632",
"Granuloma1_164",
"Granuloma1_1647",
"Granuloma1_1652",
"Granuloma1_1668",
"Granuloma1_1671",
"Granuloma1_1694",
"Granuloma1_1703",
"Granuloma1_1726",
"Granuloma1_1728",
"Granuloma1_1729",
"Granuloma1_1736",
"Granuloma1_1740",
"Granuloma1_1745",
"Granuloma1_1749",
"Granuloma1_1753",
"Granuloma1_1770",
"Granuloma1_1776",
"Granuloma1_1793",
"Granuloma1_1815",
"Granuloma1_1840",
"Granuloma1_1865",
"Granuloma1_1884",
"Granuloma1_1898",
"Granuloma1_1902",
"Granuloma1_1906",
"Granuloma1_1917",
"Granuloma1_1922",
"Granuloma1_1923",
"Granuloma1_1924",
"Granuloma1_1935",
"Granuloma1_1936",
"Granuloma1_1940",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAK1 Expression": {
"name": "AAK1 Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
1.128,
0.958,
1.0,
1.468,
1.343,
1.387,
0.443,
0.986,
0.654,
0.845,
0.878,
0.887,
0.978,
1.051,
1.102,
1.114,
1.663,
1.681,
1.303,
1.337,
1.386,
1.395,
1.028,
1.474,
1.49,
1.028,
1.51,
1.529,
1.545,
1.578,
1.015,
1.617,
1.769,
1.836,
0.553,
0.653,
0.828,
0.914,
0.952,
0.991,
1.03,
1.115,
1.121,
1.138,
1.046,
1.284,
1.285,
1.287,
1.079,
1.325,
1.419,
1.432,
1.516,
1.518,
1.617,
1.707,
1.85,
1.862,
1.003,
1.23,
1.775,
1.244,
1.262,
1.314,
1.322,
1.499,
2.127,
1.572,
0.528,
0.54,
0.982,
0.657,
0.697,
0.752,
0.767,
1.203,
0.833,
0.906,
0.914,
0.952,
0.961,
0.968,
0.98,
0.984,
1.054,
1.073,
1.635,
1.131,
1.259,
1.16,
1.259,
1.207,
1.233,
1.236,
1.244,
1.305,
1.32,
1.401,
1.407,
1.406,
1.43,
1.432,
1.435,
1.445,
1.452,
1.485,
1.501,
2.121,
1.578,
1.653,
1.735,
2.421,
1.905,
1.967,
2.118,
0.679,
1.12,
0.713,
0.712,
0.84,
0.842,
0.869,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAMDC Cells": {
"name": "AAMDC Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_10",
"Granuloma1_1010",
"Granuloma1_1013",
"Granuloma1_1019",
"Granuloma1_1028",
"Granuloma1_1066",
"Granuloma1_108",
"Granuloma1_1080",
"Granuloma1_109",
"Granuloma1_11",
"Granuloma1_1109",
"Granuloma1_111",
"Granuloma1_1112",
"Granuloma1_1128",
"Granuloma1_1135",
"Granuloma1_114",
"Granuloma1_1196",
"Granuloma1_1207",
"Granuloma1_1214",
"Granuloma1_1217",
"Granuloma1_1229",
"Granuloma1_1245",
"Granuloma1_1250",
"Granuloma1_1258",
"Granuloma1_1261",
"Granuloma1_1264",
"Granuloma1_1283",
"Granuloma1_1306",
"Granuloma1_131",
"Granuloma1_1334",
"Granuloma1_1348",
"Granuloma1_1395",
"Granuloma1_1403",
"Granuloma1_1421",
"Granuloma1_1456",
"Granuloma1_1498",
"Granuloma1_1508",
"Granuloma1_1514",
"Granuloma1_1540",
"Granuloma1_1554",
"Granuloma1_1569",
"Granuloma1_1581",
"Granuloma1_160",
"Granuloma1_1600",
"Granuloma1_1620",
"Granuloma1_166",
"Granuloma1_1673",
"Granuloma1_1697",
"Granuloma1_170",
"Granuloma1_1712",
"Granuloma1_1728",
"Granuloma1_1745",
"Granuloma1_1758",
"Granuloma1_1773",
"Granuloma1_178",
"Granuloma1_180",
"Granuloma1_1826",
"Granuloma1_1858",
"Granuloma1_1861",
"Granuloma1_1883",
"Granuloma1_1920",
"Granuloma1_1922",
"Granuloma1_1924",
"Granuloma1_1930",
"Granuloma1_1942",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAMDC Expression": {
"name": "AAMDC Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.528,
0.982,
0.997,
1.567,
1.124,
0.637,
0.998,
0.878,
1.483,
0.533,
1.194,
1.011,
1.783,
1.946,
1.42,
1.028,
0.645,
0.825,
1.8,
0.986,
1.106,
1.227,
1.239,
1.355,
1.282,
1.299,
1.428,
2.142,
1.081,
1.658,
1.716,
1.055,
1.23,
1.408,
1.62,
0.741,
0.798,
0.832,
0.908,
0.942,
0.996,
1.054,
1.244,
1.1,
1.133,
1.271,
1.247,
1.313,
1.272,
1.353,
1.407,
1.435,
1.465,
1.488,
1.848,
1.302,
1.615,
1.714,
1.72,
1.812,
0.689,
0.709,
0.712,
0.799,
0.895,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAMP Cells": {
"name": "AAMP Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1",
"Granuloma1_1008",
"Granuloma1_1012",
"Granuloma1_1013",
"Granuloma1_1014",
"Granuloma1_1016",
"Granuloma1_102",
"Granuloma1_1020",
"Granuloma1_1030",
"Granuloma1_1031",
"Granuloma1_1034",
"Granuloma1_1038",
"Granuloma1_1040",
"Granuloma1_1045",
"Granuloma1_1050",
"Granuloma1_1056",
"Granuloma1_1062",
"Granuloma1_1063",
"Granuloma1_1064",
"Granuloma1_1065",
"Granuloma1_107",
"Granuloma1_1070",
"Granuloma1_1071",
"Granuloma1_1072",
"Granuloma1_1073",
"Granuloma1_1076",
"Granuloma1_1079",
"Granuloma1_1083",
"Granuloma1_1084",
"Granuloma1_1085",
"Granuloma1_1087",
"Granuloma1_1095",
"Granuloma1_1097",
"Granuloma1_11",
"Granuloma1_110",
"Granuloma1_1105",
"Granuloma1_1110",
"Granuloma1_1113",
"Granuloma1_1122",
"Granuloma1_1128",
"Granuloma1_1136",
"Granuloma1_1141",
"Granuloma1_115",
"Granuloma1_1165",
"Granuloma1_1166",
"Granuloma1_1173",
"Granuloma1_1178",
"Granuloma1_118",
"Granuloma1_1195",
"Granuloma1_1196",
"Granuloma1_1197",
"Granuloma1_1200",
"Granuloma1_1205",
"Granuloma1_1214",
"Granuloma1_1217",
"Granuloma1_1218",
"Granuloma1_1222",
"Granuloma1_1227",
"Granuloma1_1228",
"Granuloma1_1229",
"Granuloma1_1231",
"Granuloma1_1233",
"Granuloma1_1234",
"Granuloma1_1236",
"Granuloma1_1239",
"Granuloma1_1244",
"Granuloma1_1250",
"Granuloma1_126",
"Granuloma1_1264",
"Granuloma1_1271",
"Granuloma1_1273",
"Granuloma1_1278",
"Granuloma1_1280",
"Granuloma1_1282",
"Granuloma1_1287",
"Granuloma1_1288",
"Granuloma1_1289",
"Granuloma1_1292",
"Granuloma1_1293",
"Granuloma1_1294",
"Granuloma1_1295",
"Granuloma1_13",
"Granuloma1_1306",
"Granuloma1_1307",
"Granuloma1_1322",
"Granuloma1_1327",
"Granuloma1_133",
"Granuloma1_1343",
"Granuloma1_1350",
"Granuloma1_1357",
"Granuloma1_1384",
"Granuloma1_1387",
"Granuloma1_139",
"Granuloma1_1392",
"Granuloma1_1393",
"Granuloma1_1397",
"Granuloma1_14",
"Granuloma1_1414",
"Granuloma1_1418",
"Granuloma1_1424",
"Granuloma1_143",
"Granuloma1_1430",
"Granuloma1_1434",
"Granuloma1_145",
"Granuloma1_1456",
"Granuloma1_1457",
"Granuloma1_1459",
"Granuloma1_146",
"Granuloma1_1469",
"Granuloma1_1477",
"Granuloma1_1481",
"Granuloma1_1482",
"Granuloma1_1483",
"Granuloma1_1484",
"Granuloma1_1486",
"Granuloma1_1491",
"Granuloma1_1496",
"Granuloma1_1499",
"Granuloma1_15",
"Granuloma1_150",
"Granuloma1_1508",
"Granuloma1_1514",
"Granuloma1_1516",
"Granuloma1_1517",
"Granuloma1_152",
"Granuloma1_1523",
"Granuloma1_1528",
"Granuloma1_1529",
"Granuloma1_1533",
"Granuloma1_1534",
"Granuloma1_1536",
"Granuloma1_1538",
"Granuloma1_1539",
"Granuloma1_1540",
"Granuloma1_1541",
"Granuloma1_1546",
"Granuloma1_1548",
"Granuloma1_1555",
"Granuloma1_1556",
"Granuloma1_1562",
"Granuloma1_1565",
"Granuloma1_1566",
"Granuloma1_1574",
"Granuloma1_158",
"Granuloma1_1585",
"Granuloma1_1587",
"Granuloma1_1588",
"Granuloma1_1590",
"Granuloma1_1593",
"Granuloma1_1595",
"Granuloma1_1596",
"Granuloma1_16",
"Granuloma1_1601",
"Granuloma1_1604",
"Granuloma1_1612",
"Granuloma1_1614",
"Granuloma1_1615",
"Granuloma1_1616",
"Granuloma1_1618",
"Granuloma1_1619",
"Granuloma1_1625",
"Granuloma1_1638",
"Granuloma1_1639",
"Granuloma1_164",
"Granuloma1_1642",
"Granuloma1_1646",
"Granuloma1_1653",
"Granuloma1_1654",
"Granuloma1_1656",
"Granuloma1_1657",
"Granuloma1_166",
"Granuloma1_1661",
"Granuloma1_1663",
"Granuloma1_1664",
"Granuloma1_1672",
"Granuloma1_1674",
"Granuloma1_1684",
"Granuloma1_169",
"Granuloma1_1691",
"Granuloma1_1698",
"Granuloma1_17",
"Granuloma1_1701",
"Granuloma1_1709",
"Granuloma1_1712",
"Granuloma1_172",
"Granuloma1_1723",
"Granuloma1_1725",
"Granuloma1_173",
"Granuloma1_1737",
"Granuloma1_1741",
"Granuloma1_1743",
"Granuloma1_175",
"Granuloma1_1762",
"Granuloma1_1767",
"Granuloma1_1774",
"Granuloma1_1788",
"Granuloma1_1795",
"Granuloma1_1798",
"Granuloma1_1805",
"Granuloma1_1810",
"Granuloma1_1818",
"Granuloma1_1819",
"Granuloma1_1822",
"Granuloma1_1826",
"Granuloma1_1844",
"Granuloma1_1855",
"Granuloma1_1871",
"Granuloma1_1877",
"Granuloma1_1888",
"Granuloma1_19",
"Granuloma1_1907",
"Granuloma1_1911",
"Granuloma1_1912",
"Granuloma1_1913",
"Granuloma1_1914",
"Granuloma1_1916",
"Granuloma1_1919",
"Granuloma1_1923",
"Granuloma1_1924",
"Granuloma1_1926",
"Granuloma1_1937",
"Granuloma1_1938",
"Granuloma1_1940",
"Granuloma1_1941",
"Granuloma1_1946",
"Granuloma1_1952",
"Granuloma1_1954",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAMP Expression": {
"name": "AAMP Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.561,
0.973,
0.983,
1.486,
1.0,
1.024,
1.476,
1.056,
1.153,
1.678,
1.706,
1.243,
1.25,
1.314,
1.384,
1.522,
0.443,
1.002,
0.856,
0.986,
0.995,
0.764,
1.194,
0.768,
0.778,
0.808,
0.845,
0.884,
0.887,
1.356,
0.933,
1.038,
1.051,
0.879,
1.002,
1.136,
1.21,
1.26,
2.243,
1.386,
1.435,
1.468,
1.028,
1.565,
1.566,
1.635,
1.663,
1.026,
0.975,
1.034,
0.653,
0.753,
0.807,
0.987,
1.473,
0.988,
1.037,
1.089,
1.608,
1.618,
1.124,
1.123,
1.138,
1.164,
1.188,
1.223,
1.239,
1.07,
1.299,
1.325,
1.333,
1.372,
1.381,
1.419,
1.449,
1.451,
1.464,
1.479,
1.477,
2.065,
1.501,
0.55,
1.56,
1.563,
1.614,
1.627,
1.088,
1.686,
1.725,
2.359,
2.067,
1.078,
1.126,
0.95,
1.003,
1.111,
0.566,
1.35,
1.383,
1.415,
1.136,
1.474,
1.499,
1.685,
1.62,
2.224,
1.644,
1.165,
1.733,
2.42,
1.109,
0.528,
0.54,
0.583,
0.607,
0.697,
0.739,
0.739,
1.184,
1.73,
0.798,
0.832,
0.836,
0.838,
1.206,
0.873,
1.35,
1.356,
1.369,
0.904,
0.906,
1.384,
0.907,
0.908,
0.913,
1.399,
0.936,
0.948,
1.434,
0.968,
1.459,
2.237,
1.016,
1.241,
1.574,
1.073,
1.066,
1.076,
1.084,
1.087,
1.093,
0.571,
1.102,
1.106,
1.129,
1.129,
1.128,
1.13,
1.139,
1.138,
1.148,
1.182,
1.713,
1.259,
1.194,
1.207,
1.75,
1.227,
1.217,
1.755,
1.271,
1.231,
1.224,
1.238,
1.251,
1.25,
1.28,
1.267,
1.31,
1.324,
0.584,
1.319,
1.892,
1.353,
1.274,
1.381,
1.394,
1.274,
1.422,
1.426,
1.433,
1.83,
1.466,
1.477,
1.499,
1.525,
2.122,
1.545,
1.553,
1.563,
1.601,
1.596,
2.199,
1.615,
2.241,
1.69,
1.741,
1.77,
1.826,
0.62,
2.152,
0.539,
0.607,
0.984,
1.27,
0.67,
1.102,
1.125,
0.712,
0.774,
0.843,
0.843,
0.869,
0.885,
0.902,
1.988,
0.958,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AANAT Cells": {
"name": "AANAT Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": ["Granuloma1_1160", "Granuloma1_1926"],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AANAT Expression": {
"name": "AANAT Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [1.551, 1.206],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAR2 Cells": {
"name": "AAR2 Cells",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "cells",
"array_index": 0,
"values": [
"Granuloma1_1007",
"Granuloma1_1016",
"Granuloma1_1023",
"Granuloma1_1062",
"Granuloma1_1064",
"Granuloma1_1075",
"Granuloma1_1079",
"Granuloma1_1110",
"Granuloma1_1117",
"Granuloma1_1122",
"Granuloma1_1131",
"Granuloma1_1141",
"Granuloma1_1146",
"Granuloma1_1154",
"Granuloma1_1159",
"Granuloma1_1168",
"Granuloma1_1219",
"Granuloma1_127",
"Granuloma1_1282",
"Granuloma1_1287",
"Granuloma1_1289",
"Granuloma1_1316",
"Granuloma1_1327",
"Granuloma1_1335",
"Granuloma1_1340",
"Granuloma1_1355",
"Granuloma1_1365",
"Granuloma1_139",
"Granuloma1_1393",
"Granuloma1_1416",
"Granuloma1_1430",
"Granuloma1_1449",
"Granuloma1_1469",
"Granuloma1_149",
"Granuloma1_15",
"Granuloma1_1508",
"Granuloma1_1517",
"Granuloma1_1534",
"Granuloma1_1536",
"Granuloma1_1537",
"Granuloma1_1554",
"Granuloma1_1565",
"Granuloma1_1579",
"Granuloma1_1582",
"Granuloma1_1583",
"Granuloma1_1587",
"Granuloma1_1589",
"Granuloma1_1617",
"Granuloma1_1623",
"Granuloma1_1627",
"Granuloma1_1653",
"Granuloma1_166",
"Granuloma1_1696",
"Granuloma1_1697",
"Granuloma1_1779",
"Granuloma1_178",
"Granuloma1_1805",
"Granuloma1_1818",
"Granuloma1_1851",
"Granuloma1_19",
"Granuloma1_1912",
"Granuloma1_1917",
"Granuloma1_1918",
"Granuloma1_1920",
"Granuloma1_1929",
"Granuloma1_1931",
"Granuloma1_1947",
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
"AAR2 Expression": {
"name": "AAR2 Expression",
"cluster_name": "dense_matrix_19_genes_1000_cells.txt",
"array_type": "expression",
"array_index": 0,
"values": [
0.958,
1.024,
1.092,
0.443,
0.517,
0.786,
0.845,
1.21,
1.313,
1.337,
1.395,
1.468,
1.49,
1.529,
1.537,
1.578,
0.991,
1.079,
1.419,
1.449,
1.464,
1.595,
1.627,
1.652,
1.679,
1.75,
1.798,
1.126,
1.003,
1.366,
2.045,
1.583,
1.733,
1.188,
0.563,
0.798,
0.838,
0.904,
0.906,
0.905,
1.419,
0.975,
1.037,
1.055,
1.061,
1.073,
1.07,
1.131,
1.146,
1.163,
1.217,
1.271,
1.314,
1.313,
1.505,
1.301,
1.553,
1.601,
1.674,
0.62,
0.607,
0.679,
0.683,
0.689,
0.787,
0.822,
0.911,
],
"subsample_threshold": None,
"subsample_annotation": None,
"linear_data_type": "Gene",
"study_id": ObjectId("5d276a50421aa9117c982845"),
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
},
},
"gene_models": {
"A1BG": {
"name": "A1BG",
"searchable_name": "a1bg",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"A1CF": {
"name": "A1CF",
"searchable_name": "a1cf",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"A2ML1": {
"name": "A2ML1",
"searchable_name": "a2ml1",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"A3GALT2": {
"name": "A3GALT2",
"searchable_name": "a3galt2",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"A4GALT": {
"name": "A4GALT",
"searchable_name": "a4galt",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"A4GNT": {
"name": "A4GNT",
"searchable_name": "a4gnt",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AAAS": {
"name": "AAAS",
"searchable_name": "aaas",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AACS": {
"name": "AACS",
"searchable_name": "aacs",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AADACL2": {
"name": "AADACL2",
"searchable_name": "aadacl2",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AADACL3": {
"name": "AADACL3",
"searchable_name": "aadacl3",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AADACL4": {
"name": "AADACL4",
"searchable_name": "aadacl4",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AADAT": {
"name": "AADAT",
"searchable_name": "aadat",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AAED1": {
"name": "AAED1",
"searchable_name": "aaed1",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AAGAB": {
"name": "AAGAB",
"searchable_name": "aagab",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AAK1": {
"name": "AAK1",
"searchable_name": "aak1",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AAMDC": {
"name": "AAMDC",
"searchable_name": "aamdc",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AAMP": {
"name": "AAMP",
"searchable_name": "aamp",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AANAT": {
"name": "AANAT",
"searchable_name": "aanat",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
"AAR2": {
"name": "AAR2",
"searchable_name": "aar2",
"study_file_id": ObjectId("5dd5ae25421aa910a723a337"),
"study_id": ObjectId("5d276a50421aa9117c982845"),
"gene_id": None,
},
},
}
|
#!/usr/bin/env python
# Kamek - build tool for custom C++ code in New Super Mario Bros. Wii
# All rights reserved (c) Treeki 2010
# Some function definitions by megazig
# Requires PyYAML
version_str = 'Kamek 0.1 by Treeki'
import binascii
import os
import os.path
import shutil
import struct
import subprocess
import sys
import tempfile
import yaml
import hooks
u32 = struct.Struct('>I')  # big-endian unsigned 32-bit packer used for all patch words
# Global build options, adjusted by parse_cmd_options() from sys.argv.
verbose = True
use_rels = True
override_config_file = None
def parse_cmd_options():
    """Scan sys.argv for build flags: '--no-rels' and '--configs=<path>'."""
    global use_rels, override_config_file
    args = sys.argv
    if '--no-rels' in args:
        use_rels = False
    for candidate in args:
        if candidate.startswith('--configs='):
            override_config_file = candidate[len('--configs='):]
def print_debug(s):
    """Print a progress message prefixed with '*' when verbose mode is on."""
    if verbose:
        print('* ' + s)
def read_configs(filename):
    """Load and parse the YAML configuration file at *filename*."""
    with open(filename, 'r') as f:
        return yaml.safe_load(f.read())
current_unique_id = 0

def generate_unique_id():
    """Return a fresh integer, unique within this process run.

    Used for temporary .o filenames, to ensure object files never
    overwrite each other.
    """
    global current_unique_id
    current_unique_id = current_unique_id + 1
    return current_unique_id
def windowsize(path):  # Windows-ize, not "window size" :P
    """Translate a WSL path into a Windows-style path via `wslpath -m`."""
    output = subprocess.check_output(['wslpath', '-m', path])
    return output.rstrip('\n')
def align_addr_up(addr, align):
    """Round *addr* up to the next multiple of *align* (a power of two)."""
    mask = align - 1
    return (addr + mask) & ~mask
def generate_riiv_mempatch(offset, data):
    """Return one Riivolution <memory> XML element writing *data* at *offset*.

    binascii.hexlify returns bytes on Python 3; decode to ASCII so the XML
    attribute contains plain hex digits rather than a b'...' repr.
    """
    hex_value = binascii.hexlify(data).decode('ascii')
    return '<memory offset="0x%08X" value="%s" />' % (offset, hex_value)
def generate_ocarina_patch(destOffset, data):
    """Return Ocarina/Gecko codes that write *data* at RAM address *destOffset*.

    Emits a 32-bit write (04xxxxxx) per full word, then a 16-bit write
    (02xxxxxx) and/or an 8-bit write (00xxxxxx) for the trailing bytes.
    """
    def hexstr(chunk):
        # bytes on Python 3, str on Python 2 -- normalise to an ASCII string
        return binascii.hexlify(chunk).decode('ascii')

    out = []
    count = len(data)
    sourceOffset = 0
    destOffset -= 0x80000000
    for _ in range(count >> 2):
        out.append('%08X %s' % (destOffset | 0x4000000, hexstr(data[sourceOffset:sourceOffset + 4])))
        sourceOffset += 4
        destOffset += 4
    # take care of the trailing 1-3 bytes
    remainder = count % 4
    if remainder == 3:
        out.append('%08X 0000%s' % (destOffset | 0x2000000, hexstr(data[sourceOffset:sourceOffset + 2])))
        # the final byte lands two bytes past the 16-bit write; the old code
        # wrote it at destOffset, overlapping the 16-bit write above
        out.append('%08X 000000%s' % (destOffset + 2, hexstr(data[sourceOffset + 2:sourceOffset + 3])))
    elif remainder == 2:
        out.append('%08X 0000%s' % (destOffset | 0x2000000, hexstr(data[sourceOffset:sourceOffset + 2])))
    elif remainder == 1:
        out.append('%08X 000000%s' % (destOffset, hexstr(data[sourceOffset:sourceOffset + 1])))
    return '\n'.join(out)
def generate_kamek_patches(patchlist):
    """Serialise (address, data) patches into the KamekPatcher binary format.

    Each entry becomes either a block patch (word count | address | padded
    data) when the data is longer than 4 bytes, or a single 4-byte patch
    (address | data). The stream ends with an 0xFFFFFFFF sentinel word.
    Uses bytes throughout and floor division so the output is correct on
    both Python 2 and 3 (the old `/ 4` produced a float under Python 3).
    """
    pack_u32 = struct.Struct('>I').pack
    kamekpatch = b''
    for address, payload in patchlist:
        if len(payload) > 4:
            # block patch: word count (ceil of len/4), then address, then data
            kamekpatch += pack_u32((len(payload) + 3) // 4)
            kamekpatch += pack_u32(address)
            kamekpatch += payload
            # pad the data out to a whole number of words
            if len(payload) % 4 != 0:
                kamekpatch += b'\x00' * (4 - (len(payload) % 4))
        else:
            # single-word patch
            kamekpatch += pack_u32(address)
            kamekpatch += payload
    kamekpatch += pack_u32(0xFFFFFFFF)
    return kamekpatch
class KamekModule(object):
    """A single Kamek module: a YAML file listing C++ sources (and hooks)."""

    _requiredFields = ['source_files']

    def __init__(self, filename):
        """Load and validate the module description at *filename*.

        Raises ValueError if the YAML is not a mapping or a required
        field is missing.
        """
        self.modulePath = os.path.normpath(filename)
        self.moduleName = os.path.basename(self.modulePath)
        self.moduleDir = os.path.dirname(self.modulePath)
        with open(self.modulePath, 'r') as f:
            self.rawData = f.read()
        self.data = yaml.safe_load(self.rawData)
        if not isinstance(self.data, dict):
            # call-form raise works on Python 2 and 3; the old
            # 'raise ValueError, msg' comma form is a Python 3 syntax error
            raise ValueError('the module file %s is an invalid format (it should be a YAML mapping)' % self.moduleName)
        # verify every mandatory key is present before the build starts
        for field in self._requiredFields:
            if field not in self.data:
                raise ValueError('Missing field in the module file %s: %s' % (self.moduleName, field))
class KamekBuilder(object):
    """Drives one full build: compile, link, parse symbols, emit patch files."""

    def __init__(self, project, configs):
        self.project = project
        self.configs = configs

    def build(self):
        """Build the project once per configuration in self.configs."""
        print_debug('Starting build')
        self._prepare_dirs()
        for config in self.configs:
            self._set_config(config)
            self._configTempDir = tempfile.mkdtemp()
            print_debug('Temp files for this configuration are in: '+self._configTempDir)
            # default load address; a project may override it via 'code_address'
            self._builtCodeAddr = 0x80001800
            if 'code_address' in self.project.data:
                self._builtCodeAddr = self.project.data['code_address']
            self._patches = []
            self._rel_patches = []
            self._hooks = []
            # hook setup: one shared context object per hook type that wants one
            self._hook_contexts = {}
            for name, hookType in hooks.HookTypes.iteritems():
                if hookType.has_context:
                    self._hook_contexts[hookType] = hookType.context_type()
            self._create_hooks()
            self._compile_modules()
            self._link()
            self._read_symbol_map()
            for hook in self._hooks:
                hook.create_patches()
            self._create_patch()
            shutil.rmtree(self._configTempDir)

    def _prepare_dirs(self):
        """Resolve the output directory and create it if needed."""
        self._outDir = self.project.makeRelativePath(self.project.data['output_dir'])
        print_debug('Project will be built in: '+self._outDir)
        if not os.path.isdir(self._outDir):
            os.makedirs(self._outDir)
            print_debug('Created that directory')

    def _set_config(self, config):
        """Select the active configuration and cache its key settings."""
        self._config = config
        print_debug('---')
        print_debug('Building for configuration: '+config['friendly_name'])
        self._config_short_name = config['short_name']
        # address range belonging to .rel files (patched through the loader)
        self._rel_area = (config['rel_area_start'], config['rel_area_end'])

    def _create_hooks(self):
        """Instantiate every hook declared by the project's modules."""
        print_debug('---')
        print_debug('Creating hooks')
        for m in self.project.modules:
            if 'hooks' in m.data:
                for hookData in m.data['hooks']:
                    assert 'name' in hookData and 'type' in hookData
                    print_debug('Hook: %s : %s' % (m.moduleName, hookData['name']))
                    if hookData['type'] in hooks.HookTypes:
                        hookType = hooks.HookTypes[hookData['type']]
                        hook = hookType(self, m, hookData)
                        self._hooks.append(hook)
                    else:
                        raise ValueError, 'Unknown hook type: %s' % hookData['type']

    def _compile_modules(self):
        """Compile every source file of every module into the temp dir."""
        print_debug('---')
        print_debug('Compiling modules')
        # NOTE(review): toolchain path is hard-coded to a WSL devkitPPC install
        cc_command = ['/mnt/c/devkitPro/devkitPPC/bin/powerpc-eabi-gcc.exe', '-nodefaultlibs', '-I.', '-fno-builtin', '-Os', '-fno-exceptions']
        for d in self._config['defines']:
            cc_command.append('-D%s' % d)
        for i in self._config['include_dirs']:
            cc_command.append('-I%s' % i)
        self._moduleFiles = []
        for m in self.project.modules:
            for normal_sourcefile in m.data['source_files']:
                print_debug('Compiling %s : %s' % (m.moduleName, normal_sourcefile))
                # unique name so .o files from different modules never collide
                objfile_name = '%d.o' % generate_unique_id()
                objfile = os.path.join(self._configTempDir, objfile_name)
                # the Windows-hosted compiler needs a Windows-style output path
                objfile_win = windowsize(self._configTempDir) + '\\' + objfile_name
                sourcefile = os.path.join(m.moduleDir, normal_sourcefile)
                new_command = cc_command + ['-c', '-o', objfile_win, windowsize(sourcefile)]
                errorVal = subprocess.call(new_command)
                if errorVal != 0:
                    print 'BUILD FAILED!'
                    print 'g++ returned %d - an error occurred while compiling %s' % (errorVal, sourcefile)
                    sys.exit(1)
                self._moduleFiles.append(objfile)
        print_debug('Compilation complete')

    def _link(self):
        """Link all compiled objects into one binary at the chosen address."""
        print_debug('---')
        print_debug('Linking project')
        self._mapFile = '%s_linkmap.map' % (self._config_short_name)
        self._outFile = '%s_out.bin' % (self._config_short_name)
        ld_command = ['/mnt/c/devkitPro/devkitPPC/bin/powerpc-eabi-ld.exe', '-L.']
        ld_command.append('-o')
        ld_command.append(windowsize(self._outDir) + '\\' + self._outFile)
        ld_command.append('-Ttext')
        ld_command.append('0x%08X' % self._builtCodeAddr)
        ld_command.append('-T')
        ld_command.append(self._config['linker_script'])
        ld_command.append('-Map')
        ld_command.append(windowsize(self._outDir) + '\\' + self._mapFile)
        ld_command.append('--no-demangle') # for debugging
        ld_command += [windowsize(f) for f in self._moduleFiles]
        errorVal = subprocess.call(ld_command)
        if errorVal != 0:
            print 'BUILD FAILED!'
            print 'ld returned %d' % errorVal
            sys.exit(1)
        print_debug('Linked successfully')

    def _read_symbol_map(self):
        """Parse the linker map: collect symbols and the code address range."""
        print_debug('---')
        print_debug('Reading symbol map')
        self._symbols = []
        file = open(self._outDir + '/' + self._mapFile, 'r')
        # skip ahead to the text segment start marker
        for line in file:
            if '__text_start' in line:
                self._textSegStart = int(line.split()[0],0)
                break
        # now read the individual symbols
        # this is probably a bad method to parse it, but whatever
        for line in file:
            if '__text_end' in line:
                self._textSegEnd = int(line.split()[0],0)
                break
            if not line.startswith(' '): continue
            # sym becomes [address, raw_name]; demangled name appended below
            sym = line.split()
            sym[0] = int(sym[0],0)
            self._symbols.append(sym)
        # we've found __text_end, so now we should be at the output section
        currentEndAddress = self._textSegEnd
        for line in file:
            if line[0] == '.':
                # probably a segment
                data = line.split()
                if len(data) < 3: continue
                segAddr = int(data[1],0)
                segSize = int(data[2],0)
                if segAddr+segSize > currentEndAddress:
                    currentEndAddress = segAddr+segSize
        self._codeStart = self._textSegStart
        self._codeEnd = currentEndAddress
        file.close()
        print_debug('Read, %d symbol(s) parsed' % len(self._symbols))
        # next up, run it through c++filt
        print_debug('Running c++filt')
        p = subprocess.Popen('/mnt/c/devkitPro/devkitPPC/bin/powerpc-eabi-c++filt.exe', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        symbolNameList = [sym[1] for sym in self._symbols]
        filtResult = p.communicate('\n'.join(symbolNameList))
        filteredSymbols = filtResult[0].splitlines()
        # attach the demangled name as a third column on each symbol row
        for sym, filt in zip(self._symbols, filteredSymbols):
            sym.append(filt)
        print_debug('Done. All symbols complete.')
        print_debug('Generated code is at 0x%08X .. 0x%08X' % (self._codeStart, self._codeEnd - 4))
        # NOTE(review): stray debug print -- consider removing
        print('SYMBOLS', self._symbols)

    def _find_func_by_symbol(self, find_symbol):
        """Return the address of the symbol whose demangled name matches."""
        for sym in self._symbols:
            # sym = [address, raw_name, demangled_name]
            if sym[2] == find_symbol:
                return sym[0]
        raise ValueError, 'Cannot find function: %s' % find_symbol

    def _add_patch(self, offset, data):
        """File a patch into the .rel list or the main list, by address."""
        if offset >= self._rel_area[0] and offset <= self._rel_area[1] and use_rels:
            self._rel_patches.append((offset, data))
        else:
            self._patches.append((offset, data))

    def _create_patch(self):
        """Write the Riivolution, Ocarina and KamekPatcher patch files."""
        print_debug('---')
        print_debug('Creating patch')
        # convert the .rel patches to KamekPatcher format
        if len(self._rel_patches) > 0:
            kamekpatch = generate_kamek_patches(self._rel_patches)
            #self._patches.append((0x817F4800, kamekpatch))
            self._patches.append((0x80002F60, kamekpatch))
        # add the outfile as a patch
        file = open(self._outDir + '/' + self._outFile, 'rb')
        patch = (self._codeStart, file.read())
        file.close()
        self._patches.append(patch)
        # generate a Riivolution patch
        riiv = open('%s/%s_riiv.xml' % (self._outDir, self._config['short_name']), 'w')
        for patch in self._patches:
            riiv.write(generate_riiv_mempatch(*patch) + '\n')
        riiv.close()
        # generate an Ocarina patch
        ocarina = open('%s/%s_ocarina.txt' % (self._outDir, self._config['short_name']), 'w')
        for patch in self._patches:
            ocarina.write(generate_ocarina_patch(*patch) + '\n')
        ocarina.close()
        # generate a KamekPatcher patch
        kpatch = open('%s/%s_loader.bin' % (self._outDir, self._config['short_name']), 'wb')
        kpatch.write(generate_kamek_patches(self._patches))
        kpatch.close()
        print_debug('Patches generated')
class KamekProject(object):
    """A Kamek project file: output settings plus the list of modules."""

    _requiredFields = ['output_dir', 'modules']

    def __init__(self, filename):
        """Load the project YAML at *filename* and its referenced modules.

        Raises ValueError if the YAML is not a mapping or a required
        field is missing.
        """
        self.projectPath = os.path.abspath(filename)
        self.projectName = os.path.basename(self.projectPath)
        self.projectDir = os.path.dirname(self.projectPath)
        with open(self.projectPath, 'r') as f:
            self.rawData = f.read()
        self.data = yaml.safe_load(self.rawData)
        if not isinstance(self.data, dict):
            # call-form raise works on Python 2 and 3; the old
            # 'raise ValueError, msg' comma form is a Python 3 syntax error
            raise ValueError('the project file is an invalid format (it should be a YAML mapping)')
        # verify the mandatory keys before doing any work
        for field in self._requiredFields:
            if field not in self.data:
                raise ValueError('Missing field in the project file: %s' % field)
        # load each module descriptor, resolved relative to the project dir
        self.modules = []
        for moduleName in self.data['modules']:
            modulePath = self.makeRelativePath(moduleName)
            self.modules.append(KamekModule(modulePath))

    def makeRelativePath(self, path):
        """Return *path* resolved relative to the project's directory."""
        return os.path.normpath(os.path.join(self.projectDir, path))

    def build(self):
        """Compile and link every configuration of this project."""
        builder = KamekBuilder(self, self.configs)
        builder.build()
def main():
    """Command-line entry point: parse flags, load the project, build it."""
    print version_str
    print
    if len(sys.argv) < 2:
        print 'No input file specified'
        sys.exit()
    parse_cmd_options()
    # the first positional argument is the project YAML path
    project = KamekProject(os.path.normpath(sys.argv[1]))
    # --configs= overrides the default configuration file
    if override_config_file:
        project.configs = read_configs(override_config_file)
    else:
        project.configs = read_configs('kamek_configs.yaml')
    project.build()

if __name__ == '__main__':
    main()
|
<reponame>dlee960504/nn_schematics
import sys
sys.path.append('../')
from pycore.tikzeng import *
# PlotNeuralNet layer list describing a two-branch network diagram:
# a detail branch and a semantic branch, joined by an aggregation layer,
# with CBAM/CAM attention blocks feeding both branches.
arch = [
    to_head('..'),
    to_cor(),
    to_begin(),
    # Detail branch (downsamples I/2 -> I/8)
    to_Conv_color('detail1_attn', s_filer="CBAM", n_filer='', height=32, depth=32, width=1, color=5),
    to_Conv_color('detail1', s_filer="I/2", n_filer='', offset='(1,0,0)', to='(detail1_attn-east)', height=32, depth=32, width=2),
    to_Conv_color('detail2', s_filer="I/4", n_filer='', offset='(2.5,0,0)', to='(detail1-east)', height=20, depth=20, width=4),
    to_connection('detail1', 'detail2'),
    to_Conv_color('detail3', s_filer="I/8", n_filer='', offset='(2,0,0)', to='(detail2-east)', height=12, depth=12, width=6),
    to_connection('detail2', 'detail3'),
    # semantic branch (deeper: downsamples I/2 -> I/32)
    to_Conv_color('semantic1', s_filer='I/2', n_filer='', offset='(0,-8,0)', to='(detail1_attn-south)', height=32, depth=32, width=2, color=1),
    to_Conv_color('semantic2', s_filer='I/4', n_filer='', offset='(2.5,0,0)', to='(semantic1-east)', height=20, depth=20, width=4, color=1),
    to_connection('semantic1', 'semantic2'),
    to_Conv_color('semantic3', s_filer='I/8', n_filer='', offset='(2,0,0)', to='(semantic2-east)', height=12, depth=12, width=6, color=1),
    to_connection('semantic2', 'semantic3'),
    to_Conv_color('semantic4', s_filer='I/16', n_filer='', offset='(2,0,0)', to='(semantic3-east)', height=8, depth=8, width=10, color=1),
    to_connection('semantic3', 'semantic4'),
    to_Conv_color('semantic5', s_filer='I/32', n_filer='', offset='(2,0,0)', to='(semantic4-east)', height=4, depth=4, width=16, color=1),
    to_connection('semantic4', 'semantic5'),
    # aggregation layer merging both branches
    to_Conv_color('aggregation', s_filer="Aggregation", n_filer='', offset='(4,6,0)', to='(semantic5-east)', height=12, depth=12, width=6, color=2),
    to_connection2('detail3', 'aggregation', 2.75, 'up'),
    to_connection2('semantic5', 'aggregation', 3, 'down'),
    # input (projected view)
    to_Conv_color('input', s_filer='projected view', n_filer='', offset='(-30,0,0)', to='(aggregation-west)', height=32, depth=100, width=1, color=3),
    # CAM attention applied to the input before both branches
    to_Conv_color('CAM', s_filer='CAM', n_filer='', offset='(2.5,0,0)', to='(input-east)', height=32, depth=100, width=4, color=4),
    to_connection('input', 'CAM'),
    to_connection3('CAM', 'detail1_attn', 6),
    to_connection3('CAM', 'semantic1', 6),
    # output
    to_Conv_color('output', s_filer='result', n_filer='', offset='(5,0,0)', to='(aggregation-east)', height=32, depth=100, width=1, color=3),
    to_connection('aggregation', 'output'),
    to_end()
]
def main():
    """Render the architecture diagram to a .tex file named after this script."""
    import os.path
    # os.path.splitext strips only the final extension; the previous
    # split('.')[0] produced an empty name for paths like './gen.py'
    namefile = os.path.splitext(str(sys.argv[0]))[0]
    to_generate(arch, namefile + '.tex')

if __name__ == '__main__':
    main()
|
<reponame>begeekmyfriend/cn-text-normalizer
# coding: utf-8
# The MIT License (MIT)
# Copyright (c) 2015 by <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
''' Chinese number <=> int/float conversion methods '''
__author__ = '<NAME> <<EMAIL>>; <NAME> <<EMAIL>>'
__version__ = '2018-06-01'
if 'constants':  # for code folding
    # plain and "big" (banker's, anti-fraud) digit forms for 0-9
    CHINESE_DIGIS = u'零一二三四五六七八九'
    BIG_CHINESE_DIGIS_SIMPLIFIED = u'零壹贰叁肆伍陆柒捌玖'
    BIG_CHINESE_DIGIS_TRADITIONAL = u'零壹貳參肆伍陸柒捌玖'
    # units 10 / 100 / 1000 / 10000 in big form
    SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'十百千万'
    SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'拾佰仟萬'
    # units from 10^8 upward; their exact power depends on the numbering type
    LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'亿兆京垓秭穰沟涧正载'
    LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'億兆京垓秭穰溝澗正載'
    SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'十百千万'
    SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'拾佰仟萬'
    # alternative written forms for zero and two
    ZERO_ALT = u'〇'
    TWO_ALTS = [u'两', u'兩']
    # [simplified, traditional] pairs for sign and decimal point
    POSITIVE = [u'正', u'正']
    NEGATIVE = [u'负', u'負']
    POINT = [u'点', u'點']
    NUMBERING_TYPES = ['low', 'mid', 'high']
if 'class definitions':  # for code folding
    pass

class ChineseChar(object):
    """
    A Chinese character with simplified and traditional string forms,
    e.g. simplified = '负', traditional = '負'.
    When converted to a string it shows the simplified form, falling back
    to the traditional form.
    """

    def __init__(self, simplified, traditional):
        self.simplified = simplified
        self.traditional = traditional
        # The old 'self.__repr__ = self.__str__' line was dead code:
        # special-method lookup on new-style classes ignores instance
        # attributes, and the class-level __repr__ already delegates.

    def __str__(self):
        # NOTE(review): returns None when both forms are falsy, which would
        # make str()/repr() raise TypeError -- confirm callers always
        # provide at least one form.
        return self.simplified or self.traditional or None

    def __repr__(self):
        return self.__str__()
class ChineseNumberUnit(ChineseChar):
    """
    A Chinese number unit, e.g. '陆' and '陸'.
    Each is a ChineseChar with additional big (banker's) form strings.
    """
    def __init__(self, power, simplified, traditional, big_s, big_t):
        super(ChineseNumberUnit, self).__init__(simplified, traditional)
        self.power = power  # this unit stands for 10**power
        self.big_s = big_s
        self.big_t = big_t

    def __str__(self):
        return '10^{}'.format(self.power)

    @classmethod
    def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False):
        """Build the unit at *index* from the (simplified, traditional) pair *value*.

        Small units ('十百千万') always get power index + 1; larger units
        depend on the numbering type: low -> index + 8,
        mid -> (index + 2) * 4, high -> 2 ** (index + 3).
        """
        if small_unit:
            # NOTE(review): big_s is value[1] here while the branches below
            # use value[0] -- looks asymmetric; confirm against upstream.
            return ChineseNumberUnit(power=index + 1,
                simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1])
        elif numbering_type == NUMBERING_TYPES[0]:
            return ChineseNumberUnit(power=index + 8,
                simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
        elif numbering_type == NUMBERING_TYPES[1]:
            return ChineseNumberUnit(power=(index + 2) * 4,
                simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
        elif numbering_type == NUMBERING_TYPES[2]:
            return ChineseNumberUnit(power=pow(2, index + 3),
                simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1])
        else:
            raise ValueError(
                'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type))
class ChineseNumberDigi(ChineseChar):
    """A single Chinese digit (0-9) with big and alternative written forms."""

    def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None):
        super(ChineseNumberDigi, self).__init__(simplified, traditional)
        self.value = value    # integer value of this digit
        self.big_s = big_s    # big (banker's) simplified form
        self.big_t = big_t    # big (banker's) traditional form
        self.alt_s = alt_s    # alternative simplified form, if any
        self.alt_t = alt_t    # alternative traditional form, if any

    def __str__(self):
        return str(self.value)

    @classmethod
    def create(cls, i, v):
        """Build digit *i* from the 4-tuple (simplified, traditional, big_s, big_t)."""
        return ChineseNumberDigi(i, v[0], v[1], v[2], v[3])
class ChineseMath(ChineseChar):
    """A math symbol (sign or decimal point) with its operator semantics."""

    def __init__(self, simplified, traditional, symbol, expression=None):
        super(ChineseMath, self).__init__(simplified, traditional)
        self.symbol = symbol            # ASCII operator, e.g. '+', '-', '.'
        self.expression = expression    # callable implementing the operator
        # math symbols have no separate big forms; reuse the plain ones
        self.big_s = simplified
        self.big_t = traditional
# short aliases used throughout the conversion code below
CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigi, ChineseMath

class CountingSystem(object):
    # simple namespace filled in by create_system(): units, digits, math
    pass
class MathSymbols(object):
    """
    Math symbols used in a Chinese number counting system (for both
    traditional and simplified Chinese), e.g.
        positive = ['正', '正']
        negative = ['负', '負']
        point    = ['点', '點']
    Iterating the object yields its symbol attributes in order.
    """

    def __init__(self, positive, negative, point):
        self.positive = positive
        self.negative = negative
        self.point = point

    def __iter__(self):
        # yield every symbol attribute (positive, negative, point)
        return iter(self.__dict__.values())
if 'create systems':  # for code folding
    def create_system(numbering_type=NUMBERING_TYPES[1]):
        """
        Create a numbering system depending on the numbering system type.
        NUMBERING_TYPES = ['low', 'mid', 'high']: Chinese numbering system type.
            low:  '兆' = '亿' * '十' = $10^{9}$,  '京' = '兆' * '十', etc.
            mid:  '兆' = '亿' * '万' = $10^{12}$, '京' = '兆' * '万', etc.
            high: '兆' = '亿' * '亿' = $10^{16}$, '京' = '兆' * '兆', etc.
        Returns a number counting system.
        """
        # chinese number units of '亿' and larger
        all_larger_units = zip(
            LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL)
        larger_units = [CNU.create(i, v, numbering_type, False)
                        for i, v in enumerate(all_larger_units)]
        # chinese number units of '十, 百, 千, 万'
        all_smaller_units = zip(
            SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL)
        smaller_units = [CNU.create(i, v, small_unit=True)
                         for i, v in enumerate(all_smaller_units)]
        # digits 0-9 as (plain, plain, big simplified, big traditional) tuples
        chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS,
                            BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL)
        digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)]
        # zero and two have alternative written forms
        digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT
        digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1]
        # sign and decimal-point symbols with their arithmetic meaning
        positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x)
        negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x)
        point_cn = CM(POINT[0], POINT[1], '.', lambda x,
                      y: float(str(x) + '.' + str(y)))
        system = CountingSystem()
        system.units = smaller_units + larger_units
        system.digits = digits
        system.math = MathSymbols(positive_cn, negative_cn, point_cn)
        return system
def cn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]):
    """Convert a Chinese numeral string to an int (or float if it has a decimal part)."""
    def get_symbol(char, system):
        # map one character to its unit / digit / math symbol object
        for u in system.units:
            if char in [u.traditional, u.simplified, u.big_s, u.big_t]:
                return u
        for d in system.digits:
            if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]:
                return d
        for m in system.math:
            if char in [m.traditional, m.simplified]:
                return m
        # NOTE(review): falls through to None for unrecognised characters

    def string2symbols(chinese_string, system):
        # split on the decimal point (if present), then symbolise both halves
        int_string, dec_string = chinese_string, ''
        for p in [system.math.point.simplified, system.math.point.traditional]:
            if p in chinese_string:
                int_string, dec_string = chinese_string.split(p)
                break
        return [get_symbol(c, system) for c in int_string], \
            [get_symbol(c, system) for c in dec_string]

    def correct_symbols(integer_symbols, system):
        """
        一百八 to 一百八十
        一亿一千三百万 to 一亿 一千万 三百万
        """
        # a leading '十' implies an elided leading '一'
        if integer_symbols and isinstance(integer_symbols[0], CNU):
            if integer_symbols[0].power == 1:
                integer_symbols = [system.digits[1]] + integer_symbols
        # a trailing bare digit inherits one power less than the previous unit
        if len(integer_symbols) > 1:
            if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU):
                integer_symbols.append(
                    CNU(integer_symbols[-2].power - 1, None, None, None, None))
        result = []
        unit_count = 0
        for s in integer_symbols:
            if isinstance(s, CND):
                result.append(s)
                unit_count = 0
            elif isinstance(s, CNU):
                current_unit = CNU(s.power, None, None, None, None)
                unit_count += 1
                if unit_count == 1:
                    result.append(current_unit)
                elif unit_count > 1:
                    # consecutive units compose: smaller earlier units are
                    # promoted by the power of the current, larger unit
                    for i in range(len(result)):
                        if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power:
                            result[-i - 1] = CNU(result[-i - 1].power +
                                                 current_unit.power, None, None, None, None)
        return result

    def compute_value(integer_symbols):
        """
        Compute the value.
        When current unit is larger than previous unit, current unit * all
        previous units will be used as all previous units.
        e.g. '两千万' = 2000 * 10000 not 2000 + 10000
        """
        value = [0]
        last_power = 0
        for s in integer_symbols:
            if isinstance(s, CND):
                value[-1] = s.value
            elif isinstance(s, CNU):
                value[-1] *= pow(10, s.power)
                if s.power > last_power:
                    # a larger unit scales everything accumulated so far
                    value[:-1] = list(map(lambda v: v *
                                          pow(10, s.power), value[:-1]))
                last_power = s.power
                value.append(0)
        return sum(value)

    system = create_system(numbering_type)
    int_part, dec_part = string2symbols(chinese_string, system)
    int_part = correct_symbols(int_part, system)
    int_value = compute_value(int_part)
    # decimal digits are concatenated verbatim after the point
    dec_str = ''.join([str(d.value) for d in dec_part])
    if dec_part:
        return float('{0}.{1}'.format(str(int_value), dec_str))
    else:
        return int_value
def num2cn(num_str, numbering_type=NUMBERING_TYPES[0], big=False, traditional=False, alt_zero=False, alt_two=True, use_zeros=True, use_units=True):
    """Convert a numeric string (e.g. '1161' or '3.14') to Chinese numerals.

    big: use big (banker's) digit forms; traditional: traditional characters;
    alt_zero: use the alternative zero form; alt_two: use '两' where idiomatic;
    use_zeros / use_units: control inclusion of zero digits and number units.
    """
    def get_value(value_string, use_zeros=True):
        # recursively convert a digit string into digit/unit symbol lists
        striped_string = value_string.lstrip('0')
        # record nothing if all zeros
        if not striped_string:
            return []
        # record one digits
        elif len(striped_string) == 1:
            if use_zeros and len(value_string) != len(striped_string):
                return [system.digits[0], system.digits[int(striped_string)]]
            else:
                return [system.digits[int(striped_string)]]
        # recursively record multiple digits
        else:
            # largest unit strictly smaller than the number's magnitude
            result_unit = next(u for u in reversed(
                system.units) if u.power < len(striped_string))
            result_string = value_string[:-result_unit.power]
            return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:])

    system = create_system(numbering_type)
    # split the input into integer and decimal strings
    int_dec = num_str.split('.')
    if len(int_dec) == 1:
        int_string = int_dec[0]
        dec_string = ""
    elif len(int_dec) == 2:
        int_string = int_dec[0]
        dec_string = int_dec[1]
    else:
        raise ValueError(
            "invalid input num string with more than one dot: {}".format(num_str))
    if use_units and len(int_string) > 1:
        result_symbols = get_value(int_string)
    else:
        result_symbols = [system.digits[int(c)] for c in int_string]
    dec_symbols = [system.digits[int(c)] for c in dec_string]
    if dec_string:
        result_symbols += [system.math.point] + dec_symbols
    if alt_two:
        # replace '二' with '两' where it precedes a unit other than '十'
        liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t,
                    system.digits[2].big_s, system.digits[2].big_t)
        for i, v in enumerate(result_symbols):
            if isinstance(v, CND) and v.value == 2:
                next_symbol = result_symbols[i +
                                             1] if i < len(result_symbols) - 1 else None
                previous_symbol = result_symbols[i - 1] if i > 0 else None
                if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))):
                    if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)):
                        result_symbols[i] = liang
    # if big is True, '两' will not be used and `alt_two` has no impact on output
    if big:
        attr_name = 'big_'
        if traditional:
            attr_name += 't'
        else:
            attr_name += 's'
    else:
        if traditional:
            attr_name = 'traditional'
        else:
            attr_name = 'simplified'
    result = ''.join([getattr(s, attr_name) for s in result_symbols])
    if alt_zero:
        result = result.replace(
            getattr(system.digits[0], attr_name), system.digits[0].alt_s)
    # prepend a zero digit when the result starts with the decimal point
    for i, p in enumerate(POINT):
        if result.startswith(p):
            return CHINESE_DIGIS[0] + result
    # ^10, 11, .., 19 -- drop the redundant leading '一' of '一十...'
    if len(result) >= 2 and result[1] in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0], SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \
            result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]:
        result = result[1:]
    return result
if __name__ == '__main__':
    # quick smoke test / demo of the conversion helpers
    all_chinese_number_string = (
        CHINESE_DIGIS + BIG_CHINESE_DIGIS_SIMPLIFIED + BIG_CHINESE_DIGIS_TRADITIONAL +
        LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED + LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL +
        SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED + SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL + ZERO_ALT +
        ''.join(TWO_ALTS + POSITIVE + NEGATIVE + POINT))
    print('num:', cn2num('一万零四百零三点八零五'))
    print('num:', cn2num('一亿六点三'))
    print('num:', cn2num('一亿零六点三'))
    print('num:', cn2num('两千零一亿六点三'))
    c = num2cn('1161', numbering_type='low',
               alt_two=True, big=False, traditional=False)
    print(c)
    print(all_chinese_number_string)
|
from __future__ import absolute_import
import datetime
import httpretty
import pygerduty
import pygerduty.v2
import pytest
import uuid
###################
# Version 1 Tests #
###################
@httpretty.activate
def test_loads_with_datetime_v1():
    """Incident timestamps are parsed into datetimes when parse_datetime=True."""
    # 'with' guarantees the fixture file handle is closed promptly
    # (the old open(...).read() leaked the handle)
    with open('tests/fixtures/incident_resp_v1.json') as f:
        body = f.read()
    httpretty.register_uri(
        httpretty.GET, "https://acme.pagerduty.com/api/v1/incidents/PIJ90N7",
        body=body, status=200
    )
    pd = pygerduty.PagerDuty("acme", "password", parse_datetime=True)
    incident = pd.incidents.show("PIJ90N7")
    assert incident.last_status_change_on == datetime.datetime(2012, 12, 22, 0, 35, 22)
    assert incident.created_on == datetime.datetime(2012, 12, 22, 0, 35, 21)
    assert incident.assigned_to[0].at == datetime.datetime(2012, 12, 22, 0, 35, 21)
    assert incident.pending_actions[0].at == datetime.datetime(2014, 1, 1, 8, 0)
    assert incident.pending_actions[1].at == datetime.datetime(2014, 1, 1, 10, 0)
    assert incident.pending_actions[2].at == datetime.datetime(2014, 1, 1, 11, 0)
@httpretty.activate
def test_loads_without_datetime_v1():
    """With parse_datetime=False, timestamps stay as raw ISO-8601 strings (v1 API)."""
    # Close the fixture file deterministically instead of leaking the handle.
    with open('tests/fixtures/incident_resp_v1.json') as fixture:
        body = fixture.read()
    httpretty.register_uri(
        httpretty.GET, "https://acme.pagerduty.com/api/v1/incidents/PIJ90N7",
        body=body, status=200
    )

    pd = pygerduty.PagerDuty("acme", "password", parse_datetime=False)
    incident = pd.incidents.show("PIJ90N7")

    assert incident.last_status_change_on == "2012-12-22T00:35:22Z"
    assert incident.created_on == "2012-12-22T00:35:21Z"
    assert incident.assigned_to[0].at == "2012-12-22T00:35:21Z"
    assert incident.pending_actions[0].at == "2014-01-01T08:00:00Z"
    assert incident.pending_actions[1].at == "2014-01-01T10:00:00Z"
    assert incident.pending_actions[2].at == "2014-01-01T11:00:00Z"
def test_datetime_encoder_decoder_v1():
    """The v1 JSON dumper/loader round-trips datetimes and rejects unknown types."""
    payload = {
        "d": datetime.datetime(2014, 1, 1, 8, 0),
        "s": "string",
        "i": 10,
    }

    # Dump then load must reproduce the original mapping exactly.
    assert pygerduty._json_loader(pygerduty._json_dumper(payload)) == payload

    # Types without a serializer (e.g. UUID) must raise via the default handler.
    with pytest.raises(TypeError) as excinfo:
        pygerduty._json_dumper({"test": uuid.uuid4()})
    excinfo.match(r"UUID\('.*'\) is not JSON serializable")
###################
# Version 2 Tests #
###################
@httpretty.activate
def test_loads_with_datetime_v2():
    """Incident timestamps are parsed into datetime objects when parse_datetime=True (v2 API)."""
    # Close the fixture file deterministically instead of leaking the handle.
    with open('tests/fixtures/incident_resp_v2.json') as fixture:
        body = fixture.read()
    httpretty.register_uri(
        httpretty.GET, "https://api.pagerduty.com/incidents/PT4KHLK",
        body=body, status=200
    )

    pd = pygerduty.v2.PagerDuty("password", parse_datetime=True)
    incident = pd.incidents.show("PT4KHLK")

    assert incident.last_status_change_at == datetime.datetime(2015, 10, 6, 21, 38, 23)
    assert incident.created_at == datetime.datetime(2015, 10, 6, 21, 30, 42)
    assert incident.assignments[0].at == datetime.datetime(2015, 11, 10, 0, 31, 52)
    assert incident.pending_actions[0].at == datetime.datetime(2015, 11, 10, 1, 2, 52)
    assert incident.pending_actions[1].at == datetime.datetime(2015, 11, 10, 4, 31, 52)
@httpretty.activate
def test_loads_without_datetime_v2():
    """With parse_datetime=False, timestamps stay as raw ISO-8601 strings (v2 API)."""
    # Close the fixture file deterministically instead of leaking the handle.
    with open('tests/fixtures/incident_resp_v2.json') as fixture:
        body = fixture.read()
    httpretty.register_uri(
        httpretty.GET, "https://api.pagerduty.com/incidents/PT4KHLK",
        body=body, status=200
    )

    pd = pygerduty.v2.PagerDuty("password", parse_datetime=False)
    incident = pd.incidents.show("PT4KHLK")

    assert incident.last_status_change_at == "2015-10-06T21:38:23Z"
    assert incident.created_at == "2015-10-06T21:30:42Z"
    assert incident.assignments[0].at == "2015-11-10T00:31:52Z"
    assert incident.pending_actions[0].at == "2015-11-10T01:02:52Z"
    assert incident.pending_actions[1].at == "2015-11-10T04:31:52Z"
def test_datetime_encoder_decoder_v2():
    """The v2 (pygerduty.common) JSON dumper/loader round-trips datetimes and rejects unknown types."""
    payload = {
        "d": datetime.datetime(2014, 1, 1, 8, 0),
        "s": "string",
        "i": 10,
    }

    # Dump then load must reproduce the original mapping exactly.
    assert pygerduty.common._json_loader(pygerduty.common._json_dumper(payload)) == payload

    # Types without a serializer (e.g. UUID) must raise via the default handler.
    with pytest.raises(TypeError) as excinfo:
        pygerduty.common._json_dumper({"test": uuid.uuid4()})
    excinfo.match(r"UUID\('.*'\) is not JSON serializable")
|
<reponame>zachjweiner/pystella<gh_stars>10-100
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pyopencl as cl
import pyopencl.clrandom as clr
import pystella as ps
import pytest
from common import get_errs
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
@pytest.mark.filterwarnings(
    "ignore::pyopencl.characterize.CLCharacterizationWarning")
@pytest.mark.filterwarnings("ignore::loopy.diagnostic.LoopyAdvisory")
@pytest.mark.parametrize("dtype", [np.float64, np.float32])
@pytest.mark.parametrize("stream", [True, False])
def test_stencil(ctx_factory, grid_shape, proc_shape, dtype, stream, h=1,
                 timing=False):
    """Check a six-point (face-neighbor) stencil against a NumPy slicing reference.

    :arg stream: whether to use ``ps.StreamingStencil`` instead of ``ps.Stencil``.
    :arg h: halo width; the input array carries a halo of width ``h`` per side.
    :arg timing: when True, also print per-call runtime and effective bandwidth.
    """
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    rank_shape = tuple(Ni // pi for Ni, pi in zip(grid_shape, proc_shape))

    from pymbolic import var
    x = var("x")
    y = var("y")
    i, j, k = var("i"), var("j"), var("k")

    # Sum of the six face neighbors; all indices are offset by +h because the
    # input array is padded with a halo of width h on each side.
    map_dict = {}
    map_dict[y[i, j, k]] = (
        x[i + h + h, j + h, k + h]
        + x[i + h, j + h + h, k + h]
        + x[i + h, j + h, k + h + h]
        + x[i - h + h, j + h, k + h]
        + x[i + h, j - h + h, k + h]
        + x[i + h, j + h, k - h + h]
    )

    if stream:
        try:
            stencil_map = ps.StreamingStencil(
                map_dict, prefetch_args=["x"], halo_shape=h
            )
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of silently skipping the test.
        except Exception:
            pytest.skip("StreamingStencil unavailable")
    else:
        stencil_map = ps.Stencil(map_dict, h, prefetch_args=["x"])

    # Rebind x/y from pymbolic variables to actual device arrays.
    x = clr.rand(queue, tuple(ni + 2*h for ni in rank_shape), dtype)
    y = clr.rand(queue, rank_shape, dtype)

    x_h = x.get()
    # Reference result: the same six-neighbor sum expressed with NumPy slices.
    y_true = (
        x_h[2*h:, h:-h, h:-h]
        + x_h[h:-h, 2*h:, h:-h]
        + x_h[h:-h, h:-h, 2*h:]
        + x_h[:-2*h, h:-h, h:-h]
        + x_h[h:-h, :-2*h, h:-h]
        + x_h[h:-h, h:-h, :-2*h]
    )

    stencil_map(queue, x=x, y=y)

    max_rtol = 5e-14 if dtype == np.float64 else 1e-5
    avg_rtol = 5e-14 if dtype == np.float64 else 1e-5

    max_err, avg_err = get_errs(y_true, y.get())
    # Typo fix: "innaccurate" -> "inaccurate" in the failure message.
    assert max_err < max_rtol and avg_err < avg_rtol, \
        f"y inaccurate for {grid_shape=}, {h=}, {proc_shape=}" \
        f": {max_err=}, {avg_err=}"

    if timing:
        from common import timer
        t = timer(lambda: stencil_map(queue, x=x, y=y)[0])
        print(f"stencil took {t:.3f} ms for {grid_shape=}, {h=}, {proc_shape=}")
        bandwidth = (x.nbytes + y.nbytes) / 1024**3 / t * 1000
        print(f"Bandwidth = {bandwidth} GB/s")
bandwidth = (x.nbytes + y.nbytes) / 1024**3 / t * 1000
print(f"Bandwidth = {bandwidth} GB/s")
if __name__ == "__main__":
    from common import parser
    args = parser.parse_args()

    # Sweep halo widths 1..3 and both stencil flavors.
    for halo_width in range(1, 4):
        for use_stream in (True, False):
            test_stencil(
                ps.choose_device_and_make_context,
                grid_shape=args.grid_shape, proc_shape=args.proc_shape,
                dtype=args.dtype, timing=args.timing,
                stream=use_stream, h=halo_width
            )
|
<gh_stars>0
"""
Collection of Two-View-Geometry Functions
"""
# global
import ivy_mech as _ivy_mech
from ivy.framework_handler import get_framework as _get_framework
# local
from ivy_vision import projective_geometry as _ivy_pg
from ivy_vision import single_view_geometry as _ivy_svg
# Small epsilon added to divisors throughout this module to avoid division by zero.
MIN_DENOMINATOR = 1e-12
def pixel_to_pixel_coords(pixel_coords1, cam1to2_full_mat, batch_shape=None, image_dims=None, dev=None, f=None):
    r"""
    Transform depth scaled homogeneous pixel co-ordinates image in first camera frame
    :math:`\mathbf{X}_{p1}\in\mathbb{R}^{h×w×3}` to depth scaled homogeneous pixel co-ordinates image in second camera
    frame :math:`\mathbf{X}_{p2}\in\mathbb{R}^{h×w×3}`, given camera to camera projection matrix
    :math:`\mathbf{P}_{1→2}\in\mathbb{R}^{3×4}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=174>`_

    :param pixel_coords1: Depth scaled homogeneous pixel co-ordinates image in frame 1 *[batch_shape,h,w,3]*
    :type pixel_coords1: array
    :param cam1to2_full_mat: Camera1-to-camera2 full projection matrix *[batch_shape,3,4]*
    :type cam1to2_full_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev: str, optional
    :param f: Machine learning library. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Depth scaled homogeneous pixel co-ordinates image in frame 2 *[batch_shape,h,w,3]*
    """
    f = _get_framework(pixel_coords1, f=f)

    # Infer any unspecified shapes/device from the input array.
    batch_shape = pixel_coords1.shape[:-3] if batch_shape is None else batch_shape
    image_dims = pixel_coords1.shape[-3:-1] if image_dims is None else image_dims
    dev = f.get_device(pixel_coords1) if dev is None else dev

    # Normalize shapes to plain lists.
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # Homogenize by appending a ones channel: BS x H x W x 4
    ones_channel = f.ones(batch_shape + image_dims + [1], dev=dev)
    pixel_coords_homo = f.concatenate((pixel_coords1, ones_channel), -1)

    # Project into camera 2: BS x H x W x 3
    return _ivy_pg.transform(pixel_coords_homo, cam1to2_full_mat, batch_shape, image_dims, f=f)
def cam_to_cam_coords(cam_coords1, cam1to2_ext_mat, batch_shape=None, image_dims=None, dev=None, f=None):
    r"""
    Transform camera-centric homogeneous co-ordinates image for camera 1 :math:`\mathbf{X}_{c1}\in\mathbb{R}^{h×w×4}` to
    camera-centric homogeneous co-ordinates image for camera 2 :math:`\mathbf{X}_{c2}\in\mathbb{R}^{h×w×4}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=174>`_

    :param cam_coords1: Camera-centric homogeneous co-ordinates image in frame 1 *[batch_shape,h,w,4]*
    :type cam_coords1: array
    :param cam1to2_ext_mat: Camera1-to-camera2 extrinsic projection matrix *[batch_shape,3,4]*
    :type cam1to2_ext_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev: str, optional
    :param f: Machine learning library. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Camera-centric homogeneous co-ordinates image in frame 2 *[batch_shape,h,w,4]*
    """
    f = _get_framework(cam_coords1, f=f)

    # Infer any unspecified shapes/device from the input array.
    batch_shape = cam_coords1.shape[:-3] if batch_shape is None else batch_shape
    image_dims = cam_coords1.shape[-3:-1] if image_dims is None else image_dims
    dev = f.get_device(cam_coords1) if dev is None else dev

    # Normalize shapes to plain lists.
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # Apply the extrinsic transform: BS x H x W x 3
    transformed = _ivy_pg.transform(cam_coords1, cam1to2_ext_mat, batch_shape, image_dims, f=f)

    # Re-homogenize by appending a ones channel: BS x H x W x 4
    ones_channel = f.ones(batch_shape + image_dims + [1], dev=dev)
    return f.concatenate((transformed, ones_channel), -1)
def sphere_to_sphere_coords(sphere_coords1, cam1to2_ext_mat, batch_shape=None, image_dims=None, f=None):
    r"""
    Convert camera-centric ego-sphere polar co-ordinates image in frame 1 :math:`\mathbf{S}_{c1}\in\mathbb{R}^{h×w×3}`
    to camera-centric ego-sphere polar co-ordinates image in frame 2 :math:`\mathbf{S}_{c2}\in\mathbb{R}^{h×w×3}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates>`_

    :param sphere_coords1: Camera-centric ego-sphere polar co-ordinates image in frame 1 *[batch_shape,h,w,3]*
    :type sphere_coords1: array
    :param cam1to2_ext_mat: Camera1-to-camera2 extrinsic projection matrix *[batch_shape,3,4]*
    :type cam1to2_ext_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :param f: Machine learning library. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Camera-centric ego-sphere polar co-ordinates image in frame 2 *[batch_shape,h,w,3]*
    """
    f = _get_framework(sphere_coords1, f=f)
    if batch_shape is None:
        batch_shape = sphere_coords1.shape[:-3]
    if image_dims is None:
        image_dims = sphere_coords1.shape[-3:-1]
    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    # BS x H x W x 4
    cam_coords1 = _ivy_svg.sphere_to_cam_coords(sphere_coords1, batch_shape, image_dims, f=f)
    # Consistency fix: forward the already-resolved framework handle (f=f) like
    # every other internal call, instead of letting cam_to_cam_coords re-infer it.
    cam_coords2 = cam_to_cam_coords(cam_coords1, cam1to2_ext_mat, batch_shape, image_dims, f=f)
    # BS x H x W x 3
    return _ivy_svg.cam_to_sphere_coords(cam_coords2, batch_shape, image_dims, f=f)
def angular_pixel_to_angular_pixel_coords(angular_pixel_coords1, cam1to2_ext_mat, pixels_per_degree, batch_shape=None,
                                          image_dims=None, f=None):
    r"""
    Convert angular pixel co-ordinates image in frame 1 :math:`\mathbf{A}_{p1}\in\mathbb{R}^{h×w×3}` to angular pixel
    co-ordinates image in frame 2 :math:`\mathbf{A}_{p2}\in\mathbb{R}^{h×w×3}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates>`_

    :param angular_pixel_coords1: Angular pixel co-ordinates image in frame 1 *[batch_shape,h,w,3]*
    :type angular_pixel_coords1: array
    :param cam1to2_ext_mat: Camera1-to-camera2 extrinsic projection matrix *[batch_shape,3,4]*
    :type cam1to2_ext_mat: array
    :param pixels_per_degree: Number of pixels per angular degree
    :type pixels_per_degree: float
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :param f: Machine learning library. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Angular pixel co-ordinates image in frame 2 *[batch_shape,h,w,3]*
    """
    f = _get_framework(angular_pixel_coords1, f=f)
    if batch_shape is None:
        batch_shape = angular_pixel_coords1.shape[:-3]
    if image_dims is None:
        image_dims = angular_pixel_coords1.shape[-3:-1]
    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    # Convert to sphere co-ordinates, transform between frames, convert back.
    # BS x H x W x 3
    sphere_coords1 = _ivy_svg.angular_pixel_to_sphere_coords(angular_pixel_coords1, pixels_per_degree, f=f)
    # Consistency fix: forward the already-resolved framework handle (f=f) like
    # every other internal call, instead of letting the callee re-infer it.
    # BS x H x W x 3
    sphere_coords2 = sphere_to_sphere_coords(sphere_coords1, cam1to2_ext_mat, batch_shape, image_dims, f=f)
    # BS x H x W x 3
    return _ivy_svg.sphere_to_angular_pixel_coords(sphere_coords2, pixels_per_degree, f=f)
def get_fundamental_matrix(full_mat1, full_mat2, camera_center1=None, pinv_full_mat1=None, batch_shape=None, dev=None,
                           f=None):
    r"""
    Compute fundamental matrix :math:`\mathbf{F}\in\mathbb{R}^{3×3}` between two cameras, given their extrinsic
    matrices :math:`\mathbf{E}_1\in\mathbb{R}^{3×4}` and :math:`\mathbf{E}_2\in\mathbb{R}^{3×4}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=262>`_
    bottom of page 244, section 9.2.2, equation 9.1

    :param full_mat1: Frame 1 full projection matrix *[batch_shape,3,4]*
    :type full_mat1: array
    :param full_mat2: Frame 2 full projection matrix *[batch_shape,3,4]*
    :type full_mat2: array
    :param camera_center1: Frame 1 camera center, inferred from full_mat1 if None *[batch_shape,3,1]*
    :type camera_center1: array, optional
    :param pinv_full_mat1: Frame 1 full projection matrix pseudo-inverse, inferred from full_mat1 if None *[batch_shape,4,3]*
    :type pinv_full_mat1: array, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev: str, optional
    :param f: Machine learning library. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Fundamental matrix connecting frames 1 and 2 *[batch_shape,3,3]*
    """
    f = _get_framework(full_mat1, f=f)
    if batch_shape is None:
        batch_shape = full_mat1.shape[:-2]
    if dev is None:
        dev = f.get_device(full_mat1)
    # shapes as list
    batch_shape = list(batch_shape)
    # Derive camera 1's center by inverting its (homogenized) full projection matrix.
    if camera_center1 is None:
        inv_full_mat1 = f.inv(_ivy_mech.make_transformation_homogeneous(full_mat1, batch_shape, dev, f=f))[..., 0:3, :]
        camera_center1 = _ivy_svg.inv_ext_mat_to_camera_center(inv_full_mat1, f=f)
    # Moore-Penrose pseudo-inverse of camera 1's projection matrix (P^+ in equation 9.1).
    if pinv_full_mat1 is None:
        pinv_full_mat1 = f.pinv(full_mat1)
    # BS x 4 x 1
    camera_center1_homo = f.concatenate((camera_center1, f.ones(batch_shape + [1, 1], dev=dev)), -2)
    # Epipole in image 2: the projection of camera 1's center through camera 2. BS x 3
    e2 = f.matmul(full_mat2, camera_center1_homo)[..., -1]
    # [e2]_x, the cross-product expressed as a matrix. BS x 3 x 3
    e2_skew_symmetric = f.linalg.vector_to_skew_symmetric_matrix(e2, batch_shape)
    # F = [e2]_x P' P^+  (equation 9.1). BS x 3 x 3
    return f.matmul(e2_skew_symmetric, f.matmul(full_mat2, pinv_full_mat1))
def closest_mutual_points_along_two_skew_rays(camera_centers, world_ray_vectors, batch_shape=None, image_dims=None,
                                              dev=None, f=None):
    r"""
    Compute closest mutual homogeneous co-ordinates :math:`\mathbf{x}_{1,i,j}\in\mathbb{R}^{4}` and
    :math:`\mathbf{x}_{2,i,j}\in\mathbb{R}^{4}` along two world-centric rays
    :math:`\overset{\sim}{\mathbf{C}_1} + λ_1\mathbf{rv}_{1,i,j}` and
    :math:`\overset{\sim}{\mathbf{C}_2} + λ_2\mathbf{rv}_{2,i,j}`, for each index aligned pixel between two
    world-centric ray vector images :math:`\mathbf{RV}_1\in\mathbb{R}^{h×w×3}` and
    :math:`\mathbf{RV}_2\in\mathbb{R}^{h×w×3}`. The function returns two images of closest mutual homogeneous
    co-ordinates :math:`\mathbf{X}_1\in\mathbb{R}^{h×w×4}` and :math:`\mathbf{X}_2\in\mathbb{R}^{h×w×4}`,
    concatenated together into a single array.\n
    `[reference] <https://math.stackexchange.com/questions/1414285/location-of-shortest-distance-between-two-skew-lines-in-3d>`_
    second answer in forum

    :param camera_centers: Camera center *[batch_shape,2,3,1]*
    :type camera_centers: array
    :param world_ray_vectors: World ray vectors *[batch_shape,2,h,w,3]*
    :type world_ray_vectors: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs in None.
    :type image_dims: sequence of ints, optional
    :param dev: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev: str, optional
    :param f: Machine learning library. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Closest mutual points image *[batch_shape,2,h,w,4]*
    """
    f = _get_framework(camera_centers, f=f)
    if batch_shape is None:
        batch_shape = world_ray_vectors.shape[:-4]
    if image_dims is None:
        image_dims = world_ray_vectors.shape[-3:-1]
    if dev is None:
        dev = f.get_device(camera_centers)
    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    # BS x 3 x 1
    camera_center0 = camera_centers[..., 0, :, :]
    camera_center1 = camera_centers[..., 1, :, :]
    # Baseline vectors between the two camera centers, broadcastable over H x W.
    # BS x 1 x 1 x 3
    cam1_to_cam2 = f.reshape(camera_center1 - camera_center0, batch_shape + [1, 1, 3])
    cam2_to_cam1 = f.reshape(camera_center0 - camera_center1, batch_shape + [1, 1, 3])
    # BS x 2 x H x W x 3
    ds = world_ray_vectors
    # Per-camera ray direction images.
    # BS x H x W x 3
    ds0 = ds[..., 0, :, :, :]
    ds1 = ds[..., 1, :, :, :]
    # n is perpendicular to both rays; n1/n2 span the planes containing one ray
    # and n (the standard skew-line closest-point construction).
    n = f.cross(ds0, ds1)
    n1 = f.cross(ds0, n)
    n2 = f.cross(ds1, n)
    # Ray parameters t1, t2 of the closest points along each ray; MIN_DENOMINATOR
    # guards against division by zero for (near-)parallel rays.
    # BS x 1 x H x W
    t1 = f.expand_dims(f.reduce_sum(cam1_to_cam2 * n2, -1) / (
        f.reduce_sum(ds0 * n2, -1) + MIN_DENOMINATOR), -3)
    t2 = f.expand_dims(f.reduce_sum(cam2_to_cam1 * n1, -1) / (
        f.reduce_sum(ds1 * n1, -1) + MIN_DENOMINATOR), -3)
    # BS x 2 x H x W
    ts = f.expand_dims(f.concatenate((t1, t2), -3), -1)
    # Walk each ray from its camera center by its parameter t.
    # BS x 2 x H x W x 3
    world_coords = f.reshape(camera_centers[..., 0], batch_shape + [2, 1, 1, 3]) + ts * world_ray_vectors
    # Homogenize with a trailing ones channel. BS x 2 x H x W x 4
    return f.concatenate((world_coords, f.ones(batch_shape + [2] + image_dims + [1], dev=dev)), -1)
def _triangulate_depth_by_closest_mutual_points(pixel_coords, full_mats, inv_full_mats, camera_centers, batch_shape,
                                                image_dims, f):
    # Triangulate by back-projecting per-pixel rays from both cameras, taking
    # the closest mutual points along each pair of (generally skew) rays, then
    # re-projecting the frame-1 points to depth scaled pixel co-ordinates.
    # single view geom batch shape
    svg_batch_shape = batch_shape + [2]
    # BS x 2 x H x W x 3
    world_rays_flat = _ivy_svg.pixel_coords_to_world_ray_vectors(pixel_coords, inv_full_mats, camera_centers,
                                                                 svg_batch_shape, image_dims, f=f)
    # BS x 2 x H x W x 3
    world_rays = f.reshape(world_rays_flat, svg_batch_shape + image_dims + [3])
    # BS x 2 x H x W x 4
    world_points = closest_mutual_points_along_two_skew_rays(camera_centers, world_rays, batch_shape, image_dims, f=f)
    # Project the frame-1 closest points through camera 1. BS x H x W x 3
    return _ivy_svg.world_to_pixel_coords(world_points[..., 0, :, :, :], full_mats[..., 0, :, :],
                                          batch_shape, image_dims, f=f)
def _triangulate_depth_by_homogeneous_dlt(pixel_coords, full_mats, _, _1, batch_shape, image_dims, f):
    # Triangulate by the homogeneous Direct Linear Transform: for each pixel,
    # build the 4x4 system A X = 0 from both cameras' projection rows and solve
    # for the world point X, then re-project through camera 1.
    # The `_` and `_1` parameters (inv_full_mats, camera_centers) are unused here;
    # they exist so both TRI_METHODS entries share one call signature.
    # num batch dims
    num_batch_dims = len(batch_shape)
    # Normalize so the homogeneous channel is 1 (guarded against zero).
    # BS x 2 x H x W x 3
    pixel_coords_normalized = pixel_coords / (pixel_coords[..., -1:] + MIN_DENOMINATOR)
    # The two cameras' projection matrices P and P'.
    # BS x 3 x 4
    P = full_mats[..., 0, :, :]
    P_dash = full_mats[..., 1, :, :]
    # Rows of P and P', tiled once per pixel.
    # BS x (HxW) x 4
    p1T = f.tile(P[..., 0:1, :], [1] * num_batch_dims + [image_dims[0] * image_dims[1], 1])
    p2T = f.tile(P[..., 1:2, :], [1] * num_batch_dims + [image_dims[0] * image_dims[1], 1])
    p3T = f.tile(P[..., 2:3, :], [1] * num_batch_dims + [image_dims[0] * image_dims[1], 1])
    p_dash_1T = f.tile(P_dash[..., 0:1, :], [1] * num_batch_dims + [image_dims[0] * image_dims[1], 1])
    p_dash_2T = f.tile(P_dash[..., 1:2, :], [1] * num_batch_dims + [image_dims[0] * image_dims[1], 1])
    p_dash_3T = f.tile(P_dash[..., 2:3, :], [1] * num_batch_dims + [image_dims[0] * image_dims[1], 1])
    # Flattened pixel co-ordinates in each image.
    # BS x (WxH) x 1
    x = f.reshape(pixel_coords_normalized[..., 0, :, :, 0], batch_shape + [-1, 1])
    y = f.reshape(pixel_coords_normalized[..., 0, :, :, 1], batch_shape + [-1, 1])
    x_dash = f.reshape(pixel_coords_normalized[..., 1, :, :, 0], batch_shape + [-1, 1])
    y_dash = f.reshape(pixel_coords_normalized[..., 1, :, :, 1], batch_shape + [-1, 1])
    # Two constraint rows per camera (the standard DLT construction).
    # BS x (HxW) x 1 x 4
    A_row1 = f.expand_dims(x * p3T - p1T, -2)
    A_row2 = f.expand_dims(y * p3T - p2T, -2)
    A_row3 = f.expand_dims(x_dash * p_dash_3T - p_dash_1T, -2)
    A_row4 = f.expand_dims(y_dash * p_dash_3T - p_dash_2T, -2)
    # BS x (HxW) x 4 x 4
    A = f.concatenate((A_row1, A_row2, A_row3, A_row4), -2)
    # Homogeneous world point per pixel solving A X = 0. BS x (HxW) x 4
    X = _ivy_pg.solve_homogeneous_dlt(A, f=f)
    # BS x W x H x 4
    coords_wrt_world_homo_unscaled = f.reshape(X, batch_shape + image_dims + [4])
    # De-homogenize (guarded against a zero last channel).
    coords_wrt_world = coords_wrt_world_homo_unscaled / (coords_wrt_world_homo_unscaled[..., -1:] + MIN_DENOMINATOR)
    # Re-project through camera 1. BS x W x H x 3
    return _ivy_svg.world_to_pixel_coords(coords_wrt_world, full_mats[..., 0, :, :], batch_shape, image_dims, f=f)
# Dispatch table mapping triangulation method names to their implementations.
TRI_METHODS = {'cmp': _triangulate_depth_by_closest_mutual_points,
               'dlt': _triangulate_depth_by_homogeneous_dlt}
def triangulate_depth(pixel_coords, full_mats, inv_full_mats=None, camera_centers=None, method='cmp', batch_shape=None,
                      image_dims=None, f=None):
    r"""
    Triangulate depth in frame 1, returning depth scaled homogeneous pixel co-ordinate image
    :math:`\mathbf{X}\in\mathbb{R}^{h×w×3}` in frame 1.\n

    :param pixel_coords: Homogeneous pixel co-ordinate images: *[batch_shape,h,w,3]*
    :type pixel_coords: array
    :param full_mats: Full projection matrices *[batch_shape,2,3,4]*
    :type full_mats: array
    :param inv_full_mats: Inverse full projection matrices, required for closest_mutual_points method *[batch_shape,2,3,4]*
    :type inv_full_mats: array, optional
    :param camera_centers: Camera centers, required for closest_mutual_points method *[batch_shape,2,3,1]*
    :type camera_centers: array, optional
    :param method: Triangulation method, one of [cmp|dlt], for closest mutual points or homogeneous dlt approach, closest_mutual_points by default
    :type method: str, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :param f: Machine learning library. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Depth scaled homogeneous pixel co-ordinates image in frame 1 *[batch_shape,h,w,3]*
    """
    f = _get_framework(pixel_coords, f=f)
    if batch_shape is None:
        batch_shape = pixel_coords.shape[:-4]
    if image_dims is None:
        image_dims = pixel_coords.shape[-3:-1]
    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    if method == 'cmp':
        # BUG FIX: this previously compared against 'cmt', which is not a valid
        # method name, so the default 'cmp' path never auto-derived
        # inv_full_mats / camera_centers when they were omitted.
        if inv_full_mats is None:
            inv_full_mats = f.inv(_ivy_mech.make_transformation_homogeneous(
                full_mats, batch_shape + [2], f=f))[..., 0:3, :]
        if camera_centers is None:
            camera_centers = _ivy_svg.inv_ext_mat_to_camera_center(inv_full_mats, f=f)
    # Resolve the method first so a KeyError raised *inside* a triangulation
    # implementation is not mistaken for an unknown method name.
    try:
        method_fn = TRI_METHODS[method]
    except KeyError:
        raise Exception('Triangulation method must be one of [cmp|dlt], but found {}'.format(method))
    return method_fn(pixel_coords, full_mats, inv_full_mats, camera_centers, batch_shape, image_dims, f)
|
<gh_stars>1-10
import os
import logging
import json_config
import gi
gi.require_version('Gtk', '3.0') # nopep8
from pathlib import Path
from gi.repository import Gtk
from .login_window import LoginWindow
# Absolute path of the directory containing this module, used to locate bundled
# resources (e.g. the glade layout files).
TOP_DIR = os.path.dirname(os.path.abspath(__file__))
# Configuration store backed by config.json (presumably persisted by
# json_config on assignment — confirm against the json_config package docs).
config = json_config.connect('config.json')
class WatsonCredentialsDialog(Gtk.Dialog):
    """Modal dialog prompting for IBM Watson username/password credentials.

    After ``run()`` returns OK, read ``username_field`` / ``password_field``.
    """

    def __init__(self, parent):
        Gtk.Dialog.__init__(self, "Enter Credentials", parent, 0,
                            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                             Gtk.STOCK_OK, Gtk.ResponseType.OK))
        self.set_default_size(150, 100)

        username_field = Gtk.Entry()
        username_field.set_placeholder_text("Username")
        password_field = Gtk.Entry()
        password_field.set_placeholder_text("Password")
        # Mask the password while it is typed.
        password_field.set_visibility(False)
        password_field.set_invisible_char('*')

        self.username_field = username_field
        # BUG FIX: this line was corrupted ("password_<PASSWORD>"), which raised
        # NameError as soon as the dialog was constructed.
        self.password_field = password_field

        box = self.get_content_area()
        box.set_margin_top(10)
        box.set_margin_bottom(10)
        box.set_margin_left(10)
        box.set_margin_right(10)
        box.set_spacing(10)
        box.add(username_field)
        box.add(password_field)
        self.show_all()
class BingCredentialDialog(Gtk.Dialog):
    """Modal dialog prompting for a Bing Speech API key.

    After ``run()`` returns OK, read ``api_key_field``.
    """

    def __init__(self, parent):
        Gtk.Dialog.__init__(self, "Enter API Key", parent, 0,
                            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                             Gtk.STOCK_OK, Gtk.ResponseType.OK))
        self.set_default_size(150, 100)

        self.api_key_field = Gtk.Entry()
        self.api_key_field.set_placeholder_text("API Key")

        content = self.get_content_area()
        # Uniform 10px padding around the entry.
        for apply_margin in (content.set_margin_top, content.set_margin_bottom,
                             content.set_margin_left, content.set_margin_right):
            apply_margin(10)
        content.set_spacing(10)
        content.add(self.api_key_field)
        self.show_all()
class ConfigurationWindow:
    """Settings window for choosing STT/TTS engines, hotword engine and wake button.

    Widget state is loaded from the module-level ``config`` store on startup and
    written back through the nested ``Handler`` signal callbacks.
    """

    def __init__(self) -> None:
        super().__init__()
        builder = Gtk.Builder()
        builder.add_from_file(os.path.join(
            TOP_DIR, "glade_files/configure.glade"))

        # Widgets defined in the glade layout.
        self.window = builder.get_object("configuration_window")
        self.stt_combobox = builder.get_object("stt_combobox")
        self.tts_combobox = builder.get_object("tts_combobox")
        self.auth_switch = builder.get_object("auth_switch")
        self.snowboy_switch = builder.get_object("snowboy_switch")
        self.wake_button_switch = builder.get_object("wake_button_switch")

        # Initialize widget state from the persisted configuration.
        self.init_auth_switch()
        self.init_tts_combobox()
        self.init_stt_combobox()
        self.init_hotword_switch()
        self.init_wake_button_switch()

        builder.connect_signals(ConfigurationWindow.Handler(self))
        self.window.set_resizable(False)

    def show_window(self):
        """Show the window and enter the Gtk main loop (blocks until quit)."""
        self.window.show_all()
        Gtk.main()

    def exit_window(self):
        """Destroy the window and quit the Gtk main loop."""
        self.window.destroy()
        Gtk.main_quit()

    def init_tts_combobox(self):
        """Select the combobox row matching the configured TTS engine."""
        default_tts = config['default_tts']
        if default_tts == 'google':
            self.tts_combobox.set_active(0)
        elif default_tts == 'flite':
            self.tts_combobox.set_active(1)
        elif default_tts == 'watson':
            self.tts_combobox.set_active(2)
        else:
            # Unknown value: fall back to google and persist the fallback.
            self.tts_combobox.set_active(0)
            config['default_tts'] = 'google'

    def init_stt_combobox(self):
        """Select the combobox row matching the configured STT engine."""
        default_stt = config['default_stt']
        if default_stt == 'google':
            self.stt_combobox.set_active(0)
        elif default_stt == 'watson':
            self.stt_combobox.set_active(1)
        elif default_stt == 'bing':
            self.stt_combobox.set_active(2)
        else:
            # BUG FIX: the fallback previously reset the *TTS* combobox and the
            # 'default_tts' key (copy-paste from init_tts_combobox); it now
            # resets the STT combobox and 'default_stt' as intended.
            self.stt_combobox.set_active(0)
            config['default_stt'] = 'google'

    def init_auth_switch(self):
        """Reflect the configured usage mode on the authentication switch."""
        usage_mode = config['usage_mode']
        if usage_mode == 'authenticated':
            self.auth_switch.set_active(True)
        else:
            self.auth_switch.set_active(False)

    def init_hotword_switch(self):
        """Enable the Snowboy switch only when the snowboy package is importable."""
        try:
            import snowboy  # noqa: F401 -- availability probe only
        except ImportError:
            # Snowboy unavailable: grey out the switch and force PocketSphinx.
            self.snowboy_switch.set_sensitive(False)
            config['hotword_engine'] = 'PocketSphinx'
        if config['hotword_engine'] == 'Snowboy':
            self.snowboy_switch.set_active(True)
        else:
            self.snowboy_switch.set_active(False)

    def init_wake_button_switch(self):
        """Enable the wake-button switch only on hardware with RPi.GPIO available."""
        try:
            import RPi.GPIO
            # NOTE(review): reads the 'WakeButton' key here, but the handler
            # below writes 'wake_button' — the two never agree; confirm which
            # key the rest of the application expects before unifying.
            if config['WakeButton'] == 'enabled':
                self.wake_button_switch.set_active(True)
            else:
                self.wake_button_switch.set_active(False)
        except ImportError:
            self.wake_button_switch.set_sensitive(False)
        except RuntimeError:
            # RPi.GPIO raises RuntimeError when imported off a Raspberry Pi.
            self.wake_button_switch.set_sensitive(False)

    class Handler:
        """Signal handlers wired to the glade widgets via connect_signals."""

        def __init__(self, config_window):
            self.config_window = config_window

        def on_delete_window(self, *args):
            self.config_window.exit_window()

        def on_stt_combobox_changed(self, combo: Gtk.ComboBox):
            """Persist the chosen STT engine, prompting for credentials if needed."""
            selection = combo.get_active()
            if selection == 0:
                config['default_stt'] = 'google'
            elif selection == 1:
                credential_dialog = WatsonCredentialsDialog(
                    self.config_window.window)
                response = credential_dialog.run()
                if response == Gtk.ResponseType.OK:
                    username = credential_dialog.username_field.get_text()
                    password = credential_dialog.password_field.get_text()
                    config['default_stt'] = 'watson'
                    config['watson_stt_config']['username'] = username
                    config['watson_stt_config']['password'] = password
                else:
                    # Cancelled: revert the combobox to the persisted engine.
                    self.config_window.init_stt_combobox()
                credential_dialog.destroy()
            elif selection == 2:
                credential_dialog = BingCredentialDialog(
                    self.config_window.window)
                response = credential_dialog.run()
                if response == Gtk.ResponseType.OK:
                    api_key = credential_dialog.api_key_field.get_text()
                    config['default_stt'] = 'bing'
                    # NOTE(review): the API key is stored under the 'username'
                    # sub-key — looks suspicious; confirm against the consumer
                    # of 'bing_speech_api_key' before changing.
                    config['bing_speech_api_key']['username'] = api_key
                else:
                    # Cancelled: revert the combobox to the persisted engine.
                    self.config_window.init_stt_combobox()
                credential_dialog.destroy()

        def on_tts_combobox_changed(self, combo):
            """Persist the chosen TTS engine, prompting for credentials if needed."""
            selection = combo.get_active()
            if selection == 0:
                config['default_tts'] = 'google'
            elif selection == 1:
                config['default_tts'] = 'flite'
            elif selection == 2:
                credential_dialog = WatsonCredentialsDialog(
                    self.config_window.window)
                response = credential_dialog.run()
                if response == Gtk.ResponseType.OK:
                    username = credential_dialog.username_field.get_text()
                    password = credential_dialog.password_field.get_text()
                    config['default_tts'] = 'watson'
                    config['watson_tts_config']['username'] = username
                    config['watson_tts_config']['password'] = password
                    config['watson_tts_config']['voice'] = 'en-US_AllisonVoice'
                else:
                    # Cancelled: revert the combobox to the persisted engine.
                    self.config_window.init_tts_combobox()
                credential_dialog.destroy()

        def on_auth_switch_active_notify(self, switch, gparam):
            """Run the login flow, then sync the switch with the resulting mode."""
            if switch.get_active():
                login_window = LoginWindow()
                login_window.show_window()
                if config['usage_mode'] == 'authenticated':
                    switch.set_active(True)
                else:
                    switch.set_active(False)

        def on_snowboy_switch_active_notify(self, switch, gparam):
            if switch.get_active():
                config['hotword_engine'] = 'Snowboy'
            else:
                config['hotword_engine'] = 'PocketSphinx'

        def on_wake_button_switch_active_notify(self, switch, gparam):
            # NOTE(review): writes 'wake_button' while init_wake_button_switch
            # reads 'WakeButton' — see the note there.
            if switch.get_active():
                config['wake_button'] = 'enabled'
            else:
                config['wake_button'] = 'disabled'
|
<reponame>mutazag/ilab1<filename>utils/config.py
# %%
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
class Config:
    """Config class encapsulate operations to manage folder paths for data sets.

    File names are stored as plain strings; the public properties join them
    with the configured data / descriptors directories and return
    ``pathlib.Path`` objects.
    """

    # Data directory and data-set file names (relative to the data directory).
    __data_dir = ""
    __predicted_300K = "300k_PREDICTED.csv"
    __predicted_18M = "18M_PREDICTED.csv"
    __small_300K = "300K_small.csv"
    __small_18M = "18M_small.csv"
    __uid_300K = "300K_uid_2.csv"
    __uid_18M = "18M_uid_2.csv"  # update name
    __features_300K = "300K_full_features.csv"
    __features_300K_v2 = "300K_full_features_conflag.csv"
    __features_300K_v3 = "300K_full_features_flags.csv"
    __features_18M = "18M_full_features.csv"
    __private = "private"
    __layercategories = "layercategories.csv"
    # Descriptors directory and descriptor file names (relative to it).
    __descriptors_dir = ""
    __descriptors_IE = "lasso_monolayer_data_IE.csv"
    __descriptors_C33 = "lasso_monolayer_data_C33.csv"
    __descriptors_column_names = 'all_descriptors300K_columns.csv'
    __descriptors_column_names_C33 = 'all_descriptors300K_columns_C33.csv'
    __descriptors_column_names_IE = 'all_descriptors300K_columns_IE.csv'
    __descriptors_master = 'descriptors_master.csv'
    __descriptors_master_6k = 'descriptors_master_6k.csv'
    __con_bilayers = 'con_bilayers.csv'
    __dft_C33_uid = 'C33_DFT_uid.csv'
    __dft_IE_uid = 'IE_DFT_uid.csv'

    def __init__(self, datafolder="data/ML_IE_C33", descriptorsfolder="descriptors"):
        """Store the data and descriptors directories as Path objects."""
        self.__data_dir = Path(datafolder)
        self.__descriptors_dir = Path(descriptorsfolder)

    def __get_file_18M(self, small=True):
        # Resolve either the small sample or the full predicted 18M file.
        if (small):
            return self.__data_dir / self.__small_18M
        else:
            return self.__data_dir / self.__predicted_18M

    def __set_file_18M(self, filename, small=True):
        if (small):
            self.__small_18M = filename
        else:
            self.__predicted_18M = filename

    def __get_file_300K(self, small=True):
        # Resolve either the small sample or the full predicted 300K file.
        if (small):
            return self.__data_dir / self.__small_300K
        else:
            return self.__data_dir / self.__predicted_300K

    def __set_file_300K(self, filename, small=True):
        if (small):
            self.__small_300K = filename
        else:
            self.__predicted_300K = filename

    @property
    def predicted_18M(self):
        return self.__get_file_18M(small=False)

    @predicted_18M.setter
    def predicted_18M(self, filename):
        self.__set_file_18M(filename, small=False)

    @property
    def small_18M(self):
        return self.__get_file_18M(small=True)

    @small_18M.setter
    def small_18M(self, filename):
        self.__set_file_18M(filename, small=True)

    @property
    def uid_18M(self):
        return self.__data_dir / self.__uid_18M

    @uid_18M.setter
    def uid_18M(self, filename):
        self.__uid_18M = filename

    @property
    def predicted_300K(self):
        return self.__get_file_300K(small=False)

    @predicted_300K.setter
    def predicted_300K(self, filename):
        self.__set_file_300K(filename, small=False)

    @property
    def small_300K(self):
        return self.__get_file_300K(small=True)

    @small_300K.setter
    def small_300K(self, filename):
        self.__set_file_300K(filename, small=True)

    @property
    def uid_300K(self):
        return self.__data_dir / self.__uid_300K

    @uid_300K.setter
    def uid_300K(self, filename):
        self.__uid_300K = filename

    @property
    def features_300K(self):
        return self.__data_dir / self.__features_300K

    @property
    def features_300K_v2(self):
        return self.__data_dir / self.__features_300K_v2

    @property
    def features_300K_v3(self):
        return self.__data_dir / self.__features_300K_v3

    @property
    def features_18M(self):
        return self.__data_dir / self.__features_18M

    @property
    def layer_categories(self):
        return self.__data_dir / self.__layercategories

    @layer_categories.setter
    def layer_categories(self, filename):
        self.__layercategories = filename

    @property
    def descriptors_IE(self):
        return self.get_descriptorspath(self.__descriptors_IE)

    @property
    def descriptors_C33(self):
        return self.get_descriptorspath(self.__descriptors_C33)

    @property
    def descriptors_master(self):
        return self.get_descriptorspath(self.__descriptors_master)

    @property
    def descriptors_master_6k(self):
        return self.get_descriptorspath(self.__descriptors_master_6k)

    @property
    def descriptors_column_names(self):
        return self.get_descriptorspath(self.__descriptors_column_names)

    @property
    def descriptors_column_names_C33(self):
        return self.get_descriptorspath(self.__descriptors_column_names_C33)

    @property
    def descriptors_column_names_IE(self):
        return self.get_descriptorspath(self.__descriptors_column_names_IE)

    @property
    def con_bilayers(self):
        return self.get_descriptorspath(self.__con_bilayers)

    @property
    def dft_C33_uid(self):
        return self.get_descriptorspath(self.__dft_C33_uid)

    @property
    def dft_IE_uid(self):
        return self.get_descriptorspath(self.__dft_IE_uid)

    def get_datapath(self, filename):
        """Return *filename* resolved inside the data directory."""
        return self.__data_dir / filename

    def get_descriptorspath(self, filename):
        """Return *filename* resolved inside the descriptors directory."""
        return self.__descriptors_dir / filename

    def validate_files(self):
        """Return a DataFrame reporting, per known file, its resolved path
        and whether it exists on disk, indexed by (type, origin)."""
        filetype = ["predicted", "small", "uid", "predicted",
                    "small", "uid", "reference", "descriptors", "descriptors"]
        fileorigin = ["18M", "18M", "18M", "300K", "300K",
                      "300K", "reference", "descriptors", "descriptors"]
        filenames = [
            self.__predicted_18M,
            self.__small_18M,
            self.__uid_18M,
            self.__predicted_300K,
            self.__small_300K,
            self.__uid_300K,
            self.__layercategories,
            self.__descriptors_IE,
            self.__descriptors_C33]
        # BUG FIX: the uid_300K entry previously used the raw filename string
        # (self.__uid_300K) instead of the resolved Path property like every
        # other entry in this list.
        filepaths = [
            self.predicted_18M,
            self.small_18M,
            self.uid_18M,
            self.predicted_300K,
            self.small_300K,
            self.uid_300K,
            self.layer_categories,
            self.descriptors_IE,
            self.descriptors_C33]
        filevalidation = [
            self.predicted_18M.exists(),
            self.small_18M.exists(),
            self.uid_18M.exists(),
            self.predicted_300K.exists(),
            self.small_300K.exists(),
            self.uid_300K.exists(),
            self.layer_categories.exists(),
            self.descriptors_IE.exists(),
            self.descriptors_C33.exists()]
        validation = {
            "filename": filenames,
            "filepaths": filepaths,
            "fileexists": filevalidation
        }
        index = pd.MultiIndex.from_arrays(
            [filetype, fileorigin], names=["type", "origin"])
        return pd.DataFrame(validation, index=index)
|
import sys, os
# NOTE(review): ROOT_PATH is the current working directory, not this file's
# directory -- this assumes scripts are always launched from the repo root;
# confirm before relying on it elsewhere.
ROOT_PATH = os.path.abspath(".")
if ROOT_PATH not in sys.path:
    sys.path.append(ROOT_PATH)
import pathlib
#print(pathlib.Path(__file__).parent.absolute())
#print(pathlib.Path().absolute())
import warnings
# this disables a warning in sklearn for linear models:
# FutureWarning: The default value of multioutput
# (not exposed in score method) will change from
# 'variance_weighted' to 'uniform_average' in 0.23
# to keep consistent with 'metrics.r2_score'.
# To specify the default value manually and avoid the warning,
# please either call 'metrics.r2_score' directly or make a
# custom scorer with 'metrics.make_scorer'
# (the built-in scorer 'r2' uses multioutput='uniform_average').
warnings.simplefilter(action='ignore', category=FutureWarning)
from keras.models import load_model
from keras.callbacks.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.utils import plot_model
from howiml.utils import plots
from howiml.utils import prints
import pickle
import numpy as np
import math
import random
def printModelSummary(model):
    """Print a structural summary of a machine learning model.

    Dispatches on three model shapes:
      * a Keras model (has a ``summary`` method),
      * a MachineLearningModel wrapper (has a ``model`` attribute holding
        a Keras model),
      * an EnsembleModel (has a ``models`` list) whose sub-models are
        summarised recursively.
    Anything else is reported as having no summary.
    """
    if hasattr(model, "summary"):
        # Plain Keras model: print its own summary.
        print(model.summary())
    elif hasattr(model, "model"):
        # Wrapper object: recurse into the underlying Keras model.
        if hasattr(model.model, "summary"):
            printModelSummary(model.model)
    elif hasattr(model, "models"):
        print("Model is of type Ensemble Model")
        print("Sub model summaries will follow")
        print("-------------------------------")
        for submodel in model.models:
            printModelSummary(submodel)
    else:
        print("Simple models have no summary")
def printModelWeights(model):
    """Print the weights of a machine learning model.

    The weights combined with the architecture are what produce the model's
    output.  Dispatches on the same three model shapes as printModelSummary;
    plain estimators fall back to ``get_params()`` when available.
    """
    if hasattr(model, "summary"):
        # Keras model: dump each layer's config together with its weights.
        for layer in model.layers:
            print(layer.get_config(), layer.get_weights())
    elif hasattr(model, "model"):
        # Wrapper object: recurse into the underlying Keras model.
        if hasattr(model.model, "summary"):
            printModelWeights(model.model)
    elif hasattr(model, "models"):
        print("Model is of type Ensemble Model")
        print("Sub model summaries will follow")
        print("-------------------------------")
        for submodel in model.models:
            printModelWeights(submodel)
    else:
        # Simple (e.g. sklearn-style) models: parameters stand in for weights.
        print(model.get_params() if hasattr(model, "get_params") else "No weights found")
def plotKerasModel(model):
    # Plots models using the built-in Keras plotting function
    # (assumes *model* is a wrapper whose .model attribute is the underlying
    #  Keras model -- TODO confirm against MachineLearningModel)
    plot_model(model.model)
def getBasicCallbacks(monitor="val_loss", patience_es=200, patience_rlr=80):
    """Return the two default Keras callbacks used by all models.

    - EarlyStopping: stop training when the monitored loss stops improving,
      restoring the best weights seen.
    - ReduceLROnPlateau: halve the learning rate to facilitate continued
      learning, down to a floor of 5e-4.
    """
    early_stopping = EarlyStopping(
        monitor=monitor,
        min_delta=0.00001,
        patience=patience_es,
        mode='auto',
        restore_best_weights=True,
    )
    reduce_lr = ReduceLROnPlateau(
        monitor=monitor,
        factor=0.5,
        patience=patience_rlr,
        verbose=1,
        min_lr=5e-4,
    )
    return [early_stopping, reduce_lr]
def getBasicHyperparams():
    """Return a fresh dict of the default hyperparameters shared by models."""
    return dict(
        activation='relu',
        loss='mean_squared_error',
        optimizer='adam',
        metrics=['mean_squared_error'],
    )
def trainModels(modelList, filename, targetColumns, retrain=False, save=True):
    # Trains or loads each model of a provided list of models
    #
    # With retrain=True every model is trained from scratch.  Otherwise each
    # (sub)model is first looked up on disk via loadModel(); only models that
    # could not be loaded are trained.  Ensemble models are handled
    # member-by-member and then finalised with trainEnsemble().
    # When save=True all models are persisted, and finally a training summary
    # is printed and plotted if any training history is available.
    if retrain:
        for mod in modelList:
            print("Training model " + mod.name)
            mod.train()
    else:
        for mod in modelList:
            if mod.modelType != "Ensemble":
                loadedModel, loadedHistory = loadModel(mod.name, filename, targetColumns)
                if loadedModel is not None:
                    print("Model " + mod.name + " was loaded from file")
                    mod.model = loadedModel
                    mod.history = loadedHistory
                else:
                    print("Training model " + mod.name)
                    mod.train()
            else:
                # Ensembles: load/train each member, then fit the ensemble.
                for model in mod.models:
                    loadedModel, loadedHistory = loadModel(model.name, filename, targetColumns, ensembleName=mod.name)
                    if loadedModel is not None:
                        print("Model " + mod.name + " was loaded from file")
                        model.model = loadedModel
                        model.history = loadedHistory
                    else:
                        print("Training submodel " + model.name + " of Ensemble " + mod.name)
                        model.train()
                mod.trainEnsemble()
    if save:
        saveModels(modelList, filename, targetColumns)
    trainingSummary = getTrainingSummary(modelList)
    if trainingSummary:
        prints.printTrainingSummary(trainingSummary)
        plots.plotTrainingSummary(trainingSummary)
def loadModel(modelname, filename, targetColumns, ensembleName=None):
    """Load a trained Keras model and its training history from disk.

    The location is derived from the data file path, the model name and the
    target columns (plus the ensemble name for ensemble members).  Returns
    ``[model, history]``; both are None when the files are not present.
    """
    subdir = filename.split('/')[-2]
    datafile = filename.split('/')[-1].split('.')[0]
    joinedColumns = "_".join(targetColumns)
    modName = "_".join(modelname.split(' '))
    if ensembleName is None:
        directory = ROOT_PATH + '/howiml/ml/trained_models/' + subdir + '/' + datafile + '/' + modName + '_' + joinedColumns
    else:
        ensName = "_".join(ensembleName.split(' '))
        directory = ROOT_PATH + '/howiml/ml/trained_models/' + subdir + '/' + datafile + '/' + ensName + '_' + joinedColumns + '/' + modName
    # BUG FIX: the original tested the .h5 file twice; the history is read
    # from the .pickle file, so that is the second file that must exist.
    if os.path.isfile(directory + ".h5") and os.path.isfile(directory + ".pickle"):
        model = load_model(directory + ".h5")
        # Use a context manager so the pickle file handle is always closed
        # (the original leaked it via pickle.load(open(...))).
        with open(directory + ".pickle", "rb") as f:
            history = pickle.load(f)
    else:
        model = None
        history = None
    return [model, history]
def saveModels(modelList, filename, targetColumns):
    """Persist every model in *modelList* under the trained_models tree.

    The destination directory mirrors loadModel(): it is derived from the
    data file path; each model is saved under its name joined with the
    target columns.
    """
    subdir = filename.split('/')[-2]
    datafile = filename.split('/')[-1].split('.')[0]
    joinedColumns = "_".join(targetColumns)
    # The directory does not depend on the model, so compute it once.
    directory = ROOT_PATH + '/howiml/ml/trained_models/' + subdir + '/' + datafile + '/'
    os.makedirs(directory, exist_ok=True)
    for model in modelList:
        modName = "_".join(model.name.split(' '))
        # (The original also built an unused metricsPath; dropped.)
        model.save(directory, modName + '_' + joinedColumns)
def _summarizeHistory(history):
    """Compute loss/val_loss statistics for a single training history dict."""
    loss = history['loss']
    val_loss = history['val_loss']
    # history values are plain lists; convert once so comparisons broadcast.
    loss_arr = np.asarray(loss)
    val_arr = np.asarray(val_loss)
    loss_best = np.amin(loss_arr)
    loss_loc = np.where(loss_arr == loss_best)[0]
    val_loss_best = np.amin(val_arr)
    val_loc = np.where(val_arr == val_loss_best)[0]
    return {
        'loss': loss,
        'val_loss': val_loss,
        'loss_final': loss_best,
        'loss_loc': loss_loc,
        # Training loss at the epoch where validation loss was best.
        'loss_actual': loss[val_loc[0]],
        'val_loss_final': val_loss_best,
        'val_loss_loc': val_loc,
        'length': len(loss),
    }

def getTrainingSummary(modelList):
    # Calculates relevant metrics such as validation loss, loss and length of
    # model training.  Ensemble members are reported individually (skipping
    # members that also appear as top-level models); models without a history
    # are omitted.  The duplicated per-history computation of the original is
    # factored into _summarizeHistory().
    loss_dict = {}
    modelNames = [mod.name for mod in modelList]
    for model in modelList:
        if model.modelType != "Ensemble":
            if model.history is not None:
                loss_dict[model.name] = _summarizeHistory(model.history)
        else:
            for submodel in model.models:
                if submodel.history is not None and submodel.name not in modelNames:
                    loss_dict[model.name + ", " + submodel.name] = _summarizeHistory(submodel.history)
    return loss_dict
def getRNNSplit(x_data, y_data, lookback, validation_split=0.2):
    """Split a dataset into training and validation arrays for RNN training.

    Each sample consists of `lookback` consecutive rows of x_data and the
    y_data row immediately following that window.  Roughly validation_split
    (default 20%) of the data goes to validation; window start indices are
    assigned to the two sets at random.

    Returns [X, X_val, Y, Y_val] as float16 arrays.

    BUG FIX: the original allocated `ceil(n * validation_split)` validation
    rows but only had `lookback` fewer usable windows to fill them, leaving
    `lookback` all-zero rows at the end of X_val/Y_val that silently corrupted
    validation metrics.  The validation arrays are now sized to the number of
    windows actually assigned to validation.
    """
    num_x_signals = x_data.shape[1]
    num_y_signals = y_data.shape[1]
    num_samples = x_data.shape[0]
    # Only windows whose target row exists are usable.
    num_windows = num_samples - lookback
    length_valid = math.ceil(num_samples * validation_split)
    length_train = num_samples - length_valid
    train_samples = random.sample(range(num_windows), length_train)
    valid_samples = list(set(range(num_windows)) - set(train_samples))
    X = np.zeros(shape=(length_train, lookback, num_x_signals), dtype=np.float16)
    Y = np.zeros(shape=(length_train, num_y_signals), dtype=np.float16)
    X_val = np.zeros(shape=(len(valid_samples), lookback, num_x_signals), dtype=np.float16)
    Y_val = np.zeros(shape=(len(valid_samples), num_y_signals), dtype=np.float16)
    for i, sample in enumerate(train_samples):
        X[i] = x_data[sample:sample + lookback]
        Y[i] = y_data[sample + lookback]
    for i, sample in enumerate(valid_samples):
        X_val[i] = x_data[sample:sample + lookback]
        Y_val[i] = y_data[sample + lookback]
    return [X, X_val, Y, Y_val]
<filename>tests/test_dms.py
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import BaseTest
class ReplInstance(BaseTest):
    """Tests for describing and deleting dms-instance resources."""

    def test_describe_augment_no_tags(self):
        # Flight data recorded for an instance without any tags.
        factory = self.replay_flight_data(
            'test_dms_repl_instance_describe_sans_tags')
        policy = self.load_policy(
            {'name': 'dms-replinstance', 'resource': 'dms-instance'},
            session_factory=factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            resources[0]['ReplicationInstanceIdentifier'],
            'replication-instance-1')

    def test_describe_get_resources(self):
        factory = self.replay_flight_data('test_dms_repl_instance_delete')
        policy = self.load_policy(
            {'name': 'dms-replinstance', 'resource': 'dms-instance'},
            session_factory=factory)
        resources = policy.resource_manager.get_resources(
            ['replication-instance-1'])
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            resources[0]['ReplicationInstanceIdentifier'],
            'replication-instance-1')

    def test_delete(self):
        factory = self.replay_flight_data('test_dms_repl_instance_delete')
        client = factory().client('dms')
        policy = self.load_policy(
            {'name': 'dms-replinstance',
             'resource': 'dms-instance',
             'actions': ['delete']},
            session_factory=factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            resources[0]['ReplicationInstanceIdentifier'],
            'replication-instance-1')
        # The instance should now be transitioning to deleted.
        instances = client.describe_replication_instances().get(
            'ReplicationInstances')
        self.assertEqual(instances[0]['ReplicationInstanceStatus'], 'deleting')
class ReplicationInstanceTagging(BaseTest):
    """Tag, untag and mark-for-op behaviour on dms-instance resources."""

    def test_replication_instance_tag(self):
        factory = self.replay_flight_data('test_dms_tag')
        policy = self.load_policy(
            {'name': 'tag-dms-instance',
             'resource': 'dms-instance',
             'filters': [{'tag:RequiredTag': 'absent'}],
             'actions': [{'type': 'tag',
                          'key': 'RequiredTag',
                          'value': 'RequiredValue'}]},
            session_factory=factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        client = factory(region='us-east-1').client('dms')
        tag_list = client.list_tags_for_resource(
            ResourceArn=resources[0]['ReplicationInstanceArn'])['TagList']
        tag_value = [t['Value'] for t in tag_list if t['Key'] == 'RequiredTag']
        self.assertEqual(tag_value[0], 'RequiredValue')

    def test_remove_replication_instance_tag(self):
        factory = self.replay_flight_data('test_dms_tag_remove')
        policy = self.load_policy(
            {'name': 'remove-dms-tag',
             'resource': 'dms-instance',
             'filters': [{'tag:RequiredTag': 'RequiredValue'}],
             'actions': [{'type': 'remove-tag',
                          'tags': ["RequiredTag"]}]},
            session_factory=factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        client = factory(region='us-east-1').client('dms')
        tag_list = client.list_tags_for_resource(
            ResourceArn=resources[0]['ReplicationInstanceArn'])['TagList']
        self.assertFalse([t for t in tag_list if t['Key'] == 'RequiredTag'])

    def test_replication_instance_markforop(self):
        factory = self.replay_flight_data('test_dms_mark_for_op')
        policy = self.load_policy(
            {'name': 'dms-instance-markforop',
             'resource': 'dms-instance',
             'filters': [{'tag:RequiredTag': 'absent'}],
             'actions': [{'type': 'mark-for-op',
                          'tag': 'custodian_cleanup',
                          'op': 'delete',
                          'days': 2}]},
            session_factory=factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        client = factory(region='us-east-1').client('dms')
        tag_list = client.list_tags_for_resource(
            ResourceArn=resources[0]['ReplicationInstanceArn'])['TagList']
        self.assertTrue(
            [t['Value'] for t in tag_list if t['Key'] == 'custodian_cleanup'])

    def test_replication_instance_markedforop(self):
        factory = self.replay_flight_data('test_dms_marked_for_op')
        policy = self.load_policy(
            {'name': 'dms-instance-markedforop',
             'resource': 'dms-instance',
             'filters': [{'type': 'marked-for-op',
                          'tag': 'custodian_cleanup',
                          'op': 'delete',
                          'skew': 2}]},
            session_factory=factory)
        resources = policy.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(
            resources[0]['ReplicationInstanceIdentifier'],
            'replication-instance-1')
class DmsEndpointTests(BaseTest):
    """Tests for the dms-endpoint resource and its modify-endpoint action.

    All tests replay recorded AWS flight data, so client calls must stay in
    the recorded order and the literal identifiers/settings must match the
    fixtures exactly.
    """

    def test_resource_query(self):
        session_factory = self.replay_flight_data('test_dms_resource_query')
        p = self.load_policy({
            'name': 'dms-endpoint-query',
            'resource': 'dms-endpoint'}, session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)

    def test_endpoint_modify_sql(self):
        # Modify a SQL Server endpoint and verify the new connection settings.
        session_factory = self.replay_flight_data(
            'test_dms_modify_endpoint_sql')
        p = self.load_policy({
            'name': 'dms-sql-ssl',
            'resource': 'dms-endpoint',
            'filters': [
                {'EndpointIdentifier': 'c7n-dms-sql-ep'},
                {'ServerName': 'c7n-sql-db'}
            ],
            'actions': [{
                'type': 'modify-endpoint',
                'Port': 3305,
                'SslMode': 'require',
                'Username': 'admin',
                'Password': '<PASSWORD>',
                'ServerName': 'c7n-sql-db-02',
                'DatabaseName': 'c7n-db-02',
            }]}, session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory(region='us-east-1').client('dms')
        ep = client.describe_endpoints()['Endpoints'][0]
        self.assertEqual(
            [ep['Port'], ep['SslMode'], ep['Username'],
             ep['ServerName'], ep['DatabaseName']],
            [3305, 'require', 'admin', 'c7n-sql-db-02', 'c7n-db-02'])

    def test_endpoint_modify_s3(self):
        # Add S3 settings (bucket folder, role, compression) to an S3 endpoint.
        session_factory = self.replay_flight_data(
            'test_dms_modify_endpoint_s3')
        p = self.load_policy({
            'name': 'dms-s3-bucket',
            'resource': 'dms-endpoint',
            'filters': [
                {'EndpointIdentifier': 'c7n-dms-s3-ep'},
                {'S3Settings.BucketFolder': 'absent'}
            ],
            'actions': [{
                'type': 'modify-endpoint',
                'S3Settings': {
                    'BucketFolder': 's3_dms',
                    'ServiceAccessRoleArn': 'arn:aws:iam::644160558196:role/DMS-Test-Role-02',
                    'CompressionType': 'gzip'}}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory(region='us-east-1').client('dms')
        ep = client.describe_endpoints()['Endpoints'][0]
        # Note: the service reports CompressionType upper-cased ('GZIP').
        self.assertEqual([
            ep['S3Settings']['ServiceAccessRoleArn'],
            ep['S3Settings']['CompressionType'],
            ep['S3Settings']['BucketFolder']],
            ['arn:aws:iam::644160558196:role/DMS-Test-Role-02', 'GZIP',
             's3_dms'])

    def test_endpoint_modify_mongodb(self):
        # Modify MongoDB-specific settings and verify them on the endpoint.
        session_factory = self.replay_flight_data(
            'test_dms_modify_endpoint_mongodb')
        p = self.load_policy({
            'name': 'dms-mongo-db',
            'resource': 'dms-endpoint',
            'filters': [{'EndpointIdentifier': 'c7n-dms-mongo-ep'}],
            'actions': [{
                'type': 'modify-endpoint',
                'MongoDbSettings': {
                    'Username': 'madmin',
                    'Password': '<PASSWORD>',
                    'ServerName': 'c7n-mongo-db-02',
                    'NestingLevel': 'one',
                    'AuthSource': 'c7n-users-02'}}]},
            session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory(region='us-east-1').client('dms')
        ep = client.describe_endpoints()['Endpoints'][0]['MongoDbSettings']
        self.assertEqual([
            ep['Username'], ep['ServerName'],
            ep['NestingLevel'], ep['AuthSource']],
            ['madmin', 'c7n-mongo-db-02', 'one', 'c7n-users-02'])

    def test_endpoint_modify_dynamo(self):
        # Swap the DynamoDB service access role on a dynamo endpoint.
        session_factory = self.replay_flight_data(
            'test_dms_modify_endpoint_dynamo')
        p = self.load_policy({
            'name': 'dms-mongo-db',
            'resource': 'dms-endpoint',
            'filters': [
                {'EndpointIdentifier': 'c7n-dms-dynamo-ep'},
                {'DynamoDbSettings.ServiceAccessRoleArn': 'arn:aws:iam::644160558196:role/DMS-Test-Role-01'}
            ],
            'actions': [{
                'type': 'modify-endpoint',
                'DynamoDbSettings': {
                    'ServiceAccessRoleArn': 'arn:aws:iam::644160558196:role/DMS-Test-Role-02'
                }}]},session_factory=session_factory)
        resources = p.run()
        self.assertEqual(len(resources), 1)
        client = session_factory(region='us-east-1').client('dms')
        ep = client.describe_endpoints()['Endpoints'][0]['DynamoDbSettings']
        self.assertEqual(ep['ServiceAccessRoleArn'],
                         'arn:aws:iam::644160558196:role/DMS-Test-Role-02')
|
<gh_stars>1-10
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jordan-Wigner transform on fermionic operators."""
import itertools
import numpy
from openfermion.ops.operators import (FermionOperator, MajoranaOperator,
QubitOperator)
from openfermion.ops.representations import (DiagonalCoulombHamiltonian,
InteractionOperator)
import openfermion.utils.operator_utils as op_utils
def jordan_wigner(operator):
    r"""Apply the Jordan-Wigner transform to a FermionOperator,
    MajoranaOperator, InteractionOperator, or DiagonalCoulombHamiltonian
    to convert to a QubitOperator.

    Operators are mapped as follows:
    a_j^\dagger -> Z_0 .. Z_{j-1} (X_j - iY_j) / 2
    a_j -> Z_0 .. Z_{j-1} (X_j + iY_j) / 2

    Returns:
        transformed_operator: An instance of the QubitOperator class.

    Warning:
        The runtime of this method is exponential in the maximum locality
        of the original FermionOperator.

    Raises:
        TypeError: Operator must be a FermionOperator,
            DiagonalCoulombHamiltonian, or InteractionOperator.
    """
    # Dispatch to the specialised transform for the operator's type.
    dispatch = (
        (FermionOperator, jordan_wigner_fermion_operator),
        (MajoranaOperator, jordan_wigner_majorana_operator),
        (DiagonalCoulombHamiltonian, jordan_wigner_diagonal_coulomb_hamiltonian),
        (InteractionOperator, jordan_wigner_interaction_op),
    )
    for operator_type, transform in dispatch:
        if isinstance(operator, operator_type):
            return transform(operator)
    raise TypeError("Operator must be a FermionOperator, "
                    "MajoranaOperator, "
                    "DiagonalCoulombHamiltonian, or "
                    "InteractionOperator.")
def jordan_wigner_fermion_operator(operator):
    """Transform a FermionOperator term-by-term via the Jordan-Wigner map."""
    transformed_operator = QubitOperator()
    for term, coefficient in operator.terms.items():
        # Start from the identity scaled by the term's coefficient.
        transformed_term = QubitOperator((), coefficient)
        # Transform each ladder operator in the term and multiply them in.
        for index, raise_or_lower in term:
            z_factors = tuple((z, 'Z') for z in range(index))
            x_component = QubitOperator(z_factors + ((index, 'X'),), 0.5)
            # a^\dagger -> (X - iY)/2 ; a -> (X + iY)/2
            y_sign = -0.5j if raise_or_lower else 0.5j
            y_component = QubitOperator(z_factors + ((index, 'Y'),), y_sign)
            transformed_term *= x_component + y_component
        transformed_operator += transformed_term
    return transformed_operator
def jordan_wigner_majorana_operator(operator):
    """Transform a MajoranaOperator to a QubitOperator.

    Majorana mode 2q maps to Z_0..Z_{q-1} X_q and mode 2q+1 to
    Z_0..Z_{q-1} Y_q.
    """
    result = QubitOperator()
    for term, coeff in operator.terms.items():
        product = QubitOperator((), coeff)
        for majorana_index in term:
            qubit, parity = divmod(majorana_index, 2)
            z_string = tuple((z, 'Z') for z in range(qubit))
            flip_op = 'Y' if parity else 'X'
            product *= QubitOperator(z_string + ((qubit, flip_op),))
        result += product
    return result
def jordan_wigner_diagonal_coulomb_hamiltonian(operator):
    """Transform a DiagonalCoulombHamiltonian to a QubitOperator.

    Diagonal one-body terms map through n_p = (I - Z_p)/2; off-diagonal
    one-body terms produce X/Y hopping strings; two-body n_p n_q terms
    expand into Z_p, Z_q, Z_p Z_q and identity contributions.
    """
    n_qubits = op_utils.count_qubits(operator)
    qubit_operator = QubitOperator((), operator.constant)
    # Transform diagonal one-body terms
    for p in range(n_qubits):
        # Includes the two_body[p, p] self-interaction contribution.
        coefficient = operator.one_body[p, p] + operator.two_body[p, p]
        qubit_operator += QubitOperator(((p, 'Z'),), -.5 * coefficient)
        qubit_operator += QubitOperator((), .5 * coefficient)
    # Transform other one-body terms and two-body terms
    for p, q in itertools.combinations(range(n_qubits), 2):
        # One-body
        real_part = numpy.real(operator.one_body[p, q])
        imag_part = numpy.imag(operator.one_body[p, q])
        # Jordan-Wigner Z string between the two sites.
        parity_string = [(i, 'Z') for i in range(p + 1, q)]
        qubit_operator += QubitOperator([(p, 'X')] + parity_string + [(q, 'X')],
                                        .5 * real_part)
        qubit_operator += QubitOperator([(p, 'Y')] + parity_string + [(q, 'Y')],
                                        .5 * real_part)
        qubit_operator += QubitOperator([(p, 'Y')] + parity_string + [(q, 'X')],
                                        .5 * imag_part)
        qubit_operator += QubitOperator([(p, 'X')] + parity_string + [(q, 'Y')],
                                        -.5 * imag_part)
        # Two-body
        coefficient = operator.two_body[p, q]
        qubit_operator += QubitOperator(((p, 'Z'), (q, 'Z')), .5 * coefficient)
        qubit_operator += QubitOperator((p, 'Z'), -.5 * coefficient)
        qubit_operator += QubitOperator((q, 'Z'), -.5 * coefficient)
        qubit_operator += QubitOperator((), .5 * coefficient)
    return qubit_operator
def jordan_wigner_interaction_op(iop, n_qubits=None):
    """Output InteractionOperator as QubitOperator class under JW transform.
    One could accomplish this very easily by first mapping to fermions and
    then mapping to qubits. We skip the middle step for the sake of speed.
    This only works for real InteractionOperators (no complex numbers).
    Returns:
        qubit_operator: An instance of the QubitOperator class.
    """
    if n_qubits is None:
        n_qubits = op_utils.count_qubits(iop)
    if n_qubits < op_utils.count_qubits(iop):
        raise ValueError('Invalid number of qubits specified.')
    # Initialize qubit operator as constant.
    qubit_operator = QubitOperator((), iop.constant)
    # Transform diagonal one-body terms
    for p in range(n_qubits):
        coefficient = iop[(p, 1), (p, 0)]
        qubit_operator += jordan_wigner_one_body(p, p, coefficient)
    # Transform other one-body terms and "diagonal" two-body terms
    for p, q in itertools.combinations(range(n_qubits), 2):
        # One-body: symmetrise the (p,q)/(q,p) pair into one Hermitian term.
        coefficient = .5 * (iop[(p, 1), (q, 0)] + iop[(q, 1),
                                                      (p, 0)].conjugate())
        qubit_operator += jordan_wigner_one_body(p, q, coefficient)
        # Two-body: collect the four index orderings of the n_p n_q term
        # (signs from fermionic anticommutation).
        coefficient = (iop[(p, 1), (q, 1), (p, 0),
                           (q, 0)] - iop[(p, 1), (q, 1), (q, 0),
                                         (p, 0)] - iop[(q, 1), (p, 1), (p, 0),
                                                       (q, 0)] + iop[(q, 1),
                                                                     (p, 1),
                                                                     (q, 0),
                                                                     (p, 0)])
        qubit_operator += jordan_wigner_two_body(p, q, p, q, coefficient)
    # Transform the rest of the two-body terms
    for (p, q), (r, s) in itertools.combinations(
            itertools.combinations(range(n_qubits), 2), 2):
        # Combine all eight equivalent index orderings into one coefficient.
        coefficient = 0.5 * (iop[(p, 1), (q, 1), (r, 0),
                                 (s, 0)] + iop[(s, 1), (r, 1), (q, 0),
                                               (p, 0)].conjugate() -
                             iop[(p, 1), (q, 1), (s, 0),
                                 (r, 0)] - iop[(r, 1), (s, 1), (q, 0),
                                               (p, 0)].conjugate() -
                             iop[(q, 1), (p, 1), (r, 0),
                                 (s, 0)] - iop[(s, 1), (r, 1), (p, 0),
                                               (q, 0)].conjugate() +
                             iop[(q, 1), (p, 1), (s, 0),
                                 (r, 0)] + iop[(r, 1), (s, 1), (p, 0),
                                               (q, 0)].conjugate())
        qubit_operator += jordan_wigner_two_body(p, q, r, s, coefficient)
    return qubit_operator
def jordan_wigner_one_body(p, q, coefficient=1.):
    r"""Map the term a^\dagger_p a_q + h.c. to QubitOperator.

    Diagonal (p == q) terms are divided by a factor of 2 because they are
    equal to their own Hermitian conjugate.
    """
    qubit_operator = QubitOperator()
    if p == q:
        # Diagonal: n_p = (I - Z_p) / 2, halved as noted above.
        qubit_operator += QubitOperator((), .5 * coefficient)
        qubit_operator += QubitOperator(((p, 'Z'),), -.5 * coefficient)
        return qubit_operator
    # Off-diagonal: normal-order the indices, conjugating the coefficient.
    if p > q:
        p, q = q, p
        coefficient = coefficient.conjugate()
    parity_string = tuple((z, 'Z') for z in range(p + 1, q))
    components = [(coefficient.real, 'X', 'X'),
                  (coefficient.real, 'Y', 'Y'),
                  (coefficient.imag, 'Y', 'X'),
                  (-coefficient.imag, 'X', 'Y')]
    for weight, op_a, op_b in components:
        operators = ((p, op_a),) + parity_string + ((q, op_b),)
        qubit_operator += QubitOperator(operators, .5 * weight)
    return qubit_operator
def jordan_wigner_two_body(p, q, r, s, coefficient=1.):
    r"""Map the term a^\dagger_p a^\dagger_q a_r a_s + h.c. to QubitOperator.
    Note that the diagonal terms are divided by a factor of two
    because they are equal to their own Hermitian conjugate.

    The mapping branches on the number of distinct indices among p, q, r, s
    (4, 3 or 2); terms with p == q or r == s vanish by antisymmetry.
    """
    # Initialize qubit operator.
    qubit_operator = QubitOperator()
    # Return zero terms.
    if (p == q) or (r == s):
        return qubit_operator
    # Handle case of four unique indices.
    elif len(set([p, q, r, s])) == 4:
        # Sign from normal-ordering the creation/annihilation index pairs.
        if (p > q) ^ (r > s):
            coefficient *= -1
        # Loop through different operators which act on each tensor factor.
        for ops in itertools.product('XY', repeat=4):
            # Get coefficients.
            if ops.count('X') % 2:
                coeff = .125 * coefficient.imag
                if ''.join(ops) in ['XYXX', 'YXXX', 'YYXY', 'YYYX']:
                    coeff *= -1
            else:
                coeff = .125 * coefficient.real
                if ''.join(ops) not in ['XXYY', 'YYXX']:
                    coeff *= -1
            if not coeff:
                continue
            # Sort operators.
            [(a, operator_a), (b, operator_b), (c, operator_c),
             (d, operator_d)] = sorted(zip([p, q, r, s], ops))
            # Compute operator strings (Z parity strings fill the gaps
            # between the first/second and third/fourth sorted indices).
            operators = ((a, operator_a),)
            operators += tuple((z, 'Z') for z in range(a + 1, b))
            operators += ((b, operator_b),)
            operators += ((c, operator_c),)
            operators += tuple((z, 'Z') for z in range(c + 1, d))
            operators += ((d, operator_d),)
            # Add term.
            qubit_operator += QubitOperator(operators, coeff)
    # Handle case of three unique indices.
    elif len(set([p, q, r, s])) == 3:
        # Identify equal tensor factors: (a, b) become the hopping pair,
        # c the number-operator site; exactly one branch below applies.
        if p == r:
            if q > s:
                a, b = s, q
                coefficient = -coefficient.conjugate()
            else:
                a, b = q, s
                coefficient = -coefficient
            c = p
        elif p == s:
            if q > r:
                a, b = r, q
                coefficient = coefficient.conjugate()
            else:
                a, b = q, r
            c = p
        elif q == r:
            if p > s:
                a, b = s, p
                coefficient = coefficient.conjugate()
            else:
                a, b = p, s
            c = q
        elif q == s:
            if p > r:
                a, b = r, p
                coefficient = -coefficient.conjugate()
            else:
                a, b = p, r
                coefficient = -coefficient
            c = q
        # Get operators.
        parity_string = tuple((z, 'Z') for z in range(a + 1, b))
        pauli_z = QubitOperator(((c, 'Z'),))
        # NOTE(review): the loop variable below shadows the qubit index `c`
        # used just above; safe only because pauli_z is built first.
        for c, (op_a, op_b) in [(coefficient.real, 'XX'),
                                (coefficient.real, 'YY'),
                                (coefficient.imag, 'YX'),
                                (-coefficient.imag, 'XY')]:
            operators = ((a, op_a),) + parity_string + ((b, op_b),)
            if not c:
                continue
            # Add term.
            hopping_term = QubitOperator(operators, c / 4)
            qubit_operator -= pauli_z * hopping_term
            qubit_operator += hopping_term
    # Handle case of two unique indices.
    elif len(set([p, q, r, s])) == 2:
        # Get coefficient (sign depends on the pairing pattern).
        if p == s:
            coeff = -.25 * coefficient
        else:
            coeff = .25 * coefficient
        # Add terms.
        qubit_operator -= QubitOperator((), coeff)
        qubit_operator += QubitOperator(((p, 'Z'),), coeff)
        qubit_operator += QubitOperator(((q, 'Z'),), coeff)
        qubit_operator -= QubitOperator(((min(q, p), 'Z'), (max(q, p), 'Z')),
                                        coeff)
    return qubit_operator
|
# -*- coding: utf-8 -*-
import xbmc
import xbmcgui
import xbmcplugin
import xbmcaddon
import json
import requests
import routing
import sys
from urllib.parse import quote, unquote
from .lib import helpers, lookups, auth
# NOTE(review): this Python 2 compatibility block is dead code -- the
# `urllib.parse` import above already fails on Python 2, and `reload` is not
# a builtin on Python 3; consider removing.
if sys.version_info.major < 3:
    reload(sys)
    sys.setdefaultencoding('utf8')
# Module-level plugin router and add-on handle shared by all routes below.
plugin = routing.Plugin()
addon = xbmcaddon.Addon()
def run():
    """Plugin entry point: configure pagination, verify credentials, route.

    When no credentials are available, navigate back out of the plugin
    window and abort.
    """
    pagination_options = lookups.settings['pagination_options']
    selected = int(xbmcplugin.getSetting(plugin.handle, 'pagination'))
    lookups.shared['pagination'] = pagination_options[selected]
    if auth.performCredentialCheck():
        plugin.run()
    else:
        xbmc.executebuiltin("Action(Back,%s)" % xbmcgui.getCurrentWindowId())
        sys.exit(1)
"""
ROOT MENU
"""
@plugin.route('/')
def root():
    """Build the root menu: one folder per top-level section plus settings."""
    for entry in lookups.menu_items:
        item = xbmcgui.ListItem(entry['title'])
        item.setArt({'icon': entry['icon']})
        target = plugin.url_for_path('/section/{0}/'.format(entry['resource']))
        xbmcplugin.addDirectoryItem(plugin.handle, target, item, isFolder=True)
    # Fixed trailing entry leading to the add-on settings.
    settings_item = xbmcgui.ListItem('Nastavení')
    settings_item.setArt({'icon': 'DefaultAddonService.png'})
    xbmcplugin.addDirectoryItem(
        plugin.handle,
        plugin.url_for_path('/action/settings'),
        settings_item,
        isFolder=True)
    xbmcplugin.endOfDirectory(plugin.handle)
"""
SECTION LISTING
"""
@plugin.route('/section/<resource>/')
def section(resource):
page = int(plugin.args['page'][0]) if 'page' in plugin.args else 0
search = plugin.args['search'][0] if 'search' in plugin.args else ''
items = helpers.requestResource(resource, page=page, postOptions={'search': search})
if page == 0 and not search:
if 'searchable' in lookups.resources[resource]:
url = plugin.url_for_path( '/action/search/?origin={0}'.format(resource) )
li = xbmcgui.ListItem('Hledat')
li.setArt( {'icon': 'DefaultAddonsSearch.png'} )
xbmcplugin.addDirectoryItem(plugin.handle, url, li, isFolder=True)
if 'subsections' in lookups.resources[resource]:
for item in lookups.resources[resource]['subsections']:
xbmcplugin.addDirectoryItem(plugin.handle,
plugin.url_for_path( '/section/{0}/'.format(item['resource']) ),
xbmcgui.ListItem(item['title']),
isFolder=True
)
renderItems(items)
if len(items) == lookups.shared['pagination']:
xbmcplugin.addDirectoryItem(plugin.handle,
plugin.url_for_path( '/section/{0}/?page={1}'.format(resource, page+1) ),
xbmcgui.ListItem('Další strana'),
isFolder=True
)
xbmcplugin.endOfDirectory(plugin.handle)
"""
PROGRAM LISTING
"""
@plugin.route('/program/<nid>/')
def program(nid):
page = int(plugin.args['page'][0]) if 'page' in plugin.args else 0
programDetail = helpers.requestResource( 'program_by_id', page=page, postOptions={'nid': nid} )
if page == 0:
for season in programDetail['seasons'] or []:
li = xbmcgui.ListItem(season)
url = lookups.shared['plugin_path'] + '/sublisting/{0}/{1}/'.format(nid, quote(season, safe=''))
xbmcplugin.addDirectoryItem(plugin.handle, url, li, isFolder=True)
bonuses = helpers.requestResource( 'bonus', postOptions={'programId': nid, 'count': 1} )
if len(bonuses) > 0:
li = xbmcgui.ListItem('Bonusy')
url = plugin.url_for_path( '/sublisting/{0}/bonus/'.format(nid) )
xbmcplugin.addDirectoryItem(plugin.handle, url, li, isFolder=True)
renderItems(programDetail['episodes'])
if len(programDetail['episodes']) == lookups.shared['pagination']:
xbmcplugin.addDirectoryItem(plugin.handle,
plugin.url_for_path( '/program/{0}/?page={1}'.format(nid, page+1) ),
xbmcgui.ListItem('Další strana'),
isFolder=True
)
xbmcplugin.endOfDirectory(plugin.handle)
"""
SUBPROGRAM LISTING
"""
@plugin.route('/sublisting/<programId>/<season>/')
def sublisting(programId, season):
page = int(plugin.args['page'][0]) if 'page' in plugin.args else 0
if season == 'bonus':
items = helpers.requestResource( 'bonus', page=page, postOptions={'programId': programId} )
else:
items = helpers.requestResource( 'season', page=page, postOptions={'programId': programId, 'season': unquote(season)} )
renderItems(items)
if len(items) == lookups.shared['pagination']:
xbmcplugin.addDirectoryItem(plugin.handle,
plugin.url_for_path( '/sublisting/{0}/{1}?page={2}'.format(programId, season, page+1) ),
xbmcgui.ListItem('Další strana'),
isFolder=True
)
xbmcplugin.endOfDirectory(plugin.handle)
"""
ITEMS RENDERING
"""
def renderItems(items):
for item in items:
if 'admittanceType' in item and item['admittanceType'] not in lookups.free_admittance_types:
continue
label = item['name'] if 'name' in item else item['title']
itemType = item['type'] if 'type' in item else 'video'
genres = item['genres'] if 'genres' in item else ''
teaser = item['teaser'] if 'teaser' in item else ''
isPlayable = helpers.isPlayable(itemType)
li = xbmcgui.ListItem(label)
if isPlayable:
url = plugin.url_for_path( '/action/play/?videoId={0}'.format(item['playId']) )
li.setProperty('IsPlayable', 'true')
else:
url = plugin.url_for_path( '/program/{0}/'.format(item['nid']) )
infoLabels = {
'genre': ', '.join( genres or '' ),
'plot': teaser or ''
}
if 'length' in item:
infoLabels['duration'] = item['length']
if 'premiereDate' in item:
infoLabels['premiered'] = item['premiereDate'].split('T')[0]
if 'thumbnailData' in item and item['thumbnailData']:
li.setArt({ 'thumb': item['thumbnailData']['url'] })
if 'logo' in item:
li.setArt({ 'thumb': item['logo'] })
li.setInfo( type='video', infoLabels=infoLabels )
xbmcplugin.addDirectoryItem(plugin.handle, url, li, isFolder=not isPlayable)
"""
ACTIONS
"""
@plugin.route('/action/<name>')
def action(name):
if name == 'search':
keyboard = xbmc.Keyboard('', 'Zadejte název pořadu nebo jeho část:')
keyboard.doModal()
if (keyboard.isConfirmed()):
txt = keyboard.getText()
origin = plugin.args['origin'][0]
plugin.args['search'] = [txt]
section(origin) # cannot use redirect here beacuse of bug in routing module
if name == 'settings':
addon.openSettings()
if name == 'play':
videoId = plugin.args['videoId'][0]
videoDetail = helpers.requestResource('play', replace={'id': videoId})
try:
url = videoDetail['streamInfos'][0]['url']
except:
helpers.displayMessage('Nenalezen žádný stream pro video', 'ERROR')
return
li = xbmcgui.ListItem(path=url)
xbmcplugin.setResolvedUrl(plugin.handle, True, li)
|
<reponame>eo1989/VectorBTanalysis<filename>.venv/lib/python3.8/site-packages/beakerx/plots/tests/test_heatmap.py
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from random import randint
import pandas as pd
from ..chart import HeatMap, XYChart
from ..legend import LegendPosition
class TestHeatMap(unittest.TestCase):
    """Model-level tests for the HeatMap chart widget."""

    @staticmethod
    def _square(size):
        """Return a size x size grid of random ints in [1, 100]."""
        return [[randint(1, 100) for _ in range(size)] for _ in range(size)]

    def test_empty_data(self):
        chart = HeatMap(data=[])
        model = chart.model
        self.assertFalse(model[XYChart.TOO_MANY_ROWS])
        self.assertEqual(model[XYChart.TOTAL_NUMBER_OF_POINTS], 0)
        self.assertEqual(model[XYChart.NUMBER_OF_POINTS_TO_DISPLAY], 0)
        self.assertEqual(model[XYChart.ROWS_LIMIT_ITEMS], chart.rows_limit)

    def test_xLowerMargin(self):
        model = HeatMap(data=[], xLowerMargin=1.0).model
        self.assertEqual(model["x_lower_margin"], 1.0)

    def test_yLowerMargin(self):
        model = HeatMap(data=[], yLowerMargin=2.0).model
        self.assertEqual(model["y_lower_margin"], 2.0)

    def test_yUpperMargin(self):
        model = HeatMap(data=[], yUpperMargin=3.0).model
        self.assertEqual(model["y_upper_margin"], 3.0)

    def test_should_not_limit_data(self):
        chart = HeatMap(data=self._square(10))
        model = chart.model
        self.assertFalse(model[XYChart.TOO_MANY_ROWS])
        self.assertEqual(model[XYChart.TOTAL_NUMBER_OF_POINTS], 100)
        self.assertEqual(model[XYChart.NUMBER_OF_POINTS_TO_DISPLAY], 100)
        self.assertEqual(model[XYChart.ROWS_LIMIT_ITEMS], chart.rows_limit)

    def test_should_limit_data(self):
        chart = HeatMap(100, 10, data=self._square(1001))
        model = chart.model
        self.assertTrue(model[XYChart.TOO_MANY_ROWS])
        self.assertEqual(model[XYChart.TOTAL_NUMBER_OF_POINTS], 1002001)
        self.assertEqual(model[XYChart.NUMBER_OF_POINTS_TO_DISPLAY], 10201)
        self.assertEqual(model[XYChart.ROWS_LIMIT_ITEMS], 100)

    def test_support_data_frame_series(self):
        frame = pd.DataFrame({'data': self._square(1001)})
        chart = HeatMap(100, 10, data=frame['data'])
        model = chart.model
        self.assertEqual(len(model['graphics_list'][0]), 101)
        self.assertTrue(model[XYChart.TOO_MANY_ROWS])
        self.assertEqual(model[XYChart.TOTAL_NUMBER_OF_POINTS], 1002001)
        self.assertEqual(model[XYChart.NUMBER_OF_POINTS_TO_DISPLAY], 10201)
        self.assertEqual(model[XYChart.ROWS_LIMIT_ITEMS], 100)

    def test_support_data_frame(self):
        chart = HeatMap(100, 10, data=pd.DataFrame(self._square(1001)))
        model = chart.model
        self.assertTrue(model[XYChart.TOO_MANY_ROWS])
        self.assertEqual(model[XYChart.TOTAL_NUMBER_OF_POINTS], 1002001)
        self.assertEqual(model[XYChart.NUMBER_OF_POINTS_TO_DISPLAY], 10201)
        self.assertEqual(model[XYChart.ROWS_LIMIT_ITEMS], 100)

    def test_legend_default_position(self):
        model = HeatMap(data=[], legendPosition=LegendPosition.TOP).model
        self.assertEqual(model['legend_position']['position'], "TOP")
        self.assertEqual(model['legend_position']['type'], "LegendPosition")

    def test_legend_default_layout(self):
        model = HeatMap(data=[]).model
        self.assertEqual(model['legend_layout'], "HORIZONTAL")
|
import re
import sys
import nameparser
import sqlalchemy as sa
import sqlalchemy.orm as saorm
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import (
Column, String, Text, Integer, Enum, Date,
CHAR, FLOAT,
ForeignKey, CheckConstraint, UniqueConstraint
)
from mixins import BasicMixin, UniqueMixin
# Engine/session plumbing: a file-backed SQLite database with SQL echo on.
engine = sa.create_engine('sqlite:///nsf-award-data.db', echo=True)
session_factory = saorm.sessionmaker(bind=engine)
# Thread-local session registry.
Session = saorm.scoped_session(session_factory)
# Declarative base all model classes below derive from (via the mixins).
Base = declarative_base()
class Directorate(UniqueMixin, Base):
    """An NSF directorate — top level of the organisational hierarchy."""
    id = Column(Integer, primary_key=True)
    name = Column(String(80), nullable=False)
    code = Column(CHAR(4), unique=True)
    phone = Column(String(15), unique=True)
    # One-to-many: deleting a directorate removes its divisions.
    divisions = saorm.relationship(
        'Division', backref='directorate',
        cascade='all, delete-orphan', passive_deletes=True)

    def __init__(self, name, code=None, phone=None):
        self.name = name
        self.code = code
        self.phone = phone

    @classmethod
    def unique_hash(cls, name, *args, **kwargs):
        # Uniqueness key used by UniqueMixin's identity cache.
        return name

    @classmethod
    def unique_filter(cls, query, name, *args, **kwargs):
        # Query predicate matching unique_hash above.
        return query.filter(Directorate.name == name)
class Division(UniqueMixin, Base):
    """An NSF division; belongs to a directorate and owns programs."""
    id = Column(Integer, primary_key=True)
    name = Column(String(80), nullable=False)
    code = Column(CHAR(4), unique=True)
    phone = Column(String(15), unique=True)
    # Owning directorate; this row is removed when the directorate is deleted.
    dir_id = Column(
        Integer, ForeignKey('directorate.id', ondelete='CASCADE'),
        nullable=False)
    # One-to-many: deleting a division removes its programs.
    programs = saorm.relationship(
        'Program', backref='division',
        cascade='all, delete-orphan', passive_deletes=True)

    def __init__(self, name, code=None, phone=None, dir_id=None):
        self.name = name
        self.code = code
        self.phone = phone
        self.dir_id = dir_id

    @classmethod
    def unique_hash(cls, name, *args, **kwargs):
        # Uniqueness key used by UniqueMixin's identity cache.
        return name

    @classmethod
    def unique_filter(cls, query, name, *args, **kwargs):
        # Query predicate matching unique_hash above.
        return query.filter(Division.name == name)
class Program(UniqueMixin, Base):
    """An NSF funding program, unique on its 4-character program code."""
    id = Column(Integer, primary_key=True)
    code = Column(CHAR(4), unique=True, nullable=False)
    name = Column(String(80))
    # NOTE(review): declared CHAR(4) but references division.id, which is an
    # Integer primary key — likely should be Integer; confirm against the
    # loader before changing the schema.
    div_id = Column(CHAR(4), ForeignKey('division.id', ondelete='CASCADE'))
    # Proxy to the Programs related via the RelatedPrograms link table.
    # NOTE(review): the creator lambda takes (code, name); with a dict-based
    # association collection the proxy calls creator(key, value) — verify
    # this matches how related_programs is populated.
    related_programs = association_proxy(
        '_related_programs', 'secondary',
        creator=lambda code, name: RelatedPrograms(
            secondary=Program(code, name))
    )

    def __init__(self, code, name=None, div_id=None):
        self.code = code
        self.name = name
        self.div_id = div_id

    @classmethod
    def unique_hash(cls, code, *args, **kwargs):
        # Uniqueness key used by UniqueMixin's identity cache.
        return code

    @classmethod
    def unique_filter(cls, query, code, *args, **kwargs):
        return query.filter(Program.code == code)
class RelatedPrograms(UniqueMixin, Base):
    """Self-referential link table pairing related Program rows."""
    pgm1_id = Column(
        Integer, ForeignKey('program.id', ondelete='CASCADE'),
        primary_key=True)
    pgm2_id = Column(
        Integer, ForeignKey('program.id', ondelete='CASCADE'),
        primary_key=True)
    # Owning side of the pair; deleting it cleans up the link rows.
    primary = saorm.relationship(
        'Program', foreign_keys='RelatedPrograms.pgm1_id',
        uselist=False, single_parent=True,
        backref=saorm.backref(
            '_related_programs', cascade='all, delete-orphan',
            collection_class=attribute_mapped_collection('secondary'),
            passive_deletes=True)
    )
    secondary = saorm.relationship(
        'Program', foreign_keys='RelatedPrograms.pgm2_id',
        uselist=False, single_parent=True)
    __table_args__ = (
        # A program cannot be related to itself.
        CheckConstraint(pgm1_id != pgm2_id),
    )

    def __init__(self, pgm1_id, pgm2_id):
        self.pgm1_id = pgm1_id
        self.pgm2_id = pgm2_id

    @classmethod
    def unique_hash(cls, pgm1_id, pgm2_id):
        return (pgm1_id, pgm2_id)

    @classmethod
    def unique_filter(cls, query, pgm1_id, pgm2_id):
        # BUG FIX: the two criteria were joined with Python's `and`, which
        # does not build a SQL conjunction (SQLAlchemy clause objects refuse
        # truth-testing); pass both to filter() so they are AND-ed in SQL.
        return query.filter(RelatedPrograms.pgm1_id == pgm1_id,
                            RelatedPrograms.pgm2_id == pgm2_id)
class Award(UniqueMixin, Base):
    """An NSF award (grant), unique on its 7-character award code."""
    id = Column(Integer, primary_key=True)
    code = Column(CHAR(7), nullable=False, unique=True)
    title = Column(String(100))
    abstract = Column(Text)
    effective = Column(Date)
    expires = Column(Date)
    first_amended = Column(Date)
    last_amended = Column(Date)
    amount = Column(Integer)
    arra_amount = Column(Integer)
    instrument = Column(String(100))
    publications = saorm.relationship(
        'Publication', backref=saorm.backref('award', uselist=False))
    # Proxies through the Affiliation link table.
    institutions = association_proxy('affiliations', 'institution')
    # NOTE(review): the creator receives one argument from the proxy and
    # unpacks it as **kwargs into Person.from_fullname — verify callers
    # append dicts of from_fullname keyword arguments.
    people = association_proxy(
        'affiliations', 'person',
        creator=lambda kwargs: Person.from_fullname(**kwargs))

    @classmethod
    def unique_hash(cls, code, *args, **kwargs):
        # Uniqueness key used by UniqueMixin's identity cache.
        return code

    @classmethod
    def unique_filter(cls, query, code, *args, **kwargs):
        return query.filter(Award.code == code)
class Funding(UniqueMixin, Base):
    """Link table associating an Award with the Programs funding it."""
    pgm_id = Column(
        Integer, ForeignKey('program.id', ondelete='CASCADE'),
        primary_key=True)
    award_id = Column(
        Integer, ForeignKey('award.id', ondelete='CASCADE'),
        primary_key=True)
    program = saorm.relationship('Program', uselist=False, single_parent=True)
    award = saorm.relationship(
        'Award', uselist=False,  # single_parent=True,
        backref=saorm.backref(
            'funding_programs', cascade='all, delete-orphan',
            passive_deletes=True)
    )

    def __init__(self, program, award):
        self.program = program
        self.award = award

    @classmethod
    def unique_hash(cls, pgm_id, award_id):
        return (pgm_id, award_id)

    @classmethod
    def unique_filter(cls, query, pgm, award):
        # BUG FIX: the two clauses were joined with Python's `and`, which
        # does not build a SQL conjunction; pass both to filter() so they
        # are AND-ed in SQL.
        return query.filter(Funding.pgm_id == pgm.id,
                            Funding.award_id == award.id)
class Publication(BasicMixin, Base):
    """A publication resulting from an award."""
    id = Column(Integer, primary_key=True)
    title = Column(String(255), nullable=False)
    abstract = Column(Text)
    journal = Column(String(255))
    volume = Column(String(10))
    pages = Column(String(30))
    year = Column(Integer)
    uri = Column(String(255))
    # Nulled (row kept) if the owning award is deleted.
    award_id = Column(Integer, ForeignKey('award.id', ondelete='SET NULL'))
class State(BasicMixin, Base):
    """A US state, keyed by its two-letter postal abbreviation."""
    abbr = Column(CHAR(2), primary_key=True)
    name = Column(String(14), nullable=False, unique=True)
class Country(BasicMixin, Base):
    """A country, keyed by its two-letter (alpha-2) code."""
    alpha2 = Column(CHAR(2), primary_key=True)
    name = Column(String(100), nullable=False)
class Address(UniqueMixin, Base):
    """A postal address, unique on (street, city, state, country, zipcode)."""
    id = Column(Integer, primary_key=True)
    street = Column(String(50), nullable=False)
    city = Column(String(50), nullable=False)
    state = Column(CHAR(2), ForeignKey('state.abbr'), nullable=False)
    country = Column(CHAR(2), ForeignKey('country.alpha2'), nullable=False)
    zipcode = Column(String(10), nullable=False)
    lat = Column(FLOAT)
    lon = Column(FLOAT)
    __table_args__ = (
        UniqueConstraint('street', 'city', 'state', 'country', 'zipcode',
                         name='_address_uc'),
    )

    @classmethod
    def unique_hash(cls, street, city, state, country, zipcode,
                    *args, **kwargs):
        return (street, city, state, country, zipcode)

    @classmethod
    def unique_filter(cls, query, *args, **kwargs):
        # BUG FIX: filter_by() accepts keyword criteria only — the old
        # `filter_by(*args)` raised TypeError for positional arguments and
        # silently applied no filter for keyword calls. Map positional args
        # onto the unique columns in declaration order and filter on those.
        keys = ('street', 'city', 'state', 'country', 'zipcode')
        if args:
            kwargs.update(zip(keys, args))
        return query.filter_by(**{k: kwargs[k] for k in keys if k in kwargs})
class Institution(UniqueMixin, Base):
    """An institution receiving awards; uniqueness is keyed on phone."""
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    phone = Column(String(15), unique=True)
    address_id = Column(Integer, ForeignKey('address.id', ondelete='SET NULL'))
    address = saorm.relationship('Address', uselist=False)
    # NOTE(review): no '_people' backref is defined in this file — verify
    # where the Affiliation (or other) side creates it.
    people = association_proxy('_people', 'person')

    @classmethod
    def unique_hash(cls, name, phone, *args, **kwargs):
        return phone

    @classmethod
    def unique_filter(cls, query, *args, **kwargs):
        # BUG FIX: filter_by() accepts keyword criteria only — the old
        # `filter_by(*args)` raised TypeError for positional arguments and
        # silently applied no filter for keyword calls.
        keys = ('name', 'phone')
        if args:
            kwargs.update(zip(keys, args))
        return query.filter_by(**{k: kwargs[k] for k in keys if k in kwargs})
class Person(UniqueMixin, Base):
    """A person (PI, co-PI, program officer, author, ...).

    Uniqueness is defined by the (fname, lname, mname) triple, matching
    the `_person_name_uc` constraint.
    """
    id = Column(Integer, primary_key=True)
    fname = Column(String(50), nullable=False)
    lname = Column(String(50), nullable=False)
    mname = Column(String(50))
    nickname = Column(String(20))
    title = Column(String(10))
    suffix = Column(String(10))
    email = Column(String(100), unique=True)
    publications = association_proxy('_publications', 'publication')
    institutions = association_proxy('affiliations', 'institution')
    awards = association_proxy('roles', 'award')
    __table_args__ = (
        UniqueConstraint('fname', 'lname', 'mname', name='_person_name_uc'),
    )

    @classmethod
    def unique_hash(cls, *args, **kwargs):
        # NOTE(review): keyword-only callers (e.g. from_fullname) produce an
        # empty args tuple here, collapsing all such calls onto one cache
        # key — verify against UniqueMixin's caching behaviour.
        return args

    @classmethod
    def unique_filter(cls, query, *args, **kwargs):
        # BUG FIX: filter_by() accepts keyword criteria only — the old
        # `filter_by(*args)` raised TypeError for positional arguments and
        # silently applied no filter for keyword calls. Filter on the name
        # columns that define uniqueness (see _person_name_uc).
        name_keys = ('fname', 'lname', 'mname')
        if args:
            kwargs.update(zip(name_keys, args))
        return query.filter_by(
            **{k: kwargs[k] for k in name_keys if k in kwargs})

    @classmethod
    def from_fullname(cls, session, name, email=None):
        """Build (or fetch) a unique Person by parsing a free-form name."""
        parsed_name = nameparser.HumanName(name)
        return cls.as_unique(session,
                             fname=parsed_name.first.strip('.'),
                             lname=parsed_name.last.strip('.'),
                             mname=parsed_name.middle.strip('.'),
                             title=parsed_name.title.strip('.'),
                             suffix=parsed_name.suffix.strip('.'),
                             nickname=parsed_name.nickname.strip('.'),
                             email=email
                             )

    @hybrid_property
    def full_name(self):
        """Readable name: title first (nickname) middle last suffix."""
        pieces = []
        if self.title is not None:
            pieces.append(self.title)
        pieces.append(self.fname)
        if self.nickname is not None:
            pieces.append('({})'.format(self.nickname))
        if self.mname is not None:
            pieces.append(self.mname)
        pieces.append(self.lname)
        if self.suffix is not None:
            pieces.append(self.suffix)
        return ' '.join(pieces)
class Author(UniqueMixin, Base):
    """Link table connecting a Person to a Publication they authored."""
    person_id = Column(
        Integer, ForeignKey('person.id', ondelete='CASCADE'),
        primary_key=True)
    pub_id = Column(
        Integer, ForeignKey('publication.id', ondelete='CASCADE'),
        primary_key=True)
    person = saorm.relationship(
        'Person', uselist=False, single_parent=True,
        backref=saorm.backref(
            '_publications', cascade='all,delete-orphan', passive_deletes=True)
    )
    publication = saorm.relationship(
        'Publication', uselist=False, single_parent=True)

    def __init__(self, person, pub):
        self.person_id = person.id
        self.pub_id = pub.id

    @classmethod
    def unique_hash(cls, person_id, pub_id, *args, **kwargs):
        return (person_id, pub_id)

    @classmethod
    def unique_filter(cls, query, person_id, pub_id, *args, **kwargs):
        # BUG FIX: this was a copy-paste from Role — it filtered on
        # Role.person_id/Role.award_id and joined the clauses with Python's
        # `and` (which does not build a SQL conjunction). Filter on
        # Author's own columns and AND them in SQL.
        return query.filter(Author.person_id == person_id,
                            Author.pub_id == pub_id)
class Role(UniqueMixin, Base):
    """A person's role ('pi', 'copi', 'fpi', 'po') on an award."""
    person_id = Column(
        Integer, ForeignKey('person.id', ondelete='CASCADE'),
        primary_key=True)
    award_id = Column(
        Integer, ForeignKey('award.id', ondelete='CASCADE'),
        primary_key=True)
    role = Column(Enum('pi', 'copi', 'fpi', 'po'))
    start = Column(Date)
    end = Column(Date)
    award = saorm.relationship('Award', uselist=False, single_parent=True)
    person = saorm.relationship(
        'Person', uselist=False, single_parent=True,
        backref=saorm.backref(
            'roles', cascade='all, delete-orphan', passive_deletes=True)
    )

    def __init__(self, person, award, role, start, end):
        self.person = person
        self.award = award
        self.role = role
        self.start = start
        self.end = end

    @classmethod
    def unique_hash(cls, person, award, *args, **kwargs):
        return (person.id, award.id)

    @classmethod
    def unique_filter(cls, query, person, award, *args, **kwargs):
        # BUG FIX: the two clauses were joined with Python's `and`, which
        # does not build a SQL conjunction; pass both to filter() so they
        # are AND-ed in SQL.
        return query.filter(Role.person_id == person.id,
                            Role.award_id == award.id)
class Affiliation(UniqueMixin, Base):
    """Three-way link: a Person at an Institution on a given Award."""
    person_id = Column(
        Integer, ForeignKey('person.id', ondelete='CASCADE'),
        primary_key=True)
    institution_id = Column(
        Integer, ForeignKey('institution.id', ondelete='CASCADE'),
        primary_key=True)
    award_id = Column(
        Integer, ForeignKey('award.id', ondelete='CASCADE'),
        primary_key=True)
    person = saorm.relationship(
        'Person',
        backref=saorm.backref(
            'affiliations', cascade='all, delete-orphan', passive_deletes=True)
    )
    institution = saorm.relationship(
        'Institution',
        backref=saorm.backref(
            'affiliations', cascade='all, delete-orphan', passive_deletes=True)
    )
    award = saorm.relationship(
        'Award',
        backref=saorm.backref(
            'affiliations', cascade='all, delete-orphan', passive_deletes=True)
    )

    def __init__(self, person, institution, award):
        self.person = person
        self.institution = institution
        self.award = award

    @classmethod
    def unique_hash(cls, person, institution, award, *args, **kwargs):
        return (person.id, institution.id, award.id)

    @classmethod
    def unique_filter(cls, query, person, institution, award,
                      *args, **kwargs):
        # BUG FIX: the clauses were chained with Python's `and`, which does
        # not build a SQL conjunction; pass all three to filter() so they
        # are AND-ed in SQL.
        return query.filter(
            Affiliation.person_id == person.id,
            Affiliation.institution_id == institution.id,
            Affiliation.award_id == award.id
        )
def main():
    """Rebuild the schema from scratch: drop everything, then recreate."""
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    return 0


if __name__ == "__main__":
    sys.exit(main())
|
<reponame>inspire-group/wf-in-the-age-of-quic
#!/usr/bin/env python3
"""Usage: split-dataset [options] DATASET [OUTFILE]
Select TCP indices from DATASET to be used for k-fold cross validation,
and write them as a json stream to OUTFILE.
"""
import math
import time
import json
import logging
from typing import IO, Optional
from dataclasses import dataclass
import h5py
import doceasy
import sklearn
import numpy as np
import pandas as pd
from typing_extensions import Final
from sklearn.model_selection import (
StratifiedKFold, train_test_split, GroupKFold, GroupShuffleSplit,
)
#: The number of folds to use in the k-fold split
N_SPLITS: Final[int] = 10
#: The fraction of samples to set-aside for validation
VALIDATION_SIZE: Final[float] = 0.1
#: The seed for the random number generator (fixed for reproducibility)
RNG_SEED: Final[int] = 32514

#: Module-level logger for progress reporting.
_LOGGER = logging.getLogger(__name__)
@dataclass
class ExperimentSplitter:
    """Splits the TCP-only dataset for the experiment using stratified
    k-fold.

    Monitored samples (class != -1) are stratified on (class, region);
    unmonitored samples (class == -1) are split by their "group" label so
    grouped samples never straddle a train/test boundary.
    """
    # Number of cross-validation folds.
    n_splits: int = 5
    # Fraction of each train+val partition held out for validation.
    validation_size: float = 0.10
    # Seed or RandomState used for all shuffling.
    random_state: Optional[np.random.RandomState] = None

    def _check_postconditions(self, labels, train_idx, val_idx, test_idx):
        """Sanity-check one fold's index sets against the expected sizes."""
        all_idx = np.concatenate([train_idx, val_idx, test_idx])
        n_train_val = len(train_idx) + len(val_idx)
        n_tcp_labels = len(labels)
        # Ensure that there are only TCP samples
        assert sum(~labels.iloc[all_idx]["protocol"].isin([b"tcp", "tcp"])) == 0
        # Ensure that the number of train+val samples is correct
        assert math.isclose(n_train_val / n_tcp_labels, 1 - (1 / self.n_splits),
                            abs_tol=0.02)
        # Ensure that the number of validation samples is correct
        assert math.isclose(len(val_idx) / n_train_val, self.validation_size,
                            abs_tol=0.02)
        # Ensure that the number of test samples is correct
        assert math.isclose(len(test_idx) / n_tcp_labels, 1 / self.n_splits,
                            abs_tol=0.02)
        # Ensure that none of the indices overlap
        assert len(np.intersect1d(train_idx, val_idx)) == 0
        assert len(np.intersect1d(train_idx, test_idx)) == 0
        assert len(np.intersect1d(val_idx, test_idx)) == 0

    def split(self, labels: pd.DataFrame):
        """Split the labels based on their region and class.

        Yields one (train_idx, val_idx, test_idx) tuple per fold, with
        monitored and unmonitored indices merged and shuffled.
        """
        assert "protocol" in labels
        assert labels["protocol"].isin(["tcp", b"tcp"]).all()
        random_state = sklearn.utils.check_random_state(self.random_state)

        # Pair fold i of the monitored samples with fold i of the
        # unmonitored samples.
        for mon_splits, unmon_splits in zip(
            self._split_monitored(labels, random_state),
            self._split_unmonitored(labels, random_state)
        ):
            train_idx = np.concatenate([mon_splits[0], unmon_splits[0]])
            random_state.shuffle(train_idx)
            val_idx = np.concatenate([mon_splits[1], unmon_splits[1]])
            random_state.shuffle(val_idx)
            test_idx = np.concatenate([mon_splits[2], unmon_splits[2]])
            random_state.shuffle(test_idx)
            self._check_postconditions(labels, train_idx, val_idx, test_idx)
            yield (train_idx, val_idx, test_idx)

    def _split_monitored(self, labels: pd.DataFrame, random_state):
        """Yield per-fold (train, val, test) indices for monitored samples."""
        assert "class" in labels
        assert "region" in labels
        mask = (labels["class"] != -1)
        labels = labels[mask]
        # Positions of the monitored samples within the full label frame.
        indices = np.arange(len(mask))[mask]
        splitter = StratifiedKFold(n_splits=self.n_splits, shuffle=False)
        # Stratify jointly on (class, region).
        stratify_on = labels[["class", "region"]].to_records(index=False)
        for train_val_idx, test_idx in splitter.split(indices, stratify_on):
            train_idx, val_idx = train_test_split(
                train_val_idx, test_size=self.validation_size,
                random_state=random_state, stratify=stratify_on[train_val_idx])
            yield (indices[train_idx], indices[val_idx], indices[test_idx])

    def _split_unmonitored(self, labels: pd.DataFrame, random_state):
        """Yield per-fold (train, val, test) indices for unmonitored samples,
        keeping each "group" entirely within one partition.
        """
        assert "class" in labels
        assert "group" in labels
        mask = (labels["class"] == -1)
        labels = labels[mask]
        indices = np.arange(len(mask))[mask]
        splitter = GroupKFold(self.n_splits)
        for train_val_idx, test_idx in splitter.split(
            indices, groups=labels["group"]
        ):
            val_splitter = GroupShuffleSplit(
                n_splits=1, test_size=self.validation_size,
                random_state=random_state)
            # pylint: disable=stop-iteration-return
            train_val_idx__train_idx, train_val_idx__val_idx = next(
                val_splitter.split(
                    train_val_idx, groups=labels["group"].iloc[train_val_idx])
            )
            train_idx = train_val_idx[train_val_idx__train_idx]
            val_idx = train_val_idx[train_val_idx__val_idx]
            yield (indices[train_idx], indices[val_idx], indices[test_idx])
def main(dataset: str, outfile: IO[str]):
    """Load the dataset, create the splits and write them to outfile
    as a json stream.

    Each output line is one fold: a JSON object with "train", "val",
    "test" and combined "train-val" index lists.
    """
    logging.basicConfig(
        format='[%(asctime)s] [%(levelname)s] %(name)s - %(message)s',
        level=logging.INFO)

    start = time.perf_counter()
    _LOGGER.info("Loading dataset from %r...", dataset)
    with h5py.File(dataset, mode="r") as h5in:
        labels = pd.DataFrame.from_records(np.asarray(h5in["labels"]))

    splitter = ExperimentSplitter(
        n_splits=N_SPLITS, validation_size=VALIDATION_SIZE,
        random_state=RNG_SEED)
    _LOGGER.info("Splitting dataset using %s...", splitter)

    # BUG FIX: the configured `splitter` was previously discarded and a
    # second, identical ExperimentSplitter was constructed inline in the
    # for statement; reuse the one we just built and logged.
    for train_idx, val_idx, test_idx in splitter.split(labels):
        json.dump({
            "train": train_idx.tolist(),
            "val": val_idx.tolist(),
            "test": test_idx.tolist(),
            "train-val": np.concatenate([train_idx, val_idx]).tolist()
        }, outfile, indent=None, separators=(",", ":"))
        outfile.write("\n")

    _LOGGER.info("Splitting complete in %.2fs.", time.perf_counter() - start)


if __name__ == "__main__":
    main(**doceasy.doceasy(__doc__, {
        "DATASET": str,
        "OUTFILE": doceasy.File(mode="w", default="-")
    }))
|
# -*- coding: utf-8 -*-
#!/usr/bin/python3
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from time import sleep
class crawler:
    """This class contains all functions responsible for crawling webpages.

    All functions build upon webdriver to fetch pages. Among other things,
    the functions in this class are responsible for initiating and closing
    the webdriver, fetching webpages, etc.
    """

    def __init__(self, run_headless, non_headless_width, non_headless_height, sleeptime):
        """Set the parameters regarding the webdriver.

        If run_headless is True, webdriver runs in headless mode (no
        browser window opens during program runtime). Otherwise the other
        two values define the width and height of the browser window.
        sleeptime is the wait used by fetch_page() so JS can finish loading.
        """
        self.headless = run_headless                    # True ... run in headless mode
        self.non_headless_height = non_headless_height  # window height (if run_headless == False)
        self.non_headless_width = non_headless_width    # window width (if run_headless == False)
        self.sleeptime_fetchpage = sleeptime            # wait in fetch_page() so JS has loaded

    def init_driver(self):
        """Initiate the webdriver with the configured options.

        Returns the driver handle so the browser stays open over the span
        of the program (else each function call would open and close the
        browser completely).
        """
        print('initing driver')
        # Browser options for Chrome.
        chrome_options = Options()
        chrome_options.add_argument("--disable-extensions")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--no-sandbox")  # linux only
        #chrome_options.add_experimental_option("detach", True)
        # Open a visible browser window or not.
        # (idiom fix: `== True`/`== False` comparisons replaced by
        # truthiness — run_headless is documented as a boolean)
        if self.headless:
            chrome_options.add_argument("--headless")
        # Set the user agent.
        chrome_options.add_argument("user-agent = Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36")
        # Create the (Chrome) driver.
        driver = webdriver.Chrome(options=chrome_options)
        # Apply the window size when running with a visible window.
        if not self.headless:
            driver.set_window_size(self.non_headless_width, self.non_headless_height)
        return driver

    def close_driver(self, driver):
        '''Close the webdriver properly.'''
        print('closing driver')
        driver.close()  # close the current browser window
        driver.quit()   # dispose: closes all browser windows, ends the session

    def fetch_page(self, driver, page):
        """Fetches a single website using webdriver.

        Waits sleeptime_fetchpage seconds after navigation: the page sets
        a JS cookie and reloads/redirects, and fetching before that
        resolves yields an 'enable JS' error page instead of the content.
        """
        print('fetching page: ', page)
        # Fetch the page (open the browser).
        driver.get(page)
        sleep(self.sleeptime_fetchpage)
        inner_div_content = self.verify_page_crawl(driver, page)
        if inner_div_content != "":
            print(inner_div_content)
        else:
            # TODO: increase time, recrawl
            pass

    def verify_page_crawl(self, driver, page):
        """Check if the retrieved source code (incl. JS) was fetched properly.

        Returns the innerHTML of the div with id 'contentInner', or "" when
        it is missing — which happens when the page was crawled too fast
        and the JS error page was served instead.
        """
        try:
            inner_div_content = \
                driver.execute_script('return window.document.getElementById("contentInner").innerHTML')
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary errors (e.g. the
        # JavascriptException raised when the div is absent).
        except Exception:
            print("Error fetching page " + page + " using sleeptime of " +
                  str(self.sleeptime_fetchpage))
            inner_div_content = ""  # no contentInner div found
        return inner_div_content
|
<reponame>Yuessiah/Othello-Minimax<gh_stars>0
import copy
import datetime
import sys
__author__ = 'bengt, yuessiah'
from game.settings import *
class AlphaBetaPruner(object):
"""Alpha-Beta Pruning algorithm."""
    def __init__(self, mutex, duration, pieces, first_player, second_player):
        """Set up the pruner for one search.

        mutex/duration control the search budget; `pieces` is the current
        board, converted to the internal integer representation.
        """
        self.mutex = mutex
        # Internal cell encodings: 2 = empty board cell, 0 = white, 1 = black.
        self.board = 2
        self.white = 0
        self.black = 1
        self.max_depth = 0
        self.duration = duration
        # Counts evaluated leaf states (for the progress display).
        self.complexity = 0
        self.lifetime = None
        self.first_player, self.second_player = (self.white, self.black) \
            if first_player == WHITE else (self.black, self.white)
        self.state = self.make_state(pieces)
def make_state(self, pieces):
results = {BOARD: self.board, MOVE: self.board, WHITE: self.white, BLACK: self.black}
return [results[p.get_state()] for p in pieces], self.first_player
    def alpha_beta_search(self):
        """Pick the best move for the current state via negamax search.

        Raises NoMovesError when the current player has no legal move.
        """
        self.lifetime = datetime.datetime.now() + datetime.timedelta(seconds=self.duration)

        # Search deeper once the board starts filling up (fewer empties).
        left = self.state[0].count(self.board)
        if left >= 44:
            self.max_depth = 4
        else:
            self.max_depth = 5
        # ANSI escapes: save cursor, print status at (row, col), restore.
        sys.stdout.write("\x1b7\x1b[%d;%dfMax depth: %d\x1b8" % (10, 22, self.max_depth))

        moves = self.get_moves(self.state[0], self.state[1])
        if len(moves) == 0:
            raise NoMovesError

        # Score each move: opening heuristic plus a depth-limited negamax.
        fn = lambda state, move: self.opening_evaluation(state[0], self.first_player, move) + \
                                 self.negamax(0, state, move, -float('Inf'), float('Inf'))
        scores = [(fn(self.next_state(self.state, move), move), move) for move in moves]
        return max(scores, key=lambda value: value[0])[1]
def negamax(self, depth, state, action, alpha, beta):
if self.cutoff_test(depth):
eval = self.ending_evaluation(state[0], self.first_player^(depth&1), action)
self.complexity += 1
sys.stdout.write("\x1b7\x1b[%d;%dfComplexity: %d\x1b8" % (13, 22, self.complexity))
sys.stdout.flush()
return eval
value = alpha
moves = self.get_moves(state[0], state[1])
for move in moves:
value = max([value, -self.negamax(depth + 1, self.next_state(state, move), move, -beta, -value)])
if value >= beta:
return value
return value
def opening_evaluation(self, state, player, action):
board = self.board
placed = action[0] + (action[1] * WIDTH)
parity_count = [1]
X = (state[0] == board and state[9] == player) + \
(state[7] == board and state[14] == player) + \
(state[56] == board and state[49] == player) + \
(state[63] == board and state[54] == player)
C = (state[0] == board and (placed == 1 or placed == 8 )) or \
(state[7] == board and (placed == 6 or placed == 15)) or \
(state[56] == board and (placed == 48 or placed == 57)) or \
(state[63] == board and (placed == 55 or placed == 62))
parity = 1 if self.parity(0, copy.copy(state), placed, parity_count) else -0.45 #odd: 1, even: -0.45
eval = (X*-50) + (C*-20) + (parity*100)
sys.stdout.write("\x1b7\x1b[%d;%dfOpening eval: %f\x1b8" % (11, 22, eval))
return eval
def ending_evaluation(self, state, player_to_check, action):
board = self.board
player = player_to_check
opponent = self.opponent(player)
edge_eval = mobility = corner_eval = stability_eval = 0
player_piece = len([p for p in state if p == player ])
opponent_piece = len([p for p in state if p == opponent])
count_eval = (player_piece - opponent_piece) / (player_piece + opponent_piece)
player_move = len(self.get_moves(state, player))
opponent_move = len(self.get_moves(self.next_state((state, player), action)[0], opponent))
if player_move + opponent_move:
mobility = (player_move - opponent_move) / (player_move + opponent_move)
corner_player = (state[0] == player ) + (state[7] == player ) + \
(state[56] == player ) + (state[63] == player )
corner_opponent = (state[0] == opponent) + (state[7] == opponent) + \
(state[56] == opponent) + (state[63] == opponent)
if corner_player + corner_opponent:
corner_eval = (corner_player - corner_opponent) / (corner_player + corner_opponent)
edge_player = len([p for i, p in enumerate(state) if p == player and (i%8==0 or i%8==7 or i/8==0 or i/8==7)])
edge_opponent = len([p for i, p in enumerate(state) if p == opponent and (i%8==0 or i%8==7 or i/8==0 or i/8==7)])
if edge_player + edge_opponent:
edge_eval = (edge_player - edge_opponent) / (edge_player + edge_opponent)
player_stability = self.stability(state, player, opponent)
opponent_stability = self.stability(self.next_state((state, player), action)[0], opponent, player)
if player_stability + opponent_stability:
stability_eval = (player_stability - opponent_stability) / (player_stability + opponent_stability)
eval = (count_eval*100) + (corner_eval*100) + (edge_eval*100) + (mobility*100) + (stability_eval*100)
sys.stdout.write("\x1b7\x1b[%d;%dfEnding eval: %f\x1b8" % (12, 22, eval))
return eval
def opponent(self, player):
return self.second_player if player is self.first_player else self.first_player
def parity(self, depth, state, placed, count):
for d in DIRECTIONS:
if outside_board(placed, d):
continue
if state[placed+d] == self.board:
count[0] += 1
state[placed+d] = 1 #visited
self.parity(depth + 1, state, placed+d, count)
if depth == 0:
return count[0] % 2
def stability(self, state, player, opponent):
bad_piece = set()
for piece, colour in enumerate(state):
if colour != opponent:
continue
for d in DIRECTIONS:
if outside_board(piece, d):
continue
to_store = set()
tile = piece + d
while state[tile] == player and not outside_board(tile, d):
to_store.add((int(tile%WIDTH), int(tile/HEIGHT)))
tile += d
if state[tile] == self.board:
bad_piece.update(to_store)
return state.count(player) - len(bad_piece)
def next_state(self, current_state, action):
placed = action[0] + (action[1] * WIDTH)
state = copy.copy(current_state[0])
player = copy.copy(current_state[1])
opponent = self.opponent(player)
state[placed] = player
for d in DIRECTIONS:
if outside_board(placed, d):
continue
to_flip = []
tile = placed + d
while state[tile] == opponent and not outside_board(tile, d):
to_flip.append(tile)
tile += d
if state[tile] == player:
for piece in to_flip:
state[piece] = player
return state, opponent
def get_moves(self, state, player):
""" Returns a generator of (x,y) coordinates.
"""
moves = [self.mark_move(self.opponent(player), tile, state, d)
for tile, colour in enumerate(state)
for d in DIRECTIONS
if colour == player and not outside_board(tile, d)]
return list(set([(x, y) for found, x, y in moves if found]))
def mark_move(self, opponent, tile, pieces, direction):
tile += direction
while pieces[tile] == opponent and not outside_board(tile, direction):
tile += direction
if pieces[tile] == self.board:
return True, int(tile%WIDTH), int(tile/HEIGHT)
return False, int(tile%WIDTH), int(tile/HEIGHT)
def cutoff_test(self, depth):
return depth >= self.max_depth or datetime.datetime.now() > self.lifetime
|
# coding: utf-8
# # Estimating the carbon content of marine bacteria and archaea
#
# In order to estimate the characteristic carbon content of marine bacteria and archaea, we rely on two main methodologies - volume based estimates and amino acid based estimates.
#
# ## Volume-based estimates
# We collected measurements of the characteristic volume of bacteria and archaea in the marine deep subsurface from 4 different studies. For 3 of those studies, we collected reported average cell volumes. Here are the average values we collected from those three studies:
# In[1]:
import pandas as pd
import numpy as np
from scipy.stats import gmean
import sys
sys.path.insert(0, '../../../statistics_helper')
from CI_helper import *
pd.options.display.float_format = '{:,.2f}'.format
volumes = pd.read_excel('marine_deep_subsurface_prok_carbon_content_data.xlsx','Volume based')
volumes
# In addition we used data from [Braun et al.](http://dx.doi.org/10.3389/fmicb.2016.01375) which measured cell volumes for three cell morphologies (coccoid, elongated and filamentous), along with the relative fraction of each morphology in each site sampled. Here is the data extracted from Braun et al.:
# In[2]:
braun_volumes = pd.read_excel('marine_deep_subsurface_prok_carbon_content_data.xlsx','Braun', skiprows=1)
braun_volumes
# We first calculate the characteristic volume of a single cell from the data in Braun et al. to be able to compare it with the other resources:
# In[3]:
# Group by depth
braun_depth_binned = braun_volumes.groupby(['Depth (m)'])
# Define the function which will compute the weighted average of volume based on the fraction of the
# population of each cell type
def groupby_weighted_average(input):
    return np.average(input['Mean volume (µm^3)'],weights=input['Fraction FM'])
# Calculate the weighted average volume for each depth sample
braun_weighted_average = braun_depth_binned.apply(groupby_weighted_average)
# Calculate the geometric mean of the volumes from different depths
braun_characteristic_volume = gmean(braun_weighted_average)
print(r'The characteristic volume of bacterial and archaeal cells in the marine deep subsurface based on Braun et al. is ≈%.2fµm^3' %braun_characteristic_volume)
# FIX: DataFrame.append returns a NEW DataFrame and does not modify `volumes`
# in place; the original discarded the return value, so the Braun et al.
# estimate was silently dropped from all downstream calculations even though
# the text below refers to "five different estimates".
volumes = volumes.append(pd.DataFrame.from_dict([{'Study': 'Braun et al.', 'Mean cell volume (µm^3)':braun_characteristic_volume}]))
# In order to convert the five different estimates for the characteristic volume of bacterial and archaeal cell in the marine deep subsurface into estimates of carbon content, we use two independent models that have been used in the literature: [Fry et al.](http://dx.doi.org/10.1016/S0580-9517(08)70239-3) which estimates ≈300 fg C per $µm^3$, and [<NAME>](http://dx.doi.org/10.3354/meps051201), which developed an allometric model of the carbon content of cells with different volumes. The allometric model they developed is:
# $$C = 88.1 \times V^{0.59}$$
# Where C is the carbon content of a single cell [fg C cell$^{-1}$], and V is cell volume [$µm^3$]. We apply these two independent conversion equations to the volumes we gathered from the literature to produce 10 estimates for the characteristic carbon content of bacterial and archaeal cells in the marine deep subsurface.
# In[4]:
# Apply the conversion equations to the volumes reported in the literature
volumes['Fry et al.'] = volumes['Mean cell volume (µm^3)']*310
volumes['Simon and Azam'] = 88.1*volumes['Mean cell volume (µm^3)']**0.59
volumes
# We calculate the geometric mean of the values from different studies using the same conversion equation to generate a characteristic carbon content for each conversion method.
# In[5]:
fry_volume_mean = gmean(volumes['Fry et al.'])
sa_volume_mean = gmean(volumes['Simon and Azam'])
print('The characteristic carbon content of a single bacterial or archaeal cell in the marine deep subsurface based on cell volume converted using the conversion equation from Fry et al. is ≈%.0f fg C cell^-1\n' %fry_volume_mean)
print('The characteristic carbon content of a single bacterial or archaeal cell in the marine deep subsurface based on cell volume converted using the conversion equation from Simon & Azam is ≈%.0f fg C cell^-1' %sa_volume_mean)
# We compute the geometric mean of the characteristic values from the two volume to carbon content conversion methods and use it as our best estimate for the carbon content of bacterial and archaeal cells in the marine deep subsurface, based on volume measurements.
# In[6]:
vol_best_carbon_content = gmean([fry_volume_mean,sa_volume_mean])
print('Our best volume-based estimate for the carbon content of bacterial and archaeal cells in the marine deep subsurface is %.0f fg C cell^-1' %vol_best_carbon_content)
# ## Amino acid-based estimate
# We rely on the study by Braun et al., which measured carbon content of bacterial and archaeal cells in the marine deep subsurface based on amino acid carbon mass, and assuming ≈55% of the carbon mass of single cells is stored in amino acids. Here are the values reported by Braun et al.:
# In[7]:
aa_based = pd.read_excel('marine_deep_subsurface_prok_carbon_content_data.xlsx', 'Amino acid based', skiprows=1)
aa_based
# We use the geometric mean of the values reported by Braun et al. as our best estimate for the amino acid-based estimate of the carbon content of bacterial and archaeal cells in the marine deep subsurface.
# In[8]:
aa_best_carbon_content = gmean(aa_based['Carbon content (fg C cell-1)'])
print('Our best amino acid-based estimate for the carbon content of bacterial and archaeal cells in the marine deep subsurface is %.0f fg C cell^-1' %aa_best_carbon_content)
# As our best estimate for the carbon content of bacterial and archaeal cells in the marine deep subsurface, we use the geometric mean of the volume-based and amino acid-based estimates.
# In[9]:
best_estimate = gmean([vol_best_carbon_content,aa_best_carbon_content])
print('Our best estimate for the carbon content of bacterial and archaeal cells in the marine deep subsurface is %.0f fg C cell^-1' %best_estimate)
# # Uncertainty analysis
# To calculate the uncertainty associated with the estimate for the total number of bacteria and archaea in the marine deep subsurface, we first collect all available uncertainties and then take the largest value as our best projection for the uncertainty.
#
# ## Volume-based
#
# ### intra-study uncertainty
# For the volume based approaches, we had data on intra-study uncertainty only for the Braun et al. study. We calculate the intra study uncertainty of the volumes reported in Braun et al. by calculating the 95% confidence interval of the values reported in Braun et al.
# In[10]:
vol_braun_intra_CI = geo_CI_calc(braun_weighted_average)
print('The intra-study uncertainty for Braun et al. is ≈%.1f-fold' %vol_braun_intra_CI)
# ### Interstudy uncertainty
# As a measure of the interstudy uncertainty, we compare the 95% confidence interval for the geometric mean of the carbon content from different studies, using the same conversion method.
# We also use the 95% confidence interval for the geometric mean of the carbon content estimates from the two different conversion methods (Fry et al. and Simon & Azam) as a measure of interstudy uncertainty.
# In[11]:
carbon_content_fry_CI = geo_CI_calc(volumes['Fry et al.'])
carbon_content_sa_CI = geo_CI_calc(volumes['Simon and Azam'])
print('The interstudy uncertainty of the geometric mean of carbon content using the conversion method of Fry et al. is ≈%.1f-fold' %carbon_content_fry_CI)
print('The interstudy uncertainty of the geometric mean of carbon content using the conversion method of Simon & Azam is ≈%.1f-fold' %carbon_content_sa_CI)
carbon_content_vol_CI = geo_CI_calc([fry_volume_mean,sa_volume_mean])
print('The interstudy uncertainty of the geometric mean of carbon content between conversion methods is ≈%.1f-fold' %carbon_content_vol_CI)
# ## Amino acid-based
#
# ### Intra-study uncertainty
# We calculate the 95% confidence interval of the geometric mean of values for the carbon content from Braun et al. as a measure of the intra-study uncertainty.
# In[12]:
aa_intra_CI = geo_CI_calc(aa_based['Carbon content (fg C cell-1)'])
print('The intra-study uncertainty of amino acid-based carbon content estimates from Braun et al. is ≈%.1f-fold' %aa_intra_CI)
# ## Inter-method uncertainty
# As another measure of uncertainty we calculate the 95% confidence interval of the geometric mean of the estimates for carbon content calculated using either the volume-based method or the amino acid-based method.
# In[13]:
inter_method_CI = geo_CI_calc([vol_best_carbon_content,aa_best_carbon_content])
print('The intra-method uncertainty for the caron content of bacretial and archaeal cells in the marine deep subsurface is ≈%.1f-fold' %inter_method_CI)
# We use the highest uncertainty among this collection, which is ≈2.2-fold, as our best projection of the uncertainty associated with our estimate of the carbon content of bacterial and archaeal cells in the marine deep subsurface.
#
# Our final parameters are:
# In[14]:
# Take the maximal uncertainty as our best projection of uncertainty
mul_CI = np.max([inter_method_CI,aa_intra_CI,carbon_content_vol_CI,carbon_content_fry_CI,carbon_content_sa_CI,vol_braun_intra_CI])
print('Carbon content of bacterial and archaeal cells in the marine deep subsurface: %.0f fg C' % best_estimate)
print('Uncertainty associated with the carbon content of bacterial and archaeal cells in the marine deep subsurface: %.1f-fold' % mul_CI)
old_results = pd.read_excel('../marine_deep_subsurface_prok_biomass_estimate.xlsx')
result = old_results.copy()
result.loc[1] = pd.Series({
    'Parameter': 'Carbon content of bacterial and archaeal cells in the marine deep subsurface',
    'Value': int(best_estimate),
    'Units': 'fg C cell^-1',
    'Uncertainty': "{0:.1f}".format(mul_CI)
    })
result.to_excel('../marine_deep_subsurface_prok_biomass_estimate.xlsx',index=False)
|
<reponame>lifengjin/transition-amr-parser
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
import os
import shutil
import torch
from tqdm import tqdm
import argparse
from fairseq.tokenizer import tokenize_line
from fairseq.models.roberta import RobertaModel
from transition_amr_parser.io import read_sentences
def argument_parsing():
    """Build the command-line parser for the unicode check and return the
    parsed arguments (reads sys.argv)."""
    parser = argparse.ArgumentParser(
        description='unit test for roberta unicode handling'
    )
    parser.add_argument('-i', '--in-tokenized-sentences',
                        type=str, required=True,
                        help='File with one __tokenized__ sentence per line')
    parser.add_argument('-p', '--pretrained-embed',
                        type=str, required=True,
                        default="roberta.large",
                        help='roberta model to load')
    parser.add_argument('-o', '--output-file',
                        type=str, required=True,
                        help='File to store bad unicode sentences')
    parser.add_argument('--raise-error',
                        action='store_true',
                        help='Set to force exception if unicode error found')
    return parser.parse_args()
def main():
    """Check RoBERTa BPE word-piece segmentation against whitespace tokens.

    For every sentence in the input file, aligns RoBERTa word-pieces back to
    the original whitespace-separated tokens and records sentences whose
    alignment fails (typically unicode-handling issues) in the output file.
    Raises an exception on the first failure when --raise-error is set.
    """
    args = argument_parsing()

    # Read and re-tokenize the input sentences (only used for the count print;
    # alignment below re-reads the file line by line).
    sentences = read_sentences(args.in_tokenized_sentences)
    split_sentences = []
    for sentence in sentences:
        split_sentences.append(tokenize_line(sentence))
    print(len(split_sentences))

    # File collecting the sentences whose alignment failed.
    bad_unicode = open(args.output_file, 'w')

    def load_roberta(name=None, roberta_cache_path=None):
        """Load a RoBERTa model from the torch hub or from a local cache."""
        if not roberta_cache_path:
            roberta = torch.hub.load('pytorch/fairseq', name)
        else:
            # FIX: the original referenced the misspelled `roberta_cach_path`,
            # which raised NameError whenever a cache path was supplied.
            roberta = RobertaModel.from_pretrained(
                roberta_cache_path, checkpoint_file='model.pt')
        roberta.eval()
        if torch.cuda.is_available():
            roberta.cuda()
        return roberta

    def get_wordpiece_to_word_map(sentence, roberta_bpe, raise_error):
        """Map each whitespace token to the word-piece index (or list of
        indices) that reconstructs it; log sentences that fail to align."""
        # Get word and wordpiece tokens according to RoBERTa
        word_tokens = sentence.split()
        wordpiece_tokens = [
            roberta_bpe.decode(wordpiece)
            for wordpiece in roberta_bpe.encode(sentence).split()
        ]
        assert len(word_tokens) <= len(wordpiece_tokens)
        assert isinstance(word_tokens, list)
        assert isinstance(wordpiece_tokens, list)
        w_index = 0
        word_to_wordpiece = []
        subword_sequence = []
        bad_unicode_flag = 0
        for wp_index in range(len(wordpiece_tokens)):
            if w_index in range(len(word_tokens)):
                word = word_tokens[w_index]
                if word == wordpiece_tokens[wp_index]:
                    # Single word-piece matches the whole word.
                    word_to_wordpiece.append(wp_index)
                    w_index += 1
                else:
                    # Accumulate word-pieces until they rebuild the word.
                    subword_sequence.append(wp_index)
                    word_from_pieces = "".join([
                        # NOTE: Facebooks BPE signals SOW with whitesplace
                        wordpiece_tokens[i].lstrip()
                        for i in subword_sequence
                    ])
                    if word == word_from_pieces:
                        word_to_wordpiece.append(subword_sequence)
                        w_index += 1
                        subword_sequence = []
                    elif word_from_pieces not in word:
                        # Pieces diverged from the word: unicode mismatch.
                        word_to_wordpiece.append(subword_sequence)
                        w_index += 1
                        subword_sequence = []
                        bad_unicode_flag = 1
        if bad_unicode_flag == 1:
            bad_unicode.write(sentence)
            wp = " ".join(wordpiece_tokens)
            print("\n\nsentence: ", sentence)
            print("wp: ", wp)
            print("\n")
            bad_unicode.write("\n")
            bad_unicode.write(wp)
            bad_unicode.write("\n\n")
            if raise_error:
                raise Exception('Unicode splitting failed')
        return word_to_wordpiece

    def check_wordpiece_to_word_map(input_file, raise_error):
        """Run the alignment check over every line of `input_file`."""
        num_sents = 0
        with open(input_file, 'r') as fid:
            for sentence in tqdm(fid):
                if not sentence:
                    break
                sentence = " ".join(tokenize_line(str(sentence.rstrip())))
                # `roberta` is the closure variable bound below, before this
                # function is called.
                word2piece = get_wordpiece_to_word_map(
                    sentence,
                    roberta.bpe,
                    raise_error
                )

    roberta = load_roberta(name=args.pretrained_embed)
    check_wordpiece_to_word_map(args.in_tokenized_sentences, args.raise_error)
    # FIX: close the log file so buffered writes are flushed to disk.
    bad_unicode.close()


if __name__ == "__main__":
    main()
|
<filename>continual_learning/datasets/base/utils.py
import bisect
import os
from abc import ABC, abstractmethod
from os import makedirs
from os.path import join, dirname, exists
from typing import Callable, Tuple, Union, Sequence
import numpy as np
from torch.utils.data import DataLoader
from continual_learning.datasets.base import UnsupervisedDataset, \
SupervisedDataset, DatasetSplitsContainer, DatasetSplits
# from continual_learning.datasets.base.base import DatasetSplits
class DatasetSplitContexView:
    """Context manager that temporarily switches a dataset to a given split.

    On entry the dataset's ``current_split`` is set to ``split`` and the
    dataset itself is returned; on exit the previously active split is
    restored.
    """

    def __init__(self, dataset: 'DatasetSplitsContainer',
                 split: 'DatasetSplits'):
        self._dataset = dataset
        # Remember the split active at construction time so it can be restored.
        self._current_split = dataset.current_split
        self._split = split

    def __enter__(self):
        self._dataset.current_split = self._split
        return self._dataset

    def __exit__(self, type, value, traceback):
        # Restore the split that was active before entering the block.
        self._dataset.current_split = self._current_split
        # BUG FIX: the original returned True, which silently suppressed ANY
        # exception raised inside the `with` block. Returning a falsy value
        # lets exceptions propagate normally.
        return False
class DownloadableDataset(ABC):
    """Abstract base for datasets that can be fetched from a remote source.

    Subclasses implement :meth:`download_dataset` and :meth:`_check_exists`;
    on construction the dataset is downloaded when it is not already present.
    """

    def __init__(self,
                 name: str,
                 transformer: Callable = None,
                 download_if_missing: bool = True,
                 data_folder: str = None,
                 **kwargs):
        """
        An abstract class used to download the datasets.
        :param name: The name of the dataset.
        :param transformer: The transformer function used when a sample is retrieved.
        :param download_if_missing: If the dataset needs to be downloaded if missing.
        :param data_folder: Where the dataset is stored.
        """
        # Default storage location lives next to this module.
        if data_folder is None:
            data_folder = join(dirname(__file__), 'downloaded_datasets', name)
        self.data_folder = data_folder
        self._name = name
        # Fall back to the identity transform when none was supplied.
        self.transformer = transformer if transformer is not None else lambda x: x
        if not self._check_exists():
            if not download_if_missing:
                raise IOError("Data not found and "
                              "`download_if_missing` is False")
            if not exists(self.data_folder):
                makedirs(self.data_folder)
            print('Downloading dataset {}'.format(self.name))
            self.download_dataset()

    @property
    def name(self):
        """Name of the dataset (read-only)."""
        return self._name

    @abstractmethod
    def download_dataset(self):
        """Fetch the dataset into ``self.data_folder``."""
        raise NotImplementedError

    @abstractmethod
    def _check_exists(self) -> bool:
        """Return True when the dataset is already available locally."""
        raise NotImplementedError
class UnsupervisedDownloadableDataset(DownloadableDataset,
                                      UnsupervisedDataset,
                                      ABC):
    """Downloadable dataset without labels.

    Combines the download machinery of DownloadableDataset with the sample
    storage of UnsupervisedDataset via a two-stage initialisation.
    """
    def __init__(self,
                 name: str,
                 download_if_missing: bool = True,
                 data_folder: str = None,
                 transformer: Callable = None,
                 target_transformer: Callable = None,
                 **kwargs):
        # Stage 1: run DownloadableDataset.__init__ (first in the MRO), which
        # downloads the data if needed.
        super().__init__(name=name,
                         transformer=transformer,
                         download_if_missing=download_if_missing,
                         data_folder=data_folder)
        # Data can only be loaded once the download has completed.
        x, (train, test, dev) = self.load_dataset()
        # For path-based datasets, image paths are relative to data_folder.
        if kwargs.get('is_path_dataset', False):
            kwargs['images_path'] = os.path.join(self.data_folder,
                                                 kwargs['images_path'])
        # Stage 2: super(DownloadableDataset, self) deliberately skips past
        # DownloadableDataset in the MRO so that UnsupervisedDataset.__init__
        # receives the loaded samples and split indices.
        super(DownloadableDataset, self).__init__(x=x,
                                                  train=train,
                                                  test=test,
                                                  dev=dev,
                                                  transformer=transformer,
                                                  target_transformer=
                                                  target_transformer,
                                                  **kwargs)

    @abstractmethod
    def load_dataset(self) -> Tuple[np.ndarray, Tuple[list, list, list]]:
        """Return (samples, (train_idx, test_idx, dev_idx)) from local storage."""
        raise NotImplementedError
class SupervisedDownloadableDataset(DownloadableDataset, SupervisedDataset,
                                    ABC):
    """Downloadable dataset with labels.

    Same two-stage initialisation as UnsupervisedDownloadableDataset, but
    load_dataset additionally returns targets `y`.
    """
    def __init__(self,
                 name: str,
                 download_if_missing: bool = True,
                 data_folder: str = None,
                 transformer: Callable = None,
                 test_transformer: Callable = None,
                 target_transformer: Callable = None,
                 **kwargs):
        # Stage 1: download (DownloadableDataset.__init__, first in the MRO).
        super().__init__(name=name,
                         transformer=transformer,
                         download_if_missing=download_if_missing,
                         data_folder=data_folder,
                         **kwargs)
        x, y), (train, test, dev) = self.load_dataset()
        # For path-based datasets, image paths are relative to data_folder.
        if kwargs.get('is_path_dataset', False):
            kwargs['images_path'] = os.path.join(self.data_folder,
                                                 kwargs['images_path'])
        # Stage 2: skip past DownloadableDataset in the MRO so that
        # SupervisedDataset.__init__ receives samples, targets and splits.
        super(DownloadableDataset, self).__init__(x=x,
                                                  y=y,
                                                  train=train,
                                                  test=test,
                                                  dev=dev,
                                                  transformer=transformer,
                                                  target_transformer=
                                                  target_transformer,
                                                  test_transformer=
                                                  test_transformer,
                                                  **kwargs)

    @abstractmethod
    def load_dataset(self) -> Tuple[
            Tuple[np.ndarray, np.ndarray], Tuple[list, list, list]]:
        """Return ((samples, targets), (train_idx, test_idx, dev_idx))."""
        raise NotImplementedError
class ConcatDataset:
    r"""
    Concatenation of several datasets; modified version of ConcatDataset
    from torch.

    Unlike the torch original, ``cumulative_sizes`` is recomputed on every
    access because the underlying datasets change length when their active
    split (train/dev/test) changes.
    """

    @property
    def cumulative_sizes(self):
        # Recomputed each access: dataset lengths depend on the current split.
        return self.cumsum(self.datasets)

    @staticmethod
    def cumsum(sequence):
        """Return the running total of the lengths of `sequence`'s items."""
        r, s = [], 0
        for e in sequence:
            l = len(e)
            r.append(l + s)
            s += l
        return r

    def __init__(self, datasets: Sequence[
            Union['UnsupervisedDataset', 'SupervisedDataset']]) -> None:
        """Wrap `datasets`; every dataset's items must have the same arity.

        :raises ValueError: when `datasets` is empty or items differ in arity.
        """
        super(ConcatDataset, self).__init__()
        if len(datasets) == 0:
            # FIX: corrected message typo ('No datase given').
            raise ValueError('No dataset given')
        # All datasets must yield items of the same length (e.g. (x, y)).
        lens = len(datasets[0][0])
        cond = all([len(d[0]) == lens for d in datasets])
        if not cond:
            raise ValueError('The dataset\'s __getitem__ are not comparable, '
                             'because return different number of values: {}'.
                             format([len(d[0]) for d in datasets]))
        self.datasets = list(datasets)

    def __len__(self):
        return self.cumulative_sizes[-1]

    def __getitem__(self, idx):
        """Map a global index onto (dataset, local index) and fetch the item."""
        if idx < 0:
            if -idx > len(self):
                raise ValueError(
                    "absolute value of index should not exceed dataset length")
            idx = len(self) + idx
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]

    def get_iterator(self, batch_size, shuffle=True, sampler=None,
                     num_workers=0, pin_memory=False):
        """Return a torch DataLoader over the concatenated datasets."""
        return DataLoader(self, batch_size=batch_size, shuffle=shuffle,
                          sampler=sampler, pin_memory=pin_memory,
                          num_workers=num_workers)

    def train(self) -> None:
        """Switch every wrapped dataset to its train split."""
        for d in self.datasets:
            d.train()

    def dev(self) -> None:
        """Switch every wrapped dataset to its dev split."""
        for d in self.datasets:
            d.dev()

    def test(self) -> None:
        """Switch every wrapped dataset to its test split."""
        for d in self.datasets:
            d.test()

    def all(self) -> None:
        """Switch every wrapped dataset to the full (all-samples) split."""
        for d in self.datasets:
            d.all()

    @property
    def current_split(self) -> 'DatasetSplits':
        # All datasets are kept in sync, so the first one is representative.
        return self.datasets[0].current_split

    @current_split.setter
    def current_split(self, v: 'DatasetSplits') -> None:
        for d in self.datasets:
            d.current_split = v
|
"""
Copyright 2020 ICES, University of Manchester, Evenset Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#Code by <NAME>
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from keras import backend as K
from keras.models import Model, Input
from keras.layers.merge import add
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Lambda
from utils.spec_tokenizers import tokenize_fa
class NER_BiLSTM_ELMo_i2b2(object):
    """Class that implements and performs named entity recognition using BiLSTM
    neural network architecture.
    The architecture uses GloVe embeddings trained on common crawl dataset.
    Then the algorithm is trained on i2b2 2014 dataset. """
    def __init__(self):
        """Implementation of initialization"""
        # load json and create model
        # TF1-style session shared with Keras so the ELMo hub module and the
        # Keras layers run in the same graph.
        self.sess = tf.Session()
        K.set_session(self.sess)
        self.elmo_model = hub.Module("https://tfhub.dev/google/elmo/2", trainable=True)
        self.sess.run(tf.global_variables_initializer())
        self.sess.run(tf.tables_initializer())
        # Fixed sequence length and batch size baked into the ELMo signature.
        self.max_len = 50
        self.batch_size = 32
        self.n_tags = 9
        self.model = self.createModel("", "")
        # Load pre-trained weights when available.
        if os.path.exists("Models/NER_BiLSTM_ELMo.h5"):
            print("Loading model")
            self.model.load_weights("Models/NER_BiLSTM_ELMo.h5")
            print("Loaded model")
        # NOTE(review): the GloVe-related constants below appear unused by the
        # visible methods — confirm before removing.
        self.GLOVE_DIR = ""
        self.MAX_SEQUENCE_LENGTH = 200
        self.EMBEDDING_DIM = 300
        self.MAX_NB_WORDS = 2200000
        self.tags = None
    def perform_NER(self, text):
        """
        Function that perform BiLSTM-based NER
        :param text: Text that should be analyzed and tagged
        :return: returns sequence of sequences with labels
        """
        sequences = tokenize_fa([text])
        word_sequences = []
        X_test = []
        tokens = []
        # Flatten tokenizer output into sentences of plain token strings.
        for seq in sequences:
            features_seq = []
            sentence = []
            for i in range(0, len(seq)):
                features_seq.append(seq[i][0])
                tokens.append(seq[i][0])
                sentence.append(seq[i][0])
            X_test.append(sentence)
            word_sequences.append(sentence)
        X = []
        # Pad the number of sequences up to a multiple of the batch size (32),
        # since the ELMo signature expects full batches.
        # NOTE(review): when len(word_sequences) is already a multiple of 32,
        # this adds 32 extra all-padding sequences — possible off-by-one.
        remaining = len(word_sequences)%32
        additional_seq = 32 - remaining
        for i in range(0,additional_seq):
            X_seq = []
            # NOTE(review): the loop variable `i` is reused by this inner loop.
            for i in range(0,self.max_len):
                X_seq.append("PADword")
            word_sequences.append(X_seq)
        # Pad/truncate every sequence to exactly max_len tokens.
        for tok_seq in word_sequences:
            X_seq = []
            for i in range(0, self.max_len):
                try:
                    X_seq.append(tok_seq[i])
                except:
                    X_seq.append("PADword")
            X.append(X_seq)
        # NOTE(review): after the padding above len(word_sequences) >= 32, so
        # this range is normally empty — confirm whether it is dead code.
        for i in range(len(word_sequences), 32):
            X_seq = []
            for i in range(0, self.max_len):
                X_seq.append("PADword")
            X.append(X_seq)
        index2tags = {0:'O', 1:'ID', 2:'PHI', 3:'NAME', 4:'CONTACT',
                      5:'DATE', 6:'AGE', 7:'PROFESSION', 8:'LOCATION'}
        predictions = self.model.predict([X])
        Y_pred_F = []
        # Arg-max over the per-token tag distributions.
        for i in range(0, len(word_sequences)):
            seq = []
            for j in range(0, len(word_sequences[i])):
                max_k = 0
                max_k_val = 0
                # Tokens beyond the model's max_len window are skipped.
                if j>=50:
                    continue
                for k in range(0, len(predictions[i][j])):
                    if predictions[i][j][k] > max_k_val:
                        max_k_val = predictions[i][j][k]
                        max_k = k
                max_str = index2tags[max_k]
                seq.append(max_str)
            Y_pred_F.append(seq)
        final_sequences = []
        # Re-pair the predicted labels with the original tokens, dropping the
        # padding sequences appended earlier.
        for j in range(0, len(Y_pred_F)):
            sentence = []
            if j>=len(sequences):
                continue
            for i in range(len(Y_pred_F[j])-len(sequences[j]), len(Y_pred_F[j])):
                sentence.append((sequences[j][i-(len(Y_pred_F[j])-len(sequences[j]))][0], Y_pred_F[j][i]))
            final_sequences.append(sentence)
        return final_sequences
    def ElmoEmbedding(self, x):
        """Keras Lambda body: run the hub ELMo module on a batch of token
        strings and return the contextual 'elmo' embeddings."""
        return self.elmo_model(
            inputs={"tokens": tf.squeeze(tf.cast(x, tf.string)), "sequence_len": tf.constant(self.batch_size * [self.max_len])
                    },
            signature="tokens",
            as_dict=True)["elmo"]
    def createModel(self, text, GLOVE_DIR):
        """Build and compile the ELMo + residual BiLSTM tagging model.

        NOTE(review): both parameters are unused by the visible body.
        """
        input_text = Input(shape=(self.max_len,), dtype="string")
        embedding = Lambda(self.ElmoEmbedding, output_shape=(self.max_len, 1024))(input_text)
        x = Bidirectional(LSTM(units=512, return_sequences=True,
                               recurrent_dropout=0.2, dropout=0.2))(embedding)
        x_rnn = Bidirectional(LSTM(units=512, return_sequences=True,
                                   recurrent_dropout=0.2, dropout=0.2))(x)
        x = add([x, x_rnn])  # residual connection to the first biLSTM
        out = TimeDistributed(Dense(self.n_tags, activation="softmax"))(x)
        self.model = Model(input_text, out)
        self.model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                           metrics=["accuracy"])
        self.model.summary()
        return self.model
    def transform_sequences(self, token_sequences):
        """Convert (token, tag) sequences into padded X (tokens) and Y
        (integer tag ids) training arrays of length max_len."""
        text = []
        for ts in token_sequences:
            for t in ts:
                text.append(t[0])
        X = []
        Y = []
        all_tags = []
        for tok_seq in token_sequences:
            X_seq = []
            Y_seq = []
            for i in range(0, self.max_len):
                try:
                    X_seq.append(tok_seq[i][0])
                    Y_seq.append(tok_seq[i][1])
                    all_tags.append(tok_seq[i][1])
                except:
                    # Sequence shorter than max_len: pad with neutral tokens.
                    X_seq.append("PADword")
                    Y_seq.append("O")
            X.append(X_seq)
            Y.append(Y_seq)
        self.n_tags = len(set(all_tags))
        self.tags = set(all_tags)
        tags2index = {'O':0, 'ID':1, 'PHI':2, 'NAME':3, 'CONTACT':4,
                      'DATE':5, 'AGE':6, 'PROFESSION':7, 'LOCATION':8}
        Y = [[tags2index[w] for w in s] for s in Y]
        return X, Y
    def learn(self, X, Y, epochs=1):
        """
        Method for the training ELMo BiLSTM NER model
        :param X: Training sequences
        :param Y: Results of training sequences
        :param epochs: number of epochs
        :return:
        """
        # Split 90/10 into train/validation, trimmed to whole batches.
        first = int(np.floor(0.9*len(X)/self.batch_size))
        second = int(np.floor(0.1*len(X)/self.batch_size))
        X_tr, X_val = X[:first * self.batch_size], X[-second * self.batch_size:]
        y_tr, y_val = Y[:first * self.batch_size], Y[-second * self.batch_size:]
        y_tr = np.array(y_tr)
        y_val = np.array(y_val)
        # Sparse categorical targets need a trailing singleton dimension.
        y_tr = y_tr.reshape(y_tr.shape[0], y_tr.shape[1], 1)
        y_val = y_val.reshape(y_val.shape[0], y_val.shape[1], 1)
        self.model.fit(np.array(X_tr), y_tr, validation_data=(np.array(X_val), y_val),
                       batch_size=self.batch_size, epochs=epochs)
    def evaluate(self, X, Y):
        """
        Function that evaluates the model and calculates precision, recall and F1-score
        :param X: sequences that should be evaluated
        :param Y: true positive predictions for evaluation
        :return: prints the table with precision,recall and f1-score
        """
        # Trim to whole batches, as required by the fixed ELMo batch size.
        first = int(np.floor(int(len(X) / self.batch_size)) * self.batch_size)
        X = X[:first]
        Y = Y[:first]
        Y_pred = self.model.predict(np.array(X))
        from sklearn import metrics
        index2tags = {0:'O', 1:'ID', 2:'PHI', 3:'NAME', 4:'CONTACT',
                      5:'DATE', 6:'AGE', 7:'PROFESSION', 8:'LOCATION'}
        labels = ["ID", "PHI", "NAME", "CONTACT", "DATE", "AGE",
                  "PROFESSION", "LOCATION"]
        # Flatten predictions to one tag string per token (arg-max).
        Y_pred_F = []
        for i in range(0, len(Y_pred)):
            for j in range(0, len(Y_pred[i])):
                max_k = 0
                max_k_val = 0
                for k in range(0, len(Y_pred[i][j])):
                    if Y_pred[i][j][k] > max_k_val:
                        max_k_val = Y_pred[i][j][k]
                        max_k = k
                Y_pred_F.append(index2tags[max_k])
        # Flatten gold tag ids the same way for a token-level report.
        Y_test_F = []
        for i in range(0, len(Y)):
            for j in range(0, len(Y[i])):
                Y_test_F.append(index2tags[Y[i][j]])
        print(metrics.classification_report(Y_pred_F, Y_test_F, labels=labels))
    def save(self, model_path):
        """
        Function to save model. Models are saved as h5 files in Models directory. Name is passed as argument
        :param model_path: Name of the model file
        :return: Doesn't return anything
        """
        self.model.save("Models/"+model_path+".h5")
        print("Saved model to disk")
|
"""Plugin specification for Tox 4"""
from __future__ import annotations
import os
import shutil
import sys
import typing as t
from pathlib import Path
from tox.config.cli.parser import DEFAULT_VERBOSITY
from tox.config.sets import EnvConfigSet
from tox.execute.api import Execute
from tox.execute.local_sub_process import LocalSubProcessExecuteInstance
from tox.execute.request import StdinSource
from tox.plugin import impl
from tox.tox_env.package import Package, PackageToxEnv
from tox.tox_env.python.api import Python, PythonInfo
from tox.tox_env.python.package import WheelPackage
from tox.tox_env.python.pip.pip_install import Pip
from tox.tox_env.python.runner import PythonRun
from virtualenv.discovery.builtin import get_interpreter
from tox_pdm.utils import clone_pdm_files, get_env_lib_path, is_same_path, is_sub_array
if t.TYPE_CHECKING:
from argparse import ArgumentParser
from tox.execute.api import ExecuteInstance, ExecuteOptions
from tox.execute.request import ExecuteRequest
from tox.execute.stream import SyncWrite
from tox.tox_env.api import ToxEnvCreateArgs
from tox.tox_env.register import ToxEnvRegister
@impl
def tox_add_option(parser: ArgumentParser) -> None:
    """Expose the ``--pdm`` CLI flag and pass PYTHONPATH through to test envs."""
    os.environ["TOX_TESTENV_PASSENV"] = "PYTHONPATH"
    parser.add_argument(
        "--pdm",
        default="pdm",
        help="The executable path of PDM",
    )
@impl
def tox_register_tox_env(register: ToxEnvRegister) -> t.Optional[bool]:
    """Register the PDM run/package environments and make ``pdm`` the default runner."""
    register.add_run_env(PdmRunner)
    register.add_package_env(PdmPackageEnv)
    register.default_run_env = "pdm"
class Pdm(Python):
    """Base tox environment backed by PDM (PEP 582 layout) instead of virtualenv/pip."""

    def __init__(self, create_args: ToxEnvCreateArgs) -> None:
        # Lazily-created collaborators; see the properties below.
        self._executor: t.Optional[Execute] = None
        self._installer: t.Optional[Pip] = None
        self._package_path: t.Optional[Path] = None
        super().__init__(create_args)

    @property
    def executor(self) -> Execute:
        """Executor that routes commands through ``pdm run``."""
        if not self._executor:
            self._executor = PDMRunExecutor(self.options.is_colored)
        return self._executor

    @property
    def installer(self) -> Pip:
        """Pip-compatible installer targeting the PEP 582 library path."""
        if self._installer is None:
            self._installer = PipPep582(self)
        return self._installer

    @property
    def package_path(self) -> Path:
        """Root of the PEP 582 package directory (parent of its ``lib``)."""
        if not self._package_path:
            self._package_path = Path(
                get_env_lib_path(self.options.pdm, self.env_dir)
            ).parent
        return self._package_path

    @property
    def runs_on_platform(self) -> str:
        return sys.platform

    def create_python_env(self) -> None:
        """Clone pyproject/lock files into the env and point PDM at the interpreter."""
        clone_pdm_files(self.env_dir, self.core["toxinidir"])
        self.execute(
            ["pdm", "use", "-f", self.base_python.extra["executable"]], StdinSource.OFF
        )

    def _get_python(self, base_python: t.List[str]) -> t.Optional[PythonInfo]:
        # First interpreter that virtualenv's discovery resolves wins.
        interpreter = next(
            filter(None, (get_interpreter(p, []) for p in base_python)), None
        )
        if not interpreter:
            return None
        return PythonInfo(
            implementation=interpreter.implementation,
            version_info=interpreter.version_info,
            version=interpreter.version,
            is_64=(interpreter.architecture == 64),
            platform=interpreter.platform,
            extra={"executable": Path(interpreter.system_executable)},
        )

    def python_cache(self) -> t.Dict[str, t.Any]:
        base = super().python_cache()
        base.update({"executable": str(self.base_python.extra["executable"])})
        return base

    def env_bin_dir(self) -> Path:
        return self.package_path / ("Scripts" if os.name == "nt" else "bin")

    def env_python(self) -> Path:
        return t.cast(Path, self.base_python.extra["executable"])

    def env_site_package_dir(self) -> Path:
        return self.package_path / "lib"

    def prepend_env_var_path(self) -> t.List[Path]:
        # BUG FIX: ``env_bin_dir`` is a method, not a property; the original
        # returned the bound method object in the list instead of the Path.
        return [self.env_bin_dir()]
class PdmRunner(Pdm, PythonRun):
    """Run environment whose dependencies are installed with ``pdm install``."""

    def _setup_env(self) -> None:
        super()._setup_env()
        # Install the configured dependency groups; the project itself is skipped.
        install_cmd = ["pdm", "install", "--no-self"]
        for group in self.conf["groups"]:
            install_cmd += ["--group", group]
        self.execute(install_cmd, StdinSource.OFF)
        # Console scripts land under <lib>/bin; relocate them next to the
        # env's executables so they end up on PATH.
        source_dir = self.env_site_package_dir() / "bin"
        if not source_dir.exists():
            return
        target_dir = self.env_bin_dir()
        for script in source_dir.iterdir():
            shutil.move(os.fspath(script), os.fspath(target_dir))

    def register_config(self) -> None:
        super().register_config()
        self.conf.add_config(
            "groups",
            of_type=t.List[str],
            default=[],
            desc="Specify the dependency groups to install",
        )

    @property
    def _package_tox_env_type(self) -> str:
        return "pdm-pep-517"

    @staticmethod
    def id() -> str:
        return "pdm"

    @property
    def _external_pkg_tox_env_type(self) -> str:
        return "virtualenv-cmd-builder"
class PdmPackageEnv(Pdm, PackageToxEnv):
    """Packaging environment that builds sdists/wheels via ``pdm build``."""

    def register_config(self) -> None:
        super().register_config()
        self.conf.add_config(
            keys=["pkg_dir"],
            of_type=Path,
            default=lambda conf, name: self.env_dir / "dist",
            desc="directory where to put project packages",
        )

    @property
    def pkg_dir(self) -> Path:
        return t.cast(Path, self.conf["pkg_dir"])

    def perform_packaging(self, for_env: EnvConfigSet) -> t.List[Package]:
        """Build the artifact type requested by *for_env* and return it."""
        wanted: str = for_env["package"]
        build_cmd = [
            "pdm",
            "build",
            "-p",
            str(self.conf["package_root"]),
            "-d",
            str(self.pkg_dir),
        ]
        if wanted == "wheel":
            build_cmd.append("--no-sdist")
            suffix = ".whl"
        else:
            build_cmd.append("--no-wheel")
            suffix = ".tar.gz"
        self.execute(build_cmd, StdinSource.OFF)
        # Pick up the first artifact with the expected extension.
        artifact = next(self.pkg_dir.glob(f"*{suffix}"))
        return [WheelPackage(artifact, [])]

    @staticmethod
    def id() -> str:
        return "pdm-pep-517"

    def child_pkg_envs(self, run_conf: EnvConfigSet) -> t.Iterator[PackageToxEnv]:
        return iter(())
class PipPep582(Pip):
    """Installer facade that asks PDM (rather than pip) what is installed."""

    def installed(self) -> t.List[str]:
        """Return the env's installed requirements, one spec per list entry."""
        freeze = self._env.execute(
            cmd=["pdm", "list", "--freeze"],
            stdin=StdinSource.OFF,
            run_id="freeze",
            show=self._env.options.verbosity > DEFAULT_VERBOSITY,
        )
        freeze.assert_success()
        return [entry.strip() for entry in freeze.out.splitlines() if entry.strip()]
class PDMRunExecutor(Execute):
    """Execute factory producing instances that wrap commands in ``pdm run``."""

    def build_instance(
        self,
        request: ExecuteRequest,
        options: ExecuteOptions,
        out: SyncWrite,
        err: SyncWrite,
    ) -> "ExecuteInstance":
        return PDMRunExecuteInstance(request, options, out, err)
class PDMRunExecuteInstance(LocalSubProcessExecuteInstance):
    # Rewrites the requested command so it is executed by PDM inside the
    # tox environment's project (PEP 582 layout).
    @property
    def cmd(self) -> t.Sequence[str]:
        """Return the effective command, computed once and cached in ``_cmd``.

        Three cases:
        * the command already invokes PDM -> inject ``-p <env_dir>`` unless a
          project is given explicitly (``-p``/``--project``);
        * a ``pip install`` invocation -> append ``-t <pep582 lib path>`` so
          packages land in the env's library directory;
        * anything else -> wrap it in ``pdm run -p <env_dir>``.

        NOTE(review): ``cmd = self.request.cmd`` aliases the request's own
        list, so the in-place edits below mutate the original request —
        presumably intentional (the rewrite should stick); confirm before
        refactoring.
        """
        if self._cmd is None:
            pdm_exe = self.options._env.options.pdm
            cmd = self.request.cmd
            if cmd[0] == "pdm":
                cmd[0] = pdm_exe
            if is_same_path(cmd[0], pdm_exe):
                if "-p" not in cmd and "--project" not in cmd:
                    cmd[2:2] = ["-p", str(self.options._env.env_dir)]
            elif is_sub_array(["pip", "install"], cmd):
                cmd.extend(["-t", get_env_lib_path(pdm_exe, self.options._env.env_dir)])
            else:
                cmd = [pdm_exe, "run", "-p", str(self.options._env.env_dir)] + cmd
            self._cmd = cmd
        return self._cmd
|
<reponame>RamyaSonar/Natural-language-and-Recommendation-engine-for-Asset-Classification
import pandas as pd
import config
import pickle
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
import numpy as np
# Description: Reads the train and test files
def readTestFiles():
    """Load the train/test feature and label CSVs configured in ``config``."""
    X_train = pd.read_csv(config.X_train_data1, index_col=False)
    print(X_train.shape)
    X_test = pd.read_csv(config.X_test_data1, index_col=False)
    Y_train = pd.read_csv(config.Y_train_data1, index_col=False)
    Y_test = pd.read_csv(config.Y_test_data1, index_col=False)
    return X_train, X_test, Y_train, Y_test
# Description : Generates the performance metrics for the predictions generated
def scores(y_pred, Y_test):
    """Compute accuracy, macro precision, macro recall and macro F1.

    :param y_pred: predicted labels
    :param Y_test: true labels
    :return: list ``[accuracy, precision, recall, f1]``
    """
    print('score')
    # BUG FIX: sklearn metrics take (y_true, y_pred) in that order; the
    # original passed predictions first, swapping precision and recall.
    acc = accuracy_score(Y_test, y_pred)
    pr = precision_score(Y_test, y_pred, average='macro')
    re = recall_score(Y_test, y_pred, average='macro')
    f1 = f1_score(Y_test, y_pred, average='macro')
    return [acc, pr, re, f1]
# Description: This function generates a csv file with the performance metrics of the shallow learning models.
def performanceData1(X_test, Y_test):
    """Evaluate the pickled shallow models and persist their metrics as CSV.

    :param X_test: test features
    :param Y_test: test labels
    :return: DataFrame with one row (accuracy/precision/recall/F1) per model
    """
    # (label, pickle path) for every shallow model, in output-row order.
    model_files = [
        ('NB', config.nb_model_data1),
        ('KNN', config.knn_model_data1),
        ('DT', config.dt_model_data1),
        ('RF', config.rf_model_data1),
    ]
    all_scores = []
    for label, path in model_files:
        print(label)
        all_scores.append(_score_pickled_model(path, X_test, Y_test))
    # creating dataframe
    df = pd.DataFrame(all_scores, columns=["accuracy", "precision", "recall", "F1-score"])
    print(df.head())
    df.to_csv(config.performance, index=False)
    return df


def _score_pickled_model(model_path, X_test, Y_test):
    """Load a pickled classifier and score its predictions on the test set.

    BUG FIX: uses a context manager so the pickle file handle is closed
    (the original leaked one open handle per model).
    """
    with open(model_path, 'rb') as fh:
        model = pickle.load(fh)
    return scores(model.predict(X_test), Y_test)
# Description: This function loads the Random Forest Model to generate the top 20 features that are used for predicting asset classes.
def importantFeatures(X_train):
    """Return the top-20 feature names and importances from the pickled RF.

    :param X_train: training DataFrame (only its columns are used)
    :return: (features, importances) — two parallel lists of length <= 20
    """
    # BUG FIX: close the pickle file handle (the original leaked it); also
    # drops the unused ``names``/``new_indices`` locals of the original.
    with open(config.rf_model_data1, 'rb') as fh:
        rf = pickle.load(fh)
    importances = rf.feature_importances_
    # Indices of the 20 most important features, descending importance.
    order = np.argsort(importances)[::-1][:20]
    features = list(X_train.columns[order])
    top_importances = list(importances[order])
    return features, top_importances
# Description: This function reads the deep_learning_metrics csv file to generate results dynamically on the server
def deep_learning_metrics():
    """Read the deep-learning metrics CSV and return its columns as lists.

    :return: tuple (accuracy, precision, recall, f1_score), each a list
    """
    metrics_df = pd.read_csv(config.performance_for_all_DL_models)
    columns = ('accuracy', 'precision', 'recall', 'f1_score')
    return tuple(list(metrics_df[col]) for col in columns)
# Description: This function invokes all other functions
def main():
    """Run the evaluation pipeline and return the shallow-model metric columns."""
    X_train, X_test, Y_train, Y_test = readTestFiles()
    performanceData1(X_test, Y_test)
    # Re-read the metrics that performanceData1 just wrote.
    report = pd.read_csv(config.performance)
    return (
        list(report['accuracy']),
        list(report['precision']),
        list(report['recall']),
        list(report['F1-score']),
    )
if __name__ == "__main__":
    # Guard the entry point so importing this module does not run the pipeline.
    main()
|
import cv2
import numpy as np
from plot_one import plot_me_one
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.integrate import simps
# Physical constants (SI units).
M_sun=1.989*10**30;   # solar mass [kg]
R_sun=696340*10**3;   # solar radius [m]
M=0.62*M_sun          # mass of the star
r_star=0.0151*R_sun   # stellar radius [m]
G=6.67408*10**(-11);  # gravitational constant
####
####
#### This code will create the sandbox and allow user to play around with densities. To begin one needs a density to start with.
#### You can generate one by running one of the other programs.
#### The controls are:
####
#### c - switches between drawing circles and drawing by hand. Circles are drawn between inner and outer radius
#### B - sets color/density to 0
#### b - decreases current color/density by 1
#### w - increases current color/density by 1
#### space - Plot the emission lines from current density (the handler below keys on ord(' '))
#### Esc - close
####
img=np.load("density_todraw.npy")  # starting density map (2-D array)
# variables
ix = -1          # last mouse x (drawing state)
iy = -1          # last mouse y (drawing state)
drawing = False  # True while a stroke / circle pair is in progress
size=img.shape[0]  # image side length; circles are centred at (size//2, size//2)
color=1            # current "density" value painted by the mouse
circle=True        # circle mode vs freehand mode
# Parameters passed through to plot_me_one().
consts={'e':0.0,
        'b':0.0,
        'view_angle':np.pi/2,
        'inclination_angle':np.pi/5,
        'r_max':550*r_star,
        'r_min':r_star
        }
def on_change(val):
    """Trackbar callback: map slider position (0-200) to emissivity b in [-4, 4].

    :param val: raw trackbar position supplied by OpenCV
    """
    # Compute once instead of duplicating the expression (original computed
    # it twice, once for the dict and once for the print).
    b = 4 * (val - 100) / 100
    consts['b'] = b
    print(b)
def draw_rectangle_with_drag(event, x, y, flags, param):
    """Mouse callback: paints density values into the global ``img``.

    Circle mode (``circle`` True): the first click records a reference
    radius from the image centre; the second click draws an annulus
    spanning the two radii. Freehand mode: click-drag draws thick line
    segments. State lives in the module globals ``ix``/``iy``/``ir``/
    ``drawing``.
    """
    global ix, iy,ir, drawing, img
    # --- circle mode: two clicks define an annulus ---
    if event == cv2.EVENT_LBUTTONDOWN and circle:
        if not drawing:
            # First click: remember this point's radius from the centre.
            ix = x
            iy = y
            ir = np.sqrt((ix-size//2)**2+(iy-size//2)**2)
        if drawing:
            # Second click: draw a ring between the two radii (mean radius,
            # thickness = half the radial difference).
            r = np.sqrt((x-size//2)**2+(y-size//2)**2)
            print(r,ir)
            cv2.circle(img, (size//2, size//2), ((r+ir)/2).astype(int), color=color, thickness=np.abs((r-ir)/2).astype(int))
            print('drawn 1')
            print(x,y)
        drawing = not drawing
    # --- freehand mode: drag to draw thick line segments ---
    if event == cv2.EVENT_LBUTTONDOWN and not circle:
        drawing = True
        ix=x
        iy=y
    elif event == cv2.EVENT_MOUSEMOVE and not circle:
        if drawing == True:
            cv2.line(img,(ix,iy),(x,y),color,50)
            ix=x
            iy=y
    elif event == cv2.EVENT_LBUTTONUP and not circle:
        if(drawing):
            cv2.line(img,(ix,iy),(x,y),color,50)
        drawing = False
# Window, trackbar and mouse wiring for the density sandbox.
cv2.namedWindow(winname = "Density of gas")
cv2.createTrackbar('Emissivity(b)', "Density of gas", 100, 200, on_change)
cv2.setMouseCallback("Density of gas",
                     draw_rectangle_with_drag)
# Interactive matplotlib figure for the emission-line plots.
fig_hist = plt.figure(1)
ax_hist = fig_hist.add_subplot(1, 1, 1)
plt.ion()
plt.xlabel("Velocity/Wavelength")
plt.ylabel("Flux")
# Overlay the observed, area-normalised SiII line profiles for reference.
inst_names=['Xshooter','MIKE2']
for j,inst_name in enumerate(inst_names):
    x,y=np.loadtxt('data/SiII'+'_'+inst_name+'.csv', delimiter=',', unpack=True)
    area = simps((y-1),x)
    y=(y-1)/area
    ax_hist.plot(x,y, linewidth=1,label=inst_name)
# Main event loop: show the (max-normalised) density and handle key commands.
while True:
    # imgC = cv2.applyColorMap(img, cv2.COLORMAP_JET)
    if img.max()!=0: cv2.imshow("Density of gas", img/img.max())
    else: cv2.imshow("Density of gas", img)
    k = cv2.waitKey(33)
    if k == 27:
        # Esc: quit.
        break
    elif k== ord(' '):
        # Space: compute and plot emission lines for the current density.
        print('Plotting')
        plot_me_one(img,ax_hist,consts)
        plt.show()
        plt.pause(0.001)
    elif k== ord('B'):
        color=0
        print('Density now: '+str(color))
    elif k== ord('b'):
        color-=1
        print('Density now: '+str(color))
    elif k== ord('w'):
        color+=1
        print('Density now: '+str(color))
    elif k== ord('c'):
        # Toggle circle/freehand mode; cancel any stroke in progress.
        circle = not circle
        drawing=False
        if(circle):
            print('Now in circle mode')
        else:
            print('Now in drawing mode')
cv2.destroyAllWindows()
|
<reponame>robertolaru/chip8py
import random
import constants
import pygame
import sys
class CPU:
    """CHIP-8 interpreter core: memory, registers, timers, display and opcodes.

    Fixes over the previous revision: the carry/borrow flag logic in
    ``add_reg``/``sub_reg``/``diff`` compared the value AFTER masking to
    8 bits, so the comparisons could never trigger and VF was always wrong;
    ``lshift`` stored 0x80 in VF instead of 0/1 and did not mask the shifted
    byte, letting register values exceed 0xff.
    """

    def __init__(self) -> None:
        self.memory = bytearray(constants.MEMORY_SIZE)
        self.display = bytearray(constants.DISPLAY_SIZE)
        self.key = None
        # Create registers
        self.v = [0x0 for i in range(16)]
        self.i = 0
        self.pc = 0
        self.sp = 0
        self.stack = []
        self.delay_timer = 0
        self.sound_timer = 0
        # Initialize PC at entry point
        self.pc = 0x200
        # Draw flag
        self.draw_flag = False
        # Keyboard layout -> CHIP-8 hex keypad mapping
        self.KEYS = {
            pygame.K_1: 0x1,
            pygame.K_2: 0x2,
            pygame.K_3: 0x3,
            pygame.K_4: 0xc,
            pygame.K_q: 0x4,
            pygame.K_w: 0x5,
            pygame.K_e: 0x6,
            pygame.K_r: 0xd,
            pygame.K_a: 0x7,
            pygame.K_s: 0x8,
            pygame.K_d: 0x9,
            pygame.K_f: 0xe,
            pygame.K_z: 0xa,
            pygame.K_x: 0x0,
            pygame.K_c: 0xb,
            pygame.K_v: 0xf
        }
        # Opcode dispatch table; "0" digits in the pattern are wildcards
        # (see load_opcode).
        self.CODES = (
            # (pattern, self.function)
            ("1000", self.jmp),
            ("2000", self.call),
            ("3000", self.skip_equ_val),
            ("4000", self.skip_not_val),
            ("5000", self.skip_equ_reg),
            ("6000", self.set_val),
            ("7000", self.add_val),
            ("8001", self.set_or),
            ("8002", self.set_and),
            ("8003", self.set_xor),
            ("8004", self.add_reg),
            ("8005", self.sub_reg),
            ("8006", self.rshift),
            ("8007", self.diff),
            ("800e", self.lshift),
            ("8000", self.set_reg),
            ("9000", self.skip_not_reg),
            ("a000", self.set_i_addr),
            ("b000", self.jmp_v0),
            ("c000", self.rand),
            ("d000", self.draw),
            ("e09e", self.skip_key_pressed),
            ("e0a1", self.skip_key_not_pressed),
            ("f015", self.set_delay),
            ("f018", self.set_sound_t),
            ("f01e", self.add_i_reg),
            ("f029", self.set_i_sprite),
            ("f033", self.bcd),
            ("f055", self.save),
            ("f065", self.load),
            ("f007", self.get_delay),
            ("f00a", self.get_key)
        )
        self.FONT = (
            # (char_sprite_data, offset)
            (0xf0909090f0, 0),   # 0
            (0x2060202070, 5),   # 1
            (0xf010f080f0, 10),  # 2
            (0xf010f010f0, 15),  # 3
            (0x9090f01010, 20),  # 4
            (0xf080f010f0, 25),  # 5
            (0xf080f090f0, 30),  # 6
            (0xf010204040, 35),  # 7
            (0xf090f090f0, 40),  # 8
            (0xf090f010f0, 45),  # 9
            (0xf090f09090, 50),  # A
            (0xe090e090e0, 55),  # B
            (0xf0808080f0, 60),  # C
            (0xe0909090e0, 65),  # D
            (0xf080f080f0, 70),  # E
            (0xf080f08080, 75)   # F
        )
        # Load font in memory
        cursor = constants.FONT_ADDRESS  # Starting position for font data
        for code in self.FONT:
            self.memory[cursor: cursor + 5] = code[0].to_bytes(5, "big")
            cursor += 5

    # Convert bytearray to int
    def get_int(self, _int):
        return int.from_bytes(_int, "big")

    # Skip next instruction
    def skip(self):
        self.pc += 2

    # Don't skip next instruction.
    # Decreases PC to cancel cycle skip
    def prevent_skip(self):
        self.pc -= 2

    def load_bin(self, data):
        """Copy a ROM image into memory at the 0x200 entry point."""
        self.memory[0x200: 0x200 + len(data)] = data

    def load_opcode(self):
        """Decode ``self.opcode`` and execute the matching handler.

        A "0" digit in a CODES pattern is a wildcard; all other digits must
        match the opcode's hex form exactly.
        """
        this_opcode = self.opcode.hex().lower()
        # opcodes with exact match
        if this_opcode == "00e0":
            self.clear()
        elif this_opcode == "00ee":
            self.ret()
        # opcodes where partial match with pattern is expected
        else:
            for _opcode, function in self.CODES:
                all_match = True
                for i, ch in enumerate(_opcode.lower()):
                    if ch == "0":
                        pass
                    elif not all_match:
                        break
                    elif ch != this_opcode[i]:
                        all_match = False
                if all_match:
                    function()
                    break

    def get_key_and_events(self, halt=False):
        """Poll pygame events, recording a pressed key in ``self.key``.

        With ``halt=True`` this blocks until a key event arrives (FX0A).
        NOTE(review): an unmapped key yields ``KEYS.get(...) -> None``, which
        get_key would then store into V[X]; confirm whether unmapped keys
        should instead be ignored.
        """
        key_acquired = False
        while (not key_acquired):
            events = pygame.event.get()
            if events != []:
                for event in events:
                    if event.type == pygame.KEYDOWN:
                        key_acquired = True
                        key = self.KEYS.get(event.key)
                        self.key = key
                    elif event.type == pygame.QUIT:
                        print("Quitting...")
                        pygame.quit()
                        sys.exit()
            if not key_acquired:
                self.key = None
            if not halt:
                break

    # Methods to fetch values from opcodes, depending on opcode pattern
    def get_addr(self):
        # _NNN
        return self.get_int(self.opcode) & 0x0fff

    def get_reg(self):
        # _X__
        return (self.get_int(self.opcode) & 0x0f00) >> 8

    def get_reg_val(self):
        # _XNN
        return self.get_reg(), self.get_int(self.opcode) & 0x00ff

    def get_regs(self):
        # _XY_
        return self.get_reg(), (self.get_int(self.opcode) & 0x00f0) >> 4

    def get_regs_val(self):
        # _XYN
        return *self.get_regs(), self.get_int(self.opcode) & 0x000f

    # Opcode methods
    def clear(self):
        """00E0: clear the display."""
        self.display = bytearray(constants.DISPLAY_SIZE)
        self.draw_flag = True

    def ret(self):
        """00EE: return from subroutine."""
        _pop = self.stack.pop()
        self.pc = _pop + 2
        self.prevent_skip()

    def jmp(self, v0=0):
        """1NNN (and BNNN via jmp_v0): jump to NNN (+ V0)."""
        self.pc = self.get_addr() + v0
        self.prevent_skip()

    def call(self):
        """2NNN: call subroutine, pushing the current PC."""
        self.stack.append(self.pc & 0xfff)
        self.pc = self.get_addr()  # called address
        self.prevent_skip()

    def skip_equ_val(self):
        """3XNN: skip next instruction if VX == NN."""
        vx, nn = self.get_reg_val()
        if self.v[vx] == nn:
            self.skip()

    def skip_not_val(self):
        """4XNN: skip next instruction if VX != NN."""
        vx, nn = self.get_reg_val()
        if self.v[vx] != nn:
            self.skip()

    def skip_equ_reg(self):
        """5XY0: skip next instruction if VX == VY."""
        vx, vy = self.get_regs()
        if self.v[vx] == self.v[vy]:
            self.skip()

    def set_val(self):
        """6XNN: VX = NN."""
        vx, nn = self.get_reg_val()
        self.v[vx] = nn

    def add_val(self):
        """7XNN: VX += NN (no carry flag, per spec)."""
        vx, nn = self.get_reg_val()
        res = (self.v[vx] + nn) & 0xff
        self.v[vx] = res

    def set_reg(self):
        """8XY0: VX = VY."""
        vx, vy = self.get_regs()
        self.v[vx] = self.v[vy]

    def set_or(self):
        """8XY1: VX |= VY."""
        vx, vy = self.get_regs()
        self.v[vx] |= self.v[vy]

    def set_and(self):
        """8XY2: VX &= VY."""
        vx, vy = self.get_regs()
        self.v[vx] &= self.v[vy]

    def set_xor(self):
        """8XY3: VX ^= VY."""
        vx, vy = self.get_regs()
        self.v[vx] ^= self.v[vy]

    def add_reg(self):
        """8XY4: VX += VY; VF = 1 on carry, else 0."""
        vx, vy = self.get_regs()
        # BUG FIX: test the unmasked sum; the old code masked first, so the
        # carry condition was never true and VF was always 0.
        total = self.v[vx] + self.v[vy]
        self.v[vx] = total & 0xff
        if total > 0xff:
            self.v[0xf] = 1  # set to 1 if carry
        else:
            self.v[0xf] = 0

    def sub_reg(self):
        """8XY5: VX -= VY; VF = 0 on borrow, else 1."""
        vx, vy = self.get_regs()
        # BUG FIX: test the unmasked difference; after masking it can never
        # be negative, so VF was always 1.
        difference = self.v[vx] - self.v[vy]
        self.v[vx] = difference & 0xff
        if difference < 0:
            self.v[0xf] = 0  # set to 0 if borrow
        else:
            self.v[0xf] = 1

    def rshift(self):
        """8XY6: VX = VY >> 1; VF = shifted-out least significant bit."""
        vx, vy = self.get_regs()
        self.v[0xf] = self.v[vy] & 0x1  # least significant bit
        self.v[vx] = self.v[vy] >> 1

    def diff(self):
        """8XY7: VX = VY - VX; VF = 0 on borrow, else 1."""
        vx, vy = self.get_regs()
        # BUG FIX: test the unmasked difference (same issue as sub_reg).
        difference = self.v[vy] - self.v[vx]
        self.v[vx] = difference & 0xff
        if difference < 0:
            self.v[0xf] = 0  # borrow flag
        else:
            self.v[0xf] = 1

    def lshift(self):
        """8XYE: VX = VY << 1; VF = shifted-out most significant bit."""
        vx, vy = self.get_regs()
        # BUG FIX: VF must be 0/1, not the raw 0x80 bit; and the result must
        # be masked to 8 bits or the register overflows 0xff.
        self.v[0xf] = (self.v[vy] >> 7) & 0x1  # most significant bit
        self.v[vx] = (self.v[vy] << 1) & 0xff

    def skip_not_reg(self):
        """9XY0: skip next instruction if VX != VY."""
        vx, vy = self.get_regs()
        if self.v[vx] != self.v[vy]:
            self.skip()

    def set_i_addr(self):
        """ANNN: I = NNN."""
        self.i = self.get_addr() & 0xfff

    def jmp_v0(self):
        """BNNN: jump to NNN + V0."""
        self.jmp(v0=self.v[0])

    def rand(self):
        """CXNN: VX = random byte AND NN."""
        vx, mask = self.get_reg_val()
        rnd = random.randint(0, 255)
        self.v[vx] = rnd & mask

    def draw(self):
        """DXYN: XOR-draw an 8xN sprite at (VX, VY); VF = collision flag."""
        vx, vy, height = self.get_regs_val()
        x, y = self.v[vx], self.v[vy]
        # One 8-bit row string per sprite line.
        sprite_data = [format(self.memory[self.i + h], "b").zfill(8)
                       for h in range(height)]
        collision = False
        for h in range(height):
            start_pixel = x + (y + h) * 64
            for i, pixel in enumerate(sprite_data[h]):
                pos = (start_pixel + i) & 0x7ff  # wrap inside the 2048-pixel display
                prev = self.display[pos]
                if int(pixel) == 1:
                    if prev == 1:
                        collision = True
                        self.display[pos] = 0
                    else:
                        self.display[pos] = 1
        if collision:
            self.v[0xf] = 1
        else:
            self.v[0xf] = 0
        self.draw_flag = True

    def skip_key_pressed(self):
        """EX9E: skip next instruction if key VX is pressed."""
        self.get_key_and_events(halt=False)
        vx = self.get_reg()
        if self.key == self.v[vx]:
            self.skip()

    def skip_key_not_pressed(self):
        """EXA1: skip next instruction if key VX is not pressed."""
        self.get_key_and_events(halt=False)
        vx = self.get_reg()
        if self.key != self.v[vx]:
            self.skip()

    def get_delay(self):
        """FX07: VX = delay timer."""
        vx = self.get_reg()
        self.v[vx] = self.delay_timer

    def get_key(self):
        """FX0A: block until a key press; VX = that key."""
        self.get_key_and_events(halt=True)
        vx = self.get_reg()
        self.v[vx] = self.key

    def set_delay(self):
        """FX15: delay timer = VX."""
        vx = self.get_reg()
        self.delay_timer = self.v[vx]

    def set_sound_t(self):
        """FX18: sound timer = VX."""
        vx = self.get_reg()
        self.sound_timer = self.v[vx]

    def add_i_reg(self):
        """FX1E: I += VX (wrapped to 12 bits)."""
        vx = self.get_reg()
        self.i += self.v[vx]
        self.i &= 0xfff

    def set_i_sprite(self):
        """FX29: I = address of the font sprite for digit VX."""
        vx = self.get_reg()
        char = self.v[vx]
        # Set i to font address + char offset
        self.i = (constants.FONT_ADDRESS + self.FONT[char][1]) & 0xfff

    def bcd(self):
        """FX33: store VX as three decimal digits at I, I+1, I+2."""
        vx = self.get_reg()
        x = self.v[vx]
        # Obtain digits
        digits = str(x).zfill(3)
        # Store each digit back as int in I + i
        for i in range(3):
            self.memory[self.i + i] = int(digits[i])

    def save(self):
        """FX55: dump V0..VX into memory starting at I."""
        vx = self.get_reg()
        for i in range(vx + 1):  # vx included
            self.memory[self.i + i] = self.v[i]

    def load(self):
        """FX65: restore V0..VX from memory starting at I."""
        vx = self.get_reg()
        for i in range(vx + 1):  # vx included
            self.v[i] = self.memory[self.i + i]

    def cycle(self):
        """Run one fetch/decode/execute cycle and advance the PC."""
        # Turn off draw flag. It will be enabled by the appropriate functions
        # if needed
        self.draw_flag = False
        # Fetch
        self.opcode = self.memory[self.pc: self.pc + 2]
        # Decode & execute
        self.load_opcode()
        # Increment PC for the next instruction
        self.pc += 2
|
<gh_stars>1-10
from django.urls import path
from drf_spectacular.views import SpectacularJSONAPIView, SpectacularSwaggerView
from api import views
# REST API routes. Grouped by feature area; more specific patterns must come
# before the generic ones (e.g. health-status/<date>/ before health-status/).
urlpatterns = [
    # --- user & profile ---
    path('user/profile/v2/', views.UserProfileV2View.as_view(), name="api-user-profile-v2"),
    path('user/app-review/', views.UserAppReview.as_view(), name="api-user-app-review"),
    path('user/countries/', views.CountriesView.as_view(), name="api-user-countries"),
    path('user/', views.UserView.as_view(), name="api-user"),
    # --- general recommendations ---
    path('general-recommendations/v2/', views.GeneralRecommendationsView.as_view(), name="api-general-recommendations"),
    path('general-recommendations/read/', views.CreateGeneralRecommendationReadView.as_view(),
         name="api-general-recommendations-read"),
    # --- nutrition ---
    path('nutrition/products/search/', views.ProductSearchView.as_view(), name="api-products-search"),
    path('nutrition/products/missing/', views.MissingProductCreateView.as_view(), name="api-products-missing-create"),
    path('nutrition/daily-reports/light/', views.DailyIntakesReportsLightView.as_view(),
         name="api-daily-reports"),
    path('nutrition/daily-reports/<str:date>/', views.DailyIntakesReportView.as_view(),
         name="api-daily-report"),
    path('nutrition/intake/', views.IntakeCreateView.as_view(), name="api-intake"),
    path('nutrition/intake/<int:id>/', views.IntakeView.as_view(), name="api-intake"),
    path('nutrition/screen/v2/', views.NutritionScreenV2View.as_view(), name="api-nutrition-screen-v2"),
    path('nutrition/weekly/', views.NutritionWeeklyScreenView.as_view(), name="api-nutrition-weekly-screen"),
    # --- health status ---
    path('health-status/blood-pressure/', views.BloodPressureCreateView.as_view(), name="api-blood-pressure-create"),
    path('health-status/blood-pressure/<int:id>/', views.BloodPressureUpdateView.as_view(),
         name="api-blood-pressure-update"),
    path('health-status/pulse/', views.PulseCreateView.as_view(), name="api-pulse-create"),
    path('health-status/pulse/<int:id>/', views.PulseUpdateView.as_view(), name="api-pulse-update"),
    path('health-status/screen/', views.HealthStatusScreenView.as_view(), name="api-health-status-screen"),
    path('health-status/weekly/', views.HealthStatusWeeklyScreenView.as_view(), name="api-health-status-weekly"),
    path('health-status/<str:date>/', views.DailyHealthStatusByDateView.as_view(), name="api-health-status-by-date"),
    path('health-status/', views.DailyHealthStatusView.as_view(), name="api-health-status"),
    # --- peritoneal dialysis (manual and automatic) ---
    path('peritoneal-dialysis/manual/dialysis/create/', views.CreateManualPeritonealDialysisView.as_view(),
         name="api-peritoneal-dialysis-manual-create"),
    path('peritoneal-dialysis/manual/dialysis/<int:id>/', views.UpdateManualPeritonealDialysisView.as_view(),
         name="api-peritoneal-dialysis-manual-dialysis"),
    path('peritoneal-dialysis/manual/screen/v2/', views.ManualPeritonealDialysisScreenView.as_view(),
         name="api-peritoneal-dialysis-manual-screen"),
    path('peritoneal-dialysis/automatic/dialysis/create/', views.CreateAutomaticPeritonealDialysisView.as_view(),
         name="api-peritoneal-dialysis-automatic-create"),
    path('peritoneal-dialysis/automatic/dialysis/<str:date>/', views.UpdateAutomaticPeritonealDialysisView.as_view(),
         name="api-peritoneal-dialysis-automatic-dialysis"),
    path('peritoneal-dialysis/automatic/screen/', views.AutomaticPeritonealDialysisScreenView.as_view(),
         name="api-peritoneal-dialysis-automatic-screen"),
    path('peritoneal-dialysis/automatic/period/', views.AutomaticPeritonealDialysisPeriodView.as_view(),
         name="api-peritoneal-dialysis-automatic-period"),
    # --- OpenAPI schema & docs ---
    path('schema.json/', SpectacularJSONAPIView.as_view(), name='schema'),
    # Optional UI:
    path('', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),
]
|
# -*- coding: utf-8 -*-
from .BaseTest import BaseTest
class PanelTest(BaseTest):
    """Fixture-driven tests for the ``panel`` component.

    Each test delegates to the BaseTest fixture helpers, identified by the
    component name ('panel') and a fixture name; tests of list/table
    placement use the real-Sphinx variant of the helper.
    """

    def test_classes(self):
        """
        Tests a panel with a user specified custom class
        """
        self.do_component_fixture_test('panel', 'panel-classes')

    def test_panel_footer(self):
        """
        Test the panel footer and you can use custom directives within it (e.g: buttons)
        """
        self.do_component_fixture_test('panel', 'panel-footer')

    def test_panel_footer_simple(self):
        """
        Tests a simple panel footer, simple text can be provided via argument
        and test custom class via :class: option
        """
        self.do_component_fixture_test('panel', 'panel-footer-simple')

    def test_panel_heading(self):
        """
        Test panel heading not inside a .panel-title
        """
        self.do_component_fixture_test('panel', 'panel-heading')

    def test_panel_heading_inline_role(self):
        """
        Test panel heading not inside a .panel-title with our inline parser (font awesome icons)
        """
        self.do_component_fixture_test('panel', 'panel-heading-inline-role')

    def test_panel_list(self):
        """
        Tests lists inside a panel are rendered outside the `.panel-body`
        Tests a single list only (no other nodes in a panel)
        """
        self.do_component_fixture_test_with_real_sphinx('panel', 'panel-list')

    def test_panel_list_class(self):
        """
        Test we can apply a class to list items in a panel
        """
        self.do_component_fixture_test_with_real_sphinx('panel', 'panel-list-class')

    def test_panel_list_node_first(self):
        """
        Tests lists inside a panel are rendered outside the `.panel-body`
        Tests multiple nodes, the list should be outside the panel FIRST, the paragraphs inside.
        """
        self.do_component_fixture_test_with_real_sphinx('panel', 'panel-list-node-first')

    def test_panel_list_node_last(self):
        """
        Tests lists inside a panel are rendered outside the `.panel-body`
        Tests multiple nodes, the list should be outside the panel LAST, the paragraphs inside.
        """
        self.do_component_fixture_test_with_real_sphinx('panel', 'panel-list-node-last')

    def test_panel_simple(self):
        """
        Tests a simple panel, By default, we default to the default class if nothing provided.
        """
        self.do_component_fixture_test('panel', 'panel-simple')

    def test_panel_table(self):
        """
        Tests tables inside a panel are rendered outside the `.panel-body`
        Tests a single table only (no other nodes in a panel)
        """
        self.do_component_fixture_test_with_real_sphinx('panel', 'panel-table')

    def test_panel_table_node_first(self):
        """
        Tests tables inside a panel are rendered outside the `.panel-body`
        Tests multiple nodes, the table should be outside the panel FIRST, the paragraphs inside.
        """
        self.do_component_fixture_test_with_real_sphinx('panel', 'panel-table-node-first')

    def test_panel_table_node_last(self):
        """
        Tests tables inside a panel are rendered outside the `.panel-body`
        Tests multiple nodes, the table should be outside the panel LAST, the paragraphs inside.
        """
        self.do_component_fixture_test_with_real_sphinx('panel', 'panel-table-node-last')

    def test_panel_title(self):
        """
        Tests the panel heading with an exclamation mark ! (to indicate it should be placed inside a .panel-title)
        """
        self.do_component_fixture_test('panel', 'panel-title')

    def test_panel_title_inline_role(self):
        """
        Tests we can use inline roles in a panel title
        """
        self.do_component_fixture_test('panel', 'panel-title-inline-role')

    def test_panel_contextual(self):
        """
        Tests panel contextual
        """
        self.do_component_fixture_test('panel', 'panel-contextual')
|
"""
Module: 'flowlib.m5cloud' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1
class Btn:
    """Auto-generated stub of the button manager; all methods are placeholders."""
    def attach():
        pass
    def deinit():
        pass
    def detach():
        pass
    def multiBtnCb():
        pass
    def restart():
        pass
    def timerCb():
        pass
class BtnChild:
    """Auto-generated stub of a single button (press/release state queries)."""
    def deinit():
        pass
    def isPressed():
        pass
    def isReleased():
        pass
    def pressFor():
        pass
    def restart():
        pass
    def upDate():
        pass
    def wasDoublePress():
        pass
    def wasPressed():
        pass
    def wasReleased():
        pass
class IP5306:
    """Auto-generated stub of the IP5306 power-management IC driver."""
    def getBatteryLevel():
        pass
    def init():
        pass
    def isChargeFull():
        pass
    def isCharging():
        pass
    def setCharge():
        pass
    def setChargeVolt():
        pass
    def setVinMaxCurrent():
        pass
class M5Cloud:
    """Auto-generated stub of the M5 cloud connection daemon."""
    def _backend():
        pass
    def _daemonTask():
        pass
    def _error():
        pass
    def _exec_respond():
        pass
    def _msg_deal():
        pass
    def _send_data():
        pass
    def on_connect():
        pass
    def on_data():
        pass
    def run():
        pass
class MQTTClient:
    """Auto-generated stub of the MQTT client used by M5Cloud."""
    def _clean_sock_buffer():
        pass
    def _recv_len():
        pass
    def _send_str():
        pass
    def check_msg():
        pass
    def connect():
        pass
    def disconnect():
        pass
    def lock_msg_rec():
        pass
    def ping():
        pass
    def publish():
        pass
    def set_block():
        pass
    def set_callback():
        pass
    def set_last_will():
        pass
    def socket_connect():
        pass
    def subscribe():
        pass
    def topic_get():
        pass
    def topic_msg_get():
        pass
    def unlock_msg_rec():
        pass
    def wait_msg():
        pass
class Rgb_multi:
    """Auto-generated stub of the RGB LED strip driver."""
    def deinit():
        pass
    def setBrightness():
        pass
    def setColor():
        pass
    def setColorAll():
        pass
    def setColorFrom():
        pass
    def setShowLock():
        pass
    def show():
        pass
# Module-level state codes (as captured from the device firmware).
STA_BUSY = 1
STA_DOWNLOAD = 3
STA_IDLE = 0
STA_UPLOAD = 2
class Speaker:
    """Auto-generated stub of the on-board speaker driver."""
    def _timeout_cb():
        pass
    def checkInit():
        pass
    def setBeat():
        pass
    def setVolume():
        pass
    def sing():
        pass
    def tone():
        pass
# Module-level stub attributes and functions, as captured from the running
# firmware by the stubber. Values are snapshots/placeholders, not live objects.
_thread = None
apikey = '<KEY>'
binascii = None
btn = None
btnA = None
btnB = None
btnC = None
def btnText():
    pass
def cfgRead():
    pass
def cfgWrite():
    pass
config_normal = '{\n    "start": "flow",\n    "mode": "internet",\n    "server": "Flow.m5stack.com", \n    "wifi": {\n        "ssid": "",\n        "password": ""\n    }\n}\n'
def const():
    pass
def core_start():
    pass
display = None
def flowDeinit():
    pass
class flowExit:
    """Auto-generated stub marker class (raised/used to exit the flow loop)."""
gc = None
def getP2PData():
    pass
def get_sd_state():
    pass
def hwDeinit():
    pass
io = None
json = None
lcd = None
def loopExit():
    pass
def loopSetIdle():
    pass
def loopState():
    pass
m5base = None
machine = None
def modeSet():
    pass
module = None
network = None
node_id = '840d8e2598b4'
os = None
power = None
def reconnect():
    pass
def remoteInit():
    pass
def resetDefault():
    pass
rgb = None
def sd_mount():
    pass
def sd_umount():
    pass
def sendP2PData():
    pass
def setP2PData():
    pass
speaker = None
def start():
    pass
def startBeep():
    pass
sys = None
timEx = None
time = None
timeSchedule = None
time_ex = None
timerSch = None
unit = None
def wait():
    pass
def wait_ms():
    pass
wlan_sta = None
|
<reponame>gerlichlab/HiCognition
"""Module with tests realted adding and managing sessions."""
import unittest
from hicognition.test_helpers import LoginTestCase, TempDirTestCase
# add path to import app
# import sys
# sys.path.append("./")
from app import db
from app.models import Dataset, Session, Collection
class TestAddSessionObject(LoginTestCase, TempDirTestCase):
"""Tests whether post routes for sessions works."""
    def setUp(self):
        """Create fixture datasets and collections for two distinct owners."""
        super().setUp()
        # add datasets
        self.empty_owned_dataset_1 = Dataset(id=1, user_id=1)
        self.empty_owned_dataset_2 = Dataset(id=2, user_id=1)
        self.owned_datasets = [self.empty_owned_dataset_1, self.empty_owned_dataset_2]
        # NOTE(review): the unowned dataset reuses id=1 — presumably the two
        # are never persisted together in one test; confirm.
        self.empty_unowned_dataset = Dataset(id=1, user_id=2)
        self.collection_1 = Collection(id=1, user_id=1)
        self.collection_2 = Collection(id=2, user_id=2)
def test_access_denied_without_token(self):
"""Test whether post request results in 401 error
if no token is provided."""
# dispatch post request
response = self.client.post(
"/api/sessions/", content_type="multipart/form-data"
)
self.assertEqual(response.status_code, 401)
def test_invalid_form_no_form_data(self):
"""Test whether post request without form is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
# dispatch post request
response = self.client.post(
"/api/sessions/", content_type="multipart/form-data", headers=token_headers
)
self.assertEqual(response.status_code, 400)
def test_invalid_form_no_name(self):
"""Test whether post request without name is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
data = {
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[1, 2, 3, 4]",
"used_collections": "[1,2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 400)
def test_invalid_form_no_session_object(self):
"""Test whether post request without session_object is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
data = {
"name": "test-session",
"session_type": "compare",
"used_datasets": "[1, 2, 3, 4]",
"used_collections": "[1,2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 400)
def test_invalid_form_no_session_type(self):
"""Test whether post request without session_type is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
data = {
"name": "test-session",
"session_object": "test-object",
"used_datasets": "[1, 2, 3, 4]",
"used_collections": "[1,2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 400)
def test_invalid_form_no_used_datasets(self):
"""Test whether post request without used_datasets is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_collections": "[1,2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 400)
def test_invalid_form_no_used_collections(self):
"""Test whether post request without used_datasets is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[1,2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 400)
def test_invalied_from_non_existing_datasets(self):
"""Test whether post request with non-existing datasets is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[1, 2, 3]",
"used_collections": "[]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 400)
def test_invalid_from_non_existing_collections(self):
"""Test whether post request with non-existing datasets is rejected."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[]",
"used_collections": "[1,2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 400)
def test_session_w_unowned_dataset_rejected(self):
"""Test whether post request to add a session with a dataset
that is not owned is rejected"""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
# add dataset
db.session.add(self.empty_unowned_dataset)
db.session.commit()
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[1]",
"used_collections": "[1,2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 403)
def test_session_w_unowned_collections_rejected(self):
"""Test whether post request to add a session with a dataset
that is not owned is rejected"""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
# add dataset
db.session.add(self.collection_2)
db.session.commit()
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[]",
"used_collections": "[2]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 403)
def test_session_w_existing_dataset_added_correctly(self):
"""Test whether post request to add a session with a single dataset is
processed correctly."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
# add dataset
db.session.add(self.empty_owned_dataset_1)
db.session.commit()
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[1]",
"used_collections": "[]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {"session_id": "1"})
session = Session.query.get(1)
self.assertTrue(session is not None)
self.assertEqual(session.datasets, [self.empty_owned_dataset_1])
self.assertEqual(len(session.collections), 0)
def test_session_w_existing_datasets_and_collections_added_correctly(self):
"""Test whether post request to add a session with multiple datasets is
processed correctly."""
# authenticate
token = self.add_and_authenticate("test", "asdf")
token_headers = self.get_token_header(token)
# add dataset
db.session.add_all(self.owned_datasets)
db.session.add(self.collection_1)
db.session.commit()
data = {
"name": "test-session",
"session_object": "test-object",
"session_type": "compare",
"used_datasets": "[1, 2]",
"used_collections": "[1]",
}
# dispatch post request
response = self.client.post(
"/api/sessions/",
content_type="multipart/form-data",
headers=token_headers,
data=data,
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {"session_id": "1"})
session = Session.query.get(1)
self.assertTrue(session is not None)
self.assertEqual(session.datasets, self.owned_datasets)
self.assertEqual(session.collections, [self.collection_1])
if __name__ == "__main__":
res = unittest.main(verbosity=3, exit=False)
|
"""Bernoulli-Bernoulli Restricted Boltzmann Machines with Energy-based Dropout.
"""
import time
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import learnergy.utils.exception as ex
import learnergy.utils.logging as l
from learnergy.models.bernoulli import RBM
# Module-level logger obtained through learnergy's logging utilities.
logger = l.get_logger(__name__)
class EDropoutRBM(RBM):
    """An EDropoutRBM class provides the basic implementation for
    Bernoulli-Bernoulli Restricted Boltzmann Machines along with a Energy-based Dropout regularization.

    References:
        <NAME>, <NAME>, <NAME>, <NAME>.
        Energy-based Dropout in Restricted Boltzmann Machines: Why Do Not Go Random.
        IEEE Transactions on Emerging Topics in Computational Intelligence (2020).

    """

    def __init__(self, n_visible=128, n_hidden=128, steps=1, learning_rate=0.1,
                 momentum=0, decay=0, temperature=1, use_gpu=False):
        """Initialization method.

        Args:
            n_visible (int): Amount of visible units.
            n_hidden (int): Amount of hidden units.
            steps (int): Number of Gibbs' sampling steps.
            learning_rate (float): Learning rate.
            momentum (float): Momentum parameter.
            decay (float): Weight decay used for penalization.
            temperature (float): Temperature factor.
            use_gpu (boolean): Whether GPU should be used or not.

        """
        logger.info('Overriding class: RBM -> EDropoutRBM.')

        super(EDropoutRBM, self).__init__(n_visible, n_hidden, steps, learning_rate,
                                          momentum, decay, temperature, use_gpu)

        # Initializes the Energy-based Dropout mask as an empty tensor;
        # fit() and reconstruct() resize it to (batch_size, n_hidden) with ones
        # before each sampling pass.
        self.M = torch.Tensor()

        logger.info('Class overrided.')

    @property
    def M(self):
        """torch.Tensor: Energy-based Dropout mask (element-wise multiplier on
        the hidden probabilities; 1 keeps a unit, 0 drops it).
        """
        return self._M

    @M.setter
    def M(self, M):
        self._M = M

    def hidden_sampling(self, v, scale=False):
        """Performs the hidden layer sampling, i.e., P(h|v).

        The base RBM's sampling is modified by multiplying the activation
        probabilities with the dropout mask ``self.M``.

        Args:
            v (torch.Tensor): A tensor incoming from the visible layer.
            scale (bool): A boolean to decide whether temperature should be used or not.

        Returns:
            The probabilities and states of the hidden layer sampling.

        """
        # Calculating neurons' activations
        activations = F.linear(v, self.W.t(), self.b)

        # If scaling is true
        if scale:
            # Calculate probabilities with temperature, then apply the mask
            probs = torch.mul(torch.sigmoid(
                torch.div(activations, self.T)), self.M)

        # If scaling is false
        else:
            # Calculate probabilities as usual, then apply the mask
            probs = torch.mul(torch.sigmoid(activations), self.M)

        # Sampling current states from a Bernoulli distribution
        states = torch.bernoulli(probs)

        return probs, states

    def total_energy(self, h, v):
        """Calculates the total energy of the model.

        Args:
            h (torch.Tensor): Hidden sampling states.
            v (torch.Tensor): Visible sampling states.

        Returns:
            The total energy of the model (scalar tensor).

        """
        # Calculates the energy of the hidden layer
        e_h = -torch.mv(h, self.b)

        # Calculates the energy of the visible layer
        e_v = -torch.mv(v, self.a)

        # Calculates the energy of the reconstruction.
        # NOTE(review): torch.mm(v, torch.mm(self.W, h.t())) is a
        # (batch, batch) matrix; the mean over dim=1 averages each sample's
        # interaction energy across the whole batch — confirm against the
        # paper's formulation.
        e_rec = -torch.mean(torch.mm(v, torch.mm(self.W, h.t())), dim=1)

        # Calculates the total energy averaged over the batch
        energy = torch.mean(e_h + e_v + e_rec)

        return energy

    def energy_dropout(self, e, p_prob, n_prob):
        """Performs the Energy-based Dropout over the model.

        Builds a new mask ``self.M`` where hidden units with a high
        normalized Importance Level are more likely to be kept.

        Args:
            e (torch.Tensor): Model's total energy.
            p_prob (torch.Tensor): Positive phase hidden probabilities.
            n_prob (torch.Tensor): Negative phase hidden probabilities.

        """
        # Calculates the Importance Level: (n_prob / p_prob) / |e|
        I = torch.div(torch.div(n_prob, p_prob), torch.abs(e))

        # Normalizes the Importance Level by its column-wise maximum
        I = torch.div(I, torch.max(I, 0)[0])

        # Samples a uniform probability tensor of the same shape
        p = torch.rand((I.size(0), I.size(1)), device=self.device)

        # Units whose importance falls below the random draw are dropped (0)
        self.M = (I < p).float()

    def fit(self, dataset, batch_size=128, epochs=10):
        """Fits a new RBM model.

        Args:
            dataset (torch.utils.data.Dataset): A Dataset object containing the training data.
            batch_size (int): Amount of samples per batch.
            epochs (int): Number of training epochs.

        Returns:
            MSE (mean squared error), log pseudo-likelihood and time from the training step.

        """
        # Transforming the dataset into training batches
        batches = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0)

        # For every epoch
        for epoch in range(epochs):
            logger.info('Epoch %d/%d', epoch+1, epochs)

            # Calculating the time of the epoch's starting
            start = time.time()

            # Resetting epoch's MSE and pseudo-likelihood to zero
            mse, pl = 0, 0

            # For every batch
            for samples, _ in tqdm(batches):
                # Gathering the size of the batch.
                # NOTE(review): this rebinds the `batch_size` parameter so the
                # (possibly smaller) final batch is handled, but it shadows the
                # argument for the rest of the loop.
                batch_size = samples.size(0)

                # Returns the Energy-based Dropout mask to one (no dropout)
                self.M = torch.ones(
                    (batch_size, self.n_hidden), device=self.device)

                # Flattening the samples' batch
                samples = samples.reshape(len(samples), self.n_visible)

                # Checking whether GPU is avaliable and if it should be used
                if self.device == 'cuda':
                    # Applies the GPU usage to the data
                    samples = samples.cuda()

                # Performs the initial Gibbs sampling procedure (pre-dropout)
                pos_hidden_probs, pos_hidden_states, neg_hidden_probs, neg_hidden_states, visible_states = self.gibbs_sampling(
                    samples)

                # Calculating energy of positive phase sampling
                e = self.total_energy(pos_hidden_states, samples)

                # Calculating energy of negative phase sampling
                e1 = self.total_energy(neg_hidden_states, visible_states)

                # Performing the energy-based dropout with the energy gap
                self.energy_dropout(e1 - e, pos_hidden_probs, neg_hidden_probs)

                # Performs the post Gibbs sampling procedure (post-dropout,
                # i.e., with the freshly computed mask applied)
                _, _, _, _, visible_states = self.gibbs_sampling(samples)

                # Detaching the visible states from GPU for further computation
                visible_states = visible_states.detach()

                # Calculates the loss for further gradients' computation
                cost = torch.mean(self.energy(samples)) - \
                    torch.mean(self.energy(visible_states))

                # Initializing the gradient
                self.optimizer.zero_grad()

                # Computing the gradients
                cost.backward()

                # Updating the parameters
                self.optimizer.step()

                # Calculating current's batch MSE
                batch_mse = torch.div(
                    torch.sum(torch.pow(samples - visible_states, 2)), batch_size)

                # Calculating the current's batch logarithm pseudo-likelihood
                batch_pl = self.pseudo_likelihood(samples)

                # Summing up to epochs' MSE and pseudo-likelihood
                mse += batch_mse
                pl += batch_pl

            # Normalizing the MSE and pseudo-likelihood with the number of batches
            mse /= len(batches)
            pl /= len(batches)

            # Calculating the time of the epoch's ending
            end = time.time()

            # Dumps the desired variables to the model's history
            self.dump(mse=mse.item(), pl=pl.item(), time=end-start)

            logger.info('MSE: %f | log-PL: %f', mse, pl)

        return mse, pl

    def reconstruct(self, dataset):
        """Reconstructs batches of new samples.

        Args:
            dataset (torch.utils.data.Dataset): A Dataset object containing the training data.

        Returns:
            Reconstruction error and visible probabilities, i.e., P(v|h).

        """
        logger.info('Reconstructing new samples ...')

        # Resetting MSE to zero
        mse = 0

        # Defining the batch size as the amount of samples in the dataset,
        # so the loop below runs for exactly one batch
        batch_size = len(dataset)

        # Transforming the dataset into training batches
        batches = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=0)

        # For every batch
        for samples, _ in tqdm(batches):
            # Returns the Energy-based Dropout mask to one (no dropout at
            # reconstruction time)
            self.M = torch.ones(
                (batch_size, self.n_hidden), device=self.device)

            # Flattening the samples' batch
            samples = samples.reshape(len(samples), self.n_visible)

            # Checking whether GPU is avaliable and if it should be used
            if self.device == 'cuda':
                # Applies the GPU usage to the data
                samples = samples.cuda()

            # Calculating positive phase hidden probabilities and states
            _, pos_hidden_states = self.hidden_sampling(samples)

            # Calculating visible probabilities and states
            visible_probs, visible_states = self.visible_sampling(
                pos_hidden_states)

            # Calculating current's batch reconstruction MSE
            batch_mse = torch.div(
                torch.sum(torch.pow(samples - visible_states, 2)), batch_size)

            # Summing up the reconstruction's MSE
            mse += batch_mse

        # Normalizing the MSE with the number of batches (always 1 here)
        mse /= len(batches)

        logger.info('MSE: %f', mse)

        # NOTE(review): `visible_probs` is bound inside the loop; an empty
        # dataset would raise NameError on this return.
        return mse, visible_probs
|
<reponame>remo5000/magma<gh_stars>0
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from functools import partial
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import dataclass_json
from marshmallow import fields as marshmallow_fields
from .datetime_utils import fromisoformat
# Reusable dataclass field for datetime attributes: dataclasses_json
# (de)serializes them as ISO-8601 strings, with a matching marshmallow
# DateTime field for schema generation.
DATETIME_FIELD = field(
    metadata={
        "dataclasses_json": {
            "encoder": datetime.isoformat,
            "decoder": fromisoformat,
            "mm_field": marshmallow_fields.DateTime(format="iso"),
        }
    }
)
def enum_field(enum_type):
    """Build a dataclass field that (de)serializes members of *enum_type*.

    The dataclasses_json encoder stores ``member.value`` on the wire; the
    decoder maps a raw value back through ``enum_type(value)``.
    """
    def to_raw(member):
        return member.value

    def from_raw(cls, raw):
        return cls(raw)

    metadata = {
        "dataclasses_json": {
            "encoder": to_raw,
            "decoder": partial(from_raw, enum_type),
        }
    }
    return field(metadata=metadata)
class PropertyKind(Enum):
    """Value kinds a property type may declare.

    Member values are the wire-format strings used by the GraphQL schema.
    Several member names (``int``, ``bool``, ``float``) mirror those strings
    and shadow Python builtins, which is harmless inside an Enum body.
    """
    string = "string"
    int = "int"
    bool = "bool"
    float = "float"
    date = "date"
    enum = "enum"
    range = "range"
    email = "email"
    gps_location = "gps_location"
    equipment = "equipment"
    location = "location"
    service = "service"
    datetime_local = "datetime_local"
@dataclass_json
@dataclass
class EditEquipmentTypeInput:
    """Input payload for the EditEquipmentType GraphQL mutation."""

    @dataclass_json
    @dataclass
    class EquipmentPositionInput:
        """One position slot definition on the equipment type."""
        name: str
        id: Optional[str] = None
        index: Optional[int] = None
        visibleLabel: Optional[str] = None

    @dataclass_json
    @dataclass
    class EquipmentPortInput:
        """One port definition on the equipment type."""
        name: str
        id: Optional[str] = None
        index: Optional[int] = None
        visibleLabel: Optional[str] = None
        portTypeID: Optional[str] = None
        bandwidth: Optional[str] = None

    @dataclass_json
    @dataclass
    class PropertyTypeInput:
        """One property type definition.

        ``type`` selects the kind; presumably only the matching *Value field
        is populated for a given kind — TODO confirm against the server schema.
        """
        name: str
        # Serialized by member value via enum_field (see PropertyKind).
        type: PropertyKind = enum_field(PropertyKind)
        id: Optional[str] = None
        index: Optional[int] = None
        category: Optional[str] = None
        stringValue: Optional[str] = None
        intValue: Optional[int] = None
        booleanValue: Optional[bool] = None
        floatValue: Optional[float] = None
        latitudeValue: Optional[float] = None
        longitudeValue: Optional[float] = None
        rangeFromValue: Optional[float] = None
        rangeToValue: Optional[float] = None
        isEditable: Optional[bool] = None
        isInstanceProperty: Optional[bool] = None
        isMandatory: Optional[bool] = None
        isDeleted: Optional[bool] = None

    # Fields of the outer input object.
    id: str
    name: str
    positions: List[EquipmentPositionInput]
    ports: List[EquipmentPortInput]
    properties: List[PropertyTypeInput]
    category: Optional[str] = None
@dataclass_json
@dataclass
class EditEquipmentTypeMutation:
    """GraphQL call wrapper: query text, typed response classes, and execute()."""

    __QUERY__ = """
    mutation EditEquipmentTypeMutation($input: EditEquipmentTypeInput!) {
        editEquipmentType(input: $input) {
            id
            name
            category
            propertyTypes {
                id
                name
                type
                index
                stringValue
                intValue
                booleanValue
                floatValue
                latitudeValue
                longitudeValue
                isEditable
                isInstanceProperty
            }
            positionDefinitions {
                id
                name
                index
                visibleLabel
            }
            portDefinitions {
                id
                name
                index
                visibleLabel
            }
        }
    }
    """

    @dataclass_json
    @dataclass
    class EditEquipmentTypeMutationData:
        """Decoded ``data`` portion of the GraphQL response."""

        @dataclass_json
        @dataclass
        class EquipmentType:
            """The edited equipment type as returned by the server."""

            @dataclass_json
            @dataclass
            class PropertyType:
                id: str
                name: str
                # Serialized by member value via enum_field (see PropertyKind).
                type: PropertyKind = enum_field(PropertyKind)
                index: Optional[int] = None
                stringValue: Optional[str] = None
                intValue: Optional[int] = None
                booleanValue: Optional[bool] = None
                floatValue: Optional[float] = None
                latitudeValue: Optional[float] = None
                longitudeValue: Optional[float] = None
                isEditable: Optional[bool] = None
                isInstanceProperty: Optional[bool] = None

            @dataclass_json
            @dataclass
            class EquipmentPositionDefinition:
                id: str
                name: str
                index: Optional[int] = None
                visibleLabel: Optional[str] = None

            @dataclass_json
            @dataclass
            class EquipmentPortDefinition:
                id: str
                name: str
                index: Optional[int] = None
                visibleLabel: Optional[str] = None

            id: str
            name: str
            propertyTypes: List[PropertyType]
            positionDefinitions: List[EquipmentPositionDefinition]
            portDefinitions: List[EquipmentPortDefinition]
            category: Optional[str] = None

        editEquipmentType: Optional[EquipmentType] = None

    data: Optional[EditEquipmentTypeMutationData] = None
    errors: Any = None

    @classmethod
    # fmt: off
    def execute(cls, client, input: EditEquipmentTypeInput):
        # fmt: off
        # Serialize the variables, call the GraphQL endpoint through *client*,
        # and return the decoded `data` field (None when absent). Note that
        # `input` intentionally shadows the builtin to match the GraphQL arg.
        variables = {"input": input}
        response_text = client.call(cls.__QUERY__, variables=variables)
        return cls.from_json(response_text).data
|
"""
UI for scheduling and playout of clock chimes stored as .mp3 files
Manage a church or other electronic carillon to playout .mp3 files of
user-provided songs, peals, tolls, and hourly strikes at scheduled system
time(s). Requires Python3 but no desktop environment. Creates a text
based user interface between stdin and stdout for adding, deleting, or
changing scheduled playouts. Requires the playsound module for audio
playout which in turn uses windll.winmm on Windows, AppKit.NSSound
on Apple OS, or GStreamer on Linux. The Playsound module for Python2 is
incompatible with that for Python3, so if Python2 is present on the
target system, playsound must be installed with 'pip3 install playsound'"""
# <NAME> July 2021
import threading
import time
from playsound import playsound
# The file path is prepended to user-given file names. It will be
# platform and installation-dependent.
file_path = "/home/dave/Carillon/"

# The schedule is an unordered list of playout events. Each event is
# [date, start_day, end_day, start_hour, end_hour, minute, tune].
# Default schedule for a tower clock: quarter chimes plus hourly strikes.
schedule = [
    ["", 0, 6, 0, 23, 59, "Hour"],
    ["", 0, 6, 0, 23, 0, "Strike"],
    ["", 0, 6, 0, 23, 15, "Quarter"],
    ["", 0, 6, 0, 23, 30, "Half"],
    ["", 0, 6, 0, 23, 45, "ThreeQuarter"],
]

# Weekday list and lookup table for validation/encoding of user input
# (Sunday is index 0, matching strftime("%w")).
day_list = ['su', 'mo', 'tu', 'we', 'th', 'fr', 'sa']
day_dict = {name: index for index, name in enumerate(day_list)}
def main():
    """Text UI for Scheduling playout of electronic chime sounds in .mp3 files.

    Start a playout thread then enter an infinite while loop for user input. Wait
    on user input to build or maintain an unordered list of scheduled events. The
    user may request instructions, display any existing schedule, enter a line number
    followed by space-delimited parameters to add or replace an event, or enter a
    line number alone to delete a scheduled event.
    """
    # Start the playout thread as a non-daemon process
    threading.Thread(target=playout,).start()
    # Display user instructions
    show_instructions()
    # Wait on and accept user-input continuously
    # "You can check out any time you like, but you can never leave"
    while True:
        # Show schedule in case lines were renumbered
        print("")
        show_schedule()
        while True:
            try:
                # Prompt for user input then await it
                print(">", end="")
                command = input()
                fields = command.split(" ")
                # ? shows instructions
                if fields[0] == "?":
                    show_instructions()
                    break
                # null line shows schedule
                if fields[0] == "":
                    show_schedule()
                    break
                # Otherwise, first user-input field must be a line number
                if fields[0].isnumeric():
                    line_number = int(fields[0])
                else:
                    print("Error: Input must begin with a line number")
                    break
                # Line number only is a request to delete
                if len(fields) == 1:
                    # If there is anything to delete
                    if len(schedule) > 0:
                        # Delete it
                        try:
                            schedule.pop(line_number - 1)
                        except:
                            print("No line ", line_number, " to delete")
                    break
                # Check for a complete entry, five single-spaced parameters
                if len(fields) < 5:
                    print("Error: Enter five items, separated by single space")
                    print(" Line# Day Hour(s) Minute and Tune")
                    break
                # The second user input field is date, weekday, or weekday range
                # User entered a hard date as mm/dd/yy
                if "/" in fields[1]:
                    if len(fields[1]) != 8:
                        print("Error: Date must be mm/dd/yy")
                        break
                    # Convert to integer for validation
                    month = int(fields[1].split("/")[0])
                    day = int(fields[1].split("/")[1])
                    year = int(fields[1].split("/")[2])
                    # BUG FIX: lower bounds were 0, which accepted month/day 0
                    if month < 1 or month > 12:
                        print("Error: ", month, " is not a valid month")
                        break
                    if day < 1 or day > 31:
                        print("Error: ", day, " is not a valid Day")
                        break
                    if year < 21:
                        print("Error: ", year, " is not a valid Year")
                        break
                    # Date is valid, but retrieve the string to save in event
                    date = fields[1]
                    # Make weekdays out of range to disable daily playout
                    start_day = end_day = 8
                # User entered a weekday or weekday range e.g. su, su-sa, etc.
                else:
                    # Weekday ranges are defined by a "-" delimiter
                    days = fields[1].split("-")
                    # Convert first weekday in string to an integer start day index
                    start_day = day_dict.get(days[0])
                    # Default to a single weekday if not a weekday range
                    end_day = start_day
                    # Weekday range contains a second string
                    if len(days) == 2:
                        # Convert second weekday in string to an integer end day index
                        end_day = day_dict.get(days[1])
                    elif len(days) > 2:
                        print("Error: Weekday list. Use multiple events instead")
                        break
                    # If the weekday is not in the dictionary
                    if start_day == None or end_day == None:
                        print("Error: Day(s) must be su, mo, tu, we, th, fr or sa")
                        break
                    # User specified weekday(s) so null the hard date field in event
                    date = ""
                # The third user input field is an hour or hour range
                hours = fields[2]
                start_hour = hours.split("-")[0]
                # Default to single hour
                end_hour = start_hour
                # An hour range is delimited by "-"
                if len(hours.split("-")) == 2:
                    end_hour = hours.split("-")[1]
                # Hour lists not supported
                elif len(hours.split("-")) > 2:
                    print("Error: Hour range must be start-end")
                    break
                # Convert start hour to integer for validation
                if start_hour.isnumeric():
                    start_hour = int(start_hour)
                else:
                    print("Start Hour(s) ", start_hour, " must be numeric")
                    break
                if start_hour < 0 or start_hour > 23:
                    print("Start Hour ", start_hour, " must between 0 and 23")
                    break
                # Convert end hour to integer for validation
                if end_hour.isnumeric():
                    end_hour = int(end_hour)
                else:
                    print("End Hour ", end_hour, " must be numeric")
                    break
                if end_hour < 0 or end_hour > 23:
                    print("End Hour ", end_hour, " must be between 0 and 23")
                    # BUG FIX: this error previously fell through and scheduled anyway
                    break
                # The fourth user input field is a minute value, never a range
                minute = fields[3]
                # Convert minute to integer for validation.
                # BUG FIX: isnumeric was referenced, not called, so any value passed
                if minute.isnumeric():
                    minute = int(minute)
                else:
                    print("Minute ", minute, " must be numeric")
                    break
                if minute < 0 or minute > 59:
                    print("Minute ", minute, " must be between 0 and 59")
                    break
                # The fifth user input field is a file name (.mp3 file) which must
                # allow for embedded spaces or the "Strike" keyword
                file = command.split(" ", 4)[4]
                # The strike keyword is case insensitive and
                # will be parsed into Strikexx.mp3 by the playout thread when used
                if file.lower() == "strike":
                    # BUG FIX: store the normalized keyword so playout's
                    # case-sensitive == "Strike" comparison always matches
                    file = "Strike"
                    # Striking requires twelve Strikehh files
                    files_ok = True
                    for i in range(12):
                        strike_name = file_path + "Strike" + str(i + 1) + ".mp3"
                        try:
                            # BUG FIX: close the probe handle (was leaked) and
                            # actually check all twelve files (the loop used to
                            # stop after the first success)
                            with open(strike_name, "r") as f:
                                if not f.readable():
                                    raise IOError
                        except:
                            print("Error: File ", strike_name, " is missing")
                            files_ok = False
                            break
                    if not files_ok:
                        # BUG FIX: a missing file used to exit only the for-loop
                        # and the event was scheduled anyway
                        break
                # Validate the user-supplied file name
                else:
                    if file.lower().endswith(".mp3"):
                        file_name = file_path + file
                        # BUG FIX: store the tune without its extension, since
                        # playout appends ".mp3" again at play time
                        file = file[:-4]
                    else:
                        file_name = file_path + file + ".mp3"
                    try:
                        with open(file_name, "r") as f:
                            if not f.readable():
                                print("Error: ", file_name, " is not readable")
                                break
                    except:
                        print("Error:", file_name, " not found - check sPeLLing")
                        break
                # The user's input line is fully parsed and validated
                # No break out of inner while loop due to a user error
                # Build an event list and insert or append it to schedule list
                # An event is an ordered list of parameters of mixed types defining
                # date, weekday or day range, hour or hour range, minute, and
                # a filename or the keyword "Strike"
                event = []
                event.append(date)        # nul for day range or hard date as mm/dd/yy string
                event.append(start_day)   # int starting weekday number (8 = disabled)
                event.append(end_day)     # int ending weekday number (8 = disabled)
                event.append(start_hour)  # int starting hour
                event.append(end_hour)    # int ending hour
                event.append(minute)      # int minute
                event.append(file)        # prepend path, append .mp3 at playout
                # Append the event to or insert into in the schedule
                if line_number - 1 >= len(schedule):
                    schedule.append(event)
                else:
                    schedule[line_number - 1] = event
            # Mop up any unanticipated error in parsing user input
            except:
                print("Unanticipated Error: User input line discarded")
        # wend of user input loop
def show_schedule():
    """Display the list of scheduled events.

    Events are displayed in line order in roughly the same space-delimited
    format they were (or should have been) entered in. Consecutive line
    numbers are prepended, new if an event has been deleted, to aid the user
    in editing"""
    # Column header; alignment with the rows below is approximate
    print('Day(s) Hr(s) Min Tune')
    for i in range(0, len(schedule)):
        # 1-based line number prefix, e.g. "3: "
        print(i+1, end='')
        print(": ", end='')
        # The first item in the event may be a hard date string
        if schedule[i][0] != "":
            print(schedule[i][0], end=" ")
        # or it may be a weekday number or weekday number range
        else:
            # Print the weekday or first weekday of a range
            print(day_list[schedule[i][1]], end='')
            # and complete it if it's a range
            if day_list[schedule[i][2]] != day_list[schedule[i][1]]:
                print("-", end='')
                print(day_list[schedule[i][2]], end=' ')
            else:
                # pad a single weekday to keep the columns aligned
                print(" ", end='')
        # Display the event's hour or hour range
        print(schedule[i][3], end='')
        if schedule[i][3] != schedule[i][4]:
            print("-", end='')
            print(schedule[i][4], end=' ')
        else:
            # pad a single hour to keep the columns aligned
            print(" ", end="")
        # Display the event's minute
        print(schedule[i][5], end=" ")
        # Finally display the strike keyword or file name
        print(schedule[i][6])
def playout():
    """Playout .mp3 files from a schedule list at :00 per their respective time-stamps.

    Upon launch or reawaken, the playout thread waits on the system minute, scans the
    schedule list, and plays any entries. Upon completion of the last entry, it calculates
    the time to the next system minute then requests sleep from the platform. Upon wake,
    it waits for the system minute then scans again. Note that a playout of length exceeding
    one minute may play instead of another event scheduled for that or the next minute.
    In case an unanticipated user error makes its way into the schedule list or the
    list is otherwise corrupted, an error is displayed, the event is removed from the
    schedule, and both UI and playout continue."""
    import datetime
    # Always keep running, even in the aftermath of user error
    while True:
        # Synchronize to the next even minute by waiting on it.
        # NOTE(review): this is a busy-wait that spins a core for up to a
        # second each wake-up; a short sleep in the loop would be kinder.
        while datetime.datetime.now().strftime("%S") != "00":
            pass
        # Synchronized
        try:
            # Scan all events in the schedule list
            for i in range(0, len(schedule)):
                # Parse for reasons why the ith event in the schedule isn't to be played or
                # struck right now(). Continue immediately to the next schedule item in
                # the for-loop if it isn't
                # Event is on hard date, continue if not today.
                # NOTE(review): "%x" is locale-dependent; this assumes the
                # locale formats dates as mm/dd/yy like the UI — confirm.
                if schedule[i][0] != "" and \
                   datetime.datetime.now().strftime("%x") != schedule[i][0]:
                    continue
                # Event is on a weekday range (8 means disabled), continue if
                # now() is before it ("%w" gives 0=Sunday, matching day_dict;
                # .lower() on a digit string is a no-op)
                if schedule[i][1] < 8 and \
                   int((datetime.datetime.now().strftime("%w")).lower()) < schedule[i][1]:
                    continue
                # ... or after it
                if schedule[i][2] < 8 and \
                   int((datetime.datetime.now().strftime("%w")).lower()) > schedule[i][2]:
                    continue
                # Continue if event is set for a later hour today
                if int((datetime.datetime.now().strftime("%H"))) < schedule[i][3]:
                    continue
                # ... or an earlier hour, so it must have already happened
                if int((datetime.datetime.now().strftime("%H"))) > schedule[i][4]:
                    continue
                # continue if not right this minute (and second)
                if int((datetime.datetime.now().strftime("%M"))) != schedule[i][5]:
                    continue
                # All reasons a scheduled entry is not to be played now() have been cleared
                # So if its a strike, build the strike file name and play the strikes
                if schedule[i][6] == "Strike":
                    # Strike 12 hour time, with 12 strikes at Noon and Midnight
                    hour = int(datetime.datetime.now().strftime("%H")) % 12
                    if hour == 0:
                        hour = 12
                    playsound(file_path + "Strike" + str(hour) + ".mp3")
                # Its a previously validated file, so prepend the path, append the type,
                # and play it
                else:
                    ##print("Playing ",file_path+schedule[i][6]+".mp3")
                    playsound(file_path + schedule[i][6] + ".mp3")
                    # Playsound returns control only when play is done
                # wend of test for all parameters of the ith event or play it
            # end of for-next loop to examine all events in the schedule
        # Mop up any oversignt in prior input validations or corruption of the schedule
        # list. Display an error and the failed event then delete that event. Keep
        # playout() running. Assume main() is still running, so display a replacement
        # user input prompt after the error messages.
        # NOTE(review): if the exception fires before the for-loop binds `i`
        # (e.g. on the very first scan), the handler itself raises NameError
        # and this thread dies — confirm intended.
        except:
            print("Internal Error: A scheduled event could not be played")
            print("Event ", i + 1, schedule[i])
            print("Event ", i + 1, " deleted. Resuming schedule")
            show_schedule()
            print(">", end="")
            schedule.pop(i)
        # All scheduled events checked and those for this minute played
        # Nothing to do until the next :00 so put this playout thread to sleep
        time.sleep(59 - int(datetime.datetime.now().strftime("%S")))
    # wend to keep playout thread from exiting unless main() is closed
def show_instructions():
    """Print the scheduling UI help text to stdout."""
    help_text = """- Enter Line# Day(s) Hour(s) Minute and File Name or Strike..
- Separate line# and event parameters with a single space.
- Day is mm/dd/yy, su, mo, tu, we, th, fr, or sa.
- Hour is 24-hour time between 0 and 23.
- Minute is between 0 to 59. Events play at hh:mm:00.
- Ranges are allowed and inclusive: 0-23 = hourly, su-sa = daily.
- Tunes are filenames and are cAsE SeNsiTiVe.
- Line#<enter> to delete a line.
- ?<enter> to repeat these instructions"""
    print(help_text)
# Run main() as a standalone program; the guard keeps the UI from starting
# when this module is imported.
if __name__ == "__main__":
    main()
|
from flask import Flask, request, jsonify, url_for
import db
import traceback
app = Flask(__name__)
# Emit non-ASCII characters verbatim (UTF-8) in JSON responses instead of \uXXXX escapes.
app.config['JSON_AS_ASCII'] = False
@app.errorhandler(Exception)
def exception_handler(error):
    """Convert any unhandled exception into a JSON 500 response.

    The response body contains a generic message plus the traceback split
    into a list of lines (one list element per line).
    """
    # format_exc() already returns a str, so the str() wrapper was redundant.
    tracelist = traceback.format_exc().split('\n')
    # NOTE(review): returning tracebacks to clients leaks implementation
    # details — acceptable for a development tool, confirm before exposing
    # this service publicly.
    return jsonify({"message": "Internal server error", "trace": tracelist}), 500
def message(message, code):
    """Build a (JSON body, HTTP status) tuple carrying a single message string."""
    body = {"message": message}
    return jsonify(body), code
@app.route("/", methods=["GET", "POST"])
def root():
    "HTML client"
    # POST echoes back the parsed JSON body and the query-string arguments;
    # GET serves the static HTML client page.
    if request.method == "POST":
        payload = request.get_json(force=True)
        return jsonify({"json": payload, "query": request.args})
    return app.send_static_file('index.html')
@app.route("/swagger", methods=["GET"])
def swagger():
    "Swagger client"
    # Serve the static Swagger UI page.
    page = 'swagger.html'
    return app.send_static_file(page)
@app.route("/api", methods=["GET"])
def api_list():
    "List API endpoints"
    # Introspect the URL map; each endpoint's description is taken from the
    # docstring of its view function.
    apilist = [
        {
            "url": str(rule),
            "methods": list(rule.methods),
            "desc": app.view_functions[rule.endpoint].__doc__,
        }
        for rule in app.url_map.iter_rules()
    ]
    return jsonify({"api": apilist})
@app.route("/api/conn", methods=["GET", "POST"])
def conn():
    "Get list of open connections, open new connection"
    if request.method == "GET":
        # One summary entry per currently open connection token.
        conns = [{"token": token, "desc": db.conndict[token].desc}
                 for token in db.conndict]
        return jsonify(conns)
    elif request.method == "POST":
        # Renamed local from `json` to avoid shadowing the stdlib module name;
        # also dropped a stray trailing semicolon.
        payload = request.get_json(force=True)
        connstr = payload.get("conn")
        if connstr is None:
            # NOTE(review): hard-coded default credentials — fine for a demo,
            # confirm before any shared deployment.
            connstr = 'scott/oracle@orcl'
        desc = payload.get("desc", "")
        token = db.open_connection(connstr, desc)
        return jsonify({"token": token, "desc": desc}), 201
@app.route("/api/conn/<token>", methods=["GET", "POST", "DELETE"])
def conn_id(token):
    "Execute code within connection specified by token, close connection"
    if request.method == "GET":
        # Describe the connection behind the token.
        if token in db.conndict:
            c = db.conndict[token]
            return jsonify({"desc": c.desc, "token": token})
        return message("token %s not found" % token, 404)
    if request.method == "DELETE":
        # Close and forget the connection.
        t = db.close_connection(token)
        if t is None:
            return message("token %s not found" % token, 404)
        return message("token %s deleted" % token, 200)
    # POST: execute SQL on the connection identified by the token.
    # Local renamed from `conn` to avoid shadowing the sibling view function.
    connection = db.get_connection(token)
    if connection is None:
        return message("token %s not found" % token, 404)
    payload = request.get_json(force=True)
    sql = payload.get("sql")
    if sql is None:
        return message("sql key not in json data", 400)
    invars = payload.get("invars", {})
    outvars = payload.get("outvars", {})
    fetchmax = payload.get("fetchmax", 500)
    try:
        # An explicit null in the JSON falls back to the default of 500 rows.
        fetchmax = 500 if fetchmax is None else int(fetchmax)
    except ValueError:
        return message("invalid fetchmax key format", 400)
    if fetchmax < 1:
        return message("number of rows to fetch should be greater than 0", 400)
    # Open the cursor only after all validation, and guarantee it is closed
    # even if execute_sql raises (the original leaked the cursor on error).
    cur = connection.cursor()
    try:
        desc, data = db.execute_sql(cur, sql, fetchmax, invars, outvars)
    finally:
        cur.close()
    return jsonify({"desc": desc, "data": data, "sql": sql, "fetchmax": fetchmax,
                    "invars": invars, "outvars": outvars})
#db.open_connection('scott/oracle@orcl','First connection')
#db.open_connection('scott/oracle@orcl','Second connection')
if __name__=="__main__":
    # NOTE(review): debug=True enables the interactive Werkzeug debugger and
    # 0.0.0.0 listens on all interfaces — development use only.
    app.run(host='0.0.0.0',port=8000, debug=True)
|
# Source repository: markjin1990/foofah
from timeit import default_timer as timer
import json
import Queue
import argparse
import os
import csv
from tabulate import tabulate
from foofah_libs.foofah_node import FoofahNode
import foofah_libs.operators as Operations
import numpy as np
from foofah_libs.generate_prog import create_python_prog
# Maximum allowed synthesis path length (unbounded by default).
MAX_STEPS = float("inf")
# Search-algorithm selector values (see the `algo` parameter of a_star_search).
ALGO_BFS = 0
ALGO_A_STAR = 1
ALGO_A_STAR_NAIVE = 2
ALGO_AWA = 3  # anytime weighted A*; not handled inside a_star_search — presumably used elsewhere
def reconstruct_path(current):
    """Walk parent links from *current* back to the root.

    Returns the list of nodes ordered from *current* to the root, or an
    empty list when *current* is None.
    """
    if current is None:
        return []
    path = [current]
    node = current
    while node.parent is not None:
        node = node.parent
        path.append(node)
    return path
def a_star_search(raw_data, target, ops, debug=0, timeout=300, algo=ALGO_A_STAR, batch=True,
                  epsilon=1, bound=float("inf"), p1=True, p2=True, p3=True):
    """Best-first search for an operator sequence turning raw_data into target.

    Args:
        raw_data: input table (list of rows) wrapped into the root FoofahNode.
        target: desired output table; used to build the goal node.
        ops: candidate operators passed to FoofahNode.make_children.
        debug: verbosity (>=1 prints each expanded node, >=2 each child).
        timeout: wall-clock budget in seconds; search stops when exceeded.
        algo: ALGO_BFS, ALGO_A_STAR, or ALGO_A_STAR_NAIVE (heuristic choice).
        batch: forwarded to get_h_score for the A* heuristic.
        epsilon: weight on the heuristic (weighted A* when > 1).
        bound: depth bound forwarded to make_children.
        p1, p2, p3: pruning-rule toggles forwarded to make_children.

    Returns:
        (final_node, open_queue, closed_nodes); final_node is None when no
        solution was found within the time budget.
    """
    FoofahNode.target = target
    # Synthetic start/end operators bracket the search; cost 1.0 for start.
    root_op = ({'fxn': None, 'name': 'start', 'char': 'start', 'cost': 1.0}, 0)
    root = FoofahNode(raw_data, root_op, None, {})
    goal_op = ({'fxn': None, 'name': 'end', 'char': 'end', 'cost': 0.0}, 0)
    goal_node = FoofahNode(target, goal_op, None, {})
    FoofahNode.goal_node = goal_node
    root.g_score = 0.0
    # Heuristic choice depends on the selected algorithm.
    if algo == ALGO_BFS:
        root.h_score = 0
    elif algo == ALGO_A_STAR:
        root.h_score = root.get_h_score(batch=batch)
    elif algo == ALGO_A_STAR_NAIVE:
        root.h_score = root.get_h_score_rule()
    root.f_score = root.g_score + epsilon * root.h_score
    # Switch to using priority queue because it is thread safe
    open_q = Queue.PriorityQueue()
    # NOTE(review): open_q_cache is never assigned anywhere in this function,
    # so the `if open_q_cache:` branch at the end is dead code here.
    open_q_cache = None
    cost_q = {}         # best known f_score per node, used to avoid re-queueing worse copies
    closed_nodes = set()
    final_node = None
    start_time = timer()
    open_q.put(root)
    while not open_q.empty():
        node = open_q.get(block=False)
        cur_time = timer()
        if cur_time - start_time > timeout:
            print "*** Exceeded time limit of %d seconds" % timeout
            break
        if debug >= 1:
            if node.parent:
                print "f_score:", node.f_score, "h_score:", node.h_score, "g_score:", node.g_score, "id:", node.node_id, "p_id:", node.parent.node_id, "depth:", node.depth, node, node.contents
                print
            else:
                print "f_score:", node.f_score, "h_score:", node.h_score, "g_score:", node.g_score, "id:", node.node_id, "p_id:", "None", "depth:", node.depth, node, node.contents
                print
        closed_nodes.add(node)
        if node == goal_node:
            final_node = node
            break
        my_children = node.make_children(ops, bound=bound, p1=p1, p2=p2, p3=p3)
        for c in my_children:
            if c in closed_nodes:
                continue
            if algo == ALGO_BFS:
                c.h_score = 0
                c.g_score = node.g_score + node.operation[0]['cost']
            elif algo == ALGO_A_STAR:
                c.h_score = c.get_h_score(batch=batch)
                c.g_score = node.g_score + node.operation[0]['cost']
            elif algo == ALGO_A_STAR_NAIVE:
                c.h_score = c.get_h_score_rule()
                c.g_score = node.g_score + node.operation[0]['cost']
            # Check if destination has been found, if it is, return.
            if c.h_score == 0:
                if c == goal_node:
                    final_node = c
                open_q.put(c)
                cost_q[c] = c.f_score
                if debug >= 2:
                    if c.parent:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", c.parent.node_id, "depth:", c.depth, c, c.contents
                    else:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", "None", "depth:", c.depth, c, c.contents
                # NOTE(review): returns immediately when any child reaches
                # h == 0, even if that child is not the goal node (final_node
                # may still be None in that case) — confirm this is intended.
                return final_node, open_q, closed_nodes
            c.f_score = c.g_score + epsilon * c.h_score
            # Enqueue only if this is a strictly better copy with a finite score.
            if (c not in cost_q or (c in cost_q and c.f_score < cost_q[c])) and c.f_score < float("inf"):
                open_q.put(c)
                cost_q[c] = c.f_score
                if debug >= 2:
                    if c.parent:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", c.parent.node_id, "depth:", c.depth, c, c.contents
                    else:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", "None", "depth:", c.depth, c, c.contents
    if open_q_cache:
        while open_q.qsize() > 0:
            open_q_cache.put(open_q.get())
        return final_node, open_q_cache, closed_nodes
    else:
        return final_node, open_q, closed_nodes
def extract_table(raw_data):
    """Try to parse a single-cell table as delimited text.

    When *raw_data* is exactly one row with one cell, the cell is split into
    lines and parsed with every (delimiter, quotechar) combination in order;
    the first parse in which every row has the same number of columns wins.
    Otherwise *raw_data* is returned unchanged.
    """
    if len(raw_data) != 1 or len(raw_data[0]) != 1:
        return raw_data
    lines = raw_data[0][0].splitlines()
    # Delimiters are tried in this fixed order; the first uniform parse wins.
    for sep in ("\t", ",", " "):
        for quote in ("'", '"'):
            candidate = list(csv.reader(lines, delimiter=sep, quotechar=quote))
            widths = {len(row) for row in candidate}
            if len(widths) == 1:
                return candidate
    return raw_data
def main():
    """Command-line entry point for the Foofah synthesizer.

    For each input JSON test file, runs the selected search algorithm to find
    an operator sequence transforming InputTable into OutputTable, then prints
    either the synthesized Python program or (with --details) per-step tables,
    and optionally validates the program against TestingTable.
    """
    final_node = None
    open_nodes = None
    closed_nodes = None
    FoofahNode.if_awa = False
    #
    # Command Line Arguments
    #
    parser = argparse.ArgumentParser()
    parser.add_argument('--details', action='store_true', default=False,
                        help="Print the detailed synthesized programs and intermediate tables")
    parser.add_argument('--input', type=str, nargs='+',
                        help="List of input test data files separated by spaces")
    parser.add_argument('--debug_level', type=int, default=0,
                        help="Debug level. 0 = none, 1 = simple, etc.")
    parser.add_argument('--timeout', type=int, default=300,
                        help="Search will stop after this many seconds.")
    parser.add_argument('--auto_read', action='store_true', help="Automatically read csv file using csv reader")
    parser.add_argument('--validate', action='store_true', default=False,
                        help="Validating the correctness of synthesized program")
    parser.add_argument('--search_algo', type=int, default=1,
                        help="Searh algorithm: 0 = BFS, 1 (default) = A*, 2 = naive heuristic")
    parser.add_argument('--no_batch', action='store_true', default=False, help="Disable batch")
    parser.add_argument('--weight', type=float, default=1, help="Weighted A*")
    parser.add_argument('--bound', type=float, default=float("inf"), help="Depth bound")
    parser.add_argument('--p1off', action='store_true', default=False, help="turn off prune rule 1")
    parser.add_argument('--p2off', action='store_true', default=False, help="turn off prune rule 2")
    parser.add_argument('--p3off', action='store_true', default=False, help="turn off prune rule 3")
    parser.add_argument('--globalPruneOff', action='store_true', default=False, help="turn off global pruning rules")
    parser.add_argument('--opPruneOff', action='store_true', default=False, help="turn off operator pruning rules")
    parser.add_argument('--wrap1off', action='store_true', default=False, help="turn off 1st wrap operator")
    parser.add_argument('--wrap2off', action='store_true', default=False, help="turn off 2nd wrap operator")
    parser.add_argument('--wrap3off', action='store_true', default=False, help="turn off 3rd wrap operator")
    #
    # Read Command Line Arguments
    #
    args = parser.parse_args()
    if_detail = args.details
    input_files = args.input
    debug_level = args.debug_level
    timeout = args.timeout
    if_auto_read = False
    if args.auto_read:
        if_auto_read = True
    if_validate = args.validate
    search_algo = args.search_algo
    if_batch = not args.no_batch
    epsilon = args.weight
    bound = args.bound
    p1off = args.p1off
    p2off = args.p2off
    p3off = args.p3off
    op_prune_off = args.opPruneOff
    wrap1off = args.wrap1off
    wrap2off = args.wrap2off
    wrap3off = args.wrap3off
    # Flags below flip module-level switches in the operator library.
    if op_prune_off:
        Operations.PRUNE_1 = False
    if wrap1off:
        Operations.WRAP_1 = False
    if wrap2off:
        Operations.WRAP_2 = False
    if wrap3off:
        Operations.WRAP_3 = False
    global_prune_off = args.globalPruneOff
    if global_prune_off:
        p1off = True
        p2off = True
        p3off = True
    if input_files is None or len(input_files) == 0:
        print "*** No test input file specified. ***"
        exit()
    test_files = input_files
    for test_file in test_files:
        with open(test_file, 'rb') as f:
            test_data = json.load(f)
        # Python 2: map() returns a list, so each row becomes a list of str.
        raw_data = [map(str, x) for x in test_data['InputTable']]
        target = [map(str, x) for x in test_data['OutputTable']]
        if if_auto_read:
            raw_data = extract_table(raw_data)
        start = timer()
        if search_algo == ALGO_BFS:
            final_node, open_nodes, closed_nodes = a_star_search(raw_data, target, Operations.add_ops(), debug_level,
                                                                 timeout, batch=if_batch, algo=search_algo,
                                                                 p1=not p1off, p2=not p2off, p3=not p3off)
        elif search_algo == ALGO_A_STAR:
            final_node, open_nodes, closed_nodes = a_star_search(raw_data, target, Operations.add_ops(), debug_level,
                                                                 timeout, batch=if_batch, epsilon=epsilon,
                                                                 bound=bound, algo=search_algo, p1=not p1off,
                                                                 p2=not p2off,
                                                                 p3=not p3off)
        elif search_algo == ALGO_A_STAR_NAIVE:
            final_node, open_nodes, closed_nodes = a_star_search(raw_data, target, Operations.add_ops(), debug_level,
                                                                 timeout, batch=if_batch, epsilon=epsilon,
                                                                 bound=bound, algo=search_algo, p1=not p1off,
                                                                 p2=not p2off,
                                                                 p3=not p3off)
        end = timer()
        if final_node:
            path = reconstruct_path(final_node)
            # Some statistics
            num_visited = len(closed_nodes)
            nodes_created = open_nodes.qsize() + len(closed_nodes)
            # Effective branching factor b: largest real root of
            # b^L + b^(L-1) + ... + b = nodes_created, where L = len(path).
            poly = np.ones(len(path) + 1)
            poly[len(path)] = -nodes_created
            branch_factor = max(np.real(np.roots(poly)))
            if not if_detail:
                program = create_python_prog(path, raw_data)
                print "#", "-" * 50
                print "# A Program Has Been Successfully Synthesized"
                print "#"
                print "# Input file:", test_file
                print "# Total operations:", len(path) - 1
                print "# Time elapsed: %.3f s Nodes visited: %d Nodes created: %d" % (
                    (end - start), num_visited, nodes_created)
                print "# Naive branching factor: %d Effective branching factor: %.2f" % (
                    len(Operations.add_ops()), branch_factor)
                print "# Make child time: %.2f s Heuristic time: %.2f s" % (
                    sum(final_node.times['children']), sum(final_node.times['scores']))
                print "#", "-" * 50
                print
                print program
            else:
                print "-" * 50
                # NOTE(review): train_data is collected below but never used
                # afterwards in this function — confirm whether it should be
                # persisted somewhere.
                train_data = []
                for i, n in enumerate(reversed(path)):
                    # Operations including transpose, unfold and unfold_header do not have parameters
                    if len(n.operation) > 1:
                        if n.operation[1]:
                            print "%2d. %-13s at %d: H-score: %.1f Actual: %d" % (
                                i + 1, n.operation[0]['name'], n.operation[1], n.h_score, len(path) - i - 1)
                        else:
                            print "%2d. %-13s : H-score: %.1f Actual: %d" % (
                                i + 1, n.operation[0]['name'], n.h_score, len(path) - i - 1)
                        print tabulate(n.contents, tablefmt="grid")
                    else:
                        print "%2d. %-13s: H-score: %.1f Actual: %d" % (
                            i + 1, n.operation[0]['name'], n.h_score, len(path) - i - 1)
                        print tabulate(n.contents, tablefmt="grid")
                    remaining_steps = len(path) - i - 1
                    if remaining_steps > 0:
                        temp = dict()
                        temp["raw_table"] = n.contents
                        temp["target_table"] = target
                        temp["steps"] = remaining_steps
                        train_data.append(temp)
                if final_node.contents != target:
                    print
                    print "%2d. Only \"Moves\" are needed to create a extact same view as target (TO BE COMPLETED)." % (
                        len(path) + 1)
                    print
                print "-" * 50
                print "Input file:", test_file
                print "Total operations:", len(path) - 1
                print "Time elapsed: %.3f s Nodes visited: %d Nodes created: %d" % (
                    (end - start), num_visited, nodes_created)
                print "Naive branching factor: %d Effective branching factor: %.2f" % (len(Operations.add_ops()), branch_factor)
                print "Make child time: %.2f s Heuristic time: %.2f s" % (
                    sum(final_node.times['children']), sum(final_node.times['scores']))
            # Optionally re-apply the synthesized operator sequence to a
            # held-out TestingTable and record the outcome to disk.
            # (Indentation reconstructed: assumed to run regardless of
            # --details — confirm against the original layout.)
            if if_validate:
                test_table = test_data['TestingTable']
                try:
                    for i, node in enumerate(reversed(path)):
                        if i > 0:
                            op = node.operation[0]
                            if op['num_params'] == 1:
                                test_table = op['fxn'](test_table)
                            else:
                                test_table = op['fxn'](test_table, node.operation[1])
                # NOTE(review): bare except treats any operator failure as
                # validation failure; it also swallows unexpected bugs.
                except:
                    test_table = None
                if test_table:
                    test_data["TransformedTestTable"] = test_table
                    test_data["Success"] = True
                    print "-" * 50
                    print "Experiment 1: Apply the synthetic program on other data"
                    print "-" * 30
                    print "Testing Table"
                    print tabulate(test_data['TestingTable'], tablefmt="grid")
                    print "Transformed Table"
                    print tabulate(test_data["TransformedTestTable"], tablefmt="grid")
                    print "-" * 30
                    print "Result: Success"
                    print "-" * 50
                else:
                    test_data["TransformedTestTable"] = test_table
                    test_data["Success"] = False
                    print "-" * 50
                    print "Experiment 1: Apply the synthetic program on other data"
                    print "-" * 30
                    print "Testing Table"
                    print tabulate(test_data['TestingTable'], tablefmt="grid")
                    print "-" * 30
                    print "Result: Failure"
                    print "-" * 50
                dirname = os.getcwd() + "/test_results/validate"
                filename = dirname + "/exp0_results_" + str(test_data['TestName']) + "_" + str(
                    test_data['NumSamples']) + ".txt"
                if not os.path.exists(dirname):
                    try:
                        os.makedirs(dirname)
                    except OSError:
                        raise
                with open(filename, 'w') as outfile:
                    json.dump(test_data, outfile)
        else:
            print "*** Solution Not Found ***"
# Run the Foofah synthesizer CLI when executed directly.
if __name__ == "__main__":
    main()
|
# Source repository: MissMeriel/BeamNGpy
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from PIL import Image
from shapely.geometry import Polygon
from beamngpy import BeamNGpy, Vehicle, Scenario, Road
from beamngpy.sensors import Camera
# Connect to a locally running BeamNG.research instance.
beamng = BeamNGpy('localhost', 64256, home='C:/Users/merie/Documents/BeamNG.research.v1.7.0.0')
scenario = Scenario('GridMap', 'vehicle_bbox_example')
road = Road('track_editor_C_center', rid='main_road', texture_length=5)
# Straight road from orig to goal; the 4th node component is the road width (7).
orig = (-107, 70, 0)
goal = (-300, 70, 0)
road.nodes = [
    (*orig, 7),
    (*goal, 7),
]
scenario.add_road(road)
vehicle = Vehicle('ego_vehicle', model='etk800', licence='PYTHON')
# Overhead camera mounted above/behind the vehicle, tilted down.
overhead = Camera((0, -10, 5), (0, 1, -0.75), 60, (1024, 1024))
vehicle.attach_sensor('overhead', overhead)
scenario.add_vehicle(vehicle, pos=orig)
scenario.make(beamng)
bng = beamng.open()
bng.load_scenario(scenario)
bng.start_scenario()
# Pull the road edge geometry back from the simulator for plotting.
road_geometry = bng.get_road_edges('main_road')
left_edge_x = np.array([e['left'][0] for e in road_geometry])
left_edge_y = np.array([e['left'][1] for e in road_geometry])
right_edge_x = np.array([e['right'][0] for e in road_geometry])
right_edge_y = np.array([e['right'][1] for e in road_geometry])
def plot_road(ax):
    """Draw both road edges on *ax*, padded by 10 units on every side."""
    pad = 10
    x_lo = min(left_edge_x.min(), right_edge_x.min()) - pad
    x_hi = max(left_edge_x.max(), right_edge_x.max()) + pad
    y_lo = min(left_edge_y.min(), right_edge_y.min()) - pad
    y_hi = max(left_edge_y.max(), right_edge_y.max()) + pad
    ax.set_aspect('equal', 'datalim')
    # pyplot and BeamNG use different coordinate origins, so both axes are
    # deliberately inverted (max on the left/bottom).
    ax.set_xlim(left=x_hi, right=x_lo)
    ax.set_ylim(bottom=y_hi, top=y_lo)
    ax.plot(left_edge_x, left_edge_y, 'b-')
    ax.plot(right_edge_x, right_edge_y, 'b-')
plt.figure(figsize=(10, 10))
plot_road(plt.gca())
plt.show()
# Build an AI driving script: a sinusoidal weave along -x from orig toward goal.
script = [{'x': orig[0], 'y': orig[1], 'z': .5, 't': 0}]
i = 0.2
while script[-1]['x'] > goal[0]:
    node = {
        'x': -10 * i + orig[0],
        'y': 8 * np.sin(i) + orig[1],
        'z': 0.3,
        't': 1.5 * i,  # timestamp for this waypoint
    }
    script.append(node)
    i += 0.2
script_x = [s['x'] for s in script]
script_y = [s['y'] for s in script]
def plot_script(ax):
    """Overlay the AI driving script waypoints as a yellow line on *ax*."""
    ax.plot(script_x, script_y, 'y-')
plt.figure(figsize=(10, 10))
plot_road(plt.gca())
plot_script(plt.gca())
plt.show()
# Hand the script to the vehicle AI and advance the simulation one step.
vehicle.ai_set_script(script)
bng.pause()
bng.step(1)
# Build a closed polygon of the road surface: left edge forward, right edge back.
road_poly = list(zip(left_edge_x, left_edge_y))
road_poly.extend(zip(right_edge_x[::-1], right_edge_y[::-1]))
road_poly = Polygon(road_poly)
def inbounds(bbox_x, bbox_y):
    """Return True when more than half of the bbox polygon overlaps the road."""
    corners = list(zip(bbox_x, bbox_y))
    box = Polygon(corners)
    overlap = box.intersection(road_poly)
    return overlap.area / box.area > 0.5
def plot_bbox(ax):
    """Plot the vehicle's ground-level bounding box: green if on the road,
    red otherwise (see inbounds)."""
    bbox = vehicle.get_bbox()
    # Closed loop of the four bottom corners (first corner repeated).
    corners = [
        'front_bottom_left',
        'front_bottom_right',
        'rear_bottom_right',
        'rear_bottom_left',
        'front_bottom_left',
    ]
    boundary_x = [bbox[c][0] for c in corners]
    boundary_y = [bbox[c][1] for c in corners]
    style = 'g-' if inbounds(boundary_x, boundary_y) else 'r-'
    ax.plot(boundary_x, boundary_y, style)
# Combined view: road, planned script, and current vehicle bounding box.
plt.figure(figsize=(10, 10))
plot_road(plt.gca())
plot_script(plt.gca())
plot_bbox(plt.gca())
plt.show()
def plot_overhead(ax):
    """Render the vehicle's overhead camera frame onto *ax*."""
    frame = bng.poll_sensors(vehicle)['overhead']['colour']
    # Drop alpha channel as it messes up the plot
    rgb = frame.convert('RGB')
    ax.imshow(np.asarray(rgb))
    ax.set_aspect('equal', 'datalim')
plt.figure(figsize=(10, 10))
plot_overhead(plt.gca())
plt.show()
plt.clf()
plt.close()
# 10 snapshots over time: left column map view, right column camera view.
fig, ax = plt.subplots(10, 2, figsize=(20, 100))
for row in range(10):
    # Advance the (paused) simulation 400 steps between snapshots.
    bng.step(400)
    plot_road(ax[row, 0])
    plot_script(ax[row, 0])
    plot_bbox(ax[row, 0])
    plot_overhead(ax[row, 1])
plt.show()
# Source repository: pjeanjean/dakara-player
import logging
import os
from threading import Timer
import mpv
from dakara_player_vlc.media_player import MediaPlayer
from dakara_player_vlc.version import __version__
logger = logging.getLogger(__name__)
class MpvPlayer(MediaPlayer):
    """Interface for the Python mpv wrapper

    This class allows the usage of mpv as a player for Dakara.

    The playlist is virtually handled using song-end callbacks.

    Attributes:
        player (mpv.MPV): instance of mpv, attached to the actual player.
        media_pending (str): path of a song which will be played after the transition
            screen.
    """

    def init_player(self, config, tempdir):
        """Create and configure the mpv instance.

        Args:
            config (dict): player configuration; reads "loglevel" and the
                "mpv" sub-dict of raw mpv options.
            tempdir: unused by this implementation — presumably required by
                the MediaPlayer interface.
        """
        # set mpv player options and logging
        config_loglevel = config.get("loglevel") or "info"
        self.player = mpv.MPV(log_handler=self.handle_log_messages,
                              loglevel=config_loglevel)
        # Forward raw mpv options from the config verbatim.
        config_mpv = config.get("mpv") or {}
        for mpv_option in config_mpv:
            self.player[mpv_option] = config_mpv[mpv_option]
        # set mpv callbacks
        self.set_mpv_default_callbacks()
        # media containing a song which will be played after the transition
        # screen
        self.media_pending = None

    def load_player(self):
        """Finish player setup once configuration is loaded."""
        # check mpv version
        self.check_mpv_version()
        # set mpv fullscreen
        self.player.fullscreen = self.fullscreen
        # force a single window
        self.player.force_window = True

    def check_mpv_version(self):
        """Print the mpv version
        """
        # get and log version
        logger.info(self.player.mpv_version)

    def set_mpv_default_callbacks(self):
        """Set mpv player default callbacks
        """
        # wrapper to use the event_callback decorator for setting handle_end_reached
        @self.player.event_callback("end_file")
        def end_file_callback(event):
            self.handle_end_reached(event)

    def handle_end_reached(self, event):
        """Callback called when a media ends

        This happens when:
            - A transition screen ends, leading to playing the actual song;
            - A song ends, leading to calling the callback
              `callbacks["finished"]`;
            - An idle screen ends, leading to reloop it.

        A new thread is created in any case.

        Args:
            event (mpv.MpvEventEndFile): mpv end file event object.
        """
        # check that the reason is actually a file ending (could be a force stop)
        if (event["event"]["reason"] != mpv.MpvEventEndFile.EOF):
            return
        logger.debug("Song end callback called")
        if self.in_transition:
            # if the transition screen has finished,
            # request to play the song itself
            self.in_transition = False
            # manually set the subtitles as a workaround for the matching of mpv being
            # too permissive
            filename_without_ext = os.path.splitext(self.media_pending)[0]
            sub_file = None
            if os.path.exists(f"{filename_without_ext}.ass"):
                sub_file = f"{filename_without_ext}.ass"
            elif os.path.exists(f"{filename_without_ext}.ssa"):
                sub_file = f"{filename_without_ext}.ssa"
            thread = self.create_thread(
                target=self.play_media, args=(self.media_pending, sub_file)
            )
            thread.start()
            # get file path
            logger.info("Now playing '%s'", self.media_pending)
            # call the callback for when a song starts
            self.callbacks["started_song"](self.playing_id)
            return
        if self.is_idle():
            # if the idle screen has finished, restart it
            thread = self.create_thread(target=self.play_idle_screen)
            thread.start()
            return
        # otherwise, the song has finished,
        # so call the right callback
        self.callbacks["finished"](self.playing_id)

    def handle_log_messages(self, loglevel, component, message):
        """Callback called when a log message occurs

        Direct the message to the logger for Dakara Player.
        If the level is 'error' or higher, call the callbacks
        `callbacks["finished"]` and `callbacks["error"]`

        Args:
            loglevel (str): level of the log message
            component (str): component of mpv that generated the message
            message (str): actual log message
        """
        # Map mpv's textual level onto the logging module's numeric levels.
        if loglevel == "fatal":
            intlevel = logging.CRITICAL
        elif loglevel == "error":
            intlevel = logging.ERROR
        elif loglevel == "warn":
            intlevel = logging.WARNING
        elif loglevel == "info":
            intlevel = logging.INFO
        elif loglevel == "debug":
            intlevel = logging.DEBUG
        else:
            intlevel = logging.NOTSET
        logger.log(intlevel, f"mpv: {component}: {message}")
        # An mpv error is treated as the current media being unplayable.
        if intlevel >= logging.ERROR:
            message = "Unable to play current media"
            logger.error(message)
            self.in_transition = False
            self.callbacks["finished"](self.playing_id)
            self.callbacks["error"](self.playing_id, message)

    def play_media(self, media, sub_file=None):
        """Play the given media

        Args:
            media (str): path to media
            sub_file (str): optional path to a subtitle file; when None, mpv
                gets an empty subtitle list.
        """
        self.player["sub-files"] = [sub_file] if sub_file else []
        self.player.loadfile(media)

    def play_playlist_entry(self, playlist_entry):
        """Queue a playlist entry: show its transition screen, then the song.

        Args:
            playlist_entry (dict): must contain "id" and "song"["file_path"].
        """
        # file location
        file_path = self.kara_folder_path / playlist_entry["song"]["file_path"]
        # Check file exists
        if not file_path.exists():
            logger.error("File not found '%s'", file_path)
            self.callbacks["could_not_play"](playlist_entry["id"])
            self.callbacks["error"](
                playlist_entry["id"], "File not found '{}'".format(file_path)
            )
            return
        # create the media
        self.playing_id = playlist_entry["id"]
        self.media_pending = str(file_path)
        # create the transition screen
        with self.transition_text_path.open("w", encoding="utf8") as file:
            file.write(self.text_generator.create_transition_text(playlist_entry,
                                                                  fade_in=False))
        media_transition = str(self.background_loader.backgrounds["transition"])
        # The end of the transition triggers handle_end_reached, which then
        # plays media_pending.
        self.in_transition = True
        self.player.image_display_duration = int(self.durations["transition"])
        self.play_media(media_transition, self.transition_text_path)
        logger.info("Playing transition for '%s'", file_path)
        self.callbacks["started_transition"](playlist_entry["id"])

    def play_idle_screen(self):
        """Display the looping idle screen with version information."""
        # set idle state
        self.playing_id = None
        self.in_transition = False
        # create idle screen media
        media = str(self.background_loader.backgrounds["idle"])
        # create the idle screen
        with self.idle_text_path.open("w", encoding="utf8") as file:
            file.write(
                self.text_generator.create_idle_text(
                    {
                        "notes": [
                            self.player.mpv_version,
                            "Dakara player " + __version__,
                        ]
                    }
                )
            )
        self.player.image_display_duration = "inf"
        self.play_media(media, self.idle_text_path)
        logger.debug("Playing idle screen")

    def get_timing(self):
        """Return the current playback position in whole seconds (0 when idle,
        in transition, or when mpv reports no position)."""
        if self.is_idle() or self.in_transition:
            return 0
        timing = self.player.time_pos
        if timing is None:
            return 0
        return int(timing)

    def is_paused(self):
        """Return True when mpv is paused."""
        return self.player.pause

    def set_pause(self, pause):
        """Pause or resume playback (no-op while idle or already in the
        requested state).

        Args:
            pause (bool): True to pause, False to resume.
        """
        if not self.is_idle():
            if pause:
                if self.is_paused():
                    logger.debug("Player already in pause")
                    return
                logger.info("Setting pause")
                self.player.pause = True
                logger.debug("Set pause")
                self.callbacks["paused"](self.playing_id, self.get_timing())
            else:
                if not self.is_paused():
                    logger.debug("Player already playing")
                    return
                logger.info("Resuming play")
                self.player.pause = False
                logger.debug("Resumed play")
                self.callbacks["resumed"](self.playing_id, self.get_timing())

    def stop_player(self):
        """Terminate mpv, warning if shutdown takes longer than 3 seconds."""
        logger.info("Stopping player")
        # send a warning within 3 seconds if mpv has not stopped already
        timer_stop_player_too_long = Timer(3, self.warn_stop_player_too_long)
        timer_stop_player_too_long.start()
        self.player.terminate()
        # clear the warning
        timer_stop_player_too_long.cancel()
        logger.debug("Stopped player")

    @staticmethod
    def warn_stop_player_too_long():
        """Notify the user that mpv takes too long to stop
        """
        logger.warning("mpv takes too long to stop")
|
from json import dump, load
from os import path
from devip.utils import get_input, current_ip, cidr, console, require, log
# Per-user settings file holding service configuration and IP lists.
SETTINGS_FILENAME = '{}/.devip.json'.format(path.expanduser('~'))
# Default user section: temporary and permanent IP lists.
USER_DEFAULTS = {'temp': [], 'perm': []}
class Service(object):
    """Base class for a firewall/allow-list service managed by devip.

    Tracks two user-level IP lists ('temp' and 'perm') in a JSON settings
    file, and delegates the actual allow/revoke operations to subclasses via
    get_service_ips / revoke_ip / allow_ip.

    NOTE(review): the @log decorator arguments (including
    `address=cidr(current_ip())` on `move`) are evaluated once at
    class-definition time, not per call — confirm that is intended.
    """
    name = None            # service identifier; subclasses must override
    default_settings = {}  # per-service settings template; subclasses override
    # NOTE(review): required_settings is never referenced in this class —
    # presumably consumed by subclasses or external tooling.
    required_settings = []

    @staticmethod
    def _load_settings():
        """Load the settings JSON, guaranteeing a 'user' section exists."""
        if path.isfile(SETTINGS_FILENAME):
            with open(SETTINGS_FILENAME) as fp:
                settings = load(fp)
        else:
            settings = {}
        settings.setdefault('user', USER_DEFAULTS)
        return settings

    def __init__(self):
        settings = Service._load_settings()
        self.user = settings['user']
        # Copies the class-level name onto the instance (value unchanged).
        setattr(self, 'name', self.name)
        # Instance attributes: defaults overlaid with any saved (truthy)
        # values for this service.
        attrs = self.default_settings.copy()
        if self.name in settings:
            attrs.update({k: v for k, v in settings[self.name].items() if v})
        for key, value in attrs.items():
            setattr(self, key, value)

    @log('Set up {name}...')
    def setup(self):
        """Interactively prompt for each service setting and persist them."""
        data = Service._load_settings()
        data[self.name] = {}
        for key, default in self.default_settings.items():
            data[self.name][key] = get_input('Enter {name} {key}'.format(name=self.name, key=key), default)
        with open(SETTINGS_FILENAME, 'w') as fp:
            dump(data, fp, indent=2)

    @log('Show {list_name} IP addresses...', list_name='all')
    def show(self, list_name=None):
        """Print the temp and/or perm IP lists to the console."""
        data = {}
        if not list_name or list_name == 'temp':
            data['Temporary IPs'], _ = self._get_user_setting('temp')
        if not list_name or list_name == 'perm':
            data['Permanent IPs'], _ = self._get_user_setting('perm')
        for key, values in data.items():
            if not list_name:
                console('{}:'.format(key))
            if values:
                for value in values:
                    console(' * {}'.format(value))
            else:
                console(' No records')

    @log('Clear {list_name} IP addresses...', list_name='all')
    def clear(self, list_name=None):
        """Empty the temp and/or perm IP lists."""
        if not list_name or list_name == 'temp':
            self._update_user_setting_list('temp', None)
        if not list_name or list_name == 'perm':
            self._update_user_setting_list('perm', None)

    @log('Move to {list_name} IP address `{address}`', address=cidr(current_ip()), list_name='temp')
    def move(self, address, list_name=None):
        """Move an IP (or the current public IP) into the given list and sync
        the service's allow rules."""
        require(msg='Service `{}` requires settings to be set-up before use.'.format(self.name), **self.__dict__)
        list_name = list_name or 'temp'
        if not address:
            self._move_here(list_name)
        else:
            self._move_ip(list_name, address)

    def _move_here(self, list_name):
        # Shortcut: move the machine's current public IP.
        self._move_ip(list_name, cidr(current_ip()))

    def _move_ip(self, list_name, ip):
        """Reconcile the service's allowed IPs with the local lists plus *ip*."""
        service_ips = self.get_service_ips()
        temp_ips = [cidr(x) for x in self.user['temp']]
        perm_ips = [cidr(x) for x in self.user['perm']]
        # Revoke stale temporary IPs (everything temp except the new one).
        for x in temp_ips:
            if x in service_ips and x != ip:
                self.revoke_ip(x)
        # Ensure all permanent IPs plus the new IP are allowed.
        for x in perm_ips + [ip]:
            if x not in service_ips:
                self.allow_ip(x)
        self.add(list_name, ip)
        self.remove(Service._other_list(list_name), ip)

    @staticmethod
    def _other_list(list_name):
        # The complementary list name.
        return 'perm' if list_name == 'temp' else 'temp'

    @log('Add IP address `{ip}` to {list_name}', ip='undefined', list_name='undefined')
    def add(self, list_name, ip):
        """Record *ip* in the named local list."""
        require(list=list_name, ip=ip)
        switcher = {
            'temp': lambda: self._update_user_setting_list('temp', ip),
            'perm': lambda: self._update_user_setting_list('perm', ip)
        }
        switcher[list_name]()

    @log('Remove IP address `{ip}` from {list_name}', ip='undefined', list_name='undefined')
    def remove(self, list_name, ip):
        """Drop *ip* from the named local list (no-op if absent)."""
        require(list=list_name, ip=ip)
        switcher = {
            'temp': lambda: self._update_user_setting_list('temp', ip, remove=True),
            'perm': lambda: self._update_user_setting_list('perm', ip, remove=True)
        }
        switcher[list_name]()

    def _get_user_setting(self, key):
        # Returns (list for key, full settings dict) so callers can write back.
        settings = Service._load_settings()
        return settings['user'][key], settings

    def _update_user_setting(self, key, value, settings):
        # Persist a user-section value back to the settings file.
        settings['user'][key] = value
        with open(SETTINGS_FILENAME, 'w') as fp:
            dump(settings, fp, indent=2)

    def _update_user_setting_list(self, key, value, remove=False):
        """Add/remove *value* in list *key*; a falsy value clears the list."""
        source, settings = self._get_user_setting(key)
        if not value:
            source = []
        elif not remove:
            source.append(value)
        elif value in source:
            source.remove(value)
        # De-duplicate; note this also discards the original ordering.
        target = list(set(source))
        self._update_user_setting(key, target, settings)

    def get_service_ips(self):
        """Return the IPs currently allowed by the service (subclass hook)."""
        raise NotImplementedError()

    def revoke_ip(self, ip):
        """Remove *ip* from the service's allow rules (subclass hook)."""
        raise NotImplementedError()

    def allow_ip(self, ip):
        """Add *ip* to the service's allow rules (subclass hook)."""
        raise NotImplementedError()
|
#coding:utf-8
from mantis.fundamental.utils.useful import hash_object,object_assign
from mantis.fundamental.network.message import JsonMessage
from mantis.fanbei.smarthome.base import *
"""
"""
# class MessageGetServerTime(Message):
# """获取系统时钟"""
#
# def __init__(self):
# Message.__init__(self)
# pass
#
#
# class MessageInitParamReq(Message):
# def __init__(self):
# Message.__init__(self, 'initparam_req')
# self.dev_id = '' # 设备编号
# self.dev_type = '' # 设备类型
# self.version = '' # 系统版本
# self.time = '' # 当前时间
# self.signature = '' # 数据签名
class MessageTraverse(JsonMessage):
    """Base class for device/platform JSON messages.

    Carries the device identifier; subclasses set NAME to the wire-level
    message name and add their own payload fields.
    """
    # Direction tags — presumably consumed by JsonMessage framing; confirm.
    OFFSET_BID = 0  #
    OFFSET_DOWN = 1
    OFFSET_UP = 2
    NAME = ''  # wire-level message name; overridden by subclasses

    def __init__(self, name):
        JsonMessage.__init__(self, name)
        self.device_id = ''  # hardware/device identifier
        # self.id = ''
        # self.name = name
        # self.values = {}
        # self.extras = {'time': '', 'ver': ''}  #
        # self.offset = self.OFFSET_BID
        # self.mod = ModuleType.Primary

    def values(self):
        # Serialize all instance attributes, excluding internal bookkeeping
        # fields and the class-level constants.
        return hash_object(self,excludes=('id_','name_','values_','extras_','NAME','OFFSET_BID','OFFSET_DOWN','OFFSET_UP'))
    # Legacy serialization helpers kept for reference:
    # def dict(self):
    #     data = dict(id=self.id, name=self.name, values=self.values, extras=self.extras, offset=self.offset)
    #     return data
    #
    # def json(self):
    #     return json.dumps(self.dict())
    # def values(self):
    #     return dict(device_id=self.device_id)
class MessageTraverseDown(MessageTraverse):
    """Downlink message (platform -> device)."""
    def __init__(self, name):
        MessageTraverse.__init__(self, name)
        # self.offset = self.OFFSET_DOWN
class MessageTraverseUp(MessageTraverse):
    """Uplink message (device -> platform)."""
    def __init__(self, name):
        MessageTraverse.__init__(self, name)
        # self.offset = self.OFFSET_UP
class MessageLogin(MessageTraverseUp):
    """Device login request."""
    NAME = 'login'

    def __init__(self):
        MessageTraverseUp.__init__(self, self.NAME)
        self.token = ''  # authentication token presented by the device
    # def values(self):
    #     data = MessageTraverse.values(self)
    #     data['token'] = self.token
    #     return data
class MessageLoginResp(MessageTraverseDown):
    """Device login response."""
    NAME = 'login_resp'

    def __init__(self):
        MessageTraverseDown.__init__(self, self.NAME)
        self.error = 0       # error code, 0 = success
        self.message = ''    # human-readable status message
        self.server_time = 0 # server clock for device synchronization
class MessageHeartBeat(MessageTraverse):
    """Keep-alive message exchanged between device and platform."""

    NAME = 'heartbeat'

    def __init__(self):
        MessageTraverse.__init__(self, self.NAME)
class MessageDeviceStatusQuery(MessageTraverseDown):
    """Platform request for the device's current status."""

    NAME = 'dev_status_query'

    def __init__(self):
        MessageTraverseDown.__init__(self, self.NAME)
class MessageDeviceStatus(MessageTraverseUp):
    """Device status report (upstream)."""

    NAME = 'dev_status'

    def __init__(self):
        MessageTraverseUp.__init__(self, self.NAME)
        self.params = {}  # feature-specific runtime parameters
class MessageDeviceValueSet(MessageTraverseDown):
    """Set one runtime parameter on the device."""

    NAME = 'dev_val_set'

    def __init__(self):
        MessageTraverseDown.__init__(self, self.NAME)
        self.mod_type = ModuleType.First  # target module (primary device)
        self.param_name = ''   # name of the parameter to set
        self.param_value = ''  # new value
class MessageSensorStatusQuery(MessageTraverseUp):
    """Query the runtime parameters of a specific sensor module."""

    NAME = 'sensor_status_query'

    def __init__(self):
        MessageTraverseUp.__init__(self, self.NAME)
        self.sensor_type = SensorType.All       # which kind of sensor
        self.sensor_id = Constants.Undefined    # which sensor instance
class MessageSensorStatus(MessageTraverseUp):
    """Upload the runtime parameters of a sensor module."""

    NAME = 'sensor_status'

    def __init__(self):
        MessageTraverseUp.__init__(self, self.NAME)
        self.sensor_type = SensorType.Undefined
        self.sensor_id = Constants.Undefined
        self.params = {}  # feature-specific runtime parameters
class MessageSensorValueSet(MessageTraverseDown):
    """Control command targeting a single sensor."""

    NAME = 'sensor_val_set'

    def __init__(self):
        MessageTraverseDown.__init__(self, self.NAME)
        self.sensor_type = SensorType.Undefined
        self.sensor_id = Constants.Undefined
        self.param_name = ''   # sensor parameter to change
        self.param_value = ''  # new value
class MessageDeviceCommand(MessageTraverseDown):
    """Device control command issued by the platform."""

    NAME = 'dev_command'

    def __init__(self):
        MessageTraverseDown.__init__(self, self.NAME)
        self.mod_type = ModuleType.Primary  # defaults to the host module
        self.command = ''  # command name
        self.params = {}   # command arguments
class MessageDeviceUpgrade(MessageTraverseDown):
    """Upgrade request pushed by the platform."""

    NAME = 'dev_upgrade'

    def __init__(self):
        MessageTraverseDown.__init__(self, self.NAME)
        self.mod_type = ModuleType.Primary  # defaults to the host module
        self.ver = ''          # version to install
        self.md5 = ''          # digest of the update payload
        self.access_code = ''  # download credential
        self.server_url = ''   # update server address
class MessageDeviceLogInfo(MessageTraverseUp):
    """Device runtime log record (upstream)."""

    NAME = 'dev_log'

    def __init__(self):
        MessageTraverseUp.__init__(self, self.NAME)
        self.time = 0      # log timestamp
        self.level = ''    # log severity
        self.content = ''  # log text
MessageClsDict = {}  # message name -> message class, used by parseMessage()


def registerMessageObject(msgcls):
    """Register a message class under its NAME for later lookup."""
    MessageClsDict[msgcls.NAME] = msgcls


# Auto-register every Message* class defined above.  The namespace is
# snapshotted with list(): in Python 3, iterating locals().items() at
# module level while the loop binds `key`/`value` into that same
# namespace raises "RuntimeError: dictionary changed size during
# iteration".  (In Python 2, .items() already returned a list.)
for key, value in list(locals().items()):
    if key.find('Message') == 0 and key not in ('MessageClsDict', 'Message', 'MessageType', 'MessageSplitter'):
        registerMessageObject(value)
def parseMessage(data):
    """Parse a raw JSON message (str or already-decoded dict) into a
    message object registered in MessageClsDict.

    Returns None when the message name is not registered.
    """
    # Local import: the visible module top never imports json.
    import json

    # Debug trace of the raw payload; print() works on Python 2 and 3.
    print(data)
    if isinstance(data, str):
        data = json.loads(data)
    message = data.get('name')
    msgcls = MessageClsDict.get(message)
    if not msgcls:
        print('Message Type unKnown. value:{}'.format(message))
        return None
    data = data.get('values', {})
    msg = msgcls()
    msg.assign(data)  # copy the 'values' payload onto the instance
    return msg
if __name__ == '__main__':
    # Smoke test: parse a sample sensor_status payload and print its id.
    data = '''{
        "id": "",
        "name": "sensor_status",
        "values": {
            "params": {
                "1": "0"
            },
            "sensor_id": 1,
            "sensor_type": 1
        }
    }'''
    m = parseMessage(data)
    print(m.id)  # print() is valid on both Python 2 and 3
|
<filename>qplan/plugins/FOCAS.py<gh_stars>1-10
#
# FOCAS.py -- OB converter for FOCAS instrument
#
# <NAME> (<EMAIL>)
#
import time
from ginga import trcalc
from q2ope import BaseConverter
class Converter(BaseConverter):
    """Convert queue observation blocks (OBs) into FOCAS OPE commands."""

    def _setup_target(self, d, ob):
        """Populate dict `d` with target/instrument parameters for `ob`.

        Also builds d['tgtstr'], the target-parameter substring shared by
        all SETUPFIELD and GETOBJECT commands, and rewrites d['filter']
        into the $FILTER_ macro name (NB_* for narrow band, BB_* otherwise).
        """
        funky_ra = self.ra_to_funky(ob.target.ra)
        funky_dec = self.dec_to_funky(ob.target.dec)

        autoguide = 'NO'
        if ob.inscfg.guiding:
            autoguide = 'YES'

        if ob.inscfg.filter is None:
            # TODO: what should this be?
            filtername = 'B'
        else:
            filtername = ob.inscfg.filter.upper()

        d.update(dict(object=ob.target.name,
                      ra="%010.3f" % funky_ra, dec="%+010.2f" % funky_dec,
                      equinox=ob.target.equinox, pa=ob.inscfg.pa,
                      exptime=ob.inscfg.exp_time,
                      num_exp=ob.inscfg.num_exp,
                      dither_ra=ob.inscfg.dither_ra,
                      dither_dec=ob.inscfg.dither_dec,
                      dither_theta=ob.inscfg.dither_theta,
                      binning=ob.inscfg.binning,
                      offset_sec=0.0,
                      offset_ra=ob.inscfg.offset_ra,
                      offset_dec=ob.inscfg.offset_dec,
                      filter=filtername,
                      autoguide=autoguide))

        # prepare target parameters substring common to all SETUPFIELD
        # and GETOBJECT commands
        if filtername.startswith('N'):
            fcsname = 'NB_%s' % (filtername[1:])
        else:
            fcsname = 'BB_%s' % (filtername)
        d['filter'] = fcsname

        tgtstr = 'OBJECT="%(object)s" RA=%(ra)s DEC=%(dec)s EQUINOX=%(equinox)6.1f INSROT_PA=%(pa).1f $FILTER_%(filter)s' % d
        d.update(dict(tgtstr=tgtstr))

    def write_ope_header(self, out_f):
        """Write the standard OPE :header/:parameter/:command preamble."""
        out = self._mk_out(out_f)
        preamble = """
:header
# this file was automatically generated at %(curtime)s
#
OBSERVATION_FILE_TYPE=OPE
#OBSERVATION_START_DATE=
#OBSERVATION_START_TIME=
#OBSERVATION_END_DATE=
#OBSERVATION_END_TIME=
:parameter
*LOAD "FOCAS_FOCASPARAM.prm"
*LOAD "FOCAS_MOS.prm"
*LOAD "FOCAS_STDSTAR.prm"
:command
"""
        d = dict(curtime=time.strftime("%Y-%m-%d %H:%M:%S",
                                       time.localtime()),
                 )
        out(preamble % d)

    def ob_to_ope(self, ob, out_f):
        """Emit the OPE command sequence for one OB to `out_f`."""
        out = self._mk_out(out_f)

        # special cases: filter change, long slew, calibrations, etc.
        # (was `ob.derived != None`; identity test is the Python idiom)
        if ob.derived is not None:
            if ob.comment.startswith('Filter change'):
                #self.out_filterchange(ob, out_f)
                return
            elif ob.comment.startswith('Long slew'):
                out("\n# %s" % (ob.comment))
                d = {}
                self._setup_target(d, ob)
                cmd_str = 'SetupField $DEF_IMAG %(tgtstr)s $MASK_NONE Shift_Sec=%(offset_sec)d Delta_Ra=%(offset_ra)d Delta_Dec=%(offset_dec)d' % d
                cmd_str = cmd_str + (' AUTOGUIDE=%(autoguide)s' % d)
                out(cmd_str)
                return
            elif ob.comment.startswith('Delay for'):
                out("\n# %s" % (ob.comment))
                d = dict(sleep_time=int(ob.total_time))
                cmd_str = 'EXEC OBS TIMER SLEEP_TIME=%(sleep_time)d' % d
                out(cmd_str)
                return

        # calibration targets handled by dedicated commands
        tgtname = ob.target.name.lower()
        if tgtname == 'domeflat':
            out("\n# %s" % (ob.comment))
            d = {}
            self._setup_target(d, ob)
            cmd_str = 'GetDomeflat $DEF_IMAG $DEF_DOMEFLAT $MASK_NONE $FILTER_%(filter)s $CCD_%(binning)s ExpTime=7 VOLT=20' % d
            for i in range(ob.inscfg.num_exp):
                out(cmd_str)
            return
        elif tgtname == 'bias':
            out("\n# %s" % (ob.comment))
            d = {}
            self._setup_target(d, ob)
            cmd_str = 'GetBias $DEF_IMAG $CCD_%(binning)s OBJECT=BIAS' % d
            for i in range(ob.inscfg.num_exp):
                out(cmd_str)
            return

        # <-- normal OBs
        # (redundant second `out = self._mk_out(out_f)` removed; `out`
        # was already bound at the top of this method)
        out("\n# %s (%s %s) %s: %s" % (ob, ob.program.proposal,
                                       ob.program.pi, ob.name,
                                       ob.target.name))
        d = {}
        self._setup_target(d, ob)
        cmd_str = 'SetupField $DEF_IMAG %(tgtstr)s $MASK_NONE Shift_Sec=%(offset_sec)d Delta_Ra=%(offset_ra)d Delta_Dec=%(offset_dec)d' % d
        if ob.inscfg.guiding:
            # dither via the autoguider; telescope stays locked on guide star
            dith_cmd = "MoveGuide0"
            cmd_str = cmd_str + (' AUTOGUIDE=%(autoguide)s' % d)
        else:
            dith_cmd = "MoveTelescope"
            cmd_str = cmd_str + (' AUTOGUIDE=%(autoguide)s' % d)

        # output setupfield command to position telescope
        out(cmd_str)

        d_ra, d_dec = d['dither_ra'], d['dither_dec']
        d_theta = d['dither_theta']
        # absolute offsets of the 5-point (tilted square) dither pattern,
        # closing back at the origin
        abs_off = ((0, 0), (d_ra, d_dec), (-d_dec, d_ra), (-d_ra, -d_dec),
                   (d_dec, -d_ra), (0, 0))
        # rotate box points according to dither theta
        abs_off = [trcalc.rotate_pt(p[0], p[1], d_theta) for p in abs_off]

        # 5 dither sequence
        d['mask'] = 'NONE'
        if ob.inscfg.num_exp == 1:
            cmd_str = 'GetObject $DEF_IMAG $MASK_%(mask)s %(tgtstr)s $CCD_%(binning)s EXPTIME=%(exptime)d' % d
            out(cmd_str)
        else:
            for i in range(5):
                out("# Dither point %d" % (i+1))
                cmd_str = 'GetObject $DEF_IMAG $MASK_%(mask)s %(tgtstr)s $CCD_%(binning)s EXPTIME=%(exptime)d' % d
                out(cmd_str)
                # calculate deltas for positioning at next dither pos
                cur_off_ra, cur_off_dec = abs_off[i]
                next_off_ra, next_off_dec = abs_off[i+1]
                delta_ra = next_off_ra - cur_off_ra
                delta_dec = next_off_dec - cur_off_dec
                # issue command for offsetting to next dither pos
                cmd_str = '%s $DEF_TOOL Delta_RA=%.3f Delta_DEC=%.3f' % (
                    dith_cmd, delta_ra, delta_dec)
                out(cmd_str)
                d['mask'] = 'NOP'
'''
# Broad Band (BB) #
FILTER_NONE=Grism=0 Filter01=0 Filter02=0 Filter03=0 Polarizer=Nop
FILTER_BB_U=Grism=0 Filter01=1 Filter02=0 Filter03=0 Polarizer=Nop
FILTER_BB_B=Grism=0 Filter01=2 Filter02=0 Filter03=0 Polarizer=Nop
FILTER_BB_V=Grism=0 Filter01=3 Filter02=0 Filter03=0 Polarizer=Nop
FILTER_BB_R=Grism=0 Filter01=4 Filter02=0 Filter03=0 Polarizer=Nop
FILTER_BB_I=Grism=0 Filter01=5 Filter02=0 Filter03=0 Polarizer=Nop
# Narrow Band (NB) #
#FILTER_NB_373=Grism=0 Filter01=0 Filter02=0 Filter03=5 Polarizer=Nop
#FILTER_NB_386=Grism=0 Filter01=0 Filter02=0 Filter03=7 Polarizer=Nop
#FILTER_NB_487=Grism=0 Filter01=7 Filter02=0 Filter03=0 Polarizer=Nop
#FILTER_NB_502=Grism=0 Filter01=0 Filter02=1 Filter03=0 Polarizer=Nop
#FILTER_NB_512=Grism=0 Filter01=0 Filter02=2 Filter03=0 Polarizer=Nop
#FILTER_NB_642=Grism=0 Filter01=0 Filter02=3 Filter03=0 Polarizer=Nop
#FILTER_NB_658=Grism=0 Filter01=0 Filter02=4 Filter03=0 Polarizer=Nop
#FILTER_NB_670=Grism=0 Filter01=0 Filter02=5 Filter03=0 Polarizer=Nop
FILTER_NB_373=Grism=0 Filter01=0 Filter02=5 Filter03=0 Polarizer=Nop
#FILTER_NB_386=Grism=0 Filter01=0 Filter02=5 Filter03=0 Polarizer=Nop
#FILTER_NB_487=Grism=0 Filter01=7 Filter02=0 Filter03=0 Polarizer=Nop
FILTER_NB_502=Grism=0 Filter01=0 Filter02=4 Filter03=0 Polarizer=Nop
FILTER_NB_512=Grism=0 Filter01=0 Filter02=2 Filter03=0 Polarizer=Nop
#FILTER_NB_642=Grism=0 Filter01=0 Filter02=0 Filter03=7 Polarizer=Nop
FILTER_NB_658=Grism=0 Filter01=0 Filter02=0 Filter03=5 Polarizer=Nop
#FILTER_NB_670=Grism=0 Filter01=0 Filter02=4 Filter03=0 Polarizer=Nop
# Narrow Band (NB) alias #
#FILTER_NB_O2_ON=Grism=0 Filter01=0 Filter02=0 Filter03=5 Polarizer=Nop
#FILTER_NB_O2_OFF=Grism=0 Filter01=0 Filter02=0 Filter03=6 Polarizer=Nop
#FILTER_NB_HB_ON=Grism=0 Filter01=0 Filter02=0 Filter03=7 Polarizer=Nop
#FILTER_NB_HB_OFF=Grism=0 Filter01=0 Filter02=1 Filter03=0 Polarizer=Nop
#FILTER_NB_O3_ON=Grism=0 Filter01=0 Filter02=2 Filter03=0 Polarizer=Nop
#FILTER_NB_HA_ON=Grism=0 Filter01=0 Filter02=3 Filter03=0 Polarizer=Nop
#FILTER_NB_HA_OFF=Grism=0 Filter01=0 Filter02=4 Filter03=0 Polarizer=Nop
#FILTER_NB_S2_ON=Grism=0 Filter01=0 Filter02=5 Filter03=0 Polarizer=Nop
# Polarizing filters (PF) #
FILTER_PF_OPT=Filter03=3
FILTER_PF_NIR=Filter03=4
# SDSS filterS
FILTER_BB_Z=Grism=0 Filter01=0 Filter02=0 Filter03=1 Polarizer=Nop
FILTER_SDSS_Z=Grism=0 Filter01=0 Filter02=0 Filter03=1 Polarizer=Nop
FILTER_SDSS_I=Grism=0 Filter01=7 Filter02=0 Filter03=0 Polarizer=Nop
FILTER_SDSS_R=GRISM=0 FILTER01=0 FILTER02=0 FILTER03=7 POLARIZER=NOP
FILTER_SDSS_G=GRISM=0 FILTER01=0 FILTER02=1 FILTER03=0 POLARIZER=NOP
FILTER_BB_G=GRISM=0 FILTER01=0 FILTER02=1 FILTER03=0 POLARIZER=NOP
SetupField $DEF_IMAGE $SA110 OFFSET_RA=0 OFFSET_DEC=30 Filter="W-J-B"
GetStandard $DEF_IMAGE $SA110 EXPTIME=5 DELTA_Z=0.4 OFFSET_RA=0 OFFSET_DEC=30 Filter="W-J-B"
SetupField $DEF_IMAGE_VGW $SA112 AG_SELECT=SEMIAUTO OFFSET_RA=0 OFFSET_DEC=0 Filter="W-S-Z+"
GetObject $DEF_IMAGE_VGW $SA112 AG_SELECT=SEMIAUTO OFFSET_RA=0 OFFSET_DEC=0 EXPTIME=20 Filter="W-S-Z+"
Setupfield $DEF_IMAGE5 $SA113 DITH_RA=60 DITH_DEC=60 OFFSET_RA=0 OFFSET_DEC=0 Filter="W-S-Z+"
GetObject $DEF_IMAGE5 $SA113 DITH_RA=60 DITH_DEC=60 EXPTIME=20 OFFSET_RA=0 OFFSET_DEC=0 Filter="W-S-Z+"
Setupfield $DEF_IMAGE5_VGW $SA113 DITH_RA=60 DITH_DEC=60 OFFSET_RA=0 OFFSET_DEC=0 Filter="W-S-Z+"
GetObject $DEF_IMAGE5_VGW $SA113 DITH_RA=60 DITH_DEC=60 EXPTIME=20 OFFSET_RA=0 OFFSET_DEC=0 Filter="W-S-Z+"
SetupField $DEF_IMAGEN $SA113 OFFSET_RA=0 OFFSET_DEC=0 NDITH=3 RDITH=60.0 TDITH=
15 Filter="W-S-Z+"
GetObject $DEF_IMAGEN $SA113 OFFSET_RA=0 OFFSET_DEC=0 EXPTIME=20 NDITH=3 RDITH=
60.0 TDITH=15 Filter="W-S-Z+"
SetupField $DEF_IMAGEN_VGW $GUIDE $NGC6705 OFFSET_RA=0 OFFSET_DEC=-320 NDITH=6 RDITH=25 TDITH=15 Filter="W-J-V"
GetObject $DEF_IMAGEN_VGW $GUIDE $NGC6705 OFFSET_RA=0 OFFSET_DEC=-320 EXPTIME=300 NDITH=6 RDITH=25.0 TDITH=15 Filter="W-J-V"
# Skyflat
SetupField $DEF_IMAGE RA=!STATS.RA DEC=!STATS.DEC OFFSET_RA=10 OFFSET_DEC=10 Filter="W-J-B"
GetSkyFlat $DEF_IMAGE RA=!STATS.RA DEC=!STATS.DEC EXPTIME=30 Filter="W-J-B"
# Domeflat
SetupDomeFlat $DEF_CMNTOOL SETUP=SETUP LAMP=4X10W VOLT=6.00 AMP=6.33
GetDomeFlat $DEF_IMAGE EXPTIME=40 Filter="W-J-B"
How to convert SHIFT_SEC to Delta_RA and Delta_DEC depends
on the position angle.
From the skeleton file,
Delta_RA = SHIFT_SEC * sin (PA)
Delta_Dec= SHIFT_SEC * cos (PA)
It is in arcsec.
> 2) For the standard 5 point dither, what would be a typical value
> for the MoveGuide0 offset in DELTA_RA and DELTA_DEC?
> In other words, what is the absolute value of the offset from
> the origin for each point?
Let's use a tilted square.
In absolute coordinates,
(0,0)
(8,2)
(-2,8)
(-8,-2)
(2,-8)
MoveGuide0 is the offset from the current position.
So, for the above dithering pattern, we need
GetObject ...
MoveGuide0 $DEF_TOOL DELTA_RA=8 DELTA_DEC=2
GetObject ...
MoveGuide0 $DEF_TOOL DELTA_RA=-10 DELTA_DEC=6
GetObject ...
MoveGuide0 $DEF_TOOL DELTA_RA=-6 DELTA_DEC=-10
GetObject ...
MoveGuide0 $DEF_TOOL DELTA_RA=10 DELTA_DEC=-6
GetObject ...
MoveGuide0 $DEF_TOOL DELTA_RA=-2 DELTA_DEC=8
'''
#END
|
<filename>trainer/rl_distributed_trainer.py
# -*- coding: utf-8 -*-
#tensorboard --logdir ./logs
import os,sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../utility'))
import time, copy
import random
import numpy as np
import multiprocessing as mp
import tensorflow as tf
from collections import deque
from collections import OrderedDict
from utils import Utils
from queue import Queue
from display_as_gif import display_frames_as_gif
from replay_memory import ReplayBuffer,PrioritizeReplayBuffer,Rollout
os.environ["CUDA_VISIBLE_DEVICES"] = ""
class BasedDistributedTrainer():
    """Base class for multi-process RL trainers.

    Subclasses must implement build_process() (called at the end of
    __init__) and set self.process_list to the mp.Process workers that
    train() starts and joins.

    NOTE(review): `priority`, `test_episode`, `test_render` and
    `test_frame` are stored for subclasses; this base class does not use
    them itself.
    """

    def __init__(self,
                 agent,
                 env,
                 n_episode,
                 max_step,
                 n_workers=1,
                 replay_size=32,
                 data_size=10**6,
                 n_warmup=5*10**4,
                 priority=False,
                 multi_step=1,
                 render=False,
                 test_render=False,
                 test_episode=5,
                 test_interval=1000,
                 test_frame=False,
                 metrics=None,
                 init_model_dir=None):
        self.agent = agent
        self.env = env
        self.n_workers = n_workers
        self.n_episode = n_episode
        self.max_steps = max_step
        self.render = render
        self.data_size = data_size
        self.n_warmup = n_warmup
        self.replay_size = replay_size  # batch_size
        self.multi_step = multi_step
        self.util = Utils(prefix=self.agent.__class__.__name__)
        self.util.initial()
        self.global_step = tf.train.get_or_create_global_step()
        self.test_episode = test_episode
        # fall back to a default when the caller passes None explicitly
        self.test_interval = test_interval if test_interval is not None else 10000
        self.test_render = test_render
        self.test_frame = test_frame
        self.init_model_dir = init_model_dir
        self.metrics = metrics
        # subclass hook: must populate self.process_list
        self.build_process()

    def build_process(self, *args, **kwargs):
        """Create worker processes; must be overridden by subclasses.

        Raises NotImplementedError (idiomatic for an abstract method and,
        as an Exception subclass, still caught by existing broad handlers;
        the original raised a bare Exception).
        """
        raise NotImplementedError('subclasses must implement build_process()')

    def train(self):
        """Start every worker process and block until all finish.

        KeyboardInterrupt terminates the workers instead of propagating.
        """
        assert len(self.process_list) > 0
        for i, worker in enumerate(self.process_list):
            worker.daemon = True
            print("Starting worker {}".format(i + 1))
            worker.start()
        try:
            [w.join() for w in self.process_list]
        except KeyboardInterrupt:
            [w.terminate() for w in self.process_list]
        return
class A3CTrainer(BasedDistributedTrainer):
    """A3C-style trainer: each worker process rolls out episodes, pushes
    gradients to a shared global agent and pulls back its weights."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): the base __init__ calls build_process() before the
        # attributes below exist; workers only use them after start().
        super().__init__(*args, **kwargs)
        self.replay_buf = Rollout(self.max_steps)      # per-episode rollout buffer
        self.global_agent = copy.deepcopy(self.agent)  # shared "parameter server" copy

    def build_process(self):
        # One process per worker, numbered from 1.
        self.process_list = [mp.Process(target=self.worker, args=[i+1]) for i in range(self.n_workers)]
        return

    def worker(self, num):
        """Entry point of worker process `num`: run n_episode episodes."""
        self.total_steps = 0
        self.learning_flag = 0
        self.num = num
        if self.init_model_dir is not None:
            # warm start from a previously saved model
            self.util.restore_agent(self.agent ,self.init_model_dir)
        for episode in range(1, self.n_episode+1):
            self.global_step.assign_add(1)
            self.step(episode)
        self.episode_end()
        return

    def step(self, episode):
        """Play one episode, then update the global net from the rollout."""
        with tf.contrib.summary.always_record_summaries():
            state = self.env.reset()
            total_reward = 0
            for step in range(1, self.max_steps+1):
                if self.render:
                    self.env.render()
                action = self.agent.choose_action(state)
                state_, reward, done, _ = self.env.step(action)
                # the smaller theta and closer to center the better
                if self.env.__class__.__name__ == 'CartPoleEnv':
                    # reshape CartPole's unit reward into a distance-based one
                    x, x_dot, theta, theta_dot = state_
                    r1 = (self.env.x_threshold - abs(x))/self.env.x_threshold - 0.8
                    r2 = (self.env.theta_threshold_radians - abs(theta))/self.env.theta_threshold_radians - 0.5
                    reward = r1 + r2
                self.replay_buf.push(state, action, done, state_, reward, step-1)
                total_reward += reward
                if done or step == self.max_steps:
                    # episode finished: train on the whole rollout
                    _, transitions, weights = self.replay_buf.sample()
                    train_data = map(np.array, zip(*transitions))
                    gradient = self.agent.update_q_net(train_data, weights)
                    self.global_agent.update_global_net(gradient)
                    self.replay_buf.clear()
                    # sync local weights back from the global net
                    self.agent.pull_global_net(self.global_agent.var)
                    self.learning_flag = 1
                    #self.summary()
                    self.step_end(episode, step, total_reward)
                    break
                state = state_
        return

    def step_end(self, episode, step, total_reward):
        """Log per-episode statistics to TensorBoard and the util logger."""
        self.total_steps += step
        tf.contrib.summary.scalar('train/total_steps', self.total_steps)
        tf.contrib.summary.scalar('train/steps_per_episode', step)
        tf.contrib.summary.scalar('train/total_reward', total_reward)
        tf.contrib.summary.scalar('train/average_reward', total_reward / step)
        print("worker: %d episode: %d total_steps: %d steps/episode: %d total_reward: %0.2f"%(self.num, episode, self.total_steps, step, total_reward))
        metrics = OrderedDict({
            "worker": self.num,
            "episode": episode,
            "total_steps": self.total_steps,
            "steps/episode":step,
            "total_reward": total_reward})
        self.util.write_log(message=metrics)
        #if episode % 50:
        #    self.util.save_model()
        return

    def episode_end(self):
        """Release environment resources when the worker finishes."""
        self.env.close()
        return
|
import prona2019Mod.utils as utils
import itertools as it
from six import iteritems, string_types, PY2, next
import numpy as np
import sys
def _is_single(obj):
    """
    Check whether `obj` is a single document or an entire corpus.
    Returns (is_single, new) 2-tuple, where `new` yields the same
    sequence as `obj`.

    `obj` is a single document if it is an iterable of strings; it is
    a corpus if it is an iterable of documents.
    """
    iterator = iter(obj)
    first_iter = iterator
    try:
        head = next(iterator)
    except StopIteration:
        # An empty object is a single document
        return True, obj
    # Re-attach the consumed head so callers still see the full sequence.
    iterator = it.chain([head], iterator)
    if isinstance(head, string_types):
        # First element is a string -> a single document; return the iterator
        return True, iterator
    if first_iter == obj:
        # `obj` was already an iterator; hand back the patched iterator
        return False, iterator
    # Re-iterable corpus: return the original object untouched
    return False, obj
'''
def _apply(corpus, chunksize=None, **kwargs):
"""Apply the transformation to a whole corpus and get the result as another corpus.
Parameters
----------
corpus : iterable of list of (int, number)
Corpus in BoW format.
chunksize : int, optional
If provided - more effective processing (by group of documents) will performed.
kwargs
Arbitrary keyword arguments.
Returns
-------
:class:`~gensim.interfaces.TransformedCorpus`
Transformed corpus.
"""
return TransformedCorpus(self, corpus, chunksize, **kwargs)
'''
def score_item(worda, wordb, components, scorer, phrasegrams):
    """Return the pre-computed phrase score for `components`.

    The score is retained from the original dataset (`phrasegrams` maps
    component tuples to (count, score) pairs); -1 when the phrase is
    unknown.  `worda`, `wordb` and `scorer` are accepted for interface
    compatibility and not used.
    """
    try:
        entry = phrasegrams[tuple(components)]
    except KeyError:
        return -1
    return entry[1]
def analyze_sentence(sentence, threshold, common_terms, scorer, phrasegrams):
    """Analyze a sentence, yielding (token, score) pairs.

    `sentence` a token list representing the sentence to be analyzed.
    `threshold` the minimum score for a bigram to be taken into account
    `common_terms` the list of common terms, they have a special treatment
    `scorer` the scorer function, as given to Phrases
    `phrasegrams` pre-computed phrase scores, consulted via score_item()

    Yields (chain, score) for merged phrases and (word, None) for
    tokens emitted unchanged.
    """
    s = [utils.any2utf8(w) for w in sentence]
    last_uncommon = None   # last uncommon token not yet emitted
    in_between = []        # common terms seen since last_uncommon
    # adding None is a trick that helps getting an automatic happy ending
    # as it won't be a common_word, nor score
    for word in s + [None]:
        is_common = word in common_terms
        if not is_common and last_uncommon:
            # candidate chain: uncommon .. (common terms) .. uncommon
            chain = [last_uncommon] + in_between + [word]
            # test between last_uncommon and the current word
            score = score_item(
                worda=last_uncommon,
                wordb=word,
                components=chain,
                scorer=scorer,
                phrasegrams=phrasegrams
            )
            if score > threshold:
                # emit the whole chain as one scored phrase
                yield (chain, score)
                last_uncommon = None
                in_between = []
            else:
                # release words individually
                for w in it.chain([last_uncommon], in_between):
                    yield (w, None)
                in_between = []
                last_uncommon = word
        elif not is_common:
            # first uncommon token of a potential chain
            last_uncommon = word
        else:  # common term
            if last_uncommon:
                # wait for uncommon resolution
                in_between.append(word)
            else:
                yield (word, None)
def get_phrase(sentence, phrase_model):
    """Merge detected phrases in `sentence` using a trained phrase model.

    `sentence` must be a single token list; passing an entire corpus
    aborts the program.  Returns a list of unicode tokens where scored
    chains are joined with the model's delimiter.
    """
    is_single, sentence = _is_single(sentence)
    if not is_single:
        # if the input is an entire corpus (rather than a single
        # sentence), bail out
        sys.exit("It is not a protein sequence")

    joiner = phrase_model['delimiter']
    detected = analyze_sentence(
        sentence,
        threshold=phrase_model['threshold'],
        common_terms=phrase_model['common_terms'],
        scorer=None,  # score_item() reads scores straight from phrasegrams
        phrasegrams=phrase_model['phrasegrams'])

    merged = []
    for token, score in detected:
        # a scored item is a multi-word chain; join it into one token
        merged.append(joiner.join(token) if score is not None else token)
    return [utils.to_unicode(w) for w in merged]
def split_ngrams(seq, n):
    """Split `seq` into `n` frame-shifted lists of n-grams.

    'AGAMQSASM' => [['AGA', 'MQS', 'ASM'], ['GAM','QSA'], ['AMQ', 'SAS']]
    """
    frames = []
    for offset in range(n):
        # zip over n copies of one iterator -> non-overlapping n-chunks
        chunks = zip(*[iter(seq[offset:])] * n)
        frames.append(["".join(chunk) for chunk in chunks])
    return frames
def to_vecs(seq, phrase_model, kmer, word2vec_index):
    """
    Convert a sequence into `kmer` frame-shifted embedding sums.

    e.g. 'AGAMQSASM' => [ array([ ... * 100 ], array([ ... * 100 ], array([ ... * 100 ] ]
    """
    frames = split_ngrams(seq, kmer)
    vectors = []
    for frame in frames:
        if phrase_model == 'none':
            tokens = frame
        else:
            # apply phrase merging twice to capture longer chains
            tokens = get_phrase(get_phrase(frame, phrase_model), phrase_model)
        embeddings = []
        for token in tokens:
            try:
                embeddings.append(np.array(word2vec_index[token]))
            except KeyError:
                # out-of-vocabulary n-gram: skip it
                continue
        # NOTE(review): sum([]) is the int 0 when nothing was in
        # vocabulary — preserved from the original behavior
        vectors.append(sum(embeddings))
    return vectors
|
#!/usr/bin/python
"""
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import os
import time
import traceback
import sys
import json
import logging
from avocado import Test, main, skip
sys.path.append('./util')
sys.path.append('../util')
sys.path.append('../../../utils/py')
sys.path.append('./../../utils/py')
import ServerUtils
import WriteHostFile
from daos_api import DaosContext, DaosPool, DaosContainer, DaosLog, DaosApiError
from daos_cref import DaosObjId
class ObjOpenBadParam(Test):
    """
    Test Class Description:
        Pass an assortment of bad parameters to the daos_obj_open function.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize values for variables that are used in tearDown() such that
        if setUp() fails for any reason, tearDown() will avoid throwing
        an AttributeError exception.
        """
        super(ObjOpenBadParam, self).__init__(*args, **kwargs)
        self.container = None
        self.pool = None

    def _expect_obj_open_error(self, err_code):
        """Attempt self.obj.open() and fail the test unless it raises a
        DaosApiError whose text contains `err_code` (e.g. "-1002").

        Replaces seven copy-pasted try/except blocks; unlike those, an
        unexpected *success* of open() now fails the test instead of
        silently passing.
        """
        try:
            self.obj.open()
        except DaosApiError as excep:
            if err_code not in str(excep):
                msg = "test expected a {0} but did not get it".format(err_code)
                self.d_log.error(msg)
                self.d_log.error(traceback.format_exc())
                self.fail(msg)
        else:
            msg = "test expected a {0} but open() succeeded".format(err_code)
            self.d_log.error(msg)
            self.fail(msg)

    def setUp(self):
        """Start a server, create a pool and container, and seed one object."""
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.workdir)
        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(thedata,
                                                               self.datasize,
                                                               self.dkey,
                                                               self.akey,
                                                               obj_cls=1)

            # read the data back to confirm the fixture is sane
            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                err_str = "Error reading back data, test failed during the " \
                          "initial setup."
                self.d_log.error(err_str)
                self.fail(err_str)

            # setup leaves object in open state, so closing to start clean
            self.obj.close()
        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.")

    def tearDown(self):
        """Destroy the container and pool; always stop/kill the server."""
        try:
            self.container.close()
            self.container.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
        finally:
            ServerUtils.stopServer()
            ServerUtils.killServer(self.hostlist)

    def test_bad_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open a garbage object handle.
        :avocado: tags=object,objopen,objopenbadhand,regression,vm,small
        """
        saved_handle = self.obj.oh
        self.obj.oh = 8675309  # garbage handle value
        try:
            self._expect_obj_open_error("-1002")
        finally:
            self.obj.oh = saved_handle

    def test_invalid_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object with a garbage container
                          handle.
        :avocado: tags=object,objopen,objopenbadconthand,regression,vm,small
        """
        saved_coh = self.container.coh
        self.container.coh = 8675309  # garbage handle value
        try:
            self._expect_obj_open_error("-1002")
        finally:
            self.container.coh = saved_coh

    def test_closed_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          a closed handle.
        :avocado: tags=object,objopen,objopenclosedcont,regression,vm,small
        """
        self.container.close()
        try:
            self._expect_obj_open_error("-1002")
        finally:
            # reopen so tearDown's close() finds a valid handle
            self.container.open()

    def test_pool_handle_as_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Adding this test by request, this test attempts
                          to open an object that's had its handle set to
                          be the same as a valid pool handle.
        :avocado: tags=object,objopen,objopenpoolhandle,regression,vm,small
        """
        saved_oh = self.obj.oh
        self.obj.oh = self.pool.handle
        try:
            self._expect_obj_open_error("-1002")
        finally:
            self.obj.oh = saved_oh

    def test_null_ranklist(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          an empty ranklist.
        :avocado: tags=object,objopen,objopennullrl,regression,vm,small
        """
        # null rl
        saved_rl = self.obj.tgt_rank_list
        self.obj.tgt_rank_list = None
        try:
            self._expect_obj_open_error("-1003")
        finally:
            self.obj.tgt_rank_list = saved_rl

    def test_null_oid(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null object id.
        :avocado: tags=object,objopen,objopennulloid,regression,vm,small
        """
        # null oid
        saved_oid = self.obj.c_oid
        self.obj.c_oid = DaosObjId(0, 0)
        try:
            self._expect_obj_open_error("-1003")
        finally:
            self.obj.c_oid = saved_oid

    def test_null_tgts(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null tgt.
        :avocado: tags=object,objopen,objopennulltgts,regression,vm,small
        """
        # null tgts
        saved_ctgts = self.obj.c_tgts
        self.obj.c_tgts = 0
        try:
            self._expect_obj_open_error("-1003")
        finally:
            self.obj.c_tgts = saved_ctgts

    def test_null_attrs(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null object attributes.
        :avocado: tags=object,objopen,objopennullattr,regression,vm,small
        """
        # null attr
        saved_attr = self.obj.attr
        self.obj.attr = 0
        try:
            self._expect_obj_open_error("-1003")
        finally:
            self.obj.attr = saved_attr
|
<gh_stars>0
#-*- coding: utf8 -*-
import re
# Wikitext recognizers, keyed by the element each one matches.  The raw
# pattern strings below are compiled in bulk immediately after the literal,
# so RGX ultimately maps names to compiled regular-expression objects.
RGX = {
    "page": r"<page>",
    "title": r"<title>(?P<title>[^<]+)</title>",
    "wikid": r"<id>(?P<id>[0-9]+)</id>",
    "term": r"\[\[(?P<term>[0-9a-zA-Z\-' ]+)(#[^\|]+)?(\|[0-9a-zA-Z\-' ]+)?\]\]",
    "etymology_section": r"===Etymology===",
    "synonym_section": r"=====?Synonyms=====?",
    "antonym_section": r"=====?Antonyms=====?",
    "hypernym_section": r"=====?Hypernyms=====?",
    "hyponym_section": r"=====?Hyponyms=====?",
    "abbreviation_section": r"=====?Abbreviations=====?",
    "translation_section": r"====?Translations====?",
    "lang_content_section": r"(^|[^=])==(?P<lang>[a-zA-Z -]+)==([^=]|$)",
    "redirect": r"#REDIRECT( |:|: )?\[\[(?P<term>[^\]]+)\]\]",
    "section_begin": r"(^| +)=+[^=]+=+",
    "prefix": r"{{prefix\|(?P<pre>[^\|]+)\|(?P<stem>[^\|]+)\|lang=(?P<lang>[^}]+)}}",
    "suffix": r"{{suffix\|(?P<stem>[^\|]+)\|(?P<suf>[^\|]+)\|lang=(?P<lang>[^}]+)}}",
    "confix": r"{{confix\|(?P<parts>[^\}]+)\|lang=(?P<lang>[^}]+)}}",
    "affix": r"{{affix\|(?P<lang>[^\|]+)\|(?P<parts>[^\}]+)}}",
    "attr_generic": r"{{([^}]+)}}",
    "attr_specific": r"{{(?P<attr>(context|sense|qualifier|gloss|term|taxlink))\|(?P<val>[^\}]+)(\|lang=(?P<lang>[a-z]+))?}}",
    "label": r"{{(?P<attr>(lb?|label))\|(?P<lang>[a-z]+)(\|(?P<val>[^\}]+))}}",
    "wikne": r"{{(?P<attr>w)\|(?P<val>[0-9a-zA-Z\-' ]+)(#[^\|]+)?(\|[0-9a-zA-Z\-' ]+)?}}",
    "inflection": r"{{(?P<inflec>[a-zA-Z\- ]+) of\|(?P<val>[^\}]+)(\|lang=(?P<lang>[a-z]+))?}}",
    "etym_link": r"{{m\|(?P<lang_orig>[^\|]+)\|(?P<term>[^\}]+)}}",
    "sense": r"^##? (?P<sense>.+)",
    "example": r"^#: ''(?P<example>.+)''$",
    "synonym": r"^\* (?P<syn>.+)",
    "antonym": r"^\* (?P<ant>.+)",
    "hypernym": r"^\* (?P<hyper>.+)",
    "hyponym": r"^\* (?P<hypo>.+)",
    "abbreviation": r"^\* (?P<abbrev>.+)",
    "translation_child": r"\*(?P<child>:)? (?P<lang>[^:]+): (?P<trls>{{.+)",
    "translation_parent": r"\*:? (?P<lang>[^:]+):$",
    "translation_open": r"{{trans-top\|(?P<mng>[^}]+)}}",
    "translation_item": r"{{t\+?\|(?P<langcode>[a-z]+)\|(?P<term>[^\|}]+)[^}]*}}",
    "definition_prefix1": r"^show ",
    "html_escape": r"&[^;]+;"
}
# Titles containing this character (namespaced wiki pages) are rejected.
WIKI_REJECT = r":"
# Compile every pattern once, up front.
RGX = {name: re.compile(pattern) for name, pattern in RGX.items()}
# Order in which attribute templates are tried; the generic template
# capture runs last, as the fallback.  ("attr_specific" handling is
# intentionally disabled.)
ATTR_TYPES_ORDER = [
    "inflection",
    "label",
    "wikne",
    "attr_generic"
]

def _attr_from_inflection(mo):
    # ("inflec", last dash-separated piece of the inflection name,
    #  first pipe-separated target term)
    gd = mo.groupdict()
    return ("inflec", gd["inflec"].split("-")[-1], gd["val"].split("|")[0])

def _attr_from_label(mo):
    gd = mo.groupdict()
    return ("label", gd["val"], gd["lang"])

def _attr_from_wikne(mo):
    return ("wikne", mo.groupdict()["val"])

def _attr_from_generic(mo):
    # Whole template body split on pipes.
    return mo.group(1).split("|")

# Per-type extractor: maps a regex match object to a normalized attribute
# tuple (or, for the generic case, a list of template parts).
ATTR_GEN = {
    "inflection": _attr_from_inflection,
    "label": _attr_from_label,
    "wikne": _attr_from_wikne,
    "attr_generic": _attr_from_generic
}
def clean_expr(s, attr_type="", keeplabels=False):
    """Strip or normalize wiki template markup in *s*.

    With an empty attr_type every substitution pass runs; otherwise only
    the pass named by attr_type is applied.  keeplabels decides whether
    {{label|...}} templates are dropped entirely or reduced to their value.
    """
    def wants(kind):
        # Empty attr_type means "apply every pass".
        return not attr_type or attr_type == kind

    result = s
    if wants("term"):
        result = RGX["term"].sub(r"\g<term>", result)
    if wants("inflection"):
        result = RGX["inflection"].sub(r"\g<inflec> \g<val>", result)
    if wants("attr_specific"):
        result = RGX["attr_specific"].sub(r"", result)
    if wants("label"):
        label_repl = r"\g<val>" if keeplabels else r""
        result = RGX["label"].sub(label_repl, result)
    if wants("wikne"):
        result = RGX["wikne"].sub(r"\g<val>", result)
    return result.strip()
|
#!/usr/bin/env python
from __future__ import with_statement
import argparse
import hashlib
import os
import sys
import tempfile
import shutil
import logging
from logging import getLogger, StreamHandler
# Update here and in setup.py
VERSION = '1.0.0rc3-dev'
try:
import boto # noqa
import boto.s3.connection
import boto.exception
except ImportError:
boto = None # noqa
from virtualenv import ( # noqa
call_subprocess,
create_bootstrap_script,
)
logger = getLogger(__name__)
# http://www.astro.keele.ac.uk/oldusers/rno/Computing/File_magic.html
# File-type signatures used by get_type() and the ELF check in
# Terrarium.replace_all_in_directory().  Each value is a
# (magic_string, byte_offset) pair.
# NOTE(review): the real ELF magic is '\x7fELF'; the '.' here (0x2e)
# presumably stands in for the 0x7f byte, so this entry will not match
# actual ELF headers -- verify against the intended binaries.
MAGIC_NUM = {
    # magic code, offset
    'ELF': ('.ELF', 0),
    'GZIP': ('\x1f\x8b', 0),
    'BZIP': ('\x42\x5a', 0),
    'TAR': ('ustar', 257),
}
def rmtree(path):
    """Remove *path* whatever it is: symlink, directory tree, or file.

    Returns True on success, False (after logging) when the removal raises
    OSError, e.g. for permission problems.
    """
    try:
        if os.path.islink(path):
            # Remove the link itself, never the link target.
            os.unlink(path)
        elif os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.unlink(path)
        return True
    except OSError as why:
        # `except E, v` is legacy syntax; `as` works on Python 2.6+ (this
        # file already requires 2.6+ for argparse).  warn() is a deprecated
        # alias of warning().
        logger.warning(
            'Failed to remove %s. '
            'Make sure you have permissions to this path. '
            '%s' % (path, why)
        )
        return False
# Helper method to determine the actual type of the file without relying on
# the file extension
def get_type(path):
    """Return the MAGIC_NUM key ('ELF', 'GZIP', ...) whose magic bytes match
    the file at *path*, or None when nothing matches."""
    # Open in binary mode: text mode would newline-translate on some
    # platforms and corrupt the magic bytes / seek offsets.
    with open(path, 'rb') as f:
        for file_type, magic in MAGIC_NUM.items():
            f.seek(magic[1])
            if magic[0] == f.read(len(magic[0])):
                return file_type
    return None
class Terrarium(object):
    """Builds, archives, uploads, downloads and installs self-contained
    virtualenv bundles described by a set of pip requirement files."""

    def __init__(self, args):
        # Parsed argparse namespace; see parse_args().
        self.args = args
        self._requirements = None  # cache for the `requirements` property
        self._digest = None        # cache for the `digest` property
        logger.debug('Terrarium created with %s', args)

    @property
    def digest(self):
        """Hex digest (args.digest_type) of the sorted requirement set."""
        if self._digest is not None:
            return self._digest
        m = hashlib.new(self.args.digest_type)
        m.update('\n'.join(self.requirements))
        self._digest = m.hexdigest()
        return self._digest

    @property
    def requirements(self):
        """Sorted requirement lines gathered from every existing file in
        args.reqs, with blanks and '#' comments stripped (cached)."""
        if self._requirements is not None:
            return self._requirements
        lines = []
        for arg in self.args.reqs:
            if os.path.exists(arg):
                with open(arg, 'r') as f:
                    for line in f:
                        line = line.strip()
                        if line and not line.startswith('#'):
                            lines.append(line)
        self._requirements = sorted(lines)
        return self._requirements

    def install(self):
        """Build (or download) the environment and move it into place.

        Returns 0 on success, 1 when an existing environment could not be
        swapped out or in.
        """
        logger.debug('Running install')
        old_target = os.path.abspath(self.args.target)
        new_target = old_target
        prompt = os.path.basename(new_target)
        # Are we building a new environment, or replacing an existing one?
        old_target_exists = os.path.exists(os.path.join(
            old_target,
            'bin',
            'activate',
        ))
        if old_target_exists:
            # Build next to the old environment so the final rename stays on
            # the same filesystem.
            new_target = tempfile.mkdtemp(
                prefix='%s.' % os.path.basename(old_target),
                dir=os.path.dirname(old_target),
            )
        # Can the requested environment be downloaded?
        downloaded = False
        if self.args.download:
            downloaded = self.download(new_target)
        if not downloaded:
            # Create a self-contained script to create a virtual environment
            # and install all of the requested requirements
            logger.info('Building new environment')
            fd, bootstrap = tempfile.mkstemp(
                prefix='terrarium_bootstrap-',
                suffix='.py',
            )
            self.create_bootstrap(bootstrap)
            # Run the bootstrap script which pip installs everything that has
            # been defined as a requirement
            call_subprocess([
                sys.executable,
                bootstrap,
                '--prompt=(%s)' % prompt,
                new_target
            ])
            # Do we want to copy the bootstrap into the environment for
            # future use?
            if self.args.bootstrap:
                logger.info('Copying bootstrap script to new environment')
                dest = os.path.join(
                    new_target, 'bin', 'terrarium_bootstrap.py')
                shutil.copyfile(bootstrap, dest)
                os.chmod(dest, 0o744)  # rwxr--r--
            os.close(fd)
            os.unlink(bootstrap)
            if self.args.upload:
                self.upload(new_target)
        old_target_backup = '%s%s' % (old_target, self.args.backup_suffix)
        if old_target_exists:
            logger.info('Moving old environment out of the way')
            if os.path.exists(old_target_backup):
                # A stale backup is in the way; drop it, or fall back to a
                # fresh temp location when it cannot be removed.
                if not rmtree(old_target_backup):
                    old_target_backup = tempfile.mkdtemp(
                        prefix='terrarium_old_backup_target-'
                    )
                    old_target_backup = os.path.join(old_target_backup, prompt)
            logger.info(
                'Backing environment up to %s' % old_target_backup)
            try:
                os.rename(old_target, old_target_backup)
            except OSError as why:
                logger.error(
                    'Failed to move environment out of the way. '
                    'Check that you have the correct permissions. '
                    '%s' % why
                )
                return 1
            # Fix paths: the freshly built env refers to its temp location;
            # rewrite those to the final target path.
            Terrarium.replace_all_in_directory(
                os.path.join(new_target, 'bin'),
                new_target,
                old_target,
            )
        try:
            # move the new environment into the target's place
            os.rename(new_target, old_target)
        except OSError as why:
            logger.error(
                'Failed to move the new environment into the correct path. '
                'Check that you have the correct permissions. '
                '%s' % why
            )
            return 1
        # Do we keep a backup of the old environment around or wipe it?
        if os.path.isdir(old_target_backup) and not self.args.backup:
            logger.info('Deleting old environment')
            rmtree(old_target_backup)
        logger.info('Terrarium is finished')
        return 0

    @staticmethod
    def replace_all_in_directory(location, old,
                                 replace='__VIRTUAL_ENV__', binary=False):
        """Replace every occurrence of *old* with *replace* in the files
        directly inside *location*.

        Unless *binary* is true, files starting with the ELF magic are
        skipped so compiled executables are not corrupted.
        """
        # Bug fix: MAGIC_NUM['ELF'] is a (magic, offset) tuple.  The
        # original read len(tuple) == 2 bytes and compared the header
        # against the tuple itself, which is never equal to a str -- so
        # ELF files were never actually skipped.
        elf_magic = MAGIC_NUM['ELF'][0]
        for name in os.listdir(location):
            full_path = os.path.join(location, name)
            data = None
            with open(full_path) as f:
                header = f.read(len(elf_magic))
                # Skip binary files
                if binary or header != elf_magic:
                    data = header + f.read()
            if not data:
                continue
            new_data = data.replace(old, replace)
            if new_data == data:
                continue
            with open(full_path, 'w') as f:
                f.write(new_data)

    @staticmethod
    def wipe_all_precompiled_python_files_in_dir(path):
        """Delete every *.pyc / *.pyo under *path*."""
        return call_subprocess([
            'find', path, '-type', 'f', '-name', '*.py[c|o]', '-delete'
        ])

    @staticmethod
    def make_bin_dir_paths_relative(bin_dir, target):
        """Replace absolute env paths in bin/ with the portable marker."""
        Terrarium.replace_all_in_directory(bin_dir, target)

    @staticmethod
    def make_bin_dir_paths_absolute(bin_dir, target):
        """Turn the portable marker in bin/ back into the absolute path."""
        Terrarium.replace_all_in_directory(
            bin_dir,
            '__VIRTUAL_ENV__',
            target,
        )

    def archive(self, target):
        """Tar (and optionally gzip) the environment at *target*.

        Returns the archive path.  The environment's bin/ paths are made
        relative for packing and restored afterwards.
        """
        logger.info('Building terrarium bundle')
        bin_dir = os.path.join(target, 'bin')
        Terrarium.wipe_all_precompiled_python_files_in_dir(target)
        Terrarium.make_bin_dir_paths_relative(bin_dir, target)
        archive = '%s.tar' % target
        # Create an archive of the environment; the python binary is
        # excluded and restored from the host on extract.
        call_subprocess([
            'tar',
            '--exclude', '.svn',
            '--exclude', '.git',
            '--exclude', '.bzr',
            '--exclude', '.hg',
            '--exclude', 'bin/python',
            '-cf', archive,
            '-C', target,
            '.'
        ])
        if self.args.compress:
            # Compress the tarball
            call_subprocess(['gzip', archive])
            archive = '%s.gz' % archive
        Terrarium.make_bin_dir_paths_absolute(bin_dir, target)
        return archive

    def extract(self, archive, target):
        """Unpack *archive* into *target* and restore a python binary."""
        logger.info('Extracting terrarium bundle')
        archive_type = get_type(archive)
        if archive_type == 'GZIP':
            tar_op = 'xzf'
        elif archive_type == 'BZIP':
            tar_op = 'xjf'
        elif archive_type == 'TAR':
            tar_op = 'xf'
        else:
            logger.error(
                'Failed to extract archive, unknown or unsupported file type')
            return
        if not os.path.exists(target):
            os.mkdir(target)
        call_subprocess(['tar', tar_op, archive, '-C', target])
        bin_dir = os.path.join(target, 'bin')
        # Restore python binary (archives are built without bin/python).
        path_to_python = sys.executable
        dest = python_binary = os.path.basename(path_to_python)
        if python_binary.startswith('python') and python_binary != 'python':
            # e.g. host binary is "python2.7" -> install it as "python".
            dest = 'python'
        dest = os.path.join(bin_dir, dest)
        if not os.path.exists(dest):
            call_subprocess([
                'cp',
                path_to_python,
                dest,
            ])
        # Fix up paths
        Terrarium.make_bin_dir_paths_absolute(bin_dir, target)

    def _get_s3_bucket(self):
        """Return the configured S3 bucket, creating it if needed.

        Returns None when boto is unavailable.
        """
        if not boto:
            return None
        conn = boto.s3.connection.S3Connection(
            aws_access_key_id=self.args.s3_access_key,
            aws_secret_access_key=self.args.s3_secret_key
        )
        try:
            conn.create_bucket(
                self.args.s3_bucket,
                policy='public-read',
            )
        except boto.exception.S3CreateError:
            # Bucket already exists; that is fine.
            pass
        return boto.s3.bucket.Bucket(conn, name=self.args.s3_bucket)

    def download(self, target):
        """Try to fetch a prebuilt bundle into *target*.

        Checks the shared storage directory first, then S3.  Returns True
        on success, a falsy value otherwise.
        """
        if self.args.storage_dir:
            remote_archive = os.path.join(
                self.args.storage_dir,
                self.make_remote_key(),
            )
            if os.path.exists(remote_archive):
                logger.info(
                    'Copying environment from %s (this may take time) ...'
                    % self.args.storage_dir,
                )
                local_archive = '%s.tar.gz' % target
                shutil.copyfile(
                    remote_archive,
                    local_archive,
                )
                self.extract(local_archive, target)
                os.unlink(local_archive)
                return True
            logger.error('Download archive failed')
        if boto and self.args.s3_bucket:
            bucket = self._get_s3_bucket()
            if bucket:
                remote_key = self.make_remote_key()
                key = bucket.get_key(remote_key)
                if key:
                    logger.info(
                        'Downloading %s/%s from S3 '
                        '(this may take time) ...'
                        % (self.args.s3_bucket, remote_key)
                    )
                    fd, archive = tempfile.mkstemp()
                    key.get_contents_to_filename(archive)
                    self.extract(archive, target)
                    os.close(fd)
                    os.unlink(archive)
                    return True

    def make_remote_key(self):
        """Render args.remote_key_format with digest/python/arch details."""
        import platform
        major, minor, patch = platform.python_version_tuple()
        context = {
            'digest': self.digest,
            'python_vmajor': major,
            'python_vminor': minor,
            'python_vpatch': patch,
            'arch': platform.machine(),
        }
        return self.args.remote_key_format % context

    def upload_to_storage_dir(self, target, storage_dir):
        """Archive *target* and copy the bundle into *storage_dir*."""
        logger.info('Copying environment to storage directory')
        dest = os.path.join(storage_dir, self.make_remote_key())
        if os.path.exists(dest):
            logger.error(
                'Environment already exists at %s'
                % dest,
            )
            return
        archive = self.archive(target)
        if not archive:
            # Bug fix: the original logged this error and then copied
            # anyway, which would crash on a falsy archive path.
            logger.error('Archiving failed')
            return
        shutil.copyfile(archive, dest)
        logger.info('Archive copied to storage directory')
        os.unlink(archive)

    def upload_to_s3(self, target):
        """Archive *target* and upload it to S3, retrying on failure.

        Returns True on success, False when no bucket/archive is available;
        re-raises the upload error once s3_max_retries is exceeded.
        """
        logger.info('Uploading environment to S3')
        bucket = self._get_s3_bucket()
        if not bucket:
            return False
        key = bucket.new_key(self.make_remote_key())
        archive = self.archive(target)
        if not archive:
            logger.error('Archiving failed')
            return False
        # Bug fix: the original incremented an attempt counter and logged
        # "Retrying S3 upload" but had no loop, so it never retried.
        attempts = 0
        while True:
            try:
                key.set_contents_from_filename(archive)
                logger.debug('upload finished')
                os.unlink(archive)
                return True
            except Exception:
                attempts = attempts + 1
                logger.warning('There was an error uploading the file')
                if attempts > self.args.s3_max_retries:
                    logger.error(
                        'Attempted to upload archive to S3, but failed'
                    )
                    raise
                logger.info('Retrying S3 upload')

    def upload(self, target):
        """Upload the environment to every configured destination."""
        if self.args.storage_dir:
            self.upload_to_storage_dir(target,
                                       self.args.storage_dir)
        if boto and self.args.s3_bucket:
            self.upload_to_s3(target)

    def create_bootstrap(self, dest):
        """Write a standalone virtualenv bootstrap script to *dest* that
        pip-installs the current requirement set."""
        extra_text = (
            TERRARIUM_BOOTSTRAP_EXTRA_TEXT %
            {
                'REQUIREMENTS': self.requirements,
                'VENV_LOGGING': self.args.virtualenv_log_level,
                'PIP_LOGGING': self.args.pip_log_level,
            }
        )
        output = create_bootstrap_script(extra_text)
        with open(dest, 'w') as f:
            f.write(output)
TERRARIUM_BOOTSTRAP_EXTRA_TEXT = '''
def adjust_options(options, args):
options.use_distribute = True
options.system_site_packages = False
REQUIREMENTS = %(REQUIREMENTS)s
def after_install(options, base):
# Debug logging for virtualenv
logger.consumers = [(%(VENV_LOGGING)s, sys.stdout)]
home_dir, lib_dir, inc_dir, bin_dir = path_locations(base)
# Update prefix and executable to point to the virtualenv
sys.prefix = os.path.abspath(base)
sys.exec_prefix = sys.prefix
sys.executable = join(os.path.abspath(bin_dir), 'python')
# Create a symlink for pythonM.N
pyversion = (sys.version_info[0], sys.version_info[1])
pyversion_path = join(bin_dir, 'python%%d.%%d' %% pyversion)
# If virtualenv is run using pythonM.N, that binary will already exist so
# there's no need to create it
if not os.path.exists(pyversion_path):
os.symlink('python', pyversion_path)
# Activate the virtualenv
activate_this = join(bin_dir, 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
import pip
from pip.commands.install import InstallCommand
import shlex
# Debug logging for pip
pip.logger.consumers = [(%(PIP_LOGGING)s, sys.stdout)]
# If we are on a version of pip before 1.2, load version control modules
# for installing 'editables'
if hasattr(pip, 'version_control'):
pip.version_control()
# Run pip install
c = InstallCommand()
reqs = shlex.split(' '.join(REQUIREMENTS))
options, args = c.parser.parse_args(reqs)
options.require_venv = True
options.ignore_installed = True
requirementSet = c.run(options, args)
make_environment_relocatable(base)
'''
def parse_args():
    """Build and parse the terrarium command line.

    Returns the parsed argparse namespace.  Calls ap.error() (which exits)
    when --s3-bucket is given but boto is not importable.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument(
        '-V', '--version',
        action='version',
        version='%(prog)s ' + VERSION,
    )
    # -v/-q accumulate +/-10 offsets onto INFO; main() sums them into a
    # logging level.
    ap.add_argument(
        '-v', '--verbose',
        action='append_const',
        const=-10,
        default=[logging.INFO],
        dest='v',
        help='Increase verbosity',
    )
    ap.add_argument(
        '-q', '--quiet',
        action='append_const',
        const=10,
        default=[logging.INFO],
        dest='v',
        help='Decrease verbosity',
    )
    ap.add_argument(
        '-t', '--target',
        dest='target',
        default=os.environ.get('VIRTUAL_ENV', None),
        help='''
            Replace or build new environment at this location. If you are
            already within a virtual environment, this option defaults to
            VIRTUAL_ENV.
        ''',
    )
    ap.add_argument(
        '--pip-log-level',
        type=int,  # Fix: keep the level numeric even when given on the CLI
        default=25,
        help='''
            Set the log level for pip
        ''',
    )
    ap.add_argument(
        '--virtualenv-log-level',
        type=int,  # Fix: keep the level numeric even when given on the CLI
        default=25,
        help='''
            Set the log level for virtualenv
        ''',
    )
    ap.add_argument(
        '--no-download',
        default=True,
        action='store_false',
        dest='download',
        help='''
            If an external storage location is specified, terrarium will
            attempt to download an existing terrarium bundle instead of
            building a new one. Using --no-download forces terrarium to build a
            new environment.
        ''',
    )
    ap.add_argument(
        '--no-upload',
        default=True,
        action='store_false',
        dest='upload',
        help='''
            If an external storage location is specified, terrarium will upload
            a new environment after it has been built. Using --no-upload,
            terrarium will not upload the resulting environment to the external
            storage location.
        ''',
    )
    ap.add_argument(
        '--no-backup',
        default=True,
        action='store_false',
        dest='backup',
        help='''
            By default, terrarium preserves the old environment. See
            --backup-suffix. Using this option, terrarium will delete the old
            environment.
        ''',
    )
    ap.add_argument(
        '--backup-suffix',
        default='.bak',
        help='''
            The suffix to use when preserving an old environment. This option
            is ignored if --no-backup is used. Default is .bak.
        '''
    )
    ap.add_argument(
        '--no-compress',
        default=True,
        action='store_false',
        dest='compress',
        help='''
            By default, terrarium compresses the archive using gzip before
            uploading it.
        ''',
    )
    ap.add_argument(
        '--storage-dir',
        default=os.environ.get('TERRARIUM_STORAGE_DIR', None),
        help='''
            Path to a directory in which terrarium bundles will be retrieved
            and stored for speedy re-installation. This will usually be a
            shared drive.
        ''',
    )
    ap.add_argument(
        '--digest-type',
        default='md5',
        help='Choose digest type (md5, sha, see hashlib). Default is md5.',
    )
    ap.add_argument(
        '--no-bootstrap',
        default=True,
        action='store_false',
        dest='bootstrap',
        help='''
            By default, terrarium will create a script called
            'terrarium_bootstrap.py' in the new environment bin directory.
            Running this script will create a new environment at the specified
            location using all of the packages that were defined at the time of
            its creation. To prevent this script from being created, use
            --no-bootstrap.
        ''',
    )
    default_remote_key_format = '''
        %(arch)s-%(python_vmajor)s.%(python_vminor)s-%(digest)s
    '''.strip()
    ap.add_argument(
        '--remote-key-format',
        default=default_remote_key_format,
        help='''
            Key name format to use when storing the archive. Default is "%s"
        ''' % default_remote_key_format.replace('%', '%%'),
    )
    ap.add_argument(
        '--s3-bucket',
        default=os.environ.get('S3_BUCKET', None),
        help='''
            S3 bucket name. Defaults to S3_BUCKET env variable.
        '''
    )
    ap.add_argument(
        '--s3-access-key',
        default=os.environ.get('S3_ACCESS_KEY', None),
        help='''
            Defaults to S3_ACCESS_KEY env variable.
        '''
    )
    ap.add_argument(
        '--s3-secret-key',
        default=os.environ.get('S3_SECRET_KEY', None),
        help='''
            Defaults to S3_SECRET_KEY env variable.
        '''
    )
    ap.add_argument(
        '--s3-max-retries',
        # Bug fix: upload_to_s3 compares this against an integer attempt
        # counter; without type=int an S3_MAX_RETRIES env value or a
        # command-line value would be a str.  argparse applies `type` to
        # string defaults as well.
        type=int,
        default=os.environ.get('S3_MAX_RETRIES', 3),
        help='''
            Number of times to attempt a S3 operation before giving up.
            Default is 3.
        ''',
    )
    subparsers = ap.add_subparsers(
        title='Basic Commands',
        dest='command',
    )
    commands = {
        'hash': subparsers.add_parser(
            'hash',
            help='Display digest for current requirement set',
        ),
        'key': subparsers.add_parser(
            'key',
            help='Display remote key for current requirement set and platform',
        ),
        'exists': subparsers.add_parser(
            'exists',
            help='''
                Return exit code 0 if environment matches requirement set
            ''',
        ),
        'install': subparsers.add_parser(
            'install',
            help='''
                Replace current environment with the one given by the
                requirement set.
            ''',
        ),
    }
    # Every subcommand accepts trailing requirement-file paths.
    for command in commands.values():
        command.add_argument('reqs', nargs=argparse.REMAINDER)
    args = ap.parse_args()
    if not boto and args.s3_bucket is not None:
        ap.error(
            '--s3-bucket requires that you have boto installed, '
            'which does not appear to be the case'
        )
    return args
def main():
    """Command-line entry point: configure logging and dispatch subcommand."""
    args = parse_args()
    # -v/-q appended +/-10 offsets to the base INFO level; sum them and
    # clamp at DEBUG.
    log_level = max(logging.DEBUG, sum(args.v))
    logger.setLevel(log_level)
    logger.addHandler(StreamHandler())
    terrarium = Terrarium(args)
    if args.command == 'hash':
        sys.stdout.write('%s\n' % terrarium.digest)
    if args.command == 'key':
        key = terrarium.make_remote_key()
        sys.stdout.write('%s\n' % key)
    # NOTE(review): parse_args() registers an 'exists' subcommand, but this
    # dispatch handles 'check' instead, and Terrarium defines no is_clean()
    # method -- this branch appears dead/broken; confirm the intended
    # command name and implementation.
    elif args.command == 'check':
        if terrarium.is_clean():
            sys.exit(0)
        else:
            sys.exit(1)
    elif args.command == 'install':
        r = terrarium.install()
        sys.exit(r)
if __name__ == '__main__':
    main()
|
import pytest
from requests import Response
from tests.conftest import create_mock_response
from tests.conftest import TEST_DEVICE_GUID
from py42.exceptions import Py42HTTPError
from py42.exceptions import Py42StorageSessionInitializationError
from py42.services._connection import Connection
from py42.services.devices import DeviceService
from py42.services.storage._auth import StorageAuth
from py42.services.storage._service_factory import ConnectionManager
from py42.services.storage._service_factory import StorageServiceFactory
from py42.services.storage.archive import StorageArchiveService
from py42.services.storage.exfiltrateddata import ExfiltratedDataService
from py42.services.storage.preservationdata import StoragePreservationDataService
@pytest.fixture
def mock_tmp_auth(mocker):
    """StorageAuth mock whose storage URL resolves to testhost.com."""
    auth = mocker.MagicMock(spec=StorageAuth)
    auth.get_storage_url.return_value = "testhost.com"
    return auth
@pytest.fixture
def mock_device_service(mocker):
    """DeviceService mock reporting a single backup destination (guid 123)."""
    device_service = mocker.MagicMock(spec=DeviceService)
    device_service.get_by_guid.return_value = create_mock_response(
        mocker, '{"backupUsage": [{"targetComputerGuid": "123"}]}'
    )
    return device_service
@pytest.fixture
def mock_connection_manager(mocker):
    """Bare ConnectionManager mock."""
    return mocker.MagicMock(spec=ConnectionManager)
class TestStorageServiceFactory:
    """Tests for the service-creation helpers on StorageServiceFactory."""

    @staticmethod
    def _build_factory(connection, device_service, connection_manager):
        # Small helper to keep each test focused on its assertion.
        return StorageServiceFactory(connection, device_service, connection_manager)

    def test_create_archive_service(
        self, mock_successful_connection, mock_device_service, mock_connection_manager
    ):
        factory = self._build_factory(
            mock_successful_connection, mock_device_service, mock_connection_manager
        )
        archive_service = factory.create_archive_service("testguid", None)
        assert type(archive_service) == StorageArchiveService

    def test_create_archive_service_when_given_destination_guid_does_not_call_device_service(
        self, mock_successful_connection, mock_device_service, mock_connection_manager
    ):
        factory = self._build_factory(
            mock_successful_connection, mock_device_service, mock_connection_manager
        )
        archive_service = factory.create_archive_service("testguid", destination_guid=42)
        # An explicit destination guid makes the device lookup unnecessary.
        assert mock_device_service.get_by_guid.call_count == 0
        assert type(archive_service) == StorageArchiveService

    def test_auto_select_destination_guid_when_device_has_no_destination_raises_exception(
        self,
        mock_successful_connection,
        mock_device_service,
        mock_connection_manager,
        mocker,
    ):
        factory = self._build_factory(
            mock_successful_connection, mock_device_service, mock_connection_manager
        )
        # Override the fixture response with one that has no destinations.
        empty_response = create_mock_response(mocker, '{"backupUsage": []}')
        mock_device_service.get_by_guid.return_value = empty_response
        with pytest.raises(Exception):
            factory.auto_select_destination_guid(TEST_DEVICE_GUID)

    def test_preservation_data_service(
        self, mock_successful_connection, mock_device_service, mock_connection_manager
    ):
        factory = self._build_factory(
            mock_successful_connection, mock_device_service, mock_connection_manager
        )
        pds = factory.create_preservation_data_service("testhost.com")
        assert type(pds) == StoragePreservationDataService

    def test_exfiltrated_data_service(
        self, mock_successful_connection, mock_device_service, mock_connection_manager
    ):
        factory = self._build_factory(
            mock_successful_connection, mock_device_service, mock_connection_manager
        )
        eds = factory.create_exfiltrated_data_service("testhost.com")
        assert type(eds) == ExfiltratedDataService
class TestStorageSessionManager:
    """Tests for ConnectionManager's storage-connection caching."""

    def test_get_storage_session_calls_session_factory_with_token_provider(
        self, mock_tmp_auth
    ):
        manager = ConnectionManager()
        connection = manager.get_storage_connection(mock_tmp_auth)
        assert type(connection) == Connection

    def test_get_storage_session_with_multiple_calls_returns_same_session(
        self, mock_tmp_auth
    ):
        manager = ConnectionManager()
        first = manager.get_storage_connection(mock_tmp_auth)
        second = manager.get_storage_connection(mock_tmp_auth)
        # The connection for a given storage URL is created once and cached.
        assert first is second

    def test_get_storage_session_raises_session_init_error_when_tmp_auth_raises_http_error(
        self, mock_tmp_auth, http_error, mocker
    ):
        error = http_error
        error.response = mocker.MagicMock(spec=Response)
        error.response.text = ""
        mock_tmp_auth.get_storage_url.side_effect = Py42HTTPError(error)
        manager = ConnectionManager()
        with pytest.raises(Py42StorageSessionInitializationError):
            manager.get_storage_connection(mock_tmp_auth)

    def test_get_storage_session_get_saved_session_initially_returns_none(self):
        manager = ConnectionManager()
        assert manager.get_saved_connection_for_url("testhost.com") is None

    def test_get_saved_session_returns_session_after_successful_call_to_get_session(
        self, mock_tmp_auth
    ):
        manager = ConnectionManager()
        manager.get_storage_connection(mock_tmp_auth)
        assert manager.get_saved_connection_for_url("testhost.com") is not None
|
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import get_nonlinear_func, expand_tensor, sample_laplace_noise, sample_unit_laplace_noise
from models.layers import MLP, WNMLP, Identity
def add_gaussian_noise(input, std):
    """Add N(0, std^2) noise to `input`; returns (noisy_input, unit_noise)."""
    unit_noise = torch.randn_like(input)
    return input + std * unit_noise, unit_noise
def add_uniform_noise(input, val):
    """Add U(-val, val) noise to `input`.

    Returns (noisy_input, raw_draw) where raw_draw is the raw U(0, 1)
    sample, not the shifted noise actually added.
    """
    raw_draw = torch.rand_like(input)
    shifted = 2. * val * raw_draw - val
    return input + shifted, raw_draw
def add_laplace_noise(input, scale):
    """Add Laplace(0, scale) noise to `input`; returns (noisy_input, unit_noise)."""
    unit_noise = sample_unit_laplace_noise(shape=input.size(), dtype=input.dtype, device=input.device)
    noisy = input + scale * unit_noise
    return noisy, unit_noise
class DAE(nn.Module):
    """Denoising autoencoder with a fixed corruption level.

    The network `main` is trained so that std * main(x + noise) predicts
    -noise, i.e. it approximates the score (gradient of the log-density)
    of the noise-smoothed data distribution.
    """
    def __init__(self,
                 input_dim=2,
                 h_dim=1000,
                 std=0.1,
                 num_hidden_layers=1,
                 nonlinearity='tanh',
                 noise_type='gaussian',
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.h_dim = h_dim
        self.std = std  # default corruption level
        self.num_hidden_layers = num_hidden_layers
        self.nonlinearity = nonlinearity
        self.noise_type = noise_type  # 'gaussian' | 'uniform' | 'laplace'
        self.main = MLP(input_dim, h_dim, input_dim, use_nonlinearity_output=False, num_hidden_layers=num_hidden_layers, nonlinearity=nonlinearity)

    def add_noise(self, input, std=None):
        """Corrupt `input` with the configured noise type.

        Returns (noisy_input, raw_noise); raises NotImplementedError for an
        unknown noise_type.
        """
        std = self.std if std is None else std
        if self.noise_type == 'gaussian':
            return add_gaussian_noise(input, std)
        elif self.noise_type == 'uniform':
            return add_uniform_noise(input, std)
        elif self.noise_type == 'laplace':
            return add_laplace_noise(input, std)
        else:
            raise NotImplementedError

    def loss(self, input, target):
        """Mean-squared reconstruction error."""
        return F.mse_loss(input, target)

    def forward(self, input, std=None):
        """Run one denoising-score-matching step; returns (None, loss)."""
        std = self.std if std is None else std
        input = input.view(-1, self.input_dim)
        # Corrupt, then train std * main(noisy) to predict -noise.
        # (Fix: removed an unused `batch_size` local.)
        x_bar, eps = self.add_noise(input, std)
        glogprob = self.main(x_bar)
        loss = self.loss(std * glogprob, -eps)
        return None, loss

    def glogprob(self, input, std=None):
        """Score estimate at `input`.

        `std` is accepted for API symmetry with ARDAE but is not used by
        the network here.
        """
        input = input.view(-1, self.input_dim)
        return self.main(input)
class ARDAE(nn.Module):
    """DAE conditioned on the noise scale (AR-DAE style).

    The per-sample noise std is appended as one extra input feature, so a
    single network handles a continuum of noise levels.
    """
    def __init__(self,
                 input_dim=2,
                 h_dim=1000,
                 std=0.1,
                 num_hidden_layers=1,
                 nonlinearity='tanh',
                 noise_type='gaussian',
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.h_dim = h_dim
        self.std = std
        self.num_hidden_layers = num_hidden_layers
        self.nonlinearity = nonlinearity
        self.noise_type = noise_type
        # input_dim + 1: the extra feature carries the noise std.
        self.main = MLP(input_dim+1, h_dim, input_dim, use_nonlinearity_output=False, num_hidden_layers=num_hidden_layers, nonlinearity=nonlinearity)

    def add_noise(self, input, std=None):
        """Corrupt `input` with the configured noise type; returns (noisy, eps)."""
        level = self.std if std is None else std
        samplers = {
            'gaussian': add_gaussian_noise,
            'uniform': add_uniform_noise,
            'laplace': add_laplace_noise,
        }
        if self.noise_type not in samplers:
            raise NotImplementedError
        return samplers[self.noise_type](input, level)

    def loss(self, input, target):
        """Mean-squared reconstruction error."""
        return F.mse_loss(input, target)

    def forward(self, input, std=None):
        """Noise-conditioned denoising step; returns (None, loss)."""
        num_rows = input.size(0)
        flat = input.view(-1, self.input_dim)
        if std is None:
            # Default to a zero noise level per row.
            std = flat.new_zeros(num_rows, 1)
        else:
            assert torch.is_tensor(std)
        noisy, eps = self.add_noise(flat, std)
        # Feed the noise level alongside the corrupted sample.
        net_input = torch.cat([noisy, std], dim=1)
        score = self.main(net_input)
        # std * score is trained to predict -eps.
        return None, self.loss(std * score, -eps)

    def glogprob(self, input, std=None):
        """Score estimate at `input` for noise level `std`."""
        num_rows = input.size(0)
        flat = input.view(-1, self.input_dim)
        if std is None:
            std = flat.new_zeros(num_rows, 1)
        else:
            assert torch.is_tensor(std)
        return self.main(torch.cat([flat, std], dim=1))
class ConditionalDAE(nn.Module):
    """DAE whose denoiser is additionally conditioned on a context vector.

    Input and context are (optionally) encoded by separate MLPs, then
    concatenated and fed to the denoising network `dae`.
    """
    def __init__(self,
            input_dim=2, #10,
            h_dim=128,
            context_dim=2,
            std=0.01,
            num_hidden_layers=1,
            nonlinearity='tanh',
            noise_type='gaussian',
            enc_input=True,
            enc_ctx=True,
            #init=True,
            ):
        super().__init__()
        self.input_dim = input_dim
        self.h_dim = h_dim
        self.context_dim = context_dim
        self.std = std  # default corruption level
        self.num_hidden_layers = num_hidden_layers
        self.nonlinearity = nonlinearity
        self.noise_type = noise_type  # 'gaussian' | 'uniform' | 'laplace'
        # When the input/context is pre-encoded, the denoiser sees h_dim
        # features for it; otherwise it sees the raw dimensionality.
        self.enc_input = enc_input
        if self.enc_input:
            inp_dim = h_dim
        else:
            inp_dim = input_dim
        self.enc_ctx = enc_ctx
        if self.enc_ctx:
            ctx_dim = h_dim
        else:
            ctx_dim = context_dim
        #self.init = init
        self.ctx_encode = Identity() if not self.enc_ctx \
                else MLP(context_dim, h_dim, h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)
        self.inp_encode = Identity() if not self.enc_input \
                else MLP(input_dim, h_dim, h_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers-1, use_nonlinearity_output=True)
        self.dae = MLP(inp_dim+ctx_dim, h_dim, input_dim, nonlinearity=nonlinearity, num_hidden_layers=num_hidden_layers, use_nonlinearity_output=False)
    def reset_parameters(self):
        # NOTE(review): relies on the project MLP exposing a `fc` layer --
        # confirm against models/layers.py.
        nn.init.normal_(self.dae.fc.weight)
    def add_noise(self, input, std=None):
        """Corrupt `input` with the configured noise type; returns (noisy, eps)."""
        std = self.std if std is None else std
        if self.noise_type == 'gaussian':
            return add_gaussian_noise(input, std)
        elif self.noise_type == 'uniform':
            return add_uniform_noise(input, std)
        elif self.noise_type == 'laplace':
            return add_laplace_noise(input, std)
        else:
            raise NotImplementedError
    def loss(self, input, target):
        """Mean-squared reconstruction error."""
        # recon loss (likelihood)
        recon_loss = F.mse_loss(input, target)#, reduction='sum')
        return recon_loss
    def forward(self, input, context, std=None):
        """Context-conditioned denoising step; returns (None, loss)."""
        # init
        assert input.dim() == 3 # bsz x ssz x x_dim
        assert context.dim() == 3 # bsz x 1 x ctx_dim
        std = self.std if std is None else std
        batch_size = input.size(0)
        sample_size = input.size(1)
        # reschape
        input = input.view(batch_size*sample_size, self.input_dim) # bsz*ssz x xdim
        # Repeat the single context row once per sample.
        # NOTE(review): exact broadcasting comes from utils.expand_tensor;
        # presumably yields bsz*ssz rows -- confirm against utils.
        _, context = expand_tensor(context, sample_size=sample_size, do_unsqueeze=False) # bsz*ssz x xdim
        #context = context.view(batch_size*sample_size, -1) # bsz*ssz x xdim
        # add noise
        x_bar, eps = self.add_noise(input, std)
        # encode
        ctx = self.ctx_encode(context)
        inp = self.inp_encode(x_bar)
        # concat
        h = torch.cat([inp, ctx], dim=1)
        # de-noise with context
        glogprob = self.dae(h)
        ''' get loss '''
        #loss = (std**2)*self.loss(std*glogprob, -eps)
        loss = self.loss(std*glogprob, -eps)
        # return
        return None, loss
    def glogprob(self, input, context, std=None):
        """Context-conditioned score estimate, shaped bsz x ssz x input_dim."""
        # init
        assert input.dim() == 3 # bsz x ssz x x_dim
        assert context.dim() == 3 # bsz x 1 x ctx_dim
        std = self.std if std is None else std
        batch_size = input.size(0)
        sample_size = input.size(1)
        # reschape
        input = input.view(batch_size*sample_size, self.input_dim) # bsz*ssz x xdim
        _, context = expand_tensor(context, sample_size=sample_size, do_unsqueeze=False) # bsz*ssz x xdim
        #context = context.view(batch_size*sample_size, -1) # bsz*ssz x xdim
        # encode
        ctx = self.ctx_encode(context)
        inp = self.inp_encode(input)
        # concat
        h = torch.cat([inp, ctx], dim=1)
        # de-noise with context
        glogprob = self.dae(h)
        return glogprob.view(batch_size, sample_size, self.input_dim)
class ConditionalARDAE(nn.Module):
    """Conditional amortized-residual DAE.

    Unlike the fixed-std variant, the noise level is supplied per sample
    as a tensor and concatenated to the network input as an extra feature,
    so a single model covers a range of noise scales.
    """
    def __init__(self,
                 input_dim=2, #10,
                 h_dim=128,
                 context_dim=2,
                 std=0.01,
                 num_hidden_layers=1,
                 nonlinearity='tanh',
                 noise_type='gaussian',
                 enc_input=True,
                 enc_ctx=True,
                 std_method='default',
                 ):
        super().__init__()
        self.input_dim = input_dim
        self.h_dim = h_dim
        self.context_dim = context_dim
        self.std = std
        self.num_hidden_layers = num_hidden_layers
        self.nonlinearity = nonlinearity
        self.noise_type = noise_type
        self.enc_input = enc_input
        self.enc_ctx = enc_ctx
        # a stream's width is h_dim once encoded, otherwise its raw width
        inp_dim = h_dim if enc_input else input_dim
        ctx_dim = h_dim if enc_ctx else context_dim
        if enc_ctx:
            self.ctx_encode = MLP(context_dim, h_dim, h_dim,
                                  nonlinearity=nonlinearity,
                                  num_hidden_layers=num_hidden_layers-1,
                                  use_nonlinearity_output=True)
        else:
            self.ctx_encode = Identity()
        if enc_input:
            self.inp_encode = MLP(input_dim, h_dim, h_dim,
                                  nonlinearity=nonlinearity,
                                  num_hidden_layers=num_hidden_layers-1,
                                  use_nonlinearity_output=True)
        else:
            self.inp_encode = Identity()
        # +1 input feature for the per-sample std
        self.dae = MLP(inp_dim + ctx_dim + 1, h_dim, input_dim,
                       nonlinearity=nonlinearity,
                       num_hidden_layers=num_hidden_layers,
                       use_nonlinearity_output=False)

    def reset_parameters(self):
        nn.init.normal_(self.dae.fc.weight)

    def add_noise(self, input, std=None):
        """Corrupt input with the configured noise type at scale std."""
        if std is None:
            std = self.std
        noise_fns = {
            'gaussian': add_gaussian_noise,
            'uniform': add_uniform_noise,
            'laplace': add_laplace_noise,
        }
        if self.noise_type not in noise_fns:
            raise NotImplementedError
        return noise_fns[self.noise_type](input, std)

    def loss(self, input, target):
        """Reconstruction loss: mean squared error between input and target."""
        return F.mse_loss(input, target)

    def forward(self, input, context, std=None, scale=None):
        """Denoising score-matching step with per-sample noise levels.

        input   -- bsz x ssz x input_dim
        context -- bsz x 1 x ctx_dim
        std     -- bsz x ssz x 1 tensor of noise levels (zeros if omitted)
        scale   -- accepted for interface compatibility; unused
        Returns (None, loss).
        """
        assert input.dim() == 3 and context.dim() == 3
        bsz, ssz = input.size(0), input.size(1)
        if std is None:
            std = input.new_zeros(bsz, ssz, 1)
        else:
            assert torch.is_tensor(std)
        if scale is None:
            scale = 1.
        # flatten samples, tile context, line std up with the flat batch
        flat = input.view(bsz * ssz, self.input_dim)
        _, context = expand_tensor(context, sample_size=ssz, do_unsqueeze=False)
        std = std.view(bsz * ssz, 1)
        # corrupt, encode, and condition the DAE on the noise level too
        noisy, eps = self.add_noise(flat, std)
        joint = torch.cat([self.inp_encode(noisy), self.ctx_encode(context), std], dim=1)
        score = self.dae(joint)
        return None, self.loss(std * score, -eps)

    def glogprob(self, input, context, std=None, scale=None):
        """Evaluate the DAE on clean inputs; returns bsz x ssz x input_dim."""
        assert input.dim() == 3 and context.dim() == 3
        bsz, ssz = input.size(0), input.size(1)
        if std is None:
            std = input.new_zeros(bsz * ssz, 1)
        else:
            assert torch.is_tensor(std)
        if scale is None:
            scale = 1.
        flat = input.view(bsz * ssz, self.input_dim)
        _, context = expand_tensor(context, sample_size=ssz, do_unsqueeze=False)
        std = std.view(bsz * ssz, 1)
        joint = torch.cat([self.inp_encode(flat), self.ctx_encode(context), std], dim=1)
        out = self.dae(joint)
        return out.view(bsz, ssz, self.input_dim)
|
<reponame>RifleZhang/CORD_CPD<filename>trainer.py
import math
import sys, os
import os.path as osp
import time
import pickle
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.exp_utils import create_exp_dir
from utils.utils import *
from utils.cp_data_utils import *
from utils.cp_function_utils import *
from models import cord_cpd
class Trainer(object):
    """Training/evaluation driver for the CORD-CPD change-point model.

    NOTE(review): `self.data_type` and `self.report_combine` are read in
    load_data()/train_one_epoch()/evaluate() but never assigned in this
    class — presumably set on the instance by external setup code; confirm.
    """
    def __init__(self, args):
        # args: parsed command-line namespace; derived flags are written back
        super(Trainer, self).__init__()
        self.args = args
        self.args.cuda = not args.no_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda") if self.args.cuda else torch.device("cpu")
        self.args.factor = not args.no_factor
        self.exp_dir = args.exp_dir
        # seed every RNG used below for reproducibility
        seed = args.seed
        np.random.seed(seed)
        torch.manual_seed(seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(seed)
        # experiment dir + logging callable; dump the arg namespace for the record
        self.logging = create_exp_dir(args.exp_dir)
        meta_file_name = osp.join(args.exp_dir, "meta.txt")
        meta_file = open(meta_file_name, "w")
        meta_file.write(str(args))
        meta_file.close()
    def load_data(self):
        """Build train/valid/test loaders and the edge index tensors.

        rel_rec/rel_send are one-hot receiver/sender encodings for every
        directed edge of the fully-connected atom graph (no self-loops).
        """
        args = self.args
        if self.data_type == "sim":
            self.train_loader, self.valid_loader, self.test_loader = load_cp_data(
                args.data_path, args.batch_size, args.suffix, args.data_norm)
        else:
            self.train_loader, self.valid_loader, self.test_loader = load_cp_real_data(
                args.data_path, args.batch_size)
        off_diag = np.ones([args.num_atoms, args.num_atoms]) - np.eye(args.num_atoms)
        rel_rec = np.array(encode_onehot(np.where(off_diag)[1]), dtype=np.float32)
        rel_send = np.array(encode_onehot(np.where(off_diag)[0]), dtype=np.float32)
        self.rel_rec = torch.FloatTensor(rel_rec).to(self.device)
        self.rel_send = torch.FloatTensor(rel_send).to(self.device)
    def set_model(self):
        """Instantiate the model and optionally restore a saved checkpoint."""
        self.model = cord_cpd.Model(self.args)
        self.model.to_device(self.device)
        if self.args.load:
            self.logging("loading model from {}".format(self.args.exp_dir))
            self.model.load(self.exp_dir)
    def train(self):
        """Main epoch loop; checkpoints best-mse and best-accuracy models."""
        # Train model
        st = time.time()
        best_val_loss = np.inf
        best_acc_val = 0
        best_epoch = 0
        for epoch in range(self.args.epochs):
            t = time.time()
            mse_train, delta_train, acc_train = self.train_one_epoch()
            log_str_train = "Epoch: {:4d}, mse_train: {:.4f}, delta_train: {:.4f}, " \
                            "acc_train: {:.4f}, epoch time: {:.2f}s".format(
                epoch, mse_train, delta_train,
                acc_train, time.time() - t
            )
            log_str_eval = ""
            # periodic validation pass
            if (epoch+1) % self.args.eval_epoch == 0:
                mse_val, delta_val, acc_val, avg_roc, avg_dist, avg_tri = self.evaluate()
                log_str_eval = "|| mse_val: {:.4f}, delta_val: {:.4f}, acc_val: {:.4f}, " \
                               "roc: {:.4f}, dist: {:.4f}, tri: {:.4f}, total time: {:.2f}s ||".format(
                    mse_val, delta_val, acc_val,
                    avg_roc, avg_dist, avg_tri, time.time()-st
                )
                # keep two checkpoints: lowest val mse and highest val accuracy
                if mse_val < best_val_loss:
                    best_val_loss = mse_val
                    best_epoch = epoch
                    self.model.save(self.exp_dir)
                    self.logging("save best model at epoch : {}".format(best_epoch))
                if acc_val > best_acc_val:
                    best_acc_val = acc_val
                    self.model.save(self.exp_dir, "acc_model.t7")
                    self.logging("save acc model at epoch : {}".format(epoch))
            self.logging(log_str_train + log_str_eval)
        self.logging("Optimization Finished!")
        self.logging("Best Epoch: {:04d}".format(best_epoch))
    def train_one_epoch(self):
        """One pass over the training set; returns mean (mse, delta, acc)."""
        acc_train = []
        mse_train = []
        delta_train = []
        self.model.set_train()
        for batch_idx, (data, relations, cpd) in enumerate(self.train_loader):
            data, relations = data.to(self.device), relations.to(self.device)
            # data [batch_size, num_atoms, num_timesteps, num_dims]
            data = data[:, :, :self.args.timesteps, :]
            self.model.optimizer.zero_grad()
            logits = self.model.encode(data, self.rel_rec, self.rel_send)
            # loss_delta = 10000 * ((logits[:, :-1] - logits[:, 1:]) ** 2).mean()
            # logits [batch, timestep, edge, relation]
            # temporal-smoothness penalty on the inner timesteps (edges of the
            # window are excluded, presumably because they are less reliable)
            sub_logits = logits[:, 5:-5]
            loss_delta = 100 * ((sub_logits[:, :-1] - sub_logits[:, 1:]) ** 2).mean()
            edges = F.gumbel_softmax(logits, tau=self.args.temp, hard=self.args.hard)
            # prob = F.softmax(logits, -1)
            output = self.model.decode(data, edges, self.rel_rec, self.rel_send)
            # one-step-ahead reconstruction target, skipping warm-up steps
            target = data[:, :, 1:, :]
            target = target[:, :, self.args.begin_steps:, :]
            output = output[:, :, self.args.begin_steps:, :]
            loss_mse = F.mse_loss(output, target) / (2 * self.args.var) * 400
            loss = loss_mse + loss_delta
            # edge-recovery accuracy only makes sense with simulated ground truth
            if self.data_type == "sim":
                acc = edge_accuracy(logits, relations, begin_steps=5, end_steps=-5)
                acc_train.append(acc)
            else:
                acc_train.append(np.nan)
            loss.backward()
            self.model.optimizer.step()
            mse_train.append(loss_mse.item())
            delta_train.append(loss_delta.item())
        # LR schedule advances once per epoch
        self.model.scheduler.step()
        return np.mean(mse_train), np.mean(delta_train), np.mean(acc_train)
    @torch.no_grad()
    def evaluate(self):
        """Validation pass; returns (mse, delta, acc, roc, dist, tri) means."""
        acc_val = []
        mse_val = []
        delta_val = []
        self.model.set_eval()
        probs = []
        cpds = []
        recons = []
        origs = []
        for batch_idx, (data, relations, cpd) in enumerate(self.valid_loader):
            data, relations = data.to(self.device), relations.to(self.device)
            data = data[:, :, :self.args.timesteps, :]
            logits = self.model.encode(data, self.rel_rec, self.rel_send)
            sub_logits = logits[:, 5:-5]
            loss_delta = 100 * ((sub_logits[:, :-1] - sub_logits[:, 1:]) ** 2).mean()
            # hard samples for decoding; soft probabilities for CPD metrics
            edges = F.gumbel_softmax(logits, tau=self.args.temp, hard=True)
            prob = F.softmax(logits, -1)
            probs.append(prob)
            cpds.extend(cpd)
            output = self.model.decode(data, edges, self.rel_rec, self.rel_send)
            target = data[:, :, 1:, :]
            target = target[:, :, self.args.begin_steps:, :]
            output = output[:, :, self.args.begin_steps:, :]
            loss_mse = F.mse_loss(output, target) / (2 * self.args.var) * 400
            if self.data_type == 'sim':
                acc = edge_accuracy(logits, relations, begin_steps=5, end_steps=-5)
                acc_val.append(acc)
            else:
                # real data: keep originals and reconstructions for anomaly scoring
                origs.append(data.transpose(1, 2).contiguous().detach().cpu().numpy())
                # validation output uses teacher forcing
                recon = self.model.decoder.forward_reconstruct(data, edges, self.rel_rec, self.rel_send)
                recons.append(recon.detach().cpu().numpy())
                acc_val.append(np.nan)
            mse_val.append(loss_mse.item())
            delta_val.append(loss_delta.item())
        probs = torch.cat(probs).detach().cpu().numpy()
        cpds = np.array(cpds)
        avg_roc, avg_dist, avg_tri = cpd_metrics(probs, cpds)
        # optionally combine reconstruction-error and relation-change scores
        if self.report_combine:
            recons = np.concatenate(recons)
            origs = np.concatenate(origs)
            type1_score = mse_anomaly(recons, origs, step=5)
            type2_score = cal_cp_from_output(probs)
            combined = anomaly_combined_score(type1_score, type2_score)
            self.logging("-"*30)
            self.logging("relation score: {}".format(cpd_metrics(type2_score, cpds, anomaly_input=True)))
            self.logging("mse score: {}".format(cpd_metrics(type1_score, cpds, anomaly_input=True)))
            self.logging("combined score: {}".format(cpd_metrics(combined, cpds, anomaly_input=True)))
            self.logging("-" * 30)
        # restore training mode before returning to the epoch loop
        self.model.set_train()
        return np.mean(mse_val), np.mean(delta_val), np.mean(acc_val), avg_roc, avg_dist, avg_tri
|
# preprocess the arctic database
# - extract 1st channel of the dual-channel audio, which contains meaningful sound
# - normalize the audio
# - trim the silence at the beginning and at the end based on alignment info
#
# <NAME>, 2021-05-19
import os
import argparse
import glob
import numpy as np
# change working dir if needed
import sys
sys.path.append('utils')
from audio import extract_wav_channel, audioread, audiowrite, normalize_wav
from audio import wav_duration # for debugging
from vad import rmsilence, ignore_between_silence
from plot import plot_vad
def get_start_end(labfile):
    """get the start and end time after silence trimming using label file

    note:
        1. timestamp in each row of the label file indicates the end point
           of that phone
        2. the start time is the timestamp of the last 'pau' at the beginning
        3. the end time is the timestamp of the phone before the last 'pau'
           (count backwards)
        4. example:
            0.06200 125 pau
            0.18075 125 pau
            0.34325 125 ao
            ...
            3.14950 125 ax
            3.34325 125 pau
            start time: 0.18075, end time: 3.14950

    Fixes over the original: the file handle is now closed deterministically,
    an empty pau list raises a clear error, and start_idx/end_idx get
    fallbacks so a file whose pau marks form one contiguous run no longer
    raises UnboundLocalError.
    """
    with open(labfile, 'r') as f:
        lines = f.readlines()
    # remove lines without a space (i.e. the header line)
    lines = [line for line in lines if ' ' in line]
    idx = [i for i, line in enumerate(lines) if ' pau' in line]
    if not idx:
        raise Exception('no pau segment found in {}'.format(labfile))
    # find start time: walk forward through the leading run of consecutive pau
    start_idx = idx[-1]  # fallback: every pau belongs to one leading run
    cnt = 0
    while cnt < len(idx):
        if cnt+1 < len(idx) and idx[cnt+1] - idx[cnt] == 1:
            cnt += 1
        else:
            start_idx = idx[cnt]
            break
    start_time = float(lines[start_idx].rstrip().split()[0])
    # find the end time: walk backward through the trailing run of pau
    cnt = len(idx)-1
    if lines[idx[cnt]].rstrip().split()[-1] != 'pau':
        raise Exception('not ending in pau!')
    end_idx = idx[0] - 1  # fallback: the trailing run reaches back to idx[0]
    while cnt > 0:
        if idx[cnt] - idx[cnt-1] == 1:
            cnt -= 1
        else:
            end_idx = idx[cnt] - 1
            break
    end_time = float(lines[end_idx].rstrip().split()[0])
    return start_time, end_time
def get_start_end_vad(wavfile_mono_norm, plotfig=False):
    """Locate speech start/end (in seconds) via energy-based VAD.

    Optionally saves a diagnostic VAD plot next to the input wav.
    """
    samples, para = audioread(wavfile_mono_norm)
    peak = max(abs(samples))
    normed = np.asarray(samples, dtype='float64') / peak
    sr = para[2]
    # frame-level VAD flags (1 = voiced)
    Flags, Eor, E, Cor, C, Te, Tc = rmsilence(
        normed, sr, segtime=float('inf'),
        wintime=0.1, steptime=0.05, stepSmooth=3, medfilt=2, alpha1=5,
        alpha2=5, steest=1, scest=1, tortime=0.15, showfig=False)
    voiced = [i for i, v in enumerate(Flags) if v == 1]
    starttime = voiced[0] / sr
    endtime = voiced[-1] / sr
    # plot VAD figure for debugging
    if plotfig:
        n_flags = len(Flags)
        silenced = normed[:n_flags] * Flags
        trimmed = normed[:len(silenced)]
        fig = plot_vad(E, Eor, C, Cor, Te, Tc, Flags, trimmed, sr)
        f_vad = wavfile_mono_norm.replace('_mic1', '_vad').replace('.norm.wav', '.png')
        fig.savefig(f_vad)
        fig.clf()
    return starttime, endtime
def get_start_end_fa(wavfile_align):
    """Start/end times (seconds) from a force-alignment .word file.

    Each row is 'word start end'; start comes from the first row's second
    field, end from the last row's third field.
    """
    with open(wavfile_align, 'r') as f:
        rows = f.readlines()
    starttime = float(rows[0].split()[1])
    endtime = float(rows[-1].split()[2])
    return starttime, endtime
def get_start_end_simple(labfile):
    """Crude silence-trim boundaries from a label file.

    Start: the first pau's timestamp when it opens the file, otherwise the
    timestamp of the line right after the first pau. End: the last line's
    timestamp, which must be a pau. Simple but inaccurate.
    """
    with open(labfile, 'r') as f:
        lines = f.readlines()
    pau_rows = [i for i, line in enumerate(lines) if ' pau' in line]
    # find start time
    first = pau_rows[0]
    row = first if first == 0 else first + 1
    start_time = float(lines[row].rstrip().split()[0])
    # find end time: file must close with a pau
    if lines[-1].rstrip().split()[-1] != 'pau':
        raise Exception('not ending in pau!')
    end_time = float(lines[-1].rstrip().split()[0])
    return start_time, end_time
def parse_args():
    """Parse command-line options for the arctic preprocessing script.

    Bug fix: the long option strings were written without leading dashes
    ('in-dir', 'out-dir', ...), which makes argparse raise ValueError the
    moment the parser is built. Each option now has a proper long form
    whose dest matches the attribute names main() reads (in_dir, out_dir,
    spk, lab_dir, buffer_time, method).
    """
    usage = 'usage: preprocess the arctic database'
    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument('-di', '--in_dir', type=str, help='input dir')
    parser.add_argument('-do', '--out_dir', type=str, help='output dir')
    parser.add_argument('-sp', '--spk', type=str, help='speaker')
    parser.add_argument('-dl', '--lab_dir', type=str, help='label dir')
    parser.add_argument('-bt', '--buffer_time', type=float, help='buffer time for silence')
    parser.add_argument('-m', '--method', type=int, help='trimming method (1 for lab, 2 for vad)')
    return parser.parse_args()
def main():
    """Preprocess the arctic database: extract mono channel, normalize,
    and trim leading/trailing silence using one of five methods."""
    # # runtime mode
    # args = parse_args()
    root_dir = '/mnt/psvr/SpeechData/TTS/Arctic'
    # interactive mode (comment out before running the script)
    # NOTE(review): an ArgumentParser instance is used here as a plain
    # attribute bag; the commented parse_args() call is the runtime path.
    args = argparse.ArgumentParser()
    args.in_dir = '/data/evs/Arctic/wav22'
    # output dir contains files which are single-channel, normalized and silence trimmed
    args.out_dir = '/data/evs/Arctic/wav22_silence_trimmed'
    args.spk = 'bdl' # 'bdl' or 'slt'
    args.lab_dir = os.path.join(root_dir,
        'cmu_us_{}_arctic-0.95-release/cmu_us_{}_arctic/lab'.format(args.spk, args.spk))
    args.buffer_time = 0.01 # originally 0.125, use 0.01 to prevent round truncation (precision: .2f)
    args.method = 5 # 1 (lab), 2 (vad), 3 (fa), 4 (lab&vad), or 5 (lab&vad&fa)
    print('input dir: {}'.format(args.in_dir))
    print('output dir: {}'.format(args.out_dir))
    print('speaker: {}'.format(args.spk))
    print('label dir: {}'.format(args.lab_dir))
    print('buffer time: {}'.format(args.buffer_time))
    print('trimming method: {}'.format(args.method))
    assert os.path.isdir(args.lab_dir), 'label dir: {} does not exist!'
    in_dir = os.path.join(args.in_dir, args.spk)
    wavfiles = sorted(glob.glob(os.path.join(in_dir, '*.wav')))
    nwavs = len(wavfiles)
    print('# of wav files in {}: {}'.format(in_dir, nwavs))
    out_dir = os.path.join(args.out_dir, args.spk)
    os.makedirs(out_dir, exist_ok=True)
    blksize = 100  # progress is reported once per block of files
    for i in range(nwavs):
        if i % blksize == 0:
            print('processing {}/{} - {}/{} ...'.format(
                i, nwavs, min(i+blksize, nwavs), nwavs))
        # extract mono channel (channel 0, must do before normalization)
        filename = os.path.basename(wavfiles[i])
        wavfile_mono = os.path.splitext(filename)[0] + '_mic1.wav'
        wavfile_mono = os.path.join(out_dir, wavfile_mono)
        extract_wav_channel(wavfiles[i], wavfile_mono, channel=0, verbose=False)
        # normalize wav file
        wavfile_mono_norm = os.path.splitext(wavfile_mono)[0] + '.norm.wav'
        normalize_wav(wavfile_mono, wavfile_mono_norm, eps=0.1, verbose=False)
        # get the start time and duration of the trimmed wav file
        if args.method == 1 or args.method == 4 or args.method == 5:
            # using lab files (method 1)
            labfile = os.path.basename(wavfiles[i]).split('-')[1].replace('.wav', '.lab')
            labfile = os.path.join(args.lab_dir, labfile)
            assert os.path.isfile(labfile), '{} not exist!'.format(labfile)
            starttime1, endtime1 = get_start_end(labfile)
        if args.method == 2 or args.method == 4 or args.method == 5:
            # using VAD
            starttime2, endtime2 = get_start_end_vad(wavfile_mono_norm, plotfig=True)
        if args.method == 3 or args.method == 5:
            # using force-alignment
            align_dir = os.path.abspath(os.path.join(in_dir, os.pardir, os.pardir,
                'align', args.spk))
            wavfile_align = os.path.join(align_dir, filename.replace('.wav', '.word'))
            starttime3, endtime3 = get_start_end_fa(wavfile_align)
        # combine the per-method estimates
        if args.method == 1:
            starttime, endtime = starttime1, endtime1
        elif args.method == 2:
            starttime, endtime = starttime2, endtime2
        elif args.method == 3:
            starttime, endtime = starttime3, endtime3
        elif args.method == 4:
            # NOTE(review): max() of the end times keeps the LATER end; if
            # the intent was the intersection of the two trims, min() would
            # be expected here — confirm.
            starttime, endtime = max(starttime1, starttime2), max(endtime1, endtime2)
        elif args.method == 5:
            starttime = min(max(starttime1, starttime2), starttime3)
            endtime = max(endtime1, endtime2, endtime3)
        # a simple but inaccurate solution
        # starttime, endtime = get_start_end_simple(labfile)
        # disable trimming
        # starttime, endtime = 0.0, float('inf')
        # print(starttime, endtime)
        # write the final processed wav files (pad by buffer_time, clamp to file)
        duration_total = float('{:.2f}'.format(wav_duration(wavfile_mono_norm)))
        starttime = max(0.0, starttime - args.buffer_time)
        duration = min(endtime - starttime + 2*args.buffer_time, duration_total-starttime)
        data, params = audioread(wavfile_mono_norm, starttime, duration)
        wavfile_out = wavfile_mono.replace('_mic1', '')
        audiowrite(wavfile_out, data, params)
        # write the beginning and ending silence parts (for debugging only)
        if starttime > 0:
            data, params = audioread(wavfile_mono_norm, 0, starttime)
            if data.size > 0:
                wavfile_out = wavfile_mono.replace('_mic1', '_begin')
                audiowrite(wavfile_out, data, params)
        if duration_total > float('{:.2f}'.format(starttime+duration)):
            data, params = audioread(wavfile_mono_norm, starttime+duration)
            if data.size > 0:
                wavfile_out = wavfile_mono.replace('_mic1', '_end')
                audiowrite(wavfile_out, data, params)
    # remove the temp files
    tmpfiles = sorted(glob.glob(os.path.join(out_dir, '*_mic1*.wav')))
    for f in tmpfiles:
        os.remove(f)
|
# -*- coding: utf-8 -*-
import os

from fabric.api import run, env, settings, cd, task, put, execute
from fabric.contrib.files import exists, upload_template
from fabric.operations import _prefix_commands, _prefix_env_vars, require, sudo, local as local_

env.use_ssh_config = True

# Deployment settings come from the environment so no secrets live in the
# repository. The original file had redaction placeholders (`<PASSWORD>`)
# where os.environ.get calls clearly belonged; restored here.
LOCAL_HOST = os.environ.get('LOCAL_HOST')
LOCAL_USER = os.environ.get('LOCAL_USER')
LOCAL_PASSWORD = os.environ.get('LOCAL_PASSWORD')
LOCAL_SERVER_NAME = os.environ.get('LOCAL_SERVER_NAME')
LOCAL_JWT_SECRET = os.environ.get('LOCAL_JWT_SECRET')

WWW_HOST = os.environ.get('WWW_HOST')
WWW_USER = os.environ.get('WWW_USER')
WWW_PASSWORD = os.environ.get('WWW_PASSWORD')
WWW_SERVER_NAME = os.environ.get('WWW_SERVER_NAME')
WWW_JWT_SECRET = os.environ.get('WWW_JWT_SECRET')

# Per-stage settings copied onto Fabric's env by stage_set().
STAGES = {
    'local': {
        'hosts': [LOCAL_HOST],
        'user': LOCAL_USER,
        'password': LOCAL_PASSWORD,  # key was misspelled 'pasword'
        'server_name': LOCAL_SERVER_NAME,
        'jwt_secret': LOCAL_JWT_SECRET
    },
    'www': {
        'hosts': [WWW_HOST],
        'user': WWW_USER,
        'password': WWW_PASSWORD,  # WWW_PASSWORD was read but never used
        'server_name': WWW_SERVER_NAME,
        'jwt_secret': WWW_JWT_SECRET
    }
}
def stage_set(stage_name='local'):
    """Copy the chosen stage's settings onto Fabric's env object."""
    env.stage = stage_name
    stage = STAGES[stage_name]
    for key in stage:
        setattr(env, key, stage[key])
def stage_require():
    """Abort unless one of the stage tasks (local/www) has been run."""
    require('stage', provided_by=(local, www))
@task
def local():
    """Select the local deployment stage."""
    stage_set('local')
@task
def www():
    """Select the live (www) deployment stage."""
    stage_set('www')
@task
def check_sudo():
    """Smoke-test sudo access on the selected stage."""
    stage_require()
    sudo("date")
@task
def install():
    """Install all kilnshare.co.uk webserver components.

    Wipes any previous deployment, then rebuilds bottom-up.
    NOTE(review): install_mithril is not defined anywhere in this file —
    it looks like it was renamed to install_gui; confirm before running.
    """
    stage_require()
    remove()
    setup_dirs()
    install_openresty()
    install_lua()
    install_nginxjwt()
    install_modules()
    install_swaggerui()
    install_mithril()
    configure_firewall()
    configure_certs()
    configure_openresty()
    restart_server()
@task
def remove():
    """Delete the deploy tree and the openresty installation."""
    stage_require()
    sudo('rm -Rf /home/%s/deploy' % env.user)
    sudo('rm -Rf /usr/local/openresty/')
@task
def setup_dirs():
    """Create the deployment directory skeleton."""
    stage_require()
    base = '/home/%s/deploy' % env.user
    run('mkdir -p %s' % base)
    for name in ('api', 'bin', 'config', 'downloads', 'scripts', 'www', 'swagger-ui'):
        run('mkdir -p %s/%s' % (base, name))
@task
def install_openresty():
    """Upload and run the openresty install script.

    Bug fix: mode was written as 0755, a Python-2-only octal literal that
    is a syntax error under Python 3; 0o755 is equivalent and valid in
    both Python 2.6+ and 3.
    """
    stage_require()
    put('../webserver/scripts/install_openresty.sh',
        '/home/%s/deploy/scripts/install_openresty.sh' % env.user,
        mode=0o755)
    sudo('/home/%s/deploy/scripts/install_openresty.sh' % env.user)
@task
def install_lua():
    """Install and configure Lua and dependencies.

    Renders the install script as a Jinja template with env as context,
    then executes it. mode fixed from the Python-2-only literal 0755.
    """
    stage_require()
    template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
    upload_template(
        'scripts/install_lua.sh',
        '/home/%s/deploy/scripts/install_lua.sh' % env.user,
        context=env,
        use_jinja=True,
        mode=0o755,
        backup=False,
        template_dir=template_dir)
    sudo('/home/%s/deploy/scripts/install_lua.sh' % env.user)
@task
def install_nginxjwt():
    """Install and configure the nginx JWT components.

    Renders the install script as a Jinja template with env as context,
    then executes it. mode fixed from the Python-2-only literal 0755.
    """
    stage_require()
    template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
    upload_template(
        'scripts/install_nginxjwt.sh',
        '/home/%s/deploy/scripts/install_nginxjwt.sh' % env.user,
        context=env,
        use_jinja=True,
        mode=0o755,
        backup=False,
        template_dir=template_dir)
    sudo('/home/%s/deploy/scripts/install_nginxjwt.sh' % env.user)
@task
def install_modules():
    """Upload the kiln_share Lua module."""
    stage_require()
    dest = '/home/%s/deploy/bin/kiln_share.lua' % env.user
    put('../webserver/modules/kiln_share.lua', dest, use_sudo=True)
@task
def install_swaggerui():
    """Upload the swagger-ui tree into the deploy directory."""
    stage_require()
    dest = '/home/%s/deploy/' % env.user
    put('../swagger-ui', dest, use_sudo=True)
@task
def configure_firewall():
    """Open HTTP and HTTPS through the Ubuntu firewall (ufw)."""
    stage_require()
    for service in ('http', 'https'):
        sudo('ufw allow %s' % service)
@task
def configure_certs():
    """Create SSL certificates.

    - Uses Letsencrypt for all non-local environments.
    - local.kilnshare.co.uk has no DNS entry so Letsencrypt can't be used;
      the local stage self-signs with the snakeoil cert instead.
    - mode fixed from the Python-2-only octal literal 0755.
    """
    stage_require()
    if not env.stage == 'local':
        template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
        upload_template(
            'templates/letsencrypt.sh',
            '/home/%s/deploy/scripts/letsencrypt.sh' % env.user,
            context=env,
            use_jinja=True,
            mode=0o755,
            backup=False,
            template_dir=template_dir)
        sudo('/home/%s/deploy/scripts/letsencrypt.sh' % env.user)
        return
    # local stage: self-sign with the snakeoil certificate
    sudo('mkdir -p /etc/letsencrypt/live/local.kilnshare.co.uk/')
    sudo('cp /etc/ssl/certs/ssl-cert-snakeoil.pem /etc/letsencrypt/live/local.kilnshare.co.uk/fullchain.pem')
    sudo('cp /etc/ssl/private/ssl-cert-snakeoil.key /etc/letsencrypt/live/local.kilnshare.co.uk/privkey.pem')
    sudo('openssl dhparam -out ~/deploy/dhparams.pem 2048')
@task
def configure_openresty():
    """Upload Openresty configuration files.

    - Make logging directories
    - Configure systemctl

    Fix: the systemctl calls were sudo('sudo systemctl ...'); the nested
    'sudo ' is redundant because Fabric's sudo() already elevates.
    """
    stage_require()
    sudo('mkdir -p /var/log/openresty')
    sudo('mkdir -p /usr/local/openresty/nginx/sites')
    template_dir = os.path.join(os.path.dirname(__file__), '../webserver')
    # render each Jinja template with env as context and install it
    for template, destination in (
            ('templates/openresty.service', '/etc/systemd/system/openresty.service'),
            ('templates/nginx.conf', '/usr/local/openresty/nginx/conf/nginx.conf'),
            ('templates/default.conf', '/usr/local/openresty/nginx/sites/default.conf')):
        upload_template(
            template,
            destination,
            context=env,
            use_jinja=True,
            use_sudo=True,
            backup=False,
            template_dir=template_dir)
    sudo('systemctl daemon-reload')
    sudo('systemctl enable openresty')
@task
def start_server():
    """Start the Openresty webserver via systemd."""
    stage_require()
    sudo('systemctl start openresty')
@task
def stop_server():
    """Stop the Openresty webserver via systemd."""
    stage_require()
    sudo('systemctl stop openresty')
@task
def restart_server():
    """Restart the Openresty webserver via systemd."""
    stage_require()
    sudo('systemctl restart openresty')
@task
def start_mongo():
    """Start Mongodb.

    Bug fix: the body was a copy of restart_mongo and ran
    'service mongod restart'; a dedicated restart task already exists, so
    this start task now actually starts the service.
    """
    stage_require()
    sudo('service mongod start')
@task
def restart_mongo():
    """Restart Mongodb."""
    stage_require()
    sudo('service mongod restart')
@task
def install_api():
    """Create a new virtualenv and install the Eve app.

    Robustness fix: the initial gunicorn kill is now best-effort — with
    warn_only the task no longer aborts when no gunicorn process exists
    (pkill exits non-zero in that case).
    """
    with settings(warn_only=True):
        sudo('pkill -9 gunicorn')
    # build a source distribution locally and ship it to the host
    local_('cd ../api && python setup.py sdist --formats=gztar', capture=False)
    dist = local_('cd ../api && python setup.py --fullname', capture=True).strip()
    filename = '%s.tar.gz' % dist
    put('../api/dist/%s' % filename, '/tmp/%s' % filename)
    sudo('virtualenv /home/%s/deploy/api' % env.user)
    sudo('/home/%s/deploy/api/bin/pip install gunicorn' % (env.user))
    sudo('/home/%s/deploy/api/bin/pip install /tmp/%s' % (env.user, filename))
@task
def start_api():
    """Run the Python api using Gunicorn (daemonized on port 8080)."""
    sudo('. /home/%s/deploy/api/bin/activate && gunicorn --daemon -b 0.0.0.0:8080 kiln_share:app' % (env.user))
@task
def stop_api():
    """Stop the Python api by killing gunicorn (best-effort).

    Fixes: docstring previously said "Run the Python api" (copy-paste);
    warn_only prevents the task from aborting when gunicorn is not running.
    """
    with settings(warn_only=True):
        sudo('pkill -9 gunicorn')
@task
def install_gui():
    """Build the GUI with npm and upload the dist output."""
    stage_require()
    local_('cd ../gui && npm run build')
    dest = '/home/%s/deploy/www/' % env.user
    put('../gui/dist/*', dest, use_sudo=True)
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Module for Popbill FAX API. It include base functionality of the
# RESTful web service request and parse json result. It uses Linkhub module
# to accomplish authentication APIs.
#
# http://www.popbill.com
# Author : <NAME> (<EMAIL>)
# Written : 2015-01-21
# Contributor : <NAME> (<EMAIL>)
# Updated : 2018-08-09
# Thanks for your interest.
from datetime import datetime
from .base import PopbillBase, PopbillException, File
class FaxService(PopbillBase):
""" 팝빌 팩스 API Service Implementation. """
def __init__(self, LinkID, SecretKey):
"""생성자
args
LinkID : 링크허브에서 발급받은 링크아이디(LinkID)
SecretKeye 링크허브에서 발급받은 비밀키(SecretKey)
"""
super(self.__class__, self).__init__(LinkID, SecretKey)
self._addScope("160")
def getChargeInfo(self, CorpNum, UserID=None):
""" 과금정보 확인
args
CorpNum : 회원 사업자번호
UserID : 팝빌 회원아이디
return
과금정보 객체
raise
PopbillException
"""
return self._httpget('/FAX/ChargeInfo', CorpNum, UserID)
def getURL(self, CorpNum, UserID, ToGo):
""" 팩스 관련 팝빌 URL
args
CorpNum : 팝빌회원 사업자번호
UserID : 팝빌회원 아이디
TOGO : 팩스관련 기능 지정 문자. (BOX - 전송내역조회)
return
30초 보안 토큰을 포함한 url
raise
PopbillException
"""
if ToGo == None or ToGo == '':
raise PopbillException(-99999999, "TOGO값이 입력되지 않았습니다.")
result = self._httpget('/FAX/?TG=' + ToGo, CorpNum, UserID)
return result.url
    def getSentListURL(self, CorpNum, UserID):
        """Popbill popup URL for the sent-fax list (TG=BOX).

        NOTE(review): the original (Korean) docstring described the
        sender-number management page; it appears to have been swapped
        with getSenderNumberMgtURL's docstring — the TG=BOX code matches
        this method's name.

        args
            CorpNum : member company registration number
            UserID : Popbill member user id
        return
            url containing a 30-second security token
        raise
            PopbillException
        """
        result = self._httpget('/FAX/?TG=BOX', CorpNum, UserID)
        return result.url
    def getSenderNumberMgtURL(self, CorpNum, UserID):
        """Popbill popup URL for sender-number management (TG=SENDER).

        NOTE(review): the original (Korean) docstring described the fax
        send-history page; it appears to have been swapped with
        getSentListURL's docstring — the TG=SENDER code matches this
        method's name.

        args
            CorpNum : member company registration number
            UserID : Popbill member user id
        return
            url containing a 30-second security token
        raise
            PopbillException
        """
        result = self._httpget('/FAX/?TG=SENDER', CorpNum, UserID)
        return result.url
    def getUnitCost(self, CorpNum):
        """Unit cost of sending one fax.

        args
            CorpNum : Popbill member company registration number
        return
            unit cost (NOTE(review): the original doc said 'by float' but
            the value is truncated with int() here — confirm the intended
            return type)
        raise
            PopbillException
        """
        result = self._httpget('/FAX/UnitCost', CorpNum)
        return int(result.unitCost)
def search(self, CorpNum, SDate, EDate, State, ReserveYN, SenderOnly, Page, PerPage, Order, UserID=None,
QString=None):
""" 목록 조회
args
CorpNum : 팝빌회원 사업자번호
SDate : 시작일자, 표시형식(yyyyMMdd)
EDate : 종료일자, 표시형식(yyyyMMdd)
State : 전송상태 배열, 1-대기, 2-성공, 3-실패, 4-취소
ReserveYN : 예약여부, False-전체조회, True-예약전송건 조회
SenderOnly : 개인조회여부, False-개인조회, True-회사조회
Page : 페이지번호
PerPage : 페이지당 목록개수
Order : 정렬방향, D-내림차순, A-오름차순
UserID : 팝빌 회원아이디
QString : 조회 검색어, 발신자명 또는 수신자명 기재
"""
if SDate == None or SDate == '':
raise PopbillException(-99999999, "시작일자가 입력되지 않았습니다.")
if EDate == None or EDate == '':
raise PopbillException(-99999999, "종료일자가 입력되지 않았습니다.")
uri = '/FAX/Search'
uri += '?SDate=' + SDate
uri += '&EDate=' + EDate
uri += '&State=' + ','.join(State)
if ReserveYN:
uri += '&ReserveYN=1'
if SenderOnly:
uri += '&SenderOnly=1'
uri += '&Page=' + str(Page)
uri += '&PerPage=' + str(PerPage)
uri += '&Order=' + Order
if QString is not None:
uri += '&QString=' + QString
return self._httpget(uri, CorpNum, UserID)
def getFaxResult(self, CorpNum, ReceiptNum, UserID=None):
""" 팩스 전송결과 조회
args
CorpNum : 팝빌회원 사업자번호
ReceiptNum : 전송요청시 발급받은 접수번호
UserID : 팝빌회원 아이디
return
팩스전송정보 as list
raise
PopbillException
"""
if ReceiptNum == None or len(ReceiptNum) != 18:
raise PopbillException(-99999999, "접수번호가 올바르지 않습니다.")
return self._httpget('/FAX/' + ReceiptNum, CorpNum, UserID)
def getFaxResultRN(self, CorpNum, RequestNum, UserID=None):
""" 팩스 전송결과 조회
args
CorpNum : 팝빌회원 사업자번호
RequestNum : 전송요청시 할당한 전송요청번호
UserID : 팝빌회원 아이디
return
팩스전송정보 as list
raise
PopbillException
"""
if RequestNum == None or RequestNum == '':
raise PopbillException(-99999999, "요청번호가 입력되지 않았습니다.")
return self._httpget('/FAX/Get/' + RequestNum, CorpNum, UserID)
def cancelReserve(self, CorpNum, ReceiptNum, UserID=None):
""" 팩스 예약전송 취소
args
CorpNum : 팝빌회원 사업자번호
ReceiptNum : 팩스 전송요청(sendFAX)시 발급받은 접수번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if ReceiptNum == None or len(ReceiptNum) != 18:
raise PopbillException(-99999999, "접수번호가 올바르지 않습니다.")
return self._httpget('/FAX/' + ReceiptNum + '/Cancel', CorpNum, UserID)
def cancelReserveRN(self, CorpNum, RequestNum, UserID=None):
""" 팩스 예약전송 취소
args
CorpNum : 팝빌회원 사업자번호
RequestNum : 팩스전송요청시 할당한 전송요청번호
UserID : 팝빌회원 아이디
return
처리결과. consist of code and message
raise
PopbillException
"""
if RequestNum == None or RequestNum == '':
raise PopbillException(-99999999, "요청번호가 입력되지 않았습니다.")
return self._httpget('/FAX/Cancel/' + RequestNum, CorpNum, UserID)
def sendFax(self, CorpNum, SenderNum, ReceiverNum, ReceiverName, FilePath, ReserveDT=None, UserID=None,
SenderName=None, adsYN=False, title=None, RequestNum=None):
""" 팩스 단건 전송
args
CorpNum : 팝빌회원 사업자번호
SenderNum : 발신자 번호
ReceiverNum : 수신자 번호
ReceiverName : 수신자 명
FilePath : 발신 파일경로
ReserveDT : 예약시간(형식 yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
SenderName : 발신자명 (동보전송용)
adsYN : 광고팩스 여부
title : 팩스제목
RequestNum : 전송요청시 할당한 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.sendFax_multi(CorpNum, SenderNum, receivers, FilePath, ReserveDT, UserID, SenderName, adsYN, title,
RequestNum)
    def sendFax_multi(self, CorpNum, SenderNum, Receiver, FilePath, ReserveDT=None, UserID=None, SenderName=None,
                      adsYN=False, title=None, RequestNum=None):
        """Send a fax to one or more receivers.

        args
            CorpNum : Popbill member company registration number
            SenderNum : sender number (for broadcast sends)
            Receiver : receiver(s); str, FaxReceiver, or list of FaxReceiver
            FilePath : file path (str) or list of up to 20 file paths
            ReserveDT : reserved send time (yyyyMMddHHmmss)
            UserID : Popbill member user id
            SenderName : sender name (for broadcast sends)
            adsYN : whether this is an advertising fax
            title : fax title
            RequestNum : caller-assigned request number
        return
            receipt number (receiptNum)
        raise
            PopbillException
        """
        # validate mandatory arguments up front
        if SenderNum == None or SenderNum == "":
            raise PopbillException(-99999999, "발신자 번호가 입력되지 않았습니다.")
        if Receiver == None:
            raise PopbillException(-99999999, "수신자 정보가 입력되지 않았습니다.")
        if not (type(Receiver) is str or type(Receiver) is FaxReceiver or type(Receiver) is list):
            raise PopbillException(-99999999, "'Receiver' argument type error. 'FaxReceiver' or List of 'FaxReceiver'.")
        if FilePath == None:
            raise PopbillException(-99999999, "발신 파일경로가 입력되지 않았습니다.")
        if not (type(FilePath) is str or type(FilePath) is list):
            raise PopbillException(-99999999, "발신 파일은 파일경로 또는 경로목록만 입력 가능합니다.")
        if type(FilePath) is list and (len(FilePath) < 1 or len(FilePath) > 20):
            raise PopbillException(-99999999, "파일은 1개 이상, 20개 까지 전송 가능합니다.")
        # build the request payload; fCnt is the number of attached files
        req = {"snd": SenderNum, "sndnm": SenderName, "fCnt": 1 if type(FilePath) is str else len(FilePath), "rcvs": [],
               "sndDT": None}
        # normalize Receiver to a list of FaxReceiver
        if (type(Receiver) is str):
            Receiver = FaxReceiver(receiveNum=Receiver)
        if (type(Receiver) is FaxReceiver):
            Receiver = [Receiver]
        if adsYN:
            req['adsYN'] = True
        for r in Receiver:
            req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName})
        # optional fields are only included when supplied
        if ReserveDT != None:
            req['sndDT'] = ReserveDT
        if title != None:
            req['title'] = title
        if RequestNum != None:
            req['requestNum'] = RequestNum
        postData = self._stringtify(req)
        # read every attachment into a File upload record
        if (type(FilePath) is str):
            FilePath = [FilePath]
        files = []
        for filePath in FilePath:
            with open(filePath, "rb") as f:
                files.append(File(fieldName='file',
                                  fileName=f.name,
                                  fileData=f.read())
                             )
        result = self._httppost_files('/FAX', postData, files, CorpNum, UserID)
        return result.receiptNum
def resendFax(self, CorpNum, ReceiptNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None,
UserID=None, title=None, RequestNum=None):
""" 팩스 단건 전송
args
CorpNum : 팝빌회원 사업자번호
ReceiptNum : 팩스 접수번호
SenderNum : 발신자 번호
SenderName : 발신자명
ReceiverNum : 수신번호
ReceiverName : 수신자명
ReserveDT : 예약시간(형식 yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
title : 팩스제목
RequestNum : 전송요청시 할당한 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
receivers = None
if ReceiverNum != "" or ReceiverName != "":
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.resendFax_multi(CorpNum, ReceiptNum, SenderNum, SenderName, receivers, ReserveDT, UserID, title,
RequestNum)
def resendFax_multi(self, CorpNum, ReceiptNum, SenderNum, SenderName, Receiver, ReserveDT=None, UserID=None,
title=None, RequestNum=None):
""" 팩스 전송
args
CorpNum : 팝빌회원 사업자번호
ReceiptNum : 팩스 접수번호
SenderNum : 발신자 번호
SenderName : 발신자명
Receiver : 수신자정보 배열
ReserveDT : 예약시간(형식 yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
title : 팩스제목
RequestNum : 전송요청시 할당한 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
req = {}
if ReceiptNum == None or len(ReceiptNum) != 18:
raise PopbillException(-99999999, "접수번호가 올바르지 않습니다.")
if SenderNum != "":
req['snd'] = SenderNum
if SenderName != "":
req['sndnm'] = SenderName
if ReserveDT != None:
req['sndDT'] = ReserveDT
if title != None:
req['title'] = title
if RequestNum != None:
req['requestNum'] = RequestNum
if Receiver != None:
req['rcvs'] = []
if (type(Receiver) is str):
Receiver = FaxReceiver(receiveNum=Receiver)
if (type(Receiver) is FaxReceiver):
Receiver = [Receiver]
for r in Receiver:
req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName})
postData = self._stringtify(req)
return self._httppost('/FAX/' + ReceiptNum, postData, CorpNum, UserID).receiptNum
def resendFaxRN(self, CorpNum, OrgRequestNum, SenderNum, SenderName, ReceiverNum, ReceiverName, ReserveDT=None,
UserID=None, title=None, RequestNum=None):
""" 팩스 단건 전송
args
CorpNum : 팝빌회원 사업자번호
OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호
ReceiptNum : 팩스 접수번호
SenderNum : 발신자 번호
SenderName : 발신자명
ReceiverNum : 수신번호
ReceiverName : 수신자명
ReserveDT : 예약시간(형식 yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
title : 팩스제목
RequestNum : 전송요청시 할당한 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
receivers = None
if ReceiverNum != "" or ReceiverName != "":
receivers = []
receivers.append(FaxReceiver(receiveNum=ReceiverNum,
receiveName=ReceiverName)
)
return self.resendFaxRN_multi(CorpNum, OrgRequestNum, SenderNum, SenderName, receivers, ReserveDT,
UserID, title, RequestNum)
def resendFaxRN_multi(self, CorpNum, OrgRequestNum, SenderNum, SenderName, Receiver, ReserveDT=None, UserID=None,
title=None, RequestNum=None):
""" 팩스 전송
args
CorpNum : 팝빌회원 사업자번호
OrgRequestNum : 원본 팩스 전송시 할당한 전송요청번호
SenderNum : 발신자 번호
SenderName : 발신자명
Receiver : 수신자정보 배열
ReserveDT : 예약시간(형식 yyyyMMddHHmmss)
UserID : 팝빌회원 아이디
title : 팩스제목
RequestNum : 전송요청시 할당한 전송요청번호
return
접수번호 (receiptNum)
raise
PopbillException
"""
req = {}
if not OrgRequestNum:
raise PopbillException(-99999999, "원본 팩스 요청번호가 입력되지 않았습니다")
if SenderNum != "":
req['snd'] = SenderNum
if SenderName != "":
req['sndnm'] = SenderName
if ReserveDT != None:
req['sndDT'] = ReserveDT
if title != None:
req['title'] = title
if RequestNum != None:
req['requestNum'] = RequestNum
if Receiver != None:
req['rcvs'] = []
if (type(Receiver) is str):
Receiver = FaxReceiver(receiveNum=Receiver)
if (type(Receiver) is FaxReceiver):
Receiver = [Receiver]
for r in Receiver:
req['rcvs'].append({"rcv": r.receiveNum, "rcvnm": r.receiveName})
postData = self._stringtify(req)
return self._httppost('/FAX/Resend/' + OrgRequestNum, postData, CorpNum, UserID).receiptNum
    def getSenderNumberList(self, CorpNum, UserID=None):
        """ List the fax sender numbers registered for the member.
        args
            CorpNum : Popbill member business registration number
            UserID : Popbill member user id
        return
            list of SenderNumber objects
        raise
            PopbillException
        """
        return self._httpget('/FAX/SenderNumber', CorpNum, UserID)
def getPreviewURL(self, CorpNum, ReceiptNum, UserID):
""" 팩스 발신번호 목록 확인
args
CorpNum : 팝빌회원 사업자번호
UserID : 팝빌회원 아이디
return
처리결과. list of SenderNumber
raise
PopbillException
"""
return self._httpget('/FAX/Preview/' + ReceiptNum, CorpNum, UserID).url
class FaxReceiver(object):
    """Value object holding a single fax receiver (number and display name).

    Both attributes default to None; any keyword argument passed to the
    constructor (including extra ones) becomes an instance attribute.
    """

    def __init__(self, **kwargs):
        self.receiveNum = None
        self.receiveName = None
        for key, value in kwargs.items():
            setattr(self, key, value)
|
<reponame>racinmat/depth-voxelmap-estimation
import scipy.special
import scipy.io
import matplotlib.pyplot as plt
import pickle
from sklearn.metrics import roc_curve, auc
import numpy as np
def plot_roc(fpr, tpr, roc_auc, model_name):
    """Plot one ROC curve and save it as '<model_name>.png'."""
    plt.figure()
    plt.plot(fpr, tpr, label='ROC curve (area = %0.4f)' % roc_auc)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC)')
    plt.legend(loc="lower right")
    plt.savefig('{}.png'.format(model_name))
def process_calculated_all_roc(model_names):
    """Re-plot ROC curves from previously pickled (fpr, tpr, auc) results."""
    for model_name in model_names:
        for suffix in ('train', 'test'):
            with open('evaluate/roc-{}-{}.rick'.format(model_name, suffix), 'rb') as f:
                fpr, tpr, roc_auc = pickle.load(f)
            plot_roc(fpr, tpr, roc_auc, model_name + '-' + suffix)
def calc_roc(pred_voxels, gt_voxels, model_name, suffix):
    """Compute, plot and save the ROC curve of predicted vs. ground-truth voxels."""
    print('calculating roc for', model_name, suffix)
    gt_flat = gt_voxels.flatten()
    pred_flat = pred_voxels.flatten()
    # Voxels labeled -1 are unknown and excluded from the evaluation.
    known = gt_flat != -1
    gt_known = gt_flat[known]
    pred_known = pred_flat[known]
    print('size to roc', gt_known.shape, pred_known.shape)
    # Squash raw predictions into [0, 1] with the logistic sigmoid.
    pred_known = scipy.special.expit(pred_known)
    num_free = np.sum(gt_known == 0)
    num_occup = np.sum(gt_known == 1)
    print(num_free)
    print(num_occup)
    # Positive label is 1 (occupied).
    fpr, tpr, _ = roc_curve(gt_known, pred_known, 1)
    roc_auc = auc(fpr, tpr)
    plot_roc(fpr, tpr, roc_auc, model_name + '-' + suffix)
def calculate_all_roc():
    """Load pickled test predictions, dump .mat files, and plot their ROC.

    Only the first model in the test dump is processed (note the break).
    """
    with open('evaluate/roc-dump-gt-test.rick', 'rb') as f:
        batch_voxels_test = pickle.load(f)
    with open('evaluate/roc-dump-test.rick', 'rb') as f:
        results_test = pickle.load(f)
    print('data loaded, going to process')
    scipy.io.savemat('voxel_gt.mat', {'voxel_gt': batch_voxels_test})
    for model_name, res in results_test.items():
        pred_voxels, fn_val, tn_val, tp_val, fp_val = res
        scipy.io.savemat('voxel_test.mat', {'voxel_pred': pred_voxels})
        calc_roc(pred_voxels, batch_voxels_test, model_name, 'test')
        break
def _print_split_rates(model_name, split):
    """Load pickled (fn, tn, tp, fp) counts for one split and print FPR/TPR."""
    with open('evaluate/rates-{}-{}.rick'.format(model_name, split), 'rb') as f:
        fn, tn, tp, fp = pickle.load(f)
    print('model {}, {}'.format(model_name, split))
    print('fn: {}, tn: {}, tp: {}, fp: {}'.format(fn, tn, tp, fp))
    fpr = fp / (fp + tn)
    tpr = tp / (fn + tp)
    print('fpr: {}, tpr: {}'.format(fpr, tpr))


def print_rates(model_names):
    """Print confusion counts and FPR/TPR for each model's train and test dumps.

    The original duplicated the train and test bodies verbatim; they are now
    shared via _print_split_rates.
    """
    for model_name in model_names:
        _print_split_rates(model_name, 'train')
        _print_split_rates(model_name, 'test')
# Script entry point: generate ROC plots for the listed model checkpoints.
if __name__ == '__main__':
    # Checkpoint timestamps identifying the trained models to evaluate.
    model_names = [
        '2018-05-04--22-57-49',
        '2018-05-04--23-03-46',
        '2018-05-07--17-22-10',
        '2018-05-08--23-37-07',
        '2018-05-11--00-10-54',
    ]
    # process_calculated_all_roc(model_names)
    calculate_all_roc()
    # print_rates(model_names)
<filename>carts/views.py
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.generics import RetrieveAPIView
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from rest_framework.status import HTTP_400_BAD_REQUEST
from django.shortcuts import get_object_or_404
from .serializers import CartSerializer, OrderSerializer
from .models import Cart, Order
from products.models import Product
from accounts.models import Account
"""
people (Un-authenticated users) can add items into their cart.
Maintain a cart on the server in either DB or redis or MongoDb
On cart change (add / remove item) update the cart on server
For cart line items take product id, qty, storeLink as input and fetch product meta data from the DB and save them.
---->
We can achieve this by setting a cookie containing the cart data.
https://stackoverflow.com/questions/39826992/how-can-i-set-a-cookie-in-react
We'll not need any backend for this as this can be built with JavaScript.
"""
class GetCartDetail(RetrieveAPIView):
    """Retrieve a cart, looked up by its ``user`` field.

    Requires an authenticated request.
    """
    queryset = Cart.objects.all()
    serializer_class = CartSerializer
    # Look the cart up by user id instead of the default pk.
    lookup_field = 'user'
    permission_classes = (IsAuthenticated,)
class AddToCartView(APIView):
    """Set a product and quantity on a user's cart.

    Expects ``product_id``, ``quantity`` and ``user_id`` in the request body
    and returns the serialized cart. NOTE(review): despite the name, this
    overwrites the cart's single product/quantity rather than appending a
    line item - confirm against the Cart model.
    """

    def post(self, request, *args, **kwargs):
        product_id = request.data.get('product_id', None)
        quantity = request.data.get('quantity')
        user_id = request.data.get('user_id', None)
        if product_id is None:
            return Response({"message": "Invalid product id"}, status=HTTP_400_BAD_REQUEST)
        if user_id is None:
            return Response({"message": "Invalid user"}, status=HTTP_400_BAD_REQUEST)
        # Validate quantity up front: int(None) / int('abc') previously
        # raised and produced an HTTP 500 instead of a 400.
        try:
            quantity = int(quantity)
        except (TypeError, ValueError):
            return Response({"message": "Invalid quantity"}, status=HTTP_400_BAD_REQUEST)
        product = get_object_or_404(Product, id=product_id)
        user = get_object_or_404(Account, id=user_id)
        cart = get_object_or_404(Cart, user=user)
        cart.quantity = quantity
        cart.product = product
        cart.save()
        serializer = CartSerializer(cart)
        return Response(serializer.data)
class RemoveFromCartView(APIView):
    """Set a product and quantity on a user's cart.

    NOTE(review): the body is byte-identical to AddToCartView - it overwrites
    the cart's product/quantity rather than removing anything; confirm the
    intended remove semantics.
    """

    def post(self, request, *args, **kwargs):
        product_id = request.data.get('product_id', None)
        quantity = request.data.get('quantity')
        user_id = request.data.get('user_id', None)
        if product_id is None:
            return Response({"message": "Invalid product id"}, status=HTTP_400_BAD_REQUEST)
        if user_id is None:
            return Response({"message": "Invalid user"}, status=HTTP_400_BAD_REQUEST)
        # Validate quantity up front instead of letting int(quantity) raise
        # (HTTP 500) on a missing or non-numeric value.
        try:
            quantity = int(quantity)
        except (TypeError, ValueError):
            return Response({"message": "Invalid quantity"}, status=HTTP_400_BAD_REQUEST)
        product = get_object_or_404(Product, id=product_id)
        user = get_object_or_404(Account, id=user_id)
        cart = get_object_or_404(Cart, user=user)
        cart.quantity = quantity
        cart.product = product
        cart.save()
        serializer = CartSerializer(cart)
        return Response(serializer.data)
class ClearCartView(APIView):
    """Empty a user's cart: reset the quantity to zero and detach the product."""

    def post(self, request, *args, **kwargs):
        user_id = request.data.get('user_id', None)
        if user_id is None:
            return Response({"message": "Invalid user"}, status=HTTP_400_BAD_REQUEST)
        account = get_object_or_404(Account, id=user_id)
        cart = get_object_or_404(Cart, user=account)
        cart.quantity = 0
        cart.product = None
        cart.save()
        return Response(CartSerializer(cart).data)
class OrderViewSet(viewsets.ModelViewSet):
    """
    Standard CRUD endpoints for orders (create, list, retrieve, update, delete).
    """
    queryset = Order.objects.all()
    serializer_class = OrderSerializer

    # Remove CSRF request verification for posts to this API
    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        return super(OrderViewSet, self).dispatch(*args, **kwargs)

    # NOTE(review): this mirrors ModelViewSet's default create(); it is kept
    # only for explicitness and can likely be removed.
    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
|
import codecs
import re
import pypandoc
import os
from trello import TrelloClient
from slugify import slugify
import datetime
import requests
from django.urls import reverse
from django.conf import settings
from django.db import IntegrityError
from django.contrib.sites.models import Site
# Base directory for all writings ("Schriften") below MEDIA_ROOT.
base_dir = os.path.join(settings.MEDIA_ROOT, 'Schriften')
# BibTeX database used by pandoc-citeproc for bibliographies.
bib = os.path.join(base_dir, "scholarium.bib")
# Working directories for intermediate markdown and generated HTML.
md_path = os.path.join(base_dir, "Markdown")
html_path = os.path.join(base_dir, "HTML")
def buffer(scholie):
    """Announce a newly published Scholie on social media via the Buffer API.

    Returns a human-readable status string in either case.
    """
    try:
        link = 'https://%s%s' % (Site.objects.get(pk=settings.SITE_ID).domain,
                                 reverse('Scholien:artikel_detail', args=[scholie.slug]))
        payload = [('profile_ids[]', profile) for profile in settings.BUFFER_SITE_IDS]
        payload += [
            ('access_token', settings.BUFFER_ACCESS_TOKEN),
            ('text', 'Neue Scholie:'),
            ('media[link]', link),
        ]
        r = requests.post('https://api.bufferapp.com/1/updates/create.json', data=payload)
    except AttributeError:
        # Raised when one of the BUFFER_* values is missing from settings.
        return 'Buffer values missing in settings.'
    return 'Buffer response: %s' % r
def markdown_to_html(markdown):
    """Render a markdown Scholie to HTML and split it into its sections.

    Returns a (public, private, public2, literatur) tuple: the text before
    the first "<<<" separator, between the first and second, after the
    second, and the rendered bibliography.
    """
    text = "---\nbibliography: {}\n---\n\n{}\n\n## Literatur".format(bib, markdown)
    # Render with pandoc, resolving citations against the BibTeX database.
    html = pypandoc.convert(text, 'html', format='md', extra_args=[], filters=['pandoc-citeproc'])
    # Tag blockquotes with the bootstrap class.
    html = re.sub("<blockquote>", "<blockquote class=\"blockquote\">", html)
    # En dashes ("--" to "–")
    html = re.sub("--", "–", html)
    # Cut off the rendered bibliography section.
    parts = re.split(r'<h2.*Literatur</h2>', html)
    literatur = parts[1].lstrip() if len(parts) > 1 else ""
    if not literatur:
        print('Keine Literatur gefunden.')
    # Split the body at the "<<<" separators.
    sections = re.split(r"<p><<<</p>", parts[0])
    public = sections[0]
    # lstrip removes possible leading paragraphs.
    private = sections[1].lstrip() if len(sections) > 1 else ""
    public2 = sections[2].lstrip() if len(sections) > 2 else ""
    if not private:
        print('Keinen privaten Teil gefunden.')
    return public, private, public2, literatur
# TODO: Beide Funktionen zusammenfassen.
def trelloToSQL():
    '''
    Reads Trello cards from the "Texte lektoriert" list of the play board,
    converts them with pandoc to HTML (including the bibliography), applies a
    few extra formatting fixes, and writes the result to the database.
    Processed cards are then moved to the "played" board.
    '''
    from Scholien.models import Artikel
    client = TrelloClient(api_key=settings.TRELLO_API_KEY, token=settings.TRELLO_TOKEN)
    play_board = client.get_board('55d5dfee98d40fcb68fc0e0b')
    played_board = client.get_board('55c4665a4afe2f058bd3cb0a')
    target_list = played_board.get_list('5774e15c515d20dd2aa0b534')
    for list in play_board.open_lists():
        if list.name == "Texte lektoriert":
            print('%d lektorierte(n) Text(e) gefunden.' % len(list.list_cards()))
            # Cards are processed bottom-up.
            for card in list.list_cards()[::-1]:
                title = card.name
                text = card.desc
                # Extract an optional "§§<id>" marker; a leading '!' marks priority.
                p = re.compile(r"§§.*")
                id = p.findall(text)
                id = id[0][2:] if id else title
                priority = 1 if id[0] == '!' else 0
                id = slugify(id)
                text = p.sub("", text, count=1)
                # BUGFIX: the original called `fobj_out.close` without
                # parentheses, so the file was never flushed/closed before
                # being reopened for reading below. Use context managers.
                with codecs.open(os.path.join(md_path, "%s.md" % title), "w", "utf-8") as fobj_out:
                    fobj_out.write("---\nbibliography: {}\n---\n\n{}\n\n## Literatur".format(bib, text))
                # Convert to HTML with pandoc, resolving citations.
                with codecs.open(os.path.join(md_path, "%s.md" % title), "r", "utf-8") as fobj_in:
                    md = fobj_in.read()
                extra_args = []
                filters = ['pandoc-citeproc']
                html = pypandoc.convert(md, 'html', format='md', extra_args=extra_args, filters=filters)
                # Tag blockquotes with the bootstrap class.
                p = re.compile("<blockquote>")
                html = p.sub("<blockquote class=\"blockquote\">", html)
                # En dashes ("--" to "–")
                p = re.compile("--")
                html = p.sub("–", html)
                # Split public and members-only parts at the "<<<" separator.
                p = re.compile(r"<p><<<</p>")
                split = re.split(p, html)
                public = split[0]
                # lstrip removes possible leading paragraphs.
                private = split[1].lstrip() if len(split) > 1 else ""
                if not private:
                    print('Keinen privaten Teil gefunden für', title)
                try:
                    art_neu = Artikel.objects.create(
                        slug=id, bezeichnung=title, inhalt=public,
                        inhalt_nur_fuer_angemeldet=private, prioritaet=priority
                    )  # *art_neu* currently not in use
                    print('%s erfolgreich in DB übertragen.' % title)
                except IntegrityError as e:
                    # Duplicate slug: the article exists; still move the card.
                    print('Artikel schon vorhanden')
                except Exception as e:
                    print(title, 'failed:', e)
                    continue
                card.change_board(played_board.id, target_list.id)
                card.set_pos('top')
def publish():
    '''
    Publish a new article every RELEASE_PERIOD days, priority articles first.
    Returns a human-readable status message.
    '''
    from Scholien.models import Artikel
    artikel_pub = Artikel.objects.all().order_by('-datum_publizieren')
    # NOTE(review): raises IndexError when no article exists at all - confirm
    # an initial article is always present.
    last = (datetime.date.today() - artikel_pub[0].datum_publizieren).days
    message = ''
    # BUGFIX: `neu` was previously unbound (NameError at `if neu:`) when no
    # unpublished article existed.
    neu = None
    # Only publish when the newest article is old enough.
    if last >= settings.RELEASE_PERIOD:
        artikel_p = Artikel.objects.filter(datum_publizieren=None, prioritaet=True)
        artikel_np = Artikel.objects.filter(datum_publizieren=None)
        # Prefer an article flagged with priority.
        if artikel_p:
            neu = artikel_p[0]
        elif artikel_np:
            message = 'Kein Artikel mit Priorität gefunden.'
            neu = artikel_np[0]
        else:
            message = 'Kein neuen Artikel gefunden.'
        if neu:
            neu.datum_publizieren = datetime.date.today()
            neu.save()
            r = buffer(neu)
            message = '%s publiziert. %s' % (neu, r)
    else:
        message = 'Letzter Artikel bereits vor %d Tagen veröffentlicht.' % last
    return message
|
# coding=utf-8
from typing import *
import abc
import six
from mdstudio.db.cursor import Cursor
from mdstudio.db.fields import Fields
from mdstudio.db.index import Index
from mdstudio.db.sort_mode import SortMode
from mdstudio.deferred.chainable import chainable
from mdstudio.deferred.return_value import return_value
# Use the richer pymongo collection type when pymongo is available; fall back
# to plain str/dict identifiers otherwise.
try:
    from pymongo.collection import Collection
    CollectionType = Union[str, Dict[str, str], Collection]
except ImportError:
    CollectionType = Union[str, Dict[str, str]]
# Type aliases used throughout the abstract database interface below.
DocumentType = Dict
AggregationOperator = Dict
ProjectionOperators = Dict
SortOperators = Optional[Union[List[Tuple[str, SortMode]], Tuple[str, SortMode]]]
IndexKeys = Union[List[Tuple[str, SortMode]], Tuple[str, SortMode]]
# noinspection PyShadowingBuiltins
@six.add_metaclass(abc.ABCMeta)
class IDatabase(object):
    """Abstract interface for document-database backends.

    Declares the CRUD, cursor, aggregation and index operations a concrete
    backend must implement. The ``# type:`` comments document the intended
    signatures in py2-compatible style; ``claims`` carries optional
    authorization context through every call.
    """
    @abc.abstractmethod
    def more(self, cursor_id, claims=None):
        # type: (str, Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def rewind(self, cursor_id, claims=None):
        # type: (str, Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def insert_one(self, collection, insert, fields=None, claims=None):
        # type: (CollectionType, DocumentType, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def insert_many(self, collection, insert, fields=None, claims=None):
        # type: (CollectionType, List[DocumentType], Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def replace_one(self, collection, filter, replacement, upsert=False, fields=None, claims=None):
        # type: (CollectionType, DocumentType, DocumentType, bool, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def count(self, collection, filter=None, skip=None, limit=None, fields=None, claims=None, cursor_id=None, with_limit_and_skip=False):
        # type: (CollectionType, Optional[DocumentType], Optional[int], Optional[int], Optional[Fields], Optional[dict], Optional[str], bool) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def update_one(self, collection, filter, update, upsert=False, fields=None, claims=None):
        # type: (CollectionType, DocumentType, DocumentType, bool, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def update_many(self, collection, filter, update, upsert=False, fields=None, claims=None):
        # type: (CollectionType, DocumentType, DocumentType, bool, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def find_one(self, collection, filter, projection=None, skip=None, sort=None, fields=None, claims=None):
        # type: (CollectionType, DocumentType, Optional[ProjectionOperators], Optional[int], SortOperators, Optional[Fields],Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def find_many(self, collection, filter, projection=None, skip=None, limit=None, sort=None, fields=None, claims=None):
        # type: (CollectionType, DocumentType, Optional[ProjectionOperators], Optional[int], Optional[int], SortOperators, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def find_one_and_update(self, collection, filter, update, upsert=False, projection=None, sort=None, return_updated=False, fields=None,
                            claims=None):
        # type: (CollectionType, DocumentType, DocumentType, bool, Optional[ProjectionOperators], SortOperators, bool, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def find_one_and_replace(self, collection, filter, replacement, upsert=False, projection=None, sort=None,
                             return_updated=False, fields=None, claims=None):
        # type: (CollectionType, DocumentType, DocumentType, bool, Optional[ProjectionOperators], SortOperators, bool, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def find_one_and_delete(self, collection, filter, projection=None, sort=None, fields=None, claims=None):
        # type: (CollectionType, DocumentType, Optional[ProjectionOperators], SortOperators, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def distinct(self, collection, field, filter=None, fields=None, claims=None):
        # type: (CollectionType, str, Optional[DocumentType], Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def aggregate(self, collection, pipeline):
        # type: (CollectionType, List[AggregationOperator]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def delete_one(self, collection, filter, fields=None, claims=None):
        # type: (CollectionType, DocumentType, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def delete_many(self, collection, filter, fields=None, claims=None):
        # type: (CollectionType, DocumentType, Optional[Fields], Optional[dict]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def create_indexes(self, collection, indices):
        # type: (CollectionType, List[Index]) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def drop_all_indexes(self, collection):
        # type: (CollectionType) -> Any
        raise NotImplementedError
    @abc.abstractmethod
    def drop_indexes(self, collection, indexes):
        # type: (CollectionType, List[Index]) -> Any
        raise NotImplementedError
    @chainable
    def make_cursor(self, results, fields):
        # Wrap a (possibly deferred) result set in a Cursor bound to this db.
        res = yield results
        return_value(Cursor(self, res, fields))
    @staticmethod
    def extract(result, prperty):
        # Return a single property of *result*.
        # NOTE(review): 'prperty' looks like a typo for 'property'; kept
        # as-is because keyword callers may rely on the name.
        return result[prperty]
    @staticmethod
    @chainable
    def transform(result, transformed):
        # Apply *transformed* to the resolved result; None passes through.
        res = yield result
        return_value(None if res is None else transformed(res))
|
<gh_stars>1-10
import random
from m1n1.utils import *
from m1n1.constructutils import *
from construct import *
from .cmdqueue import *
__all__ = ["channelNames", "channelRings", "DeviceControlMsg", "EventMsg", "StatsMsg"]
class RunCmdQueueMsg(ConstructClass):
    """Channel message asking the firmware to run a command queue."""
    subcon = Struct (
        "queue_type" / Default(Int32ul, 0),
        "cmdqueue_addr" / Default(Hex(Int64ul), 0),
        "cmdqueue" / Lazy(ROPointer(this.cmdqueue_addr, CommandQueueInfo)),
        "head" / Default(Int32ul, 0),
        "event_number" / Default(Int32ul, 0),
        "new_queue" / Default(Int32ul, 0),
        "data" / HexDump(Default(Bytes(0x18), bytes(0x18))),
    )
    # Human-readable names for queue_type values.
    TYPES = {
        0: "SubmitTA",
        1: "Submit3D",
        2: "SubmitCompute",
    }
    def __str__(self):
        s = super().__str__() + "\n"
        if self.cmdqueue_addr == 0:
            return s + "<Empty RunCmdQueueMsg>"
        # Random tag appended to the line - presumably so repeated log lines
        # can be told apart; confirm with the consumer of this output.
        r = random.randrange(2**64)
        s += f"{self.TYPES[self.queue_type]}(0x{self.cmdqueue_addr & 0xfff_ffffffff:x}, {self.head}, ev={self.event_number}, new={self.new_queue}) //{r:x}"
        return s
class DC_DestroyContext(ConstructClass):
    """Device-control message 0x17; carries a context address plus unknown words."""
    subcon = Struct (
        "msg_type" / Const(0x17, Int32ul),
        "unk_4" / Hex(Int32ul),
        "unk_8" / Hex(Int32ul),
        "unk_c" / Hex(Int32ul),
        "unk_10" / Hex(Int32ul),
        "unk_14" / Hex(Int32ul),
        "unk_18" / Hex(Int32ul),
        "context_addr" / Hex(Int64ul),
        "rest" / HexDump(Bytes(0xc))
    )
class DeviceControl_19(ConstructClass):
    """Device-control message 0x19 with 0x2c bytes of opaque payload."""
    subcon = Struct (
        "msg_type" / Const(0x19, Int32ul),
        "data" / HexDump(Default(Bytes(0x2c), bytes(0x2c)))
    )
class DeviceControl_1e(ConstructClass):
    """Device-control message 0x1e with 0x2c bytes of opaque payload.

    BUGFIX: the original had a trailing comma after the closing parenthesis
    (`subcon = Struct(...),`), which made ``subcon`` a one-element *tuple*
    instead of a Struct, so this class could never parse or build.
    """
    subcon = Struct (
        "msg_type" / Const(0x1e, Int32ul),
        "data" / HexDump(Bytes(0x2c)),
    )
class DeviceControl_23(ConstructClass):
    """Device-control message 0x23 with 0x2c bytes of opaque payload."""
    subcon = Struct (
        "msg_type" / Const(0x23, Int32ul),
        "data" / HexDump(Default(Bytes(0x2c), bytes(0x2c))),
    )
class UnknownMsg(ConstructClass):
    """Fallback decoder: any message type, payload kept as a raw hex dump."""
    subcon = Struct (
        "msg_type" / Hex(Int32ul),
        "data" / HexDump(Bytes(0x2c)),
    )
# Device-control messages are 0x30 bytes; known message types are tried in
# order, falling back to UnknownMsg. (DeviceControl_1e is not listed here,
# so 0x1e messages decode as UnknownMsg - confirm whether that is intended.)
DeviceControlMsg = FixedSized(0x30, Select(
    DC_DestroyContext,
    DeviceControl_19,
    DeviceControl_23,
    UnknownMsg,
))
# Tends to count up
class StatsMsg_00(ConstructClass):
    """Stats message 0x00: a single 64-bit offset (tends to count up)."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x00, Int32ul)),
        Padding(0x18), # ??? why the hole? never written...
        "offset" / Hex(Int64ul),
        Padding(0xc), # Confirmed padding
    )
class StatsMsg_02(ConstructClass):
    """Stats message 0x02: a timestamp followed by 0x24 opaque bytes."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x02, Int32ul)),
        "timestamp" / Hex(Int64ul),
        "data" / HexDump(Bytes(0x24)),
    )
# Related to 00, tends to "reset" the count
class StatsMsg_03(ConstructClass):
    """Stats message 0x03: a 64-bit offset; related to 0x00 (resets the count)."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x03, Int32ul)),
        "offset" / Hex(Int64ul),
        Padding(0x24), # Confirmed padding
    )
class StatsMsg_04(ConstructClass):
    """Stats message 0x04: four unknown 32-bit words plus a 64-bit offset."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x04, Int32ul)),
        "unk0" / Hex(Int32ul),
        "unk1" / Hex(Int32ul),
        "unk2" / Hex(Int32ul),
        "unk3" / Hex(Int32ul),
        "offset" / Hex(Int64ul),
        Padding(0x14), # Confirmed padding
    )
class StatsMsg_09(ConstructClass):
    """Stats message 0x09: six unknown 32-bit words."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x09, Int32ul)),
        "unk0" / Hex(Int32ul),
        "unk1" / Hex(Int32ul),
        "unk2" / Hex(Int32ul),
        "unk3" / Hex(Int32ul),
        "unk4" / Hex(Int32ul),
        "unk5" / Hex(Int32ul),
        Padding(0x14), # Confirmed padding
    )
class StatsMsg_0a(ConstructClass):
    """Stats message 0x0a: four unknown 32-bit words after an unwritten hole."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x0a, Int32ul)),
        Padding(8), # Not written
        "unk0" / Hex(Int32ul),
        "unk1" / Hex(Int32ul),
        "unk2" / Hex(Int32ul),
        "unk3" / Hex(Int32ul),
        Padding(0x14), # Confirmed padding
    )
class StatsMsg_0b(ConstructClass):
    """Stats message 0x0b: two timestamps plus six unknown 32-bit words."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x0b, Int32ul)),
        "timestamp" / Hex(Int64ul),
        "timestamp2" / Hex(Int64ul),
        "unk0" / Hex(Int32ul),
        "unk1" / Hex(Int32ul),
        "unk2" / Hex(Int32ul),
        "unk3" / Hex(Int32ul),
        "unk4" / Hex(Int32ul),
        "unk5" / Hex(Int32ul),
        Padding(4), # Confirmed padding
    )
class StatsMsg_0c(ConstructClass):
    """Stats message 0x0c: a timestamp and a single flag word."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x0c, Int32ul)),
        "timestamp" / Hex(Int64ul),
        "flag" / Int32ul,
        Padding(0x20), # Confirmed padding
    )
class StatsMsg_0d(ConstructClass):
    """Stats message 0x0d: four unknown 32-bit words after an unwritten hole."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x0d, Int32ul)),
        Padding(8), # Not written
        "unk0" / Hex(Int32ul),
        "unk1" / Hex(Int32ul),
        "unk2" / Hex(Int32ul),
        "unk3" / Hex(Int32ul),
        Padding(0x14), # Confirmed padding
    )
class StatsMsg_0e(ConstructClass):
    """Stats message 0x0e: five unknown 32-bit words after a 4-byte hole."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x0e, Int32ul)),
        Padding(4), # Not written
        "unk0" / Hex(Int32ul),
        "unk1" / Hex(Int32ul),
        "unk2" / Hex(Int32ul),
        "unk3" / Hex(Int32ul),
        "unk4" / Hex(Int32ul),
        Padding(0x14), # Confirmed padding
    )
# Stats messages are 0x30 bytes; known types are tried in order with a raw
# fallback. StatsMsg_02 is commented out here, so 0x02 messages currently
# decode as UnknownMsg.
StatsMsg = FixedSized(0x30, Select(
    StatsMsg_00,
    #StatsMsg_02,
    StatsMsg_03,
    StatsMsg_04,
    StatsMsg_09,
    StatsMsg_0a,
    StatsMsg_0b,
    StatsMsg_0c,
    StatsMsg_0d,
    StatsMsg_0e,
    UnknownMsg,
))
class FWLogMsg(ConstructClass):
    """Firmware log entry: sequence number, timestamp and an ASCII message."""
    subcon = Struct (
        "msg_type" / Hex(Const(0x03, Int32ul)),
        "seq_no" / Hex(Int32ul),
        "timestamp" / Hex(Int64ul),
        "msg" / PaddedString(0xc8, "ascii")
    )
class FlagMsg(ConstructClass):
    """Event message type 1: reports which flag is firing."""
    subcon = Struct (
        "msg_type" / Hex(Const(1, Int32ul)),
        "firing" / Hex(Int32ul),
        "unk_8" / Hex(Int32ul),
        "unk_c" / Hex(Int32ul),
        "unk_10" / Hex(Int32ul),
        "unk_14" / Hex(Int16ul),
        "unkpad_16" / HexDump(Bytes(0x38 - 0x16)),
    )
class FaultMsg(ConstructClass):
    """Event message type 4: fault report carrying an index and a queue id."""
    subcon = Struct (
        "msg_type" / Hex(Const(4, Int32ul)),
        "index" / Hex(Int32ul),
        "unk_8" / Hex(Int32ul),
        "queue" / Hex(Int32ul),
        "unkpad_16" / HexDump(Bytes(0x38 - 0x10)),
    )
# Event messages are 0x38 bytes; anything that is not a FlagMsg is kept as a
# raw hex dump. (FaultMsg above is not tried here - confirm if intentional.)
EventMsg = FixedSized(0x38, Select(
    FlagMsg,
    HexDump(Bytes(0x38)),
))
class KTraceMsg(ConstructClass):
    """Kernel-trace channel message: 0x70 bytes of undecoded payload."""
    subcon = Struct (
        "unk" / HexDump(Bytes(0x70)),
    )
# Names of the 17 firmware channels: four groups of TA/3D/CL work queues
# followed by the control, event, log, trace and stats channels.
channelNames = [
    "TA_0", "3D_0", "CL_0",
    "TA_1", "3D_1", "CL_1",
    "TA_2", "3D_2", "CL_2",
    "TA_3", "3D_3", "CL_3",
    "DevCtrl",
    "Event", "FWLog", "KTrace", "Stats"
]
# Ring definitions per channel, matching channelNames order. Each entry is a
# list of (message class, message size, slot count) tuples - the FWLog
# channel has six rings, all others one.
channelRings = (
    [[(RunCmdQueueMsg, 0x30, 0x100)]] * 12 + [
        [(DeviceControlMsg, 0x30, 0x100)],
        [(EventMsg, 0x38, 0x100)],
        [
            (FWLogMsg, 0xd8, 0x100), # unk 0
            (FWLogMsg, 0xd8, 0x100), # init log
            (FWLogMsg, 0xd8, 0x100), # unk 2
            (FWLogMsg, 0xd8, 0x100), # warnings?
            (FWLogMsg, 0xd8, 0x100), # unk 4
            (FWLogMsg, 0xd8, 0x100), # unk 5
        ],
        [(KTraceMsg, 0x70, 0x100)],
        [(StatsMsg, 0x30, 0x100)]
    ]
)
class ChannelStateFields(RegMap):
    """Register map of one ring's state block: read and write pointers."""
    READ_PTR = 0x00, Register32
    WRITE_PTR = 0x20, Register32
class Channel(Reloadable):
    """Accessor for one firmware communication channel.

    Translates the channel's state block and ring buffers through the UAT
    page tables and provides typed get/put access to individual ring slots.
    """
    def __init__(self, u, uat, info, ring_defs, base=None):
        self.uat = uat
        self.u = u
        self.p = u.proxy
        self.iface = u.iface
        self.ring_defs = ring_defs
        self.info = info
        # One 0x30-byte state block per ring; the assert requires the whole
        # range to be physically contiguous.
        self.st_maps = uat.iotranslate(0, info.state_addr, 0x30 * len(ring_defs))
        assert len(self.st_maps) == 1
        self.state_phys = self.st_maps[0][0]
        self.state = []
        self.rb_base = []
        self.rb_maps = []
        if base is None:
            p = info.ringbuffer_addr
        else:
            p = base
        # Ring buffers are laid out back to back starting at p.
        for i, (msg, size, count) in enumerate(ring_defs):
            assert msg.sizeof() == size
            self.state.append(ChannelStateFields(self.u, self.state_phys + 0x30 * i))
            m = uat.iotranslate(0, p, size * count)
            self.rb_base.append(p)
            self.rb_maps.append(m)
            p += size * count
    def get_message(self, ring, index, meta_fn=None):
        # Parse the message currently stored in the given ring slot.
        msgcls, size, count = self.ring_defs[ring]
        assert index < count
        addr = self.rb_base[ring] + index * size
        stream = self.uat.iostream(0, addr)
        stream.meta_fn = meta_fn
        return msgcls.parse_stream(stream)
    def clear_message(self, ring, index):
        # Overwrite the slot with a little-endian 0xdeadbeef fill pattern.
        msgcls, size, count = self.ring_defs[ring]
        self.put_message(ring, index, b"\xef\xbe\xad\xde" * (size // 4))
    def put_message(self, ring, index, obj):
        # Serialize obj (or write raw bytes) into the given ring slot.
        msgcls, size, count = self.ring_defs[ring]
        assert index < count
        if isinstance(obj, bytes):
            data = obj
        else:
            data = obj.build()
        self.uat.iowrite(0, self.rb_base[ring] + index * size, data)
class ChannelInfo(ConstructClass):
    """Addresses of one channel's state block and ring buffer."""
    subcon = Struct(
        "state_addr" / Hex(Int64ul),
        "ringbuffer_addr" / Hex(Int64ul),
    )
class ChannelInfoSet(ConstructClass):
    """One ChannelInfo entry per named channel, in channelNames order."""
    CHAN_COUNT = len(channelNames)
    subcon = Struct(*[ name / ChannelInfo for name in channelNames])
# Additionally export every callable/class defined in this module.
__all__.extend(k for k, v in globals().items()
               if (callable(v) or isinstance(v, type)) and v.__module__ == __name__)
|
import distutils
import os
from distutils.core import setup
import _version
# pip's programmatic entry point moved between versions
# (pip.main -> pip._internal.main); try both. The bare `except:` is narrowed
# to ImportError so unrelated errors are no longer swallowed.
try:
    from pip import main as pipmain
except ImportError:
    from pip._internal import main as pipmain
# appdirs is needed below (datapath) before install_requires takes effect.
pipmain(['install', 'appdirs'])

__version__ = _version.__version__
appname = _version.APPNAME
appauthor = _version.APPAUTHOR
def iamroot():
    '''Return True when this process runs with admin/root privileges.'''
    try:
        # POSIX: root has uid 0.
        return os.getuid() == 0
    except AttributeError:
        # Windows has no getuid(); ask the shell API instead.
        import ctypes
        return ctypes.windll.shell32.IsUserAnAdmin() != 0
def read(fname):
    """Return the text of *fname*, resolved relative to this file's directory."""
    # Use a context manager so the handle is closed promptly; the original
    # leaked the open file and relied on garbage collection.
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
def requirements():
    """Parse requirements.txt into a list of non-empty, stripped lines."""
    with open('requirements.txt', 'r') as f:
        stripped = (line.strip() for line in f)
        return [line for line in stripped if line]
def datafiles(d):
    """Collect (directory, [file paths]) pairs under *d* for distutils data_files."""
    collected = []
    for root, _dirs, files in os.walk(os.path.join(os.path.dirname(__file__), d)):
        if not files:
            continue
        # Make the directory relative to the current working directory.
        rel_root = root.replace(os.getcwd() + os.path.sep, '')
        collected.append((rel_root, [os.path.join(rel_root, name) for name in files]))
    return collected
def datapath():
    '''Return the directory where app data should be installed.'''
    import appdirs
    # Root installs go to the system-wide location, everyone else gets the
    # per-user location.
    if iamroot():
        return appdirs.site_data_dir(appname, appauthor)
    return appdirs.user_data_dir(appname, appauthor)
class myinstall(distutils.command.install.install):
    """Install command that redirects install_data to the appdirs location.

    NOTE(review): this relies on ``distutils.command.install`` being reachable
    as an attribute of the bare ``distutils`` import - presumably populated as
    a side effect of ``from distutils.core import setup``; confirm.
    """
    def __init__(self, *args, **kwargs):
        distutils.command.install.install.__init__(self, *args, **kwargs)
        # Point the install_data sub-command at the platform data directory.
        self.distribution.get_command_obj('install_data').install_dir = datapath()
# Register the distribution.  Note the custom install command (myinstall above)
# which redirects data files to the platform-specific app-data directory.
setup(
    name='pyrap-web',
    packages=['pyrap_examples',
              'pyrap_examples.controls',
              'pyrap_examples.helloworld',
              'pyrap_examples.layouts',
              'pyrap_examples.pyrap_admin',
              'pyrap_examples.sayhello',
              'pyrap',
              'pyrap._version',
              'pyrap.pwt',
              'pyrap.pwt.barchart',
              'pyrap.pwt.bubblyclusters',
              'pyrap.pwt.graph',
              'pyrap.pwt.plot',
              'pyrap.pwt.radar',
              'pyrap.pwt.radar_smoothed',
              'pyrap.pwt.radialdendrogramm',
              'pyrap.pwt.ros3d',
              'pyrap.pwt.svg',
              'pyrap.pwt.tree',
              'pyrap.pwt.video',
              'pyrap.web',
              'pyrap.web.contrib',
              'pyrap.web.wsgiserver'
              ],
    py_modules=[],
    package_dir={
        'pyrap': 'pyrap',
        'pyrap._version': '_version',
        '': '.'
    },
    package_data={'': ['*']},
    # Non-Python assets, collected by the datafiles() helper above.
    data_files=datafiles('3rdparty') +
               datafiles('css') +
               datafiles('etc') +
               datafiles('html') +
               datafiles('js') +
               datafiles('resource'),
    version=__version__,
    description='pyRAP is a framework for implementing extremely powerful and beautiful web-based AJAX '
                'applications in pure Python. No HTML. No JavaScript. No PHP. Just pure Python. It has been designed '
                'as a lightweight, easy-to-use yet powerful library that makes development of SaaS applications as '
                'easy and fast as possible.',
    long_description=read('README'),
    author='<NAME>, <NAME>',
    author_email='<EMAIL>, <EMAIL>',
    url='https://pyrap.org/',
    download_url='https://github.com/danielnyga/pyrap/archive/{}.tar.gz'.format(__version__),
    keywords=['AJAX applications', 'python', 'web development'],
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Intended Audience :: Other Audience',
        'Intended Audience :: System Administrators',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering :: Artificial Intelligence ',
        'Topic :: Software Development :: Widget Sets',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
    ],
    install_requires=requirements(),
    entry_points={
        'console_scripts': [
            'controls=pyrap_examples.controls.pyrap_controls:main',
            'helloworld=pyrap_examples.helloworld.main:main',
            'layouts=pyrap_examples.layouts.gridlayout:main',
            'admin=pyrap_examples.pyrap_admin.admin:main',
            'sayhello=pyrap_examples.sayhello.main:main'
        ],
    },
    # Install via the custom command defined above.
    cmdclass={'install': myinstall}
)
|
<filename>skule_vote/backend/ballot.py
import math
# Entry point: calculate_results(ballots, choices, numSeats) -> result dict
# (see the reference notes at the end of this module for the exact shapes).
# Sentinel candidate name: "Reopen Nominations" can never be eliminated.
RON = "Reopen Nominations"
def calculate_results(ballots, choices, numSeats):
    """Tally ranked-choice ballots and return the election results.

    Three cases are handled:
      * exactly two choices — a YES/NO referendum or one candidate vs RON;
      * a single seat — instant-runoff: eliminate the bottom candidate each
        round until someone reaches the quota;
      * multiple seats — STV-style: winners' surplus votes transfer to later
        preferences at a fractional value.

    Args:
        ballots: list of {"sid": ..., "ranking": [choice indices in order]};
            an empty ranking marks a spoiled ballot.
        choices: list of {"name": ..., "statement": ...}.
        numSeats: number of seats to fill.

    Returns:
        dict with keys "winners", "rounds", "quota", "totalVotes",
        "spoiledBallots" (see the reference notes at the end of this module).
    """
    result = {
        "winners": [],
        "rounds": [],
        "quota": 0,
        "totalVotes": -1,
        "spoiledBallots": 0,
    }
    # YES/NO election (simplest case) i.e. a referendum or one person running
    if len(choices) == 2:
        totalVotes, spoiledBallots = 0, 0
        # initialize one round with a zero count per choice
        roundObject = {}
        for i in range(len(choices)):
            roundObject[choices[i]["name"]] = 0
        result["rounds"].append(roundObject)
        # go through each ballot, which will either be yes, no, or blank (spoil)
        for i in range(len(ballots)):
            ranking = ballots[i]["ranking"]
            # check for spoiled (ranking would be an empty list)
            if len(ranking) != 0:
                if ranking[0] < len(choices):
                    name = choices[ranking[0]]["name"]
                    result["rounds"][0][name] += 1
                else:
                    print(f"ERROR - Ballot contained invalid ranking: {ranking[0]}")
                totalVotes += 1
            else:
                spoiledBallots += 1
        result["totalVotes"] = totalVotes
        result["spoiledBallots"] = spoiledBallots
        result["quota"] = math.floor(
            totalVotes / 2 + 1
        )  # may be unnecessary for this election, but better to have it and not need it
        ch1, ch2 = choices[0]["name"], choices[1]["name"]
        result["winners"].append(
            ch1 if (result["rounds"][0][ch1] > result["rounds"][0][ch2]) else ch2
        )
        # check for a tie — a tie counts as a NO
        if result["rounds"][0][ch1] == result["rounds"][0][ch2]:
            result["winners"][0] = "NO (TIE)"
    # single seat election
    # keep eliminating the bottom choice (you cannot eliminate "Reopen Nominations" aka RON), until
    # one person gets >50% of the vote, keeping track of intermediate rounds for audit reasons
    elif numSeats == 1:
        stillCounting = True  # need to know when to stop looping over ballots
        remainingChoices = (
            []
        )  # starts with all choices once someone is eliminated it sets name to "Eliminated" to maintain indices
        currentRound, totalVotes, spoiledBallots = 0, 0, 0
        # "remainingChoices" has all choices to start
        for i in range(len(choices)):
            remainingChoices.append(choices[i]["name"])
        while stillCounting:
            # a little redundant, but avoids linkage of "roundObjects"
            roundObject = {}
            for i in range(len(choices)):
                roundObject[choices[i]["name"]] = 0
            result["rounds"].append(roundObject)
            for i in range(len(ballots)):
                ranking = ballots[i]["ranking"]
                if len(ranking) != 0:  # check for spoiled ballot
                    currentRanking = 0
                    keepChecking = True
                    # need to keep going down the list if someone's first choice has been eliminated (perform some checks each time)
                    while keepChecking:
                        # check for someone not completing a ballot fully (i.e. spoiling part of it)
                        if currentRanking < len(ranking):
                            # check for valid ranking
                            if ranking[currentRanking] < len(choices):
                                if (
                                    remainingChoices[ranking[currentRanking]]
                                    != "Eliminated"
                                ):
                                    name = remainingChoices[ranking[currentRanking]]
                                    result["rounds"][currentRound][name] += 1
                                    keepChecking = False
                                else:
                                    currentRanking += 1
                            else:
                                print(
                                    f"ERROR - Ballot contained invalid ranking: {ranking[currentRanking]}"
                                )
                                # TODO: Figure out what we're doing in this edge case, below is temp for now
                                keepChecking = False
                        else:
                            keepChecking = False  # this ballot is no longer useful
                    totalVotes += 1
                else:
                    spoiledBallots += 1
            # check the results for this round
            maxVotes = -1
            maxName = ""  # NOTE(review): assigned below but never read afterwards
            minVotes = 999999
            for i in range(len(remainingChoices)):
                if remainingChoices[i] != "Eliminated":
                    votes = result["rounds"][currentRound][remainingChoices[i]]
                    if votes > maxVotes:
                        maxVotes = votes
                        maxName = remainingChoices[i]
                    if votes < minVotes and remainingChoices[i] != RON:
                        minVotes = votes
            # assign totalVotes after the first pass through the ballots to use any ballot that has a valid first-preference
            # also assign spoiledBallots at this time too
            if result["totalVotes"] == -1:
                result["totalVotes"] = totalVotes
                result["spoiledBallots"] = spoiledBallots
                result["quota"] = math.floor(totalVotes / (numSeats + 1) + 1)
            # check for a winner, otherwise keep going and eliminate everyone with the lowest amount of votes total
            if maxVotes >= result["quota"]:
                # should only be one, but possibility remains for a complete tie
                result["winners"] = backwardsEliminationProcess(
                    -1,
                    maxVotes,
                    remainingChoices,
                    result["rounds"],
                    currentRound,
                    ballots,
                )
                stillCounting = False
            else:
                backwardsEliminationProcess(
                    minVotes,
                    -1,
                    remainingChoices,
                    result["rounds"],
                    currentRound,
                    ballots,
                )
                currentRound += 1
                # check to make sure there are still valid candidates left
                validCandidates = False
                for i in range(len(remainingChoices)):
                    if (
                        remainingChoices[i] != "Eliminated"
                        and remainingChoices[i] != RON
                    ):
                        validCandidates = True
                        break
                if not validCandidates:
                    stillCounting = False
    # multi-seat election with more than two candidates
    # Note: Case when RON wins something, stop (any other seats are unfilled)
    else:
        stillCounting = True  # need to know when to stop looping over ballots
        remainingChoices = (
            []
        )  # similar as above case, except will also use "Winner" to indicate a winner of one of the seats
        currentRound, totalVotes, spoiledBallots, totalWinners = 0, 0, 0, 0
        winnerObject = {}  # keeps track of candidates votes when they win the election
        # "remainingChoices" has all choices to start
        for i in range(len(choices)):
            remainingChoices.append(choices[i]["name"])
        while stillCounting:
            # a little redundant, but avoids linkage of "roundObjects"
            roundObject = {}
            for i in range(len(choices)):
                roundObject[choices[i]["name"]] = 0
            result["rounds"].append(roundObject)
            for i in range(len(ballots)):
                ranking = ballots[i]["ranking"]
                if len(ranking) != 0:  # check for spoiled ballot
                    currentRanking = 0
                    keepChecking = True
                    voteValue = (
                        1  # updates as you pass over winners and adjusts accordingly
                    )
                    # need to keep going down the list if someone's first choice has been eliminated (perform some checks each time)
                    while keepChecking:
                        # check for someone not completing a ballot fully (i.e. spoiling part of it)
                        if currentRanking < len(ranking):
                            # check for valid ranking
                            if ranking[currentRanking] < len(choices):
                                name = remainingChoices[ranking[currentRanking]]
                                # this should only be hit after "quota" is set and you're at least on the second round
                                if name != "Eliminated" and name != "Winner":
                                    result["rounds"][currentRound][name] += voteValue
                                    keepChecking = False
                                else:
                                    if name == "Winner":
                                        name = choices[ranking[currentRanking]]["name"]
                                        # surplus transfer: scale this ballot by the
                                        # winner's surplus fraction before moving on
                                        voteValue = (
                                            voteValue
                                            * (winnerObject[name] - result["quota"])
                                            / (winnerObject[name])
                                        )
                                    currentRanking += 1
                            else:
                                print(
                                    f"ERROR - Ballot contained invalid ranking: {ranking[currentRanking]}"
                                )
                                # TODO: Figure out what we're doing in this edge case, below is temp for now
                                keepChecking = False
                        else:
                            keepChecking = False  # this ballot is no longer useful
                    totalVotes += 1
                else:
                    spoiledBallots += 1
            # check the results for this round
            maxVotes = -1
            minVotes = 999999
            for i in range(len(remainingChoices)):
                if (
                    remainingChoices[i] != "Eliminated"
                    and remainingChoices[i] != "Winner"
                ):
                    votes = result["rounds"][currentRound][remainingChoices[i]]
                    if votes > maxVotes:
                        maxVotes = votes
                    if votes < minVotes and remainingChoices[i] != RON:
                        minVotes = votes
            # assign totalVotes after the first pass through the ballots to use any ballot that has a valid first-preference
            # also assign spoiledBallots at this time too
            if result["totalVotes"] == -1:
                result["totalVotes"] = totalVotes
                result["spoiledBallots"] = spoiledBallots
                result["quota"] = math.floor(totalVotes / (numSeats + 1) + 1)
            # check for a winner, otherwise keep going and eliminate everyone with the lowest amount of votes total
            if maxVotes >= result["quota"]:
                winnerList = backwardsEliminationProcess(
                    -1,
                    maxVotes,
                    remainingChoices,
                    result["rounds"],
                    currentRound,
                    ballots,
                )
                for i in range(len(winnerList)):
                    totalWinners += 1
                    result["winners"].append(winnerList[i])
                    winnerObject[winnerList[i]] = maxVotes
                    # stop when all seats are filled or RON takes a seat
                    if totalWinners >= numSeats or winnerList[i] == RON:
                        stillCounting = False
            else:
                backwardsEliminationProcess(
                    minVotes,
                    -1,
                    remainingChoices,
                    result["rounds"],
                    currentRound,
                    ballots,
                )
                # check to make sure there are still valid candidates left
                validCandidates = False
                for i in range(len(remainingChoices)):
                    if (
                        remainingChoices[i] != "Eliminated"
                        and remainingChoices[i] != "Winner"
                        and remainingChoices[i] != RON
                    ):
                        validCandidates = True
                        break
                if not validCandidates:
                    stillCounting = False
            currentRound += 1
    return result
# Used for deciding which candidate to eliminate or which one to declare as a
# winner for a round. Uses a backwards-elimination process: in the case of a
# tie, first compare earlier rounds, then the ballots' 2nd preferences, then
# 3rd, and so on. If there is still a tie after all of this, eliminate all
# tied candidates or declare all of them winners for that round. Either way,
# the CRO should review the ballots carefully in cases of "extreme ties" to
# make the final call if need be.
# Note: exactly one of "minVotes"/"maxVotes" equals -1, so the function decides
# on the fly which comparison to make.
def backwardsEliminationProcess(
    minVotes, maxVotes, candidateList, roundHistory, currentRound, ballots
):
    """Resolve which tied candidate(s) to eliminate or declare round winners.

    Exactly one of *minVotes* / *maxVotes* is -1; the other selects the path:
    elimination (lowest count) or winner (highest count). Ties are broken by
    walking earlier rounds backwards, then by comparing the ballots' 2nd,
    3rd, ... preferences; a remaining tie eliminates (or declares winners)
    every tied candidate.

    Mutates *candidateList* in place, replacing entries with the markers
    "Eliminated" / "Winner". Returns the list of winner names on the winner
    path (empty or None on the elimination path — callers ignore it there).
    """
    # stores the indices of the names in candidateList
    eliminationList, winnerList = [], []
    eliminationPath = minVotes != -1  # easy boolean comparison to be used later
    returnList = []
    for i in range(len(candidateList)):
        if candidateList[i] != "Eliminated" and candidateList[i] != "Winner":
            if (
                minVotes == roundHistory[currentRound][candidateList[i]]
                and candidateList[i] != RON
            ):
                eliminationList.append(i)
            elif maxVotes == roundHistory[currentRound][candidateList[i]]:
                winnerList.append(i)
    if len(eliminationList) == 1:
        # unambiguous single elimination — no tie-break needed
        candidateList[eliminationList[0]] = "Eliminated"
        return
    elif len(winnerList) == 1:
        # unambiguous single winner
        returnList.append(candidateList[winnerList[0]])
        candidateList[winnerList[0]] = "Winner"
    else:
        # first look through the rounds backwards until you reach the first round
        while currentRound > 0:
            currentRound -= 1
            # arbitrary choice of zero index for comparison purposes
            if eliminationPath:
                minVotes = roundHistory[currentRound][candidateList[eliminationList[0]]]
                for i in range(1, len(eliminationList)):
                    if (
                        roundHistory[currentRound][candidateList[eliminationList[i]]]
                        < minVotes
                    ):
                        minVotes = roundHistory[currentRound][
                            candidateList[eliminationList[i]]
                        ]
                eliminationList = checkCandidates(
                    minVotes, candidateList, roundHistory, currentRound, eliminationList
                )
            else:
                maxVotes = roundHistory[currentRound][candidateList[winnerList[0]]]
                for i in range(1, len(winnerList)):
                    if (
                        roundHistory[currentRound][candidateList[winnerList[i]]]
                        > maxVotes
                    ):
                        maxVotes = roundHistory[currentRound][
                            candidateList[winnerList[i]]
                        ]
                winnerList = checkCandidates(
                    maxVotes, candidateList, roundHistory, currentRound, winnerList
                )
            if len(eliminationList) == 1 or len(winnerList) == 1:
                break
        # if you still don't have a single candidate remaining, start looking through 2nd choice onwards (currentRound should equal 0)
        currentRanking = 1
        # len(roundHistory[0].keys()) is the max number of choices (i.e. number of candidates)
        while (
            currentRanking < len(roundHistory[0].keys())
            and len(eliminationList) != 1
            and len(winnerList) != 1
        ):
            # initialize votes array to line up with votes for candidates being considered for elimination
            votes = []
            listLength = (
                len(eliminationList) if len(eliminationList) > 0 else len(winnerList)
            )
            for i in range(listLength):
                votes.append(0)
            for i in range(len(ballots)):
                ranking = ballots[i]["ranking"]
                # check for spoiled ballot or partially spoiled ballot
                if len(ranking) != 0 and currentRanking < len(ranking):
                    for j in range(listLength):
                        if (
                            eliminationPath
                            and eliminationList[j] == ranking[currentRanking]
                        ) or (
                            not eliminationPath
                            and winnerList[j] == ranking[currentRanking]
                        ):  # check for valid ranking
                            votes[j] += 1
                            break
            # arbitrary choice of zero index for comparison purposes
            minVotes, maxVotes = votes[0], votes[0]
            changed = False
            for i in range(1, len(votes)):
                if eliminationPath and votes[i] < minVotes:
                    minVotes = votes[i]
                    changed = True
                elif not eliminationPath and votes[i] > maxVotes:
                    maxVotes = votes[i]
                    changed = True
            if changed:
                # drop every candidate matching the new extreme; pop() shifts
                # indices, hence the manual while-loop instead of a for-loop
                i = 0
                while i < len(votes):
                    if eliminationPath and votes[i] == minVotes:
                        eliminationList.pop(i)
                        votes.pop(i)
                    elif not eliminationPath and votes[i] == maxVotes:
                        winnerList.pop(i)
                        votes.pop(i)
                    else:
                        i += 1
            currentRanking += 1
        # always end with going through the list in case you still end up with a tie by the end of the process
        if eliminationPath:
            for i in range(len(eliminationList)):
                candidateList[eliminationList[i]] = "Eliminated"
        else:
            for i in range(len(winnerList)):
                returnList.append(candidateList[winnerList[i]])
                candidateList[winnerList[i]] = "Winner"
    return returnList
def checkCandidates(
    votesToCheck, candidateList, roundHistory, currentRound, currentList
):
    """Return the subset of *currentList* whose vote count in *currentRound*
    equals *votesToCheck* (indices into *candidateList*)."""
    round_votes = roundHistory[currentRound]
    return [
        idx for idx in currentList if round_votes[candidateList[idx]] == votesToCheck
    ]
# Parameters:
# "ballots" is every single ballot cast in that election (i.e. array of "ballot")
# "ranking" is an array as well, in order (index corresponds to "choice" array)
# ballot: {
# sid: string (voterID, useless here)
# ranking: number[] (index of choice in choices array)
# }
# "choices" is an array of "choice" (i.e. list of all candidates/options)
# choice: {
# name: string
# statement: string (useless here)
# }
# numSeats: Number of seats available in election
# "totalVotes" is the total votes cast (manually verify with quota after)
# each object in "rounds" is 1 round and displays the voteCount for remaining candidates
# Returns: {
# winners: [] (array of names)
# rounds: [{choice1: voteCount, ...}] (index in array = round number)
# quota: Number
# totalVotes: Number (total number of ballots cast)
# spoiledBallots: Number (total number of spoiled ballots)
# }
# (end of ballot-counting reference notes)
|
<filename>venv/lib/python3.6/site-packages/ansible_collections/cisco/iosxr/tests/unit/modules/network/iosxr/test_iosxr_acl_interfaces.py
# (c) 2021 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.cisco.iosxr.tests.unit.compat.mock import patch
from ansible_collections.cisco.iosxr.plugins.modules import (
iosxr_acl_interfaces,
)
from ansible_collections.cisco.iosxr.tests.unit.modules.utils import (
set_module_args,
)
from .iosxr_module import TestIosxrModule, load_fixture
class TestIosxrAclInterfacesModule(TestIosxrModule):
    """Unit tests for the iosxr_acl_interfaces resource module.

    Each test feeds a desired ``config`` (or a raw ``running_config``) through
    set_module_args() and asserts the commands / facts the module produces.
    Device interaction is mocked out in setUp(); _prepare() makes facts
    gathering return the canned fixture configuration.
    """

    module = iosxr_acl_interfaces

    def setUp(self):
        super(TestIosxrAclInterfacesModule, self).setUp()

        # Mock away the resource-module connection so no device is needed.
        self.mock_get_resource_connection = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module_base.get_resource_connection"
        )
        self.get_resource_connection = (
            self.mock_get_resource_connection.start()
        )

        # Mock the "show running-config interface" facts call.
        self.mock_execute_show_command = patch(
            "ansible_collections.cisco.iosxr.plugins.module_utils.network.iosxr.facts.acl_interfaces.acl_interfaces.Acl_interfacesFacts.get_config"
        )
        self.execute_show_command = self.mock_execute_show_command.start()

    def tearDown(self):
        super(TestIosxrAclInterfacesModule, self).tearDown()
        # Stop the *patchers*, not the mocks they returned.  The previous
        # version called self.get_resource_connection.stop(), which merely
        # records a call on the MagicMock and leaves the patch active.
        self.mock_get_resource_connection.stop()
        self.mock_execute_show_command.stop()

    def _prepare(self):
        """Make facts gathering return the canned device configuration."""
        def load_from_file(*args, **kwargs):
            return load_fixture("iosxr_acl_interfaces_config.cfg")

        self.execute_show_command.side_effect = load_from_file

    def test_iosxr_acl_interfaces_merged_idempotent(self):
        """Merging the already-present config must produce no commands."""
        self._prepare()
        set_module_args(
            dict(
                config=[
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        access_groups=[
                            dict(
                                afi="ipv4",
                                acls=[
                                    dict(name="acl_1", direction="in"),
                                    dict(name="acl_2", direction="out"),
                                ],
                            ),
                            dict(
                                afi="ipv6",
                                acls=[
                                    dict(name="acl6_1", direction="in"),
                                    dict(name="acl6_2", direction="out"),
                                ],
                            ),
                        ],
                    ),
                    dict(
                        name="GigabitEthernet0/0/0/1",
                        access_groups=[
                            dict(
                                afi="ipv4",
                                acls=[dict(name="acl_1", direction="out")],
                            )
                        ],
                    ),
                ],
                state="merged",
            )
        )
        self.execute_module(changed=False, commands=[])

    def test_iosxr_acl_interfaces_merged(self):
        """Merging onto an empty device emits the full set of commands."""
        set_module_args(
            dict(
                config=[
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        access_groups=[
                            dict(
                                afi="ipv4",
                                acls=[
                                    dict(name="acl_1", direction="in"),
                                    dict(name="acl_2", direction="out"),
                                ],
                            ),
                            dict(
                                afi="ipv6",
                                acls=[
                                    dict(name="acl6_1", direction="in"),
                                    dict(name="acl6_2", direction="out"),
                                ],
                            ),
                        ],
                    ),
                    dict(
                        name="GigabitEthernet0/0/0/1",
                        access_groups=[
                            dict(
                                afi="ipv4",
                                acls=[dict(name="acl_1", direction="in")],
                            )
                        ],
                    ),
                ],
                state="merged",
            )
        )
        commands = [
            "interface GigabitEthernet0/0/0/0",
            "ipv4 access-group acl_1 ingress",
            "ipv4 access-group acl_2 egress",
            "ipv6 access-group acl6_1 ingress",
            "ipv6 access-group acl6_2 egress",
            "interface GigabitEthernet0/0/0/1",
            "ipv4 access-group acl_1 ingress",
        ]
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result["commands"]), sorted(commands))

    def test_iosxr_acl_interfaces_replaced(self):
        """Replacing an interface removes its old groups and adds the new one."""
        self._prepare()
        set_module_args(
            dict(
                config=[
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        access_groups=[
                            dict(
                                afi="ipv6",
                                acls=[dict(name="acl6_3", direction="in")],
                            )
                        ],
                    )
                ],
                state="replaced",
            )
        )
        commands = [
            "interface GigabitEthernet0/0/0/0",
            "no ipv4 access-group acl_1 ingress",
            "no ipv4 access-group acl_2 egress",
            "no ipv6 access-group acl6_2 egress",
            "ipv6 access-group acl6_3 ingress",
        ]
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result["commands"]), sorted(commands))

    def test_iosxr_acl_interfaces_deleted(self):
        """Deleting with no config removes every configured access group."""
        self._prepare()
        set_module_args(dict(state="deleted"))
        commands = [
            "interface GigabitEthernet0/0/0/0",
            "no ipv4 access-group acl_1 ingress",
            "no ipv4 access-group acl_2 egress",
            "no ipv6 access-group acl6_1 ingress",
            "no ipv6 access-group acl6_2 egress",
            "interface GigabitEthernet0/0/0/1",
            "no ipv4 access-group acl_1 egress",
        ]
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result["commands"]), sorted(commands))

    def test_iosxr_acl_interfaces_rendered(self):
        """Rendered state returns commands without touching the device."""
        set_module_args(
            dict(
                config=[
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        access_groups=[
                            dict(
                                afi="ipv4",
                                acls=[
                                    dict(name="acl_1", direction="in"),
                                    dict(name="acl_2", direction="out"),
                                ],
                            ),
                            dict(
                                afi="ipv6",
                                acls=[
                                    dict(name="acl6_1", direction="in"),
                                    dict(name="acl6_2", direction="out"),
                                ],
                            ),
                        ],
                    ),
                    dict(
                        name="GigabitEthernet0/0/0/1",
                        access_groups=[
                            dict(
                                afi="ipv4",
                                acls=[dict(name="acl_1", direction="in")],
                            )
                        ],
                    ),
                ],
                state="rendered",
            )
        )
        commands = [
            "interface GigabitEthernet0/0/0/0",
            "ipv4 access-group acl_1 ingress",
            "ipv4 access-group acl_2 egress",
            "ipv6 access-group acl6_1 ingress",
            "ipv6 access-group acl6_2 egress",
            "interface GigabitEthernet0/0/0/1",
            "ipv4 access-group acl_1 ingress",
        ]
        result = self.execute_module(changed=False)
        self.assertEqual(sorted(result["rendered"]), sorted(commands))

    def test_iosxr_acl_interfaces_parsed(self):
        """A raw running-config string is parsed into structured facts."""
        self.maxDiff = None
        set_module_args(
            dict(
                running_config="interface GigabitEthernet0/0/0/0\r\n shutdown\r\n ipv4 access-group acl_1 ingress\r\n"
                " ipv4 access-group acl_2 egress\r\n ipv6 access-group acl6_1 ingress\r\n ipv6 "
                "access-group acl6_2 egress\r\n!\r\ninterface GigabitEthernet0/0/0/1\r\n "
                "shutdown\r\n ipv4 access-group acl_1 egress\r\n!",
                state="parsed",
            )
        )
        result = self.execute_module(changed=False)
        # (A leftover debug print of result["parsed"] was removed here.)
        parsed_list = [
            {
                "name": "GigabitEthernet0/0/0/0",
                "access_groups": [
                    {
                        "afi": "ipv4",
                        "acls": [
                            {"name": "acl_1", "direction": "in"},
                            {"name": "acl_2", "direction": "out"},
                        ],
                    },
                    {
                        "afi": "ipv6",
                        "acls": [
                            {"name": "acl6_1", "direction": "in"},
                            {"name": "acl6_2", "direction": "out"},
                        ],
                    },
                ],
            },
            {
                "name": "GigabitEthernet0/0/0/1",
                "access_groups": [
                    {
                        "afi": "ipv4",
                        "acls": [{"name": "acl_1", "direction": "out"}],
                    }
                ],
            },
        ]
        self.assertEqual(parsed_list, result["parsed"])

    def test_iosxr_acl_interfaces_overridden(self):
        """Overridden state reconciles every interface, not just listed ones."""
        self.maxDiff = None
        self._prepare()
        set_module_args(
            dict(
                config=[
                    dict(
                        name="GigabitEthernet0/0/0/0",
                        access_groups=[
                            dict(
                                afi="ipv6",
                                acls=[dict(name="acl6_3", direction="in")],
                            )
                        ],
                    ),
                    dict(
                        name="GigabitEthernet0/0/0/1",
                        access_groups=[
                            dict(
                                afi="ipv4",
                                acls=[dict(name="acl_2", direction="in")],
                            ),
                            dict(
                                afi="ipv6",
                                acls=[dict(name="acl6_3", direction="out")],
                            ),
                        ],
                    ),
                ],
                state="overridden",
            )
        )
        commands = [
            "interface GigabitEthernet0/0/0/0",
            "no ipv4 access-group acl_1 ingress",
            "no ipv4 access-group acl_2 egress",
            "no ipv6 access-group acl6_2 egress",
            "ipv6 access-group acl6_3 ingress",
            "interface GigabitEthernet0/0/0/1",
            "no ipv4 access-group acl_1 egress",
            "ipv4 access-group acl_2 ingress",
            "ipv6 access-group acl6_3 egress",
        ]
        result = self.execute_module(changed=True)
        self.assertEqual(sorted(result["commands"]), sorted(commands))
|
import logging
import time
import torch
import torch.utils.model_zoo as model_zoo
class AverageMeter(object):
    """Computes and stores the average and current value.

    Attributes:
        val: most recently recorded value.
        avg: running average of all recorded values.
        sum: weighted sum of recorded values.
        count: total number of observations.
    """

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        # reset() initialises all running statistics; the previous version
        # also assigned val/avg/sum/count inline here, which was redundant.
        self.reset()

    def reset(self):
        """Clear all running statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. "Loss 0.1234 (0.2345)" — current value followed by average.
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
    """Prints a '[batch/total]' progress line followed by its meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for *batch*."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Counter field width equals the digit count of the total.
        # (The original computed len(str(num_batches // 1)); the
        # floor-division by one was a no-op and has been removed.)
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def validate(val_loader, model, criterion, input_size, print_freq=10):
    """Run one evaluation pass over *val_loader* and return the top-1 average."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5], prefix='Test: ')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        # warmup, reduce variability of first batch time, especially for comparing torchscript vs non
        # input = torch.randn((50,) + input_size).cuda()
        # model(input)
        tic = time.time()
        for step, (images, target) in enumerate(val_loader):
            images, target = images.cuda(), target.cuda()

            # forward pass and loss
            output = model(images)
            loss = criterion(output, target)

            # accuracy / loss bookkeeping
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            batch_size = images.size(0)
            losses.update(loss.item(), batch_size)
            top1.update(acc1[0], batch_size)
            top5.update(acc5[0], batch_size)

            # per-batch wall-clock time
            batch_time.update(time.time() - tic)
            tic = time.time()

            if step % print_freq == 0:
                progress.display(step)

    # TODO: this should also be done with the ProgressMeter
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return top1.avg
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: class scores of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: iterable of k values to report accuracy for.

    Returns:
        List of 1-element tensors, one per k, with accuracy in percent.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # Use .reshape(-1) rather than .view(-1): `correct[:k]` is a slice
            # of a transposed (non-contiguous) tensor, and .view() raises a
            # RuntimeError on non-contiguous tensors in recent PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def load_pretrained(model, cfg=None, num_classes=1000, in_chans=3, filter_fn=None, strict=True):
    """Download pretrained weights from cfg['url'] and load them into *model*.

    The checkpoint is adapted on the fly:
      * in_chans == 1: collapses the first conv's 3-channel kernels into one
        channel by summing over the channel axis.
      * num_classes == 1000 vs a 1001-class checkpoint: strips the extra
        background class row from the classifier.
      * any other class-count mismatch: drops the classifier weights entirely
        and forces non-strict loading.

    Args:
        model: torch module to load into (must expose .default_cfg if cfg is None).
        cfg: dict with 'url', 'first_conv', 'classifier' and 'num_classes' keys.
        num_classes: class count of the created model.
        in_chans: input channel count of the created model (only 1 or 3 supported).
        filter_fn: optional callable applied to the state dict before loading.
        strict: passed to load_state_dict (may be overridden to False above).
    """
    if cfg is None:
        cfg = getattr(model, 'default_cfg')
    if cfg is None or 'url' not in cfg or not cfg['url']:
        # No usable URL — silently keep the random initialization (warn only).
        logging.warning("Pretrained model URL is invalid, using random initialization.")
        return
    # Downloads the checkpoint (or reads it from the local model-zoo cache).
    state_dict = model_zoo.load_url(cfg['url'], progress=False, map_location='cpu')
    if in_chans == 1:
        conv1_name = cfg['first_conv']
        logging.info('Converting first conv (%s) from 3 to 1 channel' % conv1_name)
        conv1_weight = state_dict[conv1_name + '.weight']
        # Summing the RGB kernels keeps the response magnitude comparable.
        state_dict[conv1_name + '.weight'] = conv1_weight.sum(dim=1, keepdim=True)
    elif in_chans != 3:
        assert False, "Invalid in_chans for pretrained weights"
    classifier_name = cfg['classifier']
    if num_classes == 1000 and cfg['num_classes'] == 1001:
        # special case for imagenet trained models with extra background class in pretrained weights
        classifier_weight = state_dict[classifier_name + '.weight']
        state_dict[classifier_name + '.weight'] = classifier_weight[1:]
        classifier_bias = state_dict[classifier_name + '.bias']
        state_dict[classifier_name + '.bias'] = classifier_bias[1:]
    elif num_classes != cfg['num_classes']:
        # completely discard fully connected for all other differences between pretrained and created model
        del state_dict[classifier_name + '.weight']
        del state_dict[classifier_name + '.bias']
        strict = False
    if filter_fn is not None:
        state_dict = filter_fn(state_dict)
    model.load_state_dict(state_dict, strict=strict)
|
<filename>kgcnn/utils/models.py
import tensorflow.keras as ks
import functools
import pprint
def generate_embedding(inputs, input_shape: list, embedding_args: dict, embedding_rank: int = 1, **kwargs):
    """Optional embedding for tensor input.

    If the input already carries a feature dimension (e.g. shape `(None, F)`
    without batch dimension), it is passed through unchanged; if its rank
    equals *embedding_rank* (e.g. shape `(None, )`), an Embedding layer maps
    it to a vector representation of `output_dim`.

    Args:
        inputs (tf.Tensor): Input tensor to make embedding for.
        input_shape (list, tuple): Shape of input without batch dimension.
        embedding_args (dict): Arguments unpacked into the Embedding layer constructor.
        embedding_rank (int): Rank of the input which requires embedding. Default is 1.

    Returns:
        tf.Tensor: Tensor embedding dependent on the input shape.
    """
    if len(kwargs) > 0:
        print("WARNING:kgcnn: Unknown embedding kwargs {0}. Will be reserved for future versions.".format(kwargs))
    needs_embedding = len(input_shape) == embedding_rank
    return ks.layers.Embedding(**embedding_args)(inputs) if needs_embedding else inputs
def update_model_kwargs_logic(default_kwargs: dict = None, user_kwargs: dict = None):
    """Make model kwargs dictionary with updated default values.

    This is essentially a nested version of dict.update(): values that are
    themselves dicts of layer kwargs are merged key-by-key instead of being
    replaced wholesale, so user args do not need to be fully specified.

    Args:
        default_kwargs (dict): Dictionary of default values. Default is None.
        user_kwargs (dict): Dictionary of args to update. Default is None.

    Returns:
        dict: New dict, first filled with defaults and then updated with user args.

    Raises:
        ValueError: If a top-level user key is unknown, or a non-dict value
            (other than None) would overwrite a nested dict of defaults.
    """
    import copy

    if default_kwargs is None:
        default_kwargs = {}
    if user_kwargs is None:
        user_kwargs = {}

    # Reject unknown top-level kwargs early.
    for iter_key in user_kwargs.keys():
        if iter_key not in default_kwargs:
            raise ValueError("Model kwarg {0} not in default arguments {1}".format(iter_key, default_kwargs.keys()))

    # Deep-copy the defaults: the previous shallow out.update(default_kwargs)
    # let the nested update below mutate the caller's nested default dicts,
    # silently changing the defaults for every subsequent call.
    out = copy.deepcopy(default_kwargs)

    # Nested update of kwargs:
    def _nested_update(dict1, dict2):
        for key, values in dict2.items():
            if key not in dict1:
                print("WARNING:kgcnn: Unknown model kwarg {0} with value {1}".format(key, values))
                dict1[key] = values
            else:
                if isinstance(dict1[key], dict) and isinstance(values, dict):
                    # The value is a dict of model arguments itself. Update the same way.
                    dict1[key] = _nested_update(dict1[key], values)
                elif isinstance(dict1[key], dict) and not isinstance(values, dict):
                    # If values is None, means no information, keep dict1 values untouched.
                    if values is not None:
                        # (Message grammar fixed; previously "Can not overwriting ...".)
                        raise ValueError("Can not overwrite dictionary of {0} with {1}".format(key, values))
                else:
                    # Just any other value to update
                    dict1[key] = values
        return dict1

    return _nested_update(out, user_kwargs)
def update_model_kwargs(model_default):
    """Decorating function for update_model_kwargs_logic().

    Returns a decorator that merges *model_default* with the call-time
    kwargs before invoking the wrapped model function.
    """
    def model_update_decorator(func):
        @functools.wraps(func)
        def update_wrapper(*args, **kwargs):
            merged = update_model_kwargs_logic(model_default, kwargs)
            if 'verbose' in merged and merged['verbose'] > 0:
                # Print out the full updated kwargs
                print("INFO:kgcnn: Updated model kwargs:")
                pprint.pprint(merged)
            return func(*args, **merged)
        return update_wrapper
    return model_update_decorator
|
import unittest
import os
import pytest
import shutil
import datetime
from unittest import mock
from parsons.etl.table import Table
from parsons.utilities.datetime import date_to_timestamp, parse_date
from parsons.utilities import files
from parsons.utilities import check_env
from parsons.utilities import json_format
from parsons.utilities import sql_helpers
from test.conftest import xfail_value_error
# Each case pairs an input date string with its expected Unix timestamp;
# the empty string maps to None and the unparseable timezone case is
# expected to fail with a ValueError (xfail_value_error).
@pytest.mark.parametrize(
    ["date", "exp_ts"],
    [pytest.param("2018-12-13", 1544659200),
     pytest.param("2018-12-13T00:00:00-08:00", 1544688000),
     pytest.param("", None),
     pytest.param("2018-12-13 PST", None, marks=[xfail_value_error]),
     ])
def test_date_to_timestamp(date, exp_ts):
    assert date_to_timestamp(date) == exp_ts
def test_parse_date():
    """parse_date accepts ISO8601 strings, unix timestamps, and datetime
    objects, and normalizes all of them to an aware UTC datetime."""
    want = datetime.datetime(year=2020, month=1, day=1, tzinfo=datetime.timezone.utc)
    # ISO8601 string, unix timestamp, and an already-parsed datetime all
    # map to the same instant.
    for raw in ('2020-01-01T00:00:00.000 UTC', 1577836800, want):
        got = parse_date(raw)
        assert got == want, got
#
# File utility tests (pytest-style)
#
def test_create_temp_file_for_path():
    """The temp file mirrors the suffix of the path it stands in for."""
    assert files.create_temp_file_for_path('some/file.gz').endswith('.gz')
def test_create_temp_directory():
    """Files written inside a temp directory disappear after cleanup."""
    tmp_dir = files.create_temp_directory()
    paths = [f'{tmp_dir}/test.txt', f'{tmp_dir}/test2.txt']
    for path in paths:
        with open(path, 'w') as handle:
            handle.write('TEST')
    for path in paths:
        assert files.has_data(path)
    files.cleanup_temp_directory(tmp_dir)
    # Verify the temp file no longer exists after cleanup.
    with pytest.raises(FileNotFoundError):
        open(paths[0], 'r')
def test_close_temp_file():
    """Closing a temp file removes it from disk."""
    path = files.create_temp_file()
    files.close_temp_file(path)
    # The file must be gone once closed.
    with pytest.raises(FileNotFoundError):
        open(path, 'r')
def test_is_gzip_path():
    """Only paths ending in .gz count as gzip paths."""
    cases = {'some/file.gz': True, 'some/file': False, 'some/file.csv': False}
    for path, expected in cases.items():
        assert bool(files.is_gzip_path(path)) == expected
def test_suffix_for_compression_type():
    """None/empty compression maps to no suffix; gzip maps to .gz."""
    for compression, suffix in ((None, ''), ('', ''), ('gzip', '.gz')):
        assert files.suffix_for_compression_type(compression) == suffix
def test_compression_type_for_path():
    """Compression type is inferred from the final extension only."""
    for path, expected in (('some/file', None),
                           ('some/file.csv', None),
                           ('some/file.csv.gz', 'gzip')):
        assert files.compression_type_for_path(path) == expected
def test_empty_file():
    """has_data() is False for an empty file and True for a populated one.

    Creates a scratch ``tmp`` directory holding one empty and one
    populated CSV. The directory is removed in a ``finally`` block so a
    failing assertion no longer leaks it (which previously broke the next
    run's ``os.mkdir('tmp')``).
    """
    # Create fake files.
    os.mkdir('tmp')
    try:
        # Empty file: created but never written to.
        with open('tmp/empty.csv', 'w+') as _:
            pass
        # Populated file: a one-column table written out as CSV.
        Table([['1'], ['a']]).to_csv('tmp/full.csv')
        assert not files.has_data('tmp/empty.csv')
        assert files.has_data('tmp/full.csv')
    finally:
        # Remove fake files and dir even when an assert above fails.
        shutil.rmtree('tmp')
def test_json_format():
    """arg_format converts snake_case argument names to camelCase."""
    formatted = json_format.arg_format('my_arg')
    assert formatted == 'myArg'
def test_remove_empty_keys():
    """remove_empty_keys drops keys whose values are None or empty strings."""
    # Assert key removed when its value is None
    test_dict = {'a': None, 'b': 2}
    assert json_format.remove_empty_keys(test_dict) == {'b': 2}
    # Assert no key is removed when all values are populated
    test_dict = {'a': 1, 'b': 2}
    assert json_format.remove_empty_keys(test_dict) == {'a': 1, 'b': 2}
    # Assert a key with an empty-string value is removed
    test_dict = {'a': '', 'b': 2}
    assert json_format.remove_empty_keys(test_dict) == {'b': 2}
def test_redact_credentials():
    """redact_credentials replaces the whole credentials clause — including
    quoted values with escapes spanning multiple lines — with the literal
    'CREDENTIALS REDACTED'."""
    # Test with quotes, escape characters, and line breaks.
    # NOTE: the embedded backslashes and quotes below are deliberate test
    # fixtures; do not normalize these literals.
    test_str = """COPY schema.tablename
FROM 's3://bucket/path/to/file.csv'
credentials 'aws_access_key_id=string-\\'escaped-quote;
aws_secret_access_key='string-escape-char\\\\'
MANIFEST"""
    test_result = """COPY schema.tablename
FROM 's3://bucket/path/to/file.csv'
CREDENTIALS REDACTED
MANIFEST"""
    assert sql_helpers.redact_credentials(test_str) == test_result
class TestCheckEnv(unittest.TestCase):
    """Tests for check_env.check(): an explicit field value wins over the
    environment variable; with neither available a KeyError is raised."""

    def test_environment_field(self):
        """Test check field"""
        # No env var set: the explicit field value is returned as-is.
        result = check_env.check('PARAM', 'param')
        self.assertEqual(result, 'param')

    @mock.patch.dict(os.environ, {'PARAM': 'env_param'})
    def test_environment_env(self):
        """Test check env"""
        # Field is None, so the value falls back to the env var.
        result = check_env.check('PARAM', None)
        self.assertEqual(result, 'env_param')

    @mock.patch.dict(os.environ, {'PARAM': 'env_param'})
    def test_environment_field_env(self):
        """Test check field with env and field"""
        # When both exist, the explicit field value takes precedence.
        result = check_env.check('PARAM', 'param')
        self.assertEqual(result, 'param')

    # NOTE(review): method name has a typo ("envrionment"); left as-is since
    # renaming would change the reported test IDs.
    def test_envrionment_error(self):
        """Test check env raises error"""
        with self.assertRaises(KeyError) as _:
            check_env.check('PARAM', None)
|
<reponame>wan2000/hcmus-person-reid<gh_stars>1-10
import os
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
import numpy as np
from .utils import *
class Market1501TrainVal(Dataset):
    """Market-1501 dataset that carves a train/query/gallery split out of
    ``bounding_box_train`` and serves triplet samples plus ID-based batches.

    NOTE(review): ``torch``, ``get_image_id`` and ``get_image_cam`` are not
    imported in this file directly — presumably provided by
    ``from .utils import *``; confirm that module exports them.
    """

    def __init__(self, root, transform=None, batch_size=32, shuffle=True):
        # root: dataset directory that contains 'bounding_box_train'.
        self.root = root
        self.transform = transform
        self.batch_size = batch_size
        self.shuffle = shuffle
        # Populates self.train_images / self.query_images / self.gallery_images.
        self.split_train_val()
        # Sequential iteration state over the query set.
        self.query_ptr = 0
        self.query_done = False
        self.query_ids = [get_image_id(c) for c in self.query_images]
        self.query_cams = [get_image_cam(c) for c in self.query_images]
        # Sequential iteration state over the gallery set.
        self.gallery_ptr = 0
        self.gallery_done = False
        self.gallery_ids = [get_image_id(c) for c in self.gallery_images]
        self.gallery_cams = [get_image_cam(c) for c in self.gallery_images]
        self.images = self.train_images
        # person_id -> {'images': [full paths], 'ptr': round-robin cursor}.
        self.person_id_gallery = {}
        for f_image in self.images:
            # Market-1501 filenames start with the zero-padded person id.
            person_id = int(f_image.split('_')[0])
            if person_id not in self.person_id_gallery.keys():
                self.person_id_gallery[person_id] = {
                    'images': [],
                    'ptr': 0
                }
            self.person_id_gallery[person_id]['images'].append(os.path.join(root, 'bounding_box_train', f_image))
        # NOTE(review): these paths omit the 'bounding_box_train' subfolder,
        # unlike the gallery paths built just above — verify __getitem__ can
        # actually open them.
        self.images = [os.path.join(self.root, f_image) for f_image in self.images]
        self.person_id_list = list(self.person_id_gallery.keys())
        # person id -> contiguous class index used as classification label.
        self.id_classes = {}
        for i in range(len(self.person_id_list)):
            self.id_classes[self.person_id_list[i]] = i
        self.sample_ptr = 0
        self.epoch_done = False
        # Number of images drawn per identity in next_batch().
        self.images_per_id = 4
        if self.shuffle:
            np.random.shuffle(self.person_id_list)

    def split_train_val(self):
        """Split bounding_box_train into train / query / gallery subsets.

        Up to 100 identities are held out for validation; for each, the
        first image of every camera becomes a query candidate and the rest
        go to the gallery.
        """
        images = os.listdir(os.path.join(self.root, 'bounding_box_train'))
        images.sort()
        # NOTE(review): drops the last sorted entry — presumably a non-image
        # file; confirm against the actual dataset folder contents.
        images.pop()
        images = np.array(images)
        np.random.shuffle(images)
        ids = np.array([get_image_id(f) for f in images])
        cams = np.array([get_image_cam(f) for f in images])
        unique_ids = np.unique(ids)
        np.random.shuffle(unique_ids)
        train_indices = []
        query_indices = []
        gallery_indices = []
        num_selected_ids = 0
        for unique_id in unique_ids:
            query_indices_ = []
            indices = np.argwhere(unique_id == ids).flatten()
            unique_cams = np.unique(cams[indices])
            # One query candidate per camera: the first image of that camera.
            for unique_cam in unique_cams:
                query_indices_.append(indices[np.argwhere(unique_cam == cams[indices]).flatten()[0]])
            gallery_indices_ = list(np.setdiff1d(indices, query_indices_))
            # Demote query candidates with no cross-camera gallery match.
            # NOTE(review): this mutates query_indices_ while iterating it,
            # so some candidates may be skipped — verify intended.
            for query_index in query_indices_:
                if len(gallery_indices_) == 0 or len(np.argwhere(cams[query_index] != cams[gallery_indices_]).flatten()) == 0:
                    query_indices_.remove(query_index)
                    gallery_indices_.append(query_index)
            if len(query_indices_) == 0:
                continue
            query_indices += list(query_indices_)
            gallery_indices += list(gallery_indices_)
            num_selected_ids += 1
            # Cap the validation split at 100 identities.
            if num_selected_ids >= 100:
                break
        # Everything not used for query/gallery stays in the training split.
        train_indices = np.setdiff1d(range(len(images)), np.hstack([query_indices, gallery_indices]))
        self.train_images = images[train_indices]
        self.query_images = images[query_indices]
        self.gallery_images = images[gallery_indices]
        self.train_images.sort()
        self.query_images.sort()
        self.gallery_images.sort()

    def __getitem__(self, index):
        """Return a transformed (anchor, positive, negative) triplet, or the
        untransformed (image, person_id) pair when no transform is set."""
        person_id = int(self.images[index].split('/')[-1].split('_')[0])
        image = Image.open(self.images[index])
        # NOTE(review): this first pos_image is overwritten below before use.
        pos_image = Image.open(np.random.choice(self.person_id_gallery[person_id]['images']))
        # Draw a different identity for the negative sample.
        neg_id = np.random.choice(self.person_id_list)
        while neg_id == person_id:
            neg_id = np.random.choice(self.person_id_list)
        pos_image = Image.open(np.random.choice(self.person_id_gallery[person_id]['images']))
        neg_image = Image.open(np.random.choice(self.person_id_gallery[neg_id]['images']))
        if self.transform is not None:
            return self.transform(image), self.transform(pos_image), self.transform(neg_image)
        return image, person_id

    def __len__(self):
        # Number of training images.
        return len(self.images)

    def next_batch(self):
        """Sample batch_size identities x images_per_id images for training.

        Returns stacked (images_t, labels_t) tensors; labels are the
        contiguous class indices from self.id_classes. Sets epoch_done once
        the identity list has been exhausted.
        """
        person_ids = self.person_id_list[self.sample_ptr: self.sample_ptr + self.batch_size]
        images_t = []
        labels_t = []
        for id in person_ids:
            for _ in range(self.images_per_id):
                # Round-robin through this identity's images via its cursor.
                ptr = self.person_id_gallery[id]['ptr']
                image = Image.open(self.person_id_gallery[id]['images'][ptr])
                images_t.append(self.transform(image))
                # labels_t.append(id)
                labels_t.append(self.id_classes[id])
                self.person_id_gallery[id]['ptr'] += 1
                if self.person_id_gallery[id]['ptr'] >= len(self.person_id_gallery[id]['images']):
                    self.person_id_gallery[id]['ptr'] = 0
        images_t = torch.stack(images_t)
        labels_t = torch.tensor(labels_t, dtype=torch.int32)
        self.sample_ptr += self.batch_size
        if self.sample_ptr >= len(self.person_id_list):
            self.epoch_done = True
        return images_t, labels_t

    def next_batch_query(self):
        """Return the next (images, person-id labels) batch of query images;
        wraps the pointer around and sets query_done at the end."""
        images_t = []
        labels_t = []
        for i in range(self.query_ptr, min(self.query_ptr + self.batch_size, len(self.query_images))):
            filename = self.query_images[i]
            image = Image.open(os.path.join(self.root, 'bounding_box_train', filename))
            if self.transform is not None:
                image = self.transform(image)
            images_t.append(image)
            labels_t.append(int(filename.split('_')[0]))
        self.query_ptr += self.batch_size
        if self.query_ptr >= len(self.query_images):
            self.query_ptr = 0
            self.query_done = True
        images_t = torch.stack(images_t)
        labels_t = torch.tensor(labels_t).long()
        return images_t, labels_t

    def next_batch_gallery(self):
        """Return the next (images, person-id labels) batch of gallery
        images; wraps the pointer around and sets gallery_done at the end."""
        images_t = []
        labels_t = []
        for i in range(self.gallery_ptr, min(self.gallery_ptr + self.batch_size, len(self.gallery_images))):
            filename = self.gallery_images[i]
            image = Image.open(os.path.join(self.root, 'bounding_box_train', filename))
            if self.transform is not None:
                image = self.transform(image)
            images_t.append(image)
            labels_t.append(int(filename.split('_')[0]))
        self.gallery_ptr += self.batch_size
        if self.gallery_ptr >= len(self.gallery_images):
            self.gallery_ptr = 0
            self.gallery_done = True
        images_t = torch.stack(images_t)
        labels_t = torch.tensor(labels_t).long()
        return images_t, labels_t

    def start_over(self):
        # Reset the training-batch cursor for a new epoch; reshuffle ids.
        self.epoch_done = False
        self.sample_ptr = 0
        if self.shuffle:
            np.random.shuffle(self.person_id_list)
""" Module for I/O in arclines
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
import numpy as np
import os
import datetime
import pdb
from astropy.table import Table, Column, vstack
from astropy.io import fits
from linetools import utils as ltu
import arclines # For path
from arclines import defs
# Data directories shipped with the package: curated line lists and raw
# NIST tables, resolved relative to the installed arclines package.
line_path = arclines.__path__[0]+'/data/lists/'
nist_path = arclines.__path__[0]+'/data/NIST/'
def load_by_hand():
    """ By-hand line list

    Loads data/sources/by_hand_list.ascii, tags every row as NIST-vetted,
    and maps instrument/source strings through arclines.defs.
    (Takes no parameters; the source file path is fixed.)

    Returns
    -------
    byhand : Table
        Columns: ion, wave, NIST, Instr, amplitude, Source
    """
    str_len_dict = defs.str_len()
    src_file = arclines.__path__[0]+'/data/sources/by_hand_list.ascii'
    # Read
    line_list = Table.read(src_file, format='ascii.fixed_width', comment='#')
    # Add
    line_list['NIST'] = 1
    # Deal with Instr and Source
    ilist, slist = [], []
    for row in line_list:
        # Map the instrument string to its bit flag.
        ilist.append(defs.instruments()[row['sInstr']])  # May need to split
        slist.append(row['sSource'])
    line_list['Instr'] = ilist
    line_list['Source'] = np.array(slist, dtype='S{:d}'.format(str_len_dict['Source']))
    # Trim
    return line_list[['ion', 'wave', 'NIST', 'Instr', 'amplitude', 'Source']]
def load_line_list(line_file, add_path=False, use_ion=False, NIST=False):
    """
    Parameters
    ----------
    line_file : str
        Full path to line_list or name of ion
    add_path : bool, optional
        Not yet implemented
    use_ion : bool, optional
        Interpret line_file as an ion name and resolve it inside line_path
    NIST : bool, optional
        NIST formatted table?

    Returns
    -------
    line_list : Table
    """
    if use_ion:
        line_file = line_path+'{:s}_lines.dat'.format(line_file)
    line_list = Table.read(line_file, format='ascii.fixed_width', comment='#')
    # NIST?
    if NIST:
        # Remove unwanted columns
        tkeys = line_list.keys()
        for badkey in ['Ritz','Acc.','Type','Ei','Lower','Upper','TP','Line']:
            for tkey in tkeys:
                if badkey in tkey:
                    line_list.remove_column(tkey)
        # Relative intensity -- Strip junk off the end
        reli = []
        for imsk, idat in zip(line_list['Rel.'].mask, line_list['Rel.'].data):
            if imsk:
                # Masked entries carry no intensity information.
                reli.append(0.)
            else:
                try:
                    reli.append(float(idat))
                except ValueError:
                    # Values like '500*' carry a trailing flag char; drop it.
                    try:
                        reli.append(float(idat[:-1]))
                    except ValueError:
                        reli.append(0.)
        line_list.remove_column('Rel.')
        line_list['RelInt'] = reli
        #
        gdrows = line_list['Observed'] > 0.  # Eliminate dummy lines
        line_list = line_list[gdrows]
        line_list.rename_column('Observed','wave')
        # Others
        # Grab ion name from the path, e.g. .../HeI_vacuum.ascii -> 'HeI'
        i0 = line_file.rfind('/')
        i1 = line_file.rfind('_')
        ion = line_file[i0+1:i1]
        line_list.add_column(Column([ion]*len(line_list), name='Ion', dtype='U5'))
        line_list.add_column(Column([1]*len(line_list), name='NIST'))
    # Return
    return line_list
def load_line_lists(lines, unknown=False, skip=False, all=False, NIST=False):
    """ Loads a series of line list files

    Parameters
    ----------
    lines : list
        Names of the ions/lamps to load (e.g. 'ArI'); ignored if all=True
    unknown : bool, optional
        Also append the list of unknown lines
    skip : bool, optional
        Skip missing line lists (mainly for building)
    all : bool, optional
        Load every *_lines.dat file found in line_path
    NIST : bool, optional
        Load the full NIST linelists

    Returns
    -------
    line_list : Table
        Stacked table of lines, or None if nothing was loaded
    """
    import glob
    # All?
    if all:
        line_files = glob.glob(line_path+'*_lines.dat')
        lines = []
        for line_file in line_files:
            # Extract the ion name from .../<ion>_lines.dat
            i0 = line_file.rfind('/')
            i1 = line_file.rfind('_')
            lines.append(line_file[i0+1:i1])
    # Read standard files
    lists = []
    for line in lines:
        if NIST:
            line_file = nist_path+'{:s}_vacuum.ascii'.format(line)
        else:
            line_file = line_path+'{:s}_lines.dat'.format(line)
        if not os.path.isfile(line_file):
            if not skip:
                # Fail cleanly (a leftover pdb.set_trace() debug trap was
                # removed from here).
                raise IOError("Input line {:s} is not included in arclines".format(line))
        else:
            lists.append(load_line_list(line_file, NIST=NIST))
    # Stack
    if len(lists) == 0:
        return None
    line_lists = vstack(lists, join_type='exact')
    # Unknown
    if unknown:
        unkn_lines = load_unknown_list(lines)
        unkn_lines.remove_column('line_flag')  # may wish to have this info
        # Stack
        line_lists = vstack([line_lists, unkn_lines])
    # Return
    return line_lists
def load_source_table():
    """ Load table of arcline sources

    Returns
    -------
    sources : Table
    """
    # Fixed-width ASCII table shipped with the package.
    source_file = arclines.__path__[0] + '/data/sources/arcline_sources.ascii'
    return Table.read(source_file, format='ascii.fixed_width', comment='#')
def load_nist(ion):
    """Parse a NIST ASCII table.  Note that the long ---- should have
    been commented out and also the few lines at the start.

    Parameters
    ----------
    ion : str
        Name of ion

    Returns
    -------
    tbl : Table
        Table of lines
    """
    import glob
    # Root (for development only)
    root = arclines.__path__[0]
    # Find file
    srch_file = root + '/data/NIST/'+ion+'_vacuum.ascii'
    nist_file = glob.glob(srch_file)
    if len(nist_file) == 0:
        raise IOError("Cannot find NIST file {:s}".format(srch_file))
    # Read
    nist_tbl = Table.read(nist_file[0], format='ascii.fixed_width')
    gdrow = nist_tbl['Observed'] > 0.  # Eliminate dummy lines
    nist_tbl = nist_tbl[gdrow]
    # Now unique values only (no duplicates)
    uniq, indices = np.unique(nist_tbl['Observed'],return_index=True)
    nist_tbl = nist_tbl[indices]
    # Deal with Rel: strip non-numeric flag suffixes; masked or
    # unparseable entries fall back to 0.  The previous bare `except:`
    # clauses also swallowed KeyboardInterrupt/SystemExit; narrowed to the
    # conversion errors actually expected here.
    agdrel = []
    for row in nist_tbl:
        try:
            gdrel = int(row['Rel.'])
        except (ValueError, TypeError):
            try:
                gdrel = int(row['Rel.'][:-1])
            except (ValueError, TypeError, IndexError):
                gdrel = 0
        agdrel.append(gdrel)
    agdrel = np.array(agdrel)
    # Remove and add
    nist_tbl.remove_column('Rel.')
    nist_tbl.remove_column('Ritz')
    nist_tbl['RelInt'] = agdrel
    #nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='S5'))
    nist_tbl.add_column(Column([ion]*len(nist_tbl), name='Ion', dtype='U5'))
    nist_tbl.rename_column('Observed','wave')
    # Return
    return nist_tbl
def load_unknown_list(lines, unknwn_file=None, all=False):
    """
    Parameters
    ----------
    lines : list
        Restricted lines; use all=True for all
    unknwn_file : str, optional
    all : bool, optional

    Returns
    -------
    unknwn_lines : Table
    """
    line_dict = defs.lines()
    # Default to the packaged UNKNWNs list.
    lists_dir = arclines.__path__[0] + '/data/lists/'
    if unknwn_file is None:
        unknwn_file = lists_dir + 'UNKNWNs.dat'
    line_list = load_line_list(unknwn_file)
    # No lamp restriction requested?
    if all:
        return line_list
    # Keep only rows whose bit-encoded line_flag includes one of the lamps.
    msk = np.array([False] * len(line_list))
    for line in lines:
        flag = line_dict[line]
        msk |= (line_list['line_flag'] % (2 * flag)) >= flag
    # Finish
    return line_list[msk]
def load_spectrum(spec_file, index=0):
    """ Load a simple spectrum from input file

    Parameters
    ----------
    spec_file : str
        .fits -- Assumes simple ndarray in 0 extension
        .ascii -- Assumes Table.read(format='ascii') will work with single column
        .hdf5 -- Reads 'arcs/<index>/spec' when an 'arcs' group is present
        .json -- Expects a 'spec' entry in the JSON dict
    index : int, optional
        Arc index used for hdf5 files holding multiple arcs

    Returns
    -------
    spec : ndarray

    Raises
    ------
    IOError
        If the extension is unrecognized or the file layout is unsupported.
    """
    import h5py
    iext = spec_file.rfind('.')
    if 'ascii' in spec_file[iext:]:
        tbl = Table.read(spec_file, format='ascii')
        key = tbl.keys()[0]
        spec = tbl[key].data
    elif 'fits' in spec_file[iext:]:
        spec = fits.open(spec_file)[0].data
    elif 'hdf5' in spec_file[iext:]:
        # Context manager closes the file (previously the handle leaked).
        with h5py.File(spec_file, 'r') as hdf:
            if 'arcs' in hdf.keys():
                print("Taking arc={:d} in this file".format(index))
                # NOTE(review): Dataset.value is removed in h5py >= 3.0;
                # switch to hdf[...][()] if the pinned h5py allows it.
                spec = hdf['arcs/'+str(index)+'/spec'].value
            else:
                raise IOError("Not ready for this hdf5 file")
    elif 'json' in spec_file[iext:]:
        jdict = ltu.loadjson(spec_file)
        try:
            spec = np.array(jdict['spec'])
        except KeyError:
            raise IOError("spec not in your JSON dict")
    else:
        # Previously fell through and hit an UnboundLocalError on return.
        raise IOError("Unrecognized extension for {:s}".format(spec_file))
    # Return
    return spec
def write_line_list(tbl, outfile):
    """ Write a line list table to disk as fixed-width ASCII with a
    creation-date header.

    Note: mutates the input table in place by setting the 'wave' column's
    display format.

    Parameters
    ----------
    tbl : Table
        Line list to write; must contain a 'wave' column
    outfile : str
        Output file path
    """
    # Format
    tbl['wave'].format = '10.4f'
    # Write
    with open(outfile,'w') as f:
        f.write('# Creation Date: {:s}\n'.format(str(datetime.date.today().strftime('%Y-%b-%d'))))
        tbl.write(f, format='ascii.fixed_width')
|
"""
constraint object library
"""
# imports third-parties
import cgp_generic_utils.python
import cgp_generic_utils.constants
import maya.cmds
# imports local
import cgp_maya_utils.constants
import cgp_maya_utils.scene._api
from . import _generic
# BASE OBJECT #
class Constraint(_generic.DagNode):
    """node object that manipulates any kind of constraint node

    NOTE(review): this module is Python 2 (print statement below,
    ``basestring`` in a subclass); porting to Python 3 needs code changes.
    """

    # ATTRIBUTES #

    _nodeType = 'constraint'

    # COMMANDS #

    def data(self):
        """data necessary to store the constraint node on disk and/or recreate it from scratch

        :return: the data of the constraint
        :rtype: dict
        """

        # init
        data = super(Constraint, self).data()

        # update data
        data['drivers'] = [xform.name() for xform in self.driverTransforms()]
        data['driven'] = self.drivenTransform().name()
        data['drivenAttributes'] = [attr.name() for attr in self.drivenAttributes()]

        # return
        return data

    def drivenTransform(self):
        """the transform that is driven by the constraint

        :return: the driven transform
        :rtype: :class:`cgp_maya_utils.scene.Transform` or :class:`cgp_maya_utils.scene.Joint`
        """

        # init
        data = []

        # execute - collect downstream nodes of every driven output plug
        for output in self._drivenOutputs():
            connectedNodes = maya.cmds.listConnections('{0}.{1}'.format(self.name(), output),
                                                       source=False,
                                                       destination=True) or []
            data.extend(connectedNodes)

        # return - a constraint drives one transform, so the first hit wins
        return cgp_maya_utils.scene._api.node(data[0]) if data else None

    def drivenAttributes(self):
        """the attributes of the driven transform that are driven by the constraint

        :return: the driven attributes
        :rtype: list[:class:`cgp_maya_utils.scene.Attribute`]
        """

        # init
        data = []

        # get driven data
        for output in self._drivenOutputs():

            # get connections - plugs=True yields 'node.attribute' strings
            connections = maya.cmds.listConnections('{0}.{1}'.format(self.name(), output),
                                                    source=False,
                                                    destination=True,
                                                    plugs=True) or []

            # update data - keep each destination plug only once
            for con in connections:
                if con not in data:
                    data.append(con)

        # return
        return [cgp_maya_utils.scene._api.attribute(con) for con in data]

    def driverTransforms(self):
        """the transforms that drives the constraint

        :return: the driver transforms
        :rtype: list[:class:`cgp_maya_utils.scene.Transform`, :class:`cgp_maya_utils.scene.Joint`]
        """

        # init
        data = []

        # execute
        for inp in self._driverInputs():

            # get full attribute
            fullAttribute = '{0}.{1}'.format(self.name(), inp)

            # update
            try:
                # get connected nodes
                connectedNodes = maya.cmds.listConnections(fullAttribute, source=True, destination=False) or []

                # '*' marks a multi attribute - query each element index
                if '*' in fullAttribute:
                    for index in range(len(connectedNodes)):
                        connections = maya.cmds.listConnections(fullAttribute.replace('*', str(index)),
                                                                source=True,
                                                                destination=False) or []
                        connections = [item for item in connections if item not in data]
                        data.extend(connections)
                else:
                    connections = [connectedNode for connectedNode in connectedNodes if connectedNode not in data]
                    data.extend(connections)

            # errors - best effort: report the failing plug and carry on
            except ValueError:
                print 'can\'t get connections from {0}'.format(fullAttribute)

        # return
        return [cgp_maya_utils.scene._api.node(item) for item in data] or None

    def isValid(self):
        """check is the constraint is valid by verifying if it has driver and driven transforms connected

        :return: ``True`` : the constraint is valid - ``False`` : the constraint is invalid
        :rtype: bool
        """

        # return
        return self.driverTransforms() and self.drivenTransform()

    # PRIVATE COMMANDS #

    def _driverInputs(self):
        """the input attributes of the constraint that are connected to the drivers of the constraint
        Those attributes ares scanned to get the driver nodes through connection

        :return: the input attributes connected to the drivers
        :rtype: list[str]
        """

        # execute - base class has no inputs; subclasses override this
        return []

    def _drivenOutputs(self):
        """the output attributes of the constraint that are connected to the driven of the constraint
        Those attributes ares scanned to get the driven nodes through connection

        :return: the output attributes connected to the driven
        :rtype: list[str]
        """

        # execute - base class has no outputs; subclasses override this
        return []

    def _availableAttributes(self):
        """the attributes that are listed by the ``Node.attributes`` function

        :return: the available attributes
        :rtype: list[str]
        """

        # init
        availableAttributes = super(Constraint, self)._availableAttributes()

        # update settingAttributes
        availableAttributes.extend(['enableRestPosition',
                                    'lockOutput'])

        # return
        return availableAttributes

    @staticmethod
    def _formatDrivenAttributes(driven, drivenAttributes=None):
        """format the driven attributes

        :param driven: name of the object that will be driven by the constraint
        :type driven: str or :class:`cgp_maya_utils.scene.Node`

        :param drivenAttributes: the driven attributes to format
        :type drivenAttributes: list[:class:`cgp_maya_utils.constants.Transform`]

        :return: the formated drivenAttributes
        :rtype: list[str]
        """

        # init
        data = []
        drivenAttributes = drivenAttributes or cgp_maya_utils.constants.Transform.ALL

        # errors
        for attr in drivenAttributes:
            if attr not in cgp_maya_utils.constants.Transform.ALL:
                raise ValueError('{0} is not a valid driven attribute - {1}'
                                 .format(attr, cgp_maya_utils.constants.Transform.ALL))

        # execute - expand e.g. 'translate' into 'tx', 'ty', 'tz'
        for attr in drivenAttributes:
            if attr in cgp_maya_utils.constants.Transform.GENERAL:
                for axe in cgp_generic_utils.constants.Axis.ALL:
                    data.append('{0}{1}'.format(attr[0].lower(), axe))
            else:
                data.append('{0}{1}'.format(attr[0].lower(), attr[-1].lower()))

        # return - locked channels cannot be constrained, so drop them
        return [attr for attr in set(data) if not maya.cmds.getAttr('{0}.{1}'.format(driven, attr), lock=True)]
# CONSTRAINT OBJECTS #
class AimConstraint(Constraint):
    """node object that manipulates an ``aim`` constraint node
    """

    # ATTRIBUTES #

    _nodeType = 'aimConstraint'

    # COMMANDS #

    @classmethod
    def create(cls, drivers, driven, drivenAttributes=None, maintainOffset=False,
               attributeValues=None, name=None, **__):
        """create an aimConstraint

        :param drivers: transforms driving the constraint
        :type drivers: list[str] or list[:class:`cgp_maya_utils.scene.Node`]

        :param driven: transform driven by the constraint
        :type driven: str or :class:`cgp_maya_utils.scene.Node`

        :param drivenAttributes: driven attributes controlled by the constraint - all attributes if nothing is specified
        :type drivenAttributes: list[:class:`cgp_maya_utils.constants.Transform`]

        :param maintainOffset: ``True`` : constraint created with offset - ``False`` : constraint created without offset
        :type maintainOffset: bool

        :param attributeValues: attribute values to set on the constraint
            NOTE(review): when provided, keys 'aimVectorX/Y/Z', 'upVectorX/Y/Z',
            'worldUpVectorX/Y/Z', 'worldUpType' and 'worldUpMatrix' are read
            unconditionally — missing keys raise KeyError; confirm callers
            always pass a complete dict.
        :type attributeValues: dict

        :param name: name of the constraint
        :type name: str

        :return: the created constraint
        :rtype: :class:`cgp_maya_utils.scene.AimConstraint`
        """

        # init
        drivers = [str(driver) for driver in drivers]
        driven = str(driven)

        # get driven attributes
        drivenAttributes = cls._formatDrivenAttributes(driven, drivenAttributes=drivenAttributes)

        # errors
        if not drivenAttributes:
            raise RuntimeError('{0} can\'t be aimConstraint'.format(driven))

        # get skip attributes - rotate axes absent from the driven list
        skipAttributes = []
        for axe in cgp_generic_utils.constants.Axis.ALL:
            if 'r{0}'.format(axe) not in drivenAttributes:
                skipAttributes.append(axe)

        # get infos
        data = {'aimVector': [], 'upVector': [], 'worldUpVector': []}

        if attributeValues:

            # get vectors
            for attr in ['aimVector', 'upVector', 'worldUpVector']:
                for axe in cgp_generic_utils.constants.Axis.ALL:

                    # get value - may be a literal or an attribute to resolve
                    value = attributeValues['{0}{1}'.format(attr, axe.upper())]
                    # NOTE(review): ``basestring`` is Python 2 only.
                    value = (cgp_maya_utils.scene._api.attribute(value).value()
                             if isinstance(value, basestring)
                             else value)

                    # update vectors
                    data[attr].append(value)

            # get worldUpType
            data['worldUpType'] = cgp_maya_utils.constants.WorldUpType.ALL[attributeValues['worldUpType']]

            # get worldUpObject - an up object supersedes the up vector
            if attributeValues['worldUpMatrix']:
                data.pop('worldUpVector')
                data['worldUpObject'] = (cgp_maya_utils.scene._api
                                         .attribute(attributeValues['worldUpMatrix']).node().name())

        else:
            # sensible defaults: aim down +X with +Y up in world space
            data['aimVector'] = [1, 0, 0]
            data['upVector'] = [0, 1, 0]
            data['worldUpVector'] = [0, 1, 0]
            data['worldUpType'] = cgp_maya_utils.constants.WorldUpType.VECTOR

        # execute
        node = maya.cmds.aimConstraint(drivers,
                                       driven,
                                       name=name or '{0}_aimConstraint'.format(driven),
                                       maintainOffset=maintainOffset,
                                       skip=skipAttributes,
                                       **data)[0]
        cstrObject = cls(node)

        # apply attributeValues
        if attributeValues:
            cstrObject.setAttributeValues(attributeValues)

        # return
        return cstrObject

    # PRIVATE COMMANDS #

    def _availableAttributes(self):
        """the attributes that are listed by the ``Node.attributes`` function

        :return: the available attributes
        :rtype: list[str]
        """

        # init
        availableAttributes = super(AimConstraint, self)._availableAttributes()

        # update settingAttributes
        availableAttributes.extend(['worldUpMatrix',
                                    'aimVector',
                                    'restRotate',
                                    'upVector',
                                    'worldUpType',
                                    'worldUpVector'])

        # return
        return availableAttributes

    def _driverInputs(self):
        """get inputs

        :return: the inputs of the constraint related to the drivers
        :rtype: list[str]
        """

        # return - drivers connect through the target multi attribute
        return ['target[*].targetParentMatrix']

    def _drivenOutputs(self):
        """get the outputs of the constraint

        :return: the outputs of the constraint related to the driven
        :rtype: list[str]
        """

        # return - aim constraints only drive rotation channels
        return ['constraintRotate', 'constraintRotateX', 'constraintRotateY', 'constraintRotateZ']
class OrientConstraint(Constraint):
    """node object that manipulates an ``orient`` constraint node
    """

    # ATTRIBUTES #

    _nodeType = 'orientConstraint'

    # COMMANDS #

    @classmethod
    def create(cls, drivers, driven, drivenAttributes=None, maintainOffset=False,
               attributeValues=None, name=None, **__):
        """create an orientConstraint

        :param drivers: transforms driving the constraint
        :type drivers: list[str] or list[:class:`cgp_maya_utils.scene.Transform`]

        :param driven: transform driven by the constraint
        :type driven: str or :class:`cgp_maya_utils.scene.Transform`

        :param drivenAttributes: driven attributes controlled by the constraint - all attributes if nothing is specified
        :type drivenAttributes: list[:class:`cgp_maya_utils.constants.Transform`]

        :param maintainOffset: ``True`` : constraint created with offset - ``False`` : constraint created without offset
        :type maintainOffset: bool

        :param attributeValues: attribute values to set on the constraint
        :type attributeValues: dict

        :param name: name of the constraint
        :type name: str

        :return: the created constraint
        :rtype: :class:`cgp_maya_utils.scene.OrientConstraint`
        """

        # init
        drivers = [str(driver) for driver in drivers]
        driven = str(driven)

        # get driven attributes
        drivenAttributes = cls._formatDrivenAttributes(driven, drivenAttributes=drivenAttributes)

        # errors
        if not drivenAttributes:
            raise RuntimeError('{0} can\'t be orientConstraint'.format(driven))

        # get skip attributes - rotate axes absent from the driven list
        skipAttributes = []
        for axe in ['x', 'y', 'z']:
            if 'r{0}'.format(axe) not in drivenAttributes:
                skipAttributes.append(axe)

        # execute
        node = maya.cmds.orientConstraint(drivers,
                                          driven,
                                          name=name or '{0}_orientConstraint'.format(driven),
                                          maintainOffset=maintainOffset,
                                          skip=skipAttributes)[0]
        cstrObject = cls(node)

        # apply attributeValues
        if attributeValues:
            cstrObject.setAttributeValues(attributeValues)

        # return
        return cstrObject

    # PRIVATE COMMANDS #

    def _driverInputs(self):
        """get inputs

        :return: the inputs of the constraint related to the drivers
        :rtype: list[str]
        """

        # return - drivers connect their rotation into the target multi
        return ['target[*].targetRotate']

    def _drivenOutputs(self):
        """get the outputs of the constraint

        :return: the outputs of the constraint related to the driven
        :rtype: list[str]
        """

        # return - orient constraints only drive rotation channels
        return ['constraintRotate', 'constraintRotateX', 'constraintRotateY', 'constraintRotateZ']

    def _availableAttributes(self):
        """the attributes that are listed by the ``Node.attributes`` function

        :return: the available attributes
        :rtype: list[str]
        """

        # init
        availableAttributes = super(OrientConstraint, self)._availableAttributes()

        # update settingAttributes
        availableAttributes.extend(['interpType',
                                    'restRotateX', 'restRotateY', 'restRotateZ'])

        # return
        return availableAttributes
class ParentConstraint(Constraint):
    """node object that manipulates an ``parent`` constraint node
    """

    # ATTRIBUTES #

    _nodeType = 'parentConstraint'

    # COMMANDS #

    @classmethod
    def create(cls, drivers, driven, drivenAttributes=None, maintainOffset=False,
               attributeValues=None, name=None, **__):
        """create an parentConstraint

        :param drivers: transforms driving the constraint
        :type drivers: list[str] or list[:class:`cgp_maya_utils.scene.Transform`]

        :param driven: transform driven by the constraint
        :type driven: str or :class:`cgp_maya_utils.scene.Transform`

        :param drivenAttributes: driven attributes controlled by the constraint - all attributes if nothing is specified
        :type drivenAttributes: list[:class:`cgp_maya_utils.constants.Transform`]

        :param maintainOffset: ``True`` : constraint created with offset - ``False`` : constraint created without offset
        :type maintainOffset: bool

        :param attributeValues: attribute values to set on the constraint
        :type attributeValues: dict

        :param name: name of the constraint
        :type name: str

        :return: the created constraint
        :rtype: :class:`cgp_maya_utils.scene.ParentConstraint`
        """

        # init
        drivers = [str(driver) for driver in drivers]
        driven = str(driven)

        # get driven attributes
        drivenAttributes = cls._formatDrivenAttributes(driven, drivenAttributes=drivenAttributes)

        # errors
        if not drivenAttributes:
            raise RuntimeError('{0} can\'t be parentConstraint'.format(driven))

        # get skip attributes - translate/rotate axes absent from the list
        skipAttributes = {'t': [], 'r': []}
        for key in skipAttributes:
            for axe in ['x', 'y', 'z']:
                if '{0}{1}'.format(key, axe) not in drivenAttributes:
                    skipAttributes[key].append(axe)

        # execute
        node = maya.cmds.parentConstraint(drivers,
                                          driven,
                                          name=name or '{0}_parentConstraint'.format(driven),
                                          maintainOffset=maintainOffset,
                                          skipTranslate=skipAttributes['t'],
                                          skipRotate=skipAttributes['r'])[0]
        cstrObject = cls(node)

        # apply attributeValues
        if attributeValues:
            cstrObject.setAttributeValues(attributeValues)

        # return
        return cstrObject

    # PRIVATE COMMANDS #

    def _driverInputs(self):
        """get inputs

        :return: the inputs of the constraint related to the drivers
        :rtype: list[str]
        """

        # return - drivers connect through the target multi attribute
        return ['target[*].targetParentMatrix']

    def _drivenOutputs(self):
        """get the outputs of the constraint

        :return: the outputs of the constraint related to the driven
        :rtype: list[str]
        """

        # return - parent constraints drive translation and rotation
        return ['constraintTranslate', 'constraintTranslateX', 'constraintTranslateY', 'constraintTranslateZ',
                'constraintRotate', 'constraintRotateX', 'constraintRotateY', 'constraintRotateZ']

    def _availableAttributes(self):
        """the attributes that are listed by the ``Node.attributes`` function

        :return: the available attributes
        :rtype: list[str]
        """

        # init
        availableAttributes = super(ParentConstraint, self)._availableAttributes()

        # update settingAttributes
        availableAttributes.extend(['interpType',
                                    'restRotateX', 'restRotateY', 'restRotateZ',
                                    'restTranslateX', 'restTranslateY', 'restTranslateZ'])

        # return
        return availableAttributes
class PointConstraint(Constraint):
    """node object that manipulates a ``point`` constraint node
    """

    # ATTRIBUTES #

    _nodeType = 'pointConstraint'

    # COMMANDS #

    @classmethod
    def create(cls, drivers, driven, drivenAttributes=None, maintainOffset=False,
               attributeValues=None, name=None, **__):
        """create a pointConstraint

        :param drivers: transforms driving the constraint
        :type drivers: list[str] or list[:class:`cgp_maya_utils.scene.Transform`]

        :param driven: transform driven by the constraint
        :type driven: str or :class:`cgp_maya_utils.scene.Transform`

        :param drivenAttributes: driven attributes controlled by the constraint - all attributes if nothing is specified
        :type drivenAttributes: list[:class:`cgp_maya_utils.constants.Transform`]

        :param maintainOffset: ``True`` : constraint created with offset - ``False`` : constraint created without offset
        :type maintainOffset: bool

        :param attributeValues: attribute values to set on the constraint
        :type attributeValues: dict

        :param name: name of the constraint
        :type name: str

        :return: the created constraint
        :rtype: :class:`cgp_maya_utils.scene.PointConstraint`
        """

        # init
        drivers = [str(driver) for driver in drivers]
        driven = str(driven)

        # get driven attributes
        drivenAttributes = cls._formatDrivenAttributes(driven, drivenAttributes=drivenAttributes)

        # errors
        if not drivenAttributes:
            # fix: error message previously said 'aimConstraint' (copy-paste slip)
            raise RuntimeError('{0} can\'t be pointConstraint'.format(driven))

        # get skip attributes - axes whose translate channel is not driven
        skipAttributes = []
        for axe in ['x', 'y', 'z']:
            if 't{0}'.format(axe) not in drivenAttributes:
                skipAttributes.append(axe)

        # execute - fix: default name previously ended in '_parentConstraint'
        node = maya.cmds.pointConstraint(drivers,
                                         driven,
                                         name=name or '{0}_pointConstraint'.format(driven),
                                         maintainOffset=maintainOffset,
                                         skip=skipAttributes)[0]
        cstrObject = cls(node)

        # apply attributeValues
        if attributeValues:
            cstrObject.setAttributeValues(attributeValues)

        # return
        return cstrObject

    # PRIVATE COMMANDS #

    def _driverInputs(self):
        """get the inputs of the constraint related to the drivers

        :return: the inputs of the constraint related to the drivers
        :rtype: list[str]
        """

        # the pointConstraint reads each driver through its target translate plug
        return ['target[*].targetTranslate']

    def _drivenOutputs(self):
        """get the outputs of the constraint related to the driven

        :return: the outputs of the constraint related to the driven
        :rtype: list[str]
        """

        # only translate channels are driven by a pointConstraint
        return ['constraintTranslate', 'constraintTranslateX', 'constraintTranslateY', 'constraintTranslateZ']

    def _availableAttributes(self):
        """the attributes that are listed by the ``Node.attributes`` function

        :return: the available attributes
        :rtype: list[str]
        """

        # init - attributes exposed by the generic constraint
        availableAttributes = super(PointConstraint, self)._availableAttributes()

        # update settingAttributes with pointConstraint specific ones
        availableAttributes.extend(['constraintOffsetPolarity',
                                    'restTranslateX',
                                    'restTranslateY',
                                    'restTranslateZ'])

        # return
        return availableAttributes
class ScaleConstraint(Constraint):
    """node object that manipulates a ``scale`` constraint node
    """

    # ATTRIBUTES #

    _nodeType = 'scaleConstraint'

    # COMMANDS #

    @classmethod
    def create(cls, drivers, driven, drivenAttributes=None, maintainOffset=False,
               attributeValues=None, name=None, **__):
        """create a scaleConstraint

        :param drivers: transforms driving the constraint
        :type drivers: list[str] or list[:class:`cgp_maya_utils.scene.Transform`]

        :param driven: transform driven by the constraint
        :type driven: str or :class:`cgp_maya_utils.scene.Transform`

        :param drivenAttributes: driven attributes controlled by the constraint - all attributes if nothing is specified
        :type drivenAttributes: list[:class:`cgp_maya_utils.constants.Transform`]

        :param maintainOffset: ``True`` : constraint created with offset - ``False`` : constraint created without offset
        :type maintainOffset: bool

        :param attributeValues: attribute values to set on the constraint
        :type attributeValues: dict

        :param name: name of the constraint
        :type name: str

        :return: the created constraint
        :rtype: :class:`cgp_maya_utils.scene.ScaleConstraint`
        """

        # init
        drivers = [str(driver) for driver in drivers]
        driven = str(driven)

        # get driven attributes
        drivenAttributes = cls._formatDrivenAttributes(driven, drivenAttributes=drivenAttributes)

        # errors
        if not drivenAttributes:
            # fix: error message previously said 'aimConstraint' (copy-paste slip)
            raise RuntimeError('{0} can\'t be scaleConstraint'.format(driven))

        # get skip attributes - axes whose scale channel is not driven
        skipAttributes = []
        for axe in ['x', 'y', 'z']:
            if 's{0}'.format(axe) not in drivenAttributes:
                skipAttributes.append(axe)

        # execute
        node = maya.cmds.scaleConstraint(drivers,
                                         driven,
                                         name=name or '{0}_scaleConstraint'.format(driven),
                                         maintainOffset=maintainOffset,
                                         skip=skipAttributes)[0]
        constraintObject = cls(node)

        # apply attributeValues
        if attributeValues:
            constraintObject.setAttributeValues(attributeValues)

        # return
        return constraintObject

    # PRIVATE COMMANDS #

    def _driverInputs(self):
        """get the inputs of the constraint related to the drivers

        :return: the inputs of the constraint related to the drivers
        :rtype: list[str]
        """

        # the scaleConstraint reads each driver through its target scale plug
        return ['target[*].targetScale']

    def _drivenOutputs(self):
        """get the outputs of the constraint related to the driven

        :return: the outputs of the constraint related to the driven
        :rtype: list[str]
        """

        # only scale channels are driven by a scaleConstraint
        return ['constraintScale', 'constraintScaleX', 'constraintScaleY', 'constraintScaleZ']

    def _availableAttributes(self):
        """the attributes that are listed by the ``Node.attributes`` function

        :return: the available attributes
        :rtype: list[str]
        """

        # NOTE(review): unlike the other constraints this does not extend the
        # super()._availableAttributes() list - looks deliberate, confirm
        return ['enableRestPosition', 'lockOutput']
|
<filename>basis_set_exchange/writers/gamess_us.py
# Copyright (c) 2017-2022 The Molecular Sciences Software Institute, Virginia Tech
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''
Conversion of basis sets to GAMESS-US
'''
from .. import lut, manip, sort, printing
def write_gamess_us_electron_basis(basis, electron_elements):
    '''Render the electron shells of a basis set as a GAMESS-US $DATA group.

    basis -- basis set dictionary (BSE internal format)
    electron_elements -- keys (Z numbers) of elements that have electron shells
    '''

    out = '$DATA\n'

    for z in electron_elements:
        element_data = basis['elements'][z]

        # each element is introduced by its full name in upper case
        out += '\n{}\n'.format(lut.element_name_from_Z(z).upper())

        for shell in element_data['electron_shells']:
            exponents = shell['exponents']
            coefficients = shell['coefficients']
            nprim = len(exponents)

            # columns: primitive index, exponent, then one column per contraction
            ncol = len(coefficients) + 2

            amchar = lut.amint_to_char(shell['angular_momentum'], hij=True, use_L=True).upper()
            out += '{} {}\n'.format(amchar, nprim)

            # primitives are numbered starting at 1
            index_column = list(range(1, nprim + 1))
            point_places = [0] + [4 + 8 * i + 15 * (i - 1) for i in range(1, ncol)]
            out += printing.write_matrix([index_column, exponents, *coefficients], point_places)

    # GAMESS-US requires a blank line before $END
    out += '\n$END'
    return out
def write_gamess_us_ecp_basis(basis, ecp_elements, ecp_block=True):
    '''Render the ECP potentials of a basis set in GAMESS-US format.

    basis -- basis set dictionary (BSE internal format)
    ecp_elements -- keys (Z numbers) of elements that have ECP potentials
    ecp_block -- wrap the output in a $ECP ... $END group
    '''

    out = "\n\n$ECP\n" if ecp_block else ""

    for z in ecp_elements:
        element_data = basis['elements'][z]
        sym = lut.element_sym_from_Z(z).upper()

        max_ecp_am = max(pot['angular_momentum'][0] for pot in element_data['ecp_potentials'])
        max_ecp_amchar = lut.amint_to_char([max_ecp_am], hij=True)

        # sort lowest->highest angular momentum, then move the highest to the front
        # (GAMESS-US expects the "-ul" local potential first)
        potentials = sorted(element_data['ecp_potentials'], key=lambda pot: pot['angular_momentum'])
        potentials.insert(0, potentials.pop())

        out += '{}-ECP GEN {} {}\n'.format(sym, element_data['ecp_electrons'], max_ecp_am)

        for potential in potentials:
            rexponents = potential['r_exponents']
            gexponents = potential['gaussian_exponents']
            coefficients = potential['coefficients']
            nprim = len(rexponents)
            amchar = lut.amint_to_char(potential['angular_momentum'], hij=False)

            # title line: the highest-am potential is the "-ul" (local) one
            if potential['angular_momentum'][0] == max_ecp_am:
                out += '{:<5} ----- {}-ul potential -----\n'.format(nprim, amchar)
            else:
                out += '{:<5} ----- {}-{} potential -----\n'.format(nprim, amchar, max_ecp_amchar)

            out += printing.write_matrix([*coefficients, rexponents, gexponents], [8, 23, 32])

    if ecp_block:
        out += "$END\n"

    return out
def write_gamess_us_common(basis, ecp_func):
    '''Converts the electronic basis to GAMESS-US, using a
    different function for ECP
    '''

    # Uncontract all but SP, then sort into canonical order
    basis = manip.uncontract_general(basis, True)
    basis = manip.uncontract_spdf(basis, 1, False)
    basis = sort.sort_basis(basis, False)

    # Partition elements by what data they carry
    electron_elements = [k for k, v in basis['elements'].items() if 'electron_shells' in v]
    ecp_elements = [k for k, v in basis['elements'].items() if 'ecp_potentials' in v]

    parts = []
    if electron_elements:
        parts.append(write_gamess_us_electron_basis(basis, electron_elements))
    if ecp_elements:
        parts.append(ecp_func(basis, ecp_elements))

    return ''.join(parts)
def write_gamess_us(basis):
    '''Converts a basis set to GAMESS-US
    '''

    # delegate to the common writer with the standard $ECP formatter
    return write_gamess_us_common(basis, ecp_func=write_gamess_us_ecp_basis)
|
<gh_stars>0
import numpy as np
import tempfile
import logging
import pandas as pd
import rpSBML
import libsbml
import os
##TODO: this really does not need to be an object
class rpMerge:
"""Class that hosts the different functions to merge two SBML files
"""
def __init__(self):
    """Constructor of the class
    """
    # one logger per module keeps log records attributable to rpMerge
    self.logger = logging.getLogger(__name__)
#######################################################################
############################# PRIVATE FUNCTIONS #######################
#######################################################################
def _checklibSBML(self, value, message):
    """Private function that checks the libSBML calls.

    Check that the libSBML python calls do not return error INT and if so, display the error. Taken from: http://sbml.org/Software/libSBML/docs/python-api/create_simple_model_8py-example.html

    :param value: The libSBML command returned int
    :param message: The string that describes the call

    :type value: int
    :type message: str

    :raises AttributeError: If the libSBML command encounters an error or the input value is None

    :return: None
    :rtype: None
    """
    if value is None:
        # fix: carry the diagnostic in the exception instead of raising it bare,
        # so callers that don't inspect the log still see what failed
        err_msg = 'LibSBML returned a null value trying to ' + message + '.'
        self.logger.error(err_msg)
        raise AttributeError(err_msg)
    # deliberately `type(...) is int` (not isinstance) so bool values fall through
    # to the success path instead of being treated as error codes
    elif type(value) is int:
        if value == libsbml.LIBSBML_OPERATION_SUCCESS:
            return
        else:
            err_msg = 'Error encountered trying to ' + message + '.' \
                      + 'LibSBML returned error code ' + str(value) + ': "' \
                      + libsbml.OperationReturnValue_toString(value).strip() + '"'
            self.logger.error(err_msg)
            raise AttributeError(err_msg)
    else:
        # any other (non-None, non-int) return value is considered a success
        return None
def _findUniqueRowColumn(self, pd_matrix):
    """Private function that takes the matrix of similarity scores between the reactions or species of two models and finds the unique matches

    pd_matrix is organised such that the rows are the simulated species and the columns are the measured ones.

    Proceeds in three passes, zeroing out matched rows/columns as it goes
    (NOTE: pd_matrix is mutated in place):
      1. global best: repeatedly take the single highest score in the whole matrix
      2. per-column best: take a column's top score when no other column beats it on that row
      3. remainder: collect whatever non-zero candidates are left (possibly several per column)

    :param pd_matrix: Matrix of reactions or species of two models
    :type pd_matrix: pd.DataFrame

    :return: Dictionary of matches (column name -> list of row names)
    :rtype: dict
    """
    self.logger.debug(pd_matrix)
    to_ret = {}
    ######################## filter by the global top values ################
    self.logger.debug('################ Filter best #############')
    #transform to np.array
    x = pd_matrix.values
    #resolve the rouding issues to find the max
    x = np.around(x, decimals=5)
    #first round involves finding the highest values and if found set to 0.0 the rows and columns (if unique)
    top = np.where(x==np.max(x))
    #as long as its unique keep looping
    if np.count_nonzero(x)==0:
        return to_ret
    # loop while the global maximum occurs at exactly one (row, col) position
    while len(top[0])==1 and len(top[1])==1:
        if np.count_nonzero(x)==0:
            return to_ret
        pd_entry = pd_matrix.iloc[[top[0][0]],[top[1][0]]]
        row_name = str(pd_entry.index[0])
        col_name = str(pd_entry.columns[0])
        if col_name in to_ret:
            self.logger.debug('Overwriting (1): '+str(col_name))
            self.logger.debug(x)
        to_ret[col_name] = [row_name]
        #delete the rows and the columns
        self.logger.debug('==================')
        self.logger.debug('Column: '+str(col_name))
        self.logger.debug('Row: '+str(row_name))
        # zero the matched row and column so they cannot match again
        pd_matrix.loc[:, col_name] = 0.0
        pd_matrix.loc[row_name, :] = 0.0
        x = pd_matrix.values
        x = np.around(x, decimals=5)
        top = np.where(x==np.max(x))
        self.logger.debug(pd_matrix)
        self.logger.debug(top)
        self.logger.debug('==================')
    #################### filter by columns (measured) top values ##############
    self.logger.debug('################ Filter by column best ############')
    x = pd_matrix.values
    x = np.around(x, decimals=5)
    if np.count_nonzero(x)==0:
        return to_ret
    reloop = True
    while reloop:
        if np.count_nonzero(x)==0:
            return to_ret
        reloop = False
        for col in range(len(x[0])):
            # skip columns that are already fully zeroed
            if np.count_nonzero(x[:,col])==0:
                continue
            top_row = np.where(x[:,col]==np.max(x[:,col]))[0]
            if len(top_row)==1:
                top_row = top_row[0]
                #if top_row==0.0:
                #    continue
                #check to see if any other measured pathways have the same or larger score (accross)
                row = list(x[top_row, :])
                #remove current score consideration
                row.pop(col)
                if max(row)>=x[top_row, col]:
                    self.logger.warning('For col '+str(col)+' there are either better or equal values: '+str(row))
                    self.logger.warning(x)
                    continue
                #if you perform any changes on the rows and columns, then you can perform the loop again
                reloop = True
                pd_entry = pd_matrix.iloc[[top_row],[col]]
                self.logger.debug('==================')
                row_name = pd_entry.index[0]
                col_name = pd_entry.columns[0]
                self.logger.debug('Column: '+str(col_name))
                self.logger.debug('Row: '+str(row_name))
                if col_name in to_ret:
                    self.logger.debug('Overwriting (2): '+str(col_name))
                    self.logger.debug(pd_matrix.values)
                to_ret[col_name] = [row_name]
                #delete the rows and the columns
                pd_matrix.loc[:, col_name] = 0.0
                pd_matrix.loc[row_name, :] = 0.0
                x = pd_matrix.values
                x = np.around(x, decimals=5)
                self.logger.debug(pd_matrix)
                self.logger.debug('==================')
    ################## laslty if there are multiple values that are not 0.0 then account for that ######
    self.logger.debug('################# get the rest ##########')
    x = pd_matrix.values
    x = np.around(x, decimals=5)
    if np.count_nonzero(x)==0:
        return to_ret
    for col in range(len(x[0])):
        if not np.count_nonzero(x[:,col])==0:
            top_rows = np.where(x[:,col]==np.max(x[:,col]))[0]
            if len(top_rows)==1:
                top_row = top_rows[0]
                pd_entry = pd_matrix.iloc[[top_row],[col]]
                row_name = pd_entry.index[0]
                col_name = pd_entry.columns[0]
                if not col_name in to_ret:
                    to_ret[col_name] = [row_name]
                else:
                    # a single leftover candidate should have been consumed by pass 1 or 2
                    self.logger.warning('At this point should never have only one: '+str(x[:,col]))
                    self.logger.warning(x)
            else:
                # several equally-scored rows remain: keep them all as candidates
                for top_row in top_rows:
                    pd_entry = pd_matrix.iloc[[top_row],[col]]
                    row_name = pd_entry.index[0]
                    col_name = pd_entry.columns[0]
                    if not col_name in to_ret:
                        to_ret[col_name] = []
                    to_ret[col_name].append(row_name)
    self.logger.debug(pd_matrix)
    self.logger.debug('###################')
    return to_ret
#######################################################################
###################################### INPUT FUNCTIONS ################
#######################################################################
def mergeSBMLFiles(self,
                   path_source,
                   path_target,
                   path_merge):
    """Public function that merges two SBML files together

    :param path_source: Path of the source SBML file
    :param path_target: Path of the target SBML file
    :param path_merge: Path of the output SBML file

    :type path_source: str
    :type path_target: str
    :type path_merge: str

    :return: Success or failure of the function
    :rtype: bool
    """
    # validate both input paths before doing any work
    for label, path in (('Source', path_source), ('Target', path_target)):
        if not os.path.exists(path):
            self.logger.error(label+' SBML file is invalid: '+str(path))
            return False

    # load both models, merge the source into the target and write it out
    source_rpsbml = rpSBML.rpSBML('source', path=path_source)
    target_rpsbml = rpSBML.rpSBML('target', path=path_target)
    self.mergeModels(source_rpsbml,
                     target_rpsbml)
    target_rpsbml.writeSBML(path_merge)
    return True
##########################################################################################
#################################### REACTION ############################################
##########################################################################################
# TODO: need to remove from the list reactions simulated reactions that have matched
# TODO: Remove. This assumes that reactions can match multiple times, when in fact its impossible
def compareReactions(self, species_match, target_rpsbml, source_rpsbml):
    """Compare the reactions of two SBML files

    Compare that all the measured species of a reactions are found within sim species to match with a reaction.
    We assume that there cannot be two reactions that have the same species and reactants. This is maintained by SBML

    :param species_match: The species match dictionary returned by compareSpecies()
    :param target_rpsbml: The target rpSBMl object
    :param source_rpsbml: The source rpSBML object

    :type species_match: dict
    :type target_rpsbml: rpSBML
    :type source_rpsbml: rpSBML

    :return: The dictionary of the reaction matches
    :rtype: dict
    """
    ############## compare the reactions #######################
    #construct sim reactions with species
    self.logger.debug('------ Comparing reactions --------')
    #match the reactants and products conversion to sim species
    tmp_reaction_match = {}
    source_target = {}
    target_source = {}
    for source_reaction in source_rpsbml.model.getListOfReactions():
        source_reaction_miriam = source_rpsbml.readMIRIAMAnnotation(source_reaction.getAnnotation())
        ################ construct the dict transforming the species #######
        source_target[source_reaction.getId()] = {}
        tmp_reaction_match[source_reaction.getId()] = {}
        for target_reaction in target_rpsbml.model.getListOfReactions():
            if not target_reaction.getId() in target_source:
                target_source[target_reaction.getId()] = {}
            target_source[target_reaction.getId()][source_reaction.getId()] = {}
            source_target[source_reaction.getId()][target_reaction.getId()] = {}
            self.logger.debug('\t=========== '+str(target_reaction.getId())+' ==========')
            self.logger.debug('\t+++++++ Species match +++++++')
            # per-pair scoring scaffold; filled in below
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()] = {'reactants': {},
                                                                                   'reactants_score': 0.0,
                                                                                   'products': {},
                                                                                   'products_score': 0.0,
                                                                                   'species_score': 0.0,
                                                                                   'species_std': 0.0,
                                                                                   'species_reaction': None,
                                                                                   'ec_score': 0.0,
                                                                                   'ec_reaction': None,
                                                                                   'score': 0.0,
                                                                                   'found': False}
            target_reaction = target_rpsbml.model.getReaction(target_reaction.getId())
            sim_reactants_id = [reactant.species for reactant in target_reaction.getListOfReactants()]
            sim_products_id = [product.species for product in target_reaction.getListOfProducts()]
            ############ species ############
            self.logger.debug('\tspecies_match: '+str(species_match))
            self.logger.debug('\tspecies_match: '+str(species_match.keys()))
            self.logger.debug('\tsim_reactants_id: '+str(sim_reactants_id))
            self.logger.debug('\tmeasured_reactants_id: '+str([i.species for i in source_reaction.getListOfReactants()]))
            self.logger.debug('\tsim_products_id: '+str(sim_products_id))
            self.logger.debug('\tmeasured_products_id: '+str([i.species for i in source_reaction.getListOfProducts()]))
            #ensure that the match is 1:1
            #1)Here we assume that a reaction cannot have twice the same species
            cannotBeSpecies = []
            #if there is a match then we loop again since removing it from the list of potential matches would be appropriate
            keep_going = True
            while keep_going:
                self.logger.debug('\t\t----------------------------')
                keep_going = False
                for reactant in source_reaction.getListOfReactants():
                    self.logger.debug('\t\tReactant: '+str(reactant.species))
                    #if a species match has been found AND if such a match has been found
                    founReaIDs = [tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants'][i]['id'] for i in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants'] if not tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants'][i]['id']==None]
                    self.logger.debug('\t\tfounReaIDs: '+str(founReaIDs))
                    if reactant.species and reactant.species in species_match and not list(species_match[reactant.species].keys())==[] and not reactant.species in founReaIDs:
                        best_spe = [k for k, v in sorted(species_match[reactant.species].items(), key=lambda item: item[1], reverse=True)][0]
                        tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants'][reactant.species] = {'id': best_spe, 'score': species_match[reactant.species][best_spe], 'found': True}
                        cannotBeSpecies.append(best_spe)
                    elif not reactant.species in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants']:
                        self.logger.warning('\t\tCould not find the following measured reactant in the matched species: '+str(reactant.species))
                        tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants'][reactant.species] = {'id': None, 'score': 0.0, 'found': False}
                for product in source_reaction.getListOfProducts():
                    self.logger.debug('\t\tProduct: '+str(product.species))
                    foundProIDs = [tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products'][i]['id'] for i in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products'] if not tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products'][i]['id']==None]
                    self.logger.debug('\t\tfoundProIDs: '+str(foundProIDs))
                    if product.species and product.species in species_match and not list(species_match[product.species].keys())==[] and not product.species in foundProIDs:
                        best_spe = [k for k, v in sorted(species_match[product.species].items(), key=lambda item: item[1], reverse=True)][0]
                        # fix: matched products were previously stored under the 'reactants' key,
                        # so 'products_score'/'found' never saw successful product matches
                        tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products'][product.species] = {'id': best_spe, 'score': species_match[product.species][best_spe], 'found': True}
                        cannotBeSpecies.append(best_spe)
                    elif not product.species in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products']:
                        self.logger.warning('\t\tCould not find the following measured product in the matched species: '+str(product.species))
                        tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products'][product.species] = {'id': None, 'score': 0.0, 'found': False}
                self.logger.debug('\t\tcannotBeSpecies: '+str(cannotBeSpecies))
            reactants_score = [tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants'][i]['score'] for i in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants']]
            reactants_found = [tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants'][i]['found'] for i in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants']]
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['reactants_score'] = np.mean(reactants_score)
            products_score = [tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products'][i]['score'] for i in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products']]
            products_found = [tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products'][i]['found'] for i in tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products']]
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['products_score'] = np.mean(products_score)
            ### calculate pathway species score
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['species_score'] = np.mean(reactants_score+products_score)
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['species_std'] = np.std(reactants_score+products_score)
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['species_reaction'] = target_reaction.getId()
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['found'] = all(reactants_found+products_found)
            tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['score'] = tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['species_score']
            target_source[target_reaction.getId()][source_reaction.getId()] = tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['score']
            source_target[source_reaction.getId()][target_reaction.getId()] = tmp_reaction_match[source_reaction.getId()][target_reaction.getId()]['score']
    ### matrix compare #####
    unique = self._findUniqueRowColumn(pd.DataFrame(source_target))
    self.logger.debug('findUniqueRowColumn')
    self.logger.debug(unique)
    reaction_match = {}
    for meas in source_target:
        reaction_match[meas] = {'id': None, 'score': 0.0, 'found': False}
        if meas in unique:
            if len(unique[meas])>1:
                self.logger.debug('Multiple values may match, choosing the first arbitrarily: '+str(unique))
            reaction_match[meas]['id'] = unique[meas]
            reaction_match[meas]['score'] = round(tmp_reaction_match[meas][unique[meas][0]]['score'], 5)
            reaction_match[meas]['found'] = tmp_reaction_match[meas][unique[meas][0]]['found']
    #### compile a reaction score based on the ec and species scores
    self.logger.debug(tmp_reaction_match)
    self.logger.debug(reaction_match)
    self.logger.debug('-------------------------------')
    return reaction_match
#TODO: change this with a flag so that all the reactants and products are the same
def containedReaction(self, species_source_target, source_reaction, target_reaction):
"""Compare individual reactions and see if the source reaction is contained within the target one
species_source_target: {'MNXM4__64__MNXC3': {'M_o2_c': 1.0}, 'MNXM10__64__MNXC3': {'M_nadh_c': 1.0}, 'CMPD_0000000003__64__MNXC3': {}, 'TARGET_0000000001__64__MNXC3': {}, 'MNXM188__64__MNXC3': {'M_anth_c': 1.0}, 'BC_32877__64__MNXC3': {'M_nh4_c': 0.8}, 'BC_32401__64__MNXC3': {'M_nad_c': 0.2}, 'BC_26705__64__MNXC3': {'M_h_c': 1.0}, 'BC_20662__64__MNXC3': {'M_co2_c': 1.0}}
the first keys are the source compartment ids
the second key is the source species id
the value is the target species id
Note that we assure that the match is 1:1 between species using the species match
:param species_source_target: The comparison dictionary between the species of two SBML files
:param source_reaction: The target reaction
:param target_reaction: The source reaction
:type species_source_target: dict
:type source_reaction: libsbml.Reaction
:type target_reaction: libsbml.Reaction
:return: The score of the match and the dict of the match in that order
:rtype: tuple
"""
scores = []
all_match = True
########### reactants #######
ignore_reactants = []
for source_reactant in source_reaction.getListOfReactants():
if source_reactant.species in species_source_target:
spe_found = False
for target_reactiontant in target_reaction.getListOfReactants():
if target_reactiontant.species in species_source_target[source_reactant.species] and not target_reactiontant.species in ignore_reactants:
scores.append(species_source_target[source_reactant.species][target_reactiontant.species])
ignore_reactants.append(target_reactiontant.species)
spe_found = True
break
if not spe_found:
scores.append(0.0)
all_match = False
else:
self.logger.debug('Cannot find the source species '+str(source_reactant.species)+' in the target species: '+str(species_source_target))
scores.append(0.0)
all_match = False
#products
ignore_products = []
for source_product in source_reaction.getListOfProducts():
if source_product.species in species_source_target:
pro_found = False
for sim_product in target_reaction.getListOfProducts():
if sim_product.species in species_source_target[source_product.species] and not sim_product.species in ignore_products:
scores.append(species_source_target[source_product.species][sim_product.species])
ignore_products.append(sim_product.species)
pro_found = True
break
if not pro_found:
scores.append(0.0)
all_match = False
else:
self.logger.debug('Cannot find the measured species '+str(source_product.species)+' in the the matched species: '+str(species_source_target))
scores.append(0.0)
all_match = False
return np.mean(scores), all_match
#TODO: change this with a flag so that all the reactants and products are the same
def compareReaction(self, species_source_target, source_reaction, target_reaction):
"""Compare two reactions and elect that they are the same if they have exactly the same reactants and products
species_source_target: {'MNXM4__64__MNXC3': {'M_o2_c': 1.0}, 'MNXM10__64__MNXC3': {'M_nadh_c': 1.0}, 'CMPD_0000000003__64__MNXC3': {}, 'TARGET_0000000001__64__MNXC3': {}, 'MNXM188__64__MNXC3': {'M_anth_c': 1.0}, 'BC_32877__64__MNXC3': {'M_nh4_c': 0.8}, 'BC_32401__64__MNXC3': {'M_nad_c': 0.2}, 'BC_26705__64__MNXC3': {'M_h_c': 1.0}, 'BC_20662__64__MNXC3': {'M_co2_c': 1.0}}
the first keys are the source compartment ids
the second key is the source species id
the value is the target species id
Note that we assure that the match is 1:1 between species using the species match
:param species_source_target: The comparison dictionary between the species of two SBML files
:param source_reaction: The target reaction
:param target_reaction: The source reaction
:type species_source_target: dict
:type source_reaction: libsbml.Reaction
:type target_reaction: libsbml.Reaction
:return: The score of the match and boolean if its a match or not
:rtype: tuple
"""
scores = []
source_reactants = [i.species for i in source_reaction.getListOfReactants()]
target_reactants = []
for i in target_reaction.getListOfReactants():
if i.species in species_source_target:
if not species_source_target[i.species]=={}:
#WARNING: Taking the first one arbitrarely
conv_spe = [y for y in species_source_target[i.species]][0]
target_reactants.append(conv_spe)
scores.append(species_source_target[i.species][conv_spe])
else:
target_reactants.append(i.species)
scores.append(1.0)
else:
target_reactants.append(i.species)
scores.append(1.0)
source_products = [i.species for i in source_reaction.getListOfProducts()]
target_products = []
for i in target_reaction.getListOfReactants():
if i.species in species_source_target:
if not species_source_target[i.species]=={}:
#WARNING: Taking the first one arbitrarely
conv_spe = [y for y in species_source_target[i.species]][0]
target_products.append(conv_spe)
scores.append(species_source_target[i.species][conv_spe])
else:
target_products.append(i.species)
scores.append(1.0)
else:
target_products.append(i.species)
scores.append(1.0)
'''
self.logger.debug('source_reactants: '+str(source_reactants))
self.logger.debug('target_reactants: '+str(target_reactants))
self.logger.debug('source_products: '+str(source_products))
self.logger.debug('target_products: '+str(target_products))
self.logger.debug(set(source_reactants)-set(target_reactants))
self.logger.debug(set(source_products)-set(target_products))
'''
if not set(source_reactants)-set(target_reactants) and not set(source_products)-set(target_products):
return np.mean(scores), True
else:
return np.mean(scores), False
##########################################################################################
##################################### SPECIES ############################################
##########################################################################################
# TODO: for all the measured species compare with the simualted one. Then find the measured and simulated species that match the best and exclude the
# simulated species from potentially matching with another
def compareSpecies(self, comp_source_target, source_rpsbml, target_rpsbml):
"""Match all the measured chemical species to the simulated chemical species between two SBML
:param comp_source_target: The comparison dictionary between the compartment of two SBML files
:param source_rpsbml: The source rpSBML
:param target_rpsbml: The target rpSBML
:type species_source_target: dict
:type source_rpsbml: rpSBML
:type target_rpsbml: rpSBML
:return: The compartment match dictionary
:rtype: dict
"""
############## compare species ###################
source_target = {}
target_source = {}
species_match = {}
for source_species in source_rpsbml.model.getListOfSpecies():
self.logger.debug('--- Trying to match chemical species: '+str(source_species.getId())+' ---')
source_target[source_species.getId()] = {}
species_match[source_species.getId()] = {}
#species_match[source_species.getId()] = {'id': None, 'score': 0.0, 'found': False}
#TODO: need to exclude from the match if a simulated chemical species is already matched with a higher score to another measured species
for target_species in target_rpsbml.model.getListOfSpecies():
#skip the species that are not in the same compartment as the source
if not target_species.getCompartment()==comp_source_target[source_species.getCompartment()]:
continue
source_target[source_species.getId()][target_species.getId()] = {'score': 0.0, 'found': False}
if not target_species.getId() in target_source:
target_source[target_species.getId()] = {}
target_source[target_species.getId()][source_species.getId()] = {'score': 0.0, 'found': False}
source_brsynth_annot = target_rpsbml.readBRSYNTHAnnotation(source_species.getAnnotation())
target_brsynth_annot = target_rpsbml.readBRSYNTHAnnotation(target_species.getAnnotation())
source_miriam_annot = target_rpsbml.readMIRIAMAnnotation(source_species.getAnnotation())
target_miriam_annot = target_rpsbml.readMIRIAMAnnotation(target_species.getAnnotation())
#### MIRIAM ####
if target_rpsbml.compareMIRIAMAnnotations(source_species.getAnnotation(), target_species.getAnnotation()):
self.logger.debug('--> Matched MIRIAM: '+str(target_species.getId()))
source_target[source_species.getId()][target_species.getId()]['score'] += 0.4
#source_target[source_species.getId()][target_species.getId()]['score'] += 0.2+0.2*jaccardMIRIAM(target_miriam_annot, source_miriam_annot)
source_target[source_species.getId()][target_species.getId()]['found'] = True
##### InChIKey ##########
#find according to the inchikey -- allow partial matches
#compare either inchikey in brsynth annnotation or MIRIAM annotation
#NOTE: here we prioritise the BRSynth annotation inchikey over the MIRIAM
source_inchikey_split = None
target_inchikey_split = None
if 'inchikey' in source_brsynth_annot:
source_inchikey_split = source_brsynth_annot['inchikey'].split('-')
elif 'inchikey' in source_miriam_annot:
if not len(source_miriam_annot['inchikey'])==1:
#TODO: handle mutliple inchikey with mutliple compare and the highest comparison value kept
self.logger.warning('There are multiple inchikey values, taking the first one: '+str(source_miriam_annot['inchikey']))
source_inchikey_split = source_miriam_annot['inchikey'][0].split('-')
if 'inchikey' in target_brsynth_annot:
target_inchikey_split = target_brsynth_annot['inchikey'].split('-')
elif 'inchikey' in target_miriam_annot:
if not len(target_miriam_annot['inchikey'])==1:
#TODO: handle mutliple inchikey with mutliple compare and the highest comparison value kept
self.logger.warning('There are multiple inchikey values, taking the first one: '+str(target_brsynth_annot['inchikey']))
target_inchikey_split = target_miriam_annot['inchikey'][0].split('-')
if source_inchikey_split and target_inchikey_split:
if source_inchikey_split[0]==target_inchikey_split[0]:
self.logger.debug('Matched first layer InChIkey: ('+str(source_inchikey_split)+' -- '+str(target_inchikey_split)+')')
source_target[source_species.getId()][target_species.getId()]['score'] += 0.2
if source_inchikey_split[1]==target_inchikey_split[1]:
self.logger.debug('Matched second layer InChIkey: ('+str(source_inchikey_split)+' -- '+str(target_inchikey_split)+')')
source_target[source_species.getId()][target_species.getId()]['score'] += 0.2
source_target[source_species.getId()][target_species.getId()]['found'] = True
if source_inchikey_split[2]==target_inchikey_split[2]:
self.logger.debug('Matched third layer InChIkey: ('+str(source_inchikey_split)+' -- '+str(target_inchikey_split)+')')
source_target[source_species.getId()][target_species.getId()]['score'] += 0.2
source_target[source_species.getId()][target_species.getId()]['found'] = True
target_source[target_species.getId()][source_species.getId()]['score'] = source_target[source_species.getId()][target_species.getId()]['score']
target_source[target_species.getId()][source_species.getId()]['found'] = source_target[source_species.getId()][target_species.getId()]['found']
#build the matrix to send
source_target_mat = {}
for i in source_target:
source_target_mat[i] = {}
for y in source_target[i]:
source_target_mat[i][y] = source_target[i][y]['score']
unique = self._findUniqueRowColumn(pd.DataFrame(source_target_mat))
self.logger.debug('findUniqueRowColumn:')
self.logger.debug(unique)
for meas in source_target:
if meas in unique:
species_match[meas] = {}
for unique_spe in unique[meas]:
species_match[meas][unique_spe] = round(source_target[meas][unique[meas][0]]['score'], 5)
else:
self.logger.warning('Cannot find a species match for the measured species: '+str(meas))
self.logger.debug('#########################')
self.logger.debug('species_match:')
self.logger.debug(species_match)
self.logger.debug('-----------------------')
return species_match
######################################################################################################################
############################################### EC NUMBER ############################################################
######################################################################################################################
def compareEC(meas_reac_miriam, sim_reac_miriam):
"""Compare two MIRIAM annotations and find the similarity of their EC number
:param meas_reac_miriam: The annotation object of the source
:param sim_reac_miriam: The annotation object of the target
:type meas_reac_miriam: libsbml.XMLNode
:type sim_reac_miriam: libsbml.XMLNode
:return: The match score
:rtype: float
"""
#Warning we only match a single reaction at a time -- assume that there cannot be more than one to match at a given time
if 'ec-code' in meas_reac_miriam and 'ec-code' in sim_reac_miriam:
measured_frac_ec = [[y for y in ec.split('.') if not y=='-'] for ec in meas_reac_miriam['ec-code']]
sim_frac_ec = [[y for y in ec.split('.') if not y=='-'] for ec in sim_reac_miriam['ec-code']]
#complete the ec numbers with None to be length of 4
for i in range(len(measured_frac_ec)):
for y in range(len(measured_frac_ec[i]), 4):
measured_frac_ec[i].append(None)
for i in range(len(sim_frac_ec)):
for y in range(len(sim_frac_ec[i]), 4):
sim_frac_ec[i].append(None)
self.logger.debug('Measured: ')
self.logger.debug(measured_frac_ec)
self.logger.debug('Simulated: ')
self.logger.debug(sim_frac_ec)
best_ec_compare = {'meas_ec': [], 'sim_ec': [], 'score': 0.0, 'found': False}
for ec_m in measured_frac_ec:
for ec_s in sim_frac_ec:
tmp_score = 0.0
for i in range(4):
if not ec_m[i]==None and not ec_s[i]==None:
if ec_m[i]==ec_s[i]:
tmp_score += 0.25
if i==2:
best_ec_compare['found'] = True
else:
break
if tmp_score>best_ec_compare['score']:
best_ec_compare['meas_ec'] = ec_m
best_ec_compare['sim_ec'] = ec_s
best_ec_compare['score'] = tmp_score
return best_ec_compare['score']
else:
self.logger.warning('One of the two reactions does not have any EC entries.\nMeasured: '+str(meas_reac_miriam)+' \nSimulated: '+str(sim_reac_miriam))
return 0.0
#############################################################################################################
############################################ MERGE ##########################################################
#############################################################################################################
#TODO: add a confidence in the merge using the score in
#TODO: seperate the different parts so that others may use it
def mergeModels(self,
source_rpsbml,
target_rpsbml):
"""Merge two models species and reactions using the annotations to recognise the same species and reactions
The source model has to have both the GROUPS and FBC packages enabled in its SBML. The course must have a groups
called rp_pathway. If not use the readSBML() function to create a model
We add the reactions and species from the rpsbml to the target_model
:param source_rpsbml: The source rpSBML object
:param target_rpsbml: The target rpSBML object
:type source_rpsbml: rpSBML
:type target_rpsbml: rpSBML
:return: Tuple of dict where the first entry is the species source to target conversion and the second is the reaction source to target conversion
:rtype: tuple
"""
#target_rpsbml.model = target_document.getModel()
#Find the ID's of the similar target_rpsbml.model species
################ MODEL FBC ########################
if not target_rpsbml.model.isPackageEnabled('fbc'):
self._checklibSBML(target_rpsbml.model.enablePackage(
'http://www.sbml.org/sbml/level3/version1/fbc/version2',
'fbc',
True),
'Enabling the FBC package')
if not source_rpsbml.model.isPackageEnabled('fbc'):
self._checklibSBML(source_rpsbml.model.enablePackage(
'http://www.sbml.org/sbml/level3/version1/fbc/version2',
'fbc',
True),
'Enabling the FBC package')
target_fbc = target_rpsbml.model.getPlugin('fbc')
source_fbc = source_rpsbml.model.getPlugin('fbc')
#note sure why one needs to set this as False
self._checklibSBML(source_rpsbml.document.setPackageRequired('fbc', False), 'enabling FBC package')
################ UNITDEFINITIONS ######
#return the list of unit definitions id's for the target to avoid overwritting
#WARNING: this means that the original unit definitions will be prefered over the new one
target_unitDefID = [i.getId() for i in target_rpsbml.model.getListOfUnitDefinitions()]
for source_unitDef in source_rpsbml.model.getListOfUnitDefinitions():
if not source_unitDef.getId() in target_unitDefID: #have to compare by ID since no annotation
#create a new unitDef in the target
target_unitDef = target_rpsbml.model.createUnitDefinition()
self._checklibSBML(target_unitDef, 'fetching target unit definition')
#copy unitDef info to the target
self._checklibSBML(target_unitDef.setId(source_unitDef.getId()),
'setting target unit definition ID')
self._checklibSBML(target_unitDef.setAnnotation(source_unitDef.getAnnotation()),
'setting target unit definition Annotation')
for source_unit in source_unitDef.getListOfUnits():
#copy unit info to the target unitDef
target_unit = target_unitDef.createUnit()
self._checklibSBML(target_unit, 'creating target unit')
self._checklibSBML(target_unit.setKind(source_unit.getKind()),
'setting target unit kind')
self._checklibSBML(target_unit.setExponent(source_unit.getExponent()),
'setting target unit exponent')
self._checklibSBML(target_unit.setScale(source_unit.getScale()),
'setting target unit scale')
self._checklibSBML(target_unit.setMultiplier(source_unit.getMultiplier()),
'setting target unit multiplier')
target_unitDefID.append(source_unitDef.getId()) #add to the list to make sure its not added twice
################ COMPARTMENTS ###############
# Compare by MIRIAM annotations
#Note that key is source and value is target conversion
comp_source_target = {}
for source_compartment in source_rpsbml.model.getListOfCompartments():
found = False
target_ids = [i.getId() for i in target_rpsbml.model.getListOfCompartments()]
source_annotation = source_compartment.getAnnotation()
if not source_annotation:
self.logger.warning('No annotation for the source of compartment '+str(source_compartment.getId()))
continue
#compare by MIRIAM first
for target_compartment in target_rpsbml.model.getListOfCompartments():
target_annotation = target_compartment.getAnnotation()
if not target_annotation:
self.logger.warning('No annotation for the target of compartment: '+str(target_compartment.getId()))
continue
if source_rpsbml.compareMIRIAMAnnotations(source_annotation, target_annotation):
found = True
comp_source_target[source_compartment.getId()] = target_compartment.getId()
break
if not found:
#if the id is not found, see if the ids already exists
if source_compartment.getId() in target_ids:
comp_source_target[source_compartment.getId()] = source_compartment.getId()
found = True
#if there is not MIRIAM match and the id's differ then add it
else:
target_compartment = target_rpsbml.model.createCompartment()
self._checklibSBML(target_compartment, 'Creating target compartment')
self._checklibSBML(target_compartment.setMetaId(source_compartment.getMetaId()),
'setting target metaId')
#make sure that the ID is different
if source_compartment.getId()==target_compartment.getId():
self._checklibSBML(target_compartment.setId(source_compartment.getId()+'_sourceModel'),
'setting target id')
else:
self._checklibSBML(target_compartment.setId(source_compartment.getId()),
'setting target id')
self._checklibSBML(target_compartment.setName(source_compartment.getName()),
'setting target name')
self._checklibSBML(target_compartment.setConstant(source_compartment.getConstant()),
'setting target constant')
self._checklibSBML(target_compartment.setAnnotation(source_compartment.getAnnotation()),
'setting target annotation')
self._checklibSBML(target_compartment.setSBOTerm(source_compartment.getSBOTerm()),
'setting target annotation')
comp_source_target[target_compartment.getId()] = target_compartment.getId()
self.logger.debug('comp_source_target: '+str(comp_source_target))
################ PARAMETERS ###########
#WARNING: here we compare by ID
targetParametersID = [i.getId() for i in target_rpsbml.model.getListOfParameters()]
for source_parameter in source_rpsbml.model.getListOfParameters():
if not source_parameter.getId() in targetParametersID:
target_parameter = target_rpsbml.model.createParameter()
self._checklibSBML(target_parameter, 'creating target parameter')
self._checklibSBML(target_parameter.setId(source_parameter.getId()), 'setting target parameter ID')
self._checklibSBML(target_parameter.setSBOTerm(source_parameter.getSBOTerm()),
'setting target parameter SBO')
self._checklibSBML(target_parameter.setUnits(source_parameter.getUnits()),
'setting target parameter Units')
self._checklibSBML(target_parameter.setValue(source_parameter.getValue()),
'setting target parameter Value')
self._checklibSBML(target_parameter.setConstant(source_parameter.getConstant()),
'setting target parameter ID')
################ FBC GENE PRODUCTS ########################
#WARNING: here we compare by ID
targetGenProductID = [i.getId() for i in target_fbc.getListOfGeneProducts()]
for source_geneProduct in source_fbc.getListOfGeneProducts():
if not source_geneProduct.getId() in targetGenProductID:
target_geneProduct = target_fbc.createGeneProduct()
self._checklibSBML(target_geneProduct, 'creating target gene product')
self._checklibSBML(target_geneProduct.setId(source_geneProduct.getId()),
'setting target gene product id')
self._checklibSBML(target_geneProduct.setLabel(source_geneProduct.getLabel()),
'setting target gene product label')
self._checklibSBML(target_geneProduct.setName(source_geneProduct.getName()),
'setting target gene product name')
self._checklibSBML(target_geneProduct.setMetaId(source_geneProduct.getMetaId()),
'setting target gene product meta_id')
############### FBC OBJECTIVES ############
#WARNING: here we compare by ID
#TODO: if overlapping id's need to replace the id with modified, as for the species
targetObjectiveID = [i.getId() for i in target_fbc.getListOfObjectives()]
sourceObjectiveID = [i.getId() for i in source_fbc.getListOfObjectives()]
for source_objective in source_fbc.getListOfObjectives():
if not source_objective.getId() in targetObjectiveID:
target_objective = target_fbc.createObjective()
self._checklibSBML(target_objective, 'creating target objective')
self._checklibSBML(target_objective.setId(source_objective.getId()), 'setting target objective')
self._checklibSBML(target_objective.setName(source_objective.getName()), 'setting target objective')
self._checklibSBML(target_objective.setType(source_objective.getType()),
'setting target objective type')
for source_fluxObjective in source_objective.getListOfFluxObjectives():
target_fluxObjective = target_objective.createFluxObjective()
self._checklibSBML(target_fluxObjective, 'creating target flux objective')
self._checklibSBML(target_fluxObjective.setName(source_fluxObjective.getName()),
'setting target flux objective name')
self._checklibSBML(target_fluxObjective.setCoefficient(source_fluxObjective.getCoefficient()),
'setting target flux objective coefficient')
self._checklibSBML(target_fluxObjective.setReaction(source_fluxObjective.getReaction()),
'setting target flux objective reaction')
self._checklibSBML(target_fluxObjective.setAnnotation(source_fluxObjective.getAnnotation()),
'setting target flux obj annotation from source flux obj')
self._checklibSBML(target_objective.setAnnotation(source_objective.getAnnotation()),
'setting target obj annotation from source obj')
self.logger.debug('targetObjectiveID: '+str(targetObjectiveID))
self.logger.debug('sourceObjectiveID: '+str(sourceObjectiveID))
################ SPECIES ####################
species_source_target = self.compareSpecies(comp_source_target, source_rpsbml, target_rpsbml)
self.logger.debug('species_source_target: '+str(species_source_target))
target_species_ids = [i.id for i in target_rpsbml.model.getListOfSpecies()]
for source_species in species_source_target:
list_target = [i for i in species_source_target[source_species]]
if source_species in list_target:
self.logger.warning('The source ('+str(source_species)+') and target species ids ('+str(list_target)+') are the same')
#if match, replace the annotation from the source to the target
if not species_source_target[source_species]=={}:
list_species = [i for i in species_source_target[source_species]]
self.logger.debug('list_species: '+str(list_species))
if len(list_species)==0:
continue
#self.logger.warning('Source species '+str(member.getIdRef())+' has been created in the target model')
elif len(list_species)>1:
self.logger.warning('There are multiple matches to the species '+str(member.getIdRef())+'... taking the first one: '+str(list_species))
#TODO: loop throught the annotations and replace the non-overlapping information
target_member = target_rpsbml.model.getSpecies(list_species[0])
source_member = source_rpsbml.model.getSpecies(source_species)
self._checklibSBML(target_member, 'Retraiving the target species: '+str(list_species[0]))
self._checklibSBML(source_member, 'Retreiving the source species: '+str(source_species))
self._checklibSBML(target_member.setAnnotation(source_member.getAnnotation()), 'Replacing the annotations')
#if no match then add it to the target model
else:
self.logger.debug('Creating source species '+str(source_species)+' in target rpsbml')
source_species = source_rpsbml.model.getSpecies(source_species)
if not source_species:
self.logger.error('Cannot retreive model species: '+str(source_species))
else:
self._checklibSBML(source_species, 'fetching source species')
targetModel_species = target_rpsbml.model.createSpecies()
self._checklibSBML(targetModel_species, 'creating species')
self._checklibSBML(targetModel_species.setMetaId(source_species.getMetaId()),
'setting target metaId')
## need to check if the id of the source species does not already exist in the target model
if source_species.getId() in target_species_ids:
target_species_id = source_rpsbml.model.id+'__'+str(source_species.getId())
if not source_species.getId() in species_source_target:
species_source_target[source_species.getId()] = {}
species_source_target[source_species.getId()][source_rpsbml.model.id+'__'+str(source_species.getId())] = 1.0
else:
target_species_id = source_species.getId()
self._checklibSBML(targetModel_species.setId(target_species_id),
'setting target id')
self._checklibSBML(targetModel_species.setCompartment(comp_source_target[source_species.getCompartment()]),
'setting target compartment')
self._checklibSBML(targetModel_species.setInitialConcentration(
source_species.getInitialConcentration()),
'setting target initial concentration')
self._checklibSBML(targetModel_species.setBoundaryCondition(
source_species.getBoundaryCondition()),
'setting target boundary concentration')
self._checklibSBML(targetModel_species.setHasOnlySubstanceUnits(
source_species.getHasOnlySubstanceUnits()),
'setting target has only substance units')
self._checklibSBML(targetModel_species.setBoundaryCondition(
source_species.getBoundaryCondition()),
'setting target boundary condition')
self._checklibSBML(targetModel_species.setConstant(source_species.getConstant()),
'setting target constant')
self._checklibSBML(targetModel_species.setAnnotation(source_species.getAnnotation()),
'setting target annotation')
################ REACTIONS ###################
#TODO; consider the case where two reactions have the same ID's but are not the same reactions
#TODO: if overlapping id's need to replace the id with modified, as for the species
reactions_source_target = {}
for source_reaction in source_rpsbml.model.getListOfReactions():
is_found = False
for target_reaction in target_rpsbml.model.getListOfReactions():
score, match = self.compareReaction(species_source_target, source_reaction, target_reaction)
if match:
self.logger.debug('Source reaction '+str(source_reaction)+' matches with target reaction '+str(target_reaction))
#source_reaction[source_reaction.getId()] = target_reaction.getId()
reactions_source_target[source_reaction.getId()] = target_reaction.getId()
is_found = True
break
if not is_found:
self.logger.debug('Cannot find source reaction: '+str(source_reaction.getId()))
self._checklibSBML(source_reaction, 'fetching source reaction')
target_reaction = target_rpsbml.model.createReaction()
self._checklibSBML(target_reaction, 'create reaction')
target_fbc = target_reaction.getPlugin('fbc')
self._checklibSBML(target_fbc, 'fetching target FBC package')
source_fbc = source_reaction.getPlugin('fbc')
self._checklibSBML(source_fbc, 'fetching source FBC package')
source_upperFluxBound = source_fbc.getUpperFluxBound()
self._checklibSBML(source_upperFluxBound, 'fetching upper flux bound')
self._checklibSBML(target_fbc.setUpperFluxBound(source_upperFluxBound),
'setting upper flux bound')
source_lowerFluxBound = source_fbc.getLowerFluxBound()
self._checklibSBML(source_lowerFluxBound, 'fetching lower flux bound')
self._checklibSBML(target_fbc.setLowerFluxBound(source_lowerFluxBound),
'setting lower flux bound')
self._checklibSBML(target_reaction.setId(source_reaction.getId()), 'set reaction id')
self._checklibSBML(target_reaction.setName(source_reaction.getName()), 'set name')
self._checklibSBML(target_reaction.setSBOTerm(source_reaction.getSBOTerm()),
'setting the reaction system biology ontology (SBO)') #set as process
#TODO: consider having the two parameters as input to the function
self._checklibSBML(target_reaction.setReversible(source_reaction.getReversible()),
'set reaction reversibility flag')
self._checklibSBML(target_reaction.setFast(source_reaction.getFast()),
'set reaction "fast" attribute')
self._checklibSBML(target_reaction.setMetaId(source_reaction.getMetaId()), 'setting species meta_id')
self._checklibSBML(target_reaction.setAnnotation(source_reaction.getAnnotation()),
'setting annotation for source reaction')
#Reactants
self.logger.debug('Setting reactants')
for source_reaction_reactantID in [i.species for i in source_reaction.getListOfReactants()]:
self.logger.debug('\tAdding '+str(source_reaction_reactantID))
target_reactant = target_reaction.createReactant()
self._checklibSBML(target_reactant, 'create target reactant')
if source_reaction_reactantID in species_source_target:
if not species_source_target[source_reaction_reactantID]=={}:
if len(species_source_target[source_reaction_reactantID])>1:
self.logger.warning('Multiple matches for '+str(source_reaction_reactantID)+': '+str(species_source_target[source_reaction_reactantID]))
self.logger.warning('Taking one the first one arbitrarely: '+str([i for i in species_source_target[source_reaction_reactantID]][0]))
#WARNING: taking the first one arbitrarely
self._checklibSBML(target_reactant.setSpecies(
[i for i in species_source_target[source_reaction_reactantID]][0]), 'assign reactant species')
else:
self._checklibSBML(target_reactant.setSpecies(source_reaction_reactantID),
'assign reactant species')
else:
self._checklibSBML(target_reactant.setSpecies(source_reaction_reactantID),
'assign reactant species')
source_reactant = source_reaction.getReactant(source_reaction_reactantID)
self._checklibSBML(source_reactant, 'fetch source reactant')
self._checklibSBML(target_reactant.setConstant(source_reactant.getConstant()),
'set "constant" on species '+str(source_reactant.getConstant()))
self._checklibSBML(target_reactant.setStoichiometry(source_reactant.getStoichiometry()),
'set stoichiometry ('+str(source_reactant.getStoichiometry)+')')
#Products
self.logger.debug('Setting products')
for source_reaction_productID in [i.species for i in source_reaction.getListOfProducts()]:
self.logger.debug('\tAdding '+str(source_reaction_productID))
target_product = target_reaction.createProduct()
self._checklibSBML(target_product, 'create target reactant')
if source_reaction_productID in species_source_target:
if not species_source_target[source_reaction_productID]=={}:
if len(species_source_target[source_reaction_reactantID])>1:
self.logger.warning('Multiple matches for '+str(source_reaction_productID)+': '+str(species_source_target[source_reaction_productID]))
self.logger.warning('Taking one arbitrarely')
#WARNING: taking the first one arbitrarely
self._checklibSBML(target_product.setSpecies(
[i for i in species_source_target[source_reaction_productID]][0]), 'assign reactant product')
else:
self._checklibSBML(target_product.setSpecies(source_reaction_productID),
'assign reactant product')
else:
self._checklibSBML(target_product.setSpecies(source_reaction_productID),
'assign reactant product')
source_product = source_reaction.getProduct(source_reaction_productID)
self._checklibSBML(source_product, 'fetch source reactant')
self._checklibSBML(target_product.setConstant(source_product.getConstant()),
'set "constant" on product '+str(source_product.getConstant()))
self._checklibSBML(target_product.setStoichiometry(source_product.getStoichiometry()),
'set stoichiometry ('+str(source_product.getStoichiometry)+')')
#### GROUPS #####
#TODO loop through the groups to add them
if not target_rpsbml.model.isPackageEnabled('groups'):
self._checklibSBML(target_rpsbml.model.enablePackage(
'http://www.sbml.org/sbml/level3/version1/groups/version1',
'groups',
True),
'Enabling the GROUPS package')
#!!!! must be set to false for no apparent reason
self._checklibSBML(source_rpsbml.document.setPackageRequired('groups', False), 'enabling groups package')
source_groups = source_rpsbml.model.getPlugin('groups')
self._checklibSBML(source_groups, 'fetching the source model groups')
target_groups = target_rpsbml.model.getPlugin('groups')
self._checklibSBML(target_groups, 'fetching the target model groups')
#self.logger.debug('species_source_target: '+str(species_source_target))
#self.logger.debug('reactions_source_target: '+str(reactions_source_target))
source_groups_ids = [i.id for i in source_groups.getListOfGroups()]
target_groups_ids = [i.id for i in target_groups.getListOfGroups()]
#NOTE: only need to update the source species since these are the ones that are replaced with their equivalent
for source_group in source_groups.getListOfGroups():
#overwrite in the group the reaction members that have been replaced
for member in source_group.getListOfMembers():
if member.getIdRef() in reactions_source_target:
if reactions_source_target[member.getIdRef()]:
member.setIdRef(reactions_source_target[member.getIdRef()])
#overwrite in the group the species members that have been replaced
for member in source_group.getListOfMembers():
if member.getIdRef() in species_source_target:
if species_source_target[member.getIdRef()]:
list_species = [i for i in species_source_target[member.getIdRef()]]
self.logger.debug('species_source_target: '+str(species_source_target))
self.logger.debug('list_species: '+str(list_species))
if len(list_species)==0:
continue
#self.logger.warning('Source species '+str(member.getIdRef())+' has been created in the target model')
elif len(list_species)>1:
self.logger.warning('There are multiple matches to the species '+str(member.getIdRef())+'... taking the first one: '+str(list_species))
self._checklibSBML(member.setIdRef(list_species[0]), 'Setting name to the groups member')
#create and add the groups if a source group does not exist in the target
if not source_group.id in target_groups_ids:
self._checklibSBML(target_groups.addGroup(source_group),
'copy the source groups to the target groups')
#if the group already exists in the target then need to add new members
else:
target_group = target_groups.getGroup(source_group.id)
target_group_ids = [i.getIdRef() for i in target_group.getListOfMembers()]
for member in source_group.getListOfMembers():
if member.getIdRef() not in target_group_ids:
new_member = target_group.createMember()
self._checklibSBML(new_member, 'Creating a new groups member')
self._checklibSBML(new_member.setIdRef(member.getIdRef()), 'Setting name to the groups member')
"""
for group in source_groups.getListOfGroups():
#for all the species that need to be converted, replace the ones that are
#if the group is the species group, replace the ones detected from species_source_target
if group.getId()==species_group_id or group.getId()==sink_species_group_id:
for member in group.getListOfMembers():
if member.getIdRef() in species_source_target:
list_species = [i for i in species_source_target[member.getIdRef()]]
self.logger.debug('species_source_target: '+str(species_source_target))
self.logger.debug('list_species: '+str(list_species))
if len(list_species)==0:
self.logger.warning('Source species '+str(member.getIdRef())+' has been created in the target model')
elif len(list_species)>1:
self.logger.warning('There are multiple matches to the species '+str(member.getIdRef())+'... taking the first one: '+str(list_species))
#WARNING: taking the first one arbitrarely
member.setIdRef(list_species[0])
else:
member.setIdRef(list_species[0])
elif group.getId()==pathway_id:
for member in group.getListOfMembers():
if member.getIdRef() in reactions_source_target:
member.setIdRef(reactions_source_target[member.getIdRef()])
self._checklibSBML(target_groups.addGroup(group),
'copy the source groups to the target groups')
"""
###### TITLES #####
target_rpsbml.model.setId(target_rpsbml.model.getId()+'__'+source_rpsbml.model.getId())
target_rpsbml.model.setName(target_rpsbml.model.getName()+' merged with '+source_rpsbml.model.getId())
'''
if fillOrphanSpecies==True:
self.fillOrphan(target_rpsbml, self.pathway_id, compartment_id)
'''
return species_source_target, reactions_source_target
|
<reponame>Rijul24/Codechef-Codes<gh_stars>0
#this is map int
#Reads n street segments ("p1 p2 street_name direction") from stdin and counts
#the intersections crossed by exactly two streets where the intersection is not
#an endpoint of either street.
#NOTE(review): streets are stored as sets, which are unordered -- the
#insert-before bookkeeping below has no lasting effect on ordering; it is kept
#verbatim for parity with the original logic.
n = int(input())
strt = {'test': set()}
int_pts = {}


def add_points(name, p1, p2, direct, before_first, before_second):
    """Record intersection points p1 and p2 on street `name`.

    before_first/before_second are the direction letters for which an already
    known p1/p2 means the new point is inserted before the existing one.
    The E/W and N/S branches of the original were byte-identical apart from
    these two letters, so they are factored out here.
    """
    if name not in strt:
        # sets are unordered, so both direction orderings collapse to the same literal
        strt[name] = {p1, p2}
        return
    if p1 not in strt[name] and p2 not in strt[name]:
        # neither endpoint known: the original left the street untouched
        return
    if (p1 in strt[name] and direct == before_first) or (p2 in strt[name] and direct == before_second):
        temp = list(strt[name])
        if p1 in strt[name]:
            # add point 2 before point 1
            temp.insert(temp.index(p1), p2)
        if p2 in strt[name]:
            # add point 1 before point 2 (BUGFIX: was temp.inser6 -> AttributeError)
            temp.insert(temp.index(p2), p1)
        strt[name] = set(temp)
    else:
        # points extend the street: add directly
        strt[name].add(p1)
        strt[name].add(p2)


while n != 0:
    n -= 1
    intpt_1, intpt_2, strt_name, direct = input().split()
    intpt_1 = int(intpt_1)
    intpt_2 = int(intpt_2)
    if direct == 'E' or direct == 'W':
        add_points(strt_name, intpt_1, intpt_2, direct, 'W', 'E')
    if direct == 'N' or direct == 'S':
        add_points(strt_name, intpt_1, intpt_2, direct, 'N', 'S')
    # record which streets pass through each intersection point
    int_pts.setdefault(intpt_1, set()).add(strt_name)
    int_pts.setdefault(intpt_2, set()).add(strt_name)

print(strt)
print()
print(int_pts)

#now we see how many good intersections: exactly two streets pass and the
#intersection is an interior point of both
count = 0
for intersection in int_pts:
    str_passing = int_pts[intersection]
    if len(str_passing) != 2:
        continue
    str_passing = list(str_passing)
    s1 = str_passing[0]
    s2 = str_passing[1]
    int_for_street1 = list(strt[s1])
    int_for_street2 = list(strt[s2])
    if int_for_street1[0] != intersection and int_for_street1[-1] != intersection and int_for_street2[0] != intersection and int_for_street2[-1] != intersection:
        count += 1
print(count)
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf8 -*-
import json
import codecs
JSON_EXT = '.json'
ENCODE_METHOD = 'utf-8'
class NeuromationWriter:
    """Serializes labelled bounding boxes to the Neuromation JSON format."""

    def __init__(self, foldername, filename):
        self.foldername = foldername
        self.filename = filename
        self.boxlist = []        # queued boxes, one dict per addBndBox call
        self.verified = False

    def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult):
        """Queue one labelled bounding box for a later save()."""
        self.boxlist.append({
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
            'name': name,
            'difficult': difficult,
        })

    def save(self, label_to_id, targetFile=None):
        """Group queued boxes by numeric label id and dump them as JSON.

        label_to_id maps a box's name to its numeric id; the output file is
        targetFile, or self.filename plus the module JSON extension.
        """
        grouped = {}
        for entry in self.boxlist:
            key = label_to_id[entry['name']]
            record = {'boundingBox': {'X': entry['xmin'],
                                      'Y': entry['ymin'],
                                      'Width': entry['xmax'] - entry['xmin'],
                                      'Height': entry['ymax'] - entry['ymin']}}
            grouped.setdefault(key, []).append(record)
        payload = [{'id': id, 'data': data} for id, data in grouped.items()]
        if targetFile is None:
            output_path = self.filename + JSON_EXT
        else:
            output_path = targetFile
        with codecs.open(output_path, 'w', encoding=ENCODE_METHOD) as output:
            json.dump(payload, output)
class NeuromationReader:
    """Parses a Neuromation JSON annotation file into shape tuples."""

    def __init__(self, filepath, image, label_to_id, id_to_label):
        # Shape layout:
        # [label, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
        self.shapes = []
        self.verified = True
        img_size = [image.height(), image.width(), 1 if image.isGrayscale() else 3]
        with codecs.open(filepath, 'r', encoding=ENCODE_METHOD) as stream:
            items = json.load(stream)
        for item in items:
            if 'class' in item:
                # Old format: one box per item, coordinates normalized to [0, 1].
                class_id = item['class']
                if class_id not in id_to_label:
                    label_to_id[class_id] = class_id
                    id_to_label[class_id] = class_id
                label = id_to_label[class_id]
                x_min = int(item['boxes']['x_min'] * img_size[1])
                y_min = int(item['boxes']['y_min'] * img_size[0])
                x_max = int(item['boxes']['x_max'] * img_size[1])
                y_max = int(item['boxes']['y_max'] * img_size[0])
                self.addShape(label, x_min, y_min, x_max, y_max, False)
            elif 'id' in item:
                # New format: several absolute-pixel boxes per label id.
                class_id = item['id']
                if class_id not in id_to_label:
                    label_to_id[class_id] = class_id
                    id_to_label[class_id] = class_id
                label = id_to_label[class_id]
                for box in item['data']:
                    x_min = box['boundingBox']['X']
                    y_min = box['boundingBox']['Y']
                    x_max = x_min + box['boundingBox']['Width']
                    y_max = y_min + box['boundingBox']['Height']
                    self.addShape(label, x_min, y_min, x_max, y_max, False)
            else:
                assert False, "Wrong format"

    def getShapes(self):
        """Return the accumulated list of shape tuples."""
        return self.shapes

    def addShape(self, label, xmin, ymin, xmax, ymax, difficult):
        """Append one rectangle, expanded to its four corner points."""
        corners = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
        self.shapes.append((label, corners, None, None, difficult))
|
from hashlib import *
import binascii
from binascii import unhexlify
import hashlib
from tinydb import TinyDB, Query
import random
import IOTtransaction as tr
import chain
import datetime
# Persistent stores: pending transactions (mempool) and the committed chain.
mempool_db = TinyDB('mempool_db.json')
chain_db = TinyDB('chain_db.json')
class block:
    """One block of an IoT blockchain.

    Persistence goes through the module-level TinyDB handles: mempool_db
    holds pending transactions, chain_db the committed chain.
    """
    def __init__(self):
        # Header fields; the two previous-hash values default to hard-coded
        # genesis constants and are overwritten once the chain is non-empty.
        self.version=1
        self.hash=""
        self.height=0
        self.timestamp=0
        self.difficulty=1
        self.previous_hash="8dc11f4b29b5f5081e364892cd430f8aa1b931c15b3001bf3e13d25060096a5e"
        self.previous_64_hash="8dc11f4b29b5f5081e364892cd430f8aa1b931c15b3001bf3e13d25060096a5e"
        self.merkle_root=""
        self.nonce=0
        self.transactions=[]
    #1 calculating merkle root
    def hashIt(self,firstTxHash, secondTxHash):
        """Double-SHA256 of two concatenated hex hashes; returns hex bytes."""
        # Reverse inputs before and after hashing
        # due to big-endian
        unhex_reverse_first = binascii.unhexlify(firstTxHash)[::-1]
        unhex_reverse_second = binascii.unhexlify(secondTxHash)[::-1]
        concat_inputs = unhex_reverse_first + unhex_reverse_second
        first_hash_inputs = hashlib.sha256(concat_inputs).digest()
        final_hash_inputs = hashlib.sha256(first_hash_inputs).digest()
        # reverse final hash and hex result
        return binascii.hexlify(final_hash_inputs[::-1])
    def merkleCalculatorDFS(self,hashList):
        """Recursive Merkle-root computation.

        NOTE(review): after one pairing pass this delegates to the iterative
        merkleCalculator rather than recursing into itself -- confirm that
        is intended (results agree, return types differ at the base case).
        """
        if len(hashList) == 1:
            return hashList[0]
        newHashList = []
        # Process pairs. For odd length, the last is skipped
        for i in range(0, len(hashList) - 1, 2):
            newHashList.append(self.hashIt(hashList[i], hashList[i + 1]))
        if len(hashList) % 2 == 1: # odd, hash last item twice
            newHashList.append(self.hashIt(hashList[-1], hashList[-1]))
        return self.merkleCalculator(newHashList)
    def merkleCalculator(self,hashList):
        """Iterative Merkle root over a list of hex hashes; returns a str.

        An empty list yields a fixed placeholder digest.
        """
        if len(hashList)==0:
            # Placeholder root for an empty transaction list.
            return sha256("mukul".encode('utf-8')).hexdigest()
        if len(hashList) == 1:
            return hashList[0]
        while(len(hashList)>1):
            newHashList = []
            # Process pairs. For odd length, the last is skipped
            for i in range(0, len(hashList) - 1, 2):
                newHashList.append(self.hashIt(hashList[i], hashList[i + 1]))
            if len(hashList) % 2 == 1: # odd, hash last item twice
                newHashList.append(self.hashIt(hashList[-1], hashList[-1]))
            hashList=newHashList
        return hashList[0].decode('utf-8')
    #
    # generate merkle root
    def calculate_merkle_root(self):
        """Set self.merkle_root from the hashes of self.transactions."""
        print("calculating markle root")
        hashstore=[]
        for tnx in self.transactions:
            currentItem = tnx.hash
            hashstore.append(currentItem)
        self.merkle_root=self.merkleCalculator(hashstore)
    #2 setting properties -- plain setters for the header fields.
    def set_height(self,h):
        self.height=h
    def set_version(self,h):
        self.version=h
    def set_hash(self,h):
        self.hash=h
    def set_timestamp(self,t):
        self.timestamp=t
    def set_difficulty(self,d):
        self.difficulty=d
    def set_previous_hash(self,h):
        self.previous_hash=h
    def set_previous_64_hash(self,h):
        self.previous_64_hash=h
    def set_nonce(self,n):
        self.nonce=n
    def add_transactions(self,transaction):
        self.transactions.append(transaction)
    #3 calculate last 64 hash
    def calculate_last_64_hash(self):
        """Set previous_64_hash to a Merkle root over (up to) the last 64
        stored block hashes, each salted with this block's timestamp."""
        ln=len(chain_db)
        end=ln-1
        strt=max(end-64,0)
        all_last_64_hash=[]
        blocks = Query()
        blocks_res=chain_db.search((blocks.height<=end) & (blocks.height>=strt))
        for blk in blocks_res:
            all_last_64_hash.append(blk["hash"]+str(self.timestamp))
        h=self.merkleCalculator(all_last_64_hash)
        self.set_previous_64_hash(h)
    # previous hash calculation
    def calculate_previous_hash(self):
        """Set previous_hash from the chain tip.

        NOTE(review): this copies blk["previous_hash"] (the tip's parent
        hash) rather than blk["hash"] (the tip's own hash), which looks
        like a bug -- confirm which field was intended.
        """
        ln = len(chain_db)
        ln-=1
        blocks = Query()
        blocks_res = chain_db.search(blocks["height"]==ln)
        for blk in blocks_res:
            self.previous_hash=blk["previous_hash"]
    #4 calculate hash
    def littleEndian(self,string):
        """Reverse the byte order of a hex string (two characters per byte)."""
        splited = [str(string)[i:i + 2] for i in range(0, len(str(string)), 2)]
        splited.reverse()
        return "".join(splited)
    def calculate_block_hash(self):
        """Assemble the little-endian header fields and set self.hash to the
        double-SHA256 hex digest of the packed header."""
        version = self.littleEndian('0'*(16-len(hex(self.version)[2:]))+hex(self.version)[2:])
        little_endian_previousHash = self.littleEndian(self.previous_hash)
        little_endian_previous64Hash = self.littleEndian(self.previous_64_hash)
        little_endian_merkleRoot = self.littleEndian(self.merkle_root)
        little_endian_time = self.littleEndian('0'*(16-len(hex(self.timestamp)[2:]))+hex(self.timestamp)[2:])
        little_endian_difficultyBits = self.littleEndian('0'*(16-len(hex(self.difficulty)[2:]))+hex(self.difficulty)[2:])
        little_endian_nonce = self.littleEndian('0'*(16-len(hex(self.nonce)[2:]))+hex(self.nonce)[2:])
        # print(little_endian_merkleRoot[1:-1])
        header = version + little_endian_previousHash + little_endian_previous64Hash[1:-1] + little_endian_merkleRoot[1:-1] + little_endian_time + little_endian_difficultyBits + little_endian_nonce
        # print(header)
        if len(header)%2==1:
            # unhexlify requires an even number of hex digits
            header='0'+header
        header = unhexlify(header)
        CalculatedHash = sha256(sha256(header).digest()).hexdigest()
        self.set_hash(CalculatedHash)
    #mine a block from available transaction in mempool
    def mine_block(self):
        """Build a candidate block, then brute-force the nonce until the hash
        is below the 2**240 target; return 1 on success, 0 on failure."""
        self.construct_block()
        for i in range(100000):  # demo cap on nonce attempts
            self.nonce=i
            self.calculate_block_hash()
            if int('0x'+self.hash,16)<(2**240):
                print("block is mined successfully!")
                print(self.hash)
                return 1
        #if int('0x' + self.hash,16)>=(2 ** 254):
        print("block is mined failed!")
        return 0
    #add to block chain
    #get height
    def get_height(self):
        """Return the height for the next block: last stored height + 1,
        or 0 for an empty chain."""
        height=0
        for blck in chain_db:
            height=blck['height']
            height+=1
        return height
    #construct block
    def construct_block(self):
        """Pull a batch of transactions from the mempool, stamp time/height,
        then fill in parent hashes, merkle root and the block hash."""
        n=10
        if len(mempool_db)<n:
            n=len(mempool_db)
        else:
            # Random batch size in [n//2, n).
            n=random.randrange(n//2,n)
        for tran in mempool_db:
            transaction=tr.IOTtransaction(tran["hash"],tran["timestamp"])
            transaction.add_json_transactions(tran["unit_transactions"])
            self.transactions.append(transaction)
            n-=1
            if n==0:
                break
        now_time = datetime.datetime.now()
        now_timestamp = int(now_time.timestamp())
        self.timestamp=now_timestamp
        height=self.get_height()#len(chain_db)
        self.height=height
        # print("height:",height)
        if(height>0):
            # Non-genesis block: link to the existing chain.
            self.calculate_previous_hash()
            self.calculate_last_64_hash()
        self.calculate_merkle_root()
        self.calculate_block_hash()
|
# Logical Agents
"""
Covers Propositional and First-Order Logic. First we have four
important data types:
    KB            An abstract class that holds a knowledge base of logical expressions
    KB_Agent      An abstract class that subclasses agentes.Agent
    Expr          A logical expression, imported from utils.py
    substitution  Implemented as a dictionary of key:value pairs, {x: 1, y: x}
Be careful: some functions take an Expr as argument, and some take a KB.
Logical expressions can be created with Expr or expr, imported from utils,
which add the ability to write a string using the connectives ==>, <==, <=>
or <= / =>. But be careful with precedence. See logic.ipynb for examples.
Next, we implement various functions for doing logical inference:
    pl_true          Evaluate a propositional logical sentence in a model
    tt_entails       Say whether a statement is entailed by a KB
    pl_resolution    Do resolution on propositional sentences
    dpll_satisfiable See if a propositional sentence is satisfiable
    WalkSAT          Try to find a solution for a set of clauses
And a few other functions:
    to_cnf           Convert to conjunctive normal form
    unify            Do unification of two FOL sentences
    diff, simp       Symbolic differentiation and simplification
"""
from utils import (
removeall, unique, first, argmax, probability,
isnumber, issequence, Symbol, Expr, expr, subexpressions
)
import agentes
import itertools
import random
from collections import defaultdict
# ______________________________________________________________________________
class KB:
    """Abstract base for a knowledge base to which you can tell and ask
    logical sentences. Concrete subclasses implement the storage."""

    def __init__(self, sentence=None):
        raise NotImplementedError

    def tell(self, sentence):
        """Add the sentence to the KB."""
        raise NotImplementedError

    def ask(self, query):
        """Return a substitution that makes the query true, else False."""
        return first(self.ask_generator(query), default=False)

    def ask_generator(self, query):
        """Yield every substitution that makes the query true."""
        raise NotImplementedError

    def retract(self, sentence):
        """Remove the sentence from the KB."""
        raise NotImplementedError
class PropKB(KB):
    """A KB for propositional logic, stored as a list of CNF clauses."""

    def __init__(self, sentence=None):
        self.clauses = []
        if sentence:
            self.tell(sentence)

    def tell(self, sentence):
        """Convert the sentence to CNF and add its clauses to the KB."""
        self.clauses.extend(conjuncts(to_cnf(sentence)))

    def ask_generator(self, query):
        """Yield the empty substitution {} if the KB entails query."""
        if tt_entails(Expr('&', *self.clauses), query):
            yield {}

    def ask_if_true(self, query):
        """Return True if the KB entails query, else False."""
        return any(True for _ in self.ask_generator(query))

    def retract(self, sentence):
        """Remove the sentence's CNF clauses from the KB."""
        for clause in conjuncts(to_cnf(sentence)):
            if clause in self.clauses:
                self.clauses.remove(clause)
# ______________________________________________________________________________
def KB_AgentProgram(KB):
    """A generic logical knowledge-based agent program.

    Returns a percept -> action function closed over the knowledge base
    ``KB``. The helpers are plain closures, not methods: the original
    declared them with a ``self`` parameter while calling them without
    one, so every call raised TypeError. The bogus ``self`` is removed.
    """
    steps = itertools.count()

    def program(percept):
        # One TELL-ASK-TELL cycle per percept, stamped with step number t.
        t = next(steps)
        KB.tell(make_percept_sentence(percept, t))
        action = KB.ask(make_action_query(t))
        KB.tell(make_action_sentence(action, t))
        return action

    def make_percept_sentence(percept, t):
        return Expr("Percept")(percept, t)

    def make_action_query(t):
        return expr("ShouldDo(action, {})".format(t))

    def make_action_sentence(action, t):
        return Expr("Did")(action[expr('action')], t)

    return program
def is_symbol(s):
    """A string s is a symbol if it begins with an alphabetic character."""
    if not isinstance(s, str):
        return False
    return s[:1].isalpha()
def is_var_symbol(s):
    """A logic-variable symbol is an initial-lowercase alphabetic string."""
    return isinstance(s, str) and s[:1].isalpha() and s[0].islower()
def is_prop_symbol(s):
    """A proposition-logic symbol is an initial-uppercase alphabetic string."""
    return isinstance(s, str) and s[:1].isalpha() and s[0].isupper()
def variables(s):
    """Return the set of variables occurring in expression s."""
    return set(filter(is_variable, subexpressions(s)))
def is_definite_clause(s):
    """True for an Expr that is a definite clause: either a lone symbol, or
    (symbol & ... & symbol) ==> symbol."""
    if is_symbol(s.op):
        return True
    if s.op != '==>':
        return False
    antecedent, consequent = s.args
    if not is_symbol(consequent.op):
        return False
    return all(is_symbol(conjunct.op) for conjunct in conjuncts(antecedent))
def parse_definite_clause(s):
    """Split a definite clause into (list of antecedent conjuncts, consequent)."""
    assert is_definite_clause(s)
    if is_symbol(s.op):
        # A bare fact has no antecedents.
        return [], s
    antecedent, consequent = s.args
    return conjuncts(antecedent), consequent
# Useful constant Exprs used in examples and code:
A, B, C, D, E, F, G, P, Q, x, y, z = map(Expr, 'ABCDEFGPQxyz')
# ______________________________________________________________________________
def tt_entails(kb, alpha):
    """Does kb entail the propositional sentence alpha? Uses truth tables.
    Note that 'kb' must be an Expr which is a conjunction of clauses.
    >>> tt_entails(expr('P & Q'), expr('Q'))
    True
    """
    # alpha must be variable-free: truth tables only cover proposition symbols.
    assert not variables(alpha)
    return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {})
def tt_check_all(kb, alpha, symbols, model):
    """Auxiliary routine to implement tt_entails: enumerate every assignment
    of the remaining symbols and require alpha wherever kb holds."""
    if symbols:
        sym, remaining = symbols[0], symbols[1:]
        return (tt_check_all(kb, alpha, remaining, extend(model, sym, True)) and
                tt_check_all(kb, alpha, remaining, extend(model, sym, False)))
    if not pl_true(kb, model):
        # Model falsifies the KB, so it cannot be a counterexample.
        return True
    result = pl_true(alpha, model)
    assert result in (True, False)
    return result
def prop_symbols(x):
    """Return a list of all propositional symbols in x (deduplicated)."""
    if not isinstance(x, Expr):
        return []
    if is_prop_symbol(x.op):
        return [x]
    collected = set()
    for arg in x.args:
        collected.update(prop_symbols(arg))
    return list(collected)
def tt_true(s):
    """Is the propositional sentence a tautology? Accepts an Expr or a
    string to be parsed by expr()."""
    s = expr(s)
    return tt_entails(True, s)
def pl_true(exp, model={}):
    """Return True if the propositional logic expression is true in the
    model, and False if it is false. If the model does not specify a value
    for every proposition, this may return None to indicate 'not obvious';
    that can happen even when the expression is tautological.

    NOTE(review): the mutable default {} is never mutated here, so it is
    harmless, but model=None with an explicit default would be safer.
    """
    if exp in (True, False):
        return exp
    op, args = exp.op, exp.args
    if is_prop_symbol(op):
        # Unassigned symbols yield None ("unknown").
        return model.get(exp)
    elif op == '~':
        p = pl_true(args[0], model)
        if p is None:
            return None
        else:
            return not p
    elif op == '|':
        # Three-valued OR: True dominates, else None if any arg is unknown.
        result = False
        for arg in args:
            p = pl_true(arg, model)
            if p is True:
                return True
            if p is None:
                result = None
        return result
    elif op == '&':
        # Three-valued AND: False dominates, else None if any arg is unknown.
        result = True
        for arg in args:
            p = pl_true(arg, model)
            if p is False:
                return False
            if p is None:
                result = None
        return result
    # Remaining operators are binary.
    p, q = args
    if op == '==>':
        return pl_true(~p | q, model)
    elif op == '<==':
        return pl_true(p | ~q, model)
    pt = pl_true(p, model)
    if pt is None:
        return None
    qt = pl_true(q, model)
    if qt is None:
        return None
    if op == '<=>':
        return pt == qt
    elif op == '^':  # xor, i.e. 'not equivalent'
        return pt != qt
    else:
        raise ValueError("Operador ilegal na expressão lógica" + str(exp))
# ______________________________________________________________________________
# Converter em Forma Normal Conjuntiva (CNF)
def to_cnf(s):
    """Convert a propositional logical sentence to conjunctive normal form,
    i.e. to the shape ((A | ~B | ...) & (B | C | ...) & ...).
    >>> to_cnf('~(B | C)')
    (~B & ~C)
    """
    s = expr(s)
    if isinstance(s, str):
        # NOTE(review): expr() above presumably already parsed any string,
        # which would make this re-parse dead code -- confirm.
        s = expr(s)
    s = eliminate_implications(s)   # only &, |, ~ remain
    s = move_not_inwards(s)         # push ~ down to the literals
    return distribute_and_over_or(s)  # distribute | over & to reach CNF
def eliminate_implications(s):
    """Change implications into an equivalent form using only &, |, and ~
    as logical operators."""
    s = expr(s)
    if not s.args or is_symbol(s.op):
        # Atoms are unchanged.
        return s
    args = list(map(eliminate_implications, s.args))
    a, b = args[0], args[-1]
    if s.op == '==>':
        return b | ~a
    elif s.op == '<==':
        return a | ~b
    elif s.op == '<=>':
        return (a | ~b) & (b | ~a)
    elif s.op == '^':
        assert len(args) == 2
        return (a & ~b) | (~a & b)
    else:
        assert s.op in ('&', '|', '~')
        return Expr(s.op, *args)
def move_not_inwards(s):
    """Rewrite sentence s by moving the negation sign inward (De Morgan).
    >>> move_not_inwards(~(A | B))
    (~A & ~B)"""
    s = expr(s)
    if s.op == '~':
        def NOT(b):
            return move_not_inwards(~b)
        a = s.args[0]
        if a.op == '~':
            return move_not_inwards(a.args[0])  # ~~A ==> A
        if a.op == '&':
            return associate('|', list(map(NOT, a.args)))
        if a.op == '|':
            return associate('&', list(map(NOT, a.args)))
        return s
    elif is_symbol(s.op) or not s.args:
        return s
    else:
        # Recurse into each argument of a compound sentence.
        return Expr(s.op, *list(map(move_not_inwards, s.args)))
def distribute_and_over_or(s):
    """Given a sentence s consisting of conjunctions and disjunctions of
    literals, return an equivalent sentence in CNF.
    >>> distribute_and_over_or((A & B) | C)
    ((A | C) & (B | C))
    """
    s = expr(s)
    if s.op == '|':
        # Flatten nested |s first.
        s = associate('|', s.args)
        if s.op != '|':
            return distribute_and_over_or(s)
        if len(s.args) == 0:
            return False
        if len(s.args) == 1:
            return distribute_and_over_or(s.args[0])
        # Pick one conjunction among the disjuncts and distribute over it.
        conj = first(arg for arg in s.args if arg.op == '&')
        if not conj:
            return s
        others = [a for a in s.args if a is not conj]
        rest = associate('|', others)
        return associate('&', [distribute_and_over_or(c | rest)
                               for c in conj.args])
    elif s.op == '&':
        return associate('&', list(map(distribute_and_over_or, s.args)))
    else:
        return s
def associate(op, args):
    """Given an associative op, return an expression with the same meaning
    as Expr(op, *args), i.e. with nested instances of the same op promoted
    to the top level.
    >>> associate('&', [(A&B),(B|C),(B&C)])
    (A & B & (B | C) & B & C)
    >>> associate('|', [A|(B|(C|(A&B)))])
    (A | B | C | (A & B))
    """
    flat = dissociate(op, args)
    if not flat:
        # Empty argument list collapses to the operator's identity element.
        return _op_identity[op]
    if len(flat) == 1:
        return flat[0]
    return Expr(op, *flat)
# Identity element per associative operator; returned by associate([]).
_op_identity = {'&': True, '|': False, '+': 0, '*': 1}
def dissociate(op, args):
    """Given an associative op, return a flat result list such that
    Expr(op, *result) means the same as Expr(op, *args)."""
    result = []
    pending = list(args)
    while pending:
        node = pending.pop(0)
        if node.op == op:
            # Splice nested same-op arguments back in, preserving order.
            pending[:0] = list(node.args)
        else:
            result.append(node)
    return result
def conjuncts(s):
    """Return the list of conjuncts in sentence s (flattening nested &)."""
    return dissociate('&', [s])
def disjuncts(s):
    """Return the list of disjuncts in sentence s (flattening nested |)."""
    return dissociate('|', [s])
# ______________________________________________________________________________
def pl_resolution(KB, alpha):
    """Propositional-logic resolution: say whether alpha follows from KB."""
    # Refutation: assume ~alpha and look for the empty clause.
    clauses = KB.clauses + conjuncts(to_cnf(~alpha))
    new = set()
    while True:
        n = len(clauses)
        pairs = [(clauses[i], clauses[j])
                 for i in range(n) for j in range(i+1, n)]
        for (ci, cj) in pairs:
            resolvents = pl_resolve(ci, cj)
            if False in resolvents:
                # Derived the empty clause: KB & ~alpha is unsatisfiable.
                return True
            new = new.union(set(resolvents))
        if new.issubset(set(clauses)):
            # Fixpoint reached without contradiction: not entailed.
            return False
        for c in new:
            if c not in clauses:
                clauses.append(c)
def pl_resolve(ci, cj):
    """Return all clauses obtainable by resolving clauses ci and cj on a
    complementary pair of literals."""
    clauses = []
    for di in disjuncts(ci):
        for dj in disjuncts(cj):
            if di == ~dj or ~di == dj:
                # Drop the complementary pair, merge the remaining literals.
                dnew = unique(removeall(di, disjuncts(ci)) +
                              removeall(dj, disjuncts(cj)))
                clauses.append(associate('|', dnew))
    return clauses
# ______________________________________________________________________________
class PropDefiniteKB(PropKB):
    """A KB of propositional definite clauses."""

    def tell(self, sentence):
        """Add a definite clause to this KB."""
        assert is_definite_clause(sentence), "Deve ser cláusula definitiva"
        self.clauses.append(sentence)

    def ask_generator(self, query):
        """Yield the empty substitution if the KB entails query; else nothing."""
        # BUG FIX: pl_fc_entails expects the KB object itself (it reads both
        # KB.clauses and KB.clauses_with_premise); the original passed
        # self.clauses, a plain list, which raised AttributeError.
        if pl_fc_entails(self, query):
            yield {}

    def retract(self, sentence):
        """Remove a clause from the KB."""
        self.clauses.remove(sentence)

    def clauses_with_premise(self, p):
        """Return all implications whose premise contains the symbol p."""
        return [c for c in self.clauses
                if c.op == '==>' and p in conjuncts(c.args[0])]
def pl_fc_entails(KB, q):
    """Use forward chaining to see if a PropDefiniteKB entails symbol q."""
    # Remaining unsatisfied premises for each implication clause.
    count = {c: len(conjuncts(c.args[0]))
             for c in KB.clauses
             if c.op == '==>'}
    inferred = defaultdict(bool)
    # Start from the known facts (bare proposition symbols).
    agenda = [s for s in KB.clauses if is_prop_symbol(s.op)]
    while agenda:
        p = agenda.pop()
        if p == q:
            return True
        if not inferred[p]:
            inferred[p] = True
            # Each implication that mentions p loses one pending premise;
            # when none remain, its conclusion becomes a new fact.
            for c in KB.clauses_with_premise(p):
                count[c] -= 1
                if count[c] == 0:
                    agenda.append(c.args[1])
    return False
# Example sentence and Horn-clause KB used by tests and examples.
wumpus_world_inference = expr("(B11 <=> (P12 | P21)) & ~B11")
horn_clauses_KB = PropDefiniteKB()
for s in "P==>Q; (L&M)==>P; (B&L)==>M; (A&P)==>L; (A&B)==>L; A;B".split(';'):
    horn_clauses_KB.tell(expr(s))
# ______________________________________________________________________________
def dpll_satisfiable(s):
    """Check satisfiability of a propositional sentence: return a model
    (dict of symbol -> bool) when satisfiable, otherwise False."""
    clauses = conjuncts(to_cnf(s))
    symbols = prop_symbols(s)
    return dpll(clauses, symbols, {})
def dpll(clauses, symbols, model):
    """See if the clauses are true in a partial model (DPLL backtracking)."""
    unknown_clauses = []  # clauses whose truth value is not yet decided
    for c in clauses:
        val = pl_true(c, model)
        if val is False:
            # A falsified clause kills this branch.
            return False
        if val is not True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        return model
    # Pure-literal and unit-clause heuristics before branching.
    P, value = find_pure_symbol(symbols, unknown_clauses)
    if P:
        return dpll(clauses, removeall(P, symbols), extend(model, P, value))
    P, value = find_unit_clause(clauses, model)
    if P:
        return dpll(clauses, removeall(P, symbols), extend(model, P, value))
    if not symbols:
        # NOTE(review): raising TypeError here is surprising -- confirm.
        raise TypeError("Argument should be of the type Expr.")
    # Branch on the first remaining symbol.
    P, symbols = symbols[0], symbols[1:]
    return (dpll(clauses, symbols, extend(model, P, True)) or
            dpll(clauses, symbols, extend(model, P, False)))
def find_pure_symbol(symbols, clauses):
    """Find a symbol that occurs with only one polarity across clauses.
    Returns (symbol, polarity) or (None, None) when there is none."""
    for s in symbols:
        found_pos, found_neg = False, False
        for c in clauses:
            if not found_pos and s in disjuncts(c):
                found_pos = True
            if not found_neg and ~s in disjuncts(c):
                found_neg = True
        if found_pos != found_neg:
            # Exactly one polarity seen: assign the polarity that was found.
            return s, found_pos
    return None, None
def find_unit_clause(clauses, model):
    """Find a forced assignment from a clause with only one literal still
    unbound in the model. Returns (symbol, value) or (None, None)."""
    for clause in clauses:
        P, value = unit_clause_assign(clause, model)
        if P:
            return P, value
    return None, None
def unit_clause_assign(clause, model):
    """Return (symbol, value) if clause is a unit clause under model,
    otherwise (None, None)."""
    P, value = None, None
    for literal in disjuncts(clause):
        sym, positive = inspect_literal(literal)
        if sym in model:
            if model[sym] == positive:
                return None, None  # clause already satisfied
        elif P:
            return None, None  # more than one unbound literal
        else:
            P, value = sym, positive
    return P, value
def inspect_literal(literal):
    """Return (symbol, is_positive) for a possibly negated literal."""
    negated = literal.op == '~'
    return (literal.args[0], False) if negated else (literal, True)
# ______________________________________________________________________________
def WalkSAT(clauses, p=0.5, max_flips=10000):
    """Try to find a satisfying model by randomized local search: flip one
    symbol per iteration, randomly with probability p, otherwise greedily.
    Returns a model dict, or None if max_flips is exhausted."""
    symbols = set(sym for clause in clauses for sym in prop_symbols(clause))
    # Start from a uniformly random assignment.
    model = {s: random.choice([True, False]) for s in symbols}
    for i in range(max_flips):
        satisfied, unsatisfied = [], []
        for clause in clauses:
            (satisfied if pl_true(clause, model) else unsatisfied).append(clause)
        if not unsatisfied:
            # Every clause satisfied: model found.
            return model
        clause = random.choice(unsatisfied)
        if probability(p):
            sym = random.choice(prop_symbols(clause))
        else:
            # Greedy step: flip the symbol maximizing satisfied clauses.
            def sat_count(sym):
                model[sym] = not model[sym]
                count = len([clause for clause in clauses if pl_true(clause, model)])
                model[sym] = not model[sym]
                return count
            sym = argmax(prop_symbols(clause), key=sat_count)
        model[sym] = not model[sym]
    return None
# ______________________________________________________________________________
class HybridWumpusAgent(agentes.Agent):
    """An agent for the wumpus world that does logical inference.
    [Figure 7.20] -- skeleton only: construction is not implemented."""

    def __init__(self):
        raise NotImplementedError
def plan_route(current, goals, allowed):
    """Plan a route from current to a goal via allowed squares
    (not implemented)."""
    raise NotImplementedError
# ______________________________________________________________________________
def SAT_plan(init, transition, goal, t_max, SAT_solver=dpll_satisfiable):
    """Search for a plan of fewer than t_max steps by encoding the planning
    problem as propositional satisfiability and calling SAT_solver.
    Returns a list of actions, or None when no such plan exists."""

    def translate_to_SAT(init, transition, goal, time):
        # Build the CNF encoding for a fixed horizon `time`; fills the
        # enclosing state_sym / action_sym symbol tables (closures over the
        # loop-local dicts created below).
        clauses = []
        states = [state for state in transition]
        # One propositional symbol per (state, time) pair.
        state_counter = itertools.count()
        for s in states:
            for t in range(time+1):
                state_sym[s, t] = Expr("State_{}".format(next(state_counter)))
        # Initial and goal state axioms.
        clauses.append(state_sym[init, 0])
        clauses.append(state_sym[goal, time])
        # One symbol per possible (state, action, time) transition; taking
        # an action implies both its precondition and its effect.
        transition_counter = itertools.count()
        for s in states:
            for action in transition[s]:
                s_ = transition[s][action]
                for t in range(time):
                    action_sym[s, action, t] = Expr("Transition_{}".format(next(transition_counter)))
                    clauses.append(action_sym[s, action, t] |'==>'| state_sym[s, t])
                    clauses.append(action_sym[s, action, t] |'==>'| state_sym[s_, t + 1])
        # Exactly one state holds at each time step.
        for t in range(time+1):
            clauses.append(associate('|', [state_sym[s, t] for s in states]))
            for s in states:
                for s_ in states[states.index(s) + 1:]:
                    clauses.append((~state_sym[s, t]) | (~state_sym[s_, t]))
        # Exactly one action is taken at each time step.
        for t in range(time):
            transitions_t = [tr for tr in action_sym if tr[2] == t]
            clauses.append(associate('|', [action_sym[tr] for tr in transitions_t]))
            for tr in transitions_t:
                for tr_ in transitions_t[transitions_t.index(tr) + 1 :]:
                    clauses.append(~action_sym[tr] | ~action_sym[tr_])
        # Combine all clauses into one CNF sentence.
        return associate('&', clauses)

    def extract_solution(model):
        # Read the chosen transitions out of a satisfying model, time-ordered.
        true_transitions = [t for t in action_sym if model[action_sym[t]]]
        true_transitions.sort(key=lambda x: x[2])
        return [action for s, action, time in true_transitions]

    # Iteratively deepen the horizon until an encoding is satisfiable.
    for t in range(t_max):
        state_sym = {}
        action_sym = {}
        cnf = translate_to_SAT(init, transition, goal, t)
        model = SAT_solver(cnf)
        if model is not False:
            return extract_solution(model)
    return None
# ______________________________________________________________________________
def unify(x, y, s):
    """Unify expressions x, y with substitution s; return a substitution
    that makes x and y equal, or None when they cannot unify. x and y may
    be variables, constants, sequences, or Exprs."""
    if s is None:
        return None
    elif x == y:
        return s
    elif is_variable(x):
        return unify_var(x, y, s)
    elif is_variable(y):
        return unify_var(y, x, s)
    elif isinstance(x, Expr) and isinstance(y, Expr):
        # Unify operators first, then argument lists under the new subst.
        return unify(x.args, y.args, unify(x.op, y.op, s))
    elif isinstance(x, str) or isinstance(y, str):
        return None
    elif issequence(x) and issequence(y) and len(x) == len(y):
        if not x:
            return s
        # Element-wise unification, threading the substitution through.
        return unify(x[1:], y[1:], unify(x[0], y[0], s))
    else:
        return None
def is_variable(x):
    """A variable is an Expr with no args and a lowercase symbol as the op."""
    # e.g. Expr('x') is a variable; Expr('X') and Expr('F', x) are not.
    return isinstance(x, Expr) and not x.args and x.op[0].islower()
def unify_var(var, x, s):
    """Unify variable var against x under substitution s (with occur check)."""
    if var in s:
        # var is already bound: unify its binding with x instead.
        return unify(s[var], x, s)
    elif occur_check(var, x, s):
        return None  # binding would create an infinite (cyclic) term
    else:
        return extend(s, var, x)
def occur_check(var, x, s):
    """Return true when variable var occurs anywhere inside x, following
    bindings in substitution s."""
    if var == x:
        return True
    elif is_variable(x) and x in s:
        # Chase the binding of a bound variable.
        return occur_check(var, s[x], s)
    elif isinstance(x, Expr):
        return (occur_check(var, x.op, s) or
                occur_check(var, x.args, s))
    elif isinstance(x, (list, tuple)):
        # first() yields the first truthy element result, or None if absent.
        return first(e for e in x if occur_check(var, e, s))
    else:
        return False
def extend(s, var, val):
    """Copy the substitution s and extend it by setting var to val;
    return the copy (s itself is left untouched)."""
    extended = dict(s)
    extended[var] = val
    return extended
def subst(s, x):
    """Substitute the substitution s into the expression x, recursively."""
    if isinstance(x, list):
        return [subst(s, xi) for xi in x]
    elif isinstance(x, tuple):
        return tuple([subst(s, xi) for xi in x])
    elif not isinstance(x, Expr):
        # Non-expressions pass through unchanged.
        return x
    elif is_var_symbol(x.op):
        # A variable maps to its binding, or stays itself when unbound.
        return s.get(x, x)
    else:
        return Expr(x.op, *[subst(s, arg) for arg in x.args])
def fol_fc_ask(KB, alpha):
    """First-order forward chaining (not implemented)."""
    raise NotImplementedError
def standardize_variables(sentence, dic=None):
    """Replace all the variables in sentence with new, unique variables.
    dic maps originals to their replacements within one call tree."""
    if dic is None:
        dic = {}
    if not isinstance(sentence, Expr):
        return sentence
    elif is_var_symbol(sentence.op):
        if sentence in dic:
            return dic[sentence]
        else:
            # Fresh variable name drawn from the shared counter below.
            v = Expr('v_{}'.format(next(standardize_variables.counter)))
            dic[sentence] = v
            return v
    else:
        return Expr(sentence.op,
                    *[standardize_variables(a, dic) for a in sentence.args])

# Function-attribute counter shared across all calls.
standardize_variables.counter = itertools.count()
# ______________________________________________________________________________
class FolKB(KB):
    """A knowledge base of first-order definite clauses."""

    def __init__(self, initial_clauses=[]):
        # NOTE(review): the mutable default is shared across calls; it is
        # safe only because it is never mutated (tell appends to
        # self.clauses, not to initial_clauses).
        self.clauses = []  # inefficient: no indexing
        for clause in initial_clauses:
            self.tell(clause)

    def tell(self, sentence):
        """Add a definite clause; reject any other sentence."""
        if is_definite_clause(sentence):
            self.clauses.append(sentence)
        else:
            raise Exception("Not a definite clause: {}".format(sentence))

    def ask_generator(self, query):
        """Yield substitutions proving query by backward chaining."""
        return fol_bc_ask(self, query)

    def retract(self, sentence):
        """Remove one clause from the KB."""
        self.clauses.remove(sentence)

    def fetch_rules_for_goal(self, goal):
        # No indexing: every clause is a candidate for every goal.
        return self.clauses
# Example first-order KBs used by tests and examples.
test_kb = FolKB(
    map(expr, ['Farmer(Mac)',
               'Rabbit(Pete)',
               'Mother(MrsMac, Mac)',
               'Mother(MrsRabbit, Pete)',
               '(Rabbit(r) & Farmer(f)) ==> Hates(f, r)',
               '(Mother(m, c)) ==> Loves(m, c)',
               '(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)',
               '(Farmer(f)) ==> Human(f)',
               '(Mother(m, h) & Human(h)) ==> Human(m)'
               ]))

# The "crime" KB: proving that West is a criminal.
crime_kb = FolKB(
    map(expr,
        ['(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)',  # noqa
         'Owns(Nono, M1)',
         'Missile(M1)',
         '(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)',
         'Missile(x) ==> Weapon(x)',
         'Enemy(x, America) ==> Hostile(x)',
         'American(West)',
         'Enemy(Nono, America)'
         ]))
def fol_bc_ask(KB, query):
    """Backward chaining: yield substitutions making query follow from KB."""
    return fol_bc_or(KB, query, {})
def fol_bc_or(KB, goal, theta):
    """Yield substitutions extending theta that prove goal via some rule."""
    for rule in KB.fetch_rules_for_goal(goal):
        # Standardize apart so rule variables cannot clash with the goal's.
        lhs, rhs = parse_definite_clause(standardize_variables(rule))
        for theta1 in fol_bc_and(KB, lhs, unify(rhs, goal, theta)):
            yield theta1
def fol_bc_and(KB, goals, theta):
    """Prove every goal in sequence, threading the substitution through."""
    if theta is None:
        pass  # unification failed upstream: no solutions
    elif not goals:
        yield theta
    else:
        # NOTE: this local `first` shadows utils.first inside this function.
        first, rest = goals[0], goals[1:]
        for theta1 in fol_bc_or(KB, subst(theta, first), theta):
            for theta2 in fol_bc_and(KB, rest, theta1):
                yield theta2
def diff(y, x):
    """Return the symbolic derivative of y with respect to x as an Expr
    (use simp() or d() to simplify the result)."""
    if y == x:
        return 1
    elif not y.args:
        # Constant with respect to x.
        return 0
    else:
        u, op, v = y.args[0], y.op, y.args[-1]
        if op == '+':
            return diff(u, x) + diff(v, x)
        elif op == '-' and len(y.args) == 1:
            return -diff(u, x)
        elif op == '-':
            return diff(u, x) - diff(v, x)
        elif op == '*':
            # Product rule.
            return u * diff(v, x) + v * diff(u, x)
        elif op == '/':
            # Quotient rule.
            return (v * diff(u, x) - u * diff(v, x)) / (v * v)
        elif op == '**' and isnumber(x.op):
            # NOTE(review): this guard inspects x (the differentiation
            # variable) rather than the exponent v -- it looks like it was
            # meant to test v; confirm before relying on this branch.
            return (v * u ** (v - 1) * diff(u, x))
        elif op == '**':
            # General power rule with logarithmic term for the exponent.
            return (v * u ** (v - 1) * diff(u, x) +
                    u ** v * Expr('log')(u) * diff(v, x))
        elif op == 'log':
            return diff(u, x) / u
        else:
            raise ValueError("Unknown op: {} in diff({}, {})".format(op, y, x))
def simp(x):
    """Simplify the expression x: constant-identity folding for +, -, *, /,
    ** and log (not a full algebraic simplifier)."""
    if isnumber(x) or not x.args:
        return x
    # Simplify bottom-up, then try the identities for the top operator.
    args = list(map(simp, x.args))
    u, op, v = args[0], x.op, args[-1]
    if op == '+':
        if v == 0:
            return u
        if u == 0:
            return v
        if u == v:
            return 2 * u
        if u == -v or v == -u:
            return 0
    elif op == '-' and len(args) == 1:
        if u.op == '-' and len(u.args) == 1:
            return u.args[0]  # --y ==> y
    elif op == '-':
        if v == 0:
            return u
        if u == 0:
            return -v
        if u == v:
            return 0
        if u == -v or v == -u:
            return 0
    elif op == '*':
        if u == 0 or v == 0:
            return 0
        if u == 1:
            return v
        if v == 1:
            return u
        if u == v:
            return u ** 2
    elif op == '/':
        if u == 0:
            return 0
        if v == 0:
            # Division by a literal zero has no numeric value.
            return Expr('Undefined')
        if u == v:
            return 1
        if u == -v or v == -u:
            return 0
    elif op == '**':
        if u == 0:
            return 0
        if v == 0:
            return 1
        if u == 1:
            return 1
        if v == 1:
            return u
    elif op == 'log':
        if u == 1:
            return 0
    else:
        raise ValueError("Unknown op: " + op)
    # No identity applied: rebuild with the simplified arguments.
    return Expr(op, *args)
def d(y, x):
    """Differentiate and then simplify (convenience wrapper for diff + simp)."""
    return simp(diff(y, x))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : tensor_utils.py
# Author : <NAME>, <NAME>
# Email : <EMAIL>, <EMAIL>
# Date : 09.08.2019
# Last Modified Date: 02.10.2019
# Last Modified By : Chi Han, Jiayuan Mao
#
# This file is part of the VCML codebase
# Distributed under MIT license
#
# pytorch/numpy utilities
import numpy as np
import torch
import torch.nn.functional as F
from pprint import pprint
def to_tensor(x):
    """Convert x to a torch tensor, preserving the int/float distinction.

    Accepts tensors (returned as-is), lists of float/int, numpy arrays of
    float/int dtype, and python / numpy scalars.  Lists whose first element
    is neither float nor int are returned unchanged (legacy behavior).
    Raises Exception for unconvertible inputs.
    """
    if isinstance(x, torch.Tensor):
        return x
    elif isinstance(x, list):
        if not x:
            # Empty list: previously crashed on x[0]; return an empty
            # float tensor as the least surprising default.
            return torch.Tensor(x)
        if isinstance(x[0], float):
            return torch.Tensor(x)
        elif isinstance(x[0], int):
            return torch.LongTensor(x)
        else:
            # legacy behavior: pass through unrecognized element types
            return x
    elif isinstance(x, np.ndarray):
        # 'd'/'f' = float64/float32, 'l'/'b' = long/int8 dtype chars
        if x.dtype.char in ['d', 'f']:
            return torch.Tensor(x)
        elif x.dtype.char in ['l', 'b']:
            return torch.LongTensor(x)
        else:
            raise Exception('not convertable')
    elif isinstance(x, (int, float)) or np.isscalar(x):
        return torch.tensor(x)
    else:
        raise Exception('not convertable')
def detach(tensor):
    """Convert a torch tensor to a numpy array on the host.

    CUDA tensors are moved to CPU and gradient-tracking tensors are
    detached first.  Non-tensor inputs pass through unchanged.
    """
    if not isinstance(tensor, torch.Tensor):
        return tensor
    result = tensor
    if result.device.type == 'cuda':
        result = result.cpu()
    if result.requires_grad:
        result = result.detach()
    return result.numpy()
def matmul(*mats):
    """Chain-multiply the given matrices left to right.

    Dispatches to torch.matmul or np.matmul depending on the type of the
    running product (which starts as the first argument).
    """
    product = mats[0]
    for factor in mats[1:]:
        multiply = torch.matmul if isinstance(product, torch.Tensor) else np.matmul
        product = multiply(product, factor)
    return product
def to_numpy(x):
    """Convert x to a numpy array.

    Supports numpy arrays (returned as-is), lists, and torch tensors.
    Raises Exception for unsupported types instead of silently returning
    None (consistent with to_normalized).
    """
    if isinstance(x, np.ndarray):
        return x
    elif isinstance(x, list):
        return np.array(x)
    elif isinstance(x, torch.Tensor):
        return x.cpu().detach().numpy()
    elif isinstance(x, torch.autograd.Variable):
        # Legacy branch: on modern torch, Variable is Tensor and the branch
        # above already matches; kept for backward compatibility.
        return x.data.cpu().numpy()
    else:
        raise Exception('unsupported type: %s' % str(type(x)))
def to_normalized(x):
    """L2-normalize x along its last axis.

    A numpy input produces a numpy result (via a torch round-trip); a
    torch tensor is normalized directly.  Other types raise.
    """
    if isinstance(x, np.ndarray):
        return to_normalized(torch.Tensor(x)).numpy()
    if isinstance(x, torch.Tensor):
        return F.normalize(x, dim=-1)
    raise Exception('unsupported type: %s' % str(type(x)))
def init_seed(n=-1, index=-1):
    """Seed both torch and numpy RNGs.

    The effective seed is n + index when index is given, else n.
    n == -1 means "do not seed" (no-op).
    """
    if n == -1:
        return
    seed = n + index if index != -1 else n
    torch.manual_seed(seed)
    np.random.seed(seed)
def is_cuda(x):
    """Return True when tensor x resides on a CUDA device."""
    device_kind = x.device.type
    return device_kind == 'cuda'
def valid_part(x, assert_finite=False):
    """Return a boolean mask marking entries of x that are not NaN/inf.

    With assert_finite, additionally require |entry| < 1e10.
    """
    mask = (~torch.isnan(x)) & (x.abs() != float('inf'))
    if assert_finite:
        mask = mask & (x.abs() < 1e10)
    return mask
def is_valid_value(x, assert_finite):
    """Return True when every entry of x passes valid_part's checks."""
    return bool(valid_part(x, assert_finite).all())
def assert_valid_value(*values, assert_finite=False):
    """Raise if any tensor in values contains NaN/inf (debug helper).

    Prints all values and the offending position before raising.
    """
    for position, value in enumerate(values):
        if is_valid_value(value, assert_finite):
            continue
        pprint(values)
        print('Invalid tensor is', position)
        raise Exception('invalid value')
def index_by_list(tensor, indexes):
    """Select the elements of `tensor` at positions `indexes`.

    torch tensors / numpy arrays use fancy indexing; list/tuple inputs
    return a (new) list.  Raises with a descriptive message for other
    types (the original raised a bare Exception()).
    """
    if isinstance(tensor, (torch.Tensor, np.ndarray)):
        return tensor[indexes]
    elif isinstance(tensor, (list, tuple)):
        return [tensor[ind] for ind in indexes]
    else:
        raise Exception('cannot index type: %s' % str(type(tensor)))
|
# MIT License
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, pyqtSignal, QModelIndex
from PyQt5.QtWidgets import QAbstractItemView, QHeaderView
from PyQt5.QtWidgets import QMenu
from PyQt5.QtGui import QColor, QBrush
from hanlendar.gui.customtreemodel import ItemTreeModel
from hanlendar.gui.widget.tasktable import get_completed_color
from hanlendar.domainmodel.todo import ToDo
_LOGGER = logging.getLogger(__name__)
class ToDoTreeModel( ItemTreeModel ):
    """Qt item model exposing a tree of ToDo objects.

    Columns map to ToDo attributes via attrList; the domain data is
    provided by an external data object set through setDataObject().
    """

    ## column index -> ToDo attribute name queried in data()
    attrList = [ "title", "priority", "completed" ]

    def __init__(self, parent, *args):
        super().__init__(parent, *args)
        self.dataObject = None          # domain data provider, set later

    def setDataObject(self, dataObject):
        ## full model reset so attached views re-query everything
        self.beginResetModel()
        self.dataObject = dataObject
        self.endResetModel()

    def data(self, index: QModelIndex, role):
        """Qt data accessor: fixed row height, centered non-title columns,
        completion-dependent foreground color, attribute text."""
        if role == QtCore.Qt.SizeHintRole:
            return QtCore.QSize(10, 30)
        item: ToDo = self.getItem( index )
        if item is None:
            return None
        if role == Qt.TextAlignmentRole:
            attrIndex = index.column()
            if attrIndex > 0:
                ## every column except the title is centered
                return Qt.AlignHCenter | Qt.AlignVCenter
        if role == Qt.ForegroundRole:
            return get_todo_fgcolor( item )
        if role == QtCore.Qt.DisplayRole:
            attrIndex = index.column()
            attrName = self._getAttrName(attrIndex)
            if attrName is None:
                return None
            return getattr( item, attrName )
        return None

    def headerLabels(self):
        ## human-readable captions matching attrList order
        return [ "Summary", "Priority", "Complete" ]

    def internalMoveMimeType(self):
        ## MIME type used by internal drag & drop moves
        return "TodosTreeNode"

    ## overrided
    def moveItem(self, itemId, targetItem, targetIndex):
        ## delegate the actual reordering to the domain data object
        if self.dataObject is None:
            return
        self.dataObject.moveToDo( itemId, targetItem, targetIndex )

    def getRootList(self):
        """Return the top-level ToDo list from the domain manager."""
        if self.dataObject is None:
            return None
        manager = self.dataObject.getManager()
        return manager.getToDos()

#     def setRootList(self, newList):
#         if self.dataObject is None:
#             return
#         self.dataObject.setTodosList( newList )

    def _getAttrName(self, attrIndex):
        """Map a column index to a ToDo attribute name, or None if out of range."""
        if attrIndex < 0:
            return None
        if attrIndex >= len(self.attrList):
            return None
        return self.attrList[attrIndex]
## ===========================================================
class ToDoSortFilterProxyModel( QtCore.QSortFilterProxyModel ):
    """Proxy model that can hide completed ToDos and sorts by display text."""

    def __init__(self, parentObject=None):
        super().__init__(parentObject)
        self._showCompleted = False     # hide completed items by default

    def showCompleted(self, show=True):
        """Toggle visibility of completed items and re-run the filter."""
        self._showCompleted = show
        self.invalidateFilter()

    def filterAcceptsRow(self, sourceRow, sourceParent: QModelIndex):
        """Accept all rows when completed items are shown; otherwise
        accept only rows whose ToDo is not completed."""
        if self._showCompleted is True:
            return True
        ## column 2 holds the completion state
        completedIndex = self.sourceModel().index( sourceRow, 2, sourceParent )
        rowItem: ToDo = completedIndex.internalPointer()
        return rowItem.isCompleted() is False

    def lessThan(self, left: QModelIndex, right: QModelIndex):
        """Order rows by their DisplayRole values."""
        lhs = self.sourceModel().data(left, QtCore.Qt.DisplayRole)
        rhs = self.sourceModel().data(right, QtCore.Qt.DisplayRole)
        return lhs < rhs
## ===========================================================
class ToDoTable( QtWidgets.QTreeView ):
    """Tree view presenting ToDos with sorting, drag&drop reordering and a
    context menu.

    The widget itself only emits domain-level signals; the actual
    mutations happen in the data object wired in via connectData().
    """

    ## selection / CRUD signals consumed by the connected data object
    selectedToDo = pyqtSignal( ToDo )
    todoUnselected = pyqtSignal()
    addNewToDo = pyqtSignal()
    addNewSubToDo = pyqtSignal( ToDo )
    editToDo = pyqtSignal( ToDo )
    removeToDo = pyqtSignal( ToDo )
    convertToDoToTask = pyqtSignal( ToDo )
    markCompleted = pyqtSignal( ToDo )

    def __init__(self, parentWidget=None):
        super().__init__(parentWidget)
        self.data = None                # domain data object, set in connectData()
        ## single-row selection, read-only view
        self.setSelectionBehavior( QAbstractItemView.SelectRows )
        self.setSelectionMode( QAbstractItemView.SingleSelection )
        self.setEditTriggers( QAbstractItemView.NoEditTriggers )
        self.setAlternatingRowColors( True )
        self.setSortingEnabled( True )
        ## internal drag & drop used for reordering
        self.setDragEnabled( True )
        self.setDropIndicatorShown( True )
        self.setDragDropMode( QAbstractItemView.InternalMove )
        self.setDragDropOverwriteMode(False)
        ## source model wrapped in a sort/filter proxy
        self.itemsModel = ToDoTreeModel(self)
        self.proxyModel = ToDoSortFilterProxyModel(self)
        self.proxyModel.setSourceModel( self.itemsModel )
        self.setModel( self.proxyModel )
        header = self.header()
        header.setDefaultAlignment( Qt.AlignCenter )
        header.setHighlightSections( False )
        header.setStretchLastSection( False )
        ## first column (title) absorbs remaining width
        header.setSectionResizeMode( 0, QHeaderView.Stretch )
        self.doubleClicked.connect( self.itemDoubleClicked )

    def connectData(self, dataObject):
        """Wire this widget's signals to the domain data object's slots."""
        self.data = dataObject
        self.itemsModel.setDataObject( dataObject )
        self.addNewToDo.connect( dataObject.addNewToDo )
        self.addNewSubToDo.connect( dataObject.addNewSubToDo )
        self.editToDo.connect( dataObject.editToDo )
        self.removeToDo.connect( dataObject.removeToDo )
        self.convertToDoToTask.connect( dataObject.convertToDoToTask )
        self.markCompleted.connect( dataObject.markToDoCompleted )

    def showCompletedItems(self, show):
        """Toggle visibility of completed ToDos and refresh the view."""
        self.proxyModel.showCompleted( show )
        self.updateView()

    def updateView(self):
        ## re-setting the data object triggers a full model reset
        if self.data is None:
            return
        self.itemsModel.setDataObject( self.data )

    def getToDo(self, itemIndex: QModelIndex ):
        """Map a proxy index back to the underlying ToDo item."""
        sourceIndex = self.proxyModel.mapToSource( itemIndex )
        return self.itemsModel.getItem( sourceIndex )

    def contextMenuEvent( self, event ):
        """Show the ToDo context menu and emit the signal for the chosen action."""
        evPos = event.pos()
        globalPos = self.viewport().mapToGlobal( evPos )
        todo: ToDo = None
        mIndex = self.indexAt( evPos )
        if mIndex is not None:
            todo = self.getToDo( mIndex )
        contextMenu = QMenu(self)
        addToDoAction = contextMenu.addAction("New ToDo")
        addSubToDoAction = contextMenu.addAction("New Sub ToDo")
        editToDoAction = contextMenu.addAction("Edit ToDo")
        removeToDoAction = contextMenu.addAction("Remove ToDo")
        convertToDoAction = contextMenu.addAction("Convert to Task")
        markCompletedAction = contextMenu.addAction("Mark completed")
        if todo is None:
            ## context menu on background
            addSubToDoAction.setEnabled( False )
            editToDoAction.setEnabled( False )
            removeToDoAction.setEnabled( False )
            convertToDoAction.setEnabled( False )
            markCompletedAction.setEnabled( False )
        action = contextMenu.exec_( globalPos )
        if action == addToDoAction:
            self.addNewToDo.emit()
        elif action == addSubToDoAction:
            self.addNewSubToDo.emit( todo )
        elif action == editToDoAction:
            self.editToDo.emit( todo )
        elif action == removeToDoAction:
            self.removeToDo.emit( todo )
        elif action == convertToDoAction:
            self.convertToDoToTask.emit( todo )
        elif action == markCompletedAction:
            self.markCompleted.emit( todo )

    def selectionChanged(self, toSelection, fromSelection):
        """Emit selectedToDo / todoUnselected when the current row changes."""
        super().selectionChanged( toSelection, fromSelection )
        modelIndex = self.currentIndex()
        todo = self.getToDo( modelIndex )
        if todo is not None:
            self.selectedToDo.emit( todo )
        else:
            self.todoUnselected.emit()

    def itemDoubleClicked(self, modelIndex):
        ## double click opens the edit dialog via the editToDo signal
        todo = self.getToDo( modelIndex )
        self.editToDo.emit( todo )

    def mousePressEvent(self, event):
        ## clicking empty space clears the selection
        pos = event.pos()
        itemIndex = self.indexAt(pos)
        if itemIndex.isValid() is False:
            self.setCurrentIndex(itemIndex)
            self.clearSelection()
        super().mousePressEvent( event )
def get_todo_fgcolor( todo: ToDo ) -> QBrush:
    """Foreground brush for a ToDo row: shared 'completed' color when done,
    plain black otherwise."""
    if not todo.isCompleted():
        ## normal item -- black text
        return QBrush( QColor(0, 0, 0) )
    ## completed -- same color as in the task table
    return QBrush( get_completed_color() )
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import torch
from tracker.multitracker import JDETracker
from tracking_utils import visualization as vis
from tracking_utils.log import logger
from tracking_utils.timer import Timer
from tracking_utils.utils import mkdir_if_missing
def write_results(filename, results, data_type):
    """Write tracking results to `filename` in MOT or KITTI text format.

    results -- iterable of (frame_id, tlwhs, track_ids) triples.
    Raises ValueError for an unknown data_type.
    """
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as out:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                frame_id -= 1  # KITTI frame numbering is 0-based
            for box, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue  # skip invalid/unconfirmed tracks
                x1, y1, w, h = box
                out.write(save_format.format(frame=frame_id, id=track_id,
                                             x1=x1, y1=y1,
                                             x2=x1 + w, y2=y1 + h,
                                             w=w, h=h))
    logger.info('save results to {}'.format(filename))
def write_results_score(filename, results, data_type):
    """Like write_results, but each track also carries a confidence score.

    results -- iterable of (frame_id, tlwhs, track_ids, scores) tuples.
    Raises ValueError for an unknown data_type.
    """
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},{s},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as out:
        for frame_id, tlwhs, track_ids, scores in results:
            if data_type == 'kitti':
                frame_id -= 1  # KITTI frame numbering is 0-based
            for box, track_id, score in zip(tlwhs, track_ids, scores):
                if track_id < 0:
                    continue  # skip invalid/unconfirmed tracks
                x1, y1, w, h = box
                out.write(save_format.format(frame=frame_id, id=track_id,
                                             x1=x1, y1=y1,
                                             x2=x1 + w, y2=y1 + h,
                                             w=w, h=h, s=score))
    logger.info('save results to {}'.format(filename))
def add_stats_to_image(image, opt):
    """Draw the checkpoint file name centered on the frame in red.

    image -- BGR image (numpy array); modified in place and returned.
    opt   -- options object; only opt.load_model is read.
    """
    text = opt.load_model.split('/')[-1]
    font = cv2.FONT_HERSHEY_SIMPLEX
    # measure the rendered text so it can be centered
    text_w, text_h = cv2.getTextSize(text, font, 1, 2)[0]
    origin = (int((image.shape[1] - text_w) / 2),
              int((image.shape[0] + text_h) / 2))
    cv2.putText(image, text, origin, font, 1, (0, 0, 255), 2)
    return image
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30, use_cuda=True,
             epoch=0):
    """Run the JDE tracker over a sequence and write results to disk.

    opt             -- options object (model path, min_box_area, ...)
    dataloader      -- yields (path, img, img0) per frame; img is the
                       network input, img0 the original frame for display
    data_type       -- 'mot' or 'kitti' (see write_results)
    result_filename -- output path for the tracking results
    save_dir        -- when set, annotated frames are written there
    epoch           -- currently unused; kept for caller compatibility
    Returns (frame_count, average_time_per_frame, timer_calls).
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    # for path, img, img0 in dataloader:
    for i, (path, img, img0) in enumerate(dataloader):
        # if i % 8 != 0:
            # continue
        if frame_id % 20 == 0:
            # periodic progress log with current FPS estimate
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        # online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # vertical = tlwh[2] / tlwh[3] > 1.6
            # if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
            # keep only boxes above the minimum area threshold
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        # results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            # draw boxes/ids plus the model-name overlay
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            online_im = add_stats_to_image(online_im, opt)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    # write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
|
<reponame>sapcc/networking-nsx-t
from datetime import datetime
import oslo_messaging
from networking_nsxv3.common import constants as nsxv3_constants
from networking_nsxv3.db import db
from neutron_lib import context as neutron_context
from neutron_lib import exceptions, rpc
from neutron_lib.agent import topics
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log
from osprofiler.profiler import trace_cls
LOG = log.getLogger(__name__)
@trace_cls("rpc")
class NSXv3AgentRpcClient(object):
    """Neutron RPC Client for NSXv3 L2 agent.

    Server-to-agent calls: port binding is a synchronous call() to a
    specific host; policy operations are broadcast cast()s to all agents.
    """

    version = nsxv3_constants.RPC_VERSION

    def __init__(self, context):
        target = oslo_messaging.Target(
            topic=nsxv3_constants.NSXV3,
            version=self.version
        )
        self.context = context
        self.rpc = rpc.get_client(target)

    def _get_call_context(self, host=None):
        """Prepare an RPC call context for one host or for all agents."""
        topic = topics.get_topic_name(
            topics.AGENT, nsxv3_constants.NSXV3, topics.UPDATE, host)
        # fanout=True - broadcast to all agents, False only to the host
        return self.rpc.prepare(
            version=self.version,
            topic=topic,
            fanout=(host is None))

    def get_network_bridge(self, current, network_segments, network_current, host):
        """Synchronously ask the agent on `host` to bind a port."""
        LOG.debug("Bind port on Host {} & Segment {}".format(host, network_segments))
        return self._get_call_context(host).call(
            self.context, 'get_network_bridge',
            current=current,
            network_segments=network_segments,
            network_current=network_current
        )

    def create_policy(self, context, policy):
        # message typo fixed: was "All gents."
        LOG.debug("All agents. Creating policy={}.".format(policy.name))
        return self._get_call_context().cast(
            self.context, 'create_policy', policy=policy)

    def update_policy(self, context, policy):
        LOG.debug("All agents. Updating policy={}.".format(policy.name))
        # only propagate updates that actually carry rules
        if hasattr(policy, "rules"):
            return self._get_call_context().cast(
                self.context, 'update_policy', policy=policy)

    def delete_policy(self, context, policy):
        LOG.debug("All agents. Deleting policy={}.".format(policy.name))
        return self._get_call_context().cast(
            self.context, 'delete_policy', policy=policy)

    def update_policy_precommit(self, context, policy):
        """Broadcast a pre-commit validation request ('validate_policy')."""
        LOG.debug("All agents. Validating policy={}.".format(policy))
        if hasattr(policy, "rules"):
            return self._get_call_context().cast(
                self.context, 'validate_policy', policy=policy)
class NSXv3ServerRpcApi(object):
    """Agent-side RPC (stub) for agent-to-plugin interaction.

    Client side of an rpc interface whose server side is
    NSXv3ServerRpcCallback below.  For more information on changing rpc
    interfaces, see doc/source/contributor/internals/rpc_api.rst.
    """

    rpc_version = nsxv3_constants.NSXV3_SERVER_RPC_VERSION

    def __init__(self):
        target = oslo_messaging.Target(topic=nsxv3_constants.NSXV3_SERVER_RPC_TOPIC,
                                       version=self.rpc_version)
        self.context = neutron_context.get_admin_context()
        self.client = rpc.get_client(target)
        self.host = cfg.CONF.host

    @log_helpers.log_method_call
    def get_ports_with_revisions(self, limit, cursor):
        """Page through (port, revision) pairs bound to this host."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_ports_with_revisions',
                             host=self.host, limit=limit, cursor=cursor)

    @log_helpers.log_method_call
    def get_qos_policies_with_revisions(self, limit, cursor):
        """Page through (qos policy, revision) pairs for this host."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_qos_policies_with_revisions',
                             host=self.host, limit=limit, cursor=cursor)

    @log_helpers.log_method_call
    def get_security_groups_with_revisions(self, limit, cursor):
        """Page through (security group, revision) pairs for this host."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_security_groups_with_revisions',
                             host=self.host, limit=limit, cursor=cursor)

    @log_helpers.log_method_call
    def get_security_group(self, security_group_id):
        """Fetch one security group's id/revision/tags/ports."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_security_group',
                             host=self.host, security_group_id=security_group_id)

    @log_helpers.log_method_call
    def get_qos(self, qos_id):
        """Fetch one QoS policy with its DSCP and bandwidth-limit rules."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_qos', host=self.host, qos_id=qos_id)

    @log_helpers.log_method_call
    def get_port(self, port_id):
        """Fetch one port including address bindings and security groups."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_port', host=self.host, port_id=port_id)

    @log_helpers.log_method_call
    def get_rules_for_security_group_id(self, security_group_id):
        """Fetch the rules of a security group (host-independent)."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_rules_for_security_group_id',
                             security_group_id=security_group_id)

    @log_helpers.log_method_call
    def get_security_group_members_effective_ips(self, security_group_id):
        """Fetch member IPs (fixed + address bindings) of a security group."""
        prepared = self.client.prepare()
        return prepared.call(self.context,
                             'get_security_group_members_effective_ips',
                             security_group_id=security_group_id)

    @log_helpers.log_method_call
    def get_security_groups_for_host(self, limit, cursor):
        """Page through security groups in use on this host."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_security_groups_for_host',
                             host=self.host, limit=limit, cursor=cursor)

    @log_helpers.log_method_call
    def get_remote_security_groups_for_host(self, limit, cursor):
        """Page through remote security groups referenced from this host."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'get_remote_security_groups_for_host',
                             host=self.host, limit=limit, cursor=cursor)

    @log_helpers.log_method_call
    def has_security_group_used_by_host(self, security_group_id):
        """Check whether the security group is used on this host."""
        prepared = self.client.prepare()
        return prepared.call(self.context, 'has_security_group_used_by_host',
                             host=self.host, security_group_id=security_group_id)
class NSXv3ServerRpcCallback(object):
    """Plugin-side RPC (implementation) for agent-to-plugin interaction.

    Server side of an rpc interface whose client side is
    NSXv3ServerRpcApi above.  For more information on changing rpc
    interfaces, see doc/source/contributor/internals/rpc_api.rst.
    Most methods are thin delegations to the db module.
    """

    rpc_version = nsxv3_constants.NSXV3_SERVER_RPC_VERSION
    target = oslo_messaging.Target(version=rpc_version)

    @property
    def plugin(self):
        # Lazily resolved and cached: the plugin directory may not be
        # populated when this callback object is constructed.
        if not getattr(self, '_plugin', None):
            self._plugin = directory.get_plugin()
        return self._plugin

    @log_helpers.log_method_call
    def get_ports_with_revisions(self, context, host, limit, cursor):
        return db.get_ports_with_revisions(context, host, limit, cursor)

    @log_helpers.log_method_call
    def get_qos_policies_with_revisions(self, context, host, limit, cursor):
        return db.get_qos_policies_with_revisions(context, host, limit, cursor)

    @log_helpers.log_method_call
    def get_security_groups_with_revisions(self, context, host, limit, cursor):
        return db.get_security_groups_with_revisions(context, host, limit, cursor)

    @log_helpers.log_method_call
    def get_security_group(self, context, host, security_group_id):
        """Return id/revision/stateful/tags/ports for a security group,
        or None when the group is unknown."""
        id_rev = db.get_security_group_revision(context, security_group_id)
        if id_rev:
            return {
                "id": id_rev[0],
                "revision_number": id_rev[1],
                "stateful": id_rev[2],
                "tags": db.get_security_group_tag(context, security_group_id),
                "ports": db.get_port_id_by_sec_group_id(context, host,
                                                        security_group_id)
            }
        return None  # explicit: security group not found

    @log_helpers.log_method_call
    def get_rules_for_security_group_id(self, context, security_group_id):
        return db.get_rules_for_security_group_id(context, security_group_id)

    @log_helpers.log_method_call
    def get_security_group_members_effective_ips(self, context, security_group_id):
        """Combine member fixed IPs and address-binding IPs into one list."""
        member_ips = db.get_security_group_members_ips(context, security_group_id)
        binding_ips = db.get_security_group_members_address_bindings_ips(
            context, security_group_id)
        # each db row is a tuple; the IP is its first column
        return [ips[0] for ips in member_ips + binding_ips]

    @log_helpers.log_method_call
    def get_security_groups_for_host(self, context, host, limit, cursor):
        return db.get_security_groups_for_host(context, host, limit, cursor)

    @log_helpers.log_method_call
    def get_remote_security_groups_for_host(self, context, host, limit, cursor):
        return db.get_remote_security_groups_for_host(context, host, limit, cursor)

    @log_helpers.log_method_call
    def has_security_group_used_by_host(self, context, host, security_group_id):
        return db.has_security_group_used_by_host(context, host, security_group_id)

    @log_helpers.log_method_call
    def get_port(self, context, host, port_id):
        """Return a port dict enriched with address bindings and security
        groups, or None when the port is not bound to `host`."""
        port = db.get_port(context, host, port_id)
        if not port:
            return None
        # NSX-T does not support CIDR as port manual binding - skipping X/X
        for ip in db.get_port_addresses(context, port_id):
            if "/" in ip:
                continue
            port["address_bindings"].append({"ip_address": ip[0], "mac_address": port["mac_address"]})
        for ip, mac in db.get_port_allowed_pairs(context, port_id):
            if "/" in ip:
                continue
            port["address_bindings"].append({"ip_address": ip, "mac_address": mac})
        for sg_id in db.get_port_security_groups(context, port_id):
            port["security_groups"].append(sg_id[0])
        return port

    @log_helpers.log_method_call
    @oslo_messaging.expected_exceptions(exceptions.ObjectNotFound)
    def get_qos(self, context, host, qos_id):
        """Return a QoS policy dict with DSCP and bandwidth-limit rules,
        or None when no port on `host` uses the policy."""
        if not db.get_qos_ports_by_host(context, host, qos_id):
            return None
        q = db.get_qos(context, qos_id)
        qos = {"id": qos_id, "name": q[0], "revision_number": q[1], "rules": []}
        for _, dscp_mark in db.get_qos_dscp_rules(context, qos_id):
            qos["rules"].append({"dscp_mark": dscp_mark})
        # 'direction' (was 'dir') avoids shadowing the builtin dir()
        for direction, bps, burst in db.get_qos_bwl_rules(context, qos_id):
            qos["rules"].append({"direction": direction, "max_kbps": bps,
                                 "max_burst_kbps": burst})
        return qos
|
<filename>remus/data_import/aggregate_CAGE_peaks.py
##
# Based on FANTOM5 (F5) ontology (arg1) the script groups individual F5 samples into preselected organs, tissues and celltypes (facets).
# Only primary cells and tissue samples from human are used.
# Robust CAGE peaks (TPM>10) for samples are extracted from expression matrix (arg2) and aggregated over the facets.
# Output is saved in user-specified path (arg3) in facet BED files containing genomic location of CAGE peaks and aggregated score for the facet.
#
# Usage:
# aggregate_CAGE_peaks.py OBO_FILE TSS_EXPRESSION_MATRIX_FILE OUTPUT_PATH
#
# <NAME>, 9/2018
#
import obonet, networkx
import gzip
import sys, os
import pybedtools, pybedtools.featurefuncs
class F5Ontology():
    """Wrapper around the FANTOM5 sample ontology.

    Loads the OBO graph, keeps id<->name maps, and builds a slimmed graph
    (without develops_from / is_model_for / treated_with edges) used to
    classify samples into organs and celltypes.
    """

    # organs used in SlideBase
    SLIDEBASE_ORGANS = set(['UBERON:0000029','UBERON:0000059','UBERON:0000178','UBERON:0000341','UBERON:0000473','UBERON:0000945',
        'UBERON:0000948','UBERON:0000955','UBERON:0000970','UBERON:0000989','UBERON:0000992','UBERON:0000995','UBERON:0000996',
        'UBERON:0001013','UBERON:0001043','UBERON:0001044','UBERON:0001134','UBERON:0001135','UBERON:0001255','UBERON:0001264',
        'UBERON:0001723','UBERON:0001736','UBERON:0001831','UBERON:0001981','UBERON:0001987','UBERON:0002046','UBERON:0002048',
        'UBERON:0002097','UBERON:0002106','UBERON:0002107','UBERON:0002108','UBERON:0002110','UBERON:0002113','UBERON:0002240',
        'UBERON:0002331','UBERON:0002360','UBERON:0002367','UBERON:0002370','UBERON:0002372','UBERON:0003112','UBERON:0004054'])

    ENCODE_ORGANS = set(['UBERON:0000006','UBERON:0000059','UBERON:0000473','UBERON:0000945','UBERON:0000948','UBERON:0000955',
        'UBERON:0000966','UBERON:0000970','UBERON:0000992','UBERON:0000995','UBERON:0000996','UBERON:0001114','UBERON:0001150',
        'UBERON:0001157','UBERON:0001159','UBERON:0001211','UBERON:0001224','UBERON:0001264','UBERON:0001323','UBERON:0001383',
        'UBERON:0001496','UBERON:0001499','UBERON:0001515','UBERON:0001621','UBERON:0001723','UBERON:0001774','UBERON:0001870',
        'UBERON:0001875','UBERON:0001987','UBERON:0002037','UBERON:0002046','UBERON:0002048','UBERON:0002080','UBERON:0002084',
        'UBERON:0002106','UBERON:0002107','UBERON:0002108','UBERON:0002113','UBERON:0002129','UBERON:0002167','UBERON:0002168',
        'UBERON:0002190','UBERON:0002240','UBERON:0002324','UBERON:0002331','UBERON:0002367','UBERON:0002369','UBERON:0002370',
        'UBERON:0003662','UBERON:0003663','UBERON:0004264','UBERON:0004538','UBERON:0004539','UBERON:0004550','UBERON:0004648',
        'UBERON:0005270','UBERON:0006631','UBERON:0006920','UBERON:0007610','UBERON:0008367','UBERON:0008450','UBERON:0008952',
        'UBERON:0010414','UBERON:0011907','UBERON:0018115','UBERON:0018116','UBERON:0018117','UBERON:0018118','UBERON:0036149'])

    OTHER_ORGANS = set()

    SLIDEBASE_CELLTYPES = set(['CL:0000047','CL:0000056','CL:0000062','CL:0000067','CL:0000071','CL:0000077','CL:0000080','CL:1000487',
        'CL:0000084','CL:0000094','CL:0000097','CL:0000098','CL:0000127','CL:0000134','CL:0000136','CL:0000138','CL:0000148',
        'CL:0000182','CL:0000188','CL:0000235','CL:0000312','CL:0000359','CL:0000388','CL:0000451','CL:0000499','CL:0000540',
        'CL:0000558','CL:0000575','CL:0000576','CL:0000622','CL:0000623','CL:0000632','CL:0000669','CL:0000731','CL:0000746',
        'CL:0000767','CL:0000775','CL:0000945','CL:0002138','CL:0002166','CL:0002224','CL:0002231','CL:0002252','CL:0002327',
        'CL:0002334','CL:0002363','CL:0002367','CL:0002368','CL:0002504','CL:0002518','CL:0002536','CL:0002548','CL:0002549',
        'CL:0002550','CL:0002552','CL:0002554','CL:0002556','CL:0002557','CL:0002559','CL:0002563','CL:0002565','CL:0002577',
        'CL:0002586','CL:0002598','CL:0002599','CL:0002600','CL:0002601','CL:0002620','CL:0002621','CL:1000306','CL:1000398'])

    ENCODE_CELLTYPES = set(['CL:0000056','CL:0000084','CL:0000127','CL:0000188','CL:0000236','CL:0000307','CL:0000312',
        'CL:0000351','CL:0000515','CL:0000545','CL:0000546','CL:0000623','CL:0000624','CL:0000625','CL:0000706','CL:0000746',
        'CL:0000765','CL:0000775','CL:0000815','CL:0000895','CL:0000899','CL:0001054','CL:0001059','CL:0002188','CL:0002231',
        'CL:0002252','CL:0002304','CL:0002306','CL:0002327','CL:0002328','CL:0002399','CL:0002518','CL:0002536','CL:0002539',
        'CL:0002547','CL:0002548','CL:0002550','CL:0002551','CL:0002552','CL:0002553','CL:0002555','CL:0002557','CL:0002558',
        'CL:0002565','CL:0002584','CL:0002586','CL:0002590','CL:0002603','CL:0002604','CL:0002606','CL:0002618','CL:0002620',
        'CL:0010001','CL:1001568','CL:1001606','CL:1001608','CL:2000010','CL:2000012','CL:2000013','CL:2000014','CL:2000016',
        'CL:2000017','CL:2000041','CL:2000043','CL:2000044','CL:2000045','NTR:0004646','NTR:0004647'])

    OTHER_CELLTYPES = set()

    ORGAN_IDS = SLIDEBASE_ORGANS | ENCODE_ORGANS | OTHER_ORGANS
    CELLTYPE_IDS = SLIDEBASE_CELLTYPES | ENCODE_CELLTYPES | OTHER_CELLTYPES

    # NOTE: attribute name has a typo ("SAMPIENS"); kept for backward
    # compatibility with existing callers.
    HOMO_SAMPIENS_ID = 'NCBITaxon:9606'
    CELL_LINE_SAMPLE_ID='FF:0000003'

    def __init__(self, obo_file_path):
        self.g = obonet.read_obo(obo_file_path)
        self._id2name = {id_: data['name'] for id_, data in self.g.nodes(data=True)}
        self._name2id = {self._id2name[k] : k for k in self._id2name.keys()}

        # slim the ontology for the purpose of classifying samples to organs (removes unneeded relations)
        edge_labels_to_remove=['develops_from', 'is_model_for', 'treated_with']
        e2rm = [(a,b) for (a,b,c) in self.g.edges(keys=True) if c in edge_labels_to_remove]
        # BUGFIX: copy.copy() on a networkx graph shares the adjacency
        # dicts, so removing edges also mutated self.g (the original code
        # even called remove_edges_from twice "to be certain").  Use the
        # graph's own copy() to get an independent structure.
        self.slim_g = self.g.copy()
        self.slim_g.remove_edges_from(e2rm)
        #print('remaining:', set([c for (a,b,c) in slim_g.edges(keys=True)]))

    def id2name(self, ids):
        """Map an ontology id (or a list/set of ids) to name(s); None if unknown."""
        if isinstance(ids, list) or isinstance(ids, set):
            return [self.id2name(identifier) for identifier in ids]
        return self._id2name[ids] if ids in self._id2name else None

    def name2id(self, names):
        """Map an ontology name (or a list/set of names) to id(s); None if unknown."""
        if isinstance(names, list) or isinstance(names, set):
            return [self.name2id(name) for name in names]
        return self._name2id[names] if names in self._name2id else None

    def get_samples_for_terms(self, term_ids):
        """ Returns a dictionary organ_id:[samples] build of organ_ids given as argument,
            and list of samples belonging to that organ (according to ontology) """
        human_samples = networkx.ancestors(self.slim_g, F5Ontology.HOMO_SAMPIENS_ID)
        cell_line_samples = networkx.ancestors(self.slim_g, F5Ontology.CELL_LINE_SAMPLE_ID)
        samples={}
        for term_id in term_ids:
            if term_id not in self.slim_g:
                samples[term_id] = []
            else:
                ancestor_terms = networkx.ancestors(self.slim_g, term_id)  # take all celltypes composing an organ
                samples[term_id] = list((set(human_samples) - set(cell_line_samples)) & set(ancestor_terms))  # exclude cell_line samples and intersect
                # remove non-root terms
                samples[term_id] = [s for s in samples[term_id] if len(networkx.ancestors(self.slim_g, s))==0]
        return samples

    def get_organ_for_sample(self, sample_id, allow_missing = False):
        """Return the organ term ids a sample belongs to."""
        # BUGFIX: used to call nonexistent _get_term_for_sample (typo) and
        # dropped the allow_missing flag.
        return self._get_terms_for_sample(sample_id, F5Ontology.ORGAN_IDS, allow_missing)

    def get_celltypes_for_samples(self, sample_id, allow_missing = False):
        """Return the celltype term ids a sample belongs to."""
        # BUGFIX: same method-name typo / dropped flag as above.
        return self._get_terms_for_sample(sample_id, F5Ontology.CELLTYPE_IDS, allow_missing)

    def _get_terms_for_sample(self, sample_id, term_ids, allow_missing = False):
        """ Returns a list of term_ids (organs / celltypes). One sample can be part of several terms. """
        if sample_id not in self.slim_g:
            if allow_missing:
                descendants = set()
            else:
                raise Exception("Node %s is missing from the ontology." % sample_id)
        else:
            descendants = networkx.descendants(self.slim_g, sample_id)
        return list(descendants & term_ids)
def get_sample_ids_from_expression_table(file_handle):
    """Scan past '##' comment lines to the '00Annotation' header row and
    return the sample IDs (4th dot-separated field of each column label).

    Raises on the first line that is neither a comment nor the header;
    returns an empty list if the file ends before a header is seen.
    """
    for line in file_handle:
        if line.startswith('##'):
            continue
        if line.startswith('00Annotation'):
            columns = line.split('\t')
            return [column.split('.')[3].strip() for column in columns[1:]]
        raise Exception('No header found')  # (should not happen)
    return []
def decode_genomic_location(field):
    """Parse a CAGE peak location 'chrom:start..end,strand' into a
    pybedtools Interval."""
    chrom, coords = field.split(":")[0], field.split(":")[1]
    start_part, tail = coords.split("..")[0], coords.split("..")[1]
    end_part, strand = tail.split(",")[0], tail.split(",")[1]
    return pybedtools.Interval(chrom, int(start_part), int(end_part), strand=strand)
def mean(l):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = sum(l)
    return total / len(l)
#### MAIN ####
if __name__ == '__main__':
    # Command-line arguments (see module header for usage):
    # OBO ontology, gzipped CAGE expression matrix, output directory.
    OBO_FILE = sys.argv[1]
    EXPRESSION_TABLE_FILE = sys.argv[2]
    OUTPUT_DIR = sys.argv[3]
    # TPM threshold a peak must reach in at least one sample of a facet
    EXPRESSION_CUTOFF = 10
    EXPRESSION_AGGREGATE_FUNCTION = mean
    print("Reading ontology file...")
    f5o = F5Ontology(OBO_FILE)
    tsd = f5o.get_samples_for_terms(F5Ontology.ORGAN_IDS | F5Ontology.CELLTYPE_IDS) ## term-samples dict
    # Prepare list of output files, one per organ/celltype.
    # Organ/celltypes missing from the ontology are skipped
    print("Initiating output BED files...")
    bed_names = {t: os.path.join(OUTPUT_DIR,
                                 t.replace(":", "_") + "_" + f5o.id2name(t).replace(" ", "_") + ".bed"
                                 ) for t in tsd if len(tsd[t]) > 0}
    bed_files = {t: open(bed_names[t], 'wt') for t in bed_names}
    # iterate over expression table, and append single records to organ BED files.
    print("Parsing CAGE expression matrix... (every 100th record is printed to show progress)")
    with gzip.open(EXPRESSION_TABLE_FILE, 'rt') as f:
        # jump to header and read sample IDs
        sample_ids = get_sample_ids_from_expression_table(f)
        # Create a dict of indices.
        # Some sample IDs have replicates, so the dict values are lists
        #
        s_col={}
        for i, s_id in enumerate(sample_ids):
            if s_id in s_col:
                s_col[s_id] = s_col[s_id] + [i]
            else:
                s_col[s_id] = [i]
        #s_col = {sample_ids[i] : (i+1) for i in range(1,len(sample_ids))}
        # skip two lines of normalization stats
        assert f.readline().find("01STAT:MAPPED") == 0
        assert f.readline().find("02STAT:NORM_FACTOR") == 0
        row_cnt=0
        for l in f:
            lsplit = l.split('\t')
            # first column encodes the peak location 'chrom:start..end,strand'
            tss_interval = decode_genomic_location(lsplit[0])
            # widen the peak around its TSS: 200bp upstream, peak length downstream
            new_record = pybedtools.featurefuncs.TSS(tss_interval, upstream=200, downstream=len(tss_interval))
            for facet, samples in tsd.items():
                # skip if there is no samples for term/facet
                if len(samples) == 0: continue
                # strip the 'FF:' prefix to match expression-matrix column IDs
                s_ids = [s[len('FF:'):] for s in samples]
                # remove ontology sample_ids missing from expression matrix (isolated cases)
                missing = [s for s in s_ids if s not in s_col]
                if len(missing)>0:
                    #print("Following sample IDs for facet [%s] (%s) are missing from expression matrix: %s" % (f5o.id2name(facet), facet, str(["FF:"+s for s in missing])))
                    s_ids = [s for s in s_ids if s not in missing]
                # extract promoter expression values for samples in this facet
                # values for technical replicates (list of columns with the same extract ID) are aggregated separately, to not bias the facet score
                expr_list = []
                for s in s_ids:
                    cols = s_col[s]
                    if len(cols)>1:  # technical replicates
                        exprs = [float(lsplit[c]) for c in cols]
                        expr_list.append(EXPRESSION_AGGREGATE_FUNCTION(exprs))
                    else:
                        expr_list.append(float(lsplit[cols[0]]))
                # calculate aggregated expression/activity of the promoter in the facet
                # provided that at least one of samples meets the cutoff criteria
                new_record.score = '%.2f' % EXPRESSION_AGGREGATE_FUNCTION(expr_list)
                if max(expr_list) >= EXPRESSION_CUTOFF:
                    bed_files[facet].write(str(new_record))
            # progress indicator: echo every 100th record
            if row_cnt % 100 == 0:
                print(str(new_record).strip())
            row_cnt += 1
    for _,f in bed_files.items():
        f.close()
    print("Done.")
|
<filename>test/functional/bsv-zmq-txremovedfrommempool.py
#!/usr/bin/env python3
# Copyright (c) 2019-2020 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
"""
Test some ZMQ notifications/messages when transactions get removed from the mempool.
Body of ZMQ message is in json format: {txid: hexstring, reason: string,
collidedWith: {txid: hexstring, size: integer, hex: hexstring},
blockhash: hexstring}
The fields collidedWith and blockhash are only present when reason for removal is collision-in-block-tx
To see if the zmq notifiers work, we only check for particular
reasons when transactions get removed from the mempool.
Some reasons we can get from -zmqpubremovedfrommempoolblock notifier are:
- included-in-block (when tx gets included in block)
- reorg (when reorg is happening and tx tries to spend immature coin)
One reason we can get from the -zmqpubdiscardedfrommempool notifier is:
- collision-in-block-tx (when we have two tx1, tx2 using same input on two different nodes,
and then tx2 gets mined in block, when the block is propagated
the other tx1 will be removed from mempool.)
Test case 1:
- send two transactions tx1, tx2 to node0 and mine a block
- receive included in block notification
Test case 2:
- invalidate blocks inputs that tx1, tx2 use become immature
- receive removed for reorg notification
Test case 3:
- disconnect nodes n0, n1
- create tx1 on n0, tx2 on n1 that uses same input.
- mine a block on n1
- connect nodes n0, n1
- receive conflict in block tx notification
Test case 4:
- disconnect nodes n0, n1
- mine few blocks on n1
- create tx2 on n1 and mine a block
- create tx1 on n0 that uses same input as tx2 and mine a block
- connect nodes n0, n1
- receive conflict in block tx notification
"""
import json
from test_framework.script import CTransaction, CScript, OP_TRUE, CTxOut
from test_framework.test_framework import BitcoinTestFramework, SkipTest, ToHex, FromHex
from test_framework.util import (assert_equal, check_zmq_test_requirements,
disconnect_nodes_bi, connect_nodes_bi, sync_blocks)
from test_framework.mininode import CTxIn, COutPoint
class ZMQRemovedFromMempool(BitcoinTestFramework):
    """Functional test for ZMQ notifications emitted when transactions leave
    the mempool: 'included-in-block', 'reorg', and 'collision-in-block-tx'
    (see the module docstring for the four test cases)."""

    def set_test_params(self):
        # Two nodes starting from a fresh (empty) chain.
        self.num_nodes = 2
        self.setup_clean_chain = True

    def setup_nodes(self):
        # Check that bitcoin has been built with ZMQ enabled and we have python zmq package installed.
        check_zmq_test_requirements(self.options.configfile,
                                    SkipTest("bitcoind has not been built with zmq enabled."))
        # import zmq when we know we have the requirements for test with zmq.
        import zmq
        self.zmqContext = zmq.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        # 60 s receive timeout so a missing notification fails instead of hanging.
        self.zmqSubSocket.set(zmq.RCVTIMEO, 60000)
        # Subscribe to both mempool-removal topics exercised by this test.
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"discardedfrommempool")
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"removedfrommempoolblock")
        ip_address = "tcp://127.0.0.1:28332"
        self.zmqSubSocket.connect(ip_address)
        # Only node0 publishes ZMQ notifications; node1 runs with defaults.
        self.extra_args = [["-zmqpubdiscardedfrommempool=%s" % ip_address,
                            "-zmqpubremovedfrommempoolblock=%s" % ip_address], []]
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()

    def run_test(self):
        try:
            self._zmq_test()
        finally:
            # Destroy the zmq context
            self.log.debug("Destroying zmq context")
            self.zmqContext.destroy(linger=None)

    def _zmq_test(self):
        # Mine 101 blocks on node0 so the first coinbase matures and can be spent.
        block_hashes = self.nodes[0].generate(101)
        """Test case 1"""
        # Two wallet transactions, then a block: both should be reported
        # as removed from mempool with reason 'included-in-block'.
        tx_hash1 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
        tx_hash2 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
        block_hash1 = self.nodes[0].generate(1)[0]
        # sync blocks so we are synchronized later in test
        sync_blocks(self.nodes)
        # receive notifications for txs to be included in block
        msg1 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg1[0], b"removedfrommempoolblock")
        msg1_body = json.loads(msg1[1])
        assert_equal(msg1_body["reason"], "included-in-block")
        msg2 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg2[0], b"removedfrommempoolblock")
        msg2_body = json.loads(msg2[1])
        assert_equal(msg2_body["reason"], "included-in-block")
        # Notification order is not guaranteed, so check membership only.
        removed_tx = [msg1_body["txid"], msg2_body["txid"]]
        assert_equal(tx_hash1 in removed_tx and tx_hash2 in removed_tx, True)
        """Test case 2"""
        # bring txs back to mempool
        self.nodes[0].invalidateblock(block_hash1)
        # invalidate again so the coins that txs uses are immature
        self.nodes[0].invalidateblock(block_hashes[len(block_hashes) - 2])
        # receive notifications for txs about reorg mempool removal reason
        msg1 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg1[0], b"removedfrommempoolblock")
        msg1_body = json.loads(msg1[1])
        assert_equal(msg1_body["reason"], "reorg")
        msg2 = self.zmqSubSocket.recv_multipart()
        assert_equal(msg2[0], b"removedfrommempoolblock")
        msg2_body = json.loads(msg2[1])
        assert_equal(msg2_body["reason"], "reorg")
        removed_tx = [msg1_body["txid"], msg2_body["txid"]]
        assert_equal(tx_hash1 in removed_tx and tx_hash2 in removed_tx, True)
        """Test case 3"""
        # bring both nodes on same height
        self.nodes[1].invalidateblock(block_hashes[len(block_hashes)-2])
        self.nodes[0].generate(4)
        sync_blocks(self.nodes)
        unspent = self.nodes[0].listunspent()[0]
        # create tx with spendable output for both nodes to use
        # (anyone-can-spend OP_TRUE output, amounts in satoshis)
        tx_spendable_output = CTransaction()
        tx_outs = [CTxOut(4500000000, CScript([OP_TRUE]))]
        tx_spendable_output.vout = tx_outs
        tx_spendable_output.vin = [CTxIn(COutPoint(int(unspent["txid"], 16), 0))]
        tx_hex = self.nodes[0].signrawtransaction(ToHex(tx_spendable_output))['hex']
        self.nodes[0].sendrawtransaction(tx_hex, True)
        # re-parse the signed hex so the object's hash matches the network tx
        tx_spendable_output = FromHex(CTransaction(), tx_hex)
        tx_spendable_output.rehash()
        self.nodes[0].generate(1)
        # ignore included in block message
        _ = self.zmqSubSocket.recv_multipart()
        sync_blocks(self.nodes)
        # disconnect nodes and create transaction tx2 on node1 and mine a block
        # then create tx1 on node0 that use same output as tx2.
        disconnect_nodes_bi(self.nodes, 0, 1)
        tx2 = CTransaction()
        tx_outs = [CTxOut(4400000000, CScript([OP_TRUE]))]
        tx2.vout = tx_outs
        tx2.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]
        tx_hex = self.nodes[1].signrawtransaction(ToHex(tx2))['hex']
        # serialized size in bytes (two hex chars per byte)
        # NOTE(review): '/' makes this a float; JSON ints compare equal to it.
        tx2_size = len(tx_hex)/2
        tx2 = FromHex(CTransaction(), tx_hex)
        tx2.rehash()
        self.nodes[1].sendrawtransaction(tx_hex, True)
        blockhash = self.nodes[1].generate(1)[0]
        # tx1 double-spends the same output on the disconnected node0
        tx1 = CTransaction()
        tx_outs = [CTxOut(4300000000, CScript([OP_TRUE]))]
        tx1.vout = tx_outs
        tx1.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]
        tx_hex = self.nodes[0].signrawtransaction(ToHex(tx1))['hex']
        tx1 = FromHex(CTransaction(), tx_hex)
        tx1.rehash()
        self.nodes[0].sendrawtransaction(tx_hex, True)
        # connect nodes again and sync blocks, we now expect to get conflict for tx1
        # because tx2 that uses same output as tx1 is already in block.
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes)
        msg = self.zmqSubSocket.recv_multipart()
        assert_equal(msg[0], b"discardedfrommempool")
        body = json.loads(msg[1])
        assert_equal(body["reason"], "collision-in-block-tx")
        assert_equal(body["txid"], tx1.hash)
        assert_equal(body["collidedWith"]["txid"], tx2.hash)
        assert_equal(body["collidedWith"]["size"], tx2_size)
        assert_equal(body["blockhash"], blockhash)
        """Test case 4"""
        # create tx with spendable output for both nodes to use
        unspent = self.nodes[0].listunspent()[0]
        tx_spendable_output = CTransaction()
        tx_outs = [CTxOut(4500000000, CScript([OP_TRUE]))]
        tx_spendable_output.vout = tx_outs
        tx_spendable_output.vin = [CTxIn(COutPoint(int(unspent["txid"], 16), 0))]
        tx_hex = self.nodes[0].signrawtransaction(ToHex(tx_spendable_output))['hex']
        self.nodes[0].sendrawtransaction(tx_hex, True)
        tx_spendable_output = FromHex(CTransaction(), tx_hex)
        tx_spendable_output.rehash()
        self.nodes[0].generate(5)
        # ignore included in block message
        _ = self.zmqSubSocket.recv_multipart()
        sync_blocks(self.nodes)
        # disconnect nodes; mine few blocks on n1; create transaction tx2 on node1 and mine a block
        # then create tx1 on node0 that use same output as tx2.
        disconnect_nodes_bi(self.nodes, 0, 1)
        # extra blocks make node1's chain the longer one after reconnect
        self.nodes[1].generate(5)
        tx2 = CTransaction()
        tx_outs = [CTxOut(4400000000, CScript([OP_TRUE]))]
        tx2.vout = tx_outs
        tx2.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]
        tx_hex = self.nodes[1].signrawtransaction(ToHex(tx2))['hex']
        tx2_size = len(tx_hex)/2
        tx2 = FromHex(CTransaction(), tx_hex)
        tx2.rehash()
        self.nodes[1].sendrawtransaction(tx_hex, True)
        blockhash_tx2 = self.nodes[1].generate(1)[0]
        tx1 = CTransaction()
        tx_outs = [CTxOut(4300000000, CScript([OP_TRUE]))]
        tx1.vout = tx_outs
        tx1.vin = [CTxIn(COutPoint(int(tx_spendable_output.hash, 16), 0))]
        tx_hex = self.nodes[0].signrawtransaction(ToHex(tx1))['hex']
        tx1 = FromHex(CTransaction(), tx_hex)
        tx1.rehash()
        self.nodes[0].sendrawtransaction(tx_hex, True)
        self.nodes[0].generate(1)
        # ignore included in block message
        _ = self.zmqSubSocket.recv_multipart()
        # connect nodes again to cause reorg to n1 chain, we now expect to
        # get conflict for tx1, because tx2 that uses same input as tx1 is already
        # in block on longer chain.
        connect_nodes_bi(self.nodes, 0, 1)
        sync_blocks(self.nodes)
        msg = self.zmqSubSocket.recv_multipart()
        assert_equal(msg[0], b"discardedfrommempool")
        body = json.loads(msg[1])
        assert_equal(body["reason"], "collision-in-block-tx")
        assert_equal(body["txid"], tx1.hash)
        assert_equal(body["collidedWith"]["txid"], tx2.hash)
        assert_equal(body["collidedWith"]["size"], tx2_size)
        assert_equal(body["blockhash"], blockhash_tx2)
if __name__ == '__main__':
    # Standard test-framework entry point.
    ZMQRemovedFromMempool().main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.