text stringlengths 957 885k |
|---|
"""Class to process full HydReSGeo dataset.
Note: If IRUtils.py is not available, you need to download it before the
installation of the package into the `hprocessing/` folder:
.. code:: bash
wget -P hprocessing/ https://raw.githubusercontent.com/felixriese/thermal
-image-processing/master/tiprocessing/IRUtils.py
"""
import configparser
import glob
import itertools
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from .ProcessEnviFile import (ProcessEnviFile, getEnviFile, getEnviHeader,
readEnviHeader)
from .IRUtils import getIRDataFromMultipleZones
class ProcessFullDataset():
    """
    Class to process the full HydReSGeo dataset.

    Parameters
    ----------
    hyp_hdr_path : str
        Path to the ENVI header file of the hyperspectral image
        (low resolution)
    meas_name : str
        Name of measurement
    positions_hyp : dict
        Dictionary with information of the positions config file for the
        hyperspectral camera
    positions_lwir : dict
        Dictionary with information of the positions config file for the
        lwir camera
    zone_list : list
        List of measurement zones in the image. That does not include the
        spectralon (white reference). If a zone needs to be ignored, it needs
        to be removed from this list.
    lwir_path : str
        Path to long-wave infrared (LWIR) data
    soilmoisture_path : str
        Path to soil moisture data
    masks : pd.DataFrame or None
        Masks for hyperspectral images
    grid : tuple of int, optional (default=(1, 1))
        Grid (rows, columns) into which every zone is subdivided
    imageshape : tuple, optional (default= (50, 50))
        Height and width of the image
    time_window_width : int, optional (default=6)
        Time window width to match the hyperspectral image to the soil moisture
        data. The unit of the time window width is minutes.
    hyp_stat_mode : str, optional (default="median")
        Mode for calculating the "mean spectrum" of a hyperspectral image.
        Possible values: median, mean, max, max10 (= maximum of the top 10
        pixels), std.
    hyp_spectralon_factor : float, optional (default=0.95)
        Factor of how much solar radiation the spectralon reflects.
    verbose : int, optional (default=0)
        Controls the verbosity.

    Todo
    -----
    - Add attributes to class docstring.
    - Remove self.date and self.time, only use self.datetime. Remove all
      unnecessary functions of self.date and self.time.
    """

    def __init__(self,
                 hyp_hdr_path: str,
                 meas_name: str,
                 positions_hyp: dict,
                 positions_lwir: dict,
                 zone_list: list,
                 lwir_path: str,
                 soilmoisture_path: str,
                 masks: pd.DataFrame,
                 grid: tuple = (1, 1),
                 imageshape: tuple = (50, 50),
                 time_window_width: int = 6,
                 hyp_stat_mode: str = "median",
                 hyp_spectralon_factor: float = 0.95,
                 verbose=0):
        """Initialize ProcessDataset instance."""
        self.hyp_hdr_path = hyp_hdr_path
        self.meas_name = meas_name
        self.positions_hyp = positions_hyp
        self.positions_lwir = positions_lwir
        self.zone_list = zone_list
        self.lwir_path = lwir_path
        self.soilmoisture_path = soilmoisture_path
        self.masks = masks
        self.grid = grid
        self.imageshape = imageshape
        self.time_window_width = time_window_width
        self.hyp_stat_mode = hyp_stat_mode
        self.hyp_spectralon_factor = hyp_spectralon_factor
        self.verbose = verbose

        # get Envi files; the high-resolution header shares the base name of
        # the low-resolution header plus a "_highres" suffix
        self.envi_hdr_highres_path = self.hyp_hdr_path[:-4] + "_highres.hdr"
        self.hdr, self.envi_img = getEnviFile(self.hyp_hdr_path)
        self.hdr_highres = getEnviHeader(self.envi_hdr_highres_path)
        self.date, self.time = readEnviHeader(self.hdr_highres)

        # set datetime TODO: remove hard-coded timezone (+02:00)
        self.datetime = pd.to_datetime(self.date+" "+self.time+"+02:00",
                                       utc=True)

        # read out header file
        self.wavelengths = self.hdr_highres["Wavelength"]
        self.bbl = self.hdr_highres["bbl"]  # bad band list

        # get measurement index (row of this measurement in positions_hyp;
        # raises if the name is missing or ambiguous)
        self.index_of_meas = int(np.argwhere(
            positions_hyp["measurement"].values == meas_name))
        self.mask = None

        # improvised solution to translate between zone1-8 to A1-D2
        self.zone_dict = {
            "A1": "zone1", "A2": "zone2", "B1": "zone3", "B2": "zone4",
            "C1": "zone5", "C2": "zone6", "D1": "zone7", "D2": "zone8"}

    def process(self) -> pd.DataFrame:
        """
        Process a full dataset.

        Returns
        -------
        pd.DataFrame or None
            Dataframe with hyperspectral, LWIR, and soil moisture data for
            one image. None if the hyperspectral image is empty.
        """
        # set mask
        if self.masks is not None:
            mask_index = self.masks.index[
                self.masks["measurement"] == self.meas_name].tolist()[0]
            # sanity check: mask table and positions table must be aligned
            if self.index_of_meas != mask_index:
                raise IOError(("positions.csv and mask.csv don't have the"
                               "same sequence of dates."))
            self.mask = getMask(
                masks=self.masks,
                index_of_meas=self.index_of_meas,
                imageshape=self.imageshape)

        # random check if hyperspectral image is empty (band 5 as probe)
        if np.sum(self.envi_img[:, :, 5]) == 0:
            if self.verbose:
                print("Error: The hyperspectral image is empty.")
            return None

        # process the hyperspectral image zone by zone
        envi_processor = ProcessEnviFile(
            image=self.envi_img,
            wavelengths=self.wavelengths,
            bbl=self.bbl,
            zone_list=self.zone_list,
            positions=self.positions_hyp,
            index_of_meas=self.index_of_meas,
            mask=self.mask,
            grid=self.grid,
            stat_mode=self.hyp_stat_mode,
            spectralon_factor=self.hyp_spectralon_factor)
        df_hyp = envi_processor.getMultipleSpectra()

        # add datetime as column
        df_hyp["datetime"] = self.datetime

        # add soil moisture data
        df_hyd = self.getSoilMoistureData()
        df_hyd = df_hyd.drop(labels=["zone"], axis=1)

        # add IR data
        df_lwir = self.getLwirData()
        df_lwir = df_lwir.drop(labels=["zone"], axis=1)

        return pd.concat([df_hyp, df_hyd, df_lwir], axis=1)

    def getSoilMoistureData(self):
        """
        Get soil moisture data.

        To match the dates of the soil moisture measurements and the
        hyperspectral image, the timezones are converted to UTC.

        Returns
        -------
        pd.Dataframe
            Dataframe of soil moisture measurements which correspond to the
            hyperspectral image of this instance.

        Todo
        ----
        - Move the CSV file read out into process-function outside this file
        - Add an optional time shift correction between soil moisture data and
          the hyperspectral data.
        """
        soilmoisture_sensors = getUppermostSoilMoistureSensors()

        # read out soil moisture data
        df_sm = pd.read_csv(self.soilmoisture_path)
        df_sm["timestamp"] = pd.to_datetime(df_sm["timestamp"], utc=True)

        sm_dict = {"zone": [], "volSM_vol%": [], "T_C": []}
        for i, sensor in enumerate(soilmoisture_sensors["number"]):
            # only consider sensors in zone_list
            zone = soilmoisture_sensors["zone"].iloc[i]
            if self.zone_dict[zone] not in self.zone_list:
                continue

            # find the measurement of this sensor closest in time to the
            # hyperspectral image; sensor IDs are prefixed with "T"
            nearest_date, time_delta = findNearestDate(
                df_sm[df_sm["sensorID"] == "T"+str(sensor)].timestamp,
                self.datetime)

            # skip the sensor if no measurement lies within the time window
            if time_delta > self.time_window_width / 2:
                if self.verbose:
                    print("Warning: Could not find a soil moisture measurement"
                          "for sensor {0}".format(sensor))
                continue

            nearest_row = df_sm[(df_sm["sensorID"] == "T"+str(sensor)) &
                                (df_sm["timestamp"] == nearest_date)]
            sm_dict["zone"].append(self.zone_dict[zone])
            sm_dict["volSM_vol%"].append(nearest_row["volSM_vol%"].values[0])
            sm_dict["T_C"].append(nearest_row["T_C"].values[0])

        return pd.DataFrame(sm_dict)

    def getLwirData(self):
        """
        Get LWIR data from one of the CSV export files.

        This function is based on code from another repository by the authors:
        https://github.com/felixriese/thermal-image-processing

        Returns
        -------
        pd.DataFrame
            IR data of the current datapoint (matched to date and time).
            Filled with NaNs if no export file lies within the time window.

        Todo
        -----
        - Implement grid-wise LWIR data extraction. (For now, only zone-wise
          data extraction is implemented.)
        """
        # find LWIR file within the correct time window; the timestamp is
        # encoded in the file name as ir_export_<yyyymmdd>_..._<hh-mm-ss>.csv
        lwir_datetime_list = []
        for csvfile in glob.glob(self.lwir_path+"/ir_export_*.csv"):
            csvfile_list = csvfile.split("/")[-1].split("_")
            # TODO: remove hard-coded timezone (+02:00), see __init__
            lwir_datetime = pd.to_datetime(
                csvfile_list[2]+" "+csvfile_list[5][:-4].replace("-", ":") +
                "+02:00", utc=True)
            lwir_datetime_list.append(lwir_datetime)
        nearest_date, time_delta = findNearestDate(
            lwir_datetime_list, self.datetime)

        # check if the nearest datetime is close enough
        if time_delta > self.time_window_width / 2:
            if self.verbose:
                print("Warning: Did not find LWIR data.")
            return pd.DataFrame({"zone": [np.nan], "mean": [np.nan],
                                 "med": [np.nan], "std": [np.nan]})

        # load LWIR CSV file
        # NOTE(review): unlike the glob above, no "/" is inserted after
        # lwir_path here -- presumably lwir_path must end with a path
        # separator for this lookup to succeed; confirm against callers.
        csvfile = glob.glob(self.lwir_path+"ir_export_" +
                            nearest_date.strftime("%Y%m%d")+"_*" +
                            nearest_date.tz_convert("Europe/Berlin").strftime(
                                "%H-%M-%S")+".csv")[0]

        # get data from different zones
        df_lwir_original = getIRDataFromMultipleZones(
            csvpath=csvfile,
            positions=self.positions_lwir.to_dict('list'),
            zone_list=self.zone_list)

        # The `df_lwir_original` results in one row and column names such as
        # "ir_zone1_med". In the next step, one row per zone needs to be
        # generated.
        lwir_dict = {"zone": [], "mean": [], "med": [], "std": []}
        for zone in self.zone_list:
            lwir_dict["zone"].append(zone)
            lwir_dict["mean"].append(
                df_lwir_original["ir_"+str(zone)+"_mean"].values[0])
            lwir_dict["med"].append(
                df_lwir_original["ir_"+str(zone)+"_med"].values[0])
            lwir_dict["std"].append(
                df_lwir_original["ir_"+str(zone)+"_std"].values[0])
        return pd.DataFrame(lwir_dict)
def getMask(masks, index_of_meas, imageshape=(50, 50)):
    """
    Mask image with masks from mask.csv file.

    Parameters
    ----------
    masks : pd.DataFrame or None
        Masks for hyperspectral images
    index_of_meas : int
        Index of the measurement in the file
    imageshape : tuple, optional (default= (50, 50))
        Height and width of the image

    Returns
    -------
    mask : 2D numpy array
        Mask in imageshape with 1 (= true value) and 0 (= mask)
    """
    def cell(column):
        # Scalar lookup in the mask table for this measurement.
        return masks[column][index_of_meas]

    mask = np.ones(imageshape, dtype=int)

    # Zero out everything outside the rectangular region of interest.
    mask[:int(cell("start_row"))] = 0
    mask[int(cell("end_row")):] = 0
    mask[:, :int(cell("start_col"))] = 0
    mask[:, int(cell("end_col")):] = 0

    # Zero out the four wooden bars crossing the image.
    for bar in ("bar1", "bar2", "bar3", "bar4"):
        bar_pixels = getWoodenBarMask(
            [cell(bar + "_p1_x"), cell(bar + "_p1_y")],
            [cell(bar + "_p2_x"), cell(bar + "_p2_y")],
            height=cell(bar + "_height"),
            imageshape=imageshape)
        bar_rows = [pixel[0] for pixel in bar_pixels]
        bar_cols = [pixel[1] for pixel in bar_pixels]
        mask[bar_rows, bar_cols] = 0

    return mask
def getWoodenBarMask(point1, point2, height, imageshape=(50, 50)):
    """
    Get mask for wooden bar.

    Parameters
    ----------
    point1, point2 : list of int
        Coordinates of the two points
    height : int
        Height/width of the bar in y (row) direction
    imageshape : tuple, optional (default= (50, 50))
        Height and width of the image

    Returns
    -------
    wooden_bar : list of tuple (int, int)
        List of pixels to be masked
    """
    # Two parallel border lines of the bar: one through the given points,
    # one shifted by `height` in row direction.
    slope_a, intercept_a = getLineFromPoints(point1, point2)
    slope_b, intercept_b = getLineFromPoints((point1[0] + height, point1[1]),
                                             (point2[0] + height, point2[1]))

    wooden_bar = []
    for row in range(imageshape[0]):
        bound_a = slope_a * row + intercept_a
        bound_b = slope_b * row + intercept_b
        for col in range(imageshape[1]):
            # Collect every pixel strictly between the two border lines.
            if bound_b < col < bound_a:
                wooden_bar.append((row, col))
    return wooden_bar
def getAllSoilMoistureSensors():
    """
    Get information about the soil moisture sensors.

    The sensor data is taken from the HydReSGeo dataset. For other datasets,
    the dictionary `sensors` has to be modified.

    Returns
    -------
    sensors : dict
        Sensor information consisting of number, field, depth, and name.
    """
    numbers = [36554, 36555, 36556, 36547, 36557, 36558,
               36559, 36553, 36549, 36550, 36551, 36552,
               36560, 36562, 36563, 36564, 36565, 36561]
    zones = ["A1", "A1", "A1", "A2", "B1", "B1", "B1", "B2", "C1",
             "C1", "C1", "C1", "C2", "D1", "D1", "D1", "D1", "D2"]
    depths = [2.5, 5.0, 10.0, 5.0, 2.5, 5.0, 10.0, 5.0, 2.5,
              5.0, 10.0, 20.0, 5.0, 2.5, 5.0, 10.0, 20.0, 5.0]
    sensors = {"number": numbers, "zone": zones, "depth": depths}
    # Derived human-readable name: SM_<number>_<zone>_<depth>.
    sensors["name"] = ["SM_{0}_{1}_{2}".format(number, zone, depth)
                       for number, zone, depth in zip(numbers, zones, depths)]
    return sensors
def getUppermostSoilMoistureSensors():
    """
    Get information about the soil moisture sensors.

    Returns
    -------
    sensors : pd.DataFrame
        Sensor information (number, zone, depth, name) of the shallowest
        sensor per zone.
    """
    all_sensors = pd.DataFrame(getAllSoilMoistureSensors())
    shallowest_rows = []
    # For every zone, keep only the sensor with the smallest depth.
    for zone in np.unique(all_sensors["zone"].values):
        in_zone = all_sensors[all_sensors["zone"] == zone]
        shallowest_rows.append(in_zone.iloc[in_zone["depth"].values.argmin()])
    return pd.concat(shallowest_rows, axis=1).T
def findNearestDate(date_list, date):
    """
    Find closest datapoint of each uppermost sensor in time window.

    Adapted from https://stackoverflow.com/a/32237949/3816498 .

    Parameters
    ----------
    date_list : array-like
        List of dates
    date : datetime
        The date, to which the nearest date in `date_list` should be found.

    Returns
    -------
    nearest_date : datetime
        Nearest date to `date` in `date_list`
    time_delta : float
        Absolute time difference in minutes
    """
    nearest_date = min(date_list, key=lambda x: abs(x - date))
    # Bugfix: return the *absolute* difference. Callers compare this value
    # against half a time window; a signed value let dates arbitrarily far
    # in the past pass the `time_delta > window / 2` check.
    time_delta = abs((nearest_date - date).total_seconds()) / 60.
    return nearest_date, time_delta
def readConfig(config_path: str,
               data_directory: str,
               verbose=0) -> dict:
    """
    Read config file to process a dataset.

    Parameters
    ----------
    config_path : str
        Path to config file
    data_directory : str
        Directory of the dataset folder.
    verbose : int, optional (default=0)
        Controls the verbosity.

    Returns
    -------
    config_dict : dict
        Configuration of the processing
    """
    # open config file
    config = configparser.ConfigParser(allow_no_value=True)
    config.read(config_path)
    if verbose:
        print("Config file is valid.")
    if verbose > 1:
        print("Config file sections: {0}.".format(config.sections()))

    config_dict = {}

    # read out data paths (given relative to the dataset directory)
    for var in ["data_hyp", "data_lwir", "data_sm"]:
        config_dict[var] = (data_directory + config["Paths"][var])
    config_dict["data_output"] = config["Paths"]["data_output"]

    # read out positions, ignore-csv-files, and masks
    # (whitespace-separated CSV files)
    for var in ["positions_hyp", "positions_lwir",
                "ignore_hyp_measurements", "ignore_hyp_fields",
                "ignore_hyp_datapoints", "masks_hyp"]:
        # Bugfix: raw string for the separator regex -- "\s" is an invalid
        # escape sequence in a normal string literal.
        config_dict[var] = pd.read_csv(data_directory + config["Paths"][var],
                                       sep=r"\s+")
        if "measurement" in config_dict[var].columns:
            # measurement names must be strings for later comparisons
            config_dict[var]["measurement"] = config_dict[var][
                "measurement"].astype("str")

    # read out grid size, defaulting to no subdivision
    config_dict["grid"] = (1, 1)
    if (config["Process"]["grid_rows"].isdigit() and
            config["Process"]["grid_columns"].isdigit()):
        config_dict["grid"] = (int(config["Process"]["grid_rows"]),
                               int(config["Process"]["grid_columns"]))

    # read out image shape (height, width)
    config_dict["imageshape"] = (50, 50)
    if (config["Process"]["hyp_image_rows"].isdigit() and
            config["Process"]["hyp_image_columns"].isdigit()):
        config_dict["imageshape"] = (
            int(config["Process"]["hyp_image_rows"]),
            int(config["Process"]["hyp_image_columns"]))

    # read out booleans
    config_dict["overwrite_csv_file"] = config["Process"].getboolean(
        "overwrite_csv_file")

    # read out time window width (in minutes)
    config_dict["time_window_width"] = int(
        config["Process"]["time_window_width"])

    # read out hyperspectral spectralon factor
    config_dict["hyp_spectralon_factor"] = float(
        config["Process"]["hyp_spectralon_factor"])

    # read out hyperspectral statistics mode
    config_dict["hyp_stat_mode"] = str(
        config["Process"]["hyp_stat_mode"])

    return config_dict
def getLineFromPoints(point1, point2):
    """
    Get line parameter (y = mx + c) from two points.

    Parameters
    ----------
    point1, point2 : list of int
        Coordinates of the two points (x must differ between the points)

    Returns
    -------
    m, c : float
        Line parameters for y = mx + c
    """
    x1, y1 = point1[0], point1[1]
    x2, y2 = point2[0], point2[1]
    # slope: rise over run, m = (y2 - y1) / (x2 - x1)
    m = (y2 - y1) / (x2 - x1)
    # intercept by plugging the second point back in: c = y2 - m*x2
    c = y2 - m * x2
    return m, c
def processHydReSGeoDataset(config_path: str,
                            data_directory: str,
                            verbose=0) -> pd.DataFrame:
    """
    Process the full HydReSGeo dataset.

    Parameters
    ----------
    config_path : str
        Path to config file
    data_directory : str
        Directory of the dataset folder.
    verbose : int, optional (default=0)
        Controls the verbosity.

    Returns
    -------
    pd.DataFrame
        Output data of the processing. If the output file already exists and
        `overwrite_csv_file` is disabled in the config, the existing file is
        read back and returned unchanged.
    """
    # read the processing configuration (forward verbosity)
    config = readConfig(config_path=config_path,
                        data_directory=data_directory,
                        verbose=verbose)

    # Bugfix: previously this check only printed a warning and the
    # processing (including the final to_csv) ran anyway, overwriting the
    # existing file. Return the existing output instead.
    if (not config["overwrite_csv_file"] and
            os.path.isfile(config["data_output"])):
        print("Processing not executed, file already exists.")
        print("To overwrite the existing file, change the config.")
        return pd.read_csv(config["data_output"], index_col=0)

    # keyword arguments shared by all ProcessFullDataset instances
    params = {
        "positions_hyp": config["positions_hyp"],
        "positions_lwir": config["positions_lwir"],
        "lwir_path": config["data_lwir"],
        "soilmoisture_path": config["data_sm"],
        "masks": config["masks_hyp"],
        "grid": config["grid"],
        "imageshape": config["imageshape"],
        "time_window_width": config["time_window_width"],
        # Consistency fix: these two values were read by readConfig() but
        # never handed to the processing before.
        "hyp_stat_mode": config["hyp_stat_mode"],
        "hyp_spectralon_factor": config["hyp_spectralon_factor"],
        "verbose": verbose,
    }

    output_list = []

    # loop through hyperspectral images
    for hyp_header in tqdm(glob.glob(config["data_hyp"] + "*/*[0-9].hdr")):
        # measurement name and file number are encoded in the path
        meas_name = hyp_header.split("/")[-2].replace("_hyp", "")
        file_number = int(hyp_header.split("/")[-1][4:7])
        zone_list = ["zone" + str(i) for i in range(1, 9)]

        if verbose:
            print("-" * 50)
            print("Processing {0} - file {1}...".format(
                meas_name, file_number))

        # skip measurements that are ignored as a whole
        if meas_name in config["ignore_hyp_measurements"].values:
            if verbose:
                print("Ignoring measurement.")
            continue

        # skip single ignored datapoints (measurement + file number)
        if meas_name in config["ignore_hyp_datapoints"]["measurement"].values:
            if file_number in config["ignore_hyp_datapoints"][
                    config["ignore_hyp_datapoints"]["measurement"] ==
                    meas_name]["filenumber"].values:
                if verbose:
                    print("Ignoring file.")
                continue

        # remove single ignored zones (fields) from this datapoint
        if meas_name in config["ignore_hyp_fields"]["measurement"].values:
            if file_number in config["ignore_hyp_fields"][
                    config["ignore_hyp_fields"]["measurement"] ==
                    meas_name]["filenumber"].values:
                zones_to_drop = config["ignore_hyp_fields"][
                    (config["ignore_hyp_fields"]["measurement"] == meas_name) &
                    (config["ignore_hyp_fields"]["filenumber"] == file_number)
                    ]["zone"].values
                for zone_to_drop in zones_to_drop:
                    zone_list.remove("zone" + str(zone_to_drop))
                if verbose:
                    print("Removed {0} zone(s).".format(len(zones_to_drop)))

        proc = ProcessFullDataset(
            hyp_hdr_path=hyp_header,
            meas_name=meas_name,
            zone_list=zone_list,
            **params)
        datapoint = proc.process()
        if datapoint is not None:
            output_list.append(datapoint)

    output_df = pd.concat(output_list, axis=0, ignore_index=True)
    output_df.to_csv(config["data_output"])
    if verbose:
        print("Successfully executed!")
    return output_df
|
<filename>Pinject.py
import socket
import struct
import sys
from optparse import OptionParser
def checksum(data):
    """
    Compute the 16-bit Internet checksum (RFC 1071) of `data`.

    Parameters
    ----------
    data : bytes or str
        Buffer to checksum. A str is encoded byte-per-character (latin-1)
        for backwards compatibility with the old Python-2 call sites.

    Returns
    -------
    int
        One's-complement checksum in the range 0..0xFFFF.
    """
    if isinstance(data, str):
        # Legacy Python-2 callers passed str; map characters to bytes 1:1.
        data = data.encode("latin-1")
    s = 0
    n = len(data) % 2
    for i in range(0, len(data) - n, 2):
        # Bugfix: indexing bytes yields ints in Python 3; the old ord()
        # calls raised TypeError on bytes input.
        s += data[i] + (data[i + 1] << 8)
    if n:
        # Bugfix: the trailing odd byte is data[-1]; the old code re-read
        # data[i + 1], i.e. the byte *before* the last one.
        s += data[-1]
    # fold the carries back into the low 16 bits
    while (s >> 16):
        s = (s & 0xFFFF) + (s >> 16)
    s = ~s & 0xffff
    return s
class ip(object):
    """Minimal IPv4 header builder for raw-socket packet injection."""

    def __init__(self, source, destination):
        """Store IPv4 header fields for `source` -> `destination`.

        Total length and checksum are left at 0; the kernel fills them in
        for raw sockets.
        """
        self.version = 4
        self.ihl = 5                        # header length in 32-bit words
        self.tos = 0                        # type of service
        self.tl = 0                         # total length, filled by kernel
        self.id = 54321                     # identification field
        self.flags = 0
        self.offset = 0                     # fragment offset
        self.ttl = 255
        self.protocol = socket.IPPROTO_TCP
        self.checksum = 0                   # filled by kernel
        self.source = socket.inet_aton(source)
        self.destination = socket.inet_aton(destination)

    def pack(self):
        """Serialize the header into its 20-byte network-order wire form."""
        first_byte = (self.version << 4) + self.ihl
        frag_field = (self.flags << 13) + self.offset
        return struct.pack("!BBHHHBBH4s4s",
                           first_byte,
                           self.tos,
                           self.tl,
                           self.id,
                           frag_field,
                           self.ttl,
                           self.protocol,
                           self.checksum,
                           self.source,
                           self.destination)
class tcp(object):
    """Minimal TCP segment builder (SYN by default) with checksum support."""

    def __init__(self, srcp, dstp):
        """Initialize a SYN segment from port `srcp` to port `dstp`."""
        self.srcp = srcp
        self.dstp = dstp
        self.seqn = 0
        self.ackn = 0
        self.offset = 5  # data offset: 5 * 4 = 20 bytes (no options)
        self.reserved = 0
        self.urg = 0
        self.ack = 0
        self.psh = 0
        self.rst = 0
        self.syn = 1
        self.fin = 0
        # NOTE(review): htons() here plus "!H" in pack() looks like a double
        # byte swap of the window value -- confirm intended wire value.
        self.window = socket.htons(5840)
        self.checksum = 0
        self.urgp = 0
        self.payload = ""

    def pack(self, source, destination):
        """
        Build the 20-byte TCP header with the checksum filled in.

        Parameters
        ----------
        source, destination : bytes
            Packed 4-byte IPv4 addresses (as returned by socket.inet_aton).

        Returns
        -------
        bytes
            TCP header including the checksum over the pseudo header,
            header, and payload.
        """
        data_offset = (self.offset << 4) + 0
        flags = (self.fin + (self.syn << 1) + (self.rst << 2) +
                 (self.psh << 3) + (self.ack << 4) + (self.urg << 5))
        # first pass: header with a zero checksum field
        tcp_header = struct.pack('!HHLLBBHHH',
                                 self.srcp,
                                 self.dstp,
                                 self.seqn,
                                 self.ackn,
                                 data_offset,
                                 flags,
                                 self.window,
                                 self.checksum,
                                 self.urgp)
        # Bugfix: the payload may be str (legacy usage); it must be bytes
        # before concatenation with the packed header under Python 3.
        payload = self.payload
        if isinstance(payload, str):
            payload = payload.encode("latin-1")
        # pseudo header (used only for the checksum computation)
        reserved = 0
        protocol = socket.IPPROTO_TCP
        total_length = len(tcp_header) + len(payload)
        psh = struct.pack("!4s4sBBH",
                          source,
                          destination,
                          reserved,
                          protocol,
                          total_length)
        tcp_checksum = checksum(psh + tcp_header + payload)
        # second pass: re-pack with the checksum. The checksum is appended
        # with native byte order ('H' without '!') -- this assumes a
        # little-endian host so that the value lands in wire order; confirm.
        tcp_header = struct.pack("!HHLLBBH",
                                 self.srcp,
                                 self.dstp,
                                 self.seqn,
                                 self.ackn,
                                 data_offset,
                                 flags,
                                 self.window)
        tcp_header += struct.pack('H', tcp_checksum) + struct.pack('!H', self.urgp)
        return tcp_header
def main():
    """Parse CLI options, build a raw TCP/IP SYN packet, and inject it.

    Requires root privileges (raw socket with IPPROTO_RAW).
    """
    parser = OptionParser()
    parser.add_option("-s", "--src", dest="src", type="string",
                      help="Source IP address", metavar="IP")
    parser.add_option("-d", "--dst", dest="dst", type="string",
                      help="Destination IP address", metavar="IP")
    options, args = parser.parse_args()

    # destination is mandatory
    if options.dst is None:
        parser.print_help()
        sys.exit()
    dst_host = socket.gethostbyname(options.dst)
    if options.src is None:
        # fall back to the IP of the current network interface
        src_host = socket.gethostbyname(socket.gethostname())
    else:
        src_host = options.src

    print("[+] Local Machine: %s" % src_host)
    print("[+] Remote Machine: %s" % dst_host)

    # raw socket: we provide the complete IP header ourselves
    s = socket.socket(socket.AF_INET,
                      socket.SOCK_RAW,
                      socket.IPPROTO_RAW)
    print("[+] Raw socket created")  # Bugfix: typo "scoket"

    # Bugfix: payload must be bytes under Python 3 so it can be
    # concatenated with the packed headers.
    data = b"TEST!!"
    print("[+] Data to inject: %s" % data.decode())

    # IP header
    print("[+] Constructing IP Header")
    ipobj = ip(src_host, dst_host)
    iph = ipobj.pack()

    # TCP header (checksum computed over the pseudo header)
    print("[+] Constructing TCP Header")
    tcpobj = tcp(1234, 80)
    tcpobj.payload = data
    tcph = tcpobj.pack(ipobj.source,
                       ipobj.destination)

    # packet injection
    packet = iph + tcph + data
    s.sendto(packet, (dst_host, 0))
    print("[+] Packet Injected!")


if __name__ == "__main__":
    main()
|
<gh_stars>1-10
# %%
from tqdm import trange
# %matplotlib widget
import matplotlib.pyplot as plt
from smpr3d.core import Sparse4DData, Metadata4D
from pathlib import Path
import numpy as np
# from ipywidgets import AppLayout, FloatSlider, GridspecLayout, VBox, Tab, Box, HBox, IntSlider
# plt.ioff()
import torch as th
from smpr3d.util import *
from smpr3d.functional import SMatrixSubpix, SparseAmplitudeLoss, SparseSmoothTruncatedAmplitudeLoss, SparseSmoothTruncatedAmplitudeProx
from numpy.fft import fftshift
import torch.optim as optim
from skimage import data
from scipy.ndimage import zoom
from skimage.color import rgb2gray
from skimage.filters import gaussian
A = SMatrixSubpix.apply
smooth_amplitude_loss = SparseSmoothTruncatedAmplitudeLoss.apply
from smatrix.util import *
from scipy.ndimage.interpolation import rotate
# %%
# --- Build a complex-valued test object from the "astronaut" sample image ---
M1 = 60
M = np.array([M1, M1])
s = 256
zoomf = 0.35
amp_range = 0.1   # amplitude variation of the object
amp_min = 0.9     # minimum amplitude (mostly transparent object)
margins = 0
oa = rgb2gray(data.astronaut())[0:0 + s, 150:150 + s]
oa1 = oa / oa.max()
oa1 = gaussian(oa1, 1 * 1 / zoomf)
oa2 = oa1 * amp_range
oa2 += amp_min
oph = rotate(oa1, 0)
# Bugfix: np.eth does not exist -- the object is amplitude * exp(i * phase),
# i.e. np.exp.
ob = zoom(oa2, zoomf) * np.exp(1j * 0.2 * np.pi * zoom(oph, zoomf))
# embed the object in a (possibly) larger zero-padded array
obr = np.zeros((int(ob.shape[0] + 2 * margins), int(ob.shape[1] + 2 * margins))).astype(np.complex64)
obr[margins:margins + ob.shape[0], margins:margins + ob.shape[1]] = ob
# stack real/imag parts so torch can view the array as complex
obr1 = np.ones((obr.shape[0], obr.shape[1], 2)).astype(np.float32)
obr1[..., 0] = obr.real
obr1[..., 1] = obr.imag
T1 = th.view_as_complex(th.from_numpy(obr1))
N = th.tensor([obr.shape[0], obr.shape[1], 2]).int()
t = T1.numpy()
zplot([t.real, t.imag], cmap=['inferno', 'inferno'], figsize=(9, 5))
# 7x7 raster scan with 5-pixel steps (positions jittered later)
r1 = advanced_raster_scan(7, 7, fast_axis=1, mirror=[1, 1],
                          theta=0, dy=5, dx=5)
#%%
# --- Microscope parameters and probe wave functions ---
E = 300e3                 # beam energy in eV
lam = wavelength(E)       # electron wavelength for this energy
defocus_nm = 200
det_pix = 14 * 5e-6       # detector pixel size
alpha_rad = 3e-3          # convergence semi-angle
dx_angstrom = 1.32        # real-space sampling
q = get_qx_qy_2D_th([M1, M1], [dx_angstrom, dx_angstrom], np.float32, fft_shifted=False).cuda()
from skimage.transform import rescale, downscale_local_mean
fac = 8
# smooth circular aperture, anti-aliased by oversampling + downscaling
ap = fftshift(gaussian(downscale_local_mean(sector_mask(fac*M,fac//2*M,fac//4*M[0]),(fac,fac)),1))
plot(ap)
Ap = th.as_tensor(ap).cuda() * 1e2
Psi_gen = ZernikeProbeSingle(q, lam, fft_shifted=True)
# aberration coefficient vectors; index 0 presumably is defocus
C_target = th.zeros((12)).cuda()
C_target[0] = 5500
C_model = th.zeros((12)).cuda()
C_model[0] = 850
Psi_target = Psi_gen(C_target, Ap)
# NOTE(review): Psi_model is built from C_target, so C_model (850) is never
# used -- probably should be Psi_gen(C_model, Ap); confirm.
Psi_model = Psi_gen(C_target, Ap)
psi_model = th.fft.ifft2(th.view_as_complex(Psi_target))
print(psi_model.dtype)
# plotcx(psi_model.cpu().numpy())
plotAbsAngle(psi_model.cpu().numpy())
# %%
# --- Rebuild Fourier coordinates and simulate target measurements ---
qnp = fourier_coordinates_2D([M1,M1], [dx_angstrom,dx_angstrom], centered=False)
q = th.as_tensor(qnp).float().cuda()
Psi_gen = ZernikeProbeSingle(q, lam, fft_shifted=True)
Ap0 = Ap
C1 = C_target
# Psi_model = Psi_gen(C1, Ap0).detach()
# psi_model = th.fft.ifft2(Psi_model, norm='ortho')
#
# Psi_model = Psi_model.unsqueeze(0)
# psi_model = psi_model.unsqueeze(0).cuda()
# psi_model.requires_grad_(False)
# fig, ax = plt.subplots(1, 2, figsize=(10, 5))
# ax[0].imshow(np.angle(psi_model[0].cpu()))
# ax[1].imshow(np.abs(psi_model[0].cpu()))
# plt.show()
# %%
Ap = Ap0.cuda()
q = q.cuda()
# %%
T = T1.unsqueeze(0).cuda()
# jitter all scan positions except the first, then clamp to the scan field
r = th.from_numpy(r1).cuda()
r[1:] += th.randn_like(r[1:]) * 3
r[r<0] = 0
r[r>30] = 30
psi_model = psi_model.unsqueeze(0)
# forward model: diffraction amplitudes for object T, probe, and positions
a_target = A(T, psi_model, r)
I_target = a_target**2
#%%
print(f'psi_model norm: {th.norm(psi_model)**2}')
print(f'I_target norm: {th.sum(I_target[0])}')
#%%
f, ax = plt.subplots()
ax.imshow(a_target[2].cpu())
plt.show()
#%%
plotmosaic(fftshift(a_target.cpu().numpy(),(1,2)),cmap='viridis')
#%%
# convert the dense 4D intensities into the sparse measurement format
d2 = Sparse4DData.from_dense(I_target.reshape((7,7,60,60)).cpu().numpy(),make_float=True)
# %%
# --- Reconstruction variables and optimizer ---
margin = 0
M = th.tensor(M).int().cuda()
MY = MX = M1
# field of view: maximum scan position plus detector size plus margin
N = th.tensor(th.ceil(r.max(axis=0).values).int()) + M + margin
K = r.shape[0]  # number of scan positions
print('N:', N)
print('M:', M)
print('K:', K)
# complex S-matrix model, initialized to ones, optimized below
S_model = th.ones((1,) + tuple(N), requires_grad=True, device=th.device('cuda:0'), dtype=th.complex64)
pos = th.as_tensor(r + margin / 2, device=S_model.device)
indices_target = th.as_tensor(d2.indices, device=S_model.device)
counts_target = th.as_tensor(d2.counts, device=S_model.device)
ish = indices_target.shape
indices_target = indices_target.view((K, ish[-1]))
# amplitudes = sqrt of the measured counts
counts_target = th.sqrt(counts_target.view((K, ish[-1])).type(th.float32))
# optimizer = optim.Adam([T_model, C_model, pos], lr=1e-2)
lr = 10e-3
optimizer = optim.SGD([{'params': [S_model], 'lr': lr},
                       {'params': [psi_model], 'lr': lr}], lr=lr, momentum=0.5)
# optimizer = optim.SGD([{'params': [S_model], 'lr': lr}], lr=lr)
# optimizer = optim.SGD([{'params': [S_model], 'lr': lr}], lr=lr)
# %%
eps2 = 1e-3
i = 0
it = 1
it = 150
beta = 0.5
probe_start = 10
S_model.requires_grad = True
psi_model.requires_grad = False
pos.requires_grad = False
n_batches = K // 2
divpoints = array_split_divpoints_ntotal(K, n_batches)
z = a * th.eth(1j * th.angle(Psi_model))
z_tmp = th.zeros_like(z)
Lambda = th.zeros_like(z)
AtA = th.zeros(S_model.shape, dtype=th.float32)
u = S_model
u_tmp = th.zeros_like(u)
psi = psi_model.copy()
psi_tmp = th.zeros_like(psi)
psi_denom = th.zeros(psi.shape, dtype=th.float32)
psi_bc = th.broadcast_to(psi[None, ...], (K, MY, MX))
print(f"psi_bc norm: {th.norm(psi_bc)**2}")
AtA = Qoverlap_real(r, th.abs(psi_bc) ** 2, AtA)
plot(AtA.get(), 'AtA ptycho')
plotAbsAngle(z[0].get(), 'z[0] ptycho')
# init
u = At(z, psi, r, u)
plotAbsAngle(u[MY // 2:-MY // 2, MX // 2:-MX // 2].get(), 'At(z, psi, r, u) ptycho')
u /= AtA
plotAbsAngle(u[MY // 2:-MY // 2, MX // 2:-MX // 2].get(), 'At(z, psi, r, u)/AtA ptycho')
def sgn(x):
    """Complex signum: x / |x|, i.e. the unit-magnitude phase factor of x."""
    return th.div(x, th.abs(x))
#%%
# --- Main ADMM reconstruction loop ---
i = 0
for i in trange(100):
    # back-transform the relaxed measurement variable
    z_hat = th.fft.ifft2(z + Lambda / beta, norm='ortho')
    # small_intensities =
    # z_tmp[:] = 0
    # probe update: least-squares solution given the current object u
    psi_denom = th.sum(th.abs(Qsplit(r, u, z_tmp)) ** 2, 0)
    psi = th.sum(th.conj(Qsplit(r, u, z_tmp)) * z_hat, 0) / psi_denom
    # plotcx(psi, 'psi')
    # plotAbsAngle(psi.get(), 'psi ptycho')
    AtA[:] = 0
    # NOTE(review): lowercase `k` is undefined -- presumably the number of
    # scan positions K from the setup cell; confirm.
    psi_bc = th.broadcast_to(psi[None, ...], (k, MY, MX))
    AtA = Qoverlap_real(r, th.abs(psi_bc) ** 2, AtA)
    h2 = th.max(AtA)
    # NOTE(review): `r22` is undefined anywhere in this script -- this line
    # cannot run as written; confirm the intended regularization factor.
    M2 = (h2 <= eps2) * eps2 + (h2 > eps2) * h2 * r22
    # u_tmp[:] = 0
    # regularized object update (Tikhonov-damped normal equations)
    u = (AtF(z_hat, psi, r, u_tmp) + M2 * u) / (AtA + M2)
    z_hat[:] = 0
    z_hat = A(u, psi, r, z_hat)
    # NOTE(review): `a` (measured amplitudes) is undefined in this view,
    # see the initialization cell above.
    err = float(th.norm(th.abs(z_hat) - a) / th.sum(a))
    print(f"{i} error: {err:3.3g}")
    # amplitude projection with relaxation (keep phase, blend magnitudes)
    z_hat -= Lambda / beta
    z_hat_flag = th.abs(z_hat) != 0
    z = (a + beta * th.abs(z_hat)) / (1 + beta) * (sgn(z_hat) * z_hat_flag) + (1 - z_hat_flag) * z
    z_hat += Lambda / beta
    # dual variable update
    Lambda += beta * (z - z_hat)
    plotcx(u.get(), f"{i}")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 14 19:45:03 2020
@author: Uwe
"""
from bs4 import BeautifulSoup
import requests, time, getpass
class Crawler():
    """Poll a Heise Preisvergleich page for offers below a price threshold."""

    def __init__(self, url=None, max_price=None, sleep_time = 40, alert_by_mail=False):
        """Initialize the crawler.

        Parameters
        ----------
        url : str, optional
            Heise Preisvergleich URL to watch; prompted for if None.
        max_price : int, optional
            Price threshold; prompted for if None.
        sleep_time : int, optional (default=40)
            Seconds to wait between two requests.
        alert_by_mail : bool, optional (default=False)
            If True, send a notification via Gmail when a match is found.
        """
        self.PREFIX_ = "https://www.heise.de/preisvergleich/"
        if max_price is None:
            max_price = int(input("Maximum price (as integer):"))
        if max_price <= 0:
            raise ValueError(f"Maximum price value ({max_price}) is set to zero or lower.")
        self.MAX_PRICE_ = max_price
        if url is None:
            url = input("URL to crawl:")
        if not url.startswith(self.PREFIX_):
            raise ValueError(f"Given URL ({url}) is not a URL from Heise Preisvergleich.")
        self.URL_ = url
        if sleep_time <= 0:
            raise ValueError("Sleep time between requests is set to zero or lower.")
        self.SLEEP_TIME_ = sleep_time
        self.ALERT_MAIL_ = alert_by_mail
        if self.ALERT_MAIL_:
            # only implemented for gmail
            self.USERNAME_ = input("Gmail Username:")
            self.PASSWORD_ = getpass.getpass("Password:")
            pass
        self.FOUND_MATCH_ = False

    def crawl(self):
        """Poll the URL until an offer cheaper than MAX_PRICE_ is found.

        Requests the page every SLEEP_TIME_ seconds; when at least one
        offer is strictly below MAX_PRICE_, print (and optionally email)
        the matches and stop."""
        while not self.FOUND_MATCH_:
            # get page data
            session = requests.session()
            response = session.get(self.URL_)
            session.close()

            # check for server errors
            response.raise_for_status()

            # prices are rendered in .gh_price spans, e.g. "€ 123,45"
            soup = BeautifulSoup(response.text, "lxml")
            itemPriceClasses = soup.find_all("span", {"class": "gh_price"})
            itemPrices = [float(item.find_all("span", "notrans")[0]
                                .text[2:]
                                .replace(",", "."))
                          for item in itemPriceClasses]
            try:
                min(itemPrices)
            except:
                # NOTE(review): bare except; min() only raises ValueError on
                # an empty list here -- consider narrowing.
                time.sleep(self.SLEEP_TIME_)
                continue
            if min(itemPrices) < self.MAX_PRICE_:
                # match found -> alert with link
                self.FOUND_MATCH_ = True
                indexes = [idx for idx, price in enumerate(itemPrices) if price < self.MAX_PRICE_]
                prices = [round(itemPrices[i], 2) for i in indexes]
                itemClasses = soup.find_all('a', class_='productlist__link')
                # get names
                itemNames = [item.find_all("span", "notrans")[0]
                             .text[1:-1]
                             for item in itemClasses]
                # get links as well
                itemLinks = [self.PREFIX_ + item["href"] for item in itemClasses]
                # filter for matches
                itemNames = [itemNames[idx] for idx in indexes]
                itemLinks = [itemLinks[idx] for idx in indexes]
                print(f"Found {len(indexes)} matches with prices lower than {self.MAX_PRICE_}:\n")
                for idx, (price, name, link) in enumerate(zip(prices, itemNames, itemLinks)):
                    print(f"\t{idx+1}:\t{name} with a price of {price}.")
                    print(f"\t\t{link}\n")
                if self.ALERT_MAIL_:
                    import smtplib, email
                    try:
                        server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
                        msg = email.message.EmailMessage()
                        msg["Subject"] = "Heise Preisvergleich Match"
                        msg["From"] = self.USERNAME_
                        msg["To"] = self.USERNAME_
                        body = f"Found {len(indexes)} matches with prices lower than {self.MAX_PRICE_}€:\n"
                        for price, name, link in zip(prices, itemNames, itemLinks):
                            body = body + f"\t{name} with a price of {price}€.\n"
                            body = body + f"\t{link}\n\n"
                        msg.set_content(body)
                        server.login(self.USERNAME_, self.PASSWORD_)
                        server.send_message(msg)
                        server.quit()
                    except:
                        # NOTE(review): bare except hides SMTP/auth errors
                        print("Failed to send mail.")
                        pass
            else:
                # wait SLEEP_TIME_ seconds until the next request
                time.sleep(self.SLEEP_TIME_)
|
<filename>branches/process-threads-test/source.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import sys
import re
import socket
from datetime import date
from pygtail import Pygtail
from databasemodel import DatabaseModel
class Source(threading.Thread):
    """Base class for log sources, run as a thread.

    NOTE(review): passing `verbose` to threading.Thread.__init__ only works
    on Python 2; the parameter was removed in Python 3 -- confirm the target
    interpreter version.
    """

    def __init__(self, db_name=None, group=None, target=None, name=None,
                 args=(), source=None, verbose=None):
        """
        Constructor of the Source class.
        """
        threading.Thread.__init__(self, group=group, target=target, name=name, verbose=verbose)
        return

    def run(self):
        """
        Override of the run method of the Thread class.
        """
        pass

    def join(self):
        """
        Override of the join method of the Thread class.

        As the method that gathers the thread's results, it is used to
        return the values that we need to process in the controller.
        """
        pass

    def process(self):
        # to be implemented by subclasses
        pass

    def get_log_values(self):
        # to be implemented by subclasses
        pass
class Firewall(Source):
    # Parses iptables-style firewall log lines and loads them into the database.
    def __init__(self, db_name=None, group=None, target=None, name=None,
                 args=(), source=None, verbose=None):
        # NOTE(review): literal defaults are passed to Source.__init__ instead of
        # forwarding the received arguments; Source currently ignores them anyway.
        Source.__init__(self, db_name=None, group=None, target=None, name=None,
                 args=(), source=None, verbose=None)
        self.args = args
        self._source_ = source
        self.type_source = source['T']   # source type
        self.model_source = source['M']  # source model
        self.path_source = source['P']   # path to the log file
        self.db = db_name                # database name
        self.result = []                 # tokenized log lines accumulate here
    def run(self):
        """
        Thread.run override: read the log file and store each non-empty
        line, split into tokens, in self.result.
        """
        self.line = []
        self.log_file = open(self.path_source, 'r')
        #for self.line in Pygtail(self.path_source):
            #sys.stdout.write(self.line)
        for self.line in self.log_file:
            if(self.line.__len__() > 1): # length <= 1 means the log line is empty
                self.result.append(re.split("\W? ", self.line))
        #print "en ejecución con parámetros %s y %s" % (self.args, self._source_)
        return
    def get_log_values(self, line):
        """Extract the iptables fields of one tokenized log line into a dict."""
        self.insert_db = {} # dict with the iptables log values
        # Rebuild the date as "<current year>/<month token>/<day token>".
        self.day_log = "" + str(date.today().year) + "/" + line[0] + "/" + line[1] + ""
        self.insert_db["Timestamp"] = [self.day_log + " - " + line[2]]
        # NOTE(review): get_ip receives the token *list* while regexp receives
        # str(line); re.search on a list would raise -- confirm get_ip's input.
        self.insert_db["S_IP"] = [self.get_ip('SRC',line)]
        self.insert_db["D_IP"] = [self.get_ip('DST',line)]
        self.insert_db["S_PORT"] = [self.regexp('SPT',str(line))]
        self.insert_db["D_PORT"] = [self.regexp('DPT',str(line))]
        self.insert_db["Protocol"] = [self.regexp('PROTO',str(line))]
        self.insert_db["S_MAC"] = [self.regexp('MAC',str(line))]
        # NOTE(review): D_MAC reuses the same 'MAC' pattern as S_MAC, so both
        # fields get the first MAC match -- confirm this is intended.
        self.insert_db["D_MAC"] = [self.regexp('MAC',str(line))]
        # Fetch the key for the IP_ID of Sources from the database
        self.insert_db["Info_RAW"] = [re.sub('\[','',re.sub('\n',''," ".join(line)))]
        # Insert the data into a row of the Process table and pass its id to this entry
        #self.insert_db["Info_Proc"] =
        self.insert_db["TAG"] = [self.get_tag(line)]
        return self.insert_db
    def regexp(self, source, values):
        # Return the value following "<source>=" in values, e.g. "SPT=1234" -> "1234".
        return (((re.compile(source + '=\S+')).search(values)).group(0)).split(source + '=')[1].strip("',")
    def get_tag(self, values):
        # Extract the text between "MSG=" and " IN" as the log tag.
        self.string = " ".join(values)
        return (re.compile('MSG=(.*) IN')).search(self.string).group(1)
    def get_ip(self, source, values):
        # Resolve the IP found after "<source>=" and record it in the 'ips' table.
        # NOTE(review): RowsDatabase is not defined/imported in this module, and
        # self._db_ only exists once process() has run -- confirm the call order.
        self.ip_result = (((re.compile(source + '=\S+')).search(values)).group(0)).split(source + '=')[1].strip("',")
        self.rows = RowsDatabase(self._db_.num_columns_table('ips'))
        self.hostname, self.aliaslist, self.ipaddrlist = socket.gethostbyaddr(self.ip_result)
        self.rows.insert_value((self.ip_result, self.hostname, ))
        #TODO: this part is still missing
        self._db_.insert_row('ips',self.ip_result)
    def process(self):
        """
        Process the parsed data and insert it into the
        corresponding database.
        """
        self._db_ = DatabaseModel(self.db)
        self.dictionary = {}
        # Now insert the fields extracted from the iptables log
        for self.i in range(self.items_list()):
            self.dictionary = self.get_log_values(self.result[self.i])
            print self.dictionary
        self._db_.close_db()
    def items_list(self):
        """
        Return the number of items in the result list (not the number
        of elements inside each individual item).
        """
        self.count = 0
        # No distinction is made here between empty and non-empty log
        # lines; that distinction happens when each item's information
        # is extracted.
        for self.aux in self.result:
            self.count += 1
        return self.count
    def join(self):
        """
        Thread.join override: once the thread has finished, process the
        collected results and return them to the controller.
        """
        # super(Source, self) resolves past Source in the MRO, i.e. this
        # calls threading.Thread.join directly.
        super(Source, self).join()
        self.process()
        return self.result
|
<gh_stars>0
from tkinter import *
# --- Main window ------------------------------------------------------------
janelaCliente = Tk()
janelaCliente.geometry("550x200")
janelaCliente.title("Clientes")
janelaCliente.configure(background='light blue')
# --- Menu bar with Arquivo / Editar / Consultar cascades --------------------
# NOTE(review): none of the menu commands have a `command=` callback yet.
barraMenu = Menu(janelaCliente)
janelaCliente.config(menu = barraMenu)
subMenu = Menu(barraMenu)
barraMenu.add_cascade(label="Arquivo",menu=subMenu)
subMenu.add_command(label="Novo")
subMenu.add_command(label="Abrir")
subMenu.add_command(label="Salvar")
subMenu2 = Menu(barraMenu)
barraMenu.add_cascade(label="Editar",menu=subMenu2)
subMenu2.add_command(label="Desfazer")
subMenu2.add_command(label="Refazer")
subMenu3 = Menu(barraMenu)
barraMenu.add_cascade(label="Consultar",menu=subMenu3)
subMenu3.add_command(label="Nome do Cliente")
subMenu3.add_command(label="Telefone")
subMenu3.add_command(label="Endereço")
subMenu3.add_command(label="Bairro")
subMenu3.add_command(label="CPF")
# --- Widget construction: form frame, separators, labels/entries, buttons ---
frameCampos = Frame(janelaCliente,background='light blue')
separator1 = Frame(janelaCliente,height=2, bd=1, relief=RAISED)
separator2 = Frame(janelaCliente,height=2, bd=1, relief=RAISED)
labelTitulo = Label(text = "CADASTRO DE CLIENTES",background='light blue')
labelCód = Label(frameCampos,text = "Código do Cliente:",background='light blue')
entryCód = Entry(frameCampos,width=5)
labelNome = Label(frameCampos,text = "Nome do Cliente:",background='light blue')
entryNome = Entry(frameCampos,width=38)
labelEndereço = Label(frameCampos,text = "Endereço:",background='light blue')
entryEndereço = Entry(frameCampos,width=30)
labelCidade = Label(frameCampos,text = "Cidade:",background='light blue')
entryCidade = Entry(frameCampos,width=22)
labelBairro = Label(frameCampos,text = "Bairro:",background='light blue')
entryBairro = Entry(frameCampos,width=15)
labelTelefone = Label(frameCampos,text = "Telefone:",background='light blue')
entryTelefone = Entry(frameCampos,width=15)
labelNum = Label(frameCampos,text = "Nº:",background='light blue')
entryNum = Entry(frameCampos,width=6)
labelCPF = Label(frameCampos,text = "CPF:",background='light blue')
entryCPF = Entry(frameCampos,width=14)
botNovo = Button(text = "Novo",background='light green')
botRegistrar = Button(text = "Cadastrar",background='light green')
botAlterar = Button(text = "Alterar",background='light green')
botExcluir = Button(text = "Excluir",background='red')
# --- Grid layout of the form fields inside frameCampos ----------------------
# NOTE(review): labelBairro/labelNum are gridded into column 1 of rows 2-3,
# the same cells used by entryEndereço/entryCidade -- confirm the intended layout.
labelCód.grid(column=0, row=0)
entryCód.grid(columnspan=5,column=1,row=0,sticky=W)
labelNome.grid(column=0, row=1,sticky=E)
entryNome.grid(column=1,row=1,sticky=W)
labelEndereço.grid(column = 0, row=2,sticky=E)
entryEndereço.grid(column = 1, row=2,sticky=W)
labelCidade.grid(column=0,row=3,sticky=E)
entryCidade.grid(column=1,row=3,sticky=W)
labelBairro.grid(column = 1, row=3,sticky=E)
entryBairro.grid(column = 2, row=3,sticky=W)
labelNum.grid(column = 1, row=2,sticky=E)
entryNum.grid(column = 2, row=2,sticky=W)
labelTelefone.grid(column = 0, row=4,sticky=E)
entryTelefone.grid(column = 1, row=4,sticky=W)
labelCPF.grid(column=0, row =5,sticky=E)
entryCPF.grid(column=1, row =5,sticky=W)
# --- Pack top-level pieces (title, separators, form, buttons) and run -------
labelTitulo.pack()
separator1.pack(fill=X, padx=5, pady=5)
frameCampos.pack()
separator2.pack(fill=X, padx=5, pady=5)
botNovo.pack(side = LEFT,padx = 10)
botRegistrar.pack(side = LEFT,padx = 10)
botAlterar.pack(side = LEFT,padx = 10)
botExcluir.pack(side = LEFT,padx = 10)
janelaCliente.mainloop()
|
<reponame>emmettk/pvrsex
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 29 10:09:14 2017
Modified from SceneTimer_loopSleep.py on 18 Sep 2018.
@author: <NAME>
Have Streams7 record from all devices, starting a new scene every n seconds.
Use Streams time rather than system time
If the wait before recording is greater than 1 hr, recalibrate the pause at 30 min and 5 min before the start time.
Add a loop to the sleep so the script can be killed at any time (Streams only allows a script to be killed after finishing current line)
This may subsume the 30 min and 5 min checks.
Allow for both evening and morning run times to be set.
Version 20 Sep 2018 - No longer calls them "evening" and "morning" runs
"""
import SPython
import datetime as dt
import time
import math
def groupDevices():
    """
    From Levi's code.
    Get the handle for all devices in the system.
    The 0, 0 means we are not looking for a device in any
    particular movie or scene. Just devices in the main
    Streams device list. Each new video device will be
    added to a video device list.
    Returns this video device list.

    NOTE(review): the Streams API names used here (sFindFirstDevice,
    sGetDeviceDataType, tdVIDEO, ...) are presumably injected by the
    SPython/Streams scripting environment -- they are not defined in
    this file.
    """
    vidDevList = []  # make an empty list
    GPSDev = 0  # NOTE(review): the GPS handle is found but never used or returned.
    hDev = sFindFirstDevice(0, 0)
    while hDev:
        curType = sGetDeviceDataType(hDev, 0)
        devName = sGetName(hDev)
        if curType == tdVIDEO:
            vidDevList.append(hDev)
            print('Found video device ' + devName)
        if curType == tdGPS:
            GPSDev = hDev
            print('Found GPS device ' + devName)
        hDev = sFindNextDevice()
    # Select all the video devices found above.  The first call passes 1,
    # the rest pass 0 -- presumably "replace selection" vs "add to
    # selection"; confirm against the Streams documentation.
    sdErr = sSelectDevice(vidDevList[0], 1)
    for i in range(1, len(vidDevList)):
        sdErr = sSelectDevice(vidDevList[i], 0)
    # print all video devices selected
    for i in range(0, len(vidDevList)):
        if sIsDeviceSelected(vidDevList[i]):
            print('Device ' + sGetName(vidDevList[i]) + ' selected for video')
    # Group all selected devices
    gsdErr = sGroupSelectedDevices('VideoGroup')
    return vidDevList
def setStopCondition(vidDevList, count, trigger = "ms"):
    """
    Install a recording stop condition on every video device.

    Parameters
    ----------
    vidDevList : list
        Video device handles, e.g. from groupDevices().
    count : int
        Frame count (trigger="frames") or duration in milliseconds
        (trigger="ms").
    trigger : str
        Either "frames" or "ms".

    Raises
    ------
    ValueError
        If trigger is invalid.  (Previously an invalid trigger only
        printed an error per device, yet still reported success.)
    """
    if trigger not in ("frames", "ms"):
        raise ValueError("ERROR. INVALID TRIGGER: " + repr(trigger))
    # Remove all previous stopping conditions, then set a fresh stopping
    # condition on every video device.
    sRemoveAllRecordStoppingConditions()
    for hDev in vidDevList:
        if trigger == "frames":
            sAddRecordStoppingCondition(hDev, rscSTOP_ON_FRAME_COUNT, 0, 0, count)
        else:
            sAddRecordStoppingCondition(hDev, rscSTOP_AFTER_TIME, 0, count, 0)
    print('All video devices will stop after ' + str(count) + ' '+ trigger)
def recordScene(vidDevList):
    """
    Record a scene and return its handle.
    Adapted from Levi.

    Blocks until the recording finishes (swoWAIT_TO_FINISH), then
    returns the handle of the scene that was just recorded.
    """
    # start a recording. Pause program execution until
    # the recording is finished.
    print('Recording....')
    rErr = sRecord(swoWAIT_TO_FINISH)
    # get the handle of the current active scene. This
    # should be the scene we just recorded.
    hScene = sGetActiveScene()
    return hScene
def nameScene(hScene, vidDevList):
    """
    Print number of frames and scene start time.
    Name the scene after its start time, formatted as hhmmss.
    Adapted from Levi.
    """
    # Get the number of frames recorded from the current movie,
    # the active scene, and all devices. Print this
    # to the screen.
    for i in range(0, len(vidDevList)):
        numFramesRecorded = sGetNumRecordedFrames(0, hScene, vidDevList[i])
        print(str(numFramesRecorded) + ' Frames Recorded on ' + sGetName(vidDevList[i]))
    # Get the start time of the recorded scene to set the
    # scene name. First get the Scene start time in Streams
    # time (number of 100 nanoseconds since Jan, 1 1601).
    # next convert it into standard unix time (seconds
    # since Jan 1, 1970). Then convert unix time to a
    # date and time string. Print the date and time of the
    # start of the scene.
    streamsSceneTime = sGetSceneStartTime(hScene)
    pythonSceneTime = sConvertStreamsToPythonTime(streamsSceneTime)
    pythonSceneTimeString = time.ctime(pythonSceneTime)
    print('The active scene started on ' + sGetTimeString(streamsSceneTime))
    # construct a string that is hhmmss from the date
    # and time string. Print this to the screen. Next
    # set the name of the recorded scene in the active movie to
    # hhmmss of the start of the scene.
    # ctime() format is 'Www Mmm dd hh:mm:ss yyyy', so the slices
    # [11:13], [14:16], [17:19] are hh, mm and ss respectively.
    sceneName = pythonSceneTimeString[11:13] + pythonSceneTimeString[14:16] + pythonSceneTimeString[17:19]
    print('Active scene name set to ' + sceneName)
    ssnErr = sSetSceneName(0, hScene, sceneName)
def compute_runtime(length, buffer):
    """
    Convert a scene length into a recording duration in milliseconds.

    Takes a length as a datetime.timedelta and a buffer in seconds;
    returns (length - buffer) expressed in milliseconds.
    """
    remaining_seconds = length.total_seconds() - buffer
    return remaining_seconds * 1000
def make_starttime_list(starttime, stoptime, scenetime, pausetime = dt.timedelta(minutes = 0)):
    """
    Build the list of scene start times.

    Starting from *starttime*, keeps adding (scenetime + pausetime)
    while the next candidate is still strictly before *stoptime*.
    Accepts datetimes plus timedeltas for scenetime/pausetime.
    """
    step = scenetime + pausetime
    starts = [starttime]
    candidate = starttime + step
    while candidate < stoptime:
        starts.append(candidate)
        candidate = starts[-1] + step
    return starts
def get_current_time_from_Streams():
    """Return the current Streams clock as a datetime.

    Converts Streams time to a Unix timestamp, then to a datetime via
    fromtimestamp() (presumably local time -- confirm the zone used).
    """
    current = sGetCurrentTime()
    # print("streamstime", current)
    python = sConvertStreamsToPythonTime(current)
    # print("python time", python)
    dttime= dt.datetime.fromtimestamp(python)
    # print("datetime", dttime)
    return dttime
def wait_to_start(starttime):
    """
    Sleep until *starttime*, re-checking the Streams clock as the
    deadline approaches.

    Waits over an hour are recalibrated 30 minutes before the start,
    waits over 20 minutes are recalibrated 5 minutes before, and the
    remainder is slept in one interruptible sleep_loop().

    Fix: the original printed "X min" before testing the hour branch
    (`if > 60` preceded `elif > 3600`), so waits longer than an hour
    could never be reported in hours; the branches are now ordered
    largest-first.
    """
    # update current time (Streams clock, not the system clock)
    today = get_current_time_from_Streams()
    print("\nChecking wait time.")
    print("The current time from Streams is "+str(today))
    print("The current system time is "+str(dt.datetime.today()))
    # Check how far we are from the start
    waittime = (starttime-today).total_seconds()
    if waittime>0:
        if waittime > 3600:
            print("Waiting "+str(waittime/3600)+" hr to start")
        elif waittime > 60:
            print("Waiting "+str(waittime/60)+" min to start")
        else: print("Waiting "+str(waittime)+" sec to start")
    if waittime>= 60*60: ## an hour
        print("Wait time will be updated again at approximately "+str(starttime-dt.timedelta(seconds = 60*30)))
        sleep_loop(waittime - 60*30) ## check 30 minutes before end of wait time
        wait_to_start(starttime)
    elif waittime >= 60*20: ## 20 min
        print("Wait time will be updated again at approximately "+str(starttime - dt.timedelta(seconds = 60*5)))
        sleep_loop(waittime-60*5) ## check 5 minutes before end of waittime
        wait_to_start(starttime)
    elif waittime>0:
        sleep_loop(waittime)
def sleep_loop(waittime, increment = 5):
    """
    Sleep for *waittime* seconds in short increments.

    Streams only allows a script to be killed between statements, so
    sleeping in *increment*-second chunks lets the script be stopped at
    roughly that frequency; the sub-increment remainder is slept last.
    """
    full_chunks = int(math.floor(waittime / increment))
    for _ in range(full_chunks):
        time.sleep(increment)
    time.sleep(waittime % increment)
def run(starttime, stoptime, scenetime, pausetime = dt.timedelta(minutes = 0), buffertime = 30):
    """
    Perform a camera run starting at starttime, ending at stoptime, with
    scenes of length scenetime and delays between scenes of length
    pausetime + buffertime.
    Note: 30 seconds is about the minimum buffertime for Streams7 to
    fully close and reopen a scene.
    """
    # Each scene actually records for scenetime - buffertime (in ms).
    runtime = compute_runtime(scenetime, buffertime)
    startlist = make_starttime_list(starttime, stoptime, scenetime, pausetime)
    print("Start time list: " + str(startlist))
    vidDevList = groupDevices()
    setStopCondition(vidDevList, runtime, trigger = "ms")
    print("Start time: " + str(starttime))
    print("Stop time: "+ str(stoptime))
    #### Wait to start
    wait_to_start(starttime)
    ### Start at specified times
    ### For sub-minute runs, this will jump the gun. (if, eg, the run starts at 15:00:00 and ends at 15:00:34, this check will show that current time rounded to a minute still equals 15:00:00 and it restarts)
    current = get_current_time_from_Streams()
    # print("entering loop", current)
    print("\nPreparing to record at "+str(current))
    # NOTE(review): this loop polls the Streams clock with no sleep between
    # scenes (busy-wait), and the trigger compares times truncated to the
    # minute, so scene starts have minute granularity.
    while starttime <= current <= stoptime:
        # print(current, dt.datetime.today())
        # Truncate the current time to minute precision for the startlist match.
        now = dt.datetime.combine(current, dt.time(current.hour, current.minute))
        if now in startlist:
            print("Scene recording triggered at " + str(get_current_time_from_Streams()))
            hScene = recordScene(vidDevList)
            nameScene(hScene, vidDevList)
            print("Scene recording complete at "+str(get_current_time_from_Streams()))
        current = get_current_time_from_Streams()
if __name__ == "__main__":
    ### Run 1
    # First run: a single 1h50m scene starting at 08:10.
    starttime1 = dt.datetime(2018, 9, 20, 8, 10, 0)
    stoptime1 = starttime1 +dt.timedelta(hours = 1, minutes = 50)
    scenetime1 = dt.timedelta(hours = 1, minutes = 50)
    pausetime1 = dt.timedelta(minutes = 0)
    buffertime1 = 30 #seconds between runs
    ### Run 2
    # Second run: 2h scenes starting at 09:00, for 8 hours.
    # NOTE(review): run 2's window (from 09:00) overlaps run 1 (which
    # only ends at 10:00); scene starts scheduled before run 1 finishes
    # will have passed by the time run 2 begins -- confirm intended.
    starttime2 = dt.datetime(2018, 9, 20, 9, 0, 0)
    stoptime2 = starttime2 +dt.timedelta(hours = 8, minutes = 0)
    scenetime2 = dt.timedelta(hours = 2, minutes = 0)
    pausetime2 = dt.timedelta(minutes = 0)
    buffertime2 = 30 #seconds between runs
    ### Do the first run
    run(starttime1, stoptime1, scenetime1, pausetime1, buffertime1)
    ### Do the second run
    run(starttime2, stoptime2, scenetime2, pausetime2, buffertime2)
|
<gh_stars>1000+
import numpy as np
from scipy.optimize import minimize
import GPy
from GPy.kern import Kern
from GPy.core import Param
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import euclidean_distances
class TV_SquaredExp(Kern):
    """ Time varying squared exponential kernel.
    For more info see the TV-GP-UCB paper:
    http://proceedings.mlr.press/v51/bogunovic16.pdf

    The first input column is time; the remaining columns are the
    spatial inputs.  K(X, X2) = RBF(x, x2) * (1 - epsilon)^(|t - t2|/2).
    """
    def __init__(self,
                 input_dim,
                 variance=1.,
                 lengthscale=1.,
                 epsilon=0.,
                 active_dims=None):
        super().__init__(input_dim, active_dims, "time_se")
        self.variance = Param("variance", variance)
        self.lengthscale = Param("lengthscale", lengthscale)
        self.epsilon = Param("epsilon", epsilon)
        # Register the parameters with GPy so they are optimized.
        self.link_parameters(self.variance, self.lengthscale, self.epsilon)
    def K(self, X, X2):
        # time must be in the far left column
        # Cap the forgetting rate at 0.5.
        # NOTE(review): this rebinds self.epsilon to a plain float,
        # replacing the linked GPy Param -- confirm this is intended.
        if self.epsilon > 0.5:  # 0.5
            self.epsilon = 0.5
        if X2 is None:
            X2 = np.copy(X)
        T1 = X[:, 0].reshape(-1, 1)
        T2 = X2[:, 0].reshape(-1, 1)
        # Time-decay factor: (1 - eps)^(|t1 - t2| / 2).
        dists = pairwise_distances(T1, T2, "cityblock")
        timekernel = (1 - self.epsilon)**(0.5 * dists)
        X = X[:, 1:]
        X2 = X2[:, 1:]
        # Spatial RBF part: variance * exp(-d^2 / lengthscale).
        RBF = self.variance * np.exp(
            -np.square(euclidean_distances(X, X2)) / self.lengthscale)
        return RBF * timekernel
    def Kdiag(self, X):
        # Diagonal of K: time term is 1 and exp(0) = 1, leaving variance.
        return self.variance * np.ones(X.shape[0])
    def update_gradients_full(self, dL_dK, X, X2):
        # Gradients of the log likelihood w.r.t. variance, lengthscale, epsilon.
        if X2 is None:
            X2 = np.copy(X)
        T1 = X[:, 0].reshape(-1, 1)
        T2 = X2[:, 0].reshape(-1, 1)
        X = X[:, 1:]
        X2 = X2[:, 1:]
        dist2 = np.square(euclidean_distances(X, X2)) / self.lengthscale
        # NOTE(review): dvar uses exp(-(d/l)^2) whereas K uses exp(-d^2/l),
        # and dl's sign/factor differ from the analytic derivative of K
        # w.r.t. the lengthscale -- confirm these gradients are intended.
        dvar = np.exp(-np.square(
            (euclidean_distances(X, X2)) / self.lengthscale))
        dl = -(2 * euclidean_distances(X, X2)**2 * self.variance *
               np.exp(-dist2)) * self.lengthscale**(-2)
        n = pairwise_distances(T1, T2, "cityblock") / 2
        deps = -n * (1 - self.epsilon)**(n - 1)
        self.variance.gradient = np.sum(dvar * dL_dK)
        self.lengthscale.gradient = np.sum(dl * dL_dK)
        self.epsilon.gradient = np.sum(deps * dL_dK)
def normalize(data, wrt):
    """ Normalize data to be in range (0,1), with respect to the (wrt)
    boundaries, which can be specified.
    """
    lower = np.min(wrt, axis=0)
    upper = np.max(wrt, axis=0)
    # Small constant avoids division by zero for constant columns.
    return (data - lower) / (upper - lower + 1e-8)
def standardize(data):
    """ Standardize to be Gaussian N(0,1), then clip to [-2, 2].
    """
    centered = data - np.mean(data, axis=0)
    scaled = centered / (np.std(data, axis=0) + 1e-8)
    return np.clip(scaled, -2, 2)
def UCB(m, m1, x, fixed, kappa=0.5):
    """ UCB acquisition function. Interesting points to note:
    1) We concat with the fixed points, because we are not optimizing wrt
    these. This is the Reward and Time, which we can't change. We want
    to find the best hyperparameters *given* the reward and time.
    2) We use m to get the mean and m1 to get the variance. If we already
    have trials running, then m1 contains this information. This reduces
    the variance at points currently running, even if we don't have
    their label.
    Ref: https://jmlr.org/papers/volume15/desautels14a/desautels14a.pdf

    Note: the *kappa* argument is kept for interface compatibility but
    is recomputed below from the beta_t schedule.
    Fix: removed a duplicated m.predict(xtest) call.
    """
    c1 = 0.2
    c2 = 0.4
    beta_t = c1 * np.log(c2 * m.X.shape[0])
    kappa = np.sqrt(beta_t)
    # Candidate point = fixed (reward, time) columns + optimized columns.
    xtest = np.concatenate((fixed.reshape(-1, 1), np.array(x).reshape(-1,
                                                                     1))).T
    try:
        # Mean from the model fit on labelled data only.
        mean = m.predict(xtest)[0][0][0]
    except ValueError:
        mean = -9999
    try:
        # Variance from the model that also knows about running trials.
        var = m1.predict(xtest)[1][0][0]
    except ValueError:
        var = 0
    return mean + kappa * var
def optimize_acq(func, m, m1, fixed, num_f):
    """ Optimize the acquisition function with random-restart L-BFGS-B.

    Runs 10 restarts from uniform starting points in the unit box and
    returns the best point found, clipped to [0, 1].
    """
    options = {"maxiter": 200, "maxfun": 200, "disp": False}
    n_restarts = 10
    dim = m.X.shape[1] - num_f
    box = [(0, 1) for _ in range(dim)]
    best_value = -999
    best_theta = m1.X[0, :]
    for _ in range(n_restarts):
        x0 = np.random.uniform(0, 1, dim)
        res = minimize(
            lambda x: -func(m, m1, x, fixed),
            x0,
            bounds=box,
            method="L-BFGS-B",
            options=options)
        candidate_value = func(m, m1, res.x, fixed)
        if candidate_value > best_value:
            best_value, best_theta = candidate_value, res.x
    return (np.clip(best_theta, 0, 1))
def select_length(Xraw, yraw, bounds, num_f):
    """Select the number of datapoints to keep, using cross validation.

    Fits a GP on data suffixes of increasing length (200, 210, ...) and
    returns the length with the best marginal log likelihood.  When
    fewer than 200 rows exist, the full length is returned.
    """
    min_len = 200
    if Xraw.shape[0] < min_len:
        return (Xraw.shape[0])
    else:
        length = min_len - 10
        scores = []
        while length + 10 <= Xraw.shape[0]:
            length += 10
            base_vals = np.array(list(bounds.values())).T
            # Evaluate on the most recent `length` rows only.
            X_len = Xraw[-length:, :]
            y_len = yraw[-length:]
            oldpoints = X_len[:, :num_f]
            # Normalization limits: observed max/min of the fixed columns
            # stacked with the user-provided bounds for the rest.
            old_lims = np.concatenate((np.max(oldpoints, axis=0),
                                       np.min(oldpoints, axis=0))).reshape(
                                           2, oldpoints.shape[1])
            limits = np.concatenate((old_lims, base_vals), axis=1)
            X = normalize(X_len, limits)
            y = standardize(y_len).reshape(y_len.size, 1)
            kernel = TV_SquaredExp(
                input_dim=X.shape[1], variance=1., lengthscale=1., epsilon=0.1)
            m = GPy.models.GPRegression(X, y, kernel)
            m.optimize(messages=True)
            scores.append(m.log_likelihood())
        # Map argmax back to a length: index 0 corresponds to length 200.
        idx = np.argmax(scores)
        length = (idx + int((min_len / 10))) * 10
        return (length)
|
<gh_stars>1-10
# pyright: reportPrivateUsage=false
from __future__ import annotations
import os
import sys
import socket
from typing import Optional, Set, TypeVar, Type, Dict, Tuple
from .base import Connection, Interface, Proxy, Id
from .protocol.wayland import WlDisplay, WlRegistry, WlShm
P = TypeVar("P", bound="Proxy")
class ClientConnection(Connection):
    """Client-side Wayland connection.

    Connects to the compositor socket ($XDG_RUNTIME_DIR/$WAYLAND_DISPLAY
    unless an explicit path is given), owns the wl_display/wl_registry
    proxies, and tracks advertised globals and wl_shm pixel formats.
    """
    # Filesystem path of the Wayland unix socket
    _path: str
    _display: WlDisplay
    _registry: WlRegistry
    # interface_name -> (name, version, proxy)
    _registry_globals: Dict[str, Tuple[int, int, Optional[Proxy]]]
    # Pixel formats advertised by the wl_shm global so far
    _shm_formats: Set[WlShm.Format]
    def __init__(self, path: Optional[str] = None):
        super().__init__()
        if path is not None:
            self._path = path
        else:
            # Default socket path: $XDG_RUNTIME_DIR/$WAYLAND_DISPLAY
            runtime_dir = os.getenv("XDG_RUNTIME_DIR")
            if runtime_dir is None:
                raise RuntimeError("XDG_RUNTIME_DIR is not set")
            display = os.getenv("WAYLAND_DISPLAY", "wayland-0")
            self._path = os.path.join(runtime_dir, display)
        self._shm_formats = set()
        self._display = self.create_proxy(WlDisplay)
        self._display._is_attached = True  # display is always attached
        self._display.on_error(self._on_display_error)
        self._display.on_delete_id(self._on_display_delete_id)
        self._registry_globals = {}
        self._registry = self._display.get_registry()
        self._registry.on_global(self._on_registry_global)
        self._registry.on_global_remove(self._on_registry_global_remove)
    @property
    def display(self) -> WlDisplay:
        """The wl_display proxy of this connection."""
        return self._display
    @property
    def shm_formats(self) -> Set[WlShm.Format]:
        """Pixel formats the compositor's wl_shm has advertised so far."""
        return self._shm_formats
    def get_global(self, proxy_type: Type[P]) -> P:
        """Get global by proxy type.

        Binds the global lazily on first use and caches the bound proxy.
        Raises RuntimeError when no global provides the interface, and
        ValueError when the global was already bound via an untyped proxy.
        """
        if not hasattr(proxy_type, "interface"):
            raise TypeError("cannot get untyped proxy")
        interface = proxy_type.interface
        entry = self._registry_globals.get(interface.name)
        if entry is None:
            raise RuntimeError(f"no globals provide: {interface}")
        name, version, proxy = entry
        if proxy is None:
            # First request for this global: bind it and cache the proxy.
            proxy = self.create_proxy(proxy_type)
            self._registry.bind(name, interface.name, version, proxy)
            self._proxy_setup(proxy)
            self._registry_globals[interface.name] = (name, version, proxy)
        if not isinstance(proxy, proxy_type):
            raise ValueError("global has already been bound by untyped proxy")
        return proxy
    def get_global_by_interface(self, interface: Interface) -> Proxy:
        """Get global exposing interface (untyped variant of get_global)."""
        entry = self._registry_globals.get(interface.name)
        if entry is None:
            raise RuntimeError(f"no globals provide: {interface}")
        name, version, proxy = entry
        if proxy is None:
            # First request for this global: bind it and cache the proxy.
            proxy = self.create_proxy_by_interface(interface)
            self._registry.bind(name, interface.name, version, proxy)
            self._proxy_setup(proxy)
            self._registry_globals[interface.name] = (name, version, proxy)
        return proxy
    async def connect(self) -> ClientConnection:
        """Connect to the compositor and wait for initial events."""
        await super().connect()
        await self.sync()
        return self
    async def sync(self) -> None:
        """Ensures all requests are processed
        This function can be used as a barrier to ensure all previous
        requests and resulting events have been handled.
        """
        callback = self.display.sync()
        await callback.on_async("done")
    async def _create_socket(self) -> socket.socket:
        """Create and connect the unix stream socket for this connection."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        sock.connect(self._path)
        return sock
    def _proxy_setup(self, proxy: Proxy) -> None:
        """Do additional proxy setup based on its type"""
        if proxy._interface.name == "xdg_wm_base":
            # Answer compositor pings so the client is not deemed unresponsive.
            def pong(serial: int) -> bool:
                proxy("pong", serial)
                return True
            proxy.on("ping", pong)
        elif proxy._interface.name == "wl_shm":
            # Collect the advertised pixel formats.
            def format(fmt: WlShm.Format) -> bool:
                self._shm_formats.add(fmt)
                return True
            proxy.on("format", format)
    def _on_display_error(self, proxy: Proxy, code: int, message: str) -> bool:
        """Handle for `wl_display.error` event"""
        print(
            f"\x1b[91mERROR: proxy='{proxy}' code='{code}' message='{message}'\x1b[m",
            file=sys.stderr,
        )
        self.terminate()
        return True
    def _on_display_delete_id(self, id_int: int) -> bool:
        """Unregister proxy"""
        self._delete_proxy(Id(id_int))
        return True
    def _on_registry_global(self, name: int, interface: str, version: int) -> bool:
        """Register name in registry globals"""
        self._registry_globals[interface] = (name, version, None)
        return True
    def _on_registry_global_remove(self, target_name: int) -> bool:
        """Unregister name from registry globals"""
        for interface, (name, _, proxy) in self._registry_globals.items():
            if target_name == name:
                # pop-then-break: the dict is mutated during iteration, but
                # iteration stops immediately, so no RuntimeError is raised.
                self._registry_globals.pop(interface)
                if proxy is not None:
                    self._proxies.pop(proxy._id)
                    proxy._detach("global removed")
                break
        return True
|
<reponame>L-Net-1992/towhee
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import threading
from collections import namedtuple
from typing import Dict, Tuple, Union, List
from towhee.dataframe import DataFrame, Variable, DataFrameIterator
class ReaderBase(ABC):
    """
    Base class for all readers.

    read() may be blocking or non-blocking; when it blocks, the runner
    blocks with it, so close() is used to interrupt it when the graph
    needs to stop.
    """
    @abstractmethod
    def read(self):
        raise NotImplementedError
    @abstractmethod
    def close(self):
        raise NotImplementedError
class DataFrameReader(ReaderBase):
    """
    Read data from input dataframes, unpacking and combining the columns
    each operator needs. One op_ctx owns exactly one dataframe reader.
    """
    def __init__(self, it: DataFrameIterator, op_inputs_index: Dict[str, int]):
        # Mapping of operator input name -> column index within a row.
        self._op_inputs_index = op_inputs_index
        self._iter = it
    @abstractmethod
    def read(self) -> Union[Dict[str, any], List[Dict[str, any]]]:
        pass
    @property
    def size(self) -> int:
        """Number of rows currently accessible through the iterator."""
        return self._iter.accessible_size
    @abstractmethod
    def close(self):
        raise NotImplementedError
    def _to_op_inputs(self, cols: Tuple[Variable]) -> Dict[str, any]:
        """
        Combine the configured columns of *cols* into an op-input dict.
        """
        return {name: cols[idx].value for name, idx in self._op_inputs_index.items()}
class BlockMapReaderWithOriginData(DataFrameReader):
    """
    Map reader that returns both the operator's input dict and the raw
    row it was built from.
    """
    def __init__(
            self,
            input_df: DataFrame,
            op_inputs_index: Dict[str, int]
    ):
        super().__init__(input_df.map_iter(True), op_inputs_index)
        self._lock = threading.Lock()
        self._close = False
    def read(self) -> Tuple[Dict[str, any], Tuple]:
        """
        Fetch the next row from the dataframe; returns ({}, ()) when the
        iterator yields nothing, raises StopIteration once closed.
        """
        if self._close:
            raise StopIteration
        with self._lock:
            row = next(self._iter)
            if self._close:
                raise StopIteration
            if not row:
                return {}, ()
            op_input = self._to_op_inputs(row)
            return op_input, row
    def close(self):
        """Mark the reader closed and wake any blocked read()."""
        self._close = True
        self._iter.notify()
class BatchFrameReader(DataFrameReader):
    """
    Reader that returns rows in (possibly overlapping) batches of
    *batch_size*, advancing *step* rows per batch.
    """
    def __init__(self, input_df: DataFrame, op_inputs_index: Dict[str, int],
                 batch_size: int, step: int):
        assert batch_size >= 1 and step >= 1
        super().__init__(input_df.batch_iter(batch_size, step, True), op_inputs_index)
        self._close = False
        self._lock = threading.Lock()
    def read(self) -> List[Dict[str, any]]:
        """
        Return the next batch as a list of 'input' named tuples;
        [] when the iterator yields nothing, StopIteration once closed.
        """
        if self._close:
            raise StopIteration
        with self._lock:
            rows = next(self._iter)
            if self._close:
                raise StopIteration
            if not rows:
                return []
            batch = []
            for row in rows:
                fields = self._to_op_inputs(row)
                batch.append(namedtuple('input', fields.keys())(**fields))
            return batch
    def close(self):
        """Mark the reader closed and wake any blocked read()."""
        self._close = True
        self._iter.notify()
class _TimeWindow:
    '''
    TimeWindow
    The unit of timestamp is milliseconds, the unit of window(range, step) is seconds.

    Each instance covers [start, start + range) milliseconds and lazily
    chains the (possibly overlapping) following window via _next_window.
    __call__ returns True once a row falls past a non-empty window,
    meaning the window is complete and can be emitted.
    '''
    def __init__(self, time_range_sec: int, time_step_sec: int, start_time_sec: int = 0):
        self._start_time_m = start_time_sec * 1000
        self._end_time_m = self._start_time_m + time_range_sec * 1000
        # First instant that belongs to the *next* window.
        self._next_start_time_m = (start_time_sec + time_step_sec) * 1000
        self._time_range_sec = time_range_sec
        self._time_step_sec = time_step_sec
        self._window = []  # rows collected for this window
        self._next_window = None  # lazily-created following window
    def __call__(self, row_data) -> bool:
        # The frame (carrying the timestamp) is the last column of the row.
        frame = row_data[-1].value
        if frame.timestamp < self._start_time_m:
            # Older than this window: drop the row.
            return False
        if frame.timestamp < self._end_time_m:
            self._window.append(row_data)
            if frame.timestamp >= self._next_start_time_m:
                # The row also belongs to the overlapping next window.
                if self._next_window is None:
                    self._next_window = _TimeWindow(self._time_range_sec, self._time_step_sec, self._next_start_time_m // 1000)
                self._next_window(row_data)
            return False
        if len(self._window) == 0:
            # Nothing collected and the row is already past this window:
            # fast-forward to the step boundary containing the row, then retry.
            self._start_time_m = frame.timestamp // 1000 // self._time_step_sec * self._time_step_sec * 1000
            self._end_time_m = self._start_time_m + self._time_range_sec * 1000
            self._next_start_time_m = (self._start_time_m // 1000 + self._time_step_sec) * 1000
            if frame.timestamp >= self._start_time_m and frame.timestamp < self._end_time_m:
                self(row_data)
            return False
        # Row lies beyond a non-empty window: hand it to the next window
        # and report this window as complete.
        if self._next_window is None:
            self._next_window = _TimeWindow(self._time_range_sec, self._time_step_sec, self._next_start_time_m // 1000)
        self._next_window(row_data)
        return True
    @property
    def data(self):
        # Rows collected so far for this window.
        return self._window
    @property
    def next_window(self):
        # The following window, or None if it was never started.
        return self._next_window
class TimeWindowReader(DataFrameReader):
    """
    Time window reader

    Groups rows from the input dataframe into _TimeWindow instances and
    returns one completed window per read() call.
    """
    def __init__(
            self,
            input_df: DataFrame,
            op_inputs_index: Dict[str, int],
            time_range_sec: int,
            time_step_sec: int
    ):
        super().__init__(input_df.map_iter(True), op_inputs_index)
        self._window = _TimeWindow(time_range_sec, time_step_sec)
        self._lock = threading.Lock()
        self._close = False
    def _format_to_namedtuple(self, rows):
        # Convert raw rows into op-input 'input' named tuples.
        ret = []
        for row in rows:
            data_dict = self._to_op_inputs(row)
            ret.append(namedtuple('input', data_dict.keys())(**data_dict))
        return ret
    def read(self) -> Tuple[Dict[str, any], Tuple]:
        """
        Read data from dataframe, get cols by operator_repr info

        Pulls rows until the current window completes and returns that
        window's rows.  When the source is exhausted, remaining chained
        windows are flushed one per call; raises StopIteration when
        closed or when no window remains.
        """
        with self._lock:
            if self._close or self._window is None:
                raise StopIteration
            while True:
                if self._window is None:
                    raise StopIteration
                try:
                    data = next(self._iter)
                except StopIteration:
                    # Source exhausted: flush the current window, skipping
                    # empty ones, and advance to the chained next window.
                    if self._window is not None:
                        ret = self._format_to_namedtuple(self._window.data)
                        self._window = self._window.next_window
                        if len(ret) == 0:
                            continue
                        else:
                            return ret
                if self._close:
                    raise StopIteration
                if data is None:
                    continue
                # Route the row; True means the current window is complete.
                is_end = self._window(data)
                if is_end and len(self._window.data) != 0:
                    ret = self._format_to_namedtuple(self._window.data)
                    self._window = self._window.next_window
                    return ret
    def close(self):
        # Mark the reader closed and wake any blocked read().
        self._close = True
        self._iter.notify()
|
<reponame>lneukom/fairseq<gh_stars>0
'''
Created on Dec 19, 2013
A script to convert TMXs into parallel corpuses for machine
translation (e.g. Moses: http://www.statmt.org/moses/) training.
Pass in either paths to TMX files, or directories containing TMX files.
The script will recursively traverse directories and process all TMXs.
To perform tokenization or to filter the output, use the convert() method
with subclasses of the Tokenizer or Filter objects.
This script is Jython 2.5-compatible, so you can use nice Java tokenizers
like those included in Lucene (http://lucene.apache.org/).
@author: aaron.madlon-kay
'''
from __future__ import with_statement
import sys
import os
import codecs
from xml.etree import ElementTree
class Tokenizer(object):
    '''Base tokenizer: associates a language code with a tokenize() hook.'''
    def __init__(self, lang):
        # Language code this tokenizer handles (used as a lookup key).
        self.lang = lang
    def tokenize(self, text):
        '''Identity tokenization; override to return tokenized text.'''
        return text
class Filter(object):
    '''Base bitext filter; override filter() to drop segments.'''
    def filter(self, bitext):
        '''Return a bool indicating whether to keep the segment; the
        base implementation keeps everything.'''
        return True
class Converter(object):
    '''Convert TMX files into per-language plain-text bitext files.

    Tokenizers (keyed by language) add 'tok.<lang>' entries; filters can
    suppress segment pairs.  Output files are opened lazily per language
    under output_path.
    '''
    def __init__(self):
        self.tokenizers = {}     # lang -> Tokenizer
        self.filters = []        # Filter objects; any veto suppresses a pair
        self.suppress_count = 0  # pairs suppressed by filters so far
        self.output_files = {}   # lang -> open output file handle
        self.output_path = os.getcwd()
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # cleanup() is expected to close the per-language output files.
        cleanup(self.output_files)
    def add_tokenizers(self, tokenizers):
        '''Register tokenizers, keyed by their language code.'''
        for tokenizer in tokenizers:
            self.tokenizers[tokenizer.lang] = tokenizer
    def add_filter(self, bitext_filter):
        '''Add a bitext filter; None is ignored.'''
        if bitext_filter is not None:
            self.filters.append(bitext_filter)
    def convert(self, files):
        '''Extract all TMX files and write their bitexts to the output files.'''
        self.suppress_count = 0
        try:
            for tmx in files:
                for bitext in extract_tmx(tmx):
                    self.__output(bitext)
            print('done')
        finally:
            print(f'Suppressed {self.suppress_count:d} pairs')
    def __output(self, bitext):
        # Apply filters first; any rejection suppresses the whole pair.
        for fltr in self.filters:
            if not fltr.filter(bitext):
                self.suppress_count += 1
                return
        # Iterate over a snapshot: tokenization adds 'tok.<lang>' keys, and
        # mutating a dict during iteration raises RuntimeError in Python 3.
        for lang, text in list(bitext.items()):
            tokenizer = self.tokenizers.get(lang, None)
            if tokenizer is not None:
                bitext['tok.' + lang] = tokenizer.tokenize(text)
        # Lazily open one output file per language key (including 'tok.*').
        for lang in bitext.keys():
            if lang not in self.output_files:
                self.output_files[lang] = codecs.open(os.path.join(self.output_path, 'bitext.' + lang),
                                                      'w',
                                                      encoding='utf-8')
        for lang, text in bitext.items():
            out_file = self.output_files[lang]
            out_file.write(text)
            out_file.write('\n')
def get_files(path, ext):
    """Recursively yield paths of files under *path* ending with *ext*."""
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith(ext):
                yield os.path.join(root, filename)
def extract_tmx(tmx):
    """Yield a {lang: text} dict for each translation unit in a TMX file."""
    print('Extracting', os.path.basename(tmx))
    tree = ElementTree.parse(tmx)
    root = tree.getroot()
    # Element.getiterator() was removed in Python 3.9; iter() is the
    # long-standing replacement with identical behavior here.
    for tu in root.iter('tu'):
        bitext = extract_tu(tu)
        if bitext != {}:
            yield bitext
def extract_tu(tu):
    """Extract a {lang: text} pair from a <tu>; return {} if not a clean pair."""
    bitext = {}
    for tuv in tu.findall('tuv'):
        lang, text = extract_tuv(tuv)
        if None not in (lang, text):
            bitext[lang] = text
    if len(bitext) != 2:
        print('TU had %d TUV(s). Skipping.' % len(bitext))
        # tostring() returns bytes by default in Python 3; request str to
        # avoid TypeError when concatenating with '\t'.
        print('\t' + ElementTree.tostring(tu, encoding='unicode'))
        return {}
    return bitext
def extract_tuv(tuv):
    """Return (lang, text) from a <tuv>, or (None, None) if unusable."""
    lang = tuv.attrib.get('lang', None)
    if lang is None:
        # TMX 1.4 uses the namespaced xml:lang attribute instead.
        lang = tuv.attrib.get('{http://www.w3.org/XML/1998/namespace}lang', None)
    if lang is None:
        print('TUV missing lang. Skipping.')
        return None, None
    lang = normalize_lang(lang)
    segs = tuv.findall('seg')
    if len(segs) > 1:
        print('Multiple segs found in TUV. Skipping.')
        return None, None
    # Guard the empty case too: the original indexed segs[0] unconditionally
    # and crashed with IndexError on a TUV without any <seg>.
    if not segs:
        print('TUV missing seg. Skipping.')
        return None, None
    text = extract_seg(segs[0])
    if text is None:
        print('TUV missing seg. Skipping.')
        return None, None
    text = text.strip().replace('\n', '').replace('\r', '')
    if len(text) == 0:
        print('TUV had blank seg. Skipping.')
        return None, None
    return lang, text
def extract_seg(seg):
    """Return the concatenated text content of a <seg>, skipping None pieces."""
    buffer = [seg.text]
    # Element.getchildren() was removed in Python 3.9; iterating the element
    # itself yields the same direct children.
    for child in seg:
        buffer.append(child.text)
        buffer.append(child.tail)
    return ''.join(piece for piece in buffer if piece is not None)
def normalize_lang(lang):
    """Lower-case a language code and strip any region suffix ('en-US' -> 'en')."""
    code = lang.lower()
    if len(code) > 2 and code[2] in ('-', '_'):
        return code[:2]
    return code
def cleanup(output_files):
    """Close every open output file handle in the given mapping."""
    for out_file in output_files.values():
        out_file.close()
def convert(paths, tokenizers=None, bitext_filter=None):
    """Convert TMX files (or directories of them) into parallel text files.

    Returns 0 on success, 1 when no TMX input was found.
    """
    # Avoid a mutable default argument; None is the conventional sentinel.
    if tokenizers is None:
        tokenizers = []
    files = []
    for path in paths:
        if os.path.isdir(path):
            print('Queuing TMXs in ' + path)
            files.extend(get_files(path, '.tmx'))
        elif os.path.isfile(path) and path.endswith('.tmx'):
            files.append(path)
    if len(files) > 0:
        with Converter() as converter:
            converter.add_tokenizers(tokenizers)
            converter.add_filter(bitext_filter)
            converter.convert(files)
    else:
        print('Please specify input files or paths.')
        return 1
    return 0
if __name__ == '__main__':
    # NOTE(review): the input path is hard-coded -- presumably a local
    # debugging shortcut; consider reading paths from sys.argv instead.
    sys.exit(convert(["/home/christian/Downloads/JRC-Acquis-v3.0"]))
<reponame>soikat15/MY_AI_CAR-BD-18
# Copyright (C) 2018 <NAME>
#
# Project Name:
# Author: <NAME>
# Author's Email: <EMAIL>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of the contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTIONS) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from builtins import print
import pygame
import serial
import time
import pyttsx3
import threading
from voice import *
# Text-to-speech engine used for spoken status messages.
engine = pyttsx3.init()
engine.setProperty('rate', 125)  # speech rate in words per minute
#thread for voice class
# Run the voice-command listener on a background thread so it does not
# block the gamepad polling loop below.
t = threading.Thread(target=voice)
t.start()
#init serial
# Serial link to the Arduino driving the car.
# NOTE(review): /dev/ttyACM1 is machine-specific -- confirm on deployment.
arduino = serial.Serial("/dev/ttyACM1",9600,timeout=5)
def send_to_arduino(fb, rl, brk_key, brk_release_key, center):
    """Send one control frame to the Arduino over serial.

    The frame is a header byte (100) followed by the five control values,
    each written as a single encoded character, then a 1-second pause.
    """
    frame = (100, fb, rl, brk_key, brk_release_key, center)
    for value in frame:
        arduino.write(chr(value).encode())
    time.sleep(1)
print('init serial')
#init gamepad
# Initialise pygame's joystick subsystem and attach the first gamepad.
pygame.display.init()
pygame.joystick.init()
pygame.joystick.Joystick(0).init()
print('init gamepad')
# Announce readiness via text-to-speech (runAndWait blocks until done).
engine.say('command initiated')
engine.say('welcome back sir')
engine.runAndWait()
# Main control loop: poll the gamepad ~10x/s and forward one frame per tick.
while True:
    pygame.event.pump()
    # Map axes from [-1, 1] to the byte-friendly range [0, 100].
    bx = int((pygame.joystick.Joystick(0).get_axis(2))*50+50)
    ay = int((pygame.joystick.Joystick(0).get_axis(1))*50+50)
    print('control :'+str(ay) + ' ' + str(bx))
    # button 5 for break (front right up)
    brk = int(pygame.joystick.Joystick(0).get_button(5))
    print('break :'+ str(brk))
    # button 7 for release (front right down)
    brk_release =int( pygame.joystick.Joystick(0).get_button(7))
    print('break release "'+str(brk_release))
    # button 1 for VOICE KEY (BUTTON 2 )
    voice_key =int( pygame.joystick.Joystick(0).get_button(1))
    print('voice_key : '+str(voice_key))
    # button 0 for steer center KEY (BUTTON 1 )
    steer_center = int(pygame.joystick.Joystick(0).get_button(0))
    print('steer center : ' + str(steer_center))
    # NOTE(review): voice_key is read but never sent to the Arduino -- confirm.
    send_to_arduino(ay,bx,brk,brk_release,steer_center)
    time.sleep(.1)
|
import h3
import moment
import re
import src.utils.google as google
from config.h3.h3_config import POI_RESOLUTION
from quart import abort, Blueprint, request
from src.utils.array_utils import get_nested_value
from src.utils.futures import execute_futures
from src.utils.cat_mapping import complete_categories
poi_information = Blueprint('poi-information', __name__)
def parse_opening_hours(opening_hours):
    """Parse opening hours to timestamps.

    Each entry is a nested list in Google's positional format. Returns a
    list of dicts with ISO ``date``/``openingTime``/``closingTime`` strings,
    or None when no opening hours are available.
    """
    if not opening_hours:
        return None

    def parse_list(li):
        date = get_nested_value(li, 4)
        opening_time_hours = get_nested_value(li, 6, 0, 0)
        opening_time_minutes = get_nested_value(li, 6, 0, 1)
        closing_time_hours = get_nested_value(li, 6, 0, 2)
        closing_time_minutes = get_nested_value(li, 6, 0, 3)

        def _closes_next_day():
            # The closing timestamp belongs to the next day when the closing
            # hour precedes the opening hour (closing at/after midnight), or
            # when all four values are 0 ("open 24 hours" encoding).
            # FIX: the original used bitwise '&'/'|' here; because bitwise
            # operators bind tighter than comparisons in Python, the condition
            # parsed as nonsense chained comparisons instead of this intent.
            if closing_time_hours < opening_time_hours:
                return True
            return (opening_time_hours == 0 and opening_time_minutes == 0
                    and closing_time_hours == 0 and closing_time_minutes == 0)

        # TODO: Consider places with breaks (e.g., closed between 13-14h)
        return dict(
            date=str(moment.date(date)),
            openingTime=str(moment.date(date).add(
                hours=opening_time_hours,
                minutes=opening_time_minutes
            )) if opening_time_hours is not None else None,
            closingTime=str(moment.date(date).add(
                days=1 if _closes_next_day() else 0,
                hours=closing_time_hours,
                minutes=closing_time_minutes
            )) if closing_time_hours is not None else None
        )

    return list(map(parse_list, opening_hours))
def parse_waiting_time_data(waiting_time_data):
    """Parse a human-readable waiting time string into minutes.

    Examples: "30 min" -> 30, "1 hour" -> 60, "1 hour 30 min" -> 90.
    Strings without digits yield 0.
    """
    numbers = re.findall(r'\d+', waiting_time_data)
    if len(numbers) == 0:
        return 0
    has_hour = "hour" in waiting_time_data
    has_min = "min" in waiting_time_data
    # FIX: check the combined "X hour Y min" form first; the original tested
    # "min" alone first, so "1 hour 30 min" incorrectly parsed as 1 minute.
    if has_hour and has_min and len(numbers) >= 2:
        return int(numbers[0]) * 60 + int(numbers[1])
    if has_hour:
        return int(numbers[0]) * 60
    if has_min:
        return int(numbers[0])
    # Fallback for "H:MM"-style strings without unit words.
    return int(numbers[0]) * 60 + (int(numbers[1]) if len(numbers) > 1 else 0)
def parse_popularity_data(popularity_data, timezone):
    """Parse popularity information to timestamps in the respective timezone"""
    popularity, waiting_time = [], []
    # Waiting times are only returned when at least one entry contained them.
    includes_waiting_time = False
    for day in popularity_data:
        weekday = day[0]
        p = []
        w = []
        # Create timestamps for each hour of the week and set popularity and waiting time to 0 by default since the
        # returned popularity array doesn't necessarily cover all 24 hours of a day but only relevant hours
        for h in range(24):
            timestamp = str(moment.utcnow().timezone(timezone).replace(
                weekday=weekday,
                hours=h,
                minutes=0,
                seconds=0
            ))
            p.append(dict(timestamp=timestamp, popularity=0))
            w.append(dict(timestamp=timestamp, waitingTime=0))
        if day[1] is not None:
            for p_info in day[1]:
                # Rebuild the timestamp for this hour and find the matching
                # pre-created slot (index -1 would silently hit the last slot).
                timestamp = str(moment.utcnow().timezone(timezone).replace(
                    weekday=weekday,
                    hours=p_info[0],
                    minutes=0,
                    seconds=0
                ))
                index = next((i for i, item in enumerate(p) if item['timestamp'] == timestamp), -1)
                p[index]['popularity'] = p_info[1]
                # check if the waiting string is available and convert to minutes
                # NOTE(review): the guard checks len(p_info) > 5 but reads
                # p_info[3] -- confirm this index/length pair is intended.
                if len(p_info) > 5:
                    includes_waiting_time = True
                    w[index]['waitingTime'] = parse_waiting_time_data(p_info[3])
        popularity += p
        waiting_time += w
    return \
        sorted(popularity, key=lambda x: x['timestamp']), \
        sorted(waiting_time, key=lambda x: x['timestamp']) if includes_waiting_time else None
def parse_spending_time_data(spending_time_data):
    """Parse 'time typically spent' text into a [min, max] range in minutes.

    Examples:
        'People typically spend up to 25 min here'        -> [25, 25]
        'People typically spend 1 hour here'              -> [60, 60]
        'People typically spend 45 min to 1.5 hours here' -> [45, 90]

    Returns None when no parsable duration is present.
    """
    if not spending_time_data:
        return None
    # Example: 'People typically spend up to 25 min here'
    numbers = [float(f) for f in
               re.findall(r'\d*\.\d+|\d+', spending_time_data.replace(',', '.'))]
    contains_min = 'min' in spending_time_data
    contains_hour = 'hour' in spending_time_data or 'hr' in spending_time_data
    # FIX: the original left spending_time as None when neither unit matched
    # (or no numbers were found) and then crashed iterating None below.
    if not numbers or not (contains_min or contains_hour):
        return None
    if contains_min and contains_hour and len(numbers) >= 2:
        # "X min to Y hours": lower bound in minutes, upper bound in hours.
        spending_time = [numbers[0], numbers[1] * 60]
    elif contains_hour:
        spending_time = [numbers[0] * 60,
                         (numbers[0] if len(numbers) == 1 else numbers[1]) * 60]
    else:
        spending_time = [numbers[0],
                         numbers[0] if len(numbers) == 1 else numbers[1]]
    return [int(t) for t in spending_time]
@poi_information.route('/poi-information', methods=['GET'])
async def get_poi_information():
    """Retrieve POI information for an array of ids"""
    ids = await request.get_json()
    if ids is None:
        abort(400, description='Invalid request body, is the request body type a JSON?')
    if len(ids) > 100:
        abort(400, description='You can send at most 100 ids at once.')

    def parse_result(r):
        # Google's place payload addressed by positional magic indices.
        # NOTE(review): these indices mirror an undocumented internal layout
        # and may change without notice -- confirm against current responses.
        data = r['data'][6]
        name = get_nested_value(data, 11)
        place_id = get_nested_value(data, 78)
        lat = get_nested_value(data, 9, 2)
        lng = get_nested_value(data, 9, 3)
        if lat and lng:
            lat = round(lat, 7)  # 7 digits equals a precision of 1 cm
            lng = round(lng, 7)  # 7 digits equals a precision of 1 cm
        # noinspection PyUnresolvedReferences
        h3_index = h3.geo_to_h3(lat, lng, POI_RESOLUTION) if lat and lng else None
        address = get_nested_value(data, 2)
        timezone = get_nested_value(data, 30)
        categories = [t[0] for t in (get_nested_value(data, 76) or [])]
        opening_hours = parse_opening_hours(get_nested_value(data, 34, 1))
        permanently_closed = get_nested_value(data, 88, 0) == 'CLOSED'
        temporarily_closed = get_nested_value(data, 96, 5, 0, 2) == 'Reopen this place' and not permanently_closed
        inside_of = get_nested_value(data, 93, 0, 0, 0, 1)
        phone = get_nested_value(data, 178, 0, 3)
        website = get_nested_value(data, 7, 0)
        rating_stars = get_nested_value(data, 4, 7)
        rating_number_of_reviews = get_nested_value(data, 4, 8)
        price_level = get_nested_value(data, 4, 2)
        popularity_data = get_nested_value(data, 84, 0)
        spending_time = parse_spending_time_data(get_nested_value(data, 117, 0))
        popularity, waiting_time = None, None
        if popularity_data:
            popularity, waiting_time = parse_popularity_data(popularity_data, timezone)
        return dict(
            id=r['id'],
            data=dict(
                name=name,
                placeID=place_id,
                location=dict(lat=lat, lng=lng),
                h3Index=h3_index,
                address=address,
                timezone=timezone,
                categories=complete_categories(categories),
                temporarilyClosed=temporarily_closed,
                permanentlyClosed=permanently_closed,
                insideOf=inside_of,
                contact=dict(phone=phone, website=website),
                openingHours=opening_hours,
                rating=dict(stars=rating_stars, numberOfReviews=rating_number_of_reviews),
                # presumably a string of price symbols whose length encodes
                # the level -- confirm against raw data
                priceLevel=len(price_level) if price_level else None,
                popularity=popularity,
                waitingTime=waiting_time,
                spendingTime=spending_time
            )
        )

    # Fan out the id lookups concurrently; each raw result is mapped through
    # parse_result before being returned to the caller.
    return execute_futures(ids, google.get_by_id, parse_result)
|
<filename>custom_components/yandex_smart_home/capability_onoff.py
"""Implement the Yandex Smart Home on_off capability."""
from __future__ import annotations
from abc import ABC, abstractmethod
import logging
from typing import Any
from homeassistant.components import (
climate,
cover,
fan,
group,
humidifier,
input_boolean,
light,
lock,
media_player,
scene,
script,
switch,
vacuum,
water_heater,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_CLOSE_COVER,
SERVICE_LOCK,
SERVICE_OPEN_COVER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_UNLOCK,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import DOMAIN as HA_DOMAIN, HomeAssistant, State
from homeassistant.helpers.service import async_call_from_config
from . import const
from .capability import PREFIX_CAPABILITIES, AbstractCapability, register_capability
from .helpers import Config, RequestData
_LOGGER = logging.getLogger(__name__)
CAPABILITIES_ONOFF = PREFIX_CAPABILITIES + 'on_off'
class OnOffCapability(AbstractCapability, ABC):
    """On_off to offer basic on and off functionality.

    https://yandex.ru/dev/dialogs/alice/doc/smart-home/concepts/on_off-docpage/
    """

    type = CAPABILITIES_ONOFF
    instance = const.ON_OFF_INSTANCE_ON

    def parameters(self) -> dict[str, Any] | None:
        """Return parameters for a devices request."""
        return None

    def get_value(self) -> bool | None:
        # Any state other than an explicit 'off' is reported as on.
        return self.state.state != STATE_OFF

    async def set_state(self, data: RequestData, state: dict[str, Any]):
        """Handle an on/off command from Yandex.

        User-configured turn_on/turn_off service overrides in the entity
        config take precedence over the domain default in _set_state().
        """
        for key, call in ((const.CONF_TURN_ON, state['value']), (const.CONF_TURN_OFF, not state['value'])):
            if key in self.entity_config and call:
                return await async_call_from_config(
                    self.hass,
                    self.entity_config[key],
                    blocking=True,
                    context=data.context
                )
        await self._set_state(data, state)

    @abstractmethod
    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        """Domain-specific implementation of turning the entity on or off."""
        pass
@register_capability
class OnOffCapabilityBasic(OnOffCapability):
    """Default on/off handling for simple toggleable domains."""

    def supported(self) -> bool:
        supported_domains = (
            light.DOMAIN,
            fan.DOMAIN,
            switch.DOMAIN,
            humidifier.DOMAIN,
            input_boolean.DOMAIN,
        )
        return self.state.domain in supported_domains

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        service = SERVICE_TURN_ON if state['value'] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            self.state.domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_capability
class OnOffCapabilityGroup(OnOffCapability):
    """On/off for group entities, delegated to homeassistant.turn_on/off."""

    def supported(self) -> bool:
        # FIX: the original used 'self.state.domain in group.DOMAIN', which is
        # a substring test on the domain string; equality is what is meant.
        return self.state.domain == group.DOMAIN

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        service = SERVICE_TURN_ON if state['value'] else SERVICE_TURN_OFF
        # Groups are toggled through the core homeassistant domain services.
        await self.hass.services.async_call(
            HA_DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_capability
class OnOffCapabilityScript(OnOffCapability):
    """On/off for scenes and scripts (fire-and-forget, no retrievable state)."""

    retrievable = False

    def get_value(self) -> bool | None:
        # Scenes and scripts have no meaningful on/off state to report.
        return None

    def supported(self) -> bool:
        return self.state.domain in (scene.DOMAIN, script.DOMAIN)

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        # Scripts may run for a long time, so don't block waiting on them.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_capability
class OnOffCapabilityLock(OnOffCapability):
    """Maps on/off to unlock/lock for lock entities."""

    def get_value(self) -> bool:
        # "on" means unlocked in the Yandex on_off semantics for locks.
        return self.state.state == lock.STATE_UNLOCKED

    def supported(self) -> bool:
        return self.state.domain == lock.DOMAIN

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        service = SERVICE_UNLOCK if state['value'] else SERVICE_LOCK
        await self.hass.services.async_call(
            lock.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_capability
class OnOffCapabilityCover(OnOffCapability):
    """Maps on/off to open/close for cover entities."""

    def __init__(self, hass: HomeAssistant, config: Config, state: State):
        super().__init__(hass, config, state)
        # Covers explicitly configured with an unknown state can't be polled.
        if self.entity_config.get(const.CONF_STATE_UNKNOWN):
            self.retrievable = False

    def get_value(self) -> bool:
        return self.state.state == cover.STATE_OPEN

    def parameters(self) -> dict[str, Any] | None:
        return {'split': True} if not self.retrievable else None

    def supported(self) -> bool:
        return self.state.domain == cover.DOMAIN

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        service = SERVICE_OPEN_COVER if state['value'] else SERVICE_CLOSE_COVER
        await self.hass.services.async_call(
            cover.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_capability
class OnOffCapabilityMediaPlayer(OnOffCapability):
    """On/off for media players, honoring config-level turn_on/off overrides."""

    def supported(self) -> bool:
        if self.state.domain != media_player.DOMAIN:
            return False
        if const.CONF_TURN_ON in self.entity_config or const.CONF_TURN_OFF in self.entity_config:
            return True
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        # FIX: return a real bool -- the original returned the raw bitmask int
        # despite the declared -> bool return type.
        return bool(features & (media_player.SUPPORT_TURN_ON | media_player.SUPPORT_TURN_OFF))

    def parameters(self) -> dict[str, Any] | None:
        if not self.retrievable:
            return {'split': True}
        # FIX: make the implicit fall-through explicit.
        return None

    @property
    def retrievable(self) -> bool:
        # State is only reliable when the player can be both turned on and off
        # (natively or via config overrides).
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        support_turn_on = const.CONF_TURN_ON in self.entity_config or features & media_player.SUPPORT_TURN_ON
        support_turn_off = const.CONF_TURN_OFF in self.entity_config or features & media_player.SUPPORT_TURN_OFF
        return bool(support_turn_on and support_turn_off)

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        service = SERVICE_TURN_ON if state['value'] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            media_player.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_capability
class OnOffCapabilityVacuum(OnOffCapability):
    """On/off for vacuums, mapped to start/stop/return-to-base when available."""

    def get_value(self) -> bool | None:
        return self.state.state in [STATE_ON, vacuum.STATE_CLEANING]

    def supported(self) -> bool:
        if self.state.domain != vacuum.DOMAIN:
            return False
        if const.CONF_TURN_ON in self.entity_config:
            return True
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & vacuum.SUPPORT_TURN_ON and features & vacuum.SUPPORT_TURN_OFF:
            return True
        # start + (return home or stop) is an acceptable substitute pair.
        if features & vacuum.SUPPORT_START:
            if features & vacuum.SUPPORT_RETURN_HOME or features & vacuum.SUPPORT_STOP:
                return True
        return False

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        # FIX: default to 0 like supported() does -- .get() without a default
        # returned None here and crashed on the bitwise '&' tests below when
        # the attribute was missing.
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if state['value']:
            if features & vacuum.SUPPORT_START:
                service = vacuum.SERVICE_START
            else:
                service = SERVICE_TURN_ON
        else:
            if features & vacuum.SUPPORT_RETURN_HOME:
                service = vacuum.SERVICE_RETURN_TO_BASE
            elif features & vacuum.SUPPORT_STOP:
                service = vacuum.SERVICE_STOP
            else:
                service = SERVICE_TURN_OFF
        await self.hass.services.async_call(
            vacuum.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_capability
class OnOffCapabilityClimate(OnOffCapability):
    """On/off for climate entities: prefer heat_cool/auto HVAC mode on 'on'."""

    def get_value(self) -> bool | None:
        return self.state.state != climate.HVAC_MODE_OFF

    def supported(self) -> bool:
        return self.state.domain == climate.DOMAIN

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        service_data = {
            ATTR_ENTITY_ID: self.state.entity_id
        }
        if state['value']:
            service = SERVICE_TURN_ON
            # FIX: guard against a missing attribute -- .get() may return None
            # and 'mode not in None' raises TypeError.
            hvac_modes = self.state.attributes.get(climate.ATTR_HVAC_MODES) or []
            for mode in (climate.const.HVAC_MODE_HEAT_COOL,
                         climate.const.HVAC_MODE_AUTO):
                if mode not in hvac_modes:
                    continue
                # Prefer setting an automatic HVAC mode over a plain turn_on.
                service_data[climate.ATTR_HVAC_MODE] = mode
                service = climate.SERVICE_SET_HVAC_MODE
                break
        else:
            service = SERVICE_TURN_OFF
        await self.hass.services.async_call(
            climate.DOMAIN,
            service,
            service_data,
            blocking=True,
            context=data.context
        )
@register_capability
class OnOffCapabilityWaterHeater(OnOffCapability):
    # Candidate operation-mode names meaning "on"/"off" across integrations;
    # the first alias present in the entity's operation list is used.
    water_heater_operations = {
        STATE_ON: [STATE_ON, 'On', 'ON', water_heater.STATE_ELECTRIC],
        STATE_OFF: [STATE_OFF, 'Off', 'OFF'],
    }

    def get_value(self) -> bool | None:
        # Report "on" unless the current operation mode is the detected
        # off-mode for this entity.
        operation_mode = self.state.attributes.get(water_heater.ATTR_OPERATION_MODE)
        operation_list = self.state.attributes.get(water_heater.ATTR_OPERATION_LIST)
        return operation_mode != self.get_water_heater_operation(STATE_OFF, operation_list)

    def get_water_heater_operation(self, required_mode: str, operations_list: list[str]) -> str | None:
        """Return the first known alias of required_mode found in operations_list."""
        for operation in self.water_heater_operations[required_mode]:
            if operation in operations_list:
                return operation
        return None

    def supported(self) -> bool:
        # Supported only when the entity exposes operation modes and both an
        # "on" and an "off" mode can be identified in its operation list.
        if self.state.domain != water_heater.DOMAIN:
            return False
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & water_heater.SUPPORT_OPERATION_MODE:
            operation_list = self.state.attributes.get(water_heater.ATTR_OPERATION_LIST)
            if self.get_water_heater_operation(STATE_ON, operation_list) is None:
                return False
            if self.get_water_heater_operation(STATE_OFF, operation_list) is None:
                return False
            return True
        return False

    async def _set_state(self, data: RequestData, state: dict[str, Any]):
        # Switch between the detected on/off operation modes.
        operation_list = self.state.attributes.get(water_heater.ATTR_OPERATION_LIST)
        if state['value']:
            mode = self.get_water_heater_operation(STATE_ON, operation_list)
        else:
            mode = self.get_water_heater_operation(STATE_OFF, operation_list)
        await self.hass.services.async_call(
            water_heater.DOMAIN,
            water_heater.SERVICE_SET_OPERATION_MODE, {
                ATTR_ENTITY_ID: self.state.entity_id,
                water_heater.ATTR_OPERATION_MODE: mode
            },
            blocking=True,
            context=data.context
        )
|
<filename>tests/djangokeys/core/test_env_vars.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Contains tests for accessing the values of environment variables.
import pytest
from unittest.mock import patch
from unittest.mock import MagicMock
from djangokeys.core.env_vars import EnvironmentVariables
from djangokeys.core.env_vars import EnvironmentVariableNotFound
from tests.files import EMPTY_ENV_PATH
from tests.files import EXAMPLE1_ENV_PATH
def test__get_value__not_found__overwrite_true():
    """When the env var is set nowhere, an appropriate exception is raised."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value=None)
        env = EnvironmentVariables(EMPTY_ENV_PATH)
        with pytest.raises(EnvironmentVariableNotFound):
            env.get_value("ENV_VAR", overwrite=True)
        mocked_os.getenv.assert_called_once_with('ENV_VAR', None)
def test__get_value__not_found__overwrite_false():
    """When the env var is set nowhere, an appropriate exception is raised."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value=None)
        env = EnvironmentVariables(EMPTY_ENV_PATH)
        with pytest.raises(EnvironmentVariableNotFound):
            env.get_value("ENV_VAR", overwrite=False)
        mocked_os.getenv.assert_called_once_with('ENV_VAR', None)
def test__get_value__set_by_file__overwrite_true():
    """When the env var is only set by the dotenv file, use that value."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value=None)
        env = EnvironmentVariables(EXAMPLE1_ENV_PATH)
        assert env.get_value("DOMAIN", overwrite=True) == "example.org"
        mocked_os.getenv.assert_called_once_with("DOMAIN", None)
def test__get_value__set_by_file__overwrite_false():
    """When the env var is only set by the dotenv file, use that value."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value=None)
        env = EnvironmentVariables(EXAMPLE1_ENV_PATH)
        assert env.get_value("DOMAIN", overwrite=False) == "example.org"
        mocked_os.getenv.assert_called_once_with("DOMAIN", None)
def test__get_value__set_by_environment__overwrite_true():
    """When the env var is only set by the environment, use that value."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value="mydomain.org")
        env = EnvironmentVariables(EMPTY_ENV_PATH)
        assert env.get_value("DOMAIN", overwrite=True) == "mydomain.org"
        mocked_os.getenv.assert_called_once_with("DOMAIN", None)
def test__get_value__set_by_environment__overwrite_false():
    """When the env var is only set by the environment, use that value."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value="mydomain.org")
        env = EnvironmentVariables(EMPTY_ENV_PATH)
        assert env.get_value("DOMAIN", overwrite=False) == "mydomain.org"
        mocked_os.getenv.assert_called_once_with("DOMAIN", None)
def test__get_value__conflict__overwrite_true():
    """With both sources set and overwrite=True, the dotenv value wins."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value="mydomain.org")
        env = EnvironmentVariables(EXAMPLE1_ENV_PATH)
        assert env.get_value("DOMAIN", overwrite=True) == "example.org"
def test__get_value__conflict__overwrite_false():
    """With both sources set and overwrite=False, the environment value wins."""
    with patch('djangokeys.core.env_vars.os') as mocked_os:
        mocked_os.getenv = MagicMock(return_value="mydomain.org")
        env = EnvironmentVariables(EXAMPLE1_ENV_PATH)
        assert env.get_value("DOMAIN", overwrite=False) == "mydomain.org"
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import tensorflow as tf
import numpy as np
from detector import Detector
from util import load_image
import matplotlib.pyplot as plt
import cPickle
import os
import math
import configuration
import inference_wrapper
from inference_utils import caption_generator
from inference_utils import vocabulary
import json
import shutil
from bottle import run,post,request,response
import time
import ntpath
# --- One-time model setup (Python 2 / TF1 code; runs at import time) ---
start_time=time.time()
model_path='./models/model-2'
wordset_path='./models/words.pkl'
weight_path='./models/caffe_layers_value.pickle'
# Function words excluded from the per-word class-activation visualisations.
drop_words=[u'的',u'着',u'有',u'在',u'上',u'里',u'和',u'旁',u'前',u'、',u'后面',u'前站',u'下',u'中']
with open(wordset_path)as f:
    wordset=cPickle.load(f)
# Map label index -> word for decoding detector outputs.
inv_map={}
for i,x in enumerate(wordset):
    inv_map[i]=x
n_labels=len(wordset)
# Detector graph: CNN producing per-label scores and class-activation maps.
images_tf=tf.placeholder(tf.float32,[None,224,224,3],name="images")
labels_tf=tf.placeholder(tf.int64,[None],name='labels')
detector=Detector(weight_path,n_labels)
c1,c2,c3,c4,conv5,conv6,gap,output=detector.inference(images_tf)
sig_output=tf.nn.sigmoid(output)
classmap=detector.get_classmap(labels_tf,conv6)
sess1=tf.InteractiveSession()
saver=tf.train.Saver()
saver.restore(sess1,model_path)
# Caption model lives in its own finalized graph and session.
g=tf.Graph()
with g.as_default():
    model=inference_wrapper.InferenceWrapper()
    restore_fn=model.build_graph_from_config(configuration.ModelConfig(),'./models/model.ckpt-633626')
g.finalize()
vocab=vocabulary.Vocabulary('./models/word_counts.txt')
sess2=tf.InteractiveSession(graph=g)
restore_fn(sess2)
generator=caption_generator.CaptionGenerator(model,vocab)
# Log how long model loading took.
print time.time()-start_time
@post('/process')
def my_process():
    # Handle POST /process: label an image, save class-activation overlays
    # into req_obj['output'], then caption the image. (Python 2 syntax.)
    start_time=time.time()
    req_obj=json.loads(request.body.read())
    print 'extract feature from %s'%req_obj['input']
    image=load_image(req_obj['input'])
    # Run the detector once for conv features and sigmoid label scores.
    conv6_val,output_val,sig_output_val=sess1.run([conv6,output,sig_output],feed_dict={images_tf:[image]})
    rlabel_predictions=sig_output_val.round()
    mask=np.ones((1,n_labels))
    last= rlabel_predictions==mask
    # Label indices sorted by descending confidence.
    xx=np.argsort(sig_output_val[0])[::-1]
    prob={}
    # Reset the output directory, keeping a copy of the input image.
    shutil.copyfile(req_obj['input'],os.path.join('/tmp/demo/freeze',ntpath.basename(req_obj['input'])))
    shutil.rmtree(req_obj['output'])
    os.makedirs(req_obj['output'])
    shutil.copyfile(os.path.join('/tmp/demo/freeze',ntpath.basename(req_obj['input'])),os.path.join(req_obj['output'],ntpath.basename(req_obj['input'])))
    print time.time()-start_time
    start_time=time.time()
    for x in xx:
        # Visualise only confident labels that aren't function words.
        if inv_map[x]not in drop_words and sig_output_val[0][x]>req_obj['threshold']:
            start_time=time.time()
            classmap_vals=sess1.run(classmap,feed_dict={labels_tf:[x],conv6:conv6_val})
            print time.time()-start_time
            start_time=time.time()
            # Normalise each class map to [0, 1] for display.
            classmap_vis=map(lambda x:((x-x.min())/(x.max()-x.min())),classmap_vals)
            for vis,ori in zip(classmap_vis,[image]):
                print inv_map[x],sig_output_val[0][x]
                prob[inv_map[x]]=float(sig_output_val[0][x])
                fig=plt.gcf()
                plt.axis('off')
                plt.imshow(ori)
                # Overlay the class-activation heat map on the input image.
                plt.imshow(vis,cmap=plt.cm.jet,alpha=0.5,interpolation='nearest')
                fig.savefig(os.path.join(req_obj['output'],inv_map[x]+'.png'),dpi=100)
            print 'sace:',time.time()-start_time
    feature=sig_output_val[0]
    print time.time()-start_time
    start_time=time.time()
    # Caption the raw image bytes, conditioned on the detector label scores.
    with tf.gfile.GFile(req_obj['input'],"r")as f:
        image=f.read()
    captions=generator.beam_search(sess2,image,feature)
    print time.time()-start_time
    result={'filename':req_obj['input'],'caption':(" ".join([vocab.id_to_word(w)for w in captions[0].sentence[1:-1]])).decode('utf-8'),'prob':prob}
    print result
    return json.dumps(result)
# Start the bottle HTTP server; blocks serving POST /process forever.
run(host='localhost',port=8080,debug=True)
|
"""JSON implementations of cataloging managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import profile
from . import sessions
from .. import utilities
from ..osid import managers as osid_managers
from ..primitives import Type
from ..type.objects import TypeList
from ..utilities import get_registry
from dlkit.abstract_osid.osid import errors
from dlkit.manager_impls.cataloging import managers as cataloging_managers
class CatalogingProfile(osid_managers.OsidProfile, cataloging_managers.CatalogingProfile):
    """The cataloging profile describes the interoperability among cataloging services."""

    def _record_types_from_registry(self, registry_name):
        """Build a TypeList from the named record-type registry."""
        type_maps = get_registry(registry_name, self._runtime)
        return TypeList([Type(**type_maps[name]) for name in type_maps])

    def supports_catalog_lookup(self):
        """Tests for the availability of a catalog lookup service.

        return: (boolean) - ``true`` if catalog lookup is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return 'supports_catalog_lookup' in profile.SUPPORTS

    def supports_catalog_query(self):
        """Tests for the availability of a catalog query service that defines more comprehensive queries.

        return: (boolean) - ``true`` if catalog query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return 'supports_catalog_query' in profile.SUPPORTS

    def supports_catalog_admin(self):
        """Tests for the availability of a catalog administration service for the addition and deletion of catalogs.

        return: (boolean) - ``true`` if catalog administration is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return 'supports_catalog_admin' in profile.SUPPORTS

    def supports_catalog_hierarchy(self):
        """Tests for the availability of a catalog hierarchy traversal service.

        return: (boolean) - ``true`` if catalog hierarchy traversal is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return 'supports_catalog_hierarchy' in profile.SUPPORTS

    def supports_catalog_hierarchy_design(self):
        """Tests for the availability of a catalog hierarchy design service.

        return: (boolean) - ``true`` if catalog hierarchy design is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented in all
        providers.*
        """
        return 'supports_catalog_hierarchy_design' in profile.SUPPORTS

    def get_catalog_record_types(self):
        """Gets the supported ``Catalog`` record types.

        return: (osid.type.TypeList) - a list containing the supported
                ``Catalog`` record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return self._record_types_from_registry('CATALOG_RECORD_TYPES')

    catalog_record_types = property(fget=get_catalog_record_types)

    def get_catalog_search_record_types(self):
        """Gets the supported catalog search record types.

        return: (osid.type.TypeList) - a list containing the supported
                search record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return self._record_types_from_registry('CATALOG_SEARCH_RECORD_TYPES')

    catalog_search_record_types = property(fget=get_catalog_search_record_types)
class CatalogingManager(osid_managers.OsidManager, CatalogingProfile, cataloging_managers.CatalogingManager):
    """The cataloging manager provides access to cataloging sessions and provides interoperability tests for various aspects of this service.

    The sessions included in this manager are:

      * ``CatalogSession:`` a session to lookup mappings to catalogs
      * ``CatalogAssignmentSession:`` a session to manage Id to Catalog
        mappings
      * ``CatalogEntryNotificationSession:`` a session to receive
        notification of changed mappings
      * ``CatalogLookupSession:`` a session to retrieve catalogs
      * ``CatalogQuerySession:`` a session to query catalogs
      * ``CatalogSearchSession:`` a session to search for catalogs
      * ``CatalogAdminSession:`` a session to create, update and delete
        catalogs
      * ``CatalogNotificationSession:`` a session to receive
        notifications for changes in catalogs
      * ``CatalogHierarchyTraversalSession:`` a session to traverse
        hierarchies of catalogs
      * ``CatalogHierarchyDesignSession:`` a session to manage
        hierarchies of catalogs

    The cataloging manager also provides a profile for determining the
    supported search types supported by this service.

    """

    def __init__(self):
        osid_managers.OsidManager.__init__(self)

    @utilities.remove_null_proxy_kwarg
    def get_catalog_lookup_session(self):
        """Gets the catalog lookup session.

        return: (osid.cataloging.CatalogLookupSession) - a
                ``CatalogLookupSession``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_lookup()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_lookup()`` is ``true``.*

        """
        # Guard: profile must advertise the capability before a session
        # can be handed out.
        if not self.supports_catalog_lookup():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogLookupSession(runtime=self._runtime)

    catalog_lookup_session = property(fget=get_catalog_lookup_session)

    @utilities.remove_null_proxy_kwarg
    def get_catalog_query_session(self):
        """Gets the catalog query session.

        return: (osid.cataloging.CatalogQuerySession) - a
                ``CatalogQuerySession``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_query()`` is ``true``.*

        """
        if not self.supports_catalog_query():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogQuerySession(runtime=self._runtime)

    catalog_query_session = property(fget=get_catalog_query_session)

    @utilities.remove_null_proxy_kwarg
    def get_catalog_admin_session(self):
        """Gets the catalog administrative session for creating, updating and deleting catalogs.

        return: (osid.cataloging.CatalogAdminSession) - a
                ``CatalogAdminSession``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_admin()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_admin()`` is ``true``.*

        """
        if not self.supports_catalog_admin():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogAdminSession(runtime=self._runtime)

    catalog_admin_session = property(fget=get_catalog_admin_session)

    @utilities.remove_null_proxy_kwarg
    def get_catalog_hierarchy_session(self):
        """Gets the catalog hierarchy traversal session.

        return: (osid.cataloging.CatalogHierarchySession) - a
                ``CatalogHierarchySession``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_hierarchy()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_hierarchy()`` is ``true``.*

        """
        if not self.supports_catalog_hierarchy():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogHierarchySession(runtime=self._runtime)

    catalog_hierarchy_session = property(fget=get_catalog_hierarchy_session)

    @utilities.remove_null_proxy_kwarg
    def get_catalog_hierarchy_design_session(self):
        """Gets the catalog hierarchy design session.

        return: (osid.cataloging.CatalogHierarchyDesignSession) - a
                ``CatalogHierarchyDesignSession``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_hierarchy_design()``
                is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_hierarchy_design()`` is ``true``.*

        """
        if not self.supports_catalog_hierarchy_design():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogHierarchyDesignSession(runtime=self._runtime)

    catalog_hierarchy_design_session = property(fget=get_catalog_hierarchy_design_session)

    def get_cataloging_rules_manager(self):
        """Gets the cataloging rules manager.

        return: (osid.cataloging.rules.CatalogingRulesManager) - a
                ``CatalogingRulesManager``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_cataloging_rules()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_cataloging_rules()`` is ``true``.*

        """
        # The rules sub-package is not provided by this implementation.
        raise errors.Unimplemented()

    cataloging_rules_manager = property(fget=get_cataloging_rules_manager)
class CatalogingProxyManager(osid_managers.OsidProxyManager, CatalogingProfile, cataloging_managers.CatalogingProxyManager):
    """The cataloging manager provides access to cataloging sessions and provides interoperability tests for various aspects of this service.

    Methods in this manager support the passing of a ``Proxy`` for the
    purposes of passing information from server environments.

      * ``CatalogSession:`` a session to lookup mappings to catalogs
      * ``CatalogAssignmentSession:`` a session to manage Id to Catalog
        mappings
      * ``CatalogEntryNotificationSession:`` a session to receive
        notification of changed mappings
      * ``CatalogLookupSession:`` a session to retrieve catalogs
      * ``CatalogQuerySession:`` a session to query catalogs
      * ``CatalogSearchSession:`` a session to search for catalogs
      * ``CatalogAdminSession:`` a session to create, update and delete
        catalogs
      * ``CatalogNotificationSession:`` a session to receive
        notifications for changes in catalogs
      * ``CatalogHierarchyTraversalSession:`` a session to traverse
        hierarchies of catalogs
      * ``CatalogHierarchyDesignSession:`` a session to manage
        hierarchies of catalogs

    The cataloging manager also provides a profile for determining the
    supported search types supported by this service.

    """

    def __init__(self):
        osid_managers.OsidProxyManager.__init__(self)

    @utilities.arguments_not_none
    def get_catalog_lookup_session(self, proxy):
        """Gets the catalog lookup session.

        arg:    proxy (osid.proxy.Proxy): a proxy
        return: (osid.cataloging.CatalogLookupSession) - a
                ``CatalogLookupSession``
        raise:  NullArgument - ``proxy`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_lookup()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_lookup()`` is ``true``.*

        """
        # Guard: profile must advertise the capability before a session
        # can be handed out.
        if not self.supports_catalog_lookup():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogLookupSession(proxy=proxy, runtime=self._runtime)

    @utilities.arguments_not_none
    def get_catalog_query_session(self, proxy):
        """Gets the catalog query session.

        arg:    proxy (osid.proxy.Proxy): a proxy
        return: (osid.cataloging.CatalogQuerySession) - a
                ``CatalogQuerySession``
        raise:  NullArgument - ``proxy`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_query()`` is ``true``.*

        """
        if not self.supports_catalog_query():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogQuerySession(proxy=proxy, runtime=self._runtime)

    @utilities.arguments_not_none
    def get_catalog_admin_session(self, proxy):
        """Gets the catalog administrative session for creating, updating and deleting catalogs.

        arg:    proxy (osid.proxy.Proxy): a proxy
        return: (osid.cataloging.CatalogAdminSession) - a
                ``CatalogAdminSession``
        raise:  NullArgument - ``proxy`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_admin()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_admin()`` is ``true``.*

        """
        if not self.supports_catalog_admin():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogAdminSession(proxy=proxy, runtime=self._runtime)

    @utilities.arguments_not_none
    def get_catalog_hierarchy_session(self, proxy):
        """Gets the catalog hierarchy traversal session.

        arg:    proxy (osid.proxy.Proxy): proxy
        return: (osid.cataloging.CatalogHierarchySession) - a
                ``CatalogHierarchySession``
        raise:  NullArgument - ``proxy`` is null
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_hierarchy()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_hierarchy()`` is ``true``.*

        """
        if not self.supports_catalog_hierarchy():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogHierarchySession(proxy=proxy, runtime=self._runtime)

    @utilities.arguments_not_none
    def get_catalog_hierarchy_design_session(self, proxy):
        """Gets the catalog hierarchy design session.

        arg:    proxy (osid.proxy.Proxy): proxy
        return: (osid.cataloging.CatalogHierarchyDesignSession) - a
                ``CatalogHierarchyDesignSession``
        raise:  NullArgument - ``proxy`` is null
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_catalog_hierarchy_design()``
                is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_catalog_hierarchy_design()`` is ``true``.*

        """
        if not self.supports_catalog_hierarchy_design():
            raise errors.Unimplemented()
        # pylint: disable=no-member
        return sessions.CatalogHierarchyDesignSession(proxy=proxy, runtime=self._runtime)

    def get_cataloging_rules_proxy_manager(self):
        """Gets the cataloging rules proxy manager.

        return: (osid.cataloging.rules.CatalogingRulesProxyManager) - a
                ``CatalogingRulesManager``
        raise:  OperationFailed - unable to complete request
        raise:  Unimplemented - ``supports_cataloging_rules()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_cataloging_rules()`` is ``true``.*

        """
        # The rules sub-package is not provided by this implementation.
        raise errors.Unimplemented()

    cataloging_rules_proxy_manager = property(fget=get_cataloging_rules_proxy_manager)
|
<reponame>dhruvgairola/linearAlgebra-coursera
from vec import Vec
def getitem(M, k):
    """Return the value of entry k in M. The value of k should be a pair.

    Entries absent from the sparse representation ``M.f`` are implicitly 0.
    """
    assert k[0] in M.D[0] and k[1] in M.D[1]
    # Single lookup with a default instead of `in M.f.keys()` + indexing,
    # which scanned the dict twice.
    return M.f.get((k[0], k[1]), 0)
def setitem(M, k, val):
    """Sets the element of M with label k to be val. k should be a (row, col) pair."""
    row, col = k
    assert row in M.D[0] and col in M.D[1]
    M.f[(row, col)] = val
def add(A, B):
    """Returns the entrywise sum of A and B (their domains must match)."""
    assert A.D == B.D
    # Start from a copy of A's sparse entries, then fold in B's.
    total = Mat(A.D, dict(A.f))
    for key, val in B.f.items():
        total.f[key] = total.f.get(key, 0) + val
    return total
def scalar_mul(M, alpha):
    """Returns the product of scalar alpha with M; M itself is unchanged."""
    scaled_entries = {key: alpha * val for key, val in M.f.items()}
    return Mat(M.D, scaled_entries)
def equal(A, B):
    """Returns true iff A is equal to B.

    Sparse entries missing from ``f`` are treated as 0.  Unlike the
    previous implementation, this does NOT mutate its arguments: the old
    version filled explicit zeros into both ``A.f`` and ``B.f`` as a side
    effect of comparing, silently bloating the sparse representation.
    """
    assert A.D == B.D
    return all(A.f.get((r, c), 0) == B.f.get((r, c), 0)
               for r in A.D[0] for c in A.D[1])
def transpose(M):
    """Returns a new matrix that is the transpose of M."""
    flipped = {(col, row): val for (row, col), val in M.f.items()}
    return Mat((M.D[1], M.D[0]), flipped)
def vector_matrix_mul(v, M):
    """Returns the row-vector/matrix product v*M."""
    assert M.D[0] == v.D
    # Accumulator over the column labels, initialised to zero.
    product = Vec(M.D[1], {label: 0 for label in M.D[1]})
    for (row, col), entry in M.f.items():
        product.f[col] += entry * v[row]
    return product
def matrix_vector_mul(M, v):
    """Returns the matrix/column-vector product M*v."""
    assert M.D[1] == v.D
    # Accumulator over the row labels, initialised to zero.
    product = Vec(M.D[0], {label: 0 for label in M.D[0]})
    for (row, col), entry in M.f.items():
        product.f[row] += entry * v[col]
    return product
def matrix_matrix_mul(A, B):
    """Returns the matrix product A*B (A's columns must match B's rows)."""
    from matutil import mat2coldict, mat2rowdict
    assert A.D[1] == B.D[0]
    rows = mat2rowdict(A)
    cols = mat2coldict(B)
    # Each product entry is the dot product (Vec * Vec) of a row of A
    # with a column of B.
    entries = {(r, c): rows[r] * cols[c] for r in rows for c in cols}
    return Mat((A.D[0], B.D[1]), entries)
################################################################################
class Mat:
    """Sparse matrix over arbitrary row/column label sets.

    D is a pair (row_labels, col_labels); f maps (row, col) pairs to
    entries.  Pairs missing from f are implicitly zero.
    """

    def __init__(self, labels, function):
        # labels: (set_of_row_labels, set_of_col_labels)
        # function: sparse dict {(row, col): value}
        self.D = labels
        self.f = function

    # Module-level functions are bound as the operator implementations.
    __getitem__ = getitem
    __setitem__ = setitem
    transpose = transpose

    def __neg__(self):
        # Negation delegates to scalar multiplication via __mul__.
        return (-1)*self

    def __mul__(self,other):
        # Dispatch on the right operand's type: Mat, Vec, or scalar.
        if Mat == type(other):
            return matrix_matrix_mul(self,other)
        elif Vec == type(other):
            return matrix_vector_mul(self,other)
        else:
            return scalar_mul(self,other)
    #this will only be used if other is scalar (or not-supported). mat and vec both have __mul__ implemented

    def __rmul__(self, other):
        if Vec == type(other):
            return vector_matrix_mul(other, self)
        else:  # Assume scalar
            return scalar_mul(self, other)

    __add__ = add

    def __sub__(a,b):
        # Subtraction is addition of the negation.
        return a+(-b)

    __eq__ = equal

    def copy(self):
        # Shallow copy: shares the label sets, copies the entry dict.
        return Mat(self.D, self.f.copy())

    def __str__(M, rows=None, cols=None):
        "string representation for print()"
        # Sort labels for a stable layout; fall back to hash ordering
        # when labels of mixed types are not mutually comparable.
        if rows == None:
            try:
                rows = sorted(M.D[0])
            except TypeError:
                rows = sorted(M.D[0], key=hash)
        if cols == None:
            try:
                cols = sorted(M.D[1])
            except TypeError:
                cols = sorted(M.D[1], key=hash)
        separator = ' | '
        # numdec: significant digits used when formatting numeric entries.
        numdec = 3
        # pre: width of the row-label column.
        pre = 1+max([len(str(r)) for r in rows])
        # colw: per-column width, wide enough for the header and every entry.
        colw = {col:(1+max([len(str(col))] + [len('{0:.{1}G}'.format(M[row,col],numdec)) if isinstance(M[row,col], int) or isinstance(M[row,col], float) else len(str(M[row,col])) for row in rows])) for col in cols}
        # s1+s2: header line; s3: rule; s4: one formatted line per row.
        s1 = ' '*(1+ pre + len(separator))
        s2 = ''.join(['{0:>{1}}'.format(c,colw[c]) for c in cols])
        s3 = ' '*(pre+len(separator)) + '-'*(sum(list(colw.values())) + 1)
        s4 = ''.join(['{0:>{1}} {2}'.format(r, pre,separator)+''.join(['{0:>{1}.{2}G}'.format(M[r,c],colw[c],numdec) if isinstance(M[r,c], int) or isinstance(M[r,c], float) else '{0:>{1}}'.format(M[r,c], colw[c]) for c in cols])+'\n' for r in rows])
        return '\n' + s1 + s2 + '\n' + s3 + '\n' + s4

    def pp(self, rows, cols):
        # Pretty-print with an explicit row/column ordering.
        print(self.__str__(rows, cols))

    def __repr__(self):
        "evaluatable representation"
        return "Mat(" + str(self.D) +", " + str(self.f) + ")"
|
<gh_stars>0
# Generated by Django 3.2.4 on 2021-07-01 12:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the finliveapp livestock-tracking models.
    # NOTE: operation order matters — models referenced by ForeignKey
    # (Animal, Organization, SeedingType) are created before the models
    # and AddField operations that point at them.

    initial = True

    dependencies = [
    ]

    operations = [
        # Core entity: one row per tracked animal.
        migrations.CreateModel(
            name='Animal',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('euid', models.CharField(max_length=256, unique=True)),
                ('name', models.CharField(max_length=128)),
                ('birthDate', models.DateField()),
                ('animalid', models.CharField(max_length=128)),
                ('arrivaldate', models.DateField()),
                # Departure fields stay empty while the animal is present.
                ('departuredate', models.DateField(blank=True, null=True)),
                ('departurereason', models.CharField(blank=True, max_length=256, null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Lookup tables with externally assigned integer ids.
        migrations.CreateModel(
            name='Breed',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('name', models.CharField(max_length=128)),
            ],
        ),
        migrations.CreateModel(
            name='Gender',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
                ('name', models.CharField(max_length=128)),
            ],
        ),
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=256)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='SeedingType',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=256)),
            ],
        ),
        # Per-animal event records; deleting an animal cascades to them.
        migrations.CreateModel(
            name='Weight',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('timestamp', models.DateTimeField()),
                ('weight', models.DecimalField(decimal_places=3, max_digits=8)),
                ('automaticmeasurement', models.BooleanField()),
                ('animal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finliveapp.animal')),
            ],
        ),
        migrations.CreateModel(
            name='PregnancyCheck',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('result', models.BooleanField()),
                ('calvingdate', models.DateField()),
                ('animal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finliveapp.animal')),
            ],
        ),
        migrations.CreateModel(
            name='Insemination',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('bull', models.CharField(max_length=128)),
                ('animal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finliveapp.animal')),
                ('inseminationmethod', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='finliveapp.seedingtype')),
            ],
        ),
        migrations.CreateModel(
            name='Calving',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('assistance', models.BooleanField()),
                ('calvingnumber', models.IntegerField()),
                ('animal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finliveapp.animal')),
            ],
        ),
        migrations.CreateModel(
            name='Barn',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=256)),
                ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='finliveapp.organization')),
            ],
        ),
        # Animal's classification FKs are added after the referenced
        # lookup tables exist; SET_NULL keeps animals when a lookup row
        # is deleted.
        migrations.AddField(
            model_name='animal',
            name='barn',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='finliveapp.barn'),
        ),
        migrations.AddField(
            model_name='animal',
            name='breed',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='finliveapp.breed'),
        ),
        migrations.AddField(
            model_name='animal',
            name='gender',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='finliveapp.gender'),
        ),
        migrations.AddField(
            model_name='animal',
            name='organization',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='finliveapp.organization'),
        ),
    ]
|
'''
@File : Bam2CuttingBW.py
@Time    :   2020/8/15 4:30 PM
@Author : KeeeeepGoing(<NAME>)
@Version :
@Contact : <EMAIL>
@Desc : copy from https://github.com/forrestzhang/bagatelle.git
'''
import pysam
import sys
import pyBigWig
def dhstobw(bamfile, samplename, excludechr='', library='Duke'):
    # Washington is under processing
    """Write per-base cutting-site counts from a BAM file to a BigWig.

    :param bamfile: path to an indexed BAM file
    :param samplename: output prefix; the file written is
        ``<samplename>_cutting.bw``
    :param excludechr: comma-separated chromosome names to skip
        (``None`` is treated the same as the empty string)
    :param library: Duke or Washington
                Duke: |=====>
                       <=====|
                Washington: |===========|
                Out put cutting site '|'
    :return: None; the BigWig file is written as a side effect
    """
    # Fix: identity check with `is None` instead of `== None` (PEP 8).
    if excludechr is None:
        excludechr = ''
    bwfile = samplename + '_' + 'cutting.bw'
    bamfor = Baminfo(bamfile)
    bw = pyBigWig.open(bwfile, "w")
    excludechrs = excludechr.split(',')
    # BigWig header wants (chromosome, length) pairs for kept chromosomes.
    countchrs = [chrom for chrom in bamfor.chrlen.items()
                 if chrom[0] not in excludechrs]
    bw.addHeader(countchrs)
    print(countchrs)
    for chromosome in bamfor.chrlen:
        if chromosome not in excludechrs:
            end = bamfor.chrlen[chromosome]
            dhscut = dhcutcount(bamfile=bamfile, chromosome=chromosome, start=1,
                                end=end, library=library)
            if dhscut:
                # pyBigWig requires entries in ascending position order.
                starts = sorted(dhscut)
                values = [float(dhscut[site]) for site in starts]
                bw.addEntries(chromosome, starts=starts, values=values,
                              span=1, step=1)
    bw.close()
def openBam(bamFile):
    """Open *bamFile* with pysam and verify it is indexed and has mapped reads.

    Exits the process with a diagnostic message on any failure.
    :return: an open ``pysam.Samfile`` handle
    """
    try:
        bam = pysam.Samfile(bamFile, 'rb')
    except IOError:
        sys.exit("The file {} does not exist".format(bamFile))
    # Fix: `except Exception` instead of a bare `except:` so that
    # SystemExit/KeyboardInterrupt are not swallowed.
    except Exception:
        sys.exit("The file {} does not have BAM format ".format(bamFile))

    try:
        if 'check_index' in dir(bam):
            assert (bam.check_index())
        else:
            # The proper check_index() function wasn't implemented until pysam 0.8.4!
            assert (bam._hasIndex())
    except Exception:
        sys.exit("{} does not appear to have an index. You MUST index the file first!".format(bamFile))

    if bam.mapped == 0:
        sys.exit("Samtools reports that the number of mapped "
                 "reads is zero for the file {}. Please check "
                 "that the file is properly indexed and that "
                 "it contains mapped reads.".format(bamFile))
    return bam
class Baminfo:
    """Thin wrapper around an open BAM file exposing chromosome lengths."""

    def __init__(self, bamfile):
        self.bamfile = bamfile
        self.samfile = openBam(self.bamfile)
        self.chrlen = self.getchrlen()

    def getchrlen(self):
        """Return a dict mapping each reference name to its length."""
        # pysam exposes parallel tuples of names and lengths; pair them up.
        return dict(zip(self.samfile.references, self.samfile.lengths))
def dhcutcount(bamfile, chromosome, start, end, library='Duke'):
    """Count cutting sites per 1-based position in a BAM region.

    :param bamfile: bamfile
    :param chromosome: reference name to scan
    :param start: region start
    :param end: region end
    :param library: Duke or Washington
                Duke: |=====>
                       <=====|
                Washington: |===========|
                Out put cutting site '|'
    :return: dictionary of cutting site count
    """
    samfile = openBam(bamfile)
    readscount = dict()
    if library == 'Duke':
        for aligned_read in samfile.fetch(reference=str(chromosome), start=start, end=end):
            # Duke libraries: the cut lies at the 3' end of reverse reads
            # and the 5' end of forward reads (converted to 1-based).
            site = (aligned_read.aend if aligned_read.is_reverse
                    else aligned_read.pos) + 1
            readscount[site] = readscount.get(site, 0) + 1
    elif library == 'Washington':
        # Washington protocol is not implemented yet.
        pass
    return readscount
|
<gh_stars>1-10
from pyvirtualdisplay import Display
import pandas as pd
import os
import sys
import shutil
import glob
import time
import requests
import datetime
import cv2
from error_handlers import exception_handler
from google.cloud import storage
from pathlib import Path
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
def clear_dir(dir_name):
    """Delete every (non-hidden) file directly inside *dir_name*."""
    for entry in glob.glob(dir_name + "/*"):
        os.remove(entry)
def rm_cache(timestamp):
    """Delete image folders older than KEEP_DAYS relative to *timestamp*.

    Folder names under OUTPUT_BASE_DIR are dates formatted '%m-%d-%Y'.
    """
    latest = datetime.datetime.strptime(timestamp, '%m-%d-%Y')
    # Snapshot the folder names first so deletions don't disturb the scan.
    names = [
        os.path.basename(entry.path) for entry in os.scandir(OUTPUT_BASE_DIR)
        if entry.is_dir()
    ]
    for folder in names:
        age = latest - datetime.datetime.strptime(folder, '%m-%d-%Y')
        if age.days >= KEEP_DAYS:
            folder_path = str((Path(OUTPUT_BASE_DIR) / folder).resolve())
            clear_dir(folder_path)
            os.rmdir(folder_path)
def process_thumb_prefix(thumb_urls):
    """Derive the thumbnail URL prefix (everything before 'img-master')."""
    # Placeholder thumbnails contain 'transparent' in the URL; skip those.
    real_urls = [u for u in thumb_urls if 'transparent' not in u]
    return real_urls[0].split('img-master')[0]
def large_img_url(url):
    """Rewrite a thumbnail URL to the compressed 'img-master' image URL."""
    suffix = url.split('img-master')[1]
    return 'https://i.pximg.net/img-master' + suffix
def thumb_img_url(url, thumb_prefix):
    """Rebuild a thumbnail URL under *thumb_prefix*."""
    suffix = url.split('img-master')[1]
    return thumb_prefix + 'img-master' + suffix
def orig_img_url(url):
    """Return candidate original-quality URLs (.jpg, .png, .gif) for *url*.

    Pixiv originals live under 'img-original' without the '_master1200'
    size suffix; the true extension is unknown, so all three are tried.
    """
    i_master = url.find("img-master")
    i_size = url.find("_master1200")
    i_ext = url.find(".jpg")
    # Common prefix with 'img-master' swapped for 'img-original'.
    prefix = url[:i_master] + "img-original" + url[i_master + len("img-master"):i_size]
    tail = url[i_size + len("_master1200"):]
    stem_tail = url[i_size + len("_master1200"):i_ext]
    return [prefix + tail,
            prefix + stem_tail + ".png",
            prefix + stem_tail + ".gif"]
def download_image(urls, all_cookies, img_name, referer, dir_name):
    """Try downloading from url, try backup url if fails

    Send request with cookies, referer and UA.

    :param urls: candidate URLs, tried in order (earlier = preferred)
    :param all_cookies: cookie dicts from ``driver.get_cookies()``
    :param img_name: output file name without extension
    :param referer: Referer header value (pixiv rejects requests without it)
    :param dir_name: output directory
    :return: path of the saved file, or "" on failure
    """
    headers = {
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive',
        'Referer': referer
    }
    cookies = {c["name"]: c["value"] for c in all_cookies}
    response = None
    ext = ""
    for url in urls:
        try:
            response = requests.get(url,
                                    cookies=cookies,
                                    headers=headers,
                                    timeout=120)
        except Exception:
            # Fixed typo in message ("conection" -> "connection").
            print("Failed to download image {} because connection timed out".
                  format(img_name))
            return ""
        ext = url.split(".")[-1]
        if response.status_code != 404:
            break
    # Fix: previously a final 404 body (or, with an empty `urls`, an
    # unbound `response`) was written to disk as if it were an image.
    if response is None or response.status_code == 404:
        print("Failed to download image {} because no candidate URL succeeded".
              format(img_name))
        return ""
    output_name = img_name + "." + ext
    output_path = Path(dir_name) / output_name
    output_path = str(output_path.resolve())
    Path(output_path).parent.absolute().mkdir(parents=True, exist_ok=True)
    with open(output_path, "wb") as f:
        f.write(response.content)
    return output_path
def load_and_retry(driver, url, retries):
    """Load *url* in the Selenium *driver*, retrying up to *retries* times.

    Sleeps 30s between attempts; raises ``Exception('Page load failure')``
    once all retries are exhausted.
    """
    for _ in range(retries):
        try:
            driver.get(url)
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
        # SystemExit still propagate instead of triggering a retry.
        except Exception:
            print("Web page failed to load, trying again")
            print(url)
            time.sleep(30)
            continue
        return
    print("Page load reached max retries, exiting")
    raise Exception('Page load failure')
def detect_safe_search_uri(uri):
    """Detects unsafe features in the file located in Google Cloud Storage or
    on the Web.  Returns the adult-content likelihood name."""
    from google.cloud import vision
    client = vision.ImageAnnotatorClient.from_service_account_json(
        "service_account.json")
    image = vision.Image()
    image.source.image_uri = uri
    response = client.safe_search_detection(image=image)
    if response.error.message:
        raise Exception(
            '{}\nFor more info on error messages, check: '
            'https://cloud.google.com/apis/design/errors'.format(
                response.error.message))
    # Names of likelihood from google.cloud.vision.enums
    likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',
                       'LIKELY', 'VERY_LIKELY')
    return likelihood_name[response.safe_search_annotation.adult]
def upload_blob(bucket_name, source_file_name, destination_blob_name):
    """Uploads a file to the bucket."""
    client = storage.Client.from_service_account_json('service_account.json')
    target = client.bucket(bucket_name).blob(destination_blob_name)
    target.upload_from_filename(source_file_name)
    print(f"File {source_file_name} uploaded to {destination_blob_name}.")
if __name__ == '__main__':
    OUTPUT_BASE_DIR = "imgs"
    OUTPUT_CSV = "artwork_info.csv"
    URL_RANKING = "https://www.pixiv.net/ranking.php?mode=daily&content=illust"
    MAX_RETRIES = 3
    KEEP_DAYS = 7
    project_id = os.environ['GCP_PROJ']
    bucket_name = os.environ['DOWNLOAD_BUCKET_NAME']
    BUCKET_URL_PREFIX = f"https://storage.googleapis.com/{bucket_name}"
    df_artworks = pd.DataFrame({
        "Rank": [],
        "IllustID": [],
        "Compressed": [],
        "Thumbnail": [],
        "Original": [],
        "Downloaded": [],
        "Timestamp": []
    })

    def _teardown():
        # Best-effort cleanup; driver/display may not exist if setup
        # failed early (previously this raised NameError inside except).
        for closer in (lambda: driver.quit(), lambda: display.stop()):
            try:
                closer()
            except Exception:
                pass

    print("Initializing")
    try:
        display = Display(visible=0, size=(800, 600))
        display.start()
        firefox_profile = FirefoxProfile()
        firefox_profile.set_preference(
            'dom.ipc.plugins.enabled.libflashplayer.so', 'false')
        driver = webdriver.Firefox(firefox_profile)
        driver.set_page_load_timeout(90)
        driver.implicitly_wait(30)
    except Exception:
        _teardown()
        exception_handler("Initialization failed")

    print("Loading Pixiv daily rankings")
    try:
        load_and_retry(driver, URL_RANKING, MAX_RETRIES)
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
        page_title = driver.title
        # Slash not allowed in folder names
        timestamp = page_title.split(" ")[-1].replace('/', '-')
        print("Rankings loaded {}".format(timestamp))
        # ----- Prepare Output Path -----
        output_dir = Path('.') / OUTPUT_BASE_DIR / timestamp
        output_dir = str(output_dir.resolve())
        # Fix: `csv_path` was referenced below but never defined, which
        # raised NameError on re-runs for the same day. Its existence
        # marks a completed download (it is written at the very end).
        csv_path = str((Path(output_dir) / OUTPUT_CSV).resolve())
        # ----- Detect Duplicate Download -----
        if os.path.exists(output_dir):
            # Is previous download successful?
            if os.path.exists(csv_path):
                print("Already downloaded, exiting")
                exit(0)
            else:
                clear_dir(output_dir)
        else:
            Path(output_dir).mkdir(parents=True, exist_ok=True)
        # ----- Remove Old Image Cache -----
        rm_cache(timestamp)
    except Exception:
        _teardown()
        exception_handler("Pixiv daily rankings failed to load")

    print("Download started")
    try:
        medium_urls = []
        illust_ids = []
        thumb_urls = []
        artworks = driver.find_elements_by_class_name("ranking-item")
        # Only get top 50
        artworks = artworks[:50]
        for artwork in artworks:
            illust_ids.append(artwork.get_attribute("data-id"))
            thumb_urls.append(artwork.find_element_by_class_name("_work").
                              find_element_by_class_name("_layout-thumbnail").
                              find_element_by_class_name("_thumbnail").
                              get_attribute("data-src"))
            medium_urls.append(
                artwork.find_element_by_class_name("_work").get_attribute(
                    "href"))
        thumb_prefix = process_thumb_prefix(thumb_urls)
        for i, url in enumerate(medium_urls):
            illustid = illust_ids[i]
            rank = i + 1
            thumb_url = thumb_urls[i]
            print("\nAnalyzing links of image ranking {}".format(rank))
            downloaded = True
            output_name_c = "compressed/{}_c".format(rank)
            output_name_t = "thumbnail/{}_t".format(rank)
            output_name_o = "original/{}_o".format(rank)
            print("Downloading image ranking {}".format(rank))
            # Three renditions per artwork: compressed, thumbnail, original.
            compressed_path = download_image([large_img_url(thumb_url)],
                                             driver.get_cookies(), output_name_c,
                                             url, output_dir)
            upload_blob(bucket_name, compressed_path, f"{timestamp}/compressed/{os.path.basename(compressed_path)}")
            thumbnail_path = download_image([thumb_url],
                                            driver.get_cookies(), output_name_t,
                                            url, output_dir)
            upload_blob(bucket_name, thumbnail_path, f"{timestamp}/thumbnail/{os.path.basename(thumbnail_path)}")
            original_path = download_image(orig_img_url(large_img_url(thumb_url)),
                                           driver.get_cookies(), output_name_o,
                                           url, output_dir)
            upload_blob(bucket_name, original_path, f"{timestamp}/original/{os.path.basename(original_path)}")
            compressed_shape = cv2.imread(compressed_path).shape
            thumbnail_shape = cv2.imread(thumbnail_path).shape
            original_shape = cv2.imread(original_path).shape
            compressed = f"{BUCKET_URL_PREFIX}/{timestamp}/compressed/{os.path.basename(compressed_path)}"
            thumbnail = f"{BUCKET_URL_PREFIX}/{timestamp}/thumbnail/{os.path.basename(thumbnail_path)}"
            original = f"{BUCKET_URL_PREFIX}/{timestamp}/original/{os.path.basename(original_path)}"
            df_artworks = df_artworks.append(
                {
                    "Rank": rank,
                    "IllustID": illustid,
                    "Compressed": compressed,
                    "CompressedHeight": compressed_shape[0],
                    "CompressedWidth": compressed_shape[1],
                    "Thumbnail": thumbnail,
                    "ThumbnailHeight": thumbnail_shape[0],
                    "ThumbnailWidth": thumbnail_shape[1],
                    "Original": original,
                    "OriginalHeight": original_shape[0],
                    "OriginalWidth": original_shape[1],
                    "Adult": detect_safe_search_uri(thumbnail),
                    "Downloaded": downloaded,
                    "Timestamp": timestamp
                },
                ignore_index=True)
        df_artworks.to_csv("data.csv", index=False)
        # Fix: also write the per-day completion CSV that the duplicate
        # detection above checks for; it was never written before, so
        # re-runs could never short-circuit.
        df_artworks.to_csv(csv_path, index=False)
    except Exception:
        _teardown()
        exception_handler("Download failed")
|
# -*- coding: utf-8 -*-
import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import json
from ._compat import iteritems, text_type
from itsdangerous import URLSafeTimedSerializer, BadSignature
def total_seconds(td):
    """Return the whole-second length of timedelta *td* (microseconds ignored)."""
    return td.days * 86400 + td.seconds
class SessionMixin(object):
    """Mixin adding session bookkeeping on top of a dict-like base class."""

    # True for sessions created during this request.
    new = False
    # True when the session was changed and must be re-serialized.
    modified = True

    @property
    def permanent(self):
        """Whether the session survives browser close (stored in the dict)."""
        return self.get('_permanent', False)

    @permanent.setter
    def permanent(self, value):
        self['_permanent'] = bool(value)
def _tag(value):
if isinstance(value, tuple):
return {' t': [_tag(x) for x in value]}
elif isinstance(value, uuid.UUID):
return {' u': value.hex}
elif isinstance(value, bytes):
return {' b': b64encode(value).decode('ascii')}
elif callable(getattr(value, '__html__', None)):
return {' m': text_type(value.__html__())}
elif isinstance(value, list):
return [_tag(x) for x in value]
elif isinstance(value, datetime):
return {' d': http_date(value)}
elif isinstance(value, dict):
return dict((k, _tag(v)) for k, v in iteritems(value))
elif isinstance(value, str):
try:
return text_type(value)
except UnicodeError:
from flak.debughelpers import UnexpectedUnicodeError
raise UnexpectedUnicodeError(u'A byte string with '
u'non-ASCII data was passed to the session system '
u'which can only store unicode strings. Consider '
u'base64 encoding your string (String was %r)' % value)
return value
class TaggedJsonSerializer(object):
    """Serializer that round-trips tagged Python values through JSON.

    Values are tagged with :func:`_tag` on the way out and reconstructed
    by an ``object_hook`` on the way in; the actual JSON codec comes from
    the context object ``cx``.
    """

    def __init__(self, cx):
        self.cx = cx

    def dumps(self, value):
        # Compact separators keep the cookie payload small.
        return self.cx.dumps(_tag(value), separators=(',', ':'))

    def loads(self, value):
        def object_hook(obj):
            # Tagged values are exactly one-key dicts; anything else is plain.
            if len(obj) != 1:
                return obj
            key, val = next(iteritems(obj))
            if key == ' t':
                return tuple(val)
            if key == ' u':
                return uuid.UUID(val)
            if key == ' b':
                return b64decode(val)
            if key == ' d':
                return parse_date(val)
            return obj
        return self.cx.loads(value, object_hook=object_hook)
class SecureCookieSession(CallbackDict, SessionMixin):
    """Session stored in a signed cookie; flags itself modified on writes."""

    def __init__(self, initial=None):
        def _mark_modified(instance):
            instance.modified = True
        CallbackDict.__init__(self, initial, _mark_modified)
        # Populating from *initial* above must not count as a modification.
        self.modified = False
class NullSession(SecureCookieSession):
    """Read-only stand-in session used when no real session is available.

    All mutating dict operations are silently ignored.
    """

    modified = False

    def _fail_silently(self, *args, **kwargs):
        pass

    __setitem__ = _fail_silently
    __delitem__ = _fail_silently
    clear = _fail_silently
    pop = _fail_silently
    popitem = _fail_silently
    update = _fail_silently
    setdefault = _fail_silently
    del _fail_silently
class SessionInterface(object):
    """Base session interface: cookie-parameter helpers plus no-op open/save."""

    null_session_class = NullSession

    def get_cookie_domain(self, app):
        """Return the session cookie domain, or None for a host-only cookie."""
        configured = app.config['SESSION_COOKIE_DOMAIN']
        if configured is not None:
            return configured
        if app.config['SERVER_NAME'] is None:
            return None
        # chop off the port which is usually not supported by browsers
        domain = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]
        # Google chrome does not like cookies set to .localhost, so
        # we just go with no domain then. Flak documents anyways that
        # cross domain cookies need a fully qualified domain name
        if domain == '.localhost':
            domain = None
        # If we infer the cookie domain from the server name we need
        # to check if we are in a subpath. In that case we can't
        # set a cross domain cookie.
        if domain is not None and self.get_cookie_path(app) != '/':
            domain = domain.lstrip('.')
        return domain

    def get_cookie_path(self, app):
        """Return the configured cookie path, defaulting to '/'."""
        return (app.config['SESSION_COOKIE_PATH']
                or app.config['APPLICATION_ROOT']
                or '/')

    def get_cookie_httponly(self, app):
        return app.config['SESSION_COOKIE_HTTPONLY']

    def get_cookie_secure(self, app):
        return app.config['SESSION_COOKIE_SECURE']

    def get_expiration_time(self, app, session):
        """Expiry datetime for permanent sessions; None for browser sessions."""
        if session.permanent:
            return datetime.utcnow() + app.permanent_session_lifetime

    def should_set_cookie(self, app, session):
        """Decide whether a Set-Cookie header must be emitted this request."""
        if session.modified:
            return True
        save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
        return save_each and session.permanent

    def open_session(self, cx):
        return self.null_session_class()

    def save_session(self, cx, response):
        pass
class SecureCookieSessionInterface(SessionInterface):
    """Session interface storing the whole session payload in a signed cookie.

    The payload is JSON (via :class:`TaggedJsonSerializer`) signed with an
    itsdangerous timed serializer; no server-side state is kept.
    """
    # Signing salt: namespaces signatures relative to other uses of the key.
    salt = 'cookie-session'
    key_derivation = 'hmac'
    digest_method = staticmethod(hashlib.sha1)
    serializer = TaggedJsonSerializer
    session_class = SecureCookieSession
    def get_signing_serializer(self, cx):
        """Build the signing serializer, or return None without a secret key."""
        if not cx.app.secret_key:
            return None
        signer_kwargs = dict(key_derivation=self.key_derivation,
                             digest_method=self.digest_method)
        return URLSafeTimedSerializer(cx.app.secret_key, salt=self.salt,
                                      serializer=self.serializer(cx),
                                      signer_kwargs=signer_kwargs)
    def open_session(self, cx):
        """Deserialize the request's session cookie into a session object.

        Falls back to a null session when signing is unavailable, and to a
        fresh empty session when the cookie is missing, tampered or expired.
        """
        app = cx.app
        rq = cx.request
        s = self.get_signing_serializer(cx)
        if s is None:
            return self.null_session_class()
        val = rq.cookies.get(app.session_cookie_name)
        if not val:
            return self.session_class()
        # Permanent-session lifetime doubles as the signature's max age.
        max_age = total_seconds(app.permanent_session_lifetime)
        try:
            data = s.loads(val, max_age=max_age)
            return self.session_class(data)
        except BadSignature:
            return self.session_class()
    def save_session(self, cx, response):
        """Write (or delete) the session cookie on *response*."""
        app = cx.app
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)
        # Delete case. If there is no session we bail early.
        # If the session was modified to be empty we remove the
        # whole cookie.
        session = cx.session
        if not session:
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain, path=path)
            return
        # Modification case. There are upsides and downsides to
        # emitting a set-cookie header each request. The behavior
        # is controlled by the :meth:`should_set_cookie` method
        # which performs a quick check to figure out if the cookie
        # should be set or not. This is controlled by the
        # SESSION_REFRESH_EACH_REQUEST config flag as well as
        # the permanent flag on the session itself.
        if not self.should_set_cookie(app, session):
            return
        httponly = self.get_cookie_httponly(app)
        secure = self.get_cookie_secure(app)
        expires = self.get_expiration_time(app, session)
        val = self.get_signing_serializer(cx).dumps(dict(session))
        response.set_cookie(app.session_cookie_name, val,
                            expires=expires, httponly=httponly,
                            domain=domain, path=path, secure=secure)
|
<reponame>pulumi/pulumi-azure-nextgen
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Lab']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:devtestlab:Lab'.""", DeprecationWarning)
class Lab(pulumi.CustomResource):
    """A DevTest Labs lab resource (deprecated 'latest' API version).

    Generated by the Pulumi SDK Generator; do not hand-edit the logic.
    """
    warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:devtestlab:Lab'.""", DeprecationWarning)
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 announcement: Optional[pulumi.Input[pulumi.InputType['LabAnnouncementPropertiesArgs']]] = None,
                 environment_permission: Optional[pulumi.Input[Union[str, 'EnvironmentPermission']]] = None,
                 extended_properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 lab_storage_type: Optional[pulumi.Input[Union[str, 'StorageType']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 mandatory_artifacts_resource_ids_linux: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 mandatory_artifacts_resource_ids_windows: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 premium_data_disks: Optional[pulumi.Input[Union[str, 'PremiumDataDisk']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 support: Optional[pulumi.Input[pulumi.InputType['LabSupportPropertiesArgs']]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        A lab.
        Latest API Version: 2018-09-15.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['LabAnnouncementPropertiesArgs']] announcement: The properties of any lab announcement associated with this lab
        :param pulumi.Input[Union[str, 'EnvironmentPermission']] environment_permission: The access rights to be granted to the user when provisioning an environment
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] extended_properties: Extended properties of the lab used for experimental features
        :param pulumi.Input[Union[str, 'StorageType']] lab_storage_type: Type of storage used by the lab. It can be either Premium or Standard. Default is Premium.
        :param pulumi.Input[str] location: The location of the resource.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] mandatory_artifacts_resource_ids_linux: The ordered list of artifact resource IDs that should be applied on all Linux VM creations by default, prior to the artifacts specified by the user.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] mandatory_artifacts_resource_ids_windows: The ordered list of artifact resource IDs that should be applied on all Windows VM creations by default, prior to the artifacts specified by the user.
        :param pulumi.Input[str] name: The name of the lab.
        :param pulumi.Input[Union[str, 'PremiumDataDisk']] premium_data_disks: The setting to enable usage of premium data disks.
               When its value is 'Enabled', creation of standard or premium data disks is allowed.
               When its value is 'Disabled', only creation of standard data disks is allowed.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[pulumi.InputType['LabSupportPropertiesArgs']] support: The properties of any lab support message associated with this lab
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
        """
        pulumi.log.warn("Lab is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:devtestlab:Lab'.")
        # Compatibility shims for the legacy __name__/__opts__ calling convention.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is only set when looking up an existing resource (see get()).
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['announcement'] = announcement
            __props__['environment_permission'] = environment_permission
            __props__['extended_properties'] = extended_properties
            __props__['lab_storage_type'] = lab_storage_type
            __props__['location'] = location
            __props__['mandatory_artifacts_resource_ids_linux'] = mandatory_artifacts_resource_ids_linux
            __props__['mandatory_artifacts_resource_ids_windows'] = mandatory_artifacts_resource_ids_windows
            __props__['name'] = name
            __props__['premium_data_disks'] = premium_data_disks
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['support'] = support
            __props__['tags'] = tags
            # Output-only properties: initialized to None and resolved by the engine.
            __props__['artifacts_storage_account'] = None
            __props__['created_date'] = None
            __props__['default_premium_storage_account'] = None
            __props__['default_storage_account'] = None
            __props__['load_balancer_id'] = None
            __props__['network_security_group_id'] = None
            __props__['premium_data_disk_storage_account'] = None
            __props__['provisioning_state'] = None
            __props__['public_ip_id'] = None
            __props__['type'] = None
            __props__['unique_identifier'] = None
            __props__['vault_name'] = None
            __props__['vm_creation_resource_group'] = None
        # Aliases let the engine treat older API-version type tokens as this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab:Lab"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:Lab"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:Lab"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:Lab")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Lab, __self__).__init__(
            'azure-nextgen:devtestlab/latest:Lab',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Lab':
        """
        Get an existing Lab resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # No inputs are rehydrated; all properties are resolved from the live resource.
        __props__ = dict()
        return Lab(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def announcement(self) -> pulumi.Output[Optional['outputs.LabAnnouncementPropertiesResponse']]:
        """
        The properties of any lab announcement associated with this lab
        """
        return pulumi.get(self, "announcement")
    @property
    @pulumi.getter(name="artifactsStorageAccount")
    def artifacts_storage_account(self) -> pulumi.Output[str]:
        """
        The lab's artifact storage account.
        """
        return pulumi.get(self, "artifacts_storage_account")
    @property
    @pulumi.getter(name="createdDate")
    def created_date(self) -> pulumi.Output[str]:
        """
        The creation date of the lab.
        """
        return pulumi.get(self, "created_date")
    @property
    @pulumi.getter(name="defaultPremiumStorageAccount")
    def default_premium_storage_account(self) -> pulumi.Output[str]:
        """
        The lab's default premium storage account.
        """
        return pulumi.get(self, "default_premium_storage_account")
    @property
    @pulumi.getter(name="defaultStorageAccount")
    def default_storage_account(self) -> pulumi.Output[str]:
        """
        The lab's default storage account.
        """
        return pulumi.get(self, "default_storage_account")
    @property
    @pulumi.getter(name="environmentPermission")
    def environment_permission(self) -> pulumi.Output[Optional[str]]:
        """
        The access rights to be granted to the user when provisioning an environment
        """
        return pulumi.get(self, "environment_permission")
    @property
    @pulumi.getter(name="extendedProperties")
    def extended_properties(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Extended properties of the lab used for experimental features
        """
        return pulumi.get(self, "extended_properties")
    @property
    @pulumi.getter(name="labStorageType")
    def lab_storage_type(self) -> pulumi.Output[Optional[str]]:
        """
        Type of storage used by the lab. It can be either Premium or Standard. Default is Premium.
        """
        return pulumi.get(self, "lab_storage_type")
    @property
    @pulumi.getter(name="loadBalancerId")
    def load_balancer_id(self) -> pulumi.Output[str]:
        """
        The load balancer used to for lab VMs that use shared IP address.
        """
        return pulumi.get(self, "load_balancer_id")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[Optional[str]]:
        """
        The location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter(name="mandatoryArtifactsResourceIdsLinux")
    def mandatory_artifacts_resource_ids_linux(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The ordered list of artifact resource IDs that should be applied on all Linux VM creations by default, prior to the artifacts specified by the user.
        """
        return pulumi.get(self, "mandatory_artifacts_resource_ids_linux")
    @property
    @pulumi.getter(name="mandatoryArtifactsResourceIdsWindows")
    def mandatory_artifacts_resource_ids_windows(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The ordered list of artifact resource IDs that should be applied on all Windows VM creations by default, prior to the artifacts specified by the user.
        """
        return pulumi.get(self, "mandatory_artifacts_resource_ids_windows")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="networkSecurityGroupId")
    def network_security_group_id(self) -> pulumi.Output[str]:
        """
        The Network Security Group attached to the lab VMs Network interfaces to restrict open ports.
        """
        return pulumi.get(self, "network_security_group_id")
    @property
    @pulumi.getter(name="premiumDataDiskStorageAccount")
    def premium_data_disk_storage_account(self) -> pulumi.Output[str]:
        """
        The lab's premium data disk storage account.
        """
        return pulumi.get(self, "premium_data_disk_storage_account")
    @property
    @pulumi.getter(name="premiumDataDisks")
    def premium_data_disks(self) -> pulumi.Output[Optional[str]]:
        """
        The setting to enable usage of premium data disks.
        When its value is 'Enabled', creation of standard or premium data disks is allowed.
        When its value is 'Disabled', only creation of standard data disks is allowed.
        """
        return pulumi.get(self, "premium_data_disks")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning status of the resource.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="publicIpId")
    def public_ip_id(self) -> pulumi.Output[str]:
        """
        The public IP address for the lab's load balancer.
        """
        return pulumi.get(self, "public_ip_id")
    @property
    @pulumi.getter
    def support(self) -> pulumi.Output[Optional['outputs.LabSupportPropertiesResponse']]:
        """
        The properties of any lab support message associated with this lab
        """
        return pulumi.get(self, "support")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        The tags of the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="uniqueIdentifier")
    def unique_identifier(self) -> pulumi.Output[str]:
        """
        The unique immutable identifier of a resource (Guid).
        """
        return pulumi.get(self, "unique_identifier")
    @property
    @pulumi.getter(name="vaultName")
    def vault_name(self) -> pulumi.Output[str]:
        """
        The lab's Key vault.
        """
        return pulumi.get(self, "vault_name")
    @property
    @pulumi.getter(name="vmCreationResourceGroup")
    def vm_creation_resource_group(self) -> pulumi.Output[str]:
        """
        The resource group in which all new lab virtual machines will be created. To let DevTest Labs manage resource group creation, set this value to null.
        """
        return pulumi.get(self, "vm_creation_resource_group")
    def translate_output_property(self, prop):
        # Map engine-side camelCase property names to Python snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map Python snake_case property names back to engine-side camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
<reponame>factomatic/py-factom-did
import hashlib
from jsonschema.exceptions import ValidationError
from factom_did.client.constants import ENTRY_SCHEMA_V100
from factom_did.client.enums import EntryType, Network
from factom_did.client.validators import validate_full_key_identifier
from factom_did.resolver.exceptions import MalformedDIDManagementEntry
class EmptyEntryContentValidator:
    """Validator requiring an entry's content to be empty."""

    @staticmethod
    def validate(content):
        """Raise ValidationError unless *content* is empty (falsy)."""
        if not content:
            return
        raise ValidationError("Invalid entry content: must be empty")
def validate_did_management_ext_ids_v100(ext_ids):
    """
    Validates the ExtIDs of a DIDManagement entry.

    Parameters
    ----------
    ext_ids: list of bytes
        The ExtIDs of the entry

    Raises
    ------
    MalformedDIDManagementEntry
        If the ExtIDs are not valid.
    """
    # The checks are ordered; each later check assumes the earlier ones passed.
    well_formed = (
        _validate_ext_ids_length(ext_ids, 2)
        and _validate_entry_type(ext_ids, EntryType.Create)
        and _validate_schema_version(ext_ids, ENTRY_SCHEMA_V100)
    )
    if not well_formed:
        raise MalformedDIDManagementEntry(
            "Invalid or missing {} entry ExtIDs".format(EntryType.Create.value)
        )
def validate_did_update_ext_ids_v100(ext_ids, chain_id, network=Network.Unspecified):
    """
    Validates the ExtIDs of a DIDUpdate entry.

    Parameters
    ----------
    ext_ids: list of bytes
        The ExtIDs of the entry
    chain_id: str
        The chain ID where the DIDUpdate is recorded
    network: Network, optional
        The Factom network on which the DID is recorded

    Returns
    -------
    bool
        True if the ExtIDs are valid, False otherwise.
    """
    # Guard clauses preserve the original short-circuit order: later checks
    # index into ext_ids and must not run if the length check failed.
    if not _validate_ext_ids_length(ext_ids, 4):
        return False
    if not _validate_entry_type(ext_ids, EntryType.Update):
        return False
    if not _validate_schema_version(ext_ids, ENTRY_SCHEMA_V100):
        return False
    if not _validate_full_key_identifier(ext_ids):
        return False
    return (
        validate_management_key_id_against_chain_id(ext_ids[2], chain_id)
        and validate_id_against_network(ext_ids[2], network)
    )
def validate_did_method_version_upgrade_ext_ids_v100(
    ext_ids, chain_id, network=Network.Unspecified
):
    """
    Validates the ExtIDs of a DIDMethodVersionUpgrade entry.

    Parameters
    ----------
    ext_ids: list of bytes
        The ExtIDs of the entry
    chain_id: str
        The chain ID where the DIDUpdate is recorded
    network: Network, optional
        The Factom network on which the DID is recorded

    Returns
    -------
    bool
        True if the ExtIDs are valid, False otherwise.
    """
    # Guard clauses preserve the original short-circuit evaluation order.
    if not _validate_ext_ids_length(ext_ids, 4):
        return False
    if not _validate_entry_type(ext_ids, EntryType.VersionUpgrade):
        return False
    if not _validate_schema_version(ext_ids, ENTRY_SCHEMA_V100):
        return False
    if not _validate_full_key_identifier(ext_ids):
        return False
    return (
        validate_management_key_id_against_chain_id(ext_ids[2], chain_id)
        and validate_id_against_network(ext_ids[2], network)
    )
def validate_did_deactivation_ext_ids_v100(
    ext_ids, chain_id, network=Network.Unspecified
):
    """
    Validates the ExtIDs of a DIDDeactivation entry.

    Parameters
    ----------
    ext_ids: list of bytes
        The ExtIDs of the entry
    chain_id: str
        The chain ID where the DIDUpdate is recorded
    network: Network, optional
        The Factom network on which the DID is recorded

    Returns
    -------
    bool
        True if the ExtIDs are valid, False otherwise.
    """
    # Guard clauses preserve the original short-circuit evaluation order.
    if not _validate_ext_ids_length(ext_ids, 4):
        return False
    if not _validate_entry_type(ext_ids, EntryType.Deactivation):
        return False
    if not _validate_schema_version(ext_ids, ENTRY_SCHEMA_V100):
        return False
    if not _validate_full_key_identifier(ext_ids):
        return False
    return (
        validate_management_key_id_against_chain_id(ext_ids[2], chain_id)
        and validate_id_against_network(ext_ids[2], network)
    )
def validate_signature(ext_ids, content, signing_key):
    """
    Checks if the signature contained in the last element of ext_ids is valid.

    The signature is for a DIDUpdate, DIDMethodVersionUpgrade or
    DIDDeactivation entry and covers the SHA-256 digest of the first three
    ExtIDs concatenated with the entry content. For details see
    https://github.com/bi-foundation/FIS/blob/feature/DID/FIS/DID.md

    Parameters
    ----------
    ext_ids: list of bytes
    content: bytes
    signing_key: ManagementKey

    Returns
    -------
    bool
    """
    message = bytearray()
    for ext_id in ext_ids[:3]:
        message.extend(ext_id)
    message.extend(content)
    return signing_key.verify(hashlib.sha256(message).digest(), ext_ids[3])
def validate_management_key_id_against_chain_id(key_id, chain_id):
    """
    Checks if the chain in the key_id matches the value supplied in chain_id.

    Parameters
    ----------
    key_id: bytes or str
        The partial or full key identifier
    chain_id: str
        The chain ID

    Raises
    ------
    UnicodeDecodeError
        If the key_id cannot be decoded to a Unicode string

    Returns
    -------
    bool
    """
    if type(key_id) is bytes:
        key_id = key_id.decode()
    if ":" not in key_id:
        # Partial identifiers carry no chain information; accept them.
        return True
    # Full identifier: extract the chain part (after the last ':' and before '#').
    chain_part = key_id.rsplit(":", 1)[-1].partition("#")[0]
    return chain_part == chain_id
def validate_id_against_network(id_value, network):
    """
    Checks if the network in the id_value matches the value supplied in network.

    Parameters
    ----------
    id_value: bytes or str
        The partial or full key/service identifier
    network: factom_did.client.enums.Network
        The network

    Raises
    ------
    UnicodeDecodeError
        If the key_id cannot be decoded to a Unicode string

    Returns
    -------
    bool
    """
    if type(id_value) is bytes:
        id_value = id_value.decode()
    if ":" not in id_value:
        # Partial identifiers carry no network information; accept them.
        return True
    parts = id_value.split(":")
    if len(parts) != 4:
        # Full identifier without an explicit network component; accept it.
        return True
    return parts[2] == network.value
def _validate_ext_ids_length(ext_ids, min_length):
return len(ext_ids) >= min_length
def _validate_entry_type(ext_ids, entry_type):
try:
return ext_ids[0].decode() == entry_type.value
except UnicodeDecodeError:
return False
def _validate_schema_version(ext_ids, version):
try:
return ext_ids[1].decode() == version
except UnicodeDecodeError:
return False
def _validate_full_key_identifier(ext_ids):
    """True when the third ExtID decodes to a well-formed full key identifier."""
    try:
        validate_full_key_identifier(ext_ids[2].decode())
        return True
    except (UnicodeDecodeError, ValueError):
        return False
|
# Copyright (c) 2021 by <NAME>. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
from dataclasses import dataclass
import numpy as np
@dataclass
class SimpleFlexHeatController:
    '''
    Simple controller for the power-to-heat facility (heat pump & storage tank).

    A six-state machine (see ``state``) decides how the heat pump and the
    storage tank interact with the heat grid. Each call to
    :meth:`step_single` first updates the state and then derives the valve
    mass flows for the new state.
    '''
    # Parameters
    T_tank_max: float = 72   # Maximum tank temperature - [degC]
    T_tank_min: float = 65   # Minimum tank temperature - [degC]
    # Variables
    ## Input measures
    mdot_HEX1: float = 0.0   # Mass flow requested by the consumer 1 HEX - [kg/s]
    mdot_HEX2: float = 0.0   # Mass flow requested by the consumer 2 HEX - [kg/s]
    mdot_bypass: float = 0.5   # Mass flow through network bypass - [kg/s]
    T_tank_hot: float = 50   # Average tank temperature - [degC]
    T_hp_cond_in: float = 50   # Heat pump condenser inlet temperature - [degC]
    T_hp_cond_out: float = 70   # Heat pump output temperature - [degC]
    T_hp_evap_in: float = 40   # Heat pump evaporator inlet temperature - [degC]
    T_hp_evap_out_min: float = 15   # Heat pump minimum evaporator outlet temperature - [degC]
    # Control inputs
    voltage_control_enabled: bool = False   # Centralised voltage controller connected
    P_hp_rated: float = 100   # Rated heat pump el. consumption [kWe]
    P_hp_el_setpoint: float = 0   # Heat pump setpoint - [kWe]
    P_hp_effective: float = 0   # Effective heat pump electricity consumption - [kWe]
    # Control outputs
    hp_on_request: bool = False   # Voltage control request for under voltage (toggle) and time period - [sec]
    hp_off_request: bool = False   # Voltage control request for over voltage (toggle) and time period - [sec]
    ## Internal Vars
    MDOT_FORWARD_MIN: float = 0.11   # Minimum forward mass flow - [kg/s]
    MDOT_HP_MAX: float = 10   # Maximum heat pump output mass flow - [kg/s]
    # NOTE(review): this default is evaluated once at class-definition time
    # from the *default* P_hp_rated (100 kWe); an instance constructed with a
    # different rating keeps the default threshold — confirm this is intended.
    hp_operating_threshold: float = 0.35 * P_hp_rated   # HP operating threshold (minimum on-time el. consumption) - [kWe]
    ## Output
    mdot_1_supply: float = 0.0   # Supply 3 way valve mass flow at port 1 - [kg/s]
    mdot_2_supply: float = 0.0   # Supply 3 way valve mass flow at port 2 - [kg/s]
    mdot_3_supply: float = 0.0   # Supply 3 way valve mass flow at port 3 - [kg/s]
    mdot_1_return: float = 0.0   # Return 3 way valve mass flow at port 1 - [kg/s]
    mdot_2_return: float = 0.0   # Return 3 way valve mass flow at port 2 - [kg/s]
    mdot_3_return: float = 0.0   # Return 3 way valve mass flow at port 3 - [kg/s]
    mdot_HP_out: float = 0   # HP forward mass flow [kg/s]
    state: int = 1   # state variable 1..6
    # Constants (un-annotated on purpose: shared class attribute, not a field)
    Cp_water = 4.180   # [kJ/(kg.degK)]

    def __post_init__(self):
        # Run one control step so outputs are consistent right after creation.
        self.step_single()

    def step_single(self):
        """Advance the controller by one step: update state, then outputs."""
        self._update_state()            # Check if state is changed
        self._do_state_based_control()  # Do control based on new step

    def _do_state_based_control(self):
        """Compute the valve mass flows and HP flow for the current state."""
        # Total consumer demand drives the supply/return valve balance.
        self.mdot_2_supply = -(self.mdot_HEX1 + self.mdot_HEX2 + self.mdot_bypass)
        self.mdot_1_return = self.mdot_HEX1 + self.mdot_HEX2 + self.mdot_bypass
        if self.state == 1:  # Mode 1: External grid supplies heat, hp and tank inactive
            self.mdot_1_supply = - self.mdot_2_supply - self.MDOT_FORWARD_MIN
            self.mdot_HP_out = 0
        elif self.state == 2:  # Mode 2: External grid supplies heat, HP charges tank
            self.mdot_1_supply = - self.mdot_2_supply - self.MDOT_FORWARD_MIN
            self.set_hp_mdot_out()
        elif self.state == 3:  # Mode 3: Discharge the tank, hp off
            self.mdot_1_supply = self.MDOT_FORWARD_MIN
            self.mdot_HP_out = 0
        elif self.state == 4:  # Mode 4: Discharge the tank, hp on
            self.mdot_1_supply = self.MDOT_FORWARD_MIN
            self.set_hp_mdot_out()
        elif self.state == 5:  # Mode 5: Tank supports (with fixed mass flow) the grid, hp off
            self.mdot_1_supply = - self.mdot_2_supply - 2.0
            self.mdot_HP_out = 0
        elif self.state == 6:  # Mode 6: Tank supports (with fixed mass flow) the grid, hp on
            self.mdot_1_supply = - self.mdot_2_supply - 2.0
            self.set_hp_mdot_out()
        # Mass balance: port 3 closes the supply side; return mirrors supply.
        self.mdot_3_supply = -(self.mdot_1_supply + self.mdot_2_supply)
        self.mdot_3_return = -self.mdot_3_supply
        self.mdot_2_return = -self.mdot_1_supply

    def _update_state(self):
        """State-machine transitions based on tank temperature and requests.

        Note: the original used ``is`` to compare ``state`` with int
        literals, which only works due to CPython small-int caching and
        raises a SyntaxWarning on Python >= 3.8; ``==`` is the correct
        comparison and behaves identically here.
        """
        if self.voltage_control_enabled:
            self.set_hp_request()
        if self.state == 1:  # Mode 1: External grid supplies, tank inactive
            if not self.hp_off_request:
                self.set_state(new_state=2)  # Mode 2: Charge the tank, external supply
        elif self.state == 2:  # Mode 2: Grid supplies, tank inactive, hp on
            if self.T_tank_hot > self.T_tank_max:  # or self.hp_off_request:
                if self.hp_on_request:
                    self.set_state(new_state=6)
                else:
                    self.set_state(new_state=5)
            if self.hp_off_request:
                self.set_state(new_state=5)
        elif self.state == 6:  # Mode 6: Grid supplies, tank supports (and holding the temperature)
            if not self.hp_on_request:
                self.set_state(new_state=5)
        elif self.state == 5:  # Mode 5: Tank support, hp off
            if self.T_tank_hot < self.T_tank_min:  # or self.hp_on_request:
                if self.hp_off_request:
                    self.set_state(new_state=1)
                else:
                    self.set_state(new_state=2)
            if self.hp_on_request:
                self.set_state(new_state=2)

    def set_hp_request(self):
        """Derive on/off requests from the setpoint vs. operating threshold."""
        if self.P_hp_el_setpoint > self.hp_operating_threshold:
            self.hp_off_request = False
            self.hp_on_request = True
        else:
            self.hp_off_request = True
            self.hp_on_request = False

    def set_state(self, new_state):
        """Transition to *new_state*, logging the change when it differs."""
        old_state = self.state
        self.state = new_state
        if self.state != old_state:
            print(f'Controller state changed from {old_state} to {self.state}')

    def set_hp_mdot_out(self):
        """Set the HP forward mass flow (proportional control or fixed)."""
        if self.voltage_control_enabled:
            if self.hp_off_request:
                self.mdot_HP_out = 0
            else:
                # Proportional control on the electrical power error.
                error_abs = self.P_hp_el_setpoint - self.P_hp_effective
                error_pu = (error_abs / self.P_hp_el_setpoint)
                mdot = self.mdot_HP_out
                mdot += error_pu * -0.5
                # Flow is negative by sign convention; clamp to the HP maximum.
                self.mdot_HP_out = np.clip(mdot, -self.MDOT_HP_MAX, 0)
        else:
            self.mdot_HP_out = -3.5

    def get_hp_cop(self):
        """Estimate the heat pump COP from log-mean condenser/evaporator temps."""
        eta_hp_sys = 0.5   # Estimated hp efficiency
        T_hot_in = self.T_hp_cond_in
        T_hot_out = self.T_hp_cond_out
        T_cold_in = self.T_hp_evap_in
        T_cold_out = self.T_hp_evap_out_min
        # Calculate COP from log-mean temperatures (Lorenz-style estimate).
        T_hot_m = (T_hot_in - T_hot_out) / np.log(T_hot_out/T_hot_in)
        T_cold_m = (T_cold_in - T_cold_out) / np.log(T_cold_out/T_cold_in)
        cop_hp = (eta_hp_sys * T_hot_m) / (T_hot_m - T_cold_m)
        return cop_hp
if __name__ == '__main__':
    # Smoke test: constructing the controller runs one control step.
    controller = SimpleFlexHeatController()
|
<reponame>maheriya/tennisLabels
#!/usr/bin/env python
# coding: utf-8
## Scale Images in a Directory
# This script scales all images in a VOCdevkit dataset to a specified output dimensions and
# updates the existing labels accordingly. The Pascal VOC annotations use pixel values for
# bounding boxes, and as a result, need to be scaled with the same scale as the images.
#
## Why Scale?
# We annotate the images in full resolution since it is easier to find shapes and bounding
# boxes can be more accurately defined. However, for training we don't need the images at full
# resolution. Also, carrying around images at full resolution requires a lot of disk space.
# Especially if we have to use cloud resources.
#
# That is why we scale the images to the dimensions we need for training. Keeping it slightly
# larger than actual size needed for training is optimal because that allows using the random
# crop augmentation more effectively. Also, it saves time during training since images will
# not have to be scaled during training.
import os
import sys
import cv2 as cv
from lxml import etree
from lxml import objectify
from glob import glob
import subprocess
# Major Python version (2 or 3), used for compatibility switches.
PYVER = 2 if sys.version_info[0] < 3 else 3
## Global variables
SHOW_IMAGES = False  # When True, display each original/scaled image pair
def getImageSizefromAnnotations(annfile):
    """Return (width, height) as recorded in a Pascal VOC annotation file."""
    with open(annfile) as fh:
        ann = objectify.fromstring(fh.read())
    return (ann.size.width, ann.size.height)
def readAndScaleAnnotations(i_anndir, imgbase, SCALE):
    """Load a Pascal VOC annotation and scale its image size and boxes.

    Parameters:
        i_anndir: directory containing the input .xml annotation files
        imgbase:  image base name (annotation file is <imgbase>.xml)
        SCALE:    scale factor applied to the image size and all bndboxes

    Returns the modified lxml.objectify annotation tree.
    """
    i_annfile = os.path.join(i_anndir, imgbase + ".xml")
    #print("Input annotation file: {}".format(i_annfile))
    with open(i_annfile) as f:
        xml = f.read()
    ann = objectify.fromstring(xml)
    ## Change the recorded image size based on scale
    ann.size.width = objectify.StringElement(str(int(ann.size.width * SCALE)))
    ann.size.height = objectify.StringElement(str(int(ann.size.height * SCALE)))
    ## Rewrite the recorded path as <folder>/JPEGImages/<filename>
    folder = ann.folder
    filename = ann.filename
    filepath = os.path.join(str(folder), 'JPEGImages', str(filename))
    ann.path = objectify.StringElement(filepath)
    for obj in ann.iter('object'):
        # BUG FIX: the coordinates were written as float strings (e.g. "54.0")
        # because IntElement * float yields a float. Pascal VOC stores bndbox
        # coordinates as integer pixels, so round to the nearest int.
        obj.bndbox.ymin = objectify.StringElement(str(int(round(obj.bndbox.ymin * SCALE))))
        obj.bndbox.xmin = objectify.StringElement(str(int(round(obj.bndbox.xmin * SCALE))))
        obj.bndbox.ymax = objectify.StringElement(str(int(round(obj.bndbox.ymax * SCALE))))
        obj.bndbox.xmax = objectify.StringElement(str(int(round(obj.bndbox.xmax * SCALE))))
    return ann
def show_imgs(cvimg, cvimg_scaled):
    """Preview the original and scaled images; ESC aborts, 'g' disables previews."""
    global SHOW_IMAGES
    cv.imshow("Original image", cvimg)
    cv.imshow("Scaled image", cvimg_scaled)
    key = cv.waitKey(0) & 255
    if key == 27:  # ESC: abort the whole run
        cv.destroyAllWindows()
        sys.exit(0)
    if key == ord('g'):  ## Go for it; don't show images after this
        cv.destroyAllWindows()
        SHOW_IMAGES = False
##-#####################################################################################
# Command-line interface: input/output VOC roots plus the target image height.
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "invoc", type=str, #default="/IMAGESETS/TENNIS/VOCdevkit",
    help="The input VOC root directory."
)
parser.add_argument(
    "outvoc", type=str, #default="/IMAGESETS/TENNIS/VOCdevkitScaled",
    help="Output VOC root directory."
)
parser.add_argument(
    # NOTE(review): type=float with an int default; height is only used via
    # float(args.height) below, so either works -- confirm intent if changing.
    "--height", type=float, default=540, required=False,
    help="Output image height. "
)
args = parser.parse_args()
# Select a size that is suitable for training.
#
# Assume our input images are 1920x1080. In that case,
# Suggested sizes are:
# 480x270 - Scale of 1/4
# 533x300 - Scale of 1/3.6
# 640x360 - Scale of 1/3
# 960x540 - Scale of 1/2
#
# Short edge of the input video size (first frame) will be used for determining the scale.
#
# Tennis ball detection is a difficult object detection problem. At normal resolutions (x270, x300 or x360),
# the ball is only a few pixel in diameter. At service line, it is 3 pixels in diameter!
# As a result, detection will be tough. In the end, we have to run training experiments to find the optimal
# image size -- it is a speed vs accuracy tradeoff.
## Main variables
IN_VOCDIR = os.path.abspath(args.invoc)
IN_IMGDIR = os.path.join(IN_VOCDIR, "{}", "JPEGImages") # Template
IN_ANNDIR = os.path.join(IN_VOCDIR, "{}", "Annotations") # Template
OUT_VOCDIR = os.path.abspath(args.outvoc)
OUT_IMGDIR = os.path.join(OUT_VOCDIR, "{}", "JPEGImages") # Template
OUT_ANNDIR = os.path.join(OUT_VOCDIR, "{}", "Annotations")# Template
## Each directory under *_VOCDIR is a base dataset.
# Discover dataset dirs by finding Annotations/*.xml and stripping the suffix.
# NOTE(review): shell=True with an interpolated path -- fine for a local tool,
# but paths containing shell metacharacters/spaces would break or be unsafe.
findtask = subprocess.Popen(
    [r"find {}/ -mindepth 3 -name '*.xml' | sed -e 's#/Annotations/.*.xml##g' | sort | uniq".format(IN_VOCDIR)],
    shell=True, stdout=subprocess.PIPE)
output,err = findtask.communicate()
if PYVER>=3:
    output = bytes.decode(output)  # Popen output is bytes on Python 3
# NOTE(review): when find matches nothing, split('\n') yields [''] and the
# loop below runs once with an empty base name.
output = output.rstrip().split('\n')
vocbases = [os.path.basename(d) for d in output]
print(vocbases)
print(len(vocbases))
# Main loop: for each discovered VOC base dataset, compute the scale from the
# first annotation, then scale every annotation and its image into OUT_VOCDIR.
cnt = 0
for base in vocbases:
    print("VOC Base: {}".format(base))
    i_imgdir = IN_IMGDIR.format(base)
    i_anndir = IN_ANNDIR.format(base)
    # BUG FIX: previously only a message was printed and processing continued,
    # which crashed later (empty glob -> anns[0] IndexError). Skip the dataset.
    if not os.path.isdir(i_imgdir):
        print("Input image dir {} is not accessible".format(i_imgdir))
        continue
    if not os.path.isdir(i_anndir):
        print("Input annotations dir {} is not accessible".format(i_anndir))
        continue
    o_imgdir = OUT_IMGDIR.format(base)
    o_anndir = OUT_ANNDIR.format(base)
    for out_dir in [o_imgdir, o_anndir]:  # renamed from 'dir' (shadowed builtin)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        else:
            print("Dir {} already exists".format(out_dir))
    anns = glob("{}/*.xml".format(i_anndir))
    if not anns:  # guard against datasets without annotations
        print("No annotations found in {}".format(i_anndir))
        continue
    ## Determine scale per VOC db -- some may be 1920x1080, some may be 4k
    (width, height) = getImageSizefromAnnotations(anns[0])
    SCALE = float(args.height) / float(height)
    print("Scale: {:.3f}".format(SCALE))
    for annfile in anns:
        imgbase = os.path.splitext(os.path.basename(annfile))[0]
        ## Scale annotations first; they carry the target image size
        ann = readAndScaleAnnotations(i_anndir, imgbase, SCALE)
        img_width = int(ann.size.width)
        img_height = int(ann.size.height)
        img_depth = int(ann.size.depth)
        ## Load and scale the image to the annotated target size
        cvimg = cv.imread(os.path.join(i_imgdir, imgbase + ".jpg"), 1)
        cvimg_n = cv.resize(cvimg, (img_width, img_height), interpolation=cv.INTER_CUBIC)
        if SHOW_IMAGES:
            print("xmin: {}".format(ann.findall(".//xmin")))
            print("ymin: {}".format(ann.findall(".//ymin")))
            print("xmax: {}".format(ann.findall(".//xmax")))
            print("ymax: {}".format(ann.findall(".//ymax")))
            print("Scaled properties: width: {:4d}, height: {:4d}, depth: {:1d}".format(img_width, img_height, img_depth))
            # Check images interactively
            show_imgs(cvimg, cvimg_n)
        ## Write the scaled annotation and image
        obj_xml = etree.tostring(ann, pretty_print=True, xml_declaration=False)
        o_annfile = os.path.join(o_anndir, imgbase + ".xml")
        with open(o_annfile, 'w') as f:
            if PYVER >= 3:
                f.write(obj_xml.decode('utf8'))  # etree.tostring returns bytes on py3
            else:
                f.write(obj_xml)
        o_imgfile = os.path.join(o_imgdir, imgbase + ".jpg")
        #print("Writing scaled image {}".format(o_imgfile))
        cv.imwrite(o_imgfile, cvimg_n)
        cnt += 1
print("Done. Scaled {} annotations and images".format(cnt))
|
#!/usr/bin/env python
#
# Normalize a question into a sentence to make it easier
# to answer questions about.
#
from __future__ import print_function
# this module should be imported before using any NLTK or Stanford Parser code,
# as it initializes these with their support folders.
import settings as s
import grammar_util as g
import util
import nltk
from nltk.tree import Tree
from pattern.en import conjugate
# Return status types: possible values returned from normalize.
# The 's-' prefix marks a success status, 'f-' a failure status
# (interpreted by is_success_type below).
BINARY = 's-BINARY'        # yes/no question normalized successfully
FACTOID = 's-FACTOID'      # wh-question normalized successfully
SENTENCE = 's-SENTENCE'    # input was already a complete sentence
NOSUBJECT = 'f-NOSUBJECT'  # no subject found inside the SQ
UNKNOWN = 'f-UNKNOWN'      # could not classify, or parsing raised
def is_success_type(qtype):
    """
    Interprets a qtype (BINARY, UNKNOWN, etc.) as a "success" or "failure".

    Success statuses are prefixed 's-', failure statuses 'f-'.
    """
    first_char = qtype[0]
    return first_char == 's'
def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flattened = []
    for sublist in l:
        flattened.extend(sublist)
    return flattened
def normalize_binary(root, extras=None):
    """Convert a parsed yes/no question into declarative sentences.

    Returns (qtype, sentences): qtype is BINARY on success, or NOSUBJECT when
    the subject is not inside the SQ (the factoid case); sentences are the
    positive and negative declarative renderings (plus reversed forms when
    the verb phrase is a bare 'is').

    extras: optional list of extra nodes (e.g. a replaced WH-phrase) spliced
    into each generated sentence. The original signature used a mutable
    default argument (extras=[]), a shared-state hazard; None is equivalent.
    """
    if extras is None:
        extras = []
    sq = g.get_sentence(root)
    (subject, verb_nodes, tail) = g.partition_sq(sq)
    # Sometimes the root we get is from a factoid SQ, which means the subject
    # is outside in a WH-phrase. Let's let them know, so they can handle it.
    if subject is None:
        return (NOSUBJECT, [g.as_sentence(root)])
    # Treat non-verb nodes as leaves
    def leaf_fn(node):
        return g.is_leaf(node) or not g.is_verb(node)
    # list of Trees (which are lists). To access the string contents of the
    # i'th element, use verb_leaves[i][0]
    leaf_nodes = flatten([g.leaves(verb_node, leaf_fn=leaf_fn)
                          for verb_node in verb_nodes])
    verb_leaves = [leaf for leaf in leaf_nodes if g.is_verb(leaf)]
    non_verb_leaves = [leaf for leaf in leaf_nodes if not g.is_verb(leaf)]
    (positive_vp, negative_vp) = g.negate_verb_leaves(verb_leaves)
    # Check to see if we have to collapse things like 'does eat' to 'eats'
    if len(positive_vp) > 1 and conjugate(positive_vp[0][0], 'VB') == 'do':
        # Grab the second verb and conjugate it according
        # to the POS of the 'do' verb
        pos = positive_vp[0].label()
        positive_vp = [Tree(pos, [conjugate(positive_vp[1][0], pos)])] + positive_vp[2:]
    sentences = []
    sentences.append(Tree('S',
        [g.upcase(subject), g.downcase(Tree('VP', positive_vp))] +
        extras + non_verb_leaves + tail))
    sentences.append(Tree('S',
        [g.upcase(subject), g.downcase(Tree('VP', negative_vp))] +
        extras + non_verb_leaves + tail))
    # TODO(jez): handle the WHNP/NP + 'be' case
    if len(positive_vp) == 1 and conjugate(positive_vp[0][0], 'VB') == 'is':
        # positive reverse
        sentences.append(Tree('S', extras + non_verb_leaves + tail +
            [g.downcase(Tree('VP', positive_vp)), g.upcase(subject)]))
        # negative reverse
        sentences.append(Tree('S', extras + non_verb_leaves + tail +
            [g.downcase(Tree('VP', negative_vp)), g.upcase(subject)]))
    return (BINARY, [g.as_sentence(sent) for sent in sentences])
def normalize_factoid(root):
    """Convert a parsed wh-question (SBARQ) into declarative sentences.

    Delegates the nested SQ to normalize_binary, passing the replaced
    WH-phrase along as an extra constituent. Returns (FACTOID, sentences).
    """
    sbarq = g.get_sentence(root)
    # blindly take the first SQ; if we get an exception,
    # it will be caught higher up
    sq = next(child for child in sbarq if g.is_sq(child))
    sq_root = Tree('ROOT', [sq])
    try:
        whp = next(child for child in sbarq if g.is_wh_phrase(child))
        non_whp = [g.replace_wh_phrase(whp)]
    except StopIteration:
        # no WH-phrase found; normalize the SQ with no extras
        non_whp = []
    (qtype, sentences) = normalize_binary(sq_root, extras=non_whp)
    if qtype == NOSUBJECT:
        # The subject must be in the wh-phrase, because it definitely
        # wasn't in the nested SQ, so we can just replace and move on
        return (FACTOID, [g.as_sentence(g.replace_wh_phrase(root))])
    else:
        # The question's trailing punctuation was truncated when we dove into
        # the nested SQ; since the output is a declarative sentence, we
        # terminate it with a period.
        return (FACTOID, [sent + '.' for sent in sentences])
def normalize_sentence(root):
    """Render an already-complete sentence, replacing any WH-phrase in it."""
    return (SENTENCE, [g.as_sentence(g.replace_wh_phrase(root))])
def normalize(root):
    """
    Tries as best we can to convert the parsed sentence into a question.
    If something fails (either we didn't get an expected sentence type or an
    exception was raised outside of DEBUG mode), we return the original
    string.
    Otherwise, returns a pair (qtype, sentences), where:
    - qtype is one of BINARY, FACTOID, or UNKNOWN, describing the type of
      question which was asked
    - sentences is a list of strings representing our guesses at how to
      convert the question into a sentence
    """
    try:
        if g.is_binary_question(root):
            return normalize_binary(root)
        elif g.is_factoid_question(root):
            return normalize_factoid(root)
        elif g.is_complete_sentence(root):
            return normalize_sentence(root)
        else:
            g.log_error(root, 'Malformed question')
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps only the intended error handling.
    except Exception:
        g.log_error(root, 'Exception raised while parsing')
        if s.DEBUG:
            # only re-raise errors in DEBUG mode
            raise
    # Fall back to the original question if something failed
    return (UNKNOWN, [g.as_sentence(root)])
def use_line(line):
    """Return True for non-blank lines that are not '#' comments."""
    stripped = line.strip()
    return len(stripped) > 0 and not stripped.startswith('#')
if __name__ == '__main__':
    # Only import these if we've been invoked from the command line
    import sys
    import codecs
    from nltk.parse import stanford as stanford_parse
    if len(sys.argv) != 2:
        print('usage: %s <questions.txt>' % sys.argv[0])
        sys.exit(1)
    questions_file = sys.argv[1]
    sp = stanford_parse.StanfordParser()
    # NOTE(review): codecs.open() without an explicit encoding falls back to
    # the default codec -- confirm questions.txt is ASCII/UTF-8.
    with codecs.open(questions_file) as f:
        questions = [line.strip() for line in f.readlines() if use_line(line)]
    for question in questions:
        # raw_parse yields candidate parse trees; take the first (best) one
        parse = next(sp.raw_parse(question))
        (qtype, normalizeds) = normalize(parse)
        print(question)
        print(' ' + qtype)
        for normalized in normalizeds:
            print(' ' + normalized)
        print()
|
<reponame>geoyee/PdRSCD
import numpy as np
import cv2
import random
import math
from functools import reduce
from ppcd.transforms import functional as func
# ----- compose -----
class Compose:
    """
    Apply the configured data-augmentation operators to the input data.

    Every operator consumes image arrays of shape [H, W, C], where H is the
    image height, W the image width and C the number of channels.

    Args:
        transforms (list/None): augmentation operators to apply in order; default None
        data_format ("HWC"/"CHW"): array layout when inputs are npy/npz files; default "HWC"
        classes_num (int): number of label classes; default 2 (plain change detection)
    """
    def __init__(self, transforms=None, data_format="HWC", classes_num=2):
        if data_format != "HWC" and data_format != "CHW":
            raise ValueError('The data_format must be "HWC" or "CHW"!')
        self.transforms = transforms
        self.data_format = data_format
        self.classes_num = classes_num
    def __call__(self, imgs, labs=None):
        """
        Args:
            imgs (list[str/ndarray]): multi-epoch image paths
                (.tif/.img/.npy/.jpg/.mat etc.); ndarrays appear when
                processing large tiled images
            labs (list[str/ndarray]): label image paths (.png); default None

        Returns the transformed images, plus int64 label arrays when labels
        were given.
        """
        timgs = []
        tlabs = []
        # Load images from disk unless they are already arrays
        for i in range(len(imgs)):
            if isinstance(imgs[i], str):
                timgs.append(func.read_img(imgs[i], self.data_format, is_lab=False))
            else:
                timgs.append(imgs[i])
        if labs is not None:
            for i in range(len(labs)):
                if isinstance(labs[i], str):
                    tlabs.append(func.read_img(labs[i], self.data_format, \
                                 is_lab=True, classes_num=self.classes_num))
                else:
                    tlabs.append(labs[i])
        else:
            tlabs = None
        # Apply the augmentation operators in order
        if self.transforms is not None:
            for op in self.transforms:
                timgs, tlabs = op(timgs, tlabs)
        if tlabs is None:
            return timgs
        else:
            tlabst = [np.array(tlab).astype('int64') for tlab in tlabs]
            return timgs, tlabst
# ----- transforms -----
class Resize:
    """
    Resize the images and label maps.

    Args:
        target_size (int/list/tuple): target size; an int means a square output
        interp (str): interpolation method, one of
            ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4']; default 'NEAREST'
    """
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }
    def __init__(self, target_size, interp='NEAREST'):
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise ValueError(
                    'when target is list or tuple, it should include 2 elements, but it is {}.'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                'Type of target_size is invalid. Must be Integer or List or tuple, now is {}.'
                .format(type(target_size)))
        assert interp in self.interp_dict, \
            'interp should be one of {}.'.format(self.interp_dict.keys())
        self.target_size = target_size
        self.interp = interp
    def __call__(self, image, label=None):
        # An int target means a square output; otherwise use the pair as-is
        if isinstance(self.target_size, int):
            size = (self.target_size, self.target_size)
        else:
            size = self.target_size
        for i in range(len(image)):
            if not isinstance(image[i], np.ndarray):
                raise TypeError("ResizeImage: image type is not np.ndarray.")
            if len(image[i].shape) != 3:
                raise ValueError('ResizeImage: image is not 3-dimensional.')
            # Resizes in place in the input list
            image[i] = cv2.resize(image[i], size, interpolation=self.interp_dict[self.interp])
        if label is not None:
            # Labels always use nearest neighbour so class ids are not blended
            label = [cv2.resize(lab, size, interpolation=self.interp_dict['NEAREST']) \
                     for lab in label]
        return image, label
class Normalize:
    """
    Standardize the images:
    1. scale pixel values into the interval [0.0, 1.0]
    2. subtract the mean and divide by the standard deviation

    Args:
        mean (list): per-band dataset means; one element per band
        std (list): per-band dataset standard deviations; one element per band
        bit_num (int): image bit depth, one of 8/16/24; default 8
        band_num (int): number of bands to operate on; default 3
    """
    def __init__(self, mean, std, bit_num=8, band_num=3):
        if bit_num not in [8, 16, 24]:
            raise ValueError('{} is not effective bit_num, bit_num should be one of 8, 16, 24.'
                             .format(bit_num))
        if band_num != len(mean) or band_num != len(std):
            raise ValueError('band_num should be equal to len of mean/std.')
        if not (isinstance(mean, list) and isinstance(std, list)):
            raise ValueError('{}: input type is invalid.'.format(self))
        # A zero anywhere in std would cause a division by zero later
        if reduce(lambda x, y: x * y, std) == 0:
            raise ValueError('{}: std is invalid!'.format(self))
        self.mean = mean
        self.std = std
        self.band_num = band_num
        # Value range implied by the bit depth, used for the [0, 1] rescale
        self.min_val = [0] * band_num
        self.max_val = [2**bit_num - 1] * band_num
    def __call__(self, image, label=None):
        # Broadcast mean/std over H and W: shape (1, 1, band_num)
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]
        images = []
        for i in range(len(image)):
            images.append(func.normalize(
                image[i], self.min_val, self.max_val, mean, std, self.band_num))
        return images, label
class RandomFlip:
    """Randomly flip the images and label maps.

    Args:
        prob (float): probability of flipping; default 0.5
        direction (str): flip direction, one of
            ['Horizontal', 'Vertical', 'Both']; default 'Both'
    """
    flips_list = ['Horizontal', 'Vertical', 'Both']
    def __init__(self, prob=0.5, direction='Both'):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        assert direction in self.flips_list, 'direction should be one of {}.'.format(self.flips_list)
        self.prob = prob
        self.direction = direction
    def __call__(self, image, label=None):
        # Skip augmentation with probability (1 - prob)
        if random.random() >= self.prob:
            return image, label
        flipped = [func.mode_flip(img, self.direction) for img in image]
        if label is not None:
            label = [func.mode_flip(lab, self.direction) for lab in label]
        return flipped, label
class RandomRotate:
    """
    Randomly rotate the images and label maps by 1-89 degrees, keeping the
    image size unchanged.

    Args:
        prob (float): probability of rotating; default 0.5
        ig_pix (int): ignore value used to pad the label borders after
            rotation; default 255
    """
    def __init__(self, prob=0.5, ig_pix=255):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        self.prob = prob
        self.ig_pix = ig_pix
    def __call__(self, image, label=None):
        # NOTE(review): the angle is drawn before the probability gate, so an
        # RNG value is consumed even when the rotation is skipped -- existing
        # seeded pipelines depend on this call order.
        ang = random.randint(1, 89)
        if random.random() < self.prob:
            images = []
            for i in range(len(image)):
                images.append(func.rotate_img(image[i], ang))
            if label is not None:
                # Labels are padded with the ignore value, not interpolated data
                label = [func.rotate_img(lab, ang, ig_pix=self.ig_pix) for lab in label]
            return images, label
        else:
            return image, label
class RandomEnlarge:
    """
    Randomly crop the images and label maps, then stretch the crop back to
    the original size (local zoom-in).

    Args:
        prob (float): probability of cropping; default 0.5
        min_clip_rate (list/tuple): crop height/width as a fraction of the
            original size; default [0.5, 0.5].
            NOTE(review): despite the "min" in the name, the fraction is used
            directly as the crop size (only the crop position is random) --
            confirm whether a random rate in [min, 1] was intended.
    """
    def __init__(self, prob=0.5, min_clip_rate=[0.5, 0.5]):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if isinstance(min_clip_rate, list) or isinstance(min_clip_rate, tuple):
            if len(min_clip_rate) != 2:
                raise ValueError(
                    'when min_clip_rate is list or tuple, it should include 2 elements, but it is {}.'
                    .format(min_clip_rate))
        self.prob = prob
        self.min_clip_rate = list(min_clip_rate)
    def __call__(self, image, label=None):
        h, w = image[0].shape[:2]
        h_clip = math.floor(self.min_clip_rate[0] * h)
        w_clip = math.floor(self.min_clip_rate[1] * w)
        # NOTE(review): crop origin is drawn before the probability gate, so
        # RNG values are consumed even when the crop is skipped.
        x = random.randint(0, (w - w_clip))
        y = random.randint(0, (h - h_clip))
        if random.random() < self.prob:
            images = []
            for i in range(len(image)):
                images.append(func.enlarge_img(image[i], x, y, h_clip, w_clip))
            if label is not None:
                label = [func.enlarge_img(lab, x, y, h_clip, w_clip) for lab in label]
            return images, label
        else:
            return image, label
class RandomNarrow:
    """
    Randomly shrink the images and label maps, then pad them back to the
    original size.

    Args:
        prob (float): probability of shrinking; default 0.5
        min_size_rate (list/tuple): minimum height/width fraction of the
            original size; the actual rate is drawn from [min, 1];
            default [0.5, 0.5]
        ig_pix (int): ignore value used to pad the label borders after
            shrinking; default 255
    """
    def __init__(self, prob=0.5, min_size_rate=[0.5, 0.5], ig_pix=255):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if isinstance(min_size_rate, list) or isinstance(min_size_rate, tuple):
            if len(min_size_rate) != 2:
                raise ValueError(
                    'when min_size_rate is list or tuple, it should include 2 elements, but it is {}.'
                    .format(min_size_rate))
        self.prob = prob
        self.min_size_rate = list(min_size_rate)
        self.ig_pix = ig_pix
    def __call__(self, image, label=None):
        # NOTE(review): rates are drawn before the probability gate, so RNG
        # values are consumed even when the shrink is skipped.
        x_rate = random.uniform(self.min_size_rate[0], 1)
        y_rate = random.uniform(self.min_size_rate[1], 1)
        if random.random() < self.prob:
            images = []
            for i in range(len(image)):
                images.append(func.narrow_img(image[i], x_rate, y_rate))
            if label is not None:
                # Labels are padded with the ignore value
                label = [func.narrow_img(lab, x_rate, y_rate, ig_pix=self.ig_pix) for lab in label]
            return images, label
        else:
            return image, label
class RandomBlur:
    """
    Apply Gaussian blur to the images (labels are untouched).

    Args:
        prob (float): probability of blurring; default 0.1
        ksize (int): Gaussian kernel size; default 3
        band_num (int): number of bands to operate on; default 3
        img_do (list): indices of the temporal images to operate on;
            default [0, 1]
    """
    def __init__(self, prob=0.1, ksize=3, band_num=3, img_do=[0, 1]):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if not isinstance(img_do, list):
            raise ValueError('img_do should be list.')
        self.prob = prob
        self.ksize = ksize
        self.band_num = band_num
        self.img_do = img_do
    def __call__(self, image, label=None):
        if random.random() < self.prob:
            for i in range(len(image)):
                if i in self.img_do:
                    # Blurs the first band_num channels in place
                    image[i][:, :, :self.band_num] = cv2.GaussianBlur(
                        image[i][:, :, :self.band_num], (self.ksize, self.ksize), 0)
            return image, label
        else:
            return image, label
class RandomSharpening:
    """
    Sharpen the images by adding a weighted Laplacian response.

    Args:
        prob (float): probability of sharpening; default 0.1
        laplacian_mode (str): Laplacian kernel type, one of
            ['4-1', '8-1', '4-2']; default '8-1'
        band_num (int): number of bands to operate on; default 3
        img_do (list): indices of the temporal images to operate on;
            default [0, 1]
    """
    laplacian_dict = {
        '4-1': np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]], np.float32),
        '8-1': np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], np.float32),
        '4-2': np.array([[1, -2, 1], [-2, 4, -2], [1, -2, 1]], np.float32)
    }
    def __init__(self, prob=0.1, laplacian_mode='8-1', band_num=3, img_do=[0, 1]):
        assert laplacian_mode in self.laplacian_dict, \
            'laplacian_mode should be one of {}.'.format(self.laplacian_dict.keys())
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if not isinstance(img_do, list):
            raise ValueError('img_do should be list.')
        self.prob = prob
        self.band_num = band_num
        self.kernel = self.laplacian_dict[laplacian_mode]
        self.img_do = img_do
    def __call__(self, image, label=None):
        if random.random() < self.prob:
            for i in range(len(image)):
                if i in self.img_do:
                    # In-place += of 0.2 * Laplacian response on the first
                    # band_num channels.
                    # NOTE(review): assumes float image arrays; an integer
                    # dtype would truncate/wrap here -- confirm upstream dtype.
                    image[i][:, :, :self.band_num] += (
                        0.2 * cv2.filter2D(image[i][:, :, :self.band_num], -1, kernel=self.kernel))
            return image, label
        else:
            return image, label
class RandomColor:
    """Randomly adjust image contrast and brightness within a small range.

    Args:
        prob (float): probability of adjusting; default 0.5
        alpha_range (list/tuple): contrast multiplier range; default [0.8, 1.2]
        beta_range (list/tuple): brightness offset range; default [-10, 10]
        band_num (int): number of bands to operate on; default 3
        img_do (list): indices of the temporal images to operate on; default [0, 1]
    """
    def __init__(self, prob=0.5, alpha_range=[0.8, 1.2], beta_range=[-10, 10], band_num=3, img_do=[0, 1]):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if isinstance(alpha_range, list) or isinstance(alpha_range, tuple):
            if len(alpha_range) != 2:
                raise ValueError(
                    'when alpha_range is list or tuple, it should include 2 elements, but it is {}.'
                    .format(alpha_range))
        if isinstance(beta_range, list) or isinstance(beta_range, tuple):
            if len(beta_range) != 2:
                raise ValueError(
                    'when beta_range is list or tuple, it should include 2 elements, but it is {}.'
                    .format(beta_range))
        if not isinstance(img_do, list):
            raise ValueError('img_do should be list.')
        self.prob = prob
        self.alpha_range = list(alpha_range)
        self.beta_range = list(beta_range)
        self.band_num = band_num
        self.img_do = img_do
    def __call__(self, image, label=None):
        # Skip augmentation with probability (1 - prob)
        if random.random() >= self.prob:
            return image, label
        alpha = random.uniform(self.alpha_range[0], self.alpha_range[1])
        beta = random.uniform(self.beta_range[0], self.beta_range[1])
        for idx, img in enumerate(image):
            if idx in self.img_do:
                # out = alpha * in + beta, applied in place on band_num bands
                img[:, :, :self.band_num] = alpha * img[:, :, :self.band_num] + beta
        return image, label
class RandomStrip:
    """
    Randomly add strip (banding) noise to the images.

    Args:
        prob (float): probability of adding strip noise; default 0.5
        strip_rate (float): fraction of rows/columns turned into strips; default 0.05
        direction (str): strip direction, one of ['Horizontal', 'Vertical']; default 'Horizontal'
        band_num (int): number of bands to operate on; default 3
        img_do (list): indices of the temporal images to operate on; default [0, 1]
    """
    strip_list = ['Horizontal', 'Vertical']
    def __init__(self, prob=0.5, strip_rate=0.05, direction='Horizontal', band_num=3, img_do=[0, 1]):
        assert direction in self.strip_list, 'direction should be one of {}.'.format(self.strip_list)
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if strip_rate < 0 or strip_rate > 1:
            raise ValueError('strip_rate should be between 0 and 1.')
        if not isinstance(img_do, list):
            raise ValueError('img_do should be list.')
        self.prob = prob
        self.strip_rate = strip_rate
        self.direction = direction
        self.band_num = band_num
        self.img_do = img_do
    def __call__(self, image, label=None):
        h, w = image[0].shape[:2]
        if random.random() < self.prob:
            # Number of strips relative to the affected axis length.
            # NOTE(review): this is a float; confirm func.random_strip expects
            # (or tolerates) a non-integer count.
            strip_num = self.strip_rate * (h if self.direction == 'Horizontal' else w)
            images = []
            for i in range(len(image)):
                if i in self.img_do:
                    images.append(func.random_strip(
                        image[i], strip_num, self.direction, self.band_num))
                else:
                    # BUG FIX: images not listed in img_do were silently
                    # dropped from the returned list; keep them unchanged.
                    images.append(image[i])
            return images, label
        else:
            return image, label
class RandomFog:
    """
    Randomly add a fog effect to the images.

    Args:
        prob (float): probability of adding fog; default 0.5
        fog_range (list/tuple): fog strength range within [0, 1]; default [0.03, 0.28]
        band_num (int): number of bands to operate on; default 3
        img_do (list): indices of the temporal images to operate on; default [0, 1]
    """
    def __init__(self, prob=0.5, fog_range=[0.03, 0.28], band_num=3, img_do=[0, 1]):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if isinstance(fog_range, list) or isinstance(fog_range, tuple):
            if len(fog_range) != 2:
                raise ValueError(
                    'when fog_range is list or tuple, it should include 2 elements, but it is {}.'
                    .format(fog_range))
        if not isinstance(img_do, list):
            raise ValueError('img_do should be list.')
        self.prob = prob
        self.fog_range = fog_range
        self.band_num = band_num
        self.img_do = img_do
    def __call__(self, image, label=None):
        if random.random() < self.prob:
            images = []
            for i in range(len(image)):
                if i in self.img_do:
                    images.append(func.add_fog(image[i], self.fog_range, self.band_num))
                else:
                    # BUG FIX: images not listed in img_do were silently
                    # dropped from the returned list; keep them unchanged.
                    images.append(image[i])
            return images, label
        else:
            return image, label
class RandomSplicing:
    """Randomly split each image in two and recolor one half, simulating an
    unbalanced mosaic seam.

    Args:
        prob (float): probability of applying the effect; default 0.1
        direction (str): split direction, one of ['Horizontal', 'Vertical']; default 'Horizontal'
        band_num (int): number of bands to operate on; default 3
    """
    splic_list = ['Horizontal', 'Vertical']
    def __init__(self, prob=0.1, direction='Horizontal', band_num=3):
        assert direction in self.splic_list, 'direction should be one of {}.'.format(self.splic_list)
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        self.prob = prob
        self.direction = direction
        self.band_num = band_num
    def __call__(self, image, label=None):
        # Skip augmentation with probability (1 - prob)
        if random.random() >= self.prob:
            return image, label
        spliced = [func.random_splicing(img, self.direction, self.band_num)
                   for img in image]
        return spliced, label
class RandomRemoveBand:
    """
    Randomly zero out image bands (channels).

    Bands in kill_bands are always zeroed, bands in keep_bands are never
    zeroed, and one additional band is chosen at random from the remainder.

    Args:
        prob (float): probability of applying the removal; default 0.1
        kill_bands (list): bands that must be zeroed; default None
        keep_bands (list): bands that must never be zeroed; default None
    """
    def __init__(self, prob=0.1, kill_bands=None, keep_bands=None):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        if not isinstance(kill_bands, list) and kill_bands is not None:
            raise ValueError('kill_bands must be list or None.')
        if not isinstance(keep_bands, list) and keep_bands is not None:
            raise ValueError('keep_bands must be list or None.')
        self.prob = prob
        self.kill_bands = [] if kill_bands is None else list(kill_bands)
        self.keep_bands = [] if keep_bands is None else list(keep_bands)
    def __call__(self, image, label=None):
        if random.random() < self.prob:
            rand_list = []  # bands eligible for random removal
            rm_list = []    # bands that will be zeroed
            c = image[0].shape[-1]
            for i in range(c):
                if i in self.kill_bands:
                    rm_list.append(i)
                elif i in self.keep_bands:
                    continue
                else:
                    rand_list.append(i)
            # BUG FIX: random.choice crashed when every band was pinned by
            # kill_bands/keep_bands; only draw when candidates exist.
            if rand_list:
                rm_list.append(random.choice(rand_list))
            for j in rm_list:
                for i in range(len(image)):
                    image[i][:, :, j] = 0
        # BUG FIX: the original returned None (implicitly) when the
        # probability gate was not hit, which broke Compose's tuple unpack.
        return image, label
class NDVI:
    """Compute NDVI (normalized difference vegetation index) per image and
    append it as a new channel.

    Args:
        r_band (int): red band index; default 2 (Landsat TM band 3)
        nir_band (int): near-infrared band index; default 3 (Landsat TM band 4)
    """
    def __init__(self, r_band=2, nir_band=3):
        self.r_band = r_band
        self.nir_band = nir_band
    def __call__(self, image, label=None):
        stacked = [func.band_comput(img, self.nir_band, self.r_band)
                   for img in image]
        return stacked, label
class NDWI:
    """Compute NDWI (normalized difference water index) per image and append
    it as a new channel.

    Args:
        g_band (int): green band index; default 1 (Landsat TM band 2)
        nir_band (int): near-infrared band index; default 3 (Landsat TM band 4)
    """
    def __init__(self, g_band=1, nir_band=3):
        self.g_band = g_band
        self.nir_band = nir_band
    def __call__(self, image, label=None):
        stacked = [func.band_comput(img, self.g_band, self.nir_band)
                   for img in image]
        return stacked, label
class NDBI:
    """Compute NDBI (normalized difference built-up index) per image and
    append it as a new channel.

    Args:
        nir_band (int): near-infrared band index; default 3 (Landsat TM band 4)
        mir_band (int): mid-infrared band index; default 4 (Landsat TM band 5)
    """
    def __init__(self, nir_band=3, mir_band=4):
        self.nir_band = nir_band
        self.mir_band = mir_band
    def __call__(self, image, label=None):
        stacked = [func.band_comput(img, self.mir_band, self.nir_band)
                   for img in image]
        return stacked, label
# ----- change detection -----
class ExchangeTime:
    """
    Swap the two temporal images with some probability.

    Args:
        prob (float): probability of swapping; default 0.5
    """
    def __init__(self, prob=0.5):
        if prob < 0 or prob > 1:
            raise ValueError('prob should be between 0 and 1.')
        self.prob = prob
    def __call__(self, image, label=None):
        # BUG FIX: prob was validated and stored but never consulted, so the
        # swap happened on every call; it is now applied with probability prob.
        if len(image) == 2 and random.random() < self.prob:
            return [image[1], image[0]], label
        return image, label
# ----- histogram -----
class HistogramMatching:
    """Match the histogram of every later-epoch image to the first epoch.

    Args:
        bit_num (int): image bit depth, one of 8/16/24; default 8
        band_num (int): number of bands to operate on; default 3
    """
    def __init__(self, bit_num=8, band_num=3):
        if bit_num not in [8, 16, 24]:
            raise ValueError('{} is not effective bit_num, bit_num should be one of 8, 16, 24.'
                             .format(bit_num))
        self.bit_num = bit_num
        self.band_num = band_num
    def __call__(self, image, label=None):
        matched = []
        for idx, img in enumerate(image):
            if idx == 0:
                # The first epoch is the reference and passes through unchanged
                matched.append(img)
            else:
                matched.append(func.histogram_matching(
                    img, image[0], self.band_num, self.bit_num))
        return matched, label
<gh_stars>0
import json
from tornado import gen, iostream
from tornado.web import Application, RequestHandler, RedirectHandler, StaticFileHandler
from tornado.websocket import WebSocketHandler
from tornado.ioloop import IOLoop
from mako.template import Template
from mako.lookup import TemplateLookup
import plim
import stylus
from .compat import Path
from .transformers import transform
here = Path(__file__).parent  # package directory (holds bundled assets like reload.js)
# Extensions that may serve as a directory index page, in priority order.
PAGE_FORMAT_EXTS = ['.html', '.plim', '.md', '.rst']
# Active Site object; assigned by start_server() and read by request handlers.
site = None
def start_server(site_, port):
    """Run the live-reload development server for *site_* on *port*.

    Blocks in the Tornado IOLoop; wires up the module-level `send` callable
    so other threads can broadcast reload messages to connected browsers.
    """
    global site, app
    site = site_
    # Redirect '/' to the configured base URL when the site is not mounted at root
    if site.base_url != '/':
        handlers = [(r'/', RedirectHandler, {'url': site.base_url, 'permanent': False})]
    else:
        handlers = []
    handlers.extend([
        (r'/__reload.js', ReloadJSHandler),
        (r'/__reload__/', ReloadHandler),
        (site.base_url + r'(.*)', NoCacheFileHandler),
    ])
    app = Application(handlers, debug=True)
    # Shared registry of open reload websockets (see ReloadHandler)
    app.sockets = set()
    app.listen(port)
    loop = IOLoop.current()
    # Hook the broadcast callable up to this loop and socket set
    send.loop = loop
    send.sockets = app.sockets
    loop.start()
def start_static_server(site_, port):
    """Serve the pre-built site from site_.build_dir (no live reload). Blocks."""
    settings = dict(
        path=str(site_.build_dir),
        default_filename='index.html',
    )
    handlers = []
    # Redirect '/' to the base URL when the site is not mounted at root
    if site_.base_url != '/':
        handlers.append((r'/', RedirectHandler, {'url': site_.base_url, 'permanent': False}))
    handlers.append((site_.base_url + r'(.*)', StaticFileHandler, settings))
    app = Application(handlers)
    app.listen(port)
    IOLoop.current().start()
class ReloadJSHandler(RequestHandler):
    """Serves the bundled reload.js client script."""
    def get(self):
        self.set_header('Content-Type', 'text/javascript')
        self.write((here / 'reload.js').read_bytes())
class ReloadHandler(WebSocketHandler):
    """Registers reload websockets so the server can broadcast to them."""
    def open(self):
        self.application.sockets.add(self)
    def on_close(self):
        # discard() instead of remove(): on_close can fire for a connection
        # that never completed open() (or fire twice) without raising KeyError.
        self.application.sockets.discard(self)
class NoCacheFileHandler(RequestHandler):
    """Serves site files uncached, transforming page sources on the fly."""
    @gen.coroutine
    def get(self, path):
        # Disable caching entirely: the dev server must always show fresh output
        self.set_header('Cache-Control', 'no-store')
        filepath = site.site_dir / path
        # Directory request: fall back to the first index.<ext> file present
        if filepath.is_dir():
            index_file = find_index_file(filepath)
            if index_file:
                filepath = index_file
        if not filepath.exists():
            self.clear()
            self.set_status(404)
            # NOTE(review): assumes site_dir/404.html exists; a missing 404
            # page would itself raise here -- confirm the build guarantees it.
            self.finish((site.site_dir / '404.html').read_bytes())
            return
        # transform() returns (mime_type, content) for page sources it knows
        # how to render; anything else falls through to raw streaming below.
        result = transform(filepath)
        if isinstance(result, tuple):
            mime_type, content = result
            self.set_header('Content-Type', mime_type)
            if mime_type == 'text/html':
                # Inject the live-reload script into rendered HTML
                content = add_reload_snippet(content)
            self.finish(content)
            return
        # Stream static files chunk-by-chunk; stop quietly if the client leaves
        content = get_content(filepath)
        for chunk in content:
            try:
                self.write(chunk)
                yield self.flush()
            except iostream.StreamClosedError:
                return
class SendCallable:
    """
    A callable object that is used to communicate with the browser.

    `loop` and `sockets` are wired up by start_server(); until then every
    call is a silent no-op.
    """
    def __init__(self):
        self.loop = None
        self.sockets = None
    def __call__(self, data):
        """
        It is safe to call this method from outside the main thread that is
        running the Tornado event loop.
        """
        if self.loop:
            # Marshal the actual write onto the event-loop thread
            self.loop.add_callback(self._send, data)
    def _send(self, data):
        """Write the given data to all connected websockets."""
        for socket in self.sockets:
            socket.write_message(data)
# Module-level broadcast hook; start_server() wires its loop and sockets.
send = SendCallable()
def find_index_file(dir):
    """Return the first existing index.<ext> in *dir*, or None.

    Extensions are tried in PAGE_FORMAT_EXTS priority order.
    """
    for ext in PAGE_FORMAT_EXTS:
        candidate = dir / ('index' + ext)
        if candidate.exists():
            return candidate
    return None
def get_content(path, remaining=None):
    """Yield the file at *path* as binary chunks of up to 64 KiB.

    Parameters:
        path: a pathlib.Path-like object opened in binary mode
        remaining: optional byte budget; when given, at most this many bytes
            are yielded. (Generalizes the original's `remaining` logic, which
            was hard-wired to None and therefore dead code with an
            unreachable -- and, once reachable, incorrect -- assert.)

    Yields bytes objects until EOF or until the budget is exhausted.
    """
    with path.open('rb') as fp:
        while True:
            chunk_size = 64 * 1024
            if remaining is not None:
                if remaining <= 0:
                    return  # budget exhausted
                chunk_size = min(chunk_size, remaining)
            chunk = fp.read(chunk_size)
            if not chunk:
                return  # EOF
            if remaining is not None:
                remaining -= len(chunk)
            yield chunk
def add_reload_snippet(html):
    """Inject the live-reload script tag just before </body>, if present."""
    marker = '</body>'
    pos = html.find(marker)
    if pos < 0:
        return html
    snippet = '<script src="/__reload.js"></script>'
    return html[:pos] + snippet + html[pos:]
|
<filename>fabfile.py<gh_stars>0
"""Main Fabric deployment file for CloudBioLinux distribution.
This installs a standard set of useful biological applications on a remote
server. It is designed for bootstrapping a machine from scratch, as with new
Amazon EC2 instances.
Usage:
fab -H hostname -i private_key_file install_biolinux
which will call into the 'install_biolinux' method below. See the README for
more examples.
Requires:
Fabric http://docs.fabfile.org
PyYAML http://pyyaml.org/wiki/PyYAMLDocumentation
"""
import os
import sys
from datetime import datetime
from fabric.api import *
from fabric.contrib.files import *
import yaml
# Prefer the cloudbio package that sits next to this fabfile over any
# installed "cloudbiolinux-" distribution: drop installed copies from
# sys.path, then put this file's own directory on the path.
for to_remove in [p for p in sys.path if p.find("cloudbiolinux-") > 0]:
    sys.path.remove(to_remove)
sys.path.append(os.path.dirname(__file__))
import cloudbio
from cloudbio import libraries
from cloudbio.utils import _setup_logging, _configure_fabric_environment
from cloudbio.cloudman import _cleanup_ec2
from cloudbio.cloudbiolinux import _cleanup_space
from cloudbio.custom import shared
from cloudbio.package.shared import _yaml_to_packages
from cloudbio.package import brew
from cloudbio.package import (_configure_and_install_native_packages,
_connect_native_packages)
from cloudbio.package.nix import _setup_nix_sources, _nix_packages
from cloudbio.flavor.config import get_config_file
from cloudbio.config_management.puppet import _puppet_provision
from cloudbio.config_management.chef import _chef_provision, chef, _configure_chef
# ### Shared installation targets for all platforms
def install_biolinux(target=None, flavor=None):
    """Main entry point for installing BioLinux on a remote server.

    `flavor` customizes CloudBioLinux behavior: either a flavor name that
    maps to a directory in contrib/flavor, or the path to a custom directory
    containing alternative package lists (main.yaml, packages.yaml,
    custom.yaml) and/or custom python code hooking into the build machinery.

    `target` runs only a particular part of the build. Valid choices:
      - packages       Install distro packages
      - custom         Install custom packages
      - chef_recipes   Provision chef recipes
      - libraries      Install programming language libraries
      - post_install   Setup CloudMan, FreeNX and other system services
      - cleanup        Remove downloaded files and prepare images for AMI builds
    """
    _setup_logging(env)
    started = _print_time_stats("Config", "start")
    _check_fabric_version()
    # Distribution checks are unnecessary for targets that never touch the
    # native package system. (`target in (...)` is False for None, so the
    # explicit None guard of the original is redundant.)
    skip_distcheck = target in ("libraries", "custom")
    _configure_fabric_environment(env, flavor, ignore_distcheck=skip_distcheck)
    env.logger.debug("Target is '%s'" % target)
    _perform_install(target, flavor)
    _print_time_stats("Config", "end", started)
def _perform_install(target=None, flavor=None, more_custom_add=None):
    """Run the actual installation procedures once the CBL/fabric env is set up.

    See `install_biolinux` for full details on the `target` and `flavor`
    arguments. `more_custom_add` is an optional dict of extra custom packages
    ({group_name: [package, ...]}) merged into the configured custom_add.
    """
    pkg_install, lib_install, custom_ignore, custom_add = _read_main_config()
    if more_custom_add:
        if custom_add is None:
            custom_add = {}
        # .items() rather than the Python-2-only .iteritems() so this also
        # runs under Python 3; on Python 2 it only builds a small list.
        for k, vs in more_custom_add.items():
            if k in custom_add:
                custom_add[k].extend(vs)
            else:
                custom_add[k] = vs
    if target is None or target == "packages":
        env.keep_isolated = getattr(env, "keep_isolated", "false").lower() in ["true", "yes"]
        # Only touch system information if we're not an isolated installation
        if not env.keep_isolated:
            # can only install native packages if we have sudo access or are root
            if env.use_sudo or env.safe_run_output("whoami").strip() == "root":
                _configure_and_install_native_packages(env, pkg_install)
            else:
                _connect_native_packages(env, pkg_install, lib_install)
        if env.nixpkgs:  # ./doc/nixpkgs.md
            _setup_nix_sources()
            _nix_packages(pkg_install)
    if target is None or target == "custom":
        _custom_installs(pkg_install, custom_ignore, custom_add)
    if target is None or target == "chef_recipes":
        _provision_chef_recipes(pkg_install, custom_ignore)
    if target is None or target == "puppet_classes":
        _provision_puppet_classes(pkg_install, custom_ignore)
    if target is None or target == "brew":
        install_brew(flavor=flavor, automated=True)
    if target is None or target == "libraries":
        _do_library_installs(lib_install)
    if target is None or target == "post_install":
        env.edition.post_install(pkg_install=pkg_install)
        env.flavor.post_install()
    if target is None or target == "cleanup":
        _cleanup_space(env)
        if "is_ec2_image" in env and env.is_ec2_image.upper() in ["TRUE", "YES"]:
            _cleanup_ec2(env)
def _print_time_stats(action, event, prev_time=None):
    """Log a timing event during configuration and return its timestamp.

    :type action: string
    :param action: Indicates type of action (eg, Config, Lib install, Pkg install)
    :type event: string
    :param event: The monitoring event (eg, start, stop)
    :type prev_time: datetime
    :param prev_time: A timestamp of a previous event. If provided, the elapsed
                      time between then and now is appended to the log line.
    :rtype: datetime
    :return: The UTC time at which this method was called.
    """
    now = datetime.utcnow()
    message = "{0} {1} time: {2}".format(action, event, now)
    if prev_time:
        message += "; duration: {0}".format(str(now - prev_time))
    env.logger.info(message)
    return now
def _check_fabric_version():
    """Abort unless the installed Fabric is version 1 or newer."""
    major = int(env.version.split(".")[0])
    if major < 1:
        raise NotImplementedError("Please install fabric version 1 or higher")
def _custom_installs(to_install, ignore=None, add=None):
    """Install the custom packages configured in custom.yaml.

    :param to_install: groups of packages selected for installation
    :param ignore: optional collection of package names to skip
    :param add: optional dict mapping extra group names to package lists
                installed on top of the configured ones
    """
    # Check that local_install is set *before* probing the remote path: the
    # original condition called env.safe_exists(env.local_install) first,
    # running the existence check even when local_install was empty.
    if env.local_install and not env.safe_exists(env.local_install):
        env.safe_run("mkdir -p %s" % env.local_install)
    pkg_config = get_config_file(env, "custom.yaml").base
    packages, pkg_to_group = _yaml_to_packages(pkg_config, to_install)
    packages = [p for p in packages if ignore is None or p not in ignore]
    if add is not None:
        # .items() rather than the Python-2-only .iteritems() for py3 compat.
        for key, vals in add.items():
            for v in vals:
                pkg_to_group[v] = key
                packages.append(v)
    for p in env.flavor.rewrite_config_items("custom", packages):
        install_custom(p, True, pkg_to_group)
def _provision_chef_recipes(to_install, ignore=None):
    """Read the chef recipe config, decide what to provision, and run chef.

    Mirrors _custom_installs: load chef_recipes.yaml, filter out ignored
    entries, let the flavor rewrite the list, then provision.
    """
    pkg_config = get_config_file(env, "chef_recipes.yaml").base
    packages, _ = _yaml_to_packages(pkg_config, to_install)
    if ignore is not None:
        packages = [p for p in packages if p not in ignore]
    recipes = list(env.flavor.rewrite_config_items("chef_recipes", packages))
    # Skip the chef run entirely when there is nothing to configure.
    if recipes:
        install_chef_recipe(recipes, True)
def _provision_puppet_classes(to_install, ignore=None):
    """Read the puppet class config, decide what to provision, and run puppet.

    Mirrors _custom_installs: load puppet_classes.yaml, filter out ignored
    entries, let the flavor rewrite the list, then provision.
    """
    pkg_config = get_config_file(env, "puppet_classes.yaml").base
    packages, _ = _yaml_to_packages(pkg_config, to_install)
    if ignore is not None:
        packages = [p for p in packages if p not in ignore]
    classes = list(env.flavor.rewrite_config_items("puppet_classes", packages))
    # Skip the puppet run entirely when there is nothing to configure.
    if classes:
        install_puppet_class(classes, True)
def install_chef_recipe(recipe, automated=False, flavor=None):
    """Install one or more chef recipes by name.

    Usage: fab [-i key] [-u user] -H host install_chef_recipe:recipe

    :type recipe: string or list
    :param recipe: a recipe name, or a list of recipe names, to provision
    :type automated: bool
    :param automated: If set to True, the environment is not loaded.
    """
    _setup_logging(env)
    if not automated:
        _configure_fabric_environment(env, flavor)
    started = _print_time_stats("Chef provision for recipe(s) '{0}'".format(recipe), "start")
    _configure_chef(env, chef)
    recipes = [recipe] if not isinstance(recipe, list) else recipe
    for name in recipes:
        chef.add_recipe(name)
    _chef_provision(env, recipes)
    _print_time_stats("Chef provision for recipe(s) '%s'" % recipe, "end", started)
def install_puppet_class(classes, automated=False, flavor=None):
    """Install one or more puppet classes by name.

    Usage: fab [-i key] [-u user] -H host install_puppet_class:class

    :type classes: string or list
    :param classes: a class name, or a list of class names, to provision
    :type automated: bool
    :param automated: If set to True, the environment is not loaded.
    """
    _setup_logging(env)
    if not automated:
        _configure_fabric_environment(env, flavor)
    started = _print_time_stats("Puppet provision for class(es) '{0}'".format(classes), "start")
    class_list = [classes] if not isinstance(classes, list) else classes
    _puppet_provision(env, class_list)
    _print_time_stats("Puppet provision for classes(s) '%s'" % class_list, "end", started)
def install_custom(p, automated=False, pkg_to_group=None, flavor=None):
    """Install a single custom program or package by name.

    This fetches the program name from ``config/custom.yaml`` and delegates
    to a method in ``custom/*name*.py`` to proceed with the installation.
    Alternatively, if a program install method is defined in the appropriate
    package, it will be called directly (see param ``p``).

    Usage: fab [-i key] [-u user] -H host install_custom:program_name

    :type p: string
    :param p: name of the custom program to install; either a name listed in
              ``custom.yaml`` as a subordinate to a group name, or a program
              whose install method is defined in the ``cloudbio`` or
              ``custom`` packages
              (e.g., ``cloudbio/custom/cloudman.py -> install_cloudman``).
    :type automated: bool
    :param automated: If set to True, the environment is not loaded and
                      reading of the ``custom.yaml`` is skipped.
    """
    # All packages listed in custom.yaml are in lower case.
    p = p.lower()
    if not automated:
        _setup_logging(env)
        _configure_fabric_environment(env, flavor, ignore_distcheck=True)
        pkg_config = get_config_file(env, "custom.yaml").base
        packages, pkg_to_group = _yaml_to_packages(pkg_config, None)
    started = _print_time_stats("Custom install for '{0}'".format(p), "start")
    # TODO: consider delegating to _install_custom(p, pkg_to_group); behavior
    # would differ only in that pkg_to_group gets loaded whenever it is None,
    # regardless of `automated`.
    install_fn = _custom_install_function(env, p, pkg_to_group)
    install_fn(env)
    _print_time_stats("Custom install for '%s'" % p, "end", started)
def _install_custom(p, pkg_to_group=None):
    """Look up and run the install function for custom program `p`.

    When `pkg_to_group` is not supplied it is loaded from custom.yaml.
    """
    if pkg_to_group is None:
        pkg_config = get_config_file(env, "custom.yaml").base
        _packages, pkg_to_group = _yaml_to_packages(pkg_config, None)
    install_fn = _custom_install_function(env, p, pkg_to_group)
    install_fn(env)
def install_brew(p=None, version=None, flavor=None, automated=False):
    """Top level access to homebrew/linuxbrew packages.

    `p` is a single package name to install; when omitted, every configured
    package is installed. `version` optionally pins the package version.
    """
    if not automated:
        _setup_logging(env)
        _configure_fabric_environment(env, flavor, ignore_distcheck=True)
    if p is None:
        # No explicit package: install everything from the main config.
        pkg_install = _read_main_config()[0]
        brew.install_packages(env, to_install=pkg_install)
    else:
        if version:
            p = "%s==%s" % (p, version)
        brew.install_packages(env, packages=[p])
def _custom_install_function(env, p, pkg_to_group):
"""
Find custom install function to execute based on package name to
pkg_to_group dict.
"""
try:
# Allow direct calling of a program install method, even if the program
# is not listed in the custom list (ie, not contained as a key value in
# pkg_to_group). For an example, see 'install_cloudman' or use p=cloudman.
mod_name = pkg_to_group[p] if p in pkg_to_group else p
env.logger.debug("Importing module cloudbio.custom.%s" % mod_name)
mod = __import__("cloudbio.custom.%s" % mod_name,
fromlist=["cloudbio", "custom"])
except ImportError:
raise ImportError("Need to write module cloudbio.custom.%s" %
pkg_to_group[p])
replace_chars = ["-"]
try:
for to_replace in replace_chars:
p = p.replace(to_replace, "_")
env.logger.debug("Looking for custom install function %s.install_%s"
% (mod.__name__, p))
fn = getattr(mod, "install_%s" % p)
except AttributeError:
raise ImportError("Need to write a install_%s function in custom.%s"
% (p, pkg_to_group[p]))
return fn
def _read_main_config():
    """Pull a list of groups to install based on our main configuration YAML.

    Reads 'main.yaml' and returns a tuple of
    (packages, sorted libraries, custom_ignore, custom_add).
    Note custom_add may be None when "custom_additional" is absent.
    """
    yaml_file = get_config_file(env, "main.yaml").base
    with open(yaml_file) as in_handle:
        # safe_load: the config is plain lists/dicts, and yaml.load without
        # an explicit Loader is deprecated and can construct arbitrary
        # Python objects from untrusted input.
        full_data = yaml.safe_load(in_handle)
    packages = full_data.get('packages', [])
    packages = env.edition.rewrite_config_items("main_packages", packages)
    libraries = full_data.get('libraries', [])
    custom_ignore = full_data.get('custom_ignore', [])
    custom_add = full_data.get("custom_additional")
    # Normalize explicit nulls in the YAML to empty lists.
    if packages is None:
        packages = []
    if libraries is None:
        libraries = []
    if custom_ignore is None:
        custom_ignore = []
    env.logger.info("Meta-package information from {2}\n- Packages: {0}\n- Libraries: "
                    "{1}".format(",".join(packages), ",".join(libraries), yaml_file))
    return packages, sorted(libraries), custom_ignore, custom_add
# ### Library specific installation code
def _python_library_installer(config):
    """Install python specific libraries using pip, conda and easy_install.
    Handles using isolated anaconda environments.

    :param config: parsed python-libs.yaml; keys used here are "conda"
                   (anaconda-only packages) and "pypi" (pip packages).
    """
    if shared._is_anaconda(env):
        # Anaconda environment: install conda packages first, then fall
        # through to pip for anything only available on PyPI. `cmd` is bound
        # to safe_run because anaconda installs do not need sudo.
        conda_bin = shared._conda_cmd(env)
        for pname in env.flavor.rewrite_config_items("python", config.get("conda", [])):
            env.safe_run("{0} install --yes {1}".format(conda_bin, pname))
        cmd = env.safe_run
        with settings(warn_only=True):
            cmd("%s -U distribute" % os.path.join(os.path.dirname(conda_bin), "easy_install"))
    else:
        # System python: upgrade pip/distribute via easy_install; installs
        # need sudo, so `cmd` is bound to safe_sudo for the pypi loop below.
        pip_bin = shared._pip_cmd(env)
        ei_bin = pip_bin.replace("pip", "easy_install")
        env.safe_sudo("%s -U pip" % ei_bin)
        with settings(warn_only=True):
            env.safe_sudo("%s -U distribute" % ei_bin)
        cmd = env.safe_sudo
    # NOTE(review): --allow-unverified/--allow-external were removed in later
    # pip releases; this assumes the era-appropriate pip — confirm before
    # upgrading pip on target machines.
    for pname in env.flavor.rewrite_config_items("python", config['pypi']):
        cmd("{0} install --upgrade {1} --allow-unverified {1} --allow-external {1}".format(shared._pip_cmd(env), pname))  # fixes problem with packages not being in pypi
def _ruby_library_installer(config):
    """Install ruby specific gems.

    :param config: parsed ruby-libs.yaml; the "gems" key lists gem names.
    """
    # Optional suffix selecting a specific ruby version's gem binary.
    gem_ext = getattr(env, "ruby_version_ext", "")

    def _cur_gems():
        """Return the names of gems currently installed on the remote host."""
        with settings(
                hide('warnings', 'running', 'stdout', 'stderr')):
            gem_info = env.safe_run_output("gem%s list --no-versions" % gem_ext)
        return [l.rstrip("\r") for l in gem_info.split("\n") if l.rstrip("\r")]

    installed = _cur_gems()
    for gem in env.flavor.rewrite_config_items("ruby", config['gems']):
        # update current gems only to check for new installs
        if gem not in installed:
            installed = _cur_gems()
        # Present gems get updated; missing ones get installed.
        if gem in installed:
            env.safe_sudo("gem%s update %s" % (gem_ext, gem))
        else:
            env.safe_sudo("gem%s install %s" % (gem_ext, gem))
def _perl_library_installer(config):
    """Install perl libraries from CPAN with cpanminus.

    :param config: parsed perl-libs.yaml; the "cpan" key lists modules.
    """
    # Bootstrap cpanminus into the system install directory from a temp dir.
    with shared._make_tmp_dir() as tmp_dir:
        with cd(tmp_dir):
            env.safe_run("wget --no-check-certificate -O cpanm "
                         "https://raw.github.com/miyagawa/cpanminus/master/cpanm")
            env.safe_run("chmod a+rwx cpanm")
            env.safe_sudo("mv cpanm %s/bin" % env.system_install)
    sudo_str = "--sudo" if env.use_sudo else ""
    for lib in env.flavor.rewrite_config_items("perl", config['cpan']):
        # Need to hack stdin because of some problem with cpanminus script that
        # causes fabric to hang
        # http://agiletesting.blogspot.com/2010/03/getting-past-hung-remote-processes-in.html
        env.safe_run("cpanm %s --skip-installed --notest %s < /dev/null" % (sudo_str, lib))
def _haskell_library_installer(config):
    """Install haskell libraries using cabal.

    :param config: parsed haskell-libs.yaml; the "cabal" key lists packages.
    """
    # env.safe_run for consistency with every other installer in this file
    # (the original called Fabric's bare run() here).
    env.safe_run("cabal update")
    # --root-cmd choice is loop-invariant; compute it once.
    sudo_str = "--root-cmd=sudo" if env.use_sudo else ""
    for lib in config["cabal"]:
        env.safe_run("cabal install %s --global %s" % (sudo_str, lib))
# Dispatch table mapping a "<language>-libs" group name (as used in main.yaml
# and by install_libraries) to the function that installs that language's
# libraries from its "<group>.yaml" config file.
lib_installers = {
    "r-libs": libraries.r_library_installer,
    "python-libs": _python_library_installer,
    "ruby-libs": _ruby_library_installer,
    "perl-libs": _perl_library_installer,
    "haskell-libs": _haskell_library_installer,
}
def install_libraries(language):
    """High level target to install libraries for a specific language.

    `language` must correspond to a "<language>-libs" entry in
    lib_installers (e.g. python, ruby, perl, r, haskell).
    """
    _setup_logging(env)
    _check_fabric_version()
    _configure_fabric_environment(env, ignore_distcheck=True)
    group = "%s-libs" % language
    _do_library_installs([group])
def _do_library_installs(to_install):
    """Run the configured installer for each "<language>-libs" group.

    Each group name maps to a "<group>.yaml" config file and to an installer
    function in lib_installers.
    """
    for iname in to_install:
        yaml_file = get_config_file(env, "%s.yaml" % iname).base
        with open(yaml_file) as in_handle:
            # safe_load: the config is plain data; yaml.load without an
            # explicit Loader is deprecated and unsafe.
            config = yaml.safe_load(in_handle)
        lib_installers[iname](config)
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.internal_gains import ComfortViewFactorAngles
# Module-level logger used to echo the generated IDF file during the test.
log = logging.getLogger(__name__)
class TestComfortViewFactorAngles(unittest.TestCase):
    """Round-trip test for the ComfortViewFactorAngles IDF object.

    The object is populated, saved to a temporary IDF file, read back, and
    every field is compared. The 20 (surface name, angle factor) pairs were
    originally spelled out by hand; they follow the regular naming scheme
    surface_<i>_name / angle_factor_<i>, so loops with setattr/getattr
    replace ~180 copy-pasted lines.
    """

    # Number of surface/angle-factor field pairs on the object.
    NUM_SURFACES = 20

    def setUp(self):
        # mkstemp returns an *open* OS-level fd plus the path.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        # Close the fd from mkstemp before removing the file (the original
        # removed the file but leaked the descriptor).
        os.close(self.fd)
        os.remove(self.path)

    def test_create_comfortviewfactorangles(self):
        pyidf.validation_level = ValidationLevel.error
        obj = ComfortViewFactorAngles()
        # alpha
        var_name = "Name"
        obj.name = var_name
        # object-list
        var_zone_name = "object-list|Zone Name"
        obj.zone_name = var_zone_name
        # Surface name (object-list) and angle factor (real) pairs 1..20.
        surface_names = {}
        angle_factors = {}
        for i in range(1, self.NUM_SURFACES + 1):
            surface_names[i] = "object-list|Surface %d Name" % i
            angle_factors[i] = 0.5
            setattr(obj, "surface_%d_name" % i, surface_names[i])
            setattr(obj, "angle_factor_%d" % i, angle_factors[i])
        # Save to disk and read back through a fresh IDF instance.
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        idf2 = IDF(self.path)
        read_obj = idf2.comfortviewfactorangless[0]
        self.assertEqual(read_obj.name, var_name)
        self.assertEqual(read_obj.zone_name, var_zone_name)
        for i in range(1, self.NUM_SURFACES + 1):
            self.assertEqual(getattr(read_obj, "surface_%d_name" % i),
                             surface_names[i])
            self.assertAlmostEqual(getattr(read_obj, "angle_factor_%d" % i),
                                   angle_factors[i])
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class Order(resource.Resource):
    """A resource allowing for the generation secret material by Barbican.

    The resource allows to generate some secret material. It can be, for
    example, some key or certificate. The order encapsulates the workflow
    and history for the creation of a secret. The time to generate a secret can
    vary depending on the type of secret.
    """

    support_status = support.SupportStatus(version='2014.2')

    # OpenStack client plugin used to reach the service.
    default_client_name = 'barbican'

    # Entity name used by the generic client-plugin show/delete machinery.
    entity = 'orders'

    PROPERTIES = (
        NAME, PAYLOAD_CONTENT_TYPE, MODE, EXPIRATION,
        ALGORITHM, BIT_LENGTH, TYPE, REQUEST_TYPE, SUBJECT_DN,
        SOURCE_CONTAINER_REF, CA_ID, PROFILE, REQUEST_DATA,
        PASS_PHRASE
    ) = (
        'name', 'payload_content_type', 'mode', 'expiration',
        'algorithm', 'bit_length', 'type', 'request_type', 'subject_dn',
        'source_container_ref', 'ca_id', 'profile', 'request_data',
        'pass_phrase'
    )

    ATTRIBUTES = (
        STATUS, ORDER_REF, SECRET_REF, PUBLIC_KEY, PRIVATE_KEY,
        CERTIFICATE, INTERMEDIATES, CONTAINER_REF
    ) = (
        'status', 'order_ref', 'secret_ref', 'public_key', 'private_key',
        'certificate', 'intermediates', 'container_ref'
    )

    # NOTE: CERTIFICATE is rebound below; both bindings are the same string
    # 'certificate', so the ATTRIBUTES entry above is unaffected.
    ORDER_TYPES = (
        KEY, ASYMMETRIC, CERTIFICATE
    ) = (
        'key', 'asymmetric', 'certificate'
    )

    # full-cmc is declared but not yet supported in barbican
    REQUEST_TYPES = (
        STORED_KEY, SIMPLE_CMC, CUSTOM
    ) = (
        'stored-key', 'simple-cmc', 'custom'
    )

    # Per-order-type whitelist of properties; enforced in validate() below.
    ALLOWED_PROPERTIES_FOR_TYPE = {
        KEY: [NAME, ALGORITHM, BIT_LENGTH, MODE, PAYLOAD_CONTENT_TYPE,
              EXPIRATION],
        ASYMMETRIC: [NAME, ALGORITHM, BIT_LENGTH, MODE, PASS_PHRASE,
                     PAYLOAD_CONTENT_TYPE, EXPIRATION],
        CERTIFICATE: [NAME, REQUEST_TYPE, SUBJECT_DN, SOURCE_CONTAINER_REF,
                      CA_ID, PROFILE, REQUEST_DATA]
    }

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Human readable name for the secret.'),
        ),
        PAYLOAD_CONTENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The type/format the secret data is provided in.'),
        ),
        EXPIRATION: properties.Schema(
            properties.Schema.STRING,
            _('The expiration date for the secret in ISO-8601 format.'),
            constraints=[
                constraints.CustomConstraint('iso_8601'),
            ],
        ),
        ALGORITHM: properties.Schema(
            properties.Schema.STRING,
            _('The algorithm type used to generate the secret. '
              'Required for key and asymmetric types of order.'),
        ),
        BIT_LENGTH: properties.Schema(
            properties.Schema.INTEGER,
            _('The bit-length of the secret. Required for key and '
              'asymmetric types of order.'),
        ),
        MODE: properties.Schema(
            properties.Schema.STRING,
            _('The type/mode of the algorithm associated with the secret '
              'information.'),
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The type of the order.'),
            constraints=[
                constraints.AllowedValues(ORDER_TYPES),
            ],
            required=True,
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('The type of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
            constraints=[constraints.AllowedValues(REQUEST_TYPES)]
        ),
        SUBJECT_DN: properties.Schema(
            properties.Schema.STRING,
            _('The subject of the certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        SOURCE_CONTAINER_REF: properties.Schema(
            properties.Schema.STRING,
            _('The source of certificate request.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        CA_ID: properties.Schema(
            properties.Schema.STRING,
            _('The identifier of the CA to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PROFILE: properties.Schema(
            properties.Schema.STRING,
            _('The profile of certificate to use.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        REQUEST_DATA: properties.Schema(
            properties.Schema.STRING,
            _('The content of the CSR. Only for certificate orders.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
        PASS_PHRASE: properties.Schema(
            properties.Schema.STRING,
            _('The passphrase the created key. Can be set only '
              'for asymmetric type of order.'),
            support_status=support.SupportStatus(version='5.0.0'),
        ),
    }

    attributes_schema = {
        STATUS: attributes.Schema(
            _('The status of the order.'),
            type=attributes.Schema.STRING
        ),
        ORDER_REF: attributes.Schema(
            _('The URI to the order.'),
            type=attributes.Schema.STRING
        ),
        SECRET_REF: attributes.Schema(
            _('The URI to the created secret.'),
            type=attributes.Schema.STRING
        ),
        CONTAINER_REF: attributes.Schema(
            _('The URI to the created container.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING
        ),
        PUBLIC_KEY: attributes.Schema(
            _('The payload of the created public key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING
        ),
        PRIVATE_KEY: attributes.Schema(
            _('The payload of the created private key, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING
        ),
        CERTIFICATE: attributes.Schema(
            _('The payload of the created certificate, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING
        ),
        INTERMEDIATES: attributes.Schema(
            _('The payload of the created intermediates, if available.'),
            support_status=support.SupportStatus(version='5.0.0'),
            type=attributes.Schema.STRING
        ),
    }

    def handle_create(self):
        """Submit a new Barbican order built from the non-None properties."""
        info = dict((k, v) for k, v in self.properties.items()
                    if v is not None)
        order = self.client().orders.create(**info)
        order_ref = order.submit()
        self.resource_id_set(order_ref)
        # NOTE(pshchelo): order_ref is a HATEOAS reference, i.e. a string;
        # need not to be fixed re LP bug #1393268
        return order_ref

    def validate(self):
        """Check type-specific required properties and reject extras."""
        super(Order, self).validate()
        if self.properties[self.TYPE] != self.CERTIFICATE:
            # key and asymmetric orders must say how to generate material.
            if (self.properties[self.ALGORITHM] is None
                    or self.properties[self.BIT_LENGTH] is None):
                msg = _("Properties %(algorithm)s and %(bit_length)s are "
                        "required for %(type)s type of order.") % {
                    'algorithm': self.ALGORITHM,
                    'bit_length': self.BIT_LENGTH,
                    'type': self.properties[self.TYPE]}
                raise exception.StackValidationFailed(message=msg)
        # Any set property outside the per-type whitelist is an error.
        declared_props = sorted([k for k, v in six.iteritems(
            self.properties) if k != self.TYPE and v is not None])
        allowed_props = sorted(self.ALLOWED_PROPERTIES_FOR_TYPE[
            self.properties[self.TYPE]])
        diff = sorted(set(declared_props) - set(allowed_props))
        if diff:
            msg = _("Unexpected properties: %(unexpected)s. Only these "
                    "properties are allowed for %(type)s type of order: "
                    "%(allowed)s.") % {
                'unexpected': ', '.join(diff),
                'type': self.properties[self.TYPE],
                'allowed': ', '.join(allowed_props)}
            raise exception.StackValidationFailed(message=msg)

    def check_create_complete(self, order_href):
        """Poll the order; True when ACTIVE, raise on ERROR status."""
        order = self.client().orders.get(order_href)
        if order.status == 'ERROR':
            reason = order.error_reason
            code = order.error_status_code
            msg = (_("Order '%(name)s' failed: %(code)s - %(reason)s")
                   % {'name': self.name, 'code': code, 'reason': reason})
            raise exception.Error(msg)
        return order.status == 'ACTIVE'

    def _resolve_attribute(self, name):
        """Fetch an attribute value from the live order.

        Key/certificate payload attributes live on the generated container,
        so those are resolved through the container rather than the order.
        """
        client = self.client()
        order = client.orders.get(self.resource_id)
        if name in (
                self.PUBLIC_KEY, self.PRIVATE_KEY, self.CERTIFICATE,
                self.INTERMEDIATES):
            container = client.containers.get(order.container_ref)
            secret = getattr(container, name)
            return secret.payload
        return getattr(order, name)

    # TODO(ochuprykov): remove this method when bug #1485619 will be fixed
    def _show_resource(self):
        """Return the order's formatted fields as a plain dict."""
        order = self.client().orders.get(self.resource_id)
        info = order._get_formatted_entity()
        return dict(zip(info[0], info[1]))
def resource_mapping():
    """Map the Heat resource type name to its implementing class."""
    return {'OS::Barbican::Order': Order}
|
"""functions working with the lightFM recommender model."""
# imports
import boto3
import lightfm
import pandas as pd
import numpy as np
import pickle
import psycopg2
from lightfm.data import Dataset
import os
import copy
# get model
# remember that you may need to use two dots for this to work
def get_lightfm_model():
    """Download the pickled LightFM model from S3 and return it.

    Credentials are read from the ACCESS_KEY_ID / SECRET_ACCESS_KEY
    environment variables; the pickle is cached under ./lightfm/.
    """
    s3 = boto3.client(
        's3',
        aws_access_key_id=os.getenv("ACCESS_KEY_ID"),
        aws_secret_access_key=os.getenv("SECRET_ACCESS_KEY")
    )
    s3.download_file(
        'yelpsense',
        'models/recommender/lightfm/lightfm_model.p',
        './lightfm/lightfm_model.p')
    # Context manager closes the handle; the original leaked the open file.
    with open("./lightfm/lightfm_model.p", "rb") as file_obj:
        model = pickle.load(file_obj)
    return model
def get_lightfm_dataset():
    """Download the pickled, shape-fitted (but empty) LightFM Dataset.

    The dataset carries the user/item id mappings needed to build
    interaction matrices compatible with the trained model.
    """
    s3 = boto3.client(
        's3',
        aws_access_key_id=os.getenv("ACCESS_KEY_ID"),
        aws_secret_access_key=os.getenv("SECRET_ACCESS_KEY")
    )
    s3.download_file(
        'yelpsense',
        'models/recommender/lightfm/lightfm_empty_dataset.p',
        './lightfm/lightfm_empty_dataset.p')
    # Context manager closes the handle; the original leaked the open file.
    with open("./lightfm/lightfm_empty_dataset.p", "rb") as file_obj:
        empty_dataset = pickle.load(file_obj)
    return empty_dataset
def make_lightfm_user_set(dataset, businesses, stars):
    """Build LightFM interactions for a synthetic 'new_user'.

    Args:
        dataset: a fitted LightFM Dataset (deep-copied, not mutated).
        businesses: iterable of business ids the user reviewed.
        stars: matching iterable of 1-5 star ratings.

    Returns:
        (interactions, dataset_copy) tuple.
    """
    user_reviews = pd.DataFrame({'business_id': businesses, 'stars': stars})
    # Collapse stars into a binary signal: 1-3 stars -> -1, 4-5 stars -> +1.
    user_reviews['recommend'] = user_reviews['stars'].map(
        {1: -1, 2: -1, 3: -1, 4: 1, 5: 1})
    user_reviews['user_id'] = 'new_user'
    # Copy so the shared, shaped dataset is never mutated.
    user_dataset = copy.deepcopy(dataset)
    triples = [
        (row['user_id'], row['business_id'], row['recommend'])
        for _, row in user_reviews.iterrows()
    ]
    (user_interactions, _weights) = user_dataset.build_interactions(triples)
    return (user_interactions, user_dataset)
def make_business_dataframe():
    """Load every business row from the RDS database into a DataFrame.

    Connection settings come from AWS_RDS_* environment variables.
    """
    conn = psycopg2.connect(host=os.getenv('AWS_RDS_HOST'),
                            database=os.getenv('AWS_RDS_DB'),
                            user=os.getenv('AWS_RDS_USER'),
                            # Restored from a broken '<PASSWORD>' redaction
                            # placeholder, which was not valid Python.
                            password=os.getenv('AWS_RDS_PASSWORD'),
                            port=os.getenv('AWS_RDS_PORT'))
    try:
        # Plain string: the original used an f-string with no placeholders.
        query = ("SELECT business_id, name, address, city, "
                 "aggregate_rating, categories FROM business_data")
        businessframe = pd.read_sql(sql=query, con=conn)
    finally:
        # Close the connection even if the query fails (original leaked it).
        conn.close()
    return businessframe
def lightfm_inference(model, user_interactions, user_dataset):
    """Fine-tune a copy of the model on the new user and rank businesses.

    Args:
        model: trained LightFM model (never mutated; a deep copy is tuned).
        user_interactions: interactions matrix for 'new_user'.
        user_dataset: Dataset copy carrying the id mappings.

    Returns:
        DataFrame of businesses sorted by predicted preference, best first.
    """
    # Copy the model object to prevent contamination of the shared model.
    pretrained = copy.deepcopy(model)
    pretrained.fit_partial(user_interactions, epochs=50)
    businessframe = make_business_dataframe()
    businessframe['lightFM_mapping'] = businessframe['business_id'].apply(
        lambda x: user_dataset._item_id_mapping[x])
    # BUG FIX: predict with the fine-tuned copy. The original called
    # model.predict, silently discarding the fit_partial() above.
    businessframe['recommender_values'] = pretrained.predict(
        user_ids=[user_dataset._user_id_mapping['new_user']],
        item_ids=list(businessframe['lightFM_mapping'].values),
        num_threads=1)
    # NOTE(review): despite the name, this returns ALL businesses sorted by
    # score, not just ten -- callers appear to slice downstream; confirm.
    top_ten = businessframe[['business_id',
                             'name',
                             'address',
                             'city',
                             'aggregate_rating',
                             'categories',
                             'recommender_values']].sort_values(
        by='recommender_values', ascending=False)
    return top_ten
def select_from_db(city='', business_name='', address='', category=''):
    """Fetch businesses matching the given filters.

    Each filter is a case-insensitive substring match (ILIKE); empty
    strings match everything.

    Returns:
        List of row tuples from psycopg2's fetchall().
    """
    conn = psycopg2.connect(host=os.getenv('AWS_RDS_HOST'),
                            database=os.getenv('AWS_RDS_DB'),
                            user=os.getenv('AWS_RDS_USER'),
                            # Restored from a broken '<PASSWORD>' redaction
                            # placeholder, which was not valid Python.
                            password=os.getenv('AWS_RDS_PASSWORD'),
                            port=os.getenv('AWS_RDS_PORT'))
    try:
        cur = conn.cursor()
        # Parameterized query: the original interpolated user input straight
        # into the SQL string (SQL injection).
        query = ("SELECT business_id, name, address, city, "
                 "aggregate_rating, categories FROM business_data "
                 "WHERE city ILIKE %s AND name ILIKE %s "
                 "AND address ILIKE %s AND categories ILIKE %s")
        params = tuple('%{}%'.format(value)
                       for value in (city, business_name, address, category))
        cur.execute(query, params)
        output = cur.fetchall()
    finally:
        # Close the connection even if the query fails (original leaked it).
        conn.close()
    return output
|
<filename>location_similarity.py
import pymongo as pm
import math
import os
from bs4 import BeautifulSoup
import time
# Calculating Cosine Similarity
def cosine_similarity1(db_location_data, input_location_data, k, collection, model):
    """Print the top-k database locations most similar to the input location.

    Args:
        db_location_data: list of [terms_vector, freq_vector] pairs, each
            aligned with the input vector; index 0 of each vector is the
            location id, term weights start at index 1.
        input_location_data: [terms_vector, freq_vector] for the query location.
        k: number of matches to print.
        collection: MongoDB collection holding the full per-location term docs.
        model: weight field to use ('TF', 'DF' or 'TF-IDF').
    """
    cosine_sim_output = []
    for record in db_location_data:
        sumxx, sumxy, sumyy = 0, 0, 0
        v_mul = []
        y_square = 0
        # Denominator for the candidate uses its FULL term vector from the
        # DB (y_square), not only the terms shared with the input.
        for db_data in collection.find({'_id': record[0][0]}):
            for data in db_data['DATA']:
                y_square += data[model] * data[model]
        for i in range(1, len(input_location_data[1][1:]) + 1):
            x = input_location_data[1][i]
            y = record[1][i]
            sumxx += x * x
            sumyy += y * y
            sumxy += x * y
            # Element-wise products, kept to find top contributing terms.
            v_mul.append(x * y)
        # NOTE(review): sumyy is computed but never used -- y_square takes
        # its place in the denominator; presumably intentional, confirm.
        cosine_sim_output.append([record[0][0], sumxy / math.sqrt(sumxx * y_square), record[0], v_mul])
    cosine_sim_output = sorted(cosine_sim_output, key=lambda z: z[1], reverse=True)
    if k > len(cosine_sim_output):
        k = len(cosine_sim_output)
    for item in range(0, k):
        top_three_terms_list = []
        # The three largest element-wise products identify the terms that
        # contributed most to the similarity score.
        for item1 in sorted(zip(cosine_sim_output[item][3], cosine_sim_output[item][2][1:]), reverse=True)[:3]:
            top_three_terms_list.append(item1[1])
        del cosine_sim_output[item][2:]
        cosine_sim_output[item].append(top_three_terms_list)
        print(cosine_sim_output[item])
def main():
    """Load POI text descriptors into MongoDB, then print the k locations
    most similar (cosine similarity) to a user-chosen location.

    The collection and database are dropped again before returning.
    """
    start_time = time.time()
    # Connecting to Database and Creating Collection
    database_name = 'location_data'
    collection_name = 'location_text_descriptors'
    try:
        db_client = pm.MongoClient('localhost', 27017)
    except pm.errors.ConnectionFailure:
        print('Could not connect to the database. Please re-start the program!')
        # BUG FIX: bail out -- the original fell through and every later
        # statement raised NameError on the undefined db_client.
        return
    db = db_client[database_name]
    collection = db[collection_name]
    # Loading data in MongoDB. os.path.join keeps the path portable; the
    # original hard-coded Windows backslashes, which are also invalid
    # string escapes such as '\d'.
    location_text_desc_path = os.path.join(
        os.getcwd(), 'dataset', 'desctxt',
        'devset_textTermsPerPOI.wFolderNames.txt')
    with open(location_text_desc_path, 'r', encoding='UTF-8') as fileobject:
        for line in fileobject:
            data = line.split(' ')
            data_list = []
            # The first token is the location name; its '_'-separated parts
            # are repeated as separate tokens, so term tuples start after
            # them. (A no-op duplicate len() call was removed here.)
            num = 1 + len(data[0].split('_'))
            # Each term contributes 4 tokens: "term" TF DF TF-IDF.
            while num < len(data) - 3:
                data_dict = {
                    'TERM': data[num].strip('\"'),
                    'TF': int(data[num + 1]),
                    'DF': int(data[num + 2]),
                    'TF-IDF': float(data[num + 3]),
                }
                num += 4
                data_list.append(data_dict)
            collection.insert_one({'_id': data[0], 'DATA': data_list})
    # Taking user input: "<location id> <model> <k>"
    user_input = input('Please enter the location id, model and k : ')
    input_location_id = int(user_input.split(' ')[0])
    model = user_input.split(' ')[1]
    k = int(user_input.split(' ')[2])
    # Parsing devset_topics.xml to map location id to location name
    devset_topic_file_loc = os.path.join(os.getcwd(), 'dataset',
                                         'devset_topics.xml')
    with open(devset_topic_file_loc) as file:
        soup = BeautifulSoup(file, 'html.parser')
        for topic in soup.find_all('topic'):
            location_id = int(topic.number.contents[0])
            location_name = topic.title.contents[0]
            if location_id == input_location_id:
                input_location_name = location_name
    # Creating Input Location Vector. Index 0 of both lists holds the
    # location id; term names / weights start at index 1.
    input_location_data_terms = []
    input_location_data_freq = []
    for record in collection.find({'_id': input_location_name}).sort('DATA.TERM', pm.ASCENDING):
        input_location_data_terms.append(record['_id'])
        input_location_data_freq.append(record['_id'])
        for data in sorted(record['DATA'], key=lambda z: z['TERM']):
            input_location_data_terms.append(data['TERM'])
            input_location_data_freq.append(data[model])
    input_location_data = [input_location_data_terms, input_location_data_freq]
    # Creating a vector for every other location that shares at least one
    # term with the input; positions are aligned with the input vector and
    # missing terms are zero-filled.
    db_location_data = []
    for records in collection.find({'DATA.TERM': {'$in': input_location_data_terms[1:]}}):
        db_location_data_terms = []
        db_location_data_freq = []
        x = []
        db_location_data_terms.append(records['_id'])
        db_location_data_freq.append(records['_id'])
        for i in range(1, len(input_location_data_terms)):
            term_found = False
            for data in sorted(records['DATA'], key=lambda z: z['TERM']):
                if data['TERM'] == input_location_data_terms[i]:
                    db_location_data_terms.append(data['TERM'])
                    db_location_data_freq.append(data[model])
                    term_found = True
                    break
            if term_found is False:
                db_location_data_terms.append(0)
                db_location_data_freq.append(0)
        while len(db_location_data_terms) < len(input_location_data_terms):
            db_location_data_terms.append(0)
            db_location_data_freq.append(0)
        x.append(db_location_data_terms)
        x.append(db_location_data_freq)
        db_location_data.append(x)
    # Passing Input User Vector and Database User Vectors for Cosine
    # Similarity Calculations
    cosine_similarity1(db_location_data, input_location_data, k, collection, model)
    end_time = time.time()
    print('Execution Time : ', end_time - start_time)
    # Post Run Cleanup
    collection.drop()
    db_client.drop_database(database_name)
if __name__ == '__main__':
main()
|
"""
pre-processing the data
"""
# Import necessary libraries
import argparse
import pickle
import pandas as pd
from colorama import Fore, Style
###########################################################################
# Description and help
###########################################################################
DESCRIPTION = "Reads the annotation file and Embeddings \
from the selected folder and outputs the base dataframe"
HELP = "Input the annotation file path [Label_1, Label_2, Label_3] \n \
and path for its embeddings"
###########################################################################
#parse the input arguments given from command line
###########################################################################
PARSER = argparse.ArgumentParser(description=DESCRIPTION)
PARSER.add_argument('-annotation_file', '--annotation_file', action='store',
help=HELP)
PARSER.add_argument('-path_for_saved_embeddings', '--path_for_saved_embeddings', action='store',
help="Input the path where embeddings are stored")
PARSER.add_argument('-path_to_save_dataframe', '--path_to_save_dataframe', action='store',
help="Input the path to save dataframe (.pkl) file")
RESULT = PARSER.parse_args()
LABELS_NAME_COLUMNS = ['Label_1', "Label_2", "Label_3", "Label_4"]
###########################################################################
###########################################################################
def read_data_files(filename):
    """Load the annotation CSV file; missing cells become empty strings."""
    return pd.read_csv(filename).fillna("")
###########################################################################
# create dictionary
###########################################################################
SET_DICTIONARY = {"crow":"Crow",
"honking":"Vehicle",
"stream":"Stream",
"frogmouth":"Frog",
"birdf":"Bird",
"conersation":"Conversation",
"honkiong":"Vehicle",
"peafowl":"Bird",
"convertsation":"Conversation",
"inesct":"Insect",
"helicopter":"Vehicle",
"aeroplane":"Vehicle",
"plane":"Vehicle",
"birtd":"Bird",
"frog":"Frog",
"raini":"Rain",
"rain":"Rain",
"forg":"Frog",
"insect":"Insect",
"manmade":"Conversation",
"thunder":"Thunder",
"honkinig":"Vehicle",
"conversatoin":"Conversation",
"none":"",
"vehicle":"Vehicle",
"music":"Music",
"dog barking":"Dog",
"human":"Speech",
"conservation":"Conversation",
"conversation":"Conversation",
"bird":"Bird",
"felling (axe)":"Tools",
"wind":"Wind",
"biird":"Bird",
"footsteps":"Walk, footsteps",
"door closing":"",
"buzzing":"Insect",
"Silence" :"Silence",
"twig snap":"",
"buzz":"Insect",
"fly/buzzing":"Insect",
"----":''}
###########################################################################
# Helper Function
###########################################################################
def check_for_null(array):
    """Remove every empty-string entry from `array` in place and return it.

    The list is mutated (matching the original behaviour) and also
    returned so it can be used inside .apply() pipelines.
    """
    # The original wrapped this in a redundant conditional expression used
    # as a statement; the while condition alone already guards the remove().
    while "" in array:
        array.remove("")
    return array
def preprocess_data(data_frame, label_columns_list, data_file_name):
    """
    Normalise raw label columns via SET_DICTIONARY, collect them into a
    single 'labels_name' list column, and drop rows left with no labels.
    (Python 2 file: print statements.)
    """
    print '\npreprocessing..'
    print Style.RESET_ALL
    for col in label_columns_list:
        # NOTE(review): strip("") strips no characters at all -- this was
        # probably meant to be strip() (whitespace); confirm before changing.
        data_frame[col] = data_frame[col].apply(lambda arr: arr.strip(""))
        # Map raw/misspelled annotations to canonical label names.
        data_frame[col] = data_frame[col].replace(SET_DICTIONARY)
    data_frame['labels_name'] = data_frame[label_columns_list].values.tolist()
    # Drop the empty-string placeholders inside each per-row label list.
    data_frame['labels_name'] = data_frame['labels_name'].apply(lambda arr: check_for_null(arr))
    print Fore.GREEN + "pre-processing Done:" + data_file_name.split("/")[-1]
    print Style.RESET_ALL
    # removing the null labelling rows
    index_null = data_frame['labels_name'].loc[data_frame['labels_name'].apply(lambda arr: len(arr) == 0)].index
    data_frame = data_frame.drop(index_null)
    # Re-number rows 0..n-1 after the drop.
    data_frame.index = range(data_frame.shape[0])
    return data_frame
def check_for_unknown_label(data_frame, label_columns_list):
    """
    Print any label values present in the data but absent from
    SET_DICTIONARY (as either a raw key or a canonical value).
    Purely diagnostic; the dataframe is not modified.
    """
    labels_not_found = []
    for col in label_columns_list:
        for each_label in data_frame[col].values.tolist():
            if each_label in SET_DICTIONARY.keys() or each_label in SET_DICTIONARY.values():
                pass
            else:
                labels_not_found.append(each_label)
    print "Labels not found in Dictionary: \n", list(set(labels_not_found))
def read_embeddings(data_frame, path_to_embeddings):
    """
    Attach a pickled embedding to each row ('features' column); rows whose
    embedding file is missing or unreadable are dropped.
    """
    embeddings_list = []
    test_index = []
    for each_file in data_frame['wav_file'].values.tolist():
        try:
            # Embedding pickle shares the wav file's basename (.wav -> .pkl).
            with open(path_to_embeddings+each_file[:-4]+".pkl", 'rb') as file_obj:
                embeddings_list.append(pickle.load(file_obj))
        except:
            # NOTE(review): bare except swallows every error, and .index()
            # returns only the FIRST occurrence -- duplicate wav_file values
            # would drop the wrong row; verify filenames are unique.
            test_index.append(data_frame['wav_file'].values.tolist().index(each_file))
    data_frame = data_frame.drop(test_index)
    data_frame.index = range(data_frame.shape[0])
    data_frame['features'] = embeddings_list
    return data_frame
def initiate_preprocessing(data_file_name, path_to_embeddings):
    """
    Read the annotation file, normalise its labels, and optionally attach
    the saved embeddings. Returns the resulting dataframe.
    """
    data = read_data_files(data_file_name)
    # Diagnostic pass: report annotations the dictionary does not cover.
    check_for_unknown_label(data, LABELS_NAME_COLUMNS)
    data = preprocess_data(data, LABELS_NAME_COLUMNS, data_file_name)
    data = data.drop(LABELS_NAME_COLUMNS, axis=1)
    # Guard clause: without an embeddings path, return the labels alone.
    if not path_to_embeddings:
        return data
    return read_embeddings(data, path_to_embeddings)
def write_dataframe(path_to_write, dataframe):
    """
    Pickle the dataframe to path_to_write; print a usage hint when no
    path was supplied.
    """
    if path_to_write:
        # NOTE(review): text mode "w" only works on Python 2 (this file
        # uses print statements throughout); Python 3 would require "wb".
        with open(path_to_write, "w") as file_obj:
            pickle.dump(dataframe, file_obj)
    else:
        print "Input path to write dataframe"
###########################################################################
# Main Function
###########################################################################
if __name__ == "__main__":
if RESULT.path_for_saved_embeddings:
DATAFRAME = initiate_preprocessing(RESULT.annotation_file, RESULT.path_for_saved_embeddings)
else:
DATAFRAME = initiate_preprocessing(RESULT.annotation_file, None)
if RESULT.path_to_save_dataframe:
write_dataframe(RESULT.path_to_save_dataframe, DATAFRAME)
else:
write_dataframe(None, DATAFRAME)
|
from __future__ import division, print_function, absolute_import
from base_model import BaseModel
import tensorflow as tf
import numpy as np
import random
import pandas as pd
import re
import cv2
import time
from tensorflow.examples.tutorials.mnist import input_data
import tflearn.datasets.mnist as mnist
from tflearn.datasets import cifar10
from tflearn.data_utils import to_categorical, pad_sequences
from logistic_regression import *
from AlexNet import *
from vgg16 import *
from GoogleNetV1Easy import *
from GoogleNetV2Easy import *
from GoogleNetV3 import *
from ResNet import *
from res_bottleneck import *
from dense_bottleneck import *
from resnext import *
from densenet import *
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
#This Code for testing googlenet
def GetData(width, height):
    """Load CIFAR-10 and resize every image to (width, height).

    Returns:
        (X, Y, X_test, Y_test) with one-hot labels; images are lists of
        cv2-resized arrays.
    """
    def getimage(X):
        # Resize one split, printing progress every 10k images.
        result = []
        for idx in range(0, len(X)):
            if idx % 10000 == 0:
                # BUG FIX: the original printed len(inputx), a name that does
                # not exist in this scope (NameError on the first progress line).
                print("load data:\t" + str(idx) + "/" + str(len(X)))
            im = cv2.resize(X[idx], (width, height), interpolation=cv2.INTER_CUBIC)
            result.append(im)
        return result
    (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/share/cnndata/", one_hot=True)
    X = getimage(X)
    X_test = getimage(X_test)
    return X, Y, X_test, Y_test
def GetCifar10Batch(width, height, inputX):
    """Resize a batch of images to (width, height) via cubic interpolation."""
    return [
        cv2.resize(image, (width, height), interpolation=cv2.INTER_CUBIC)
        for image in inputX
    ]
def TestVGG16():
    """Train VGG16 on CIFAR-10 resized to 224x224, save/reload, evaluate."""
    model = Vgg16(10)
    X, Y, X_test, Y_test = GetData(224, 224)
    hyperparams = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 1e-3,
        "batch_size": 64,
        "num_epochs": 100,
        "keep_prob": 0.8,
    }
    feed_data = {"inputs": X, "labels": Y, "ValidX": X_test, "ValidY": Y_test}
    model.set_parameter(hyperparams)
    model.train(feed_data)
    # Round-trip through a checkpoint before evaluating.
    model.model_save("./ModelSavePath/vgg16.ckpt")
    model.model_load("./ModelSavePath/vgg16.ckpt")
    dic = model.evaluate(feed_data)
    print("Evaluate:" + str(dic))
    return
def TestGoogleV1():
    """Train GoogLeNet v1 on CIFAR-10 resized to 227x227, save/reload, evaluate."""
    m = GoogleNetV1(10)
    params = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 1e-3,
        "batch_size": 32,
        "num_epochs": 100,
    }
    X, Y, X_test, Y_test = GetData(227,227)
    feed_data = {"inputs": X, "labels": Y}
    test_feed_data = {"inputs":X_test,"labels":Y_test}
    m.set_parameter(params)
    time_start = time.time()
    m.train(feed_data)
    time_end = time.time()
    time_delta = time_end - time_start
    # NOTE(review): time.time() already returns seconds; /1000 does not
    # convert anything meaningful -- presumably a leftover, confirm.
    print(time_delta/1000)
    m.model_save("/home/share/model/GoogLeNet.ckpt")
    m.model_load("/home/share/model/GoogLeNet.ckpt")
    dic = m.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
def TestGoogleV2():
    """Train GoogLeNet v2 on CIFAR-10 resized to 227x227, save/reload, evaluate."""
    m = GoogleNetV2(10)
    params = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 1e-3,
        "batch_size": 32,
        "num_epochs": 100,
    }
    X, Y, X_test, Y_test = GetData(227, 227)
    feed_data = {"inputs": X, "labels": Y}
    test_feed_data = {"inputs":X_test,"labels":Y_test}
    m.set_parameter(params)
    time_start = time.time()
    m.train(feed_data)
    time_end = time.time()
    time_delta = time_end - time_start
    # NOTE(review): time.time() already returns seconds; /1000 looks like
    # a leftover, confirm.
    print(time_delta/1000)
    # NOTE(review): checkpoint path is shared with TestGoogleV1 -- running
    # both overwrites the same file; confirm this is intended.
    m.model_save("/home/share/model/GoogLeNet.ckpt")
    m.model_load("/home/share/model/GoogLeNet.ckpt")
    dic = m.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
def TestGoogleV3():
    """Train GoogLeNet v3 on CIFAR-10 with manual 299x299 mini-batches.

    Unlike the other drivers, this resizes per-batch (train_batch) instead
    of resizing the whole dataset up front, to keep memory bounded at 299x299.
    """
    m = GoogleNetV3(10)
    (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/share/cnndata/", one_hot=True)
    params = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 1e-3,
        "batch_size": 32,
        "num_epochs": 1,
    }
    m.set_parameter(params)
    time_start = time.time()
    # 30k manually-driven steps; each samples 32 images without replacement
    # and resizes just that batch to 299x299.
    for max_step in range(0,30000):
        index = np.random.choice(np.arange(len(X)),32,replace=False)
        curbatchx = X[index]
        curbatchy = Y[index]
        curbatchx= GetCifar10Batch(299, 299,curbatchx)
        feed_data = {"inputs": curbatchx, "labels": curbatchy}
        acc = m.train_batch(feed_data)
        if (max_step%300 == 0):
            print("step:"+str(max_step))
            print("accuracy"+str(acc))
    '''
    for batchnumber in range(0,5):
        print("batch:\t"+str(batchnumber))
        index = np.random.permutation(len(X))
        xinput = X[index]
        Ycur = Y[index]
        xinput = xinput[0:10000]
        Ycur = Ycur[0:10000]
        #xinput = X[10000*batchnumber:10000*(batchnumber+1)]
        #Ycur = Y[10000*batchnumber:10000*(batchnumber+1)]
        Xcur= GetCifar10Batch(299, 299,xinput)
        print(len(Xcur))
        print(len(Ycur))
    '''
    X_test = GetCifar10Batch(299,299,X_test)
    test_feed_data = {"inputs":X_test,"labels":Y_test}
    time_end = time.time()
    time_delta = time_end - time_start
    # NOTE(review): time.time() already returns seconds; /1000 looks like
    # a leftover, confirm.
    print(time_delta/1000)
    m.model_save("/home/share/model/GoogLeNetV3.ckpt")
    m.model_load("/home/share/model/GoogLeNetV3.ckpt")
    dic = m.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
def TestResNet():
    """Train a ResNet on raw CIFAR-10 (no resize), save/reload, evaluate."""
    model = ResNet()
    (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/share/cnndata/", one_hot=True)
    hyperparams = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 0.1,
        "batch_size": 128,
        "num_epochs": 200,
        "class_num": 10,
        "block_num": 5,
    }
    feed_data = {"inputs": X, "labels": Y}
    test_feed_data = {"inputs": X_test, "labels": Y_test}
    model.set_parameter(hyperparams)
    started = time.time()
    model.train(feed_data)
    elapsed = time.time() - started
    print(elapsed / 1000)
    # Round-trip through a checkpoint before evaluating.
    model.model_save("/home/share/model/resnet.ckpt")
    model.model_load("/home/share/model/resnet.ckpt")
    dic = model.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
def Test_residual_bottleneck():
    """Train the bottleneck-ResNet variant on CIFAR-10, save/reload, evaluate."""
    m = res_bottleneck()
    (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/share/cnndata/", one_hot=True)
    # Alternative MNIST input, kept for reference:
    #X, Y, X_test, Y_test = mnist.load_data(one_hot=True)
    #X = X.reshape([-1, 28, 28, 1])
    #X_test = X_test.reshape([-1, 28, 28, 1])
    params = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 1e-3,
        "batch_size": 128,
        "num_epochs": 200,
        "class_num":10,
        "block_num":5,
        "decay_steps": 100,
        "decay_rate": 0.96,
    }
    feed_data = {"inputs": X, "labels": Y}
    test_feed_data = {"inputs":X_test,"labels":Y_test}
    m.set_parameter(params)
    time_start = time.time()
    m.train(feed_data)
    time_end = time.time()
    time_delta = time_end - time_start
    # NOTE(review): time.time() already returns seconds; /1000 looks like
    # a leftover, confirm.
    print(time_delta/1000)
    m.model_save("/home/share/model/res_bottleneck.ckpt")
    m.model_load("/home/share/model/res_bottleneck.ckpt")
    dic = m.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
def Test_resnext():
    """Train a ResNeXt on CIFAR-10 (validation data passed to train), evaluate."""
    m = resnext()
    (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/share/cnndata/", one_hot=True)
    params = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 0.1,
        "batch_size": 256,
        "num_epochs": 30,
        "class_num":10,
        "block_num":5,
        "decay_steps": 32000,
        "decay_rate": 0.1,
    }
    feed_data = {"inputs": X, "labels": Y}
    test_feed_data = {"inputs":X_test,"labels":Y_test}
    m.set_parameter(params)
    time_start = time.time()
    # Unlike the earlier drivers, train() here also receives the test feed
    # (used by the model for validation during training).
    m.train(feed_data,test_feed_data)
    time_end = time.time()
    time_delta = time_end - time_start
    # NOTE(review): time.time() already returns seconds; /1000 looks like
    # a leftover, confirm.
    print(time_delta/1000)
    m.model_save("/home/share/model/res_next.ckpt")
    m.model_load("/home/share/model/res_next.ckpt")
    dic = m.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
def test_densenet():
    """Train a DenseNet on CIFAR-10 (validation data passed to train), evaluate."""
    m = densenet()
    (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/share/cnndata/", one_hot=True)
    params = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 0.1,
        "batch_size": 32,
        "num_epochs": 200,
        "class_num":10,
        "block_num":12,
        "decay_steps": 32000,
        "decay_rate": 0.1,
        "growth": 12,
    }
    feed_data = {"inputs": X, "labels": Y}
    test_feed_data = {"inputs":X_test,"labels":Y_test}
    m.set_parameter(params)
    time_start = time.time()
    m.train(feed_data,test_feed_data)
    time_end = time.time()
    time_delta = time_end - time_start
    # NOTE(review): time.time() already returns seconds; /1000 looks like
    # a leftover, confirm.
    print(time_delta/1000)
    # NOTE(review): checkpoint path res_next.ckpt is shared with
    # Test_resnext -- running both overwrites that file; confirm intended.
    m.model_save("/home/share/model/res_next.ckpt")
    m.model_load("/home/share/model/res_next.ckpt")
    dic = m.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
def test_dense_bottleneck():
    """Train the bottleneck DenseNet on CIFAR-10, save/reload, evaluate."""
    m = dense_bottleneck()
    (X, Y), (X_test, Y_test) = cifar10.load_data(dirname="/home/share/cnndata/", one_hot=True)
    params = {
        "loss": "square_loss",
        "metrics": ["loss"],
        "optimizer": "sgd",
        "learning_rate": 0.01,
        "batch_size": 64,
        "num_epochs": 200,
        "class_num":10,
        "block_num":12,
        "decay_steps": 32000,
        "decay_rate": 0.1,
        "growth": 12,
    }
    feed_data = {"inputs": X, "labels": Y}
    test_feed_data = {"inputs":X_test,"labels":Y_test}
    m.set_parameter(params)
    time_start = time.time()
    # train() also receives the test feed (used for validation in-training).
    m.train(feed_data,test_feed_data)
    time_end = time.time()
    time_delta = time_end - time_start
    # NOTE(review): time.time() already returns seconds; /1000 looks like
    # a leftover, confirm.
    print(time_delta/1000)
    m.model_save("/home/share/model/densenet_bottleneck.ckpt")
    m.model_load("/home/share/model/densenet_bottleneck.ckpt")
    dic = m.evaluate(test_feed_data)
    print("Evaluate:" + str(dic))
#TestVGG16()
#TestGoogleV1()
#TestGoogleV2()
#TestGoogleV3()
#TestResNet()
#Test_residual_bottleneck()
#Test_resnext()
#test_densenet()
test_dense_bottleneck() |
<reponame>Psemp/oc_project_11
import pygame
from pathlib import Path
from app.models.map import Map
from app.models.position_nt import Position
from app.scripts.startup import startup
def gametest():
    """Run the maze game loop: Bernard collects three items to gain
    'consciousness', then must touch Ford to win; touching Ford before
    that loses.

    All sprites are drawn on a 32px grid; the bottom 32px strip shows
    collected items and the consciousness indicator.
    """
    # BUG FIX: removed a dead `game_map = Map(20, 20)` that was immediately
    # overwritten by the startup() asset below.
    assets = startup()
    game_map = assets["maze"]
    game_map.make_map()
    # BUG FIX: the original wrote `pygame.init` without parentheses, which
    # never called the function.
    pygame.init()
    game_over = False
    consciousness = False
    width = len(game_map.box) * 32
    height = len(game_map.box[0]) * 32
    # Extra 32px row at the bottom is the inventory/status bar.
    win = pygame.display.set_mode((width, height + 32))
    characters = assets["characters"]
    ford = characters[0]
    bernard = characters[1]
    items = assets["items"]
    tablet = items[0]
    glasses = items[1]
    toy = items[2]

    def get_path(file_name):
        # Resolve an asset name inside the package's images/ directory.
        return str(Path(__file__).parent.parent.joinpath("images", file_name))

    def get_image(file_name):
        return pygame.image.load(get_path(file_name)).convert()

    def get_image_alpha(file_name):
        return pygame.image.load(get_path(file_name)).convert_alpha()

    known_images = ['floor.png', 'wall.png']
    known_alpha_images = [
        "bernard.png",
        "consciousness.png",
        "ford.png",
        "defeat.png",
        "glasses.png",
        "maze.png",
        "tablet.png",
        "victory.png",
    ]
    # Preload every sprite once, outside the game loop.
    images = {}
    for known_image in known_images:
        images[known_image] = get_image(known_image)
    for known_image in known_alpha_images:
        images[known_image] = get_image_alpha(known_image)

    run = True
    while run:
        pygame.time.delay(100)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        # GAME "RULES"
        # Meeting Ford without consciousness kills Bernard; with it, Ford.
        if bernard.alive and ford.alive and bernard.position == ford.position and not consciousness:
            bernard.alive = False
            game_over = True
        if bernard.alive and ford.alive and bernard.position == ford.position and consciousness:
            ford.alive = False
            game_over = True
        if bernard.alive and tablet.gathered and toy.gathered and glasses.gathered:
            consciousness = True
        if bernard.alive and not tablet.gathered and bernard.position == tablet.position:
            tablet.gathered = True
        if bernard.alive and not glasses.gathered and bernard.position == glasses.position:
            glasses.gathered = True
        if bernard.alive and not toy.gathered and bernard.position == toy.position:
            toy.gathered = True
        # /GAME "RULES"
        # DRAW
        win.fill((255, 255, 255))
        for i in range(0, len(game_map.floors)):
            win.blit(images["floor.png"], (game_map.floors[i]))
        for i in range(0, len(game_map.walls)):
            win.blit(images["wall.png"], (game_map.walls[i]))
        # Items render on the grid until collected, then in the status bar.
        if not glasses.gathered:
            win.blit(images["glasses.png"], ((glasses.position.x_axis * 32, glasses.position.y_axis * 32)))
        else:
            win.blit(images["glasses.png"], (0, height))
        if not tablet.gathered:
            win.blit(images["tablet.png"], ((tablet.position.x_axis * 32, tablet.position.y_axis * 32)))
        else:
            win.blit(images["tablet.png"], (32, height))
        if not toy.gathered:
            win.blit(images["maze.png"], ((toy.position.x_axis * 32, toy.position.y_axis * 32)))
        else:
            win.blit(images["maze.png"], (64, height))
        if bernard.alive:
            win.blit(images["bernard.png"], ((bernard.position.x_axis * 32, bernard.position.y_axis * 32)))
        if ford.alive:
            win.blit(images["ford.png"], ((ford.position.x_axis * 32, ford.position.y_axis * 32)))
        if game_over and bernard.alive:
            win.blit(images["victory.png"], (0, 0))
        if game_over and not bernard.alive:
            win.blit(images["defeat.png"], (0, 0))
        if consciousness:
            win.blit(images["consciousness.png"], (width - 32, height))
        # /DRAW
        # PLAYER MOVEMENT
        # NOTE(review): Position is constructed as (y_axis, x_axis) but read
        # back via .x_axis/.y_axis -- the field order looks swapped relative
        # to the constructor; confirm against Position's definition.
        player_action = pygame.key.get_pressed()
        if player_action[pygame.K_LEFT] and not game_over:
            new_position = Position(bernard.position.y_axis, bernard.position.x_axis - 1)
            if not ((new_position.x_axis * 32, new_position.y_axis * 32)) in game_map.walls:
                bernard.position = new_position
                print(bernard.position)
        if player_action[pygame.K_RIGHT] and not game_over:
            new_position = Position(bernard.position.y_axis, bernard.position.x_axis + 1)
            if not ((new_position.x_axis * 32, new_position.y_axis * 32)) in game_map.walls:
                bernard.position = new_position
                print(bernard.position)
        if player_action[pygame.K_UP] and not game_over:
            new_position = Position(bernard.position.y_axis - 1, bernard.position.x_axis)
            if not ((new_position.x_axis * 32, new_position.y_axis * 32)) in game_map.walls:
                bernard.position = new_position
                print(bernard.position)
        if player_action[pygame.K_DOWN] and not game_over:
            new_position = Position(bernard.position.y_axis + 1, bernard.position.x_axis)
            if not ((new_position.x_axis * 32, new_position.y_axis * 32)) in game_map.walls:
                bernard.position = new_position
                print(bernard.position)
        # /MOVEMENT
        pygame.display.flip()
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""project settings"""
from __future__ import unicode_literals
import os.path
import sys
from django import VERSION as DJANGO_VERSION
DEBUG = False
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
ADMINS = (
# ('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test_db',
'USER': 'runner',
'PASSWORD': 'semaphoredb',
'HOST': '127.0.0.1',
'PORT': 5432,
'ATOMIC_REQUESTS': True,
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
def gettext(s):
    """Identity marker: lets makemessages collect LANGUAGES strings without
    importing Django's translation machinery at settings-load time."""
    return s
LANGUAGES = (
('en', gettext('English')),
('fr', gettext('Français')),
('en-us', gettext('American')),
('ru', gettext('Russian')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.abspath(PROJECT_PATH + '/public/media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.abspath(PROJECT_PATH+'/public/static/')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'drone-ci'
# List of callables that know how to import templates from various sources.
if DJANGO_VERSION > (1, 9):
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_PATH, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
else:
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.abspath(PROJECT_PATH + '/templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.request",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.contrib.messages.context_processors.messages",
)
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'coop_cms.utils.RequestMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'application'
AUTHENTICATION_BACKENDS = (
'coop_cms.perms_backends.ArticlePermissionBackend',
'coop_cms.apps.email_auth.auth_backends.EmailAuthBackend',
'django.contrib.auth.backends.ModelBackend', # Django's default auth backend
)
LOCALE_PATHS = (
PROJECT_PATH+'/locale/',
)
SOUTH_TESTS_MIGRATE = False
TEST_RUNNER = 'coop_cms.test_runners.SafeMediaDiscoverRunner'
COOP_HTML_EDITOR_LINK_MODELS = ('basic_cms.Article',)
COOP_CMS_ARTICLE_LOGO_SIZE = "950x250"
COOP_CMS_NEWSLETTER_TEMPLATES = (
('basic_newsletter.html', 'Basic'),
)
COOP_CMS_ARTICLE_TEMPLATES = (
('standard.html', 'Standard'),
)
COOP_CMS_FROM_EMAIL = ''
COOP_CMS_TEST_EMAILS = ('"<NAME> - Apidev" <<EMAIL>>', )
COOP_CMS_SITE_PREFIX = ''
COOP_CMS_REPLY_TO = '<EMAIL>'
COOP_CMS_TITLE_OPTIONAL = True
LOGIN_REDIRECT_URL = "/"
ACCOUNT_ACTIVATION_DAYS = 7
INSTALLED_APPS = (
# contribs
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
# 3rd parties
'django_extensions',
'floppyforms',
'sorl.thumbnail',
'django_registration',
# externals
'coop_html_editor',
'colorbox',
'coop_cms',
'coop_bar',
'coop_cms.apps.basic_cms',
'coop_cms.apps.email_auth',
'django.contrib.admin',
'django.contrib.admindocs',
)
if (len(sys.argv) > 1) and (not sys.argv[1] in ('schemamigration', 'datamigration', 'makemigrations')):
INSTALLED_APPS = ('modeltranslation', ) + INSTALLED_APPS
if len(sys.argv) > 1 and 'test' == sys.argv[1]:
INSTALLED_APPS = INSTALLED_APPS + ('coop_cms.apps.test_app', )
# import warnings
# warnings.filterwarnings('ignore', r"django.contrib.localflavor is deprecated")

# Send everything from the coop_cms and colorbox loggers to the console at
# DEBUG level; all other existing loggers are disabled.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'coop_cms': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'colorbox': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}

# Machine-specific overrides; a missing local_settings module is not an error.
try:
    from local_settings import *  # pylint: disable=W0401,W0614
except ImportError:
    pass
|
<reponame>dylanirion/wildbook-ia<gh_stars>10-100
# -*- coding: utf-8 -*-
import logging
from vtool._pyflann_backend import pyflann as pyflann
import utool as ut
import uuid
from os.path import exists, join
import lockfile
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class Win32CompatTempFile(object):
    """
    mimics tempfile.NamedTemporaryFile but allows the file to be closed without
    being deleted. This lets a second process (like the FLANN) read/write to
    the file in a win32 system. The file is instead deleted after the
    Win32CompatTempFile object goes out of scope.

    Example:
        >>> # ENABLE_DOCTEST
        >>> from wbia.algo.smk.pickle_flann import *  # NOQA
        >>> verbose = True
        >>> temp = Win32CompatTempFile(verbose=verbose)
        >>> data = '10010'
        >>> data = data.encode()
        >>> print('data = %r' % (data,))
        >>> data1 = temp.read()
        >>> print('data1 = %r' % (data1,))
        >>> temp.write(data)
        >>> data2 = temp.read()
        >>> print('data2 = %r' % (data2,))
        >>> temp.close()
        >>> assert data != data1
        >>> assert data == data2
        >>> ut.assert_raises(ValueError, temp.close)
        >>> assert not ut.checkpath(temp.fpath, verbose=verbose)
    """

    # NOTE(review): this class deliberately names the receiver ``temp``
    # instead of ``self`` — project convention in wildbook-ia.
    def __init__(temp, delete=True, verbose=False):
        temp.delete = delete          # remove the backing file on close() when True
        appname = 'wbia'
        # per-app resource directory holding all temp files of this class
        temp.dpath = ut.ensure_app_resource_dir(appname, 'tempfiles')
        temp.fpath = None
        temp.fname = None
        temp._isclosed = False        # close() flips this; further I/O raises ValueError
        temp.verbose = verbose
        temp._create_unique_file()

    @property
    def name(temp):
        # mirrors tempfile.NamedTemporaryFile's ``name`` attribute
        return temp.fpath

    def read(temp):
        """Return the full current contents of the temp file as bytes."""
        temp._check_open()
        with open(temp.fpath, 'rb') as file_:
            return file_.read()

    def write(temp, data):
        """Overwrite the temp file with ``data`` (bytes) and flush to disk."""
        temp._check_open()
        with open(temp.fpath, 'wb') as file_:
            file_.write(data)
            file_.flush()

    def close(temp):
        """Delete the backing file (if ``delete``) and mark the object closed.

        Raises ValueError if called twice (see doctest above).
        """
        temp._check_open()
        if temp.delete and exists(temp.fpath):
            ut.delete(temp.fpath, verbose=temp.verbose)
        temp._isclosed = True

    def _create_unique_file(temp):
        """Pick a uuid-based filename that does not exist yet and touch it.

        The lockfile guards the choose-name/touch sequence against races with
        other processes using the same temp directory.
        """
        temp._check_open()
        with lockfile.LockFile(join(temp.dpath, 'tempfile.lock')):
            flag = True
            # first iteration: flag short-circuits so fpath (None) is not tested
            while flag or exists(temp.fpath):
                temp.fname = str(uuid.uuid4()) + '.temp'
                temp.fpath = join(temp.dpath, temp.fname)
                flag = False
            ut.touch(temp.fpath, verbose=temp.verbose)

    def _check_open(temp):
        # guard shared by all public operations
        if temp._isclosed:
            raise ValueError('I/O operation on closed object')

    def __del__(temp):
        # best-effort cleanup when the object is garbage collected
        if not temp._isclosed:
            temp.close()
if pyflann is not None:

    class PickleFLANN(pyflann.FLANN):
        """
        Adds the ability to pickle a flann class on a unix system.

        (Actually, pickle still wont work because we need the original point data.
        But we can do a custom dumps and a loads)

        CommandLine:
            python -m wbia.algo.smk.pickle_flann PickleFLANN

        Example:
            >>> # ENABLE_DOCTEST
            >>> from wbia.algo.smk.pickle_flann import *  # NOQA
            >>> import numpy as np
            >>> rng = np.random.RandomState(42)
            >>> data = rng.rand(10, 2)
            >>> query = rng.rand(5, 2)
            >>> flann = PickleFLANN()
            >>> flann.build_index(data, random_seed=42)
            >>> index_bytes = flann.dumps()
            >>> flann2 = PickleFLANN()
            >>> flann2.loads(index_bytes, data)
            >>> assert flann2 is not flann
            >>> assert flann2.dumps() == index_bytes
            >>> idx1 = flann.nn_index(query)[0]
            >>> idx2 = flann2.nn_index(query)[0]
            >>> assert np.all(idx1 == idx2)
        """

        def dumps(self):
            """Serialize the built index to a byte string.

            FLANN can only save to a named file, so the index is round-tripped
            through a Win32CompatTempFile (works on win32 where a second
            process must open the file).

            Returns:
                bytes: the serialized index, usable with :meth:`loads`.
            """
            temp = Win32CompatTempFile(delete=True, verbose=False)
            # fix: previous code wrapped this in ``except Exception: raise``,
            # which is a no-op; try/finally alone guarantees cleanup.
            try:
                self.save_index(temp.name)
                index_bytes = temp.read()
            finally:
                temp.close()
            return index_bytes

        def loads(self, index_bytes, pts):
            """Restore an index produced by :meth:`dumps`.

            Args:
                index_bytes (bytes): output of a previous ``dumps()`` call.
                pts (ndarray): the original point data the index was built on
                    (FLANN stores only the index structure, not the points).
            """
            temp = Win32CompatTempFile(delete=True, verbose=False)
            try:
                temp.write(index_bytes)
                self.load_index(temp.name, pts)
            finally:
                temp.close()

else:
    # pyflann backend unavailable: expose a sentinel so importers can detect it
    PickleFLANN = None
|
<filename>tests/components/plugwise/conftest.py
"""Setup mocks for the Plugwise integration tests."""
import re
from unittest.mock import AsyncMock, Mock, patch
import jsonpickle
from plugwise.exceptions import (
ConnectionFailedError,
InvalidAuthentication,
PlugwiseException,
XMLDataMissingError,
)
import pytest
from tests.common import load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
def _read_json(environment, call):
    """Load the named plugwise fixture and decode it with jsonpickle."""
    raw = load_fixture(f"plugwise/{environment}/{call}.json")
    return jsonpickle.decode(raw)
@pytest.fixture(name="mock_smile")
def mock_smile():
    """Create a Mock Smile for testing exceptions."""
    with patch(
        "homeassistant.components.plugwise.config_flow.Smile",
    ) as smile_mock:
        # expose the exception classes on the patched module object
        for exc in (InvalidAuthentication, ConnectionFailedError):
            setattr(smile_mock, exc.__name__, exc)
        smile = smile_mock.return_value
        smile.connect.return_value = True
        yield smile
@pytest.fixture(name="mock_smile_unauth")
def mock_smile_unauth(aioclient_mock: AiohttpClientMocker) -> None:
    """Mock the Plugwise Smile unauthorized for Home Assistant."""
    any_url = re.compile(".*")
    for register in (aioclient_mock.get, aioclient_mock.put):
        register(any_url, status=401)
@pytest.fixture(name="mock_smile_error")
def mock_smile_error(aioclient_mock: AiohttpClientMocker) -> None:
    """Mock the Plugwise Smile server failure for Home Assistant."""
    any_url = re.compile(".*")
    for register in (aioclient_mock.get, aioclient_mock.put):
        register(any_url, status=500)
@pytest.fixture(name="mock_smile_notconnect")
def mock_smile_notconnect():
    """Mock the Plugwise Smile general connection failure for Home Assistant."""
    with patch("homeassistant.components.plugwise.gateway.Smile") as smile_mock:
        for exc in (InvalidAuthentication, ConnectionFailedError, PlugwiseException):
            setattr(smile_mock, exc.__name__, exc)
        smile = smile_mock.return_value
        smile.connect.side_effect = AsyncMock(return_value=False)
        yield smile
@pytest.fixture(name="mock_smile_adam")
def mock_smile_adam():
    """Create a Mock Adam environment for testing exceptions."""
    chosen_env = "adam_multiple_devices_per_zone"
    with patch("homeassistant.components.plugwise.gateway.Smile") as smile_mock:
        for exc in (InvalidAuthentication, ConnectionFailedError, XMLDataMissingError):
            setattr(smile_mock, exc.__name__, exc)
        smile = smile_mock.return_value
        # static device properties
        smile.gateway_id = "fe799307f1624099878210aa0b9f1475"
        smile._active_device_present = False
        smile.smile_version = "3.0.15"
        smile.smile_type = "thermostat"
        smile.smile_hostname = "smile98765"
        smile.notifications = _read_json(chosen_env, "notifications")
        # async API surface
        smile.connect.side_effect = AsyncMock(return_value=True)
        smile.async_update.side_effect = AsyncMock(
            return_value=_read_json(chosen_env, "all_data")
        )
        smile.single_master_thermostat.side_effect = Mock(return_value=False)
        smile.set_schedule_state.side_effect = AsyncMock(return_value=True)
        smile.set_preset.side_effect = AsyncMock(return_value=True)
        smile.set_temperature.side_effect = AsyncMock(return_value=True)
        smile.set_switch_state.side_effect = AsyncMock(return_value=True)
        yield smile
@pytest.fixture(name="mock_smile_anna")
def mock_smile_anna():
    """Create a Mock Anna environment for testing exceptions."""
    chosen_env = "anna_heatpump"
    with patch("homeassistant.components.plugwise.gateway.Smile") as smile_mock:
        for exc in (InvalidAuthentication, ConnectionFailedError, XMLDataMissingError):
            setattr(smile_mock, exc.__name__, exc)
        smile = smile_mock.return_value
        # static device properties
        smile.gateway_id = "015ae9ea3f964e668e490fa39da3870b"
        smile._heater_id = "1cbf783bb11e4a7c8a6843dee3a86927"
        smile.smile_version = "4.0.15"
        smile.smile_type = "thermostat"
        smile.smile_hostname = "smile98765"
        smile.notifications = _read_json(chosen_env, "notifications")
        # async API surface
        smile.connect.side_effect = AsyncMock(return_value=True)
        smile.async_update.side_effect = AsyncMock(
            return_value=_read_json(chosen_env, "all_data")
        )
        smile.single_master_thermostat.side_effect = Mock(return_value=True)
        smile.set_schedule_state.side_effect = AsyncMock(return_value=True)
        smile.set_preset.side_effect = AsyncMock(return_value=True)
        smile.set_temperature.side_effect = AsyncMock(return_value=True)
        smile.set_switch_state.side_effect = AsyncMock(return_value=True)
        yield smile
@pytest.fixture(name="mock_smile_p1")
def mock_smile_p1():
    """Create a Mock P1 DSMR environment for testing exceptions."""
    chosen_env = "p1v3_full_option"
    with patch("homeassistant.components.plugwise.gateway.Smile") as smile_mock:
        for exc in (InvalidAuthentication, ConnectionFailedError, XMLDataMissingError):
            setattr(smile_mock, exc.__name__, exc)
        smile = smile_mock.return_value
        # static device properties (a P1 meter has no heater)
        smile.gateway_id = "e950c7d5e1ee407a858e2a8b5016c8b3"
        smile._heater_id = None
        smile.smile_version = "3.3.9"
        smile.smile_type = "power"
        smile.smile_hostname = "smile98765"
        smile.notifications = _read_json(chosen_env, "notifications")
        # async API surface
        smile.connect.side_effect = AsyncMock(return_value=True)
        smile.async_update.side_effect = AsyncMock(
            return_value=_read_json(chosen_env, "all_data")
        )
        smile.single_master_thermostat.side_effect = Mock(return_value=None)
        yield smile
@pytest.fixture(name="mock_stretch")
def mock_stretch():
    """Create a Mock Stretch environment for testing exceptions."""
    chosen_env = "stretch_v31"
    with patch("homeassistant.components.plugwise.gateway.Smile") as smile_mock:
        for exc in (InvalidAuthentication, ConnectionFailedError, XMLDataMissingError):
            setattr(smile_mock, exc.__name__, exc)
        smile = smile_mock.return_value
        # static device properties (a Stretch has no heater)
        smile.gateway_id = "259882df3c05415b99c2d962534ce820"
        smile._heater_id = None
        smile.smile_version = "3.1.11"
        smile.smile_type = "stretch"
        smile.smile_hostname = "stretch98765"
        # async API surface
        smile.connect.side_effect = AsyncMock(return_value=True)
        smile.async_update.side_effect = AsyncMock(
            return_value=_read_json(chosen_env, "all_data")
        )
        smile.set_switch_state.side_effect = AsyncMock(return_value=True)
        yield smile
|
<filename>pyemma/coordinates/transform/transformer.py<gh_stars>0
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Free University
# Berlin, 14195 Berlin, Germany.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from pyemma.util.log import getLogger
from pyemma._base.progress import ProgressReporter
from itertools import count
import numpy as np
from math import ceil
from abc import ABCMeta, abstractmethod
from pyemma.util.exceptions import NotConvergedWarning
import six
from six.moves import range
__all__ = ['Transformer']
__author__ = 'noe, marscher'
class SkipPassException(Exception):
    """Raise from a parametrization callback to skip the remainder of the
    current pass.

    The attributes tell :meth:`Transformer.parametrize` which lag time and
    stride the next pass should use.
    """

    def __init__(self, next_pass_lagtime=0, next_pass_stride=1):
        self.next_pass_stride = next_pass_stride
        self.next_pass_lagtime = next_pass_lagtime
class TransformerIteratorContext(object):
    """Iteration settings (stride and lag) shared between a Transformer and
    its iterator.

    The stride is either a plain int (uniform striding) or an (N, 2) integer
    ndarray of sorted (trajectory, frame) pairs for random access.
    """

    def __init__(self, stride=1, lag=0):
        self._lag = lag
        self.__init_stride(stride)

    def __init_stride(self, stride):
        # (re-)derive all stride-dependent state
        self._stride = stride
        if isinstance(stride, np.ndarray):
            # random-access mode: which trajectories occur and how often
            traj_column = stride[:, 0]
            self._trajectory_keys, self._trajectory_lengths = np.unique(
                traj_column, return_counts=True)
        else:
            self._trajectory_keys = None
        self._uniform_stride = TransformerIteratorContext.is_uniform_stride(stride)
        if not self.uniform_stride and not self.is_stride_sorted():
            raise ValueError("Currently only sorted arrays allowed for random access")

    def ra_indices_for_traj(self, traj):
        """
        Gives the indices for a trajectory file index (without changing the order within the trajectory itself).
        :param traj: a trajectory file index
        :return: a Nx1 - np.array of the indices corresponding to the trajectory index
        """
        assert not self.uniform_stride, "requested random access indices, but is in uniform stride mode"
        if traj not in self.traj_keys:
            return np.array([])
        selected = self._stride[:, 0] == traj
        return self._stride[selected][:, 1]

    def ra_trajectory_length(self, traj):
        assert not self.uniform_stride, "requested random access trajectory length, but is in uniform stride mode"
        if traj not in self.traj_keys:
            return 0
        return int(self._trajectory_lengths[np.where(self.traj_keys == traj)])

    @property
    def stride(self):
        return self._stride

    @stride.setter
    def stride(self, value):
        self.__init_stride(value)

    @property
    def lag(self):
        return self._lag

    @lag.setter
    def lag(self, value):
        self._lag = value

    @property
    def traj_keys(self):
        return self._trajectory_keys

    @property
    def uniform_stride(self):
        return self._uniform_stride

    @staticmethod
    def is_uniform_stride(stride):
        # anything that is not an index array strides uniformly
        return not isinstance(stride, np.ndarray)

    def is_stride_sorted(self):
        if self.uniform_stride:
            return True
        traj_column = self.stride[:, 0]
        if np.any(np.diff(traj_column) < 0):
            # trajectory keys out of order
            return False
        for key in self.traj_keys:
            frames = self.stride[traj_column == key][:, 1]
            if np.any(np.diff(frames) < 0):
                # frame indices out of order within one trajectory
                return False
        return True
class TransformerIterator(object):
    """Iterator over the transformed output of a Transformer.

    Yields ``(itraj, X)`` tuples when lag == 0 and ``(itraj, X, Y)`` tuples
    (Y being the time-lagged chunk) when lag > 0.
    """

    def __init__(self, transformer, stride=1, lag=0):
        self._transformer = transformer
        self._ctx = TransformerIteratorContext(stride=stride, lag=lag)
        # rewind the transformer's iteration state before starting
        self._transformer._reset(self._ctx)
        if not self._ctx.uniform_stride:
            # random access: jump straight to the first selected trajectory
            self._transformer._itraj = min(self._ctx.traj_keys)

    def __iter__(self):
        return self

    def __next__(self):
        transformer = self._transformer
        if transformer._itraj >= transformer.number_of_trajectories():
            raise StopIteration
        current_traj = transformer._itraj
        if self._ctx.lag == 0:
            return (current_traj, transformer._next_chunk(self._ctx))
        X, Y = transformer._next_chunk(self._ctx)
        return (current_traj, X, Y)

    # Python 2 compatibility alias
    def next(self):
        return self.__next__()
class Transformer(six.with_metaclass(ABCMeta, ProgressReporter)):
    r""" Basis class for pipeline objects

    Parameters
    ----------
    chunksize : int (optional)
        the chunksize used to batch process underlying data
    """

    # count instances; used by __create_logger to give every instance a
    # uniquely numbered logger name
    _ids = count(0)
def __init__(self, chunksize=100):
self.chunksize = chunksize
self._in_memory = False
self._data_producer = None
self._parametrized = False
self._param_with_stride = 1
# allow children of this class to implement their own progressbar handling
self._custom_param_progress_handling = False
self.__create_logger()
@property
def data_producer(self):
r"""where the transformer obtains its data."""
return self._data_producer
@data_producer.setter
def data_producer(self, dp):
if dp is not self._data_producer:
self._logger.debug("reset (previous) parametrization state, since"
" data producer has been changed.")
self._parametrized = False
self._data_producer = dp
    @property
    def chunksize(self):
        """chunksize defines how much data is being processed at once."""
        return self._chunksize

    @chunksize.setter
    def chunksize(self, size):
        # NOTE(review): zero is accepted here (and _n_chunks treats it as
        # "one single chunk"), although the error message says "positive".
        if not size >= 0:
            raise ValueError("chunksize has to be positive")
        self._chunksize = int(size)
def _n_chunks(self, stride=1):
""" rough estimate of how many chunks will be processed """
if self._chunksize != 0:
if not TransformerIteratorContext.is_uniform_stride(stride):
chunks = ceil(len(stride[:, 0]) / float(self._chunksize))
else:
chunks = sum([ceil(l / float(self._chunksize))
for l in self.trajectory_lengths(stride)])
else:
chunks = 1
return int(chunks)
def _close(self):
if self.data_producer is not self:
self.data_producer._close()
@property
def in_memory(self):
r"""are results stored in memory?"""
return self._in_memory
@in_memory.setter
def in_memory(self, op_in_mem):
r"""
If set to True, the output will be stored in memory.
"""
old_state = self._in_memory
if not old_state and op_in_mem:
self._in_memory = op_in_mem
self._Y = []
self._map_to_memory()
elif not op_in_mem and old_state:
self._clear_in_memory()
self._in_memory = op_in_mem
def _clear_in_memory(self):
if __debug__:
self._logger.debug("clear memory")
assert self.in_memory, "tried to delete in memory results which are not set"
self._Y = None
    @abstractmethod
    def dimension(self):
        r""" Number of dimensions that should be used for the output of the transformer. """
        # abstract: every concrete transformer must report its output width
        pass
def __create_logger(self):
# note this is private, since it should only be called (once) from this class.
count = next(self._ids)
i = self.__module__.rfind(".")
j = self.__module__.find(".") + 1
package = self.__module__[j:i]
name = "%s.%s[%i]" % (package, self.__class__.__name__, count)
self._name = name
self._logger = getLogger(name)
def number_of_trajectories(self):
r"""
Returns the number of trajectories.
Returns
-------
int : number of trajectories
"""
return self.data_producer.number_of_trajectories()
@property
def ntraj(self):
__doc__ = self.number_of_trajectories.__doc__
return self.number_of_trajectories()
def trajectory_length(self, itraj, stride=1):
r"""
Returns the length of trajectory of the requested index.
Parameters
----------
itraj : int
trajectory index
stride : int
return value is the number of frames in the trajectory when
running through it with a step size of `stride`.
Returns
-------
int : length of trajectory
"""
return self.data_producer.trajectory_length(itraj, stride=stride)
def trajectory_lengths(self, stride=1):
r"""
Returns the length of each trajectory.
Parameters
----------
stride : int
return value is the number of frames of the trajectories when
running through them with a step size of `stride`.
Returns
-------
array(dtype=int) : containing length of each trajectory
"""
return self.data_producer.trajectory_lengths(stride=stride)
def n_frames_total(self, stride=1):
r"""
Returns total number of frames.
Parameters
----------
stride : int
return value is the number of frames in trajectories when
running through them with a step size of `stride`.
Returns
-------
int : n_frames_total
"""
return self.data_producer.n_frames_total(stride=stride)
    @abstractmethod
    def describe(self):
        r""" Get a descriptive string representation of this class."""
        # abstract: implemented by every concrete transformer
        pass
def output_type(self):
r""" By default transformers return single precision floats. """
return np.float32
    def parametrize(self, stride=1):
        r""" Parametrize this Transformer

        Iterates over all data of the data producer (possibly several passes)
        and feeds every chunk to :meth:`_param_add_data` until that method
        signals completion. A no-op when already parametrized with this
        stride.

        Parameters
        ----------
        stride : int or ndarray, optional (default=1)
            process only every `stride`-th frame, or an (N, 2) index array
            for random access.

        Raises
        ------
        RuntimeError
            if no data producer has been set.
        """
        # check if ready
        if self.data_producer is None:
            raise RuntimeError('Called parametrize while data producer is not'
                               ' yet set. Ensure "data_producer" attribute is set!')
        # if stride is not equal to one and does not match to a previous call
        # retrigger parametrization (but not for readers).
        if stride != self._param_with_stride and self._data_producer is not self:
            self._parametrized = False
        self._param_with_stride = stride
        if self._parametrized:
            return
        # init; the subclass may request an initial lag (or lag and stride)
        return_value = self._param_init()
        if return_value is not None:
            if isinstance(return_value, tuple):
                lag, stride = return_value
            else:
                lag = return_value
        else:
            lag = 0
        # create iterator context
        ctx = TransformerIteratorContext(stride, lag)
        # feed data, until finished
        add_data_finished = False
        ipass = 0
        if not self._custom_param_progress_handling:
            # NOTE: this assumes this class implements a 1-pass algo
            self._progress_register(self._n_chunks(stride), "parameterizing "
                                    + self.__class__.__name__, 0)
        # parametrize
        try:
            while not add_data_finished:
                first_chunk = True
                self.data_producer._reset(ctx)
                # iterate over trajectories
                last_chunk = False
                itraj = 0
                if not ctx.uniform_stride:
                    # in random access mode skip leading trajectories which are not included
                    while itraj not in ctx.traj_keys and itraj < self.number_of_trajectories():
                        itraj += 1
                while not last_chunk:
                    last_chunk_in_traj = False
                    t = 0
                    while not last_chunk_in_traj:
                        # iterate over times within trajectory
                        if ctx.lag == 0:
                            X = self.data_producer._next_chunk(ctx)
                            Y = None
                        else:
                            X, Y = self.data_producer._next_chunk(ctx)
                        L = np.shape(X)[0]
                        # last chunk in traj?
                        last_chunk_in_traj = (t + L >= self.trajectory_length(itraj, stride=ctx.stride))
                        # last chunk?
                        last_chunk = (
                            last_chunk_in_traj and itraj >= self.number_of_trajectories() - 1)
                        # pass chunks to algorithm and respect its return values
                        # and possible SkipPassException
                        try:
                            return_value = self._param_add_data(
                                X, itraj, t, first_chunk, last_chunk_in_traj,
                                last_chunk, ipass, Y=Y, stride=stride)
                        except SkipPassException as spe:
                            self._logger.debug("got skip pass exception."
                                               " Skipping pass %i" % ipass)
                            # break the inner loops
                            last_chunk_in_traj = True
                            last_chunk = True
                            # set lag time for next pass
                            return_value = False, spe.next_pass_lagtime, spe.next_pass_stride
                        if not self._custom_param_progress_handling:
                            self._progress_update(1, 0)
                        # the return value is either a bare "finished" flag or
                        # a tuple additionally carrying the next lag (and stride)
                        if isinstance(return_value, tuple):
                            if len(return_value) == 2:
                                add_data_finished, ctx.lag = return_value
                            else:
                                add_data_finished, ctx.lag, ctx.stride = return_value
                        else:
                            add_data_finished = return_value
                        first_chunk = False
                        # increment time
                        t += L
                    # increment trajectory
                    itraj += 1
                    # skip missing trajectories in random access mode
                    if not ctx.uniform_stride:
                        while itraj not in ctx.traj_keys and itraj < self.number_of_trajectories():
                            itraj += 1
                ipass += 1
        except NotConvergedWarning:
            self._logger.info("presumely finished parameterization.")
        self._close()
        # finish parametrization
        if not self._custom_param_progress_handling:
            self._progress_force_finish(0)
        self._param_finish()
        self._parametrized = True
        # memory mode? Then map all results. Avoid recursion here, if parametrization
        # is triggered from get_output
        if self.in_memory and not self._mapping_to_mem_active:
            self._map_to_memory()
def map(self, X):
r"""Maps the input data through the transformer to correspondingly shaped output data array/list.
Parameters
----------
X : ndarray(T, n) or list of ndarray(T_i, n)
The input data, where T is the number of time steps and n is the number of dimensions.
If a list is provided, the number of time steps is allowed to vary, but the number of dimensions are
required to be to be consistent.
required to be to be consistent.
Returns
-------
Y : ndarray(T, d) or list of ndarray(T_i, d)
The mapped data, where T is the number of time steps of the input data and d is the output dimension
of this transformer. If called with a list of trajectories, Y will also be a corresponding list of
trajectories
"""
if isinstance(X, np.ndarray):
if X.ndim == 2:
mapped = self._map_array(X)
return mapped
else:
raise TypeError('Input has the wrong shape: %s with %i'
' dimensions. Expecting a matrix (2 dimensions)'
% (str(X.shape, X.ndim)))
elif isinstance(X, (list, tuple)):
out = []
for x in X:
mapped = self._map_array(x)
out.append(mapped)
return out
else:
raise TypeError('Input has the wrong type: %s '
'. Either accepting numpy arrays of dimension 2 '
'or lists of such arrays' % (str(type(X))))
    @abstractmethod
    def _map_array(self, X):
        r"""
        Maps a single 2-d input array through the transformer.

        (The previous docstring said "Initializes the parametrization" —
        a copy-paste from :meth:`_param_init`.)

        Parameters
        ----------
        X : ndarray(T, n)
            The input data, where T is the number of time steps and n is the number of dimensions.

        Returns
        -------
        Y : ndarray(T, d)
            The projected data, where T is the number of time steps of the input data and d is the output dimension
            of this transformer.
        """
        pass
    def _param_init(self):
        r"""
        Initializes the parametrization.

        Returns
        -------
        May return None (default), a lag time, or a (lag, stride) tuple —
        :meth:`parametrize` interprets the return value to configure the
        iteration context.
        """
        pass
    def _param_finish(self):
        r"""
        Finalizes the parametrization.

        Called exactly once by :meth:`parametrize` after all data passes
        have completed.
        """
        pass
    @abstractmethod
    def _param_add_data(self, *args, **kwargs):
        r""" Adds data to parameterization

        Called by :meth:`parametrize` as
        ``_param_add_data(X, itraj, t, first_chunk, last_chunk_in_traj,
        last_chunk, ipass, Y=Y, stride=stride)``. May return a bool
        ("finished"), a (finished, lag) tuple, or a (finished, lag, stride)
        tuple, and may raise :class:`SkipPassException` to abort the pass.
        """
        pass
def _map_to_memory(self, stride=1):
r"""Maps results to memory. Will be stored in attribute :attr:`Y`."""
self._logger.debug("mapping to mem")
assert self._in_memory
self._mapping_to_mem_active = True
self._Y = self.get_output(stride=stride)
self._mapping_to_mem_active = False
    def _reset(self, context=None):
        r"""_reset data position

        Rewinds the iteration state (trajectory index and time) to the very
        beginning and propagates the reset upstream when operating in a
        pipeline.
        """
        # TODO: children of this do not call parametrize nor reset their data_producers.
        # check if this is an issue
        if not self._parametrized:
            self._logger.warning("reset(): not yet parametrized! Performing now.")
            self.parametrize()
        self._itraj = 0
        self._t = 0
        if not self.in_memory and self.data_producer is not self:
            # operate in pipeline
            self.data_producer._reset(context)
    def _next_chunk(self, ctx):
        r"""
        Transforms next available chunk from either in memory data or internal
        data_producer

        Parameters
        ----------
        ctx : TransformerIteratorContext
            carries the stride and the lag (time delay of the second data
            source) used while iterating.

        Returns
        -------
        X, (Y if lag > 0) : array_like
            mapped (transformed) data
        """
        if self.in_memory and not self._mapping_to_mem_active:
            if self._itraj >= self.number_of_trajectories():
                return None
            # operate in memory, implement iterator here
            traj_len = self.trajectory_length(self._itraj, stride=ctx.stride)
            traj = self._Y[self._itraj]
            if ctx.lag == 0:
                if not ctx.uniform_stride:
                    # random access: select the requested frames of this trajectory
                    Y = traj[ctx.ra_indices_for_traj(self._itraj)[self._t:min(self._t + self.chunksize, traj_len)]]
                    self._t += self.chunksize
                    # advance past trajectories that are exhausted or not selected
                    while (self._itraj not in ctx.traj_keys
                           or ctx.ra_indices_for_traj(self._itraj)[self._t:min(self._t + self.chunksize, traj_len)].size == 0) \
                            and self._itraj < self.number_of_trajectories():
                        self._itraj += 1
                        self._t = 0
                else:
                    Y = traj[self._t:min(self._t + self.chunksize * ctx.stride, traj_len):ctx.stride]
                    # increment counters
                    self._t += self.chunksize * ctx.stride
                    if self._t >= traj_len:
                        self._itraj += 1
                        self._t = 0
                return Y
            else:
                # lagged access: second slice is shifted by lag * stride frames
                Y0 = traj[self._t:min(self._t + self.chunksize * ctx.stride, traj_len):ctx.stride]
                Ytau = traj[self._t + ctx.lag * ctx.stride:min(self._t + (self.chunksize + ctx.lag) * ctx.stride, traj_len):ctx.stride]
                # increment counters
                self._t += self.chunksize * ctx.stride
                if self._t >= traj_len:
                    self._itraj += 1
                    self._t = 0
                return Y0, Ytau
        else:
            if not ctx.uniform_stride:
                # skip trajectories that are not part of the random-access selection
                while self._itraj not in ctx.traj_keys and self._itraj < self.number_of_trajectories():
                    self._itraj += 1
                    self._t = 0
            # operate in pipeline
            if ctx.lag == 0:
                X = self.data_producer._next_chunk(ctx)
                self._t += X.shape[0]
                if self._t >= self.trajectory_length(self._itraj, stride=ctx.stride):
                    self._itraj += 1
                    self._t = 0
                return self.map(X)
            # TODO: this seems to be a dead branch of code
            else:
                (X0, Xtau) = self.data_producer._next_chunk(ctx)
                self._t += X0.shape[0]
                if self._t >= self.trajectory_length(self._itraj, stride=ctx.stride):
                    self._itraj += 1
                    self._t = 0
                return self.map(X0), self.map(Xtau)
def __iter__(self):
r"""
Returns an iterator that allows to access the transformed data.
Returns
-------
iterator : a :class:`pyemma.coordinates.transfrom.transformer.TransformerIterator` transformer iterator
a call to the .next() method of this iterator will return the pair
(itraj, X) : (int, ndarray(n, m))
where itraj corresponds to input sequence number (eg. trajectory index)
and X is the transformed data, n = chunksize or n < chunksize at end
of input.
"""
self._reset()
return TransformerIterator(self, stride=1, lag=0)
def iterator(self, stride=1, lag=0):
r"""
Returns an iterator that allows to access the transformed data.
Parameters
----------
stride : int
Only transform every N'th frame, default = 1
lag : int
Configure the iterator such that it will return time-lagged data
with a lag time of `lag`. If `lag` is used together with `stride`
the operation will work as if the striding operation is applied
before the time-lagged trajectory is shifted by `lag` steps.
Therefore the effective lag time will be stride*lag.
Returns
-------
iterator : a :class:`TransformerIterator <pyemma.coordinates.transform.transformer.TransformerIterator>`
If lag = 0, a call to the .next() method of this iterator will return
the pair
(itraj, X) : (int, ndarray(n, m)),
where itraj corresponds to input sequence number (eg. trajectory index)
and X is the transformed data, n = chunksize or n < chunksize at end
of input.
If lag > 0, a call to the .next() method of this iterator will return
the tuple
(itraj, X, Y) : (int, ndarray(n, m), ndarray(p, m))
where itraj and X are the same as above and Y contain the time-lagged
data.
"""
return TransformerIterator(self, stride=stride, lag=lag)
    def get_output(self, dimensions=slice(0, None), stride=1):
        r""" Maps all input data of this transformer and returns it as an array or list of arrays.

        Parameters
        ----------
        dimensions : list-like of indexes or slice
            indices of dimensions you like to keep, default = all
        stride : int
            only take every n'th frame, default = 1

        Returns
        -------
        output : ndarray(T, d) or list of ndarray(T_i, d)
            the mapped data, where T is the number of time steps of the input data, or if stride > 1,
            floor(T_in / stride). d is the output dimension of this transformer.
            If the input consists of a list of trajectories, Y will also be a corresponding list of trajectories

        Notes
        -----
        * This function may be RAM intensive if stride is too large or
          too many dimensions are selected.
        * if in_memory attribute is True, then results of this methods are cached.

        Example
        -------
        plotting trajectories

        >>> import pyemma.coordinates as coor # doctest: +SKIP
        >>> import matplotlib.pyplot as plt # doctest: +SKIP

        Fill with some actual data!

        >>> tica = coor.tica() # doctest: +SKIP
        >>> trajs = tica.get_output(dimensions=(0,), stride=100) # doctest: +SKIP
        >>> for traj in trajs: # doctest: +SKIP
        ...     plt.figure() # doctest: +SKIP
        ...     plt.plot(traj[:, 0]) # doctest: +SKIP
        """
        # normalize the dimension selection and count output dimensions
        if isinstance(dimensions, int):
            ndim = 1
            dimensions = slice(dimensions, dimensions + 1)
        elif isinstance(dimensions, list):
            ndim = len(np.zeros(self.dimension())[dimensions])
        elif isinstance(dimensions, np.ndarray):
            assert dimensions.ndim == 1, 'dimension indices can\'t have more than one dimension'
            ndim = len(np.zeros(self.dimension())[dimensions])
        elif isinstance(dimensions, slice):
            ndim = len(np.zeros(self.dimension())[dimensions])
        else:
            raise ValueError('unsupported type (%s) of \"dimensions\"' % type(dimensions))
        assert ndim > 0, "ndim was zero in %s" % self.__class__.__name__
        if not self._parametrized:
            self._logger.warning("has to be parametrized before getting output!"
                                 " Doing it now.")
            self.parametrize(stride)
        # if we are in memory and have results already computed, return them
        if self._in_memory:
            # ensure stride and dimensions are same of cached result
            if self._Y and all(self._Y[i].shape == (self.trajectory_length(i, stride=stride), ndim)
                               for i in range(self.number_of_trajectories())):
                return self._Y
        # allocate memory
        try:
            trajs = [np.empty((l, ndim), dtype=self.output_type())
                     for l in self.trajectory_lengths(stride=stride)]
        except MemoryError:
            self._logger.exception("Could not allocate enough memory to map all data."
                                   " Consider using a larger stride.")
            return
        if __debug__:
            self._logger.debug("get_output(): dimensions=%s" % str(dimensions))
            self._logger.debug("get_output(): created output trajs with shapes: %s"
                               % [x.shape for x in trajs])
        # fetch data
        last_itraj = -1
        t = 0  # first time point
        self._progress_register(self._n_chunks(stride),
                                description='getting output of ' + self.__class__.__name__,
                                stage=1)
        for itraj, chunk in self.iterator(stride=stride):
            if itraj != last_itraj:
                last_itraj = itraj
                t = 0  # reset time to 0 for new trajectory
            L = chunk.shape[0]
            if L > 0:
                trajs[itraj][t:t + L, :] = chunk[:, dimensions]
                t += L
            # update progress
            self._progress_update(1, stage=1)
        # cache the result when operating in memory mode
        if self._in_memory:
            self._Y = trajs
        return trajs
|
import asyncio
import dataclasses
import enum
import fnmatch
import re
import urllib.parse
from typing import Collection, FrozenSet, Iterable, Iterator, List, Mapping, \
MutableMapping, NewType, Optional, Pattern, Set, Union
# A namespace specification with globs, negations, and some minimal syntax; see `match_namespace()`.
# Regexps are also supported if pre-compiled from the code, not from the CLI options as raw strings.
# (A plain Union, not a NewType: patterns are free-form user-supplied values.)
NamespacePattern = Union[str, Pattern]

# A specific really existing addressable namespace (at least, the one assumed to be so).
# Made as a NewType for stricter type-checking to avoid collisions with patterns and other strings.
NamespaceName = NewType('NamespaceName', str)

# A namespace reference usable in the API calls. `None` means cluster-wide API calls.
Namespace = Optional[NamespaceName]
def select_specific_namespaces(patterns: Iterable[NamespacePattern]) -> Collection[NamespaceName]:
    """
    Pick out the pattern specifications that are plain literal namespace names.

    Used as a fallback when namespace observation is either disabled or not
    possible due to restricted permissions: the operator can still serve the
    very specific configured namespaces. Pre-compiled regexps and glob-like
    strings (containing ``!``, ``*``, ``?`` or ``,``) cannot name a single
    namespace and are skipped.
    """
    literal_names: Set[NamespaceName] = set()
    for pattern in patterns:
        if not isinstance(pattern, str):
            continue  # pre-compiled regexps & other non-strings
        if any(char in pattern for char in '!*?,'):
            continue  # globs/negations/lists are not literal names
        literal_names.add(NamespaceName(pattern))
    return literal_names
def match_namespace(name: NamespaceName, pattern: NamespacePattern) -> bool:
    """
    Check whether a specific namespace matches a namespace specification.

    A specification is either a pre-compiled regexp (matched in full), or a
    string of comma-separated globs (spaces ignored) evaluated left to right:

    * ``*`` and ``?`` are the usual glob placeholders for any/one symbols;
    * a glob starting with ``!`` excludes a previously matched name;
    * a non-leading inclusive glob only *re*-includes a name that was matched
      by the very first glob and excluded afterwards -- it does not widen the
      initial match;
    * if the first glob is exclusive, a leading catch-all ``*`` is implied.

    For example, ``"myapp-*, !*-pr-*, *pr-123"`` matches ``myapp-test``,
    ``myapp-live`` and ``myapp-pr-123``, but not ``myapp-pr-456`` and not
    ``otherapp-pr-123`` (the latter never passed the initial glob).
    The pattern ``"!*-pr-*, *pr-123"`` (i.e. ``"*, !*-pr-*, *pr-123"``)
    additionally matches ``otherapp-pr-123``, since the implied ``*``
    matched it initially. In practice, simple one-glob patterns with at most
    a single negation are expected.
    """
    # Regexps are powerful enough on their own -- no parsing or interpreting.
    if isinstance(pattern, re.Pattern):
        return pattern.fullmatch(name) is not None

    # Normalise to a glob list whose first entry is inclusive.
    globs = [part.strip() for part in pattern.split(',')]
    if not globs or globs[0].startswith('!'):
        globs = ['*'] + globs

    # The initial match decides whether later inclusive globs may re-match.
    initially_matched = fnmatch.fnmatch(name, globs[0])
    matched = initially_matched
    for glob_expr in globs[1:]:
        if glob_expr.startswith('!'):
            # Exclusive glob: un-match the name if it currently matches.
            if fnmatch.fnmatch(name, glob_expr.lstrip('!')):
                matched = False
        elif not matched and initially_matched and fnmatch.fnmatch(name, glob_expr):
            # Inclusive glob: only restores names excluded after the initial match.
            matched = True
    return matched
# Detect conventional API versions for some cases: e.g. in "myresources.v1alpha1.example.com".
# Non-conventional versions are indistinguishable from API groups ("myresources.foo1.example.com").
# Matches e.g. "v1", "v2", "v1alpha1", "v2beta3"; does NOT match "v1alpha" (digits required).
# See also: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/
K8S_VERSION_PATTERN = re.compile(r'^v\d+(?:(?:alpha|beta)\d+)?$')
@dataclasses.dataclass(frozen=True, eq=False, repr=False)
class Resource:
    """
    A reference to a very specific custom or built-in resource kind.

    It is used to form the K8s API URLs. Generally, K8s API only needs
    an API group, an API version, and a plural name of the resource.
    All other names are remembered to match against resource selectors,
    for logging, and for informational purposes.
    """

    group: str
    """
    The resource's API group; e.g. ``"kopf.dev"``, ``"apps"``, ``"batch"``.
    For Core v1 API resources, an empty string: ``""``.
    """

    version: str
    """
    The resource's API version; e.g. ``"v1"``, ``"v1beta1"``, etc.
    """

    plural: str
    """
    The resource's plural name; e.g. ``"pods"``, ``"kopfexamples"``.
    It is used as an API endpoint, together with API group & version.
    """

    kind: Optional[str] = None
    """
    The resource's kind (as in YAML files); e.g. ``"Pod"``, ``"KopfExample"``.
    """

    singular: Optional[str] = None
    """
    The resource's singular name; e.g. ``"pod"``, ``"kopfexample"``.
    """

    shortcuts: FrozenSet[str] = frozenset()
    """
    The resource's short names; e.g. ``{"po"}``, ``{"kex", "kexes"}``.
    """

    categories: FrozenSet[str] = frozenset()
    """
    The resource's categories, to which the resource belongs; e.g. ``{"all"}``.
    """

    subresources: FrozenSet[str] = frozenset()
    """
    The resource's subresources, if defined; e.g. ``{"status", "scale"}``.
    """

    namespaced: Optional[bool] = None
    """
    Whether the resource is namespaced (``True``) or cluster-scoped (``False``).
    """

    preferred: bool = True  # against conventions, but makes versionless selectors match by default.
    """
    Whether the resource belong to a "preferred" API version.
    Only "preferred" resources are served when the version is not specified.
    """

    verbs: FrozenSet[str] = frozenset()
    """
    All available verbs for the resource, as supported by K8s API;
    e.g., ``{"list", "watch", "create", "update", "delete", "patch"}``.
    Note that it is not the same as all verbs permitted by RBAC.
    """

    def __hash__(self) -> int:
        # Identity is the API-addressable triple only; all other fields are informational.
        return hash((self.group, self.version, self.plural))

    def __eq__(self, other: object) -> bool:
        # Equality mirrors __hash__: two resources are the same if they share
        # the (group, version, plural) triple, regardless of auxiliary names.
        if isinstance(other, Resource):
            self_tuple = (self.group, self.version, self.plural)
            other_tuple = (other.group, other.version, other.plural)
            return self_tuple == other_tuple
        else:
            return NotImplemented

    def __repr__(self) -> str:
        # E.g. "kopfexamples.v1.kopf.dev" or "pods.v1" (empty group stripped),
        # with subresources appended as "plural.../sub".
        plural_main, *subs = self.plural.split('/')
        name_text = f'{plural_main}.{self.version}.{self.group}'.strip('.')
        subs_text = f'/{"/".join(subs)}' if subs else ''
        return f'{name_text}{subs_text}'

    # Mostly for tests, to be used as `@kopf.on.event(*resource, ...)`
    def __iter__(self) -> Iterator[str]:
        return iter((self.group, self.version, self.plural))

    def get_url(
            self,
            *,
            server: Optional[str] = None,
            namespace: Namespace = None,
            name: Optional[str] = None,
            subresource: Optional[str] = None,
            params: Optional[Mapping[str, str]] = None,
    ) -> str:
        """
        Build a URL to be used with K8s API.

        If the namespace is not set, a cluster-wide URL is returned.
        For cluster-scoped resources, the namespace is ignored.

        If the name is not set, the URL for the resource list is returned.
        Otherwise (if set), the URL for the individual resource is returned.

        If subresource is set, that subresource's URL is returned,
        regardless of whether such a subresource is known or not.

        Params go to the query parameters (``?param1=value1&param2=value2...``).

        Raises :class:`ValueError` for unsupported namespace/name combinations.
        """
        if subresource is not None and name is None:
            raise ValueError("Subresources can be used only with specific resources by their name.")
        if not self.namespaced and namespace is not None:
            # Fixed: was an f-string with no placeholders (flake8 F541); same message.
            raise ValueError("Specific namespaces are not supported for cluster-scoped resources.")
        if self.namespaced and namespace is None and name is not None:
            raise ValueError("Specific namespaces are required for specific namespaced resources.")

        # Core v1 lives under /api; everything else under /apis/{group}.
        # None/empty parts (e.g. the empty core group) are filtered out below.
        parts: List[Optional[str]] = [
            '/api' if self.group == '' and self.version == 'v1' else '/apis',
            self.group,
            self.version,
            'namespaces' if self.namespaced and namespace is not None else None,
            namespace if self.namespaced and namespace is not None else None,
            self.plural,
            name,
            subresource,
        ]
        query = urllib.parse.urlencode(params, encoding='utf-8') if params else ''
        path = '/'.join([part for part in parts if part])
        url = path + ('?' if query else '') + query
        return url if server is None else server.rstrip('/') + '/' + url.lstrip('/')
class Marker(enum.Enum):
    """
    A special marker to handle all resources possible, built-in and custom.
    """
    EVERYTHING = enum.auto()


# An explicit catch-all marker for positional arguments of resource selectors.
# Re-exported at module level so callers can use `EVERYTHING` without naming the enum class.
EVERYTHING = Marker.EVERYTHING
@dataclasses.dataclass(frozen=True)
class Selector:
    """
    A resource specification that can match several resource kinds.

    The resource specifications are not usable in K8s API calls, as the API
    has no endpoints with masks or placeholders for unknown or catch-all
    resource identifying parts (e.g. any API group, any API version, any name).

    They are used only locally in the operator to match against the actual
    resources with specific names (:class:`Resource`). The handlers are
    defined with resource specifications, but are invoked with specific
    resource kinds. Even if those specifications look very concrete and allow
    no variations, they still remain specifications.
    """

    # Positional pseudo-fields: consumed by __post_init__ to populate the real
    # fields below; never stored on the instance (dataclasses.InitVar).
    arg1: dataclasses.InitVar[Union[None, str, Marker]] = None
    arg2: dataclasses.InitVar[Union[None, str, Marker]] = None
    arg3: dataclasses.InitVar[Union[None, str, Marker]] = None
    argN: dataclasses.InitVar[None] = None  # a runtime guard against too many positional arguments

    # At most one of the name-ish fields (kind/plural/singular/shortcut/category/any_name)
    # may end up set; this is enforced at the end of __post_init__.
    group: Optional[str] = None
    version: Optional[str] = None
    kind: Optional[str] = None
    plural: Optional[str] = None
    singular: Optional[str] = None
    shortcut: Optional[str] = None
    category: Optional[str] = None
    any_name: Optional[Union[str, Marker]] = None

    def __post_init__(
            self,
            arg1: Union[None, str, Marker],
            arg2: Union[None, str, Marker],
            arg3: Union[None, str, Marker],
            argN: None,  # a runtime guard against too many positional arguments
    ) -> None:
        # Since the class is frozen & read-only, post-creation field adjustment is done via a hack.
        # This is the same hack as used in the frozen dataclasses to initialise their fields.
        if argN is not None:
            raise TypeError("Too many positional arguments. Max 3 positional args are accepted.")
        elif arg3 is not None:
            # Three positionals: (group, version, name).
            object.__setattr__(self, 'group', arg1)
            object.__setattr__(self, 'version', arg2)
            object.__setattr__(self, 'any_name', arg3)
        elif arg2 is not None and isinstance(arg1, str) and '/' in arg1:
            # Two positionals, slashed first: ("group/version", name).
            object.__setattr__(self, 'group', arg1.rsplit('/', 1)[0])
            object.__setattr__(self, 'version', arg1.rsplit('/')[-1])
            object.__setattr__(self, 'any_name', arg2)
        elif arg2 is not None and arg1 == 'v1':
            # Two positionals, core API: ("v1", name) -> empty group.
            object.__setattr__(self, 'group', '')
            object.__setattr__(self, 'version', arg1)
            object.__setattr__(self, 'any_name', arg2)
        elif arg2 is not None:
            # Two positionals: (group, name) -- any/preferred version of that group.
            object.__setattr__(self, 'group', arg1)
            object.__setattr__(self, 'any_name', arg2)
        elif arg1 is not None and isinstance(arg1, Marker):
            # A single catch-all marker, e.g. Selector(EVERYTHING).
            object.__setattr__(self, 'any_name', arg1)
        elif arg1 is not None and '.' in arg1 and K8S_VERSION_PATTERN.match(arg1.split('.')[1]):
            # A single dotted name with a conventional version: "name.v1[.group.tld]".
            if len(arg1.split('.')) >= 3:
                object.__setattr__(self, 'group', arg1.split('.', 2)[2])
            object.__setattr__(self, 'version', arg1.split('.')[1])
            object.__setattr__(self, 'any_name', arg1.split('.')[0])
        elif arg1 is not None and '.' in arg1:
            # A single dotted name without a version: "name.group.tld".
            object.__setattr__(self, 'group', arg1.split('.', 1)[1])
            object.__setattr__(self, 'any_name', arg1.split('.')[0])
        elif arg1 is not None:
            # A single bare name: matches kind/plural/singular/shortcuts.
            object.__setattr__(self, 'any_name', arg1)

        # Verify that explicit & interpreted arguments have produced an unambiguous specification.
        names = [self.kind, self.plural, self.singular, self.shortcut, self.category, self.any_name]
        clean = [name for name in names if name is not None]
        if len(clean) > 1:
            raise TypeError(f"Ambiguous resource specification with names {clean}")
        if len(clean) < 1:
            raise TypeError(f"Unspecific resource with no names.")

        # For reasons unknown, the singular is empty for ALL builtin resources. This does not affect
        # the checks unless defined as e.g. ``singular=""``, which would match ALL builtins at once.
        # Thus we prohibit it until clarified why is it so, what does it mean, how to deal with it.
        if any([name == '' for name in names]):
            raise TypeError("Names must not be empty strings; either None or specific strings.")

    def __repr__(self) -> str:
        # Render only the fields that are actually set, e.g. Selector(any_name='pods').
        kwargs = {f.name: getattr(self, f.name) for f in dataclasses.fields(self)}
        kwtext = ', '.join([f'{key!s}={val!r}' for key, val in kwargs.items() if val is not None])
        clsname = self.__class__.__name__
        return f'{clsname}({kwtext})'

    @property
    def is_specific(self) -> bool:
        # "Specific" = identified by an actual name (not only by a category or
        # a catch-all marker); used by select() for the core-v1 priority rule.
        return (self.kind is not None or
                self.shortcut is not None or
                self.plural is not None or
                self.singular is not None or
                (self.any_name is not None and not isinstance(self.any_name, Marker)))

    def check(self, resource: Resource) -> bool:
        """
        Check if a specific resources matches this resource specification.
        """
        # None-valued criteria are treated as "any"; a None version additionally
        # requires the resource to be of the cluster-preferred API version.
        # Core v1 events are excluded from EVERYTHING: they are implicitly produced during handling,
        # and thus trigger unnecessary handling cycles (even for other resources, not for events).
        return (
            (self.group is None or self.group == resource.group) and
            ((self.version is None and resource.preferred) or self.version == resource.version) and
            (self.kind is None or self.kind == resource.kind) and
            (self.plural is None or self.plural == resource.plural) and
            (self.singular is None or self.singular == resource.singular) and
            (self.category is None or self.category in resource.categories) and
            (self.shortcut is None or self.shortcut in resource.shortcuts) and
            (self.any_name is None or
             self.any_name == resource.kind or
             self.any_name == resource.plural or
             self.any_name == resource.singular or
             self.any_name in resource.shortcuts or
             (self.any_name is Marker.EVERYTHING and
              not EVENTS.check(resource) and
              not EVENTS_K8S.check(resource))))

    def select(self, resources: Collection[Resource]) -> Collection[Resource]:
        # Keep every matching resource, then optionally collapse to core v1 only.
        result = {resource for resource in resources if self.check(resource)}

        # Core v1 API group's priority is hard-coded in K8s and kubectl. Do the same. For example:
        # whenever "pods" is specified, and "pods.v1" & "pods.v1beta1.metrics.k8s.io" are found,
        # implicitly give priority to "v1" and hide the existence of non-"v1" groups.
        # But not if they are specified by categories! -- In that case, keep all resources as is.
        if self.is_specific:
            v1only = {resource for resource in result if resource.group == ''}
            result = v1only or result
        return result
# Some predefined API endpoints that we use in the framework itself (not exposed to the operators).
# Note: the CRDs are versionless: we do not look into its ``spec`` stanza, we only watch for
# the fact of changes, so the schema does not matter, any cluster-preferred API version would work.
# Note: the peering resources are either zalando.org/v1 or kopf.dev/v1; both cannot co-exist because
# they would share the names, so K8s will not let this. It is done for domain name transitioning.
CRDS = Selector('apiextensions.k8s.io', 'customresourcedefinitions')
EVENTS = Selector('v1', 'events')
EVENTS_K8S = Selector('events.k8s.io', 'events')  # only for exclusion from EVERYTHING
NAMESPACES = Selector('v1', 'namespaces')
# Name-only selectors (no group/version): match whichever transitional API group exists.
CLUSTER_PEERINGS = Selector('clusterkopfpeerings')
NAMESPACED_PEERINGS = Selector('kopfpeerings')
class Backbone(Mapping[Selector, Resource]):
    """
    Actual resources used in the core (reactor & engines) of the framework.

    The codebase refers to the backbone resources only via their selectors
    (API group/version & names); the concrete resources can differ between
    clusters, usually by version (e.g. "v1" vs. "v1beta1" for CRDs), and are
    detected during the initial cluster scan at the operator startup
    in :func:`resource_scanner`.

    Once found, a backbone resource is never replaced at runtime: the core
    tasks are already started with that resource definition and cannot be
    easily restarted. (This restriction does not apply to the resources of
    the operator -- not the framework! -- which remain fully dynamic.)
    """

    def __init__(self) -> None:
        super().__init__()
        self._found: MutableMapping[Selector, Resource] = {}
        self._changed = asyncio.Condition()
        self.selectors = [NAMESPACES, EVENTS, CRDS, CLUSTER_PEERINGS, NAMESPACED_PEERINGS]

    def __len__(self) -> int:
        return len(self._found)

    def __iter__(self) -> Iterator[Selector]:
        return iter(self._found)

    def __getitem__(self, item: Selector) -> Resource:
        return self._found[item]

    async def fill(
            self,
            *,
            resources: Iterable[Resource],
    ) -> None:
        # Record each scanned resource under every still-unresolved selector it
        # satisfies (first matching resource wins), then wake all waiters.
        async with self._changed:
            for resource in resources:
                vacant = (s for s in self.selectors if s not in self._found)
                for selector in [s for s in vacant if s.check(resource)]:
                    self._found[selector] = resource
            self._changed.notify_all()

    async def wait_for(
            self,
            selector: Selector,
    ) -> Resource:
        """
        Wait for the actual resource to be found in the cluster scanning.

        The resources can be cached in-memory. Once the resource is retrieved,
        it never changes in memory even if it changes in the cluster. This is
        intentional -- to match with the nature of the cluster scanning,
        which waits for the resources and then starts background jobs,
        which are not easy to terminate without terminating the whole operator.
        """
        async with self._changed:
            await self._changed.wait_for(lambda: selector in self)
            return self[selector]
@dataclasses.dataclass(frozen=True)
class Insights:
    """
    Actual resources & namespaces served by the operator.
    """

    # The dataclass is frozen, but the contained sets/objects are mutable and
    # are updated in place as the cluster observation progresses.
    namespaces: Set[Namespace] = dataclasses.field(default_factory=set)
    resources: Set[Resource] = dataclasses.field(default_factory=set)
    backbone: Backbone = dataclasses.field(default_factory=Backbone)

    # Signalled when anything changes in the insights.
    revised: asyncio.Condition = dataclasses.field(default_factory=asyncio.Condition)

    # The flags that are set after the initial listing is finished. Not cleared afterwards.
    ready_namespaces: asyncio.Event = dataclasses.field(default_factory=asyncio.Event)
    ready_resources: asyncio.Event = dataclasses.field(default_factory=asyncio.Event)
|
<filename>utils/sampling_utils.py
import numpy as np
import tensorflow as tf
from scipy import ndimage, misc
from utils.projection_utils import *
def depth_transform_inv(t_depth, near, far):
    """Map a normalized log-depth back to metric depth.

    Inverse of :func:`depth_transform`, which computes
    ``t = log(d + 1) / log(far + 1)``; hence ``d = exp(t * log(far + 1)) - 1``.

    Bug fix: the original wrote ``np.exp(t * log(far + 1) - 1)`` with the
    ``- 1`` inside the exponential, yielding ``(far + 1)**t / e`` -- not the
    inverse of ``depth_transform``.

    ``near`` is unused (kept for signature symmetry with ``depth_transform``).
    """
    return np.exp(t_depth * np.log(far + 1)) - 1
def depth_transform(depth, near, far):
    """Normalize metric depth to [0, 1] on a logarithmic scale:
    ``log(depth + 1) / log(far + 1)``. ``near`` is unused."""
    # log1p(x) == log(x + 1), computed accurately for small x.
    return np.log1p(depth) / np.log1p(far)
def importance_sample_error(error_maps, N_samples):
    """Importance-sample one frame and ``N_samples`` pixel locations,
    both with probability proportional to the per-pixel error.

    error_maps: array of shape (frames, H, W) with non-negative errors.
    Returns ``(frame_index, pixels)`` where ``pixels`` is an (N_samples, 2)
    int32 array in (row, col) order.
    """
    width = error_maps.shape[-1]

    # Pick the frame: the total error per frame is the (unnormalized) pmf.
    per_frame_error = np.sum(error_maps, axis=(1, 2))
    frame_bins = np.arange(error_maps.shape[0]).astype(np.float32)
    frame_index = int(np.squeeze(np.round(sample_pdf(frame_bins, per_frame_error, 1))))

    # Pick the pixels within that frame from its flattened error map.
    flat_error = np.reshape(error_maps[frame_index], [-1])
    pixel_bins = np.arange(flat_error.shape[0]).astype(np.float32)
    flat_index = np.squeeze(np.round(sample_pdf(pixel_bins, flat_error, N_samples)))

    # Unflatten: row = index // W, col = index % W.
    rows, cols = np.divmod(flat_index, width)
    pixels = np.stack([rows, cols], axis=-1).astype(np.int32)
    return frame_index, pixels
def sample_pdf(bins, weights, N_samples, det=False, base_uncertainty=1e-5):
    """Draw ``N_samples`` values via inverse-transform sampling of the
    piecewise distribution defined by ``weights`` over ``bins``.

    bins:    bin centers/edges along the last axis (len(weights) + 1 entries
             used after the CDF gets a leading zero).
    weights: non-negative, unnormalized probabilities per bin.
    det:     if True, use evenly spaced u-values instead of random ones.
    base_uncertainty: small floor added to every weight to prevent NaNs
             from all-zero weights.
    """
    # Get pdf.
    # Bug fix: use out-of-place addition -- `weights += base_uncertainty`
    # mutated the caller's numpy array in place (importance_sample_error
    # passes views into its error maps, which were silently incremented
    # on every call).
    weights = weights + base_uncertainty  # prevent nans
    pdf = weights / tf.reduce_sum(weights, -1, keepdims=True)
    cdf = tf.cumsum(pdf, -1)
    cdf = tf.concat([tf.zeros_like(cdf[..., :1]), cdf], -1)

    # Take uniform samples (deterministic, evenly spaced, if det=True)
    if det:
        u = tf.linspace(0., 1., N_samples)
        u = tf.broadcast_to(u, list(cdf.shape[:-1]) + [N_samples])
    else:
        u = tf.random.uniform(list(cdf.shape[:-1]) + [N_samples])

    # Invert CDF: locate the bracketing CDF entries and interpolate linearly.
    inds = tf.searchsorted(cdf, u, side='right')
    below = tf.maximum(0, inds-1)
    above = tf.minimum(cdf.shape[-1]-1, inds)
    inds_g = tf.stack([below, above], -1)
    cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
    bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
    denom = (cdf_g[..., 1] - cdf_g[..., 0])
    # Guard against division by ~zero in flat regions of the CDF.
    denom = tf.where(denom < 1e-5, tf.ones_like(denom), denom)
    t = (u - cdf_g[..., 0]) / denom
    samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
    return samples
def coarse_samples_np(near, far, args):
    """Generate ``args.N_samples`` coarse sample depths between near and far
    (numpy). With ``args.lindisp`` the spacing is linear in inverse depth
    (disparity); otherwise linear in depth."""
    fractions = np.linspace(0., 1., args.N_samples)
    if args.lindisp:
        # Interpolate in disparity space, then invert back to depth.
        return 1. / (1. / near * (1. - fractions) + 1. / far * (fractions))
    return near * (1. - fractions) + far * (fractions)
def shadow_samples(near, far, args):
    """Linearly spaced shadow-ray sample depths between per-ray near/far bounds.

    near/far: tensors of per-ray bounds; returns shape near.shape + (N_shadow_samples,).
    """
    fractions = tf.cast(tf.linspace(0., 1., args.N_shadow_samples), near.dtype)[None]
    lo = near[..., None]
    hi = far[..., None]
    return lo * (1. - fractions) + hi * (fractions)
# NOTE(review): this is an exact duplicate of `coarse_samples_np` defined above;
# being later in the module, it silently shadows the first definition.
# One of the two copies can be deleted.
def coarse_samples_np(near, far, args):
    min_depth = near
    max_depth = far

    # Generate samples
    t_vals = np.linspace(0., 1., args.N_samples)
    if not args.lindisp:
        # Linear in depth.
        z_vals = min_depth * (1. - t_vals) + max_depth * (t_vals)
    else:
        # Linear in disparity (inverse depth).
        z_vals = 1. / (1. / min_depth * (1. - t_vals) + 1. / max_depth * (t_vals))
    return z_vals
def coarse_samples(near, far, args):
    """Generate ``args.N_samples`` coarse sample depths between near and far
    (TensorFlow). With ``args.lindisp`` the spacing is linear in inverse
    depth (disparity); otherwise linear in depth."""
    fractions = tf.cast(tf.linspace(0., 1., args.N_samples), near.dtype)
    if args.lindisp:
        # Interpolate in disparity space, then invert back to depth.
        return 1. / (1. / near * (1. - fractions) + 1. / far * (fractions))
    return near * (1. - fractions) + far * (fractions)
def perturb_samples(z_vals):
    """Stratified jitter: redraw each depth uniformly within its own interval.

    Interval edges are the midpoints between neighbouring samples; the first
    and last edges are the original endpoint depths themselves.
    """
    midpoints = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
    interval_hi = tf.concat([midpoints, z_vals[..., -1:]], -1)
    interval_lo = tf.concat([z_vals[..., :1], midpoints], -1)
    jitter = tf.random.uniform(z_vals.shape)
    return interval_lo + (interval_hi - interval_lo) * jitter
def fine_samples(coarse_z_vals, near, far, weights, chunk_inputs, args):
    """Draw ``args.N_importance`` fine sample depths from the coarse-pass
    weights via inverse-CDF sampling between the coarse-depth midpoints.

    If ``args.use_depth_sampling`` is enabled and ``chunk_inputs`` carries a
    non-None 'sampling_volume', it replaces the NeRF weights.
    ``near``/``far`` are unused here (kept for interface symmetry).
    """
    # Optionally override the weights with an externally supplied sampling volume.
    has_volume = 'sampling_volume' in chunk_inputs \
        and (chunk_inputs['sampling_volume'] is not None)
    if has_volume and args.use_depth_sampling:
        weights = tf.reshape(chunk_inputs['sampling_volume'], [-1, args.N_samples])

    # Bins are the midpoints between coarse depths; edge weights are dropped
    # to match the interior bins.
    midpoints = .5 * (coarse_z_vals[..., 1:] + coarse_z_vals[..., :-1])
    new_z_vals = sample_pdf(
        midpoints, weights[..., 1:-1], args.N_importance,
        det=(args.perturb == 0.), base_uncertainty=args.base_uncertainty
    )

    # No gradients flow through the chosen sampling locations.
    return tf.stop_gradient(new_z_vals)
def shadow_fine_samples(coarse_z_vals, near, far, weights, args):
    """Draw ``args.N_shadow_samples`` fine shadow-ray depths from the coarse
    weights via inverse-CDF sampling between the coarse-depth midpoints.
    ``near``/``far`` are unused here (kept for interface symmetry)."""
    midpoints = .5 * (coarse_z_vals[..., 1:] + coarse_z_vals[..., :-1])
    new_z_vals = sample_pdf(
        midpoints, weights[..., 1:-1], args.N_shadow_samples,
        det=(args.perturb == 0.), base_uncertainty=args.base_uncertainty
    )
    # No gradients flow through the chosen sampling locations.
    return tf.stop_gradient(new_z_vals)
def repeat_int(x, num_repeats):
    """Repeat each element of the 1-D tensor ``x`` ``num_repeats`` times,
    keeping element order (e.g. [0, 1] with 2 repeats -> [0, 0, 1, 1])."""
    tiled = tf.tile(tf.expand_dims(x, axis=1), [1, num_repeats])
    flattened = tf.reshape(tiled, [-1])
    return flattened
def constant_weight(d, s):
    """Uniform weight: every neighbour contributes 1 regardless of distance.
    ``s`` (shift) is unused; accepted for weight-fn interface compatibility."""
    ones = tf.ones_like(d)
    return ones
def linear_weight(d, s):
    """Triangular (tent) weight: 1 at d == 0, falling linearly to 0 for |d| >= 1.
    ``s`` (shift) is unused; accepted for weight-fn interface compatibility."""
    tent = 1 - tf.abs(d)
    return tf.math.maximum(tent, 0.)
def euclidean_weight(k):
    """Build a 2-D radial tent weight function with support radius sqrt(2)*k.

    The returned closure maps (diff_x, shift_x, diff_y, shift_y) to
    ``max(1 - dist / (sqrt(2) * k), 0)`` with ``dist = hypot(diff_x, diff_y)``;
    the shift arguments are ignored (weight-fn interface compatibility).
    """
    def weight(diff_x, shift_x, diff_y, shift_y):
        squared = diff_x * diff_x + diff_y * diff_y
        dist = tf.math.sqrt(squared)
        return tf.math.maximum(1.0 - dist / (np.sqrt(2) * k), 0.0)
    return weight
def interpolate_image(
    image,
    pixels,
    shifts_x=list(range(-3, 5)),
    shifts_y=list(range(-3, 5)),
    weight_fn=euclidean_weight(3)
):
    """Gather-based weighted sampling of ``image`` at fractional ``pixels``.

    image:  (batch, height, width, channels) tensor.
    pixels: (batch, ph, pw, 2) tensor; the last axis is unpacked as (x, y).
    shifts_x, shifts_y: integer neighbourhood offsets around the floored pixel.
    weight_fn: maps (diff_x, shift_x, diff_y, shift_y) to per-pixel weights.
    Returns a (batch, ph, pw, channels) tensor of weight-normalized averages.

    NOTE(review): the defaults are evaluated once at import time (shared lists
    and one shared weight closure) -- read-only here, so harmless.
    NOTE(review): shifted indices are not clipped to the image bounds; assumes
    the sampled neighbourhood stays inside the image -- TODO confirm.
    """
    batch_size, pixels_height, pixels_width, _ = pixels.shape
    _, height, width, channels = image.shape
    image_shape = [batch_size, pixels_height, pixels_width, channels]
    shifts = [(sx, sy) for sx in shifts_x for sy in shifts_y]

    # Unstack and reshape: flatten the pixel grid into 1-D x/y coordinate lists.
    pixels = tf.transpose(pixels, [0, 3, 1, 2])
    pixels = tf.reshape(pixels, [batch_size, 2, -1])
    x, y = tf.unstack(pixels, axis=1)
    x = tf.reshape(x, [-1])
    y = tf.reshape(y, [-1])

    # Sample points: integer base coordinates of the neighbourhood.
    x0 = tf.cast(tf.floor(x), tf.int32)
    y0 = tf.cast(tf.floor(y), tf.int32)

    # Interpolate: accumulate weighted pixel values over all shifts.
    # `b` repeats the batch index once per flattened pixel for gather_nd.
    b = repeat_int(tf.range(batch_size), pixels_height * pixels_width)
    res = 0.
    total_weight = 0.
    for (sx, sy) in shifts:
        px, py = (x0 + sx, y0 + sy)
        diff_x, diff_y = (tf.abs(x - tf.cast(px, image.dtype)), tf.abs(y - tf.cast(py, image.dtype)))
        ind = tf.stack([b, py, px], axis=1)
        weight = weight_fn(diff_x, sx, diff_y, sy)[:, None]
        res += tf.gather_nd(image, ind) * weight
        total_weight += weight
    # Normalize by the accumulated weight; zero-weight pixels become zeros.
    res = tf.math.divide_no_nan(res, total_weight)
    return tf.reshape(res, image_shape)
def splat_image(
    image,
    out_shape,
    pixels,
    shifts_x=list(range(-3, 5)),
    shifts_y=list(range(-3, 5)),
    weight_fn=euclidean_weight(3)
):
    """Scatter-based counterpart of `interpolate_image`: splat the values of
    ``image`` onto an output grid at fractional ``pixels``.

    image:     (batch, h, w, channels) tensor of values to splat.
    out_shape: full output shape passed to tf.scatter_nd.
    pixels:    (batch, h, w, 2) tensor of target coordinates, unpacked (x, y).
    Returns the accumulated output of shape ``out_shape``.

    NOTE(review): unlike interpolation, the result is NOT divided by the total
    weight -- callers normalize later (e.g. via normalize_volume).
    NOTE(review): scattered indices are not clipped to the output bounds --
    TODO confirm inputs keep the neighbourhood inside ``out_shape``.
    """
    batch_size, pixels_height, pixels_width, _ = pixels.shape
    _, height, width, channels = image.shape
    image_shape = [batch_size, pixels_height, pixels_width, channels]  # NOTE(review): unused
    shifts = [(sx, sy) for sx in shifts_x for sy in shifts_y]

    # Unstack and reshape: flatten the target pixel grid into 1-D x/y lists.
    pixels = tf.transpose(pixels, [0, 3, 1, 2])
    pixels = tf.reshape(pixels, [batch_size, 2, -1])
    x, y = tf.unstack(pixels, axis=1)
    x = tf.reshape(x, [-1])
    y = tf.reshape(y, [-1])

    # Sample points: integer base coordinates of the neighbourhood.
    x0 = tf.cast(tf.floor(x), tf.int32)
    y0 = tf.cast(tf.floor(y), tf.int32)

    # Scatter for all shifts: each source value is spread over its neighbourhood.
    image = tf.reshape(image, [-1, channels])
    b = repeat_int(tf.range(batch_size), pixels_height * pixels_width)
    res = 0.0
    for (sx, sy) in shifts:
        px, py = x0 + sx, y0 + sy
        diff_x, diff_y = tf.abs(x - tf.cast(px, image.dtype)), tf.abs(y - tf.cast(py, image.dtype))

        # Pixel values
        weight = weight_fn(diff_x, sx, diff_y, sy)[..., None]
        ind = tf.stack([b, py, px], axis=1)
        res += tf.scatter_nd(ind, image * weight, out_shape)

    # Return
    return tf.reshape(res, out_shape)
def project_points(points, P):
    """Project 3-D points through matrix ``P`` and return 2-D pixel coords.

    Applies the perspective divide (x/w, y/w) via divide_no_nan; points whose
    last homogeneous component is negative (behind the camera) are mapped to
    (-1, -1). Returns shape ``points.shape[:-1] + (2,)``.
    """
    homogeneous = transform_points(points, P)
    xy = homogeneous[..., :2]
    w = homogeneous[..., -1:]
    pixels = tf.math.divide_no_nan(xy, w)
    # Sentinel (-1, -1) for points behind the camera plane.
    pixels = tf.where(w < 0, -tf.ones_like(pixels), pixels)
    return tf.reshape(pixels, points.shape[:-1] + (2,))
def depth_to_index_np(depth, near, far, args):
    """Map a scalar depth within [near, far] to a bin index in
    [0, args.N_samples - 1] (numpy; out-of-range depths are clamped)."""
    fraction = (depth - near) / (far - near)
    bin_index = np.floor(fraction * args.N_samples)
    return int(np.clip(bin_index, 0, args.N_samples - 1))
def depth_to_index(depth, near, far, args):
    """Map depths within [near, far] to int32 bin indices in
    [0, args.N_samples - 1] (TensorFlow; out-of-range depths are clamped)."""
    fraction = (depth - near) / (far - near)
    bin_index = tf.math.floor(fraction * args.N_samples)
    bin_index = tf.clip_by_value(bin_index, 0, args.N_samples - 1)
    return tf.cast(bin_index, tf.int32)
def normalize_volume_np(volume):
    """Normalize a probability volume along its last axis (numpy).

    Entries whose bin-sum is zero become NaN after the division and are
    replaced with zeros, so empty bins carry zero probability.

    Bug fix: the original called ``np.math.isnan`` -- an alias of the scalar
    ``math.isnan`` that raises ``TypeError`` on arrays (and the ``numpy.math``
    alias was removed from modern NumPy). The elementwise ``np.isnan`` is the
    correct function.
    """
    # Suppress the expected 0/0 warnings; the NaNs are handled right below.
    with np.errstate(invalid='ignore', divide='ignore'):
        volume = volume / (np.sum(volume, axis=-1, keepdims=True))
    return np.where(
        np.isnan(volume),
        np.zeros_like(volume),
        volume
    )
def normalize_volume(volume):
    """Normalize a probability volume along its last axis (TensorFlow);
    entries from zero-sum bins (NaN after division) become zeros."""
    totals = tf.reduce_sum(volume, axis=-1, keepdims=True)
    normalized = volume / totals
    return tf.where(
        tf.math.is_nan(normalized),
        tf.zeros_like(normalized),
        normalized
    )
def interp_sampling_volume(
    volume_probs, volume_poses,
    pose, H, W, K_inv, K_volume, near, far, args
):
    """Resample per-view probability volumes into a sampling volume for ``pose``.

    For each coarse depth z along the new pose's rays, the 3-D points are
    projected into every source volume; the probability stored at the matching
    pixel and depth bin is accumulated per sample index.

    Returns the per-depth probabilities stacked along the last axis
    (args.N_samples entries).
    """
    rays_o, rays_d = get_rays_matrix(H, W, pose @ K_inv)
    # Bug fix: coarse_samples() takes (near, far, args); the original passed a
    # stray extra positional argument (None), which raises a TypeError.
    z_vals = coarse_samples(near, far, args)

    pose = tf.cast(pose, tf.float32)
    K_inv = tf.cast(K_inv, tf.float32)
    K_volume = tf.cast(K_volume, tf.float32)
    near = tf.cast(near, tf.float32)
    far = tf.cast(far, tf.float32)
    z_vals = tf.cast(z_vals, tf.float32)
    rays_o = tf.cast(rays_o, tf.float32)
    rays_d = tf.cast(rays_d, tf.float32)

    # Ray-march volume: accumulate probability per depth sample.
    probs = [0.0 for z in z_vals]
    for volume_prob, v_pose in zip(volume_probs, volume_poses):
        v_pose = tf.cast(v_pose, tf.float32)
        inv_v_pose = tf.linalg.inv(v_pose)
        proj_matrix = K_volume @ inv_v_pose
        for i, z in enumerate(z_vals):
            points = rays_o + rays_d * z
            pixels = project_points(
                points, proj_matrix
            )
            # Bilinear-free lookup (single-tap) of the source volume at the
            # projected pixels.
            cur_volume_prob = interpolate_image(
                volume_prob[None],
                pixels[None],
                shifts_x=[0],
                shifts_y=[0]
            )

            # Accumulate probability from sampling volume: pick the source
            # view's depth bin of each point via a boolean one-hot mask.
            t_points = transform_points(points, inv_v_pose)
            look_up_z = t_points[..., -1]
            depth_ind = tf.cast(tf.one_hot(depth_to_index(look_up_z, near, far, args), args.N_samples), tf.bool)
            probs[i] += tf.reshape(cur_volume_prob[depth_ind[None]], cur_volume_prob.shape[:-1])
    return tf.stack(probs, axis=-1)
def splat_sampling_volume(
    depths, depth_poses, pose,
    ray_gen_fn, project_fn,
    H, W, near, far, args
):
    """Build a sampling volume for ``pose`` by splatting known depth maps.

    For every (depth map, camera pose) pair: lift the depth pixels to 3-D
    points, project them into the target ``pose``, bucket their z-values into
    args.N_samples one-hot depth bins, and splat those bins onto the target
    image plane. Returns the accumulated (unnormalized) probability volume.

    NOTE(review): ``H`` and ``W`` are unused in this body -- presumably kept
    for interface symmetry; verify against callers.
    """
    pose = tf.cast(pose, tf.float32)
    inv_pose = tf.linalg.inv(pose)
    near = tf.cast(near, tf.float32)
    far = tf.cast(far, tf.float32)

    # Depth projection
    probs = 0.0
    for depth, d_pose in zip(depths, depth_poses):
        # Pixels to splat to: the source view's 3-D points seen from `pose`.
        rays_o, rays_d = ray_gen_fn(d_pose)
        rays_o = tf.cast(rays_o, tf.float32)
        rays_d = tf.cast(rays_d, tf.float32)
        points = rays_o + rays_d * depth
        pixels = project_fn(points, pose)

        # Depth values to splat: z in the target camera's coordinate frame.
        t_points = transform_points(points, inv_pose)
        z_vals = t_points[..., -1]

        # Encode as probability volume
        depth_inds = depth_to_index(z_vals, near, far, args)
        depth_one_hot = tf.one_hot(depth_inds, args.N_samples)

        # Splat probabilities (single-tap splat: no neighbourhood spreading).
        probs += splat_image(
            depth_one_hot[None],
            depth_one_hot[None].shape,
            pixels[None],
            shifts_x=[0],
            shifts_y=[0]
        )
    return probs
def depth_to_sampling_volume(depth, near, far, args):
    """Convert a depth map into a blurred, per-pixel-normalized sampling volume.

    Each pixel's depth selects one of args.N_samples bins (one-hot); the
    volume is then blurred spatially and along depth, and renormalized.

    NOTE(review): scipy.ndimage is applied to a TF tensor; this relies on the
    eager tensor being convertible to a numpy array (the intermediate becomes
    numpy until normalize_volume re-enters TF ops) -- TODO confirm intended.
    """
    depth_inds = depth_to_index(depth, tf.cast(near, tf.float32), tf.cast(far, tf.float32), args)
    sampling_volume = tf.one_hot(depth_inds, args.N_samples)
    # Spatial blur over (H, W); sigma 0 keeps depth bins separate in this pass.
    sampling_volume = ndimage.gaussian_filter(sampling_volume, sigma=[5.0, 5.0, 0.0])
    # Separate 1-D blur across the depth bins.
    sampling_volume = ndimage.gaussian_filter1d(sampling_volume, sigma=5.0, axis=-1)
    sampling_volume = normalize_volume(sampling_volume)
    return sampling_volume
def splat_sampling_volume_from_views(
    train_sampling_volumes, train_sampling_depths,
    all_poses, test_pose,
    ray_gen_fn, project_fn,
    H, W, near, far, render_kwargs, args
):
    """Accumulate a sampling volume for ``test_pose`` by splatting the depth
    map of every training view, then blur (spatially and along depth) and
    normalize the result.

    NOTE(review): ``train_sampling_volumes`` is only used for its length, and
    ``render_kwargs`` is unused -- verify against callers before relying on them.
    NOTE(review): scipy.ndimage operates here on the accumulated tensor as a
    numpy array (eager conversion) -- TODO confirm intended.
    """
    # Sampling volume
    sampling_volume = 0.0
    for img_i in range(train_sampling_volumes.shape[0]):
        train_pose = all_poses[img_i]
        train_depth = train_sampling_depths[img_i][..., None]
        sampling_volume += splat_sampling_volume(
            [train_depth], [train_pose], test_pose,
            ray_gen_fn, project_fn,
            H, W, near, far, args
        )[0]
    # Spatial blur (sigma 5 px, none along depth), then depth blur (sigma 5 bins;
    # gaussian_filter1d defaults to the last axis).
    sampling_volume = ndimage.gaussian_filter(sampling_volume, sigma=[5.0, 5.0, 0.0])
    sampling_volume = ndimage.gaussian_filter1d(sampling_volume, sigma=5.0)
    sampling_volume = normalize_volume(sampling_volume)
    return sampling_volume
def sample_on_unit_sphere(N):
    """Draw ``N`` points uniformly distributed on the unit sphere in R^3.

    Normalizing i.i.d. Gaussian vectors yields a rotation-invariant (uniform)
    spherical distribution.

    Bug fix: ``tf.linalg.normalize`` returns a ``(normalized, norm)`` tuple;
    the original returned the whole tuple instead of just the points.
    """
    shape = (N, 3)
    points = tf.random.normal(shape)
    normalized, _norms = tf.linalg.normalize(points, axis=-1)
    return normalized
<filename>gbml/test/test_elasticity.py<gh_stars>1-10
#!/usr/bin/env python
# Test script for gbml elasticity (bulk and shear moduli) predictions
from gbml import elasticity
import unittest
from pymatgen import MPRester
# Instantiated at import time so the skipIf conditions below can inspect
# mpr.api_key. NOTE(review): this does work on import (and depends on the
# environment's API-key configuration) -- consider lazy construction.
mpr = MPRester()
import os  # NOTE(review): `os` appears unused in this chunk of the file.
# Use a Mock query engine to return the data
class MockQE(object):
    """Offline stand-in for a query engine.

    Serves a small fixed set of material records so the elasticity predictions
    can be tested without network access or an API key. Implements the
    context-manager protocol (no-op) to mirror the real engine's usage.
    """

    def __init__(self):
        pass

    # Canned query results, keyed by Materials Project material id.
    _materials = {
        'mp-10003': {u'energy_per_atom': -9.174497691666668,
                     u'is_hubbard': False,
                     u'material_id': u'mp-10003',
                     u'nsites': 12,
                     u'pretty_formula': u'Nb4CoSi',
                     u'volume': 194.5128160886403},
        'mp-10010': {u'energy_per_atom': -6.30060916,
                     u'is_hubbard': False,
                     u'material_id': u'mp-10010',
                     u'nsites': 5,
                     u'pretty_formula': u'Al(CoSi)2',
                     u'volume': 61.957194678711375},
        'mp-10015': {u'energy_per_atom': -8.66025992,
                     u'is_hubbard': False,
                     u'material_id': u'mp-10015',
                     u'nsites': 2,
                     u'pretty_formula': u'SiOs',
                     u'volume': 25.9156062823109},
        'mp-10018': {u'energy_per_atom': -4.0931096,
                     u'is_hubbard': False,
                     u'material_id': u'mp-10018',
                     u'nsites': 1,
                     u'pretty_formula': u'Ac',
                     u'volume': 45.384619900972496},
        'mp-10021': {u'energy_per_atom': -3.026048165,
                     u'is_hubbard': False,
                     u'material_id': u'mp-10021',
                     u'nsites': 2,
                     u'pretty_formula': u'Ga',
                     u'volume': 38.00766563190904},
        'mp-19306': {u'energy_per_atom': -6.709619538571429,
                     u'is_hubbard': True,
                     u'material_id': u'mp-19306',
                     u'nsites': 14,
                     u'pretty_formula': u'Fe3O4',
                     u'volume': 155.34118212181002},
        'mp-26': {u'energy_per_atom': -4.9257722625,
                  u'is_hubbard': False,
                  u'material_id': u'mp-26',
                  u'nsites': 4,
                  u'pretty_formula': u'La',
                  u'volume': 148.5978601715663}
    }

    def query(self, criteria, properties):
        # Supports only the {"task_id": {"$in": [...]}} criteria shape used by
        # the elasticity predictors; unknown ids yield None entries in order.
        mid_list = criteria["task_id"]["$in"]
        return [ self._materials.get(mid, None) for mid in mid_list ]

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
def test_predict_k_g():
    """Single-material K/G prediction against the offline mock engine."""
    expected = (175.30512291338607, 84.49987188140813, '')
    actual = elasticity.predict_k_g("mp-10003", query_engine=MockQE())
    assert actual == expected
def test_predict_k_g_list():
    """Batch K/G prediction: ids, moduli and caveat strings must come back
    in the same order as the requested id list."""
    (expected_matid_list, expected_k_list, expected_g_list, expected_caveat_list) = (
        ['mp-10003', 'mp-10010', 'mp-10015', 'mp-10018', 'mp-10021', 'mp-19306', 'mp-26'],
        [175.30512291338607, 168.01218642160669, 265.96469661453744,
         45.15072359694464, 68.43138936905679, 136.86585554248228, 55.511505777303256],
        [84.49987188140813, 92.92207342120894, 118.409731828977, 19.816609506500367, 30.473676331990507,
         49.63871682171615, 24.379918816217213],
        ['', '', '',
         'Predictions are likely less reliable for materials containing F-block elements.',
         '',
         'Predictions may be less reliable for materials with non-GGA runs.',
         'Predictions are likely less reliable for materials containing F-block elements.'])
    mpID_list = ['mp-10003', 'mp-10010', 'mp-10015', 'mp-10018', 'mp-10021', 'mp-19306', 'mp-26']
    (matid_list, k_list, g_list, caveat_list) = elasticity.predict_k_g_list(mpID_list, query_engine=MockQE())
    assert (matid_list, k_list, g_list, caveat_list) == (expected_matid_list, expected_k_list, expected_g_list, expected_caveat_list)
@unittest.skipIf(mpr.api_key is None, reason="API key not defined")
def test_predict_k_g_remote():
    """Same single-material check as test_predict_k_g but against the real
    Materials Project API (skipped when no API key is configured)."""
    (expected_k_value, expected_g_value, expected_caveat_str) = (175.30512291338607, 84.49987188140813, '')
    mpID = "mp-10003"
    (k_value, g_value, caveat_str) = elasticity.predict_k_g(mpID, mpr.api_key)
    assert (k_value, g_value, caveat_str) == (expected_k_value, expected_g_value, expected_caveat_str)
@unittest.skipIf(mpr.api_key is None, reason="API key not defined")
def test_predict_k_g_list_remote():
    """Batch prediction against the real Materials Project API (skipped
    when no API key is configured)."""
    (expected_matid_list, expected_k_list, expected_g_list, expected_caveat_list) = (
        ['mp-10003', 'mp-10010', 'mp-10015', 'mp-10018', 'mp-10021', 'mp-19306', 'mp-26'],
        [175.30512291338607, 168.01218642160669, 265.96469661453744,
         45.15072359694464, 68.43138936905679, 136.86585554248228, 55.511505777303256],
        [84.49987188140813, 92.92207342120894, 118.409731828977, 19.816609506500367, 30.473676331990507,
         49.63871682171615, 24.379918816217213],
        ['', '', '',
         'Predictions are likely less reliable for materials containing F-block elements.',
         '',
         'Predictions may be less reliable for materials with non-GGA runs.',
         'Predictions are likely less reliable for materials containing F-block elements.'])
    mpID_list = ['mp-10003', 'mp-10010', 'mp-10015', 'mp-10018', 'mp-10021', 'mp-19306', 'mp-26']
    (matid_list, k_list, g_list, caveat_list) = elasticity.predict_k_g_list(mpID_list, mpr.api_key)
    assert (matid_list, k_list, g_list, caveat_list) == (expected_matid_list, expected_k_list, expected_g_list, expected_caveat_list)
if __name__ == '__main__':
    # NOTE(review): unittest.main() only discovers unittest.TestCase
    # subclasses; the module-level test_* functions above are pytest-style
    # and will NOT run through this entry point -- confirm intended runner.
    unittest.main()
|
<filename>code/test/apitest/tools/compare_pdf.py
#!/usr/bin/env python
# Copyright (c) 2005-2009 <NAME>
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
import copy
import fnmatch
import getopt
import glob
import hashlib
import heapq
import md5
import os
import re
import string
import sys
if sys.version_info[0]==2 and sys.version_info[1]<4:
from sets import Set as set
class RetCode:
    """Mutable holder for the script's exit status (defaults to 0)."""

    def __init__(self):
        self.c = 0  # current exit code

    def set(self, val):
        # Overwrite the stored exit code.
        self.c = val

    def get(self):
        # Report the stored exit code.
        return self.c
def validate_dir(dirname):
    """Return *dirname* as an absolute path ending with a path separator.

    Raises RuntimeError when the directory does not exist.
    """
    if not os.path.isdir(dirname):
        raise RuntimeError("Directory %s not found." % dirname)
    result = os.path.abspath(dirname)
    # Guarantee a trailing separator so callers can append file names.
    if result[-1] not in ('\\', '/'):
        result = result + '/'
    return result
def verify_file_list(dirname, filelist):
    """Check that the files under *dirname* match exactly those named in
    *filelist* (one relative path per line; blank lines and '#' comments
    are skipped; '*.log' files are ignored).

    Raises RuntimeError on any missing or extra file.
    """
    filelist = os.path.abspath(filelist)
    if not os.path.isfile(filelist):
        raise RuntimeError("Filelist '%s' not found." % filelist)
    # round up all existing files
    found_files = set()
    for dirpath, dirnames, filenames in os.walk(dirname):
        for fname in filenames:
            if not fname.endswith('.log'):
                found_files.add(os.path.abspath(os.path.join(dirpath, fname)))
    # The filelist itself lives in the tree; drop it before comparing.
    assert filelist in found_files
    found_files.remove(filelist)
    # round up all listed files
    listed_files = set()
    for line in open(filelist):
        line = line.strip()
        if not line or line.startswith('#'): continue
        fname = os.path.abspath(os.path.join(dirname, line))
        listed_files.add(fname)
    # on linux, ignore Windows only files
    if 'linux' in sys.platform:
        assert not fnmatch.filter(found_files, '*-windows-only.pdf')
        listed_files = set([f for f in listed_files if not fnmatch.fnmatch(f, '*-windows-only.pdf')])
    # analyze differences
    diff = listed_files.difference(found_files)
    if diff:
        raise RuntimeError("Required file(s) missing:\n  " + "\n  ".join(diff))
    diff = found_files.difference(listed_files)
    if diff:
        raise RuntimeError("Additional file(s) found:\n  " + "\n  ".join(diff))
def find_ranges(str1, str2):
    """Diff two equal-length strings.

    Returns a list of [begin, end) index pairs covering every maximal run
    of positions where the strings disagree.  Raises RuntimeError when the
    inputs differ in length.
    """
    if len(str1) != len(str2):
        raise RuntimeError("Cannot diff, the data have different size.")
    ranges = []
    start = -1  # -1 means no difference run is currently open
    for idx, pair in enumerate(zip(str1, str2)):
        if pair[0] != pair[1]:
            if start == -1:
                start = idx  # open a new run
        elif start != -1:
            ranges.append([start, idx])  # close the current run
            start = -1
    if start != -1:
        # The final run extends to the end of the strings.
        ranges.append([start, idx + 1])
    return ranges
def find_ranges_in_files(gooddir, fname):
    """Print "begin,end" byte offsets (one range per line) where *fname*
    differs from the file of the same basename in *gooddir*.
    """
    stem = os.path.basename(fname)
    goodfname = os.path.join(gooddir, stem)
    # open() replaces the py2-only builtin file(); the context manager
    # closes both handles (the original leaked them).  Binary mode keeps
    # the offsets byte-accurate on every platform.
    with open(fname, 'rb') as lhs, open(goodfname, 'rb') as rhs:
        for b, e in find_ranges(lhs.read(), rhs.read()):
            print("%s,%s" % (b, e))
def read_range_file(rng_file):
    """Read a ``.rng`` file of "begin,end" pairs, one pair per line.

    Returns a list of [begin, end] integer pairs; an empty list when the
    file does not exist.  Blank lines are skipped.
    """
    ranges = []
    if not os.path.isfile(rng_file):
        return ranges
    # open() replaces the py2-only builtin file(); the context manager
    # closes the handle deterministically (the original leaked it).
    with open(rng_file) as handle:
        for line in handle:
            line = line.strip()
            if line:
                parts = line.split(',')
                ranges.append([int(parts[0]), int(parts[1])])
    return ranges
def find_re_ranges(str1, str2, regexs):
    """Return [start, end] spans where the alternation of *regexs* matches
    both strings at identical positions; spans come from the *str1* side.
    """
    pattern = re.compile('|'.join(regexs), re.M)
    paired = zip(pattern.finditer(str1), pattern.finditer(str2))
    return [[left.start(), left.end()]
            for left, right in paired
            if left.span() == right.span()]
def find_re_ranges_for_txt(txt, regexs):
    """Return [start, end] spans of every match of the alternation of
    *regexs* (multi-line mode) in *txt*."""
    pattern = re.compile('|'.join(regexs), re.M)
    return [list(match.span()) for match in pattern.finditer(txt)]
def strip_ranges(str_, ranges):
    """Return *str_* with every [begin, end) span in *ranges* removed.

    *ranges* must be sorted and non-overlapping (as produced by
    find_ranges / merge_ranges).
    """
    if not ranges:
        return str_
    # Interleave the kept spans: [0, r0.begin], [r0.end, r1.begin], ...
    # len(str_) replaces the py2-only sys.maxint as the final sentinel, so
    # this also runs on Python 3 (slices clamp either way).
    flattened = [0] + [x for sublist in ranges for x in sublist] + [len(str_)]
    return ''.join([str_[flattened[i]:flattened[i + 1]]
                    for i in range(0, len(flattened), 2)])
def merge_ranges(*ranges):
    """Merge any number of [begin, end] range lists into one sorted list,
    coalescing overlapping or touching ranges."""
    pool = []
    for group in ranges:
        pool.extend(group)
    if not pool:
        return []
    pool.sort()
    merged = [pool[0]]
    for current in pool[1:]:
        previous = merged[-1]
        if current[0] <= previous[1]:
            # current overlaps or touches the previous range: extend it.
            previous[1] = max(previous[1], current[1])
        else:
            merged.append(current)
    return merged
def md5_on_ranges(str_, ranges):
    """Return the MD5 digest of *str_* with the given ranges stripped out."""
    # hashlib replaces the py2-only md5 module (available since py2.5 and
    # the only option on py3).
    digest = hashlib.md5()
    digest.update(strip_ranges(str_, ranges))
    return digest.digest()
def cmp_on_ranges(lhs, rhs, ranges):
    """Return True when *lhs* and *rhs* are identical everywhere outside
    the ignored *ranges*.  Different lengths never compare equal."""
    if len(lhs) != len(rhs):
        return False
    return md5_on_ranges(lhs, ranges) == md5_on_ranges(rhs, ranges)
# this function was refactored to be able to compare files with
# different file sizes, but at the moment this is not possible. The
# problem is that the offset to the object table is changed as well
# (not to mention the offsets inside the table)
class Xlator(dict):
    """Dict-based multi-string replacer: every key found in a text is
    substituted by its value in a single regex pass."""

    def _make_regex(self):
        # Alternation of all keys, escaped so each matches literally.
        return re.compile("|".join(re.escape(key) for key in self.keys()))

    def __call__(self, match):
        # re.sub callback: map the matched key to its replacement.
        return self[match.group(0)]

    def xlat(self, text):
        """Return *text* with every key replaced by its value."""
        return self._make_regex().sub(self, text)
def compare_files(lhs, rhs, ranges=[]):
    """Compare two PDF files, ignoring volatile fields and *ranges*.

    Returns '' when the files match, otherwise a short error string.
    NOTE(review): the returned message contains the typo "lenght";
    callers in this file only test truthiness, so it is cosmetic.
    """
    # Regex fragment for a PDF string literal (non-greedy, allows
    # escaped parentheses).
    pdfstr = '(.+?(?<![^\\\]\\\))'
    patterns = [\
        # subset font prefixes
        "/BaseFont/[A-Z]{6}\+",
        "/FontName/[A-Z]{6}\+",
        # creation date
        "/CreationDate \($pdfstr\)",
        # document id
        "/ID\[<[a-f0-9]{32}> <[a-f0-9]{32}>\]",
        # document producer
        "Producer \($pdfstr\)"]
    lhs_data = open(lhs).read()
    rhs_data = open(rhs).read()
    # make sure that ranges are not used, when file sizes differ
    assert not ranges or (len(lhs_data) == len(rhs_data))
    #string.Template supported since 2.4, using Xlator instead
    #patterns = [string.Template(p).substitute(locals()) for p in patterns]
    # Expand the $pdfstr placeholder inside each pattern.
    adict = {'$pdfstr': pdfstr }
    xlat = Xlator(adict)
    patterns = [xlat.xlat(p) for p in patterns]
    def calc_md5(data):
        # MD5 helper over raw data.
        m = md5.new()
        m.update(data)
        return m.digest()
    def get_stripped_data(data):
        # Drop the volatile spans plus any caller-supplied ranges.
        rngs = find_re_ranges_for_txt(data, patterns)
        rngs = merge_ranges(rngs, ranges)
        return strip_ranges(data, rngs)
    lhs_stripped = get_stripped_data(lhs_data)
    rhs_stripped = get_stripped_data(rhs_data)
    if len(lhs_stripped) != len(rhs_stripped):
        return 'wrong lenght'
    if calc_md5(lhs_stripped) != calc_md5(rhs_stripped):
        return 'wrong md5 sum'
    return ''
def verify(outdir, gooddir, files=None):
    """Compare every *.pdf in *outdir* against its counterpart in
    *gooddir*; print per-file status and return 0 (ok) or 1 (failures).

    NOTE(review): the *files* parameter is immediately overwritten by the
    glob below, so callers cannot restrict the file set -- confirm intent.
    """
    def err(msg):
        # py2 print statement; records the failure in the shared RetCode.
        print 'FAILED:', msg
        ret_code.set(1)
    ret_code = RetCode()
    files = glob.glob(outdir + '/*.pdf')
    files.sort()
    for i, fname in enumerate(files):
        fstem = os.path.basename(fname)
        goodfname = os.path.join(gooddir, fstem)
        # Files without a reference copy are silently skipped.
        if not os.path.isfile(goodfname):
            continue
        # Trailing comma: keep the status on the same line (py2 print).
        print '[%3d/%3d] %-50s' % (i+1, len(files), fstem),
        ranges = read_range_file(goodfname + '.rng')
        error = compare_files(fname, goodfname, ranges)
        if error:
            err(error)
        else:
            print 'ok'
    #     if os.path.getsize(fname) != os.path.getsize(goodfname):
    #         err('wrong length')
    #     else:
    #         ranges = read_range_file(goodfname + '.rng')
    #         if not compare_files(fname,goodfname,ranges):
    #             err('wrong md5 sum')
    #         else:
    #             print 'ok'
    return ret_code.get()
def test():
    """Self-tests for the range helpers (run manually from __main__)."""
    assert [[1,2]] == find_ranges('123', '1a3')
    assert [[1,2],[3,4]] == find_ranges('1234', '1a3b')
    assert [[1,3],[4,5]] == find_ranges('12234', '1aa3b')
    assert [] == find_ranges('12234', '12234')
    rngs = find_ranges('12234', '1aa3b')
    assert '13' == strip_ranges('12234', rngs)
    assert cmp_on_ranges('12234', '1aa3b', rngs)
    assert not cmp_on_ranges('1z2234', '1aa3bs', rngs)
    assert [[1,4],[5,7]] == find_re_ranges('1aaf12u',
                                           '1abz12g', ['a[a-z]+', '2[a-z]+'])
    assert [[0,3], [4,11]] == merge_ranges([[1, 2], [4, 8]], [[0, 1], [2, 3], [5, 6], [8, 11]])
    assert [[1,4]] == merge_ranges([[3,4]], find_re_ranges('aD12b',
                                                           'aD13b', ['D..']))
    assert [[1,4]] == merge_ranges([[1,4]], find_re_ranges('aABCa',
                                                           'aDEFa', ['[A-Z]+']))
helpik="""compare_pdfs.py [-l] outdir goodir
compare_pdfs.py -r file gooddir
-r prints byte offsets where files differ
-l consult file list (.outfiles in outdir) and report errors
"""
#############################################################
if __name__ == "__main__":
    # test()
    # sys.exit(0)
    cfg = dict(ranges=None,
               verifylist=None,
               outdir=None,
               gooddir=None)
    try:
        optlist, args = getopt.getopt(sys.argv[1:], "o:g:r:l", ['out-dir=',
                                                                'good-dir=',
                                                                'find-ranges=',
                                                                'verify-list'])
        # NOTE(review): args is a list here; it is shadowed below by
        # validate_dir(cfg['outdir']) -- confirm positional args are unused.
        outdir = args
    # py2-only "except X, e" syntax; a py3 port needs "as exc".
    except RuntimeError, exc:
        print exc
        sys.exit(2)
    try:
        for opt, val in optlist:
            if opt in ['-r', '--find-ranges']:
                cfg['ranges']=val
            #
            elif opt in ['-l', '--verify-list']:
                cfg['verifylist']=True
            elif opt in ['-o', '--out-dir']:
                cfg['outdir']=val
            elif opt in ['-g', '--good-dir']:
                cfg['gooddir']=val
        gooddir = validate_dir(cfg['gooddir'])
        if cfg['ranges']:
            # NOTE(review): find_ranges_in_files returns None, so retcode
            # is always None after this branch and verify() runs below.
            retcode = find_ranges_in_files(gooddir, cfg['ranges'])
        else:
            retcode = None
        outdir = validate_dir(cfg['outdir'])
        if cfg['verifylist']:
            retcode = verify_file_list(outdir, os.path.join(outdir,'.outfiles'))
        if retcode == None:
            retcode = verify(outdir, gooddir)
    except RuntimeError, exc:
        print exc
        retcode = 1
    if retcode:
        sys.exit(retcode)
|
import pygame
import time
import sudoku_game
pygame.font.init()
class Grid:
    """
    This class is all about developing the UI of the
    Sudoku game and updating the board and giving
    solved sudoku when asked
    """

    # NOTE(review): class-level attribute -- every Grid instance shares
    # (and update_grid() mutates) this one board; fine while only a single
    # Grid exists, confirm.
    board = sudoku_game.generate("easy")

    def __init__(self, rows, cols, width, height, win):
        """This function initializes all the required parameters
        for the sudoku game
        params : rows [int], cols [int], width [int], height [int], win [int]
        """
        self.rows = rows
        self.cols = cols
        # One Cube widget per board cell, seeded from the generated board.
        self.cubes = [
            [Cube(self.board[i][j], i, j, width, height) for j in range(cols)]
            for i in range(rows)
        ]
        self.width = width
        self.height = height
        self.model = None  # value-only 2-D snapshot used by valid()
        self.update_model()
        self.selected = None  # (row, col) of the currently selected cell
        self.win = win

    def update_model(self):
        """Update values of all cubes"""
        self.model = [
            [self.cubes[i][j].value for j in range(self.cols)]
            for i in range(self.rows)
        ]

    def place(self, val):
        """Updates the value of particular cell
        params : val : int
        returns : True when the move is legal, False when it violates the
        sudoku rules; implicitly None if the selected cell is already set
        """
        row, col = self.selected
        if self.cubes[row][col].value == 0:
            self.cubes[row][col].set(val)
            self.update_model()
            if valid(self.model, val, (row, col)):
                return True
            else:
                # Illegal move: roll back both real and sketched values.
                self.cubes[row][col].set(0)
                self.cubes[row][col].set_temp(0)
                self.update_model()
                return False

    def sketch(self, val):
        """Displays temporary value entered by the user on cell
        params : val : int
        """
        row, col = self.selected
        self.cubes[row][col].set_temp(val)

    def draw(self):
        """Draws entire board with grid lines and cell values"""
        # Draw Grid Lines
        gap = self.width / 9
        for i in range(self.rows + 1):
            # Thicker line every third row/column to outline the 3x3 boxes.
            if i % 3 == 0 and i != 0:
                thick = 4
            else:
                thick = 1
            pygame.draw.line(
                self.win, (0, 0, 0), (0, i * gap), (self.width, i * gap), thick
            )
            pygame.draw.line(
                self.win,
                (0, 0, 0),
                (i * gap, 0),
                (i * gap, self.height),
                thick,
            )
        # Draw Cubes
        for i in range(self.rows):
            for j in range(self.cols):
                self.cubes[i][j].draw(self.win)

    def select(self, row, col):
        """Marks the current selected cell by user"""
        # Reset all other
        for i in range(self.rows):
            for j in range(self.cols):
                self.cubes[i][j].selected = False
        self.cubes[row][col].selected = True
        self.selected = (row, col)

    def clear(self):
        """Clears the selected cell's temporary value"""
        row, col = self.selected
        if self.cubes[row][col].value == 0:
            self.cubes[row][col].set_temp(0)

    def click(self, pos):
        """
        param : pos : (x, y) pixel position of the mouse click
        return : (row, col) on the board, or None when outside the grid
        """
        if pos[0] < self.width and pos[1] < self.height:
            gap = self.width / 9
            x = pos[0] // gap
            y = pos[1] // gap
            return (int(y), int(x))
        else:
            return None

    def is_finished(self):
        """
        This function check whether the user has filled all the
        blank cubes or not.
        returns : boolean value
        """
        for i in range(self.rows):
            for j in range(self.cols):
                if self.cubes[i][j].value == 0:
                    return False
        return True

    def update_grid(self, new_board):
        """
        This function updates the values of
        the entire board
        params : new_board [list]
        """
        for row in range(9):
            for col in range(9):
                self.model[row][col] = new_board[row][col]
                self.cubes[row][col].set(new_board[row][col])
                # Redraw each cell as it changes for a fill-in animation.
                self.cubes[row][col].draw_change(self.win, True)
                self.board[row][col] = new_board[row][col]
                self.update_model()
                pygame.display.update()
                pygame.time.delay(5)
class Cube:
    """
    This class is all about developing different 81 cubes of
    sudoku game.
    """

    # Board dimensions shared by all cubes.
    rows = 9
    cols = 9

    def __init__(self, value, row, col, width, height):
        """Initializes the cube"""
        self.value = value  # committed value (0 = empty)
        self.temp = 0  # sketched (pencil) value shown in grey
        self.row = row
        self.col = col
        self.width = width
        self.height = height
        self.selected = False

    def draw(self, win):
        """
        This function draws the cubes in the given space.
        params : win (the space of the entire board) [list]
        """
        fnt = pygame.font.SysFont("comicsans", 40)
        gap = self.width / 9
        x = self.col * gap
        y = self.row * gap
        # Sketched value: grey, top-left corner of the cell.
        if self.temp != 0 and self.value == 0:
            text = fnt.render(str(self.temp), 1, (128, 128, 128))
            win.blit(text, (x + 5, y + 5))
        # Committed value: black, centered in the cell.
        elif not (self.value == 0):
            text = fnt.render(str(self.value), 1, (0, 0, 0))
            win.blit(
                text,
                (
                    x + (gap / 2 - text.get_width() / 2),
                    y + (gap / 2 - text.get_height() / 2),
                ),
            )
        if self.selected:
            # Red outline marks the currently selected cell.
            pygame.draw.rect(win, (255, 0, 0), (x, y, gap, gap), 3)

    def draw_change(self, win, by_user=True):
        """Draws changed value of the cell
        params : win : window
                 by_user : boolean (False : value entered by user)
        """
        fnt = pygame.font.SysFont("comicsans", 40)
        gap = self.width / 9
        x = self.col * gap
        y = self.row * gap
        # Blank the cell before drawing the new value.
        pygame.draw.rect(win, (255, 255, 255), (x, y, gap, gap), 0)
        text = fnt.render(str(self.value), 1, (0, 0, 0))
        win.blit(
            text,
            (
                x + (gap / 2 - text.get_width() / 2),
                y + (gap / 2 - text.get_height() / 2),
            ),
        )
        # Green border for accepted changes, red otherwise.
        if by_user:
            pygame.draw.rect(win, (0, 255, 0), (x, y, gap, gap), 3)
        else:
            pygame.draw.rect(win, (255, 0, 0), (x, y, gap, gap), 3)

    def set(self, val):
        """Sets the real value of cube
        params : val : int
        """
        self.value = val
        # Committing a value discards any sketch.
        self.temp = 0

    def set_temp(self, val):
        """Sets the temporary value of cube
        params : val : int
        """
        self.temp = val
def find_empty(board):
    """Locate the first empty cell (value 0), scanning row-major.

    Returns (row, col), or None when no cell is empty.  The column count
    is taken from the first row.
    """
    width = len(board[0]) if board else 0
    for r in range(len(board)):
        for c in range(width):
            if board[r][c] == 0:
                return (r, c)  # row, col
    return None
def valid(board, val, pos):
    """Check whether *val* may sit at *pos* without breaking sudoku rules.

    params : board : list (9x9 grid of ints)
             val : int
             pos : tuple (row, col) of the candidate cell
    returns : boolean -- True when no other cell in the same row, column
    or 3x3 box already holds *val* (the cell at *pos* itself is ignored).
    """
    row, col = pos
    # Row: any other cell in this row holding val?
    for j in range(len(board[0])):
        if j != col and board[row][j] == val:
            return False
    # Column: any other cell in this column holding val?
    for i in range(len(board)):
        if i != row and board[i][col] == val:
            return False
    # 3x3 box containing pos.
    box_row = 3 * (row // 3)
    box_col = 3 * (col // 3)
    for i in range(box_row, box_row + 3):
        for j in range(box_col, box_col + 3):
            if (i, j) != pos and board[i][j] == val:
                return False
    return True
def redraw_window(win, board, time, strikes):
    """
    This function draws various buttons, and displays time on
    the sudoku window
    params : win : window
             board : list (9x9 grid)
             time : time elapsed
             strikes : int (0 = no icon, 1 = red cross for a wrong move,
             2 = green tick for a correct move)
    """
    win.fill((255, 255, 255))
    # Light-blue backdrop behind the playing grid.
    pygame.draw.rect(win, (210, 255, 255), (0, 0, 560, 540), 0)
    # Draw time
    fnt = pygame.font.SysFont("comicsans", 40)
    text = fnt.render(format_time(time), 1, (0, 0, 0))
    win.blit(text, (375, 563))
    # Draw Easy Button
    text = fnt.render("Easy", 1, (0, 0, 0))
    pygame.draw.rect(win, (150, 150, 0), [20, 560, 80, 33], 0)
    win.blit(text, (30, 565))
    # Draw border for Easy Button
    pygame.draw.rect(win, (0, 0, 0), [20, 560, 80, 33], 4)
    # Draw Hard Button
    text = fnt.render("Hard", 1, (0, 0, 0))
    pygame.draw.rect(win, (150, 0, 0), [120, 560, 80, 33], 0)
    win.blit(text, (130, 565))
    # Draw border for Hard Button
    pygame.draw.rect(win, (0, 0, 0), [120, 560, 80, 33], 4)
    # Draw Solve Button
    text = fnt.render("Solve", 1, (0, 0, 0))
    pygame.draw.rect(win, (0, 150, 0), [220, 560, 90, 33], 0)
    win.blit(text, (230, 565))
    # Draw border for Solve Button
    pygame.draw.rect(win, (0, 0, 0), [220, 560, 90, 33], 4)
    # Draw Strikes
    if strikes == 1:
        # Wrong move: red cross.
        img = pygame.image.load("images/red_cross.png")
        img = pygame.transform.scale(img, (40, 40))
        win.blit(img, (330, 555))
    elif strikes == 2:
        # Correct move: green tick.
        img = pygame.image.load("images/green_tick.jpg")
        img = pygame.transform.scale(img, (40, 40))
        win.blit(img, (330, 555))
    # Draw grid and board
    board.draw()
def format_time(secs):
    """
    Format elapsed seconds for the window's timer slot.
    params : secs [int] -- elapsed seconds, or a status string
    returns : string "Time: H:M:S"; a string input (e.g. "You won!!!") is
    returned unchanged so status messages can reuse the timer display.
    """
    if isinstance(secs, str):
        return secs
    sec = secs % 60
    # Bug fix: minutes must wrap at 60 -- previously 3700 s rendered as
    # "Time: 1:61:40" because the hour part was not subtracted.
    minute = (secs // 60) % 60
    hour = secs // 3600
    mat = "Time: " + str(hour) + ":" + str(minute) + ":" + str(sec)
    return mat
def main():
    """Draws main window and handle events generated
    from user interactions
    """
    win = pygame.display.set_mode((540, 600))
    pygame.display.set_caption("Sudoku")
    board = Grid(9, 9, 540, 540, win)
    key = None  # digit pending as a sketch for the selected cell
    run = True
    start = time.time()
    strikes = 0
    while run:
        # start becomes the string "stop" after Solve; freeze the timer.
        if start != "stop":
            play_time = round(time.time() - start)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            if event.type == pygame.KEYDOWN:
                # Map both the number row and the keypad to digits 1-9.
                if event.key == pygame.K_1:
                    key = 1
                if event.key == pygame.K_2:
                    key = 2
                if event.key == pygame.K_3:
                    key = 3
                if event.key == pygame.K_4:
                    key = 4
                if event.key == pygame.K_5:
                    key = 5
                if event.key == pygame.K_6:
                    key = 6
                if event.key == pygame.K_7:
                    key = 7
                if event.key == pygame.K_8:
                    key = 8
                if event.key == pygame.K_9:
                    key = 9
                if event.key == pygame.K_KP1:
                    key = 1
                if event.key == pygame.K_KP2:
                    key = 2
                if event.key == pygame.K_KP3:
                    key = 3
                if event.key == pygame.K_KP4:
                    key = 4
                if event.key == pygame.K_KP5:
                    key = 5
                if event.key == pygame.K_KP6:
                    key = 6
                if event.key == pygame.K_KP7:
                    key = 7
                if event.key == pygame.K_KP8:
                    key = 8
                if event.key == pygame.K_KP9:
                    key = 9
                if event.key == pygame.K_DELETE:
                    board.clear()
                    key = None
                if event.key == pygame.K_SPACE:
                    # NOTE(review): Grid defines no solve_gui() in this
                    # file -- pressing SPACE raises AttributeError unless
                    # it is added elsewhere; confirm.
                    board.solve_gui()
                if event.key == pygame.K_RETURN:
                    # Commit the sketched value of the selected cell.
                    i, j = board.selected
                    if board.cubes[i][j].temp != 0:
                        if board.place(board.cubes[i][j].temp):
                            print("Success")
                            strikes = 2
                        else:
                            print("Wrong")
                            strikes = 1
                        key = None
                        if board.is_finished():
                            strikes = 0
                            print("Game over")
                            play_time = "You won!!!"
            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                clicked = board.click(pos)
                # Easy Button Clicked
                if 20 <= pos[0] <= 100 and 560 <= pos[1] <= 560 + 33:
                    new_board = sudoku_game.generate("easy")
                    board.update_grid(new_board)
                    start = time.time()
                    strikes = 0
                # Hard Button Clicked
                elif 120 <= pos[0] <= 200 and 560 <= pos[1] <= 560 + 33:
                    new_board = sudoku_game.generate("hard")
                    board.update_grid(new_board)
                    strikes = 0
                    start = time.time()
                # Solve Button Clicked
                elif 220 <= pos[0] <= 310 and 560 <= pos[1] <= 560 + 33:
                    solved_board = sudoku_game.solve(board.board)
                    board.update_grid(solved_board)
                    strikes = 0
                    play_time = "Solved!!!"
                    start = "stop"
                elif clicked:
                    board.select(clicked[0], clicked[1])
                    key = None
        if board.selected and key is not None:
            board.sketch(key)
        redraw_window(win, board, play_time, strikes)
        pygame.display.update()
if __name__ == "__main__":
    main()
    # Shut pygame down only after the main loop exits.
    pygame.quit()
|
<filename>housekeeper/cli/add.py
"""Module for adding via CLI"""
import datetime as dt
import logging
from logging import Logger
from pathlib import Path
from typing import List
import click
from housekeeper.date import get_date
from housekeeper.files import load_json, validate_input
LOG: Logger = logging.getLogger(__name__)
def validate_args(arg: str, json: str, arg_name: str) -> None:
    """Ensure exactly one of *arg* and *json* was supplied.

    Aborts the CLI command (click.Abort) when neither or both are given;
    *arg_name* is only used in the log messages.
    """
    if not arg and not json:
        LOG.error("Please input json or %s", arg_name)
        raise click.Abort
    if arg and json:
        LOG.warning("Can not input both json and %s", arg_name)
        raise click.Abort
# Parent click group; the subcommands below attach via @add.command(...).
@click.group()
def add():
    """Add things to the store."""
@add.command("bundle")
@click.argument("bundle_name", required=False)
@click.option("-j", "--json", help="Input json string")
@click.pass_context
def bundle_cmd(context: click.Context, bundle_name: str, json: str):
    """Add a new bundle."""
    LOG.info("Running add bundle")
    store = context.obj["store"]
    validate_args(arg=bundle_name, json=json, arg_name="bundle_name")
    data = {}
    data["name"] = bundle_name
    data["created_at"] = str(dt.datetime.now())
    # This is to preserve the behaviour of adding a bundle without providing all information
    if json:
        # A json payload replaces the defaults built above entirely.
        data = load_json(json)
        bundle_name = data["name"]
    if store.bundle(bundle_name):
        LOG.warning("bundle name %s already exists", bundle_name)
        raise click.Abort
    validate_input(data, input_type="bundle")
    # get_date parses the created_at string back into a datetime object.
    data["created_at"] = get_date(data.get("created_at"))
    if "files" not in data:
        data["files"] = []
    try:
        new_bundle, new_version = store.add_bundle(data)
    except FileNotFoundError as err:
        LOG.warning("File %s does not exist", err)
        raise click.Abort
    store.add_commit(new_bundle)
    # Link the freshly created version to its bundle before committing.
    new_version.bundle = new_bundle
    store.add_commit(new_version)
    LOG.info("new bundle added: %s (%s)", new_bundle.name, new_bundle.id)
@add.command("file")
@click.option("-t", "--tag", "tags", multiple=True, help="tag to associate the file by")
@click.option("-b", "--bundle-name", help="name of bundle that file should be added to")
@click.option("-j", "--json", help="json formated input")
@click.argument("path", required=False)
@click.pass_context
def file_cmd(context: click.Context, tags: List[str], bundle_name: str, json: str, path: str):
    """Add a file to the latest version of a bundle."""
    LOG.info("Running add file")
    store = context.obj["store"]
    validate_args(arg=path, json=json, arg_name="path")
    data = {}
    if json:
        data = load_json(json)
        validate_input(data, input_type="file")
    # json values take precedence over the corresponding CLI options.
    file_path = Path(data.get("path", path))
    if not file_path.exists():
        LOG.warning("File: %s does not exist", file_path)
        raise click.Abort
    bundle_name = data.get("bundle", bundle_name)
    bundle_obj = store.bundle(bundle_name)
    if bundle_obj is None:
        LOG.warning("unknown bundle: %s", bundle_name)
        raise click.Abort
    tags = data.get("tags", tags)
    new_file = store.add_file(file_path=file_path, bundle=bundle_obj, tags=tags)
    store.add_commit(new_file)
    LOG.info("new file added: %s (%s)", new_file.path, new_file.id)
@add.command("version")
@click.option("-j", "--json", help="Input in json format")
@click.option("--created-at", help="Date when created")
@click.argument("bundle_name", required=False)
@click.pass_context
def version_cmd(context: click.Context, bundle_name: str, created_at: str, json: str):
    """Add a new version to a bundle."""
    LOG.info("Running add version")
    store = context.obj["store"]
    validate_args(arg=bundle_name, json=json, arg_name="bundle_name")
    data = {}
    data["bundle_name"] = bundle_name
    data["created_at"] = created_at
    if json:
        # A json payload replaces the defaults built above entirely.
        data = load_json(json)
        bundle_name = data["bundle_name"]
    # Fall back to "now" when no creation date was supplied either way.
    data["created_at"] = data.get("created_at") or str(dt.datetime.now())
    validate_input(data, input_type="version")
    bundle_obj = store.bundle(bundle_name)
    if bundle_obj is None:
        LOG.warning("unknown bundle: %s", bundle_name)
        raise click.Abort
    data["created_at"] = get_date(data.get("created_at"))
    new_version = store.add_version(data, bundle_obj)
    if not new_version:
        LOG.warning("Seems like version already exists for the bundle")
        raise click.Abort
    store.add_commit(new_version)
    LOG.info("new version (%s) added to bundle %s", new_version.id, bundle_obj.name)
@add.command("tag")
@click.argument("tags", nargs=-1)
@click.option("-f", "--file-id", type=int)
@click.pass_context
def tag_cmd(context: click.Context, tags: List[str], file_id: int):
    """Add tags to housekeeper. Use `--file-id` to add tags to existing file"""
    LOG.info("Running add tag")
    store = context.obj["store"]
    file_obj = None
    if len(tags) == 0:
        LOG.warning("No tags provided")
        raise click.Abort
    if file_id:
        file_obj = store.file_(file_id)
        if not file_obj:
            LOG.warning("unable to find file with id %s", file_id)
            raise click.Abort
    for tag_name in tags:
        tag_obj = store.tag(tag_name)
        # Create the tag on the fly when it does not exist yet.
        if not tag_obj:
            LOG.info("%s: tag created", tag_name)
            tag_obj = store.new_tag(tag_name)
            store.add_commit(tag_obj)
        # Without --file-id we only ensure the tags exist.
        if not file_obj:
            continue
        if tag_obj in file_obj.tags:
            LOG.info("%s: tag already added", tag_name)
            continue
        file_obj.tags.append(tag_obj)
    store.commit()
    if not file_obj:
        return
    all_tags = (tag.name for tag in file_obj.tags)
    LOG.info("file tags: %s", ", ".join(all_tags))
|
<filename>jira_agile_metrics/calculators/debt_test.py
import datetime
import pytest
from pandas import Timedelta, Timestamp, NaT
from ..conftest import (
FauxJIRA as JIRA,
FauxIssue as Issue,
FauxFieldValue as Value
)
from ..utils import extend_dict
from ..querymanager import QueryManager
from .debt import DebtCalculator
@pytest.fixture
def fields(minimal_fields):
    """Minimal JIRA field list extended with the Priority field used by
    the debt calculator."""
    return minimal_fields + [
        {'id': 'priority', 'name': 'Priority'},
    ]
@pytest.fixture
def settings(minimal_settings):
    """Minimal settings extended with the debt-calculator options."""
    return extend_dict(minimal_settings, {
        'debt_query': 'issueType = "Tech Debt"',
        'debt_priority_field': 'Priority',
        'debt_priority_values': ['Low', 'Medium', 'High'],
        'debt_chart': 'debt-chart.png',
        'debt_chart_title': 'Debt chart',
        'debt_window': 3,
        'debt_age_chart': 'debt-age-chart.png',
        'debt_age_chart_title': 'Debt age',
        'debt_age_chart_bins': [10, 20, 30]
    })
@pytest.fixture
def jira(fields):
    """Fake JIRA with six Tech Debt issues: four resolved (D-1, D-2, D-3,
    D-5) and two unresolved (D-4, D-6), mixing High/Medium priorities."""
    return JIRA(fields=fields, issues=[
        Issue("D-1",
              summary="Debt 1",
              issuetype=Value("Tech Debt", "Tech Debt"),
              status=Value("Closed", "closed"),
              created="2018-01-01 01:01:01",
              resolution="Done",
              resolutiondate="2018-03-20 02:02:02",
              priority=Value("High", "High"),
              changes=[],
              ),
        Issue("D-2",
              summary="Debt 2",
              issuetype=Value("Tech Debt", "Tech Debt"),
              status=Value("Closed", "closed"),
              created="2018-01-02 01:01:01",
              resolution="Done",
              resolutiondate="2018-01-20 02:02:02",
              priority=Value("Medium", "Medium"),
              changes=[],
              ),
        Issue("D-3",
              summary="Debt 3",
              issuetype=Value("Tech Debt", "Tech Debt"),
              status=Value("Closed", "closed"),
              created="2018-02-03 01:01:01",
              resolution="Done",
              resolutiondate="2018-03-20 02:02:02",
              priority=Value("High", "High"),
              changes=[],
              ),
        Issue("D-4",
              summary="Debt 4",
              issuetype=Value("Tech Debt", "Tech Debt"),
              status=Value("Closed", "closed"),
              created="2018-01-04 01:01:01",
              resolution=None,
              resolutiondate=None,
              priority=Value("Medium", "Medium"),
              changes=[],
              ),
        Issue("D-5",
              summary="Debt 5",
              issuetype=Value("Tech Debt", "Tech Debt"),
              status=Value("Closed", "closed"),
              created="2018-02-05 01:01:01",
              resolution="Done",
              resolutiondate="2018-02-20 02:02:02",
              priority=Value("High", "High"),
              changes=[],
              ),
        Issue("D-6",
              summary="Debt 6",
              issuetype=Value("Tech Debt", "Tech Debt"),
              status=Value("Closed", "closed"),
              created="2018-03-06 01:01:01",
              resolution=None,
              resolutiondate=None,
              priority=Value("Medium", "Medium"),
              changes=[],
              ),
    ])
def test_no_query(jira, settings):
    """The calculator returns None when no debt_query is configured."""
    query_manager = QueryManager(jira, settings)
    results = {}
    settings = extend_dict(settings, {
        'debt_query': None
    })
    calculator = DebtCalculator(query_manager, settings, results)
    data = calculator.run()
    assert data is None
def test_columns(jira, settings):
    """The result DataFrame exposes the expected columns in order."""
    query_manager = QueryManager(jira, settings)
    results = {}
    calculator = DebtCalculator(query_manager, settings, results)
    data = calculator.run()
    assert list(data.columns) == ['key', 'priority', 'created', 'resolved', 'age']
def test_empty(fields, settings):
    """An issue-less JIRA yields an empty (zero-row) DataFrame."""
    jira = JIRA(fields=fields, issues=[])
    query_manager = QueryManager(jira, settings)
    results = {}
    calculator = DebtCalculator(query_manager, settings, results)
    data = calculator.run()
    assert len(data.index) == 0
def test_breakdown(jira, settings):
    """Per-issue breakdown: unresolved issues get NaT resolved dates and
    their age is measured against the fixed `now` timestamp."""
    query_manager = QueryManager(jira, settings)
    results = {}
    calculator = DebtCalculator(query_manager, settings, results)
    data = calculator.run(now=datetime.datetime(2018, 3, 21, 2, 2, 2))
    assert data.to_dict('records') == [
        {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('78 days 01:01:01'), 'priority': 'High'},
        {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'age': Timedelta('18 days 01:01:01'), 'priority': 'Medium'},
        {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('45 days 01:01:01'), 'priority': 'High'},
        {'key': 'D-4', 'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'age': Timedelta('76 days 01:01:01'), 'priority': 'Medium'},
        {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'age': Timedelta('15 days 01:01:01'), 'priority': 'High'},
        {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'age': Timedelta('15 days 01:01:01'), 'priority': 'Medium'},
    ]
def test_no_priority_field(jira, settings):
    """Without a configured priority field every record's priority is None
    while the rest of the breakdown is unchanged."""
    settings = extend_dict(settings, {
        'debt_priority_field': None
    })
    query_manager = QueryManager(jira, settings)
    results = {}
    calculator = DebtCalculator(query_manager, settings, results)
    data = calculator.run(now=datetime.datetime(2018, 3, 21, 2, 2, 2))
    assert data.to_dict('records') == [
        {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('78 days 01:01:01'), 'priority': None},
        {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'age': Timedelta('18 days 01:01:01'), 'priority': None},
        {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('45 days 01:01:01'), 'priority': None},
        {'key': 'D-4', 'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'age': Timedelta('76 days 01:01:01'), 'priority': None},
        {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'age': Timedelta('15 days 01:01:01'), 'priority': None},
        {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'age': Timedelta('15 days 01:01:01'), 'priority': None},
    ]
|
<reponame>jbarnoud/vermouth-martinize<gh_stars>0
# Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Assign protein secondary structures using DSSP.
"""
import collections
import logging
import os
import subprocess
import tempfile
from ..file_writer import deferred_open
from ..pdb import pdb
from ..system import System
from ..processors.processor import Processor
from ..selectors import is_protein, selector_has_position, filter_minimal, select_all
from .. import utils
from ..log_helpers import StyleAdapter, get_logger
LOGGER = StyleAdapter(get_logger(__name__))
class DSSPError(Exception):
    """Raised when the DSSP executable fails or reports an error."""
def read_dssp2(lines):
    """
    Read the secondary structure from a DSSP output.

    Only the first column of the "STRUCTURE" block is read. See the
    `documentation of the DSSP format`_ for more details.

    The secondary structures that can be read are:

    :H: α-helix
    :B: residue in isolated β-bridge
    :E: extended strand, participates in β ladder
    :G: 3-helix (3-10 helix)
    :I: 5 helix (π-helix)
    :T: hydrogen bonded turn
    :S: bend
    :C: loop or irregular

    The "C" code for loops and random coil is translated from the gap used in
    the DSSP file for an improved readability.

    Only the versions 2 and 3 of DSSP are supported. If the format is not
    recognized as coming from those versions of DSSP, then a :exc:`IOError`
    is raised.

    .. _`documentation of the DSSP format`: http://swift.cmbi.ru.nl/gv/dssp/DSSP_3.html

    Parameters
    ----------
    lines:
        An iterable over the lines of the DSSP output. This can be *e.g.* a
        list of lines, or a file handler. The new line character is ignored.

    Returns
    -------
    secstructs: list[str]
        The secondary structure assigned by DSSP as a list of one-letter
        secondary structure code.

    Raises
    ------
    IOError
        When a line could not be parsed, or if the version of DSSP
        is not supported.
    """
    secstructs = []
    # We use the line number for the error messages. It is more natural for a
    # user to count lines in a file starting from 1 rather than 0.
    numbered_lines = enumerate(lines, start=1)

    # The function can only read output from DSSP version 2 and 3. Hopefully,
    # if the input file is not in this format, then the parser will break as
    # it reads the file; we can expect that the end of the header will not be
    # found or the secondary structure will be an unexpected character.
    # We could predict from the first line that the format is not the one we
    # expect if it does not start with "===="; however, the first lines of the
    # file are non-essential and could have been trimmed. (For instance, the
    # first line of a DSSPv2 file contains the date of execution of the
    # program, which is annoying when comparing files.) Failing at the
    # first line is likely unnecessary.
    # Yet, we can identify files from DSSP v1 from the first line. These files
    # start with "****" instead of "====". If we identify such a file, we can
    # fail with a useful error message.
    _, first_line = next(numbered_lines)
    if first_line and first_line.startswith('****'):
        # Bug fix: the two string fragments previously concatenated to
        # "version 2 and 3of DSSP" — a space was missing.
        msg = ('Based on its header, the input file could come from a '
               'pre-July 1995 version of DSSP (or the compatibility mode '
               'of a more recent version). Only output from the version 2 '
               'and 3 of DSSP are supported.')
        raise IOError(msg)

    # First we skip the header and the histogram.
    for line_num, line in numbered_lines:
        if line.startswith('  # RESIDUE AA'):
            break
    else:  # no break
        msg = ('No secondary structure assignation could be read because the '
               'file is not formated correctly. No line was found that starts '
               'with "  # RESIDUE AA".')
        raise IOError(msg)

    # Now, every line should be a secondary structure assignation.
    for line_num, line in numbered_lines:
        if '!' in line or not line:
            # This is a TER record or an empty line, we ignore it.
            continue
        elif len(line) >= 17:
            secondary_structure = line[16]
            if secondary_structure not in 'HBEGITS ':
                # Bug fix: "Unrecognize" -> "Unrecognized" in the message.
                msg = 'Unrecognized secondary structure "{}" in line {}: "{}"'
                raise IOError(msg.format(secondary_structure, line_num, line))
            # DSSP represents the coil with a space. While this works in a
            # column based file, it is much less convenient to handle in
            # our code, and it is much less readable in our debug logs.
            # We translate the space to "C" in our representation.
            if secondary_structure == ' ':
                secondary_structure = 'C'
            secstructs.append(secondary_structure)
        else:
            raise IOError('Line {} is too short: "{}".'.format(line_num, line))

    return secstructs
def run_dssp(system, executable='dssp', savefile=None, defer_writing=True):
    """
    Run DSSP on a system and return the assigned secondary structures.

    Run DSSP using the path (or name in the research PATH) given by
    "executable". Return the secondary structure parsed from the output of
    the program.

    In order to call DSSP, a PDB file is produced. Therefore, all the
    molecules in the system must contain the required attributes for such a
    file to be generated. Also, the atom names are assumed to be compatible
    with the 'universal' force field for DSSP to recognize them.
    However, the molecules do not require the edges to be defined.

    DSSP is assumed to be in version 2 or 3. The secondary structure codes
    are described in :func:`read_dssp2`.

    If "savefile" is set to a path, then the output of DSSP is written in
    that file.

    Parameters
    ----------
    system: System
    executable: str
        Where to find the DSSP executable.
    savefile: None or str or pathlib.Path
        If set to a path, the output of DSSP is written in that file.
    defer_writing: bool
        Whether to use :meth:`~vermouth.file_writer.DeferredFileWriter.write`
        for writing data.

    Returns
    -------
    list[str]
        The assigned secondary structures as a list of one-letter codes.
        The secondary structure sequences of all the molecules are combined
        in a single list without delimitation.

    Raises
    ------
    DSSPError
        DSSP failed to run.
    IOError
        The output of DSSP could not be parsed.

    See Also
    --------
    read_dssp2
        Parse a DSSP output.
    """
    tmpfile_handle, tmpfile_name = tempfile.mkstemp(suffix='.pdb', text=True,
                                                    dir='.', prefix='dssp_in_')
    # Use a context manager so the temporary file handle is closed even if
    # writing the PDB string raises (the raw fd was previously leaked then).
    with os.fdopen(tmpfile_handle, mode='w') as tmpfile:
        tmpfile.write(pdb.write_pdb_string(system, conect=False))

    process = subprocess.run(
        [executable, '-i', tmpfile_name],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=False,
        universal_newlines=True
    )
    status = process.returncode

    # If an error is encountered, or the loglevel is low enough, preserve the
    # DSSP input file, and print a nice message.
    if not status and LOGGER.getEffectiveLevel() > logging.DEBUG:
        os.remove(tmpfile_name)
    if status:
        message = 'DSSP encountered an error. The message was {err}. The input' \
                  ' file provided to DSSP can be found at {file}.'
        raise DSSPError(message.format(err=process.stderr, file=tmpfile_name))
    else:
        LOGGER.debug('DSSP input file written to {}', tmpfile_name)

    if savefile is not None:
        # Bug fix: the original conditionally assigned `open = deferred_open`,
        # which made `open` a function-local name in *all* code paths; with
        # defer_writing=False the subsequent call raised UnboundLocalError.
        # Select the writer explicitly instead of shadowing the builtin.
        opener = deferred_open if defer_writing else open
        with opener(str(savefile), 'w') as outfile:
            outfile.write(process.stdout)
    return read_dssp2(process.stdout.split('\n'))
def _savefile_path(molecule, savedir=None):
savefile = None
if savedir is not None:
first_atom = list(molecule.nodes.keys())[0]
chain = molecule.nodes[first_atom].get('chain')
if chain is None:
msg = 'The "savedir" argument can only be used if chains are set.'
raise ValueError(msg)
savefile = os.path.join(savedir, 'chain_{}.ssd'.format(chain))
return savefile
def annotate_dssp(molecule, executable='dssp', savedir=None, attribute='secstruct'):
    """
    Adds the DSSP assignation to the atoms of a molecule.

    Runs DSSP on the molecule and adds the secondary structure assignation as
    an attribute of its atoms. The attribute name in which the assignation is
    stored is controlled with the "attribute" argument.

    Only proteins can be annotated. Non-protein molecules are returned
    unmodified, so are empty molecules, and molecules for which no positions
    are set.

    The atom names are assumed to be compatible with DSSP. Atoms with no known
    position are not passed to DSSP which may lead to an error in DSSP.

    .. warning::

        The molecule is annotated **in-place**.

    Parameters
    ----------
    molecule: Molecule
        The molecule to annotate. Its atoms must have the attributes required
        to write a PDB file; other atom attributes, edges, or molecule
        attributes are not used.
    executable: str
        The path or name in the research PATH of the DSSP executable.
    savedir: None or str
        If set to a path, the DSSP output will be written in this **directory**.
        The option is only available if chains are defined with the 'chain'
        atom attribute.
    attribute: str
        The name of the atom attribute in which to store the annotation.

    See Also
    --------
    run_dssp, read_dssp2
    """
    # DSSP only makes sense for proteins; silently skip everything else.
    if not is_protein(molecule):
        return
    # Keep only atoms that have a position: DSSP cannot work without
    # coordinates, so they are excluded from the input written for it.
    clean_pos = molecule.subgraph(
        filter_minimal(molecule, selector=selector_has_position)
    )
    # We ignore empty molecule, there is no point at running DSSP on them.
    if not clean_pos:
        return
    savefile = _savefile_path(molecule, savedir)
    system = System()
    system.add_molecule(clean_pos)
    secstructs = run_dssp(system, executable, savefile)
    # NOTE(review): DSSP ran on the position-filtered subgraph, but the
    # annotation is applied to the *full* molecule. This assumes filtering
    # never drops a complete residue (which would misalign the sequence) —
    # TODO confirm against the callers.
    annotate_residues_from_sequence(molecule, attribute, secstructs)
def convert_dssp_to_martini(sequence):
    """
    Convert a sequence of secondary structure to martini secondary sequence.

    Martini treats some secondary structures with less resolution than DSSP:
    all the DSSP helix types collapse to one helix class, and B/E both map to
    extended regions. However, the different *parts* of a helix (start, end,
    core of a short or long helix) are distinguished.

    After the conversion, the secondary structures are:

    * :F: Collagenous Fiber
    * :E: Extended structure (β sheet)
    * :H: Helix structure
    * :1: Helix start (H-bond donor)
    * :2: Helix end (H-bond acceptor)
    * :3: Ambivalent helix type (short helices)
    * :T: Turn
    * :S: Bend
    * :C: Coil

    Parameters
    ----------
    sequence: str
        A sequence of secondary structures as read from dssp. One letter per
        residue.

    Returns
    -------
    str
        A sequence of secondary structures usable for martini. One letter per
        residue.
    """
    dssp_to_cg = {'1': 'H', '2': 'H', '3': 'H', 'H': 'H', 'G': 'H', 'I': 'H',
                  'B': 'E', 'E': 'E', 'T': 'T', 'S': 'S', 'C': 'C'}
    # Replacement order matters: longer helix runs must be rewritten before
    # the open-ended '.HHHH' / 'HHHH.' patterns consume them.
    helix_patterns = collections.OrderedDict([
        ('.H.', '.3.'), ('.HH.', '.33.'), ('.HHH.', '.333.'),
        ('.HHHH.', '.3333.'), ('.HHHHH.', '.13332.'),
        ('.HHHHHH.', '.113322.'), ('.HHHHHHH.', '.1113222.'),
        ('.HHHH', '.1111'), ('HHHH.', '2222.'),
    ])
    cg_sequence = ''.join(dssp_to_cg[code] for code in sequence)
    # Mask: 'H' where the residue is helical, '.' everywhere else, so the
    # patterns above only see helix runs and their boundaries.
    helix_mask = ''.join('H' if code == 'H' else '.' for code in cg_sequence)
    for pattern, replacement in helix_patterns.items():
        helix_mask = helix_mask.replace(pattern, replacement)
    merged = []
    for mask_char, cg_char in zip(helix_mask, cg_sequence):
        merged.append(cg_char if mask_char == '.' else mask_char)
    return ''.join(merged)
def sequence_from_residues(molecule, attribute, default=None):
    """
    Yield the value of `attribute` for each residue in `molecule`.

    The value is read from the first node of every residue; `default` is
    yielded when that node does not carry the attribute.

    Parameters
    ----------
    molecule: vermouth.molecule.Molecule
        The molecule to process.
    attribute: collections.abc.Hashable
        The attribute of interest.
    default: object
        Yielded if the first node of a residue has no attribute `attribute`.

    Yields
    ------
    object
        The value of `attribute` for every residue in `molecule`.
    """
    for residue in molecule.iter_residues():
        # TODO: Make sure the attribute is the same for every node in residue.
        first_node = residue[0]
        yield molecule.nodes[first_node].get(attribute, default)
def annotate_residues_from_sequence(molecule, attribute, sequence):
    """
    Set `attribute` on every node of `molecule` from a per-residue sequence.

    Nodes in the n'th residue of `molecule` receive the n'th value of
    `sequence`. A single-element sequence is broadcast to every residue.

    Parameters
    ----------
    molecule: networkx.Graph
        The molecule to annotate. Is modified in-place.
    attribute: collections.abc.Hashable
        The attribute to set.
    sequence: collections.abc.Sequence
        The values assigned.

    Raises
    ------
    ValueError
        If the length of `sequence` is different from the number of residues
        in `molecule` (and is not 1).
    """
    residues = list(molecule.iter_residues())
    n_residues = len(residues)
    if len(sequence) == 1:
        # Broadcast a single value over every residue.
        sequence = sequence * n_residues
    elif len(sequence) != n_residues:
        raise ValueError(
            'The sequence length does not match the number of residues. '
            'The sequence has {} elements for {} residues.'
            .format(len(sequence), n_residues)
        )
    for residue, value in zip(residues, sequence):
        for node_name in residue:
            molecule.nodes[node_name][attribute] = value
def convert_dssp_annotation_to_martini(
        molecule, from_attribute='secstruct', to_attribute='cgsecstruct'):
    """
    Translate DSSP annotations on `molecule` into Martini ones.

    For every residue, the `from_attribute` value is converted with
    :func:`convert_dssp_to_martini` and stored under `to_attribute`.
    The molecule is modified in-place.

    Parameters
    ----------
    molecule: networkx.Graph
        The molecule to process. Is modified in-place.
    from_attribute: collections.abc.Hashable
        The attribute to read.
    to_attribute: collections.abc.Hashable
        The attribute to set.

    Raises
    ------
    ValueError
        If only some residues carry a `from_attribute` value.
    """
    dssp_sequence = list(sequence_from_residues(molecule, from_attribute))
    if None not in dssp_sequence:
        # Full assignation available: convert and write it back.
        cg_sequence = list(convert_dssp_to_martini(dssp_sequence))
        annotate_residues_from_sequence(molecule, to_attribute, cg_sequence)
    elif all(elem is None for elem in dssp_sequence):
        # No DSSP assignation at all. This is likely due to the molecule not
        # being a protein, but it *could* mean the DSSP step was skipped, so
        # leave a debug trace.
        LOGGER.debug('No DSSP assignation to convert to Martini '
                     'secondary structure intermediates.')
    else:
        # A partial assignation is ambiguous; we do not know yet what a
        # molecule partially attached to a protein should get.
        raise ValueError('Not all residues have a DSSP assignation.')
class AnnotateDSSP(Processor):
    """Processor running DSSP on each molecule and storing the assignation.

    Delegates to :func:`annotate_dssp`; molecules are annotated in-place
    under the 'secstruct' attribute and returned.
    """
    name = 'AnnotateDSSP'

    def __init__(self, executable='dssp', savedir=None):
        super().__init__()
        self.executable = executable
        self.savedir = savedir

    def run_molecule(self, molecule):
        annotate_dssp(molecule, executable=self.executable, savedir=self.savedir)
        return molecule
class AnnotateMartiniSecondaryStructures(Processor):
    """Processor translating DSSP annotations into Martini ones.

    Reads each molecule's 'secstruct' node attribute and writes the
    converted 'cgsecstruct' via :func:`convert_dssp_annotation_to_martini`.
    """
    name = 'AnnotateMartiniSecondaryStructures'

    @staticmethod
    def run_molecule(molecule):
        convert_dssp_annotation_to_martini(
            molecule, from_attribute='secstruct', to_attribute='cgsecstruct')
        return molecule
class AnnotateResidues(Processor):
    """
    Set an attribute of the nodes from a sequence with one element per residue.

    Read a sequence with one element per residue and assign an attribute of
    each node based on that sequence, so each node has the value corresponding
    to its residue. In most cases, the length of the sequence has to match the
    total number of residues in the system. The sequence must be ordered in the
    same way as the residues in the system. If all the molecules have the same
    number of residues, and if the length of the sequence corresponds to the
    number of residue of one molecule, then the sequence is repeated to all
    molecules. If the sequence contains only one element, then it is repeated
    to all the residues of the system.

    Parameters
    ----------
    attribute: str
        Name of the node attribute to populate.
    sequence: collections.abc.Sequence
        Per-residue sequence.
    molecule_selector: collections.abc.Callable
        Function that takes an instance of :class:`vermouth.molecule.Molecule`
        as argument and returns `True` if the molecule should be considered,
        else `False`.
    """
    name = 'AnnotateResidues'

    def __init__(self, attribute, sequence,
                 molecule_selector=select_all):
        # Consistency fix: call the Processor initializer like the other
        # processors in this module (e.g. AnnotateDSSP) do.
        super().__init__()
        self.attribute = attribute
        self.sequence = sequence
        self.molecule_selector = molecule_selector

    def run_molecule(self, molecule):
        """
        Run the processor on a single molecule.

        Parameters
        ----------
        molecule: vermouth.molecule.Molecule

        Returns
        -------
        vermouth.molecule.Molecule
        """
        if self.molecule_selector(molecule):
            annotate_residues_from_sequence(molecule, self.attribute, self.sequence)
        return molecule

    def run_system(self, system):
        """
        Run the processor on a system.

        Parameters
        ----------
        system: vermouth.system.System

        Returns
        -------
        vermouth.system.System
        """
        # Test and adjust the length of the sequence. There are 3 valid scenarios:
        # * the length of the sequence matches the number of residues in the
        #   selection;
        # * all the molecules in the selection have the same number of residues
        #   and the sequence length matches the number of residue of one
        #   molecule; in this case the sequence is repeated for each molecule;
        # * the sequence has a length of one; in this case the sequence is
        #   repeated for each residue.
        # The case where there is no molecule in the selection is only valid if
        # the sequence is empty. Then we are in the first valid scenario.
        #
        # Bug fix: the lengths below are computed only over the *selected*
        # molecules, but the original code zipped them against *all* the
        # molecules of the system. With a non-trivial selector this annotated
        # the wrong molecules with the wrong slices. We now keep the filtered
        # list and iterate over it.
        selected = [
            molecule for molecule in system.molecules
            if self.molecule_selector(molecule)
        ]
        molecule_lengths = [
            len(list(molecule.iter_residues()))
            for molecule in selected
        ]
        if self.sequence and not molecule_lengths:
            raise ValueError('There is no molecule to which '
                             'to apply the sequence.')
        if (molecule_lengths
                and len(self.sequence) == molecule_lengths[0]
                and utils.are_all_equal(molecule_lengths)):
            # One molecule's worth of sequence, identical molecule sizes:
            # repeat the sequence once per molecule.
            sequence = list(self.sequence) * len(molecule_lengths)
        elif len(self.sequence) == 1:
            # Single element: broadcast to every residue of the selection.
            sequence = list(self.sequence) * sum(molecule_lengths)
        elif len(self.sequence) != sum(molecule_lengths):
            raise ValueError(
                'The length of the sequence ({}) does not match the '
                'number of residues in the selection ({}).'
                .format(len(self.sequence), sum(molecule_lengths))
            )
        else:
            sequence = self.sequence
        end = 0
        begin = 0
        for molecule, nres in zip(selected, molecule_lengths):
            end += nres
            annotate_residues_from_sequence(
                molecule,
                self.attribute,
                sequence[begin:end]
            )
            begin += nres
|
<gh_stars>0
import os
import uuid
import base64
import time
import yaml
import json
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from behave import given, when, then
from helpers import (
ucfs_claimant_api_helper,
console_printer,
invoke_lambda,
claimant_api_data_generator,
streaming_data_helper,
aws_helper,
file_helper,
)
message_type = "claimant_api"
@given("The claimant API '{region_type}' region is set to '{region}'")
def step_impl(context, region_type, region):
    """Resolve a named region to an AWS region and store it on the context."""
    region_lower = region.lower()
    if region_lower == "ireland":
        region_to_use = context.aws_region_alternative
    elif region_lower == "london":
        region_to_use = context.aws_region_main
    else:
        # Anything else is taken verbatim (lower-cased).
        region_to_use = region_lower
    if region_type.lower() == "storage":
        context.claimant_api_storage_region = region_to_use.lower()
    else:
        context.claimant_api_business_region = region_to_use.lower()
    console_printer.print_info(
        f"Claimant API region of type '{region_type}' set to '{region}'"
    )
@given("The nino salt has been retrieved")
def step_impl(context):
    """Fetch the nino hashing salt from SSM and stash it on the context."""
    salt = aws_helper.ssm_get_parameter_value(
        context.ucfs_claimant_api_salt_ssm_parameter_name, True
    )
    context.nino_salt = salt
    console_printer.print_info(f"Claimant API nino salt set to '{context.nino_salt}'")
@given("The claimant API data has been reset")
def step_impl(context):
    """Reset claimant data by invoking the MySQL metadata interface lambda."""
    invoke_lambda.invoke_claimant_mysql_metadata_interface()
    console_printer.print_info("Claimant API data reset")
@then("Take home pay can be successfully decoded as '{expected_value}'")
def step_impl(context, expected_value):
    """Decrypt the take home pay from the API response and compare it.

    The response carries a URL-safe base64 encoded, AES-GCM encrypted
    take home pay and a KMS-encrypted data key ("cipherTextBlob"). The data
    key is decrypted via KMS in the storage region, then used to decrypt the
    payload, which must equal *expected_value*.
    """
    assert (
        context.claimant_api_status_code == 200
    ), f"Status code from response was {context.claimant_api_status_code}, not 200"
    assert context.claimant_api_response is not None, f"Response body was empty"
    response = context.claimant_api_response
    console_printer.print_info(
        f"Claimant API Response: {context.claimant_api_response}"
    )
    try:
        # Both fields live under the first assessment period's amount and are
        # URL-safe base64 encoded.
        take_home_pay_enc = base64.urlsafe_b64decode(
            response["assessmentPeriod"][0]["amount"]["takeHomePay"]
        )
        cipher_text_blob = base64.urlsafe_b64decode(
            response["assessmentPeriod"][0]["amount"]["cipherTextBlob"]
        )
    except Exception as ex:
        # Log the full response for diagnosis, then let the scenario fail.
        console_printer.print_error_text(
            f"Could not retrieve information from claimant API response of '{response}' and error of '{ex}'"
        )
        raise ex
    console_printer.print_info(
        f"Successfully retrieved cipher text of '{cipher_text_blob}' and take home pay of '{take_home_pay_enc}'"
    )
    # Decrypt the data key with KMS in the configured storage region.
    data_key = aws_helper.kms_decrypt_cipher_text(
        cipher_text_blob, context.claimant_api_storage_region
    )
    console_printer.print_info(f"Successfully decoded data key of '{data_key}'")
    # The first 12 bytes of the decoded payload are the AES-GCM nonce; the
    # remainder is the ciphertext handed to AESGCM.decrypt (no associated data).
    nonce_size = 12
    nonce = take_home_pay_enc[:nonce_size]
    take_home_pay_data = take_home_pay_enc[nonce_size:]
    aesgcm = AESGCM(data_key)
    take_home_pay = aesgcm.decrypt(nonce, take_home_pay_data, None).decode("utf-8")
    console_printer.print_info(
        f"Successfully decoded take home pay of '{take_home_pay}'"
    )
    assert (
        take_home_pay == expected_value
    ), f"Take home pay was {take_home_pay} which does not match expected value of {expected_value}"
@given(
    "UCFS send claimant API kafka messages with input file of '{input_file_name}' and data file of '{data_file_name}'"
)
def step_impl(
    context,
    input_file_name,
    data_file_name,
):
    """Generate claimant API fixture files and publish them to Kafka via SNS.

    The generated ninos and ids are stored on the context for later steps,
    as are the local copies of the generated files.
    """
    global message_type
    # Each scenario run gets its own temp folder so fixture files never collide.
    root_folder = os.path.join(context.temp_folder, str(uuid.uuid4()))
    folder = streaming_data_helper.generate_fixture_data_folder(message_type)
    context.claimant_api_kafka_temp_folder = os.path.join(root_folder, folder)
    topic_prefix = streaming_data_helper.generate_topic_prefix(message_type)
    (
        kafka_input_file_data,
        context.generated_ninos,
        context.generated_ids,
    ) = claimant_api_data_generator.generate_claimant_api_kafka_files(
        s3_input_bucket=context.s3_ingest_bucket,
        input_data_file_name=data_file_name,
        input_template_name=input_file_name,
        new_uuid=None,
        local_files_temp_folder=root_folder,
        fixture_files_root=context.fixture_path_local,
        s3_output_prefix=context.s3_temp_output_path,
        seconds_timeout=context.timeout,
        fixture_data_folder=folder,
        todays_date=context.todays_date,
    )
    context.local_generated_claimant_api_kafka_files = []
    # Each entry presumably pairs the file to send with a local copy — element
    # [0] is published, element [1] is kept for later verification (confirm
    # against claimant_api_data_generator).
    for (id_field_name, generated_files) in kafka_input_file_data:
        files_to_send = [db_object_tuple[0] for db_object_tuple in generated_files]
        context.local_generated_claimant_api_kafka_files.extend(
            [db_object_tuple[1] for db_object_tuple in generated_files]
        )
        aws_helper.send_files_to_kafka_producer_sns(
            dynamodb_table_name=context.dynamo_db_table_name,
            s3_input_bucket=context.s3_ingest_bucket,
            aws_acc_id=context.aws_acc,
            sns_topic_name=context.aws_sns_topic_name,
            fixture_files=files_to_send,
            # NOTE(review): "<KEY>" looks like a redacted placeholder (likely
            # a UUID factory such as uuid.uuid4) — restore before running.
            message_key=<KEY>(),
            topic_name=ucfs_claimant_api_helper.get_topic_by_id_type(id_field_name),
            topic_prefix=topic_prefix,
            region=context.aws_region_main,
            skip_encryption=False,
            kafka_message_volume="1",
            kafka_random_key="true",
            wait_for_job_completion=True,
        )
@when(
    "UCFS send kafka updates for first existing claimant with input file of '{input_file_name}' and data file of '{data_file_name}'"
)
def step_impl(
    context,
    input_file_name,
    data_file_name,
):
    """Send updated contract/statement Kafka messages for the first claimant.

    Resolves the citizen and contract ids of the first generated nino from
    the fixture files written by the earlier 'send kafka messages' step,
    regenerates updated files for that claimant, and publishes them.
    """
    global message_type
    existing_files_folder = os.path.join(
        context.claimant_api_kafka_temp_folder, "edited_files"
    )
    # nino -> citizenId, then citizenId -> contractId, both looked up in the
    # previously generated fixture files.
    citizen_id = file_helper.get_id_from_claimant_by_id(
        existing_files_folder, context.generated_ninos[0], "nino", "citizenId"
    )
    contract_id = file_helper.get_id_from_claimant_by_id(
        existing_files_folder, citizen_id, "people", "contractId"
    )
    topic_prefix = streaming_data_helper.generate_topic_prefix(message_type)
    folder = streaming_data_helper.generate_fixture_data_folder(message_type)
    kafka_input_file_data = claimant_api_data_generator.generate_updated_contract_and_statement_files_for_existing_claimant(
        citizen_id=citizen_id,
        contract_id=contract_id,
        fixture_files_root=context.fixture_path_local,
        fixture_data_folder=folder,
        input_data_file_name=data_file_name,
        input_template_name=input_file_name,
        s3_input_bucket=context.s3_ingest_bucket,
        local_files_temp_folder=os.path.join(context.temp_folder, str(uuid.uuid4())),
        s3_output_prefix=context.s3_temp_output_path,
        seconds_timeout=context.timeout,
        todays_date=context.todays_date,
    )
    context.local_generated_claimant_api_kafka_files = []
    # Element [0] of each tuple is published; element [1] is presumably the
    # local copy kept for later verification — confirm against the generator.
    for (id_field_name, generated_files) in kafka_input_file_data:
        files_to_send = [db_object_tuple[0] for db_object_tuple in generated_files]
        context.local_generated_claimant_api_kafka_files.extend(
            [db_object_tuple[1] for db_object_tuple in generated_files]
        )
        aws_helper.send_files_to_kafka_producer_sns(
            dynamodb_table_name=context.dynamo_db_table_name,
            s3_input_bucket=context.s3_ingest_bucket,
            aws_acc_id=context.aws_acc,
            sns_topic_name=context.aws_sns_topic_name,
            fixture_files=files_to_send,
            # NOTE(review): "<KEY>" looks like a redacted placeholder (likely
            # a UUID factory such as uuid.uuid4) — restore before running.
            message_key=<KEY>(),
            topic_name=ucfs_claimant_api_helper.get_topic_by_id_type(id_field_name),
            topic_prefix=topic_prefix,
            region=context.aws_region_main,
            skip_encryption=False,
            kafka_message_volume="1",
            kafka_random_key="true",
            wait_for_job_completion=True,
        )
@when(
    "UCFS send a kafka delete for first existing claimant with input file of '{input_file_name}'"
)
def step_impl(
    context,
    input_file_name,
):
    """Send a Kafka delete message for the first existing claimant.

    Resolves the citizen and person ids of the first generated nino from the
    fixture files written earlier, regenerates the claimant file (with an
    incremented version) and publishes it.
    """
    global message_type
    existing_files_folder = os.path.join(
        context.claimant_api_kafka_temp_folder, "edited_files"
    )
    # Both ids are looked up from the nino in the existing fixture files.
    citizen_id = file_helper.get_id_from_claimant_by_id(
        existing_files_folder, context.generated_ninos[0], "nino", "citizenId"
    )
    person_id = file_helper.get_id_from_claimant_by_id(
        existing_files_folder, context.generated_ninos[0], "nino", "personId"
    )
    topic_prefix = streaming_data_helper.generate_topic_prefix(message_type)
    folder = streaming_data_helper.generate_fixture_data_folder(message_type)
    kafka_input_file_data = claimant_api_data_generator.generate_updated_claimant_file_for_existing_claimant(
        citizen_id=citizen_id,
        person_id=person_id,
        fixture_files_root=context.fixture_path_local,
        fixture_data_folder=folder,
        input_template_name=input_file_name,
        s3_input_bucket=context.s3_ingest_bucket,
        local_files_temp_folder=os.path.join(context.temp_folder, str(uuid.uuid4())),
        s3_output_prefix=context.s3_temp_output_path,
        seconds_timeout=context.timeout,
        increment=1,
    )
    context.local_generated_claimant_api_kafka_files = []
    # Element [0] of each tuple is published; element [1] is presumably the
    # local copy kept for later verification — confirm against the generator.
    for (id_field_name, generated_files) in kafka_input_file_data:
        files_to_send = [db_object_tuple[0] for db_object_tuple in generated_files]
        context.local_generated_claimant_api_kafka_files.extend(
            [db_object_tuple[1] for db_object_tuple in generated_files]
        )
        aws_helper.send_files_to_kafka_producer_sns(
            dynamodb_table_name=context.dynamo_db_table_name,
            s3_input_bucket=context.s3_ingest_bucket,
            aws_acc_id=context.aws_acc,
            sns_topic_name=context.aws_sns_topic_name,
            fixture_files=files_to_send,
            # NOTE(review): "<KEY>" looks like a redacted placeholder (likely
            # a UUID factory such as uuid.uuid4) — restore before running.
            message_key=<KEY>(),
            topic_name=ucfs_claimant_api_helper.get_topic_by_id_type(id_field_name),
            topic_prefix=topic_prefix,
            region=context.aws_region_main,
            skip_encryption=False,
            kafka_message_volume="1",
            kafka_random_key="true",
            wait_for_job_completion=True,
        )
@when("I query for the first new claimant from claimant API '{version}'")
@then("I query for the first new claimant from claimant API '{version}'")
def step_impl(context, version):
    """Query the claimant API for the first generated nino; store the result."""
    if version.lower() == "v1":
        api_path = context.ucfs_claimant_api_path_v1_get_award_details
    else:
        api_path = context.ucfs_claimant_api_path_v2_get_award_details
    hashed_nino = ucfs_claimant_api_helper.hash_nino(
        context.generated_ninos[0], context.nino_salt
    )
    (
        context.claimant_api_status_code,
        context.claimant_api_response,
    ) = ucfs_claimant_api_helper.query_for_claimant_from_claimant_api(
        context.ucfs_claimant_domain_name,
        context.claimant_api_business_region,
        api_path,
        hashed_nino,
        context.test_run_name,
    )
    console_printer.print_info(
        f"Query status code is '{context.claimant_api_status_code}' and response is '{context.claimant_api_response}'"
    )
@when("I query for a claimant from claimant API '{version}' who does not exist")
def step_impl(context, version):
    """Query the claimant API with a nino that was never ingested."""
    if version.lower() == "v1":
        api_path = context.ucfs_claimant_api_path_v1_get_award_details
    else:
        api_path = context.ucfs_claimant_api_path_v2_get_award_details
    (
        context.claimant_api_status_code,
        context.claimant_api_response,
    ) = ucfs_claimant_api_helper.query_for_claimant_from_claimant_api(
        context.ucfs_claimant_domain_name,
        context.claimant_api_business_region,
        api_path,
        "test_unhashed_fake_nino",
        context.test_run_name,
    )
    console_printer.print_info(
        f"Query status code is '{context.claimant_api_status_code}' and response is '{context.claimant_api_response}'"
    )
@when(
    "I query for the first claimant from claimant API '{version}' with the parameters file of '{parameters_file}'"
)
def step_impl(context, version, parameters_file):
    """Query the claimant API using date offsets from a YAML parameters file.

    The parameters file may provide day/month offsets for the from/to dates;
    a missing offset is treated as None (no offset).
    """
    local_folder = streaming_data_helper.generate_fixture_data_folder(message_type)
    query_parameters_full_file_name = os.path.join(
        context.fixture_path_local, local_folder, "query_parameters", parameters_file
    )
    console_printer.print_info(
        f"Using parameters file of '{query_parameters_full_file_name}'"
    )
    # Bug fix: the file handle was previously opened inline and never closed
    # (yaml.safe_load(open(...))); use a context manager instead.
    with open(query_parameters_full_file_name) as parameters_handle:
        query_parameters = yaml.safe_load(parameters_handle)

    def _offset_date(days_key, months_key):
        # dict.get returns None for missing keys, matching the previous
        # "key in dict else None" conditional expressions.
        return claimant_api_data_generator.generate_dynamic_date(
            context.todays_date,
            query_parameters.get(days_key),
            query_parameters.get(months_key),
        ).strftime("%Y%m%d")

    from_date = _offset_date("from_date_days_offset", "from_date_months_offset")
    to_date = _offset_date("to_date_days_offset", "to_date_months_offset")
    if version.lower() == "v1":
        api_path = context.ucfs_claimant_api_path_v1_get_award_details
    else:
        api_path = context.ucfs_claimant_api_path_v2_get_award_details
    (
        context.claimant_api_status_code,
        context.claimant_api_response,
    ) = ucfs_claimant_api_helper.query_for_claimant_from_claimant_api(
        aws_host_name=context.ucfs_claimant_domain_name,
        claimant_api_region=context.claimant_api_business_region,
        award_details_api_path=api_path,
        hashed_nino=ucfs_claimant_api_helper.hash_nino(
            context.generated_ninos[0], context.nino_salt
        ),
        transaction_id=context.test_run_name,
        from_date=from_date,
        to_date=to_date,
    )
    console_printer.print_info(
        f"Query status code is '{context.claimant_api_status_code}' and response is '{context.claimant_api_response}'"
    )
@given("The new claimants can be found from claimant API '{version}'")
def step_impl(context, version):
    """Poll the claimant API until every generated nino is reported as found.

    Retries roughly once per second until ``context.timeout`` seconds have
    elapsed, then fails the scenario if any claimant is still missing.
    """
    console_printer.print_info(
        f"Waiting for '{len(context.generated_ninos)}' new claimants to be found"
    )
    api_path = context.ucfs_claimant_api_path_v2_get_award_details
    if version.lower() == "v1":
        api_path = context.ucfs_claimant_api_path_v1_get_award_details
    found_ninos = []
    # NOTE(review): time_taken is incremented but never read — candidate for
    # removal.
    time_taken = 1
    timeout_time = time.time() + context.timeout
    claimants_found = False
    while not claimants_found and time.time() < timeout_time:
        # Re-query every nino each round; already-found ninos may be appended
        # again, which is why the success check below compares sets.
        for nino in context.generated_ninos:
            (
                context.claimant_api_status_code,
                context.claimant_api_response,
            ) = ucfs_claimant_api_helper.query_for_claimant_from_claimant_api(
                context.ucfs_claimant_domain_name,
                context.claimant_api_business_region,
                api_path,
                ucfs_claimant_api_helper.hash_nino(nino, context.nino_salt),
                context.test_run_name,
            )
            if (
                context.claimant_api_status_code == 200
                and "claimantFound" in context.claimant_api_response
                and context.claimant_api_response["claimantFound"]
            ):
                found_ninos.append(nino)
                console_printer.print_info(
                    f"Successfully found claimant with nino of '{nino}'"
                )
        if set(found_ninos) == set(context.generated_ninos):
            console_printer.print_info(f"Successfully found all new claimants")
            claimants_found = True
        time.sleep(1)
        time_taken += 1
    assert claimants_found, f"All claimants were not found"
@when("The query succeeds and returns that the claimant has been found")
@then("The query succeeds and returns that the claimant has been found")
def step_impl(context):
    """Assert a 200 response whose body flags the claimant as found."""
    status = context.claimant_api_status_code
    response = context.claimant_api_response
    assert status == 200, f"Status code from response was {status}, not 200"
    assert response is not None, "Response body was empty"
    assert "claimantFound" in response, "claimantFound not present in response body"
    assert response["claimantFound"] == True, "claimantFound was not set to True"
@then("The query succeeds and returns that the claimant has not been found")
def step_impl(context):
    """Assert a 200 response whose body flags the claimant as NOT found."""
    status = context.claimant_api_status_code
    response = context.claimant_api_response
    assert status == 200, f"Status code from response was {status}, not 200"
    assert response is not None, "Response body was empty"
    assert "claimantFound" in response, "claimantFound not present in response body"
    assert response["claimantFound"] == False, "claimantFound was not set to False"
@when("The query succeeds and returns that the claimant is not suspended")
@then("The query succeeds and returns that the claimant is not suspended")
def step_impl(context):
assert (
context.claimant_api_status_code == 200
), f"Status code from response was {context.claimant_api_status_code}, not 200"
assert context.claimant_api_response is not None, f"Response body was empty"
assert (
"suspendedDate" not in context.claimant_api_response
), f"suspendedDate not present in response body"
@then(
    "I query the first claimant again from claimant API '{version}' and it is not found"
)
def step_impl(context, version):
    """Poll the claimant API until the first generated claimant is reported
    as not found, or fail once ``context.timeout`` seconds have elapsed.

    Stores the last status code and response body on ``context`` for
    subsequent steps.
    """
    nino = context.generated_ninos[0]
    console_printer.print_info(f"Waiting for '{nino}' claimant to be not found")

    # Default to v2; fall back to the v1 path when asked for.
    api_path = context.ucfs_claimant_api_path_v2_get_award_details
    if version.lower() == "v1":
        api_path = context.ucfs_claimant_api_path_v1_get_award_details

    timeout_time = time.time() + context.timeout
    claimant_not_found = False
    while not claimant_not_found and time.time() < timeout_time:
        (
            context.claimant_api_status_code,
            context.claimant_api_response,
        ) = ucfs_claimant_api_helper.query_for_claimant_from_claimant_api(
            context.ucfs_claimant_domain_name,
            context.claimant_api_business_region,
            api_path,
            ucfs_claimant_api_helper.hash_nino(nino, context.nino_salt),
            context.test_run_name,
        )
        if (
            context.claimant_api_status_code == 200
            and "claimantFound" in context.claimant_api_response
            and not context.claimant_api_response["claimantFound"]
        ):
            console_printer.print_info(
                "Successfully retrieved the response and claimant is not found"
            )
            claimant_not_found = True
        else:
            # Back off before retrying (previously we also slept after success
            # and kept an unused time_taken counter).
            time.sleep(1)

    assert (
        claimant_not_found
    ), f"Claimant '{nino}' was still reported as found within the timeout"
@then(
    "I query the first claimant again from claimant API '{version}' and it is suspended"
)
def step_impl(context, version):
    """Poll the claimant API until the first generated claimant is reported
    as found with a truthy ``suspendedDate``, or fail after
    ``context.timeout`` seconds.
    """
    nino = context.generated_ninos[0]
    console_printer.print_info(f"Waiting for '{nino}' claimant to be suspended")

    api_path = context.ucfs_claimant_api_path_v2_get_award_details
    if version.lower() == "v1":
        api_path = context.ucfs_claimant_api_path_v1_get_award_details

    timeout_time = time.time() + context.timeout
    claimant_suspended = False
    while not claimant_suspended and time.time() < timeout_time:
        (
            context.claimant_api_status_code,
            context.claimant_api_response,
        ) = ucfs_claimant_api_helper.query_for_claimant_from_claimant_api(
            context.ucfs_claimant_domain_name,
            context.claimant_api_business_region,
            api_path,
            ucfs_claimant_api_helper.hash_nino(nino, context.nino_salt),
            context.test_run_name,
        )
        # Flattened the old nested ifs: success requires found AND suspended.
        if (
            context.claimant_api_status_code == 200
            and context.claimant_api_response.get("claimantFound")
            and context.claimant_api_response.get("suspendedDate")
        ):
            console_printer.print_info(
                "Successfully found claimant and they are suspended"
            )
            claimant_suspended = True
        else:
            # Back off before retrying (previously slept even after success).
            time.sleep(1)

    assert claimant_suspended, "suspendedDate could not be found or was False"
def _resolved_period_date(todays_date, assessment_period, prefix):
    """Resolve a fixture "<prefix>" date from its optional day/month offsets
    to a concrete 'YYYYMMDD' string (offsets absent -> None, as before)."""
    return claimant_api_data_generator.generate_dynamic_date(
        todays_date,
        assessment_period.get(f"{prefix}_days_offset"),
        assessment_period.get(f"{prefix}_month_offset"),
    ).strftime("%Y%m%d")


@then(
    "The assessment periods are correctly returned using data file of '{data_file_name}'"
)
def step_impl(context, data_file_name):
    """Verify the claimant API response contains exactly the assessment
    periods defined in the fixture data file, decrypting each period's
    take home pay (KMS-wrapped AES-GCM) and comparing it to the fixture.
    """
    global message_type
    assert (
        context.claimant_api_status_code == 200
    ), f"Status code from response was {context.claimant_api_status_code}, not 200"
    assert context.claimant_api_response is not None, "Response body was empty"

    response = context.claimant_api_response
    folder = streaming_data_helper.generate_fixture_data_folder(message_type)
    expected_assessment_periods = (
        ucfs_claimant_api_helper.retrieve_assessment_periods_from_claimant_data_file(
            input_data_file_name=data_file_name,
            fixture_files_root=context.fixture_path_local,
            fixture_data_folder=folder,
        )
    )

    # Fixtures without an explicit start_date express both dates as offsets
    # from today; resolve them in place before comparing.
    for assessment_period in expected_assessment_periods:
        if "start_date" not in assessment_period:
            assessment_period["start_date"] = _resolved_period_date(
                context.todays_date, assessment_period, "start_date"
            )
            assessment_period["end_date"] = _resolved_period_date(
                context.todays_date, assessment_period, "end_date"
            )

    try:
        actual_assessment_periods = response["assessmentPeriod"]
        console_printer.print_info(f"assessment period {response['assessmentPeriod']}")
    except Exception as ex:
        console_printer.print_error_text(
            f"Could not retrieve assessment periods from claimant API response of '{response}' and error of '{ex}'"
        )
        raise ex

    # AES-GCM nonce length (bytes) prefixed to the encrypted take home pay.
    nonce_size = 12
    console_printer.print_info(
        f"Successfully retrieved '{len(actual_assessment_periods)}' actual assessment periods"
    )
    assert len(actual_assessment_periods) == len(
        expected_assessment_periods
    ), "Expected assessment period count does not match actual count"

    for expected_assessment_period in expected_assessment_periods:
        assessment_period_found = False
        for actual_assessment_period in actual_assessment_periods:
            if (
                actual_assessment_period["fromDate"]
                == expected_assessment_period["start_date"]
            ):
                assessment_period_found = True
                # Bug fix: the old failure messages referenced an undefined
                # 'index' variable, raising NameError instead of reporting.
                assert (
                    actual_assessment_period["toDate"]
                    == expected_assessment_period["end_date"]
                ), (
                    f"Expected assessment period end_date "
                    f"'{expected_assessment_period['end_date']}' does not match "
                    f"actual toDate {actual_assessment_period['toDate']}"
                )
                # Decrypt: KMS unwraps the data key, which AES-GCM decrypts
                # the nonce-prefixed take home pay with.
                cipher_text_blob = base64.urlsafe_b64decode(
                    actual_assessment_period["amount"]["cipherTextBlob"]
                )
                data_key = aws_helper.kms_decrypt_cipher_text(
                    cipher_text_blob, context.claimant_api_storage_region
                )
                console_printer.print_info(
                    f"Successfully decoded data key of '{data_key}'"
                )
                aesgcm = AESGCM(data_key)
                take_home_pay_enc = base64.urlsafe_b64decode(
                    actual_assessment_period["amount"]["takeHomePay"]
                )
                nonce = take_home_pay_enc[:nonce_size]
                take_home_pay_data = take_home_pay_enc[nonce_size:]
                actual_take_home_pay = aesgcm.decrypt(
                    nonce, take_home_pay_data, None
                ).decode("utf-8")
                console_printer.print_info(
                    f"Successfully decoded take home pay of '{actual_take_home_pay}'"
                )
                assert (
                    actual_take_home_pay == expected_assessment_period["amount"]
                ), f"Take home pay was {actual_take_home_pay} which does not match expected value of {expected_assessment_period['amount']}"
        assert (
            assessment_period_found
        ), f"Expected assessment period with start_date of '{expected_assessment_period['start_date']}' not found in actual assessment periods"
@then("The messages are sent to the DLQ")
def step_impl(context):
expected_dlq_ids = context.generated_ids
console_printer.print_info(
f"Found '{len(expected_dlq_ids)}' expected DLQ ids of '{expected_dlq_ids}'"
)
time_taken = 1
timeout_time = time.time() + context.timeout
while time.time() < timeout_time:
actual_dlq_files_content_for_today = aws_helper.retrieve_files_from_s3(
s3_bucket=context.s3_ingest_bucket,
path=context.s3_dlq_path_and_date_prefix,
pattern=None,
remove_whitespace=True,
)
console_printer.print_info(
f"Found '{len(actual_dlq_files_content_for_today)}' actual DLQ files in s3 folder with prefix of '{context.s3_dlq_path_and_date_prefix}'"
)
ids_found = 0
for expected_dlq_id in expected_dlq_ids:
for actual_dlq_file_content_for_today in actual_dlq_files_content_for_today:
if str(expected_dlq_id) in actual_dlq_file_content_for_today:
ids_found += 1
console_printer.print_info(
f"Successfully found {ids_found} DLQ files"
)
if ids_found == len(expected_dlq_ids):
console_printer.print_info(f"Successfully found all DLQ files")
return
time.sleep(1)
time_taken += 1
raise AssertionError("Could not find DLQ files within timeout")
|
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
# File Name : dataset.py
# Purpose :
# Creation Date : 05-01-2019
# Author : <NAME> [sli[at]informatik[dot]uni-hamburg[dot]de]
# Author : <NAME> [liang[at]informatik[dot]uni-hamburg[dot]de]
from __future__ import division, print_function
import os
import torch.utils.data
import torch.nn as nn
import torchvision.transforms as trans
import numpy as np
import yaml
from audio_pouring.utils.utils import get_pouring_path
# Path to the preprocessing configuration shipped with the package.
config_file = os.path.join(get_pouring_path(), "config/preprocess.yaml")
# safe_load avoids constructing arbitrary Python objects from the YAML, and
# the context manager closes the handle (yaml.load(open(...)) leaked it and
# relied on the deprecated implicit Loader).
with open(config_file, "r") as config_fh:
    config = yaml.safe_load(config_fh)
def pitch_shift_spectrogram(spectrogram):
    """Randomly shift a spectrogram along the frequency axis (axis 0).

    The shift is drawn uniformly from roughly +/-5% of the number of
    frequency bins, approximating a small pitch shift in the spectral
    domain.

    Parameters
    ----------
    spectrogram : np.ndarray
        Array with frequency bins on axis 0.

    Returns
    -------
    np.ndarray
        The spectrogram rolled along axis 0; same shape as the input.
        Returned unchanged when there are fewer than 20 bins.
    """
    nb_freq_bins = spectrogram.shape[0]
    max_shifts = nb_freq_bins // 20  # around 5% shift
    if max_shifts == 0:
        # Bug fix: np.random.randint(0, 0) raises ValueError for inputs
        # with fewer than 20 frequency bins.
        return spectrogram
    # Bug fix: high bound is exclusive, so +max_shifts was never drawn;
    # make the shift range symmetric.
    nb_shifts = np.random.randint(-max_shifts, max_shifts + 1)
    return np.roll(spectrogram, nb_shifts, axis=0)
class PouringDataset(torch.utils.data.Dataset):
    """Dataset of pouring-action audio spectrograms and their targets.

    ``self.label`` is an array of per-sample record paths; each record
    (``.npy``) holds the audio spectrogram at index 1, a scalar target at
    index 2 (non-RNN mode) and a per-frame target sequence at index 3
    (RNN mode) -- assumed from the indexing below; TODO confirm.
    """

    def __init__(self, path, input_audio_size, multi_modal=False, train_rnn=False, is_fixed=False,
                 is_train=False, is_filtered=False, is_noise=False, bottle_train='1', bottle_test='1', seg_audio=False):
        """Store the configuration and load the matching label file.

        Parameters
        ----------
        path : str
            Dataset root directory; label ``.npy`` files live directly here.
        input_audio_size : int
            Expected number of rows of each audio array (checked in
            ``__getitem__``).
        multi_modal : bool
            Stored but not used within this class.
        train_rnn : bool
            If True, targets are per-frame sequences and the audio is
            returned transposed (time-major).
        is_fixed, is_train, is_filtered, is_noise : bool
            Flags selecting which label ``.npy`` file is loaded, and which
            normalization statistics are used (filtered vs. unfiltered).
        bottle_train, bottle_test : str
            Bottle ids used to build the train/test label file names.
        seg_audio : bool
            If True, crop the audio to frames 20:120 in ``__getitem__``.
        """
        self.path = path
        self.input_audio_size = input_audio_size
        self.is_train = is_train
        self.multi_modal = multi_modal
        self.train_rnn = train_rnn
        self.bottle_train = bottle_train
        self.bottle_test = bottle_test
        self.seg_audio = seg_audio
        # self.seq_length = seq_length
        # Normalization statistics come from the module-level YAML config.
        if is_filtered:
            self.audio_mean = config["filtered"]["audio_mean"]
            self.audio_std = config["filtered"]["audio_std"]
        else:
            self.audio_mean = config["unfiltered"]["audio_mean"]
            self.audio_std = config["unfiltered"]["audio_std"]
        # Pick the label file: noise > filtered > fixed(robot) > per-bottle.
        if is_train:
            if self.train_rnn:
                if is_noise:
                    self.label = np.load(path + 'noise_good_fixed_bottle' + self.bottle_train + '_train.npy')
                else:
                    if is_filtered:
                        self.label = np.load(path + 'filter_npy' + self.bottle_train + '_train.npy')
                    else:
                        if is_fixed:
                            self.label = np.load(path + 'robot_train0.npy')
                        else:
                            self.label = np.load(path + 'bottle' + self.bottle_train + '_train.npy')
            else:
                self.label = np.load(path + 'bottle' + self.bottle_train + '_train.npy')
        else:  # load test dataset
            if self.train_rnn:
                if is_noise:
                    self.label = np.load(path + 'good_fixed_bottle' + self.bottle_test + '_test.npy')
                else:
                    if is_filtered:
                        # NOTE(review): uses bottle_train even though this is
                        # the test branch -- possibly a copy-paste slip from
                        # the train path above; confirm before changing.
                        self.label = np.load(path + 'filter_npy' + self.bottle_train + '_test.npy')
                    else:
                        if is_fixed:
                            self.label = np.load(path + 'robot_test0.npy')
                        else:
                            self.label = np.load(path + 'bottle' + self.bottle_test + '_test.npy')
            else:
                self.label = np.load(path + 'bottle' + self.bottle_test + '_test.npy')
        self.length = len(self.label)

    def __getitem__(self, index):
        """Load one sample: a normalized spectrogram and its target.

        Returns ``(audio.T, target_sequence)`` in RNN mode, otherwise
        ``(audio, scalar_target)``.
        """
        # Keep only the last two path components so the record is resolved
        # relative to self.path rather than the absolute path stored in the
        # label file.
        tmp_path = self.label[index].split("/")[-2:]
        tag = np.load(os.path.join(self.path, tmp_path[0], tmp_path[1]))
        if not self.train_rnn:
            tag = np.squeeze(tag)
            target = np.array(tag[2]).astype(np.float32)
        else:
            target = np.array(tag[3]).astype(np.float32)
        # Standardize the spectrogram with the configured statistics.
        audio = tag[1].astype(np.float32)
        audio -= self.audio_mean
        audio /= self.audio_std
        if self.seg_audio:
            audio = audio[20:120]
        assert (audio.shape[0] == self.input_audio_size)
        # Augmented(if train/is_noise)
        # if self.is_train:
        #     audio = pitch_shift_spectrogram(audio)
        if self.train_rnn:
            # One target value per audio frame.
            assert (target.shape[0] == audio.shape[1])
            return audio.T, target
        else:
            return audio, target

    def __len__(self):
        """Number of samples listed in the loaded label file."""
        return self.length
if __name__ == "__main__":
b = PouringDataset("../dataset/", input_audio_size=257, input_force_size=1, multi_modal=False,
train_rnn=True, is_fixed=True, is_train=True, seg_audio=False)
train_loader = torch.utils.data.DataLoader(b, batch_size=1, num_workers=32, pin_memory=True, )
for batch_idx, (audio_, height_) in enumerate(train_loader):
print(batch_idx, audio_.shape, height_.shape)
a, f, h, s = b.__getitem__(1)
|
# repository: cnr-ibba/IMAGE-ValidationTool
#!/usr/bin/env python3
# -*- coding: utf-8 -*
import unittest
from image_validation import use_ontology
class TestUseOntology(unittest.TestCase):
    """Tests for ``image_validation.use_ontology``.

    NOTE(review): these tests appear to call live annotation services
    (Zooma / OLS) through the use_ontology helpers, so the asserted
    mappings can drift as curated data changes -- confirm before relying
    on exact values.
    """

    # this test is more about how to use zooma properly, the function itself is like a by-product
    def test_use_zooma(self):
        """Check use_zooma mappings across several annotation categories."""
        # category: species
        # organism in gxa datasource with high, disallow any datasource, good
        expected = {
            'type': 'organism',
            'confidence': 'High',
            'text': 'Mus musculus',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/NCBITaxon_10090'
        }
        self.assertDictEqual(use_ontology.use_zooma('mus musculus', 'species'), expected)
        self.assertDictEqual(use_ontology.use_zooma('mus musculus', 'organism'), expected)
        self.assertIs(use_ontology.use_zooma('mouse', 'organism'), None)

        # category: country
        expected = {
            'type': 'country',
            'confidence': 'Good',
            'text': 'Germany',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/NCIT_C16636'
        }
        # country type=null, two matches medium/low, so returned value is None
        self.assertDictEqual(use_ontology.use_zooma('germany', 'country'), expected)
        self.assertIs(use_ontology.use_zooma('deutschland', 'country'), None)
        # country type=null, while using ena datasource, high
        test = use_ontology.use_zooma('norway', 'country')
        if not test:
            print("\nIMAGE zooma library not loaded into Zooma for mapping")

        # category: breed
        expected = {
            'type': 'breed',
            'confidence': 'Good',
            'text': 'Bentheim Black Pied',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0000347'
        }
        self.assertDictEqual(use_ontology.use_zooma('bentheim black pied', 'breed'), expected)

        # category: other
        # Health status type=disease
        expected = {
            'type': 'disease',
            'confidence': 'High',
            'text': 'normal',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/PATO_0000461'
        }
        self.assertDictEqual(use_ontology.use_zooma('normal', 'disease'), expected)

        # Organism part
        expected = {
            'type': 'organism part',
            'confidence': 'High',
            'text': 'spleen',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/UBERON_0002106'
        }
        self.assertDictEqual(use_ontology.use_zooma('spleen', 'organism part'), expected)
        # Organism part UBERON_0001968 (semen) medium for default OLS setting
        self.assertIs(use_ontology.use_zooma('semen', 'organism part'), None)

        # developmental stage
        expected = {
            'type': 'developmental stage',
            'confidence': 'High',
            'text': 'adult',
            'ontologyTerms': 'http://www.ebi.ac.uk/efo/EFO_0001272'
        }
        self.assertDictEqual(use_ontology.use_zooma('adult', 'developmental stage'), expected)
        # Physiological stage several medium/low none of them related to physiological stage PATO_0001701 (mature)
        self.assertIs(use_ontology.use_zooma('mature', 'physiological stage'), None)

        # test limiting datasource
        # without limiting to LBO, match to a random GAZ term
        self.assertIs(use_ontology.use_zooma('Poitevine', 'breed'), None)
        # without mapping to the country
        self.assertIsNone(use_ontology.use_zooma('turkey', 'species'))

    def test_use_ontology_types(self):
        """use_zooma must reject non-string arguments with TypeError."""
        self.assertRaises(TypeError, use_ontology.use_zooma, 'string', 123)
        self.assertRaises(TypeError, use_ontology.use_zooma, -12.34, 'string')
        self.assertRaises(TypeError, use_ontology.use_zooma, False, 'string')

    def test_get_general_breed_by_species(self):
        """Map species names (case-insensitively) to general breed terms,
        both plain and crossbreed variants."""
        expected_buffalo = {
            'text': 'buffalo breed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0001042'
        }
        expected_cattle = {
            'text': 'cattle breed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0000001'
        }
        expected_chicken = {
            'text': 'chicken breed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0000002'
        }
        expected_goat = {
            'text': 'goat breed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0000954'
        }
        expected_horse = {
            'text': 'horse breed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0000713'
        }
        expected_pig = {
            'text': 'pig breed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0000003'
        }
        expected_sheep = {
            'text': 'sheep breed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0000004'
        }
        self.assertDictEqual(use_ontology.get_general_breed_by_species('bubalus bubalis'), expected_buffalo)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('bos taurus'), expected_cattle)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('Gallus gallus'), expected_chicken)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('Ovis aries'), expected_sheep)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('suS scrofA'), expected_pig)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('capra HIrcus'), expected_goat)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('Equus caballus'), expected_horse)
        expected_cattle_cross = {
            'text': 'Cattle crossbreed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0001036'
        }
        expected_chicken_cross = {
            'text': 'Chicken crossbreed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0001037'
        }
        expected_goat_cross = {
            'text': 'Goat crossbreed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0001038'
        }
        expected_horse_cross = {
            'text': 'Horse crossbreed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0001039'
        }
        expected_pig_cross = {
            'text': 'Pig crossbreed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0001040'
        }
        expected_sheep_cross = {
            'text': 'Sheep crossbreed',
            'ontologyTerms': 'http://purl.obolibrary.org/obo/LBO_0001041'
        }
        self.assertDictEqual(use_ontology.get_general_breed_by_species('bos taurus', True), expected_cattle_cross)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('Gallus gallus', True), expected_chicken_cross)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('capra HIrcus', True), expected_goat_cross)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('sus scrofa', True), expected_pig_cross)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('Ovis aries', True), expected_sheep_cross)
        self.assertDictEqual(use_ontology.get_general_breed_by_species('Equus caballus', True), expected_horse_cross)
        # Unknown species map to an empty dict.
        self.assertDictEqual(use_ontology.get_general_breed_by_species('random species'), {})
        self.assertDictEqual(use_ontology.get_general_breed_by_species('random species', True), {})
        # self.assertIs(use_ontology.get_general_breed_by_species('random species'), None)

    def test_get_general_breed_by_species_types(self):
        """get_general_breed_by_species must reject wrongly-typed args."""
        self.assertRaises(TypeError, use_ontology.get_general_breed_by_species, 123)
        self.assertRaises(TypeError, use_ontology.get_general_breed_by_species, -12.34)
        self.assertRaises(TypeError, use_ontology.get_general_breed_by_species, False)
        self.assertRaises(TypeError, use_ontology.get_general_breed_by_species, 'string', 'False')
        self.assertRaises(TypeError, use_ontology.get_general_breed_by_species, 'string', 0)
        self.assertRaises(TypeError, use_ontology.get_general_breed_by_species, 'string', -1.0)

    # the asserted values are subject to change
    def test_ontology(self):
        """Exercise the Ontology wrapper on a missing term, several valid
        terms, label matching, equality, and argument type checks."""
        wrong = use_ontology.Ontology("WRONG")
        self.assertFalse(wrong.found)
        self.assertEqual(wrong.get_short_term(), "WRONG")
        self.assertEqual(wrong.get_iri(), "")
        self.assertEqual(wrong.get_label(), "")
        self.assertListEqual(wrong.get_labels_and_synonyms(), [])
        self.assertEqual(wrong.get_ontology_name(), '')
        self.assertIsNone(wrong.is_leaf())
        correct_human = use_ontology.Ontology('NCBITaxon_9606')
        self.assertTrue(correct_human.found)
        self.assertEqual(correct_human.get_short_term(), "NCBITaxon_9606")
        self.assertEqual(correct_human.get_iri(), "http://purl.obolibrary.org/obo/NCBITaxon_9606")
        self.assertEqual(correct_human.get_label(), "Homo sapiens")
        expected = ['Homo sapiens']
        self.assertListEqual(correct_human.get_labels_and_synonyms(), expected)
        self.assertEqual(correct_human.get_ontology_name(), 'ncbitaxon')
        self.assertFalse(correct_human.is_leaf())
        correct_submitter = use_ontology.Ontology("EFO_0001741")
        self.assertTrue(correct_submitter.found)
        self.assertEqual(correct_submitter.get_short_term(), "EFO_0001741")
        self.assertEqual(correct_submitter.get_iri(), "http://www.ebi.ac.uk/efo/EFO_0001741")
        self.assertEqual(correct_submitter.get_label(), "submitter")
        expected = ['submitter']
        self.assertListEqual(correct_submitter.get_labels_and_synonyms(), expected)
        self.assertEqual(correct_submitter.get_ontology_name(), 'efo')
        self.assertTrue(correct_submitter.is_leaf())
        correct_hair = use_ontology.Ontology("UBERON_0001037")
        self.assertTrue(correct_hair.found)
        self.assertEqual(correct_hair.get_short_term(), "UBERON_0001037")
        self.assertEqual(correct_hair.get_iri(), "http://purl.obolibrary.org/obo/UBERON_0001037")
        self.assertEqual(correct_hair.get_label(), "strand of hair")
        expected = ['strand of hair', 'hair']
        self.assertListEqual(correct_hair.get_labels_and_synonyms(), expected)
        self.assertEqual(correct_hair.get_ontology_name(), 'uberon')
        self.assertFalse(correct_hair.is_leaf())
        # label_match_ontology defaults to exact (case-sensitive) matching;
        # the False flag relaxes it to synonym / case-insensitive matching.
        self.assertFalse(correct_hair.label_match_ontology("strand Hair"))
        self.assertFalse(correct_hair.label_match_ontology("strand of Hair"))
        self.assertFalse(correct_hair.label_match_ontology("Hair"))
        self.assertTrue(correct_hair.label_match_ontology("Hair", False))
        self.assertFalse(correct_hair.__eq__(correct_human))
        self.assertTrue(correct_hair == correct_hair)
        self.assertNotEqual(correct_hair, "hair")
        self.assertRaises(TypeError, use_ontology.Ontology, 12)
        self.assertRaises(TypeError, use_ontology.Ontology, -12.34)
        self.assertRaises(TypeError, use_ontology.Ontology, True)
        self.assertRaises(TypeError, correct_hair.label_match_ontology, "string", "true")
        self.assertRaises(TypeError, correct_hair.label_match_ontology, "string", 1)
        self.assertRaises(TypeError, correct_hair.label_match_ontology, "string", -1.0)
        self.assertRaises(TypeError, correct_hair.label_match_ontology, 12, True)
        self.assertRaises(TypeError, correct_hair.label_match_ontology, 12.34, True)
        self.assertRaises(TypeError, correct_hair.label_match_ontology, True, True)

    def test_ontology_cache(self):
        """Exercise OntologyCache add/get/contains/has_parent, including
        argument type checks."""
        short_term = "NCBITaxon_9606"
        ontology = use_ontology.Ontology(short_term)
        # test contains and add_ontology
        cache = use_ontology.OntologyCache()
        self.assertFalse(cache.contains(short_term))
        cache.add_ontology(ontology)
        self.assertTrue(cache.contains(short_term))
        # test get_ontology
        retrieved = cache.get_ontology(short_term)
        self.assertEqual(ontology, retrieved)
        # test has_parent
        self.assertTrue(cache.has_parent(short_term, "NCBITaxon_1"))
        # wrong direction
        self.assertFalse(cache.has_parent("NCBITaxon_1", short_term))
        # irrelevant
        self.assertFalse(cache.has_parent("UBERON_0001037", "EFO_0001741"))
        self.assertRaises(TypeError, cache.add_ontology, "string")
        self.assertRaises(TypeError, cache.add_ontology, 12)
        self.assertRaises(TypeError, cache.get_ontology, 12)
        self.assertRaises(TypeError, cache.get_ontology, True)
        self.assertRaises(TypeError, cache.has_parent, "str", True)
        self.assertRaises(TypeError, cache.has_parent, True, "str")
        self.assertRaises(TypeError, cache.contains, 12)
        self.assertRaises(TypeError, cache.contains, True)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.framework.python.ops import variables
from collections import namedtuple
# Shorthand for the TF-Slim module used throughout this file.
slim = tf.contrib.slim

# One model replica: its outputs, the name scope it was built in, and the
# device it was placed on.
Clone = namedtuple('Clone', ['outputs', 'scope', 'device', ])
@add_arg_scope
def l2_normalization(inputs, scaling=False, scale_initializer=tf.ones_initializer(), reuse=None,
                     variables_collections=None, outputs_collections=None,
                     data_format='NHWC', trainable=True, scope=None):
    """L2-normalize `inputs` along the channel axis, optionally multiplying
    by a learnable per-channel scale 'gamma'.

    Args:
        inputs: Tensor in NHWC or NCHW layout.
        scaling: If True, apply the learnable per-channel scale.
        scale_initializer: Initializer for the scale variable.
        reuse: Whether to reuse variables within the scope.
        variables_collections: Optional collections for the scale variable.
        outputs_collections: Optional collections for the output.
        data_format: 'NHWC' or 'NCHW'.
        trainable: Whether the scale variable is trainable.
        scope: Optional variable_scope name.

    Returns:
        The normalized (and optionally scaled) tensor.

    Raises:
        ValueError: If `data_format` is neither 'NHWC' nor 'NCHW'
            (previously this fell through to a NameError below).
    """
    with tf.variable_scope(scope, 'L2Normalization', [inputs], reuse=reuse) as sc:
        inputs_shape = inputs.get_shape()
        inputs_rank = inputs_shape.ndims
        dtype = inputs.dtype.base_dtype
        if data_format == 'NHWC':
            norm_dim = tf.range(inputs_rank - 1, inputs_rank)
            params_shape = inputs_shape[-1:]
        elif data_format == 'NCHW':
            norm_dim = tf.range(1, 2)
            # NOTE(review): the parentheses have no effect -- this is a bare
            # Dimension, not a 1-tuple. Kept as-is; confirm model_variable
            # accepts it before changing.
            params_shape = (inputs_shape[1])
        else:
            raise ValueError('data_format must be NHWC or NCHW, got %s' % data_format)
        outputs = tf.nn.l2_normalize(inputs, norm_dim, epsilon=1e-12)
        if scaling:
            scale_collections = utils.get_variable_collections(variables_collections, 'scale')
            scale = variables.model_variable('gamma', shape=params_shape, dtype=dtype, initializer=scale_initializer,
                                             collections=scale_collections, trainable=trainable)
            if data_format == 'NHWC':
                outputs = tf.multiply(outputs, scale)
            elif data_format == 'NCHW':
                # Broadcast the per-channel scale over H and W.
                scale = tf.expand_dims(scale, axis=-1)
                scale = tf.expand_dims(scale, axis=-1)
                outputs = tf.multiply(outputs, scale)
        return utils.collect_named_outputs(outputs_collections, sc.original_name_scope, outputs)
@add_arg_scope
def dataFormatChange(inputs, data_format='NHWC', scope=None):
    """Return `inputs` in NHWC layout.

    NHWC passes through unchanged; NCHW is transposed to NHWC. Raises
    ValueError for any other data_format (previously this fell through to
    an UnboundLocalError on `net`).
    """
    with tf.name_scope(scope, 'data_format_change', [inputs]):
        if data_format == 'NHWC':
            net = inputs
        elif data_format == 'NCHW':
            net = tf.transpose(inputs, perm=(0, 2, 3, 1))
        else:
            raise ValueError('data_format must be NHWC or NCHW, got %s' % data_format)
        return net
def tensorShape(tensor, rank=3):
    """Return the shape of `tensor` as a list, preferring static values.

    Statically known dimensions are returned as Python ints; unknown ones
    are replaced by the corresponding dynamic `tf.shape` scalars. With
    `rank=None` the rank is taken from the static shape; otherwise the
    static shape is checked against `rank`.
    """
    shape = tensor.get_shape()
    if shape.is_fully_defined():
        return shape.as_list()
    if rank is None:
        static_dims = shape.as_list()
        rank = len(static_dims)
    else:
        static_dims = tensor.get_shape().with_rank(rank).as_list()
    dynamic_dims = tf.unstack(tf.shape(tensor), rank)
    return [dyn if static is None else static
            for static, dyn in zip(static_dims, dynamic_dims)]
def listReshape(l, shape=None):
    """Flatten or regroup a flat list.

    Without `shape`, flattens one level of list/tuple nesting. With `shape`,
    consumes `l` left to right: each entry `s` of `shape` contributes a
    single element (s == 1) or a sub-list of `s` elements.

    Bug fix: the cursor previously only advanced in the `s != 1` branch, so
    any group following a singleton was read from the wrong offset.
    """
    result = []
    if shape is None:
        for item in l:
            if isinstance(item, (list, tuple)):
                result.extend(item)
            else:
                result.append(item)
    else:
        pos = 0
        for s in shape:
            if s == 1:
                result.append(l[pos])
            else:
                result.append(l[pos:pos + s])
            # Advance for every shape entry, including singletons.
            pos += s
    return result
def creatClones(config, model_fn, args=None, kwargs=None):
    """Build `config.num_clones` replicas of `model_fn`.

    Each clone is constructed in its own name scope on the device returned
    by `config.clone_device(i)`; variables are reused across clones so all
    replicas share parameters.

    Returns:
        A list of `Clone` namedtuples (outputs, scope, device).
    """
    clones = []
    args = args or []
    kwargs = kwargs or {}
    # Give slim.model_variable / slim.variable a default variable device.
    with slim.arg_scope([slim.model_variable, slim.variable], device=config.variables_device()):
        for i in range(0, config.num_clones):
            with tf.name_scope(config.clone_scope(i)) as clone_scope:
                clone_device = config.clone_device(i)
                with tf.device(clone_device):
                    # Reuse variables for every clone after the first.
                    with tf.variable_scope(tf.get_variable_scope(), reuse=True if i > 0 else None):
                        outputs = model_fn(*args, **kwargs)
                        clones.append(Clone(outputs, clone_scope, clone_device))
    return clones
def setLearningRate(flags, num_samples_per_epoch, global_step):
    """Build the learning-rate tensor configured by `flags`.

    Supports 'exponential', 'fixed' and 'polynomial' decay schedules;
    `decay_steps` is derived from the epoch size and batch size.

    Raises:
        ValueError: If `flags.learning_rate_decay_type` is unknown.
    """
    decay_steps = int(num_samples_per_epoch / flags.batch_size * flags.num_epochs_per_decay)
    if flags.learning_rate_decay_type == 'exponential':
        return tf.train.exponential_decay(flags.learning_rate,
                                          global_step,
                                          decay_steps,
                                          flags.learning_rate_decay_factor,
                                          staircase=True,
                                          name='exponential_decay_learning_rate')
    elif flags.learning_rate_decay_type == 'fixed':
        return tf.constant(flags.learning_rate, name='fixed_learning_rate')
    elif flags.learning_rate_decay_type == 'polynomial':
        return tf.train.polynomial_decay(flags.learning_rate,
                                         global_step,
                                         decay_steps,
                                         flags.end_learning_rate,
                                         power=1.0,
                                         cycle=False,
                                         name='polynomial_decay_learning_rate')
    else:
        # Bug fix: ValueError received two args, so the message was never
        # %-formatted into a single string.
        raise ValueError('learning_rate_decay_type [%s] was not recognized'
                         % flags.learning_rate_decay_type)
def setOptimizer(flags, learning_rate):
    """Construct the optimizer named by `flags.optimizer`.

    Args:
        flags: Configuration object with optimizer name and hyperparameters.
        learning_rate: Scalar tensor or float learning rate.

    Returns:
        A configured `tf.train.Optimizer`.

    Raises:
        ValueError: If `flags.optimizer` is not a supported name.
    """
    if flags.optimizer == 'adadelta':
        optimizer = tf.train.AdadeltaOptimizer(
            learning_rate,
            rho=flags.adadelta_rho,
            epsilon=flags.opt_epsilon)
    elif flags.optimizer == 'adagrad':
        optimizer = tf.train.AdagradOptimizer(
            learning_rate,
            initial_accumulator_value=flags.adagrad_initial_accumulator_value)
    elif flags.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(
            learning_rate,
            beta1=flags.adam_beta1,
            beta2=flags.adam_beta2,
            epsilon=flags.opt_epsilon)
    elif flags.optimizer == 'ftrl':
        optimizer = tf.train.FtrlOptimizer(
            learning_rate,
            learning_rate_power=flags.ftrl_learning_rate_power,
            initial_accumulator_value=flags.ftrl_initial_accumulator_value,
            l1_regularization_strength=flags.ftrl_l1,
            l2_regularization_strength=flags.ftrl_l2)
    elif flags.optimizer == 'momentum':
        optimizer = tf.train.MomentumOptimizer(
            learning_rate,
            momentum=flags.momentum,
            name='Momentum')
    elif flags.optimizer == 'rmsprop':
        optimizer = tf.train.RMSPropOptimizer(
            learning_rate,
            decay=flags.rmsprop_decay,
            momentum=flags.rmsprop_momentum,
            epsilon=flags.opt_epsilon)
    elif flags.optimizer == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    else:
        # Bug fix: ValueError received two args, so the message was never
        # %-formatted into a single string.
        raise ValueError('Optimizer [%s] was not recognized' % flags.optimizer)
    return optimizer
def getTrainableVariables(flags):
    """Return the variables to optimize.

    When `flags.trainable_scopes` is None, every trainable variable is
    returned; otherwise only those inside the comma-separated scopes.
    """
    if flags.trainable_scopes is None:
        return tf.trainable_variables()
    variables_to_train = []
    for scope in (name.strip() for name in flags.trainable_scopes.split(',')):
        variables_to_train.extend(
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope))
    return variables_to_train
def optimizerClones(clones, optimizer, regularization_losses=None, **kwargs):
    """Compute the total loss and summed gradients over all clones.

    Args:
        clones: List of `Clone` namedtuples.
        optimizer: A tf.Optimizer used to compute gradients.
        regularization_losses: Optional list of regularization losses; when
            None, the REGULARIZATION_LOSSES collection is used.
        **kwargs: Passed through to `optimizer.compute_gradients`.

    Returns:
        A tuple (total_loss, grads_and_vars) where gradients are summed
        across clones.
    """
    grads_and_vars = []
    clones_losses = []
    num_clones = len(clones)
    if regularization_losses is None:
        regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    for clone in clones:
        with tf.name_scope(clone.scope):
            clone_loss, clone_grad = _optimize_clone(optimizer, clone, num_clones, regularization_losses, **kwargs)
            if clone_loss is not None:
                clones_losses.append(clone_loss)
                grads_and_vars.append(clone_grad)
            # Only the first clone accounts for the regularization losses;
            # clearing them here avoids counting them once per clone.
            regularization_losses = None
    total_loss = tf.add_n(clones_losses, name='total_loss')
    grads_and_vars = _sum_clones_gradients(grads_and_vars)
    return total_loss, grads_and_vars
def setInit(flags):
    """Build an init function that restores variables from a checkpoint.

    Returns None when no checkpoint path is configured or when training
    would resume from an existing checkpoint in `flags.train_dir`.
    Variables whose op name starts with any of
    `flags.checkpoint_exclude_scopes` are skipped, and names can be
    remapped through `flags.checkpoint_model_scope`.
    """
    if flags.checkpoint_path is None:
        return None
    if tf.train.latest_checkpoint(flags.train_dir):
        tf.logging.info(
            'Ignoring --checkpoint_path because a checkpoint already exists in %s'
            % flags.train_dir)
        return None

    exclusions = []
    if flags.checkpoint_exclude_scopes:
        exclusions = [scope.strip()
                      for scope in flags.checkpoint_exclude_scopes.split(',')]

    variables_to_restore = [
        var for var in slim.get_model_variables()
        if not any(var.op.name.startswith(exclusion)
                   for exclusion in exclusions)
    ]
    if flags.checkpoint_model_scope is not None:
        variables_to_restore = {
            var.op.name.replace(flags.model_name,
                                flags.checkpoint_model_scope): var
            for var in variables_to_restore}

    if tf.gfile.IsDirectory(flags.checkpoint_path):
        checkpoint_path = tf.train.latest_checkpoint(flags.checkpoint_path)
    else:
        checkpoint_path = flags.checkpoint_path
    tf.logging.info('Fine-tuning from %s. Ignoring missing vars: %s' % (checkpoint_path, flags.ignore_missing_vars))
    return slim.assign_from_checkpoint_fn(
        checkpoint_path,
        variables_to_restore,
        ignore_missing_vars=flags.ignore_missing_vars)
@add_arg_scope
def pad2d(inputs, pad=(0, 0), mode='CONSTANT', data_format='NHWC', trainable=True, scope=None):
    """Pad the spatial (H, W) dimensions of `inputs` symmetrically.

    Args:
        inputs: 4-D tensor in NHWC or NCHW layout.
        pad: (pad_h, pad_w) applied on both sides of each spatial dim.
        mode: Padding mode passed to `tf.pad`.
        data_format: 'NHWC' or 'NCHW'.
        trainable: Unused; kept for arg_scope signature compatibility.
        scope: Optional name scope.

    Raises:
        ValueError: If `data_format` is neither 'NHWC' nor 'NCHW'
            (previously this fell through to an UnboundLocalError).
    """
    with tf.name_scope(scope, 'pad2d', [inputs]):
        if data_format == 'NHWC':
            paddings = [[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]
        elif data_format == 'NCHW':
            paddings = [[0, 0], [0, 0], [pad[0], pad[0]], [pad[1], pad[1]]]
        else:
            raise ValueError('data_format must be NHWC or NCHW, got %s' % data_format)
        net = tf.pad(inputs, paddings, mode=mode)
        return net
def abs_smooth(x):
    """Element-wise smooth-L1: 0.5*x^2 where |x| < 1, else |x| - 0.5."""
    abs_x = tf.abs(x)
    clipped = tf.minimum(abs_x, 1)
    return 0.5 * ((abs_x - 1) * clipped + abs_x)
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
                    **kwargs):
    """Compute the loss and gradients for a single clone.

    Args:
      optimizer: A tf.Optimizer object.
      clone: A Clone namedtuple.
      num_clones: The number of clones being deployed.
      regularization_losses: Possibly empty list of regularization_losses
        to add to the clone losses.
      **kwargs: Dict of kwarg to pass to compute_gradients().

    Returns:
      A tuple (clone_loss, clone_grads_and_vars); both are None when the
      clone contributes no loss.
    """
    clone_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
    if clone_loss is None:
        return None, None
    # Gradients are computed on the clone's own device.
    with tf.device(clone.device):
        grads_and_vars = optimizer.compute_gradients(clone_loss, **kwargs)
    return clone_loss, grads_and_vars
def _gather_clone_loss(clone, num_clones, regularization_losses):
    """Collect and sum all losses belonging to a single clone.

    Args:
      clone: A Clone namedtuple.
      num_clones: Number of clones being deployed; task losses are divided
        by this so gradient sums across clones average correctly.
      regularization_losses: Possibly empty list of regularization losses.

    Returns:
      A tensor holding the clone's total loss, or None if no losses exist.
    """
    total_loss = None
    task_loss = None
    reg_loss = None
    # Build the loss ops on the clone's device.
    with tf.device(clone.device):
        components = []
        collected = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
        if collected:
            task_loss = tf.add_n(collected, name='clone_loss')
            if num_clones > 1:
                task_loss = tf.div(task_loss, 1.0 * num_clones,
                                   name='scaled_clone_loss')
            components.append(task_loss)
        if regularization_losses:
            reg_loss = tf.add_n(regularization_losses,
                                name='regularization_loss')
            components.append(reg_loss)
        if components:
            total_loss = tf.add_n(components)
    # Summaries are created outside the clone device block.
    if task_loss is not None:
        tf.summary.scalar('clone_loss', task_loss)
    if reg_loss is not None:
        tf.summary.scalar('regularization_loss', reg_loss)
    return total_loss
def _sum_clones_gradients(clone_grads):
"""Calculate the sum gradient for each shared variable across all clones.
This function assumes that the clone_grads has been scaled appropriately by
1 / num_clones.
Args:
clone_grads: A List of List of tuples (gradient, variable), one list per
`Clone`.
Returns:
List of tuples of (gradient, variable) where the gradient has been summed
across all clones.
"""
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), ... (grad_varN_cloneN, varN))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
|
<gh_stars>0
''' Batched Room-to-Room navigation environment '''
import sys
sys.path.append('build')
import MatterSim
import csv
import numpy as np
import math
import json
import random
import networkx as nx
import functools
import os.path
import time
import paths
import pickle
import os
import os.path
import sys
import itertools
from collections import namedtuple, defaultdict
from utils import load_datasets, load_nav_graphs, structured_map, vocab_pad_idx, decode_base64, k_best_indices, try_cuda, spatial_feature_from_bbox
import torch
from torch.autograd import Variable
print('-----------*-------------------------------------+-----------', flush=True)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device, flush=True)
print('-----------*-------------------------------------+-----------', flush=True)
csv.field_size_limit(sys.maxsize)
# Not needed for panorama action space
# FOLLOWER_MODEL_ACTIONS = ['left', 'right', 'up', 'down', 'forward', '<end>', '<start>', '<ignore>']
#
# LEFT_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("left")
# RIGHT_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("right")
# UP_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("up")
# DOWN_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("down")
# FORWARD_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("forward")
# END_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("<end>")
# START_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("<start>")
# IGNORE_ACTION_INDEX = FOLLOWER_MODEL_ACTIONS.index("<ignore>")
# FOLLOWER_ENV_ACTIONS = [
# (0,-1, 0), # left
# (0, 1, 0), # right
# (0, 0, 1), # up
# (0, 0,-1), # down
# (1, 0, 0), # forward
# (0, 0, 0), # <end>
# (0, 0, 0), # <start>
# (0, 0, 0) # <ignore>
# ]
# assert len(FOLLOWER_MODEL_ACTIONS) == len(FOLLOWER_ENV_ACTIONS)
angle_inc = np.pi / 6.
def _build_action_embedding(adj_loc_list, features):
feature_dim = features.shape[-1]
embedding = np.zeros((len(adj_loc_list), feature_dim + 128), np.float32)
for a, adj_dict in enumerate(adj_loc_list):
if a == 0:
# the embedding for the first action ('stop') is left as zero
continue
embedding[a, :feature_dim] = features[adj_dict['absViewIndex']]
loc_embedding = embedding[a, feature_dim:]
rel_heading = adj_dict['rel_heading']
rel_elevation = adj_dict['rel_elevation']
loc_embedding[0:32] = np.sin(rel_heading)
loc_embedding[32:64] = np.cos(rel_heading)
loc_embedding[64:96] = np.sin(rel_elevation)
loc_embedding[96:] = np.cos(rel_elevation)
return embedding
def build_viewpoint_loc_embedding(viewIndex):
    """Position embedding of all 36 views relative to `viewIndex`.

    Each 128-D row is: sin(heading) x32 | cos(heading) x32 |
    sin(elevation) x32 | cos(elevation) x32, where heading/elevation are
    measured relative to the agent's current view.
    """
    embedding = np.zeros((36, 128), np.float32)
    for absViewIndex in range(36):
        # Rotate the 12-view heading ring to be relative to viewIndex;
        # the elevation ring (groups of 12) is unchanged.
        relViewIndex = (absViewIndex - viewIndex) % 12 + (absViewIndex // 12) * 12
        rel_heading = (relViewIndex % 12) * angle_inc
        rel_elevation = (relViewIndex // 12 - 1) * angle_inc
        row = embedding[absViewIndex]
        row[0:32] = np.sin(rel_heading)
        row[32:64] = np.cos(rel_heading)
        row[64:96] = np.sin(rel_elevation)
        row[96:] = np.cos(rel_elevation)
    return embedding
# Pre-compute the location embeddings for all 36 possible panorama view
# indices so observe() can look them up instead of rebuilding per step.
_static_loc_embeddings = [
    build_viewpoint_loc_embedding(viewIndex) for viewIndex in range(36)]
def _loc_distance(loc):
return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
def _canonical_angle(x):
''' Make angle in (-pi, +pi) '''
return x - 2 * np.pi * round(x / (2 * np.pi))
def _adjust_heading(sim, heading):
heading = (heading + 6) % 12 - 6 # minimum action to turn (e.g 11 -> -1)
''' Make possibly more than one heading turns '''
for _ in range(int(abs(heading))):
sim.makeAction(0, np.sign(heading), 0)
def _adjust_elevation(sim, elevation):
for _ in range(int(abs(elevation))):
''' Make possibly more than one elevation turns '''
sim.makeAction(0, 0, np.sign(elevation))
def _navigate_to_location(sim, nextViewpointId, absViewIndex):
    """Step the simulator to the adjacent viewpoint `nextViewpointId`.

    First turns the agent to view orientation `absViewIndex` (where the
    target location is visible), then takes the forward action toward it.
    No-op if the agent is already at the target viewpoint.
    """
    state = sim.getState()
    if state.location.viewpointId == nextViewpointId:
        return  # do nothing
    # 1. Turn to the corresponding view orientation
    _adjust_heading(sim, absViewIndex % 12 - state.viewIndex % 12)
    _adjust_elevation(sim, absViewIndex // 12 - state.viewIndex // 12)
    # 2. find the next location among the now-visible navigable locations
    state = sim.getState()
    assert state.viewIndex == absViewIndex
    a, next_loc = None, None
    for n_loc, loc in enumerate(state.navigableLocations):
        if loc.viewpointId == nextViewpointId:
            a = n_loc
            next_loc = loc
            break
    assert next_loc is not None
    # 3. Take action (forward toward location index `a`)
    sim.makeAction(a, 0, 0)
def _get_panorama_states(sim):
    '''
    Look around and collect all the navigable locations
    Representation of all_adj_locs:
        {'absViewIndex': int,
         'relViewIndex': int,
         'nextViewpointId': int,
         'rel_heading': float,
         'rel_elevation': float}
        where relViewIndex is normalized using the current heading
    Concepts:
        - absViewIndex: the absolute viewpoint index, as returned by
          state.viewIndex
        - nextViewpointId: the viewpointID of this adjacent point
        - rel_heading: the heading (radians) of this adjacent point
          relative to looking forward horizontally (i.e. relViewIndex 12)
        - rel_elevation: the elevation (radians) of this adjacent point
          relative to looking forward horizontally (i.e. relViewIndex 12)
    Features are 36 x D_vis, ordered from relViewIndex 0 to 35 (i.e.
    feature[12] is always the feature of the patch forward horizontally)

    Returns (state, adj_loc_list): the simulator state after scanning
    (restored to the original view) and the list of candidate locations,
    with a synthetic 'stop' entry first, then neighbors sorted by
    increasing |rel_heading|.
    '''
    state = sim.getState()
    initViewIndex = state.viewIndex
    # 1. first look down, turning to relViewIndex 0
    elevation_delta = -(state.viewIndex // 12)
    _adjust_elevation(sim, elevation_delta)
    # 2. scan through the 36 views and collect all navigable locations
    adj_dict = {}
    for relViewIndex in range(36):
        # Here, base_rel_heading and base_rel_elevation are w.r.t
        # relViewIndex 12 (looking forward horizontally)
        # (i.e. the relative heading and elevation
        # adjustment needed to switch from relViewIndex 12
        # to the current relViewIndex)
        base_rel_heading = (relViewIndex % 12) * angle_inc
        base_rel_elevation = (relViewIndex // 12 - 1) * angle_inc
        state = sim.getState()
        absViewIndex = state.viewIndex
        # get adjacent locations (index 0 is the current location, skip it)
        for loc in state.navigableLocations[1:]:
            distance = _loc_distance(loc)
            # if a loc is visible from multiple view, use the closest
            # view (in angular distance) as its representation
            if (loc.viewpointId not in adj_dict or
                    distance < adj_dict[loc.viewpointId]['distance']):
                rel_heading = _canonical_angle(
                    base_rel_heading + loc.rel_heading)
                rel_elevation = base_rel_elevation + loc.rel_elevation
                adj_dict[loc.viewpointId] = {
                    'absViewIndex': absViewIndex,
                    'nextViewpointId': loc.viewpointId,
                    'rel_heading': rel_heading,
                    'rel_elevation': rel_elevation,
                    'distance': distance}
        # move to the next view
        if (relViewIndex + 1) % 12 == 0:
            sim.makeAction(0, 1, 1)  # Turn right and look up
        else:
            sim.makeAction(0, 1, 0)  # Turn right
    # 3. turn back to the original view (we finished the scan looking up,
    #    i.e. 2 levels above relViewIndex 0)
    _adjust_elevation(sim, - 2 - elevation_delta)
    state = sim.getState()
    assert state.viewIndex == initViewIndex  # check the agent is back
    # collect navigable location list
    stop = {
        'absViewIndex': -1,
        'nextViewpointId': state.location.viewpointId}
    adj_loc_list = [stop] + sorted(
        adj_dict.values(), key=lambda x: abs(x['rel_heading']))
    return state, adj_loc_list
# Immutable snapshot of an agent pose; used to save/restore simulator episodes.
WorldState = namedtuple("WorldState", ["scanId", "viewpointId", "heading", "elevation"])
# Bottom-up attention features for one view: detection probabilities, ROI
# features, attribute/object vocab indices, box geometry, and a padding mask.
BottomUpViewpoint = namedtuple("BottomUpViewpoint", ["cls_prob", "image_features", "attribute_indices", "object_indices", "spatial_features", "no_object_mask"])
def load_world_state(sim, world_state):
    """Restore the simulator to the pose in `world_state` (scan, viewpoint, heading, elevation)."""
    sim.newEpisode(*world_state)
def get_world_state(sim):
    """Snapshot the simulator's current pose as a WorldState tuple."""
    state = sim.getState()
    return WorldState(state.scanId,
                      state.location.viewpointId,
                      state.heading,
                      state.elevation)
def make_sim(image_w, image_h, vfov):
    """Create and initialize a non-rendering MatterSim simulator.

    Uses discretized viewing angles and the given camera resolution and
    vertical field of view (degrees).
    """
    simulator = MatterSim.Simulator()
    simulator.setRenderingEnabled(False)
    simulator.setDiscretizedViewingAngles(True)
    simulator.setCameraResolution(image_w, image_h)
    simulator.setCameraVFOV(math.radians(vfov))
    simulator.init()
    return simulator
# def encode_action_sequence(action_tuples):
# encoded = []
# reached_end = False
# if action_tuples[0] == (0, 0, 0):
# # this method can't handle a <start> symbol
# assert all(t == (0, 0, 0) for t in action_tuples)
# for tpl in action_tuples:
# if tpl == (0, 0, 0):
# if reached_end:
# ix = IGNORE_ACTION_INDEX
# else:
# ix = END_ACTION_INDEX
# reached_end = True
# else:
# ix = FOLLOWER_ENV_ACTIONS.index(tpl)
# encoded.append(ix)
# return encoded
# Not needed for panorama action space
# def index_action_tuple(action_tuple):
# ix, heading_chg, elevation_chg = action_tuple
# if heading_chg > 0:
# return FOLLOWER_MODEL_ACTIONS.index('right')
# elif heading_chg < 0:
# return FOLLOWER_MODEL_ACTIONS.index('left')
# elif elevation_chg > 0:
# return FOLLOWER_MODEL_ACTIONS.index('up')
# elif elevation_chg < 0:
# return FOLLOWER_MODEL_ACTIONS.index('down')
# elif ix > 0:
# return FOLLOWER_MODEL_ACTIONS.index('forward')
# else:
# return FOLLOWER_MODEL_ACTIONS.index('<end>')
class ImageFeatures(object):
    """Base class for precomputed per-view visual features.

    Subclasses return features for the 36 discretized panorama views of a
    viewpoint via get_features(state); batch_features stacks per-state
    features into a (CUDA) tensor for the model.
    """
    NUM_VIEWS = 36  # 12 headings x 3 elevations per panorama
    MEAN_POOLED_DIM = 2048
    feature_dim = MEAN_POOLED_DIM
    # Camera settings the features were extracted with (asserted on load).
    IMAGE_W = 640
    IMAGE_H = 480
    VFOV = 60
    @staticmethod
    def from_args(args):
        """Instantiate the featurizer(s) selected by command-line args."""
        feats = []
        for image_feature_type in sorted(args.image_feature_type):
            if image_feature_type == "none":
                feats.append(NoImageFeatures())
            elif image_feature_type == "bottom_up_attention":
                # feats.append(BottomUpImageFeatures(
                #     args.bottom_up_detections,
                #     #precomputed_cache_path=paths.bottom_up_feature_cache_path,
                #     precomputed_cache_dir=paths.bottom_up_feature_cache_dir,
                # ))
                raise NotImplementedError('bottom_up_attention has not been implemented for panorama environment')
            elif image_feature_type == "convolutional_attention":
                feats.append(ConvolutionalImageFeatures(
                    args.image_feature_datasets,
                    split_convolutional_features=True,
                    downscale_convolutional_features=args.downscale_convolutional_features
                ))
                # NOTE(review): the featurizer appended above is dead code;
                # this branch always raises before returning.
                raise NotImplementedError('convolutional_attention has not been implemented for panorama environment')
            else:
                assert image_feature_type == "mean_pooled"
                feats.append(MeanPooledImageFeatures(args.image_feature_datasets))
        return feats
    @staticmethod
    def add_args(argument_parser):
        """Register the image-feature command-line options on the parser."""
        argument_parser.add_argument("--image_feature_type", nargs="+", choices=["none", "mean_pooled", "convolutional_attention", "bottom_up_attention"], default=["mean_pooled"])
        argument_parser.add_argument("--image_attention_size", type=int)
        argument_parser.add_argument("--image_feature_datasets", nargs="+", choices=["imagenet", "places365"], default=["imagenet"], help="only applicable to mean_pooled or convolutional_attention options for --image_feature_type")
        argument_parser.add_argument("--bottom_up_detections", type=int, default=20)
        argument_parser.add_argument("--bottom_up_detection_embedding_size", type=int, default=20)
        argument_parser.add_argument("--downscale_convolutional_features", action='store_true')
    def get_name(self):
        """Short identifier for this featurizer (used in names/logs)."""
        raise NotImplementedError("get_name")
    def batch_features(self, feature_list):
        """Stack a list of per-state features into a CUDA Variable."""
        features = np.stack(feature_list)
        return try_cuda(Variable(torch.from_numpy(features), requires_grad=False))
    def get_features(self, state):
        """Return features for the panorama at the given simulator state."""
        raise NotImplementedError("get_features")
class NoImageFeatures(ImageFeatures):
    """Featurizer that returns a constant all-zero feature block (ablation)."""
    feature_dim = ImageFeatures.MEAN_POOLED_DIM
    def __init__(self):
        print('Image features not provided', flush=True)
        # One shared zero array; callers must not mutate it.
        self.features = np.zeros((ImageFeatures.NUM_VIEWS, self.feature_dim), dtype=np.float32)
    def get_features(self, state):
        """Return the constant zero features; `state` is ignored."""
        return self.features
    def get_name(self):
        """Identifier for this featurizer."""
        return "none"
class MeanPooledImageFeatures(ImageFeatures):
    """Mean-pooled CNN features loaded eagerly from TSV feature stores.

    Features from multiple datasets (e.g. imagenet + places365) are
    concatenated along the feature dimension per (scan, viewpoint).
    """
    def __init__(self, image_feature_datasets):
        image_feature_datasets = sorted(image_feature_datasets)
        self.image_feature_datasets = image_feature_datasets
        self.mean_pooled_feature_stores = [paths.mean_pooled_feature_store_paths[dataset]
                                           for dataset in image_feature_datasets]
        self.feature_dim = MeanPooledImageFeatures.MEAN_POOLED_DIM * len(image_feature_datasets)
        print('Loading image features from %s' % ', '.join(self.mean_pooled_feature_stores), flush=True)
        tsv_fieldnames = ['scanId', 'viewpointId', 'image_w','image_h', 'vfov', 'features']
        self.features = defaultdict(list)
        for mpfs in self.mean_pooled_feature_stores:
            with open(mpfs, "rt") as tsv_in_file:
                reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = tsv_fieldnames)
                for item in reader:
                    # Sanity-check the features were extracted with the same
                    # camera settings this environment uses.
                    assert int(item['image_h']) == ImageFeatures.IMAGE_H
                    assert int(item['image_w']) == ImageFeatures.IMAGE_W
                    assert int(item['vfov']) == ImageFeatures.VFOV
                    long_id = self._make_id(item['scanId'], item['viewpointId'])
                    features = np.frombuffer(decode_base64(item['features']), dtype=np.float32).reshape((ImageFeatures.NUM_VIEWS, ImageFeatures.MEAN_POOLED_DIM))
                    self.features[long_id].append(features)
        # Every viewpoint must have features from every dataset.
        assert all(len(feats) == len(self.mean_pooled_feature_stores) for feats in self.features.values())
        # Concatenate per-dataset features along the feature dimension.
        self.features = {
            long_id: np.concatenate(feats, axis=1)
            for long_id, feats in self.features.items()
        }
    def _make_id(self, scanId, viewpointId):
        """Key used to index the feature dict."""
        return scanId + '_' + viewpointId
    def get_features(self, state):
        """Return the (36, feature_dim) features at the agent's viewpoint."""
        long_id = self._make_id(state.scanId, state.location.viewpointId)
        # Return feature of all the 36 views
        return self.features[long_id]
    def get_name(self):
        """e.g. 'imagenet_mean_pooled' or 'imagenet+places365_mean_pooled'."""
        name = '+'.join(sorted(self.image_feature_datasets))
        name = "{}_mean_pooled".format(name)
        return name
class ConvolutionalImageFeatures(ImageFeatures):
    """Per-view convolutional feature maps loaded lazily from .npy files.

    Features can be stored one file per (viewpoint, view) pair
    (`split_convolutional_features`) or one memory-mapped file per
    viewpoint; multiple datasets are concatenated along axis 1.
    """
    feature_dim = ImageFeatures.MEAN_POOLED_DIM
    def __init__(self, image_feature_datasets, split_convolutional_features=True, downscale_convolutional_features=True):
        self.image_feature_datasets = image_feature_datasets
        self.split_convolutional_features = split_convolutional_features
        self.downscale_convolutional_features = downscale_convolutional_features
        self.convolutional_feature_stores = [paths.convolutional_feature_store_paths[dataset]
                                             for dataset in image_feature_datasets]
        # Bug fix: the loader used to be decorated with
        # functools.lru_cache, which on an instance method keys on `self`
        # and keeps every instance alive for the lifetime of the
        # class-level cache (ruff B019). Wrap a per-instance bounded cache
        # instead; call sites are unchanged.
        self._get_convolutional_features = functools.lru_cache(maxsize=3000)(
            self._load_convolutional_features)
    def _make_id(self, scanId, viewpointId):
        """Join scan and viewpoint ids into the canonical long id."""
        return scanId + '_' + viewpointId
    def _load_convolutional_features(self, scanId, viewpointId, viewIndex):
        """Read the feature map(s) for one view from disk (uncached)."""
        feats = []
        for cfs in self.convolutional_feature_stores:
            if self.split_convolutional_features:
                path = os.path.join(cfs, scanId, "{}_{}{}.npy".format(viewpointId, viewIndex, "_downscaled" if self.downscale_convolutional_features else ""))
                this_feats = np.load(path)
            else:
                # memmap so only the requested view is read from the file
                path = os.path.join(cfs, scanId, "%s.npy" % viewpointId)
                mmapped = np.load(path, mmap_mode='r')
                this_feats = mmapped[viewIndex,:,:,:]
            feats.append(this_feats)
        if len(feats) > 1:
            return np.concatenate(feats, axis=1)
        return feats[0]
    def get_features(self, state):
        """Return the (cached) convolutional features for the current view."""
        return self._get_convolutional_features(state.scanId, state.location.viewpointId, state.viewIndex)
    def get_name(self):
        """e.g. 'imagenet_convolutional_attention_downscale'."""
        name = '+'.join(sorted(self.image_feature_datasets))
        name = "{}_convolutional_attention".format(name)
        if self.downscale_convolutional_features:
            name = name + "_downscale"
        return name
class BottomUpImageFeatures(ImageFeatures):
    """Bottom-up attention features: top-k object detections per view.

    Each view yields a BottomUpViewpoint of detection probabilities, ROI
    features, attribute/object vocabulary indices, box geometry, and a
    padding mask. Features come either from precomputed pickle caches or
    are read (and trimmed to `number_of_detections`) from per-viewpoint
    pickle files on demand.
    """
    PAD_ITEM = ("<pad>",)
    feature_dim = ImageFeatures.MEAN_POOLED_DIM
    def __init__(self, number_of_detections, precomputed_cache_path=None, precomputed_cache_dir=None, image_width=640, image_height=480):
        self.number_of_detections = number_of_detections
        # Visual Genome vocabularies; attributes get an extra "null" entry
        # because an empty attribute is a valid parse (objects must exist).
        self.index_to_attributes, self.attribute_to_index = BottomUpImageFeatures.read_visual_genome_vocab(paths.bottom_up_attribute_path, BottomUpImageFeatures.PAD_ITEM, add_null=True)
        self.index_to_objects, self.object_to_index = BottomUpImageFeatures.read_visual_genome_vocab(paths.bottom_up_object_path, BottomUpImageFeatures.PAD_ITEM, add_null=False)
        self.num_attributes = len(self.index_to_attributes)
        self.num_objects = len(self.index_to_objects)
        self.attribute_pad_index = self.attribute_to_index[BottomUpImageFeatures.PAD_ITEM]
        self.object_pad_index = self.object_to_index[BottomUpImageFeatures.PAD_ITEM]
        self.image_width = image_width
        self.image_height = image_height
        self.precomputed_cache = {}
        def add_to_cache(key, viewpoints):
            # Normalize one precomputed (scan, viewpoint) entry: trim every
            # field to the top `number_of_detections` and convert legacy
            # 'boxes' into spatial features.
            assert len(viewpoints) == ImageFeatures.NUM_VIEWS
            viewpoint_feats = []
            for viewpoint in viewpoints:
                params = {}
                for param_key, param_value in viewpoint.items():
                    if param_key == 'cls_prob':
                        # make sure it's in descending order
                        assert np.all(param_value[:-1] >= param_value[1:])
                    if param_key == 'boxes':
                        # TODO: this is for backward compatibility, remove it
                        param_key = 'spatial_features'
                        param_value = spatial_feature_from_bbox(param_value, self.image_height, self.image_width)
                    assert len(param_value) >= self.number_of_detections
                    params[param_key] = param_value[:self.number_of_detections]
                viewpoint_feats.append(BottomUpViewpoint(**params))
            self.precomputed_cache[key] = viewpoint_feats
        if precomputed_cache_dir:
            # One pickle per scene directory, named by detection count.
            self.precomputed_cache = {}
            import glob
            for scene_dir in glob.glob(os.path.join(precomputed_cache_dir, "*")):
                scene_id = os.path.basename(scene_dir)
                pickle_file = os.path.join(scene_dir, "d={}.pkl".format(number_of_detections))
                with open(pickle_file, 'rb') as f:
                    data = pickle.load(f)
                for (viewpoint_id, viewpoints) in data.items():
                    key = (scene_id, viewpoint_id)
                    add_to_cache(key, viewpoints)
        elif precomputed_cache_path:
            # Single pickle keyed by (scan, viewpoint).
            self.precomputed_cache = {}
            with open(precomputed_cache_path, 'rb') as f:
                data = pickle.load(f)
                for (key, viewpoints) in data.items():
                    add_to_cache(key, viewpoints)
    @staticmethod
    def read_visual_genome_vocab(fname, pad_name, add_null=False):
        """Read a Visual Genome vocab file (one comma-separated synonym set
        per line) into (index_to_items, item_to_index)."""
        # one-to-many mapping from indices to names (synonyms)
        index_to_items = []
        item_to_index = {}
        start_ix = 0
        items_to_add = [pad_name]
        if add_null:
            null_tp = ()
            items_to_add.append(null_tp)
        for item in items_to_add:
            index_to_items.append(item)
            item_to_index[item] = start_ix
            start_ix += 1
        with open(fname) as f:
            for index, line in enumerate(f):
                this_items = []
                for synonym in line.split(','):
                    item = tuple(synonym.split())
                    this_items.append(item)
                    item_to_index[item] = index + start_ix
                index_to_items.append(this_items)
        assert len(index_to_items) == max(item_to_index.values()) + 1
        return index_to_items, item_to_index
    def batch_features(self, feature_list):
        """Stack a list of BottomUpViewpoints field-wise into CUDA tensors."""
        def transform(lst, wrap_with_var=True):
            features = np.stack(lst)
            x = torch.from_numpy(features)
            if wrap_with_var:
                x = Variable(x, requires_grad=False)
            return try_cuda(x)
        return BottomUpViewpoint(
            cls_prob=transform([f.cls_prob for f in feature_list]),
            image_features=transform([f.image_features for f in feature_list]),
            attribute_indices=transform([f.attribute_indices for f in feature_list]),
            object_indices=transform([f.object_indices for f in feature_list]),
            spatial_features=transform([f.spatial_features for f in feature_list]),
            no_object_mask=transform([f.no_object_mask for f in feature_list], wrap_with_var=False),
        )
    def parse_attribute_objects(self, tokens):
        """Split a caption's tokens into (attribute_index, object_index)."""
        parse_options = []
        # allow blank attribute, but not blank object
        for split_point in range(0, len(tokens)):
            attr_tokens = tuple(tokens[:split_point])
            obj_tokens = tuple(tokens[split_point:])
            if attr_tokens in self.attribute_to_index and obj_tokens in self.object_to_index:
                parse_options.append((self.attribute_to_index[attr_tokens], self.object_to_index[obj_tokens]))
        assert parse_options, "didn't find any parses for {}".format(tokens)
        # prefer longer objects, e.g. "electrical outlet" over "electrical" "outlet"
        return parse_options[0]
    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # every instance alive for the cache's lifetime (ruff B019); acceptable
    # here only if few instances are created.
    @functools.lru_cache(maxsize=20000)
    def _get_viewpoint_features(self, scan_id, viewpoint_id):
        """Load (or fetch from cache) the per-view detections, padded/trimmed
        to `number_of_detections` per view."""
        if self.precomputed_cache:
            return self.precomputed_cache[(scan_id, viewpoint_id)]
        fname = os.path.join(paths.bottom_up_feature_store_path, scan_id, "{}.p".format(viewpoint_id))
        with open(fname, 'rb') as f:
            data = pickle.load(f, encoding='latin1')
        viewpoint_features = []
        for viewpoint in data:
            # Keep the top-k detections, ordered by descending probability.
            top_indices = k_best_indices(viewpoint['cls_prob'], self.number_of_detections, sorted=True)[::-1]
            no_object = np.full(self.number_of_detections, True, dtype=np.uint8) # will become torch Byte tensor
            no_object[0:len(top_indices)] = False
            cls_prob = np.zeros(self.number_of_detections, dtype=np.float32)
            cls_prob[0:len(top_indices)] = viewpoint['cls_prob'][top_indices]
            assert cls_prob[0] == np.max(cls_prob)
            image_features = np.zeros((self.number_of_detections, ImageFeatures.MEAN_POOLED_DIM), dtype=np.float32)
            image_features[0:len(top_indices)] = viewpoint['features'][top_indices]
            spatial_feats = np.zeros((self.number_of_detections, 5), dtype=np.float32)
            spatial_feats[0:len(top_indices)] = spatial_feature_from_bbox(viewpoint['boxes'][top_indices], self.image_height, self.image_width)
            object_indices = np.full(self.number_of_detections, self.object_pad_index)
            attribute_indices = np.full(self.number_of_detections, self.attribute_pad_index)
            for i, ix in enumerate(top_indices):
                attribute_ix, object_ix = self.parse_attribute_objects(list(viewpoint['captions'][ix].split()))
                object_indices[i] = object_ix
                attribute_indices[i] = attribute_ix
            viewpoint_features.append(BottomUpViewpoint(cls_prob, image_features, attribute_indices, object_indices, spatial_feats, no_object))
        return viewpoint_features
    def get_features(self, state):
        """Return the BottomUpViewpoint for the agent's current view."""
        viewpoint_features = self._get_viewpoint_features(state.scanId, state.location.viewpointId)
        return viewpoint_features[state.viewIndex]
    def get_name(self):
        """Identifier including the detection count."""
        return "bottom_up_attention_d={}".format(self.number_of_detections)
class EnvBatch():
    ''' A simple wrapper for a batch of MatterSim environments,
        using discretized viewpoints and pretrained features

    Holds batch_size x beam_size simulators; world states are passed in and
    restored explicitly so one simulator can serve many search hypotheses.
    '''
    def __init__(self, batch_size, beam_size):
        self.sims = []
        self.batch_size = batch_size
        self.beam_size = beam_size
        for i in range(batch_size):
            beam = []
            for j in range(beam_size):
                sim = make_sim(ImageFeatures.IMAGE_W, ImageFeatures.IMAGE_H, ImageFeatures.VFOV)
                beam.append(sim)
            self.sims.append(beam)
    def sims_view(self, beamed):
        """Iterator view over the simulators.

        When beamed, each batch entry cycles its beam simulators so an
        arbitrary number of hypotheses can reuse the fixed-size pool;
        otherwise only the first simulator of each entry is yielded.
        """
        if beamed:
            return [itertools.cycle(sim_list) for sim_list in self.sims]
        else:
            return (s[0] for s in self.sims)
    def newEpisodes(self, scanIds, viewpointIds, headings, beamed=False):
        """Start new episodes and return the initial WorldStates
        (wrapped in singleton lists when beamed)."""
        assert len(scanIds) == len(viewpointIds)
        assert len(headings) == len(viewpointIds)
        assert len(scanIds) == len(self.sims)
        world_states = []
        for i, (scanId, viewpointId, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
            world_state = WorldState(scanId, viewpointId, heading, 0)
            if beamed:
                world_states.append([world_state])
            else:
                world_states.append(world_state)
            load_world_state(self.sims[i][0], world_state)
        assert len(world_states) == len(scanIds)
        return world_states
    def getStates(self, world_states, beamed=False):
        ''' Get list of states. '''
        def f(sim, world_state):
            # Restore the pose first: simulators are shared across states.
            load_world_state(sim, world_state)
            return _get_panorama_states(sim)
        return structured_map(f, self.sims_view(beamed), world_states, nested=beamed)
    def makeActions(self, world_states, actions, last_obs, beamed=False):
        ''' Take an action using the full state dependent action interface (with batched input).
        Each action is an index in the adj_loc_list,
        0 means staying still (i.e. stop)
        '''
        def f(sim, world_state, action, last_ob):
            load_world_state(sim, world_state)
            # load the location attribute corresponding to the action
            loc_attr = last_ob['adj_loc_list'][action]
            _navigate_to_location(
                sim, loc_attr['nextViewpointId'], loc_attr['absViewIndex'])
            # sim.makeAction(index, heading, elevation)
            return get_world_state(sim)
        return structured_map(f, self.sims_view(beamed), world_states, actions, last_obs, nested=beamed)
# def makeSimpleActions(self, simple_indices, beamed=False):
# ''' Take an action using a simple interface: 0-forward, 1-turn left, 2-turn right, 3-look up, 4-look down.
# All viewpoint changes are 30 degrees. Forward, look up and look down may not succeed - check state.
# WARNING - Very likely this simple interface restricts some edges in the graph. Parts of the
# environment may not longer be navigable. '''
# def f(sim, index):
# if index == 0:
# sim.makeAction(1, 0, 0)
# elif index == 1:
# sim.makeAction(0,-1, 0)
# elif index == 2:
# sim.makeAction(0, 1, 0)
# elif index == 3:
# sim.makeAction(0, 0, 1)
# elif index == 4:
# sim.makeAction(0, 0,-1)
# else:
# sys.exit("Invalid simple action %s" % index)
# structured_map(f, self.sims_view(beamed), simple_indices, nested=beamed)
# return None
class R2RBatch():
''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
    def __init__(self,
                 image_features_list, batch_size=100, seed=10, splits=['train'],
                 tokenizer=None, beam_size=1, instruction_limit=None, with_objects=False,
                 train_instructions_with_objects=False, custom_metadata_path="",
                 objects_per_word=3, objects_loss_lambda=0.3, with_craft_instruction="",
                 craft_instruction_loss_beta=0.3):
        """Load the R2R data splits and set up the simulator batch.

        Each dataset item carries several instructions; they are split into
        separate entries keyed by instr_id ('<path_id>_<j>'). Optional
        object metadata and craft instructions are only loaded for the
        training splits.
        """
        self.image_features_list = image_features_list
        self.data = []
        self.scans = []
        self.gt = {}
        self.objects_by_words = {}
        self.craft_instructions = {}
        for item in load_datasets(splits):
            # Split multiple instructions into separate entries
            assert item['path_id'] not in self.gt
            self.gt[item['path_id']] = item
            instructions = item['instructions']
            if instruction_limit:
                instructions = instructions[:instruction_limit]
            for j,instr in enumerate(instructions):
                self.scans.append(item['scan'])
                new_item = dict(item)
                new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
                new_item['instructions'] = instr
                if tokenizer:
                    self.tokenizer = tokenizer
                    new_item['instr_encoding'], new_item['instr_length'] = tokenizer.encode_sentence(instr)
                else:
                    self.tokenizer = None
                self.data.append(new_item)
        self.scans = set(self.scans)
        self.splits = splits
        # Fixed seed so minibatch order is reproducible across runs.
        self.seed = seed
        random.seed(self.seed)
        random.shuffle(self.data)
        self.ix = 0
        self.batch_size = batch_size
        self._load_nav_graphs()
        self.custom_metadata_path = custom_metadata_path
        self.with_objects = with_objects
        self.with_craft_instruction = with_craft_instruction
        self.objects_per_word = objects_per_word
        self.objects_loss_lambda = objects_loss_lambda
        self.craft_instruction_loss_beta = craft_instruction_loss_beta
        self.train_instructions_with_objects = train_instructions_with_objects
        # Auxiliary supervision data is only available for training splits.
        if with_objects and self.splits in [['train'], ['train_instructions_with_objects']]:
            self._load_objects_by_word()
        if with_craft_instruction and self.splits in [['train'], ['train_instructions_with_objects']]:
            self._load_craft_instructions()
        self.set_beam_size(beam_size)
        self.print_progress = False
        print('R2RBatch loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)), flush=True)
        print(f'Example of instructions: {self.data[0]}', flush=True)
        if self.with_objects:
            print('Using objects to generate better instructions', flush=True)
        else:
            print('Using base model', flush=True)
def set_beam_size(self, beam_size, force_reload=False):
# warning: this will invalidate the environment, self.reset() should be called afterward!
try:
invalid = (beam_size != self.beam_size)
except:
invalid = True
if force_reload or invalid:
self.beam_size = beam_size
self.env = EnvBatch(self.batch_size, beam_size)
def _load_nav_graphs(self):
''' Load connectivity graph for each scan, useful for reasoning about shortest paths '''
print('Loading navigation graphs for %d scans' % len(self.scans), flush=True)
self.graphs = load_nav_graphs(self.scans)
self.paths = {}
for scan,G in self.graphs.items(): # compute all shortest paths
self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
self.distances = {}
for scan,G in self.graphs.items(): # compute all shortest paths
self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
def _load_objects_by_word(self):
''' Load objects that should be en each word (or instruction) '''
path = 'train_objects_by_word.pickle'
if self.train_instructions_with_objects:
path = 'train_objects_by_word_objects_only.pickle'
if self.custom_metadata_path:
print(f"Loading custom objecs metadata path {self.custom_metadata_path}")
path = self.custom_metadata_path
with open(f'data/{path}', 'rb') as file:
data = pickle.load(file)
print(f'Loading objects of 3 instruction per {len(data.keys())} paths', flush=True)
self.objects_by_words = data
def _load_craft_instructions(self):
''' Load craft instructions for adding loss '''
path = self.with_craft_instruction
with open(f'data/{path}', 'r') as file:
data = json.load(file)
print(f'Loading {len(data.keys())} craft instructions', flush=True)
self.craft_instructions = data
    def _next_minibatch(self, sort_instr_length):
        """Advance self.batch to the next minibatch of self.batch_size items.

        Wraps around at the end of an epoch: the remaining data is topped up
        from a freshly reshuffled copy, so batches are always full.
        """
        batch = self.data[self.ix:self.ix+self.batch_size]
        if self.print_progress:
            sys.stderr.write("\rix {} / {}".format(self.ix, len(self.data)))
        if len(batch) < self.batch_size:
            # End of epoch: reshuffle and fill the batch from the start.
            random.shuffle(self.data)
            self.ix = self.batch_size - len(batch)
            batch += self.data[:self.ix]
        else:
            self.ix += self.batch_size
        if sort_instr_length:
            # Longest instructions first (e.g. for packed RNN sequences).
            batch = sorted(batch, key=lambda item: item['instr_length'], reverse=True)
        self.batch = batch
    def reset_epoch(self):
        ''' Reset the data index to beginning of epoch. Primarily for testing.
            You must still call reset() for a new episode. '''
        # Note: does not reshuffle; the next epoch replays the current order.
        self.ix = 0
def _shortest_path_action(self, state, adj_loc_list, goalViewpointId):
'''
Determine next action on the shortest path to goal,
for supervised training.
'''
if state.location.viewpointId == goalViewpointId:
return 0 # do nothing
path = self.paths[state.scanId][state.location.viewpointId][
goalViewpointId]
nextViewpointId = path[1]
for n_a, loc_attr in enumerate(adj_loc_list):
if loc_attr['nextViewpointId'] == nextViewpointId:
return n_a
# Next nextViewpointId not found! This should not happen!
print('adj_loc_list:', adj_loc_list, flush=True)
print('nextViewpointId:', nextViewpointId, flush=True)
long_id = '{}_{}'.format(state.scanId, state.location.viewpointId)
print('longId:', long_id, flush=True)
raise Exception('Bug: nextViewpointId not in adj_loc_list', flush=True)
    def observe(self, world_states, beamed=False, include_teacher=True):
        """Build agent observations for the current minibatch.

        For each world state: panorama-scan the simulator, concatenate the
        visual features with static location embeddings, build per-action
        embeddings, and (optionally) attach the shortest-path teacher action.
        Returns one obs dict per batch item (a list per item when beamed).
        """
        #start_time = time.time()
        obs = []
        for i,states_beam in enumerate(self.env.getStates(world_states, beamed=beamed)):
            item = self.batch[i]
            obs_batch = []
            for state, adj_loc_list in states_beam if beamed else [states_beam]:
                assert item['scan'] == state.scanId
                feature = [featurizer.get_features(state) for featurizer in self.image_features_list]
                assert len(feature) == 1, 'for now, only work with MeanPooled feature'
                # Append the precomputed relative-position embedding for the
                # agent's current view index to every view's feature.
                feature_with_loc = np.concatenate((feature[0], _static_loc_embeddings[state.viewIndex]), axis=-1)
                action_embedding = _build_action_embedding(adj_loc_list, feature[0])
                ob = {
                    'instr_id' : item['instr_id'],
                    'scan' : state.scanId,
                    'viewpoint' : state.location.viewpointId,
                    'viewIndex' : state.viewIndex,
                    'heading' : state.heading,
                    'elevation' : state.elevation,
                    'feature' : [feature_with_loc],
                    'step' : state.step,
                    'adj_loc_list' : adj_loc_list,
                    'action_embedding': action_embedding,
                    'navigableLocations' : state.navigableLocations,
                    'instructions' : item['instructions'],
                }
                if include_teacher:
                    # Supervised target: next hop toward the path's goal.
                    ob['teacher'] = self._shortest_path_action(state, adj_loc_list, item['path'][-1])
                if 'instr_encoding' in item:
                    ob['instr_encoding'] = item['instr_encoding']
                if 'instr_length' in item:
                    ob['instr_length'] = item['instr_length']
                obs_batch.append(ob)
            if beamed:
                obs.append(obs_batch)
            else:
                assert len(obs_batch) == 1
                obs.append(obs_batch[0])
        #end_time = time.time()
        #print("get obs in {} seconds".format(end_time - start_time))
        return obs
def get_starting_world_states(self, instance_list, beamed=False):
scanIds = [item['scan'] for item in instance_list]
viewpointIds = [item['path'][0] for item in instance_list]
headings = [item['heading'] for item in instance_list]
return self.env.newEpisodes(scanIds, viewpointIds, headings, beamed=beamed)
def reset(self, sort=False, beamed=False, load_next_minibatch=True):
''' Load a new minibatch / episodes. '''
if load_next_minibatch:
self._next_minibatch(sort)
assert len(self.batch) == self.batch_size
return self.get_starting_world_states(self.batch, beamed=beamed)
def step(self, world_states, actions, last_obs, beamed=False):
''' Take action (same interface as makeActions) '''
return self.env.makeActions(world_states, actions, last_obs, beamed=beamed)
    def shortest_paths_to_goals(self, starting_world_states, max_steps):
        """Roll out the teacher (shortest-path) policy from the given starts.

        Follows the gold action at every step until each episode emits
        action 0 (stop) or ``max_steps`` is reached. Returns per-episode
        observation sequences and action sequences.
        """
        world_states = starting_world_states
        obs = self.observe(world_states)
        all_obs = []
        all_actions = []
        for ob in obs:
            all_obs.append([ob])
            all_actions.append([])
        # Tracks which episodes have already emitted the stop action.
        ended = np.array([False] * len(obs))
        for t in range(max_steps):
            actions = [ob['teacher'] for ob in obs]
            world_states = self.step(world_states, actions, obs)
            obs = self.observe(world_states)
            # Only still-running episodes accumulate further obs/actions.
            for i,ob in enumerate(obs):
                if not ended[i]:
                    all_obs[i].append(ob)
            for i,a in enumerate(actions):
                if not ended[i]:
                    all_actions[i].append(a)
                    if a == 0:
                        ended[i] = True
            if ended.all():
                break
        return all_obs, all_actions
def gold_obs_actions_and_instructions(self, max_steps, load_next_minibatch=True):
starting_world_states = self.reset(load_next_minibatch=load_next_minibatch)
path_obs, path_actions = self.shortest_paths_to_goals(starting_world_states, max_steps)
encoded_instructions = [obs[0]['instr_encoding'] for obs in path_obs]
return path_obs, path_actions, encoded_instructions
|
#!/usr/bin/env python3
'''
Reads a Yosys log file to map the original register names to the $procdff{id} naming scheme.
Hopefully will eventually add flag to Yosys for preserving names
'''
##
# CoreIR mangling: \ -> ''
# $ -> '__DOLLAR__'
# CoreIR flattening
# top$pe_tile_name$sb_name$procdff<ID>
# top$pe_tile_name$cb_name$procdff<ID>
# Tile naming scheme
# PE t0_row_col
# Mem t1_row_col
# CB naming scheme
# cb_unq1 a/b 16bit PE inputs
# cb_unq2 d/e/f 1bit PE inputs
# cb_unq3 wdata/waddr/raddr 16bit Mem inputs
# cb_unq4 ren/wen 1bit Mem inputs
# SB naming scheme
# sb_unq1 16bit PE and Mem signals
# sb_unq2 1bit PE and Mem signals
#
##
import argparse
# Markers used to locate register/dff creation lines in the Yosys log.
reg_ind = "Creating register for signal `"
end_reg_ind = "'"
dff_ind = "created $dff cell `"
adff_ind = "created $adff cell `"
end_dff_ind = "'"
# Markers used to parse the PNR state-map file.
io_m = "IO "
s_m = "STATE "
ms_m = "MOD.SIG: "


def read_log_file(log_filename, statemap_filename):
    """Map original register names to Yosys' $procdff naming scheme.

    Parses the Yosys log for `Creating register ...` / `created $dff ...`
    line pairs, then rewrites the fabric names from the PNR state-map file
    so their signal component uses the mangled $procdff name.

    Returns a dict mapping module names to rewritten fabric names.
    Raises RuntimeError if a state-map entry has no matching dff in the log.
    """
    # BUG FIX: the original called f.close() inside the `with` blocks — the
    # context manager already closes the files; the explicit close was
    # redundant (and an anti-idiom).
    with open(log_filename) as f:
        log = f.read()
    with open(statemap_filename) as f:
        sm = f.read()
    procdff_mapping = {}
    # Last register signal seen; paired with the next dff/adff line.
    # BUG FIX: initialized so a dff line appearing before any register line
    # no longer raises NameError; such an unmatched line is skipped.
    signame = None
    for line in log.split("\n"):
        if reg_ind in line:
            signame = line[line.find(reg_ind) + len(reg_ind):line.find(end_reg_ind)]
            signame = signame.replace("\\", "")
        elif (dff_ind in line or adff_ind in line) and signame is not None:
            ind = dff_ind if dff_ind in line else adff_ind
            dffname = line[line.find(ind) + len(ind):line.find(end_dff_ind)]
            # Apply CoreIR mangling: drop '\', '$' -> '__DOLLAR__'.
            dffname = dffname.replace("\\", "")
            dffname = dffname.replace("$", "__DOLLAR__")
            procdff_mapping[signame] = dffname
    name_mapping = {}
    for line in sm.split("\n"):
        if io_m in line:
            mname = line[line.find(io_m) + len(io_m):line.find(":")]
            fabname = line[line.find(": ") + 2:]
            name_mapping[mname] = fabname
        elif s_m in line:
            mname = line[line.find(s_m) + len(s_m):line.find(":")]
            fabname = line[line.find(": ") + 2:line.find(ms_m)]
            mod_width_sig = line[line.find(ms_m) + len(ms_m):]
            mod, width, sig = mod_width_sig.split(".")
            # a little hacky: not sure how to anticipate paramod name in general
            paramodname = "$paramod{}DataWidth={}.{}".format(mod, width, sig)
            modsig = "{}.{}".format(mod, sig)
            # paramodname has priority
            if paramodname in procdff_mapping:
                procdffname = procdff_mapping[paramodname]
            elif modsig in procdff_mapping:
                procdffname = procdff_mapping[modsig]
            else:
                raise RuntimeError("Could not infer mapping from Yosys log.")
            fabname = fabname.replace(sig, procdffname)
            name_mapping[mname] = fabname.strip()
    return name_mapping
if __name__ == "__main__":
    # CLI: read the Yosys log and PNR state map, write "name: fabname" lines.
    parser = argparse.ArgumentParser(description='Read proc_dff mappings from yosys log')
    parser.add_argument('log_filename', metavar='<LOG_FILE>', help='The Yosys log file to read')
    parser.add_argument('statemap_filename', metavar='<STATE_MAP_FILE>', help='The file produced from PNR with --state-map flag')
    parser.add_argument('output_filename', metavar='<OUTPUT_FILE>', help='File to write the mapping to')
    args = parser.parse_args()
    nm = read_log_file(args.log_filename, args.statemap_filename)
    # BUG FIX: dropped the redundant output.close() that sat inside the
    # `with` block — the context manager closes the file.
    with open(args.output_filename, "w") as output:
        for k, v in nm.items():
            output.write("{}: {}\n".format(k, v))
|
# This script monitors topics in my local MQTT broker
# Some messages are forwarded to io.adafruit.com
import logging
import time
import sys
from .mqtt import MqttMonitor
from .handler import Generic, GenericEnergy, GenericString
from .adafruit import Adafruit
from .private import username, password
# private.py is not part of the checked in code. You will need to create it.
# It is a two line file with your Adafruit IO username and access key in it:
# username = 'xxxxxxxx'
# password = '<PASSWORD>'
class Monitor:
    """Base class: pumps messages from a local MQTT broker to Adafruit IO.

    Subclasses must set ``self.access`` (Adafruit transport identifier) in
    __init__ and register topic handlers in configure().
    """
    def __init__(self):
        # must be overridden
        # NOTE(review): this assignment is unreachable in practice — the
        # raise below fires before any caller could read the default.
        self.access = 'foo'
        raise NotImplementedError
    def configure(self, mqtt_monitor, metering_queue):
        # must be overridden
        raise NotImplementedError
    def run(self, msg, topic, mqtt_ip):
        """Main loop: connect, configure handlers, and drain the queue.

        ``msg``/``topic`` seed the outbound queue with one initial entry;
        ``mqtt_ip`` is the local broker address. Returns a process exit
        status: 0 normal, 1 on unhandled exception, 2 on Ctrl-C.
        """
        status = 0
        metering_queue = []
        metering_queue.append({'topic': topic, 'message': msg})
        try:
            aio = Adafruit(username, password, self.access)
            mqtt_monitor = MqttMonitor(mqtt_ip)
            self.configure(mqtt_monitor, metering_queue) # configure device handlers
            mqtt_monitor.start()
            last_min = time.localtime().tm_min
            while True:
                try:
                    localtime = time.localtime()
                    if localtime.tm_min != last_min and 59 == localtime.tm_min:
                        # stuff to do once per hour (just before the hour strikes and the handlers clear their data)
                        for h in mqtt_monitor.handlers:
                            h.evaluate() # do a self evaluation
                        last_min = localtime.tm_min
                    if len(metering_queue):
                        # Try to publish the oldest queued entry; state
                        # machine below decides whether it is popped or
                        # retried on the next pass.
                        #if (isinstance(metering_queue[0], dict)
                        #and 'topic' in metering_queue[0]
                        #and 'message' in metering_queue[0]:
                        t = metering_queue[0].get('topic', '')
                        m = metering_queue[0].get('message', '')
                        s = metering_queue[0].get('state', Adafruit.INITIAL)
                        f = metering_queue[0].get('filter', True)
                        newstate = aio.publish(t, m, s, f)
                        if Adafruit.PUBLISHED == newstate:
                            logging.debug('Monitor.run() popping metering_queue entry %s', t)
                            metering_queue.pop(0)
                        elif Adafruit.INFLIGHT == newstate:
                            logging.debug('Monitor.run() updating metering_queue[0] to state INFLIGHT')
                            metering_queue[0]['state'] = newstate
                        elif Adafruit.ERROR == newstate:
                            # abandon this entry
                            logging.debug('Monitor.run() ERROR: Discarding metering_queue entry %s', t)
                            metering_queue.pop(0)
                            # delay here?
                        elif Adafruit.INITIAL == newstate:
                            # this is weird... I guess we do nothing and let it try again
                            pass
                        else:
                            # this should never happen. It means I am missing a case
                            logging.debug('Monitor.run() updating metering_queue[0] to state %d', newstate)
                            metering_queue[0]['state'] = newstate
                    else:
                        # Nothing queued: give the Adafruit client its tick.
                        aio.loop()
                except Exception as e:
                    # Per-iteration guard: log and keep the loop alive.
                    logging.error('Exception: %s', e, exc_info=True)
        except Exception as e:
            logging.error('Exception: %s', e, exc_info=True)
            status = 1
        except KeyboardInterrupt:
            status = 2
        #except NotImplementedError:
        # don't catch this exception
        else:
            pass # normal exit
        finally:
            # all exits
            sys.stdout.flush()
            sys.stderr.flush()
        return status
class Barn(Monitor):
    """Monitor for the sensors at the barn; publishes over the LTE access path."""

    def __init__(self):
        logging.basicConfig(level=logging.DEBUG)
        logging.info('Starting Barn Monitor')
        self.access = 'lte'  # alternative transport would be 'gprs'

    def configure(self, mqtt_monitor, metering_queue):
        """Register the barn-side MQTT topic handlers and their Adafruit feeds."""
        pump_house = Generic('tele/0dd92a/SENSOR', metering_queue, 240, 's.mph')
        pump_house.NAME = 'PumpHouse'
        for feed, field in (('s.ot', 'T0'), ('s.it', 'T1'),
                            ('s.ht', 'HT'), ('s.rt', 'RTCount')):
            pump_house.setup(feed, field)
        mqtt_monitor.topic(pump_house)

        loft = Generic('tele/0dd096/SENSOR', metering_queue, 240, 's.mph')
        loft.NAME = 'Loft'
        loft.setup('s.lt', 'T0')
        mqtt_monitor.topic(loft)

        ups = GenericString('ups', metering_queue, 0, 's.mph')
        ups.NAME = 'Ups'
        ups.setup('s.ups', 'unused')
        mqtt_monitor.topic(ups)

        # Vehicle battery telemetry (921601 is AD, 0dccf8 is N), e.g.:
        # tele/921601/TRIP {"GCBMS":1.0,"Sketch":"ESP GCBMS coprocessor v1.0",
        #   "Duration":7,"Odometer":0,"Current":0,"Speed":0,"b0":5231,...}
        for name, trip_topic, prefix in (('Rriba', 'tele/921601/TRIP', 'rriba'),
                                         ('Helga', 'tele/0dccf8/TRIP', 'helga')):
            vehicle = Generic(trip_topic, metering_queue, 0, 's.mph')
            vehicle.NAME = name
            for cell in range(6):
                vehicle.setup('{}.b{}'.format(prefix, cell),
                              'b{}'.format(cell), filter=False)
            for suffix, field in (('duration', 'Duration'),
                                  ('odometer', 'Odometer'),
                                  ('current', 'Current'),
                                  ('speed', 'Speed')):
                vehicle.setup('{}.{}'.format(prefix, suffix), field, filter=False)
            mqtt_monitor.topic(vehicle)
class Home(Monitor):
    """Monitor for the sensors at home; publishes via the REST access path."""

    def __init__(self):
        logging.basicConfig(level=logging.INFO)
        self.access = 'rest'

    def configure(self, mqtt_monitor, metering_queue):
        """Register the home-side MQTT topic handlers and their Adafruit feeds."""
        # (handler class, MQTT topic, interval, NAME, [(feed, field, extras)])
        specs = [
            (Generic, 'tele/99e934/SENSOR', 240, 'Garage',
             [('g.sq', 'SQ', {}), ('g.door', 'doorCount', {}),
              ('g.t0', 'T0', {}), ('g.t1', 'T1', {})]),
            (Generic, 'tele/3154ff/SENSOR', 1, 'SoilProbe',
             [('h.sp', 'S0', {}), ('h.sb', 'S1', {})]),
            (Generic, 'tele/99e813/SENSOR', 240, 'Waterer',
             [('h.r', 'RTCount', {}), ('h.v', 'valveCount', {}),
              ('h.vr', 'VBATLOAD', {}), ('w.valve', 'valveCount', {}),
              ('w.vbat', 'VBATLOAD', {})]),
            (GenericString, 'tele/99e813/VOT', 0, 'watererValve',
             [('w.vot', 'unused', {})]),
            (GenericString, 'tele/99e813/VCT', 0, 'watererValve',
             [('w.vct', 'unused', {})]),
            (Generic, 'tele/9215de/SENSOR', 240, 'CatFeeder',
             [('h.cf', 'CFCount', {})]),
            (GenericEnergy, 'tele/sonoffP/SENSOR', 240, 'Laser',
             [('h.lasercurrent', 'Current', {})]),
            (GenericEnergy, 'tele/sonoffD/SENSOR', 240, 'Printer',
             [('h.printercurrent', 'Current', {'clamp': 0.150})]),
            (GenericEnergy, 'tele/sonoffE/SENSOR', 240, 'Washer',
             [('h.washercurrent', 'Current', {'clamp': 0.06}),
              ('h.washervoltage', 'Voltage', {})]),
            (GenericString, 'ups', 0, 'Ups',
             [('h.ups', 'unused', {})]),
            (GenericString, 'tele/0dd6ce/wce', 0, 'Status',
             [('h.wce', 'unused', {})]),
        ]
        for handler_cls, mqtt_topic, interval, name, feeds in specs:
            handler = handler_cls(mqtt_topic, metering_queue, interval, 'h.mph')
            handler.NAME = name
            for feed, field, extras in feeds:
                handler.setup(feed, field, **extras)
            mqtt_monitor.topic(handler)
        # Other topics observed on the broker but not forwarded:
        # tele/920e8c/SENSOR (esp_now_slave), tele/1dc700/SENSOR (tsunamiLight),
        # tele/GosundW/STATE (machine room LED lights),
        # tele/GosundX/STATE (machine room power strip),
        # tele/GosundY/STATE (TV room light),
        # tele/shellyB/STATE|SENSOR (machine room ceiling fan and light),
        # tele/sonoffQ/STATE|SENSOR (soldering iron).
|
import utils
from llvmlite import ir
# Primitive LLVM IR types used by the code generator.
INT = ir.IntType(32)
FLOAT = ir.FloatType()
VOID = ir.VoidType()


def get_llvm_prm_type(t):
    """Map a front-end type name to its LLVM IR type.

    'INT' and 'CHAR' both lower to i32, 'FLOAT' to float; any other name
    (including None) falls back to void.
    """
    # Membership test replaces the chained `== or ==` comparison.
    if t in ('INT', 'CHAR'):
        return INT
    if t == 'FLOAT':
        return FLOAT
    return VOID
class BaseAST:
    """Base node for the AST: tracks a parent link and a named child scope."""

    def __init__(self, parent=None):
        self.parent = parent
        self.children = {}

    def set_parent(self, value):
        self.parent = value

    def get_parent(self):
        return self.parent

    def add_child(self, name, obj):
        """Attach ``obj`` under ``name`` and adopt it into this scope."""
        obj.set_parent(self)
        self.children[name] = obj

    def get_children(self, name):
        """Resolve ``name`` in this scope, falling back to enclosing scopes."""
        found = self.children.get(name)
        if found:
            return found
        if self.parent:
            return self.parent.get_children(name)
        return self.parent

    def code_gen(self, module):
        # Nodes that emit IR override this; the base node generates nothing.
        pass
class CharLiteralAST(BaseAST):
    # AST leaf for a character literal.
    def __init__(self, value, parent=None):
        super().__init__(parent=parent)
        self.value = value  # the character's numeric value
    def get_type(self):
        return 'CHAR'
    def code_gen(self, module, builder=None):
        # Characters are widened to 32-bit integer constants in this IR model.
        return ir.Constant(INT, self.value)
class IntLiteralAST(BaseAST):
    # AST leaf for an integer literal; lowers to an i32 constant.
    def __init__(self, value, parent=None):
        super().__init__(parent=parent)
        self.value = value
    def get_type(self):
        return 'INT'
    def code_gen(self, module, builder=None):
        return ir.Constant(INT, self.value)
class FloatLiteralAST(BaseAST):
    # AST leaf for a floating-point literal; lowers to a float constant.
    def __init__(self, value, parent=None):
        super().__init__(parent=parent)
        self.value = value
    def get_type(self):
        return 'FLOAT'
    def code_gen(self, module, builder=None):
        return ir.Constant(FLOAT, self.value)
class VarDecAST(BaseAST):
    """Declaration of a variable: name, front-end type, and its IR slot."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.dim = 0          # array dimension (0 for scalars)
        self.name = ""
        self.type = None      # front-end type name: 'INT' / 'CHAR' / 'FLOAT'
        self.value = None
        self.ptr = None       # IR pointer (alloca or global) once generated
        self.is_global = False

    def set_dim(self, value):
        self.dim = value

    def get_dim(self):
        return self.dim

    def set_name(self, value):
        self.name = value

    def get_name(self):
        return self.name

    def set_type(self, value):
        self.type = value

    def get_type(self):
        return self.type

    def set_value(self, value):
        self.value = value

    def get_value(self):
        return self.value

    def code_gen(self, module, builder=None):
        """Emit storage for the variable and remember its pointer.

        Declarations whose enclosing scope has no parent are globals;
        everything else becomes a stack slot in the current function.
        """
        t = get_llvm_prm_type(self.type)
        if self.get_parent().get_parent() is None:
            v = ir.GlobalVariable(module, t, self.name)
            # BUG FIX: the is_global flag was declared in __init__ but
            # never set anywhere; record global-ness here.
            self.is_global = True
        else:
            v = builder.alloca(t, name=self.name)
        self.ptr = v
        # Removed the stray debug `print(v)` left over from development.
        return v
class VarDefAST(BaseAST):
    """Reference to (use of) a previously declared variable."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.dim = -1
        self.value = None
        self.var_dec = None  # the VarDecAST this use resolves to

    def set_dim(self, dim):
        self.dim = dim

    def get_dim(self):
        return self.dim

    def set_var_dec(self, obj):
        self.var_dec = obj

    def get_var_dec(self):
        return self.var_dec

    def set_value(self, value):
        self.value = value

    def get_type(self):
        return self.var_dec.type

    def code_gen(self, module, builder=None):
        """Produce the variable's current value.

        Function arguments are already SSA values and are returned directly;
        anything else is loaded from its alloca/global pointer.
        """
        # isinstance is the idiomatic (and subclass-safe) check, replacing
        # the original `type(x) == ir.Argument` comparison.
        if isinstance(self.var_dec.ptr, ir.Argument):
            return self.var_dec.ptr
        return builder.load(self.var_dec.ptr, name=self.var_dec.name)
class ProcedureCallAST(BaseAST):
    """Call of a procedure (no return value)."""

    def __init__(self, proc, args, parent=None):
        super().__init__(parent=parent)
        self.proc_callee = proc  # ProcedureDefAST being invoked
        self.args = args
        # Removed the set_parent override — it was byte-identical to the
        # inherited BaseAST.set_parent and therefore redundant.

    def is_valid(self):
        # TODO: argument count/type checking is not implemented yet.
        pass

    def code_gen(self, module, builder=None):
        """Evaluate the argument expressions and emit the call instruction."""
        args = [a.code_gen(module, builder) for a in self.args]
        # Removed leftover debug prints ('proc', args) from development.
        return builder.call(self.proc_callee.proc, args)
class FunctionCallAST(BaseAST):
    """Call of a function; yields the callee's return value."""

    def __init__(self, func, args, parent=None):
        super().__init__(parent=parent)
        self.func_callee = func  # FunctionDefAST being invoked
        self.args = args
        self.ret = None          # optional name for the call result
        # Removed the set_parent override — it duplicated BaseAST.set_parent.

    def set_ret_name(self, name):
        self.ret = name

    def get_type(self):
        return self.func_callee.type

    def code_gen(self, module, builder=None):
        """Evaluate the argument expressions and emit a named call instruction."""
        args = [a.code_gen(module, builder) for a in self.args]
        # Removed leftover debug prints ('func', args) from development.
        return builder.call(self.func_callee.func, args, name="tmp")
class ReturnAst(BaseAST):
    # AST node for a `return <expr>` statement.
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.value = None  # expression whose result is returned
    def set_value(self, value):
        self.value = value
    def code_gen(self, func, builder=None):
        # NOTE(review): the first parameter is named `func` while sibling
        # nodes call theirs `module`; callers thread the same object through
        # either way — confirm the naming is intentional.
        tmp = self.value.code_gen(func, builder)
        builder.ret(tmp)
class AssignmentAST(BaseAST):
    """Assignment statement: store the r-value into the l-value's slot."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.lval = None  # VarDefAST of the assignment target
        self.rval = None  # expression AST producing the value

    def set_lval(self, value):
        self.lval = value

    def set_rval(self, value):
        self.rval = value

    def is_valid(self):
        """An assignment type-checks when both sides share a type."""
        # Idiom: return the comparison directly instead of the original
        # if/else returning True/False.
        return self.lval.get_type() == self.rval.get_type()

    def code_gen(self, module, builder=None):
        """Emit the r-value, store it into the target, return the target ptr."""
        rval_code = self.rval.code_gen(module, builder)
        builder.store(rval_code, self.lval.var_dec.ptr)
        return self.lval.var_dec.ptr
class BinaryAST(BaseAST):
    """Binary operation node: applies ``operator`` to ``lhs`` and ``rhs``."""
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.operator = None  # symbolic opcode: 'ADD', 'SUB', 'LT', ...
        self.lhs = None
        self.rhs = None
    def set_lhs(self, value):
        self.lhs = value
    def set_rhs(self, value):
        self.rhs = value
    def set_op(self, value):
        self.operator = value
    def is_valid(self):
        # Structurally valid once both operands and a known operator are set.
        if utils.is_operator(self.operator) and (self.lhs is not None) and (self.rhs is not None):
            return True
        else:
            return False
    def get_type(self):
        # Arithmetic ops preserve the (shared) operand type; comparisons
        # and mismatched operand types yield None.
        t1 = self.lhs.get_type()
        t2 = self.rhs.get_type()
        if t1 == t2 and utils.is_arithmetic_operator(self.operator):
            return t1
        else:
            return None
    def code_gen(self, module, builder=None):
        """Emit IR for the operation, dispatching on operator and operand type.

        Returns the result value, or None when either operand failed to
        generate or the operator/type combination is unsupported
        (e.g. logical AND/OR on floats).
        """
        code_lhs = self.lhs.code_gen(module, builder)
        code_rhs = self.rhs.code_gen(module, builder)
        if code_lhs is None or code_rhs is None:
            return None
        if self.operator == 'AND':
            if self.lhs.get_type() == 'INT':
                return builder.and_(code_lhs, code_rhs, 'andtmp')
            elif self.lhs.get_type() == 'FLOAT':
                return None
        elif self.operator == 'OR':
            if self.lhs.get_type() == 'INT':
                return builder.or_(code_lhs, code_rhs, 'ortmp')
            elif self.lhs.get_type() == 'FLOAT':
                return None
        # Note: this starts a second dispatch chain, so the trailing `else`
        # below only applies to the operators from 'ADD' onward.
        if self.operator == 'ADD':
            if self.lhs.get_type() == 'INT':
                return builder.add(code_lhs, code_rhs, 'addtmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fadd(code_lhs, code_rhs, 'addtmp')
        elif self.operator == 'SUB':
            if self.lhs.get_type() == 'INT':
                return builder.sub(code_lhs, code_rhs, 'subtmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fsub(code_lhs, code_rhs, 'subtmp')
        elif self.operator == 'DIV':
            if self.lhs.get_type() == 'INT':
                # NOTE(review): udiv is *unsigned* division, while the
                # comparisons below use icmp_signed — if INT is signed,
                # sdiv is presumably intended; confirm.
                return builder.udiv(code_lhs, code_rhs, 'divtmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fdiv(code_lhs, code_rhs, 'divtmp')
        elif self.operator == 'MUL':
            if self.lhs.get_type() == 'INT':
                return builder.mul(code_lhs, code_rhs, 'multmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fmul(code_lhs, code_rhs, 'multmp')
        elif self.operator == 'LT':
            if self.lhs.get_type() == 'INT':
                return builder.icmp_signed('<', code_lhs, code_rhs, 'lttmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fcmp_ordered('<', code_lhs, code_rhs, 'lttmp')
        elif self.operator == 'LE':
            if self.lhs.get_type() == 'INT':
                return builder.icmp_signed('<=', code_lhs, code_rhs, 'letmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fcmp_ordered('<=', code_lhs, code_rhs, 'letmp')
        elif self.operator == 'GT':
            if self.lhs.get_type() == 'INT':
                return builder.icmp_signed('>', code_lhs, code_rhs, 'gttmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fcmp_ordered('>', code_lhs, code_rhs, 'gttmp')
        elif self.operator == 'GE':
            if self.lhs.get_type() == 'INT':
                return builder.icmp_signed('>=', code_lhs, code_rhs, 'getmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fcmp_ordered('>=', code_lhs, code_rhs, 'getmp')
        elif self.operator == 'EQ':
            if self.lhs.get_type() == 'INT':
                return builder.icmp_signed('==', code_lhs, code_rhs, 'eqtmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fcmp_ordered('==', code_lhs, code_rhs, 'eqtmp')
        elif self.operator == 'NE':
            if self.lhs.get_type() == 'INT':
                return builder.icmp_signed('!=', code_lhs, code_rhs, 'netmp')
            elif self.lhs.get_type() == 'FLOAT':
                return builder.fcmp_ordered('!=', code_lhs, code_rhs, 'netmp')
        else:
            return None
class CompoundExpression(BaseAST):
    """A sequence of statements executed in order (a block/scope body)."""

    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.order_operations = []  # child statements in execution order

    def set_child(self, obj):
        obj.set_parent(self)
        self.order_operations.append(obj)

    def get_var_def(self, name):
        """Find the most recent assignment to ``name`` in this block.

        Iterates newest-first; returns the l-value of the latest matching
        AssignmentAST, or None when the block never assigns ``name``.
        """
        # reversed() iterates backwards without the copy()+reverse() the
        # original performed on every lookup.
        for op in reversed(self.order_operations):
            if isinstance(op, AssignmentAST) and op.lval.var_dec.name == name:
                return op.lval
        return None

    def code_gen(self, module, bb=None):
        """Generate code for each statement in order; returns the module."""
        for op in self.order_operations:
            op.code_gen(module, bb)
        return module
class ExprIfAST(CompoundExpression):
    # if/else statement node.
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.expression = None  # condition expression
        self.then_body = None
        self.else_body = None   # may stay None for an if without else
    def set_expression(self, expr):
        self.expression = expr
    def set_then(self, obj):
        self.then_body = obj
    def set_else(self, obj):
        self.else_body = obj
    def code_gen(self, module, builder=None):
        # Emit a conditional branch whose then/else blocks join at a merge
        # block; returns the merge block.
        expr = self.expression.code_gen(module, builder)
        func = builder.basic_block.function
        then_block = func.append_basic_block('then')
        else_block = func.append_basic_block('else')
        # NOTE(review): the merge block is labelled 'ifcond', a name that
        # usually denotes the condition block — confirm it is intentional.
        merge_block = func.append_basic_block('ifcond')
        builder.cbranch(expr, then_block, else_block)
        builder.position_at_end(then_block)
        then_value = self.then_body.code_gen(module, builder)
        builder.branch(merge_block)
        builder.position_at_end(else_block)
        # `and` short-circuits: no code is generated when else_body is None.
        else_value = self.else_body and self.else_body.code_gen(module, builder)
        builder.branch(merge_block)
        builder.position_at_end(merge_block)
        return merge_block
class ExprWhileAST(CompoundExpression):
    # while-loop node: condition checked before each iteration.
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.expression = None  # loop condition
        self.body = None        # loop body (CompoundExpression)
    def set_expression(self, expr):
        self.expression = expr
    def set_body(self, obj):
        self.body = obj
    def code_gen(self, module, builder=None):
        # Layout: expr_block (condition) -> loop (body) -> back to expr_block,
        # with a fall-through to after_block when the condition fails.
        func = builder.basic_block.function
        expr_block = func.append_basic_block('expr')
        body_loop = func.append_basic_block('loop')
        after_block = func.append_basic_block('after')
        builder.branch(expr_block)
        builder.position_at_end(expr_block)
        expr = self.expression.code_gen(module, builder)
        builder.cbranch(expr, body_loop, after_block)
        # Re-read the builder's current block in case condition codegen
        # moved it; the back-edge below targets this block.
        expr_block = builder.basic_block
        builder.position_at_end(body_loop)
        body_code = self.body.code_gen(module, builder)
        builder.branch(expr_block)
        body_loop = builder.basic_block
        builder.position_at_end(after_block)
        after_block = builder.basic_block
        return after_block
class ExprDoWhileAST(CompoundExpression):
    # do-while node: body runs once before the condition is first checked.
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.expression = None  # loop condition (checked after the body)
        self.body = None
    def set_expression(self, expr):
        self.expression = expr
    def set_body(self, obj):
        self.body = obj
    def code_gen(self, module, builder=None):
        # Layout: loop (body) -> expr_block (condition) -> back to loop or
        # out to the exit block.
        func = builder.basic_block.function
        body_loop = func.append_basic_block('loop')
        expr_block = func.append_basic_block('expr_block')
        # NOTE(review): the exit block is named 'before' — presumably it
        # means "after the loop"; confirm the label.
        before_loop = func.append_basic_block('before')
        builder.branch(body_loop)
        builder.position_at_end(body_loop)
        body_code = self.body.code_gen(module, builder)
        builder.branch(expr_block)
        # Refresh block handles after codegen may have repositioned them.
        body_loop = builder.basic_block
        builder.position_at_end(expr_block)
        expr = self.expression.code_gen(module, builder)
        builder.cbranch(expr, body_loop, before_loop)
        expr_block = builder.basic_block
        builder.position_at_end(before_loop)
        before_loop = builder.basic_block
        return before_loop
class ProcedureDefAST(CompoundExpression):
    # Definition of a procedure (void function): name, args, body statements.
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.name = ""
        self.args = []    # VarDecAST argument declarations
        self.body = None
        self.proc = None  # ir.Function, populated by code_gen
    def set_name(self, value):
        self.name = value
    def set_body(self, obj):
        self.body = obj
    def add_arg(self, arg):
        # Arguments also live in the procedure's child scope for lookups.
        self.args.append(arg)
        self.add_child(arg.name, arg)
    def code_gen(self, module, bb=None):
        # Emit signature, entry block, body, and an implicit void return.
        args_type = []
        for arg in self.args:
            t = get_llvm_prm_type(arg.type)
            args_type.append(t)
        ty_func = ir.FunctionType(VOID, args_type)
        func = ir.Function(module, ty_func, self.name)
        self.proc = func
        for i in range(len(self.args)):
            # Name the IR arguments and let VarDefAST return them directly.
            func.args[i].name = self.args[i].name
            self.args[i].ptr = func.args[i]
        bb = func.append_basic_block("entry")
        builder = ir.IRBuilder(bb)
        for op in self.order_operations:
            # NOTE(review): body ops receive `func` here while other nodes
            # pass the module — confirm this asymmetry is intended.
            op.code_gen(func, builder)
        if builder.block.terminator is None:
            # Procedures are void: close any fall-through path.
            builder.ret_void()
class FunctionDefAST(CompoundExpression):
    # Definition of a value-returning function.
    def __init__(self, parent=None):
        super().__init__(parent=parent)
        self.name = ""
        self.args = []           # VarDecAST argument declarations
        self.return_value = None
        self.type = None         # front-end return type name
        self.body = None
        self.func = None         # ir.Function, populated by code_gen
    def set_name(self, value):
        self.name = value
    def set_body(self, obj):
        self.body = obj
    def set_type(self, t):
        self.type = t
    def add_arg(self, arg):
        self.args.append(arg)
        self.add_child(arg.name, arg)
    def set_return_value(self, obj):
        self.return_value = obj
    def code_gen(self, module, bb=None):
        # Emit signature, entry block, and the body statements.
        ret_type = get_llvm_prm_type(self.type)
        args_type = [get_llvm_prm_type(arg.type) for arg in self.args]
        ty_func = ir.FunctionType(ret_type, args_type)
        func = ir.Function(module, ty_func, self.name)
        self.func = func
        for i in range(len(self.args)):
            func.args[i].name = self.args[i].name
            self.args[i].ptr = func.args[i]
        bb = func.append_basic_block("entry")
        builder = ir.IRBuilder(bb)
        for op in self.order_operations:
            op.code_gen(func, builder)
        # NOTE(review): unlike ProcedureDefAST there is no terminator check
        # here — a body without an explicit ReturnAst leaves the entry block
        # unterminated (invalid IR); confirm a return is always present.
|
# Repository: sgallag-insta/pixray
import unittest
from util import *
class TestUtilMethods(unittest.TestCase):
    """Unit tests for the path, unit-parsing, and pipe-splitting helpers
    in util.py."""
    #region get_file_path
    def test_get_file_path_no_backslash(self):
        self.assertEqual(get_file_path('/testpath', 'testfile', '.png'), '/testpath/testfile.png')
    def test_get_file_path_with_backslash(self):
        self.assertEqual(get_file_path('/testpath/', 'testfile', '.png'), '/testpath/testfile.png')
    def test_get_file_path_filename_contains_backslash(self):
        self.assertRaises(ValueError, get_file_path, '/testpath/', '\\test\\filename.png', '.png')
    def test_get_file_path_filename_contains_slash(self):
        self.assertRaises(ValueError, get_file_path, '/testpath/', '/test/filename.png', '.png')
    def test_get_file_path_no_path(self):
        self.assertEqual(get_file_path('', 'testfile', '.png'), 'testfile.png')
    def test_get_file_no_filename_raises_error(self):
        self.assertRaises(ValueError, get_file_path, '/testpath/', None, '.png')
    def test_get_file_empty_filename_raises_error(self):
        self.assertRaises(ValueError, get_file_path, '/testpath/', ' ', '.png')
    def test_get_file_path_replaces_suffix(self):
        self.assertEqual(get_file_path('/testpath', 'testfile.png', '.mp4'), '/testpath/testfile.mp4')
    #endregion get_file_path
    #region parse_unit
    def test_parse_unit_valid_iterations(self):
        self.assertEqual(parse_unit('200iterations', 500, 'overlay_until'), 200)
    def test_parse_unit_valid_iterations_space(self):
        self.assertEqual(parse_unit('200 i', 500, 'overlay_until'), 200)
    def test_parse_unit_valid_percentage(self):
        self.assertEqual(parse_unit('50%', 500, 'overlay_until'), 250)
    def test_parse_unit_valid_percentage_space(self):
        self.assertEqual(parse_unit('33 percent', 500, 'overlay_until'), 165)
    def test_parse_unit_valid_invalid(self):
        self.assertRaises(ValueError, parse_unit, ' percent', 500, 'overlay_until')
    def test_parse_unit_none(self):
        self.assertEqual(parse_unit(None, 500, 'overlay_until'), None)
    def test_parse_unit_robust_format(self):
        # Mixed case and stray whitespace should still parse.
        self.assertEqual(parse_unit('200 iterATions ', 500, 'overlay_until'), 200)
    def test_parse_unit_default_percent(self):
        # A bare number defaults to a percentage of the total.
        self.assertEqual(parse_unit('50', 500, 'overlay_until'), 250)
    def test_parse_unit_default_unit_arg(self):
        # ...unless the default unit argument says iterations.
        self.assertEqual(parse_unit('50', 500, 'overlay_until', 'i'), 50)
    def test_parse_unit_plain_integer(self):
        self.assertEqual(parse_unit(50, 500, 'overlay_until', 'i'), 50)
    def test_parse_unit_leading_decimal_iterations(self):
        self.assertEqual(parse_unit(.6, 500, 'overlay_until', 'i'), 0)
    def test_parse_unit_leading_decimal_percent(self):
        self.assertEqual(parse_unit(.5, 500, 'overlay_until', 'p'), 2)
    def test_parse_unit_trailing_decimal_invalid(self):
        self.assertRaises(ValueError, parse_unit, '67.i', 500, 'overlay_until')
    #endregion parse_unit
    #region split_pipes
    def test_split_pipes_value_none(self):
        self.assertEqual(split_pipes(None), None)
    def test_split_pipes_with_pipe(self):
        self.assertEqual(split_pipes('test|another'), ['test', 'another'])
    def test_split_pipes_with_empty(self):
        self.assertEqual(split_pipes(''), '')
    def test_split_pipes_with_one(self):
        self.assertEqual(split_pipes('single'), ['single'])
    #endregion split_pipes
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# utils.py
import argparse
import collections
import os
import traceback
from random import shuffle
from typing import List, Optional, Dict, Union, Any
import deepspeed
import torch
from torch.utils.data import Dataset
from tqdm import tqdm
from transformers import AutoTokenizer
from transformers.trainer import Trainer, logger
from preprocessing import SentencePair
class CustomTrainer(Trainer):
    def _report_to_hp_search(
        self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
    ) -> None:
        # Intentionally a no-op: overrides the base Trainer hook so metrics
        # are not reported to a hyperparameter-search backend during training.
        pass
def evaluate(self, eval_dataset: Optional[Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement
the :obj:`__len__` method.
Returns:
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[np.ndarray]
metrics: Optional[Dict[str, float]]
"""
model = self.model
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
eval_dataloader = self.get_eval_dataloader(eval_dataset)
batch_size = eval_dataloader.batch_size
print("***** Running %s *****", "Evaluation")
logger.info("***** Running %s *****", "Evaluation")
logger.info(" Batch size = %d", batch_size)
model.eval()
losses = list()
true_losses = list()
for step, inputs in enumerate(tqdm(eval_dataloader)):
try:
with torch.no_grad():
inputs = self._prepare_inputs(inputs)
outputs = model(**inputs)
true_similarities = torch.nn.functional.cosine_similarity(outputs[1], outputs[2])
true_diff = torch.ones_like(true_similarities) - true_similarities
true_loss = torch.mean(true_diff).item()
N = outputs[1].size()[0]
neg = list()
for i in range(N):
xxx = torch.zeros(N - 1).to(outputs[1].device)
negative_samples_similarities_exp = [
torch.nn.functional.cosine_similarity(outputs[1][i].unsqueeze(0),
outputs[2][n].unsqueeze(0))
for n in
range(N) if n != i]
for idx in range(N - 1):
xxx[idx] = negative_samples_similarities_exp[idx]
neg.append(torch.mean(xxx).item())
true_loss1 = sum(neg) / len(neg) + true_loss
losses.append(outputs[0].mean().item())
true_losses.append(true_loss1)
except Exception:
print(traceback.print_exc())
metrics = {
"understandable_loss": sum(true_losses) / len(true_losses),
"loss": sum(losses) / len(losses)
}
# Prefix all keys with eval_
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
self.log(metrics)
return metrics
class DataLoaderLaper(Dataset):
    """Thin ``torch`` Dataset wrapper around a list of ``SentencePair`` items.

    When ``shuffle_every_epoch`` is set, the item list is reshuffled each
    time index 0 is requested (i.e. once per epoch with a sequential sampler).
    """

    def __init__(self, sentence_list: List[SentencePair], shuffle_every_epoch=False):
        self.items = sentence_list
        self.shuffle_every_epoch = shuffle_every_epoch

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        # Index 0 marks the start of a new epoch under sequential sampling.
        if self.shuffle_every_epoch and idx == 0:
            shuffle(self.items)
        pair = self.items[idx]
        return {"source": pair.get_source(), "target": pair.get_target()}
def run_tensorboard():
    """Launch TensorBoard on 0.0.0.0:6006, reading LOG_DIR (default ./tensorboard)."""
    log_dir = os.environ.get("LOG_DIR", "./tensorboard")
    os.system("tensorboard --logdir=" + log_dir + " --port=6006 --host=0.0.0.0")
def add_argument():
    """Build and parse the training CLI (argparse + deepspeed arguments).

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(description='enwik8')
    parser.add_argument('--with_cuda', default=True, action='store_true',
                        help='use CPU in case there\'s no GPU support')
    parser.add_argument('--use_ema', default=False, action='store_true',
                        help='whether use exponential moving average')
    # Fall back to 30 when EPOCHS is unset; int(None) would raise TypeError
    # (and the help text already promises a default of 30).
    parser.add_argument('-e', '--epochs', default=int(os.environ.get("EPOCHS", 30)), type=int,
                        help='number of total epochs (default: 30)')
    parser.add_argument('--local_rank', type=int, default=-1,
                        help='local rank passed from distributed launcher')
    parser = deepspeed.add_config_arguments(parser)
    args = parser.parse_args()
    return args
def data_collector_deepspeed(batch_of_sentences, _tokenizer, rank):
    """Tokenize a batch of {'source': [...], 'target': [...]} lists for deepspeed.

    :param batch_of_sentences: dict with parallel 'source'/'target' lists
    :param _tokenizer: HuggingFace tokenizer callable
    :param rank: device (or device index) the tensors are moved to
    :return: dict with 'x1'/'x2' sub-dicts of input_ids and attention_mask
    """
    pairs = [SentencePair(batch_of_sentences["source"][h], batch_of_sentences["target"][h])
             for h in range(len(batch_of_sentences["source"]))]
    source_batch = _tokenizer([s.get_source() for s in pairs], add_special_tokens=True, padding=True,
                              return_tensors="pt")
    target_batch = _tokenizer([s.get_target() for s in pairs], add_special_tokens=True, padding=True,
                              return_tensors="pt")
    # Truncate every sequence to 512 tokens. The original version had stray
    # trailing commas here that turned src_in/tgt_in into 1-tuples, which was
    # then papered over with a [0] index in the returned dict.
    src_in = source_batch["input_ids"].transpose(0, 1)[0:512].transpose(0, 1).to(rank)
    src_attn = source_batch["attention_mask"].transpose(0, 1)[0:512].transpose(0, 1).to(rank)
    tgt_in = target_batch["input_ids"].transpose(0, 1)[0:512].transpose(0, 1).detach().to(rank)
    tgt_attn = target_batch["attention_mask"].transpose(0, 1)[0:512].transpose(0, 1).detach().to(rank)
    return {
        "x1": {
            "input_ids": src_in,
            "attention_mask": src_attn
        },
        "x2": {
            "input_ids": tgt_in,
            "attention_mask": tgt_attn
        },
    }
# Module-level tokenizer shared by data_collector_huggingface; model name is
# configurable via the PRETRAINED_MODEL_AND_TOKENIZER environment variable.
tokenizer = AutoTokenizer.from_pretrained(os.environ.get("PRETRAINED_MODEL_AND_TOKENIZER", "distilroberta-base"))
def data_collector_huggingface(batch_of_sentences):
    """Tokenize a batch of {'source': str, 'target': str} dicts for the HF Trainer.

    Uses the module-level ``tokenizer``; truncation to 512 tokens is done by
    the tokenizer itself. (The previous ``global tokenizer, rank`` declared a
    ``rank`` name that was never defined or used; it has been dropped.)
    """
    global tokenizer
    source_batch = tokenizer([s["source"] for s in batch_of_sentences], add_special_tokens=True, padding=True,
                             return_tensors="pt", truncation=True, max_length=512)
    target_batch = tokenizer([s["target"] for s in batch_of_sentences], add_special_tokens=True, padding=True,
                             return_tensors="pt", truncation=True, max_length=512)
    return {
        "x1": {
            "input_ids": source_batch["input_ids"],
            "attention_mask": source_batch["attention_mask"]
        },
        "x2": {
            "input_ids": target_batch["input_ids"],
            "attention_mask": target_batch["attention_mask"]
        },
    }
|
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
####### USAGE #########
# python rfcn-benchmark.py -i <path-to-COCO-validation-images>
from __future__ import print_function
import argparse
import os
import time
import random
import requests
import numpy as np
from PIL import Image
import tensorflow as tf
from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array
def check_for_link(value):
    """
    Raise argparse.ArgumentTypeError if the given path is a link.

    os.path.islink catches symlinks; for regular files, a link count above
    one in os.stat() indicates a hard link.
    """
    if os.path.islink(value):
        raise argparse.ArgumentTypeError("{} cannot be a link.".format(value))
    if os.path.isfile(value) and os.stat(value).st_nlink > 1:
        raise argparse.ArgumentTypeError("{} cannot be a link.".format(value))
def check_valid_folder(value):
    """Return *value* if it is an existing, non-link directory; raise otherwise.

    A value of None is passed through untouched so argparse defaults work.
    """
    if value is None:
        return value
    if not os.path.isdir(value):
        raise argparse.ArgumentTypeError(
            "{} does not exist or is not a directory.".format(value))
    check_for_link(value)
    return value
def get_random_image(image_dir):
    """Load a random image from *image_dir* as a (height, width, 3) uint8 array.

    Converting to RGB first makes grayscale and RGBA files safe: the original
    reshape to 3 channels raised ValueError for any non-RGB image.
    """
    image_path = os.path.join(image_dir, random.choice(os.listdir(image_dir)))
    image = Image.open(image_path).convert('RGB')
    # np.asarray on a PIL RGB image already yields (H, W, 3) uint8.
    return np.asarray(image, dtype=np.uint8)
def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10):
    """POST batches of a random image to SERVER_URL and report timing.

    The first *warm_up_iteration* requests are excluded from the average.
    Prints latency (batch size 1 only) and throughput.
    """
    total_time = 0
    for iteration in range(1, num_iteration + 1):
        np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(),
                              batch_size, axis=0).tolist()
        predict_request = '{"instances" : %s}' % np_images
        start_time = time.time()
        requests.post(SERVER_URL, data=predict_request)
        time_consume = time.time() - start_time
        print('Iteration %d: %.3f sec' % (iteration, time_consume))
        if iteration > warm_up_iteration:
            total_time += time_consume
    time_average = total_time / (num_iteration - warm_up_iteration)
    print('Average time: %.3f sec' % (time_average))
    print('Batch size = %d' % batch_size)
    if batch_size == 1:
        print('Latency: %.3f ms' % (time_average * 1000))
    print('Throughput: %.3f images/sec' % (batch_size / time_average))
if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--images_path", type=check_valid_folder, required=True,
                    help="Path to COCO validation directory")
    args = vars(ap.parse_args())
    # TensorFlow Serving REST endpoint for the R-FCN model.
    SERVER_URL = 'http://localhost:8501/v1/models/rfcn:predict'
    IMAGES_PATH = args['images_path']
    print('\n SERVER_URL: {} \n IMAGES_PATH: {}'.format(SERVER_URL, IMAGES_PATH))
    # Latency run (batch of 1), then throughput run (batch of 128).
    print('\nStarting R-FCN model benchmarking for Latency with batch_size=1, num_iteration=20, warm_up_iteration=10')
    benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10)
    print('\nStarting R-FCN model benchmarking for Throughput with batch_size=128, num_iteration=10, warm_up_iteration=2')
    benchmark(batch_size=128, num_iteration=10, warm_up_iteration=2)
|
<filename>client/test.py
#!/usr/bin/env python2
# -*- coding: utf-8-*-
import os
import sys
import unittest
import logging
import tempfile
import shutil
import contextlib
import argparse
from mock import patch, Mock
import test_mic
import vocabcompiler
import g2p
import brain
import jasperpath
import tts
import diagnose
from stt import TranscriptionMode
# Minimal user profile consumed by the module tests below.
DEFAULT_PROFILE = {
    'prefers_email': False,
    'location': '08544',
    'timezone': 'US/Eastern',
    'phone_number': '012344321'
}
class TestVocabCompiler(unittest.TestCase):
    """Tests for phrase extraction in vocabcompiler."""

    def testPhraseExtraction(self):
        # A module advertising WORDS = ['MOCK'] must surface 'MOCK'.
        expected_phrases = ['MOCK']
        mock_module = Mock()
        mock_module.WORDS = ['MOCK']
        with patch.object(brain.Brain, 'get_modules',
                          classmethod(lambda cls: [mock_module])):
            extracted_phrases = vocabcompiler.get_all_phrases()
        self.assertEqual(expected_phrases, extracted_phrases)

    def testKeywordPhraseExtraction(self):
        expected_phrases = ['MOCK']
        with tempfile.TemporaryFile() as f:
            # We can't use mock_open here, because it doesn't seem to work
            # with the 'for line in f' syntax
            f.write("MOCK\n")
            f.seek(0)
            with patch('%s.open' % vocabcompiler.__name__,
                       return_value=f, create=True):
                extracted_phrases = vocabcompiler.get_keyword_phrases()
        self.assertEqual(expected_phrases, extracted_phrases)
class TestVocabulary(unittest.TestCase):
    """Compile-workflow tests for DummyVocabulary; subclasses override VOCABULARY."""
    VOCABULARY = vocabcompiler.DummyVocabulary

    @contextlib.contextmanager
    def do_in_tempdir(self):
        # Yield a throwaway directory and remove it even when the managed
        # block raises (the original skipped rmtree on exception, leaking
        # the directory).
        tempdir = tempfile.mkdtemp()
        try:
            yield tempdir
        finally:
            shutil.rmtree(tempdir)

    def testVocabulary(self):
        phrases = ['GOOD BAD UGLY']
        with self.do_in_tempdir() as tempdir:
            self.vocab = self.VOCABULARY(path=tempdir)
            self.assertIsNone(self.vocab.compiled_revision)
            self.assertFalse(self.vocab.is_compiled)
            self.assertFalse(self.vocab.matches_phrases(phrases))
            # We're now testing error handling. To avoid flooding the
            # output with error messages that are catched anyway,
            # we'll temporarly disable logging. Otherwise, error log
            # messages and traceback would be printed so that someone
            # might think that tests failed even though they succeeded.
            logging.disable(logging.ERROR)
            with self.assertRaises(OSError):
                with patch('os.makedirs', side_effect=OSError('test')):
                    self.vocab.compile(phrases)
            with self.assertRaises(OSError):
                with patch('%s.open' % vocabcompiler.__name__,
                           create=True,
                           side_effect=OSError('test')):
                    self.vocab.compile(phrases)

            class StrangeCompilationError(Exception):
                pass
            with patch.object(self.vocab, '_compile_vocabulary',
                              side_effect=StrangeCompilationError('test')):
                with self.assertRaises(StrangeCompilationError):
                    self.vocab.compile(phrases)
                with self.assertRaises(StrangeCompilationError):
                    with patch('os.remove',
                               side_effect=OSError('test')):
                        self.vocab.compile(phrases)
            # Re-enable logging again
            logging.disable(logging.NOTSET)
            self.vocab.compile(phrases)
            self.assertIsInstance(self.vocab.compiled_revision, str)
            self.assertTrue(self.vocab.is_compiled)
            self.assertTrue(self.vocab.matches_phrases(phrases))
            self.vocab.compile(phrases)
            self.vocab.compile(phrases, force=True)
class TestPocketsphinxVocabulary(TestVocabulary):
    """Runs the shared vocabulary tests against the real Pocketsphinx backend."""
    VOCABULARY = vocabcompiler.PocketsphinxVocabulary

    def testVocabulary(self):
        super(TestPocketsphinxVocabulary, self).testVocabulary()
        # The compiled vocabulary must expose decoder kwargs for pocketsphinx.
        kwargs = self.vocab.decoder_kwargs
        self.assertIsInstance(kwargs, dict)
        for key in ('lm', 'dict'):
            self.assertIn(key, kwargs)
class TestPatchedPocketsphinxVocabulary(TestPocketsphinxVocabulary):
    """Same tests as TestPocketsphinxVocabulary, but with cmuclmtk and the
    G2P converter replaced by fakes so no external binaries are needed."""

    def testVocabulary(self):
        # Fake cmuclmtk.text2vocab: one word per line.
        def write_test_vocab(text, output_file):
            with open(output_file, "w") as f:
                for word in text.split(' '):
                    f.write("%s\n" % word)

        # Fake cmuclmtk.text2lm: minimal placeholder language model.
        def write_test_lm(text, output_file, **kwargs):
            with open(output_file, "w") as f:
                f.write("TEST")

        # Stand-in for PhonetisaurusG2P with canned phoneme translations.
        class DummyG2P(object):
            def __init__(self, *args, **kwargs):
                pass

            @classmethod
            def get_config(self, *args, **kwargs):
                return {}

            def translate(self, *args, **kwargs):
                return {'GOOD': ['G UH D',
                                 'G UW D'],
                        'BAD': ['B AE D'],
                        'UGLY': ['AH G L IY']}
        with patch('vocabcompiler.cmuclmtk',
                   create=True) as mocked_cmuclmtk:
            mocked_cmuclmtk.text2vocab = write_test_vocab
            mocked_cmuclmtk.text2lm = write_test_lm
            with patch('vocabcompiler.PhonetisaurusG2P', DummyG2P):
                super(TestPatchedPocketsphinxVocabulary,
                      self).testVocabulary()
class TestMic(unittest.TestCase):
    """Transcription tests against bundled audio clips.

    Needs the real Pocketsphinx model, so it only runs in the full
    (non ``--light``) suite.
    """

    def setUp(self):
        self.jasper_clip = jasperpath.data('audio', 'jasper.wav')
        self.time_clip = jasperpath.data('audio', 'time.wav')
        # Imported lazily so the module can load without pocketsphinx installed.
        from stt import PocketSphinxSTT
        self.stt = PocketSphinxSTT(**PocketSphinxSTT.get_config())

    def testTranscribeJasper(self):
        """
        Does Jasper recognize his name (i.e., passive listen)?
        """
        with open(self.jasper_clip, mode="rb") as f:
            transcription = self.stt.transcribe(f,
                                                mode=TranscriptionMode.KEYWORD)
        self.assertIn("JASPER", transcription)

    def testTranscribe(self):
        """
        Does Jasper recognize 'time' (i.e., active listen)?
        """
        with open(self.time_clip, mode="rb") as f:
            transcription = self.stt.transcribe(f)
        self.assertIn("TIME", transcription)
class TestG2P(unittest.TestCase):
    """Grapheme-to-phoneme translation checks via PhonetisaurusG2P."""

    def setUp(self):
        config = g2p.PhonetisaurusG2P.get_config()
        self.g2pconverter = g2p.PhonetisaurusG2P(**config)
        self.words = ['GOOD', 'BAD', 'UGLY']

    def testTranslateWord(self):
        # Translating a single word yields an entry keyed by that word.
        for word in self.words:
            translation = self.g2pconverter.translate(word)
            self.assertIn(word, translation.keys())

    def testTranslateWords(self):
        # Translating the whole list covers every word.
        results = self.g2pconverter.translate(self.words).keys()
        for word in self.words:
            self.assertIn(word, results)
class TestPatchedG2P(TestG2P):
    """Runs the G2P tests with subprocess.Popen patched so that no
    Phonetisaurus binary is required."""

    class DummyProc(object):
        # Mimics a finished Phonetisaurus process with canned output.
        def __init__(self, *args, **kwargs):
            self.returncode = 0

        def communicate(self):
            return ("GOOD\t9.20477\t<s> G UH D </s>\n" +
                    "GOOD\t14.4036\t<s> G UW D </s>\n" +
                    "GOOD\t16.0258\t<s> G UH D IY </s>\n" +
                    "BAD\t0.7416\t<s> B AE D </s>\n" +
                    "BAD\t12.5495\t<s> B AA D </s>\n" +
                    "BAD\t13.6745\t<s> B AH D </s>\n" +
                    "UGLY\t12.572\t<s> AH G L IY </s>\n" +
                    "UGLY\t17.9278\t<s> Y UW G L IY </s>\n" +
                    "UGLY\t18.9617\t<s> AH G L AY </s>\n", "")

    def setUp(self):
        with patch('g2p.diagnose.check_executable',
                   return_value=True):
            with tempfile.NamedTemporaryFile() as f:
                # NOTE(review): relies on Python 2's dict.items() returning a
                # list (the shebang pins python2); concatenating a dict_items
                # view with a list would fail on Python 3.
                conf = g2p.PhonetisaurusG2P.get_config().items()
                with patch.object(g2p.PhonetisaurusG2P, 'get_config',
                                  classmethod(lambda cls: dict(
                                      conf + [('fst_model', f.name)]))):
                    super(self.__class__, self).setUp()

    def testTranslateWord(self):
        with patch('subprocess.Popen',
                   return_value=TestPatchedG2P.DummyProc()):
            super(self.__class__, self).testTranslateWord()

    def testTranslateWords(self):
        with patch('subprocess.Popen',
                   return_value=TestPatchedG2P.DummyProc()):
            super(self.__class__, self).testTranslateWords()
class TestDiagnose(unittest.TestCase):
    """Sanity checks for diagnose.check_python_import."""

    def testPythonImportCheck(self):
        # This a python stdlib module that definitely exists
        self.assertTrue(diagnose.check_python_import("os"))
        # I sincerly hope nobody will ever create a package with that name
        self.assertFalse(
            diagnose.check_python_import("nonexistant_package"))
class TestModules(unittest.TestCase):
    """Conversation tests for the bundled Jasper modules.

    Network-dependent tests are skipped when diagnose reports no
    internet connection.
    """

    def setUp(self):
        self.profile = DEFAULT_PROFILE
        # When True, conversational tests answer affirmatively (e.g. ask
        # HN/News to actually send links).
        self.send = False

    def runConversation(self, query, inputs, module):
        """Generic method for spoofing conversation.
        Arguments:
        query -- The initial input to the server.
        inputs -- Additional input, if conversation is extended.
        Returns:
        The server's responses, in a list.
        """
        self.assertTrue(module.isValid(query))
        mic = test_mic.Mic(inputs)
        module.handle(query, mic, self.profile)
        return mic.outputs

    def testLife(self):
        from modules import Life
        query = "What is the meaning of life?"
        inputs = []
        outputs = self.runConversation(query, inputs, Life)
        self.assertEqual(len(outputs), 1)
        self.assertTrue("42" in outputs[0])

    def testJoke(self):
        from modules import Joke
        query = "Tell me a joke."
        inputs = ["Who's there?", "Random response"]
        outputs = self.runConversation(query, inputs, Joke)
        self.assertEqual(len(outputs), 3)
        # The punchline must come from the bundled jokes file.
        allJokes = open(jasperpath.data('text', 'JOKES.txt'), 'r').read()
        self.assertTrue(outputs[2] in allJokes)

    def testTime(self):
        from modules import Time
        query = "What time is it?"
        inputs = []
        self.runConversation(query, inputs, Time)

    @unittest.skipIf(not diagnose.check_network_connection(),
                     "No internet connection")
    def testGmail(self):
        # Only runs when the profile carries a Gmail password.
        key = 'gmail_password'
        if key not in self.profile or not self.profile[key]:
            return
        from modules import Gmail
        query = "Check my email"
        inputs = []
        self.runConversation(query, inputs, Gmail)

    @unittest.skipIf(not diagnose.check_network_connection(),
                     "No internet connection")
    def testHN(self):
        from modules import HN
        query = "find me some of the top hacker news stories"
        if self.send:
            inputs = ["the first and third"]
        else:
            inputs = ["no"]
        outputs = self.runConversation(query, inputs, HN)
        self.assertTrue("front-page articles" in outputs[1])

    @unittest.skipIf(not diagnose.check_network_connection(),
                     "No internet connection")
    def testNews(self):
        from modules import News
        query = "find me some of the top news stories"
        if self.send:
            inputs = ["the first"]
        else:
            inputs = ["no"]
        outputs = self.runConversation(query, inputs, News)
        self.assertTrue("top headlines" in outputs[1])

    @unittest.skipIf(not diagnose.check_network_connection(),
                     "No internet connection")
    def testWeather(self):
        from modules import Weather
        query = "what's the weather like tomorrow"
        inputs = []
        outputs = self.runConversation(query, inputs, Weather)
        self.assertTrue(
            "can't see that far ahead" in outputs[0]
            or "Tomorrow" in outputs[0])
class TestTTS(unittest.TestCase):
    """Smoke test for the dummy text-to-speech engine."""

    def testTTS(self):
        engine_class = tts.get_engine_by_slug('dummy-tts')
        engine = engine_class()
        engine.say('This is a test.')
class TestBrain(unittest.TestCase):
    """Module dispatch and error-logging tests for brain.Brain."""

    @staticmethod
    def _emptyBrain():
        # A Brain with a silent mic and the default test profile.
        mic = test_mic.Mic([])
        profile = DEFAULT_PROFILE
        return brain.Brain(mic, profile)

    def testLog(self):
        """Does Brain correctly log errors when raised by modules?"""
        my_brain = TestBrain._emptyBrain()
        # The 'unclear' fallback module is last in the module list.
        unclear = my_brain.modules[-1]
        with patch.object(unclear, 'handle') as mocked_handle:
            with patch.object(my_brain._logger, 'error') as mocked_loggingcall:
                mocked_handle.side_effect = KeyError('foo')
                my_brain.query("zzz gibberish zzz")
                self.assertTrue(mocked_loggingcall.called)

    def testSortByPriority(self):
        """Does Brain sort modules by priority?"""
        my_brain = TestBrain._emptyBrain()
        # NOTE(review): filter() returns a list under Python 2 (the shebang
        # pins python2); sorting/indexing it would fail on Python 3.
        priorities = filter(lambda m: hasattr(m, 'PRIORITY'), my_brain.modules)
        target = sorted(priorities, key=lambda m: m.PRIORITY, reverse=True)
        self.assertEqual(target, priorities)

    def testPriority(self):
        """Does Brain correctly send query to higher-priority module?"""
        my_brain = TestBrain._emptyBrain()
        hn_module = 'HN'
        hn = filter(lambda m: m.__name__ == hn_module, my_brain.modules)[0]
        with patch.object(hn, 'handle') as mocked_handle:
            my_brain.query(["hacker news"])
            self.assertTrue(mocked_handle.called)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Test suite for the Jasper client code.')
    parser.add_argument('--light', action='store_true',
                        help='runs a subset of the tests (only requires ' +
                        'Python dependencies)')
    parser.add_argument('--debug', action='store_true',
                        help='show debug messages')
    args = parser.parse_args()
    logging.basicConfig()
    logger = logging.getLogger()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    # Change CWD to jasperpath.LIB_PATH
    os.chdir(jasperpath.LIB_PATH)
    # Pure-Python suites always run; the binary-dependent suites (real G2P,
    # Pocketsphinx, microphone clips) are swapped for patched fakes in
    # --light mode.
    test_cases = [TestBrain, TestModules, TestDiagnose, TestTTS,
                  TestVocabCompiler, TestVocabulary]
    if args.light:
        test_cases.append(TestPatchedG2P)
        test_cases.append(TestPatchedPocketsphinxVocabulary)
    else:
        test_cases.append(TestG2P)
        test_cases.append(TestPocketsphinxVocabulary)
        test_cases.append(TestMic)
    suite = unittest.TestSuite()
    for test_case in test_cases:
        suite.addTests(unittest.TestLoader().loadTestsFromTestCase(test_case))
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    if not result.wasSuccessful():
        sys.exit("Tests failed")
|
<reponame>TTOFFLINE-LEAK/ttoffline
from SCElement import SCElement
from SCObject import SCObject
from SCMenu import SCMenu
from direct.fsm.StatePush import StateVar, FunctionCall
from direct.showbase.DirectObject import DirectObject
from otp.avatar import Emote
# Messenger event name suffixes broadcast by SCTerminal instances.
SCTerminalSelectedEvent = 'SCTerminalSelected'
SCTerminalLinkedEmoteEvent = 'SCTerminalLinkedEmoteEvent'
SCWhisperModeChangeEvent = 'SCWhisperModeChange'
class SCTerminal(SCElement):
    """A leaf SpeedChat element: selecting it sends a terminal-selected event
    and, if a linked emote is set and enabled, a linked-emote event.

    Fixes over the previous revision: integer comparisons now use ==/!=
    instead of is/is not (identity on ints is implementation-defined small-int
    interning), and finalize no longer uses a mutable default argument.
    """

    def __init__(self, linkedEmote=None):
        SCElement.__init__(self)
        self.setLinkedEmote(linkedEmote)
        scGui = loader.loadModel(SCMenu.GuiModelName)
        self.emotionIcon = scGui.find('**/emotionIcon')
        self.setDisabled(False)
        # -1 means "unlimited charges"; see setCharges/getDisplayText.
        self.__numCharges = -1
        self._handleWhisperModeSV = StateVar(False)
        self._handleWhisperModeFC = None
        return

    def destroy(self):
        self._handleWhisperModeSV.set(False)
        if self._handleWhisperModeFC:
            self._handleWhisperModeFC.destroy()
        self._handleWhisperModeSV.destroy()
        SCElement.destroy(self)

    def privSetSettingsRef(self, settingsRef):
        SCElement.privSetSettingsRef(self, settingsRef)
        if self._handleWhisperModeFC is None:
            self._handleWhisperModeFC = FunctionCall(self._handleWhisperModeSVChanged, self._handleWhisperModeSV)
            self._handleWhisperModeFC.pushCurrentState()
        # Only listen for whisper-mode changes when they can affect us.
        self._handleWhisperModeSV.set(self.settingsRef is not None and not self.isWhisperable())
        return

    def _handleWhisperModeSVChanged(self, handleWhisperMode):
        if handleWhisperMode:
            self._wmcListener = DirectObject()
            self._wmcListener.accept(self.getEventName(SCWhisperModeChangeEvent), self._handleWhisperModeChange)
        elif hasattr(self, '_wmcListener'):
            self._wmcListener.ignoreAll()
            del self._wmcListener
        self.invalidate()

    def _handleWhisperModeChange(self, whisperMode):
        self.invalidate()

    def handleSelect(self, displayType=0):
        messenger.send(self.getEventName(SCTerminalSelectedEvent))
        if self.hasLinkedEmote() and self.linkedEmoteEnabled():
            messenger.send(self.getEventName(SCTerminalLinkedEmoteEvent), [self.linkedEmote, displayType])

    def isWhisperable(self):
        # Terminals are usable in whisper mode unless a subclass says otherwise.
        return True

    def getLinkedEmote(self):
        return self.linkedEmote

    def setLinkedEmote(self, linkedEmote):
        self.linkedEmote = linkedEmote
        self.invalidate()

    def hasLinkedEmote(self):
        return self.linkedEmote is not None

    def linkedEmoteEnabled(self):
        # Returns None when no global emote manager exists.
        if Emote.globalEmote:
            return Emote.globalEmote.isEnabled(self.linkedEmote)

    def getCharges(self):
        return self.__numCharges

    def setCharges(self, nCharges):
        self.__numCharges = nCharges
        # Value comparison; 'is 0' relied on CPython small-int interning.
        if nCharges == 0:
            self.setDisabled(True)

    def isDisabled(self):
        return self.__disabled or self.isWhispering() and not self.isWhisperable()

    def setDisabled(self, bDisabled):
        self.__disabled = bDisabled

    def onMouseClick(self, displayType, event):
        if not self.isDisabled():
            SCElement.onMouseClick(self, displayType, event)
            self.handleSelect(displayType)

    def getMinDimensions(self):
        width, height = SCElement.getMinDimensions(self)
        # Leave room for the emote icon.
        if self.hasLinkedEmote():
            width += 1.3
        return (width, height)

    def finalize(self, dbArgs=None):
        # None sentinel instead of a mutable {} default.
        if dbArgs is None:
            dbArgs = {}
        if not self.isDirty():
            return
        else:
            args = {}
            if self.hasLinkedEmote():
                self.lastEmoteIconColor = self.getEmoteIconColor()
                self.emotionIcon.setColorScale(*self.lastEmoteIconColor)
                args.update({'image': self.emotionIcon, 'image_pos': (
                    self.width - 0.6, 0, -self.height * 0.5)})
            if self.isDisabled():
                args.update({'rolloverColor': (0, 0, 0, 0), 'pressedColor': (0, 0, 0, 0),
                             'rolloverSound': None,
                             'clickSound': None,
                             'text_fg': self.getColorScheme().getTextDisabledColor() + (1, )})
            args.update(dbArgs)
            SCElement.finalize(self, dbArgs=args)
            return

    def getEmoteIconColor(self):
        if self.linkedEmoteEnabled() and not self.isWhispering():
            r, g, b = self.getColorScheme().getEmoteIconColor()
        else:
            r, g, b = self.getColorScheme().getEmoteIconDisabledColor()
        return (r,
                g,
                b,
                1)

    def updateEmoteIcon(self):
        if hasattr(self, 'button'):
            self.lastEmoteIconColor = self.getEmoteIconColor()
            for i in xrange(self.button['numStates']):
                self.button[('image%s_image' % i)].setColorScale(*self.lastEmoteIconColor)
        else:
            self.invalidate()

    def enterVisible(self):
        SCElement.enterVisible(self)
        if hasattr(self, 'lastEmoteIconColor'):
            if self.getEmoteIconColor() != self.lastEmoteIconColor:
                self.invalidate()

        def handleWhisperModeChange(whisperMode, self=self):
            if self.hasLinkedEmote():
                if self.isVisible() and not self.isWhispering():
                    self.updateEmoteIcon()
        self.accept(self.getEventName(SCWhisperModeChangeEvent), handleWhisperModeChange)

        def handleEmoteEnableStateChange(self=self):
            if self.hasLinkedEmote():
                if self.isVisible() and not self.isWhispering():
                    self.updateEmoteIcon()
        if self.hasLinkedEmote():
            if Emote.globalEmote:
                self.accept(Emote.globalEmote.EmoteEnableStateChanged, handleEmoteEnableStateChange)

    def exitVisible(self):
        SCElement.exitVisible(self)
        self.ignore(self.getEventName(SCWhisperModeChangeEvent))
        if Emote.globalEmote:
            self.ignore(Emote.globalEmote.EmoteEnableStateChanged)

    def getDisplayText(self):
        # Value comparison; 'is not -1' relied on small-int interning.
        if self.getCharges() != -1:
            return self.text + ' (%s)' % self.getCharges()
        else:
            return self.text
# -*- coding: utf-8 -*-
"""
This script makes plots of relevant data.
@author: <NAME>
"""
import yaml
import os
import pandas as pd
import energyscope as es
import numpy as np
import matplotlib.pyplot as plt
from sys import platform
from energyscope.utils import make_dir, load_config, get_FEC_from_sankey
from energyscope.postprocessing import get_total_einv
def compute_einv_res(cs: str, all_data: dict):
    """
    Compute the Einv by RESOURCES part (Einv_op).
    :param cs: case study path
    :param all_data: the data into a dict of pd.DataFrames.
    :return: pd.Series of Einv_op values indexed by resource.
    """
    einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
    resources = list(all_data['Resources'].index)
    # Restrict the breakdown to the declared resources and keep Einv_op only.
    return einv.loc[resources].copy()['Einv_op']
def compute_einv_tech(cs: str, all_data: dict):
    """
    Compute the Einv by TECHNOLOGIES part (Einv_constr).
    :param cs: case study path
    :param all_data: the data into a dict of pd.DataFrames.
    :return: pd.Series of Einv_constr values indexed by technology.
    """
    einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
    technologies = list(all_data['Technologies'].index)
    # Restrict the breakdown to the declared technologies, Einv_constr only.
    return einv.loc[technologies].copy()['Einv_constr']
def retrieve_einv_const_by_categories(range_val, all_data: dict, dir: str, user_data: str):
    """
    Retrieve the Einv_const values for all case studies classed by categories of technologies.
    :param range_val: range of GWP constrained values.
    :param all_data: the data into a dict of pd.DataFrames.
    :param dir: case study path and name.
    :param user_data: user_data directory.
    :return: dict with keys being the categories of technologies. For each catagory, a pd.DataFrame with Einv_const values for all scenarios.
    """
    # Retrieve all Einv_const values for all case studies
    # (one run_<i> sub-directory per GWP-constrained scenario).
    einv_tech = []
    for run in ['run_' + str(i) for i in range_val]:
        cs_temp = dir + '/' + run
        einv_tech.append(compute_einv_tech(cs=cs_temp, all_data=all_data))
    df_einv_tech = pd.concat(einv_tech, axis=1)
    df_einv_tech.columns = [i for i in range_val]
    # Retrieve the technologies categories:
    df_aux_tech = pd.read_csv(user_data + "/aux_technologies.csv", index_col=0)
    # tech_cat = ['Electricity', 'Heat', 'Mobility', 'Infrastructure', 'Synthetic fuels', 'Storage']
    tech_cat = list(df_aux_tech['Category'].values)
    tech_cat = list(dict.fromkeys(tech_cat))  # remove duplicate
    # Class the technologies by categories into a dict
    tech_by_cat = dict()
    for cat in tech_cat:
        tech_by_cat[cat] = list(df_aux_tech['Category'][df_aux_tech['Category'] == cat].index)
    # Retrieve the values of Einv_const per category of technology (and remove tech where Einv_const is always 0)
    # NOTE(review): retrieve_non_zero_val is not imported in this module —
    # confirm it is meant to come from energyscope.utils; as written this
    # line raises NameError at runtime.
    tech_classed_by_cat = dict()
    for cat in tech_by_cat.keys():
        tech_classed_by_cat[cat] = retrieve_non_zero_val(df=df_einv_tech.loc[tech_by_cat[cat]].transpose()) /1000 # TWh
    return tech_classed_by_cat
def compute_einv_details(cs: str, user_data: str, all_data: dict):
    """
    Compute the Einv by RESOURCES and TECHNOLOGIES, with the breakdown by
    subcategories of RESOURCES and categories of TECHNOLOGIES.
    :param cs: case study path
    :param user_data: user_data directory
    :param all_data: the data into a dict of pd.DataFrames.
    :return: (Einv summed by resource subcategory, Einv summed by technology category)
    """
    df_einv = pd.read_csv(f"{cs}/output/einv_breakdown.csv", index_col=0)
    df_inv_res = df_einv.loc[list(all_data['Resources'].index)].copy()
    df_inv_tech = df_einv.loc[list(all_data['Technologies'].index)].copy()
    # Category/subcategory lookup tables.
    df_aux_res = pd.read_csv(user_data + "/aux_resources.csv", index_col=0)
    df_aux_tech = pd.read_csv(user_data + "/aux_technologies.csv", index_col=0)

    def _totals_by_group(df_part, labels):
        # Sum the whole Einv sub-frame for each distinct label,
        # preserving first-seen label order.
        groups = list(dict.fromkeys(labels.values))
        totals = [df_part.loc[list(labels[labels == g].index)].sum().sum()
                  for g in groups]
        return groups, totals

    # 1. Einv by subcategory of resources.
    res_groups, res_totals = _totals_by_group(df_inv_res, df_aux_res['Subcategory'])
    df_inv_res_by_subcat = pd.DataFrame(data=res_totals, index=res_groups,
                                        columns=['RESSOURCES'])
    # 2. Einv by category of technologies.
    tech_groups, tech_totals = _totals_by_group(df_inv_tech, df_aux_tech['Category'])
    df_inv_tech_by_cat = pd.DataFrame(data=tech_totals, index=tech_groups,
                                      columns=['TECHNOLOGIES'])
    return df_inv_res_by_subcat, df_inv_tech_by_cat
def compute_primary_energy(cs: str, user_data: str, run: str, all_data: dict):
    """
    Compute the primary energy for a given case study.
    :param cs: case study path.
    :param user_data: user_data directory
    :param run: run name.
    :param all_data: the data into a dict of pd.DataFrames.
    :return: (primary energy aggregated by subcategory in TWh,
              per-resource table sorted by subcategory)
    """
    # load year_balance.csv
    df_y_balance = pd.read_csv(f"{cs}/output/year_balance.csv", index_col=0)
    # Resources, excluding the 'Others' category (CO2 bookkeeping) and the
    # explicit CO2_EMISSIONS entry.
    RESOURCES = list(all_data['Resources'][all_data['Resources']['Category'] != 'Others'].index)
    RESOURCES.remove('CO2_EMISSIONS')
    # Primary energy per resource, converted from GWh to TWh.
    df_temp = df_y_balance.loc[RESOURCES].sum(axis=1) / 1000
    df_primary_energy = pd.DataFrame(data=df_temp.values, index=df_temp.index, columns=['RESSOURCES'])
    # Label each resource with its subcategory from aux_resources.csv.
    # Use .loc[row, col] assignment: the previous chained
    # df['Subcategory'].loc[ind] = ... writes through a temporary and is
    # silently lost under pandas copy-on-write.
    df_primary_energy['Subcategory'] = ''
    df_aux_res = pd.read_csv(user_data + "/aux_resources.csv", index_col=0)
    for ind in df_primary_energy.index:
        df_primary_energy.loc[ind, 'Subcategory'] = df_aux_res.loc[ind]['Subcategory']
    # Distinct subcategories, first-seen order preserved.
    res_subcat = list(dict.fromkeys(df_primary_energy['Subcategory'].values))
    # aggregate the primary energy by subcategory
    primary_dict = dict()
    for subcat in res_subcat:
        primary_dict[subcat] = df_primary_energy[df_primary_energy['Subcategory'] == subcat]['RESSOURCES'].sum()
    return pd.DataFrame(data=primary_dict.values(), index=primary_dict.keys(), columns=[run]), \
        df_primary_energy.sort_values(by=['Subcategory'])
def fec_given_tech(tech: str, data: pd.DataFrame, prod_corr: float):
    """
    Compute the FEC contribution of one technology for a given EUD.

    :param tech: technology satisfying the EUD, e.g. IND_COGEN_GAS for HEAT_HIGH_T.
    :param data: dataframe with the year_balance.csv (rows = technologies,
        columns = layers; inputs are negative, outputs positive).
    :param prod_corr: corrected production of the technology for this EUD.
    :return: FEC value, or None when the technology has no output at all.

    Example: for tech = 'IND_COGEN_GAS' (inputs: 2.1739 GAS; outputs: 0.9565
    ELECTRICITY and 1 HEAT_HIGH_T), the consumed input is scaled by the share
    of prod_corr in the total (non-CO2) output. Note that a technology may
    have several inputs, e.g. CAR_PHEV consumes both ELECTRICITY and GASOLINE.
    """
    balance_row = data.loc[tech]
    consumed = balance_row[balance_row < 0]
    produced = balance_row[balance_row > 0]
    if produced.sum() == 0:
        return None
    # CO2 flows are not energy outputs: drop them before scaling.
    co2_labels = [lab for lab in ('CO2_ATM', 'CO2_INDUSTRY', 'CO2_CAPTURED')
                  if lab in produced.index]
    produced = produced.drop(labels=co2_labels)
    return prod_corr / produced.sum() * -consumed.sum()
def compute_fec(data: pd.DataFrame, user_data: str):
    """
    Compute the system FEC (final energy consumption) for a given simulation in GWh.

    :param data: year_balance.csv loaded as a pd.DataFrame (rows = technologies
        and resources plus END_USES_DEMAND, columns = layers / EUD types).
    :param user_data: path to the user_data directory containing aux_resources.csv.
    :return: FEC detailed by EUD and technologies into fec_details dict, and FEC
        aggregated by EUD into fec_tot dict.

    Assumption: FEC ELECTRICITY = EUD ELECTRICITY.
    See the FEC computation details for a given EUD in the function
    fec_given_tech(tech=tech, data=data, prod_corr=prod_corr).
    """
    # End-use demand types for which the FEC is computed.
    EUD_types = ['HEAT_HIGH_T', 'HEAT_LOW_T_DHN', 'HEAT_LOW_T_DECEN', 'MOB_PUBLIC', 'MOB_PRIVATE', 'MOB_FREIGHT_RAIL',
                 'MOB_FREIGHT_BOAT', 'MOB_FREIGHT_ROAD', 'HVC', 'AMMONIA', 'METHANOL']
    # Anything listed in aux_resources.csv is a resource, not a conversion tech.
    df_aux_res = pd.read_csv(user_data + "/aux_resources.csv", index_col=0)
    RESOURCES = list(df_aux_res.index)
    fec_details = dict()
    fec_tot = dict()
    prod_tech_EUD = dict()
    for eud in EUD_types:
        fec_EUD = []
        # list of tech that produced this eud
        prod_tech_EUD[eud] = data[eud].drop(index=['END_USES_DEMAND'])[data[eud] > 0]
        prod_sum = prod_tech_EUD[eud].sum()
        # total consumption of this energy
        conso_sum = -data[eud].drop(index=['END_USES_DEMAND'])[data[eud] < 0].sum()
        # Note: conso_eud + eud = prod_sum
        # We calculate the FEC of the eud and not of conso_eud + eud! -> a correction factor is required
        for tech in list(prod_tech_EUD[eud].index):
            # correction factor to calculate the FEC corresponding at the consumption of the eud
            corr_factor = prod_tech_EUD[eud][tech] / prod_sum
            prod_corr = prod_tech_EUD[eud][tech] - conso_sum * corr_factor
            if tech not in RESOURCES:
                # Conversion technology: trace its (corrected) inputs back.
                fec_tech_corr = fec_given_tech(tech=tech, data=data, prod_corr=prod_corr)
                # fec_tech = fec_given_tech(tech=tech, data=data, prod_corr=prod_tech_EUD[eud][tech])
            else:
                # Resource used directly: its FEC is its corrected production.
                fec_tech_corr = prod_corr
                # fec_tech = prod_tech_EUD[eud][tech]
            # print('%s %s %.1f %.1f %.1f' %(eud, tech, fec_tech, fec_tech_corr, corr_factor))
            fec_EUD.append([tech, fec_tech_corr])
        fec_details[eud] = pd.DataFrame(fec_EUD)
        fec_tot[eud] = pd.DataFrame(fec_EUD)[1].sum()
    # Assumption stated above: the electricity FEC equals the electricity EUD.
    fec_details['ELECTRICITY'] = data['ELECTRICITY'].loc['END_USES_DEMAND']
    fec_tot['ELECTRICITY'] = data['ELECTRICITY'].loc['END_USES_DEMAND']
    return fec_details, fec_tot
def eroi_computation(dir: str, user_data: str, range_val):
    """
    Compute the EROI, FEC and Einv of several case studies.

    :param dir: directory containing the runs (one sub-folder per run).
    :param user_data: user_data directory.
    :param range_val: iterable of run identifiers i (folders named run_<i>).
    :return: (EROI/FEC/Einv summary, FEC detailed by EUD) as pd.DataFrames.
    """
    fec_columns = []
    summary_rows = []
    for i in range_val:
        run_dir = '{}/run_{}'.format(dir, i)
        df_balance = pd.read_csv(run_dir + "/output/year_balance.csv", index_col=0)
        _, fec_tot = compute_fec(data=df_balance, user_data=user_data)
        fec_val = sum(fec_tot.values())
        einv_val = get_total_einv(run_dir)
        # EROI = FEC / Einv; FEC and Einv are reported in TWh.
        summary_rows.append([fec_val / einv_val, fec_val / 1000, einv_val / 1000])
        fec_columns.append(pd.DataFrame(data=fec_tot.values(), index=fec_tot.keys(),
                                        columns=['run_' + str(i)]))
    df_fec_details = pd.concat(fec_columns, axis=1) / 1000  # TWh
    df_fec_details.columns = [i for i in range_val]
    df_eroi = pd.DataFrame(data=np.asarray(summary_rows), index=[i for i in range_val],
                           columns=['EROI', 'FEC', 'Einv'])
    return df_eroi, df_fec_details
def res_details(range_val, all_data: dict, dir: str, user_data: str):
    """
    Compute the Einv and primary energy details.

    :param range_val: range of GWP constrained values (one run per value).
    :param all_data: the data into a dict of pd.DataFrames.
    :param dir: case study path and name.
    :param user_data: user_data directory.
    :return: Einv and primary energy results in pd.DataFrames:
        (Einv_op, Einv by resource category, Einv by technology category,
        primary energy by subcategory, primary energy by resource).
    """
    Einv_Res_cat_list = []
    Einv_Tech_cat_list = []
    Einv_res_list = []
    EI_by_cat_list = []
    EI_list = []
    for run in ['run_' + str(i) for i in range_val]:
        cs_temp = dir + '/' + run
        # Compute the Einv details divided into resources and technologies by categories
        df_Einv_RES_cat_temp, df_Einv_TECH_cat_temp = compute_einv_details(cs=cs_temp,
                                                                           user_data=user_data,
                                                                           all_data=all_data)
        Einv_Res_cat_list.append(df_Einv_RES_cat_temp)
        Einv_Tech_cat_list.append(df_Einv_TECH_cat_temp)
        # Einv_op only
        Einv_res_list.append(compute_einv_res(cs=cs_temp, all_data=all_data))
        # Compute the primary energy
        df_EI_cat_temp, df_EI_temp = compute_primary_energy(cs=cs_temp, user_data=user_data, run=run, all_data=all_data)
        EI_by_cat_list.append(df_EI_cat_temp)
        # Drop the label column; it is re-attached once after the loop.
        EI_list.append(df_EI_temp.drop(columns=['Subcategory']))
    # One column per scenario value; values are converted from GWh to TWh.
    cols = [i for i in range_val]
    df_Einv_op = pd.concat(Einv_res_list, axis=1) / 1000  # TWh
    df_Einv_op.columns = cols
    df_EI = pd.concat(EI_list, axis=1)
    df_EI.columns = cols
    # NOTE(review): df_EI_temp leaks from the last loop iteration — this
    # assumes the resource subcategories are identical across all runs.
    df_EI['Subcategory'] = df_EI_temp['Subcategory'].copy()
    df_Einv_RES_cat = pd.concat(Einv_Res_cat_list, axis=1) / 1000  # TWh
    df_Einv_RES_cat.columns = cols
    df_Einv_tech_cat = pd.concat(Einv_Tech_cat_list, axis=1) / 1000  # TWh
    df_Einv_tech_cat.columns = cols
    df_EI_cat = pd.concat(EI_by_cat_list, axis=1)
    df_EI_cat.columns = cols
    return df_Einv_op, df_Einv_RES_cat, df_Einv_tech_cat, df_EI_cat, df_EI
def get_gwp(cs: str):
    """
    Read gwp_breakdown.csv of a case study and total each of its columns.

    :param cs: case study directory name.
    :return: pd.Series of the summed columns (GWP_constr, GWP_op).
    """
    breakdown_path = f"{cs}/output/gwp_breakdown.csv"
    return pd.read_csv(breakdown_path, index_col=0, sep=',').sum()
def get_cost(cs: str):
    """
    Read cost_breakdown.csv of a case study and total each of its columns.

    :param cs: case study directory name.
    :return: pd.Series with the summed C_inv, C_maint and C_op values.
    """
    breakdown_path = f"{cs}/output/cost_breakdown.csv"
    return pd.read_csv(breakdown_path, index_col=0, sep=',').sum()
def gwp_computation(dir: str, range_val):
    """
    Gather the GWP of several case studies.

    :param dir: directory containing the runs.
    :param range_val: iterable of run identifiers i (folders named run_<i>).
    :return: pd.DataFrame with GWP_cons and GWP_op in MtC02eq/y.
    """
    rows = []
    for i in range_val:
        gwp_val = get_gwp(cs='{}/run_{}'.format(dir, i))
        rows.append([gwp_val['GWP_constr'], gwp_val['GWP_op']])
    # ktCO2eq/y -> MtCO2eq/y.
    return pd.DataFrame(data=np.asarray(rows) / 1000, index=[i for i in range_val],
                        columns=['GWP_cons', 'GWP_op'])
def cost_computation(dir: str, range_val):
    """
    Gather the total system costs of several case studies.

    :param dir: directory containing the runs.
    :param range_val: iterable of run identifiers i (folders named run_<i>).
    :return: pd.DataFrame with C_inv, C_maint and C_op in bEUR/y.
    """
    rows = []
    for i in range_val:
        cost_val = get_cost(cs='{}/run_{}'.format(dir, i))
        rows.append([cost_val['C_inv'], cost_val['C_maint'], cost_val['C_op']])
    # MEUR/y -> bEUR/y.
    return pd.DataFrame(data=np.asarray(rows) / 1000, index=[i for i in range_val],
                        columns=['C_inv', 'C_maint', 'C_op'])
def gwp_breakdown(dir: str, range_val):
    """
    Collect the GWP breakdown (construction and operation) of several scenarios.

    :param dir: directory containing the runs.
    :param range_val: iterable of scenario identifiers i (folders named run_<i>).
    :return: (GWP_constr, GWP_op) pd.DataFrames in MtC02/y, one column per scenario.
    """
    constr_columns = []
    op_columns = []
    for i in range_val:
        breakdown = pd.read_csv(f"{dir}/run_{i}/output/gwp_breakdown.csv", index_col=0, sep=',')
        constr_columns.append(breakdown['GWP_constr'])
        op_columns.append(breakdown['GWP_op'])
    df_gwp_const = pd.concat(constr_columns, axis=1)
    df_gwp_const.columns = [i for i in range_val]
    df_gwp_op = pd.concat(op_columns, axis=1)
    df_gwp_op.columns = [i for i in range_val]
    # ktCO2/y -> MtCO2/y.
    return df_gwp_const / 1000, df_gwp_op / 1000  # MtC02/y
def cost_breakdown(dir: str, range_val):
    """
    Collect the cost breakdown (investment, maintenance, operation) of several scenarios.

    :param dir: directory containing the runs.
    :param range_val: iterable of scenario identifiers i (folders named run_<i>).
    :return: (C_inv, C_maint, C_op) pd.DataFrames in bEUR/y, one column per scenario.
    """
    inv_columns = []
    maint_columns = []
    op_columns = []
    for i in range_val:
        # local renamed from the original's misleading `gwp`: this is cost data
        breakdown = pd.read_csv(f"{dir}/run_{i}/output/cost_breakdown.csv", index_col=0, sep=',')
        inv_columns.append(breakdown['C_inv'])
        maint_columns.append(breakdown['C_maint'])
        op_columns.append(breakdown['C_op'])
    df_cost_inv = pd.concat(inv_columns, axis=1)
    df_cost_inv.columns = [i for i in range_val]
    df_cost_maint = pd.concat(maint_columns, axis=1)
    df_cost_maint.columns = [i for i in range_val]
    df_cost_op = pd.concat(op_columns, axis=1)
    df_cost_op.columns = [i for i in range_val]
    # MEUR/y -> bEUR/y.
    return df_cost_inv / 1000, df_cost_maint / 1000, df_cost_op / 1000  # bEUR/y
def gwp_const_per_category(df_gwp_const: pd.DataFrame, user_data: str):
    """
    Build a dict with technology subcategories as keys.

    For each subcategory the value is a pd.DataFrame with the GWP_const of the
    corresponding technologies for several scenarios, or None when no
    technology of that subcategory appears in df_gwp_const.

    :param df_gwp_const: GWP_const raw data (columns = technologies, rows = scenarios).
    :param user_data: path to user_data (must contain aux_technologies.csv).
    :return: dict mapping subcategory -> pd.DataFrame or None.
    """
    df_aux_tech = pd.read_csv(user_data + "/aux_technologies.csv", index_col=0)
    gwp_const_by_tech_cat = dict()
    # dict.fromkeys keeps the first-appearance order of the subcategories.
    for subcat in dict.fromkeys(df_aux_tech['Subcategory']):
        techs_present = [tech for tech in df_aux_tech.index[df_aux_tech['Subcategory'] == subcat]
                         if tech in list(df_gwp_const.columns)]
        if techs_present:
            gwp_const_by_tech_cat[subcat] = pd.concat(
                [df_gwp_const[tech] for tech in techs_present], axis=1)
        else:
            gwp_const_by_tech_cat[subcat] = None
    return gwp_const_by_tech_cat
def retrieve_non_zero_val(df: pd.DataFrame):
    """
    Drop the columns of a DataFrame that are zero for every row.

    :param df: DataFrame of shape (n_scenarios, n_cols).
    :return: copy of df keeping only columns with at least one non-zero value
        (shape (n_scenarios, n_cols_new) with n_cols_new <= n_cols).
    """
    kept_columns = [col for col in df.columns if (df[col] != 0).any()]
    return df[kept_columns].copy()
def res_assets_capacity(range_val, dir: str):
    """
    Collect the installed capacities of the assets for several runs.

    :param range_val: iterable of run identifiers i (folders named run_<i>).
    :param dir: case study path and name.
    :return: pd.DataFrame of installed capacities ('f' column of assets.csv),
        one column per run, units row removed.
    """
    capacity_columns = []
    for i in range_val:
        df_assets_run = pd.read_csv(f"{dir}/run_{i}/output/assets.csv", index_col=0)
        capacity_columns.append(df_assets_run['f'])
    df_assets = pd.concat(capacity_columns, axis=1)
    df_assets.index.name = ''
    df_assets.columns = [i for i in range_val]
    # The 'UNITS' row holds unit labels, not numbers -> drop before casting.
    return df_assets.drop(index='UNITS').astype(float)
if __name__ == '__main__':
    # Load configuration into a dict
    config = load_config(config_fn='config.yaml')

    # Loading data
    all_data = es.import_data(user_data_dir=config['user_data'], developer_data_dir=config['developer_data'])

    # Modify the minimum capacities of some technologies
    for tech in config['Technologies']['f_min']:
        all_data['Technologies']['f_min'].loc[tech] = config['Technologies']['f_min'][tech]

    # Select the case-study family: either the runs where total GWP is
    # constrained, or the runs where only operational GWP is constrained.
    GWP_tot = True
    if GWP_tot:
        dir_name = 're_be_GWP_tot'
    else:
        dir_name = 're_be_GWP_op'

    # Read case study name
    run = 'run_100'
    cs_test = f"{config['case_studies_dir']}/{dir_name + '_0/' + run}"

    # Compute the FEC from the year_balance.csv
    df_year_balance = pd.read_csv(f"{cs_test}/output/year_balance.csv", index_col=0)
    fec_details, fec_tot = compute_fec(data=df_year_balance, user_data=config['user_data'])
    fec_tot_val = sum(fec_tot.values()) / 1000  # TWh

    # Compute the FEC from SANKEY and cross-check both FEC estimates.
    ef = get_FEC_from_sankey(case_study_dir=cs_test, col=run)
    fec_sankey = ef.sum()
    einv = get_total_einv(cs_test) / 1000  # TWh
    print('FEC SANKEY %.2f vs year_balance %.2f [TWh/y]' % (fec_sankey, fec_tot_val))
    print('EROI %.2f %.2f' % (fec_sankey / einv, fec_tot_val / einv))
    GWP_val = get_gwp(cs=cs_test)
    print('GWP_cons %.1f GWP_op %.1f [ktC02/y]' % (GWP_val['GWP_constr'], GWP_val['GWP_op']))

    # Compute Einv by resources and technologies
    df_inv_res_by_subcat, df_inv_tech_by_cat = compute_einv_details(cs=cs_test, user_data=config['user_data'], all_data=all_data)

    # Primary Energy by subcategory
    df_primary_energy_subcat, df_primary_energy = compute_primary_energy(cs=cs_test, user_data=config['user_data'], run=run, all_data=all_data)
# This file is modified from the screenutils Python module
# https://pypi.org/project/screenutils/
# https://github.com/Christophe31/screenutils
# -*- coding:utf-8 -*-
#
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the GNU Public License 2 or upper.
# Please ask if you wish a more permissive license.
try:
from commands import getoutput
except Exception:
from subprocess import getoutput
from os import system
from time import sleep
class ScreenNotFoundError(Exception):
    """Raised when a gnu-screen session cannot be located."""

    def __init__(self, message, screen_name):
        # Keep the offending session name accessible to callers.
        self.screen_name = screen_name
        full_message = '{0} Screen "{1}" not found'.format(message, screen_name)
        super(ScreenNotFoundError, self).__init__(full_message)
def list_screens():
    """List all the existing screens and build a Screen instance for each."""
    sessions = []
    for line in getoutput("screen -ls").split('\n'):
        # Session lines contain a tab: "\t<pid>.<name>\t(<status>)".
        if "\t" not in line:
            continue
        session_name = ".".join(line.split(".")[1:]).split("\t")[0]
        if session_name:
            sessions.append(Screen(session_name))
    return sessions
class Screen(object):
    """Represents a gnu-screen object.

    Wraps the `screen` command-line tool: every operation shells out via
    system()/getoutput() and parses the textual output of `screen -ls`.

    >>> s=Screen("screenName", initialize=True)
    >>> s.name
    'screenName'
    >>> s.exists
    True
    >>> s.state
    >>> s.send_commands("man -k keyboard")
    >>> s.kill()
    >>> s.exists
    False
    """

    def __init__(self, name, initialize=False):
        # Session name used in every `screen` shell command below.
        self.name = name
        # Cached numeric session id; filled lazily by _set_screen_infos().
        self._id = None
        # Cached status string (e.g. "Detached"); set by _set_screen_infos().
        self._status = None
        if initialize:
            self.initialize()

    @property
    def id(self):
        """Return the identifier of the screen as string."""
        if not self._id:
            # Lazily parse `screen -ls` the first time the id is needed.
            self._set_screen_infos()
        return self._id

    @property
    def status(self):
        """Return the status of the screen as string."""
        # Always re-parse: the status can change between calls.
        self._set_screen_infos()
        return self._status

    @property
    def exists(self):
        """Tell if the screen session exists or not."""
        # Parse the screen -ls call, to find if the screen exists or not.
        # " 28062.G.Terminal (Detached)"
        lines = getoutput("screen -ls").split('\n')
        return self.name in [
            ".".join(l.split(".")[1:]).split("\t")[0]
            for l in lines
            if self.name in l
        ]

    def initialize(self):
        """Initialize a screen, if does not exists yet."""
        if not self.exists:
            self._id = None
            # Detach the screen once attached, on a new thread.
            # support Unicode (-U),
            # attach to a new/existing named screen (-R).
            # ORIGINAL
            # Thread(target=self._delayed_detach).start()
            # system('screen -s sh -UR -S ' + self.name)
            # CUSTOM: start the session directly detached (-d -m) instead.
            system('screen -d -m -S ' + self.name)

    def interrupt(self):
        """Insert CTRL+C in the screen session."""
        # \003 is the octal escape of the Ctrl-C (ETX) character.
        self._screen_commands("eval \"stuff \\003\"")

    def kill(self):
        """Kill the screen applications then close the screen."""
        self._screen_commands('quit')

    def detach(self):
        """Detach the screen."""
        self._check_exists()
        system("screen -d " + self.id)

    def send_commands(self, *commands):
        """Send commands to the active gnu-screen."""
        self._check_exists()
        for command in commands:
            # use single quote unless that is a part of the command
            if "'" in command:
                q = "\""
            else:
                q = "\'"
            # 'stuff' types the text; the second command sends \015 (Enter).
            self._screen_commands(
                'stuff {q}{c}{q}'.format(q=q, c=command),
                'eval "stuff \\015"'
            )

    def add_user_access(self, unix_user_name):
        """Allow to share your session with an other unix user."""
        self._screen_commands('multiuser on', 'acladd ' + unix_user_name)

    def _screen_commands(self, *commands):
        """Allow to insert generic screen specific commands."""
        self._check_exists()
        for command in commands:
            cmd = 'screen -x {0}.{1} -p 0 -X {2}'.format(self.id, self.name, command)
            system(cmd)
            # Small pause so consecutive commands reach the session in order.
            sleep(0.02)

    def _check_exists(self, message="Error code: 404."):
        """Check whereas the screen exist. if not, raise an exception."""
        if not self.exists:
            raise ScreenNotFoundError(message, self.name)

    def _set_screen_infos(self):
        """Set the screen information related parameters."""
        if self.exists:
            line = ""
            for l in getoutput("screen -ls").split("\n"):
                # NOTE(review): the last clause is a chained comparison —
                # `self.name == X in l` means `(self.name == X) and (X in l)`.
                # Presumably intended, but worth confirming upstream.
                if (
                    l.startswith('\t') and
                    self.name in l and
                    self.name == ".".join(l.split('\t')[1].split('.')[1:]) in l
                ):
                    line = l
            if not line:
                raise ScreenNotFoundError("While getting info.", self.name)
            # A matched line is "\t<id>.<name>\t(<date>)\t(<status>)" or
            # "\t<id>.<name>\t(<status>)"; the [1:-1] slices strip parentheses.
            infos = line.split('\t')[1:]
            self._id = infos[0].split('.')[0]
            if len(infos) == 3:
                self._date = infos[1][1:-1]
                self._status = infos[2][1:-1]
            else:
                self._status = infos[1][1:-1]

    def _delayed_detach(self):
        # Only referenced by the commented-out variant of initialize() above.
        sleep(0.5)
        self.detach()

    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, self.name)
|
import re
import traceback
import xml.etree.ElementTree as etree
from collections import defaultdict, Counter
from bs4 import BeautifulSoup, PageElement, NavigableString, CData, Tag
from tqdm import tqdm
import pprint
import csv
import numpy as np
from utils import *
from typing import Set
class QA_Pairer():
    """Turns a StackExchange Posts dump (plus optional Comments dump) into a
    text dataset of question/answer documents.

    Questions are buffered in memory until all of their answers have been
    seen, then rendered (question body, comments, top answers) and written
    out in the configured output format.
    """

    # splits a Tags attribute like "<python><regex>" into tag names
    tag_split_re = re.compile(r"[\<\>]+")
    # remove @token if it occurs at the beginning of the string or preceeded by whitespace
    remove_username_re = re.compile(r"(^|\s+)@\w+")
    # per-(site, post kind) score buckets used to derive a discrete 'dscore'
    threshold_lower_bounds = {
        # ('stackoverflow', 'comments'): [0, 0, 0, 1, 1, 2], # not enough have a positive score to use
        ('stackoverflow', 'questions'): [0, 1, 2, 3, 6, 11],
        ('stackoverflow', 'answers'): [0, 1, 2, 4, 7, 14],
    }

    def __init__(self, post_path,
                 name=None,
                 out_folder="out",
                 out_format="txt",
                 archiver=None,
                 in_format="xml",
                 comment_path=None,
                 max_responses=3,
                 max_comments=5,
                 min_score=3,
                 attribute_move_probability=0.5,
                 shard_number=None,
                 num_shards=None,
                 tokenizer=None,
                 count_tokens=False):
        """Makes a text dataset from StackExchange dumps.

        :param post_path: path to the Posts dump (xml or csv).
        :param name: dataset name; derived from post_path when None.
        :param out_folder: folder to save txt files to.
        :param out_format: one of txt, lm_dataformat, zip, none, fairseq.
        :param archiver: output sink, required for lm_dataformat/zip/fairseq.
        :param in_format: input format, "csv" or "xml".
        :param comment_path: optional path to the Comments dump.
        :param max_responses: max number of answers rendered per question.
        :param max_comments: max number of comments rendered per post.
        :param min_score: min score required to parse a non-accepted answer.
        :param attribute_move_probability: forwarded to make_tagged().
        :param shard_number: index of the shard to process (with num_shards).
        :param num_shards: total number of shards, or None for no sharding.
        :param tokenizer: tokenizer for fairseq output / token counting.
        :param count_tokens: whether to accumulate per-tag token counts.
        """
        self.post_path = post_path
        self.comment_path = comment_path
        if name is None:
            self.name = os.path.dirname(post_path).replace("dumps/", "")
        else:
            self.name = name
        # dict to save questions; defaultdict so missing ids read as None
        self.questions = defaultdict(lambda: None, {})
        # folder to save txt files to
        self.out_folder = out_folder
        # min_score required to parse an answer
        self.min_score = min_score
        self.max_responses = max_responses
        self.max_comments = max_comments
        self.attribute_move_probability = attribute_move_probability
        assert in_format in ["csv", "xml"], "In format not recognized"
        self.in_format = in_format
        assert out_format in ["txt", "lm_dataformat", "zip", "none", "fairseq"], "Out format not recognized"
        self.out_format = out_format
        if out_format in ["lm_dataformat", "zip", "fairseq"]:
            assert archiver is not None
            self.ar = archiver
        self.tag_counter = Counter()
        self.count_tokens = count_tokens
        self.tokenizer = tokenizer
        self.token_counter = Counter()
        self.token_count = 0
        self.question_count = 0
        self.answer_count = 0
        self.shard_number = shard_number
        self.num_shards = num_shards
        # either None or (if comment_path was passed) a dict
        # Dict[PostId: str, List[comment text: str]]
        self.comment_dict = self.parse_comments()

    def make_iter(self, file):
        """Yield one defaultdict of attributes per row of a dump file."""
        if self.in_format == 'csv':
            with open(file, 'r') as f:
                # strip NUL bytes which break the csv module
                f = (line.replace('\0', '') for line in f)
                reader = csv.DictReader(f)
                for row in reader:
                    record = defaultdict(lambda: None, {k: None if v == '' else v for k, v in row.items()})
                    yield record
        else:
            for event, elem in etree.iterparse(file, events=('end',)):
                if elem.tag == 'row':
                    record = defaultdict(lambda: None, elem.attrib)
                    yield record
                    # free the parsed element to keep memory bounded
                    elem.clear()

    def parse_comments(self):
        """Parse the comments dump into {PostId: [comment text, ...]}.

        Returns None when no comment_path was supplied, matching the
        documented contract of self.comment_dict (previously this crashed
        trying to iterate a None path).
        """
        if self.comment_path is None:
            return None
        comment_dict = defaultdict(list)
        for record in tqdm(self.make_iter(self.comment_path), desc="Parsing {} comment file".format(self.name), ncols=120):
            text = record["Text"]
            if text is None:
                continue
            if self.in_format == 'xml':
                # strip HTML markup from the comment body
                text = BeautifulSoup(text, "html.parser").get_text()
            # drop @username mentions
            text = self.remove_username_re.sub("", text)
            post_id = record["PostId"]
            comment_dict[post_id].append(text)
        self.comment_dict = comment_dict
        return comment_dict

    def main(self):
        """iterates through SE xmls and:

        - stores PostTypeId="1" with AcceptedAnswerIds / Answers.
        - when an AcceptedAnswerId or Answer > min_score is reached, it should:
            > concat the Question & Accepted answer
            > Clean markup / HTML
            > Output to txt file
            > Delete from memory
        """
        os.makedirs(self.out_folder, exist_ok=True)
        # When sharding, first scan the dump to collect the question ids that
        # belong to this shard.
        if self.shard_number is not None and self.num_shards is not None:
            question_ids = [int(record["Id"]) for record in tqdm(self.make_iter(self.post_path), desc="Get post ids for sharding", ncols=120) if is_question(record)]
            shard_question_ids = set(np.array_split(question_ids, self.num_shards)[self.shard_number])
        else:
            shard_question_ids = None
        for record in tqdm(self.make_iter(self.post_path), desc="Parsing {} posts".format(self.name), ncols=120):
            if is_question(record):
                if shard_question_ids is not None:
                    question_id = int(record["Id"])
                    if question_id not in shard_question_ids:
                        continue
                    shard_question_ids.remove(question_id)
                if has_answers(record):
                    trim_attribs(record, "question")
                    self.questions[record["Id"]] = record
                else:
                    # if the question has no answers, discard it
                    continue
            elif is_answer(record):
                # if is accepted answer, append answer Body to relevant questions "AcceptedAnswer" field
                # if the answer's score > min_score
                # append the answer to the relevant question's OtherAnswers dict
                self.add_answer(record)
                self.check_complete(record)
        print("processing complete")
        self.print_status()
        if shard_question_ids is not None and len(shard_question_ids) != 0:
            # bug fix: the f-prefix was missing, so the {len(...)} placeholder
            # was printed literally instead of the remaining-id count.
            print(f"warning: did not find {len(shard_question_ids)} questions ids that should have been in this shard (below):")
            print(' '.join(str(x) for x in sorted(shard_question_ids)))

    def is_above_threshold(self, a_attribs):
        """
        Determines whether an answer is above the min_score threshold

        :param a_attribs: Answer's attribute dict
        :return: True when the answer's Score is present and >= min_score.
        """
        assert is_answer(a_attribs), "Must be an answer to be above threshold"
        if a_attribs["Score"] is not None:
            if int(a_attribs["Score"]) >= self.min_score:
                return True
        return False

    def add_answer(self, a_attribs):
        """
        Adds answer to its parent question in self.questions if it's either an accepted answer or above self.min_score.
        If answer is an accepted answer, it gets appended to the AcceptedAnswer field, otherwise it gets appended to
        OtherAnswers.

        Also increments the question's 'ParsedAnswers' field. When ParsedAnswers = AnswerCount, the question is deleted
        from memory and saved to a text file.

        :param a_attribs: Answer's attribute dict
        """
        assert is_answer(a_attribs), "Must be an answer to add to parent"
        if a_attribs is not None and self.questions[a_attribs["ParentId"]] is not None:
            if is_accepted_answer(a_attribs, self.questions[a_attribs["ParentId"]]):
                self.questions[a_attribs["ParentId"]]["Answers"][a_attribs["Id"]] = trim_attribs(a_attribs, "answer")
                self.questions[a_attribs["ParentId"]]["ParsedAnswers"] += 1
            elif self.is_above_threshold(a_attribs):
                if a_attribs["Id"] is not None:
                    parent = self.questions[a_attribs["ParentId"]]
                    if parent is not None:
                        self.questions[a_attribs["ParentId"]]["Answers"][a_attribs["Id"]] = trim_attribs(a_attribs, "answer")
                        self.questions[a_attribs["ParentId"]]["ParsedAnswers"] += 1
                else:
                    # answer without an Id still counts toward completion
                    self.questions[a_attribs["ParentId"]]["ParsedAnswers"] += 1
            else:
                # below threshold: count it so the question can complete
                self.questions[a_attribs["ParentId"]]["ParsedAnswers"] += 1

    def write(self, out_name, out_str):
        """Write one rendered document to the configured output sink."""
        if self.out_format == "none":
            pass
        elif self.out_format == "fairseq":
            assert self.tokenizer is not None
            raw_file, bpe_file = self.ar
            try:
                line = filter_newlines(out_str)
            except:
                line = filter_newlines(handle_unicode_errors(out_str))
            raw_file.write(line)
            raw_file.write("\n\n")
            bpe_file.write(' '.join(str(ix) for ix in self.tokenizer.encode(line).ids))
            bpe_file.write("\n\n")
        elif self.out_format == "txt":
            fname = "{}/{}".format(self.out_folder, out_name)
            with open(fname, 'w') as f:
                try:
                    f.write(filter_newlines(out_str))
                except:
                    f.write(filter_newlines(handle_unicode_errors(out_str)))
        elif self.out_format == "zip":
            try:
                self.ar.writestr(out_name, filter_newlines(out_str))
            except:
                self.ar.writestr(out_name, filter_newlines(handle_unicode_errors(out_str)))
        elif self.out_format == "lm_dataformat":
            try:
                self.ar.add_data(filter_newlines(out_str), meta={
                    'name': out_name})
            except:
                self.ar.add_data(filter_newlines(handle_unicode_errors(out_str)), meta={
                    'name': out_name})

    @classmethod
    def get_tags(cls, attrib):
        """Return the list of tag names parsed from a post's Tags attribute."""
        if "Tags" not in attrib or attrib["Tags"] is None:
            return []
        tags = cls.tag_split_re.split(attrib["Tags"])
        return [t for t in tags if bool(t)]

    def update_tag_and_token_counts(self, tags, out_str):
        """Accumulate per-tag document counts and (optionally) token counts."""
        self.tag_counter.update(tags)
        if self.count_tokens and self.tokenizer is not None:
            tokens = self.tokenizer.encode(out_str).ids
            token_count = len(tokens)
            for tag in tags:
                self.token_counter[tag] += token_count
            self.token_count += token_count

    def print_status(self):
        """Print running counts of questions, answers, tags and tokens."""
        print(f"{self.question_count:_} questions")
        print(f"{self.answer_count:_} answers")
        if self.question_count:
            # guard added: avoid ZeroDivisionError when nothing was parsed
            print(f"{self.answer_count / self.question_count:.2f} answers / question")
        print("common tags:")
        underscore_print_counter(self.tag_counter, n=20)
        if self.tokenizer is not None:
            print(f"total tokens: {self.token_count:_}")
            underscore_print_counter(self.token_counter, n=20)
        print()

    def check_complete(self, a_attribs):
        """
        checks if the parent question of the previously added answer has no future answers, and if so,
        removes from dict and prints to file.
        """
        keys_to_del = []
        parent = self.questions[a_attribs["ParentId"]]
        if a_attribs is not None and parent is not None:
            if parent["AnswerCount"] is not None and parent["ParsedAnswers"] is not None:
                if int(parent["ParsedAnswers"]) == int(parent['AnswerCount']):
                    keys_to_del.append(a_attribs["ParentId"])
                    if parent["Answers"] is not None and len(parent["Answers"]) > 0:
                        out_name = "{}_{}.txt".format(self.name, parent["Id"].zfill(10))
                        out_strs = []
                        question_body = ""
                        question_attrs = {}
                        tags = self.get_tags(parent)
                        # NOTE(review): `random` is expected to be provided by
                        # `from utils import *` — confirm it is exported there.
                        random.shuffle(tags)
                        tag_str = ','.join(tags)
                        if tag_str:
                            question_attrs['tags'] = tag_str
                        if (self.name, 'questions') in self.threshold_lower_bounds:
                            question_votes = int(parent['Score'])
                            question_attrs['dscore'] = threshold(self.threshold_lower_bounds[(self.name, 'questions')], question_votes)
                        # prefer a pre-parsed title/body when available
                        if parent["TitleParsed"] is not None:
                            title_parsed = parent["TitleParsed"]
                            question_body += title_parsed
                        elif parent["Title"] is not None:
                            title_parsed = BeautifulSoup(parent["Title"], "html.parser").get_text()
                            question_body += title_parsed
                        if parent["BodyParsed"] is not None:
                            body_parsed = parent["BodyParsed"]
                            if question_body:
                                question_body += '\n\n{}'.format(body_parsed)
                            else:
                                question_body = body_parsed
                        elif parent["Body"] is not None:
                            body_parsed = CodePreservingBeautifulSoup(parent["Body"], "html.parser").get_text()
                            if question_body:
                                question_body += '\n\n{}'.format(body_parsed)
                            else:
                                question_body = body_parsed
                        question_body = self.remove_username_re.sub("", question_body)
                        out_strs.append(make_tagged("q", question_body.strip(), question_attrs, attribute_move_probability=self.attribute_move_probability))

                        def add_comments(post_id):
                            # append up to max_comments rendered comments of a post
                            if self.comment_dict is not None:
                                comments = self.comment_dict[post_id][:self.max_comments]
                                comment_str = '\n'.join(make_tagged('c', comment.strip(), {}) for comment in comments)
                                if comment_str:
                                    out_strs.append(comment_str)

                        add_comments(parent["Id"])
                        if parent["Answers"] is not None:
                            # render the best-scored answers first
                            answers = sorted(parent["Answers"].items(), key=lambda t: int(t[1]["Score"]), reverse=True)
                            count = 0
                            for key, answer in answers:
                                if count >= self.max_responses:
                                    break
                                if answer["BodyParsed"] is not None:
                                    answer_body_parsed = answer["BodyParsed"]
                                elif answer["Body"] is not None:
                                    answer_body_parsed = CodePreservingBeautifulSoup(answer["Body"], "html.parser").get_text()
                                else:
                                    continue
                                answer_body_parsed = self.remove_username_re.sub("", answer_body_parsed)
                                answer_attrs = {}
                                if (self.name, 'answers') in self.threshold_lower_bounds:
                                    answer_votes = int(answer['Score'])
                                    answer_attrs['dscore'] = threshold(self.threshold_lower_bounds[(self.name, 'answers')], answer_votes)
                                if tag_str:
                                    answer_attrs['tags'] = tag_str
                                out_strs.append(make_tagged("a", answer_body_parsed.strip(), answer_attrs, attribute_move_probability=self.attribute_move_probability))
                                add_comments(answer["Id"])
                                count += 1
                                self.answer_count += 1
                        out_str = '\n'.join(out_strs)
                        self.question_count += 1
                        tags = self.get_tags(parent)
                        self.update_tag_and_token_counts(tags, out_str)
                        self.write(out_name, out_str)
                        if self.question_count % 100_000 == 0:
                            self.print_status()
        for key in keys_to_del:
            self.questions.pop(key, None)
class CodePreservingBeautifulSoup(BeautifulSoup):
    """
    modified from https://stackoverflow.com/a/42802393, with changes for beautifulsoup 4.10

    Behaves like BeautifulSoup for get_text(), except that tags listed in
    `tags_to_keep` (e.g. <code>) are emitted as their raw markup string, so
    code blocks survive the HTML-to-text conversion.
    """

    # tags whose raw markup is yielded verbatim instead of their inner text
    tags_to_keep = {'code'}
    # if True, only preserve kept tags whose content spans multiple lines
    keep_only_with_newlines = True

    def _all_strings(self, strip=False, types=BeautifulSoup.default):# strip=False, types=(NavigableString, CData)):
        """Yield the strings used by get_text(), preserving kept tags."""
        # BeautifulSoup.default is bs4's sentinel meaning "use the soup's
        # interesting_string_types" (bs4 >= 4.10 behaviour).
        if types is self.default:
            types = self.interesting_string_types
        for descendant in self.descendants:
            # return inner text within keep_tags, if we encounter them
            if isinstance(descendant, Tag) and descendant.name in self.tags_to_keep and \
                    ((not self.keep_only_with_newlines) or ('\n' in str(descendant))):
                #yield f"<|{descendant.name}|>{descendant.get_text()}</|{descendant.name}|>"
                yield str(descendant)
            # skip a text node directly inside a kept tag (its markup was
            # already yielded above, so this avoids duplicating the content)
            if isinstance(descendant, NavigableString) and descendant.parent.name in self.tags_to_keep and \
                    ((not self.keep_only_with_newlines) or ('\n' in str(descendant))):
                continue
            # default behavior
            if (types is None and not isinstance(descendant, NavigableString)):
                continue
            descendant_type = type(descendant)
            if isinstance(types, type):
                if descendant_type is not types:
                    # We're not interested in strings of this type.
                    continue
            elif types is not None and descendant_type not in types:
                # We're not interested in strings of this type.
                continue
            if strip:
                descendant = descendant.strip()
                if len(descendant) == 0:
                    continue
            yield descendant
|
import requests
from conftest import print_timing
from fixtures import session
from fixtures import base_url
import os
from maxfreq import max_freq
from conftest import print_in_shell
from conftest import getRandomFilter
class TestDelete:
    """Performance test: create a dependency-map diagram, look it up, delete it."""

    @max_freq(50/3600)
    @print_timing
    def test_delete_diagram(self, base_url, session):
        # Prepare
        # request list of diagrams using the session id
        # NOTE(review): HOSTNAME is read but never used in this test.
        HOSTNAME = os.environ.get('application_hostname')

        ##CREATE DIAGRAM
        # Get user
        diagrams_response = session.get('/rest/dependency-map/1.0/user')
        assert diagrams_response.status_code == 200
        userKey = diagrams_response.json()["key"]

        # Get filter key
        diagrams_response = session.get('/rest/dependency-map/1.0/filter?searchTerm=&page=0&resultsPerPage=25')
        assert diagrams_response.status_code == 200

        # Get field status
        diagrams_response = session.get('/rest/dependency-map/1.0/field/status')
        assert diagrams_response.status_code == 200
        field = diagrams_response.json()["id"]

        # Get field priority
        diagrams_response = session.get('/rest/dependency-map/1.0/field/priority')
        assert diagrams_response.status_code == 200
        field2 = diagrams_response.json()["id"]

        # Get filterKey randomly among the projects in the project file
        filterKey = getRandomFilter(session)

        # Create diagram
        payload = {'name': "F100", 'author': userKey,
                   'lastEditedBy': userKey, 'layoutId': 2, 'filterKey': filterKey,
                   'boxColorFieldKey': field, 'groupedLayoutFieldKey': field,
                   'matrixLayoutHorizontalFieldKey': field, 'matrixLayoutVerticalFieldKey': field2}
        diagrams_response = session.post('/rest/dependency-map/1.0/diagram',
                                         json=payload)
        assert diagrams_response.status_code == 200
        diagramId = diagrams_response.json()['id']
        print("Hej")

        ##FIND DIAGRAM
        # To make the test thread-safe, the diagram created above (not a
        # shared one) is looked up and then removed.
        # Get user
        diagrams_response = session.get('/rest/dependency-map/1.0/user')
        assert diagrams_response.status_code == 200
        userKey = diagrams_response.json()["key"]

        # Get filter key
        diagrams_response = session.get('/rest/dependency-map/1.0/filter?searchTerm=&page=0&resultsPerPage=25')
        assert diagrams_response.status_code == 200

        # Get favoritDiagram
        diagrams_response = session.get('/rest/dependency-map/1.0/favoriteDiagram')
        assert diagrams_response.status_code == 200

        # Get diagrams with filterKey
        diagrams_response = session.get('/rest/dependency-map/1.0/diagram?filterKey=' + filterKey + '&searchTerm=&sortBy=name&reverseSort=&startAt=0&maxResults=50')
        assert diagrams_response.status_code == 200

        # Get filter
        diagrams_response = session.get('/rest/api/2/filter/' + filterKey)
        assert diagrams_response.status_code == 200

        # Get diagram
        diagrams_response = session.get('/rest/dependency-map/1.0/diagram/' + str(diagramId))
        assert diagrams_response.status_code == 200

        ##REMOVE
        # remove
        diagrams_response2 = session.delete('/rest/dependency-map/1.0/diagram/' + str(diagramId))
        assert diagrams_response2.status_code == 200
        print_in_shell("Diagram removed" + str(diagramId))
        #print_in_shell( diagrams_response.json() );

        # get all diagrams after delete
        diagrams_response = session.get('/rest/dependency-map/1.0/diagram?filterKey=' + filterKey + '&searchTerm=&sortBy=name&reverseSort=&startAt=0&maxResults=50')
        assert diagrams_response.status_code == 200
|
<gh_stars>0
# Face alignment and crop demo
# Uses MTCNN, FaceBoxes or Retinaface as a face detector;
# Support different backbones, include PFLD, MobileFaceNet, MobileNet;
# Retinaface+MobileFaceNet gives the best peformance
# <NAME> (<EMAIL>), Feb. 2021
#test
from __future__ import division
import argparse
import torch
from dataset import Landmark
import os
import cv2
import numpy as np
from common.utils import BBox,drawLandmark,drawLandmark_multiple
from models.basenet import MobileNet_GDConv
from models.pfld_compressed import PFLDInference
from models.mobilefacenet import MobileFaceNet
from FaceBoxes import FaceBoxes
from Retinaface import Retinaface
from PIL import Image
import matplotlib.pyplot as plt
from MTCNN import detect_faces
import glob
import time
from utils.align_trans import get_reference_facial_points, warp_and_crop_face
parser = argparse.ArgumentParser(description='PyTorch face landmark')
# Datasets
parser.add_argument('--backbone', default='MobileFaceNet', type=str,
                    help='choose which backbone network to use: MobileNet, PFLD, MobileFaceNet')
parser.add_argument('--detector', default='Retinaface', type=str,
                    help='choose which face detector to use: MTCNN, FaceBoxes, Retinaface')
args = parser.parse_args()
# ImageNet channel statistics; only the MobileNet backbone normalizes its
# input with these (see the main loop below).
mean = np.asarray([ 0.485, 0.456, 0.406 ])
std = np.asarray([ 0.229, 0.224, 0.225 ])
#basepath
#basepath = "/home/william/Documents/Work/pytorch_face_landmark"
# Output file receiving one line per image: "<image name> <x0> <y0> ..."
#landmark_file = open("/home/william/Documents/Work/pytorch_face_landmark/landmarks_trump.txt", "w")
# NOTE(review): hard-coded absolute path -- breaks on any other machine;
# consider promoting this to a CLI argument.
landmark_file = open("/home/william/Documents/Work/test-run/FaceStuff/source_landmarks.txt", "w")
#the files to use as images
source_images_filenames = "samples/12--Group/*.jpg"
#source_images_filenames = "/samples/other/*.jpg"
crop_size= 112
# Reference 5-point facial template for alignment, scaled to the crop size.
scale = crop_size / 112.
reference = get_reference_facial_points(default_square = True) * scale
# Load checkpoints onto the GPU when available, otherwise map them to CPU.
if torch.cuda.is_available():
    map_location=lambda storage, loc: storage.cuda()
else:
    map_location='cpu'
def load_model():
    """Build the landmark-regression backbone selected by ``args.backbone``
    and restore its pretrained weights from the ``checkpoint/`` directory.

    Returns:
        torch.nn.Module: model with ``state_dict`` loaded.

    Raises:
        ValueError: if ``args.backbone`` is not one of
            MobileNet, PFLD, MobileFaceNet.
    """
    if args.backbone == 'MobileNet':
        # 136 outputs = 68 landmarks * (x, y)
        model = MobileNet_GDConv(136)
        model = torch.nn.DataParallel(model)
        # download model from https://drive.google.com/file/d/1Le5UdpMkKOTRr1sTp4lwkw8263sbgdSe/view?usp=sharing
        checkpoint = torch.load('checkpoint/mobilenet_224_model_best_gdconv_external.pth.tar', map_location=map_location)
        print('Use MobileNet as backbone')
    elif args.backbone == 'PFLD':
        model = PFLDInference()
        # download from https://drive.google.com/file/d/1gjgtm6qaBQJ_EY7lQfQj3EuMJCVg9lVu/view?usp=sharing
        checkpoint = torch.load('checkpoint/pfld_model_best.pth.tar', map_location=map_location)
        print('Use PFLD as backbone')
    elif args.backbone == 'MobileFaceNet':
        model = MobileFaceNet([112, 112], 136)
        # download from https://drive.google.com/file/d/1T8J73UTcB25BEJ_ObAJczCkyGKW5VaeY/view?usp=sharing
        checkpoint = torch.load('checkpoint/mobilefacenet_model_best.pth.tar', map_location=map_location)
        print('Use MobileFaceNet as backbone')
    else:
        # Fail fast: previously an unknown backbone only printed an error and
        # then crashed below with UnboundLocalError on ``model``.
        raise ValueError('Unsupported backbone: %s' % args.backbone)
    model.load_state_dict(checkpoint['state_dict'])
    return model
if __name__ == '__main__':
    # MobileNet was trained on 224x224 crops; PFLD and MobileFaceNet on
    # 112x112 (see load_model).
    if args.backbone == 'MobileNet':
        out_size = 224
    else:
        out_size = 112
    model = load_model()
    model = model.eval()
    filenames = glob.glob(source_images_filenames)
    for imgname in filenames:
        print(imgname)
        # NOTE(review): the image name is written even when no face is later
        # detected, leaving a line without landmarks or newline in the output
        # file -- kept as-is to preserve the existing file format.
        landmark_file.write(str(imgname) + " ")
        img = cv2.imread(imgname)
        org_img = Image.open(imgname)
        height, width, _ = img.shape
        # ---- Face detection ----
        if args.detector == 'MTCNN':
            # perform face detection using MTCNN
            image = Image.open(imgname)
            faces, landmarks = detect_faces(image)
        elif args.detector == 'FaceBoxes':
            face_boxes = FaceBoxes()
            faces = face_boxes(img)
        elif args.detector == 'Retinaface':
            retinaface = Retinaface.Retinaface()
            faces = retinaface(img)
        else:
            # Fail fast: previously an unknown detector only printed an error
            # and then crashed on an undefined ``faces`` (or silently reused
            # the previous image's detections).
            raise ValueError('Unsupported detector: %s' % args.detector)
        if len(faces) == 0:
            print('NO face is detected!')
            continue
        for k, face in enumerate(faces):
            if face[4] < 0.9:  # remove low confidence detection
                continue
            # Expand the detection box to a square with a 20% margin.
            x1 = face[0]
            y1 = face[1]
            x2 = face[2]
            y2 = face[3]
            w = x2 - x1 + 1
            h = y2 - y1 + 1
            size = int(min([w, h])*1.2)
            cx = x1 + w//2
            cy = y1 + h//2
            x1 = cx - size//2
            x2 = x1 + size
            y1 = cy - size//2
            y2 = y1 + size
            # How far the square box sticks out of the image on each side.
            dx = max(0, -x1)
            dy = max(0, -y1)
            x1 = max(0, x1)
            y1 = max(0, y1)
            edx = max(0, x2 - width)
            edy = max(0, y2 - height)
            x2 = min(width, x2)
            y2 = min(height, y2)
            new_bbox = list(map(int, [x1, x2, y1, y2]))
            new_bbox = BBox(new_bbox)
            cropped = img[new_bbox.top:new_bbox.bottom, new_bbox.left:new_bbox.right]
            # Pad with black where the square box left the image.
            if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
                cropped = cv2.copyMakeBorder(cropped, int(dy), int(edy), int(dx), int(edx), cv2.BORDER_CONSTANT, 0)
            cropped_face = cv2.resize(cropped, (out_size, out_size))
            if cropped_face.shape[0] <= 0 or cropped_face.shape[1] <= 0:
                continue
            test_face = cropped_face.copy()
            test_face = test_face/255.0
            if args.backbone == 'MobileNet':
                # Only MobileNet was trained with ImageNet normalization.
                test_face = (test_face-mean)/std
            # HWC -> CHW and add a batch dimension.
            test_face = test_face.transpose((2, 0, 1))
            test_face = test_face.reshape((1,) + test_face.shape)
            input = torch.from_numpy(test_face).float()
            input = torch.autograd.Variable(input)
            start = time.time()
            if args.backbone == 'MobileFaceNet':
                landmark = model(input)[0].cpu().data.numpy()
                # Append the 136 raw coordinates of this face to the file.
                for i in range(landmark.shape[1]):
                    landmark_file.write(str(landmark[0][i]) + " ")
                landmark_file.write("\n")
            else:
                landmark = model(input).cpu().data.numpy()
            end = time.time()
            print('Time: {:.6f}s.'.format(end - start))
            # Map the predicted points back to original image coordinates.
            landmark = landmark.reshape(-1, 2)
            landmark = new_bbox.reprojectLandmark(landmark)
            img = drawLandmark_multiple(img, new_bbox, landmark)
            # Crop and align the face: each eye center is the mean of its six
            # contour points (indices 36-41 and 42-47 of the 68-point layout).
            lefteye_x = 0
            lefteye_y = 0
            for i in range(36, 42):
                lefteye_x += landmark[i][0]
                lefteye_y += landmark[i][1]
            lefteye_x = lefteye_x/6
            lefteye_y = lefteye_y/6
            lefteye = [lefteye_x, lefteye_y]
            righteye_x = 0
            righteye_y = 0
            for i in range(42, 48):
                righteye_x += landmark[i][0]
                righteye_y += landmark[i][1]
            righteye_x = righteye_x/6
            righteye_y = righteye_y/6
            righteye = [righteye_x, righteye_y]
            nose = landmark[33]
            leftmouth = landmark[48]
            rightmouth = landmark[54]
            facial5points = [righteye, lefteye, nose, rightmouth, leftmouth]
            warped_face = warp_and_crop_face(np.array(org_img), facial5points, reference, crop_size=(crop_size, crop_size))
            img_warped = Image.fromarray(warped_face)
            # save the landmark detections
            cv2.imwrite(os.path.join('results', os.path.basename(imgname)), img)
    landmark_file.close()

# Smoke-test the Landmark dataset: save one augmented batch as an image grid.
from torch.utils.data import Dataset, DataLoader
import torchvision
from torchvision import transforms, utils

dataset = Landmark()
train_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=3, num_workers=1, shuffle=True)
for data, target in train_loader:
    utils.save_image(data, './samples.png',
                     normalize=True)
|
<gh_stars>10-100
from __future__ import print_function
from __future__ import absolute_import
import weakref
import gevent
import gevent.exceptions
from gevent.lock import Semaphore
from gevent.thread import allocate_lock
import gevent.testing as greentest
try:
from _thread import allocate_lock as std_allocate_lock
except ImportError: # Py2
from thread import allocate_lock as std_allocate_lock
# pylint:disable=broad-except
class TestSemaphore(greentest.TestCase):
    """Behavioral tests for gevent.lock.Semaphore."""

    # issue 39
    def test_acquire_returns_false_after_timeout(self):
        # An unacquirable semaphore must report failure as False after the
        # timeout, not hang or raise.
        s = Semaphore(value=0)
        result = s.acquire(timeout=0.01)
        assert result is False, repr(result)

    def test_release_twice(self):
        # Both rawlink callbacks must have fired after two releases.
        s = Semaphore()
        result = []
        s.rawlink(lambda s: result.append('a'))
        s.release()
        s.rawlink(lambda s: result.append('b'))
        s.release()
        gevent.sleep(0.001)
        # The order, though, is not guaranteed.
        self.assertEqual(sorted(result), ['a', 'b'])

    def test_semaphore_weakref(self):
        # Semaphores must be weak-referenceable.
        s = Semaphore()
        r = weakref.ref(s)
        self.assertEqual(s, r())

    @greentest.ignores_leakcheck
    def test_semaphore_in_class_with_del(self):
        # Issue #704. This used to crash the process
        # under PyPy through at least 4.0.1 if the Semaphore
        # was implemented with Cython.
        class X(object):
            def __init__(self):
                self.s = Semaphore()

            def __del__(self):
                # acquire() during garbage collection is the crash trigger.
                self.s.acquire()

        X()
        import gc
        gc.collect()
        gc.collect()

    def test_rawlink_on_unacquired_runs_notifiers(self):
        # https://github.com/gevent/gevent/issues/1287
        # Rawlinking a ready semaphore should fire immediately,
        # not raise LoopExit
        s = Semaphore()
        gevent.wait([s])
class TestLock(greentest.TestCase):
    """gevent locks should mirror stdlib lock error behavior."""

    def test_release_unheld_lock(self):
        # Releasing a never-acquired lock must raise, and the gevent lock
        # must raise the same exception type as the stdlib lock.
        raised = []
        for make_lock in (std_allocate_lock, allocate_lock):
            lock = make_lock()
            try:
                lock.release()
            except Exception as exc:
                raised.append(exc)
            else:
                self.fail("Should have thrown an exception")
        std_exc, g_exc = raised
        self.assertIsInstance(g_exc, type(std_exc))
@greentest.skipOnPurePython("Needs C extension")
class TestCExt(greentest.TestCase):
    """Check that the accelerated Semaphore implementation is in use."""

    def test_c_extension(self):
        # With the C extension loaded, Semaphore is defined in the
        # compiled gevent.__semaphore module, not the Python fallback.
        module_name = Semaphore.__module__
        self.assertEqual('gevent.__semaphore', module_name)
class SwitchWithFixedHash(object):
    """Callable stand-in for ``greenlet.switch`` carrying a chosen hash code.

    Hashing and equality are deliberately forbidden: if the semaphore ever
    puts this object into a hash-based container (which would lose waiter
    ordering), the AssertionError surfaces immediately.  Only matters if
    waiters are hashed somewhere -- they used to be, and that did not
    preserve order.
    """

    def __init__(self, greenlet, hashcode):
        self.hashcode = hashcode
        self.switch = greenlet.switch

    def __hash__(self):
        raise AssertionError

    def __eq__(self, other):
        raise AssertionError

    def __call__(self, *args, **kwargs):
        target = self.switch
        return target(*args, **kwargs)

    def __repr__(self):
        return '%r' % (self.switch,)
class FirstG(gevent.Greenlet):
    # A greenlet whose switch method will have a low hashcode.
    hashcode = 10

    def __init__(self, *args, **kwargs):
        gevent.Greenlet.__init__(self, *args, **kwargs)
        # Replace the bound switch with the hash-hostile wrapper so any
        # hash-based handling of waiters fails loudly (see
        # SwitchWithFixedHash above).
        self.switch = SwitchWithFixedHash(self, self.hashcode)
class LastG(FirstG):
    # A greenlet whose switch method will have a high hashcode.
    hashcode = 12
def acquire_then_exit(sem, should_quit):
    """Block until *sem* is obtained, then record completion on *should_quit*."""
    sem.acquire()
    should_quit.append(True)
def acquire_then_spawn(sem, should_quit):
    # Stop the mutual recursion once any greenlet has signalled completion.
    if should_quit:
        return
    sem.acquire()
    # Hand the semaphore to a fresh greenlet that releases it and recurses.
    g = FirstG.spawn(release_then_spawn, sem, should_quit)
    g.join()
def release_then_spawn(sem, should_quit):
    # Counterpart of acquire_then_spawn: release, then recurse via a fresh
    # greenlet unless completion has been signalled.
    sem.release()
    if should_quit:  # pragma: no cover
        return
    g = FirstG.spawn(acquire_then_spawn, sem, should_quit)
    g.join()
class TestSemaphoreFair(greentest.TestCase):

    @greentest.ignores_leakcheck
    def test_fair_or_hangs(self):
        # If the lock isn't fair, this hangs, spinning between
        # the last two greenlets.
        # See https://github.com/gevent/gevent/issues/1487
        sem = Semaphore()
        should_quit = []
        keep_going1 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
        keep_going2 = FirstG.spawn(acquire_then_spawn, sem, should_quit)
        exiting = LastG.spawn(acquire_then_exit, sem, should_quit)
        # With fair handoff, ``exiting`` eventually gets the semaphore and
        # sets should_quit; the remaining waiter then has no one left to
        # wake it, which joinall() reports as LoopExit (expected here).
        with self.assertRaises(gevent.exceptions.LoopExit):
            gevent.joinall([keep_going1, keep_going2, exiting])
        self.assertTrue(exiting.dead, exiting)
        self.assertTrue(keep_going2.dead, keep_going2)
        self.assertFalse(keep_going1.dead, keep_going1)
if __name__ == '__main__':
    # Run through gevent's test harness (gevent.testing) rather than
    # plain unittest.
    greentest.main()
|
<filename>NMTK_apps/NMTK_server/data_loaders/rasters.py
import logging
import collections
import datetime
from dateutil.parser import parse
from BaseDataLoader import *
from loaders import FormatException
from ogr import osr
from django.contrib.gis.gdal import \
GDALRaster, SpatialReference, CoordTransform
from django.contrib.gis.geos import Polygon
logger = logging.getLogger(__name__)
class RasterLoader(BaseDataLoader):
    '''
    Loader for GDAL-supported raster images.

    Iterates over ``self.filelist`` and adopts the first file that
    ``GDALRaster`` can open; extent, SRID and band metadata are then
    derived from that raster (see the ``data`` property).
    '''
    name = 'Raster'
    # NMTK type code used for raster data (mirrors geom_type in ``data``).
    types = 99

    def __init__(self, *args, **kwargs):
        '''
        A reader for GDAL supported raster images.

        Accepts an optional ``srid`` keyword argument which overrides
        SRID auto-detection.
        '''
        # A list of files that should be unpacked from the archive, it's
        # important to note that the first one is the supported file type,
        # the others are supporting files.
        self.unpack_list = []
        self.raster_obj = None
        self._srid = kwargs.pop('srid', None)
        super(RasterLoader, self).__init__(*args, **kwargs)
        for fn in self.filelist:
            try:
                self.raster_obj = GDALRaster(fn)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit are no longer swallowed here.
                if logger.isEnabledFor(logging.DEBUG):
                    logger.exception('Failed to open file %s', fn)
                else:
                    logger.info('The GDAL Loader does not support this data ' +
                                'format, deferring to the next loader ' +
                                'in the chain.')
                self.ogr_obj = None
            if self.raster_obj is not None:
                self.spatial = True
                self.format = self.raster_obj.driver.name
                logger.debug('The format of the file is %s', self.format)
                self.filename = fn
                self.unpack_list.append(fn)
                logger.info('Raster file detected is %s', self.filename)
                break

    def bands(self):
        '''
        Yield a RasterBand(min, max, type) namedtuple per raster band, with
        the GDAL datatype collapsed to either 'integer' or 'float'.
        '''
        Band = collections.namedtuple('RasterBand',
                                      ['min', 'max', 'type', ])
        for band in self.raster_obj.bands:
            band_type = band.datatype(as_string=True).lower()
            if 'float' in band_type:
                bt = 'float'
            elif 'int' in band_type:
                bt = 'integer'
            elif 'byte' in band_type:
                bt = 'integer'
            else:
                # Unrecognized GDAL datatypes fall back to float.
                bt = 'float'
            yield Band(band.min, band.max, bt)

    @property
    def dimensions(self):
        # Rasters handled here are always two-dimensional.
        return 2

    def determineGeometryType(self, layer):
        '''
        In the case of a KML/KMZ file, we need to iterate over the data to
        determine the appropriate geometry type to use.  Not applicable to
        rasters, so this always returns None.
        '''
        return None

    def __iter__(self):
        # Rasters expose no per-feature iteration.
        return iter([])

    @property
    def spatial_type(self):
        return self.data.type

    @property
    def feature_count(self):
        return self.data.feature_count

    @property
    def srs(self):
        return self.data.srs

    @property
    def srid(self):
        return self.data.srid

    def is_supported(self):
        '''
        Indicate whether or not this loader is able to process this
        file type. If it is, return True, otherwise return False.
        '''
        if self.raster_obj:
            return True
        return False

    def fields(self):
        '''
        Band numbers (1-based, as strings) stand in for field names to
        preserve the field-retrieval API shared with the other loaders.
        '''
        return [str(i + 1) for i, b in enumerate(self.raster_obj.bands)]

    def fields_types(self):
        '''
        This returns a list of tuples, with the first being a field name
        and the second element of each being the python type of the field.
        Rasters carry no attribute fields, hence the empty list.
        '''
        return []

    def ogr_fields_types(self):
        '''
        Same contract as fields_types; rasters have no OGR fields.
        '''
        return []

    @property
    def extent(self):
        # Reorder (xmin, ymin, xmax, ymax) -> (xmin, xmax, ymin, ymax).
        return (self.data.extent[0], self.data.extent[2],
                self.data.extent[1], self.data.extent[3],)

    @property
    def data(self):
        '''
        Lazily compute and cache a RasterResult namedtuple describing the
        raster (SRID, extent in EPSG:4326, type codes, empty field list).

        Raises FormatException when no valid SRID can be determined.
        '''
        if not hasattr(self, '_data'):
            if self.raster_obj is None:
                self._data = None
                return None
            layer = geom_extent = geom_type = spatial_ref = geom_srid = None
            driver = self.raster_obj.driver.name
            layer = None
            geom_extent = self.raster_obj.extent
            geom_type = 99
            srs = self.raster_obj.srs
            geos_extent = Polygon.from_bbox(self.raster_obj.extent)
            ogr_extent = geos_extent.ogr
            # User supplied SRID, so we will use that...
            if self._srid:
                srs = None
                geom_srid = self._srid
                epsg = str('EPSG:%s' % (geom_srid,))
                logger.debug('Setting output SRID to %s',
                             epsg)
                try:
                    srs = SpatialReference(epsg)
                    srs.validate()
                    geom_srid = srs.srid
                except Exception as e:
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.exception('Invalid SRS (or none): %s', e)
                    srs = None
            # No SRID! Let's try to detect it
            if srs and not geom_srid:
                srs.identify_epsg()
                geom_srid = srs.srid
                # Fixed: previously logged the always-None ``srid`` variable.
                logger.debug('Auto-detect of SRID yielded %s', geom_srid)
            if srs and not geom_srid:
                # Still no SRID - but we have an srs - so try reprojecting
                # the extent into EPSG:4326 and read the SRID off the result.
                try:
                    # Fixed: this referenced the undefined name ``r``
                    # (NameError) instead of the local ``srs``.
                    reprojection = CoordTransform(
                        srs, SpatialReference('EPSG:4326'))
                    ogr_extent.transform(reprojection)
                    geos_extent = ogr_extent.geos
                    geom_srid = geos_extent.srid
                except Exception as e:
                    if logger.isEnabledFor(logging.DEBUG):
                        logger.exception('Failed to transform: %s', e)
                    raise FormatException('Unable to determine valid SRID ' +
                                          'for this data')
            if not geom_srid:
                raise FormatException('Unable to determine valid SRID ' +
                                      'for this data')
            # Ensure we have an extent that is in EPSG 4326
            if geom_srid != 4326:
                reprojection = CoordTransform(
                    srs, SpatialReference('EPSG:4326'))
                ogr_extent.transform(reprojection)
                geos_4326 = ogr_extent.geos
                geom_srid = 4326
            else:
                geos_4326 = geos_extent
            RasterResult = collections.namedtuple('RasterResult',
                                                  ['srid',
                                                   'extent',
                                                   'srs',
                                                   'layer',
                                                   'feature_count',
                                                   'ogr',
                                                   'type',
                                                   'type_text',
                                                   'fields',
                                                   'reprojection',
                                                   'dest_srs',
                                                   'dim', ])
            self._data = RasterResult(srid=geom_srid,
                                      extent=(geos_4326.extent[0],
                                              geos_4326.extent[2],
                                              geos_4326.extent[1],
                                              geos_4326.extent[3],),
                                      ogr=None,
                                      layer=None,
                                      srs=geos_4326.srs,
                                      feature_count=0,
                                      type=geom_type,
                                      type_text='Raster',
                                      fields=[],
                                      dest_srs=None,
                                      reprojection=None,
                                      dim=self.dimensions)
        return self._data
|
<reponame>MTonyM/PReMVOS
import tensorflow as tf
import numpy
from math import ceil
from ReID_net.datasets.Dataset import Dataset
from ReID_net.datasets.Util.Reader import load_image_tensorflow
from ReID_net.datasets.Util.Resize import resize_image
from ReID_net.datasets.Util.Util import smart_shape, username
from ReID_net.datasets.Augmentors import apply_augmentors
from ReID_net.datasets.Util.Normalization import normalize, unnormalize
# Default CUHK03 data location on per-user fast storage.
SIMILARITY_DEFAULT_PATH = "/fastwork/" + username() + "/mywork/data/CUHK03/"
# Default (height, width) of the network input crops.
DEFAULT_INPUT_SIZE = [128, 128]
# Presumably marks void/ignored pixels -- not referenced in this chunk.
SIMILARITY_VOID_LABEL = 255
class SimilarityDataset(Dataset):
def __init__(self, config, subset, coord, annotations, n_train_ids, jpg=True):
super(SimilarityDataset, self).__init__(subset)
assert subset in ("train", "valid"), subset
self.jpg = jpg
self.config = config
self.subset = subset
self.coord = coord
self.model = config.str("model", "")
self.annotations = annotations
self.input_size = config.int_list("input_size", DEFAULT_INPUT_SIZE)
self.input_size = tuple(self.input_size)
self.batching_mode = config.str("batching_mode", "pair")
assert self.batching_mode in ("single", "pair", "group", "eval"), self.batching_mode
self.validation_mode = config.str("validation_mode", "embedding")
assert self.validation_mode in ("embedding", "similarity"), self.validation_mode
self.group_size = config.int("group_size", 2)
self.pair_ratio = config.int("pair_ratio", 1)
self.augmentors, _ = self._parse_augmentors_and_shuffle()
self.context_region_factor = config.float("context_region_factor", 1.2)
if self.subset != "train":
context_region_factor_val = config.float("context_region_factor_val", -1.0)
if context_region_factor_val != -1.0:
self.context_region_factor = context_region_factor_val
self.use_summaries = self.config.bool("use_summaries", False)
self.epoch_length = config.int("epoch_length", 1000)
if subset != "train":
epoch_length = config.int("epoch_length_val", -1)
if epoch_length != -1:
self.epoch_length = epoch_length
if self.batching_mode == "eval":
assert len(self.augmentors) == 0, len(self.augmentors)
self.epoch_length = len(annotations)
self.n_classes = config.int("num_classes", None)
self.num_train_id = n_train_ids
self.file_names_list = [ann["img_file"] for ann in annotations]
has_tags = "tag" in annotations[0]
if has_tags:
self.tags_list = [ann["tag"] for ann in annotations]
else:
self.tags_list = self.file_names_list
self.bboxes_list = numpy.array([ann["bbox"] for ann in annotations], dtype="float32")
cat_ids = [ann["category_id"] for ann in annotations]
self.class_labels_list = numpy.array(cat_ids, dtype="int32")
train_id_list, train_counts = numpy.unique(sorted(cat_ids), return_counts=True)
# print(set(numpy.arange(0,5952))-set(train_id_list))
# print(len(train_id_list),len(train_counts))
# print("started seeming long thing")
# import time
# t = time.time()
self.indices_for_classes = [[idx for idx, id_ in enumerate(cat_ids) if id_ == cat_id] for cat_id in train_id_list]
# print("finished seeming long thing",time.time()-t)
self.train_counts = tf.constant(train_counts.astype(numpy.int32))
self.idx_placeholder = tf.placeholder(tf.int32, (4,), "idx")
self.test_case = tf.placeholder(tf.string)
self.use_end_network = tf.placeholder(tf.bool)
def num_classes(self):
return self.n_classes
def _create_inputs_for_eval(self, batch_size):
#for now require batch size of 1, which will allow us to output crops of different sizes for visualization
# assert batch_size == 1
tf_fns = tf.constant(self.file_names_list, tf.string)
tf_tags = tf.constant(self.tags_list, tf.string)
tf_bboxes = tf.constant(self.bboxes_list, tf.float32)
tf_class_labels_list = tf.constant(self.class_labels_list, tf.int32)
def load_fn(fn_, tag, bbox_, class_label_):
if batch_size == 1:
img, _, img_raw = self._load_crop_helper(fn_, bbox_)
else:
img, img_raw, _ = self._load_crop_helper(fn_, bbox_)
return img, img_raw, tag, class_label_
USE_DATASET_API = True
if USE_DATASET_API:
dataset = tf.data.Dataset.from_tensor_slices((tf_fns, tf_tags, tf_bboxes, tf_class_labels_list))
# dataset = dataset.map(load_fn)
dataset = dataset.map(load_fn,num_parallel_calls=32)
#dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
imgs_norm, imgs_raw, tags, class_labels = next_element
else:
#alternative with slice_input_producer
fn, tag_, bbox, class_label = tf.train.slice_input_producer((tf_fns, tf_tags, tf_bboxes, tf_class_labels_list),
num_epochs=1, shuffle=False)
imgs_norm, imgs_raw, tags, class_labels = load_fn(fn, tag_, bbox, class_label)
#expand for batch size of 1
if batch_size == 1:
imgs_norm = tf.expand_dims(imgs_norm, axis=0)
imgs_raw = tf.expand_dims(imgs_raw, axis=0)
tags = tf.expand_dims(tags, axis=0)
class_labels = tf.expand_dims(class_labels, axis=0)
else:
imgs_norm, imgs_raw, tags, class_labels = tf.train.batch([imgs_norm, imgs_raw, tags, class_labels], batch_size,
num_threads=32, capacity=10 * batch_size,
allow_smaller_final_batch=True)
print(imgs_norm.get_shape())
return imgs_norm, imgs_raw, tags, class_labels
def _create_inputs_for_pair(self, batch_size):
assert self.group_size == 2
assert batch_size % self.group_size == 0
batch_size /= self.group_size
def _create_example(_=None):
rand = tf.random_uniform([5], maxval=tf.int32.max, dtype=tf.int32)
sample_same_person = rand[0] % (self.pair_ratio + 1)
sample_same_person = tf.cast(tf.equal(sample_same_person, 0), tf.int32)
pers_id_1 = ((rand[1] - 1) % self.num_train_id) + 1
pers_1_n_imgs = self.train_counts[pers_id_1 - 1]
img_id_1 = ((rand[2] - 1) % pers_1_n_imgs) + 1
def if_same_person():
pers_id_2 = pers_id_1
img_id_2 = ((rand[4] - 1) % (pers_1_n_imgs - 1)) + 1
img_id_2 = tf.cond(img_id_2 >= img_id_1, lambda: img_id_2 + 1, lambda: img_id_2)
return pers_id_2, img_id_2
def if_not_same_person():
pers_id_2 = ((rand[3] - 1) % (self.num_train_id - 1)) + 1
pers_id_2 = tf.cond(pers_id_2 >= pers_id_1, lambda: pers_id_2 + 1, lambda: pers_id_2)
pers_2_n_imgs = self.train_counts[pers_id_2 - 1]
img_id_2 = ((rand[4] - 1) % pers_2_n_imgs) + 1
return pers_id_2, img_id_2
pers_id_2, img_id_2 = tf.cond(tf.cast(sample_same_person, tf.bool), if_same_person, if_not_same_person)
#TODO: change the functions above to create 0 indexed values
img_id_1 -= 1
img_id_2 -= 1
pers_id_1 -= 1
pers_id_2 -= 1
img1, img1_class, img1_file_name, original_img1 = self.load_crop(img_id_1, pers_id_1)
img2, img2_class, img2_file_name, original_img2 = self.load_crop(img_id_2, pers_id_2)
tag = img1_file_name + " " + img2_file_name + " " + tf.as_string(sample_same_person)
pair = tf.stack([img1, img2])
original_class_pair = tf.stack([img1_class, img2_class], axis=0)
label = sample_same_person
return pair, label, tag, original_class_pair
USE_DATASET_API = False
if USE_DATASET_API:
dummy = tf.constant(0, tf.int32)
dataset = tf.contrib.data.Dataset.from_tensors(dummy)
dataset = dataset.map(_create_example)
dataset = dataset.repeat()
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
imgs, labels, tags, original_classes = next_element
else:
pair, label, tag, original_class = _create_example()
imgs, labels, tags, original_classes = tf.train.batch([pair, label, tag, original_class], batch_size=batch_size,
num_threads=32, capacity=50 * batch_size)
imgs = self.reshape_group(imgs, batch_size)
labels = tf.reshape(tf.tile(tf.expand_dims(labels, axis=-1), multiples=[1, 2]), [-1])
return imgs, labels, tags, original_classes
def _create_inputs_for_group(self, batch_size):
assert 1 < self.group_size < batch_size
assert batch_size % self.group_size == 0
batch_size /= self.group_size
batch_size = int(batch_size)
sample_from = tf.range(self.num_train_id)
if batch_size > self.num_train_id:
sample_from = tf.tile(sample_from, [int(ceil(float(batch_size) / self.num_train_id))])
pers_ids = tf.random_shuffle(sample_from)[0:int(batch_size)]
def for_each_identity(p_idx):
pers_id = pers_ids[p_idx]
img_ids = tf.tile(tf.random_shuffle(tf.range(self.train_counts[pers_id])), [4])[:self.group_size]
def for_each_img(i_idx):
img_id = img_ids[i_idx]
img, img_class, img_file_name, original_img = self.load_crop(img_id, pers_id)
return img, img_class, img_file_name, original_img
imgs, labels, tags, original_imgs = tf.map_fn(for_each_img, tf.range(self.group_size), dtype=(tf.float32, tf.int32, tf.string, tf.float32))
return imgs, labels, tags, original_imgs
imgs, labels, tags, original_imgs = tf.map_fn(for_each_identity, tf.range(batch_size), dtype=(tf.float32, tf.int32, tf.string, tf.float32))
# imgs, labels, tags = tf.train.batch([imgs, labels, tags], batch_size=1, num_threads=32, capacity=50 * batch_size)
# imgs, labels, tags = tf.train.batch([imgs, labels, tags], batch_size=1, num_threads=1, capacity=1 * batch_size)
# imgs = self.reshape_group(tf.squeeze(imgs, 0), batch_size)
# labels = self.reshape_group(tf.squeeze(labels, 0), batch_size)
# tags = self.reshape_group(tf.squeeze(tags, 0), batch_size)
imgs = self.reshape_group(imgs, batch_size)
labels = self.reshape_group(labels, batch_size)
tags = self.reshape_group(tags, batch_size)
original_imgs = self.reshape_group(original_imgs,batch_size)
return imgs, labels, tags, original_imgs
def create_input_tensors_dict(self, batch_size):
if self.batching_mode == "pair":
imgs, labels, tags, original_classes = self._create_inputs_for_pair(batch_size)
imgs_raw = unnormalize(imgs)
elif self.batching_mode == "group":
imgs, labels, tags, original_images = self._create_inputs_for_group(batch_size)
original_classes = labels
imgs_raw = original_images
# imgs_raw = None
elif self.batching_mode == "eval":
imgs, imgs_raw, tags, labels = self._create_inputs_for_eval(batch_size)
original_classes = labels
else:
raise ValueError("Incorrect batching mode error")
#summary = tf.get_collection(tf.GraphKeys.SUMMARIES)[-1]
#self.summaries.append(summary)
if self.use_summaries:
summ = tf.summary.image("imgs", unnormalize(imgs))
self.summaries.append(summ)
tensors = {"inputs": imgs, "labels": labels, "tags": tags, "original_labels": original_classes}
if imgs_raw is not None:
tensors["imgs_raw"] = imgs_raw
return tensors
def load_crop(self, img_id, pers_id):
def select_data(pers_id_, img_id_):
idx = self.indices_for_classes[pers_id_][img_id_]
return self.class_labels_list[idx], self.bboxes_list[idx], self.file_names_list[idx]
img_class, img_bbox, img_file_name = tf.py_func(select_data, [pers_id, img_id],
[tf.int32, tf.float32, tf.string], name="select_data")
img_class.set_shape(())
img_bbox.set_shape((4,))
img_file_name.set_shape(())
img, original_img, _ = self._load_crop_helper(img_file_name, img_bbox)
return img, img_class, img_file_name,original_img
def _load_crop_helper(self, img_file_name, img_bbox):
img_whole_im = load_image_tensorflow(img_file_name, jpg=self.jpg, channels=3)
dims = tf.shape(img_whole_im)
img_x = img_bbox[0]
img_y = img_bbox[1]
img_w = img_bbox[2]
img_h = img_bbox[3]
# add context region
img_x -= 0.5 * img_w * (self.context_region_factor - 1.0)
img_y -= 0.5 * img_h * (self.context_region_factor - 1.0)
img_w *= self.context_region_factor
img_h *= self.context_region_factor
# round to integer coordinates
img_x = tf.cast(tf.round(img_x), tf.int32)
img_y = tf.cast(tf.round(img_y), tf.int32)
img_w = tf.cast(tf.round(img_w), tf.int32)
img_h = tf.cast(tf.round(img_h), tf.int32)
# clip to image size
img_x = tf.maximum(img_x, 0)
img_y = tf.maximum(img_y, 0)
img_excess_w = tf.maximum(img_x + img_w - dims[1], 0)
img_excess_h = tf.maximum(img_y + img_h - dims[0], 0)
img_w = img_w - img_excess_w
img_h = img_h - img_excess_h
# crop
img_cropped = img_whole_im[img_y:img_y + img_h, img_x:img_x + img_w]
# resize
img = resize_image(img_cropped, self.input_size, True)
img.set_shape(self.input_size + (3,))
# augment and normalize
tensors = {"unnormalized_img": img}
tensors = apply_augmentors(tensors, self.augmentors)
img = tensors["unnormalized_img"]
img_norm = normalize(img)
return img_norm, img, img_cropped
def reshape_group(self, x, batch_size):
shape = smart_shape(x)
shape2 = shape[1:]
shape2[0] = self.group_size * batch_size
x = tf.reshape(x, shape2)
return x
def num_examples_per_epoch(self):
return self.epoch_length
def void_label(self):
    """Return the void (ignore) label; None means this dataset has none."""
    return None
|
# Screen coordinates (x, y) in pixels for every tap target the bot uses.
CLICK_LOCATION = {
    # click position on the start page
    'CENTER_CLICK': (350, 230),
    'MAIN_RETURN_INDEX': (16, 62),
    'MAIN_TASK_RETURN_BATTLE_SELECT': (59, 37),
    "LOGIN_QUICK_LOGIN": (640, 675),
    "LOGIN_START_WAKEUP": (642, 507),
    "BATTLE_CLICK_IN": (1173, 186),
    'BATTLE_CLICK_AI_COMMANDER': (1109, 588),
    'BATTLE_CLICK_START_BATTLE': (1151, 658),
    'BATTLE_CLICK_ENSURE_TEAM_INFO': (1104, 512),
    "BATTLE_SELECT_MAIN_TASK": (75, 663),
    "BATTLE_SELECT_MAIN_TASK_2": (1213, 311),
    "BATTLE_SELECT_MAIN_TASK_4": (969, 362),
    "BATTLE_SELECT_MAIN_TASK_5": (1261, 362),
    "BATTLE_SELECT_MAIN_TASK_2-2": (1265, 338),
    "BATTLE_SELECT_MAIN_TASK_S2-1": (1054, 431),
    "BATTLE_SELECT_MAIN_TASK_4-4": (610, 339),
    "BATTLE_SELECT_MAIN_TASK_4-5": (824, 254),
    "BATTLE_SELECT_MAIN_TASK_4-6": (1034, 340),
    "BATTLE_SELECT_MAIN_TASK_4-7": (771, 343),
    "BATTLE_SELECT_MAIN_TASK_5-1": (574, 408),
    "BATTLE_SELECT_MAIN_TASK_5-2": (884, 319),
    "BATTLE_SELECT_MAIN_TASK_5-3": (372, 321),
    "BATTLE_SELECT_MAIN_TASK_5-4": (778, 390),
    "BATTLE_SELECT_MAIN_TASK_5-5": (1007, 321),
    "BATTLE_SELECT_MAIN_TASK_S5-1": (1061, 446),
    "BATTLE_SELECT_MATERIAL_COLLECTION": (236, 658),
    # predefined section
    "BATTLE_SELECT_MATERIAL_COLLECTION_0": (168, 375),
    "BATTLE_SELECT_MATERIAL_COLLECTION_1": (452, 375),
    "BATTLE_SELECT_MATERIAL_COLLECTION_2": (739, 375),
    "BATTLE_SELECT_MATERIAL_COLLECTION_3": (1005, 356),
    "BATTLE_SELECT_MATERIAL_COLLECTION_X-1": (135, 570),
    "BATTLE_SELECT_MATERIAL_COLLECTION_X-2": (135, 570),
    "BATTLE_SELECT_MATERIAL_COLLECTION_X-3": (664, 402),
    "BATTLE_SELECT_MATERIAL_COLLECTION_X-4": (778, 293),
    "BATTLE_SELECT_MATERIAL_COLLECTION_X-5": (880, 167),
    "BATTLE_SELECT_CHIP_SEARCH": (387, 658),
    # predefined section
    "BATTLE_SELECT_CHIP_SEARCH_PR-1": (264, 367),
    "BATTLE_SELECT_CHIP_SEARCH_PR-2": (503, 414),
    "BATTLE_SELECT_CHIP_SEARCH_PR-3": (762, 396),
    "BATTLE_SELECT_CHIP_SEARCH_PR-X-1": (324, 415),
    "BATTLE_SELECT_CHIP_SEARCH_PR-X-2": (767, 251),
    # "Heart of Surging Flame" event stages
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME": (694, 653),
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME_OF-": (1055, 336),
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME_OF-7": (76, 329),
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME_OF-8": (308, 446),
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME_OF-F": (953, 426),
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME_OF-F1": (172, 478),
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME_OF-F2": (400, 357),
    "BATTLE_SELECT_HEART_OF_SURGING_FLAME_OF-F3": (720, 443),
}
MAP_LOCATION = {
    # screenshot capture regions  # (X, Y) (DX, DY)
    "BATTLE_CLICK_AI_COMMANDER": ((1055, 580), (23, 23)),
    "BATTLE_INFO_BATTLE_END": ((30, 573), (375, 100)),
    "BATTLE_INFO_STRENGTH_REMAIN": ((1128, 21), (152, 33)),
    "BATTLE_INFO_LEVEL_UP": ((288, 348), (184, 58)),
    "BATTLE_INFO_LEVEL_UP_BLACK": ((827, 244), (136, 67)),
    "BATTLE_INFO_EAT_STONE": ((880, 520), (113, 37)),
    'BATTLE_DEBUG_WHEN_OCR_ERROR': ((108, 122), (114, 53)),
    "INDEX_INFO_IS_SETTING": ((268, 31), (54, 32)),
}
SWIPE_LOCATION = {
    # swipe gestures  # (X1, Y1) -> (X2, Y2); the second pair is the offset
    "BATTLE_TO_MAP_LEFT": ((24, 87), (1200, 0)),
    "BATTLE_TO_MAP_RIGHT": ((1023, 157), (-600, 0))
}
MAIN_TASK_SUPPORT = {
    # stages currently supported by the main battle module
    # (values mirror the LIZHI_CONSUME sanity costs -- presumably the
    # per-run sanity cost; confirm against the battle module)
    # main story stages
    '5-1': 18,
    '5-2': 18,
    '5-3': 18,
    '5-4': 18,
    'S5-1': 18,
    '4-4': 18,
    '4-5': 18,
    '4-6': 18,
    '4-7': 18,
    # CA
    'CA-1': 10,
    'CA-2': 15,
    'CA-3': 20,
    'CA-4': 25,
    'CA-5': 30,
    # CE
    'CE-3': 20,
    'CE-2': 15,
    'CE-1': 10,
    'CE-4': 25,
    'CE-5': 30,
    # LS
    'LS-1': 10,
    'LS-2': 15,
    'LS-3': 20,
    'LS-4': 25,
    'LS-5': 30,
    # SK
    'SK-1': 10,
    'SK-2': 15,
    'SK-3': 20,
    'SK-4': 25,
    'SK-5': 30,
    # AP
    'AP-1': 10,
    'AP-2': 15,
    'AP-3': 20,
    'AP-4': 25,
    'AP-5': 30,
    # PR
    'PR-A-1': 18,
    'PR-A-2': 36,
    'PR-B-1': 18,
    'PR-B-2': 36,
    'PR-C-1': 18,
    'PR-C-2': 36,
    'PR-D-1': 18,
    'PR-D-2': 36,
    # OF: "Heart of Surging Flame" event stages
    'OF-8': 20,
    'OF-7': 20,
    'OF-F3': 8,
    'OF-F2': 6,
    'OF-F1': 4,
}
LIZHI_CONSUME = {
    # sanity ("lizhi") cost per run, stage_id : amount
    # CA
    'CA-1': 10,
    'CA-2': 15,
    'CA-3': 20,
    'CA-4': 25,
    'CA-5': 30,
    # CE
    'CE-3': 20,
    'CE-2': 15,
    'CE-1': 10,
    'CE-4': 25,
    'CE-5': 30,
    # LS
    'LS-1': 10,
    'LS-2': 15,
    'LS-3': 20,
    'LS-4': 25,
    'LS-5': 30,
    # SK
    'SK-1': 10,
    'SK-2': 15,
    'SK-3': 20,
    'SK-4': 25,
    'SK-5': 30,
    # AP
    'AP-1': 10,
    'AP-2': 15,
    'AP-3': 20,
    'AP-4': 25,
    'AP-5': 30,
    # PR
    'PR-A-1': 18,
    'PR-A-2': 36,
    'PR-B-1': 18,
    'PR-B-2': 36,
    'PR-C-1': 18,
    'PR-C-2': 36,
    'PR-D-1': 18,
    'PR-D-2': 36,
    # main story, chapter 5
    '5-1': 18,
    '5-2': 18,
    '5-3': 18,
    '5-4': 18,
    '5-5': 18,
    '5-6': 18,
    '5-7': 18,
    '5-8': 18,
    '5-9': 18,
    '5-10': 21,
    "S5-1": 18,
    "S5-2": 18,
    "S5-3": 18,
    "S5-4": 18,
    "S5-5": 18,
    "S5-6": 18,
    # main story, chapter 4
    '4-1': 18,
    '4-2': 18,
    '4-3': 18,
    '4-4': 18,
    '4-5': 18,
    '4-6': 18,
    '4-7': 18,
    '4-8': 21,
    '4-9': 21,
    '4-10': 21,
    # main story, chapter 3
    '3-8': 18,
    '3-7': 15,
    '3-6': 15,
    'S3-3': 15,
    'S3-4': 15,
    'S3-2': 15,
    'S3-1': 15,
    '3-5': 15,
    '3-4': 15,
    '3-3': 15,
    '3-2': 15,
    '3-1': 15,
    # main story, chapter 2
    '2-10': 15,
    '2-9': 12,
    '2-8': 12,
    '2-7': 12,
    '2-6': 12,
    '2-5': 12,
    "S2-12": 15,
    'S2-11': 12,
    'S2-9': 12,
    'S2-8': 12,
    'S2-7': 12,
    'S2-6': 12,
    'S2-5': 12,
    '2-4': 12,
    '2-3': 9,
    '2-2': 9,
    '2-1': 9,
    'S2-4': 9,
    'S2-3': 9,
    'S2-2': 9,
    'S2-1': 9,
    # OF: "Heart of Surging Flame" event stages
    'OF-8': 20,
    'OF-7': 20,
    # ==== entries below are ticket costs, not sanity
    'OF-F3': 8,  # note: for OF-Fx the sanity cost is really a ticket cost; for
                 # consistency the "not enough sanity" hint is still shown
                 # rather than "not enough tickets"
    'OF-F2': 6,
    'OF-F1': 4,
    #
}
BATTLE_SELECTORS = {
    1: 'MAIN_TASK',  # main story missions
    2: 'MATERIAL_COLLECTION',  # supply missions
    3: 'CHIP_SEARCH',  # chip search missions
    4: 'EXTERMINATE_BATTLE'
}
# number of swipes needed to bring a chapter on screen
# only stages that are commonly farmed for materials are kept up to date
MAIN_TASK_CHAPTER_SWIPE = {
    # 1 means one BATTLE_TO_MAP_RIGHT swipe
    '4': 1,
    '5': 1,
}
MAIN_TASK_BATTLE_SWIPE = {
    # 1 means one BATTLE_TO_MAP_RIGHT swipe
    '4-4': 1,
    '4-5': 1,
    '4-6': 1,
    '4-7': 2,
    '4-8': 2,
    "5-3": 1,
    '5-4': 1,
    '5-5': 1,
    'OF-8': 2,
    'OF-7': 2,
}
DAILY_LIST = {
    # lookup of daily-stage positions, keyed by selector then weekday
    # data from http://wiki.joyme.com/arknights/%E9%A6%96%E9%A1%B5
    # the ordering may be wrong
    # NOTE(review): weekday keys run '1'-'7' but the quoted
    # strftime("%w") returns '0'-'6' with Sunday = '0' -- confirm how the
    # caller maps the weekday to these keys.
    "3": {
        # __import__('datetime').datetime.now().strftime("%w")
        # stage name / open days / drops / stage ID
        # Solid Defense (固若金汤): Mon, Thu, Fri, Sun - Defender & Medic promotion materials - A
        # Fierce Attack (摧枯拉朽): Mon, Tue, Fri, Sat - Caster & Sniper promotion materials - B
        # Unstoppable Charge (势不可挡): Wed, Thu, Sat, Sun - Vanguard & Supporter promotion materials - C
        # Fearless Protection (身先士卒): Tue, Wed, Sat, Sun - Guard & Specialist promotion materials - D
        '1':
            {
                'A': 1,
                'B': 2,
            },
        '2':
            {
                'B': 1,
                'D': 2,
            },
        '3':
            {
                'C': 1,
                'D': 2,
            },
        '4':
            {
                'A': 1,
                'C': 2,
            },
        '5':
            {
                'A': 1,
                'B': 2,
            },
        '6':
            {
                'B': 1,
                'C': 2,
                'D': 3,
            },
        '7':
            {
                'A': 1,
                'C': 1,
                'D': 3,
            },
    },
    "2": {
        # stage name / open days / drops / stage ID
        # Tactical Drill (战术演习): always open - battle records - LS
        # Aerial Threat (空中威胁): Tue, Wed, Fri, Sun - skill summaries - CA
        # Shattered Defense (粉碎防御): Mon, Thu, Sat, Sun - purchase certificates - AP
        # Resource Search (资源保障): Mon, Wed, Fri, Sat - carbon & furniture parts - SK
        # Cargo Escort (货物运送): Tue, Thu, Sat, Sun - LMD - CE
        # __import__('datetime').datetime.now().strftime("%w")
        '1':
            {
                'LS': 0,
                'AP': 1,
                'SK': 2,
            },
        '2':
            {
                'LS': 0,
                'CA': 1,
                'CE': 2,
            },
        '3':
            {
                'LS': 0,
                'CA': 1,
                'SK': 2,
            },
        '4':
            {
                'LS': 0,
                'AP': 1,
                'CE': 2,
            },
        '5':
            {
                'LS': 0,
                'CA': 1,
                'SK': 2,
            },
        '6':
            {
                'LS': 0,
                'AP': 1,
                'SK': 2,
                'CE': 3,
            },
        '7':
            {
                'LS': 0,
                'AP': 1,
                'CA': 2,
                'CE': 3,
            },
    }
}
# To Do
Activity_List = {
    "3": {
        # stage name / open days / drops / stage ID
        # Solid Defense (固若金汤): Mon, Thu, Fri, Sun - Defender & Medic promotion materials - A
        # Fierce Attack (摧枯拉朽): Mon, Tue, Fri, Sat - Caster & Sniper promotion materials - B
        # Unstoppable Charge (势不可挡): Wed, Thu, Sat, Sun - Vanguard & Supporter promotion materials - C
        # Fearless Protection (身先士卒): Tue, Wed, Sat, Sun - Guard & Specialist promotion materials - D
        "A": 1,
        "B": 2,
        "C": 3,
        "D": 4,
    },
    "2": {
    },
}
|
### For compiling/running the c++ file
import os
import platform ## for checking if im windows or linux
import sys ## for printing progress on large loops (camera image function).
### 3rd party imports ###
## Maths and linear algebra
import numpy as np
from scipy.interpolate import interp1d##Interpolation useful for making smooth anomations
### Plotting and animating
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d as ax3d
from matplotlib import animation
import matplotlib.colors
def printProgressBar(Q, size, preText='', postText=''):
    """Render an in-place console progress bar for long loops.

    Q: work completed so far; size: total amount of work.
    preText/postText: labels printed before/after the bar.
    Overwrites the current terminal line via a carriage return.
    """
    bar_width = 30  # number of character cells in the bar
    fraction = Q / size
    filled = '=' * int(bar_width * fraction)
    sys.stdout.write('\r')
    sys.stdout.write(f" {preText} [{filled:{bar_width}s}] {postText} ")
    sys.stdout.flush()
    return None
def compile_cpp(cpp_file='ray_tracing.cpp', exe_file='ray_tracing.o'):
    '''Compile the C++ ray tracer with g++ and return the executable name.

    Shells out to the system compiler; any compiler errors appear on the
    console (the exit status of g++ is not checked).
    '''
    os.system("echo Compiling " + cpp_file)
    compile_command = 'g++ ' + cpp_file + ' -o ' + exe_file
    os.system(compile_command)
    print('Done.')
    return exe_file
def run_cpp_ray_trace(params, technique='Simple', exe_file='ray_tracing.o', cpp_file='ray_tracing.cpp', force_new_compile=False, get_times=False, get_j=False):
    ''' Run the compiled C++ ray tracer (ray_tracing.cpp), compiling it
    first if needed, and return the traced photon path.

    params: [alpha_0, beta_0, theta_0, a] where
        alpha_0 = X co-ordinate in the image plane
        beta_0  = Y co-ordinate in the image plane
        theta_0 = image plane inclination angle (degrees)
        a       = normalised black hole spin, -1 < a < 1.
    technique: name of the ray-tracing logic, one of
        'Simple', 'Disc', 'NoDisc'; unknown values fall back to
        'Simple' with a printed warning.
    force_new_compile: recompile cpp_file even if exe_file exists.
    get_times / get_j: additionally return the time array t and/or the
        photon's angular momentum j, in that order.

    Returns x, y, z [, t] [, j].
    Raises ValueError if params does not contain exactly 4 values.
    '''
    # (Re)compile if requested or if the executable is missing.
    if force_new_compile or not os.path.isfile(exe_file):
        exe_file = compile_cpp(cpp_file, exe_file)
    # Prefix the platform-appropriate relative-path notation
    # ('.\\' on Windows, './' on macOS/Linux).
    if platform.system() == 'Windows':
        exe_file = '.\\' + exe_file
    else:
        exe_file = './' + exe_file
    if len(params) != 4:
        print('######################### ERROR #########################')
        print('Ray trace got %d params but needs 4'%(len(params)))
        print('#########################################################')
        raise ValueError('Ray trace got %d params but needs 4'%(len(params)))
    # Bug fix: work on a copy -- the original appended the technique to
    # the caller-supplied list, mutating it as a side effect.
    args = list(params) + [technique]
    allowed_techniques = ['Simple', 'Disc', 'NoDisc']
    if args[-1] not in allowed_techniques:
        print('################################### WARNING ###################################')
        print('Ray trace got %s technique but must be one of %s , %s , %s .'%(technique, *allowed_techniques))
        print('Defaulting to Simple technique.')
        print('###############################################################################')
        args[-1] = 'Simple'
    spin = float(args[-2])  # the spin a, needed for the co-ordinate transform
    args = [str(arg) for arg in args]
    pipe = os.popen(exe_file + ' ' + ' '.join(args))
    ## Note that os.popen() is not technically the recommended technique for doing this.
    ## See RunCpp.py for a better way.
    if get_times:
        if get_j:
            x, y, z, t, j = process_cpp_output(pipe, a=spin, get_times=get_times, get_j=get_j)
            return x, y, z, t, j
        x, y, z, t = process_cpp_output(pipe, a=spin, get_times=get_times, get_j=get_j)
        return x, y, z, t
    if get_j:
        x, y, z, j = process_cpp_output(pipe, a=spin, get_times=get_times, get_j=get_j)
        return x, y, z, j
    x, y, z = process_cpp_output(pipe, a=spin)
    return x, y, z
def piped_line_to_array(line):
    ''' Convert one comma-separated output line from the C++ program
    back into a numpy array.

    Only text *before* each comma is used, so whatever trails the final
    comma (typically a newline) is discarded -- this matches the C++
    output format, which terminates every value with a comma.
    '''
    fields = line.split(',')[:-1]  # drop the remainder after the last comma
    return np.array([float(field) for field in fields])
def process_cpp_output(pipe, a, get_times=False, get_j=False):
    ''' Parse the C++ ray tracer's stdout into useful numpy arrays.

    The C++ code always prints five lines in a fixed order: r, theta,
    phi, t and finally the photon angular momentum j.  All five lines
    are consumed regardless of the flags.

    Returns x, y, z, optionally followed by t (if get_times) and j
    (if get_j), in that order.
    '''
    # Read the five output lines in the order the C++ code emits them.
    r_line, theta_line, phi_line, t_line, j_line = (pipe.readline() for _ in range(5))
    r = piped_line_to_array(r_line)
    theta = piped_line_to_array(theta_line)
    phi = piped_line_to_array(phi_line)
    x, y, z = cartesian_from_sphereical_polar(r, theta, phi, a)
    results = [x, y, z]
    if get_times:
        results.append(piped_line_to_array(t_line))
    if get_j:
        results.append(float(j_line))
    return tuple(results)
def cartesian_from_sphereical_polar(r, theta, phi, a):
    ''' Transforms sphereical polar co-ordinates (r, theta, phi) to
    cartesian co-ordinates (x, y, z).

    The x and y components use the oblate radius sqrt(r^2 + a^2), where
    a is the black hole spin (a = 0 recovers ordinary spherical polars).
    '''
    rho = np.sqrt(a * a + r * r)  # oblate cylindrical radius factor
    sin_theta = np.sin(theta)
    x = rho * sin_theta * np.cos(phi)
    y = rho * sin_theta * np.sin(phi)
    z = r * np.cos(theta)
    return x, y, z
def sphereical_polar_from_cartesian(x, y, z, a):
    ''' Transforms cartesian co-ordinates (x, y, z) to sphereical polar
    co-ordinates (r, theta, phi), inverting the oblate mapping used by
    cartesian_from_sphereical_polar for black hole spin a.
    '''
    # Bug fix: arctan2 keeps the correct quadrant; the previous
    # arctan(y/x) was wrong for x < 0 and raised a divide-by-zero
    # warning at x == 0.  (Callers in this file only use r and theta.)
    phi = np.arctan2(y, x)
    R = np.sqrt(x**2 + y**2 + z**2)
    # invert R^2 = r^2 + a^2 sin^2(theta) for the oblate radius r
    r = np.sqrt(1/2 * (R**2 - a**2 + np.sqrt((R**2 - a**2)**2 + 4*a**2*z**2)))
    theta = np.arccos(z/r)
    return r, theta, phi
def get_isco(a):
    """Return the innermost-stable-circular-orbit (ISCO) radius for a
    dimensionless black hole spin *a* (gravitational units).

    Prograde orbits (a > 0) have a smaller ISCO than retrograde ones
    (a < 0); a = 0 gives the Schwarzschild value of 6.
    """
    z1 = 1 + (1 - a**2)**(1/3) * ((1 + a)**(1/3) + (1 - a)**(1/3))
    z2 = np.sqrt(3*a**2 + z1**2)
    return (3 + z2 - np.sign(a) * np.sqrt((3 - z1)*(3 + z1 + 2 * z2)))
def get_event_horizon(a):
    '''Return the (outer) event-horizon radius 1 + sqrt(1 - a^2) for a
    dimensionless black hole spin *a*, in gravitational units.'''
    return 1 + np.sqrt(1 - a * a)
def make_canvas(a, fig_width=9, fig_height=6, view_theta=60, view_phi=-130, axis='off'):
    '''
    Returns a figure and 3D axis with a black hole and disc drawn on.

    a: dimensionless black hole spin (sets horizon and ISCO radii).
    view_theta / view_phi: viewing angles in degrees.
    axis: forwarded to plt.axis(); 'off' hides the axis frame.
    '''
    fig = plt.figure(figsize=(fig_width, fig_height))
    ax = fig.add_subplot(111, projection='3d')
    # fixed world extent (gravitational units)
    ax.set_xlim(-15, 15)
    ax.set_ylim(-15, 15)
    ax.set_zlim(-9, 9)
    ax.view_init(90-view_theta, view_phi)
    ax = plot_black_hole(a, ax)
    ax = plot_disc(a, ax)
    plt.axis(axis)
    return fig, ax
def make_canvas_no_disc(a, fig_width=9, fig_height=6, view_theta=60, view_phi=-130, axis='off'):
    '''
    Returns a figure and 3D axis with a black hole drawn on (same as
    make_canvas but without the accretion disc).
    '''
    fig = plt.figure(figsize=(fig_width, fig_height))
    ax = fig.add_subplot(111, projection='3d')
    # fixed world extent (gravitational units)
    ax.set_xlim(-15, 15)
    ax.set_ylim(-15, 15)
    ax.set_zlim(-9, 9)
    ax.view_init(90-view_theta, view_phi)
    ax = plot_black_hole(a, ax)
    plt.axis(axis)
    return fig, ax
def plot_black_hole(a, ax):
    """
    Plot the black hole event horizon as a translucent black sphere on *ax*.
    """
    rH = get_event_horizon(a)
    u, v = np.mgrid[0:2*np.pi:20j, 0:np.pi:20j]
    # sqrt(rH^2 + a^2) matches the oblate mapping used in
    # cartesian_from_sphereical_polar
    xs = np.sqrt(rH*rH + a*a)*np.cos(u)*np.sin(v)
    ys = np.sqrt(rH*rH + a*a)*np.sin(u)*np.sin(v)
    zs = rH*np.cos(v)
    ax.plot_surface(xs, ys, zs, color="k", alpha=0.5)
    plt.axis('off')
    return ax
def plot_disc(a, ax):
    """
    Plot the accretion disc on *ax* as a flattened torus spanning the
    ISCO radius out to r = 20.  p = outer edge radius, q = inner edge radius.
    """
    rI = get_isco(a)
    N = 100
    thetad = np.linspace(0, 2.*np.pi, N)
    phid = np.linspace(0, 2.*np.pi, N)
    thetad, phid = np.meshgrid(thetad, phid)
    p, q = 20, rI
    c, b = (p+q)/2, (p-q)/2  # torus centre-line radius and tube radius
    xd = (c + b*np.cos(thetad)) * np.cos(phid)
    yd = (c + b*np.cos(thetad)) * np.sin(phid)
    # near-zero z extent squashes the torus into a flat annulus
    zd = 0.000000001*b * np.sin(thetad)
    ax.contourf(xd, yd, zd, [0.0000000001, 2], zdir='z', cmap=cm.autumn, alpha=0.5)
    return ax
def plot_ray(ax, x, y, z, color=None):
    ''' Plots a single photon path x(t), y(t), z(t) on the 3D axis *ax*.
    '''
    ax.plot(x, y, z, color=color)
    return
def animate_rays(xs, ys, zs, ts, a, cmap='jet', n_frame=None, disc=False, burst_mode=False, lw=2.0, ls='-', interval=1, blit=False, repeat=True,
                 fig_width=9, fig_height=6, view_theta=60, view_phi=-130):
    ''' Animates a set of ray paths described by x_i(t_i), y_i(t_i), z_i(t_i).
    Takes as input:
    xs = [x1(t1), x2(t2), x3(t3), ....]
    ys = [y1(t1), y2(t2), y3(t3), ....]
    zs = .... etc.

    a: black hole spin, used for the canvas and co-ordinate transforms.
    disc: also draw the accretion disc on the canvas.
    burst_mode: show only a short trailing segment of each ray rather
    than the whole path traced so far.
    Returns (fig, ax, anim); keep a reference to anim or the animation
    is garbage-collected.
    '''
    if n_frame is None:
        # default: one frame per three samples of the longest ray
        n_frame = int(max([len(x) for x in xs])/3)
    n_rays = len(xs)
    if cmap is None:
        colors = [None for k in range(n_rays)]
    else:
        cm = plt.cm.get_cmap(cmap)  # NOTE: locally shadows the module-level `cm` import
        colors = cm(np.linspace(0, 1, n_rays))
    def get_t_i(x, y, z, t, r0=30):
        ''' Returns the time when r ~ r0,
        and the array index when that happens.
        '''
        r, _, _ = sphereical_polar_from_cartesian(x, y, z, a)
        ind = np.argmin(r > r0)  # gets first instance of r < r0 in array.
        t_i = t[ind]
        return t_i, ind
    def get_t_max(ts):
        ''' Returns the largest proper time
        of all the photon paths
        '''
        tm = max([t[-1] for t in ts])
        return tm
    # Resample every ray onto a shared normalized time axis [0, 1] so
    # that all rays advance in step during the animation.
    x_animate = [[] for k in range(n_rays)]
    y_animate = [[] for k in range(n_rays)]
    z_animate = [[] for k in range(n_rays)]
    lamb_animate = np.linspace(0, 1, n_frame)
    t_j = get_t_max(ts)
    for k in range(n_rays):
        t_i, ind = get_t_i(xs[k], ys[k], zs[k], ts[k])
        t_m_i = max(ts[k])
        if t_m_i != t_j:
            # pad shorter rays with their final position up to t_j so the
            # interpolator covers the full [0, 1] interval
            x_interp = interp1d((np.append(ts[k][ind:], t_j)-t_i)/(t_j-t_i), np.append(xs[k][ind:], xs[k][-1]))
            y_interp = interp1d((np.append(ts[k][ind:], t_j)-t_i)/(t_j-t_i), np.append(ys[k][ind:], ys[k][-1]))
            z_interp = interp1d((np.append(ts[k][ind:], t_j)-t_i)/(t_j-t_i), np.append(zs[k][ind:], zs[k][-1]))
        else:
            x_interp = interp1d((ts[k][ind:]-t_i)/(t_j-t_i), xs[k][ind:])
            y_interp = interp1d((ts[k][ind:]-t_i)/(t_j-t_i), ys[k][ind:])
            z_interp = interp1d((ts[k][ind:]-t_i)/(t_j-t_i), zs[k][ind:])
        x_animate[k] = x_interp(lamb_animate)
        y_animate[k] = y_interp(lamb_animate)
        z_animate[k] = z_interp(lamb_animate)
    if disc:
        fig, ax = make_canvas(a, fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    else:
        fig, ax = make_canvas_no_disc(a, fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    line, = ax.plot([], [])
    plotlays, plotcols = [n_rays], colors
    # one Line3D artist per ray
    lines = []
    for index in range(n_rays):
        lobj = ax.plot([], [], lw=lw, color=plotcols[index], linestyle=ls)[0]
        lines.append(lobj)
    def init():
        # start every line empty
        for line in lines:
            line.set_data(np.asarray([]), np.asarray([]))
            line.set_3d_properties(np.asarray([]))
        return lines
    def animate(i):
        # frame i shows each ray up to resampled index i (or, in
        # burst_mode, only a window of ~n_frame/10 samples ending at i)
        xlist = [[] for k in range(n_rays)]
        ylist = [[] for k in range(n_rays)]
        zlist = [[] for k in range(n_rays)]
        if burst_mode:
            for j in range(n_rays):
                xlist[j] = x_animate[j][max(0, i-int(n_frame/10)):i]
                ylist[j] = y_animate[j][max(0, i-int(n_frame/10)):i]
                zlist[j] = z_animate[j][max(0, i-int(n_frame/10)):i]
        else:
            for j in range(n_rays):
                xlist[j] = x_animate[j][:i]
                ylist[j] = y_animate[j][:i]
                zlist[j] = z_animate[j][:i]
        for lnum, line in enumerate(lines):
            line.set_data(np.asarray(xlist[lnum]), np.asarray(ylist[lnum]))
            line.set_3d_properties(np.asarray(zlist[lnum]))
        return lines
    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=n_frame, interval=interval, blit=blit, repeat=repeat)
    return fig, ax, anim
def wavelength_to_rgb(wavelength, gamma=0.8, alpha=0.1, set_unobservable_grey=True):
    ''' Modified from http://www.noah.org/wiki/Wavelength_to_RGB_in_Python
    Convert a wavelength of light (nanometers) to an approximate
    (R, G, B, A) colour.  The visible range handled is 380-750 nm
    (789 THz through 400 THz); based on code by <NAME>,
    http://www.physics.sfasu.edu/astro/color/spectra.html

    Wavelengths outside the visible range are either rendered as
    transparent black with A = alpha (set_unobservable_grey=True) or
    clamped to the nearest visible endpoint with A = 1
    (set_unobservable_grey=False).
    '''
    wavelength = float(wavelength)
    if 380 <= wavelength <= 750:
        A = 1.0
    else:
        A = alpha if set_unobservable_grey else 1.0
        # In grey mode push the wavelength just outside the visible band
        # so it lands in the black fall-through branch below; otherwise
        # clamp it onto the nearest endpoint colour.
        if wavelength < 380:
            wavelength = 379.0 if set_unobservable_grey else 380.0
        if wavelength > 750:
            wavelength = 751.0 if set_unobservable_grey else 750.0
    # Piecewise-linear colour bands (violet -> red), each gamma-corrected.
    if 380 <= wavelength <= 440:
        attenuation = 0.3 + 0.7 * (wavelength - 380) / (440 - 380)
        R = ((-(wavelength - 440) / (440 - 380)) * attenuation) ** gamma
        G = 0.0
        B = (1.0 * attenuation) ** gamma
    elif 440 <= wavelength <= 490:
        R = 0.0
        G = ((wavelength - 440) / (490 - 440)) ** gamma
        B = 1.0
    elif 490 <= wavelength <= 510:
        R = 0.0
        G = 1.0
        B = (-(wavelength - 510) / (510 - 490)) ** gamma
    elif 510 <= wavelength <= 580:
        R = ((wavelength - 510) / (580 - 510)) ** gamma
        G = 1.0
        B = 0.0
    elif 580 <= wavelength <= 645:
        R = 1.0
        G = (-(wavelength - 645) / (645 - 580)) ** gamma
        B = 0.0
    elif 645 <= wavelength <= 750:
        attenuation = 0.3 + 0.7 * (750 - wavelength) / (750 - 645)
        R = (1.0 * attenuation) ** gamma
        G = 0.0
        B = 0.0
    else:
        # out-of-band (379 / 751 after the grey-mode shift above)
        R = 0.0
        G = 0.0
        B = 0.0
    return (R, G, B, A)
def get_spectral_color_map(set_unobservable_grey=True):
    ''' Returns a matplotlib colormap spanning ~350-780 nm built from
    wavelength_to_rgb, so scatter plots can be coloured directly by
    physical wavelength.  Out-of-visible wavelengths appear faint/black
    (or clamped to the endpoints if set_unobservable_grey is False).
    '''
    clim = (350, 780)  # nm range covered by the map
    norm = plt.Normalize(*clim)
    wl = np.arange(clim[0], clim[1]+1, 2)  # sample every 2 nm
    colorlist = list(zip(norm(wl), [wavelength_to_rgb(w, set_unobservable_grey=set_unobservable_grey) for w in wl]))
    spectralmap = matplotlib.colors.LinearSegmentedColormap.from_list("spectrum", colorlist)
    return spectralmap
def camera_image(a, theta0, N_r=50, N_phi=200, r_out=20, rest_wavelength=550, wavelength_function=None, set_unobservable_grey=False, set_intensity=False, print_progress=False):
    ''' Takes a camera image of the accretion disc.

    Ray-traces N_r * N_phi photons from an image plane inclined at
    theta0 degrees towards a black hole of spin a, then renders two
    figures: the general-relativistic image (colours set by the
    Doppler/gravitational redshift of rest_wavelength, or of
    wavelength_function(x, y) if given) and a naive 'Newtonian'
    comparison image with no light bending or redshift.

    Results of the slow ray trace are cached in .npy files keyed by the
    parameters and reloaded on subsequent calls.

    Returns (fig, ax, fig1, ax1): the GR figure/axis pair followed by
    the Newtonian one.
    '''
    save_string = ['N_r=', str(N_r), 'N_phi=', str(N_phi), 'a=', str(a), 'theta=', str(theta0), 'r_out=', str(r_out)]
    save_string = ''.join(save_string)
    # Require every cached array before entering the load branch.
    files_exist = os.path.isfile('im_x'+save_string+'.npy')
    files_exist = files_exist and os.path.isfile('im_y'+save_string+'.npy')
    # Bug fix: this previously checked 'phys_y' twice and never
    # 'phys_x', so a missing phys_x cache file crashed np.load below.
    files_exist = files_exist and os.path.isfile('phys_x'+save_string+'.npy')
    files_exist = files_exist and os.path.isfile('phys_y'+save_string+'.npy')
    files_exist = files_exist and os.path.isfile('red_shift'+save_string+'.npy')
    if files_exist:
        print('Loading physical parameters...')
        ri = get_isco(a)
        rH = get_event_horizon(a)
        im_x = np.load('im_x'+save_string+'.npy')
        im_y = np.load('im_y'+save_string+'.npy')
        phys_x = np.load('phys_x'+save_string+'.npy')
        phys_y = np.load('phys_y'+save_string+'.npy')
        red_shifts = np.load('red_shift'+save_string+'.npy')
    else:
        print('Computing physical parameters of %d photons... \n This may take a couple of minutes. '%(N_r*N_phi))
        def U0(r, a):
            # time component of the disc material's circular-orbit 4-velocity
            return (1+a*np.power(r, -3/2))/np.sqrt(1-3/r+2*a*np.power(r, -3/2))
        def Uphi(r, a):
            # azimuthal component of the disc material's circular-orbit 4-velocity
            return 1/(np.power(r, 3/2)*np.sqrt(1-3/r+2*a*np.power(r, -3/2)))
        def g(r, a, j, ri, r_out=20):
            # redshift factor for a photon of angular momentum j hitting the
            # disc at radius r; 10 flags "inside the ISCO" and 0 flags
            # "outside the disc" (both masked by the f_min/f_max cut below)
            if r < ri:
                return 10
            elif r > r_out:
                return 0
            return 1/(U0(r, a)) * 1/(1 + j*(Uphi(r, a)/U0(r, a)))
        ri = get_isco(a)
        rH = get_event_horizon(a)
        # polar grid in the image plane, from the ISCO out past r_out
        im_r = [ri + (r_out + 5 - ri) * k/(N_r-1) for k in range(N_r)]
        im_phi = [np.pi/2 + 2*np.pi * k/(N_phi-1) for k in range(N_phi)]
        im_x = []
        im_y = []
        red_shifts = []
        phys_x = []
        phys_y = []
        f_min = 0.01  # redshift factors outside [f_min, f_max] are masked as nan
        f_max = 2.00
        n_rays_traced = 0
        for i in range(N_r):
            for j in range(N_phi):
                n_rays_traced += 1
                alpha = im_r[i] * np.cos(im_phi[j])
                beta = im_r[i] * np.sin(im_phi[j]) * np.cos(theta0*np.pi/180)
                if beta > 0:
                    # stretch the upper half of the image plane to catch
                    # rays bent over the top of the black hole
                    beta *= np.power(1/np.cos(theta0*np.pi/180), .7)
                im_x.append(alpha)
                im_y.append(beta)
                # Renamed from `j` (which shadowed the loop index):
                # ang_mom is the photon angular momentum from the tracer.
                x, y, z, ang_mom = run_cpp_ray_trace([alpha, beta, theta0, a], get_times=False, get_j=True)
                rf, _, _ = sphereical_polar_from_cartesian(x[-1], y[-1], z[-1], a)  # gets the final radial co-ordinate
                if rf < rH:
                    f = 10  # photon fell through the horizon
                elif z[-1] > 0.5:
                    f = 10  # photon never reached the disc plane
                else:
                    f = g(rf, a, ang_mom, ri, r_out)
                red_shifts.append(f)
                phys_x.append(x[-1])
                phys_y.append(y[-1])
                if print_progress:
                    if n_rays_traced % 100 == 0:
                        printProgressBar(Q=n_rays_traced, size=N_r*N_phi, preText='Ray tracing....', postText='Photons traced: '+str(n_rays_traced)+'/'+str(N_r*N_phi))
        if print_progress:
            printProgressBar(Q=N_r*N_phi, size=N_r*N_phi, preText='Ray tracing....', postText='Photons traced: '+str(N_r*N_phi)+'/'+str(N_r*N_phi))
            print('')
        # mask the hole/miss markers and extreme redshift factors
        red_shifts = [red_shifts[k] if not (red_shifts[k] < f_min or red_shifts[k] > f_max) else np.nan for k in range(N_r*N_phi)]
        np.save('im_x'+save_string+'.npy', im_x)
        np.save('im_y'+save_string+'.npy', im_y)
        np.save('phys_x'+save_string+'.npy', phys_x)
        np.save('phys_y'+save_string+'.npy', phys_y)
        np.save('red_shift'+save_string+'.npy', red_shifts)
    # End else.
    print('Done. ')
    ### Plotting from here.
    # observed wavelength = emitted wavelength / redshift factor
    if wavelength_function is not None:
        wavelengths = [wavelength_function(phys_x[k], phys_y[k])/red_shifts[k] if not np.isnan(red_shifts[k]) else np.nan for k in range(N_r*N_phi)]
    else:
        wavelengths = [rest_wavelength/red_shifts[k] if not np.isnan(red_shifts[k]) else np.nan for k in range(N_r*N_phi)]
    # axis limits from the unmasked photons only
    imx_lim_u = max([im_x[k] if not np.isnan(wavelengths[k]) else 0 for k in range(N_r*N_phi)])
    imx_lim_l = min([im_x[k] if not np.isnan(wavelengths[k]) else 0 for k in range(N_r*N_phi)])
    imy_lim_u = max([im_y[k] if not np.isnan(wavelengths[k]) else 0 for k in range(N_r*N_phi)])
    imy_lim_l = min([im_y[k] if not np.isnan(wavelengths[k]) else 0 for k in range(N_r*N_phi)])
    spectralmap = get_spectral_color_map(set_unobservable_grey=set_unobservable_grey)
    fig = plt.figure(figsize=(9, 6))
    ax = fig.add_subplot(111)
    if set_intensity:
        # per-point alpha scaled by red_shifts**3, then renormalized to (0, .999]
        colors = spectralmap((np.array([wavelengths[k] if not np.isnan(wavelengths[k]) else 0 for k in range(N_r*N_phi)]) - 350)/(780-350))
        colors[:, 3] = [red_shifts[k]**3 if not np.isnan(red_shifts[k]) else 0.01 for k in range(N_r*N_phi)]
        colors[:, 3] = [.999 * (colors[k, 3] - min(colors[:, 3])) / (max(colors[:, 3]) - min(colors[:, 3])) for k in range(N_r*N_phi)]
        sc = ax.scatter(im_x, im_y, s=100, color=colors, edgecolors=None)
    else:
        sc = ax.scatter(im_x, im_y, s=100, c=wavelengths, cmap=spectralmap, vmin=349, vmax=781, edgecolors=None)
    ax.set_xlabel(r'$X_{im}$', fontsize=20)
    ax.set_ylabel(r'$Y_{im}$', fontsize=20, rotation=0)
    ax.set_xlim(imx_lim_l-1.5, imx_lim_u+1.5)
    ax.set_ylim(imy_lim_l-1.0, imy_lim_u+1.0)
    # second figure: the 'Newtonian' comparison image
    fig1 = plt.figure(figsize=(9, 6))
    ax1 = fig1.add_subplot(111)
    ax1.set_xlabel(r'$X_{im}$', fontsize=20)
    ax1.set_ylabel(r'$Y_{im}$', fontsize=20, rotation=0)
    ax1.set_xlim(imx_lim_l-1.5, imx_lim_u+1.5)
    ax1.set_ylim(imy_lim_l-1.0, imy_lim_u+1.0)
    if wavelength_function is None:
        # uniform-colour disc: draw the projected annulus directly
        plot_phi = np.linspace(0, 2*np.pi, 100)
        disc_x_in = ri * np.cos(plot_phi)
        disc_y_in = ri * np.cos(theta0*np.pi/180) * np.sin(plot_phi)
        disc_x_out = r_out * np.cos(plot_phi)
        disc_y_out = r_out * np.cos(theta0*np.pi/180) * np.sin(plot_phi)
        ax1.plot(disc_x_in, disc_y_in, color=wavelength_to_rgb(rest_wavelength), lw=0)
        ax1.plot(disc_x_out, disc_y_out, color=wavelength_to_rgb(rest_wavelength), lw=0)
        ax1.fill(np.append(disc_x_in, disc_x_out[::-1]), np.append(disc_y_in, disc_y_out[::-1]), color=wavelength_to_rgb(rest_wavelength))
        circle1 = plt.Circle((0, 0), rH, color='k')
        ax1.add_artist(circle1)
        if rH > ri * np.cos(theta0*np.pi/180):
            # horizon disc covers part of the annulus: redraw the disc
            # in front of it with line segments
            n = 40
            for k in range(n):
                plot_phi = np.linspace(np.pi/2, 3*np.pi/2, 100)
                r = ri + (rH/np.cos(theta0*np.pi/180) - ri) * k/(n-1)
                disc_x_line = r * np.sin(plot_phi)
                disc_y_line = r * np.cos(theta0*np.pi/180) * np.cos(plot_phi)
                ax1.plot(disc_x_line, disc_y_line, color=wavelength_to_rgb(rest_wavelength), lw=1)
    else:
        # position-dependent disc colour: draw concentric scatter rings
        n = 40
        for k in range(n):
            plot_phi = np.linspace(0, 2*np.pi, 200)
            r = ri + (r_out - ri) * k/(n-1)
            disc_x_line = r * np.cos(plot_phi)
            disc_y_line = r * np.cos(theta0*np.pi/180) * np.sin(plot_phi)
            ax1.scatter(disc_x_line, disc_y_line, s=100, c=wavelength_function(disc_x_line, disc_y_line/np.cos(theta0*np.pi/180)), cmap=spectralmap, vmin=350, vmax=780)
        if rH > ri * np.cos(theta0*np.pi/180):
            for k in range(n):  #### Can do this better with ax1.fill(), like above, with semicircle function etc.
                plot_phi = np.linspace(0, np.pi, 200)
                r = rH * k/(n-1)
                disc_x_line = r * np.cos(plot_phi)
                disc_y_line = r * np.sin(plot_phi)
                ax1.scatter(disc_x_line, disc_y_line, s=1, color='k')
        else:
            for k in range(n):  #### Can do this better with ax1.fill(), like above, with semicircle function etc.
                plot_phi = np.linspace(0, 2*np.pi, 400)
                r = rH * k/(n-1)
                disc_x_line = r * np.cos(plot_phi)
                disc_y_line = r * np.sin(plot_phi)
                ax1.scatter(disc_x_line, disc_y_line, s=1, color='k')
    ax.set_title('General Relativistic Universe', fontsize=18)
    ax1.set_title("'Newtonian' Universe", fontsize=18)
    ### End of camera image.
    return fig, ax, fig1, ax1
def plot_rays_from_parameters(spins, thetas, alphas, betas, technique='NoDisc', fig_width=9, fig_height=6, view_theta=60, view_phi=-130):
    """Trace and plot one ray per (spin, theta, alpha, beta) combination.

    Each argument may be a scalar or a list; length-1 (or scalar)
    arguments are broadcast to the longest list.  Returns (fig, ax).
    NOTE(review): the canvas is always drawn for spins[0]; mixing
    different spins in one call draws the hole/disc for the first only.
    """
    ## Cast all parameters into lists
    if type(spins) != type([]):
        spins = [spins]
    if type(thetas) != type([]):
        thetas = [thetas]
    if type(alphas) != type([]):
        alphas = [alphas]
    if type(betas) != type([]):
        betas = [betas]
    ## Check they have the right lengths
    max_len = max([len(spins), len(thetas), len(alphas), len(betas)])
    len_test = (len(spins) == max_len or len(spins) == 1)
    len_test = len_test and (len(thetas) == max_len or len(thetas) == 1)
    len_test = len_test and (len(alphas) == max_len or len(alphas) == 1)
    len_test = len_test and (len(betas) == max_len or len(betas) == 1)
    if not len_test:
        raise ValueError('Initial ray parameters must be lists of length 1 or equal length. \n You have provided lists with lengths %d, %d, %d, %d.'
                         %(len(spins), len(thetas), len(alphas), len(betas)))
    # broadcast the length-1 lists up to max_len
    if len(spins) == 1:
        spins = [spins[0] for k in range(max_len)]
    if len(thetas) == 1:
        thetas = [thetas[0] for k in range(max_len)]
    if len(alphas) == 1:
        alphas = [alphas[0] for k in range(max_len)]
    if len(betas) == 1:
        betas = [betas[0] for k in range(max_len)]
    n_ray = max_len
    # trace every ray with the C++ backend
    xs, ys, zs, ts = [[] for k in range(n_ray)], [[] for k in range(n_ray)], [[] for k in range(n_ray)], [[] for k in range(n_ray)]
    for i, a0, b0, theta0, a in zip(range(n_ray), alphas, betas, thetas, spins):
        x, y, z, t = run_cpp_ray_trace([a0, b0, theta0, a], technique, get_times=True)
        xs[i], ys[i], zs[i], ts[i] = x, y, z, t
    if technique == 'NoDisc':
        fig, ax = make_canvas_no_disc(spins[0], fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    else:
        fig, ax = make_canvas(spins[0], fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    # colour the rays along the jet colormap
    cm = plt.cm.get_cmap('jet')
    colors = cm(np.linspace(0, 1, n_ray))
    for i, x, y, z in zip(range(n_ray), xs, ys, zs):
        plot_ray(ax, x, y, z, color=colors[i])
    return fig, ax
def animate_rays_from_parameters(spins, thetas, alphas, betas, technique='NoDisc', cmap='jet', n_frame=None, burst_mode=False,
                                 lw=2.0, ls='-', interval=1, blit=False, repeat=True, fig_width=9, fig_height=6, view_theta=60, view_phi=-130):
    """Trace one ray per (spin, theta, alpha, beta) combination and animate them.

    Same broadcasting rules as plot_rays_from_parameters: each argument
    may be a scalar or a list, and length-1 arguments are broadcast to
    the longest list.  Remaining keyword arguments are forwarded to
    animate_rays.  Returns (anim_fig, anim_ax, anim).
    """
    ## Cast all parameters into lists
    if type(spins) != type([]):
        spins = [spins]
    if type(thetas) != type([]):
        thetas = [thetas]
    if type(alphas) != type([]):
        alphas = [alphas]
    if type(betas) != type([]):
        betas = [betas]
    ## Check they have the right lengths
    max_len = max([len(spins), len(thetas), len(alphas), len(betas)])
    len_test = (len(spins) == max_len or len(spins) == 1)
    len_test = len_test and (len(thetas) == max_len or len(thetas) == 1)
    len_test = len_test and (len(alphas) == max_len or len(alphas) == 1)
    len_test = len_test and (len(betas) == max_len or len(betas) == 1)
    if not len_test:
        raise ValueError('Initial ray parameters must be lists of length 1 or equal length. \n You have provided lists with lengths %d, %d, %d, %d.'
                         %(len(spins), len(thetas), len(alphas), len(betas)))
    # broadcast the length-1 lists up to max_len
    if len(spins) == 1:
        spins = [spins[0] for k in range(max_len)]
    if len(thetas) == 1:
        thetas = [thetas[0] for k in range(max_len)]
    if len(alphas) == 1:
        alphas = [alphas[0] for k in range(max_len)]
    if len(betas) == 1:
        betas = [betas[0] for k in range(max_len)]
    n_ray = max_len
    # trace every ray with the C++ backend
    xs, ys, zs, ts = [[] for k in range(n_ray)], [[] for k in range(n_ray)], [[] for k in range(n_ray)], [[] for k in range(n_ray)]
    for i, a0, b0, theta0, a in zip(range(n_ray), alphas, betas, thetas, spins):
        x, y, z, t = run_cpp_ray_trace([a0, b0, theta0, a], technique, get_times=True)
        xs[i], ys[i], zs[i], ts[i] = x, y, z, t
    # canvas/disc follows the technique; spins[0] sets the drawn geometry
    if technique == 'NoDisc':
        anim_fig, anim_ax, anim = animate_rays(xs, ys, zs, ts, spins[0], disc=False, cmap=cmap, n_frame=n_frame, burst_mode=burst_mode,
                                               lw=lw, ls=ls, interval=interval, blit=blit, repeat=repeat, fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    else:
        anim_fig, anim_ax, anim = animate_rays(xs, ys, zs, ts, spins[0], disc=True, cmap=cmap, n_frame=n_frame, burst_mode=burst_mode,
                                               lw=lw, ls=ls, interval=interval, blit=blit, repeat=repeat, fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    return anim_fig, anim_ax, anim
def plot_and_animate_rays_from_parameters(spins, thetas, alphas, betas, technique='NoDisc', cmap='jet', n_frame=None, burst_mode=False,
                                          lw=2.0, ls='-', interval=1, blit=False, repeat=True, fig_width=9, fig_height=6, view_theta=60, view_phi=-130):
    """Trace rays once, then both plot and animate them.

    Combines plot_rays_from_parameters and animate_rays_from_parameters
    without re-running the (slow) ray trace.  Same broadcasting rules:
    arguments may be scalars or lists; length-1 arguments are broadcast.
    Returns (plot_fig, plot_ax, anim_fig, anim_ax, anim).
    """
    ## Cast all parameters into lists
    if type(spins) != type([]):
        spins = [spins]
    if type(thetas) != type([]):
        thetas = [thetas]
    if type(alphas) != type([]):
        alphas = [alphas]
    if type(betas) != type([]):
        betas = [betas]
    ## Check they have the right lengths
    max_len = max([len(spins), len(thetas), len(alphas), len(betas)])
    len_test = (len(spins) == max_len or len(spins) == 1)
    len_test = len_test and (len(thetas) == max_len or len(thetas) == 1)
    len_test = len_test and (len(alphas) == max_len or len(alphas) == 1)
    len_test = len_test and (len(betas) == max_len or len(betas) == 1)
    if not len_test:
        raise ValueError('Initial ray parameters must be lists of length 1 or equal length. \n You have provided lists with lengths %d, %d, %d, %d.'
                         %(len(spins), len(thetas), len(alphas), len(betas)))
    # broadcast the length-1 lists up to max_len
    if len(spins) == 1:
        spins = [spins[0] for k in range(max_len)]
    if len(thetas) == 1:
        thetas = [thetas[0] for k in range(max_len)]
    if len(alphas) == 1:
        alphas = [alphas[0] for k in range(max_len)]
    if len(betas) == 1:
        betas = [betas[0] for k in range(max_len)]
    n_ray = max_len
    # trace every ray once; reused by both the static plot and the animation
    xs, ys, zs, ts = [[] for k in range(n_ray)], [[] for k in range(n_ray)], [[] for k in range(n_ray)], [[] for k in range(n_ray)]
    for i, a0, b0, theta0, a in zip(range(n_ray), alphas, betas, thetas, spins):
        x, y, z, t = run_cpp_ray_trace([a0, b0, theta0, a], technique, get_times=True)
        xs[i], ys[i], zs[i], ts[i] = x, y, z, t
    # static plot
    if technique == 'NoDisc':
        plot_fig, plot_ax = make_canvas_no_disc(spins[0], fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    else:
        plot_fig, plot_ax = make_canvas(spins[0], fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    cm = plt.cm.get_cmap('jet')
    colors = cm(np.linspace(0, 1, n_ray))
    for i, x, y, z in zip(range(n_ray), xs, ys, zs):
        plot_ray(plot_ax, x, y, z, color=colors[i])
    # animation of the same rays
    if technique == 'NoDisc':
        anim_fig, anim_ax, anim = animate_rays(xs, ys, zs, ts, spins[0], disc=False, cmap=cmap, n_frame=n_frame, burst_mode=burst_mode,
                                               lw=lw, ls=ls, interval=interval, blit=blit, repeat=repeat, fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    else:
        anim_fig, anim_ax, anim = animate_rays(xs, ys, zs, ts, spins[0], disc=True, cmap=cmap, n_frame=n_frame, burst_mode=burst_mode,
                                               lw=lw, ls=ls, interval=interval, blit=blit, repeat=repeat, fig_width=fig_width, fig_height=fig_height, view_theta=view_theta, view_phi=view_phi)
    return plot_fig, plot_ax, anim_fig, anim_ax, anim
######################### END. #########################
|
#!/usr/bin/env python3
"""Interpolation in a diffusion model's latent space."""
import argparse
from functools import partial
from pathlib import Path
import jax
import jax.numpy as jnp
from PIL import Image
from tqdm import trange
from diffusion import get_model, get_models, load_params, sampling, utils
MODULE_DIR = Path(__file__).resolve().parent
def main():
    """Interpolate between two points in a diffusion model's latent space.

    The two endpoint latents are standard-normal draws from ``--seed-1`` and
    ``--seed-2``; when ``--init-1``/``--init-2`` are given, the corresponding
    endpoint is instead obtained by inverting the init image through the
    reverse sampling loop.  ``-n`` blended latents are then decoded in
    batches and written to ``out_00000.png``, ``out_00001.png``, ...
    """
    p = argparse.ArgumentParser(description=__doc__,
                                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument('--batch-size', '-bs', type=int, default=4,
                   help='the number of images per batch')
    p.add_argument('--checkpoint', type=str,
                   help='the checkpoint to use')
    p.add_argument('--init-1', type=str,
                   help='the init image for the starting point')
    p.add_argument('--init-2', type=str,
                   help='the init image for the ending point')
    p.add_argument('--model', type=str, choices=get_models(), required=True,
                   help='the model to use')
    p.add_argument('-n', type=int, default=16,
                   help='the number of images to sample')
    p.add_argument('--seed-1', type=int, default=0,
                   help='the random seed for the starting point')
    p.add_argument('--seed-2', type=int, default=1,
                   help='the random seed for the ending point')
    p.add_argument('--steps', type=int, default=1000,
                   help='the number of timesteps')
    args = p.parse_args()
    model = get_model(args.model)
    checkpoint = args.checkpoint
    # Default to the bundled checkpoint for the chosen model.
    if not checkpoint:
        checkpoint = MODULE_DIR / f'checkpoints/{args.model}.pkl'
    params = load_params(checkpoint)
    key_1 = jax.random.PRNGKey(args.seed_1)
    key_2 = jax.random.PRNGKey(args.seed_2)
    latent_1 = jax.random.normal(key_1, [1, *model.shape])
    latent_2 = jax.random.normal(key_2, [1, *model.shape])
    # model.shape is unpacked as (channels, height, width) here; x/y are used
    # as the PIL resize target below.
    _, y, x = model.shape
    reverse_sample_step = partial(sampling.jit_reverse_sample_step, extra_args={})
    reverse_steps = utils.get_ddpm_schedule(jnp.linspace(0, 1, args.steps + 1))
    if args.init_1:
        # Replace the random starting latent with the inversion of an image.
        init_1 = Image.open(args.init_1).convert('RGB').resize((x, y), Image.LANCZOS)
        init_1 = utils.from_pil_image(init_1)[None]
        print('Inverting the starting init image...')
        latent_1 = sampling.reverse_sample_loop(model, params, key_1, init_1, reverse_steps,
                                                reverse_sample_step)
    if args.init_2:
        # Replace the random ending latent with the inversion of an image.
        init_2 = Image.open(args.init_2).convert('RGB').resize((x, y), Image.LANCZOS)
        init_2 = utils.from_pil_image(init_2)[None]
        print('Inverting the ending init image...')
        latent_2 = sampling.reverse_sample_loop(model, params, key_2, init_2, reverse_steps,
                                                reverse_sample_step)
    def run(weights):
        # Blend the endpoints using the model's alpha/sigma timestep
        # parameterization, then decode the blended latents to images.
        alphas, sigmas = utils.t_to_alpha_sigma(weights)
        latents = latent_1 * alphas[:, None, None, None] + latent_2 * sigmas[:, None, None, None]
        sample_step = partial(sampling.jit_sample_step, extra_args={})
        steps = utils.get_ddpm_schedule(jnp.linspace(1, 0, args.steps + 1)[:-1])
        dummy_key = jax.random.PRNGKey(0)
        return sampling.sample_loop(model, params, dummy_key, latents, steps, 0., sample_step)
    def run_all(weights):
        # Decode in batches and save each frame with a zero-padded index.
        for i in trange(0, len(weights), args.batch_size):
            outs = run(weights[i:i+args.batch_size])
            for j, out in enumerate(outs):
                utils.to_pil_image(out).save(f'out_{i + j:05}.png')
    try:
        print('Sampling...')
        run_all(jnp.linspace(0, 1, args.n))
    except KeyboardInterrupt:
        # Allow the user to stop sampling early without a traceback.
        pass
if __name__ == '__main__':
    main()
|
from itertools import product
from typing import Dict, Any, List, Tuple
import json
import glob
import os
import copy
from ml_gym.io.config_parser import YAMLConfigLoader
class GridSearch:
    """Expand "sweep" configuration dictionaries into concrete grid-search
    configurations, and compare/look up configs within an expanded grid.
    """

    # GRID SEARCH CREATION
    @staticmethod
    def _get_dict_obj(keys: List[str], values: Tuple[Any]) -> Dict[str, Any]:
        """Merge two equal-length sequences into a dictionary, one acting as
        the keys and the other one as the values.

        :param keys: dictionary keys
        :param values: dictionary values, positionally matched to ``keys``
        :return: merged dictionary
        """
        return dict(zip(keys, values))

    @staticmethod
    def _find_products(splits_by_keys: Dict[str, List[Any]]) -> List[Dict[str, Any]]:
        """Take a dictionary str -> List, calculate the cartesian product of
        the lists and assign each product tuple its respective keys.

        Example: {"p1": ["A", "B", "C"], "p2": ["a", "b"], "p3": [1]} produces
        [{"p1": "A", "p2": "a", "p3": 1}, ..., {"p1": "C", "p2": "b", "p3": 1}].

        :param splits_by_keys: mapping from key to its candidate values
        :return: list of flat configuration dictionaries
        """
        keys = list(splits_by_keys.keys())
        # itertools.product already yields 1-tuples for a single input list,
        # so the previous special case for len(values) == 1 was redundant.
        return [GridSearch._get_dict_obj(keys, combo)
                for combo in product(*splits_by_keys.values())]

    @staticmethod
    def _is_sweep_node(node: Dict[str, Any]) -> bool:
        """Check whether ``node`` is a sweep node (a dict with a "sweep" key)."""
        return GridSearch._is_node(node) and "sweep" in node

    @staticmethod
    def _is_node(item) -> bool:
        """Check whether ``item`` is a node (dict). Otherwise it's a leaf."""
        return isinstance(item, dict)

    @staticmethod
    def _expand_sweep_node(node: Dict[str, Any]) -> Dict[str, Any]:
        """Fully materialize a sweep node depending on its sweep type.

        Only "absolute" (an explicit value list) is supported so far.

        :raises NotImplementedError: for any sweep type other than "absolute"
        """
        if node["sweep"] == "absolute":
            return node
        else:
            raise NotImplementedError("Other sweep types have not been implemented, yet!")

    @staticmethod
    def _split_config(node: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Take a grid-search dictionary (or a node within one) and create the
        corresponding list of concrete configs.
        """
        splits_by_key = {}
        for child_key, child in node.items():
            if GridSearch._is_sweep_node(child):  # sweep node
                child = GridSearch._expand_sweep_node(child)
                all_splits = []
                for item in child["values"]:  # for each element in the sweep
                    splits = GridSearch._split_config(item) if GridSearch._is_node(item) else [item]
                    all_splits.extend(splits)
                splits_by_key[child_key] = all_splits
            elif GridSearch._is_node(child):  # normal dictionary node
                splits_by_key[child_key] = GridSearch._split_config(child)
            else:  # leaf node
                splits_by_key[child_key] = [child]
        configs = GridSearch._find_products(splits_by_key)
        # Deep copies so the resulting configs don't share nested sub-dicts.
        return [copy.deepcopy(config) for config in configs]

    # GS creation
    @staticmethod
    def create_gs_configs_from_path(config_path: str) -> List[Dict]:
        """Load a grid-search YAML file and expand it into concrete configs."""
        gs_config = YAMLConfigLoader.load(config_path)
        return GridSearch.create_gs_from_config_dict(gs_config)

    @staticmethod
    def create_gs_from_config_dict(gs_config: Dict) -> List[Dict]:
        """Expand an in-memory grid-search dictionary into concrete configs."""
        return GridSearch._split_config(gs_config)

    # CONFIG PART OF GS
    @staticmethod
    def is_config_in_gs(d: Dict, gs: Dict, negligible_paths: Dict = None) -> bool:
        """Return True if config ``d`` matches any config expanded from grid
        search ``gs``, ignoring the paths listed in ``negligible_paths``."""
        if negligible_paths is None:
            negligible_paths = {}
        return any(GridSearch._is_config_equal(d, gs_config, negligible_paths)
                   for gs_config in GridSearch._split_config(gs))

    @staticmethod
    def _delete_branches(d, negligible_paths):
        """Remove, in place, the branches of ``d`` named by ``negligible_paths``.

        negligible example:
        ```
        {
            "optimizer": {
                "lr": None,
                "weight_decays": [0.0001, 123],
            }
        }
        ```
        ---> removes path root->optimizer->lr from d. A ``None`` entry marks
        the element to delete; non-None entries are descended into.

        Args:
            d: (sub-)config to prune, mutated in place
            negligible_paths: dict/list mirror of the paths to remove
        """
        if isinstance(negligible_paths, dict):
            for key in negligible_paths.keys():
                if negligible_paths[key] is not None:
                    GridSearch._delete_branches(d[key], negligible_paths[key])
                else:
                    if key in d:
                        d.pop(key)
        if isinstance(negligible_paths, list):
            # Bug fix: pop from the end so earlier removals do not shift the
            # indices of elements still to be processed (forward iteration
            # removed the wrong elements when several entries were None).
            for i in reversed(range(len(negligible_paths))):
                if negligible_paths[i] is None:
                    d.pop(i)
                else:
                    GridSearch._delete_branches(d[i], negligible_paths[i])

    @staticmethod
    def _is_config_equal(d1: Dict, d2: Dict, negligible_paths: Dict = None) -> bool:
        """Compare two configs for equality, ignoring ``negligible_paths``.

        NOTE: This only works with dictionaries that don't have sweeps.

        Args:
            d1: first config
            d2: second config
            negligible_paths: paths excluded from the comparison

        Returns:
            True if the pruned configs are structurally equal.
        """
        def ordered(obj):
            # Canonicalize nested dicts/lists for order-independent comparison.
            if isinstance(obj, dict):
                return sorted((k, ordered(v)) for k, v in obj.items())
            if isinstance(obj, list):
                return sorted(ordered(x) for x in obj)
            else:
                return obj
        if negligible_paths is None:
            negligible_paths = {}
        # Bug fix: deep copies. The previous shallow .copy() meant that
        # _delete_branches mutated the callers' nested dictionaries in place.
        d1, d2 = copy.deepcopy(d1), copy.deepcopy(d2)
        GridSearch._delete_branches(d1, negligible_paths)
        GridSearch._delete_branches(d2, negligible_paths)
        return ordered(d1) == ordered(d2)

    # Update configs from GS
    @staticmethod
    def update_config_from_grid_search(old_config: Dict, gs: List[Dict], negligible_paths: Dict):
        """Return the grid-search config matching ``old_config``.

        :raises Exception: if no matching config exists in the grid search
        """
        for new_config in gs:
            if GridSearch._is_config_equal(old_config, new_config, negligible_paths):
                return new_config
        raise Exception("Config is not present in grid search!")

    @staticmethod
    def get_rerun_configs(old_configs: List[Dict], gs: Dict, negligible_paths: Dict) -> List[Dict[str, Any]]:
        """Map each old config to its counterpart in the expanded grid search."""
        gs_configs = GridSearch.create_gs_from_config_dict(gs)
        return [GridSearch.update_config_from_grid_search(old_config, gs_configs, negligible_paths)
                for old_config in old_configs]

    @staticmethod
    def are_sweeps_identical(gs_config_path: str, experiment_folder: str) -> bool:
        """Check that the grid search at ``gs_config_path`` matches the configs
        stored under ``experiment_folder`` (one config.json per experiment)."""
        with open(gs_config_path, "r") as f:
            gs_config = json.load(f)
        gs_configs_list = GridSearch.create_gs_configs_from_path(gs_config_path)
        config_paths = glob.glob(os.path.join(experiment_folder, "**/config.json"), recursive=True)
        if len(gs_configs_list) != len(config_paths):
            print(f"GS lengths do not match! (gs: {len(gs_configs_list)} vs experiments: {len(config_paths)})")
            return False
        else:
            configs = []
            for path in config_paths:
                with open(path, "r") as f:
                    configs.append(json.load(f))
            equal_map = [GridSearch.is_config_in_gs(d=config, gs=gs_config) for config in configs]
            print(equal_map)
            return all(equal_map)
if __name__ == "__main__":
    # Small self-demo: expand a nested sweep config and exercise the
    # comparison/pruning helpers.
    sweep_config = {
        "p_1": 1,
        "p_2": {"sweep": "absolute", "values": [2, 3, 4]},
        "p_3": {
            "sweep": "absolute",
            "values": [
                {"p_3.1": "XYZ"},
                {"p_3.1": {"sweep": "absolute", "values": [6, 7]}},
            ],
        },
        "p_4": [12, 13],
    }
    expanded = GridSearch._split_config(sweep_config)
    print(f" Total configs found: {len(expanded)}")
    for expanded_config in expanded:
        print(expanded_config)
    print(GridSearch._is_config_equal(expanded[0], expanded[1]))
    sample = expanded[0]
    sample_negligible = {'p_3': {'p_3.1': None}, 'p_1': None, 'p_4': [12, None]}
    GridSearch._delete_branches(sample, sample_negligible)
    print(sample)
    print(GridSearch._is_config_equal(sample, sample))
|
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect connect-cli.
# Copyright (c) 2019-2021 Ingram Micro. All Rights Reserved.
import click
from click.exceptions import ClickException
from cmr import render
from connect.cli.core import group
from connect.cli.core.config import pass_config
from connect.cli.core.utils import continue_or_quit
from connect.cli.plugins.exceptions import SheetNotFoundError
from connect.cli.plugins.product.clone import ProductCloner
from connect.cli.plugins.product.export import dump_product
from connect.cli.plugins.product.sync import (
ActionsSynchronizer,
CapabilitiesSynchronizer,
ConfigurationValuesSynchronizer,
GeneralSynchronizer,
ItemSynchronizer,
MediaSynchronizer,
ParamsSynchronizer,
StaticResourcesSynchronizer,
TemplatesSynchronizer,
)
from connect.client import ClientError, ConnectClient, R, RequestLogger
# Root click group under which all `product` subcommands are registered.
# A `#` comment is used instead of a docstring so click's generated help
# text is not altered.
@group(name='product', short_help='Manage product definitions.')
def grp_product():
    pass  # pragma: no cover
@grp_product.command(
    name='list',
    short_help='List products.',
)
@click.option(
    '--query',
    '-q',
    'query',
    help='RQL query expression.',
)
@click.option(
    '--page-size',
    '-p',
    'page_size',
    type=int,
    help='Number of products per page.',
    default=25,
)
@click.option(
    '--always-continue',
    '-c',
    'always_continue',
    is_flag=True,
    help='Do not prompt to continue.',
)
@pass_config
def cmd_list_products(config, query, page_size, always_continue):
    # Lists products visible to the active account, paging the output.
    # Comments only (no docstring) so click's help output is unchanged.
    acc_id = config.active.id
    acc_name = config.active.name
    if not config.silent:
        click.echo(
            click.style(
                f'Current active account: {acc_id} - {acc_name}\n',
                fg='blue',
            ),
        )
    client = ConnectClient(
        api_key=config.active.api_key,
        endpoint=config.active.endpoint,
        use_specs=False,
        max_retries=3,
        logger=RequestLogger() if config.verbose else None,
    )
    # Default query: vendors see their own (unversioned) products; other
    # account types see products visible via listing or syndication.
    if config.active.is_vendor():
        default_query = R().visibility.owner.eq(True) & R().version.null(True)
    else:
        default_query = R().visibility.listing.eq(True) | R().visibility.syndication.eq(True)
    query = query or default_query
    paging = 0
    query_products = client.products.filter(query).limit(page_size)
    for prod in query_products:
        paging += 1
        click.echo(
            f"{prod['id']} - {prod['name']}",
        )
        # Pause after each full page unless auto-continue was requested or
        # the last product has been reached.
        if paging % page_size == 0 and paging != query_products.count() and not always_continue:
            if not continue_or_quit():
                return
@grp_product.command(
    name='export',
    short_help='Export a product to an excel file.',
)
@click.argument('product_id', metavar='product_id', nargs=1, required=True) # noqa: E304
@click.option(
    '--out',
    '-o',
    'output_file',
    type=click.Path(exists=False, file_okay=True, dir_okay=False),
    help='Output Excel file name.',
)
@click.option(
    '--output_path',
    '-p',
    'output_path',
    type=click.Path(exists=True, file_okay=False, dir_okay=True),
    help='Directory where to store the export.',
)
@pass_config
def cmd_dump_products(config, product_id, output_file, output_path):
    # Exports a product definition to an Excel workbook.
    # Comments only (no docstring) so click's help output is unchanged.
    acc_id = config.active.id
    acc_name = config.active.name
    if not config.silent:
        click.echo(
            click.style(
                f'Current active account: {acc_id} - {acc_name}\n',
                fg='blue',
            ),
        )
    # dump_product performs the API calls and workbook writing and returns
    # the path of the generated file.
    outfile = dump_product(
        config.active.endpoint,
        config.active.api_key,
        product_id,
        output_file,
        config.silent,
        config.verbose,
        output_path,
    )
    if not config.silent:
        click.echo(
            click.style(
                f'\nThe product {product_id} has been successfully exported to {outfile}.',
                fg='green',
            ),
        )
@grp_product.command(
    name='sync',
    short_help='Synchronize a product from an excel file.',
)
@click.argument('input_file', metavar='input_file', nargs=1, required=True) # noqa: E304
@click.option( # noqa: E304
    '--yes',
    '-y',
    'yes',
    is_flag=True,
    help='Answer yes to all questions.',
)
@pass_config
def cmd_sync_products(config, input_file, yes): # noqa: CCR001
    # Synchronizes every sheet of a product workbook back to the platform.
    # Comments only (no docstring) so click's help output is unchanged.
    acc_id = config.active.id
    acc_name = config.active.name
    # A bare name is treated as the clone-style layout <name>/<name>.xlsx.
    if '.xlsx' not in input_file:
        input_file = f'{input_file}/{input_file}.xlsx'
    if not config.silent:
        click.echo(
            click.style(
                f'Current active account: {acc_id} - {acc_name}\n',
                fg='blue',
            ),
        )
    client = ConnectClient(
        api_key=config.active.api_key,
        endpoint=config.active.endpoint,
        use_specs=False,
        max_retries=3,
        logger=RequestLogger() if config.verbose else None,
    )
    synchronizer = GeneralSynchronizer(
        client,
        config.silent,
    )
    # Resolve the product id from the workbook before asking to proceed.
    product_id = synchronizer.open(input_file, 'General Information')
    if not yes:
        click.confirm(
            'Are you sure you want to synchronize '
            f'the product {product_id} ?',
            abort=True,
        )
        click.echo('')
    general_errors = synchronizer.sync()
    if general_errors and not config.silent:
        click.echo(
            click.style(
                f'\nError synchronizing general product information: {".".join(general_errors)}\n',
                fg='magenta',
            ),
        )
    # Each *_sync helper returns a per-module result dict; a missing sheet
    # raises SheetNotFoundError and only skips that module.
    results_tracker = []
    try:
        results_tracker.append(item_sync(client, config, input_file))
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
    try:
        results_tracker.append(capabilities_sync(client, config, input_file))
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
    try:
        results_tracker.append(static_resources_sync(client, config, input_file))
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
    try:
        results_tracker.append(templates_sync(client, config, input_file))
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
    # NOTE(review): param_task handles SheetNotFoundError internally, but as
    # written it returns an unbound local in that path (UnboundLocalError) —
    # see param_task below.
    results_tracker.append(
        param_task(
            client,
            config,
            input_file,
            product_id,
            'Ordering Parameters',
        ),
    )
    results_tracker.append(
        param_task(
            client,
            config,
            input_file,
            product_id,
            'Fulfillment Parameters',
        ),
    )
    results_tracker.append(
        param_task(
            client,
            config,
            input_file,
            product_id,
            'Configuration Parameters',
        ),
    )
    try:
        results_tracker.append(
            actions_sync(
                client,
                config,
                input_file,
            ),
        )
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
    try:
        results_tracker.append(
            media_sync(
                client,
                config,
                input_file,
            ),
        )
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
    try:
        results_tracker.append(
            config_values_sync(
                client,
                config,
                input_file,
            ),
        )
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
    # Render the aggregated per-module summary (and optional error details).
    print_results(
        product_id=product_id,
        silent=config.silent,
        results_tracker=results_tracker,
    )
@grp_product.command(
    name='clone',
    short_help='Create a clone of a product.',
)
@click.argument('source_product_id', metavar='product_id', nargs=1, required=True) # noqa: E304
@click.option(
    '--source_account',
    '-s',
    'source_account',
    help='Source account ID',
)
@click.option(
    '--destination_account',
    '-d',
    'destination_account',
    help='Destination account ID',
)
@click.option(
    '--new-product-name',
    '-n',
    'name',
    help='Cloned product name',
)
@click.option( # noqa: E304
    '--yes',
    '-y',
    'yes',
    is_flag=True,
    help='Answer yes to all questions.',
)
@pass_config
def cmd_clone_products(config, source_product_id, source_account, destination_account, name, yes):
    # Clones a product, optionally across accounts: dump from the source
    # account, create a new product in the destination account, inject data.
    # Comments only (no docstring) so click's help output is unchanged.
    if not config.active.is_vendor():
        raise ClickException(
            'The clone command is only available for vendor accounts.',
        )
    # NOTE(review): 'chracters' typo in the user-facing message below is kept
    # intact here — changing runtime strings is out of scope for doc edits.
    if name and len(name) > 32:
        click.echo(
            click.style(
                f'New product name can not exceed 32 chracters, provided as name {name}',
                fg='red',
            ),
        )
        exit(-1)
    # Default both accounts to the currently active one. The source account
    # is activated last so it is the active account for the dump step.
    if destination_account:
        config.activate(destination_account)
    else:
        destination_account = config.active.id
    if source_account:
        config.activate(source_account)
    else:
        source_account = config.active.id
    acc_id = config.active.id
    acc_name = config.active.name
    if not config.silent:
        click.echo(
            click.style(
                f'Current active account: {acc_id} - {acc_name}\n',
                fg='blue',
            ),
        )
    client = ConnectClient(
        api_key=config.active.api_key,
        endpoint=config.active.endpoint,
        use_specs=False,
        max_retries=3,
        logger=RequestLogger() if config.verbose else None,
    )
    if not yes:
        click.confirm(
            'Are you sure you want to Clone '
            f'the product {source_product_id} ?',
            abort=True,
        )
        click.echo('')
    # Verify the source product exists before doing any work.
    try:
        client.products[source_product_id].get()
    except ClientError:
        click.echo(
            click.style(
                f'Product {source_product_id} does not exist',
                fg='red',
            ),
        )
        exit(-1)
    synchronizer = ProductCloner(
        config=config,
        source_account=source_account,
        destination_account=destination_account,
        product_id=source_product_id,
    )
    if not config.silent:
        click.echo(
            click.style(
                f'Dumping Product {synchronizer.product_id} from account '
                f'{synchronizer.source_account}\n',
                fg='blue',
            ),
        )
    # Clone pipeline: dump workbook -> load it -> create the new product ->
    # clean the workbook -> inject the data into the new product.
    synchronizer.dump()
    synchronizer.load_wb()
    if not config.silent:
        click.echo(
            click.style(
                f'Creating new Product on account {synchronizer.destination_account}',
                fg='blue',
            ),
        )
    synchronizer.create_product(name=name)
    synchronizer.clean_wb()
    if not config.silent:
        click.echo(
            click.style(
                'Injecting Product information',
                fg='blue',
            ),
        )
    synchronizer.inject()
    if not config.silent:
        click.echo(
            click.style(
                f'Finished cloning product {source_product_id} from account '
                f'{synchronizer.source_account} to {synchronizer.destination_account}\n',
                fg='green',
            ),
        )
        click.echo(
            click.style(
                f'New product id {synchronizer.destination_product}',
                fg='green',
            ),
        )
def param_task(client, config, input_file, product_id, param_type):
    """Synchronize one parameter sheet, tolerating a missing sheet.

    Args:
        client: Connect API client.
        config: CLI configuration (``silent`` controls output).
        input_file: path of the product workbook.
        product_id: product being synchronized (kept for interface
            compatibility; not used directly here).
        param_type: sheet name ('Ordering Parameters', 'Fulfillment
            Parameters' or 'Configuration Parameters').

    Returns:
        The result-counter dict from params_sync, or an all-zero result
        when the sheet is missing.
    """
    try:
        return params_sync(client, config, input_file, param_type)
    except SheetNotFoundError as e:
        if not config.silent:
            click.echo(
                click.style(
                    str(e),
                    fg='blue',
                ),
            )
        # Bug fix: the original fell through to `return result` with `result`
        # unbound, raising UnboundLocalError. Return an empty result so
        # print_results() can still aggregate this module safely.
        return {
            "module": param_type,
            "created": 0,
            "updated": 0,
            "deleted": 0,
            "skipped": 0,
            "errors": [],
        }
def media_sync(client, config, input_file):
    """Synchronize the 'Media' sheet and return its outcome counters."""
    media_synchronizer = MediaSynchronizer(client, config.silent)
    media_synchronizer.open(input_file, 'Media')
    skipped_count, created_count, updated_count, deleted_count, sync_errors = media_synchronizer.sync()
    # Persist workbook changes (e.g. newly assigned ids) back to the file.
    media_synchronizer.save(input_file)
    return {
        "module": "Media",
        "created": created_count,
        "updated": updated_count,
        "deleted": deleted_count,
        "skipped": skipped_count,
        "errors": sync_errors,
    }
def actions_sync(client, config, input_file):
    """Synchronize the 'Actions' sheet and return its outcome counters.

    Unlike most sheet synchronizers, this one does not write back to the
    workbook (no save step in the original flow).
    """
    worker = ActionsSynchronizer(client, config.silent)
    worker.open(input_file, 'Actions')
    skipped, created, updated, deleted, errors = worker.sync()
    result = {"module": "Actions"}
    result["created"] = created
    result["updated"] = updated
    result["deleted"] = deleted
    result["skipped"] = skipped
    result["errors"] = errors
    return result
def templates_sync(client, config, input_file):
    """Synchronize the 'Templates' sheet and return its outcome counters."""
    templates_worker = TemplatesSynchronizer(client, config.silent)
    templates_worker.open(input_file, 'Templates')
    counters = templates_worker.sync()
    # Persist workbook changes back to the input file.
    templates_worker.save(input_file)
    skipped, created, updated, deleted, errors = counters
    return {
        "module": "Templates",
        "created": created,
        "updated": updated,
        "deleted": deleted,
        "skipped": skipped,
        "errors": errors,
    }
def params_sync(client, config, input_file, param_type):
    """Synchronize one parameter sheet and return its outcome counters.

    Args:
        param_type: sheet name; also used as the reported module name.
    """
    params_worker = ParamsSynchronizer(client, config.silent)
    params_worker.open(input_file, param_type)
    skipped_n, created_n, updated_n, deleted_n, error_list = params_worker.sync()
    params_worker.save(input_file)
    return {
        "module": param_type,
        "created": created_n,
        "updated": updated_n,
        "deleted": deleted_n,
        "skipped": skipped_n,
        "errors": error_list,
    }
def static_resources_sync(client, config, input_file):
    """Synchronize the 'Embedding Static Resources' sheet.

    This synchronizer reports no 'updated' count, so that counter is
    fixed at zero in the result.
    """
    resources_worker = StaticResourcesSynchronizer(client, config.silent)
    resources_worker.open(input_file, 'Embedding Static Resources')
    skipped, created, deleted, errors = resources_worker.sync()
    return {
        "module": "Static Resources",
        "created": created,
        "updated": 0,
        "deleted": deleted,
        "skipped": skipped,
        "errors": errors,
    }
def capabilities_sync(client, config, input_file):
    """Synchronize the 'Capabilities' sheet.

    This synchronizer only reports skipped/updated/errors, so 'created'
    and 'deleted' are fixed at zero in the result.
    """
    capabilities_worker = CapabilitiesSynchronizer(client, config.silent)
    capabilities_worker.open(input_file, 'Capabilities')
    skipped_total, updated_total, error_list = capabilities_worker.sync()
    return {
        "module": "Capabilities",
        "created": 0,
        "updated": updated_total,
        "deleted": 0,
        "skipped": skipped_total,
        "errors": error_list,
    }
def config_values_sync(client, config, input_file):
    """Synchronize the 'Configuration' sheet and return its outcome counters."""
    cfg_worker = ConfigurationValuesSynchronizer(client, config.silent)
    cfg_worker.open(input_file, 'Configuration')
    skipped_n, created_n, updated_n, deleted_n, error_list = cfg_worker.sync()
    result = {"module": "Configuration"}
    result["created"] = created_n
    result["updated"] = updated_n
    result["deleted"] = deleted_n
    result["skipped"] = skipped_n
    result["errors"] = error_list
    return result
def item_sync(client, config, input_file):
    """Synchronize the 'Items' sheet and return its outcome counters."""
    items_worker = ItemSynchronizer(client, config.silent)
    items_worker.open(input_file, 'Items')
    skipped_n, created_n, updated_n, deleted_n, error_list = items_worker.sync()
    # Write workbook changes (e.g. generated item ids) back to disk.
    items_worker.save(input_file)
    return {
        "module": "Items",
        "created": created_n,
        "updated": updated_n,
        "deleted": deleted_n,
        "skipped": skipped_n,
        "errors": error_list,
    }
def print_results( # noqa: CCR001
    silent,
    product_id,
    results_tracker,
):
    """Render a per-module summary table of sync results and, when there
    were errors, optionally print the collected error messages.

    Args:
        silent: suppress all output when True.
        product_id: product the results belong to (used in the heading).
        results_tracker: list of result dicts as returned by the *_sync
            helpers ("module", "created", "updated", "deleted", "skipped",
            "errors").
    """
    if not silent:
        # Build a markdown table and render it to the terminal via cmr.
        msg = f'''
# Results of synchronizing {product_id}
| Module | Processed | Created | Updated | Deleted | Skipped | Errors |
|:--------|--------:| --------:|--------:|----------:|----------:|----------:|
'''
        errors = 0
        for result in results_tracker:
            errors_count = len(result['errors'])
            errors += errors_count
            # "Processed" is the sum of all per-module outcomes.
            processed = result['skipped'] + result['created']
            processed += result['updated'] + result['deleted']
            processed += errors_count
            row = '|{module}|{processed}|{created}|{updated}|{deleted}|{skipped}|{errors}|\n'
            msg += row.format(
                module=result['module'],
                processed=processed,
                created=result['created'],
                updated=result['updated'],
                deleted=result['deleted'],
                skipped=result['skipped'],
                errors=errors_count,
            )
        click.echo(
            f'\n{render(msg)}\n',
        )
        if errors > 0:
            msg = f'\nSync operation had {errors} errors, do you want to see them?'
            fg = 'yellow'
            click.echo(click.style(msg, fg=fg))
            print_errors = continue_or_quit()
            if print_errors:
                for result in results_tracker:
                    if len(result['errors']) > 0:
                        click.echo(
                            click.style(f'\nModule {result["module"]}:\n', fg='magenta'),
                        )
                        # NOTE(review): errors are iterated as a mapping of
                        # row index -> messages here; presumably the sync
                        # helpers return dicts — verify against synchronizers.
                        for row_idx, messages in result["errors"].items():
                            click.echo(f' Errors at row #{row_idx}')
                            for msg in messages:
                                click.echo(f' - {msg}')
                            click.echo(' ')
def get_group():
    # Entry point used by the CLI plugin loader to obtain the command group.
    return grp_product
|
import gzip
import os
import unittest
from pipelines_utils import TypedColumnReader
DATA_DIR = os.path.join('test', 'python2_3', 'pipelines_utils', 'data')
class TypedColumnReaderTestCase(unittest.TestCase):
    """Unit tests for TypedColumnReader against the CSV fixtures in DATA_DIR.

    Improvements over the original: fixture files are opened with context
    managers so handles are closed even when an assertion fails (the manual
    close() calls leaked the handle on failure); the path and the reader no
    longer share one shadowed variable name; expected exceptions use the
    assertRaises context manager instead of manual flag bookkeeping.
    """

    def test_basic_example_a(self):
        """Test loading of a simple CSV file."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.a.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')
            num_lines = 0
            first_row = {}
            for row in reader:
                if num_lines == 0:
                    first_row = row
                num_lines += 1
        self.assertEqual(2, num_lines)
        # Examine the first row...
        self.assertEqual('A string', first_row['one'])
        self.assertEqual(45, first_row['two'])
        self.assertIsNone(first_row['three'])
        self.assertEqual('and finally', first_row['four'])

    def test_basic_example_a_with_supplied_header(self):
        """Test loading of a simple CSV file with a provided header."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.a-no-header.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(
                csv_file,
                column_sep=',',
                header='one,two:int,three:float,four:string')
            num_lines = sum(1 for _ in reader)
        self.assertEqual(2, num_lines)

    def test_basic_example_a_with_duplicate_column_names(self):
        """Test loading of a simple CSV file with a provided header
        that contains duplicate names.
        """
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.a-no-header.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(
                csv_file,
                column_sep=',',
                header='one,one,three,four')
            num_lines = 0
            with self.assertRaises(TypedColumnReader.ContentError) as ctx:
                for _ in reader:
                    num_lines += 1
        err = ctx.exception
        self.assertEqual(2, err.column)
        self.assertEqual(0, err.row)
        self.assertEqual('one', err.value)
        self.assertEqual(0, num_lines)

    def test_basic_example_a_gzip(self):
        """Test loading of a simple CSV file (gzipped)."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.a.csv.gz')
        with gzip.open(path, 'rt') as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')
            num_lines = sum(1 for _ in reader)
        self.assertEqual(2, num_lines)

    def test_basic_example_b_unknown_type(self):
        """Test loading of a simple CSV file with a column type that is unknown."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.b.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')
            num_lines = 0
            with self.assertRaises(TypedColumnReader.UnknownTypeError) as ctx:
                for _ in reader:
                    num_lines += 1
        err = ctx.exception
        self.assertEqual(4, err.column)
        self.assertEqual('unknown-type', err.column_type)
        self.assertEqual(0, num_lines)

    def test_basic_example_c_too_many_colons(self):
        """Test loading of a simple CSV file with a column that has too many colons."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.c.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')
            num_lines = 0
            with self.assertRaises(TypedColumnReader.ContentError) as ctx:
                for _ in reader:
                    num_lines += 1
        err = ctx.exception
        self.assertEqual(4, err.column)
        self.assertEqual(1, err.row)
        self.assertEqual('four:unknown-type:too-many-colons', err.value)
        self.assertEqual(0, num_lines)

    def test_basic_example_d_wrong_type(self):
        """Test loading of a simple CSV file with a column that has a string as an int."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.d.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')
            num_lines = 0
            with self.assertRaises(TypedColumnReader.ContentError) as ctx:
                for _ in reader:
                    num_lines += 1
        err = ctx.exception
        self.assertEqual(1, err.column)
        self.assertEqual(2, err.row)
        self.assertEqual('A string', err.value)
        self.assertEqual('Does not comply with column type', err.message)
        self.assertEqual(0, num_lines)

    def test_basic_example_d_tabs(self):
        """Test loading of a simple CSV file with tab (default) separators."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.e.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file)
            num_lines = sum(1 for _ in reader)
        self.assertEqual(2, num_lines)

    def test_basic_example_d_too_many_values(self):
        """Test loading of a simple CSV file with too many values."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.f.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')
            num_lines = 0
            with self.assertRaises(TypedColumnReader.ContentError) as ctx:
                for _ in reader:
                    num_lines += 1
        err = ctx.exception
        self.assertEqual(3, err.column)
        self.assertEqual(2, err.row)
        self.assertEqual('Too many values', err.message)
        self.assertEqual(0, num_lines)

    def test_basic_example_g_booleans(self):
        """Test loading of a simple CSV file with all booleans."""
        path = os.path.join(DATA_DIR, 'TypedCsvReader.example.g.csv')
        with open(path) as csv_file:
            reader = TypedColumnReader.TypedColumnReader(csv_file, column_sep=',')
            num_lines = 0
            for row in reader:
                num_lines += 1
                self.assertTrue(row['a'])
                self.assertFalse(row['b'])
        self.assertEqual(4, num_lines)
|
<filename>asapis/utils/configuration.py
import sys
import json
import os
import os.path
import ast
import re
from asapis.utils.printUtil import PrintLevel, logger
from asapis.utils.defaultConfig import default_config
class Configuration:
default_file = "./config.json"
config = {}
# Process the execution options.
# If a named config file exists it gets loaded first
# Command-line options override config file options
# NOTE: at minimum, config-file/command-line must include authorization KeyId/KeySecret
def __init__(self, config_values: dict):
"""Creates options object from the config file (default or named) with all the configuration items,
overriding with any listed items (presumably from command-line parameters)
Args:
config_values: an array of items used to add or update the configuration
1) named-items - in the form name=value (also name="value") where the name can be a path in the options object.
Value can be anything that evaluated by literal_eval (https://docs.python.org/3/library/ast.html#ast.literal_eval)
2) flags - a single value, placed at the root of the options with a None value only to indicate it was set
"""
config_values = self.__handle_specials(config_values)
# Process the command-line. To override config file the format must be <name>=<value>
# value-less options can also be provided (for custom execution flags) and they are
# added with the value of 'None'. They are added at the root of the Options object:
# "<val>": None
# This is just a placeholder, to indicate that the flag was set, the value is meaningless
# (as it does not exist)
for val in config_values:
val = val.lstrip('-/')
eq = val.find('=')
# unvalued flags
if eq == -1:
self.config[val] = None
# named values
else:
name = val[0:eq]
value = val[eq + 1:]
self.__set_value(name, value)
# Handle special options that affect following execution
def __handle_specials(self, config_values:dict):
specials_pattern = re.compile("(-?Verbose)|(-?Silent)|(^-?configFile=)", re.I)
specials = list(filter(lambda option: bool(specials_pattern.match(option)), config_values))
explicit_verbose = False
explicit_config = None
os.environ["AppScan_API_Log_Level"] = "Normal"
for special in specials:
if special.lower() == "verbose":
os.environ["AppScan_API_Log_Level"] = "Verbose"
logger("Print level set to Verbose", level=PrintLevel.Verbose)
explicit_verbose = True
elif special.lower() == "silent" and not explicit_verbose: # Verbose trumps Silent
os.environ["AppScan_API_Log_Level"] = "Silent"
elif not explicit_config and special.lower().count("configfile") != 0:
# get config file from option
# the first file is used
explicit_config = specials_pattern.sub("", special)
if explicit_config:
self.__load_config(explicit_config)
else: self.__load_default_config()
return list(set(config_values) - set(specials))
def __load_config(self, config_file_path:str):
if not os.path.exists(config_file_path) or not os.path.isfile(config_file_path):
self.config = default_config
logger(f"\"{config_file_path}\" Custom file does not exist or path is not a file. Using default configuration")
else:
with open(config_file_path, "r") as config_file:
self.config = json.load(config_file)
logger(f"Using configuration file: {config_file_path}")
def __load_default_config(self):
if os.path.exists(self.default_file):
with open(self.default_file, "r") as config_file:
self.config = json.load(config_file)
logger(f"Using configuration file: {self.default_file}")
else:
self.config = default_config
logger(f"Using default configuration", level=PrintLevel.Verbose)
def __set_value(self, param, value):
parts = param.split(".")
member = parts[-1]
del parts[-1]
node = self.config
for part in parts:
if part not in node:
node[part] = {}
node = node[part]
new_value = value
value_type = str
failed_eval = False
try:
new_value = ast.literal_eval(value)
value_type = type(new_value)
except:
new_value = new_value.strip("\"'")
failed_eval = True
if member in node:
# member exist, so we handle existing, known type
member_type = type(node[member])
if value_type is member_type:
node[member] = new_value
logger(f"Option overriding: {param} with {new_value}", level=PrintLevel.Verbose)
elif failed_eval:
logger(f"Option overriding: Failed evaluating \"{value}\" for {param} of type '{member_type.__name__}'")
else:
logger(f"Option overriding: Type mismatch for {param}. Expecting '{member_type.__name__}' and got '{value_type.__name__}'")
else:
# new member, assigned the value and type as it was evaluated
node[member] = new_value
logger(f"Option introducing: new {param} added with {new_value}", level=PrintLevel.Verbose)
    def print_config(self):
        """Pretty-print the effective configuration as indented JSON."""
        logger(json.dumps(self.config, indent=2))
if __name__ == "__main__":
    # Use a lowercase variable so the Configuration class itself is not
    # shadowed by its instance.
    configuration = Configuration(sys.argv[1:])
    configuration.print_config()
|
<gh_stars>1-10
__author__ = 'nash.xiejun'
import sys
import os
import traceback
import json
from keystoneclient.v2_0.endpoints import Endpoint
from novaclient import client as nova_client
from nova.proxy import clients
from nova.proxy import compute_context
from constants import SysUserInfo
# from install_tool import cps_server, fsutils, fs_system_util
# TODO:
sys.path.append('/usr/bin/install_tool')
import cps_server
import fsutils
import fs_system_util
import sshutils
import utils
import log
from constants import ScriptFilePath
class RefServices(object):
    """Facade over the keystone, nova and neutron clients.

    Credentials are read from the standard OS_* environment variables
    (OS_TENANT_NAME, OS_USERNAME, OS_PASSWORD, OS_AUTH_URL, OS_REGION_NAME).
    """

    def __init__(self, region_name=None, bypass_url=None):
        """
        :param region_name: use to specify service in which region want to reference
        :param bypass_url: use to specify url of service
        :return:
        """
        self.tenant = os.environ['OS_TENANT_NAME']
        self.user = os.environ['OS_USERNAME']
        # Fixed: this line contained a redacted placeholder instead of the
        # OS_PASSWORD environment variable name.
        self.pwd = os.environ['OS_PASSWORD']
        self.auth_url = os.environ['OS_AUTH_URL']
        self.bypass_url = bypass_url
        # NOTE(review): the region_name argument is currently ignored in
        # favour of the environment variable - confirm this is intended.
        self.region_name = os.environ['OS_REGION_NAME']
        keystone_credentials = self.get_keystone_credentials()
        self.keystone = self.get_keystone_client(keystone_credentials)
        nova_credentials = self.get_nova_credentials_v2()
        self.nova = self.get_nova_sync_client(nova_credentials)
        self.neutron = self.get_neutron_client(nova_credentials)

    def get_keystone_credentials(self):
        """Build the credential dict used to create the keystone client."""
        d = {}
        d['version'] = '2'
        d['username'] = self.user
        d['password'] = self.pwd
        d['auth_url'] = self.auth_url
        d['tenant'] = self.tenant
        if self.region_name is not None:
            d['region_name'] = self.region_name
        d['bypass_url'] = self.bypass_url
        # NOTE(review): this logs the password in clear text - consider
        # masking credentials before logging.
        log.info('keystone credentials: %s' % d)
        return d

    def get_nova_credentials_v2(self):
        """Build the credential dict used to create the nova/neutron clients.

        :return: dict with version/username/password/auth_url/tenant plus
                 region_name and bypass_url when they are set.
        """
        d = {}
        d['version'] = '2'
        d['username'] = self.user
        # Fixed: this line contained a redacted placeholder.
        d['password'] = self.pwd
        d['auth_url'] = self.auth_url
        d['tenant'] = self.tenant
        if self.region_name is not None:
            d['region_name'] = self.region_name
        if self.bypass_url is not None:
            d['bypass_url'] = self.bypass_url
        return d

    def get_neutron_client(self, kwargs):
        """Create a neutron client from a credential dict."""
        req_context = compute_context.RequestContext(**kwargs)
        openstack_clients = clients.OpenStackClients(req_context)
        return openstack_clients.neutron()

    def get_nova_sync_client(self, kwargs):
        """Create a nova client from a credential dict."""
        req_context = compute_context.RequestContext(**kwargs)
        openstack_clients = clients.OpenStackClients(req_context)
        return openstack_clients.nova()

    def get_keystone_client(self, kwargs):
        """Create a keystone (v2) client from a credential dict."""
        req_context = compute_context.RequestContext(**kwargs)
        openstack_clients = clients.OpenStackClients(req_context)
        return openstack_clients.keystone().client_v2

    def nova_list(self):
        """Return the detailed list of nova servers."""
        return self.nova.servers.list(detailed=True)

    def nova_aggregate_create(self, name, availability_zone):
        """Create a host aggregate; return it on success, None on failure."""
        result = None
        try:
            aggregate_result = self.nova.aggregates.create(name, availability_zone)
            log.info('created Aggregate result is : %s ' % aggregate_result)
            if aggregate_result.name == name:
                result = aggregate_result
        except Exception as e:
            # Fixed: Python 2-only "except Exception, e:" syntax.
            log.error('Exception when create AG for %s, Exception: %s' % (name, traceback.format_exc()))
            print(e.message)
        return result

    def nova_host_list(self):
        # TODO: not implemented yet; always returns False.
        result = False
        return result

    def nova_aggregate_add_host(self, aggregate, host):
        """Add a host to an aggregate; return True on success."""
        result = False
        try:
            add_result = self.nova.aggregates.add_host(aggregate, host)
            log.info('Add host<%s> to aggregate<%s>, result : %s ' % (host, aggregate, add_result))
            result = True
        except Exception:
            log.error('Exception when add host<%s> to aggregate<%s>, Exception : %s ' %
                      (host, aggregate, traceback.format_exc()))
        return result

    def nova_aggregate_exist(self, name, availability_zone):
        """Return True if an aggregate with the given availability zone exists.

        On unexpected errors True is returned so callers do not try to create
        a duplicate aggregate.
        """
        result = False
        try:
            aggregates = self.nova.aggregates.list()
            for aggregate in aggregates:
                if aggregate.availability_zone == availability_zone:
                    result = True
        except nova_client.exceptions.NotFound:
            return result
        except Exception:
            log.error('Exception when exec nova_aggregate_exist, Exception: %s' % traceback.format_exc())
            # Fixed: Python 2-only print statement.
            print(traceback.format_exc())
            result = True
        return result

    def get_tenant_id_for_service(self):
        """
        To get tenant id by tenant name 'service'.
        step1: use list() to get all tenants;
        step2: then filter the one which name is 'service'.
        :return: string, tenant id of tenant named 'service'
        """
        tenant_name = 'service'
        return self.get_tenant_id_by_tenant_name(tenant_name)

    def get_tenant_id_for_admin(self):
        """Return the id of the tenant named 'admin'."""
        return self.get_tenant_id_by_tenant_name('admin')

    def get_tenant_id_by_tenant_name(self, tenant_name):
        """Return the id of the tenant with the given name, or None."""
        tenant_id = None
        tenants = self.keystone.tenants.list()
        if tenants is None:
            log.info('No any tenant in keystone.')
        else:
            for tenant in tenants:
                if tenant.name == tenant_name:
                    tenant_id = tenant.id
                    break
        return tenant_id

    def get_service_id(self, service_type):
        """Return the id of the keystone service with the given type, or None."""
        service_id = None
        services = self.keystone.services.list()
        for service in services:
            if service.type == service_type:
                service_id = service.id
                break
        return service_id

    def create_endpoint(self, region, service_id, publicurl, adminurl=None,
                        internalurl=None):
        """Create a keystone endpoint; return True on success."""
        result = False
        create_result = self.keystone.endpoints.create(region, service_id, publicurl, adminurl, internalurl)
        if isinstance(create_result, Endpoint):
            result = True
        return result

    def create_endpoint_for_service(self, service_type, region, url):
        """Create an endpoint (public = admin = internal = url) for the given
        service type, unless one already exists in the region."""
        public_url = url
        admin_url = url
        internal_url = url
        try:
            service_id = self.get_service_id(service_type)
            if self.endpoint_exist(service_id, region):
                log.info('Endpoint for service<%s> region <%s> is exist, no need to create again.' %
                         (service_type, region))
                return
            if service_id is None:
                raise ValueError('Service id of type <%s> is None.' % service_type)
            create_result = self.create_endpoint(region, service_id, public_url, admin_url, internal_url)
            if create_result is True:
                log.info('SUCCESS to create endpoint for type <%s> region: <%s>' % (service_type, region))
            else:
                log.info('FAILED to create endpoint for type <%s> region: <%s>' % (service_type, region))
        except Exception:
            err_info = 'Exception occur when create endpoint for type<%s> region <%s>, EXCEPTION %s' % \
                       (service_type, region, traceback.format_exc())
            log.info(err_info)

    def endpoint_exist(self, service_id, region):
        """Return True if an endpoint for the service exists in the region."""
        result = False
        endpoints = self.keystone.endpoints.list()
        for endpoint in endpoints:
            if endpoint.service_id == service_id and endpoint.region == region:
                result = True
                break
        return result

    def del_endpoint(self, regions):
        """
        Delete every endpoint that belongs to one of the given regions.

        :param regions: [], list of regions
        :return: always False (kept for backward compatibility)
        """
        result = False
        endpoints = self.keystone.endpoints.list()
        for endpoint in endpoints:
            if endpoint.region in regions:
                self.keystone.endpoints.delete(endpoint.id)
        return result
class RefCPSService(object):
    """Static pass-through wrappers around the cps_server module, collected
    in one place so callers do not import cps_server directly."""

    @staticmethod
    def update_template_params(service_name, template_name, params):
        """Update parameters of a service template via CPS."""
        return cps_server.update_template_params(service_name, template_name, params)

    @staticmethod
    def get_template_params(server, template):
        """Fetch the parameters of a service template via CPS."""
        return cps_server.get_template_params(server, template)

    @staticmethod
    def cps_commit():
        """Commit pending CPS configuration changes."""
        return cps_server.cps_commit()

    @staticmethod
    def get_cps_http(url):
        """Perform an HTTP GET against the CPS REST API."""
        return cps_server.get_cps_http(url)

    @staticmethod
    def post_cps_http(url, body):
        """Perform an HTTP POST against the CPS REST API."""
        return cps_server.post_cps_http(url, body)

    @staticmethod
    def get_local_domain():
        """Return the local CPS domain name."""
        return cps_server.get_local_domain()

    @staticmethod
    def host_list():
        """List all hosts known to CPS.

        :return:
        """
        return cps_server.cps_host_list()

    @staticmethod
    def role_host_add(role_name, hosts):
        """
        :param role_name: string of role name, e.g. nova-proxy001
        :param hosts: list of hosts, e.g. ['9A5A2614-D21D-B211-83F3-000000821800', EF1503DA-AEC8-119F-8567-000000821800]
        :return:
        """
        return cps_server.role_host_add(role_name, hosts)

    @staticmethod
    def role_host_list(role):
        """
        :param role: string of role name, e.g. nova-proxy001
        :return:
        """
        return cps_server.get_role_host_list(role)
class RefCPSServiceExtent(object):
    """Extended CPS helpers for querying and operating template instances."""

    @staticmethod
    def list_template_instance(service, template):
        """Return the parsed instance info for a service template, or None
        when the CPS API returned no response body."""
        url = '/cps/v1/instances?service=%s&template=%s' % (service, template)
        res_text = RefCPSService.get_cps_http(url)
        # Idiom fix: "is not None" instead of "not ... is None".
        if res_text is not None:
            return json.loads(res_text)
        return None

    @staticmethod
    def host_template_instance_operate(service, template, action):
        """Post an action (e.g. START/STOP) for a service template instance."""
        url = '/cps/v1/instances?service=%s&template=%s' % (service, template)
        body = {'action': action}
        return RefCPSService.post_cps_http(url, body)
class CPSServiceBusiness(object):
    """Business-level helper for the CPS deployment: builds proxy template
    names, starts/stops and health-checks the nova/neutron/cinder proxy
    templates, and resolves region/az -> ip mappings from the DNS template."""

    def __init__(self):
        # CPS service names.
        self.NOVA = 'nova'
        self.NOVA_API = 'nova-api'
        self.NEUTRON = 'neutron'
        self.NEUTRON_l2 = 'neutron-l2'
        self.NEUTRON_l3 = 'neutron-l3'
        self.CINDER = 'cinder'
        # Template instance operations and the expected healthy status.
        self.OPT_STOP = 'STOP'
        self.OPT_START = 'START'
        self.STATUS_ACTIVE = 'active'
        self.DNS = 'dns'
        self.DNS_SERVER_TEMPLATE = 'dns-server'
        # Lazily-filled cache of region domain -> ip (see get_region_match_ip).
        self.region_match_ip = {}

    def get_nova_proxy_template(self, proxy_number):
        """Return the nova proxy template name, e.g. 'nova-proxy001'."""
        return '-'.join([self.NOVA, proxy_number])

    def get_neutron_l2_proxy_template(self, proxy_number):
        """Return the neutron-l2 proxy template name."""
        return '-'.join([self.NEUTRON_l2, proxy_number])

    def get_neutron_l3_proxy_template(self, proxy_number):
        """Return the neutron-l3 proxy template name."""
        return '-'.join([self.NEUTRON_l3, proxy_number])

    def get_cinder_template(self, proxy_number):
        """Return the cinder proxy template name."""
        return '-'.join([self.CINDER, proxy_number])

    def stop_nova_proxy(self, proxy_number):
        """Stop the nova proxy template instance."""
        nova_proxy_template = self.get_nova_proxy_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.NOVA, nova_proxy_template, self.OPT_STOP)

    def start_nova_proxy(self, proxy_number):
        """Start the nova proxy template instance."""
        nova_proxy_template = self.get_nova_proxy_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.NOVA, nova_proxy_template, self.OPT_START)

    def stop_cinder_proxy(self, proxy_number):
        """Stop the cinder proxy template instance."""
        cinder_proxy_template = self.get_cinder_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.CINDER, cinder_proxy_template, self.OPT_STOP)

    def start_cinder_proxy(self, proxy_number):
        """Start the cinder proxy template instance."""
        cinder_proxy_template = self.get_cinder_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.CINDER, cinder_proxy_template, self.OPT_START)

    def stop_neutron_l2_proxy(self, proxy_number):
        """Stop the neutron-l2 proxy template instance."""
        neutron_proxy_template = self.get_neutron_l2_proxy_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.NEUTRON, neutron_proxy_template, self.OPT_STOP)

    def start_neutron_l2_proxy(self, proxy_number):
        """Start the neutron-l2 proxy template instance."""
        neutron_proxy_template = self.get_neutron_l2_proxy_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.NEUTRON, neutron_proxy_template, self.OPT_START)

    def stop_neutron_l3_proxy(self, proxy_number):
        """Stop the neutron-l3 proxy template instance."""
        neutron_proxy_template = self.get_neutron_l3_proxy_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.NEUTRON, neutron_proxy_template, self.OPT_STOP)

    def start_neutron_l3_proxy(self, proxy_number):
        """Start the neutron-l3 proxy template instance."""
        neutron_proxy_template = self.get_neutron_l3_proxy_template(proxy_number)
        RefCPSServiceExtent.host_template_instance_operate(self.NEUTRON, neutron_proxy_template, self.OPT_START)

    def stop_nova_api(self):
        """Stop the nova-api template instance."""
        RefCPSServiceExtent.host_template_instance_operate(self.NOVA, self.NOVA_API, self.OPT_STOP)

    def start_nova_api(self):
        """Start the nova-api template instance."""
        RefCPSServiceExtent.host_template_instance_operate(self.NOVA, self.NOVA_API, self.OPT_START)

    def stop_all(self, proxy_number):
        """Stop all proxy templates for the given proxy plus nova-api."""
        self.stop_cinder_proxy(proxy_number)
        self.stop_neutron_l2_proxy(proxy_number)
        self.stop_neutron_l3_proxy(proxy_number)
        self.stop_nova_proxy(proxy_number)
        self.stop_nova_api()

    def start_all(self, proxy_number):
        """Start all proxy templates for the given proxy plus nova-api."""
        self.start_cinder_proxy(proxy_number)
        self.start_neutron_l2_proxy(proxy_number)
        self.start_neutron_l3_proxy(proxy_number)
        self.start_nova_proxy(proxy_number)
        self.start_nova_api()

    def check_status_for_template(self, service, template, aim_status):
        """Compare the 'hastatus' of the first template instance with the
        desired status.

        :return: True when the status matches, False when it does not, and
                 None when no instance information is available.
        """
        template_instance_info = RefCPSServiceExtent.list_template_instance(service, template)
        if template_instance_info is None or len(template_instance_info.get('instances')) < 1:
            print('Template instance info of Service<%s> Template<%s> is None.' % (service, template))
            log.error('Template instance info of Service<%s> Template<%s> is None.' % (service, template))
            log.error('template_instance_info: %s' % template_instance_info)
            return None
        status = template_instance_info.get('instances')[0].get('hastatus')
        if status == aim_status:
            log.info('Status of service<%s>, template<%s> is: %s' % (service, template, status))
            print('Status of service<%s>, template<%s> is: %s' % (service, template, status))
            return True
        else:
            log.error('Status of service<%s>, template<%s> is: %s' % (service, template, status))
            print('Status of service<%s>, template<%s> is: %s' % (service, template, status))
            return False

    def check_nova_template(self, proxy_number):
        """Check the nova proxy template is active."""
        nova_template = self.get_nova_proxy_template(proxy_number)
        self.check_status_for_template(self.NOVA, nova_template, self.STATUS_ACTIVE)

    def check_neutron_l2_template(self, proxy_number):
        """Check the neutron-l2 proxy template is active."""
        neutron_l2_template = self.get_neutron_l2_proxy_template(proxy_number)
        self.check_status_for_template(self.NEUTRON, neutron_l2_template, self.STATUS_ACTIVE)

    def check_neutron_l3_template(self, proxy_number):
        """Check the neutron-l3 proxy template is active."""
        neutron_l3_template = self.get_neutron_l3_proxy_template(proxy_number)
        self.check_status_for_template(self.NEUTRON, neutron_l3_template, self.STATUS_ACTIVE)

    def check_cinder_template(self, proxy_number):
        """Check the cinder proxy template is active; return the check result."""
        cinder_template = self.get_cinder_template(proxy_number)
        check_result = self.check_status_for_template(self.CINDER, cinder_template, self.STATUS_ACTIVE)
        return check_result

    def check_all_service_template_status(self, proxy_number):
        """Check all proxy templates; if the cinder proxy is reported inactive
        (False, not None), try to restart it remotely on its host."""
        log.info('check cinder proxy status, cinder proxy : <%s>' % proxy_number)
        check_cinder_result = self.check_cinder_template(proxy_number)
        log.info('check cinder proxy status, cinder proxy : <%s>, status : <%s>' % (proxy_number, check_cinder_result))
        if check_cinder_result is not None and not check_cinder_result:
            # Cinder proxy exists but is not active: write a restart script on
            # the proxy host and execute it as root.
            proxy_match_host = self.get_proxy_match_host()
            proxy_host = proxy_match_host[proxy_number]
            command =\
                'echo \'/usr/bin/python /usr/bin/cinder-%s --config-file /etc/cinder/cinder-%s.conf > /dev/null 2>&1 &\' > %s' \
                % (proxy_number, proxy_number, ScriptFilePath.PATH_RESTART_CINDER_PROXY_SH)
            log.info('cinder proxy=%s, host=%s, cmd=%s' % (proxy_number, proxy_host, command))
            utils.remote_execute_cmd(proxy_host, command)
            run_restart_cinder_proxy_cmd = '/usr/bin/sh %s' % ScriptFilePath.PATH_RESTART_CINDER_PROXY_SH
            log.info('run_restart_cinder_proxy_cmd=%s' % run_restart_cinder_proxy_cmd)
            utils.remote_execute_cmd_by_root(proxy_host, run_restart_cinder_proxy_cmd)
        self.check_neutron_l2_template(proxy_number)
        self.check_neutron_l3_template(proxy_number)
        self.check_nova_template(proxy_number)

    def get_dns_info(self):
        """
        by "cps template-params-show --service dns dns-server", it will get following result:
        {u'cfg':
            {
                u'address': u'/cascading.hybrid.huawei.com/192.168.3.11,
                    /identity.cascading.hybrid.huawei.com/16172.16.58.3,
                    /image.cascading.hybrid.huawei.com/16172.16.58.3,
                    /az01.shenzhen--fusionsphere.huawei.com/16172.16.31.102,
                    /az11.shenzhen--vcloud.huawei.com/172.16.31.10,
                    /az31.singapore--aws.vodafone.com/192.168.3.11',
                u'network': u'[]',
                u'server': u''
            }
        }
        :return:
        """
        dns_info = RefCPSService.get_template_params(self.DNS, self.DNS_SERVER_TEMPLATE)
        return dns_info

    def get_region_match_ip(self):
        """Parse the DNS 'address' config into a {region domain: ip} dict."""
        dns_info = self.get_dns_info()
        addresses = dns_info['cfg']['address']
        if not addresses:
            log.info('address is none in dns info')
            return {}
        region_match_ip = {}
        # Each entry has the shape "/<domain>/<ip>", entries comma-separated.
        address_list = addresses.split(',')
        for address in address_list:
            if address is not None:
                tmp_address_content = address.split('/')[1:]
                if len(tmp_address_content) == 2:
                    region_match_ip[tmp_address_content[0]] = tmp_address_content[1]
        return region_match_ip

    def get_az_ip(self, az):
        """
        if the region is "az01.shenzhen--fusionsphere.huawei.com", the az is "az01"
        :param az: string, the full name of az, e.g. az01, az11 and so on.
        :return: array list, array list of ip address, e.g. ['192.168.3.11', '172.16.31.10', ...]
        """
        # Populate the cache on first use.
        if not self.region_match_ip:
            self.region_match_ip = self.get_region_match_ip()
        ip_list = []
        for region, ip in self.region_match_ip.items():
            if region.startswith(az):
                ip_list.append(ip)
        return ip_list

    def get_cascading_ip(self):
        """
        :return: array list, array list of ip address, e.g. ['192.168.3.11', '172.16.31.10', ...]
        """
        return self.get_az_ip('cascading')

    def get_openstack_hosts(self):
        """
        :return: array list, array list of ip address, e.g. ['192.168.3.11', '172.16.31.10', ...]
        """
        return self.get_az_ip('az0')

    def get_vcloud_node_hosts(self):
        """
        :return: array list, array list of ip address, e.g. ['192.168.3.11', '172.16.31.10', ...]
        """
        return self.get_az_ip('az1')

    def get_aws_node_hosts(self):
        """
        :return: array list, array list of ip address, e.g. ['192.168.3.11', '172.16.31.10', ...]
        """
        return self.get_az_ip('az3')

    def get_os_region_name(self):
        """Return '<az>.<dc>' derived from the local CPS domain."""
        region = RefCPSService.get_local_domain()
        os_region_name = '.'.join([RefFsSystemUtils.get_az_by_domain(region),
                                   RefFsSystemUtils.get_dc_by_domain(region)])
        return os_region_name

    def get_all_proxy_nodes(self, proxy_match_region):
        """Return the management ips of all hosts carrying a compute-proxy
        role that maps to a region in `proxy_match_region`."""
        proxy_node_hosts = []
        host_list = RefCPSService.host_list()
        for host in host_list['hosts']:
            roles_list = host['roles']
            proxy_host_ip = host['manageip']
            region = self._get_region_by_roles_list(roles_list, proxy_match_region)
            if region is not None:
                proxy_node_hosts.append(proxy_host_ip)
            else:
                log.info('Region of ip <%s> is None, this host of ip address is not a proxy' % proxy_host_ip)
        return proxy_node_hosts

    def get_proxy_match_host(self):
        """Return a {proxy_number: management ip} dict built from host roles."""
        proxy_match_host = {}
        host_list = RefCPSService.host_list()
        for host in host_list['hosts']:
            roles_list = host['roles']
            proxy_host_ip = host['manageip']
            proxy_number = self._get_proxy_number_from_roles(roles_list)
            if proxy_number is not None:
                proxy_match_host[proxy_number] = proxy_host_ip
            else:
                log.info('Proxy number is none for host: %s' % proxy_host_ip)
        return proxy_match_host

    def _get_proxy_number_from_roles(self, roles_list):
        # The proxy number is the second dash-separated part of a role
        # containing 'proxy', e.g. 'nova-proxy001' -> 'proxy001'.
        for role in roles_list:
            if 'proxy' in role:
                return role.split('-')[1]
            else:
                continue
        return None

    def _get_region_by_roles_list(self, roles_list, proxy_match_region):
        # Map the first 'compute-proxy' role to its region, if known.
        for role in roles_list:
            if 'compute-proxy' in role:
                proxy_number = role.split('-')[1]
                if proxy_match_region.get(proxy_number):
                    return proxy_match_region[proxy_number]
        return
class RefFsUtils(object):
    """Static pass-through wrapper around the fsutils module."""

    @staticmethod
    def get_local_dc_az():
        """Return the local dc/az information via fsutils."""
        return fsutils.get_local_dc_az()
class RefFsSystemUtils(object):
    """Static helpers that resolve az/dc names from a region domain by
    delegating to fs_system_util with a service URL built from the domain."""

    @staticmethod
    def get_az_by_domain(proxy_matched_region):
        """Return the az name for the given region domain."""
        domain_url = 'https://service.%s:443' % proxy_matched_region
        return fs_system_util.get_az_by_domain(domain_url)

    @staticmethod
    def get_dc_by_domain(proxy_matched_region):
        """Return the dc name for the given region domain."""
        domain_url = 'https://service.%s:443' % proxy_matched_region
        return fs_system_util.get_dc_by_domain(domain_url)
if __name__ == '__main__':
    # Smoke test: print the DNS template parameters of the local deployment.
    cps = CPSServiceBusiness()
    print(cps.get_dns_info())
|
<reponame>VUB-HYDR/2021_Grant_Thiery_C3S511-SPQB-ERA5LAND
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 13:18:37 2019
@author: Luke
"""
#==============================================================================
#SUMMARY
#==============================================================================
#This script is used to plot lake cover in Flake for era5 data
#==============================================================================
#IMPORT
#==============================================================================
import xarray as xr
import os
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib as mpl
#==============================================================================
#FUNCTIONS
#==============================================================================
#==============================================================================
#SETTINGS
#==============================================================================
#==============================================================================
#SETTINGS
#==============================================================================

# Font sizes for the colorbar label and its tick labels.
title_font = 13
tick_font = 11

#==============================================================================
#INITIALIZE
#==============================================================================

# Input directory/file and output directory for the resulting figure.
directory = '/Users/Luke/Documents/PHD/C3S_511/DATA/era5-land/lakecover'
os.chdir(directory)
file = 'era5-land_lakemask.nc'
o_directory = '/Users/Luke/Documents/PHD/C3S_511/FIGURES/era5-land/lakecover'

#==============================================================================
#OPEN DATA
#==============================================================================

# Read the lake-cover fraction field ('cl'), drop the singleton time
# dimension, and mask out zero-cover cells so they are not drawn.
cover = xr.open_dataset(file,decode_times=False).cl.squeeze(dim='time')
cover = cover.where(cover>0)
lon = cover.longitude.values
lat = cover.latitude.values

#=============================================================================
#PLOT
#=============================================================================

f, ax = plt.subplots(1,1,figsize=(15,12));
lon, lat = np.meshgrid(lon, lat)

# Sample the continuous 'Spectral' colormap at fixed positions; a subset of
# the samples is used below to build a discrete 10-color colormap.
cmap_whole = plt.cm.get_cmap('Spectral')
cmap55 = cmap_whole(0.01)
cmap50 = cmap_whole(0.05)
cmap45 = cmap_whole(0.1)
cmap40 = cmap_whole(0.15)
cmap35 = cmap_whole(0.2)
cmap30 = cmap_whole(0.25)
cmap25 = cmap_whole(0.3)
cmap20 = cmap_whole(0.35)
cmap10 = cmap_whole(0.4)
cmap5 = cmap_whole(0.45)
cmap0 = cmap_whole(0.5)
cmap_5 = cmap_whole(0.55)
cmap_10 = cmap_whole(0.6)
cmap_20 = cmap_whole(0.65)
cmap_25 = cmap_whole(0.7)
cmap_30 = cmap_whole(0.75)
cmap_35 = cmap_whole(0.8)
cmap_40 = cmap_whole(0.85)
cmap_45 = cmap_whole(0.9)
cmap_50 = cmap_whole(0.95)
cmap_55 = cmap_whole(0.99)

# Discrete 10-color colormap for cover fractions in [0, 1].
cmap = mpl.colors.ListedColormap([cmap_50,cmap_40,cmap_30,cmap_20,cmap_10,cmap10,cmap20,cmap30,cmap40,cmap50], N=10)

# Graticule positions for y/x tick placement.
parallels = np.arange(-60.,91.,30.);
meridians = np.arange(-135.,136.,45.);

# Cylindrical base map with continents filled white over a grey ocean.
m = Basemap(llcrnrlon=-170, llcrnrlat=-60, urcrnrlon=180, urcrnrlat=90, suppress_ticks=False);
m.ax = ax
m.drawcoastlines(linewidth=0.2);
m.drawmapboundary(fill_color='whitesmoke')
parallels = np.arange(-60.,91.,30.);
m.fillcontinents(color='white');
ax.set_yticks(parallels);
ax.set_xticks(meridians);

# Hide tick labels/marks; style the axes frame in dark grey.
ax.tick_params(labelbottom=False, labeltop=False, labelleft=False, labelright=False,
               bottom=False, top=False, left=False, right=False, color='0.2',\
               labelcolor='0.2', labelsize=5,width=0.4,direction="in",length=2.5)
ax.spines['bottom'].set_color('0.2')
ax.spines['bottom'].set_linewidth(0.4)
ax.spines['top'].set_color('0.2')
ax.spines['top'].set_linewidth(0.4)
ax.xaxis.label.set_color('0.2')
ax.spines['left'].set_color('0.2')
ax.spines['left'].set_linewidth(0.4)
ax.spines['right'].set_color('0.2')
ax.spines['right'].set_linewidth(0.4)
ax.yaxis.label.set_color('0.2')

# Draw the lake-cover field on top of the base map (zorder=2).
h = m.pcolormesh(lon, lat, cover, latlon=True, cmap=cmap, vmin=0, vmax=1, zorder=2)

#=============================================================================
#COLORBAR
#=============================================================================

# Horizontal discrete colorbar with boundaries every 0.1 and ticks every 0.2.
values = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
tick_locs = [0,0.2,0.4,0.6,0.8,1.0]
norm = mpl.colors.BoundaryNorm(values,cmap.N)
cbaxes = f.add_axes([0.25, 0.15, 0.5, 0.015])
cb = mpl.colorbar.ColorbarBase(ax=cbaxes, cmap=cmap,
                               norm=norm,
                               spacing='uniform',
                               orientation='horizontal',
                               ticks=tick_locs)
cb.set_label('Grid-fraction lake cover',size=title_font)
cb.ax.xaxis.set_label_position('top');
cb.ax.tick_params(labelcolor='0.2', labelsize=tick_font, color='0.2',length=2.5, width=0.4, direction='out'); #change color of ticks?
cb.ax.set_xticklabels(['0','0.2','0.4','0.6','0.8','1.0'])
cb.outline.set_edgecolor('0.2')
cb.outline.set_linewidth(0.4)
plt.subplots_adjust(left=0.15, right=0.85, bottom=0.175, top=0.6, wspace=0.1, hspace=0.0)
plt.show()

#save figure
f.savefig(o_directory+'/'+'era5-land_lakes_lakecover_0_1_map.png',bbox_inches='tight',dpi=500)
|
# Made by Mr. Have fun! Version 0.2
import sys
from com.l2jserver import Config
from com.l2jserver.gameserver.model.quest import State
from com.l2jserver.gameserver.model.quest import QuestState
from com.l2jserver.gameserver.model.quest.jython import QuestJython as JQuest
# Quest identifier string used to look up the player's QuestState.
qn = "153_WharfOldtimersFavor"

# Quest item ids: the delivery list, the three parcels and their receipts.
DELIVERY_LIST_ID = 1012
HEAVY_WOOD_BOX_ID = 1013
CLOTH_BUNDLE_ID = 1014
CLAY_POT_ID = 1015
JACKSONS_RECEIPT_ID = 1016
SILVIAS_RECEIPT_ID = 1017
RANTS_RECEIPT_ID = 1018
# Reward item id (given on quest completion).
RING_ID = 875
class Quest (JQuest) :
    """Delivery quest: at start the player receives a delivery list and three
    parcels; each parcel is exchanged at its recipient NPC (30002, 30003,
    30054) for a receipt, and returning all three receipts to the start NPC
    (30041) yields the reward (two rings plus experience)."""

    def __init__(self,id,name,descr):
        JQuest.__init__(self,id,name,descr)
        # Items removed automatically when the quest is aborted/finished.
        self.questItemIds = [HEAVY_WOOD_BOX_ID, CLOTH_BUNDLE_ID, CLAY_POT_ID, DELIVERY_LIST_ID, JACKSONS_RECEIPT_ID, SILVIAS_RECEIPT_ID, RANTS_RECEIPT_ID]

    def onAdvEvent (self,event,npc, player) :
        """Handle dialog events; event "1" accepts the quest and hands out the
        delivery list plus the three parcels (each only if not already held)."""
        htmltext = event
        st = player.getQuestState(qn)
        if not st : return
        if event == "1" :
            st.set("id","0")
            st.set("cond","1")
            st.setState(State.STARTED)
            st.playSound("ItemSound.quest_accept")
            if st.getQuestItemsCount(DELIVERY_LIST_ID) == 0 :
                st.giveItems(DELIVERY_LIST_ID,1)
            if st.getQuestItemsCount(HEAVY_WOOD_BOX_ID) == 0 :
                st.giveItems(HEAVY_WOOD_BOX_ID,1)
            if st.getQuestItemsCount(CLOTH_BUNDLE_ID) == 0 :
                st.giveItems(CLOTH_BUNDLE_ID,1)
            if st.getQuestItemsCount(CLAY_POT_ID) == 0 :
                st.giveItems(CLAY_POT_ID,1)
            htmltext = "30041-04.htm"
        return htmltext

    def onTalk (self,npc,player):
        """Dispatch dialog per NPC and quest progress (cond/receipts held)."""
        htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
        st = player.getQuestState(qn)
        if not st : return htmltext
        npcId = npc.getNpcId()
        id = st.getState()
        # Start NPC, quest not yet taken: level gate at 2.
        if npcId == 30041 and st.getInt("cond")==0 and st.getInt("onlyone")==0 :
            if player.getLevel() >= 2 :
                htmltext = "30041-03.htm"
                return htmltext
            else:
                htmltext = "30041-02.htm"
                st.exitQuest(1)
        elif npcId == 30041 and st.getInt("cond")==0 and st.getInt("onlyone")==1 :
            htmltext = "<html><body>This quest has already been completed.</body></html>"
        # Start NPC while not all three receipts are held yet (the boolean
        # conjunction compared to 0 means "at least one receipt missing").
        elif npcId == 30041 and st.getInt("cond")!=0 and (st.getQuestItemsCount(JACKSONS_RECEIPT_ID)!=0 and st.getQuestItemsCount(SILVIAS_RECEIPT_ID)!=0 and st.getQuestItemsCount(RANTS_RECEIPT_ID)!=0)==0 :
            htmltext = "30041-05.htm"
        if id == State.STARTED :
            # NPC 30002: exchange the heavy wood box for Jackson's receipt.
            if npcId == 30002 and st.getInt("cond")!=0 and st.getQuestItemsCount(HEAVY_WOOD_BOX_ID)!=0 :
                st.takeItems(HEAVY_WOOD_BOX_ID,st.getQuestItemsCount(HEAVY_WOOD_BOX_ID))
                if st.getQuestItemsCount(JACKSONS_RECEIPT_ID) == 0 :
                    st.giveItems(JACKSONS_RECEIPT_ID,1)
                # Advance to cond 2 once all three receipts are collected.
                if st.getQuestItemsCount(JACKSONS_RECEIPT_ID)!=0 and st.getQuestItemsCount(SILVIAS_RECEIPT_ID)!=0 and st.getQuestItemsCount(RANTS_RECEIPT_ID)!=0:
                    st.set("cond","2")
                    st.playSound("ItemSound.quest_middle")
                htmltext = "30002-01.htm"
            elif npcId == 30002 and st.getInt("cond")!=0 and st.getQuestItemsCount(JACKSONS_RECEIPT_ID)!=0 :
                htmltext = "30002-02.htm"
            # NPC 30003: exchange the cloth bundle for Silvia's receipt
            # (plus a small item reward).
            elif npcId == 30003 and st.getInt("cond")!=0 and st.getQuestItemsCount(CLOTH_BUNDLE_ID)!=0 :
                st.takeItems(CLOTH_BUNDLE_ID,st.getQuestItemsCount(CLOTH_BUNDLE_ID))
                if st.getQuestItemsCount(SILVIAS_RECEIPT_ID) == 0 :
                    st.giveItems(SILVIAS_RECEIPT_ID,1)
                    st.rewardItems(1835,3)
                if st.getQuestItemsCount(JACKSONS_RECEIPT_ID)!=0 and st.getQuestItemsCount(SILVIAS_RECEIPT_ID)!=0 and st.getQuestItemsCount(RANTS_RECEIPT_ID)!=0:
                    st.set("cond","2")
                    st.playSound("ItemSound.quest_middle")
                htmltext = "30003-01.htm"
            elif npcId == 30003 and st.getInt("cond")!=0 and st.getQuestItemsCount(SILVIAS_RECEIPT_ID)!=0 :
                htmltext = "30003-02.htm"
            # NPC 30054: exchange the clay pot for Rant's receipt.
            elif npcId == 30054 and st.getInt("cond")!=0 and st.getQuestItemsCount(CLAY_POT_ID)!=0 :
                st.takeItems(CLAY_POT_ID,st.getQuestItemsCount(CLAY_POT_ID))
                if st.getQuestItemsCount(RANTS_RECEIPT_ID) == 0 :
                    st.giveItems(RANTS_RECEIPT_ID,1)
                if st.getQuestItemsCount(JACKSONS_RECEIPT_ID)!=0 and st.getQuestItemsCount(SILVIAS_RECEIPT_ID)!=0 and st.getQuestItemsCount(RANTS_RECEIPT_ID)!=0:
                    st.set("cond","2")
                    st.playSound("ItemSound.quest_middle")
                htmltext = "30054-01.htm"
            elif npcId == 30054 and st.getInt("cond")!=0 and st.getQuestItemsCount(RANTS_RECEIPT_ID)!=0 :
                htmltext = "30054-02.htm"
            # Completion: all three receipts returned to the start NPC.
            elif npcId == 30041 and st.getInt("cond")!=0 and (st.getQuestItemsCount(JACKSONS_RECEIPT_ID)!=0 and st.getQuestItemsCount(SILVIAS_RECEIPT_ID)!=0 and st.getQuestItemsCount(RANTS_RECEIPT_ID)!=0)!=0 and st.getInt("onlyone")==0 :
                # The "id" guard prevents handing out the reward twice.
                if st.getInt("id") != 153 :
                    st.set("id","153")
                    st.set("cond","0")
                    st.exitQuest(False)
                    st.playSound("ItemSound.quest_finish")
                    st.set("onlyone","1")
                    # Two rings are given (two separate giveItems calls).
                    st.giveItems(RING_ID,1)
                    st.giveItems(RING_ID,1)
                    st.takeItems(DELIVERY_LIST_ID,-1)
                    st.takeItems(JACKSONS_RECEIPT_ID,-1)
                    st.takeItems(SILVIAS_RECEIPT_ID,-1)
                    st.takeItems(RANTS_RECEIPT_ID,-1)
                    st.addExpAndSp(600,0)
                    htmltext = "30041-06.htm"
        return htmltext
# Instantiate and register the quest: NPC 30041 starts it, and 30041, 30002,
# 30003 and 30054 all respond to talk events.
QUEST = Quest(153,qn,"Deliver Goods")
QUEST.addStartNpc(30041)
QUEST.addTalkId(30041)
QUEST.addTalkId(30002)
QUEST.addTalkId(30003)
QUEST.addTalkId(30054)
from dataclasses import dataclass
from time import perf_counter
from typing import Dict, Optional, Callable
from pyqumo import MarkovArrival, PhaseType, rel_err, MapPh1NQueue, BoundsError, \
Erlang, HyperExponential, Exponential
from pyqumo.fitting import fit_acph2, fit_mern2, fit_map_horvath05
def get_complexity(arrival_order: int, service_order: int, capacity: int,
                   net_size: int) -> int:
    """
    Compute tandem network complexity as `W(V(M+2))^N`.

    Here `W` is the arrival process order, `V` the service process order,
    `M` the station capacity and `N` the number of stations in the tandem.
    """
    per_station = service_order * (capacity + 2)
    return arrival_order * per_station ** net_size
@dataclass
class SolveResults:
    """
    Results and error metrics of a tandem network solution.

    The `update_*_err` methods record the maximum relative error seen so far
    for the corresponding moment (a `None` field counts as "no value yet").
    """
    skipped: bool
    delay: Optional[float] = None
    delivery_prob: Optional[float] = None
    last_system_size: Optional[float] = None
    elapsed: Optional[float] = None
    max_inp_order: Optional[int] = None
    max_out_order: Optional[int] = None
    m1_err: Optional[float] = None
    cv_err: Optional[float] = None
    skew_err: Optional[float] = None
    lag1_err: Optional[float] = None

    def _update_max(self, attr: str, x: float) -> None:
        # Shared implementation for the four update_*_err methods, which
        # previously duplicated this logic.
        current = getattr(self, attr)
        if current is None or current < x:
            setattr(self, attr, x)

    def update_m1_err(self, x: float):
        """Record `x` as the mean (m1) error if it is the largest so far."""
        self._update_max('m1_err', x)

    def update_cv_err(self, x: float):
        """Record `x` as the c.v. error if it is the largest so far."""
        self._update_max('cv_err', x)

    def update_skew_err(self, x: float):
        """Record `x` as the skewness error if it is the largest so far."""
        self._update_max('skew_err', x)

    def update_lag1_err(self, x: float):
        """Record `x` as the lag-1 error if it is the largest so far."""
        self._update_max('lag1_err', x)
def solve_iterative(
arrival: Optional[MarkovArrival] = None,
service: Optional[PhaseType] = None,
capacity: Optional[int] = None,
net_size: Optional[int] = None,
reducer: Optional[Callable[[MarkovArrival], MarkovArrival]] = None,
reduce_arrival: bool = False,
reduce_departure: bool = False,
max_precise_order: int = 8000) -> SolveResults:
"""
Solve MAP/PH/1/N -> */PH/1/N -> ... */PH/1/N model analytically.
If `reducer` is not None, then this function is applied
to each departure process prior to sending it to the arrival to the
next station.
If `reduce_arrival = True`, then `reducer()` is applied
to the first arrival as well (`inp.arrival`).
Parameters
----------
arrival : MarkovArrival, optional
service : PhaseType, optional
capacity : int, optional
net_size : int, optional
reducer : None or Callable[[MarkovArrival], MarkovArrival]
if not None, this function is applied to each departure
reduce_arrival : bool, optional (default: False)
if True, reduce arrival process as well
max_precise_order : int (default: 8000)
if looking for precise solution, will ignore matrices with number
of rows (or columns) larger then this value
Returns
-------
SolveResults
"""
# Create solution that will be filled later, and start measuring time.
t_start = perf_counter()
solution = SolveResults(
skipped=False, delay=0.0, delivery_prob=1.0, m1_err=0.0,
cv_err=0.0, skew_err=0.0, lag1_err=0.0, max_inp_order=0,
max_out_order=0)
# Если нужно предобработать входной поток, делаем это
_inp_arrival = arrival
if reduce_arrival:
# FIXME: another bad code
try:
arrival = reducer(_inp_arrival)
except Exception:
solution.skipped = True
return solution
# --- end of bad code (.. khm ..)
solution.m1_err = rel_err(arrival.mean, _inp_arrival.mean)
solution.cv_err = rel_err(arrival.cv, _inp_arrival.cv)
solution.skew_err = rel_err(arrival.skewness, _inp_arrival.skewness)
solution.lag1_err = rel_err(arrival.lag(1), _inp_arrival.lag(1))
else:
arrival = _inp_arrival
# Since we now the maximum order, try to avoid useless iterations for
# large tasks:
complexity = get_complexity(arrival.order, service.order, capacity,
net_size)
if not reduce_departure and complexity > max_precise_order:
return SolveResults(skipped=True, max_out_order=complexity)
# Итерационно рассчитываем характеристики сети
solution.max_inp_order = arrival.order
sta_index = 0
while sta_index < net_size:
# If arrival MAP matrix is too large, then abort execution:
if not reduce_departure and arrival.order > max_precise_order:
return SolveResults(skipped=True, max_inp_order=arrival.order)
# Обновляем, если надо, максимальный размер входа:
solution.max_inp_order = max(solution.max_inp_order, arrival.order)
# Строим очередной узел
system = MapPh1NQueue(arrival, service, capacity)
dep = system.departure
# FIXME: better find _real_ reasons for these errors.
# --- from here: we check whether some error appeared and, if
# so, reject to solve this problem by returning 'skipped = True'.
# Most probably, these errors appear due to floating point arithmetics
# and precision on large matrix operations. Another possible source
# is in solving optimization problems when building departure
# approximations.
# For now, just skip such entries.
try:
# If departure matrix is too bad (e.g., rate is negative), skip:
if (isinstance(dep.rate, complex) or
isinstance(dep.cv, complex) or
isinstance(dep.skewness, complex) or
isinstance(system.loss_prob, complex) or
isinstance(system.response_time, complex) or
dep.rate < 0 or dep.cv < 0 or system.loss_prob < 0 or
system.response_time < 0):
solution.skipped = True
except ValueError:
solution.skipped = True
if solution.skipped:
solution.max_out_order = max(solution.max_out_order, dep.order)
return solution
# Рассчитываем и накапливаем задержку, вероятность доставки
# и обновляем размер системы.
solution.delay += system.response_time
solution.delivery_prob *= 1 - system.loss_prob
solution.last_system_size = system.system_size.mean
solution.max_out_order = max(solution.max_out_order, dep.order)
# Если нужно аппроксимировать выход, делаем это.
# Иначе используем выход в качестве нового входа.
if reduce_departure:
# FIXME: another bad code
try:
arrival = reducer(dep)
except Exception:
solution.skipped = True
return solution
# --- end of bad code (.. khm ..)
solution.update_m1_err(rel_err(arrival.mean, dep.mean))
solution.update_cv_err(rel_err(arrival.cv, dep.cv))
solution.update_skew_err(rel_err(arrival.skewness, dep.skewness))
solution.update_lag1_err(rel_err(arrival.lag(1), dep.lag(1)))
else:
arrival = system.departure
# Переходим к следующей станции
sta_index += 1
# Замеряем время завершения
solution.elapsed = perf_counter() - t_start
return solution
def reduce_map(
arrival: MarkovArrival,
num_moments: int = 3,
use_lag: bool = False,
tol: float = .01) -> MarkovArrival:
"""
Find another MAP matching the given number of moments and, optionally,
lag-1 correlation coefficient.
Parameters
----------
arrival : MarkovArrival
the arrival process to reduce
num_moments : 1, 2 or 3 (default: 3)
number of moments to match
use_lag : bool (default: False)
flag indicating whether to try to fit lag-1 autocorrelation.
tol : float (default: .01)
when fitting, if cv differs on this value from 1.0, exponential
distribution is used.
Returns
-------
reduced : MarkovArrival
"""
m1 = arrival.mean
if num_moments == 1:
ph = PhaseType.exponential(1 / m1)
elif num_moments == 2:
cv = arrival.cv
std = arrival.std
if cv < 0.99:
ph = Erlang.fit(m1, std).as_ph()
elif cv > 1.01:
ph = HyperExponential.fit(m1, std).as_ph()
else:
ph = Exponential(1 / m1).as_ph()
elif num_moments == 3:
moments = [arrival.moment(i) for i in range(1, 4)]
try:
ph = fit_acph2(moments, strict=True)[0]
except BoundsError:
dist = fit_mern2(moments, strict=False)[0]
ph = dist.as_ph()
else:
raise ValueError(f"expected num_moments = 1, 2 or 3, but "
f"{num_moments} found")
# Fit lag, if needed:
if use_lag:
return fit_map_horvath05(ph, arrival.lag(1))[0]
return MarkovArrival.phase_type(ph.s, ph.p)
|
# quick sort visualizer
# import tkinter as tk
from tkinter import Tk, Label, Button, Frame, Canvas, Entry, SW, W
from tkinter import messagebox
import random
import time
import sys
sys.setrecursionlimit(10**6)
# colours
DARK_GREY = '#73C6B6'
LIGHT_GREY = '#B2BABB'
WHITE = '#F0F3F4'
GREEN = '#82E0AA'
GREEN_2 = '#76D7C4'
BLUE = '#85C1E9'
PURPLE = '#BB8FCE'
RED = '#F5B7B1'
YELLOW = '#F7E806'
# array of elements / rectangle heights
array = []
# ~30 elements fit in the canvas using below function
def drawRect(array, color):
canvas.delete("all")
c_height = 380
c_width = 1000
x_width = c_width / (len(array) + 1)
x_left = 15
spacing = 10
normalizedArray = [i / max(array) for i in array]
for i, height in enumerate(normalizedArray):
# top left
x0 = i * x_width + x_left + spacing
y0 = c_height - height * 340
# bottom right
x1 = (i + 1) * x_width + x_left
y1 = c_height
canvas.create_rectangle(x0, y0, x1, y1, fill=color[i])
canvas.create_text(x0 + 2, y0, anchor=SW, text=str(array[i]))
root.update_idletasks()
# generate random elements for the array and
# draw their rectangles on the canvas
def Generate():
global array
try:
minVal = int(minEntry.get())
maxVal = int(maxEntry.get())
size = int(sizeEntry.get())
except Exception:
messagebox.showwarning("Message", "Enter all values correctly")
array = []
# generating random list
color = []
for _ in range(size):
array.append(random.randrange(minVal, maxVal + 1))
color.append(GREEN_2)
drawRect(array, color)
# partition function
def partition(array, left, right, drawRect):
i = left + 1
pivot = array[left]
for j in range(left + 1, right + 1):
if array[j] < pivot:
array[i], array[j] = array[j], array[i]
i += 1
array[left], array[i - 1] = array[i - 1], array[left]
return i - 1
# quick sort function
def quickSort(array, left, right, drawRect):
if left < right:
pivot = partition(array, left, right, drawRect)
quickSort(array, left, pivot, drawRect)
quickSort(array, pivot + 1, right, drawRect)
drawRect(array, [BLUE if x >= left and x < pivot
else YELLOW if x == pivot
else PURPLE if x > pivot and x <= right
else RED for x in range(len(array))])
time.sleep(0.5)
drawRect(array, [GREEN for x in range(len(array))])
# actually perform quicksort
def sort():
try:
quickSort(array, 0, len(array) - 1, drawRect)
messagebox.showinfo('Succces', 'Array sorted!')
except Exception:
messagebox.showinfo('Error', 'Array could not be sorted')
def exit_win():
if messagebox.askokcancel("Exit", "Do you want to exit?"):
root.destroy()
# !--GUI code starts--!
# main window
root = Tk()
root.title('Quick Sort Visualizer')
# background color
root.config(bg=LIGHT_GREY)
# disabling resizing of window
root.resizable(0, 0)
# ---adding frames---
# top name frame
top = Frame(root,
width=1300,
height=200,
bg=GREEN_2,
bd=8,
relief="groove")
top.grid(row=0, column=0, padx=10, pady=5)
# frame for canvas
canvas = Canvas(root,
width=1000,
height=380,
bg=WHITE)
canvas.grid(row=1, column=0, padx=10, pady=5)
# frame for user entries
entries = Frame(root,
width=1300,
height=300,
bg=GREEN_2,
bd=8,
relief="groove")
entries.grid(row=2, column=0, padx=10, pady=5)
# ---adding widgets---
# top label
greeting = Label(top,
text="Quick Sort Visualizer",
width=62,
font=("Courier New", 20, "bold"),
background=GREEN_2)
greeting.grid(row=0, column=1, pady=5)
# user entries and buttons
# row 0
Size = Label(entries,
text="Size of array : ",
bg=LIGHT_GREY,
relief="groove")
Size.grid(row=0,
column=0,
padx=15,
pady=5,
sticky=W,
ipadx=20,
ipady=5)
sizeEntry = Entry(entries, justify="center")
sizeEntry.grid(row=0, column=1, padx=15, pady=5, sticky=W, ipady=5)
minn = Label(entries,
text="Minimum element : ",
bg=LIGHT_GREY,
relief="groove")
minn.grid(row=0, column=2, padx=15, pady=5, sticky=W, ipadx=20, ipady=5)
minEntry = Entry(entries, justify="center")
minEntry.grid(row=0, column=3, padx=15, pady=5, sticky=W, ipady=5)
maxx = Label(entries,
text="Maximum element : ",
bg=LIGHT_GREY,
relief="groove")
maxx.grid(row=0, column=4, padx=15, pady=5, sticky=W, ipadx=20, ipady=5)
maxEntry = Entry(entries, justify="center")
maxEntry.grid(row=0, column=5, padx=15, pady=5, sticky=W, ipady=5)
# row 1
generate = Button(entries, text="Generate", bg=LIGHT_GREY, command=Generate)
generate.grid(row=1, column=2, padx=15, pady=5, ipadx=20, ipady=5)
Search = Button(entries, text="Sort", bg=LIGHT_GREY, command=sort)
Search.grid(row=1, column=3, padx=15, pady=5, ipadx=20, ipady=5)
Exitbtn = Button(entries, text="Exit", bg=LIGHT_GREY, command=exit_win)
Exitbtn.grid(row=1, column=4, padx=15, pady=5, ipadx=20, ipady=5)
root.mainloop()
# !--GUI code ends--!
|
<gh_stars>1-10
# ---------------------------------------------------------------------
#
# Copyright (c) 2012 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, --INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ---------------------------------------------------------------------
# $Id: TestAll.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for WebBrick library functions (Functions.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys, unittest, logging, zipfile, re, StringIO, os, logging, cgi
from os.path import normpath
from rdflib import URIRef
sys.path.append("..")
sys.path.append("../cgi-bin")
import SubmitDatasetDetailsHandler
import SubmitDatasetConfirmationHandler
import ManifestRDFUtils
import SubmitDatasetUtils
import HttpSession
from MiscLib import TestUtils
import TestConfig
#from TestConfig import setTestConfig.DatasetsBaseDir
#from TestConfig import TestConfig.SiloName, DirName, TestConfig.DatasetsEmptyDir, TestConfig.DatasetsEmptyDirName, UpdatedTitle, UpdatedDescription, TestConfig.formdata, updatedTestConfig.formdata
#from TestConfig import TestConfig.DatasetId, DatasetDir, Title, Description, User, TestConfig.ElementValueList, TestConfig.ElementValueUpdatedList
#from TestConfig import ElementCreatorUri,ElementIdentifierUri,ElementTitleUri,ElementDescriptionUri,TestConfig.ElementUriList
#from TestConfig import TestConfig.DatasetsBaseDir, TestConfig.ManifestFilePath
Logger = logging.getLogger("TestSubmitDatasetHandler")
ExpectedDictionary = {
"creator" : "admiral"
, "identifier" : "SubmissionToolTest"
, "title" : "Submission tool test title"
, "description" : "Submission tool test description"
}
ExpectedUpdatedDictionary = {
"creator" : "admiral"
, "identifier" : "SubmissionToolTest"
, "title" : "Submission tool updated test title"
, "description" : "Submission tool updated test description"
}
class TestSubmitDatasetHandler(unittest.TestCase):
def setUp(self):
self.endpointhost = TestConfig.HostName
self.basepath = "/"+TestConfig.SiloName+"/"
self.session = HttpSession.makeHttpSession(self.endpointhost, self.basepath, TestConfig.Username, TestConfig.Password)
return
def tearDown(self):
try:
SubmitDatasetUtils.deleteDataset(self.session, SubmitDatasetUtils.getFormParam('datId', TestConfig.formdata))
SubmitDatasetUtils.deleteDataset(self.session, SubmitDatasetUtils.getFormParam('datId', TestConfig.formdata) +"-packed");
except:
pass
return
# Tests
# Test that the Dataset handler returned a HTML page back to the client that requested it:
def testSubmitDatasetHandlerHTMLResponse(self):
outputStr = StringIO.StringIO()
# Invoke dataset submission program, passing faked form submission parameters
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(TestConfig.formdata, outputStr)
#Logger.debug("Output String from output stream: "+outputStr.getvalue())
# print "Output String from output stream: "+outputStr.getvalue()
outputStr.seek(0, os.SEEK_SET)
firstLine = outputStr.readline()
Logger.debug("FirstLine = " + firstLine);
#self.assertEqual( firstLine, "Content-type: text/html\n", "Submission Handler could not action the client request!")
self.assertEqual( firstLine.strip(), "Status: 303 Dataset submission successful","Submission Handler could not action the client request!")
SubmitDatasetUtils.deleteDataset(self.session, TestConfig.DatasetId +"-packed");
#SubmitDatasetUtils.deleteDataset(TestConfig.SiloName, datasetId);
return
# Test that the named dataset has been created in the databank
def testSubmitDatasetHandlerDatasetCreation(self):
outputStr = StringIO.StringIO()
# Invoke dataset submission program, passing faked form submission parameters
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(TestConfig.formdata, outputStr)
# Check that the dataset is created
found = SubmitDatasetUtils.ifDatasetExists(self.session, TestConfig.DatasetId)
self.assertEquals(found, True, "Dataset Creation Failed!" )
# Check that the new dataset can be dereferenced in the databank
self.session.doHTTP_GET(resource="/" + TestConfig.SiloName +"/datasets/"+ TestConfig.DatasetId+"-packed",
expect_status=200, expect_reason="OK", accept_type="application/json")
# Check that a HTML Response page is returned
outputStr.seek(0, os.SEEK_SET)
firstLine = outputStr.readline()
# self.assertEqual( firstLine, "Content-type: text/html\n", "Submission Handler could not action the client request!")
self.assertEqual( firstLine.strip(), "Status: 303 Dataset submission successful","Submission Handler could not action the client request!")
SubmitDatasetUtils.deleteDataset(self.session, TestConfig.DatasetId+"-packed")
return
# Test that the named dataset has been created in the databank
def testSubmitDatasetHandlerDatasetDeletion(self):
outputStr = StringIO.StringIO()
# Invoke dataset submission program, passing faked form submission parameters
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(TestConfig.formdata, outputStr)
SubmitDatasetUtils.deleteDataset(self.session, TestConfig.DatasetId)
# Check that the dataset is deleted
found = SubmitDatasetUtils.ifDatasetExists(self.session, TestConfig.DatasetId)
self.assertEquals(found, False, "Dataset Deletion Failed!" )
# Check that the dataset deleted cannot be dereferenced in the databank
self.session.doHTTP_GET(resource="/" + TestConfig.SiloName +"/datasets/"+ TestConfig.DatasetId,
expect_status=404, expect_reason="Not Found", accept_type="application/json")
# Check that a HTML Response page is returned
outputStr.seek(0, os.SEEK_SET)
firstLine = outputStr.readline()
# self.assertEqual( firstLine, "Content-type: text/html\n", "Submission Handler could not action the client request!")
self.assertEqual( firstLine.strip(), "Status: 303 Dataset submission successful","Submission Handler could not action the client request!")
SubmitDatasetUtils.deleteDataset(self.session, TestConfig.DatasetId+"-packed")
return
def testSubmitDatasetHandlerDirectorySubmission(self):
outputStr = StringIO.StringIO()
# Invoke dataset submission program, passing faked form submission parameters
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(TestConfig.formdata, outputStr)
# Check that the dataset created for unzipped data can be dereferenced in the databank
datasetId = SubmitDatasetUtils.getFormParam('datId', TestConfig.formdata)
datasetDir = SubmitDatasetUtils.getFormParam('datDir', TestConfig.formdata)
self.session.doHTTP_GET(resource="/" + TestConfig.SiloName +"/datasets/"+datasetId+"-packed",
expect_status=200, expect_reason="OK", accept_type="application/json")
# Invoke dataset submission program yet again.
# This time, bypassing the dataset creation but continuing submittion of data to the already exiting dataset
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(TestConfig.formdata, outputStr)
# Check that the dataset created for unzipped data can be dereferenced in the databank
self.session.doHTTP_GET(resource="/" + TestConfig.SiloName +"/datasets/"+TestConfig.DatasetId+"-packed",
expect_status=200, expect_reason="OK", accept_type="application/json")
SubmitDatasetUtils.deleteDataset(self.session, TestConfig.DatasetId+"-packed")
return
def testSubmitDatasetHandlerEmptyDirectorySubmission(self):
outputStr = StringIO.StringIO()
# reset the Dataset Directory to point to an empty directory
formdata = TestConfig.formdata.copy()
formdata['datDir'] = cgi.MiniFieldStorage('datDir', TestConfig.DatasetsEmptyDirPath)
# Invoke dataset submission program, passing faked form submission parameters
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(formdata, outputStr)
# Check that the dataset created for unzipped data can be dereferenced in the databank
self.session.doHTTP_GET(resource="/" + TestConfig.SiloName +"/datasets/"+TestConfig.DatasetId+"-packed",
expect_status=200, expect_reason="OK", accept_type="application/json")
# Invoke dataset submission program yet again.
# This time, bypassing the dataset creation but continuing submittion of data to the already exiting dataset
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(formdata, outputStr)
# Check that the dataset created for unzipped data can be dereferenced in the databank
self.session.doHTTP_GET(resource="/" + TestConfig.SiloName +"/datasets/"+TestConfig.DatasetId+"-packed", expect_status=200, expect_reason="OK", accept_type="application/json")
SubmitDatasetUtils.deleteDataset(self.session, TestConfig.DatasetId+"-packed")
return
def testSubmitDatasetHandlerUpdateMetadataBeforeSubmission(self):
# the initial manifest file
SubmitDatasetDetailsHandler.updateMetadataInDirectoryBeforeSubmission(TestConfig.ManifestFilePath, TestConfig.ElementUriList, TestConfig.ElementValueList)
# Assert that the manifets has been created
self.assertEqual(True,ManifestRDFUtils.ifFileExists(TestConfig.ManifestFilePath),"Manifest file was not successfully created!")
# Update the manifets contents
SubmitDatasetDetailsHandler.updateMetadataInDirectoryBeforeSubmission(TestConfig.ManifestFilePath, TestConfig.ElementUriList, TestConfig.ElementValueUpdatedList)
# Read the manifest again
rdfGraph = ManifestRDFUtils. readManifestFile(TestConfig.ManifestFilePath)
# Assert that the Updated Value list from metadata == "TestConfig.ElementValueUpdatedList"
self.assertEqual(ManifestRDFUtils.getElementValuesFromManifest(rdfGraph,TestConfig.ElementUriList),TestConfig.ElementValueUpdatedList,"Error updating the metadata!")
return
def testUpdateLocalManifestAndDatasetSubmission(self):
outputStr = StringIO.StringIO()
# Invoke dataset submission program, passing faked form submission parameters
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(TestConfig.formdata, outputStr)
# Read the dictionary from the manifest
actualDictionary = ManifestRDFUtils.getDictionaryFromManifest(TestConfig.ManifestFilePath, TestConfig.ElementUriList)
Logger.debug("\n Expected Dictionary after form submission= " + repr(ExpectedDictionary))
Logger.debug("\n Actual Dictionary after form submission = " + repr(actualDictionary))
###print "\n---- actualDictionary ---- \n"+repr(actualDictionary)
# Assert that the ExpectedDictionary == actualDictionary
self.assertEqual(ExpectedDictionary,actualDictionary, "The submit Utils Tool is unable to fetch metadata information!")
# Invoke dataset submission program with updated information, passing faked updated form submission parameters
SubmitDatasetConfirmationHandler.processDatasetSubmissionForm(TestConfig.updatedformdata, outputStr)
# Read the dictionary from the manifest after processing the form submission with the updated faked form data
actualUpdatedDictionary = ManifestRDFUtils.getDictionaryFromManifest(TestConfig.ManifestFilePath, TestConfig.ElementUriList)
Logger.debug("\n Expected Updated Dictionary after form resubmission = " + repr(ExpectedUpdatedDictionary))
Logger.debug("\n Actual Updated Dictionary after form resubmission = " + repr(actualUpdatedDictionary))
# Assert that the ExpectedUpdatedDictionary == actualUpdatedDictionary
self.assertEqual(ExpectedUpdatedDictionary,actualUpdatedDictionary, "The submit Utils Tool was unable to update form data information in the metadata file!")
return
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of udirName, baseDir,nit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ #"testUnits"
"testSubmitDatasetHandlerHTMLResponse"
,"testSubmitDatasetHandlerDatasetCreation"
,"testSubmitDatasetHandlerDatasetDeletion"
,"testSubmitDatasetHandlerDirectorySubmission"
,"testSubmitDatasetHandlerEmptyDirectorySubmission"
,"testSubmitDatasetHandlerUpdateMetadataBeforeSubmission"
,"testUpdateLocalManifestAndDatasetSubmission"
],
"component":
[ #"testComponents"
],
"integration":
[ #"testIntegration"
],
"pending":
[ #"testPending"
]
}
return TestUtils.getTestSuite(TestSubmitDatasetHandler, testdict, select=select)
if __name__ == "__main__":
TestConfig.setDatasetsBaseDir(".")
TestUtils.runTests("TestSubmitDatasetHandler.log", getTestSuite, sys.argv)
|
<reponame>chw3k5/WaferScreen
import os
import sys
import time
import socket
import serial
import numpy as np
"""
This is for the Keithley 2450 source meter.
This is designed to be platform independent.
"""
# These are definitions that are used by many methods in the Keithley2450 class below.
def read_serial(serial_device):
if sys.version_info.major == 3:
one_byte = b""
byte_string = b""
while one_byte != b'\n':
one_byte = serial_device.read()
byte_string += one_byte
else:
one_byte = None
byte_string = ""
while one_byte != "":
one_byte = serial_device.read()
byte_string += one_byte
return byte_string
def read_single_lan(lan_device, termination=b'\n', buffer=10000):
all_data = b""
while all_data == b"" or all_data[-1:] != termination:
all_data += lan_device.recv(buffer)
return all_data[:-1]
def read_all_lan(lan_device):
return lan_device.recv(1000000)
def number_format(number):
if isinstance(number, int):
formatted_str = str('%i' % number)
elif isinstance(number, float):
sci_notation = str('%e' % number)
float_part, power_part = sci_notation.split("e")
power_int = int(power_part) - 3
float_part_int = int(float(float_part) * 1000.0)
if float_part_int == 0:
formatted_str = '0'
else:
formatted_str = str(float_part_int) + "e" + str(power_int)
else:
formatted_str = number
formatted = bytes(formatted_str, "UTF-8")
return formatted
"""
This is the class to control the Keithley 2450
"""
class Keithley2450:
def __init__(self, connection_type='lan', port_name='COM2', source_mode='current', verbose=False):
self.connection_type = connection_type.lower().strip()
if connection_type not in {"lan", 'serial'}:
raise TypeError(F"connection_type: {self.connection_type}, is not recognized.")
self.source_mode = source_mode.lower().strip()
if self.source_mode not in {"current", 'voltage'}:
raise TypeError(F"source_mode type: {self.source_mode}, is not recognized.")
self.device = None
self.keithley_source_mode = None
self.keithley_sense_mode = None
self.source_range = None
self.sense_range = None
self.timeout = 2
self.verbose = verbose
if self.connection_type == 'lan':
# IP address information for the Vacuum Gauge
self.ip = '169.254.21.224'
self.gateway = '169.254.21.224'
self.subnet = '255.255.0.0'
self.port = 5025
self.buffer = 10000
elif self.connection_type == "serial":
self.port_name = port_name
self.baudrate = 57600
self.bytesize = 8
self.stopbits = 1
self.parity = "N"
self.fullDataPath = None
def open(self):
if self.connection_type == 'lan':
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device.connect((self.ip, self.port))
elif self.connection_type == "serial":
self.device = serial.Serial(port=self.port_name,
baudrate=self.baudrate,
bytesize=self.bytesize,
stopbits=self.stopbits,
parity=self.parity,
timeout=self.timeout)
def read(self):
if self.connection_type == 'lan':
return read_single_lan(lan_device=self.device)
elif self.connection_type == 'serial':
return read_serial(serial_device=self.device)
def write(self, write_str):
if self.connection_type == 'lan':
self.device.send(write_str)
elif self.connection_type == 'serial':
self.device.write(write_str)
def close(self):
self.device.close()
def output_on(self):
self.write(write_str=b"OUTPUT ON\n")
if self.verbose:
print("The output for the Keithley 2450 has been set to ON")
def output_off(self):
self.write(write_str=b"OUTPUT OFF\n")
if self.verbose:
print("The output for the Keithley 2450 has been set to OFF")
def reset(self):
self.write(write_str=b"*RST\n")
def set_source_type(self, source_type=None, sense_type=None, v_range=b'2', i_range=b'100e-3'):
if source_type is None:
source_type = self.source_mode
self.source_mode = source_type
if self.source_mode == "current":
self.keithley_source_mode = b"CURR"
self.source_range = i_range
if sense_type is None or sense_type != "current":
self.keithley_sense_mode = b"VOLT"
self.sense_range = v_range
else:
self.keithley_sense_mode = b"CURR"
self.sense_range = i_range
elif self.source_mode == "voltage":
self.keithley_source_mode = b"VOLT"
self.source_range = v_range
if sense_type is None or sense_type != "voltage":
self.keithley_sense_mode = b"CURR"
self.sense_range = i_range
else:
self.keithley_sense_mode = b"VOLT"
self.sense_range = v_range
else:
raise TypeError(F"source_mode type: {self.source_mode}, is not recognized.")
self.write(write_str=b"SOUR:FUNC " + self.keithley_source_mode + b"\n")
self.write(write_str=b"SOUR:" + self.keithley_source_mode + b":RANG " + self.source_range + b"\n")
self.write(write_str=b"SENS:FUNC \"" + self.keithley_sense_mode + b"\"\n")
if self.keithley_source_mode != self.keithley_sense_mode:
self.write(write_str=b"SENS:" + self.keithley_sense_mode + b":RANG " + self.sense_range + b"\n")
# activated the 4 wire measurements
self.write(write_str=b"SENS:" + self.keithley_sense_mode + b":RSEN ON\n")
def get_volt(self):
self.write(write_str=b'MEAS:VOLT?\n')
return_str = self.read()
voltage = float(return_str)
if self.verbose:
print(F"{voltage} is the read voltage from the Keithley 2450")
return voltage
def set_volt(self, voltage):
write_str = b":SOUR:VOLT " + number_format(voltage) + b"\n"
self.write(write_str=write_str)
if self.verbose:
print("Keithley 2450 was set to a voltage of", voltage)
def get_current(self):
self.write(write_str=b'SOUR:CURR?\n')
current = float(self.read())
if self.verbose:
print(current, "is the read current from the Keithley 2450")
def set_current(self, current_amps):
write_str = b"SOUR:CURR " + number_format(current_amps) + b"\n"
self.write(write_str=write_str)
if self.verbose:
print("Keithley 2450 was set to a current of", current_amps, "Amps")
def test_output_on_off(self, sleep_time=10):
self.open()
self.output_on()
print(F"sleeping for {sleep_time} seconds...")
time.sleep(sleep_time)
keithley2450.output_off()
keithley2450.close()
def init_sweep(self, v_limit=b"5.0", i_limit=b"0.100"):
self.write(write_str=b"SOUR: " + self.keithley_source_mode + b":VLIM " + number_format(v_limit) + b"\n")
self.write(write_str=b"SOUR: " + self.keithley_source_mode + b":ILIM " + i_limit + b"\n")
def sweep(self, start_curr=-0.001, stop_curr=0.001, num_points=21, delay_s=0.1):
"""
:SOURce[1]:SWEep:<function>:LINear:STEP <start>, <stop>, <steps>, <delay>, <count>,
<rangeType>, <failAbort>, <dual>, "<bufferName>"
SOUR:SWE:CURR:LIN:STEP -1.05, 1.05, .25, 10e-3, 1, FIXED
"""
step_cur = (stop_curr - start_curr) / (num_points - 1.0)
sweep_base_str = b"SOUR:SWE:" + self.keithley_source_mode + b":LIN "
start_curr_str = number_format(start_curr) + b", "
stop_curr_str = number_format(stop_curr) + b", "
num_points_str = number_format(num_points) + b", "
delay_s_str = number_format(delay_s) + b", "
loop_count = number_format(1) + b", "
range_type = b"FIXED, "
fail_abort = b"OFF, "
dual = b"ON, "
buffer_name = b'''\"defbuffer1\"'''
# calculations
sweep_array_a = np.arange(start_curr, stop_curr + (step_cur / 2.0), step_cur)
sweep_str = sweep_base_str + start_curr_str + stop_curr_str + num_points_str + delay_s_str + loop_count
sweep_str += range_type + fail_abort + dual + buffer_name + b"\n"
self.write(write_str=sweep_str)
self.write(write_str=b"INIT\n")
if self.verbose:
print(F"Sweeping:\n start_curr:{'%1.3f' % (1000 * start_curr)}mA, stop_current:{'%1.3f' % (1000 * stop_curr)}mA, num_points{num_points}")
self.write(write_str=b"*WAI\n")
get_sweep_str = b"TRAC:DATA? 1, " + bytes(str(len(sweep_array_a)), "UTF-8") + b", " + buffer_name + b", "
get_sweep_str += b"SOUR, READ\n"
self.write(write_str=get_sweep_str)
split_binary_data = self.read().strip().split(b',')
meas_data = np.array([float(v_point) for v_point in split_binary_data])
output_data = []
for set_point_index, set_point in list(enumerate(sweep_array_a)):
meas_index = 2 * set_point_index
meas_a = meas_data[meas_index]
meas_v = meas_data[meas_index + 1]
output_data.append((set_point, meas_a, meas_v))
return output_data
def get_range_keithley2450(self):
write_str = b":" + self.keithley_source_mode + b":RANG?\n"
self.write(write_str=write_str)
the_range = self.read()
if self.verbose:
print(the_range, "is the current RANGE from the Keithley 2450")
return the_range
def zero(self):
if self.source_mode == "current":
self.set_current(current_amps=0.0)
else:
self.set_volt(voltage=0.0)
def startup(self):
self.open()
self.reset()
def __enter__(self):
self.startup()
def shutdown(self):
self.output_off()
self.close()
def __exit__(self, exc_type, exc_value, exc_traceback):
self.shutdown()
if __name__ == "__main__":
    # Demo / smoke test: talk to a Keithley 2450 over LAN in current-source
    # mode, run one sweep, then shut the instrument down cleanly.
    keithley2450 = Keithley2450(connection_type='lan', source_mode="current", verbose=True)
    keithley2450.startup()
    # NOTE(review): ranges are SCPI byte strings — presumably a 20 mV voltage
    # range and a 100 uA current range; confirm against set_source_type.
    keithley2450.set_source_type(v_range=b"2e-2", i_range=b"100e-6")
    sweep_data = keithley2450.sweep()
    keithley2450.shutdown()
|
<filename>WorkoutAnalyze.py
#!/usr/bin/env python
# coding: utf-8
'''
BSD 3-Clause License
Copyright (c) 2020, <NAME>
All rights reserved.
'''
# First party classes
import os,glob,shutil, subprocess
import re
import datetime, time
import configparser
import sys, getopt
import logging
import logging.config
# 3rd party classes
import numpy as np
import pandas as pd
# custom classes
import dao.files as fao
import util.timeConv as tc
import util.WrktSummary as wrktSum
import rungap.normWrkt as rgNorm
# tempDir = '/tmp/' #default to /tmp
logging.config.fileConfig('logging.conf')
logger = logging.getLogger()
def summarizeWrkoutSegments(segments_df):
    '''
    Calculate a summary of the workout from its segments and write it to the log.

    Parameter segments_df: DataFrame of workout segments (contains a
    'segment' column, renamed to 'interval' for the summary calculation).
    Returns the dictionary produced by wrktSum.calcWrktSummary, keyed by
    'warm_up', 'intvl_tot' and 'cool_down'.

    FIX: the three near-identical logging stanzas are collapsed into one
    loop; this also repairs the inconsistent "'per mile, '" spacing in the
    log output (the warm-up line was missing the leading space).
    '''
    wrkt_summary = wrktSum.calcWrktSummary(
        segments_df.rename(columns={'segment': 'interval'}, inplace=False))
    logger.info('Workout Stats:')
    # One log line per workout phase; lazy %-args defer string building
    # until the logger actually emits the record.
    for label, key in (('Warm Up', 'warm_up'),
                       ('Intervals', 'intvl_tot'),
                       ('Cool Down', 'cool_down')):
        phase = wrkt_summary[key]
        logger.info('%s: %s total, %s miles, %s per mile, %s ele up, %s ele down',
                    label, phase['dur_str'], phase['dist_mi'],
                    phase['pace_str'], phase['ele_up'], phase['ele_down'])
    # wrkt_sum_frmla = wrktSum.calcWrktSumFrmla(segments_df.rename(columns={'segment': 'interval'}, inplace=False))
    return wrkt_summary
def custSplits(actv_df, tempDir):
    '''
    Create a CSV file with a custom split column for the passed in activity.
    User can then add the split marks in the CSV and resave it as CSV with
    the same name. Job will use markers in the custom column to create a
    new grouping of splits.

    Parameter actv_df: normalized activity DataFrame (one row per data point)
    Parameter tempDir: working directory used for the temporary CSV round trip
    Returns DataFrame of the activity grouped by the user-provided splits.
    '''
    df = actv_df.copy()
    # 1) Add empty custom split column before the other split indicators
    df.insert(5, 'custom', np.nan)
    # 2) Export DF to CSV and pickle (pickle is not needed at this time)
    fao.save_df(df, tempDir, 'temp_custom_split', frmt=['csv', 'pickle'])
    # 3/4) Pause the job and open the CSV on the user's system
    # (macOS 'open' command — assumes macOS; TODO confirm portability)
    openCmd = os.path.join(tempDir, 'temp_custom_split.csv')
    logger.debug('Path to temp custom file: ' + openCmd)
    subprocess.run(args=['open', openCmd], cwd='/')
    # 5) User enters data in the custom column and saves under the same name
    print('Export update CSV to: ' + tempDir)
    input("Update the temp_custom_split.csv file with custom splits. Then Press Enter to continue")
    # 6) Read the updated CSV file back in
    edit_df = pd.read_csv(os.path.join(tempDir, 'temp_custom_split.csv'))
    # 7) Keep only rows carrying a custom marker (the first row always starts
    #    a split) and renumber the markers sequentially.
    edit_df.loc[0, 'custom'] = 1
    cust_df = edit_df[edit_df['custom'].notna()][['date_time', 'custom']]
    cust_df.reset_index(inplace=True, drop=True)
    cust_df['custom'] = cust_df.index.get_level_values(0).values
    cust_df['custom'] = cust_df['custom'].astype('int64')
    # FIX: pd.to_datetime replaces the deprecated unit-less
    # astype('datetime64'), which raises in recent pandas versions.
    cust_df['date_time'] = pd.to_datetime(cust_df['date_time'])
    # 8) Replace the empty custom column with the user's markers
    df.drop(['custom'], axis=1, inplace=True)
    actv_cust_df = pd.merge(df, cust_df, how='left', on='date_time')
    # 9) Forward-fill so every row belongs to the most recent split marker.
    #    FIX: .ffill() replaces the deprecated fillna(method='ffill').
    actv_cust_df['custom'] = actv_cust_df['custom'].ffill()
    # 10) Group using the custom column
    return rgNorm.group_actv(actv_cust_df, 'custom')
def printArgumentsHelp():
    '''Print command-line usage and option descriptions to the console.'''
    print ('WorkoutAnalyze.py -i <inputfile> -o <outputdir>')
    print ("-i, --ifile arg : Input filename to process")
    print ("-o, --odir arg : Output directory for results")
    print ("--splits arg : Segments to split up file, ")
    print ("    options are mile, kilometer, segment, pause, custom, all")
    print ("    all option will generate mile, kilometer, segment, pause")
    print ("    default is mile, segment, pause")
def getSplitOptions(arg):
    '''
    Parameter arg: comma delimited list of arguments for splitting the workout

    Lower-cases the argument, splits it on commas and maps each entry to a
    canonical split name: 'pause' becomes 'resume', and 'all' expands to
    mile, segment, resume and kilometer. Invalid entries are reported on
    the console and skipped.

    Returns the de-duplicated list of split names, in first-seen order.
    '''
    canonical = []
    for token in arg.lower().split(','):
        if token == 'all':
            canonical += ['mile', 'segment', 'resume', 'kilometer']
        elif token == 'pause':
            canonical.append('resume')
        elif token in ('custom', 'mile', 'segment', 'kilometer'):
            canonical.append(token)
        else:
            print("Invalid split argument: " + token)
    # dict preserves insertion order, so this dedupes while keeping order
    return list(dict.fromkeys(canonical))
def main(argv):
    '''
    Run the full workout analysis pipeline.

    Parameter argv: command line arguments (sys.argv[1:])

    Steps
    1. Get config details
    2. Extract files
    3. Load files into activities and events data frames
    4. Clean up and merge the activities and events data frames
    5. Group activities by different splits
    6. Export activities grouped by splits to CSV files
    '''
    config = configparser.ConfigParser()
    # progDir = os.path.dirname(os.path.abspath(__file__)) #might need to use this in actual Python script, but does not work in Jupyter Notebook
    progDir = os.path.abspath('')
    config.read(progDir + "/config.txt")
    logger.info('WorkoutAnalyze Start')
    tempDir = config['wrkt_analyze_inputs']['temp_dir']
    outDir = config['wrkt_analyze_outputs']['dir']
    splitOptions = []
    filename = ''
    fao.clean_dir(tempDir)
    try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "odir=", "split="])
    except getopt.GetoptError:
        printArgumentsHelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            printArgumentsHelp()
            sys.exit()
        elif opt in ("-i", "--ifile"):
            filename = arg
        elif opt in ("-o", "--odir"):
            outDir = arg
        # BUG FIX: the original tested `opt in ('--split')`, which is
        # substring membership in a plain string — a one-element tuple
        # needs a trailing comma.
        elif opt in ('--split',):
            splitOptions = getSplitOptions(arg)
    if splitOptions == []:
        # Fall back to the configured default split options
        splitOptions = config['wrkt_analyze']['dflt_split_opt'].split(',')
    if filename == '':
        # Default to the most recent backup when no input file was given
        filename = os.path.join(config['rungap']['backup_dir'], fao.getLatestFile(config['rungap']['backup_dir']))
    logger.info('Input file: ' + filename)
    logger.info('Split arguments: ' + str(splitOptions))
    fao.extract_files(filename, tempDir)
    data = fao.get_workout_data(tempDir)
    actv_df = rgNorm.normalize_activity(data)
    # Group activities by each requested split type ('custom' is interactive)
    splitDict = {}
    for split in splitOptions:
        if split == 'custom':
            splitDict['custom'] = custSplits(actv_df, tempDir)
        else:
            splitDict[split] = rgNorm.group_actv(actv_df, split)
    # Export data frames to files for review
    for split in splitOptions:
        fao.save_df(splitDict[split], outDir, split + '_split', frmt=['csv', 'pickle'])
    # Always save the activity dataframe
    fao.save_df(actv_df, outDir, 'activity', frmt=['csv', 'pickle'])
    fao.clean_dir(tempDir)
    if 'segment' in splitOptions:
        summarizeWrkoutSegments(splitDict['segment'])
    logger.info('WorkoutAnalyze End')
|
<reponame>ccaspers/uncommitted
"""The 'uncommitted' command-line tool itself."""
import os
import sys
from argparse import ArgumentParser
from . import git
from .finder import find_repos
USAGE = """usage: %%prog [options] path [path...]
Checks the status of all git, Subversion, and Mercurial repositories
beneath the paths given on the command line. Any repositories with
uncommitted or unpushed changes are printed to standard out, along
with the status of the files inside."""
SYSTEMS = {b".git": (b"Git", git.status)}
DOTDIRS = set(SYSTEMS)
class ErrorCannotLocate(Exception):
    """Signal that we cannot successfully run the locate(1) binary."""
# NOTE(review): this exception is not raised anywhere in this chunk —
# presumably kept for callers elsewhere; verify before removing.
# os.linesep encoded to bytes, since output() writes raw bytes to fd 1.
linesep = os.linesep.encode("ascii")
def output(thing):
    """Replacement for print() that writes bytes plus a newline to fd 1 (stdout)."""
    os.write(1, b"".join((thing, linesep)))
def scan(repos, options):
    """Given a repository list [(path, vcsname), ...], scan each of them."""
    ignore_set = set()
    # Treat the list as a stack: reverse it so pop() yields original order.
    pending = list(reversed(repos))
    while pending:
        directory, dotdir = pending.pop()
        if any(pattern in directory for pattern in options.ignore_patterns):
            if options.verbose:
                output(b"Ignoring repo: %s" % directory)
                output(b"")
            continue
        vcsname, get_status = SYSTEMS[dotdir]
        lines, subrepos = get_status(directory, ignore_set, options)
        # Tackle subrepos immediately after their parent repository by
        # pushing them onto the top of the stack.
        children = [(os.path.join(directory, sub), dotdir) for sub in subrepos]
        pending.extend(reversed(children))
        if lines is None:  # signal that we should ignore this one
            continue
        if lines or options.verbose:
            output(b"%s - %s" % (directory, vcsname))
            for line in lines:
                output(line)
            output(b"")
def main():
    """Parse command-line arguments and scan the requested directory tree."""
    parser = ArgumentParser(usage=USAGE)
    parser.add_argument(
        "directory",
        type=str,
        default=".",
        nargs="?",
        # BUG FIX: this help text was copy-pasted from --verbose
        help="directory tree to search for repositories (default: .)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="print every repository whether changed or not",
    )
    parser.add_argument(
        "-n",
        "--non-tracking",
        action="store_true",
        help="print non-tracking branches (git only)",
    )
    parser.add_argument(
        "-u",
        "--untracked",
        action="store_true",
        help="print untracked files (git only)",
    )
    parser.add_argument(
        "-s", "--stash", action="store_true", help="print stash (git only)"
    )
    parser.add_argument(
        "-i",
        dest="ignore_patterns",
        default=[],
        nargs="*",
        help="ignore any directory paths that contain the specified string",
    )
    parser.add_argument(
        "--ignore-svn-states",
        help="ignore SVN states given as a string of status codes (SVN only)",
    )
    args = parser.parse_args()
    # NOTE: the old `if not args:` guard was dead code — argparse.Namespace
    # is always truthy, and parse_args() already exits on bad input.
    if sys.version_info[0] >= 3:
        # Turn string arguments back into their original bytes.
        fix = os.fsencode
        args.directory = fix(args.directory)
        args.ignore_patterns = [fix(s) for s in args.ignore_patterns]
    repos = set()
    path = os.path.abspath(args.directory)
    if not os.path.isdir(path):
        sys.stderr.write("Error: not a directory: %s\n" % (path,))
        sys.exit(3)
    paths = find_repos(path, patterns=DOTDIRS)
    repos.update(paths)
    repos = sorted(repos)
    scan(repos, args)
|
<reponame>fabian-paul/wepy
import multiprocessing as mulproc
import random as rand
import itertools as it
import numpy as np
from wepy.resampling.resamplers.resampler import Resampler
from wepy.resampling.decisions.clone_merge import MultiCloneMergeDecision
class REVOResampler(Resampler):
    """REVO-style resampler: clones and merges walkers so as to maximize a
    pairwise-distance 'spread' objective while keeping every walker weight
    within [pmin, pmax]."""

    DECISION = MultiCloneMergeDecision

    # state change data for the resampler
    RESAMPLER_FIELDS = ('n_walkers', 'distance_matrix', 'spread', 'image_shape', 'images')
    RESAMPLER_SHAPES = ((1,), Ellipsis, (1,), Ellipsis, Ellipsis)
    # BUG FIX: np.int and np.float were deprecated aliases of the builtins
    # and were removed in NumPy 1.24, which makes this class definition
    # raise AttributeError on modern NumPy; the builtins are equivalent.
    RESAMPLER_DTYPES = (int, float, float, int, None)

    # fields that can be used for a table like representation
    RESAMPLER_RECORD_FIELDS = ('spread',)

    # fields for resampling data
    RESAMPLING_FIELDS = DECISION.FIELDS + ('step_idx', 'walker_idx',)
    RESAMPLING_SHAPES = DECISION.SHAPES + ((1,), (1,),)
    RESAMPLING_DTYPES = DECISION.DTYPES + (int, int,)

    # fields that can be used for a table like representation
    RESAMPLING_RECORD_FIELDS = DECISION.RECORD_FIELDS + ('step_idx', 'walker_idx',)

    def __init__(self, seed=None, pmin=1e-12, pmax=0.1, dpower=4, merge_dist=2.5,
                 distance=None, init_state=None, weights=True):
        """
        Parameters
        ----------
        seed : int or None
            Seed for the random number generator used in clone/merge choices.
        pmin : float
            Minimum allowed walker probability (weight).
        pmax : float
            Maximum allowed walker probability (weight).
        dpower : int
            Exponent applied to pairwise distances in the spread objective.
        merge_dist : float
            Maximum distance between two walkers for them to be merged.
        distance : object
            Distance metric providing image() and image_distance(); required.
        init_state : object
            An initial walker state, used only to infer the image dtype; required.
        weights : bool
            If True, weight factors enter the spread; otherwise all walkers
            contribute equally.
        """
        self.decision = self.DECISION

        # the minimum probability for a walker
        self.pmin = pmin
        # ln(probability_min), with a 1/100 safety margin
        self.lpmin = np.log(pmin / 100)
        # maximum probability for a walker
        self.pmax = pmax
        # distance exponent in the spread objective
        self.dpower = dpower
        # maximum distance allowed for a merge
        self.merge_dist = merge_dist

        # the distance metric
        assert distance is not None, "Must give a distance metric class"
        self.distance = distance

        # setting the random seed
        self.seed = seed
        if seed is not None:
            rand.seed(seed)

        # setting the weights parameter
        self.weights = weights

        # we do not know the shape and dtype of the images until
        # runtime so we determine them here
        assert init_state is not None, "must give an initial state to infer data about the image"
        image = self.distance.image(init_state)
        self.image_dtype = image.dtype

    # we need this to on the fly find out what the datatype of the
    # image is
    def resampler_field_dtypes(self):
        """Return the resampler field dtypes, patching in the runtime image dtype."""
        # index of the image idx
        image_idx = self.resampler_field_names().index('images')
        # dtypes adding the image dtype
        dtypes = list(super().resampler_field_dtypes())
        dtypes[image_idx] = self.image_dtype
        return tuple(dtypes)

    def _calcspread(self, walkerwt, amp, distance_matrix):
        """Compute the spread objective and each walker's wsum (its weighted
        distance contribution) for the given weights and amplitudes."""
        n_walkers = len(walkerwt)

        # the value to be optimized
        spread = 0
        # per-walker accumulated (weighted) distance to all other walkers
        wsum = np.zeros(n_walkers)
        # weight factors for the walkers
        wtfac = np.zeros(n_walkers)

        # set the weight factors
        for i in range(n_walkers):
            if walkerwt[i] > 0 and amp[i] > 0:
                if self.weights:
                    wtfac[i] = np.log(walkerwt[i] / amp[i]) - self.lpmin
                else:
                    wtfac[i] = 1
            else:
                wtfac[i] = 0
            if wtfac[i] < 0:
                wtfac[i] = 0

        # accumulate pairwise contributions over all walker pairs
        for i in range(n_walkers - 1):
            if amp[i] > 0:
                for j in range(i + 1, n_walkers):
                    if amp[j] > 0:
                        d = ((distance_matrix[i][j]) ** self.dpower) * wtfac[i] * wtfac[j]
                        spread += d * amp[i] * amp[j]
                        wsum[i] += d * amp[j]
                        wsum[j] += d * amp[i]

        return spread, wsum

    def decide_clone_merge(self, walkerwt, amp, distance_matrix, debug_prints=False):
        """Greedily choose clone/merge actions that increase the spread.

        Returns (walker_actions, final_spread) where walker_actions is a list
        of per-walker decision records.
        """
        n_walkers = len(walkerwt)
        spreads = []
        # merge_groups[k] lists walker indices squashed into keep-walker k
        merge_groups = [[] for i in range(n_walkers)]
        # number of extra clones each walker will receive
        walker_clone_nums = [0 for i in range(n_walkers)]

        new_wt = walkerwt.copy()
        new_amp = amp.copy()
        # calculate the initial spread which will be optimized
        spread, wsum = self._calcspread(walkerwt, new_amp, distance_matrix)
        spreads.append(spread)

        # maximize the variance through cloning and merging
        if debug_prints:
            print("Starting variance optimization:", spread)
        productive = True
        while productive:
            productive = False
            # find min and max wsums, alter new_amp
            # initialize to None, we may not find one of each
            minwind = None
            maxwind = None

            # selects a walker with minimum wsum and a walker with
            # maximum wsum walker (distance to other walkers) will be
            # tagged for cloning (stored in maxwind), except if it is
            # already a keep merge target
            max_tups = []
            for i, value in enumerate(wsum):
                # 1. must have an amp >=1 which gives the number of clones to be made of it
                # 2. clones for the given amplitude must not be smaller than the minimum probability
                # 3. must not already be a keep merge target
                if (new_amp[i] >= 1) and \
                   (new_wt[i] / (new_amp[i] + 1) > self.pmin) and \
                   (len(merge_groups[i]) == 0):
                    max_tups.append((value, i))

            if len(max_tups) > 0:
                maxvalue, maxwind = max(max_tups)

            # walker with the lowest wsum (distance to other walkers)
            # will be tagged for merging (stored in minwind)
            min_tups = [(value, i) for i, value in enumerate(wsum)
                        if new_amp[i] == 1 and (new_wt[i] < self.pmax)]
            if len(min_tups) > 0:
                minvalue, minwind = min(min_tups)

            # does minwind have an eligible merging partner?
            closewalk = None
            condition_list = np.array([i is not None for i in [minwind, maxwind]])
            if condition_list.all() and minwind != maxwind:
                # get the walkers that aren't the minimum and the max
                # wsum walkers, as candidates for merging
                closewalks = set(range(n_walkers)).difference([minwind, maxwind])
                # remove those walkers that if they were merged with
                # the min wsum walker would violate the pmax
                closewalks = [idx for idx in closewalks
                              if (new_amp[idx] == 1) and
                              (new_wt[idx] + new_wt[minwind] < self.pmax)
                              ]
                # if there are any walkers left, get the distances of
                # the close walkers to the min wsum walker if that
                # distance is less than the maximum merge distance
                if len(closewalks) > 0:
                    closewalks_dists = [(distance_matrix[minwind][i], i) for i in closewalks
                                        if distance_matrix[minwind][i] < (self.merge_dist)]
                    # if any were found set this as the closewalk
                    if len(closewalks_dists) > 0:
                        closedist, closewalk = min(closewalks_dists)

            # did we find a closewalk?
            condition_list = np.array([i is not None for i in [minwind, maxwind, closewalk]])
            if condition_list.all():
                # tentatively change new_amp: split minwind/closewalk weight
                # and add a clone to maxwind
                tempsum = new_wt[minwind] + new_wt[closewalk]
                new_amp[minwind] = new_wt[minwind] / tempsum
                new_amp[closewalk] = new_wt[closewalk] / tempsum
                new_amp[maxwind] += 1

                # re-determine spread function, and wsum values
                newspread, wsum = self._calcspread(new_wt, new_amp, distance_matrix)

                if newspread > spread:
                    spreads.append(newspread)
                    if debug_prints:
                        print("Variance move to", newspread, "accepted")

                    productive = True
                    spread = newspread

                    # make a decision on which walker to keep
                    # (minwind, or closewalk), equivalent to:
                    # `random.choices([closewalk, minwind],
                    #                 weights=[new_wt[closewalk], new_wt[minwind])`
                    r = rand.uniform(0.0, new_wt[closewalk] + new_wt[minwind])

                    # keeps closewalk and gets rid of minwind
                    if r < new_wt[closewalk]:
                        keep_idx = closewalk
                        squash_idx = minwind
                    # keep minwind, get rid of closewalk
                    else:
                        keep_idx = minwind
                        squash_idx = closewalk

                    # update weight
                    new_wt[keep_idx] += new_wt[squash_idx]
                    new_wt[squash_idx] = 0.0

                    # update new_amps
                    new_amp[squash_idx] = 0
                    new_amp[keep_idx] = 1

                    # add the squash index to the merge group
                    merge_groups[keep_idx].append(squash_idx)

                    # add the indices of the walkers that were already
                    # in the merge group that was just squashed
                    merge_groups[keep_idx].extend(merge_groups[squash_idx])

                    # reset the merge group that was just squashed to empty
                    merge_groups[squash_idx] = []

                    # increase the number of clones that the cloned
                    # walker has
                    walker_clone_nums[maxwind] += 1

                    # new spread for starting new stage
                    newspread, wsum = self._calcspread(new_wt, new_amp, distance_matrix)
                    spreads.append(newspread)
                    if debug_prints:
                        print("variance after selection:", newspread)

                # if not productive: revert the tentative amplitude changes
                else:
                    new_amp[minwind] = 1
                    new_amp[closewalk] = 1
                    new_amp[maxwind] -= 1

        # given we know what we want to clone to specific slots
        # (squashing other walkers) we need to determine where these
        # squashed walkers will be merged
        walker_actions = self.assign_clones(merge_groups, walker_clone_nums)

        # because there is only one step in resampling here we just
        # add another field for the step as 0 and add the walker index
        # to its record as well
        for walker_idx, walker_record in enumerate(walker_actions):
            walker_record['step_idx'] = np.array([0])
            walker_record['walker_idx'] = np.array([walker_idx])

        return walker_actions, spreads[-1]

    def _all_to_all_distance(self, walkers):
        """Return (distance_matrix_rows, images) for all walker pairs."""
        # initialize an all-to-all matrix, with 0.0 for self distances
        dist_mat = np.zeros((len(walkers), len(walkers)))

        # make images for all the walker states for us to compute distances on
        images = []
        for walker in walkers:
            image = self.distance.image(walker.state)
            images.append(image)

        # get the combinations of indices for all walker pairs
        for i, j in it.combinations(range(len(images)), 2):
            # calculate the distance between the two walkers
            dist = self.distance.image_distance(images[i], images[j])
            # save this in the matrix in both spots
            dist_mat[i][j] = dist
            dist_mat[j][i] = dist

        return [walker_dists for walker_dists in dist_mat], images

    def resample(self, walkers, debug_prints=False):
        """Resample the walkers: compute distances, decide clones/merges,
        apply the decisions, and return (resampled_walkers, resampling_data,
        resampler_data)."""
        n_walkers = len(walkers)
        walkerwt = [walker.weight for walker in walkers]
        amp = [1 for i in range(n_walkers)]

        # calculate distance matrix
        distance_matrix, images = self._all_to_all_distance(walkers)

        if debug_prints:
            print("distance_matrix")
            print(np.array(distance_matrix))

        # determine cloning and merging actions to be performed, by
        # maximizing the spread, i.e. the Decider
        resampling_data, spread = self.decide_clone_merge(walkerwt, amp, distance_matrix,
                                                          debug_prints=debug_prints)

        # convert the target idxs and decision_id to feature vector arrays
        for record in resampling_data:
            record['target_idxs'] = np.array(record['target_idxs'])
            record['decision_id'] = np.array([record['decision_id']])

        # actually do the cloning and merging of the walkers
        resampled_walkers = self.decision.action(walkers, [resampling_data])

        # flatten the distance matrix and give the number of walkers
        # as well for the resampler data, there is just one per cycle
        resampler_data = [{'distance_matrix': np.ravel(np.array(distance_matrix)),
                           'n_walkers': np.array([len(walkers)]),
                           'spread': np.array([spread]),
                           'images': np.ravel(np.array(images)),
                           'image_shape': np.array(images[0].shape)}]

        return resampled_walkers, resampling_data, resampler_data
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apache2_mod_proxy
author: <NAME> (@oboukili)
version_added: "2.2"
short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool
description:
- Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer
pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member
status page has to be enabled and accessible, as this module relies on parsing
this page. This module supports ansible check_mode, and requires BeautifulSoup
python module.
options:
balancer_url_suffix:
description:
- Suffix of the balancer pool url required to access the balancer pool
status page (e.g. balancer_vhost[:port]/balancer_url_suffix).
default: /balancer-manager/
balancer_vhost:
description:
- (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool.
required: true
member_host:
description:
- (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to.
Port number is autodetected and should not be specified here.
If undefined, apache2_mod_proxy module will return a members list of
dictionaries of all the current balancer pool members' attributes.
state:
description:
- Desired state of the member host.
(absent|disabled),drained,hot_standby,ignore_errors can be
simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors).
choices: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]
tls:
description:
- Use https to access balancer management page.
type: bool
default: 'no'
validate_certs:
description:
- Validate ssl/tls certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Get all current balancer pool members' attributes:
- apache2_mod_proxy:
balancer_vhost: 10.0.0.2
# Get a specific member's attributes:
- apache2_mod_proxy:
balancer_vhost: myws.mydomain.org
balancer_suffix: /lb/
member_host: node1.myws.mydomain.org
# Enable all balancer pool members:
- apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
register: result
- apache2_mod_proxy:
balancer_vhost: '{{ myloadbalancer_host }}'
member_host: '{{ item.host }}'
state: present
with_items: '{{ result.members }}'
# Gracefully disable a member from a loadbalancer node:
- apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: drained
delegate_to: myloadbalancernode
- wait_for:
host: '{{ member.host }}'
port: '{{ member.port }}'
state: drained
delegate_to: myloadbalancernode
- apache2_mod_proxy:
balancer_vhost: '{{ vhost_host }}'
member_host: '{{ member.host }}'
state: absent
delegate_to: myloadbalancernode
'''
RETURN = '''
member:
description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter.
type: dict
returned: success
sample:
{"attributes":
{"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
}
members:
description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args.
returned: success
type: list
sample:
[{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.20",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false
}
},
{"attributes": {
"Busy": "0",
"Elected": "42",
"Factor": "1",
"From": "136K",
"Load": "0",
"Route": null,
"RouteRedir": null,
"Set": "0",
"Status": "Init Ok ",
"To": " 47K",
"Worker URL": null
},
"balancer_url": "http://10.10.0.2/balancer-manager/",
"host": "10.10.0.21",
"management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.21:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b",
"path": "/ws",
"port": 8080,
"protocol": "http",
"status": {
"disabled": false,
"drained": false,
"hot_standby": false,
"ignore_errors": false}
}
]
'''
import re
import traceback
# Optional dependency: BeautifulSoup 3 (Python 2 era package name).
# Its absence is recorded here and reported later via module.fail_json.
BEAUTIFUL_SOUP_IMP_ERR = None
try:
    from BeautifulSoup import BeautifulSoup
except ImportError:
    BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc()
    HAS_BEAUTIFULSOUP = False
else:
    HAS_BEAUTIFULSOUP = True
# balancer member attributes extraction regexp:
EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)"
# Apache2 server version extraction regexp:
APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)"
def regexp_extraction(string, _regexp, groups=1):
    """ Returns the capture group (default=1) specified in the regexp, applied to the string """
    match = re.search(pattern=str(_regexp), string=str(string))
    # A missing match or an empty capture both yield None.
    if match and match.group(groups) != '':
        return str(match.group(groups))
    return None
class BalancerMember(object):
    """ Apache 2.4 mod_proxy LB balancer member.
    attributes:
        read-only:
            host -> member host (string),
            management_url -> member management url (string),
            protocol -> member protocol (string)
            port -> member port (string),
            path -> member location (string),
            balancer_url -> url of this member's parent balancer (string),
            attributes -> whole member attributes (dictionary)
            module -> ansible module instance (AnsibleModule object).
        writable:
            status -> status of the member (dictionary)
    """

    def __init__(self, management_url, balancer_url, module):
        # Member coordinates are parsed out of the management URL via EXPRESSION
        self.host = regexp_extraction(management_url, str(EXPRESSION), 4)
        self.management_url = str(management_url)
        self.protocol = regexp_extraction(management_url, EXPRESSION, 3)
        self.port = regexp_extraction(management_url, EXPRESSION, 5)
        self.path = regexp_extraction(management_url, EXPRESSION, 6)
        self.balancer_url = str(balancer_url)
        self.module = module

    def get_member_attributes(self):
        """ Returns a dictionary of a balancer member's attributes."""
        balancer_member_page = fetch_url(self.module, self.management_url)

        if balancer_member_page[1]['status'] != 200:
            # BUG FIX: the response info is a dict and cannot be concatenated
            # to a string without str() (raised TypeError on the error path).
            self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + str(balancer_member_page[1]))
        else:
            try:
                soup = BeautifulSoup(balancer_member_page[0])
            except TypeError:
                # BUG FIX: 'soup' is unbound when the constructor raised;
                # report the page object that failed to parse instead.
                self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(balancer_member_page[0]))
            else:
                subsoup = soup.findAll('table')[1].findAll('tr')
                keys = subsoup[0].findAll('th')
                for valuesset in subsoup[1::1]:
                    if re.search(pattern=self.host, string=str(valuesset)):
                        values = valuesset.findAll('td')
                        return dict((keys[x].string, values[x].string) for x in range(0, len(keys)))

    def get_member_status(self):
        """ Returns a dictionary of a balancer member's status attributes."""
        status_mapping = {'disabled': 'Dis',
                          'drained': 'Drn',
                          'hot_standby': 'Stby',
                          'ignore_errors': 'Ign'}
        status = {}
        actual_status = str(self.attributes['Status'])
        # Each flag is set iff its marker string appears in the Status cell
        for mode in status_mapping.keys():
            if re.search(pattern=status_mapping[mode], string=actual_status):
                status[mode] = True
            else:
                status[mode] = False
        return status

    def set_member_status(self, values):
        """ Sets a balancer member's status attributes amongst pre-mapped values."""
        values_mapping = {'disabled': '&w_status_D',
                          'drained': '&w_status_N',
                          'hot_standby': '&w_status_H',
                          'ignore_errors': '&w_status_I'}

        request_body = regexp_extraction(self.management_url, EXPRESSION, 1)
        for k in values_mapping.keys():
            if values[str(k)]:
                request_body = request_body + str(values_mapping[k]) + '=1'
            else:
                request_body = request_body + str(values_mapping[k]) + '=0'

        response = fetch_url(self.module, self.management_url, data=str(request_body))
        if response[1]['status'] != 200:
            # BUG FIX: the HTTP status is an int and must be stringified
            # before concatenation (raised TypeError on the error path).
            self.module.fail_json(msg="Could not set the member status! " + self.host + " " + str(response[1]['status']))

    attributes = property(get_member_attributes)
    status = property(get_member_status, set_member_status)
class Balancer(object):
    """ Apache httpd 2.4 mod_proxy balancer object"""

    def __init__(self, host, suffix, module, members=None, tls=False):
        # Choose the scheme once instead of duplicating both URL builders
        scheme = 'https://' if tls else 'http://'
        self.base_url = str(scheme + str(host))
        self.url = str(scheme + str(host) + str(suffix))
        self.module = module
        self.page = self.fetch_balancer_page()
        if members is None:
            self._members = []

    def fetch_balancer_page(self):
        """ Returns the balancer management html page as a string for later parsing."""
        page = fetch_url(self.module, str(self.url))
        status = page[1]['status']
        if status != 200:
            self.module.fail_json(msg="Could not get balancer page! HTTP status response: " + str(status))
        else:
            content = page[0].read()
            apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1)
            if apache_version:
                # Only Apache 2.4.x exposes the balancer-manager layout we parse
                if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version):
                    self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version))
                return content
            self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager")

    def get_balancer_members(self):
        """ Returns members of the balancer as a generator object for later iteration."""
        try:
            soup = BeautifulSoup(self.page)
        except TypeError:
            self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page))
        else:
            # Skip the first anchor (page header link); each remaining anchor
            # points at one balancer member's management URL suffix.
            for anchor in soup.findAll('a')[1::1]:
                member_suffix = str(anchor.get('href'))
                if member_suffix:
                    yield BalancerMember(str(self.base_url + member_suffix), str(self.url), self.module)
                else:
                    self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!")

    members = property(get_balancer_members)
def main():
    """ Initiates module.

    Parses the module arguments, validates the requested member states and
    either reports all balancer members or applies/reports the state of one
    selected member.
    """
    module = AnsibleModule(
        argument_spec=dict(
            # Fix: a required option must not also declare a default value
            # (the default could never apply and ansible flags it).
            balancer_vhost=dict(required=True, type='str'),
            balancer_url_suffix=dict(default="/balancer-manager/", type='str'),
            member_host=dict(type='str'),
            state=dict(type='str'),
            tls=dict(default=False, type='bool'),
            validate_certs=dict(default=True, type='bool')
        ),
        supports_check_mode=True
    )

    if HAS_BEAUTIFULSOUP is False:
        module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR)

    # Validate the comma-separated list of requested states.
    if module.params['state'] is not None:
        states = module.params['state'].split(',')
        if (len(states) > 1) and (("present" in states) or ("enabled" in states)):
            module.fail_json(msg="state present/enabled is mutually exclusive with other states!")
        else:
            for _state in states:
                if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
                    module.fail_json(
                        msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
                    )
    else:
        states = ['None']

    mybalancer = Balancer(module.params['balancer_vhost'],
                          module.params['balancer_url_suffix'],
                          module=module,
                          tls=module.params['tls'])

    if module.params['member_host'] is None:
        # No member targeted: report every member of the balancer.
        json_output_list = []
        for member in mybalancer.members:
            json_output_list.append({
                "host": member.host,
                "status": member.status,
                "protocol": member.protocol,
                "port": member.port,
                "path": member.path,
                "attributes": member.attributes,
                "management_url": member.management_url,
                "balancer_url": member.balancer_url
            })
        module.exit_json(
            changed=False,
            members=json_output_list
        )
    else:
        changed = False
        member_exists = False
        # Translate the requested states into per-member status flags.
        member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False}
        for mode in member_status.keys():
            for state in states:
                if mode == state:
                    member_status[mode] = True
                elif mode == 'disabled' and state == 'absent':
                    # 'absent' is implemented by disabling the member.
                    member_status[mode] = True

        for member in mybalancer.members:
            if str(member.host) == str(module.params['member_host']):
                member_exists = True
                if module.params['state'] is not None:
                    member_status_before = member.status
                    if not module.check_mode:
                        # Assigning member.status pushes the flags to httpd
                        # via the property setter.
                        member_status_after = member.status = member_status
                    else:
                        member_status_after = member_status
                    if member_status_before != member_status_after:
                        changed = True
                json_output = {
                    "host": member.host,
                    "status": member.status,
                    "protocol": member.protocol,
                    "port": member.port,
                    "path": member.path,
                    "attributes": member.attributes,
                    "management_url": member.management_url,
                    "balancer_url": member.balancer_url
                }
        if member_exists:
            module.exit_json(
                changed=changed,
                member=json_output
            )
        else:
            module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!')
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.urls import fetch_url
# Entry point when executed directly as an Ansible module.
if __name__ == '__main__':
    main()
|
<filename>xception_hxt.py
from keras.applications.xception import Xception
from keras.layers import *
from keras.models import *
import tensorflow as tf
def Conv_block(x, filters, size, strides=(1, 1), padding='same', activation=True):
    """Conv2D + BatchNorm, optionally followed by LeakyReLU.

    x -- input tensor
    filters -- number of convolution filters
    size -- kernel size
    strides -- convolution strides
    padding -- padding mode passed to Conv2D
    activation -- apply LeakyReLU(alpha=0.1) after batch norm when truthy
    Returns the transformed tensor.
    """
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    # Fix: test truthiness instead of the `== True` anti-idiom.
    if activation:
        x = LeakyReLU(alpha=0.1)(x)
    return x
def R_block(blockInput, num_filters=16):
    """Residual block: activation + two conv blocks added to a batch-normalized skip.

    blockInput -- input tensor
    num_filters -- filter count for both convolution blocks
    Returns the sum of the conv path and the normalized input.
    """
    out = LeakyReLU(alpha=0.1)(blockInput)
    out = BatchNormalization()(out)
    skip = BatchNormalization()(blockInput)
    out = Conv_block(out, num_filters, (3, 3))
    out = Conv_block(out, num_filters, (3, 3), activation=False)
    return Add()([out, skip])
def xception(input_shape=(None, None, 3)):
    """Build a U-Net-style segmentation model on an Xception encoder.

    input_shape -- (height, width, channels) of the input images.
    Returns a keras Model emitting a single-channel sigmoid mask.
    """
    # Entry flow: ImageNet-pretrained Xception backbone as the encoder.
    backbone = Xception(input_shape=input_shape, weights='imagenet', include_top=False)
    input = backbone.input
    start_neurons = 16

    # NOTE(review): the layer indices 121/31/21/11 below are tied to the
    # exact Xception implementation of this keras version -- verify them
    # if keras is upgraded.
    conv4 = backbone.layers[121].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.1)(pool4)

    # Middle flow: bottleneck with two residual blocks.
    convm = Conv2D(start_neurons * 32, (3, 3), activation=None, padding="same")(pool4)
    convm = R_block(convm, start_neurons * 32)
    convm = R_block(convm, start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # Exit flow: decoder; each stage upsamples and concatenates an encoder
    # skip connection, then refines with two residual blocks.
    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3), strides=(2, 2), padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.1)(uconv4)
    uconv4 = Conv2D(start_neurons * 16, (3, 3), activation=None, padding="same")(uconv4)
    uconv4 = R_block(uconv4, start_neurons * 16)
    uconv4 = R_block(uconv4, start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3), strides=(2, 2), padding="same")(uconv4)
    conv3 = backbone.layers[31].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.1)(uconv3)
    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None, padding="same")(uconv3)
    uconv3 = R_block(uconv3, start_neurons * 8)
    uconv3 = R_block(uconv3, start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3), strides=(2, 2), padding="same")(uconv3)
    conv2 = backbone.layers[21].output
    # Zero padding aligns the encoder feature map with the upsampled tensor
    # before concatenation (Xception's spatial sizes are off by one here).
    conv2 = ZeroPadding2D(((1, 0), (1, 0)))(conv2)
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None, padding="same")(uconv2)
    uconv2 = R_block(uconv2, start_neurons * 4)
    uconv2 = R_block(uconv2, start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3), strides=(2, 2), padding="same")(uconv2)
    conv1 = backbone.layers[11].output
    # Same alignment trick, with a larger offset at this resolution.
    conv1 = ZeroPadding2D(((3, 0), (3, 0)))(conv1)
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None, padding="same")(uconv1)
    uconv1 = R_block(uconv1, start_neurons * 2)
    uconv1 = R_block(uconv1, start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # Final upsampling back to input resolution; no skip connection here.
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3), strides=(2, 2), padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None, padding="same")(uconv0)
    uconv0 = R_block(uconv0, start_neurons * 1)
    uconv0 = R_block(uconv0, start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)

    uconv0 = Dropout(0.1 / 2)(uconv0)
    output_layer = Conv2D(1, (1, 1), padding="same", activation="sigmoid")(uconv0)
    model = Model(input, output_layer)
    # model.name = 'u-xception'
    return model
if __name__ == '__main__':
xception((256, 256, 3)) |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import unittest
from hl7apy.core import Message, Group, Segment, Field, Component, SubComponent
from hl7apy.parser import parse_segment, parse_message
from hl7apy.base_datatypes import *
from hl7apy.exceptions import InvalidHighlightRange
from hl7apy.consts import MLLP_ENCODING_CHARS, VALIDATION_LEVEL, DEFAULT_ENCODING_CHARS
class ToStringTestCase(unittest.TestCase):
    """
    TestCase class which tests the functionalities of the API converting the
    HL7 elements to string
    """

    def setUp(self):
        # Non-default separator characters used to exercise custom-encoding
        # serialization.
        self.custom_encoding_chars = {'SEGMENT': '\r',
                                      'FIELD': '!',
                                      'COMPONENT': '$',
                                      'SUBCOMPONENT': '@',
                                      'REPETITION': 'r',
                                      'ESCAPE': '?'}
        # MSH field values and their expected ER7 renderings for both the
        # standard and the custom encoding characters.
        self.msh_values_standard = ['|', '^~\&', 'LIP', 'LIP', 'LB', 'LB', '20111207121030', '', 'RSP^SLI^RSP_K11',
                                    '430102', 'D', '2.5', '', '', '', '', 'IT', '', 'EN', '', '']
        self.msh_standard = 'MSH|^~\\&|LIP|LIP|LB|LB|20111207121030||RSP^SLI^RSP_K11|430102|D|2.5|||||IT||EN'
        self.msh_values_custom = ['!', '$r?@', 'LIP', 'LIP', 'LB', 'LB', '20111207121030', '', 'RSP^SLI^RSP_K11',
                                  '430102', 'D', '2.5', '', '', '', '', 'IT', '', 'EN', '', '']
        self.msh_custom = 'MSH!$r?@!LIP!LIP!LB!LB!20111207121030!!RSP^SLI^RSP_K11!430102!D!2.5!!!!!IT!!EN'
        # Expected ER7 with \H\...\N\ highlight escapes in MSH-8.
        self.msh_highlighted = \
            'MSH|^~\\&|LIP|LIP|LB|LB|20111207121030|\\H\\HIGHLIGHTED\\N\\TEXT\\H\\IMPORTANT\\N\\|RSP^SLI^RSP_K11|430102|D|2.5|||||IT||EN'

    @staticmethod
    def _get_msh(values):
        # Build an MSH segment whose encoding characters are derived from the
        # first two entries of ``values``.
        encoding_chars = {'SEGMENT': '\r',
                          'FIELD': values[0],
                          'COMPONENT': values[1][0],
                          'SUBCOMPONENT': values[1][1],
                          'REPETITION': values[1][2],
                          'ESCAPE': values[1][3]}
        msh = parse_segment('MSH{0}{1}'.format(encoding_chars['FIELD'], encoding_chars['FIELD'].join(values[1:])),
                            encoding_chars=encoding_chars)
        return msh

    @staticmethod
    def _get_test_msg(trailing_children=False):
        # Full OML_O33 test message; the trailing_children variant keeps the
        # empty trailing separators that to_er7(trailing_children=True) emits.
        if trailing_children is False:
            return \
                'MSH|^~\\&|SEND APP|SEND FAC|REC APP|REC FAC|20110708162817||OML^O33^OML_O33|978226056138290600|D|2.5|||||USA||EN\r' \
                'PID|1||566-554-3423^^^GHH^MR||SURNAME^NAME^A|||M|||1111 SOMEWHERE STREET^^SOMEWHERE^^^USA||555-555-2004~444-333-222|||M\r' \
                'PV1||O|||||||||||||||||1107080001^^^LIS\r' \
                'SPM|1|100187400201||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC\r' \
                'ORC|NW|83428|83428|18740|SC||||20110708162817\r' \
                'TQ1|||||||||R\r' \
                'OBR||83428|83428|TPO^ANTI THYROPEROXIDASE ANTIBODIES(TPO)^^TPO||||||||||||ND^UNKNOWN^UNKNOWN'
        else:
            return 'MSH|^~\\&|SEND APP|SEND FAC|REC APP|REC FAC|20110708162817||OML^O33^OML_O33|978226056138290600|D|2.5|||||USA||EN||\r' \
                'PID|1||566-554-3423^^^GHH^MR||SURNAME^NAME^A|||M|||1111 SOMEWHERE STREET^^SOMEWHERE^^^USA||555-555-2004~444-333-222|||M|||||||||||||||||||||||\r' \
                'PV1||O|||||||||||||||||1107080001^^^LIS|||||||||||||||||||||||||||||||||\r' \
                'SPM|1|100187400201||SPECIMEN^Blood|||||||PSN^Human Patient||||||20110708162817||20110708162817|||||||1|CONTAINER^CONTAINER DESC||\r' \
                'ORC|NW|83428|83428|18740|SC||||20110708162817|||||||||||||||||||||\r' \
                'TQ1|||||||||R|||||\r' \
                'OBR||83428|83428|TPO^ANTI THYROPEROXIDASE ANTIBODIES(TPO)^^TPO||||||||||||ND^UNKNOWN^UNKNOWN|||||||||||||||||||||||||||||||||'

    def _create_test_message(self, msh_values):
        """
        Create a test message - RSP_K11 - with only the msh segment.
        The msh is filled with the sent in input
        """
        msg = Message('RSP_K11')
        msg.msh = self._get_msh(msh_values)
        return msg

    def test_msg_to_string_standard_encoding_chars(self):
        """
        It tests the to_er7 message functionality using default encoding chars
        """
        msg = self._create_test_message(self.msh_values_standard)
        self.assertEqual(msg.to_er7(), self.msh_standard)

    def test_msg_to_string_custom_encoding_chars(self):
        """
        It tests the to_er7 message functionality using custom encoding chars
        """
        msg = self._create_test_message(self.msh_values_custom)
        msg.to_er7(self.custom_encoding_chars)
        self.assertEqual(msg.to_er7(self.custom_encoding_chars), self.msh_custom)

    def test_msg_to_string_empty(self):
        """
        It tests the to_er7 message for an empty message
        """
        msg = Message('RSP_K11')
        # NOTE(review): assertRegexpMatches is a deprecated alias of
        # assertRegex, and the pattern's '|' and 'd+' are unescaped regex
        # metacharacters -- the pattern matches far more than intended.
        # Left unchanged to preserve the test's current behavior.
        self.assertRegexpMatches(msg.to_er7(), 'MSH|^~\\&|||||d+|||||2.5')

    def test_highlights(self):
        """
        It tests the highlighting functionaly
        """
        msg = self._create_test_message(self.msh_values_standard)
        value = ST('HIGHLIGHTEDTEXTIMPORTANT', highlights=((0, 11), (15, 24)))
        s = SubComponent(datatype='ST', value=value)
        c = Component(datatype='ST')
        c.add(s)
        msg.msh.msh_8.msh_8_1 = c
        self.assertEqual(msg.to_er7(), self.msh_highlighted)
        # Ranges given in reverse order must serialize identically.
        value = ST('HIGHLIGHTEDTEXTIMPORTANT', highlights=((15, 24), (0, 11)))
        s = SubComponent(datatype='ST', value=value)
        c = Component(datatype='ST')
        c.add(s)
        msg.msh.msh_8.msh_8_1 = c
        self.assertEqual(msg.to_er7(), self.msh_highlighted)

    def test_invalid_highlights(self):
        """
        It tests that highlighting functionality raises the
        :exc:`InvalidHighlightRange` exception in case of invalid range
        """
        # Overlapping ranges, in every order, must be rejected.
        data = ST('HIGHLIGHTEDTEXTIMPORTANT', highlights=((0, 11), (4, 24)))
        self.assertRaises(InvalidHighlightRange, data.to_er7)
        data = ST('HIGHLIGHTEDTEXTIMPORTANT', highlights=((4, 24), (0, 11)))
        self.assertRaises(InvalidHighlightRange, data.to_er7)
        data = ST('HIGHLIGHTEDTEXTIMPORTANT', highlights=((5, 11), (0, 11)))
        self.assertRaises(InvalidHighlightRange, data.to_er7)
        data = ST('HIGHLIGHTEDTEXTIMPORTANT', highlights=((0, 11), (0, 4)))
        self.assertRaises(InvalidHighlightRange, data.to_er7)

    def test_to_string_msh_field(self):
        # MSH-1/MSH-2 carry the separators themselves; standalone fields
        # without a parent segment cannot be serialized.
        m = Message('OML_O33')
        msh = m.msh
        self.assertEqual(msh.msh_1.to_er7(), '|')
        self.assertEqual(msh.msh_2.to_er7(), '^~\\&')
        msh_1 = Field('MSH_1')
        msh_2 = Field('MSH_2')
        self.assertRaises(IndexError, msh_1.to_er7)
        self.assertRaises(IndexError, msh_2.to_er7)

    def test_to_string_msh_field_v27(self):
        # From v2.7 on, MSH-2 includes the truncation character '#'.
        for v in ('2.7', '2.8', '2.8.1', '2.8.2'):
            m = Message('OML_O33', version=v)
            msh = m.msh
            self.assertEqual(msh.msh_1.to_er7(), '|')
            self.assertEqual(msh.msh_2.to_er7(), '^~\\&#')
            msh_1 = Field('MSH_1')
            msh_2 = Field('MSH_2')
            self.assertRaises(IndexError, msh_1.to_er7)
            self.assertRaises(IndexError, msh_2.to_er7)

    def test_to_string_msh_field_v27_no_truncation(self):
        # Explicitly passing the pre-2.7 encoding chars omits '#' even on 2.7+.
        for v in ('2.7', '2.8', '2.8.1', '2.8.2'):
            m = Message('OML_O33', encoding_chars=DEFAULT_ENCODING_CHARS, version=v)
            msh = m.msh
            self.assertEqual(msh.msh_1.to_er7(), '|')
            self.assertEqual(msh.msh_2.to_er7(), '^~\\&')
            msh_1 = Field('MSH_1')
            msh_2 = Field('MSH_2')
            self.assertRaises(IndexError, msh_1.to_er7)
            self.assertRaises(IndexError, msh_2.to_er7)

    def test_trailing_children(self):
        # to_er7(trailing_children=True) must keep empty trailing separators.
        test_msg = self._get_test_msg(trailing_children=False)
        test_msg_with_trailing = self._get_test_msg(trailing_children=True)
        msg = parse_message(test_msg)
        self.assertEqual(msg.to_er7(trailing_children=True), test_msg_with_trailing)
        self.assertEqual(msg.to_er7(trailing_children=False), test_msg)

    def test_to_mllp(self):
        # MLLP framing: SB + message + CR + EB + CR.
        test_msg = self._get_test_msg()
        mllp_msg = '{0}{1}{2}{3}{2}'.format(MLLP_ENCODING_CHARS.SB, test_msg,
                                            MLLP_ENCODING_CHARS.CR, MLLP_ENCODING_CHARS.EB)
        msg = parse_message(test_msg)
        self.assertEqual(msg.to_mllp(), mllp_msg)

    def test_to_mllp_with_trailing(self):
        test_msg = self._get_test_msg(trailing_children=True)
        mllp_msg = '{0}{1}{2}{3}{2}'.format(MLLP_ENCODING_CHARS.SB, test_msg,
                                            MLLP_ENCODING_CHARS.CR, MLLP_ENCODING_CHARS.EB)
        msg = parse_message(test_msg)
        self.assertEqual(msg.to_mllp(trailing_children=True), mllp_msg)

    def test_to_string_segment_with_infinite_children(self):
        # Sparse fields serialize with the right number of empty separators.
        qpd = Segment('QPD', validation_level=VALIDATION_LEVEL.STRICT)
        qpd.qpd_3 = 'abc'
        qpd.qpd_10 = 'cba'
        self.assertEqual(qpd.to_er7(), 'QPD|||abc|||||||cba')
        zin = Segment('ZIN', validation_level=VALIDATION_LEVEL.STRICT)
        zin.zin_1 = 'yyy'
        self.assertEqual(zin.to_er7(), 'ZIN|yyy')
        zin.zin_10 = 'www'
        self.assertEqual(zin.to_er7(), 'ZIN|yyy|||||||||www')

    def test_to_string_segment_with_unknown_fields(self):
        # Unnamed fields are appended after the known ones.
        f1 = Field()
        f1.value = 'abc'
        f2 = Field()
        f2.value = 'cba'
        pid_er7 = 'PID|1||566-554-3423^^^GHH^MR||SURNAME^NAME^A|||M|||1111 SOMEWHERE^^SOMEWHERE^^^USA||555~444|||M|||||||||||||||||||||||'
        pid = parse_segment(pid_er7)
        pid.add(f1)
        self.assertEqual(pid.to_er7(trailing_children=True), pid_er7 + '|abc')
        pid.add(f2)
        self.assertEqual(pid.to_er7(trailing_children=True), pid_er7 + '|abc|cba')

    def test_to_string_z_segment_with_unknown_fields(self):
        # Unnamed fields on a Z segment always sort after the named ones.
        f1 = Field()
        f1.value = 'abc'
        f2 = Field()
        f2.value = 'cba'
        zin = Segment('ZIN')
        zin.zin_1 = 'yyy'
        zin.add(f1)
        self.assertEqual(zin.to_er7(), 'ZIN|yyy|abc')
        zin.zin_4 = 'zzz'
        self.assertEqual(zin.to_er7(), 'ZIN|yyy|||zzz|abc')
        zin.add(f2)
        self.assertEqual(zin.to_er7(), 'ZIN|yyy|||zzz|abc|cba')

    def test_to_string_message_with_z_segment(self):
        msg = self._create_test_message(self.msh_values_standard)
        msg.zin = 'ZIN||abc||cba^www~abc^yyy'
        self.assertEqual(msg.to_er7(), self.msh_standard + '\rZIN||abc||cba^www~abc^yyy')
        msg.zbe = 'ZBE|yyy|ww||||||yyy'
        self.assertEqual(msg.to_er7(), self.msh_standard + '\rZIN||abc||cba^www~abc^yyy\rZBE|yyy|ww||||||yyy')
        # Z segments inside a group keep their position within that group.
        g = Group('OML_O33_PATIENT', validation_level=VALIDATION_LEVEL.TOLERANT)
        g.pid = 'PID|1'
        g.zbe = 'ZBE||ab|ab'
        msg.add(g)
        self.assertEqual(msg.to_er7(), self.msh_standard +
                         '\rZIN||abc||cba^www~abc^yyy\rZBE|yyy|ww||||||yyy\rPID|1\rZBE||ab|ab')

    def test_to_string_wd_field(self):
        """
        Tests that, in strict mode, a wd field is not present
        """
        # The EV1 message is of type WD
        s = 'EVN||20080115153000||AAA|AAA|20080114003000'
        parsed_s = parse_segment(s, version='2.7')
        self.assertEqual(parsed_s.to_er7(), 'EVN||20080115153000||AAA|AAA|20080114003000')
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
<filename>convex_hull.py<gh_stars>0
"""
Goal - To caluclate convex hull area
"""
import os
import pathlib
from pprint import pprint
import numpy as np
from scipy import stats
from scipy.spatial import distance
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import trajectorytools as tt
import trajectorytools.plot as ttplot
import trajectorytools.socialcontext as ttsocial
from trajectorytools.constants import dir_of_data
import csv
import pickle
import argparse
from scipy.spatial import ConvexHull
import pandas as pd
#argparse
def boolean_string(s):
    """Parse the strings 'True'/'False' into a bool for argparse.

    Raises ValueError for any other input so argparse reports a clear error.
    """
    if s not in ('True', 'False'):
        raise ValueError('Not a valid boolean string')
    return s == 'True'
# create the parser object
parser = argparse.ArgumentParser()
# NOTE: argparse will throw an error if:
#     - a flag is given with no value
#     - the value does not match the type
# and if a flag is not given it will be filled with the default.
parser.add_argument('-a', '--a_string', default='hi', type=str)
parser.add_argument('-b1', '--integer_b1', default=29, type=int)    # temperature
parser.add_argument('-b2', '--integer_b2', default=16, type=int)    # group size
parser.add_argument('-b3', '--integer_b3', default=3, type=int)     # replicate
parser.add_argument('-f1', '--integer_f1', default=0, type=int)
parser.add_argument('-f2', '--integer_f2', default=10000, type=int)
parser.add_argument('-c', '--float_c', default=1.5, type=float)
parser.add_argument('-v', '--verbose', default=True, type=boolean_string)
# Note that you assign a short name and a long name to each argument.
# You can use either when you call the program, but you have to use the
# long name when getting the values back from "args".
# get the arguments
args = parser.parse_args()

# Input paths are keyed by temperature (b1), group size (b2), replicate (b3).
parent_dir = '../../output/temp_collective/roi'
input_dir = parent_dir + '/' + str(args.integer_b1) + '/' + str(args.integer_b2) + '/'
input_file = input_dir + str(args.integer_b3) + '_nosmooth.p'
#sigma_values = 1.5 #smoothing parameter

# Single-fish recordings use the raw trajectories file; groups use the
# gap-filled variant.
if args.integer_b2 == 1:
    trajectories_file_path = '../../data/temp_collective/roi/'+str(args.integer_b1)+'/' +str(args.integer_b2)+'/GS_'+str(args.integer_b2)+'_T_'+str(args.integer_b1)+'_roi_'+str(args.integer_b3)+'/trajectories.npy'
else:
    trajectories_file_path = '../../data/temp_collective/roi/'+str(args.integer_b1)+'/' +str(args.integer_b2)+'/GS_'+str(args.integer_b2)+'_T_'+str(args.integer_b1)+'_roi_'+str(args.integer_b3)+'/trajectories_wo_gaps.npy'

try:
    # Load trajectories, center them and express positions in body lengths.
    tr = tt.Trajectories.from_idtrackerai(trajectories_file_path,center=True).normalise_by('body_length')
    tr.new_time_unit(tr.params['frame_rate'], 'seconds')
except FileNotFoundError:
    print(args.integer_b1,args.integer_b2,args.integer_b3)
    print('File not found')
    # NOTE(review): execution continues after this except block, so the later
    # use of `tr` would raise NameError when the file is missing -- consider
    # sys.exit() here.
    pass

# Collect loom-stimulus frame numbers for this temperature/groupsize/replicate.
looms = []
met = pd.read_csv('../../data/temp_collective/roi/metadata_w_loom.csv')
for i in range(len(met.Temperature)):
    if met.Temperature[i] == args.integer_b1 and met.Groupsize[i] == args.integer_b2 and met.Replicate[i] == args.integer_b3 :
        looms.append(met['Loom 1'][i])
        looms.append(met['Loom 2'][i])
        looms.append(met['Loom 3'][i])
        looms.append(met['Loom 4'][i])
        looms.append(met['Loom 5'][i])

# Convex hull area of the group per frame, in two windows after the first
# two looms (frames +200 .. +500).
hull = []
for i in list(range(looms[0]+200, looms[0]+500))+ list(range(looms[1]+200, looms[1]+500)) :
    hull.append(ConvexHull(tr.s[i]).area)

# Plot hull area over both windows, with loom onsets marked.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
frame_range1 = list(range(looms[0]+200, looms[0]+500))
frame_range2 = list(range(looms[1]+200, looms[1]+500))
ax.plot(np.asarray(frame_range1),hull[0:len(frame_range1)])
ax.plot(np.asarray(frame_range2),hull[len(frame_range1):len(hull)])
#ax.plot(np.asarray(frame_range),hull)
for j in range(5):
    plt.axvline(looms[j], color = 'k')
ax.set_xlabel('Frame number')
ax.set_ylabel('Convex hull area')
plt.show()
|
<gh_stars>10-100
import sys
import os
#new_path = os.path.join(os.getcwd(), '..', 'SemMedDB')
#sys.path.insert(0, new_path)
from NormGoogleDistance import NormGoogleDistance
#from SemMedInterface import SemMedInterface
from QueryMyGene import QueryMyGene
import mygene
import requests
from QueryMyChem import QueryMyChem
import requests_cache
import pandas
#import _mysql_exceptions
class SynonymMapper():
    """Maps curie identifiers between terminologies via web services.

    Uses MyGene, MyChem and the EBI OXO service. The SemMedDB-backed mapping
    present in earlier revisions is disabled.
    """

    def __init__(self):
        """Set up the service clients (no SemMedDB connection is made)."""
        self.biothings_url = "http://c.biothings.io/v1/query?q="
        self.mygene_obj = mygene.MyGeneInfo()
        self.qmg = QueryMyGene()

    def prot_to_gene(self, curie_id):
        """
        This takes a uniprot curie id and converts it into a few different gene ids

        :param curie_id: e.g. 'UniProtKB:P12345'
        :return: list of prefixed gene id strings, or None if nothing was found
        """
        # Idiom fix: membership test instead of len(split(...)) > 1.
        if ':' not in curie_id:
            return None
        uniprot_id = curie_id.split(':')[1]
        entrez_ids = self.qmg.convert_uniprot_id_to_entrez_gene_ID(uniprot_id)
        entrez_ids = set(entrez_ids) if entrez_ids is not None else set()
        hgnc_ids = set()
        mim_ids = set()
        vega_ids = set()
        ensembl_ids = set()
        synonyms = []
        symbols = self.qmg.convert_uniprot_id_to_gene_symbol(uniprot_id)
        for symbol in symbols:
            synonyms.append('HGNC.Symbol:' + symbol)
        for gene_id in entrez_ids:
            synonyms.append('NCBIGene:' + str(gene_id))
            try:
                res = self.mygene_obj.getgene(int(gene_id), fields='HGNC,MIM,Vega,ensembl', verbose=False)
            except requests.exceptions.HTTPError:
                print('HTTP error for querying uniprot to gene symbol mygene: ' + uniprot_id, file=sys.stderr)
                res = None
            if res is None:
                continue
            hgnc_res = res.get('HGNC', None)
            mim_res = res.get('MIM', None)
            vega_res = res.get('Vega', None)
            ensembl_res = res.get('ensembl', None)
            if hgnc_res is not None:
                hgnc_ids.add(hgnc_res)
            if mim_res is not None:
                mim_ids.add(mim_res)
            if vega_res is not None:
                vega_ids.add(vega_res)
            if ensembl_res is not None:
                # mygene returns either a dict or a list of dicts here.
                ens_list = ensembl_res if isinstance(ensembl_res, list) else [ensembl_res]
                for ens_res in ens_list:
                    ensembl_gene_res = ens_res.get('gene', None)
                    if ensembl_gene_res is not None:
                        ensembl_ids.add(ensembl_gene_res)
        for hgnc_id in hgnc_ids:
            synonyms.append('HGNC:' + str(hgnc_id))
        for mim_id in mim_ids:
            synonyms.append('OMIM:' + str(mim_id))
        for vega_id in vega_ids:
            synonyms.append('Vega:' + str(vega_id))
        for ensembl_id in ensembl_ids:
            synonyms.append('ensembl:' + str(ensembl_id))
        return synonyms if synonyms else None

    def get_all_from_oxo(self, curie_id, map_to=None):
        """
        this takes a curie id and gets all the mappings that oxo has for the given id

        :param curie_id: The string for the curie id to submit to OXO (e.g. 'HP:0001947')
        :param map_to: A string containing the prefix for the resulting ids. If set to None it will return all mappings. (default is none)
        :return: A list of strings containing the found mapped ids or None if none where found
        """
        if map_to is None:
            map_to = ''
        # Idiom fix: isinstance instead of type comparison.
        if not isinstance(curie_id, str):
            curie_id = str(curie_id)
        if curie_id.startswith('REACT:'):
            # OXO knows this terminology under the 'Reactome' prefix.
            curie_id = curie_id.replace('REACT', 'Reactome')
        prefix = curie_id.split(':')[0]
        res = NormGoogleDistance.query_oxo(curie_id)
        synonym_ids = None
        if res is not None:
            res = res.json()
            synonym_ids = set()
            n_res = res['page']['totalElements']
            if int(n_res) > 0:
                # Normalize map_to to a list so one loop handles both the
                # single-prefix and multi-prefix cases identically.
                targets = map_to if isinstance(map_to, list) else [map_to]
                for mapping in res['_embedded']['mappings']:
                    for elm in targets:
                        if mapping['fromTerm']['curie'].startswith(prefix):
                            if mapping['toTerm']['curie'].startswith(elm):
                                synonym_ids.add(mapping['toTerm']['curie'])
                        elif mapping['toTerm']['curie'].startswith(prefix):
                            if mapping['fromTerm']['curie'].startswith(elm):
                                synonym_ids.add(mapping['fromTerm']['curie'])
            # Preserve the original contract: None when nothing matched.
            synonym_ids = list(synonym_ids) if synonym_ids else None
        return synonym_ids

    def chembl_to_chebi(self, chemical_substance_id):
        """
        This takes a chembl curie id and return a chebi curie id

        :param chemical_substance_id: e.g. 'ChEMBL:25' or 'CHEMBL:CHEMBL25'
        :return: the CHEBI id string, or None on any failure
        """
        # Normalize both curie spellings to the bare CHEMBLnnnn accession.
        if chemical_substance_id.startswith("ChEMBL:"):
            chemical_substance_id = chemical_substance_id.replace("ChEMBL:", "CHEMBL")
        if chemical_substance_id.startswith('CHEMBL:CHEMBL'):
            chemical_substance_id = chemical_substance_id.replace("CHEMBL:", "")
        handler = 'chem/' + chemical_substance_id + '?fields=chebi.chebi_id'
        url = QueryMyChem.API_BASE_URL + '/' + handler
        try:
            res = requests.get(url, timeout=QueryMyChem.TIMEOUT_SEC)
        except requests.exceptions.Timeout:
            return None
        if res is None:
            return None
        if res.status_code != 200:
            return None
        id_json = res.json()
        if 'chebi' in id_json:
            return id_json['chebi']['chebi_id']
        else:
            return None
|
<reponame>amolk/AGI-experiments
# %%
# Jupyter-style cell script (IPython magics below are not plain Python):
# change into the experiment directory and enable module auto-reload.
%cd ~/work/free-energy-minimization-framework/7/
%load_ext autoreload
%autoreload 2
# %%
from f import F
import torch
from torch import nn
import pdb
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
%matplotlib inline

figsize=(15,5)
t_sample = 20              # length of the sliding input window
learning_rate = 0.001

# sin wave with noise present only certain ranges
pattern_length = 100
pattern = torch.tensor(np.sin(np.arange(pattern_length) * 0.30) - np.sin(np.arange(pattern_length) * 0.20) + np.random.sample(pattern_length) * 0.3).float().unsqueeze(1)
plt.figure(figsize=figsize)
plt.plot(pattern.numpy(), label='full pattern')
plt.legend()

mu_size = pattern[0].shape[0]
f = F(input_size=mu_size, hidden_size=50, output_size=mu_size)

# Train f() to predict the next sample from each t_sample-long window.
losses = []
for epoch in range(50):
    # print("epoch {}".format(epoch))
    epoch_losses = []
    for offset in range(0, pattern.shape[0] - t_sample):
        loss = f.train_sample(pattern[offset:t_sample+offset], pattern[t_sample+offset])
        epoch_losses.append(loss)
        #print("loss = {}".format(loss))
        #print("------------")
        #pdb.set_trace()
    losses.append(np.mean(epoch_losses))
plt.figure(figsize=figsize)
plt.plot(losses, label='loss for f()')
plt.legend()
# %%
# A few sequential predictions
offset = 10
prediction_count = 50
predictions = f.predict(pattern[offset:t_sample+offset], prediction_count)
plt.figure(figsize=figsize)
plt.plot(range(0, t_sample + prediction_count + 1), pattern[offset:t_sample + offset + prediction_count + 1].numpy(), label='seed')
plt.plot(range(t_sample + 1, t_sample + prediction_count + 1), predictions, label='sequential predictions')
plt.legend()
# %%
# Expected precision
# Let's try training on expected error
# f_precision learns to predict the magnitude of f()'s prediction error.
f_precision = F(input_size=mu_size, hidden_size=50, output_size=1)
losses = []
for epoch in range(20):
    # print("epoch {}".format(epoch))
    epoch_losses = []
    for offset in range(0, pattern.shape[0] - t_sample):
        (output, hidden) = f.run_sample(pattern[offset:t_sample+offset])
        error = pattern[t_sample+offset] - output
        error = torch.abs(error.squeeze(0))
        #print("error = {}".format(error))
        loss = f_precision.train_sample(pattern[offset:t_sample+offset], error)
        epoch_losses.append(loss)
        #print("loss = {}".format(loss))
        #print("------------")
        #pdb.set_trace()
    losses.append(np.mean(epoch_losses))
plt.figure(figsize=figsize)
plt.plot(losses, label='loss for f_precision()')
plt.legend()
# %%
# actual vs predicted error
actual_errors = []
predicted_errors = []
for offset in range(0, pattern.shape[0] - t_sample):
    (output, _) = f.run_sample(pattern[offset:t_sample+offset])
    actual_error = np.abs(pattern[t_sample+offset][0] - output.item())
    actual_errors.append(actual_error)
    (predicted_error, _) = f_precision.run_sample(pattern[offset:t_sample+offset])
    predicted_errors.append(predicted_error.item())
plt.figure(figsize=figsize)
plt.plot(actual_errors, label='actual error')
plt.plot(predicted_errors, label='predicted error')
plt.legend()
# %% |
#!/usr/bin/env python3
"""SvakSvat Member register GUI."""
import sys
import pickle
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from sqlalchemy.orm import scoped_session
from backend.orm import (Member, ContactInformation, get_field_max_length,
create_member, create_phux, backup_everything,
restore_everything)
from backend.listmodels import (GroupListModel, PostListModel,
DepartmentListModel, MembershipListModel,
MembershipDelegate,
configure_membership_qcombobox,
assign_membership_to_member)
from ui.mainwindow import Ui_MainWindow
from ui.memberedit import Ui_MemberEdit
from ui.newmember import Ui_NewMember
import passwordsafe
import useraccounts
import mailutil
import atexit
import svaksvat_rc
def init_gender_combobox(combobox, member=None):
    """Populate a QComboBox with the gender labels used in the database.

    combobox -- the QComboBox to fill.
    member -- optional backend.orm.Member; when given and its gender_fld is
    set (non-zero), the combobox is positioned on that value.
    """
    for label in ("Okänd", "Man", "Kvinna"):
        combobox.addItem(label)
    if member and member.gender_fld:
        combobox.setCurrentIndex(member.gender_fld)
def fill_qlineedit_from_db(lineedit, fieldname, table):
    """Initialize a QLineEdit-like widget from an ORM-mapped table field.

    lineedit -- container whose attribute ``fieldname`` supports setText and
    setMaxLength (e.g. a generated UI object).
    fieldname -- column name such as givenNames_fld.
    table -- ORM mapping, e.g. Member or ContactInformation.

    Copies the current database value into the widget and caps its length to
    the column's maximum. Missing attributes on either side are skipped
    silently. Use update_qtextfield_to_db to write changes back.
    """
    try:
        widget = getattr(lineedit, fieldname)
        widget.setText(getattr(table, fieldname))
        widget.setMaxLength(get_field_max_length(table, fieldname))
    except AttributeError:
        return
def update_qtextfield_to_db(container, fieldname, table):
    """Write a widget's text back into an ORM-mapped table field.

    container -- object whose attribute ``fieldname`` has a text() method.
    fieldname -- column name such as givenNames_fld.
    table -- ORM mapping, e.g. Member or ContactInformation.

    Silently skips fields missing from the container or table.
    """
    try:
        new_value = str(getattr(container, fieldname).text())
        setattr(table, fieldname, new_value)
    except AttributeError:
        return
class UsernameValidator(QValidator):
    """Validates LDAP-usernames in QTextFields.

    Makes sure that no other Member has the same username_fld in the database.
    """

    def __init__(self, session, parent):
        """Construct the validator with given session and parent.

        session -- Sqlalchemy session.
        parent -- Parent including "member" and ui.username_fld members.

        The MemberEdit and NewMemberDialog classes are designed to be parent
        parameters.
        """
        super().__init__()
        self.parent = parent
        self.session = session

    def fixup(self, input):
        """Strip whitespace + special characters and make lower case"""
        return ''.join(c.lower() for c in input if c.isalnum())

    def validate(self, input, pos):
        """Checks for username uniqueness."""
        stripped = self.fixup(input)
        # Count other members (excluding the one currently being edited,
        # matched by objectId) that already use the candidate username.
        not_unique = self.session.query(Member).filter(Member.username_fld ==
            stripped).filter(Member.objectId !=
            self.parent.member.objectId).count()
        if not_unique:
            # Red background signals a taken username; the input is reported
            # as Intermediate so the user can keep editing.
            self.parent.ui.username_fld.setStyleSheet("QLineEdit {\
                background-color: rgb(255, 100, 100); }")
            return (QValidator.Intermediate, stripped, pos)
        else:
            self.parent.ui.username_fld.setStyleSheet("QLineEdit {\
                background-color: rgb(255, 255, 255); }")
            return (QValidator.Acceptable, stripped, pos)
class NewMemberDialog(QDialog):
    """Dialog for creating a new Member (or Phux) in the registry."""

    def __init__(self, session, parent=None):
        """Create the dialog and initialize the fields.

        session -- SQLAlchemy session.
        parent -- Parent for the QDialog.
        """
        self.parent = parent
        super().__init__(parent=self.parent)
        self.ui = Ui_NewMember()
        self.ui.setupUi(self)
        self.session = session
        self.setWindowTitle("Ny medlem")
        self.usernamevalidator = UsernameValidator(self.session, self)
        self.member = Member()  # Needed in UsernameValidator
        self.ui.username_fld.setValidator(self.usernamevalidator)
        init_gender_combobox(self.ui.gender_fld)
        configure_membership_qcombobox(self.ui.department_comboBox,
                "Department", self.session)
        # Set correct lengths for QTextEdits
        for field in self.member.editable_text_fields:
            fill_qlineedit_from_db(self.ui, field, self.member)
        contactinfo = ContactInformation()
        for field in contactinfo.publicfields:
            fill_qlineedit_from_db(self.ui, field, contactinfo)
        if self.member.birthDate_fld:
            self.ui.birthDate_fld.setDateTime(self.member.birthDate_fld)
        self.show()

    def accept(self):
        """Commit the new member to the database."""
        self.member = None
        # Phux members are created through a dedicated helper.
        if self.ui.makePhux_CheckBox.isChecked():
            self.member = create_phux(self.session)
        else:
            self.member = create_member(self.session)
        for field in Member.editable_text_fields:
            # Skip usernames rejected by UsernameValidator (not unique).
            if (field == "username_fld" and not
                    self.ui.username_fld.hasAcceptableInput()):
                continue
            update_qtextfield_to_db(self.ui, field, self.member)
        contactinfo = self.member.contactinfo
        for field in contactinfo.publicfields:
            update_qtextfield_to_db(self.ui, field, contactinfo)
        department = self.ui.department_comboBox.currentText()
        if department and not assign_membership_to_member(self.session, "Department",
                department, self.member, parent=self,
                combobox=self.ui.department_comboBox, indefinite_time=True):
            return  # Don't yet commit if Department not chosen.
        self.member.gender_fld = self.ui.gender_fld.currentIndex()
        self.member.birthDate_fld = self.ui.birthDate_fld.dateTime().toPyDateTime()
        self.session.commit()
        # Refresh the parent's member list and select the new member.
        self.parent.populateMemberList(choosemember=self.member)
        self.parent.setStatusMessage("Medlem %s skapad!" %
                self.member.getWholeName())
        super().accept()

    def reject(self):
        """Close the dialog without saving any changes."""
        super().reject()
class MemberEdit(QWidget):
    """Dialog to edit almost every aspect of a member."""

    def __init__(self, session, member, parent=None, ldapmanager=None):
        """Create the dialog and fill in the values from a member.

        session -- SQLAlchemy session.
        member -- backend.orm.Member to be edited.
        parent -- Parent for the QDialog.
        ldapmanager -- optional LDAP integration; enables the accounts tab.
        """
        self.parent = parent
        super().__init__()
        self.ui = Ui_MemberEdit()
        self.ui.setupUi(self)
        self.session = session
        # Re-query the member so the edited instance belongs to this session.
        self.member = self.session.query(Member).filter_by(
                objectId=member.objectId).one()
        self.ldapmanager = ldapmanager
        self.fillFields()
        self.setWindowTitle(self.member.getWholeName())
        self.usernamevalidator = UsernameValidator(self.session,
                self)
        self.ui.username_fld.setValidator(self.usernamevalidator)

    def refreshUserAccounts(self):
        """Refresh the LDAP/BILL account status labels and buttons."""
        # Pessimistic defaults: assume neither account exists.
        ldapuserexists = "Nej"
        billuserexists = "Nej"
        ldapcolor = "red"
        billcolor = "red"
        self.ui.removeAccountButton.setEnabled(False)
        self.ui.username_fld.setEnabled(True)
        self.ui.billAccountCreditLabel.setText("Icke tillgänglig")
        if self.ldapmanager.check_bill_account(self.member):
            billuserexists = "Ja"
            billcolor = "green"
            self.ui.billAccountCreditLabel.setText(str(
                    self.ldapmanager.get_bill_balance(self.member)) + " €")
        if self.ldapmanager.checkldapuser(self.member):
            ldapuserexists = "Ja"
            ldapcolor = "green"
            self.ui.ldapGroupsLabel.setPlainText("\n".join(self.ldapmanager.getPosixGroups(self.member)))
            self.ui.removeAccountButton.setEnabled(True)
            # Lock the username while an LDAP account exists.
            self.ui.username_fld.setEnabled(False)
        self.ui.ldapAccountStatusLabel.setText(ldapuserexists)
        self.ui.billAccountStatusLabel.setText(billuserexists)
        # Green = account exists, red = missing.
        stylesheet = "QLabel {color:%s}" % ldapcolor
        self.ui.ldapAccountStatusLabel.setStyleSheet(stylesheet)
        stylesheet = "QLabel {color:%s}" % billcolor
        self.ui.billAccountStatusLabel.setStyleSheet(stylesheet)

    def createAccountOrChangePassword(self):
        """Create an LDAP account, or change the password if one exists.

        Prompts twice for the password, optionally masks it in the
        notification mail, and emails the outcome to the member.
        """
        password, ok = QInputDialog.getText(self, "Ange lösenord", "Lösenord",
                QLineEdit.Password)
        if not ok:
            return
        password2, ok2 = QInputDialog.getText(self, "Lösenord igen", "Lösenord",
                QLineEdit.Password)
        if not ok2:
            return
        if password != password2:
            QMessageBox.information(self, "Åtgärden misslyckades!",
                    "Lösenorden matchar inte.", QMessageBox.Ok)
            return
        # Copy of the password; may be replaced by a mask in the mail below.
        send_password = (password + '.')[:-1]
        if QMessageBox.question(self, "Hemligt lösenord?",
                "Ett hemligt lösenord synns inte i mailet som skickas.",
                QMessageBox.Yes, QMessageBox.No) == QMessageBox.Yes:
            send_password = "*****"
        if self.ldapmanager.checkldapuser(self.member):
            # Change only password if account exists
            self.ldapmanager.change_ldap_password(self.member.username_fld, password)
            self.refreshUserAccounts()
            mailutil.send_mail(self.ldapmanager.ps, self.ui.email_fld.text(),
                    'Ditt konto vid Teknologföreningen', 'Hejsan\n\nDitt lösenord vid Teknologföreningen har blivit bytt.\n\nDitt nya lösenord är: {:s}\n\nVid frågor eller ifall du inte begärt detta, kontakta <EMAIL>\n\nDetta är ett automatiskt meddelande, du behöver inte svara på det.'.format(send_password))
            QMessageBox.information(self, "Lösenord bytt!",
                    "Lösenordet skickat till användarens e-post.", QMessageBox.Ok)
            return
        username = self.ui.username_fld.text()
        email = self.ui.email_fld.text()
        preferredname = self.ui.preferredName_fld.text()
        surname = self.ui.surName_fld.text()
        if (username and email and preferredname and surname):
            if not self.member.ifOrdinarieMedlem():
                if QMessageBox.question(self, "Skapa användarkonto?",
                        "Användaren är inte ordinarie medlem, skapa konto ändå?",
                        QMessageBox.Yes, QMessageBox.No) == QMessageBox.No:
                    return
            # Persist identity fields before creating the LDAP account.
            self.member.username_fld = username
            self.member.email_fld = email
            self.member.preferredName_fld = preferredname
            self.member.surName_fld = surname
            self.session.commit()
            self.ldapmanager.addldapuser(self.member, password)
            self.refreshUserAccounts()
            mailutil.send_mail(self.ldapmanager.ps, self.member.email_fld,
                    'Ditt konto vid Teknologföreningen', 'Du har skapat ett konto till Teknologföreningens IT-system.\nDitt användarnamn är {:s}\noch ditt lösenord är {:s}\n\nGå in på http://bill.teknologforeningen.fi/code för att ta reda på din BILL-kod som du kan använda till kopiering o.dyl.\n\nLäs igenom reglerna för användandet av TF:s IT-tjänster på denna sida (fyll dock inte i blanketten; reglerna berör dig ändå):\nhttps://www.teknologforeningen.fi/index.php?option=com_content&view=article&id=115&Itemid=177&lang=sv\n\nKom ihåg att användarkonto och BILL-kod är ett privilegium, inte en rätt. De kan tas bort vid missbruk.\n\n/Infochefen & TF-IC'.format(username,send_password))
            QMessageBox.information(self, "Användare skapad!",
                    "Lösenordet skickat till användarens e-post.", QMessageBox.Ok)
            return
        QMessageBox.information(self, "Kunde inte skapa användarkonto",
                "Felaktigt användarnamn, email, efternamn eller tilltalsnamn", QMessageBox.Ok)

    def removeAccount(self):
        """Delete the member's LDAP account after user confirmation."""
        if QMessageBox.question(self, "Ta bort användarkonto?",
                "Är du säker att du vill radera användarkontot för användaren %s?"
                % self.member.username_fld + " BILL krediter kommer att bevaras.",
                QMessageBox.Yes, QMessageBox.No) == QMessageBox.Yes:
            self.ldapmanager.delldapuser(self.member)
            self.refreshUserAccounts()

    def fillFields(self):
        """Fill every widget with the corresponding values of a Member."""
        for field in Member.editable_text_fields:
            fill_qlineedit_from_db(self.ui, field, self.member)
        self.ui.notes_fld.setPlainText(self.member.notes_fld)
        self.ui.dead_fld.setChecked(bool(self.member.dead_fld))
        if self.member.birthDate_fld:
            self.ui.birthDate_fld.setDateTime(self.member.birthDate_fld)
        self.ui.subscribedToModulen_fld_checkbox.setChecked(
                bool(self.member.subscribedtomodulen_fld))
        self.ui.noPublishContactInfo_fld_checkbox.setChecked(
                bool(self.member.noPublishContactInfo_fld))
        init_gender_combobox(self.ui.gender_fld, self.member)
        # Contact information
        contactinfo = self.member.contactinfo
        for field in contactinfo.publicfields:
            fill_qlineedit_from_db(self.ui, field, contactinfo)
        # One shared delegate renders all membership list views.
        mshipdelegate = MembershipDelegate()
        # Groups
        grouplistmodel = GroupListModel(self.session, self.member, self,
                self.ui.group_comboBox)
        self.ui.groupView.setModel(grouplistmodel)
        self.ui.groupView.setItemDelegate(mshipdelegate)
        self.ui.removeGroupButton.clicked.connect(lambda:
                self.removeSelectedMembership(self.ui.groupView))
        # Start editing a membership row as soon as it is inserted.
        grouplistmodel.rowsInserted.connect(lambda index, row:
                self.ui.groupView.edit(grouplistmodel.index(row)))
        # Posts
        postlistmodel = PostListModel(self.session, self.member, self,
                self.ui.post_comboBox)
        self.ui.postView.setModel(postlistmodel)
        self.ui.removePostButton.clicked.connect(lambda:
                self.removeSelectedMembership(self.ui.postView))
        self.ui.postView.setItemDelegate(mshipdelegate)
        postlistmodel.rowsInserted.connect(lambda index, row:
                self.ui.postView.edit(postlistmodel.index(row)))
        # Departments
        departmentlistmodel = DepartmentListModel(self.session, self.member, self,
                self.ui.department_comboBox)
        self.ui.departmentView.setModel(departmentlistmodel)
        self.ui.removeDepartmentButton.clicked.connect(lambda:
                self.removeSelectedMembership(self.ui.departmentView))
        self.ui.departmentView.setItemDelegate(mshipdelegate)
        # Memberships
        membershiplistmodel = MembershipListModel(self.session, self.member,
                self, self.ui.membership_comboBox)
        self.ui.membershipView.setModel(membershiplistmodel)
        self.ui.removeMembershipButton.clicked.connect(lambda:
                self.removeSelectedMembership(self.ui.membershipView))
        self.ui.membershipView.setItemDelegate(mshipdelegate)
        # Shortcut buttons for the common membership types.
        self.ui.makePhuxButton.clicked.connect(lambda:
                membershiplistmodel.insertMembership("Phux"))
        self.ui.makeOrdinarieButton.clicked.connect(lambda:
                membershiplistmodel.insertMembership("Ordinarie medlem"))
        self.ui.makeActiveAlumnButton.clicked.connect(lambda:
                membershiplistmodel.insertMembership("Aktiv alumn"))
        self.ui.makeStAlMButton.clicked.connect(lambda:
                membershiplistmodel.insertMembership("StÄlM"))
        self.ui.makeJuniorStAlMButton.clicked.connect(lambda:
                membershiplistmodel.insertMembership("JuniorStÄlM"))
        self.ui.makeEjMedlemButton.clicked.connect(lambda:
                membershiplistmodel.insertMembership("Ej längre medlem"))
        # Optional LDAP-integration.
        self.ui.tabWidget.setTabEnabled(1, False)
        if self.ldapmanager:
            self.ui.createUserAccountOrChangePasswordButton.clicked.connect(
                    lambda: self.createAccountOrChangePassword())
            self.ui.removeAccountButton.clicked.connect(lambda:
                    self.removeAccount())
            self.ui.tabWidget.setTabEnabled(1, True)
            self.refreshUserAccounts()

    def removeSelectedMembership(self, listview):
        """Remove selected items from a QListView."""
        selections = listview.selectedIndexes()
        for index in selections:
            listview.model().removeRow(index.row())

    def accept(self):
        """Commit Member.*_fld and contactinfo.*_fld changes to database.

        Most notably all the QListView changes are already committed.
        """
        for field in Member.editable_text_fields:
            # Skip usernames rejected by UsernameValidator (not unique).
            if (field == "username_fld" and not
                    self.ui.username_fld.hasAcceptableInput()):
                continue
            update_qtextfield_to_db(self.ui, field, self.member)
        # Truncate notes to 255 characters before saving.
        self.member.notes_fld = str(self.ui.notes_fld.toPlainText()[:255])
        self.member.gender_fld = self.ui.gender_fld.currentIndex()
        # NOTE(review): 'date' is never used -- candidate for removal.
        date = self.ui.birthDate_fld.dateTime().date()
        self.member.birthDate_fld = self.ui.birthDate_fld.dateTime(
                ).toPyDateTime()
        self.member.dead_fld = int(self.ui.dead_fld.isChecked())
        self.member.subscribedtomodulen_fld = int(
                self.ui.subscribedToModulen_fld_checkbox.isChecked())
        self.member.noPublishContactInfo_fld = int(
                self.ui.noPublishContactInfo_fld_checkbox.isChecked())
        contactinfo = self.member.contactinfo
        for field in contactinfo.publicfields:
            update_qtextfield_to_db(self.ui, field, contactinfo)
        self.session.commit()
        self.parent.populateMemberList(choosemember=self.member)
        self.close()

    def reject(self):
        """Close the dialog without saving the fields to the database."""
        # TODO: Also rollback the MembershipListView changes.
        self.session.rollback()
        self.close()
class SvakSvat(QMainWindow):
    """Member Registry Application."""

    def __init__(self, session):
        """Create the window.

        session -- SQLAlchemy scoped_session.
        """
        super().__init__()
        self.session = session  # Assuming scoped_session
        self.initUI()
        # LDAP integration is optional; fall back to registry-only mode.
        try:
            self.ldapmanager = useraccounts.LDAPAccountManager()
        except Exception as e:
            print(e)
            print("Deaktiverar LDAP-integration.")
            self.ldapmanager = None
        self.setStatusMessage("Redo!", 3000)

    def initUI(self):
        """Create the window and connect the Qt signals to slots"""
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Connect signals to slots.
        self.ui.searchfield.textChanged.connect(self.searchlist)
        self.ui.memberlistwidget.currentRowChanged.connect(lambda:
                self.showMemberInfo())
        self.ui.memberlistwidget.itemActivated.connect(self.editMember)
        self.ui.searchfield.returnPressed.connect(self.ui.memberlistwidget.setFocus)
        self.ui.actionNewMember.triggered.connect(self.createMember)
        self.ui.actionRemoveMember.triggered.connect(self.removeMember)
        self.ui.actionEditMember.triggered.connect(self.editMember)
        self.ui.actionMakeBackup.triggered.connect(self.makeBackup)
        self.ui.actionRestoreFromBackup.triggered.connect(self.restoreFromBackup)
        self.ui.memberlistwidget.addAction(self.ui.actionEditMember)
        self.ui.memberlistwidget.addAction(self.ui.actionRemoveMember)
        self.ui.memberlistwidget.setContextMenuPolicy(Qt.ActionsContextMenu)
        self.populateMemberList()
        self.setWindowTitle('SvakSvat')

    def makeBackup(self):
        """Pickle the entire database to a user-chosen file."""
        filename = QFileDialog.getSaveFileName(self,
                'Välj namn för säkerhetskopia', '.')[0]
        if not filename:
            return  # User cancelled the file dialog.
        with open(filename, 'wb') as f:
            pickler = pickle.Pickler(f)
            pickler.dump(backup_everything(self.session))

    def restoreFromBackup(self):
        """Restore the database from a pickled backup file."""
        # BUG FIX: getOpenFileName returns a (filename, filter) tuple
        # (cf. makeBackup); previously the tuple itself reached open().
        filename = QFileDialog.getOpenFileName(self,
                'Öppna säkerthetskopia',
                '.')[0]
        if not filename:
            return  # User cancelled the file dialog.
        # NOTE: unpickling executes arbitrary code -- only restore
        # backups from trusted sources.
        with open(filename, 'rb') as f:
            data = pickle.Unpickler(f).load()
            if restore_everything(self.session, data):
                self.session.commit()
                self.populateMemberList()

    def createMember(self):
        """Open the dialog for adding a new member."""
        newmemberdialog = NewMemberDialog(self.session, self)
        newmemberdialog.exec()

    def removeMember(self):
        """Remove selected member."""
        member = self.currentMember()
        if member is None:
            return  # Nothing selected.
        wholename = member.getWholeName()
        confirm = "Är du säker att du vill ta bort användaren %s?" % wholename
        reply = QMessageBox.question(self, 'Bekräfta',
                confirm, QMessageBox.Yes,
                QMessageBox.No)
        if reply == QMessageBox.Yes:
            self.session.delete(member)
            self.session.commit()
            self.populateMemberList()
            self.setStatusMessage("Användare %s borttagen!" % wholename)

    def populateMemberList(self, choosemember=None):
        """Fill the memberlist from the database.

        choosemember -- optional Member to select after the refresh.
        """
        self.memberlist = self.session.query(Member).order_by(
                Member.surName_fld).all()
        self.ui.searchfield.clear()
        self.searchlist()
        if choosemember:
            memberindex = self.memberlist.index(choosemember)
            self.ui.memberlistwidget.setCurrentRow(memberindex)

    def currentMember(self):
        """Return the currently selected member, or None if no selection.

        BUG FIX: currentRow() is -1 when nothing is selected; negative
        indexing used to silently return the *last* member instead.
        """
        row = self.ui.memberlistwidget.currentRow()
        if row < 0 or row >= len(self.filteredmemberlist):
            return None
        return self.filteredmemberlist[row]

    def editMember(self):
        """Edit the currently selected member."""
        member = self.currentMember()
        if member is None:
            return  # Nothing selected.
        self.membereditwidget = MemberEdit(self.session, member, self,
                self.ldapmanager)
        self.membereditwidget.show()

    def searchlist(self, pattern=''):
        """Perform a filter operation on the memberlist.

        pattern -- The string to match (case-insensitive substring).
        """
        pattern = pattern.upper()
        self.filteredmemberlist = [member for member in self.memberlist
                if pattern in member.getWholeName().upper()]
        self.ui.memberlistwidget.clear()
        for member in self.filteredmemberlist:
            self.ui.memberlistwidget.addItem(member.getWholeName())

    def showMemberInfo(self, member=None):
        """Show the member's info in the panel below.

        member -- backend.orm.Member to show; defaults to the selection.
        """
        if not member:
            member = self.currentMember()
            if member is None:
                return  # Nothing selected (e.g. the list was cleared).
        contactinfo = member.contactinfo
        memberinfo = """Namn: %s %s
Address: %s %s %s %s
Telefon: %s
Mobiltelefon: %s
Email: %s
Användarnamn: %s
""" % (
                member.givenNames_fld,
                member.surName_fld,
                contactinfo.streetAddress_fld,
                contactinfo.postalCode_fld,
                contactinfo.city_fld,
                contactinfo.country_fld,
                contactinfo.phone_fld,
                contactinfo.cellPhone_fld,
                contactinfo.email_fld,
                member.username_fld
        )
        membershipinfo = self.getMembershipInfo(member)
        self.ui.memberinfo.setText(memberinfo + membershipinfo)

    def getMembershipInfo(self, member):
        """Return a text summary of current posts and groups for a member.

        member -- backend.orm.Member

        Used in showMemberInfo."""
        currentposts = [postmembership.post.name_fld for postmembership in
                member.postmemberships if postmembership.isCurrent()]
        currentgroups = [groupmembership.group.name_fld for groupmembership in
                member.groupmemberships if groupmembership.isCurrent()]
        return ("\n".join(["\nPoster:"] + currentposts) +
                "\n".join(["\n\nGrupper:"] + currentgroups))

    def setStatusMessage(self, message, milliseconds=3000):
        """Sets a status message in the MainWindow.

        message -- The status message to set.
        milliseconds -- The lifetime of the message.
        """
        self.ui.statusbar.showMessage(message, milliseconds)
def main():
    """Launch the SvakSvat GUI and run the Qt event loop."""
    app = QApplication(sys.argv)
    # Credentials for the members database come from the password safe.
    ps = passwordsafe.PasswordSafe(enablegui=True)
    sessionmaker = scoped_session(ps.connect_with_config("members"))
    window = SvakSvat(sessionmaker)
    window.show()
    return app.exec_()
if __name__ == '__main__':
sys.exit(main())
|
<reponame>k-wojcik/kylin_client_tool
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
from models.object import JsonSerializableObj
from models.cube import CubeDesc, CubeModel
class CubeRequest(JsonSerializableObj):
    """
    python class mapping to org.apache.kylin.rest.request.CubeRequest
    """

    def __init__(self):
        JsonSerializableObj.__init__(self)
        self.uuid = None
        self.cubeName = None
        self.cubeDescData = None   # JSON-serialized CubeDesc
        self.modelDescData = None  # JSON-serialized CubeModel
        self.successful = None
        self.message = None
        self.cubeDescName = None
        self.project = None

    @staticmethod
    def from_json(json_dict):
        """Build a CubeRequest from a decoded JSON dict.

        Returns None for a missing, empty, or non-dict payload, mirroring
        the other request factories in this module.
        """
        # isinstance accepts dict subclasses, unlike the old type() check.
        if not json_dict or not isinstance(json_dict, dict):
            return None
        cr = CubeRequest()
        cr.uuid = json_dict.get('uuid')
        cr.cubeName = json_dict.get('cubeName')
        cr.cubeDescData = json_dict.get('cubeDescData')
        cr.modelDescData = json_dict.get('modelDescData')
        cr.successful = json_dict.get('successful')
        cr.message = json_dict.get('message')
        cr.cubeDescName = json_dict.get('cubeDescName')
        cr.project = json_dict.get('project')
        return cr

    @staticmethod
    def get_cube_request_from_cube_desc(cube_desc, model_desc, project=None):
        """Wrap a CubeDesc and CubeModel pair into a request payload.

        Returns None when either argument is missing or of the wrong type.
        """
        if not cube_desc or not isinstance(cube_desc, CubeDesc):
            return None
        if not model_desc or not isinstance(model_desc, CubeModel):
            return None
        cr = CubeRequest()
        cr.cubeDescData = cube_desc.to_json()
        cr.modelDescData = model_desc.to_json()
        cr.cubeName = cube_desc.name
        cr.project = project
        return cr
class JobBuildRequest(JsonSerializableObj):
    """
    python class mapping to org.apache.kylin.rest.request.JobBuildRequest
    """

    # Supported job actions for a cube segment.
    BUILD = 'BUILD'
    REFRESH = 'REFRESH'
    MERGE = 'MERGE'

    def __init__(self):
        JsonSerializableObj.__init__(self)
        # Segment time range endpoints; the job action defaults to BUILD.
        self.startTime = None
        self.endTime = None
        self.buildType = JobBuildRequest.BUILD
class JobListRequest(JsonSerializableObj):
    """
    python class mapping to org.apache.kylin.rest.request.JobListRequest
    """

    def __init__(self):
        JsonSerializableObj.__init__(self)
        self.cubeName = None
        self.projectName = None
        self.offset = None
        self.limit = None
        self.status = None  # optional iterable of job status codes

    def to_query_string(self):
        """Serialize the set filters as an HTTP query string.

        Only attributes that are set are included; returns '' when no
        filter is active.  'status' may repeat, one pair per entry.
        """
        # Collect pairs and join once, instead of appending '&' and
        # trimming the trailing separator afterwards.
        parts = []
        if self.cubeName:
            parts.append("cubeName=" + self.cubeName)
        if self.projectName:
            parts.append("projectName=" + self.projectName)
        if self.offset is not None:
            parts.append("offset=" + str(self.offset))
        if self.limit is not None:
            parts.append("limit=" + str(self.limit))
        if self.status:
            for status in self.status:
                parts.append("status=" + str(status))
        return "&".join(parts)
class ProjectRequest(JsonSerializableObj):
    """
    python class mapping to org.apache.kylin.rest.request.CreateProjectRequest
    """

    def __init__(self):
        JsonSerializableObj.__init__(self)
        # Project name and free-form description sent to the REST API.
        self.name = None
        self.description = None
class SQLRequest(JsonSerializableObj):
    """
    python class mapping to org.apache.kylin.rest.request.SQLRequest
    """

    def __init__(self):
        JsonSerializableObj.__init__(self)
        # Query text, target project, paging window, and whether partially
        # matched results are acceptable (per the Kylin REST API).
        self.sql = None
        self.project = None
        self.offset = None
        self.limit = None
        self.acceptPartial = None
class PrepareSqlRequest(JsonSerializableObj):
    """
    python class mapping to org.apache.kylin.rest.request.PrepareSqlRequest
    """

    def __init__(self):
        JsonSerializableObj.__init__(self)
        self.sql = None
        self.project = None
        self.offset = None
        self.limit = None
        self.acceptPartial = None
        self.params = None  # optional list of StateParam bind values

    @staticmethod
    def from_json(json_dict):
        """Build a PrepareSqlRequest from a decoded JSON dict.

        Returns None for a missing, empty, or non-dict payload, mirroring
        the other request factories in this module.
        """
        # isinstance accepts dict/list subclasses, unlike type() checks.
        if not json_dict or not isinstance(json_dict, dict):
            return None
        psr = PrepareSqlRequest()
        psr.sql = json_dict.get('sql')
        psr.project = json_dict.get('project')
        psr.offset = json_dict.get('offset')
        psr.limit = json_dict.get('limit')
        psr.acceptPartial = json_dict.get('acceptPartial')
        param_list = json_dict.get('params')
        if param_list and isinstance(param_list, list):
            psr.params = [StateParam.from_json(param) for param in param_list]
        return psr
class StateParam(JsonSerializableObj):
    """
    python class mapping to org.apache.kylin.rest.request.PrepareSqlRequest.StateParam
    """

    def __init__(self):
        JsonSerializableObj.__init__(self)
        self.className = None  # Java type name of the bind parameter
        self.value = None      # serialized parameter value

    @staticmethod
    def from_json(json_dict):
        """Build a StateParam from a decoded JSON dict (None if invalid)."""
        # isinstance accepts dict subclasses, unlike the old type() check.
        if not json_dict or not isinstance(json_dict, dict):
            return None
        sp = StateParam()
        sp.className = json_dict.get('className')
        sp.value = json_dict.get('value')
        return sp
|
<reponame>NIEHS/P-MACD
## This code was developed and authored by <NAME>, Ph.D.
## Unauthorized commercial reuse of the code and removal of this notice
## are prohibited.
## This research was supported by the Intramural Research Program of the NIH,
## National Institute of Environmental Health Sciences.
if (not CUSTOMMOTIFS):
    # Default motif regex set; findTitles holds the IUPAC-style display
    # name for each motif at the same index (w=[at], h=[atc], d=[atg],
    # r=[ag], y=[ct]).
    motifs2Find = ("A", "T", "G", "C", "Cg", "cG", "tC[at]", "[at]Ga", "tCa", "tGa", "tCt", "aGa", "tC", "Ga", "tC[atc]", "[atg]Ga", "cC", "Gg", "[at][ag]C", "G[ct][at]", "Cc", "gG", "[at]A", "T[at]")
    findTitles = ("A", "T", "G", "C", "Cg", "cG", "tCw", "wGa", "tCa", "tGa", "tCt", "aGa", "tC", "Ga", "tCh", "dGa", "cC", "Gg", "wrC", "Gyw", "Cc", "gG", "wA", "Tw")

# modified 10/28/14
# Output column headers for the six flags produced by isApobec().
apobecTitles = ("tC_mutation", "tC_mutation_to_G", "tC_mutation_to_T", "APOBEC_mutation", "APOBEC_mutation_to_G", "APOBEC_mutation_to_T")
def findMotifs(seq, motifs=None):
    """Return a tab-separated 0/1 string flagging which motifs occur in seq.

    seq -- sequence context string to scan.
    motifs -- sequence of regex patterns; defaults to the module-level
              motifs2Find tuple (backward compatible).

    A pseudo-motif "+" stores the concatenation of the two preceding flags
    (kept for parity with the original R implementation).
    """
    if motifs is None:
        motifs = motifs2Find
    motifPresent = ["0"] * len(motifs)
    c = 0
    for motif in motifs:
        if (motif == "+"):
            # Combine the two previous flags by string concatenation.
            motifPresent[c] = motifPresent[c - 1] + motifPresent[c - 2]
        elif re.search(motif, seq):
            motifPresent[c] = "1"
        c = c + 1
    # join() replaces the original reduce(lambda ...) -- also Python 3 safe.
    # (A stray no-op 'next' statement from the R translation was removed.)
    return "\t".join(motifPresent)
# Column indices (within findTitles / a findMotifs() result) used by
# isApobec() below; 'which' is presumably an R-style helper defined
# elsewhere in this script that returns indices of True entries -- confirm.
tCwPos = which([x=="tCw" for x in findTitles])[0]
wGaPos = which([x=="wGa" for x in findTitles])[0]
# modified 10/28/14 to add 3 tC_mutation columns
tCPos = which([x=="tC" for x in findTitles])[0]
GaPos = which([x=="Ga" for x in findTitles])[0]
def isApobec(findString):
    """Classify a mutation's motif context into six APOBEC flag columns.

    findString -- tab-separated 0/1 flags produced by findMotifs(), ordered
                  like findTitles.

    Returns a tab-separated string of six "0"/"1" bits in apobecTitles
    order: (tC_mutation, tC_mutation_to_G, tC_mutation_to_T,
    APOBEC_mutation, APOBEC_mutation_to_G, APOBEC_mutation_to_T).

    NOTE(review): reads the module-level 'fields' record and the *Pos
    indices computed above -- callers must set 'fields' first.  The '|'
    operators combine parenthesized booleans bitwise, mirroring the
    original R code; behaviorally equivalent to 'or' here.
    """
    apobecBits = ["0", "0", "0", "0", "0", "0"]
    findBits = findString.split("\t")
    # APOBEC_mutation_to_G: tCw context (or wGa on the opposite strand).
    if (
        ((findBits[tCwPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="G") |
        ((findBits[wGaPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="C")
    ):
        apobecBits[4] = "1"
    # APOBEC_mutation_to_T.
    if (
        ((findBits[tCwPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="T") |
        ((findBits[wGaPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="A")
    ):
        apobecBits[5] = "1"
    # APOBEC_mutation = to-G or to-T.
    if((apobecBits[4]=="1") or (apobecBits[5]=="1")):
        apobecBits[3] = "1"
    # Broader tC_mutation_to_G (APOBEC hits count as well).
    if((apobecBits[4]=="1") or
        ((findBits[tCPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="G") |
        ((findBits[GaPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="C")
    ):
        apobecBits[1] = "1"
    # Broader tC_mutation_to_T.
    if((apobecBits[5]=="1") or
        ((findBits[tCPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="T") |
        ((findBits[GaPos]=="1") and fields[TUMOR_SEQ_ALLELE2_FIELD]=="A")
    ):
        apobecBits[2] = "1"
    # Any tC-context mutation.
    if((apobecBits[1]=="1") | (apobecBits[2]=="1")):
        apobecBits[0] = "1"
    return list2tabString(apobecBits)
# version before 10/28/14
# NOTE: Retired R implementation kept verbatim inside a string literal for
# reference only; it is never executed.
'''
isApobecOld = function(findString) {
apobecBits = c("0", "0", "0")
findBits = unlist(strsplit(findString, "\t"))
if(
((findBits[tCwPos]=="1") & fields[TUMOR_SEQ_ALLELE2_FIELD]=="G") |
((findBits[wGaPos]=="1") & fields[TUMOR_SEQ_ALLELE2_FIELD]=="C")
) {
apobecBits[2] = "1"
}
if(
((findBits[tCwPos]=="1") & fields[TUMOR_SEQ_ALLELE2_FIELD]=="T") |
((findBits[wGaPos]=="1") & fields[TUMOR_SEQ_ALLELE2_FIELD]=="A")
) {
apobecBits[3] = "1"
}
if((apobecBits[2]=="1") | (apobecBits[3]=="1")) apobecBits[1] = "1"
return(paste(apobecBits, collapse="\t"))
}
'''
|
from os.path import splitext, abspath
from sys import modules
# Optional pywin32 imports (Python 2 except syntax).  Each module is
# probed separately so the file can still be loaded on hosts where some
# modules are missing; failures are reported but not fatal.
try:
    import win32serviceutil
except ImportError, details:
    print 'WARNING due to "%s".' % str(details)
try:
    import win32service
    # Friendly aliases for the Win32 service state constants.
    RUNNING = win32service.SERVICE_RUNNING
    STARTING = win32service.SERVICE_START_PENDING
    STOPPING = win32service.SERVICE_STOP_PENDING
    STOPPED = win32service.SERVICE_STOPPED
except ImportError, details:
    print 'WARNING due to "%s".' % str(details)
try:
    import win32event
except ImportError, details:
    print 'WARNING due to "%s".' % str(details)
try:
    import win32api
except ImportError, details:
    print 'WARNING due to "%s".' % str(details)
def get_service_containing(service_name):
    """Return the first WMI Win32_Service whose name contains service_name.

    Matching is a case-insensitive substring search over installed service
    names; returns None when nothing matches.
    """
    import wmi
    needle = str(service_name).lower()
    for candidate in wmi.WMI().Win32_Service():
        if needle in str(candidate.Name).lower():
            return candidate
    return None
def has_service_containing(service_name):
    """Tell whether any installed service name contains service_name."""
    match = get_service_containing(service_name)
    return match is not None
def GetShortName(longName):
    """Return the registry (short) name of the service whose DisplayName
    equals longName, or None when no installed service matches.

    Scans HKLM\\SYSTEM\\CurrentControlSet\\Services and compares each
    service key's DisplayName value against longName.
    """
    import win32con
    hkey = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE,
                               "SYSTEM\\CurrentControlSet\\Services",
                               0, win32con.KEY_ALL_ACCESS)
    try:
        num = win32api.RegQueryInfoKey(hkey)[0]
        for x in range(0, num):
            svc = win32api.RegEnumKey(hkey, x)
            skey = win32api.RegOpenKey(
                    win32con.HKEY_LOCAL_MACHINE,
                    "SYSTEM\\CurrentControlSet\\Services\\%s" % svc,
                    0, win32con.KEY_ALL_ACCESS)
            try:
                shortName = str(win32api.RegQueryValueEx(skey, "DisplayName")[0])
                if shortName == longName:
                    # BUG FIX: previously the last enumerated key leaked
                    # out of the loop and was returned even when no
                    # DisplayName matched.
                    return svc
            except win32api.error:
                # Service key without a DisplayName value: skip it.
                pass
            finally:
                win32api.RegCloseKey(skey)
        return None
    finally:
        # BUG FIX: registry handles were never closed before.
        win32api.RegCloseKey(hkey)
class Service(win32serviceutil.ServiceFramework):
    """Windows service skeleton; subclass and override start()/stop()."""

    # Placeholder identifiers; instart() overwrites these per service.
    _svc_name_ = '_unNamed'
    _svc_display_name_ = '_Service Template'

    def __init__(self, *args):
        win32serviceutil.ServiceFramework.__init__(self, *args)
        self.log('init')
        # Event signalled by SvcStop to release the SvcDoRun wait.
        self.stop_event = win32event.CreateEvent(None, 0, 0, None)

    def log(self, msg):
        """Write msg to the Windows event log."""
        import servicemanager
        servicemanager.LogInfoMsg(str(msg))

    def sleep(self, sec):
        """Alertable sleep for sec seconds."""
        win32api.Sleep(sec*1000, True)

    def SvcDoRun(self):
        """Service entry point: run start() then block until stopped."""
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        try:
            self.ReportServiceStatus(win32service.SERVICE_RUNNING)
            self.log('start')
            self.start()
            self.log('wait')
            # Block until SvcStop signals the stop event.
            win32event.WaitForSingleObject(self.stop_event, win32event.INFINITE)
            self.log('done')
        except Exception as x:
            self.log('Exception : %s' % x)
            self.SvcStop()

    def SvcStop(self):
        """Handle an SCM stop request: run stop() and release SvcDoRun."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        self.log('stopping')
        self.stop()
        self.log('stopped')
        win32event.SetEvent(self.stop_event)
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)

    # to be overridden
    def start(self): pass

    # to be overridden
    def stop(self): pass
def instart(cls, name, display_name=None, stay_alive=True):
    '''
    Install and Start (auto) a Service

    cls : the class (derived from Service) that implement the Service
    name : Service name
    display_name : the name displayed in the service manager
    stay_alive : Service will stop on logout if False
    '''
    cls._svc_name_ = name
    cls._svc_display_name_ = display_name or name
    try:
        module_path = modules[cls.__module__].__file__
    except AttributeError:
        # maybe py2exe went by
        from sys import executable
        module_path = executable
    module_file = splitext(abspath(module_path))[0]
    # Registry entry pointing at "<module path>.<class>" for the host.
    cls._svc_reg_class_ = '%s.%s' % (module_file, cls.__name__)
    # Ignore console control events so logoff does not kill the service.
    if stay_alive: win32api.SetConsoleCtrlHandler(lambda x: True, True)
    try:
        win32serviceutil.InstallService(
                cls._svc_reg_class_,
                cls._svc_name_,
                cls._svc_display_name_,
                startType=win32service.SERVICE_AUTO_START
        )
        print 'Install ok'
        win32serviceutil.StartService(
                cls._svc_name_
        )
        print 'Start ok'
    except Exception as x:
        print str(x)
if (__name__ == '__main__'):
    # Ad-hoc smoke test: report whether these services are installed.
    services = ['memcache','tntdrive']
    for service in services:
        print 'Checking for %s' % (service)
        __service__ = get_service_containing(service)
        __has__ = __service__ is not None
        print '%s %s%s' % ('Has' if (__has__) else 'Does not have',service,' (%s)'%(__service__.State if (__has__) else ''))
|
# Copyright 2016-2020 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/caliban-toolbox/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import os
import json
from itertools import product
def _save_crop(save_path, channels, labels, save_format):
    """Write a single crop/slice to disk in the requested format."""
    if save_format == 'npz':
        np.savez_compressed(save_path + '.npz', X=channels, y=labels)
    elif save_format == 'xr':
        raise NotImplementedError()


def save_npzs_for_caliban(X_data, y_data, original_data, log_data, save_dir,
                          blank_labels='include', save_format='npz', verbose=True):
    """Take an array of processed image data and save as NPZ for caliban

    Args:
        X_data: 7D tensor of cropped and sliced raw images
        y_data: 7D tensor of cropped and sliced labeled images
        original_data: the original unmodified images
        log_data: data used to reconstruct images; updated in place with
            bookkeeping entries and written to log_data.json
        save_dir: path to save the npz and JSON files
        blank_labels: whether to include NPZs with blank labels (poor
            predictions) or skip (no cells); one of 'skip', 'include',
            'separate'
        save_format: format to save the data (currently only NPZ)
        verbose: flag to control print statements

    Raises:
        ValueError: if blank_labels is not a recognized mode
    """
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    # if these are present, it means data was cropped/sliced. Otherwise, default to 1
    num_crops = log_data.get('num_crops', 1)
    num_slices = log_data.get('num_slices', 1)

    fov_names = original_data.fovs.values
    fov_len = len(fov_names)

    if blank_labels not in ['skip', 'include', 'separate']:
        raise ValueError('blank_labels must be one of '
                         '[skip, include, separate], got {}'.format(blank_labels))

    if blank_labels == 'separate':
        os.makedirs(os.path.join(save_dir, 'separate'))

    # for each fov, loop through 2D crops and 3D slices
    for fov, crop, slice in product(range(fov_len), range(num_crops), range(num_slices)):
        # generate identifier for crop
        npz_id = 'fov_{}_crop_{}_slice_{}'.format(fov_names[fov], crop, slice)

        # get working batch
        labels = y_data[fov, :, crop, slice, ...].values
        channels = X_data[fov, :, crop, slice, ...].values

        # determine if labels are blank, and if so what to do with npz
        if np.sum(labels) == 0:
            if blank_labels == 'separate':
                # blank labels get saved to separate folder
                if verbose:
                    print('{} is blank, saving to separate folder'.format(npz_id))
                _save_crop(os.path.join(save_dir, blank_labels, npz_id),
                           channels, labels, save_format)
            elif blank_labels == 'skip':
                # blank labels don't get saved, empty area of tissue
                if verbose:
                    print('{} is blank, skipping saving'.format(npz_id))
            elif blank_labels == 'include':
                # blank labels get saved along with other crops
                if verbose:
                    print('{} is blank, saving to folder'.format(npz_id))
                _save_crop(os.path.join(save_dir, npz_id),
                           channels, labels, save_format)
        else:
            # crop is not blank, save based on file_format
            _save_crop(os.path.join(save_dir, npz_id),
                       channels, labels, save_format)

    # record everything needed to reconstruct the stack later
    log_data['fov_names'] = fov_names.tolist()
    log_data['label_name'] = str(y_data.coords[y_data.dims[-1]][0].values)
    log_data['original_shape'] = original_data.shape
    log_data['slice_stack_len'] = X_data.shape[1]
    log_data['save_format'] = save_format
    log_data['label_dtype'] = str(y_data.dtype)

    log_path = os.path.join(save_dir, 'log_data.json')
    with open(log_path, 'w') as write_file:
        json.dump(log_data, write_file)
def get_saved_file_path(dir_list, fov_name, crop, slice, file_ext='.npz'):
    """Resolve the on-disk file name for a given fov/crop/slice combination.

    Args:
        dir_list: list of file names present in the save directory
        fov_name: name of the current fov
        crop: index of the current crop
        slice: index of the current slice
        file_ext: extension the file was saved with

    Returns:
        string: the matching file name

    Raises:
        ValueError: if more than one saved version matches
    """
    prefix = 'fov_{}_crop_{}_slice_{}'.format(fov_name, crop, slice)
    versioned = [name for name in dir_list if prefix + '_save_version' in name]
    if len(versioned) > 1:
        raise ValueError('Multiple save versions found: '
                         'please select only a single save version. {}'.format(versioned))
    # no versioned match -> fall back to the plain base name + extension
    return versioned[0] if versioned else prefix + file_ext
def load_npzs(crop_dir, log_data, verbose=True):
    """Read every cropped file in a directory and rebuild a single stack.

    Args:
        crop_dir: path to directory with cropped npz or xarray files
        log_data: dictionary of parameters generated during data saving
        verbose: flag to control print statements

    Returns:
        numpy.array: 7D tensor of labeled crops
    """
    fov_names = log_data['fov_names']
    fov_len, stack_len, _, _, row_size, col_size, _ = log_data['original_shape']
    save_format = log_data['save_format']
    label_dtype = log_data['label_dtype']

    # fall back to the uncropped/unsliced sizes when the log has no crop info
    row_crop_size = log_data.get('row_crop_size', row_size)
    col_crop_size = log_data.get('col_crop_size', col_size)
    slice_stack_len = log_data.get('slice_stack_len', stack_len)
    num_crops = log_data.get('num_crops', 1)
    num_slices = log_data.get('num_slices', 1)

    stack = np.zeros((fov_len, slice_stack_len, num_crops,
                      num_slices, row_crop_size, col_crop_size, 1), dtype=label_dtype)
    dir_contents = os.listdir(crop_dir)

    # for each fov, loop over each 2D crop and 3D slice
    for fov_idx, crop_idx, slice_idx in product(range(fov_len), range(num_crops),
                                                range(num_slices)):
        if save_format == 'npz':
            file_name = get_saved_file_path(dir_contents, fov_names[fov_idx],
                                            crop_idx, slice_idx)
            npz_path = os.path.join(crop_dir, file_name)
            if not os.path.exists(npz_path):
                # npz not generated, did not contain any labels, keep blank
                if verbose:
                    print('could not find npz {}, skipping'.format(npz_path))
                continue
            archive = np.load(npz_path)
            # labels were saved under either 'y' or the older 'annotated' key
            labels_key = 'y' if 'y' in archive else 'annotated'
            # the last slice may have been truncated when saved, so take its
            # actual length instead of the nominal slice length
            if slice_idx == num_slices - 1:
                current_stack_len = archive[labels_key].shape[1]
            else:
                current_stack_len = slice_stack_len
            stack[fov_idx, :current_stack_len, crop_idx, slice_idx, ...] = archive[labels_key]
        elif save_format == 'xr':
            # xarray loading is not supported yet
            raise NotImplementedError()

    return stack
|
# a toolbox for music songs similarity weighting, song clustering, and so on
import csv
import logging
import os
import shutil
import numpy as np
import scipy
from matplotlib.pyplot import specgram
from scipy.io import wavfile
from sklearn.cluster import KMeans
# Root directory holding the raw song files to process.
SONGS_DIR = '/home/lucasx/Documents/Dataset/CloudMusic/1'
# Directory where the computed FFT feature .npy files are written.
FFT_NPY_DIR = '/home/lucasx/Documents/Dataset/CloudMusic/fft_npy'
def generate_data_and_label(songs_dir):
    """Generate ``dataset.csv`` mapping each song file name to its label.

    Songs are expected under *songs_dir* in sub-directories named after their
    label: ``1`` for positive samples, ``0`` for negative ones; any other
    sub-directory is ignored. The CSV is written to the current directory.

    :param songs_dir: root directory containing the ``0``/``1`` label folders
    :return: None (writes ``dataset.csv`` as a side effect)
    """
    song_data = dict()
    for label_dir in os.listdir(songs_dir):
        # the two branches of the original only differed in the label value,
        # and built an unused file path — collapse them into one
        if label_dir in ('0', '1'):
            label = int(label_dir)
            for song_name in os.listdir(os.path.join(songs_dir, label_dir)):
                song_data[song_name] = label

    with open('dataset.csv', 'wt', encoding='UTF-8', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=['songname', 'label'])
        writer.writeheader()
        for song_name, label in song_data.items():
            # os.listdir already yields bare names; basename preserves the
            # original intent of stripping any path prefix
            writer.writerow({'songname': os.path.basename(song_name), 'label': label})
    print('CSV file Pre-processing done!!!')
def create_fft(filename, out_dir=None):
    """Compute and save the first 1000 FFT magnitude coefficients of a WAV file.

    The features are saved as ``<basename>.fft.npy`` inside *out_dir*.

    :param filename: path to a WAV file readable by ``scipy.io.wavfile``
    :param out_dir: output directory; defaults to the module-level FFT_NPY_DIR
                    (parameter added for testability, backward compatible)
    :return: None (writes the .npy file as a side effect)
    """
    if out_dir is None:
        out_dir = FFT_NPY_DIR
    sample_rate, X = wavfile.read(filename)
    # Bug fix: in modern SciPy ``scipy.fft`` is a module, so the original
    # ``scipy.fft(X)`` call raises TypeError; numpy's FFT is the equivalent.
    fft_features = np.abs(np.fft.fft(X)[0:1000])
    base_fn, ext = os.path.splitext(filename)
    # Bug fix: the original string concatenation lacked a path separator, so
    # files were written as '<parent>/fft_npy<name>.fft' outside the directory.
    data_fn = os.path.join(out_dir, os.path.basename(base_fn) + '.fft')
    np.save(data_fn, fft_features)
    # draw the spec gram figure
    print(sample_rate, X.shape)
    # specgram(X, Fs=sample_rate, xextent=(0, 30))
def batch_create_fft():
    """Recreate FFT_NPY_DIR from scratch and compute FFT features for every song.

    Any previous contents of FFT_NPY_DIR are deleted first.
    """
    if os.path.exists(FFT_NPY_DIR):
        shutil.rmtree(FFT_NPY_DIR)
    os.makedirs(FFT_NPY_DIR)
    song_paths = (os.path.join(SONGS_DIR, name) for name in os.listdir(SONGS_DIR))
    for song_path in song_paths:
        create_fft(song_path)
    logging.log(logging.INFO, 'All music files have been processed successfully~~~')
def read_fft(fft_npy_file_dir):
    """Load every saved ``.fft.npy`` feature vector from a directory.

    :param fft_npy_file_dir: directory containing ``.fft.npy`` files
    :return: tuple (X, y) where X is a 2D array of feature vectors (first
             1000 coefficients each) and y the per-sample labels.
             NOTE(review): labels are hard-coded to 1 for every file —
             presumably a placeholder; verify against the dataset CSV.
    """
    X = []
    y = []
    for fft_npy_file in os.listdir(fft_npy_file_dir):
        if fft_npy_file.endswith('.fft.npy'):
            X.append(np.load(os.path.join(fft_npy_file_dir, fft_npy_file))[:1000])
            # Bug fix: append the label only for files actually loaded; the
            # original appended before the extension check, so unsupported
            # files desynchronized the lengths of X and y.
            y.append(1)
        else:
            logging.error('unsupported format for file %s' % fft_npy_file)
    return np.array(X), np.array(y)
def batch_rename(dir_):
    """Rename every file in *dir_* to sequential names ``1.mp3``, ``2.mp3``, ...

    The numbering follows ``os.listdir`` order, which is arbitrary.
    NOTE(review): if the directory already contains a file named like a
    rename target (e.g. '1.mp3'), that file can be overwritten — confirm
    this is acceptable before running on real data.
    """
    for index, old_name in enumerate(os.listdir(dir_), start=1):
        os.rename(os.path.join(dir_, old_name), os.path.join(dir_, '%d.mp3' % index))
    print('All mp3 files have been renamed...')
if __name__ == '__main__':
    # one-off preprocessing steps, run once then commented out:
    # generate_data_and_label(SONGS_DIR)
    # batch_create_fft()
    features, _ = read_fft(FFT_NPY_DIR)
    # cluster the FFT feature vectors into 8 groups
    model = KMeans(n_clusters=8, random_state=1).fit(features)
    print(model.labels_)
|
# Repository: n-kawauchi/pipeline-test
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
## AttachDetachRTCTest.py
##
## Memory-leak check
## Exercises operations defined in RTC.idl:
## the operations for attaching/detaching an execution context (EC)
#
# $Id$
#
from rtc_handle import *
from BasicDataType_idl import *
import time
import commands
# NOTE(review): 'commands' exists only in Python 2, and the file uses Python 2
# print statements below — the python3 shebang above looks wrong; confirm.
# Connect to the name server and look up the running RT components.
env = RtmEnv(sys.argv, ["localhost:9898"])
list0 = env.name_space["localhost:9898"].list_obj()
env.name_space['localhost:9898'].rtc_handles.keys()
ns = env.name_space['localhost:9898']
time.sleep(2)
compo1 = ns.rtc_handles["ConsoleIn0.rtc"]
compo0 = ns.rtc_handles["ConsoleOut0.rtc"]
seqin0 = ns.rtc_handles["SequenceInComponent0.rtc"]
# Execution contexts owned by ConsoleOut0; used for attach/detach below.
ec = compo0.rtc_ref.get_owned_contexts()
def mem_rss():
    """Return the memory usage of the rtcd process as reported by ``ps``.

    Scrapes column 8 of ``ps alxww`` output (RSS; presumably KB on this
    platform — TODO confirm) for the line matching rtcd.
    """
    status, output = commands.getstatusoutput("ps alxww | grep \"[r]\"tcd")
    return output.split()[7]
## file and console out
def print_file_and_cons(out_data, out_flag=0):
## out_flag:1 is file out only
if out_flag == 1:
fout.write(out_data + '\n')
fout.flush()
## out_flag:2 is console out only
elif out_flag == 2:
print out_data
## out_flag:0 is console and file out (default)
else:
print out_data
fout.write(out_data + '\n')
fout.flush()
return
## memory leak check
def leak_check(rss_start, rss_end):
    """Report whether the process RSS changed between the start and end samples."""
    if rss_start == rss_end:
        fodat = " result: memory leak was not found."
    else:
        fodat = " result: memory leak was found !!!"
    print_file_and_cons(fodat)
    return
## file out setting
test_case = "AttachDetachRTCTest"
fout = open(test_case + ".log", 'w')
fodat = "=== " + test_case + " start ==="
print_file_and_cons(fodat)
# number of attach/detach iterations to run
loop_cnt = 1000
## -----------------------------------------------------------------------------
fodat = "attach_context() and detach_context()"
print_file_and_cons(fodat)
# Repeatedly attach and detach the same execution context, sampling the
# process RSS each iteration; steady growth indicates a leak.
for i in range(loop_cnt):
    # LightweightRTObject::attach_context(in ExecutionContext exec_context)
    ec_id = compo0.rtc_ref.attach_context(ec[0]) # set used OK. single use NG.
    #print "attach_context() ret=",ec_id
    # LightweightRTObject::detach_context(in ExecutionContextHandle_t exec_handle)
    retcode = compo0.rtc_ref.detach_context(ec_id)
    #print "detach_context() ret=",retcode
    if i == 0:
        # record the baseline RSS on the first iteration
        rss0 = mem_rss() ; j0 = 0 ; rssStart = rss0
        fodat = " %05d: %s KB start" % (1, rss0)
        print_file_and_cons(fodat,1)
    rss1 = mem_rss() ; j1 = i
    if rss0 != rss1:
        # RSS changed since the last sample: log the delta and how many
        # iterations it took, then make this sample the new baseline
        fodat = " %05d: %s KB -> %d KB. count diff -> %d" % (i+1, rss1,int(rss1)-int(rss0),int(j1)-int(j0) )
        print_file_and_cons(fodat,1)
        rss0 = rss1 ; j0 = j1
# final sample and overall verdict
rssEnd = mem_rss()
fodat = " %05d: %s KB end" % (i+1, rssEnd)
print_file_and_cons(fodat,1)
leak_check(rssStart, rssEnd)
## -----------------------------------------------------------------------------
fodat = "=== " + test_case + " end ==="
print_file_and_cons(fodat)
fout.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.