hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f704acaf0d0906ebdcf3194cb863c6e94506ef5d
| 2,146
|
py
|
Python
|
stubs.min/System/Windows/Forms/__init___parts/ToolStripRenderEventArgs.py
|
ricardyn/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2021-02-02T13:39:16.000Z
|
2021-02-02T13:39:16.000Z
|
stubs.min/System/Windows/Forms/__init___parts/ToolStripRenderEventArgs.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/System/Windows/Forms/__init___parts/ToolStripRenderEventArgs.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class ToolStripRenderEventArgs(EventArgs):
    """
    Provides data for the System.Windows.Forms.ToolStripRenderer.OnRenderImageMargin(System.Windows.Forms.ToolStripRenderEventArgs),System.Windows.Forms.ToolStripRenderer.OnRenderToolStripBorder(System.Windows.Forms.ToolStripRenderEventArgs),and System.Windows.Forms.ToolStripRenderer.OnRenderToolStripBackground(System.Windows.Forms.ToolStripRenderEventArgs) methods.
    ToolStripRenderEventArgs(g: Graphics,toolStrip: ToolStrip)
    ToolStripRenderEventArgs(g: Graphics,toolStrip: ToolStrip,affectedBounds: Rectangle,backColor: Color)
    """
    # NOTE: auto-generated IronPython stub for a .NET WinForms type. Bodies are
    # placeholders only; the three-lambda property() declarations exist so IDEs
    # can resolve get/set/delete — none of this implements real behavior.
    @staticmethod
    def __new__(self,g,toolStrip,affectedBounds=None,backColor=None):
        """
        __new__(cls: type,g: Graphics,toolStrip: ToolStrip)
        __new__(cls: type,g: Graphics,toolStrip: ToolStrip,affectedBounds: Rectangle,backColor: Color)
        """
        pass
    AffectedBounds=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the System.Drawing.Rectangle representing the bounds of the area to be painted.
    Get: AffectedBounds(self: ToolStripRenderEventArgs) -> Rectangle
    """
    BackColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the System.Drawing.Color that the background of the System.Windows.Forms.ToolStrip is painted with.
    Get: BackColor(self: ToolStripRenderEventArgs) -> Color
    """
    ConnectedArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the System.Drawing.Rectangle representing the overlap area between a System.Windows.Forms.ToolStripDropDown and its System.Windows.Forms.ToolStripDropDown.OwnerItem.
    Get: ConnectedArea(self: ToolStripRenderEventArgs) -> Rectangle
    """
    Graphics=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the System.Drawing.Graphics used to paint.
    Get: Graphics(self: ToolStripRenderEventArgs) -> Graphics
    """
    ToolStrip=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets the System.Windows.Forms.ToolStrip to be painted.
    Get: ToolStrip(self: ToolStripRenderEventArgs) -> ToolStrip
    """
| 42.078431
| 367
| 0.768872
|
class ToolStripRenderEventArgs(EventArgs):
__new__(cls: type,g: Graphics,toolStrip: ToolStrip)
__new__(cls: type,g: Graphics,toolStrip: ToolStrip,affectedBounds: Rectangle,backColor: Color)
Get: AffectedBounds(self: ToolStripRenderEventArgs) -> Rectangle
ConnectedArea=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Drawing.Rectangle representing the overlap area between a System.Windows.Forms.ToolStripDropDown and its System.Windows.Forms.ToolStripDropDown.OwnerItem.
"""
| true
| true
|
f704acb27652fac4c53df6d424b7bb033879e704
| 15,975
|
py
|
Python
|
CollisionAvoidanceMonitor/main.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
CollisionAvoidanceMonitor/main.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
CollisionAvoidanceMonitor/main.py
|
GustavLero/EPICS-inst_servers
|
4bcdd6a80f1d9e074de3f0f7c66968d506981988
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import os
import ode
import logging
import threading
from time import sleep, time
from genie_python.genie_startup import *
import pv_server
import render
from configurations import config_zoom as config
from collide import collide, CollisionDetector
from geometry import GeometryBox
from move import move_all
sys.path.insert(0, os.path.abspath(os.environ["MYDIRCD"]))
from monitor import Monitor
from server_common.loggers.isis_logger import IsisLogger
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(threadName)-2s) %(message)s',
)
def auto_seek(start_step_size, start_values, end_value, geometries, moves, axis_index, ignore, fine_step=None):
    """Scan one axis from its current value towards end_value until a collision
    is detected, returning the last collision-free value (the soft limit).

    Args:
        start_step_size: Initial (positive) step magnitude for the scan.
        start_values: Current values for all axes; only axis_index is varied.
        end_value: Hard limit to scan towards.
        geometries: Geometry bodies moved via move_all and collision-checked.
        moves: Move functions applied by move_all.
        axis_index: Index of the axis being scanned.
        ignore: Collision pairs to ignore.
        fine_step: Optional smaller step; when a collision is found at coarse
            resolution the scan is repeated from the last safe value at this
            resolution.

    Returns:
        The limiting value for the axis (end_value if nothing collides).

    Raises:
        ValueError: If no limit could be determined.
    """
    limit = end_value
    current_value = start_values[axis_index]
    if current_value == end_value:
        return end_value
    values = start_values[:]
    last_value = None
    old_points = None
    step_checked = False
    if current_value < end_value:
        # Going up
        def comp(a, b):
            return a < b
        step_size = abs(start_step_size)
    else:
        # Going down
        def comp(a, b):
            return a > b
        step_size = -abs(start_step_size)
    while last_value is None or comp(last_value, end_value):
        # Advance, except on the first pass (or after a step-size reset)
        if last_value is not None:
            current_value += step_size
        else:
            current_value = start_values[axis_index]
        if not comp(current_value, end_value):
            current_value = end_value
        values[axis_index] = current_value
        move_all(geometries, moves, values=values[:])
        # Validate the step size once: no vertex may move further than the
        # requested step per iteration, or collisions could be skipped over.
        if step_checked is False:
            new_points = [g.get_vertices() for g in geometries]
            if old_points is not None:
                delta = max_delta(geometries, new_points, old_points)
                if delta > start_step_size:
                    # Shrink the step proportionally and restart the scan.
                    step_size *= start_step_size/delta
                    last_value = None
                    continue
                # BUG FIX: mark the step validated only after a delta has
                # actually been measured; previously this ran on the first
                # pass (old_points still None), so the check never fired.
                step_checked = True
        # Check for collisions
        collisions = collide(geometries, ignore)
        if any(collisions):
            if current_value == start_values[axis_index]:
                # There was already a collision at the starting position
                limit = current_value
                break
            elif fine_step and fine_step < abs(step_size):
                # BUG FIX: compare against the step magnitude; step_size is
                # negative when seeking downwards, so the original
                # "fine_step < step_size" never refined downward scans.
                start_values[axis_index] = last_value
                limit = auto_seek(fine_step, start_values, current_value, geometries, moves, axis_index, ignore)
            else:
                limit = last_value
                break
        old_points = new_points[:]
        last_value = current_value
    if limit is None:
        raise ValueError("Null limit")
    return limit
def max_delta(geometries, new_points, old_points):
    """Return the largest Euclidean distance moved by any vertex of any body.

    Only the first len(geometries) bodies of the point lists are considered;
    each body's points are pairwise (new - old) vertex vectors of three
    components.
    """
    largest_sq = 0
    for body_index in range(len(geometries)):
        body_old = old_points[body_index]
        body_new = new_points[body_index]
        for vec in (n - o for n, o in zip(body_new, body_old)):
            dx, dy, dz = (float(c) for c in vec)
            sq_mag = dx * dx + dy * dy + dz * dz
            # Track the maximum squared magnitude; take one sqrt at the end.
            if sq_mag > largest_sq:
                largest_sq = sq_mag
    return float(largest_sq) ** 0.5
def compare(sign):
    """Return a strict comparison predicate matching the direction of travel.

    Positive sign yields "a > b"; zero or negative yields "a < b".
    """
    def _greater(a, b):
        return a > b

    def _less(a, b):
        return a < b

    return _greater if sign > 0 else _less
def auto_seek_limits(geometries, ignore, moves, values, limits, coarse=1.0, fine=0.1):
    """Seek the dynamic (collision-free) limits for every axis.

    Args:
        geometries: Geometry bodies moved and collision-checked by auto_seek.
        ignore: Collision pairs to ignore.
        moves: Move functions applied by move_all.
        values: Current values for every axis.
        limits: Hard limit pairs per axis; min/max are the seek end points.
        coarse: Coarse seek step size.
        fine: Fine seek step size used to refine a coarse hit.

    Returns:
        A list of [lower, upper] dynamic limit pairs, one per axis.
    """
    dynamic_limits = []
    for i in range(len(values)):
        logging.debug("Seeking for axis %d" % i)
        lower_limit = auto_seek(coarse, values[:], min(limits[i]), geometries, moves, i, ignore, fine)
        upper_limit = auto_seek(coarse, values[:], max(limits[i]), geometries, moves, i, ignore, fine)
        dynamic_limits.append([lower_limit, upper_limit])
        # BUG FIX: log lower before upper so the message matches the
        # [lower, upper] order of the returned pair (it previously printed
        # upper first, which was misleading when reading the logs).
        logging.debug("Found limits for axis %d at %s, %s" % (i, lower_limit, upper_limit))
    return dynamic_limits
def look_ahead(start_values, pvs, is_moving, geometries, moves, ignore, max_movement=1.0, max_time=10., time_step=0.1):
    """Simulate the in-flight motor moves forward in time and report whether a
    collision is expected.

    Reads each moving axis's set point (.DVAL) and speed (.VELO), advances all
    moving axes together in simulated time steps, and collision-checks the
    geometry at each step.

    Args:
        start_values: Current dial values for each axis.
        pvs: Motor PV name prefixes.
        is_moving: Per-axis movement flags (see NOTE below on polarity).
        geometries: Geometry bodies to move and collision-check.
        moves: Move functions applied by move_all.
        ignore: Collision pairs to ignore.
        max_movement: Largest vertex displacement allowed per simulated step.
        max_time: How far into the future to simulate, in seconds.
        time_step: Initial simulation time step, in seconds.

    Returns:
        Tuple (msg, safe_time, safe): a human-readable message, the time up to
        which motion is collision-free, and whether the full window is safe.
    """
    # Get the indices of the axes currently moving
    # NOTE(review): the caller (main) passes values already inverted from DMOV
    # (truthy == moving), so `m == 0` here appears to select the *stationary*
    # axes, contradicting the inline comment below — confirm intended polarity
    # against the caller before changing anything.
    moving = [i for i, m in enumerate(is_moving) if m == 0]  # DMOV = 0 when motors not moving
    msg = "No collisions predicted in the next %fs" % max_time
    safe_time = max_time
    safe = True
    # Only worth calculating if more than one axis is moving
    if len(moving) > 1:
        set_points = [None] * len(pvs)
        speeds = [None] * len(pvs)
        directions = [None] * len(pvs)
        # Assume everything has finished moving
        move_complete = [True] * len(pvs)
        # Get some settings:
        for i in moving:
            pv = pvs[i]
            set_point = get_pv(pv + '.DVAL')
            speed = get_pv(pv + '.VELO')
            direction = 0.
            move = set_point - start_values[i]
            if move > 0:
                direction = 1.
            if move < 0:
                direction = -1.
            set_points[i] = set_point
            speeds[i] = speed
            directions[i] = direction
            # This axis has not finished moving!
            move_complete[i] = False
        current_time = 0.
        values = start_values[:]
        old_points = None
        step_checked = False
        last_time = None
        while current_time < max_time:
            if last_time is None:
                # (Re)start the simulation from the current positions
                values = start_values[:]
                current_time = 0.
                old_points = None
            else:
                current_time += time_step
            for i in moving:
                if move_complete[i] is False:
                    # Constant-velocity extrapolation; clamp at the set point
                    values[i] = start_values[i] + (directions[i] * speeds[i] * current_time)
                    comp = compare(directions[i])(values[i], set_points[i])
                    if comp:
                        values[i] = set_points[i]
            # Move the bodies
            move_all(geometries, moves, values=values)
            if step_checked is False:
                new_points = [g.get_vertices() for g in geometries]
                if old_points is not None:
                    delta = max_delta(geometries, new_points, old_points)
                    if delta > max_movement:
                        # Reduce the size of the time step
                        time_step *= max_movement/delta
                        # Reset to starting point
                        last_time = None
                        old_points = None
                        continue
                # NOTE(review): this runs on the first pass, while old_points
                # is still None, so the delta/adaptive-time-step check above
                # can never fire; it looks like this assignment was meant to
                # sit inside the `if old_points is not None` block — confirm.
                step_checked = True
            # Check for collisions
            collisions = collide(geometries, ignore)
            if any(collisions):
                if last_time is None:
                    msg = "There is already a collision"
                    safe_time = 0.
                else:
                    msg = "Collision expected in %.1fs - %.1fs" % (last_time, current_time)
                    safe_time = last_time
                safe = False
                break
            old_points = new_points[:]
            last_time = current_time
    return msg, safe_time, safe
# Set the high and low dial limits for each motor
def set_limits(limits, pvs):
    """Write each axis's [low, high] pair to the motor's dial-limit fields
    (.DLLM / .DHLM)."""
    for axis_limits, motor_pv in zip(limits, pvs):
        set_pv(motor_pv + '.DLLM', axis_limits[0])
        set_pv(motor_pv + '.DHLM', axis_limits[1])
# Contains operating mode events
class OperatingMode(object):
    """Shared flags controlling how the monitor behaves at runtime.

    Each mode is a threading.Event so other threads (renderer, PV server)
    can flip it safely while the main loop polls it.
    """

    def __init__(self):
        # Set by the render thread to request shutdown of the main loop.
        self.close = threading.Event()
        # When set, the calculated dynamic limits are written to the motors.
        self.set_limits = threading.Event()
        # When set, motors are stopped on a detected collision.
        self.auto_stop = threading.Event()
        # When set, limits are recalculated on the next main-loop pass.
        self.calc_limits = threading.Event()

    def get_operation_mode(self):
        """Return the (auto_stop, set_limits, close) flags as booleans."""
        return self.auto_stop.is_set(), self.set_limits.is_set(), self.close.is_set()

    def set_operation_mode(self, auto_stop, set_limits, close):
        """Set or clear each mode event from the corresponding boolean."""
        for requested, event in ((auto_stop, self.auto_stop),
                                 (set_limits, self.set_limits),
                                 (close, self.close)):
            if requested:
                event.set()
            else:
                event.clear()
# The main routine to execute
def main():
    """Run the collision-avoidance monitor.

    Builds ODE spaces and geometry bodies from the configuration, starts a
    monitor pair (.DRBV position, .DMOV done-moving) for every motor PV, spins
    up the PV server, the collision detector and (unless 'blind' is on the
    command line) the renderer, then loops forever: mirroring motor positions
    into the simulated world, predicting future collisions and recalculating
    soft limits whenever motors move. Restores the configured limits and
    returns when the shared close event is set.
    """
    # Load config:
    colors = config.colors
    moves = config.moves
    ignore = config.ignore
    pvs = config.pvs
    config_limits = config.hardlimits
    old_limits = config_limits[:]
    # Create space objects for the live and rendered world
    space = ode.Space()
    render_space = ode.Space()
    collision_space = ode.Space()
    # Create and populate lists of geometries
    geometries = []
    render_geometries = []
    collision_geometries = []
    for i, geometry in enumerate(config.geometries):
        geometries.append(GeometryBox(space, oversize=config.oversize, **geometry))
        render_geometries.append(GeometryBox(render_space, **geometry))
        collision_geometries.append(GeometryBox(collision_space, oversize=config.oversize, **geometry))
    # Create and populate two lists of monitors
    monitors = []
    is_moving = []
    for pv in pvs:
        m = Monitor(pv + ".DRBV")
        m.start()
        monitors.append(m)
        any_moving = Monitor(pv + ".DMOV")
        any_moving.start()
        is_moving.append(any_moving)
    # Create a shared operating mode object to control the main thread
    op_mode = OperatingMode()
    # Set the default behaviour to set_limits as calculated, and auto_stop on collision
    op_mode.set_limits.set()
    op_mode.auto_stop.set()
    # Start a logger
    logger = IsisLogger()
    # Create a shared render parameter object to update the render thread
    parameters = render.RenderParams()
    if 'blind' not in sys.argv:
        # Initialise the render thread, and set it to daemon - won't prevent the main thread from exiting
        renderer = render.Renderer(parameters, render_geometries, colors, monitors, pvs, moves, op_mode)
        renderer.daemon = True
    # Need to know if this is the first execution of the main loop
    op_mode.calc_limits.set()
    # Initialise the pv server
    # Loop over the pvdb and update the counts based on the number of aves/bodies
    for pv in pv_server.pvdb:
        for key, val in pv_server.pvdb[pv].items():
            if key == 'count':
                if val is pv_server.axis_count:
                    pv_server.pvdb[pv]['count'] = len(config.pvs)
                if val is pv_server.body_count:
                    pv_server.pvdb[pv]['count'] = len(config.geometries)
    driver = pv_server.start_thread(config.control_pv, op_mode)
    driver.setParam('OVERSIZE', config.oversize)
    driver.setParam('COARSE', config.coarse)
    driver.setParam('FINE', config.fine)
    driver.setParam('NAMES', [g['name'] for g in config.geometries])
    # Only report for new collisions
    collision_detector = CollisionDetector(driver, collision_geometries, config.moves, monitors, config.ignore,
                                           is_moving, logger, op_mode, config.pvs)
    collision_detector.start()
    # Main loop
    while True:
        # Freeze the positions of our current monitors by creating some dummies
        # This stops the threads from trying to reading each monitor sequentially, and holding each other up
        frozen = [m.value() for m in monitors]
        # Execute the move
        move_all(geometries, moves, values=frozen)
        # Check if the oversize has been changed, ahead of any collision calcs
        if driver.new_data.isSet():
            for geometry, collision_geometry in zip(geometries, collision_geometries):
                geometry.set_size(oversize=driver.getParam('OVERSIZE'))
                collision_geometry.set_size(oversize=driver.getParam('OVERSIZE'))
            driver.new_data.clear()
            op_mode.calc_limits.set()
        if driver.getParam("CALC") != 0:
            op_mode.calc_limits.set()
        collisions = collision_detector.collisions[:]
        collision_message = collision_detector.message[:]
        # Check if there have been any changes to the .MOVN monitors
        fresh = any([m.fresh() for m in is_moving])
        # Check if any of the motors monitors are moving
        moving = [not m.value() for m in is_moving]  # Invert because DMOV is inverted from MOVN
        any_moving = any(moving)
        new_limits = []
        if fresh or any_moving or op_mode.calc_limits.isSet():
            # Look ahead some time to see if any collisions are going to happen in the future
            # NOTE(review): safe_time is unpacked but never used here.
            msg, safe_time, safe = look_ahead(frozen, config.pvs, moving, geometries, moves, ignore,
                                              max_movement=driver.getParam('COARSE'))
            if not safe and not any(collisions):
                logger.write_to_log(msg, "MAJOR", "COLLIDE")
                driver.setParam('MSG', msg)
            else:
                driver.setParam('MSG', collision_message)
            logging.info(msg)
            # Start timing for diagnostics
            time_passed = time()
            # Seek the correct limit values
            dynamic_limits = auto_seek_limits(geometries, ignore, moves, frozen, config_limits,
                                              coarse=driver.getParam('COARSE'), fine=driver.getParam('FINE'))
            # Calculate and log the time taken to calculate
            time_passed = (time() - time_passed) * 1000
            # Log the new limits
            logging.info("New limits calculated in %dms, are %s" % (time_passed, dynamic_limits))
            # Set the limits according to the set_limits operating mode
            if op_mode.set_limits.is_set():
                # Apply the calculated limits
                new_limits = dynamic_limits[:]
            else:
                # Restore the configuration limits
                new_limits = config_limits[:]
            # Update the render thread parameters
            parameters.update_params(dynamic_limits, collisions, time_passed)
            # # Update the PVs
            driver.setParam('TIME', time_passed)
            driver.setParam('HI_LIM', [l[1] for l in dynamic_limits])
            driver.setParam('LO_LIM', [l[0] for l in dynamic_limits])
            driver.setParam('TRAVEL', [min([l[0] - m, l[1] - m], key=abs)
                                       for l, m in zip(dynamic_limits, frozen)])
            driver.setParam('TRAV_F', [l[1] - m for l, m in zip(dynamic_limits, frozen)])
            driver.setParam('TRAV_R', [l[0] - m for l, m in zip(dynamic_limits, frozen)])
            driver.updatePVs()
            if 'blind' not in sys.argv:
                # On the first run, start the renderer
                # (renderer is only bound when 'blind' is absent, same as here)
                if renderer.is_alive() is False:
                    renderer.start()
            op_mode.calc_limits.clear()
            driver.setParam("CALC", False)
        else:
            # Restore the configuration limits
            if op_mode.set_limits.is_set() is False:
                new_limits = config_limits[:]
        # Stop us overloading the limits
        if not new_limits == old_limits:
            threading.Thread(target=set_limits, args=(new_limits, pvs)).start()
            old_limits = new_limits[:]
        # Exit the program
        if op_mode.close.is_set():
            # Restore the configuration limits
            set_limits(config_limits, pvs)
            return
        # Give the CPU a break
        sleep(0.01)
        if 'return' in sys.argv:
            return
# Execute main
main()
| 33.560924
| 119
| 0.587856
|
import sys
import os
import ode
import logging
import threading
from time import sleep, time
from genie_python.genie_startup import *
import pv_server
import render
from configurations import config_zoom as config
from collide import collide, CollisionDetector
from geometry import GeometryBox
from move import move_all
sys.path.insert(0, os.path.abspath(os.environ["MYDIRCD"]))
from monitor import Monitor
from server_common.loggers.isis_logger import IsisLogger
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(threadName)-2s) %(message)s',
)
def auto_seek(start_step_size, start_values, end_value, geometries, moves, axis_index, ignore, fine_step=None):
    """Step one axis towards end_value until a collision is detected and
    return the last collision-free value (the soft limit); end_value if
    nothing collides. Raises ValueError if no limit could be determined."""
    limit = end_value
    current_value = start_values[axis_index]
    if current_value == end_value:
        return end_value
    values = start_values[:]
    last_value = None
    old_points = None
    step_checked = False
    if current_value < end_value:
        # Seeking upwards: positive step
        def comp(a, b):
            return a < b
        step_size = abs(start_step_size)
    else:
        # Seeking downwards: negative step
        def comp(a, b):
            return a > b
        step_size = -abs(start_step_size)
    while last_value is None or comp(last_value, end_value):
        if last_value is not None:
            current_value += step_size
        else:
            current_value = start_values[axis_index]
        if not comp(current_value, end_value):
            current_value = end_value
        values[axis_index] = current_value
        move_all(geometries, moves, values=values[:])
        if step_checked is False:
            new_points = [g.get_vertices() for g in geometries]
            if old_points is not None:
                delta = max_delta(geometries, new_points, old_points)
                if delta > start_step_size:
                    # Shrink the step and restart the scan from the beginning
                    step_size *= start_step_size/delta
                    last_value = None
                    continue
            # NOTE(review): this runs on the first pass (old_points is None),
            # so the delta check above can never fire — confirm placement.
            step_checked = True
        collisions = collide(geometries, ignore)
        if any(collisions):
            if current_value == start_values[axis_index]:
                # Already colliding at the starting position
                limit = current_value
                break
            elif fine_step and fine_step < step_size:
                # NOTE(review): step_size is negative when seeking downwards,
                # so this refinement never triggers for downward scans —
                # abs(step_size) is presumably intended.
                start_values[axis_index] = last_value
                limit = auto_seek(fine_step, start_values, current_value, geometries, moves, axis_index, ignore)
            else:
                limit = last_value
                break
        old_points = new_points[:]
        last_value = current_value
    if limit is None:
        raise ValueError("Null limit")
    return limit
def max_delta(geometries, new_points, old_points):
    """Return the largest Euclidean distance moved by any vertex of the first
    len(geometries) bodies between old_points and new_points."""
    delta = 0
    for j in range(len(geometries)):
        old = old_points[j]
        new = new_points[j]
        # Per-vertex displacement vectors, each unpacked as (x, y, z) below
        deltas = [map(float, n - o) for n, o in zip(new, old)]
        for i, (x, y, z) in enumerate(deltas):
            # Track the maximum squared magnitude; sqrt applied once at the end
            mag = float(x) ** 2 + float(y) ** 2 + float(z) ** 2
            if mag > delta:
                delta = mag
    delta = float(delta) ** 0.5
    return delta
def compare(sign):
    """Return a strict comparison predicate for the direction of travel:
    "a > b" for positive sign, otherwise "a < b"."""
    if sign > 0:
        return lambda a, b: a > b
    else:
        return lambda a, b: a < b
def auto_seek_limits(geometries, ignore, moves, values, limits, coarse=1.0, fine=0.1):
    """Seek the collision-free [lower, upper] limit pair for every axis using
    auto_seek, coarse step first, refined with the fine step."""
    dynamic_limits = []
    for i in range(len(values)):
        logging.debug("Seeking for axis %d" % i)
        lower_limit = auto_seek(coarse, values[:], min(limits[i]), geometries, moves, i, ignore, fine)
        upper_limit = auto_seek(coarse, values[:], max(limits[i]), geometries, moves, i, ignore, fine)
        dynamic_limits.append([lower_limit, upper_limit])
        # NOTE(review): this logs upper before lower, opposite to the order
        # of the returned pair.
        logging.debug("Found limits for axis %d at %s, %s" % (i, upper_limit, lower_limit))
    return dynamic_limits
def look_ahead(start_values, pvs, is_moving, geometries, moves, ignore, max_movement=1.0, max_time=10., time_step=0.1):
    """Simulate in-flight motor moves forward in time (reading .DVAL/.VELO
    per axis) and return (msg, safe_time, safe) describing whether a collision
    is expected within max_time seconds."""
    # NOTE(review): the caller passes values inverted from DMOV (truthy ==
    # moving), so `m == 0` appears to select the *stationary* axes — confirm
    # the intended polarity against the caller.
    moving = [i for i, m in enumerate(is_moving) if m == 0]
    msg = "No collisions predicted in the next %fs" % max_time
    safe_time = max_time
    safe = True
    # Only worth simulating when more than one axis is in motion
    if len(moving) > 1:
        set_points = [None] * len(pvs)
        speeds = [None] * len(pvs)
        directions = [None] * len(pvs)
        move_complete = [True] * len(pvs)
        for i in moving:
            pv = pvs[i]
            set_point = get_pv(pv + '.DVAL')
            speed = get_pv(pv + '.VELO')
            direction = 0.
            move = set_point - start_values[i]
            if move > 0:
                direction = 1.
            if move < 0:
                direction = -1.
            set_points[i] = set_point
            speeds[i] = speed
            directions[i] = direction
            move_complete[i] = False
        current_time = 0.
        values = start_values[:]
        old_points = None
        step_checked = False
        last_time = None
        while current_time < max_time:
            if last_time is None:
                # (Re)start the simulation from the current positions
                values = start_values[:]
                current_time = 0.
                old_points = None
            else:
                current_time += time_step
            for i in moving:
                if move_complete[i] is False:
                    # Constant-velocity extrapolation, clamped at the set point
                    values[i] = start_values[i] + (directions[i] * speeds[i] * current_time)
                    comp = compare(directions[i])(values[i], set_points[i])
                    if comp:
                        values[i] = set_points[i]
            move_all(geometries, moves, values=values)
            if step_checked is False:
                new_points = [g.get_vertices() for g in geometries]
                if old_points is not None:
                    delta = max_delta(geometries, new_points, old_points)
                    if delta > max_movement:
                        # Shrink the time step and restart the simulation
                        time_step *= max_movement/delta
                        last_time = None
                        old_points = None
                        continue
                # NOTE(review): runs on the first pass (old_points is None),
                # so the delta check above can never fire — confirm placement.
                step_checked = True
            collisions = collide(geometries, ignore)
            if any(collisions):
                if last_time is None:
                    msg = "There is already a collision"
                    safe_time = 0.
                else:
                    msg = "Collision expected in %.1fs - %.1fs" % (last_time, current_time)
                    safe_time = last_time
                safe = False
                break
            old_points = new_points[:]
            last_time = current_time
    return msg, safe_time, safe
def set_limits(limits, pvs):
    """Write each axis's [low, high] limit pair to the motor's dial-limit
    fields (.DLLM / .DHLM)."""
    for limit, pv in zip(limits, pvs):
        set_pv(pv + '.DLLM', limit[0])
        set_pv(pv + '.DHLM', limit[1])
class OperatingMode(object):
    """Shared flags controlling the monitor's runtime behaviour; each is a
    threading.Event so other threads can flip it while the main loop polls."""
    def __init__(self):
        # Request shutdown of the main loop
        self.close = threading.Event()
        # Write calculated dynamic limits to the motors
        self.set_limits = threading.Event()
        # Stop motors on a detected collision
        self.auto_stop = threading.Event()
        # Recalculate limits on the next main-loop pass
        self.calc_limits = threading.Event()
    def get_operation_mode(self):
        """Return the (auto_stop, set_limits, close) flags as booleans."""
        return self.auto_stop.is_set(), self.set_limits.is_set(), self.close.is_set()
    def set_operation_mode(self, auto_stop, set_limits, close):
        """Set or clear each mode event from the corresponding boolean."""
        if auto_stop:
            self.auto_stop.set()
        else:
            self.auto_stop.clear()
        if set_limits:
            self.set_limits.set()
        else:
            self.set_limits.clear()
        if close:
            self.close.set()
        else:
            self.close.clear()
def main():
    """Run the collision-avoidance monitor: build the simulated world from
    the configuration, start PV monitors, PV server, collision detector and
    (unless 'blind' is given) the renderer, then loop forever mirroring motor
    positions, predicting collisions and recalculating soft limits."""
    colors = config.colors
    moves = config.moves
    ignore = config.ignore
    pvs = config.pvs
    config_limits = config.hardlimits
    old_limits = config_limits[:]
    # Separate ODE spaces for the live, rendered and look-ahead worlds
    space = ode.Space()
    render_space = ode.Space()
    collision_space = ode.Space()
    geometries = []
    render_geometries = []
    collision_geometries = []
    for i, geometry in enumerate(config.geometries):
        geometries.append(GeometryBox(space, oversize=config.oversize, **geometry))
        render_geometries.append(GeometryBox(render_space, **geometry))
        collision_geometries.append(GeometryBox(collision_space, oversize=config.oversize, **geometry))
    # One position (.DRBV) and one done-moving (.DMOV) monitor per motor
    monitors = []
    is_moving = []
    for pv in pvs:
        m = Monitor(pv + ".DRBV")
        m.start()
        monitors.append(m)
        any_moving = Monitor(pv + ".DMOV")
        any_moving.start()
        is_moving.append(any_moving)
    # Default behaviour: apply calculated limits, auto-stop on collision
    op_mode = OperatingMode()
    op_mode.set_limits.set()
    op_mode.auto_stop.set()
    logger = IsisLogger()
    parameters = render.RenderParams()
    if 'blind' not in sys.argv:
        # Daemon thread: will not keep the process alive on exit
        renderer = render.Renderer(parameters, render_geometries, colors, monitors, pvs, moves, op_mode)
        renderer.daemon = True
    # Need to know if this is the first execution of the main loop
    op_mode.calc_limits.set()
    # Initialise the pv server
    # Loop over the pvdb and update the counts based on the number of aves/bodies
    for pv in pv_server.pvdb:
        for key, val in pv_server.pvdb[pv].items():
            if key == 'count':
                if val is pv_server.axis_count:
                    pv_server.pvdb[pv]['count'] = len(config.pvs)
                if val is pv_server.body_count:
                    pv_server.pvdb[pv]['count'] = len(config.geometries)
    driver = pv_server.start_thread(config.control_pv, op_mode)
    driver.setParam('OVERSIZE', config.oversize)
    driver.setParam('COARSE', config.coarse)
    driver.setParam('FINE', config.fine)
    driver.setParam('NAMES', [g['name'] for g in config.geometries])
    # Only report for new collisions
    collision_detector = CollisionDetector(driver, collision_geometries, config.moves, monitors, config.ignore,
                                           is_moving, logger, op_mode, config.pvs)
    collision_detector.start()
    # Main loop
    while True:
        # Freeze the positions of our current monitors by creating some dummies
        # This stops the threads from trying to reading each monitor sequentially, and holding each other up
        frozen = [m.value() for m in monitors]
        # Execute the move
        move_all(geometries, moves, values=frozen)
        # Check if the oversize has been changed, ahead of any collision calcs
        if driver.new_data.isSet():
            for geometry, collision_geometry in zip(geometries, collision_geometries):
                geometry.set_size(oversize=driver.getParam('OVERSIZE'))
                collision_geometry.set_size(oversize=driver.getParam('OVERSIZE'))
            driver.new_data.clear()
            op_mode.calc_limits.set()
        if driver.getParam("CALC") != 0:
            op_mode.calc_limits.set()
        collisions = collision_detector.collisions[:]
        collision_message = collision_detector.message[:]
        # Check if there have been any changes to the .MOVN monitors
        fresh = any([m.fresh() for m in is_moving])
        # Check if any of the motors monitors are moving
        moving = [not m.value() for m in is_moving]  # Invert because DMOV is inverted from MOVN
        any_moving = any(moving)
        new_limits = []
        if fresh or any_moving or op_mode.calc_limits.isSet():
            # Look ahead some time to see if any collisions are going to happen in the future
            msg, safe_time, safe = look_ahead(frozen, config.pvs, moving, geometries, moves, ignore,
                                              max_movement=driver.getParam('COARSE'))
            if not safe and not any(collisions):
                logger.write_to_log(msg, "MAJOR", "COLLIDE")
                driver.setParam('MSG', msg)
            else:
                driver.setParam('MSG', collision_message)
            logging.info(msg)
            # Start timing for diagnostics
            time_passed = time()
            # Seek the correct limit values
            dynamic_limits = auto_seek_limits(geometries, ignore, moves, frozen, config_limits,
                                              coarse=driver.getParam('COARSE'), fine=driver.getParam('FINE'))
            # Calculate and log the time taken to calculate
            time_passed = (time() - time_passed) * 1000
            # Log the new limits
            logging.info("New limits calculated in %dms, are %s" % (time_passed, dynamic_limits))
            # Set the limits according to the set_limits operating mode
            if op_mode.set_limits.is_set():
                # Apply the calculated limits
                new_limits = dynamic_limits[:]
            else:
                # Restore the configuration limits
                new_limits = config_limits[:]
            # Update the render thread parameters
            parameters.update_params(dynamic_limits, collisions, time_passed)
            # # Update the PVs
            driver.setParam('TIME', time_passed)
            driver.setParam('HI_LIM', [l[1] for l in dynamic_limits])
            driver.setParam('LO_LIM', [l[0] for l in dynamic_limits])
            driver.setParam('TRAVEL', [min([l[0] - m, l[1] - m], key=abs)
                                       for l, m in zip(dynamic_limits, frozen)])
            driver.setParam('TRAV_F', [l[1] - m for l, m in zip(dynamic_limits, frozen)])
            driver.setParam('TRAV_R', [l[0] - m for l, m in zip(dynamic_limits, frozen)])
            driver.updatePVs()
            if 'blind' not in sys.argv:
                # On the first run, start the renderer
                if renderer.is_alive() is False:
                    renderer.start()
            op_mode.calc_limits.clear()
            driver.setParam("CALC", False)
        else:
            # Restore the configuration limits
            if op_mode.set_limits.is_set() is False:
                new_limits = config_limits[:]
        # Stop us overloading the limits
        if not new_limits == old_limits:
            threading.Thread(target=set_limits, args=(new_limits, pvs)).start()
            old_limits = new_limits[:]
        # Exit the program
        if op_mode.close.is_set():
            # Restore the configuration limits
            set_limits(config_limits, pvs)
            return
        # Give the CPU a break
        sleep(0.01)
        if 'return' in sys.argv:
            return
# Execute main
main()
| true
| true
|
f704aebb270ae5ced9b2f8e4f29e963b3e7dd7bd
| 3,084
|
py
|
Python
|
rbflayer.py
|
edwardstm/rbf_keras
|
4029d1c15003438f7caadb9efefe0c026ba18933
|
[
"MIT"
] | 1
|
2020-04-20T12:34:06.000Z
|
2020-04-20T12:34:06.000Z
|
rbflayer.py
|
edwardstm/rbf_keras
|
4029d1c15003438f7caadb9efefe0c026ba18933
|
[
"MIT"
] | null | null | null |
rbflayer.py
|
edwardstm/rbf_keras
|
4029d1c15003438f7caadb9efefe0c026ba18933
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras.initializers import RandomUniform, Initializer, Constant
import numpy as np
class InitCentersRandom(Initializer):
    """ Initializer for initialization of centers of RBF network
    as random samples from the given data set.
    # Arguments
    X: matrix, dataset to choose the centers from (random rows
    are taken as centers)
    """
    def __init__(self, X):
        self.X = X
    def __call__(self, shape, dtype=None):
        # The centers must share the data set's feature dimension.
        assert shape[1] == self.X.shape[1]
        # Pick shape[0] random rows of X as the initial centers.
        # NOTE(review): the `dtype` argument is ignored, and indexing X with a
        # tf tensor assumes X's type converts it in eager mode — confirm.
        idx = tf.constant( np.random.randint(self.X.shape[0], size=shape[0]) )
        return self.X[idx, :]
class RBFLayer(Layer):
    """ Layer of Gaussian RBF units.
    # Example
    ```python
    model = Sequential()
    model.add(RBFLayer(10,
                       initializer=InitCentersRandom(X),
                       betas=1.0,
                       input_shape=(1,)))
    model.add(Dense(1))
    ```
    # Arguments
    output_dim: number of hidden units (i.e. number of outputs of the
    layer)
    initializer: instance of initiliazer to initialize centers
    betas: float, initial value for betas
    """
    def __init__(self, output_dim, initializer=None, betas=1.0, **kwargs):
        self.output_dim = output_dim
        self.init_betas = betas
        # Default to uniform-random centers when no initializer is supplied.
        if not initializer:
            self.initializer = RandomUniform(0.0, 1.0)
        else:
            self.initializer = initializer
        super(RBFLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # One center per unit, matching the input feature dimension;
        # one trainable beta (width) per unit.
        self.centers = self.add_weight(name='centers',
                                       shape=(self.output_dim, input_shape[1]),
                                       initializer=self.initializer,
                                       trainable=True)
        self.betas = self.add_weight(name='betas',
                                     shape=(self.output_dim,),
                                     initializer=Constant(
                                         value=self.init_betas),
                                     trainable=True)
        super(RBFLayer, self).build(input_shape)
    def call(self, x):
        # exp(-beta * ||x - c||^2) for every (sample, center) pair.
        C = K.expand_dims(self.centers)
        H = K.transpose(C-K.transpose(x))
        return K.exp(-self.betas * K.sum(H**2, axis=1))
    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
    def get_config(self):
        # have to define get_config to be able to use model_from_json
        # BUG FIX: also serialize `betas`; previously only output_dim was
        # saved, so a deserialized layer silently fell back to betas=1.0.
        config = {
            'output_dim': self.output_dim,
            'betas': self.init_betas
        }
        base_config = super(RBFLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| 32.463158
| 79
| 0.552205
|
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras.initializers import RandomUniform, Initializer, Constant
import numpy as np
class InitCentersRandom(Initializer):
    """Initializer that takes random rows of the data set X as the initial
    RBF centers."""
    def __init__(self, X):
        self.X = X
    def __call__(self, shape, dtype=None):
        # Centers must share X's feature dimension; dtype is ignored.
        assert shape[1] == self.X.shape[1]
        idx = tf.constant( np.random.randint(self.X.shape[0], size=shape[0]) )
        return self.X[idx, :]
class RBFLayer(Layer):
    """Layer of Gaussian RBF units: output_dim trainable centers with
    per-unit trainable betas, computing exp(-beta * ||x - c||^2)."""
    def __init__(self, output_dim, initializer=None, betas=1.0, **kwargs):
        self.output_dim = output_dim
        self.init_betas = betas
        # Default to uniform-random centers when no initializer is supplied
        if not initializer:
            self.initializer = RandomUniform(0.0, 1.0)
        else:
            self.initializer = initializer
        super(RBFLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # One center per unit (matching the input feature dimension) and one
        # trainable beta per unit
        self.centers = self.add_weight(name='centers',
                                       shape=(self.output_dim, input_shape[1]),
                                       initializer=self.initializer,
                                       trainable=True)
        self.betas = self.add_weight(name='betas',
                                     shape=(self.output_dim,),
                                     initializer=Constant(
                                         value=self.init_betas),
                                     trainable=True)
        super(RBFLayer, self).build(input_shape)
    def call(self, x):
        # Pairwise squared distances between inputs and centers
        C = K.expand_dims(self.centers)
        H = K.transpose(C-K.transpose(x))
        return K.exp(-self.betas * K.sum(H**2, axis=1))
    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
    def get_config(self):
        # Needed so model_from_json can rebuild the layer.
        # NOTE(review): betas is not serialized here, so a reloaded model
        # re-initializes betas to the default — confirm if intended.
        config = {
            'output_dim': self.output_dim
        }
        base_config = super(RBFLayer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| true
| true
|
f704af9532b46b3dbbc7284a567541674794e691
| 9,805
|
py
|
Python
|
tools/data/build_rawframes.py
|
vt-vl-lab/video-data-aug
|
01667cdbd1b952f2510af3422beeeb76e0d9e15a
|
[
"Apache-2.0"
] | 20
|
2021-03-31T02:25:20.000Z
|
2022-03-11T11:45:59.000Z
|
tools/data/build_rawframes.py
|
vt-vl-lab/video-data-aug
|
01667cdbd1b952f2510af3422beeeb76e0d9e15a
|
[
"Apache-2.0"
] | 6
|
2021-05-27T18:08:39.000Z
|
2022-03-23T14:00:51.000Z
|
tools/data/build_rawframes.py
|
vt-vl-lab/video-data-aug
|
01667cdbd1b952f2510af3422beeeb76e0d9e15a
|
[
"Apache-2.0"
] | 4
|
2021-03-31T03:11:45.000Z
|
2021-08-22T11:11:45.000Z
|
import argparse
import glob
import os
import os.path as osp
import sys
import warnings
from multiprocessing import Pool
import mmcv
import numpy as np
# custom import
import pandas as pd
import pdb
def extract_frame(vid_item):
    """Extract rgb frames and/or optical flow for one video via denseflow/OpenCV.

    Relies on the module-level ``args`` namespace populated in ``__main__``.

    Args:
        vid_item (tuple): (video full path, video (short) path, video id,
            flow method, task type).

    Returns:
        bool: True once extraction commands have been issued.
    """
    full_path, vid_path, vid_id, method, task = vid_item
    # Level-2 layout ('<class>/<video>'): mirror the class folder in out_dir.
    if '/' in vid_path:
        act_name = osp.basename(osp.dirname(vid_path))
        out_full_path = osp.join(args.out_dir, act_name)
    else:
        out_full_path = args.out_dir

    # NOTE: the osp.join(...) wrappers below receive a SINGLE implicitly
    # concatenated f-string, so they return the command string unchanged.
    if task == 'rgb':
        if args.use_opencv:
            # Not like using denseflow,
            # Use OpenCV will not make a sub directory with the video name
            video_name = osp.splitext(osp.basename(vid_path))[0]
            out_full_path = osp.join(out_full_path, video_name)

            vr = mmcv.VideoReader(full_path)
            for i in range(len(vr)):
                if vr[i] is not None:
                    w, h, c = np.shape(vr[i])
                    if args.new_short == 0:
                        out_img = mmcv.imresize(vr[i], (args.new_width,
                                                        args.new_height))
                    else:
                        # Resize so the short side equals new_short,
                        # preserving the aspect ratio.
                        if min(h, w) == h:
                            new_h = args.new_short
                            new_w = int((new_h / h) * w)
                        else:
                            new_w = args.new_short
                            new_h = int((new_w / w) * h)
                        out_img = mmcv.imresize(vr[i], (new_h, new_w))
                    mmcv.imwrite(out_img,
                                 f'{out_full_path}/img_{i + 1:05d}.jpg')
                else:
                    # Decoder returned fewer frames than the header promised.
                    warnings.warn(
                        'Length inconsistent!'
                        f'Early stop with {i + 1} out of {len(vr)} frames.')
                    break
        else:
            if args.new_short == 0:
                cmd = osp.join(
                    f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
                    f' -nw={args.new_width} -nh={args.new_height} -v')
            else:
                cmd = osp.join(
                    f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
                    f' -ns={args.new_short} -v')
            os.system(cmd)
    elif task == 'flow':
        # NOTE(review): the flow branches pass '--nh' while the rgb branch
        # uses '-nh' — verify against denseflow's CLI that both are accepted.
        if args.input_frames:
            # '--if' tells denseflow the input is a frame folder, not a video.
            if args.new_short == 0:
                cmd = osp.join(
                    f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"  # noqa: E501
                    f' -nw={args.new_width} --nh={args.new_height} -v --if')
            else:
                cmd = osp.join(
                    f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"  # noqa: E501
                    f' -ns={args.new_short} -v --if')
        else:
            if args.new_short == 0:
                cmd = osp.join(
                    f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"  # noqa: E501
                    f' -nw={args.new_width} --nh={args.new_height} -v')
            else:
                cmd = osp.join(
                    f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"  # noqa: E501
                    f' -ns={args.new_short} -v')
        os.system(cmd)
    else:
        # task == 'both': run rgb extraction and flow extraction back to back.
        if args.new_short == 0:
            cmd_rgb = osp.join(
                f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
                f' -nw={args.new_width} -nh={args.new_height} -v')
            cmd_flow = osp.join(
                f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"  # noqa: E501
                f' -nw={args.new_width} -nh={args.new_height} -v')
        else:
            cmd_rgb = osp.join(
                f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
                f' -ns={args.new_short} -v')
            cmd_flow = osp.join(
                f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"  # noqa: E501
                f' -ns={args.new_short} -v')
        os.system(cmd_rgb)
        os.system(cmd_flow)
    print(f'{task} {vid_id} {vid_path} {method} done')
    sys.stdout.flush()
    return True
def parse_args():
    """Parse the command-line options controlling raw-frame extraction."""
    arg_parser = argparse.ArgumentParser(description='extract optical flows')
    arg_parser.add_argument('src_dir', type=str, help='source video directory')
    arg_parser.add_argument('out_dir', type=str, help='output rawframe directory')
    arg_parser.add_argument(
        '--task',
        type=str,
        default='flow',
        choices=['rgb', 'flow', 'both'],
        help='which type of frames to be extracted')
    arg_parser.add_argument(
        '--level',
        type=int,
        choices=[1, 2],
        default=2,
        help='directory level of data')
    arg_parser.add_argument(
        '--num-worker',
        type=int,
        default=8,
        help='number of workers to build rawframes')
    arg_parser.add_argument(
        '--flow-type',
        type=str,
        default=None,
        choices=[None, 'tvl1', 'warp_tvl1', 'farn', 'brox'],
        help='flow type to be generated')
    arg_parser.add_argument(
        '--out-format',
        type=str,
        default='jpg',
        choices=['jpg', 'h5', 'png'],
        help='output format')
    arg_parser.add_argument(
        '--ext',
        type=str,
        default='avi',
        choices=['avi', 'mp4', 'webm'],
        help='video file extensions')
    arg_parser.add_argument(
        '--new-width', type=int, default=0, help='resize image width')
    arg_parser.add_argument(
        '--new-height', type=int, default=0, help='resize image height')
    arg_parser.add_argument(
        '--new-short',
        type=int,
        default=0,
        help='resize image short side length keeping ratio')
    arg_parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU')
    arg_parser.add_argument(
        '--resume',
        action='store_true',
        default=False,
        help='resume optical flow extraction instead of overwriting')
    arg_parser.add_argument(
        '--use-opencv',
        action='store_true',
        help='Whether to use opencv to extract rgb frames')
    arg_parser.add_argument(
        '--input-frames',
        action='store_true',
        help='Whether to extract flow frames based on rgb frames')
    arg_parser.add_argument(
        '--ref_listfile_path', type=str, default='', help='reference listfile path for the subset')
    # Parse sys.argv and hand the namespace straight back to the caller.
    return arg_parser.parse_args()
def get_subset_classes(ref_listfile_path):
    """Return the ordered, de-duplicated class names found in a listfile.

    The listfile is '*'-separated; the class name is the second '/'-component
    of the first column.  Spaces and parentheses are normalized to '_'/'-'.
    """
    frame = pd.read_csv(ref_listfile_path, header=None, sep='*')
    subset_classes = []
    seen = set()
    for row in frame.values:
        name = row[0].split('/')[1]
        name = name.replace(' ', '_').replace('(', '-').replace(')', '-')
        # Preserve first-seen order while skipping duplicates.
        if name not in seen:
            seen.add(name)
            subset_classes.append(name)
    return subset_classes
def filter_vid_list(vid_list, src_dir, ref_listfile_path):
    """Keep only the videos whose class appears in the reference listfile.

    Fix: the previous version zipped against the module-level global
    ``fullpath_list`` (NameError when called standalone, and silently
    mispaired entries if lengths differed) while never using the paired
    value; the full paths are now rebuilt from ``src_dir`` and the
    relative path alone.

    Args:
        vid_list: relative video paths of the form '<class>/<video>'.
        src_dir: source directory the full paths are rebuilt from.
        ref_listfile_path: '*'-separated listfile defining the class subset.

    Returns:
        tuple: (filtered relative paths, matching full paths).
    """
    # Set membership makes the per-video check O(1).
    subset_classes = set(get_subset_classes(ref_listfile_path))
    filtered_vid_list = []
    filtered_full_path_list = []
    for vid in vid_list:
        # The class name is the first component of the relative path.
        if vid.split('/')[0] in subset_classes:
            filtered_vid_list.append(vid)
            filtered_full_path_list.append(os.path.join(src_dir, vid))
    return filtered_vid_list, filtered_full_path_list
if __name__ == '__main__':
    # ``args`` is module-level on purpose: the Pool workers' extract_frame
    # reads it as a global.
    args = parse_args()

    if not osp.isdir(args.out_dir):
        print(f'Creating folder: {args.out_dir}')
        os.makedirs(args.out_dir)

    if args.level == 2:
        # Pre-create one output sub-folder per class.
        if args.ref_listfile_path != '':
            classes = get_subset_classes(args.ref_listfile_path)
        else:
            classes = os.listdir(args.src_dir)
        for classname in classes:
            new_dir = osp.join(args.out_dir, classname)
            if not osp.isdir(new_dir):
                print(f'Creating folder: {new_dir}')
                os.makedirs(new_dir)

    if args.input_frames:
        print('Reading rgb frames from folder: ', args.src_dir)
        fullpath_list = glob.glob(args.src_dir + '/*' * args.level)
        done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
        print('Total number of rgb frame folders found: ', len(fullpath_list))
    else:
        print('Reading videos from folder: ', args.src_dir)
        print('Extension of videos: ', args.ext)
        fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' +
                                  args.ext)
        done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
        print('Total number of videos found: ', len(fullpath_list))

    if args.resume:
        # NOTE(review): input paths and output folder paths live in different
        # trees (and extensions), so this set difference rarely removes
        # anything — confirm the resume semantics against the output layout.
        fullpath_list = set(fullpath_list).difference(set(done_fullpath_list))
        fullpath_list = list(fullpath_list)
        print('Resuming. number of videos to be done: ', len(fullpath_list))

    if args.level == 2:
        vid_list = list(
            map(
                lambda p: osp.join(
                    osp.basename(osp.dirname(p)), osp.basename(p)),
                fullpath_list))
    elif args.level == 1:
        # Fix: the basenames were previously tuple-unpacked into
        # (vid_list, fullpath_list), which crashes unless exactly two videos
        # exist; keep fullpath_list intact and store only the basenames.
        vid_list = list(map(lambda p: osp.basename(p), fullpath_list))

    if args.ref_listfile_path != '':
        vid_list, fullpath_list = filter_vid_list(vid_list, args.src_dir,
                                                  args.ref_listfile_path)

    pool = Pool(args.num_worker)
    pool.map(
        extract_frame,
        zip(fullpath_list, vid_list, range(len(vid_list)),
            len(vid_list) * [args.flow_type],
            len(vid_list) * [args.task]))
| 37.140152
| 107
| 0.544416
|
import argparse
import glob
import os
import os.path as osp
import sys
import warnings
from multiprocessing import Pool
import mmcv
import numpy as np
import pandas as pd
import pdb
def extract_frame(vid_item):
full_path, vid_path, vid_id, method, task = vid_item
if '/' in vid_path:
act_name = osp.basename(osp.dirname(vid_path))
out_full_path = osp.join(args.out_dir, act_name)
else:
out_full_path = args.out_dir
if task == 'rgb':
if args.use_opencv:
video_name = osp.splitext(osp.basename(vid_path))[0]
out_full_path = osp.join(out_full_path, video_name)
vr = mmcv.VideoReader(full_path)
for i in range(len(vr)):
if vr[i] is not None:
w, h, c = np.shape(vr[i])
if args.new_short == 0:
out_img = mmcv.imresize(vr[i], (args.new_width,
args.new_height))
else:
if min(h, w) == h:
new_h = args.new_short
new_w = int((new_h / h) * w)
else:
new_w = args.new_short
new_h = int((new_w / w) * h)
out_img = mmcv.imresize(vr[i], (new_h, new_w))
mmcv.imwrite(out_img,
f'{out_full_path}/img_{i + 1:05d}.jpg')
else:
warnings.warn(
'Length inconsistent!'
f'Early stop with {i + 1} out of {len(vr)} frames.')
break
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
os.system(cmd)
elif task == 'flow':
if args.input_frames:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"
f' -nw={args.new_width} --nh={args.new_height} -v --if')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"
f' -ns={args.new_short} -v --if')
else:
if args.new_short == 0:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"
f' -nw={args.new_width} --nh={args.new_height} -v')
else:
cmd = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
os.system(cmd)
else:
if args.new_short == 0:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"
f' -nw={args.new_width} -nh={args.new_height} -v')
else:
cmd_rgb = osp.join(
f"denseflow '{full_path}' -b=20 -s=0 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
cmd_flow = osp.join(
f"denseflow '{full_path}' -a={method} -b=20 -s=1 -o='{out_full_path}'"
f' -ns={args.new_short} -v')
os.system(cmd_rgb)
os.system(cmd_flow)
print(f'{task} {vid_id} {vid_path} {method} done')
sys.stdout.flush()
return True
def parse_args():
parser = argparse.ArgumentParser(description='extract optical flows')
parser.add_argument('src_dir', type=str, help='source video directory')
parser.add_argument('out_dir', type=str, help='output rawframe directory')
parser.add_argument(
'--task',
type=str,
default='flow',
choices=['rgb', 'flow', 'both'],
help='which type of frames to be extracted')
parser.add_argument(
'--level',
type=int,
choices=[1, 2],
default=2,
help='directory level of data')
parser.add_argument(
'--num-worker',
type=int,
default=8,
help='number of workers to build rawframes')
parser.add_argument(
'--flow-type',
type=str,
default=None,
choices=[None, 'tvl1', 'warp_tvl1', 'farn', 'brox'],
help='flow type to be generated')
parser.add_argument(
'--out-format',
type=str,
default='jpg',
choices=['jpg', 'h5', 'png'],
help='output format')
parser.add_argument(
'--ext',
type=str,
default='avi',
choices=['avi', 'mp4', 'webm'],
help='video file extensions')
parser.add_argument(
'--new-width', type=int, default=0, help='resize image width')
parser.add_argument(
'--new-height', type=int, default=0, help='resize image height')
parser.add_argument(
'--new-short',
type=int,
default=0,
help='resize image short side length keeping ratio')
parser.add_argument('--num-gpu', type=int, default=8, help='number of GPU')
parser.add_argument(
'--resume',
action='store_true',
default=False,
help='resume optical flow extraction instead of overwriting')
parser.add_argument(
'--use-opencv',
action='store_true',
help='Whether to use opencv to extract rgb frames')
parser.add_argument(
'--input-frames',
action='store_true',
help='Whether to extract flow frames based on rgb frames')
parser.add_argument(
'--ref_listfile_path', type=str, default='', help='reference listfile path for the subset')
args = parser.parse_args()
return args
def get_subset_classes(ref_listfile_path):
df = pd.read_csv(ref_listfile_path, header=None, sep='*')
cur_data = df.values
subset_classes = []
for i,row in enumerate(cur_data):
cur_cls = row[0].split('/')[1]
cur_cls = cur_cls.replace(' ', '_').replace('(', '-').replace(')', '-')
if cur_cls not in subset_classes:
subset_classes.append(cur_cls)
return subset_classes
def filter_vid_list(vid_list, src_dir, ref_listfile_path):
subset_classes = get_subset_classes(ref_listfile_path)
filtered_vid_list = []
filtered_full_path_list = []
for vid,fpath in zip(vid_list,fullpath_list):
cur_cls = vid.split('/')[0]
if cur_cls in subset_classes:
filtered_vid_list.append(vid)
filtered_full_path_list.append(os.path.join(src_dir, vid))
return filtered_vid_list, filtered_full_path_list
if __name__ == '__main__':
args = parse_args()
if not osp.isdir(args.out_dir):
print(f'Creating folder: {args.out_dir}')
os.makedirs(args.out_dir)
if args.level == 2:
if args.ref_listfile_path != '':
classes = get_subset_classes(args.ref_listfile_path)
else:
classes = os.listdir(args.src_dir)
for classname in classes:
new_dir = osp.join(args.out_dir, classname)
if not osp.isdir(new_dir):
print(f'Creating folder: {new_dir}')
os.makedirs(new_dir)
if args.input_frames:
print('Reading rgb frames from folder: ', args.src_dir)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level)
done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
print('Total number of rgb frame folders found: ', len(fullpath_list))
else:
print('Reading videos from folder: ', args.src_dir)
print('Extension of videos: ', args.ext)
fullpath_list = glob.glob(args.src_dir + '/*' * args.level + '.' +
args.ext)
done_fullpath_list = glob.glob(args.out_dir + '/*' * args.level)
print('Total number of videos found: ', len(fullpath_list))
if args.resume:
fullpath_list = set(fullpath_list).difference(set(done_fullpath_list))
fullpath_list = list(fullpath_list)
print('Resuming. number of videos to be done: ', len(fullpath_list))
if args.level == 2:
vid_list = list(
map(
lambda p: osp.join(
osp.basename(osp.dirname(p)), osp.basename(p)),
fullpath_list))
elif args.level == 1:
vid_list, fullpath_list = list(map(lambda p: osp.basename(p), fullpath_list))
if args.ref_listfile_path != '':
vid_list, fullpath_list = filter_vid_list(vid_list, args.src_dir, args.ref_listfile_path)
pool = Pool(args.num_worker)
pool.map(
extract_frame,
zip(fullpath_list, vid_list, range(len(vid_list)),
len(vid_list) * [args.flow_type],
len(vid_list) * [args.task]))
| true
| true
|
f704b08c47257333fbfca18d0f73801f0031cb66
| 1,991
|
py
|
Python
|
cliff/tests/test_formatters_value.py
|
serrollc/cliff
|
1dd3edafab1a34d194c4510310653ad7e2cbb582
|
[
"Apache-2.0"
] | null | null | null |
cliff/tests/test_formatters_value.py
|
serrollc/cliff
|
1dd3edafab1a34d194c4510310653ad7e2cbb582
|
[
"Apache-2.0"
] | null | null | null |
cliff/tests/test_formatters_value.py
|
serrollc/cliff
|
1dd3edafab1a34d194c4510310653ad7e2cbb582
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from cliff.formatters import value
from cliff.tests import test_columns
def test_value_formatter():
    """One value per line; quotes in the data are emitted verbatim."""
    formatter = value.ValueFormatter()
    columns = ('a', 'b', 'c', 'd')
    row = ('A', 'B', 'C', '"no escape me"')
    out = six.StringIO()
    formatter.emit_one(columns, row, out, None)
    assert out.getvalue() == 'A\nB\nC\n"no escape me"\n'
def test_value_formatter_formattable_column():
    """Formattable columns render via their machine-readable representation."""
    formatter = value.ValueFormatter()
    columns = ('a', 'b', 'c', 'd')
    row = ('A', 'B', 'C', test_columns.FauxColumn(['the', 'value']))
    out = six.StringIO()
    formatter.emit_one(columns, row, out, None)
    assert out.getvalue() == "A\nB\nC\n['the', 'value']\n"
def test_value_list_formatter():
    """List output: one space-joined row per line."""
    formatter = value.ValueFormatter()
    columns = ('a', 'b', 'c')
    rows = [('A', 'B', 'C'), ('D', 'E', 'F')]
    out = six.StringIO()
    formatter.emit_list(columns, rows, out, None)
    assert out.getvalue() == 'A B C\nD E F\n'
def test_value_list_formatter_formattable_column():
    """Formattable columns in list output use their machine representation."""
    formatter = value.ValueFormatter()
    columns = ('a', 'b', 'c')
    rows = [('A', 'B', test_columns.FauxColumn(['the', 'value']))]
    out = six.StringIO()
    formatter.emit_list(columns, rows, out, None)
    assert out.getvalue() == "A B ['the', 'value']\n"
| 30.166667
| 76
| 0.631341
|
import six
from cliff.formatters import value
from cliff.tests import test_columns
def test_value_formatter():
sf = value.ValueFormatter()
c = ('a', 'b', 'c', 'd')
d = ('A', 'B', 'C', '"no escape me"')
expected = 'A\nB\nC\n"no escape me"\n'
output = six.StringIO()
sf.emit_one(c, d, output, None)
actual = output.getvalue()
assert expected == actual
def test_value_formatter_formattable_column():
sf = value.ValueFormatter()
c = ('a', 'b', 'c', 'd')
d = ('A', 'B', 'C', test_columns.FauxColumn(['the', 'value']))
expected = "A\nB\nC\n['the', 'value']\n"
output = six.StringIO()
sf.emit_one(c, d, output, None)
actual = output.getvalue()
assert expected == actual
def test_value_list_formatter():
sf = value.ValueFormatter()
c = ('a', 'b', 'c')
d1 = ('A', 'B', 'C')
d2 = ('D', 'E', 'F')
data = [d1, d2]
expected = 'A B C\nD E F\n'
output = six.StringIO()
sf.emit_list(c, data, output, None)
actual = output.getvalue()
assert expected == actual
def test_value_list_formatter_formattable_column():
sf = value.ValueFormatter()
c = ('a', 'b', 'c')
d1 = ('A', 'B', test_columns.FauxColumn(['the', 'value']))
data = [d1]
expected = "A B ['the', 'value']\n"
output = six.StringIO()
sf.emit_list(c, data, output, None)
actual = output.getvalue()
assert expected == actual
| true
| true
|
f704b1b6e255bd26c9802820bac5475dcc418066
| 6,716
|
py
|
Python
|
scraper/src/meilisearch_helper.py
|
myface-wang/docs-scraper
|
86fa2130000b6d00b6a9520d176d02ea470aec2d
|
[
"MIT"
] | null | null | null |
scraper/src/meilisearch_helper.py
|
myface-wang/docs-scraper
|
86fa2130000b6d00b6a9520d176d02ea470aec2d
|
[
"MIT"
] | null | null | null |
scraper/src/meilisearch_helper.py
|
myface-wang/docs-scraper
|
86fa2130000b6d00b6a9520d176d02ea470aec2d
|
[
"MIT"
] | null | null | null |
"""MeiliSearchHelper
Wrapper on top of the MeiliSearch API client"""
import meilisearch
from builtins import range
def remove_bad_encoding(value):
    """Decode the HTML-escaped apostrophe entity back to a literal quote.

    Fix: the entity literal had been decoded in place, leaving a broken
    ``replace(''', "'")`` expression; restore the intended ``'&#x27;'``
    source pattern.
    """
    # Scraped text sometimes carries the hex HTML entity for "'".
    return value.replace('&#x27;', "'")
def clean_one_field(value):
    """Normalize one record value: booleans become strings, strings are re-encoded."""
    if isinstance(value, bool):
        return str(value)
    if isinstance(value, str):
        return remove_bad_encoding(value)
    # Any other type passes through untouched.
    return value
def clean_dict(record):
    """Recursively clean every field of ``record`` in place and return it."""
    for field_name, field_value in record.items():
        if isinstance(field_value, dict):
            # Nested dicts are cleaned depth-first.
            record[field_name] = clean_dict(field_value)
        else:
            record[field_name] = clean_one_field(field_value)
    return record
def parse_record(record):
    """Flatten nested weight/hierarchy fields and drop the camelCase duplicates.

    Mutates ``record`` (the nested keys are deleted) and returns a new flat
    dict combining the remaining fields with the flattened ones.
    """
    flat_weight = dict(record['weight'])
    flat_hierarchy = {'hierarchy_' + level: entry
                      for level, entry in record['hierarchy'].items()}
    flat_radio = {}
    for level, entry in record['hierarchy_radio'].items():
        # Prepend each entry so later levels come first, matching the
        # insertion order produced by the previous implementation.
        flat_radio = {**{'hierarchy_radio_' + level: entry}, **flat_radio}
    for obsolete_key in ('weight', 'hierarchy', 'hierarchy_radio',
                         'hierarchy_camel', 'hierarchy_radio_camel',
                         'content_camel'):
        del record[obsolete_key]
    return {**record, **flat_weight, **flat_hierarchy, **flat_radio}
class MeiliSearchHelper:
    """Wrapper on top of the MeiliSearch API client.

    Recreates the target index on construction, pushes the ranking/search
    settings and uploads scraped records in batches of 50.
    """

    # Cf the end of this file to understand these settings
    SETTINGS = {
        'rankingRules': [
            'words',
            'typo',
            'attribute',
            'proximity',
            'exactness',
            'desc(page_rank)',
            'desc(level)',
            'asc(position)'
        ],
        'distinctAttribute': 'url',
        'searchableAttributes': [
            'hierarchy_radio_lvl0',
            'hierarchy_radio_lvl1',
            'hierarchy_radio_lvl2',
            'hierarchy_radio_lvl3',
            'hierarchy_radio_lvl4',
            'hierarchy_radio_lvl5',
            'hierarchy_lvl0',
            'hierarchy_lvl1',
            'hierarchy_lvl2',
            'hierarchy_lvl3',
            'hierarchy_lvl4',
            'hierarchy_lvl5',
            'hierarchy_lvl6',
            'content',
            'objectID',
            'page_rank',
            'level',
            'position'
        ],
        'displayedAttributes': [
            'hierarchy_radio_lvl0',
            'hierarchy_radio_lvl1',
            'hierarchy_radio_lvl2',
            'hierarchy_radio_lvl3',
            'hierarchy_radio_lvl4',
            'hierarchy_radio_lvl5',
            'hierarchy_lvl0',
            'hierarchy_lvl1',
            'hierarchy_lvl2',
            'hierarchy_lvl3',
            'hierarchy_lvl4',
            'hierarchy_lvl5',
            'hierarchy_lvl6',
            'anchor',
            'url',
            'content',
            'objectID'
        ]
    }

    def __init__(self, host_url, api_key, index_uid, custom_settings):
        """Connect to MeiliSearch, recreate ``index_uid`` and apply settings."""
        self.meilisearch_client = meilisearch.Client(host_url, api_key)
        self.meilisearch_index = self.__delete_and_create_index(index_uid)
        self.add_settings(MeiliSearchHelper.SETTINGS, custom_settings)

    def add_settings(self, default_settings, custom_settings):
        """Push settings to the index; custom entries override the defaults."""
        settings = {**default_settings, **custom_settings}
        self.meilisearch_index.update_settings(settings)

    def add_records(self, records, url, from_sitemap):
        """Add new records to the index.

        Records are parsed/cleaned and uploaded in batches of 50; a colored
        summary line is printed per page (cyan if found via sitemap).
        """
        record_count = len(records)
        for i in range(0, record_count, 50):
            parsed_records = list(map(parse_record, records[i:i + 50]))
            cleaned_records = list(map(clean_dict, parsed_records))
            self.meilisearch_index.add_documents(cleaned_records)
        color = "96" if from_sitemap else "94"
        print(
            '\033[{}m> Docs-Scraper: \033[0m{}\033[93m {} records\033[0m)'.format(
                color, url, record_count))

    def __delete_and_create_index(self, index_uid):
        """Drop the index if it exists, then create it fresh with objectID as key."""
        try:
            self.meilisearch_client.get_index(index_uid).delete()
        except Exception:
            # First run: nothing to delete.
            print("The index " + index_uid + " does not exist. Creating...")
        return self.meilisearch_client.create_index(index_uid, {'primaryKey': 'objectID'})
# Algolia's settings:
# {"minWordSizefor1Typo"=>3,
# "minWordSizefor2Typos"=>7,
# "hitsPerPage"=>20,
# "maxValuesPerFacet"=>100,
# "minProximity"=>1,
# "version"=>2,
# "attributesToIndex"=>
# ["unordered(hierarchy_radio_camel.lvl0)",
# "unordered(hierarchy_radio.lvl0)",
# "unordered(hierarchy_radio_camel.lvl1)",
# "unordered(hierarchy_radio.lvl1)",
# "unordered(hierarchy_radio_camel.lvl2)",
# "unordered(hierarchy_radio.lvl2)",
# "unordered(hierarchy_radio_camel.lvl3)",
# "unordered(hierarchy_radio.lvl3)",
# "unordered(hierarchy_radio_camel.lvl4)",
# "unordered(hierarchy_radio.lvl4)",
# "unordered(hierarchy_radio_camel.lvl5)",
# "unordered(hierarchy_radio.lvl5)",
# "unordered(hierarchy_camel.lvl0)",
# "unordered(hierarchy.lvl0)",
# "unordered(hierarchy_camel.lvl1)",
# "unordered(hierarchy.lvl1)",
# "unordered(hierarchy_camel.lvl2)",
# "unordered(hierarchy.lvl2)",
# "unordered(hierarchy_camel.lvl3)",
# "unordered(hierarchy.lvl3)",
# "unordered(hierarchy_camel.lvl4)",
# "unordered(hierarchy.lvl4)",
# "unordered(hierarchy_camel.lvl5)",
# "unordered(hierarchy.lvl5)",
# "content"],
# "numericAttributesToIndex"=>nil,
# "attributesToRetrieve"=>["hierarchy", "content", "anchor", "url"],
# "allowTyposOnNumericTokens"=>false,
# "ignorePlurals"=>true,
# "camelCaseAttributes"=>["hierarchy", "hierarchy_radio", "content"],
# "advancedSyntax"=>true,
# "attributeCriteriaComputedByMinProximity"=>true,
# "distinct"=>true,
# "unretrievableAttributes"=>nil,
# "optionalWords"=>nil,
# "userData"=>{"crawling_issue"=>false},
# "attributesForFaceting"=>["lang"],
# "attributesToSnippet"=>["content:10"],
# "attributesToHighlight"=>["hierarchy", "hierarchy_camel", "content"],
# "paginationLimitedTo"=>1000,
# "attributeForDistinct"=>"url",
# "exactOnSingleWordQuery"=>"attribute",
# "ranking"=>
# ["words", "filters", "typo", "attribute", "proximity", "exact", "custom"],
# "customRanking"=>
# ["desc(weight.page_rank)", "desc(weight.level)", "asc(weight.position)"],
# "separatorsToIndex"=>"",
# "removeWordsIfNoResults"=>"allOptional",
# "queryType"=>"prefixLast",
# "highlightPreTag"=>"<span class=\"algolia-docsearch-suggestion--highlight\">",
# "highlightPostTag"=>"</span>",
# "snippetEllipsisText"=>"",
# "alternativesAsExact"=>["ignorePlurals", "singleWordSynonym"]}
| 34.618557
| 90
| 0.611078
|
import meilisearch
from builtins import range
def remove_bad_encoding(value):
return value.replace(''', "'")
def clean_one_field(value):
if isinstance(value, bool):
return str(value)
elif isinstance(value, str):
return remove_bad_encoding(value)
return value
def clean_dict(record):
for key, value in record.items():
if isinstance(value, dict):
record[key] = clean_dict(value)
else:
record[key] = clean_one_field(value)
return record
def parse_record(record):
new_weight = {}
for k, v in record['weight'].items():
new_weight[k] = v
new_hierarchy = {}
for k, v in record['hierarchy'].items():
new_hierarchy['hierarchy_' + k] = v
new_hierarchy_radio = {}
for k, v in record['hierarchy_radio'].items():
key = 'hierarchy_radio_' + k
new_hierarchy_radio = {**{key: v}, **new_hierarchy_radio}
del record['weight']
del record['hierarchy']
del record['hierarchy_radio']
del record['hierarchy_camel']
del record['hierarchy_radio_camel']
del record['content_camel']
return {**record, **new_weight, **new_hierarchy, **new_hierarchy_radio}
class MeiliSearchHelper:
# Cf the end of this file to understand these settings
SETTINGS = {
'rankingRules': [
'words',
'typo',
'attribute',
'proximity',
'exactness',
'desc(page_rank)',
'desc(level)',
'asc(position)'
],
'distinctAttribute': 'url',
'searchableAttributes': [
'hierarchy_radio_lvl0',
'hierarchy_radio_lvl1',
'hierarchy_radio_lvl2',
'hierarchy_radio_lvl3',
'hierarchy_radio_lvl4',
'hierarchy_radio_lvl5',
'hierarchy_lvl0',
'hierarchy_lvl1',
'hierarchy_lvl2',
'hierarchy_lvl3',
'hierarchy_lvl4',
'hierarchy_lvl5',
'hierarchy_lvl6',
'content',
'objectID',
'page_rank',
'level',
'position'
],
'displayedAttributes': [
'hierarchy_radio_lvl0',
'hierarchy_radio_lvl1',
'hierarchy_radio_lvl2',
'hierarchy_radio_lvl3',
'hierarchy_radio_lvl4',
'hierarchy_radio_lvl5',
'hierarchy_lvl0',
'hierarchy_lvl1',
'hierarchy_lvl2',
'hierarchy_lvl3',
'hierarchy_lvl4',
'hierarchy_lvl5',
'hierarchy_lvl6',
'anchor',
'url',
'content',
'objectID'
]
}
def __init__(self, host_url, api_key, index_uid, custom_settings):
self.meilisearch_client = meilisearch.Client(host_url, api_key)
self.meilisearch_index = self.__delete_and_create_index(index_uid)
self.add_settings(MeiliSearchHelper.SETTINGS, custom_settings)
def add_settings(self, default_settings, custom_settings):
settings = {**default_settings, **custom_settings}
self.meilisearch_index.update_settings(settings)
def add_records(self, records, url, from_sitemap):
record_count = len(records)
for i in range(0, record_count, 50):
parsed_records = list(map(parse_record, records[i:i + 50]))
cleaned_records = list(map(clean_dict, parsed_records))
self.meilisearch_index.add_documents(cleaned_records)
color = "96" if from_sitemap else "94"
print(
'\033[{}m> Docs-Scraper: \033[0m{}\033[93m {} records\033[0m)'.format(
color, url, record_count))
def __delete_and_create_index(self, index_uid):
try:
self.meilisearch_client.get_index(index_uid).delete()
except Exception:
print("The index " + index_uid + " does not exist. Creating...")
return self.meilisearch_client.create_index(index_uid, {'primaryKey': 'objectID'})
# Algolia's settings:
| true
| true
|
f704b1f07f7a454874c406e6aa73fc6f56056467
| 1,848
|
py
|
Python
|
moray/_browser/chrome.py
|
hirorich/moray
|
69421ce739960c10c343ff1d72e1337594ea5c30
|
[
"MIT"
] | null | null | null |
moray/_browser/chrome.py
|
hirorich/moray
|
69421ce739960c10c343ff1d72e1337594ea5c30
|
[
"MIT"
] | null | null | null |
moray/_browser/chrome.py
|
hirorich/moray
|
69421ce739960c10c343ff1d72e1337594ea5c30
|
[
"MIT"
] | null | null | null |
"""
chromeをアプリモードで起動するためのコマンドを生成する
"""
import sys, os
from moray.exception import SupportError
name = 'chrome'
def create_command(path, url, cmdline_args):
    """Build the command line that launches Chrome in app mode.

    Args:
        path (str): path to the Chrome executable.
        url (str): URL to open.
        cmdline_args (list<str>): extra command-line arguments.

    Returns:
        list<str>: the assembled launch command.
    """
    command = [path, '--app=' + url]
    command.extend(cmdline_args)
    return command
def find_path():
    """Locate the Chrome executable for the current platform.

    Returns:
        str: path to the Chrome executable.

    Raises:
        SupportError: if the current OS is not supported.
    """
    if sys.platform in ('win32', 'win64'):
        # Windows: resolve chrome.exe via the registry.
        return _find_chrome_windows()
    else:
        # Any other platform is unsupported.
        error_msg = 'This OS is not a supported OS.'
        raise SupportError(error_msg)
def _find_chrome_windows():
    """Locate the Chrome executable on Windows via the registry App Paths key.

    Returns:
        str: path to chrome.exe.

    Raises:
        FileNotFoundError: if chrome.exe cannot be located in either hive.
    """
    import winreg
    reg_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'

    # HKEY_CURRENT_USER: per-user registry settings.
    # HKEY_LOCAL_MACHINE: machine-wide registry settings.
    for reg_entry in winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE:
        try:
            # Read the chrome.exe install path from the registry.
            with winreg.OpenKey(reg_entry, reg_path, 0, winreg.KEY_READ) as reg_key:
                chrome_path = winreg.QueryValueEx(reg_key, None)[0]
                if os.path.isfile(chrome_path):
                    return chrome_path
        except Exception as e:
            # Key absent or unreadable in this hive; try the next one.
            pass

    # chrome.exe was not found in either hive.
    error_msg = '"chrome.exe" is not found.'
    raise FileNotFoundError(error_msg)
| 23.692308
| 85
| 0.581169
|
import sys, os
from moray.exception import SupportError
name = 'chrome'
def create_command(path, url, cmdline_args):
return [path, '--app=' + url] + cmdline_args
def find_path():
if sys.platform in ('win32', 'win64'):
return _find_chrome_windows()
else:
error_msg = 'This OS is not a supported OS.'
raise SupportError(error_msg)
def _find_chrome_windows():
import winreg
reg_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
for reg_entry in winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE:
try:
with winreg.OpenKey(reg_entry, reg_path, 0, winreg.KEY_READ) as reg_key:
chrome_path = winreg.QueryValueEx(reg_key, None)[0]
if os.path.isfile(chrome_path):
return chrome_path
except Exception as e:
pass
error_msg = '"chrome.exe" is not found.'
raise FileNotFoundError(error_msg)
| true
| true
|
f704b28f204b556b3ce64a8a530557cbeb92babb
| 1,407
|
py
|
Python
|
itspylearning/itslearning.py
|
HubertJan/itspylearning
|
6110f87c78dfc5b78e8d52c0b05e2c376749bce3
|
[
"MIT"
] | null | null | null |
itspylearning/itslearning.py
|
HubertJan/itspylearning
|
6110f87c78dfc5b78e8d52c0b05e2c376749bce3
|
[
"MIT"
] | 1
|
2021-12-16T15:52:34.000Z
|
2022-01-03T17:17:09.000Z
|
itspylearning/itslearning.py
|
HubertJan/itspylearning
|
6110f87c78dfc5b78e8d52c0b05e2c376749bce3
|
[
"MIT"
] | 1
|
2021-11-30T16:26:08.000Z
|
2021-11-30T16:26:08.000Z
|
from typing import List, Optional
import aiohttp
import json
from aiohttp.client import ClientSession
from itspylearning.consts import ITSLEARNING_URL
from itspylearning.organisation import Organisation
_clientSession: Optional[ClientSession] = None
def _getClient() -> aiohttp.ClientSession:
global _clientSession
if(_clientSession is None):
_clientSession = aiohttp.ClientSession()
return _clientSession
async def search_organisations(query) -> List[dict]:
response = await _getClient().get(f"{ITSLEARNING_URL}/restapi/sites/all/organisations/search/v1/?searchText={query}")
rawData = await response.text()
data = json.loads(rawData)
matches = []
for match in data["EntityArray"]:
matches.append({"id": match["CustomerId"], "name": match["SiteName"],})
await close_session()
return matches
async def fetch_organisation( id) -> Organisation:
response = await _getClient().get(f"{ITSLEARNING_URL}/restapi/sites/{id}/v1")
if response.status != 200:
raise Exception('Request failure.')
rawData = await response.text()
data = json.loads(rawData)
if data == None:
raise Exception("Organisation did not exist.")
organisation = Organisation(data)
await close_session()
return organisation
async def close_session():
global _clientSession
await _clientSession.close()
_clientSession = None
| 31.977273
| 121
| 0.722814
|
from typing import List, Optional
import aiohttp
import json
from aiohttp.client import ClientSession
from itspylearning.consts import ITSLEARNING_URL
from itspylearning.organisation import Organisation
_clientSession: Optional[ClientSession] = None
def _getClient() -> aiohttp.ClientSession:
global _clientSession
if(_clientSession is None):
_clientSession = aiohttp.ClientSession()
return _clientSession
async def search_organisations(query) -> List[dict]:
response = await _getClient().get(f"{ITSLEARNING_URL}/restapi/sites/all/organisations/search/v1/?searchText={query}")
rawData = await response.text()
data = json.loads(rawData)
matches = []
for match in data["EntityArray"]:
matches.append({"id": match["CustomerId"], "name": match["SiteName"],})
await close_session()
return matches
async def fetch_organisation( id) -> Organisation:
response = await _getClient().get(f"{ITSLEARNING_URL}/restapi/sites/{id}/v1")
if response.status != 200:
raise Exception('Request failure.')
rawData = await response.text()
data = json.loads(rawData)
if data == None:
raise Exception("Organisation did not exist.")
organisation = Organisation(data)
await close_session()
return organisation
async def close_session():
global _clientSession
await _clientSession.close()
_clientSession = None
| true
| true
|
f704b2a011d6048694d79069a8471cef3118250c
| 2,356
|
py
|
Python
|
backend/app/tests/api/node_directive/test_create.py
|
hollyfoxx/ace2-gui
|
e0f72cafdd524e0cd66549a9315697aa21ae46fa
|
[
"Apache-2.0"
] | 1
|
2021-07-16T10:34:22.000Z
|
2021-07-16T10:34:22.000Z
|
backend/app/tests/api/node_directive/test_create.py
|
hollyfoxx/ace2-gui
|
e0f72cafdd524e0cd66549a9315697aa21ae46fa
|
[
"Apache-2.0"
] | null | null | null |
backend/app/tests/api/node_directive/test_create.py
|
hollyfoxx/ace2-gui
|
e0f72cafdd524e0cd66549a9315697aa21ae46fa
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import uuid
from fastapi import status
#
# INVALID TESTS
#
@pytest.mark.parametrize(
"key,value",
[
("description", 123),
("description", ""),
("uuid", None),
("uuid", 1),
("uuid", "abc"),
("uuid", ""),
("value", 123),
("value", None),
("value", ""),
],
)
def test_create_invalid_fields(client, key, value):
create_json = {"value": "test"}
create_json[key] = value
create = client.post("/api/node/directive/", json=create_json)
assert create.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.parametrize(
"key",
[
("uuid"),
("value"),
],
)
def test_create_duplicate_unique_fields(client, key):
# Create an object
create1_json = {"uuid": str(uuid.uuid4()), "value": "test"}
client.post("/api/node/directive/", json=create1_json)
# Ensure you cannot create another object with the same unique field value
create2_json = {"value": "test2"}
create2_json[key] = create1_json[key]
create2 = client.post("/api/node/directive/", json=create2_json)
assert create2.status_code == status.HTTP_409_CONFLICT
@pytest.mark.parametrize(
"key",
[
("value"),
],
)
def test_create_missing_required_fields(client, key):
create_json = {"value": "test"}
del create_json[key]
create = client.post("/api/node/directive/", json=create_json)
assert create.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
#
# VALID TESTS
#
@pytest.mark.parametrize(
"key,value",
[
("description", None),
("description", "test"),
("uuid", str(uuid.uuid4()))
],
)
def test_create_valid_optional_fields(client, key, value):
# Create the object
create = client.post("/api/node/directive/", json={key: value, "value": "test"})
assert create.status_code == status.HTTP_201_CREATED
# Read it back
get = client.get(create.headers["Content-Location"])
assert get.json()[key] == value
def test_create_valid_required_fields(client):
# Create the object
create = client.post("/api/node/directive/", json={"value": "test"})
assert create.status_code == status.HTTP_201_CREATED
# Read it back
get = client.get(create.headers["Content-Location"])
assert get.json()["value"] == "test"
| 24.541667
| 84
| 0.629032
|
import pytest
import uuid
from fastapi import status
@pytest.mark.parametrize(
"key,value",
[
("description", 123),
("description", ""),
("uuid", None),
("uuid", 1),
("uuid", "abc"),
("uuid", ""),
("value", 123),
("value", None),
("value", ""),
],
)
def test_create_invalid_fields(client, key, value):
create_json = {"value": "test"}
create_json[key] = value
create = client.post("/api/node/directive/", json=create_json)
assert create.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.parametrize(
"key",
[
("uuid"),
("value"),
],
)
def test_create_duplicate_unique_fields(client, key):
create1_json = {"uuid": str(uuid.uuid4()), "value": "test"}
client.post("/api/node/directive/", json=create1_json)
create2_json = {"value": "test2"}
create2_json[key] = create1_json[key]
create2 = client.post("/api/node/directive/", json=create2_json)
assert create2.status_code == status.HTTP_409_CONFLICT
@pytest.mark.parametrize(
"key",
[
("value"),
],
)
def test_create_missing_required_fields(client, key):
create_json = {"value": "test"}
del create_json[key]
create = client.post("/api/node/directive/", json=create_json)
assert create.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.parametrize(
"key,value",
[
("description", None),
("description", "test"),
("uuid", str(uuid.uuid4()))
],
)
def test_create_valid_optional_fields(client, key, value):
create = client.post("/api/node/directive/", json={key: value, "value": "test"})
assert create.status_code == status.HTTP_201_CREATED
get = client.get(create.headers["Content-Location"])
assert get.json()[key] == value
def test_create_valid_required_fields(client):
create = client.post("/api/node/directive/", json={"value": "test"})
assert create.status_code == status.HTTP_201_CREATED
get = client.get(create.headers["Content-Location"])
assert get.json()["value"] == "test"
| true
| true
|
f704b2ba2399a4abbde2614f88886205e3ef96db
| 1,398
|
py
|
Python
|
Transition.py
|
gerth2/FSMMaker
|
933db1fb2bd4b88a62590c07a58842983b625d43
|
[
"MIT"
] | null | null | null |
Transition.py
|
gerth2/FSMMaker
|
933db1fb2bd4b88a62590c07a58842983b625d43
|
[
"MIT"
] | null | null | null |
Transition.py
|
gerth2/FSMMaker
|
933db1fb2bd4b88a62590c07a58842983b625d43
|
[
"MIT"
] | null | null | null |
from Comparison import Comparison
from Action import Action
from TransitionCodegen import TransitionCodegen
from TransitionGraphic import TransitionGraphic
import xml.etree.ElementTree as ET
class Transition:
def __init__(self, id):
self.id = id
self.fromStateID = None
self.toStateID = None
self.condition = None
self.priority = 0
self.cg = TransitionCodegen(self)
self.graphic = TransitionGraphic(self)
self.actions = []
def parseCfg(self, etreeNode):
for child in etreeNode:
if(child.tag == "from"):
self.fromStateID = int(child.text)
elif(child.tag == "to"):
self.toStateID = int(child.text)
elif(child.tag == "action"):
newAction = Action()
newAction.parseCfg(child)
self.actions.append(newAction)
elif(child.tag == "condition"):
self.condition = Comparison()
self.condition.parseCfg(child)
elif(child.tag == "priority"):
self.priority = int(child.text)
elif(child.tag == "graphic"):
self.graphic.parseCfg(child)
def dumpCfg(self):
return ET.Element() #TODO: generate XML representation of current object
| 31.772727
| 81
| 0.557225
|
from Comparison import Comparison
from Action import Action
from TransitionCodegen import TransitionCodegen
from TransitionGraphic import TransitionGraphic
import xml.etree.ElementTree as ET
class Transition:
def __init__(self, id):
self.id = id
self.fromStateID = None
self.toStateID = None
self.condition = None
self.priority = 0
self.cg = TransitionCodegen(self)
self.graphic = TransitionGraphic(self)
self.actions = []
def parseCfg(self, etreeNode):
for child in etreeNode:
if(child.tag == "from"):
self.fromStateID = int(child.text)
elif(child.tag == "to"):
self.toStateID = int(child.text)
elif(child.tag == "action"):
newAction = Action()
newAction.parseCfg(child)
self.actions.append(newAction)
elif(child.tag == "condition"):
self.condition = Comparison()
self.condition.parseCfg(child)
elif(child.tag == "priority"):
self.priority = int(child.text)
elif(child.tag == "graphic"):
self.graphic.parseCfg(child)
def dumpCfg(self):
return ET.Element()
| true
| true
|
f704b2e0ef291ebe1fc31f07298db6d46894ad1c
| 366
|
py
|
Python
|
build/kinova-ros/kinova_bringup/catkin_generated/pkg.installspace.context.pc.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
build/kinova-ros/kinova_bringup/catkin_generated/pkg.installspace.context.pc.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
build/kinova-ros/kinova_bringup/catkin_generated/pkg.installspace.context.pc.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kinova_bringup"
PROJECT_SPACE_DIR = "/workspace/install"
PROJECT_VERSION = "0.0.0"
| 40.666667
| 68
| 0.704918
|
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "kinova_bringup"
PROJECT_SPACE_DIR = "/workspace/install"
PROJECT_VERSION = "0.0.0"
| true
| true
|
f704b34c87ed8874507207d4c03838d31c80bd03
| 113
|
py
|
Python
|
pfdo_med2image/__init__.py
|
FNNDSC/pfdo_med2image
|
f65412aea362d0db5b8e7e2257b1d8fc1e696494
|
[
"Apache-2.0"
] | null | null | null |
pfdo_med2image/__init__.py
|
FNNDSC/pfdo_med2image
|
f65412aea362d0db5b8e7e2257b1d8fc1e696494
|
[
"Apache-2.0"
] | 2
|
2020-08-18T21:47:22.000Z
|
2021-03-12T14:45:35.000Z
|
pfdo_med2image/__init__.py
|
FNNDSC/pfdo_med2image
|
f65412aea362d0db5b8e7e2257b1d8fc1e696494
|
[
"Apache-2.0"
] | 1
|
2020-11-12T21:40:01.000Z
|
2020-11-12T21:40:01.000Z
|
try:
from .pfdo_med2image import pfdo_med2image
except:
from pfdo_med2image import pfdo_med2image
| 22.6
| 49
| 0.752212
|
try:
from .pfdo_med2image import pfdo_med2image
except:
from pfdo_med2image import pfdo_med2image
| true
| true
|
f704b3d19693548dbf7626d9ff72c05289e02e97
| 226
|
bzl
|
Python
|
BUILD.bzl
|
mobileink/coda
|
0612b142738c68155e9a9bba16cd3787bba4feed
|
[
"Apache-2.0"
] | null | null | null |
BUILD.bzl
|
mobileink/coda
|
0612b142738c68155e9a9bba16cd3787bba4feed
|
[
"Apache-2.0"
] | 1
|
2021-03-06T14:52:32.000Z
|
2021-03-06T14:52:32.000Z
|
BUILD.bzl
|
mobileink/coda
|
0612b142738c68155e9a9bba16cd3787bba4feed
|
[
"Apache-2.0"
] | null | null | null |
# CONFIG_MLH = ["//mina/config"]
CONFIG_MLH = select({
"//:profile_debug": ["//src/config/debug"],
"//:profile_dev": ["//src:dev"],
"//:profile_release": ["//src:release"],
}, no_match_error = "Unknown profile")
| 25.111111
| 47
| 0.588496
|
CONFIG_MLH = select({
"//:profile_debug": ["//src/config/debug"],
"//:profile_dev": ["//src:dev"],
"//:profile_release": ["//src:release"],
}, no_match_error = "Unknown profile")
| true
| true
|
f704b5b6daab1e1692d607b7bf7753465d5a41cd
| 2,445
|
py
|
Python
|
test/test_gbtile.py
|
flozz/img2gb
|
2564a718d0b377d1b524204d97a674aedeec770d
|
[
"BSD-3-Clause"
] | 23
|
2018-11-14T12:50:31.000Z
|
2022-03-30T17:28:43.000Z
|
test/test_gbtile.py
|
flozz/img2gb
|
2564a718d0b377d1b524204d97a674aedeec770d
|
[
"BSD-3-Clause"
] | 10
|
2019-07-01T17:24:47.000Z
|
2022-01-13T12:38:38.000Z
|
test/test_gbtile.py
|
flozz/img2gb
|
2564a718d0b377d1b524204d97a674aedeec770d
|
[
"BSD-3-Clause"
] | 3
|
2019-10-16T23:27:28.000Z
|
2022-01-23T22:28:29.000Z
|
import pytest
from PIL import Image
from img2gb.gbtile import GBTile
class Test_GBTile(object):
@pytest.fixture
def image(self):
return Image.open("./test/assets/tileset.png")
@pytest.mark.parametrize("x,result", [
(0, "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00"),
(8, "FF 01 81 7F BD 7F A5 7B A5 7B BD 63 81 7F FF FF"),
(16, "7E 00 81 7F 81 7F 81 7F 81 7F 81 7F 81 7F 7E 7E"),
(24, "3C 00 54 2A A3 5F C1 3F 83 7F C5 3F 2A 7E 3C 3C"),
(32, "04 04 04 04 0A 0A 12 12 66 00 99 77 99 77 66 66"),
])
def test_from_image(self, image, x, result):
tile = GBTile.from_image(image, x)
assert tile.to_hex_string() == result
def test_put_pixel(self):
tile = GBTile()
for b in tile.data:
assert b == 0
tile.put_pixel(0, 0, 3)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x80
tile.put_pixel(4, 0, 2)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x88
def test_get_pixel(self, image):
tile = GBTile.from_image(image, 32)
assert tile.get_pixel(0, 0) == 0b00
assert tile.get_pixel(0, 6) == 0b01
assert tile.get_pixel(2, 6) == 0b10
assert tile.get_pixel(5, 0) == 0b11
def test_to_hex_string(self):
tile = GBTile()
assert tile.to_hex_string() == "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00" # noqa
tile.put_pixel(0, 0, 3)
tile.put_pixel(1, 0, 3)
assert tile.to_hex_string() == "C0 C0 00 00 00 00 00 00 00 00 00 00 00 00 00 00" # noqa
def test_to_image(self, image):
tile = GBTile.from_image(image, 32)
tile_image = tile.to_image()
assert tile_image.getpixel((0, 0)) == 0b00
assert tile_image.getpixel((0, 6)) == 0b01
assert tile_image.getpixel((2, 6)) == 0b10
assert tile_image.getpixel((5, 0)) == 0b11
def test_gbtile_equality(self):
tile1 = GBTile()
tile2 = GBTile()
assert tile1 == tile2
tile1.put_pixel(0, 0, 3)
assert tile1 != tile2
tile2.put_pixel(0, 0, 3)
assert tile1 == tile2
def test_data(self):
tile = GBTile()
assert len(tile.data) == 16
assert tile.data[0] == 0x00
assert tile.data[1] == 0x00
tile.put_pixel(0, 0, 3)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x80
| 28.430233
| 96
| 0.566871
|
import pytest
from PIL import Image
from img2gb.gbtile import GBTile
class Test_GBTile(object):
@pytest.fixture
def image(self):
return Image.open("./test/assets/tileset.png")
@pytest.mark.parametrize("x,result", [
(0, "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00"),
(8, "FF 01 81 7F BD 7F A5 7B A5 7B BD 63 81 7F FF FF"),
(16, "7E 00 81 7F 81 7F 81 7F 81 7F 81 7F 81 7F 7E 7E"),
(24, "3C 00 54 2A A3 5F C1 3F 83 7F C5 3F 2A 7E 3C 3C"),
(32, "04 04 04 04 0A 0A 12 12 66 00 99 77 99 77 66 66"),
])
def test_from_image(self, image, x, result):
tile = GBTile.from_image(image, x)
assert tile.to_hex_string() == result
def test_put_pixel(self):
tile = GBTile()
for b in tile.data:
assert b == 0
tile.put_pixel(0, 0, 3)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x80
tile.put_pixel(4, 0, 2)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x88
def test_get_pixel(self, image):
tile = GBTile.from_image(image, 32)
assert tile.get_pixel(0, 0) == 0b00
assert tile.get_pixel(0, 6) == 0b01
assert tile.get_pixel(2, 6) == 0b10
assert tile.get_pixel(5, 0) == 0b11
def test_to_hex_string(self):
tile = GBTile()
assert tile.to_hex_string() == "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00"
tile.put_pixel(0, 0, 3)
tile.put_pixel(1, 0, 3)
assert tile.to_hex_string() == "C0 C0 00 00 00 00 00 00 00 00 00 00 00 00 00 00"
def test_to_image(self, image):
tile = GBTile.from_image(image, 32)
tile_image = tile.to_image()
assert tile_image.getpixel((0, 0)) == 0b00
assert tile_image.getpixel((0, 6)) == 0b01
assert tile_image.getpixel((2, 6)) == 0b10
assert tile_image.getpixel((5, 0)) == 0b11
def test_gbtile_equality(self):
tile1 = GBTile()
tile2 = GBTile()
assert tile1 == tile2
tile1.put_pixel(0, 0, 3)
assert tile1 != tile2
tile2.put_pixel(0, 0, 3)
assert tile1 == tile2
def test_data(self):
tile = GBTile()
assert len(tile.data) == 16
assert tile.data[0] == 0x00
assert tile.data[1] == 0x00
tile.put_pixel(0, 0, 3)
assert tile.data[0] == 0x80
assert tile.data[1] == 0x80
| true
| true
|
f704b74596dc89d75957ad11f92f138e450dc2bd
| 2,050
|
py
|
Python
|
mathics/profile.py
|
jake100/Mathics
|
f90f9107e12072dcfbd76549b61897bc8feb04a8
|
[
"Apache-2.0"
] | 1
|
2019-04-15T13:18:05.000Z
|
2019-04-15T13:18:05.000Z
|
mathics/profile.py
|
jake100/Mathics
|
f90f9107e12072dcfbd76549b61897bc8feb04a8
|
[
"Apache-2.0"
] | null | null | null |
mathics/profile.py
|
jake100/Mathics
|
f90f9107e12072dcfbd76549b61897bc8feb04a8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf8 -*-
u"""
Mathics: a general-purpose computer algebra system
Copyright (C) 2011-2013 The Mathics Team
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import cProfile
import pstats
from mathics.core.definitions import Definitions
from mathics.core.evaluation import Evaluation
definitions = Definitions(add_builtin=True)
def prepare():
pass
result = None
def run():
global result
# prompt = '(1+a)(1+b)(1+c)(1+d)(1+e)//Expand'
# prompt = 'f/@Range[20000];'
# prompt = 'Plus @@ Range[50000]'
# prompt = 'Range[100000];'
try:
# prompt = 'SetAttributes[v, Flat]; v[x_]:={x}; v[a,b]'
# prompt = """(Plus@@Symbol/@CharacterRange["a","z"])^2//Expand;"""
# prompt = (
# 'Plus@@f/@Symbol/@StringJoin/@Tuples[CharacterRange["a","z"],2]')
# prompt = 'FullForm[Nest[1+Sqrt[1+#]&, x, 20]]'
# prompt = '1+2'
prompt = 'DensityPlot[x*y,{x,-1,1},{y,-1,1}]'
evaluation = Evaluation(prompt, definitions, format='xml')
if evaluation.results:
result = evaluation.results[0].result
except KeyboardInterrupt:
result = 'INTERRUPTED'
def _profile():
global result
prepare()
cProfile.run('run()', 'profile')
# print 'Result: %s\n' % result
p = pstats.Stats('profile')
p.sort_stats('cumulative').print_stats(50)
p.print_callees(20)
if __name__ == '__main__':
_profile()
| 30.147059
| 79
| 0.643902
|
import cProfile
import pstats
from mathics.core.definitions import Definitions
from mathics.core.evaluation import Evaluation
definitions = Definitions(add_builtin=True)
def prepare():
pass
result = None
def run():
global result
try:
prompt = 'DensityPlot[x*y,{x,-1,1},{y,-1,1}]'
evaluation = Evaluation(prompt, definitions, format='xml')
if evaluation.results:
result = evaluation.results[0].result
except KeyboardInterrupt:
result = 'INTERRUPTED'
def _profile():
global result
prepare()
cProfile.run('run()', 'profile')
p = pstats.Stats('profile')
p.sort_stats('cumulative').print_stats(50)
p.print_callees(20)
if __name__ == '__main__':
_profile()
| true
| true
|
f704b79922c2bcb710beb9717a76fb07b5ab4af6
| 390
|
py
|
Python
|
beagle/transformers/__init__.py
|
truongdo619/beagle
|
55a5d30a381438ae66b6c1c57f57b2403621db87
|
[
"MIT"
] | 1
|
2019-10-01T19:26:16.000Z
|
2019-10-01T19:26:16.000Z
|
beagle/transformers/__init__.py
|
truongdo619/beagle
|
55a5d30a381438ae66b6c1c57f57b2403621db87
|
[
"MIT"
] | null | null | null |
beagle/transformers/__init__.py
|
truongdo619/beagle
|
55a5d30a381438ae66b6c1c57f57b2403621db87
|
[
"MIT"
] | 1
|
2019-10-04T15:30:08.000Z
|
2019-10-04T15:30:08.000Z
|
from __future__ import absolute_import
from .base_transformer import Transformer # noqa
from .fireeye_hx_transformer import FireEyeHXTransformer # noqa
from .generic_transformer import GenericTransformer # noqa
from .sysmon_transformer import SysmonTransformer # noqa
from .evtx_transformer import WinEVTXTransformer # noqa
from .procmon_transformer import ProcmonTransformer # noqa
| 43.333333
| 64
| 0.848718
|
from __future__ import absolute_import
from .base_transformer import Transformer
from .fireeye_hx_transformer import FireEyeHXTransformer
from .generic_transformer import GenericTransformer
from .sysmon_transformer import SysmonTransformer
from .evtx_transformer import WinEVTXTransformer
from .procmon_transformer import ProcmonTransformer
| true
| true
|
f704b7dccf154ecda012dac0c1c0edf8a17dd5c2
| 804
|
py
|
Python
|
setup.py
|
jsleb333/py2tex
|
737292bc53115310a7e495a3781c84a3fe53b57e
|
[
"MIT"
] | 3
|
2019-02-11T19:14:08.000Z
|
2019-02-11T22:47:15.000Z
|
setup.py
|
jsleb333/py2tex
|
737292bc53115310a7e495a3781c84a3fe53b57e
|
[
"MIT"
] | null | null | null |
setup.py
|
jsleb333/py2tex
|
737292bc53115310a7e495a3781c84a3fe53b57e
|
[
"MIT"
] | null | null | null |
import setuptools
from version import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="python2latex",
version=__version__,
author="Jean-Samuel Leboeuf",
author_email="jean-samuel.leboeuf.1@ulaval.ca",
description="A Python to LaTeX converter",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jsleb333/python2latex",
packages=setuptools.find_packages(),
install_requires=['numpy', 'colorspacious', 'matplotlib'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
data_files=[('', ['version.py'])]
)
| 30.923077
| 62
| 0.677861
|
import setuptools
from version import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="python2latex",
version=__version__,
author="Jean-Samuel Leboeuf",
author_email="jean-samuel.leboeuf.1@ulaval.ca",
description="A Python to LaTeX converter",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jsleb333/python2latex",
packages=setuptools.find_packages(),
install_requires=['numpy', 'colorspacious', 'matplotlib'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
data_files=[('', ['version.py'])]
)
| true
| true
|
f704b88ab2cda09b5ebb43639854a0c2c0509468
| 10,852
|
py
|
Python
|
as/tools/generator.py
|
Xuyiyang23333/asbot
|
c3b8a88e0970c1b39f9f7575f64b3fc3fe5161ba
|
[
"MIT"
] | null | null | null |
as/tools/generator.py
|
Xuyiyang23333/asbot
|
c3b8a88e0970c1b39f9f7575f64b3fc3fe5161ba
|
[
"MIT"
] | null | null | null |
as/tools/generator.py
|
Xuyiyang23333/asbot
|
c3b8a88e0970c1b39f9f7575f64b3fc3fe5161ba
|
[
"MIT"
] | null | null | null |
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from decimal import Decimal, ROUND_HALF_UP
from math import radians, tan, cos, sin
from os import path
_round = lambda f, r=ROUND_HALF_UP: int(Decimal(str(f)).quantize(Decimal("0"), rounding=r))
rgb = lambda r, g, b: (r, g, b)
upper_font_path = path.join(path.dirname(__file__), 'NotoSansCJKSC-Black.ttf')
downer_font_path = path.join(path.dirname(__file__), 'NotoSerifCJKSC-Black.ttf')
def get_gradient_2d(start, stop, width, height, is_horizontal=False):
if is_horizontal:
return np.tile(np.linspace(start, stop, width), (height, 1))
else:
return np.tile(np.linspace(start, stop, height), (width, 1)).T
def getTextWidth(text, font, width=100, height=500, recursive=False):
step = 100
img = Image.new("L", (width, height))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, font=font, fill=255)
box = img.getbbox()
if box[2] < width - step or (recursive and box[2] == width - step):
return box[2]
else:
return getTextWidth(text=text, font=font, width=width + step, height=height, recursive=True)
def get_gradient_3d(width, height, start_list, stop_list, is_horizontal_list=(False, False, False)):
result = np.zeros((height, width, len(start_list)), dtype=float)
for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):
result[:, :, i] = get_gradient_2d(start, stop, width, height, is_horizontal)
return result
def createLinearGradient(steps, width, height, size=1, center=0.5):
margin_up = _round(height * (center - size / 2))
margin_down = _round(height * (1 - center - size / 2))
result = np.zeros((0, width, len(steps[0])), dtype=float)
for i, k in enumerate(steps.keys()):
if k == 0:
array = get_gradient_3d(width, _round(margin_up), steps[k], steps[k])
result = np.vstack([result, array])
continue
pk = list(steps.keys())[i - 1]
h = _round(height * size * (k - pk))
array = get_gradient_3d(width, h, steps[pk], steps[k])
result = np.vstack([result, array])
if k == 1:
array = get_gradient_3d(width, _round(margin_down), steps[k], steps[k])
result = np.vstack([result, array])
continue
return result
def genBaseImage(width=1500, height=500):
k = 0.63 # 渐变色缩放系数,不应大于1
c = 0.53 # 渐变色中心位置
downerSilverArray = createLinearGradient({
0: rgb(0, 15, 36),
0.10: rgb(255, 255, 255),
0.18: rgb(55, 58, 59),
0.25: rgb(55, 58, 59),
0.5: rgb(200, 200, 200),
0.75: rgb(55, 58, 59),
0.85: rgb(25, 20, 31),
0.91: rgb(240, 240, 240),
0.95: rgb(166, 175, 194),
1: rgb(50, 50, 50)
}, width=width, height=height, size=k, center=c)
goldArray = createLinearGradient({
0: rgb(253, 241, 0),
0.25: rgb(245, 253, 187),
0.4: rgb(255, 255, 255),
0.75: rgb(253, 219, 9),
0.9: rgb(127, 53, 0),
1: rgb(243, 196, 11)
}, width=width, height=height, size=k, center=c)
strokeRedArray = createLinearGradient({
0: rgb(255, 100, 0),
0.5: rgb(123, 0, 0),
0.51: rgb(240, 0, 0),
1: rgb(5, 0, 0)
}, width=width, height=height, size=k, center=c)
redArray = createLinearGradient({
0: rgb(230, 0, 0),
0.5: rgb(123, 0, 0),
0.51: rgb(240, 0, 0),
1: rgb(5, 0, 0)
}, width=width, height=height, size=k, center=c)
silver2Array = createLinearGradient({
0: rgb(245, 246, 248),
0.15: rgb(255, 255, 255),
0.35: rgb(195, 213, 220),
0.5: rgb(160, 190, 201),
0.51: rgb(160, 190, 201),
0.52: rgb(196, 215, 222),
1.0: rgb(255, 255, 255)
}, width=width, height=height, size=k, center=c)
navyArray = createLinearGradient({
0: rgb(16, 25, 58),
0.03: rgb(255, 255, 255),
0.08: rgb(16, 25, 58),
0.2: rgb(16, 25, 58),
1: rgb(16, 25, 58)
}, width=width, height=height, size=k, center=c)
result = {
"downerSilver": Image.fromarray(np.uint8(downerSilverArray)).crop((0, 0, width, height)),
"gold": Image.fromarray(np.uint8(goldArray)).crop((0, 0, width, height)),
"red": Image.fromarray(np.uint8(redArray)).crop((0, 0, width, height)),
"strokeRed": Image.fromarray(np.uint8(strokeRedArray)).crop((0, 0, width, height)),
"silver2": Image.fromarray(np.uint8(silver2Array)).crop((0, 0, width, height)),
"strokeNavy": Image.fromarray(np.uint8(navyArray)).crop((0, 0, width, height)), # Width: 7
"baseStrokeBlack": Image.new("RGBA", (width, height), rgb(0, 0, 0)).crop((0, 0, width, height)), # Width: 17
"strokeBlack": Image.new("RGBA", (width, height), rgb(16, 25, 58)).crop((0, 0, width, height)), # Width: 17
"strokeWhite": Image.new("RGBA", (width, height), rgb(221, 221, 221)).crop((0, 0, width, height)), # Width: 8
"baseStrokeWhite": Image.new("RGBA", (width, height), rgb(255, 255, 255)).crop((0, 0, width, height))
# Width: 8
}
for k in result.keys():
result[k].putalpha(255)
return result
def genImage(word_a="5000兆円", word_b="欲しい!", default_width=1500, height=500,
bg="white", subset=250, default_base=None):
# width = max_width
k = 0.8 # 字体缩放系数
alpha = (0, 0, 0, 0)
leftmargin = 50
upmargin = 20
font_upper = ImageFont.truetype(upper_font_path, _round(height * 0.35 * k) + upmargin)
font_downer = ImageFont.truetype(downer_font_path, _round(height * 0.35 * k) + upmargin)
# Prepare Width
upper_width = max([default_width,
getTextWidth(word_a, font_upper, width=default_width,
height=_round(height / 2))]) + 300
downer_width = max([default_width,
getTextWidth(word_b, font_upper, width=default_width,
height=_round(height / 2))]) + 300
# Prepare base - Upper (if required)
if default_width == upper_width:
upper_base = default_base
else:
upper_base = genBaseImage(width=upper_width + leftmargin, height=_round(height / 2) + upmargin)
# Prepare base - Downer (if required)
downer_base = genBaseImage(width=downer_width + leftmargin, height=_round(height / 2) + upmargin)
# if default_width == downer_width:
# downer_base = default_base
# else:
# Prepare mask - Upper
upper_mask_base = Image.new("L", (upper_width + leftmargin, _round(height / 2) + upmargin), 0)
mask_img_upper = list()
upper_data = [
[
(4, 4), (4, 4), (0, 0), (0, 0), (2, -3), (0, -3), (0, -3), (0, -3)
],
[
22, 20, 16, 10, 6, 6, 3, 0
],
[
"baseStrokeBlack",
"downerSilver",
"baseStrokeBlack",
"gold",
"baseStrokeBlack",
"baseStrokeWhite",
"strokeRed",
"red",
]
]
for pos, stroke, color in zip(upper_data[0], upper_data[1], upper_data[2]):
mask_img_upper.append(upper_mask_base.copy())
mask_draw_upper = ImageDraw.Draw(mask_img_upper[-1])
mask_draw_upper.text((pos[0] + leftmargin, pos[1] + upmargin), word_a,
font=font_upper, fill=255,
stroke_width=_round(stroke * height / 500))
# Prepare mask - Downer
downer_mask_base = Image.new("L", (downer_width + leftmargin, _round(height / 2) + upmargin), 0)
mask_img_downer = list()
downer_data = [
[
(5, 2), (5, 2), (0, 0), (0, 0), (0, 0), (0, -3)
], [
22, 19, 17, 8, 7, 0
], [
"baseStrokeBlack",
"downerSilver",
"strokeBlack",
"strokeWhite",
"strokeNavy",
"silver2"
]
]
for pos, stroke, color in zip(downer_data[0], downer_data[1], downer_data[2]):
mask_img_downer.append(downer_mask_base.copy())
mask_draw_downer = ImageDraw.Draw(mask_img_downer[-1])
mask_draw_downer.text((pos[0] + leftmargin, pos[1] + upmargin), word_b,
font=font_downer, fill=255,
stroke_width=_round(stroke * height / 500))
# Draw text - Upper
img_upper = Image.new("RGBA", (upper_width, _round(height / 2)), alpha)
for i, (pos, stroke, color) in enumerate(zip(upper_data[0], upper_data[1], upper_data[2])):
img_upper_part = Image.new("RGBA", (upper_width + leftmargin, _round(height / 2) + upmargin), alpha)
img_upper_part.paste(upper_base[color], (0, 0), mask=mask_img_upper[i])
img_upper.alpha_composite(img_upper_part)
# Draw text - Downer
img_downer = Image.new("RGBA", (downer_width + leftmargin, _round(height / 2)), alpha)
for i, (pos, stroke, color) in enumerate(zip(downer_data[0], downer_data[1], downer_data[2])):
img_downer_part = Image.new("RGBA", (downer_width + leftmargin, _round(height / 2) + upmargin), alpha)
img_downer_part.paste(downer_base[color], (0, 0), mask=mask_img_downer[i])
img_downer.alpha_composite(img_downer_part)
# img_upper.save("./uptemp.png")
# img_downer.save("./downtemp.png")
# tilt image
tiltres = list()
angle = 20
for img in [img_upper, img_downer]:
dist = img.height * tan(radians(angle))
data = (1, tan(radians(angle)), -dist, 0, 1, 0)
imgc = img.crop((0, 0, img.width + dist, img.height))
imgt = imgc.transform(imgc.size, Image.AFFINE, data, Image.BILINEAR)
tiltres.append(imgt)
# finish
previmg = Image.new("RGBA", (max([upper_width, downer_width]) + leftmargin + subset + 100, height + upmargin + 100),
(255, 255, 255, 0))
# previmg.paste(tiltres[0], (0, 0))
# previmg.paste(tiltres[1], (subset, _round(height/2)))
previmg.alpha_composite(tiltres[0], (0, 50), (0, 0))
if upper_width > downer_width + subset:
previmg.alpha_composite(tiltres[1], (upper_width + subset - downer_width, _round(height / 2) + 50), (0, 0))
else:
previmg.alpha_composite(tiltres[1], (subset, _round(height / 2) + 50), (0, 0))
# previmg.save("./test1.png")
croprange = previmg.getbbox()
img = previmg.crop(croprange)
final_image = Image.new("RGB", (img.size[0] + 100, img.size[1] + 100), bg)
final_image.paste(img, (50, 50))
return final_image
| 40.950943
| 121
| 0.570217
|
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from decimal import Decimal, ROUND_HALF_UP
from math import radians, tan, cos, sin
from os import path
_round = lambda f, r=ROUND_HALF_UP: int(Decimal(str(f)).quantize(Decimal("0"), rounding=r))
rgb = lambda r, g, b: (r, g, b)
upper_font_path = path.join(path.dirname(__file__), 'NotoSansCJKSC-Black.ttf')
downer_font_path = path.join(path.dirname(__file__), 'NotoSerifCJKSC-Black.ttf')
def get_gradient_2d(start, stop, width, height, is_horizontal=False):
    """Return a (height, width) float array ramping linearly from start to stop.

    The ramp runs left-to-right when is_horizontal, top-to-bottom otherwise.
    """
    if is_horizontal:
        ramp = np.linspace(start, stop, width)
        return np.tile(ramp, (height, 1))
    ramp = np.linspace(start, stop, height)
    return np.tile(ramp, (width, 1)).T
def getTextWidth(text, font, width=100, height=500, recursive=False):
    """Measure the rendered pixel width of *text* in *font*.

    Renders into a canvas and widens it in 100px steps until the text fits
    with a step of slack; on the first pass the text must fit strictly
    inside width - step, on widened passes an exact fit is accepted.
    """
    step = 100
    while True:
        canvas = Image.new("L", (width, height))
        ImageDraw.Draw(canvas).text((0, 0), text, font=font, fill=255)
        right_edge = canvas.getbbox()[2]
        if right_edge < width - step or (recursive and right_edge == width - step):
            return right_edge
        width += step
        recursive = True
def get_gradient_3d(width, height, start_list, stop_list, is_horizontal_list=(False, False, False)):
    """Stack one 2-D gradient per channel into a (height, width, channels) array.

    Channel i ramps from start_list[i] to stop_list[i]; channels beyond the
    shortest input list are left at zero.
    """
    out = np.zeros((height, width, len(start_list)), dtype=float)
    for channel, (lo, hi, horiz) in enumerate(zip(start_list, stop_list, is_horizontal_list)):
        out[:, :, channel] = get_gradient_2d(lo, hi, width, height, horiz)
    return out
def createLinearGradient(steps, width, height, size=1, center=0.5):
    """Build a vertical multi-stop gradient as a (height, width, channels) array.

    *steps* maps positions in [0, 1] to colour tuples.  The gradient band
    occupies ``size`` of the height centred at ``center``; solid-colour
    margins using the first/last stop fill the space above and below.
    """
    top_margin = _round(height * (center - size / 2))
    bottom_margin = _round(height * (1 - center - size / 2))
    bands = np.zeros((0, width, len(steps[0])), dtype=float)
    positions = list(steps.keys())
    for idx, stop in enumerate(positions):
        if stop == 0:
            # Solid margin above the gradient, in the colour of the 0-stop.
            bands = np.vstack([bands, get_gradient_3d(width, _round(top_margin), steps[stop], steps[stop])])
            continue
        prev = positions[idx - 1]
        band_height = _round(height * size * (stop - prev))
        bands = np.vstack([bands, get_gradient_3d(width, band_height, steps[prev], steps[stop])])
        if stop == 1:
            # Solid margin below the gradient, in the colour of the 1-stop.
            bands = np.vstack([bands, get_gradient_3d(width, _round(bottom_margin), steps[stop], steps[stop])])
    return bands
def genBaseImage(width=1500, height=500):
    """Pre-render the gradient fill textures used for each text/stroke layer.

    Returns a dict mapping layer names (e.g. "gold", "downerSilver",
    "strokeRed") to fully opaque RGBA Pillow images of the given size.
    """
    # k/c: gradient band size and vertical center shared by all textures.
    k = 0.63
    c = 0.53
    downerSilverArray = createLinearGradient({
        0: rgb(0, 15, 36),
        0.10: rgb(255, 255, 255),
        0.18: rgb(55, 58, 59),
        0.25: rgb(55, 58, 59),
        0.5: rgb(200, 200, 200),
        0.75: rgb(55, 58, 59),
        0.85: rgb(25, 20, 31),
        0.91: rgb(240, 240, 240),
        0.95: rgb(166, 175, 194),
        1: rgb(50, 50, 50)
    }, width=width, height=height, size=k, center=c)
    goldArray = createLinearGradient({
        0: rgb(253, 241, 0),
        0.25: rgb(245, 253, 187),
        0.4: rgb(255, 255, 255),
        0.75: rgb(253, 219, 9),
        0.9: rgb(127, 53, 0),
        1: rgb(243, 196, 11)
    }, width=width, height=height, size=k, center=c)
    strokeRedArray = createLinearGradient({
        0: rgb(255, 100, 0),
        0.5: rgb(123, 0, 0),
        0.51: rgb(240, 0, 0),
        1: rgb(5, 0, 0)
    }, width=width, height=height, size=k, center=c)
    redArray = createLinearGradient({
        0: rgb(230, 0, 0),
        0.5: rgb(123, 0, 0),
        0.51: rgb(240, 0, 0),
        1: rgb(5, 0, 0)
    }, width=width, height=height, size=k, center=c)
    silver2Array = createLinearGradient({
        0: rgb(245, 246, 248),
        0.15: rgb(255, 255, 255),
        0.35: rgb(195, 213, 220),
        0.5: rgb(160, 190, 201),
        0.51: rgb(160, 190, 201),
        0.52: rgb(196, 215, 222),
        1.0: rgb(255, 255, 255)
    }, width=width, height=height, size=k, center=c)
    navyArray = createLinearGradient({
        0: rgb(16, 25, 58),
        0.03: rgb(255, 255, 255),
        0.08: rgb(16, 25, 58),
        0.2: rgb(16, 25, 58),
        1: rgb(16, 25, 58)
    }, width=width, height=height, size=k, center=c)
    # Gradient arrays become RGBA images; the flat-colour stroke layers are
    # created directly.  crop() keeps every texture at exactly (width, height).
    result = {
        "downerSilver": Image.fromarray(np.uint8(downerSilverArray)).crop((0, 0, width, height)),
        "gold": Image.fromarray(np.uint8(goldArray)).crop((0, 0, width, height)),
        "red": Image.fromarray(np.uint8(redArray)).crop((0, 0, width, height)),
        "strokeRed": Image.fromarray(np.uint8(strokeRedArray)).crop((0, 0, width, height)),
        "silver2": Image.fromarray(np.uint8(silver2Array)).crop((0, 0, width, height)),
        "strokeNavy": Image.fromarray(np.uint8(navyArray)).crop((0, 0, width, height)),
        "baseStrokeBlack": Image.new("RGBA", (width, height), rgb(0, 0, 0)).crop((0, 0, width, height)),
        "strokeBlack": Image.new("RGBA", (width, height), rgb(16, 25, 58)).crop((0, 0, width, height)),
        "strokeWhite": Image.new("RGBA", (width, height), rgb(221, 221, 221)).crop((0, 0, width, height)),
        "baseStrokeWhite": Image.new("RGBA", (width, height), rgb(255, 255, 255)).crop((0, 0, width, height))
    }
    # Force every texture fully opaque (note: this `k` shadows the float above).
    for k in result.keys():
        result[k].putalpha(255)
    return result
def genImage(word_a="5000兆円", word_b="欲しい!", default_width=1500, height=500,
             bg="white", subset=250, default_base=None):
    """Render the two-line "5000兆円欲しい!" style image and return it as RGB.

    word_a is the upper line, word_b the lower; each is drawn as a stack of
    stroked layers filled with the gradient textures from genBaseImage, then
    both lines are sheared and composited with an offset of *subset* pixels.
    default_base may carry pre-rendered textures for the default width.
    """
    k = 0.8
    alpha = (0, 0, 0, 0)
    leftmargin = 50
    upmargin = 20
    font_upper = ImageFont.truetype(upper_font_path, _round(height * 0.35 * k) + upmargin)
    font_downer = ImageFont.truetype(downer_font_path, _round(height * 0.35 * k) + upmargin)
    # Measure both lines so the gradient textures can be made wide enough.
    # NOTE(review): word_b is measured with font_upper, not font_downer —
    # looks unintentional, but the +300 slack hides it; confirm before changing.
    upper_width = max([default_width,
                       getTextWidth(word_a, font_upper, width=default_width,
                                    height=_round(height / 2))]) + 300
    downer_width = max([default_width,
                        getTextWidth(word_b, font_upper, width=default_width,
                                     height=_round(height / 2))]) + 300
    # Reuse the caller-supplied textures only when the default width suffices.
    if default_width == upper_width:
        upper_base = default_base
    else:
        upper_base = genBaseImage(width=upper_width + leftmargin, height=_round(height / 2) + upmargin)
    downer_base = genBaseImage(width=downer_width + leftmargin, height=_round(height / 2) + upmargin)
    # Build one greyscale mask per layer of the upper line.
    upper_mask_base = Image.new("L", (upper_width + leftmargin, _round(height / 2) + upmargin), 0)
    mask_img_upper = list()
    # Per-layer (x/y offset, stroke width at height 500, texture name),
    # ordered back-to-front.
    upper_data = [
        [
            (4, 4), (4, 4), (0, 0), (0, 0), (2, -3), (0, -3), (0, -3), (0, -3)
        ],
        [
            22, 20, 16, 10, 6, 6, 3, 0
        ],
        [
            "baseStrokeBlack",
            "downerSilver",
            "baseStrokeBlack",
            "gold",
            "baseStrokeBlack",
            "baseStrokeWhite",
            "strokeRed",
            "red",
        ]
    ]
    for pos, stroke, color in zip(upper_data[0], upper_data[1], upper_data[2]):
        mask_img_upper.append(upper_mask_base.copy())
        mask_draw_upper = ImageDraw.Draw(mask_img_upper[-1])
        mask_draw_upper.text((pos[0] + leftmargin, pos[1] + upmargin), word_a,
                             font=font_upper, fill=255,
                             stroke_width=_round(stroke * height / 500))
    # Same per-layer masks for the lower line.
    downer_mask_base = Image.new("L", (downer_width + leftmargin, _round(height / 2) + upmargin), 0)
    mask_img_downer = list()
    downer_data = [
        [
            (5, 2), (5, 2), (0, 0), (0, 0), (0, 0), (0, -3)
        ], [
            22, 19, 17, 8, 7, 0
        ], [
            "baseStrokeBlack",
            "downerSilver",
            "strokeBlack",
            "strokeWhite",
            "strokeNavy",
            "silver2"
        ]
    ]
    for pos, stroke, color in zip(downer_data[0], downer_data[1], downer_data[2]):
        mask_img_downer.append(downer_mask_base.copy())
        mask_draw_downer = ImageDraw.Draw(mask_img_downer[-1])
        mask_draw_downer.text((pos[0] + leftmargin, pos[1] + upmargin), word_b,
                              font=font_downer, fill=255,
                              stroke_width=_round(stroke * height / 500))
    # Composite upper-line layers: paste each texture through its mask.
    img_upper = Image.new("RGBA", (upper_width, _round(height / 2)), alpha)
    for i, (pos, stroke, color) in enumerate(zip(upper_data[0], upper_data[1], upper_data[2])):
        img_upper_part = Image.new("RGBA", (upper_width + leftmargin, _round(height / 2) + upmargin), alpha)
        img_upper_part.paste(upper_base[color], (0, 0), mask=mask_img_upper[i])
        img_upper.alpha_composite(img_upper_part)
    # Composite lower-line layers.
    img_downer = Image.new("RGBA", (downer_width + leftmargin, _round(height / 2)), alpha)
    for i, (pos, stroke, color) in enumerate(zip(downer_data[0], downer_data[1], downer_data[2])):
        img_downer_part = Image.new("RGBA", (downer_width + leftmargin, _round(height / 2) + upmargin), alpha)
        img_downer_part.paste(downer_base[color], (0, 0), mask=mask_img_downer[i])
        img_downer.alpha_composite(img_downer_part)
    # Shear both lines by `angle` degrees via an affine transform.
    tiltres = list()
    angle = 20
    for img in [img_upper, img_downer]:
        dist = img.height * tan(radians(angle))
        data = (1, tan(radians(angle)), -dist, 0, 1, 0)
        imgc = img.crop((0, 0, img.width + dist, img.height))
        imgt = imgc.transform(imgc.size, Image.AFFINE, data, Image.BILINEAR)
        tiltres.append(imgt)
    # Place the two lines on a transparent canvas, right-aligning the lower
    # line when the upper one is wider than lower + subset.
    previmg = Image.new("RGBA", (max([upper_width, downer_width]) + leftmargin + subset + 100, height + upmargin + 100),
                        (255, 255, 255, 0))
    previmg.alpha_composite(tiltres[0], (0, 50), (0, 0))
    if upper_width > downer_width + subset:
        previmg.alpha_composite(tiltres[1], (upper_width + subset - downer_width, _round(height / 2) + 50), (0, 0))
    else:
        previmg.alpha_composite(tiltres[1], (subset, _round(height / 2) + 50), (0, 0))
    # Trim to content and add a 50px border on the requested background.
    croprange = previmg.getbbox()
    img = previmg.crop(croprange)
    final_image = Image.new("RGB", (img.size[0] + 100, img.size[1] + 100), bg)
    final_image.paste(img, (50, 50))
    return final_image
| true
| true
|
f704b95c88cfabc1dac0e794e4484161cd8c29c9
| 5,893
|
py
|
Python
|
idiota/data.py
|
prakashsellathurai/idiota
|
e3cebb669bc5d04f30279c15939465aec2495eb6
|
[
"Apache-2.0"
] | null | null | null |
idiota/data.py
|
prakashsellathurai/idiota
|
e3cebb669bc5d04f30279c15939465aec2495eb6
|
[
"Apache-2.0"
] | null | null | null |
idiota/data.py
|
prakashsellathurai/idiota
|
e3cebb669bc5d04f30279c15939465aec2495eb6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Idiota object types
tree - A tree (directory listing) object that represents the directory structure in a tree object.
commit(ref) - A object that represents the changes in a single commit.
blob - A blob object that represents a file or a piece of data.
parent - A object that represents the ancestor to the commit in the DaG.
tag - A object that represents a meta info.
"""
__author__ = "prakashsellathurai"
__copyright__ = "Copyright 2021"
__version__ = "1.0.1"
__email__ = "prakashsellathurai@gmail.com"
import os
import hashlib
import shutil
import json
from collections import namedtuple
from contextlib import contextmanager
# Path of the active repository's metadata directory ('<repo>/.idiota').
# None until a caller sets it; change_git_dir() swaps it temporarily.
GIT_DIR = None
# Resolved state of a ref: `symbolic` is True when `value` names another ref
# (stored as 'ref: <name>') rather than an object id.
RefValue = namedtuple('RefValue', ['symbolic', 'value'])
@contextmanager
def change_git_dir(new_dir) -> "Iterator[None]":
    """
    Temporarily change the current git directory.
    Args:
        new_dir (str): directory whose '.idiota' subdirectory becomes GIT_DIR
    Yields:
        None: the previous GIT_DIR is restored when the context exits
    """
    # NOTE(review): restoration is not in a try/finally, so GIT_DIR stays
    # pointed at new_dir if the with-body raises — consider hardening.
    global GIT_DIR
    old_dir = GIT_DIR
    GIT_DIR = f'{new_dir}/.idiota'
    yield
    GIT_DIR = old_dir
def init() -> None:
    """Create the .idiota repository skeleton.

    The top-level directory may already exist; the objects store must not
    (re-initialising an existing repository raises FileExistsError).
    """
    os.makedirs(GIT_DIR, exist_ok=True)
    os.makedirs(f'{GIT_DIR}/objects')
def update_ref(ref, value, deref: bool = True) -> None:
    """Write a RefValue to the ref named *ref*.

    Symbolic refs are stored as 'ref: <target>'; with deref the write lands
    on the final ref in a symbolic chain.
    """
    target = _get_ref_internal(ref, deref)[0]
    assert value.value
    payload = f'ref: {value.value}' if value.symbolic else value.value
    ref_path = f'{GIT_DIR}/{target}'
    os.makedirs(os.path.dirname(ref_path), exist_ok=True)
    with open(ref_path, 'w') as ref_file:
        ref_file.write(payload)
def get_ref(ref, deref=True) -> RefValue:
    """Return the RefValue of *ref*, following symbolic refs when deref."""
    _, value = _get_ref_internal(ref, deref)
    return value
def delete_ref(ref, deref=True) -> None:
    """Remove the file backing *ref* (resolved through symbolic refs when deref)."""
    target = _get_ref_internal(ref, deref)[0]
    os.remove(f'{GIT_DIR}/{target}')
def _get_ref_internal(ref, deref) -> "tuple[str, RefValue]":
    """
    Resolve a ref, optionally following symbolic refs recursively.
    Args:
        ref (str): ref name
        deref (bool): dereference symbolic refs
    Returns:
        tuple[str, RefValue]: name of the last ref reached and its value
        (a missing ref file yields RefValue(symbolic=False, value=None))
    """
    ref_path = f'{GIT_DIR}/{ref}'
    value = None
    if os.path.isfile(ref_path):
        with open(ref_path) as f:
            value = f.read().strip()
    # Symbolic refs are stored as 'ref: <other-ref>'.
    symbolic = bool(value) and value.startswith('ref:')
    if symbolic:
        value = value.split(':', 1)[1].strip()
        if deref:
            # Recurse until a non-symbolic ref (an object id) is reached.
            return _get_ref_internal(value, deref=True)
    return ref, RefValue(symbolic=symbolic, value=value)
def iter_refs(prefix='', deref=True):
    """Yield (refname, RefValue) for every known ref.

    Covers HEAD, MERGE_HEAD and everything under refs/; entries whose name
    does not start with *prefix* or whose value is empty are skipped.
    """
    names = ['HEAD', 'MERGE_HEAD']
    for dirpath, _, filenames in os.walk(f'{GIT_DIR}/refs/'):
        rel = os.path.relpath(dirpath, GIT_DIR)
        names += [f'{rel}/{filename}' for filename in filenames]
    for name in names:
        if name.startswith(prefix):
            value = get_ref(name, deref=deref)
            if value.value:
                yield name, value
@contextmanager
def get_index():
    """Expose the JSON index file as a dict and write it back on exit.

    Mutations made by the with-body are persisted when the context closes
    normally.
    """
    index_path = f'{GIT_DIR}/index'
    index = {}
    if os.path.isfile(index_path):
        with open(index_path) as index_file:
            index = json.load(index_file)
    yield index
    with open(index_path, 'w') as index_file:
        json.dump(index, index_file)
def hash_object(data: bytes, type_: str = 'blob') -> str:
    """
    Hash an object and store it in the object database.
    uses: Sha1 algorithm
    Args:
        data (bytes): raw object payload
        type_ (str): object type tag ('blob', 'tree', 'commit', ...)
    Returns:
        str: object id (hex SHA-1 of the type-tagged payload)
    """
    # Type tag and payload are joined with a NUL byte; the id hashes both,
    # so identical bytes with different types get different ids.
    obj = type_.encode() + b'\x00' + data
    oid = hashlib.sha1(obj).hexdigest()
    with open(f'{GIT_DIR}/objects/{oid}', 'wb') as out:
        out.write(obj)
    return oid
def get_object(oid: str, expected: "str | None" = 'blob') -> bytes:
    """
    Get an object's payload from the object database.
    Args:
        oid (str): object id
        expected (str | None): expected type tag; None skips the type check
    Returns:
        bytes: object payload with the type header stripped
    Raises:
        AssertionError: if the stored type does not match ``expected``
    """
    with open(f'{GIT_DIR}/objects/{oid}', 'rb') as f:
        obj = f.read()
    # Stored layout is b'<type>\x00<content>'; split on the first NUL.
    first_null = obj.index(b'\x00')
    type_ = obj[:first_null].decode()
    content = obj[first_null + 1:]
    if expected is not None:
        assert type_ == expected, f'Expected {expected}, got {type_}'
    return content
def object_exists(oid: str) -> bool:
    """
    Check whether the object with the given id exists in the repository.
    Args:
        oid (str): object id
    Returns:
        bool: True if the object file exists
    """
    return os.path.isfile(f'{GIT_DIR}/objects/{oid}')
def fetch_object_if_missing(oid, remote_git_dir):
    """
    Copy an object from a remote repository unless it already exists locally.
    Args:
        oid (str): object id
        remote_git_dir (str): path to the remote repository's working directory
    Returns:
        None
    """
    if object_exists(oid):
        return
    # Repository metadata lives under '.idiota' everywhere else in this
    # module (see change_git_dir); the previous '/.ugit' suffix was a
    # leftover from the tutorial project this code derives from.
    remote_git_dir += '/.idiota'
    shutil.copy(f'{remote_git_dir}/objects/{oid}',
                f'{GIT_DIR}/objects/{oid}')
def push_object(oid, remote_git_dir):
    """
    Copy a local object into a remote repository's object store.
    Args:
        oid (str): object id
        remote_git_dir (str): path to the remote repository's working directory
    Returns:
        None
    """
    # Match the '.idiota' layout used by the rest of this module; the
    # previous '/.ugit' suffix was a leftover from the tutorial project.
    remote_git_dir += '/.idiota'
    shutil.copy(f'{GIT_DIR}/objects/{oid}',
                f'{remote_git_dir}/objects/{oid}')
| 22.32197
| 102
| 0.589004
|
__author__ = "prakashsellathurai"
__copyright__ = "Copyright 2021"
__version__ = "1.0.1"
__email__ = "prakashsellathurai@gmail.com"
import os
import hashlib
import shutil
import json
from collections import namedtuple
from contextlib import contextmanager
GIT_DIR = None
RefValue = namedtuple('RefValue', ['symbolic', 'value'])
@contextmanager
def change_git_dir(new_dir) -> None:
global GIT_DIR
old_dir = GIT_DIR
GIT_DIR = f'{new_dir}/.idiota'
yield
GIT_DIR = old_dir
def init() -> None:
os.makedirs(GIT_DIR, exist_ok=True)
os.makedirs(f'{GIT_DIR}/objects')
def update_ref(ref, value, deref: bool=True) -> None:
ref = _get_ref_internal(ref, deref)[0]
assert value.value
if value.symbolic:
value = f'ref: {value.value}'
else:
value = value.value
ref_path = f'{GIT_DIR}/{ref}'
os.makedirs(os.path.dirname(ref_path), exist_ok=True)
with open(ref_path, 'w') as f:
f.write(value)
def get_ref(ref, deref=True) -> RefValue:
return _get_ref_internal(ref, deref)[1]
def delete_ref(ref, deref=True)->None:
ref = _get_ref_internal(ref, deref)[0]
os.remove(f'{GIT_DIR}/{ref}')
def _get_ref_internal(ref, deref) -> RefValue:
ref_path = f'{GIT_DIR}/{ref}'
value = None
if os.path.isfile(ref_path):
with open(ref_path) as f:
value = f.read().strip()
symbolic = bool(value) and value.startswith('ref:')
if symbolic:
value = value.split(':', 1)[1].strip()
if deref:
return _get_ref_internal(value, deref=True)
return ref, RefValue(symbolic=symbolic, value=value)
def iter_refs(prefix='', deref=True):
refs = ['HEAD', 'MERGE_HEAD']
for root, _, filenames in os.walk(f'{GIT_DIR}/refs/'):
root = os.path.relpath(root, GIT_DIR)
refs.extend(f'{root}/{name}' for name in filenames)
for refname in refs:
if not refname.startswith(prefix):
continue
ref = get_ref(refname, deref=deref)
if ref.value:
yield refname, ref
@contextmanager
def get_index():
index = {}
if os.path.isfile(f'{GIT_DIR}/index'):
with open(f'{GIT_DIR}/index') as f:
index = json.load(f)
yield index
with open(f'{GIT_DIR}/index', 'w') as f:
json.dump(index, f)
def hash_object(data: object, type_='blob')-> str:
obj = type_.encode() + b'\x00' + data
oid = hashlib.sha1(obj).hexdigest()
with open(f'{GIT_DIR}/objects/{oid}', 'wb') as out:
out.write(obj)
return oid
def get_object(oid: str, expected='blob')-> object:
with open(f'{GIT_DIR}/objects/{oid}', 'rb') as f:
obj = f.read()
first_null = obj.index(b'\x00')
type_ = obj[:first_null].decode()
content = obj[first_null + 1:]
if expected is not None:
assert type_ == expected, f'Expected {expected}, got {type_}'
return content
def object_exists(oid: str) -> bool:
    """Return True if an object with id *oid* exists in the local object store."""
    return os.path.isfile(f'{GIT_DIR}/objects/{oid}')
def fetch_object_if_missing(oid, remote_git_dir):
    """Copy object *oid* from the remote repository unless it exists locally."""
    if object_exists(oid):
        return
    # Use the '.idiota' layout consistent with the rest of this module;
    # '/.ugit' was a leftover from the tutorial project this derives from.
    remote_git_dir += '/.idiota'
    shutil.copy(f'{remote_git_dir}/objects/{oid}',
                f'{GIT_DIR}/objects/{oid}')
def push_object(oid, remote_git_dir):
    """Copy local object *oid* into the remote repository's object store."""
    # Use the '.idiota' layout consistent with the rest of this module.
    remote_git_dir += '/.idiota'
    shutil.copy(f'{GIT_DIR}/objects/{oid}',
                f'{remote_git_dir}/objects/{oid}')
| true
| true
|
f704b968b6da8f6293a4c1d3f9417ce9c6268bfa
| 15,401
|
py
|
Python
|
libcxx/utils/libcxx/test/newformat.py
|
LevyForchh/llvm-project
|
904c0865dfaef343245d6496623f187c4cdc1b61
|
[
"Apache-2.0"
] | null | null | null |
libcxx/utils/libcxx/test/newformat.py
|
LevyForchh/llvm-project
|
904c0865dfaef343245d6496623f187c4cdc1b61
|
[
"Apache-2.0"
] | 9
|
2020-04-24T21:51:04.000Z
|
2020-11-06T01:04:09.000Z
|
libcxx/utils/libcxx/test/newformat.py
|
LevyForchh/llvm-project
|
904c0865dfaef343245d6496623f187c4cdc1b61
|
[
"Apache-2.0"
] | null | null | null |
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import lit
import os
import pipes
import re
import subprocess
class CxxStandardLibraryTest(lit.formats.TestFormat):
"""
Lit test format for the C++ Standard Library conformance test suite.
This test format is based on top of the ShTest format -- it basically
creates a shell script performing the right operations (compile/link/run)
based on the extension of the test file it encounters. It supports files
with the following extensions:
FOO.pass.cpp - Compiles, links and runs successfully
FOO.pass.mm - Same as .pass.cpp, but for Objective-C++
FOO.run.fail.cpp - Compiles and links successfully, but fails at runtime
FOO.compile.pass.cpp - Compiles successfully, link and run not attempted
FOO.compile.fail.cpp - Does not compile successfully
FOO.link.pass.cpp - Compiles and links successfully, run not attempted
FOO.link.fail.cpp - Compiles successfully, but fails to link
FOO.sh.<anything> - A builtin Lit Shell test
FOO.verify.cpp - Compiles with clang-verify
FOO.fail.cpp - Compiled with clang-verify if clang-verify is
supported, and equivalent to a .compile.fail.cpp
test otherwise. This is supported only for backwards
compatibility with the test suite.
Substitution requirements
===============================
The test format operates by assuming that each test's configuration provides
the following substitutions, which it will reuse in the shell scripts it
constructs:
%{cxx} - A command that can be used to invoke the compiler
%{compile_flags} - Flags to use when compiling a test case
%{link_flags} - Flags to use when linking a test case
%{flags} - Flags to use either when compiling or linking a test case
%{exec} - A command to prefix the execution of executables
Note that when building an executable (as opposed to only compiling a source
file), all three of %{flags}, %{compile_flags} and %{link_flags} will be used
in the same command line. In other words, the test format doesn't perform
separate compilation and linking steps in this case.
Additional supported directives
===============================
In addition to everything that's supported in Lit ShTests, this test format
also understands the following directives inside test files:
// FILE_DEPENDENCIES: file, directory, /path/to/file
This directive expresses that the test requires the provided files
or directories in order to run. An example is a test that requires
some test input stored in a data file. When a test file contains
such a directive, this test format will collect them and make them
available in a special %{file_dependencies} substitution. The intent
is that if one needs to e.g. execute tests on a remote host, the
%{exec} substitution could use %{file_dependencies} to know which
files and directories to copy to the remote host.
// ADDITIONAL_COMPILE_FLAGS: flag1, flag2, flag3
This directive will cause the provided flags to be added to the
%{compile_flags} substitution for the test that contains it. This
allows adding special compilation flags without having to use a
.sh.cpp test, which would be more powerful but perhaps overkill.
Additional provided substitutions and features
==============================================
The test format will define the following substitutions for use inside
tests:
%{verify}
This expands to the set of flags that must be passed to the
compiler in order to use Clang-verify, if that is supported.
verify-support
This Lit feature will be made available when the compiler supports
Clang-verify. This can be used to disable tests that require that
feature, such as `.verify.cpp` tests.
%{file_dependencies}
Expands to the list of files that this test depends on.
See FILE_DEPENDENCIES above.
%{build}
Expands to a command-line that builds the current source
file with the %{flags}, %{compile_flags} and %{link_flags}
substitutions, and that produces an executable named %t.exe.
%{run}
Equivalent to `%{exec} %t.exe`. This is intended to be used
in conjunction with the %{build} substitution.
Design notes
============
This test format never implicitly disables a type of test. For example,
we could be tempted to automatically mark `.verify.cpp` tests as
UNSUPPORTED when clang-verify isn't supported by the compiler. However,
this sort of logic has been known to cause tests to be ignored in the
past, so we favour having tests mark themselves as unsupported explicitly.
This test format still needs work in the following areas:
- It is unknown how well it works on Windows yet.
"""
def getTestsInDirectory(self, testSuite, pathInSuite, litConfig, localConfig):
SUPPORTED_SUFFIXES = ['[.]pass[.]cpp$', '[.]pass[.]mm$', '[.]run[.]fail[.]cpp$',
'[.]compile[.]pass[.]cpp$', '[.]compile[.]fail[.]cpp$',
'[.]link[.]pass[.]cpp$', '[.]link[.]fail[.]cpp$',
'[.]sh[.][^.]+$',
'[.]verify[.]cpp$',
'[.]fail[.]cpp$']
sourcePath = testSuite.getSourcePath(pathInSuite)
for filename in os.listdir(sourcePath):
# Ignore dot files and excluded tests.
if filename.startswith('.') or filename in localConfig.excludes:
continue
filepath = os.path.join(sourcePath, filename)
if not os.path.isdir(filepath):
if any([re.search(ext, filename) for ext in SUPPORTED_SUFFIXES]):
yield lit.Test.Test(testSuite, pathInSuite + (filename,), localConfig)
def _checkBaseSubstitutions(self, substitutions):
substitutions = [s for (s, _) in substitutions]
for s in ['%{cxx}', '%{compile_flags}', '%{link_flags}', '%{flags}', '%{exec}']:
assert s in substitutions, "Required substitution {} was not provided".format(s)
# Determine whether clang-verify is supported.
def _supportsVerify(self, test, litConfig):
command = "echo | %{cxx} -xc++ - -Werror -fsyntax-only -Xclang -verify-ignore-unexpected"
command = lit.TestRunner.applySubstitutions([command], test.config.substitutions,
recursion_limit=test.config.recursiveExpansionLimit)[0]
devNull = open(os.devnull, 'w')
result = subprocess.call(command, shell=True, stdout=devNull, stderr=devNull)
return result == 0
def _disableWithModules(self, test, litConfig):
with open(test.getSourcePath(), 'rb') as f:
contents = f.read()
return b'#define _LIBCPP_ASSERT' in contents
def execute(self, test, litConfig):
self._checkBaseSubstitutions(test.config.substitutions)
filename = test.path_in_suite[-1]
# TODO(ldionne): We currently disable tests that re-define _LIBCPP_ASSERT
# when we run with modules enabled. Instead, we should
# split the part that does a death test outside of the
# test, and only disable that part when modules are
# enabled.
if '-fmodules' in test.config.available_features and self._disableWithModules(test, litConfig):
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test {} is unsupported when modules are enabled')
if re.search('[.]sh[.][^.]+$', filename):
steps = [ ] # The steps are already in the script
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -c -o %t.o",
"%dbg(LINKED WITH) ! %{cxx} %t.o %{flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.run.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} ! %t.exe"
]
return self._executeShTest(test, litConfig, steps, fileDependencies=['%t.exe'])
elif filename.endswith('.verify.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only %{verify}"
]
return self._executeShTest(test, litConfig, steps)
# Make sure to check these ones last, since they will match other
# suffixes above too.
elif filename.endswith('.pass.cpp') or filename.endswith('.pass.mm'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} %t.exe"
]
return self._executeShTest(test, litConfig, steps, fileDependencies=['%t.exe'])
# This is like a .verify.cpp test when clang-verify is supported,
# otherwise it's like a .compile.fail.cpp test. This is only provided
# for backwards compatibility with the test suite.
elif filename.endswith('.fail.cpp'):
if self._supportsVerify(test, litConfig):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only %{verify}"
]
else:
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
else:
return lit.Test.Result(lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename))
# Utility function to add compile flags in lit.local.cfg files.
def addCompileFlags(self, config, *flags):
string = ' '.join(flags)
config.substitutions = [(s, x + ' ' + string) if s == '%{compile_flags}' else (s, x) for (s, x) in config.substitutions]
    # Modified version of lit.TestRunner.executeShTest to handle custom parsers correctly.
    def _executeShTest(self, test, litConfig, steps, fileDependencies=None):
        """Build a Lit shell script from *steps* plus the test's own directives and run it.

        steps: shell commands (with %-substitutions) prepended to whatever the
               test file itself declares via RUN:-style directives.
        fileDependencies: extra entries injected into %{file_dependencies}.
        Returns a lit.Test.Result.
        """
        if test.config.unsupported:
            return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test is unsupported')
        # Get the default substitutions
        tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
        useExternalSh = True
        substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir, tmpBase,
                                                               normalize_slashes=useExternalSh)
        # Add the %{build} and %{run} convenience substitutions
        substitutions.append(('%{build}', '%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe'))
        substitutions.append(('%{run}', '%{exec} %t.exe'))
        # Add the %{verify} substitution and the verify-support feature if Clang-verify is supported
        if self._supportsVerify(test, litConfig):
            test.config.available_features.add('verify-support')
            substitutions.append(('%{verify}', '-Xclang -verify -Xclang -verify-ignore-unexpected=note -ferror-limit=0'))
        # Parse the test file, including custom directives
        additionalCompileFlags = []
        fileDependencies = fileDependencies or []
        parsers = [
            lit.TestRunner.IntegratedTestKeywordParser('FILE_DEPENDENCIES:',
                                                       lit.TestRunner.ParserKind.LIST,
                                                       initial_value=fileDependencies),
            lit.TestRunner.IntegratedTestKeywordParser('ADDITIONAL_COMPILE_FLAGS:',
                                                       lit.TestRunner.ParserKind.LIST,
                                                       initial_value=additionalCompileFlags)
        ]
        script = list(steps)
        parsed = lit.TestRunner.parseIntegratedTestScript(test, additional_parsers=parsers,
                                                          require_script=not script)
        if isinstance(parsed, lit.Test.Result):
            return parsed
        script += parsed
        # Add compile flags specified with ADDITIONAL_COMPILE_FLAGS.
        substitutions = [(s, x + ' ' + ' '.join(additionalCompileFlags)) if s == '%{compile_flags}'
                         else (s, x) for (s, x) in substitutions]
        # Perform substitutions inside FILE_DEPENDENCIES lines (or injected dependencies).
        # This allows using variables like %t in file dependencies. Also note that we really
        # need to resolve %{file_dependencies} now, because otherwise we won't be able to
        # make all paths absolute below.
        fileDependencies = lit.TestRunner.applySubstitutions(fileDependencies, substitutions,
                                                             recursion_limit=test.config.recursiveExpansionLimit)
        # Add the %{file_dependencies} substitution before we perform substitutions
        # inside the script.
        testDir = os.path.dirname(test.getSourcePath())
        fileDependencies = [f if os.path.isabs(f) else os.path.join(testDir, f) for f in fileDependencies]
        substitutions.append(('%{file_dependencies}', ' '.join(map(pipes.quote, fileDependencies))))
        # Perform substitution in the script itself.
        script = lit.TestRunner.applySubstitutions(script, substitutions,
                                                   recursion_limit=test.config.recursiveExpansionLimit)
        if litConfig.noExecute:
            return lit.Test.Result(lit.Test.PASS)
        else:
            return lit.TestRunner._runShTest(test, litConfig, useExternalSh, script, tmpBase)
| 50.661184
| 128
| 0.60087
|
mport lit
import os
import pipes
import re
import subprocess
class CxxStandardLibraryTest(lit.formats.TestFormat):
def getTestsInDirectory(self, testSuite, pathInSuite, litConfig, localConfig):
SUPPORTED_SUFFIXES = ['[.]pass[.]cpp$', '[.]pass[.]mm$', '[.]run[.]fail[.]cpp$',
'[.]compile[.]pass[.]cpp$', '[.]compile[.]fail[.]cpp$',
'[.]link[.]pass[.]cpp$', '[.]link[.]fail[.]cpp$',
'[.]sh[.][^.]+$',
'[.]verify[.]cpp$',
'[.]fail[.]cpp$']
sourcePath = testSuite.getSourcePath(pathInSuite)
for filename in os.listdir(sourcePath):
if filename.startswith('.') or filename in localConfig.excludes:
continue
filepath = os.path.join(sourcePath, filename)
if not os.path.isdir(filepath):
if any([re.search(ext, filename) for ext in SUPPORTED_SUFFIXES]):
yield lit.Test.Test(testSuite, pathInSuite + (filename,), localConfig)
def _checkBaseSubstitutions(self, substitutions):
substitutions = [s for (s, _) in substitutions]
for s in ['%{cxx}', '%{compile_flags}', '%{link_flags}', '%{flags}', '%{exec}']:
assert s in substitutions, "Required substitution {} was not provided".format(s)
def _supportsVerify(self, test, litConfig):
command = "echo | %{cxx} -xc++ - -Werror -fsyntax-only -Xclang -verify-ignore-unexpected"
command = lit.TestRunner.applySubstitutions([command], test.config.substitutions,
recursion_limit=test.config.recursiveExpansionLimit)[0]
devNull = open(os.devnull, 'w')
result = subprocess.call(command, shell=True, stdout=devNull, stderr=devNull)
return result == 0
def _disableWithModules(self, test, litConfig):
with open(test.getSourcePath(), 'rb') as f:
contents = f.read()
return b'#define _LIBCPP_ASSERT' in contents
def execute(self, test, litConfig):
self._checkBaseSubstitutions(test.config.substitutions)
filename = test.path_in_suite[-1]
if '-fmodules' in test.config.available_features and self._disableWithModules(test, litConfig):
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test {} is unsupported when modules are enabled')
if re.search('[.]sh[.][^.]+$', filename):
steps = [ ]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -c -o %t.o",
"%dbg(LINKED WITH) ! %{cxx} %t.o %{flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.run.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} ! %t.exe"
]
return self._executeShTest(test, litConfig, steps, fileDependencies=['%t.exe'])
elif filename.endswith('.verify.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only %{verify}"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.pass.cpp') or filename.endswith('.pass.mm'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} %t.exe"
]
return self._executeShTest(test, litConfig, steps, fileDependencies=['%t.exe'])
# for backwards compatibility with the test suite.
elif filename.endswith('.fail.cpp'):
if self._supportsVerify(test, litConfig):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only %{verify}"
]
else:
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
else:
return lit.Test.Result(lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename))
# Utility function to add compile flags in lit.local.cfg files.
def addCompileFlags(self, config, *flags):
string = ' '.join(flags)
config.substitutions = [(s, x + ' ' + string) if s == '%{compile_flags}' else (s, x) for (s, x) in config.substitutions]
# Modified version of lit.TestRunner.executeShTest to handle custom parsers correctly.
def _executeShTest(self, test, litConfig, steps, fileDependencies=None):
if test.config.unsupported:
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test is unsupported')
# Get the default substitutions
tmpDir, tmpBase = lit.TestRunner.getTempPaths(test)
useExternalSh = True
substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir, tmpBase,
normalize_slashes=useExternalSh)
# Add the %{build} and %{run} convenience substitutions
substitutions.append(('%{build}', '%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe'))
substitutions.append(('%{run}', '%{exec} %t.exe'))
# Add the %{verify} substitution and the verify-support feature if Clang-verify is supported
if self._supportsVerify(test, litConfig):
test.config.available_features.add('verify-support')
substitutions.append(('%{verify}', '-Xclang -verify -Xclang -verify-ignore-unexpected=note -ferror-limit=0'))
# Parse the test file, including custom directives
additionalCompileFlags = []
fileDependencies = fileDependencies or []
parsers = [
lit.TestRunner.IntegratedTestKeywordParser('FILE_DEPENDENCIES:',
lit.TestRunner.ParserKind.LIST,
initial_value=fileDependencies),
lit.TestRunner.IntegratedTestKeywordParser('ADDITIONAL_COMPILE_FLAGS:',
lit.TestRunner.ParserKind.LIST,
initial_value=additionalCompileFlags)
]
script = list(steps)
parsed = lit.TestRunner.parseIntegratedTestScript(test, additional_parsers=parsers,
require_script=not script)
if isinstance(parsed, lit.Test.Result):
return parsed
script += parsed
# Add compile flags specified with ADDITIONAL_COMPILE_FLAGS.
substitutions = [(s, x + ' ' + ' '.join(additionalCompileFlags)) if s == '%{compile_flags}'
else (s, x) for (s, x) in substitutions]
# Perform substitutions inside FILE_DEPENDENCIES lines (or injected dependencies).
# This allows using variables like %t in file dependencies. Also note that we really
# need to resolve %{file_dependencies} now, because otherwise we won't be able to
fileDependencies = lit.TestRunner.applySubstitutions(fileDependencies, substitutions,
recursion_limit=test.config.recursiveExpansionLimit)
testDir = os.path.dirname(test.getSourcePath())
fileDependencies = [f if os.path.isabs(f) else os.path.join(testDir, f) for f in fileDependencies]
substitutions.append(('%{file_dependencies}', ' '.join(map(pipes.quote, fileDependencies))))
script = lit.TestRunner.applySubstitutions(script, substitutions,
recursion_limit=test.config.recursiveExpansionLimit)
if litConfig.noExecute:
return lit.Test.Result(lit.Test.PASS)
else:
return lit.TestRunner._runShTest(test, litConfig, useExternalSh, script, tmpBase)
| true
| true
|
f704b9fd1df872c794b4f8b835d03462af5fb8c8
| 3,282
|
py
|
Python
|
UpWork_Projects/Will Farell_Spiders/innerwest/innerwest/settings.py
|
SurendraTamang/Web-Scrapping
|
2bb60cce9010b4b68f5c11bf295940832bb5df50
|
[
"MIT"
] | null | null | null |
UpWork_Projects/Will Farell_Spiders/innerwest/innerwest/settings.py
|
SurendraTamang/Web-Scrapping
|
2bb60cce9010b4b68f5c11bf295940832bb5df50
|
[
"MIT"
] | null | null | null |
UpWork_Projects/Will Farell_Spiders/innerwest/innerwest/settings.py
|
SurendraTamang/Web-Scrapping
|
2bb60cce9010b4b68f5c11bf295940832bb5df50
|
[
"MIT"
] | 1
|
2022-01-18T17:15:51.000Z
|
2022-01-18T17:15:51.000Z
|
# -*- coding: utf-8 -*-
# Scrapy settings for innerwest project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'Googlebot'
SPIDER_MODULES = ['innerwest.spiders']
NEWSPIDER_MODULE = 'innerwest.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'innerwest (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'innerwest.middlewares.InnerwestSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'scrapy_selenium.SeleniumMiddleware': 800,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
# 'innerwest.pipelines.InnerwestPipeline': 300,
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
SELENIUM_DRIVER_NAME = 'chrome'
SELENIUM_DRIVER_EXECUTABLE_PATH = "../chromedriver"
SELENIUM_DRIVER_ARGUMENTS=['--headless']
#SELENIUM_DRIVER_ARGUMENTS=[]
FEED_EXPORT_ENCODING = 'utf-8'
| 34.1875
| 103
| 0.777575
|
BOT_NAME = 'Googlebot'
SPIDER_MODULES = ['innerwest.spiders']
NEWSPIDER_MODULE = 'innerwest.spiders'
ROBOTSTXT_OBEY = False
DOWNLOADER_MIDDLEWARES = {
'scrapy_selenium.SeleniumMiddleware': 800,
}
'chrome'
SELENIUM_DRIVER_EXECUTABLE_PATH = "../chromedriver"
SELENIUM_DRIVER_ARGUMENTS=['--headless']
FEED_EXPORT_ENCODING = 'utf-8'
| true
| true
|
f704bd5d6983569ac694551a583366b858070681
| 4,151
|
py
|
Python
|
legged_gym/envs/anymal_c/anymal.py
|
mcx/legged_gym
|
dd6a6892e54c4f111a203319c05da8dca9595ae1
|
[
"BSD-3-Clause"
] | 159
|
2021-10-30T02:53:14.000Z
|
2022-03-31T20:59:20.000Z
|
legged_gym/envs/anymal_c/anymal.py
|
mcx/legged_gym
|
dd6a6892e54c4f111a203319c05da8dca9595ae1
|
[
"BSD-3-Clause"
] | 13
|
2021-11-01T06:57:56.000Z
|
2022-03-19T07:16:47.000Z
|
legged_gym/envs/anymal_c/anymal.py
|
mcx/legged_gym
|
dd6a6892e54c4f111a203319c05da8dca9595ae1
|
[
"BSD-3-Clause"
] | 49
|
2021-11-01T03:00:38.000Z
|
2022-03-31T21:00:30.000Z
|
# SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2021 ETH Zurich, Nikita Rudin
from time import time
import numpy as np
import os
from isaacgym.torch_utils import *
from isaacgym import gymtorch, gymapi, gymutil
import torch
# from torch.tensor import Tensor
from typing import Tuple, Dict
from legged_gym.envs import LeggedRobot
from legged_gym import LEGGED_GYM_ROOT_DIR
from .mixed_terrains.anymal_c_rough_config import AnymalCRoughCfg
class Anymal(LeggedRobot):
cfg : AnymalCRoughCfg
def __init__(self, cfg, sim_params, physics_engine, sim_device, headless):
super().__init__(cfg, sim_params, physics_engine, sim_device, headless)
# load actuator network
if self.cfg.control.use_actuator_network:
actuator_network_path = self.cfg.control.actuator_net_file.format(LEGGED_GYM_ROOT_DIR=LEGGED_GYM_ROOT_DIR)
self.actuator_network = torch.jit.load(actuator_network_path).to(self.device)
def reset_idx(self, env_ids):
super().reset_idx(env_ids)
# Additionaly empty actuator network hidden states
self.sea_hidden_state_per_env[:, env_ids] = 0.
self.sea_cell_state_per_env[:, env_ids] = 0.
def _init_buffers(self):
super()._init_buffers()
# Additionally initialize actuator network hidden state tensors
self.sea_input = torch.zeros(self.num_envs*self.num_actions, 1, 2, device=self.device, requires_grad=False)
self.sea_hidden_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)
self.sea_cell_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)
self.sea_hidden_state_per_env = self.sea_hidden_state.view(2, self.num_envs, self.num_actions, 8)
self.sea_cell_state_per_env = self.sea_cell_state.view(2, self.num_envs, self.num_actions, 8)
def _compute_torques(self, actions):
# Choose between pd controller and actuator network
if self.cfg.control.use_actuator_network:
with torch.inference_mode():
self.sea_input[:, 0, 0] = (actions * self.cfg.control.action_scale + self.default_dof_pos - self.dof_pos).flatten()
self.sea_input[:, 0, 1] = self.dof_vel.flatten()
torques, (self.sea_hidden_state[:], self.sea_cell_state[:]) = self.actuator_network(self.sea_input, (self.sea_hidden_state, self.sea_cell_state))
return torques
else:
# pd controller
return super()._compute_torques(actions)
| 51.246914
| 161
| 0.747049
|
from time import time
import numpy as np
import os
from isaacgym.torch_utils import *
from isaacgym import gymtorch, gymapi, gymutil
import torch
from typing import Tuple, Dict
from legged_gym.envs import LeggedRobot
from legged_gym import LEGGED_GYM_ROOT_DIR
from .mixed_terrains.anymal_c_rough_config import AnymalCRoughCfg
class Anymal(LeggedRobot):
cfg : AnymalCRoughCfg
def __init__(self, cfg, sim_params, physics_engine, sim_device, headless):
super().__init__(cfg, sim_params, physics_engine, sim_device, headless)
if self.cfg.control.use_actuator_network:
actuator_network_path = self.cfg.control.actuator_net_file.format(LEGGED_GYM_ROOT_DIR=LEGGED_GYM_ROOT_DIR)
self.actuator_network = torch.jit.load(actuator_network_path).to(self.device)
def reset_idx(self, env_ids):
super().reset_idx(env_ids)
self.sea_hidden_state_per_env[:, env_ids] = 0.
self.sea_cell_state_per_env[:, env_ids] = 0.
def _init_buffers(self):
super()._init_buffers()
self.sea_input = torch.zeros(self.num_envs*self.num_actions, 1, 2, device=self.device, requires_grad=False)
self.sea_hidden_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)
self.sea_cell_state = torch.zeros(2, self.num_envs*self.num_actions, 8, device=self.device, requires_grad=False)
self.sea_hidden_state_per_env = self.sea_hidden_state.view(2, self.num_envs, self.num_actions, 8)
self.sea_cell_state_per_env = self.sea_cell_state.view(2, self.num_envs, self.num_actions, 8)
def _compute_torques(self, actions):
if self.cfg.control.use_actuator_network:
with torch.inference_mode():
self.sea_input[:, 0, 0] = (actions * self.cfg.control.action_scale + self.default_dof_pos - self.dof_pos).flatten()
self.sea_input[:, 0, 1] = self.dof_vel.flatten()
torques, (self.sea_hidden_state[:], self.sea_cell_state[:]) = self.actuator_network(self.sea_input, (self.sea_hidden_state, self.sea_cell_state))
return torques
else:
return super()._compute_torques(actions)
| true
| true
|
f704bde830d47e285d80fa6723a8d574657b41c6
| 2,358
|
py
|
Python
|
clcd/rnn_train/RNNConfig.py
|
felipessalvatore/CLCD
|
422f9e93d49e4fcfd8048ad5b36898f8713d0370
|
[
"MIT"
] | 4
|
2020-02-06T19:35:13.000Z
|
2021-09-04T10:29:11.000Z
|
clcd/rnn_train/RNNConfig.py
|
felipessalvatore/CLCD
|
422f9e93d49e4fcfd8048ad5b36898f8713d0370
|
[
"MIT"
] | null | null | null |
clcd/rnn_train/RNNConfig.py
|
felipessalvatore/CLCD
|
422f9e93d49e4fcfd8048ad5b36898f8713d0370
|
[
"MIT"
] | null | null | null |
class RNNConfig(object):
"""
Holds logistic regression model hyperparams.
:param height: image height
:type heights: int
:param width: image width
:type width: int
:param channels: image channels
:type channels: int
:param batch_size: batch size for training
:type batch_size: int
:param epochs: number of epochs
:type epochs: int
:param save_step: when step % save_step == 0, the model
parameters are saved.
:type save_step: int
:param learning_rate: learning rate for the optimizer
:type learning_rate: float
:param momentum: momentum param
:type momentum: float
"""
def __init__(self,
vocab_size=25000,
batch_size=32,
embedding_dim=100,
rnn_dim=100,
output_dim=2,
layers=1,
epochs=8,
learning_rate=0.01,
momentum=0.2,
bidirectional=False,
opt="sgd",
drop=0):
self.vocab_size = vocab_size
self.batch_size = batch_size
self.embedding_dim = embedding_dim
self.rnn_dim = rnn_dim
self.layers = layers
self.output_dim = output_dim
self.epochs = epochs
self.learning_rate = learning_rate
self.momentum = momentum
self.bidirectional = bidirectional
self.opt = opt
self.drop = drop
def __str__(self):
"""
Get all attributs values.
:return: all hyperparams as a string
:rtype: str
"""
status = "vocab_size = {}\n".format(self.vocab_size)
status += "batch_size = {}\n".format(self.batch_size)
status += "embedding_dim = {}\n".format(self.embedding_dim)
status += "rnn_dim = {}\n".format(self.rnn_dim)
status += "layers = {}\n".format(self.layers)
status += "output_dim = {}\n".format(self.output_dim)
status += "epochs = {}\n".format(self.epochs)
status += "learning_rate = {}\n".format(self.learning_rate)
status += "momentum = {}\n".format(self.momentum)
status += "bidirectional = {}\n".format(self.bidirectional)
status += "opt = {}\n".format(self.opt)
status += "drop = {}\n".format(self.drop)
return status
| 34.173913
| 67
| 0.56743
|
class RNNConfig(object):
def __init__(self,
vocab_size=25000,
batch_size=32,
embedding_dim=100,
rnn_dim=100,
output_dim=2,
layers=1,
epochs=8,
learning_rate=0.01,
momentum=0.2,
bidirectional=False,
opt="sgd",
drop=0):
self.vocab_size = vocab_size
self.batch_size = batch_size
self.embedding_dim = embedding_dim
self.rnn_dim = rnn_dim
self.layers = layers
self.output_dim = output_dim
self.epochs = epochs
self.learning_rate = learning_rate
self.momentum = momentum
self.bidirectional = bidirectional
self.opt = opt
self.drop = drop
def __str__(self):
status = "vocab_size = {}\n".format(self.vocab_size)
status += "batch_size = {}\n".format(self.batch_size)
status += "embedding_dim = {}\n".format(self.embedding_dim)
status += "rnn_dim = {}\n".format(self.rnn_dim)
status += "layers = {}\n".format(self.layers)
status += "output_dim = {}\n".format(self.output_dim)
status += "epochs = {}\n".format(self.epochs)
status += "learning_rate = {}\n".format(self.learning_rate)
status += "momentum = {}\n".format(self.momentum)
status += "bidirectional = {}\n".format(self.bidirectional)
status += "opt = {}\n".format(self.opt)
status += "drop = {}\n".format(self.drop)
return status
| true
| true
|
f704bec2aa2f0c4a7762297c633a64db6aef42a5
| 231
|
py
|
Python
|
Problems/HackerRank/weird.py
|
kvlizhvn/Lab_7
|
e7f7f8da2b5f52a426bb55981594fb8ddcbd127a
|
[
"MIT"
] | 1
|
2022-02-18T15:44:46.000Z
|
2022-02-18T15:44:46.000Z
|
Problems/HackerRank/weird.py
|
kvlizhvn/Lab_7
|
e7f7f8da2b5f52a426bb55981594fb8ddcbd127a
|
[
"MIT"
] | null | null | null |
Problems/HackerRank/weird.py
|
kvlizhvn/Lab_7
|
e7f7f8da2b5f52a426bb55981594fb8ddcbd127a
|
[
"MIT"
] | 1
|
2021-03-26T13:55:52.000Z
|
2021-03-26T13:55:52.000Z
|
if __name__ == '__main__':
n = int(input().strip())
if n % 2 != 0:
print("Weird")
elif 2 <= n <= 5:
print("Not Weird")
elif 6 <= n <= 20:
print("Weird")
else:
print("Not Weird")
| 19.25
| 28
| 0.441558
|
if __name__ == '__main__':
n = int(input().strip())
if n % 2 != 0:
print("Weird")
elif 2 <= n <= 5:
print("Not Weird")
elif 6 <= n <= 20:
print("Weird")
else:
print("Not Weird")
| true
| true
|
f704bf1e7920df37e01acd48750fcd9d28294a7e
| 1,913
|
py
|
Python
|
tools/client.py
|
gitter-badger/electrs
|
a797a3864e1215a671b1d6a4efa5268c96d3f55d
|
[
"MIT"
] | null | null | null |
tools/client.py
|
gitter-badger/electrs
|
a797a3864e1215a671b1d6a4efa5268c96d3f55d
|
[
"MIT"
] | null | null | null |
tools/client.py
|
gitter-badger/electrs
|
a797a3864e1215a671b1d6a4efa5268c96d3f55d
|
[
"MIT"
] | null | null | null |
import hashlib
import json
import sys
from logbook import Logger, StreamHandler
from pycoin.coins.bitcoin.networks import BitcoinMainnet
import pycoin.ui.key_from_text
import pycoin.key
import socket
script_for_address = BitcoinMainnet.ui.script_for_address
log = Logger(__name__)
class Connection:
def __init__(self, addr):
self.s = socket.create_connection(addr)
self.f = self.s.makefile('r')
self.id = 0
def call(self, method, *args):
req = {
'id': self.id,
'method': method,
'params': list(args),
}
msg = json.dumps(req) + '\n'
self.s.sendall(msg.encode('ascii'))
return json.loads(self.f.readline())
def main():
conn = Connection(('localhost', 50001))
xpub, = sys.argv[1:]
total = 0
k = pycoin.ui.key_from_text.key_from_text(xpub)
for change in (0, 1):
empty = 0
for n in range(100):
address = k.subkey(change).subkey(n).address()
script = script_for_address(address)
script_hash = hashlib.sha256(script).digest()
log.debug('{}', conn.call('blockchain.scripthash.get_history',
script_hash[::-1].hex()))
reply = conn.call('blockchain.scripthash.get_balance',
script_hash[::-1].hex())
result = reply['result']
confirmed = result['confirmed'] / 1e8
total += confirmed
if confirmed:
log.info('{}/{} => {} has {:11.8f} BTC',
change, n, address, confirmed)
empty = 0
else:
empty += 1
if empty >= 10:
break
log.info('total balance: {} BTC', total)
if __name__ == '__main__':
with StreamHandler(sys.stderr, level='INFO').applicationbound():
main()
| 28.984848
| 74
| 0.547308
|
import hashlib
import json
import sys
from logbook import Logger, StreamHandler
from pycoin.coins.bitcoin.networks import BitcoinMainnet
import pycoin.ui.key_from_text
import pycoin.key
import socket
script_for_address = BitcoinMainnet.ui.script_for_address
log = Logger(__name__)
class Connection:
def __init__(self, addr):
self.s = socket.create_connection(addr)
self.f = self.s.makefile('r')
self.id = 0
def call(self, method, *args):
req = {
'id': self.id,
'method': method,
'params': list(args),
}
msg = json.dumps(req) + '\n'
self.s.sendall(msg.encode('ascii'))
return json.loads(self.f.readline())
def main():
conn = Connection(('localhost', 50001))
xpub, = sys.argv[1:]
total = 0
k = pycoin.ui.key_from_text.key_from_text(xpub)
for change in (0, 1):
empty = 0
for n in range(100):
address = k.subkey(change).subkey(n).address()
script = script_for_address(address)
script_hash = hashlib.sha256(script).digest()
log.debug('{}', conn.call('blockchain.scripthash.get_history',
script_hash[::-1].hex()))
reply = conn.call('blockchain.scripthash.get_balance',
script_hash[::-1].hex())
result = reply['result']
confirmed = result['confirmed'] / 1e8
total += confirmed
if confirmed:
log.info('{}/{} => {} has {:11.8f} BTC',
change, n, address, confirmed)
empty = 0
else:
empty += 1
if empty >= 10:
break
log.info('total balance: {} BTC', total)
if __name__ == '__main__':
with StreamHandler(sys.stderr, level='INFO').applicationbound():
main()
| true
| true
|
f704c0b981e5c808fcf2d361794e61ab733573e8
| 400
|
py
|
Python
|
backend/auth/urls.py
|
Aquinology/Inel_Music
|
15fe344e9932389df09e6219d2b1ae030cfd1219
|
[
"MIT"
] | null | null | null |
backend/auth/urls.py
|
Aquinology/Inel_Music
|
15fe344e9932389df09e6219d2b1ae030cfd1219
|
[
"MIT"
] | null | null | null |
backend/auth/urls.py
|
Aquinology/Inel_Music
|
15fe344e9932389df09e6219d2b1ae030cfd1219
|
[
"MIT"
] | 1
|
2021-02-18T11:20:34.000Z
|
2021-02-18T11:20:34.000Z
|
from django.urls import path
from .views import MyObtainTokenPairView, RegisterView
from rest_framework_simplejwt.views import TokenRefreshView
urlpatterns = [
path('login/', MyObtainTokenPairView.as_view(), name='token_obtain_pair'),
path('login/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('register/', RegisterView.as_view(), name='auth_register'),
]
| 36.363636
| 79
| 0.75
|
from django.urls import path
from .views import MyObtainTokenPairView, RegisterView
from rest_framework_simplejwt.views import TokenRefreshView
urlpatterns = [
path('login/', MyObtainTokenPairView.as_view(), name='token_obtain_pair'),
path('login/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('register/', RegisterView.as_view(), name='auth_register'),
]
| true
| true
|
f704c0f9b4488488f3aae9f679bb84275d8e52d4
| 11,405
|
py
|
Python
|
src/core/src/core_logic/PackageFilter.py
|
Azure/LinuxPatchExtension
|
6af622afb4298805bdf47328d6bc66a785f7166b
|
[
"Apache-2.0"
] | 4
|
2020-06-01T14:36:30.000Z
|
2021-08-24T16:55:50.000Z
|
src/core/src/core_logic/PackageFilter.py
|
Azure/LinuxPatchExtension
|
6af622afb4298805bdf47328d6bc66a785f7166b
|
[
"Apache-2.0"
] | 34
|
2020-09-11T17:20:42.000Z
|
2022-03-28T14:08:44.000Z
|
src/core/src/core_logic/PackageFilter.py
|
Azure/LinuxPatchExtension
|
6af622afb4298805bdf47328d6bc66a785f7166b
|
[
"Apache-2.0"
] | 1
|
2020-12-28T10:13:20.000Z
|
2020-12-28T10:13:20.000Z
|
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
"""Package Filter"""
from core.src.bootstrap.Constants import Constants
import fnmatch
class PackageFilter(object):
"""implements the Package filtering logic"""
def __init__(self, execution_config, composite_logger):
self.execution_config = execution_config
self.composite_logger = composite_logger
# Exclusions - note: version based exclusion is not supported
self.global_excluded_packages = self.sanitize_str_to_list(self.execution_config.global_exclusion_list)
self.installation_excluded_package_masks = self.execution_config.excluded_package_name_mask_list
self.installation_excluded_packages, self.installation_excluded_package_versions = self.get_packages_and_versions_from_masks(self.installation_excluded_package_masks)
# Inclusions - note: version based inclusion is optionally supported
self.installation_included_package_masks = self.execution_config.included_package_name_mask_list
self.installation_included_packages, self.installation_included_package_versions = self.get_packages_and_versions_from_masks(self.installation_included_package_masks)
self.installation_included_classifications = [] if self.execution_config.included_classifications_list is None else self.execution_config.included_classifications_list
# Neutralize global excluded packages, if customer explicitly includes the package
packages_to_clear_from_global = []
for package in self.global_excluded_packages:
if self.check_for_explicit_inclusion(package):
self.composite_logger.log_debug('Removing package from global exclusion list: ' + package)
packages_to_clear_from_global.append(package)
self.global_excluded_packages = [x for x in self.global_excluded_packages if x not in packages_to_clear_from_global]
# Logging
self.composite_logger.log("\nAzure globally-excluded packages: " + str(self.global_excluded_packages))
self.composite_logger.log("Included package classifications: " + ', '.join(self.installation_included_classifications))
self.composite_logger.log("Included packages: " + str(self.installation_included_package_masks))
self.composite_logger.log("Excluded packages: " + str(self.installation_excluded_packages))
if '=' in str(self.installation_excluded_package_masks):
self.composite_logger.log_error("\n /!\\ Package exclusions do not support version matching in the filter today. "
"Due to this, more packages than expected may be excluded from this update deployment.")
# region Inclusion / exclusion presence checks
def is_exclusion_list_present(self):
"""Return true if either Global or patch installation specific exclusion list present"""
return bool(self.global_excluded_packages) or bool(self.installation_excluded_packages)
def is_inclusion_list_present(self):
"""Return true if patch installation Inclusion is present"""
return bool(self.installation_included_packages)
# endregion
# region Package exclusion checks
def check_for_exclusion(self, one_or_more_packages):
"""Return true if package need to be excluded"""
return self.check_for_match(one_or_more_packages, self.installation_excluded_packages) or \
self.check_for_match(one_or_more_packages, self.global_excluded_packages)
# endregion
# region Package inclusion checks
def check_for_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
"""Return true if package should be included (either because no inclusion list is specified, or because of explicit match)"""
return not self.is_inclusion_list_present() or self.check_for_explicit_inclusion(package, package_version)
def check_for_explicit_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
"""Return true if package should be included due to an explicit match to the inclusion list """
return self.check_for_match(package, self.installation_included_packages, package_version, self.installation_included_package_versions)
# endregion
# region Inclusion / exclusion common match checker
def check_for_match(self, one_or_more_packages, matching_list, linked_package_versions=Constants.DEFAULT_UNSPECIFIED_VALUE, version_matching_list=Constants.DEFAULT_UNSPECIFIED_VALUE):
# type: (str, object, str, object) -> bool # type hinting to remove a warning
"""Return true if package(s) (with, optionally, linked version(s)) matches the filter list"""
if matching_list:
if type(one_or_more_packages) is str:
return self.single_package_check_for_match(one_or_more_packages, matching_list, linked_package_versions, version_matching_list)
else:
for index, each_package in enumerate(one_or_more_packages):
if type(linked_package_versions) is str:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions, version_matching_list):
return True
else:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions[index], version_matching_list):
return True
return False
def single_package_check_for_match(self, package, matching_list, package_version, version_matching_list):
"""Returns true if a single package (optionally, version) matches the filter list"""
for index, matching_package in enumerate(matching_list):
if fnmatch.fnmatch(package, matching_package) or fnmatch.fnmatch(self.get_product_name_without_arch(package), matching_package):
self.composite_logger.log_debug(' - [Package] {0} matches expression {1}'.format(package, matching_package))
if package_version == Constants.DEFAULT_UNSPECIFIED_VALUE or not version_matching_list or version_matching_list[index] == Constants.DEFAULT_UNSPECIFIED_VALUE:
self.composite_logger.log_debug(' - [Version] Check skipped as not specified.')
return True
elif len(version_matching_list) > index and fnmatch.fnmatch(package_version, version_matching_list[index]):
self.composite_logger.log_debug(' - [Version] {0} matches expression {1}'.format(package, version_matching_list[index]))
return True
elif len(version_matching_list) <= index: # This should never happen - something has gone horribly wrong
self.composite_logger.log_error(' - [Version] Index error - ({0} of {1})'.format(index + 1, len(version_matching_list)))
else:
self.composite_logger.log_debug(' - Package {0} (version={1}) was found, but it did not match filter specified for version ({2})'.format(package, package_version, version_matching_list[index]))
return False
@staticmethod
def get_product_name_without_arch(package_name):
"""Splits out product name without architecture - if this is changed, review YumPackageManager"""
architectures = ['.x86_64', '.noarch', '.i686']
for arch in architectures:
if package_name.endswith(arch):
return package_name.replace(arch, '')
return package_name
# endregion
# region Get included / excluded package masks
def get_packages_and_versions_from_masks(self, package_masks):
"""Return package names and versions"""
packages = []
package_versions = []
if package_masks is not None:
for index, package_mask in enumerate(package_masks):
package_mask_split = str(package_mask).split('=')
if len(package_mask_split) == 1: # no version specified
packages.append(package_mask_split[0].strip())
package_versions.append(Constants.DEFAULT_UNSPECIFIED_VALUE)
elif len(package_mask_split) == 2: # version also specified
packages.append(package_mask_split[0].strip())
package_versions.append(package_mask_split[1].strip())
else: # invalid format
self.composite_logger.log_warning("Invalid package format: " + str(package_mask) + " [Ignored]")
return packages, package_versions
@staticmethod
def sanitize_str_to_list(string_input):
"""Strips excess white-space and converts a comma-separated string to a list"""
return [] if (string_input is None) else string_input.strip().split(",")
# endregion
# region Get installation classifications from execution configuration
def is_msft_critsec_classification_only(self):
return ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications) and 'Other' not in self.installation_included_classifications
def is_msft_other_classification_only(self):
return 'Other' in self.installation_included_classifications and not ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications)
def is_msft_all_classification_included(self):
"""Returns true if all classifications were individually selected *OR* (nothing was selected AND no inclusion list is present) -- business logic"""
all_classifications = [key for key in Constants.PackageClassification.__dict__.keys() if not key.startswith('__')]
all_classifications_explicitly_selected = bool(len(self.installation_included_classifications) == (len(all_classifications) - 1))
no_classifications_selected = bool(len(self.installation_included_classifications) == 0)
only_unclassified_selected = bool('Unclassified' in self.installation_included_classifications and len(self.installation_included_classifications) == 1)
return all_classifications_explicitly_selected or ((no_classifications_selected or only_unclassified_selected) and not self.is_inclusion_list_present())
def is_invalid_classification_combination(self):
return ('Other' in self.installation_included_classifications and 'Critical' in self.installation_included_classifications and 'Security' not in self.installation_included_classifications) or \
('Other' in self.installation_included_classifications and 'Security' in self.installation_included_classifications and 'Critical' not in self.installation_included_classifications)
# endregion
| 65.924855
| 216
| 0.728979
|
from core.src.bootstrap.Constants import Constants
import fnmatch
class PackageFilter(object):
def __init__(self, execution_config, composite_logger):
self.execution_config = execution_config
self.composite_logger = composite_logger
self.global_excluded_packages = self.sanitize_str_to_list(self.execution_config.global_exclusion_list)
self.installation_excluded_package_masks = self.execution_config.excluded_package_name_mask_list
self.installation_excluded_packages, self.installation_excluded_package_versions = self.get_packages_and_versions_from_masks(self.installation_excluded_package_masks)
self.installation_included_package_masks = self.execution_config.included_package_name_mask_list
self.installation_included_packages, self.installation_included_package_versions = self.get_packages_and_versions_from_masks(self.installation_included_package_masks)
self.installation_included_classifications = [] if self.execution_config.included_classifications_list is None else self.execution_config.included_classifications_list
packages_to_clear_from_global = []
for package in self.global_excluded_packages:
if self.check_for_explicit_inclusion(package):
self.composite_logger.log_debug('Removing package from global exclusion list: ' + package)
packages_to_clear_from_global.append(package)
self.global_excluded_packages = [x for x in self.global_excluded_packages if x not in packages_to_clear_from_global]
self.composite_logger.log("\nAzure globally-excluded packages: " + str(self.global_excluded_packages))
self.composite_logger.log("Included package classifications: " + ', '.join(self.installation_included_classifications))
self.composite_logger.log("Included packages: " + str(self.installation_included_package_masks))
self.composite_logger.log("Excluded packages: " + str(self.installation_excluded_packages))
if '=' in str(self.installation_excluded_package_masks):
self.composite_logger.log_error("\n /!\\ Package exclusions do not support version matching in the filter today. "
"Due to this, more packages than expected may be excluded from this update deployment.")
def is_exclusion_list_present(self):
return bool(self.global_excluded_packages) or bool(self.installation_excluded_packages)
def is_inclusion_list_present(self):
return bool(self.installation_included_packages)
def check_for_exclusion(self, one_or_more_packages):
return self.check_for_match(one_or_more_packages, self.installation_excluded_packages) or \
self.check_for_match(one_or_more_packages, self.global_excluded_packages)
def check_for_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
return not self.is_inclusion_list_present() or self.check_for_explicit_inclusion(package, package_version)
def check_for_explicit_inclusion(self, package, package_version=Constants.DEFAULT_UNSPECIFIED_VALUE):
return self.check_for_match(package, self.installation_included_packages, package_version, self.installation_included_package_versions)
def check_for_match(self, one_or_more_packages, matching_list, linked_package_versions=Constants.DEFAULT_UNSPECIFIED_VALUE, version_matching_list=Constants.DEFAULT_UNSPECIFIED_VALUE):
if type(one_or_more_packages) is str:
return self.single_package_check_for_match(one_or_more_packages, matching_list, linked_package_versions, version_matching_list)
else:
for index, each_package in enumerate(one_or_more_packages):
if type(linked_package_versions) is str:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions, version_matching_list):
return True
else:
if self.single_package_check_for_match(each_package, matching_list, linked_package_versions[index], version_matching_list):
return True
return False
def single_package_check_for_match(self, package, matching_list, package_version, version_matching_list):
for index, matching_package in enumerate(matching_list):
if fnmatch.fnmatch(package, matching_package) or fnmatch.fnmatch(self.get_product_name_without_arch(package), matching_package):
self.composite_logger.log_debug(' - [Package] {0} matches expression {1}'.format(package, matching_package))
if package_version == Constants.DEFAULT_UNSPECIFIED_VALUE or not version_matching_list or version_matching_list[index] == Constants.DEFAULT_UNSPECIFIED_VALUE:
self.composite_logger.log_debug(' - [Version] Check skipped as not specified.')
return True
elif len(version_matching_list) > index and fnmatch.fnmatch(package_version, version_matching_list[index]):
self.composite_logger.log_debug(' - [Version] {0} matches expression {1}'.format(package, version_matching_list[index]))
return True
elif len(version_matching_list) <= index:
self.composite_logger.log_error(' - [Version] Index error - ({0} of {1})'.format(index + 1, len(version_matching_list)))
else:
self.composite_logger.log_debug(' - Package {0} (version={1}) was found, but it did not match filter specified for version ({2})'.format(package, package_version, version_matching_list[index]))
return False
@staticmethod
def get_product_name_without_arch(package_name):
architectures = ['.x86_64', '.noarch', '.i686']
for arch in architectures:
if package_name.endswith(arch):
return package_name.replace(arch, '')
return package_name
def get_packages_and_versions_from_masks(self, package_masks):
packages = []
package_versions = []
if package_masks is not None:
for index, package_mask in enumerate(package_masks):
package_mask_split = str(package_mask).split('=')
if len(package_mask_split) == 1:
packages.append(package_mask_split[0].strip())
package_versions.append(Constants.DEFAULT_UNSPECIFIED_VALUE)
elif len(package_mask_split) == 2:
packages.append(package_mask_split[0].strip())
package_versions.append(package_mask_split[1].strip())
else:
self.composite_logger.log_warning("Invalid package format: " + str(package_mask) + " [Ignored]")
return packages, package_versions
@staticmethod
def sanitize_str_to_list(string_input):
return [] if (string_input is None) else string_input.strip().split(",")
def is_msft_critsec_classification_only(self):
return ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications) and 'Other' not in self.installation_included_classifications
def is_msft_other_classification_only(self):
return 'Other' in self.installation_included_classifications and not ('Critical' in self.installation_included_classifications or 'Security' in self.installation_included_classifications)
def is_msft_all_classification_included(self):
all_classifications = [key for key in Constants.PackageClassification.__dict__.keys() if not key.startswith('__')]
all_classifications_explicitly_selected = bool(len(self.installation_included_classifications) == (len(all_classifications) - 1))
no_classifications_selected = bool(len(self.installation_included_classifications) == 0)
only_unclassified_selected = bool('Unclassified' in self.installation_included_classifications and len(self.installation_included_classifications) == 1)
return all_classifications_explicitly_selected or ((no_classifications_selected or only_unclassified_selected) and not self.is_inclusion_list_present())
def is_invalid_classification_combination(self):
return ('Other' in self.installation_included_classifications and 'Critical' in self.installation_included_classifications and 'Security' not in self.installation_included_classifications) or \
('Other' in self.installation_included_classifications and 'Security' in self.installation_included_classifications and 'Critical' not in self.installation_included_classifications)
| true
| true
|
f704c10fdcec8a9d3f12a9f1b47e921161913390
| 1,253
|
py
|
Python
|
scripts/activate_amplifier.py
|
drocx/RPi-Jukebox-RFID
|
1e211c2f4571a86d97747fe9094a34931de8b7c1
|
[
"MIT"
] | 1
|
2020-03-24T20:27:07.000Z
|
2020-03-24T20:27:07.000Z
|
scripts/activate_amplifier.py
|
drocx/RPi-Jukebox-RFID
|
1e211c2f4571a86d97747fe9094a34931de8b7c1
|
[
"MIT"
] | null | null | null |
scripts/activate_amplifier.py
|
drocx/RPi-Jukebox-RFID
|
1e211c2f4571a86d97747fe9094a34931de8b7c1
|
[
"MIT"
] | 1
|
2019-10-06T16:33:52.000Z
|
2019-10-06T16:33:52.000Z
|
#!/usr/bin/python3
import sys
from signal import pause
import RPi.GPIO as GPIO
# script to activate and deactivate an amplifier, power led, etc. using a GPIO
# pin on power up / down
# see for an example implementation with a PAM8403 digital amplifier
# (PAM pin 12 connected to GPIO 26)
# https://github.com/MiczFlor/RPi-Jukebox-RFID/wiki/Hardware-Hack-PAM8403-Poweroff
# change this value based on which GPIO port the amplifier or other devices are connected to
# Flexible Pinout
AMP_GPIO = 26
# Classic Pinout alternative:
# AMP_GPIO = 23
# Configure the control pin as an output using BCM numbering.
# Deliberately no GPIO.cleanup() on exit: the pin must stay low (= off)
# after the program ends, so the resulting "channel in use" warning is muted.
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(AMP_GPIO, GPIO.OUT)
def set_amplifier(status):
    """Drive the amplifier control pin: truthy status -> HIGH (on), else LOW (off)."""
    level = GPIO.HIGH if status else GPIO.LOW
    print("Setting amplifier: ON" if status else "Setting amplifier: OFF")
    GPIO.output(AMP_GPIO, level)
if __name__ == "__main__":
    try:
        # Enable the amplifier, then block until a signal (e.g. Ctrl-C) arrives.
        set_amplifier(True)
        pause()
    except KeyboardInterrupt:
        # turn the relay off so the device is powered down on exit
        set_amplifier(False)
        print("\nExiting amplifier control\n")
        # exit the application
        sys.exit(0)
| 26.659574
| 92
| 0.703113
|
import sys
from signal import pause
import RPi.GPIO as GPIO
AMP_GPIO = 26
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(AMP_GPIO, GPIO.OUT)
def set_amplifier(status):
if status:
print("Setting amplifier: ON")
GPIO.output(AMP_GPIO, GPIO.HIGH)
else:
print("Setting amplifier: OFF")
GPIO.output(AMP_GPIO, GPIO.LOW)
if __name__ == "__main__":
try:
set_amplifier(True)
pause()
except KeyboardInterrupt:
set_amplifier(False)
print("\nExiting amplifier control\n")
sys.exit(0)
| true
| true
|
f704c2d7e17f9fd856996531dcce0fe615f4d3e0
| 755
|
py
|
Python
|
tests/create_config.py
|
throne/throne-cli
|
5cc66165b858d9a22c65aac8269523ca1e89cbee
|
[
"BSD-3-Clause-Clear"
] | 4
|
2021-05-25T05:56:05.000Z
|
2022-03-24T21:37:04.000Z
|
tests/create_config.py
|
throne/throne-cli
|
5cc66165b858d9a22c65aac8269523ca1e89cbee
|
[
"BSD-3-Clause-Clear"
] | 9
|
2021-04-22T18:43:48.000Z
|
2021-09-05T05:11:59.000Z
|
tests/create_config.py
|
throne/throne-cli
|
5cc66165b858d9a22c65aac8269523ca1e89cbee
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-04-26T07:07:09.000Z
|
2021-04-26T07:07:09.000Z
|
import os
import time
from click.testing import CliRunner
from bin.throne import cli as throne
runner = CliRunner()  # Click test runner; invokes the CLI in-process
# Credentials come from the environment so the suite can run in CI.
shodan_key = os.getenv('SHODAN_KEY')
throne_user = os.getenv('THRONE_USER')
throne_pass = os.getenv('THRONE_PASS')
def test_throne_setapi():
    """Verify 'throne api setapi' accepts env credentials and reports success."""
    print("Testing: throne api setapi")
    result = runner.invoke(
        throne, ["api", "setapi", "-u", f"{throne_user}", "-p", f"{throne_pass}"])
    assert result.exit_code == 0
    assert "Successfully set throne API key." in result.output
def test_shodan_setapi():
    """Verify 'throne shodan setapi' stores the Shodan key fed via stdin."""
    print("Testing: throne shodan setapi")
    result = runner.invoke(throne, ["shodan", "setapi"], input=f"{shodan_key}")
    assert result.exit_code == 0
    assert "Successfully set Shodan API key." in result.output
| 34.318182
| 103
| 0.717881
|
import os
import time
from click.testing import CliRunner
from bin.throne import cli as throne
runner = CliRunner()
shodan_key = os.getenv('SHODAN_KEY')
throne_user = os.getenv('THRONE_USER')
throne_pass = os.getenv('THRONE_PASS')
def test_throne_setapi():
print("Testing: throne api setapi")
response = runner.invoke(throne, ["api", "setapi", "-u", f"{throne_user}", "-p", f"{throne_pass}"])
assert response.exit_code == 0
assert "Successfully set throne API key." in response.output
def test_shodan_setapi():
print("Testing: throne shodan setapi")
response = runner.invoke(throne, ["shodan", "setapi"], input=f"{shodan_key}")
assert response.exit_code == 0
assert "Successfully set Shodan API key." in response.output
| true
| true
|
f704c4328e33a065550916c30d7752a440d3bddf
| 16,427
|
py
|
Python
|
senlin-7.0.0/senlin/objects/fields.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
senlin-7.0.0/senlin/objects/fields.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
senlin-7.0.0/senlin/objects/fields.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields
import re
import six
from senlin.common import consts
from senlin.common.i18n import _
CONF = cfg.CONF
# Field aliases re-exported from oslo.versionedobjects for code readability.
# (BooleanField is intentionally not aliased here; a stricter Boolean-backed
# BooleanField is defined later in this module.)
FlexibleBooleanField = fields.FlexibleBooleanField
StringField = fields.StringField
IntegerField = fields.IntegerField
FloatField = fields.FloatField
# NOTE(review): this alias is shadowed by the UUIDField class defined later
# in this module -- confirm which one callers are meant to get.
UUIDField = fields.UUIDField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
ListOfStringsField = fields.ListOfStringsField
ListOfEnumField = fields.ListOfEnumField
class Boolean(fields.FieldType):
    """Boolean field type with strict string parsing.

    Stricter than the oslo version: unrecognized strings are rejected, and
    the user-specified default is handed to 'bool_from_string'.
    """

    def __init__(self, default=False):
        super(Boolean, self).__init__()
        self._default = default

    def coerce(self, obj, attr, value):
        # strict=True raises on strings that are not recognized booleans.
        return strutils.bool_from_string(value, strict=True, default=self._default)

    def get_schema(self):
        return {'type': ['boolean']}
class NonNegativeInteger(fields.FieldType):
    """Integer field that rejects negative values with a friendly error."""

    @staticmethod
    def coerce(obj, attr, value):
        try:
            result = int(value)
        except (TypeError, ValueError):
            raise ValueError(_("The value for %(attr)s must be an integer: "
                               "'%(value)s'.") %
                             {'attr': attr, 'value': value})
        if result < 0:
            raise ValueError(_("Value must be >= 0 for field '%s'.") % attr)
        return result

    def get_schema(self):
        return {'type': ['integer', 'string'], 'minimum': 0}
# Senlin has a stricter field checking for object fields.
class Object(fields.Object):
    """Object field with a closed (strict) schema for nested data."""

    def get_schema(self):
        schema = super(Object, self).get_schema()
        # Disallow unknown keys under 'senlin_object.data'. Registration of
        # self._obj_name is not checked here; lookup raises if unregistered.
        schema['properties']['senlin_object.data']['additionalProperties'] = False
        return schema
class UUID(fields.FieldType):
    """UUID field accepting hyphenated or bare 32-hex-digit forms."""

    _PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
                r'{4}-?[a-fA-F0-9]{12}$')

    @staticmethod
    def coerce(obj, attr, value):
        if uuidutils.is_uuid_like(value):
            return str(value)
        raise ValueError(_("The value for %(attr)s is not a valid UUID: '%(value)s'."
                           ) % {'attr': attr, 'value': value})

    def get_schema(self):
        return {'type': ['string'], 'pattern': self._PATTERN}
class Json(fields.FieldType):
    """Field holding arbitrary JSON-compatible data.

    String inputs are parsed as JSON documents; non-string inputs are assumed
    to already be JSON-compatible and pass through unchanged.
    """

    def coerce(self, obj, attr, value):
        if isinstance(value, six.string_types):
            try:
                return jsonutils.loads(value)
            except ValueError:
                msg = _("The value (%s) is not a valid JSON.") % value
                raise ValueError(msg)
        return value

    def from_primitive(self, obj, attr, value):
        return self.coerce(obj, attr, value)

    def to_primitive(self, obj, attr, value):
        return jsonutils.dumps(value)

    def stringify(self, value):
        # NOTE(review): for string input this returns the *parsed* document,
        # not a string -- looks intentional upstream; confirm before changing.
        # The former 'except ValueError: raise' handler was a no-op and has
        # been removed; the exception still propagates unchanged.
        if isinstance(value, six.string_types):
            return jsonutils.loads(value)
        return str(value)

    def get_schema(self):
        return {'type': ['object']}
class NotificationPriority(fields.Enum):
    """Enum of valid notification priorities.

    The priorities here are derived from oslo_messaging.notify.notifier.
    """
    ALL = consts.NOTIFICATION_PRIORITIES
    def __init__(self):
        super(NotificationPriority, self).__init__(self.ALL)
class NotificationPhase(fields.Enum):
    """Enum of valid notification phases."""
    ALL = consts.NOTIFICATION_PHASES
    def __init__(self):
        super(NotificationPhase, self).__init__(self.ALL)
class Name(fields.String):
    """String field for resource names, with length and charset validation."""

    def __init__(self, min_len=1, max_len=255):
        super(Name, self).__init__()
        self.min_len = min_len
        self.max_len = max_len

    def coerce(self, obj, attr, value):
        # Guard clauses: length bounds first, then the character-set check.
        if len(value) < self.min_len:
            raise ValueError(_("The value for the %(attr)s field must be at least "
                               "%(count)d characters long."
                               ) % {'attr': attr, 'count': self.min_len})
        if len(value) > self.max_len:
            raise ValueError(_("The value for the %(attr)s field must be less than "
                               "%(count)d characters long."
                               ) % {'attr': attr, 'count': self.max_len})
        # Deliberately restrictive charset (alphanumeric, CJK, "._~-");
        # can be relaxed later if requested.
        pattern = re.compile(u'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$', re.IGNORECASE)
        if not pattern.search(value):
            raise ValueError(_("The value for the '%(attr)s' (%(value)s) contains "
                               "illegal characters. It must contain only "
                               "alphanumeric or \"_-.~\" characters and must start "
                               "with letter."
                               ) % {'attr': attr, 'value': value})
        return super(Name, self).coerce(obj, attr, value)

    def get_schema(self):
        return {
            'type': ['string'],
            'minLength': self.min_len,
            'maxLength': self.max_len
        }
class Capacity(fields.Integer):
    """Integer field for cluster capacity, bounded by the global maximum.

    Both bounds are validated against CONF.max_nodes_per_cluster when the
    field is constructed; coerce() then range-checks every incoming value.
    """
    def __init__(self, minimum=0, maximum=None):
        super(Capacity, self).__init__()
        # Ensure the global cap option is registered before reading it.
        CONF.import_opt("max_nodes_per_cluster", "senlin.common.config")
        if minimum > CONF.max_nodes_per_cluster:
            err = _("The value of 'minimum' cannot be greater than the global "
                    "constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster}
            raise ValueError(err)
        self.minimum = minimum
        if maximum is not None:
            if maximum < minimum:
                err = _("The value of 'maximum' must be greater than or equal "
                        "to that of the 'minimum' specified.")
                raise ValueError(err)
            if maximum > CONF.max_nodes_per_cluster:
                err = _("The value of 'maximum' cannot be greater than the "
                        "global constraint (%(m)d)."
                        ) % {'m': CONF.max_nodes_per_cluster}
                raise ValueError(err)
            self.maximum = maximum
        else:
            # No explicit maximum: fall back to the global cap.
            self.maximum = CONF.max_nodes_per_cluster
    def coerce(self, obj, attr, value):
        try:
            v = int(value)
        except Exception:
            raise ValueError(_("The value for %(attr)s must be an integer: "
                               "'%(value)s'.") %
                             {'attr': attr, 'value': value})
        if v < self.minimum:
            raise ValueError(_("The value for the %(a)s field must be greater "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.minimum})
        elif v > self.maximum:
            raise ValueError(_("The value for the %(a)s field must be less "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.maximum})
        return super(Capacity, self).coerce(obj, attr, v)
    def get_schema(self):
        return {
            'type': ['integer', 'string'],
            'minimum': self.minimum,
            'maximum': self.maximum,
            'pattern': '^[0-9]*$',
        }
class Sort(fields.String):
    """Comma-separated sort specification: "key[:asc|desc],key2[:dir],..."."""

    def __init__(self, valid_keys):
        super(Sort, self).__init__()
        self.valid_keys = valid_keys

    def coerce(self, obj, attr, value):
        for segment in value.split(','):
            key, _sep, direction = segment.partition(':')
            if not key:
                raise ValueError(_("Missing sort key for '%s'.") % attr)
            # NOTE: if both the key and the direction are invalid, only the
            # direction error is reported (later assignment wins) -- preserved.
            problem = None
            if key not in self.valid_keys:
                problem = _("Unsupported sort key '%(value)s' for '%(attr)s'."
                            ) % {'attr': attr, 'value': key}
            if direction and direction not in ('asc', 'desc'):
                problem = _("Unsupported sort dir '%(value)s' for '%(attr)s'."
                            ) % {'attr': attr, 'value': direction}
            if problem:
                raise ValueError(problem)
        return super(Sort, self).coerce(obj, attr, value)

    def get_schema(self):
        return {
            'type': ['string'],
        }
class IdentityList(fields.List):
    """List field with minimum-size and uniqueness constraints."""

    def __init__(self, element_type, min_items=0, unique=True, nullable=False,
                 **kwargs):
        super(IdentityList, self).__init__(element_type, **kwargs)
        self.min_items = min_items
        self.unique_items = unique
        self.nullable = nullable

    def coerce(self, obj, attr, value):
        items = super(IdentityList, self).coerce(obj, attr, value)
        if len(items) < self.min_items:
            raise ValueError(_("Value for '%(attr)s' must have at least "
                               "%(num)s item(s).") %
                             {'attr': attr, 'num': self.min_items})
        # NOTE(review): set(items) is built even when unique_items is False,
        # matching the original evaluation order (it would raise TypeError on
        # unhashable elements either way) -- confirm before short-circuiting.
        if len(set(items)) != len(items) and self.unique_items:
            raise ValueError(_("Items for '%(attr)s' must be unique") %
                             {'attr': attr})
        return items

    def get_schema(self):
        schema = super(IdentityList, self).get_schema()
        if self.nullable:
            schema['type'].append('null')
        schema['minItems'] = self.min_items
        schema['uniqueItems'] = self.unique_items
        return schema
class BaseEnum(fields.FieldType):
    """Enum base type for string-valued enums.

    Not derived from fields.String because String does not handle a None
    value correctly.
    """

    def __init__(self, nullable=False):
        allowed = list(self.__class__.ALL)
        if not allowed:
            raise ValueError(_("No list of valid values provided for enum."))
        for item in allowed:
            if not isinstance(item, six.string_types):
                raise ValueError(_("Enum field only support string values."))
        self._valid_values = list(allowed)
        self._nullable = nullable
        super(BaseEnum, self).__init__()

    def coerce(self, obj, attr, value):
        value = six.text_type(value)
        if value in self._valid_values:
            return value
        raise ValueError(_("Value '%(value)s' is not acceptable for "
                           "field '%(attr)s'.") %
                         {'value': value, 'attr': attr})

    def stringify(self, value):
        return None if value is None else '\'%s\'' % value
class AdjustmentType(BaseEnum):
    """Enum of valid cluster-size adjustment types."""
    ALL = consts.ADJUSTMENT_TYPES
    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}
class ClusterActionName(BaseEnum):
    """Enum of valid cluster action names."""
    ALL = consts.CLUSTER_ACTION_NAMES
    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}
class ClusterStatus(BaseEnum):
    """Enum of valid cluster statuses."""
    ALL = consts.CLUSTER_STATUSES
class NodeStatus(BaseEnum):
    """Enum of valid node statuses."""
    ALL = consts.NODE_STATUSES
class ActionStatus(BaseEnum):
    """Enum of valid action statuses."""
    ALL = consts.ACTION_STATUSES
class ReceiverType(BaseEnum):
    """Enum of valid receiver types."""
    ALL = consts.RECEIVER_TYPES
    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}
class UniqueDict(fields.Dict):
    """Dict field whose values must be pairwise distinct."""

    def coerce(self, obj, attr, value):
        mapping = super(UniqueDict, self).coerce(obj, attr, value)
        values = mapping.values()
        if len(set(values)) != len(values):
            raise ValueError(_("Map contains duplicated values"))
        return mapping
# TODO(Qiming): remove this when oslo patch is released
# https://review.openstack.org/#/c/360095
class NonNegativeIntegerField(fields.AutoTypedField):
    """Auto-typed field backed by NonNegativeInteger."""
    AUTO_TYPE = NonNegativeInteger()
class BooleanField(fields.AutoTypedField):
    """Auto-typed field backed by the strict Boolean type."""
    AUTO_TYPE = Boolean()
# An override to the oslo.versionedobjects version so that we are using
# our own Object definition.
class ObjectField(fields.AutoTypedField):
    def __init__(self, objtype, subclasses=False, **kwargs):
        # AUTO_TYPE is set per instance so each field binds its own objtype.
        self.AUTO_TYPE = Object(objtype, subclasses)
        self.objname = objtype
        super(ObjectField, self).__init__(**kwargs)
class JsonField(fields.AutoTypedField):
    """Auto-typed field for arbitrary JSON-compatible data."""
    AUTO_TYPE = Json()
class ListField(fields.AutoTypedField):
    """Auto-typed field for a list of arbitrary-typed elements."""
    AUTO_TYPE = fields.List(fields.FieldType())
class NotificationPriorityField(fields.BaseEnumField):
    """Enum field for notification priorities."""
    AUTO_TYPE = NotificationPriority()
class NotificationPhaseField(fields.BaseEnumField):
    """Enum field for notification phases."""
    AUTO_TYPE = NotificationPhase()
class NameField(fields.AutoTypedField):
    """Auto-typed field for validated resource names."""
    AUTO_TYPE = Name()
class UUIDField(fields.AutoTypedField):
    # NOTE(review): this class shadows the 'UUIDField = fields.UUIDField'
    # alias defined near the top of the module -- confirm that is intended.
    AUTO_TYPE = UUID()
class CapacityField(fields.AutoTypedField):
    """Auto-typed field for cluster capacity with per-instance bounds."""
    # Placeholder; the real type is constructed per instance in __init__.
    AUTO_TYPE = None
    def __init__(self, nullable=False, default=None, minimum=0, maximum=None):
        self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum)
        super(CapacityField, self).__init__(nullable=nullable, default=default)
class SortField(fields.AutoTypedField):
    """Auto-typed field for sort specifications restricted to valid_keys."""
    AUTO_TYPE = None
    def __init__(self, valid_keys, nullable=False, default=None):
        self.AUTO_TYPE = Sort(valid_keys)
        super(SortField, self).__init__(nullable=nullable, default=default)
class IdentityListField(fields.AutoTypedField):
    """Auto-typed field for a list of identity strings."""
    AUTO_TYPE = None
    def __init__(self, min_items=0, unique=True, nullable=False, default=None):
        # Fresh list per call avoids a shared mutable default.
        if default is None:
            default = []
        self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items,
                                      unique=unique)
        super(IdentityListField, self).__init__(nullable=nullable,
                                                default=default)
class AdjustmentTypeField(fields.AutoTypedField):
    """Enum-backed field for cluster-size adjustment types."""
    AUTO_TYPE = None
    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = AdjustmentType(nullable=nullable)
        super(AdjustmentTypeField, self).__init__(**kwargs)
class ClusterActionNameField(fields.AutoTypedField):
    """Enum-backed field for cluster action names."""
    AUTO_TYPE = None
    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ClusterActionName(nullable=nullable)
        super(ClusterActionNameField, self).__init__(**kwargs)
class ClusterStatusField(fields.AutoTypedField):
    """Enum-backed field for cluster statuses."""
    AUTO_TYPE = ClusterStatus
class NodeStatusField(fields.AutoTypedField):
    """Enum-backed field for node statuses."""
    AUTO_TYPE = NodeStatus
class ActionStatusField(fields.AutoTypedField):
    """Enum-backed field for action statuses."""
    AUTO_TYPE = ActionStatus
class ReceiverTypeField(fields.AutoTypedField):
    """Enum-backed field for receiver types."""
    AUTO_TYPE = None
    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ReceiverType(nullable=nullable)
        super(ReceiverTypeField, self).__init__(**kwargs)
class NodeReplaceMapField(fields.AutoTypedField):
    """Dict field whose values must be unique (used for node replacement)."""
    AUTO_TYPE = UniqueDict(fields.String())
class CustomListField(ListField):
    """List field that projects each coerced element to one attribute.

    After the normal list coercion, each element is replaced by the value of
    its 'attr_name' attribute.
    """

    def __init__(self, attr_name, **kwargs):
        self.attr_name = attr_name
        super(CustomListField, self).__init__(**kwargs)

    def coerce(self, obj, attr, value):
        items = super(CustomListField, self).coerce(obj, attr, value)
        # Comprehension replaces the manual append loop (same result).
        return [getattr(item, self.attr_name) for item in items]
| 30.647388
| 79
| 0.603397
|
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import strutils
from oslo_utils import uuidutils
from oslo_versionedobjects import fields
import re
import six
from senlin.common import consts
from senlin.common.i18n import _
CONF = cfg.CONF
FlexibleBooleanField = fields.FlexibleBooleanField
StringField = fields.StringField
IntegerField = fields.IntegerField
FloatField = fields.FloatField
UUIDField = fields.UUIDField
DateTimeField = fields.DateTimeField
DictOfStringsField = fields.DictOfStringsField
ListOfStringsField = fields.ListOfStringsField
ListOfEnumField = fields.ListOfEnumField
class Boolean(fields.FieldType):
def __init__(self, default=False):
super(Boolean, self).__init__()
self._default = default
def coerce(self, obj, attr, value):
return strutils.bool_from_string(value, strict=True,
default=self._default)
def get_schema(self):
return {'type': ['boolean']}
class NonNegativeInteger(fields.FieldType):
@staticmethod
def coerce(obj, attr, value):
try:
v = int(value)
except (TypeError, ValueError):
raise ValueError(_("The value for %(attr)s must be an integer: "
"'%(value)s'.") %
{'attr': attr, 'value': value})
if v < 0:
err = _("Value must be >= 0 for field '%s'.") % attr
raise ValueError(err)
return v
def get_schema(self):
return {
'type': ['integer', 'string'],
'minimum': 0
}
class Object(fields.Object):
def get_schema(self):
schema = super(Object, self).get_schema()
data_key = 'senlin_object.data'
schema['properties'][data_key]['additionalProperties'] = False
return schema
class UUID(fields.FieldType):
_PATTERN = (r'^[a-fA-F0-9]{8}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]{4}-?[a-fA-F0-9]'
r'{4}-?[a-fA-F0-9]{12}$')
@staticmethod
def coerce(obj, attr, value):
if not uuidutils.is_uuid_like(value):
msg = _("The value for %(attr)s is not a valid UUID: '%(value)s'."
) % {'attr': attr, 'value': value}
raise ValueError(msg)
return str(value)
def get_schema(self):
return {'type': ['string'], 'pattern': self._PATTERN}
class Json(fields.FieldType):
def coerce(self, obj, attr, value):
if isinstance(value, six.string_types):
try:
return jsonutils.loads(value)
except ValueError:
msg = _("The value (%s) is not a valid JSON.") % value
raise ValueError(msg)
return value
def from_primitive(self, obj, attr, value):
return self.coerce(obj, attr, value)
def to_primitive(self, obj, attr, value):
return jsonutils.dumps(value)
def stringify(self, value):
if isinstance(value, six.string_types):
try:
return jsonutils.loads(value)
except ValueError:
raise
return str(value)
def get_schema(self):
return {'type': ['object']}
class NotificationPriority(fields.Enum):
ALL = consts.NOTIFICATION_PRIORITIES
def __init__(self):
super(NotificationPriority, self).__init__(self.ALL)
class NotificationPhase(fields.Enum):
ALL = consts.NOTIFICATION_PHASES
def __init__(self):
super(NotificationPhase, self).__init__(self.ALL)
class Name(fields.String):
    """String field for resource names with length and character checks."""

    def __init__(self, min_len=1, max_len=255):
        # Length bounds are inclusive on the minimum, inclusive on the
        # maximum (see coerce: '<' and '>' comparisons).
        super(Name, self).__init__()
        self.min_len = min_len
        self.max_len = max_len

    def coerce(self, obj, attr, value):
        """Validate length and allowed characters, then delegate upward."""
        err = None
        if len(value) < self.min_len:
            err = _("The value for the %(attr)s field must be at least "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.min_len}
        elif len(value) > self.max_len:
            err = _("The value for the %(attr)s field must be less than "
                    "%(count)d characters long."
                    ) % {'attr': attr, 'count': self.max_len}
        else:
            # Letters (incl. CJK range U+4E00-U+9FA5), digits and ._~- are
            # allowed. NOTE(review): the regex permits names starting with
            # a digit or punctuation even though the error text says the
            # name "must start with letter" — confirm intended behavior.
            regex = re.compile(u'^[a-zA-Z\u4e00-\u9fa5\d\.\_\~-]*$',
                               re.IGNORECASE)
            if not regex.search(value):
                err = _("The value for the '%(attr)s' (%(value)s) contains "
                        "illegal characters. It must contain only "
                        "alphanumeric or \"_-.~\" characters and must start "
                        "with letter."
                        ) % {'attr': attr, 'value': value}
        if err:
            raise ValueError(err)
        return super(Name, self).coerce(obj, attr, value)

    def get_schema(self):
        return {
            'type': ['string'],
            'minLength': self.min_len,
            'maxLength': self.max_len
        }
class Capacity(fields.Integer):
    """Integer field for cluster capacities, bounded by a global limit.

    Both bounds are inclusive. The upper bound can never exceed the
    ``max_nodes_per_cluster`` configuration option; when no maximum is
    given, that option is used as the maximum.
    """

    def __init__(self, minimum=0, maximum=None):
        super(Capacity, self).__init__()
        # Lazily import the option so the config module is only required
        # when a Capacity field is actually constructed.
        CONF.import_opt("max_nodes_per_cluster", "senlin.common.config")
        if minimum > CONF.max_nodes_per_cluster:
            err = _("The value of 'minimum' cannot be greater than the global "
                    "constraint (%(m)d).") % {'m': CONF.max_nodes_per_cluster}
            raise ValueError(err)
        self.minimum = minimum
        if maximum is not None:
            if maximum < minimum:
                err = _("The value of 'maximum' must be greater than or equal "
                        "to that of the 'minimum' specified.")
                raise ValueError(err)
            if maximum > CONF.max_nodes_per_cluster:
                err = _("The value of 'maximum' cannot be greater than the "
                        "global constraint (%(m)d)."
                        ) % {'m': CONF.max_nodes_per_cluster}
                raise ValueError(err)
            self.maximum = maximum
        else:
            # Default the cap to the global constraint.
            self.maximum = CONF.max_nodes_per_cluster

    def coerce(self, obj, attr, value):
        """Coerce to int and enforce the [minimum, maximum] range."""
        try:
            v = int(value)
        except Exception:
            raise ValueError(_("The value for %(attr)s must be an integer: "
                               "'%(value)s'.") %
                             {'attr': attr, 'value': value})
        if v < self.minimum:
            raise ValueError(_("The value for the %(a)s field must be greater "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.minimum})
        elif v > self.maximum:
            raise ValueError(_("The value for the %(a)s field must be less "
                               "than or equal to %(n)d.") %
                             {'a': attr, 'n': self.maximum})
        return super(Capacity, self).coerce(obj, attr, v)

    def get_schema(self):
        # Numeric strings are accepted; the pattern rejects signs, so the
        # string form can only express non-negative values.
        return {
            'type': ['integer', 'string'],
            'minimum': self.minimum,
            'maximum': self.maximum,
            'pattern': '^[0-9]*$',
        }
class Sort(fields.String):
    """String field for sort specs: 'key[:dir],key[:dir],...'.

    Each segment names a key from ``valid_keys`` with an optional
    direction of 'asc' or 'desc'.
    """

    def __init__(self, valid_keys):
        super(Sort, self).__init__()
        self.valid_keys = valid_keys

    def coerce(self, obj, attr, value):
        for s in value.split(','):
            # partition keeps the segment valid when no ':dir' is given.
            s_key, _sep, s_dir = s.partition(':')
            err = None
            if not s_key:
                # An empty key (e.g. ':asc') aborts immediately.
                err = _("Missing sort key for '%s'.") % attr
                raise ValueError(err)
            if s_key not in self.valid_keys:
                err = _("Unsupported sort key '%(value)s' for '%(attr)s'."
                        ) % {'attr': attr, 'value': s_key}
            # NOTE(review): a bad direction overwrites a bad-key error for
            # the same segment, so only the direction message is reported.
            if s_dir and s_dir not in ('asc', 'desc'):
                err = _("Unsupported sort dir '%(value)s' for '%(attr)s'."
                        ) % {'attr': attr, 'value': s_dir}
            if err:
                raise ValueError(err)
        return super(Sort, self).coerce(obj, attr, value)

    def get_schema(self):
        return {
            'type': ['string'],
        }
class IdentityList(fields.List):
    """List field with minimum-size and uniqueness constraints."""

    def __init__(self, element_type, min_items=0, unique=True, nullable=False,
                 **kwargs):
        super(IdentityList, self).__init__(element_type, **kwargs)
        self.min_items = min_items
        self.unique_items = unique
        self.nullable = nullable

    def coerce(self, obj, attr, value):
        res = super(IdentityList, self).coerce(obj, attr, value)
        if len(res) < self.min_items:
            raise ValueError(_("Value for '%(attr)s' must have at least "
                               "%(num)s item(s).") %
                             {'attr': attr, 'num': self.min_items})
        # NOTE: set(res) is built before unique_items is consulted, so
        # unhashable elements raise TypeError even when unique=False.
        if len(set(res)) != len(res) and self.unique_items:
            raise ValueError(_("Items for '%(attr)s' must be unique") %
                             {'attr': attr})
        return res

    def get_schema(self):
        schema = super(IdentityList, self).get_schema()
        if self.nullable:
            schema['type'].append('null')
        schema['minItems'] = self.min_items
        schema['uniqueItems'] = self.unique_items
        return schema
class BaseEnum(fields.FieldType):
    """Base for enum-like field types with a fixed set of string values.

    Subclasses define ``ALL`` as the tuple/list of accepted values.
    """

    def __init__(self, nullable=False):
        valid_values = list(self.__class__.ALL)
        if not valid_values:
            raise ValueError(_("No list of valid values provided for enum."))
        for value in valid_values:
            if not isinstance(value, six.string_types):
                raise ValueError(_("Enum field only support string values."))
        self._valid_values = list(valid_values)
        # NOTE(review): _nullable is stored but never consulted in coerce,
        # so None is rejected even when nullable=True — confirm whether a
        # caller-side check compensates for this.
        self._nullable = nullable
        super(BaseEnum, self).__init__()

    def coerce(self, obj, attr, value):
        # Stringify first so non-string inputs get a readable error.
        value = six.text_type(value)
        if value not in self._valid_values:
            raise ValueError(_("Value '%(value)s' is not acceptable for "
                               "field '%(attr)s'.") %
                             {'value': value, 'attr': attr})
        return value

    def stringify(self, value):
        if value is None:
            return None
        return '\'%s\'' % value
class AdjustmentType(BaseEnum):
    """Enum of cluster-resize adjustment types."""
    ALL = consts.ADJUSTMENT_TYPES

    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}


class ClusterActionName(BaseEnum):
    """Enum of cluster action names."""
    ALL = consts.CLUSTER_ACTION_NAMES

    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}


class ClusterStatus(BaseEnum):
    """Enum of cluster statuses."""
    ALL = consts.CLUSTER_STATUSES


class NodeStatus(BaseEnum):
    """Enum of node statuses."""
    ALL = consts.NODE_STATUSES


class ActionStatus(BaseEnum):
    """Enum of action statuses."""
    ALL = consts.ACTION_STATUSES


class ReceiverType(BaseEnum):
    """Enum of receiver types."""
    ALL = consts.RECEIVER_TYPES

    def get_schema(self):
        return {'type': ['string'],
                'enum': self._valid_values}


class UniqueDict(fields.Dict):
    """Dict field whose values (not just keys) must be unique."""

    def coerce(self, obj, attr, value):
        res = super(UniqueDict, self).coerce(obj, attr, value)
        # Duplicate values indicate e.g. the same node mapped twice.
        new_nodes = res.values()
        if len(new_nodes) != len(set(new_nodes)):
            raise ValueError(_("Map contains duplicated values"))
        return res
# Fixed: the original line read "NegativeIntegerField(fields.AutoTypedField):"
# — a SyntaxError (missing the 'class' keyword) with the leading "Non" of the
# name truncated. AUTO_TYPE is NonNegativeInteger, so the intended class is:
class NonNegativeIntegerField(fields.AutoTypedField):
    """Auto-typed field enforcing a non-negative integer value."""
    AUTO_TYPE = NonNegativeInteger()
# Auto-typed field wrappers pairing each field type above with
# oslo.versionedobjects' AutoTypedField machinery.
class BooleanField(fields.AutoTypedField):
    AUTO_TYPE = Boolean()


class ObjectField(fields.AutoTypedField):
    """Field holding a versioned object of the named type."""

    def __init__(self, objtype, subclasses=False, **kwargs):
        self.AUTO_TYPE = Object(objtype, subclasses)
        self.objname = objtype
        super(ObjectField, self).__init__(**kwargs)


class JsonField(fields.AutoTypedField):
    AUTO_TYPE = Json()


class ListField(fields.AutoTypedField):
    AUTO_TYPE = fields.List(fields.FieldType())


class NotificationPriorityField(fields.BaseEnumField):
    AUTO_TYPE = NotificationPriority()


class NotificationPhaseField(fields.BaseEnumField):
    AUTO_TYPE = NotificationPhase()


class NameField(fields.AutoTypedField):
    AUTO_TYPE = Name()


class UUIDField(fields.AutoTypedField):
    AUTO_TYPE = UUID()


class CapacityField(fields.AutoTypedField):
    """Capacity field with per-instance bounds (hence AUTO_TYPE set in init)."""
    AUTO_TYPE = None

    def __init__(self, nullable=False, default=None, minimum=0, maximum=None):
        self.AUTO_TYPE = Capacity(minimum=minimum, maximum=maximum)
        super(CapacityField, self).__init__(nullable=nullable, default=default)


class SortField(fields.AutoTypedField):
    """Sort-spec field parameterized by the keys valid for the resource."""
    AUTO_TYPE = None

    def __init__(self, valid_keys, nullable=False, default=None):
        self.AUTO_TYPE = Sort(valid_keys)
        super(SortField, self).__init__(nullable=nullable, default=default)


class IdentityListField(fields.AutoTypedField):
    """List-of-identifiers field with size/uniqueness constraints."""
    AUTO_TYPE = None

    def __init__(self, min_items=0, unique=True, nullable=False, default=None):
        # Use a fresh list per instance; a shared mutable default would be
        # aliased across fields.
        if default is None:
            default = []
        self.AUTO_TYPE = IdentityList(fields.String(), min_items=min_items,
                                      unique=unique)
        super(IdentityListField, self).__init__(nullable=nullable,
                                                default=default)
class AdjustmentTypeField(fields.AutoTypedField):
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        # Propagate nullability into the enum type itself.
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = AdjustmentType(nullable=nullable)
        super(AdjustmentTypeField, self).__init__(**kwargs)


class ClusterActionNameField(fields.AutoTypedField):
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ClusterActionName(nullable=nullable)
        super(ClusterActionNameField, self).__init__(**kwargs)


# NOTE(review): the three status fields below assign the enum *class*
# (not an instance) to AUTO_TYPE, unlike every other field in this module
# which instantiates its type — confirm this is intentional.
class ClusterStatusField(fields.AutoTypedField):
    AUTO_TYPE = ClusterStatus


class NodeStatusField(fields.AutoTypedField):
    AUTO_TYPE = NodeStatus


class ActionStatusField(fields.AutoTypedField):
    AUTO_TYPE = ActionStatus


class ReceiverTypeField(fields.AutoTypedField):
    AUTO_TYPE = None

    def __init__(self, **kwargs):
        nullable = kwargs.get('nullable', False)
        self.AUTO_TYPE = ReceiverType(nullable=nullable)
        super(ReceiverTypeField, self).__init__(**kwargs)


class NodeReplaceMapField(fields.AutoTypedField):
    # Mapping of old node id -> new node id; values must be unique.
    AUTO_TYPE = UniqueDict(fields.String())


class CustomListField(ListField):
    """List field that projects each element to one of its attributes."""

    def __init__(self, attr_name, **kwargs):
        # Name of the attribute extracted from every list element.
        self.attr_name = attr_name
        super(CustomListField, self).__init__(**kwargs)

    def coerce(self, obj, attr, value):
        objs = super(CustomListField, self).coerce(obj, attr, value)
        custom_list = []
        for i in objs:
            custom_list.append(getattr(i, self.attr_name))
        return custom_list
| true
| true
|
f704c46b173fb645449a9542a94234ae033a96b3
| 5,712
|
py
|
Python
|
view/__init__.py
|
sadlll/ablog
|
d04b532751c297fe9cd25563d08f48e8aaee7f48
|
[
"Apache-2.0"
] | null | null | null |
view/__init__.py
|
sadlll/ablog
|
d04b532751c297fe9cd25563d08f48e8aaee7f48
|
[
"Apache-2.0"
] | null | null | null |
view/__init__.py
|
sadlll/ablog
|
d04b532751c297fe9cd25563d08f48e8aaee7f48
|
[
"Apache-2.0"
] | 1
|
2020-09-14T07:09:34.000Z
|
2020-09-14T07:09:34.000Z
|
# coding:utf-8
import json
import random
import string
import tornado.web
import config
from lib.jsdict import JsDict
from model.user import User
# route
class Route(object):
    """Decorator-based URL registry.

    Calling an instance with a URL pattern returns a class decorator that
    records a tornado ``URLSpec`` for the decorated handler and hands the
    handler class back unchanged.
    """

    # Shared across instances: the application builds its router from this.
    urls = []

    def __call__(self, url, name=None):
        def decorator(handler_cls):
            spec = tornado.web.URLSpec(url, handler_cls, name=name)
            self.urls.append(spec)
            return handler_cls
        return decorator
route = Route()
# 模板
def get_lookup_mako():
    """Build the mako TemplateLookup over ./templates.

    The compiled-template cache directory gets a random suffix —
    presumably so separate app instances don't share/clobber one cache
    under /tmp; confirm before changing.
    """
    import mako.lookup
    _lookup = mako.lookup.TemplateLookup(
        directories=['./templates'],
        module_directory='/tmp/mako' + ''.join(random.sample(string.ascii_letters + string.digits, 8)),
        input_encoding='utf-8',
    )
    return _lookup
def get_lookup_jinja2(_globals=None, extensions=None):
    """Build the jinja2 Environment over ./templates.

    :param _globals: optional mapping merged into the template globals.
    :param extensions: optional list of jinja2 extension names.
    :return: a configured ``jinja2.Environment``.

    Fixed: the defaults were the mutable literals ``{}`` and ``[]``,
    which are shared across calls; ``None`` sentinels avoid that pitfall
    while keeping the call signature backward-compatible.
    """
    from jinja2 import Environment, FileSystemLoader
    _lookup = Environment(
        loader=FileSystemLoader(['./templates'], encoding='utf-8'),
        extensions=extensions or []
    )
    # mako has no global-variable feature, so for consistency jinja2 only
    # exposes what mako would also see (original comment was in Chinese).
    _lookup.globals['config'] = config
    _lookup.globals.update(_globals or {})
    return _lookup
# Select the template engine from configuration. ``lookup`` stays None for
# any other value, in which case View.render falls back to tornado's
# built-in template renderer (see the `if lookup:` branch in View.render).
if config.TEMPLATE == 'mako':
    lookup = get_lookup_mako()
elif config.TEMPLATE == 'jinja2':
    lookup = get_lookup_jinja2()
else:
    lookup = None
# Session
class SimpleSession(object):
    """Cookie-backed session store.

    The whole session dict is serialized as JSON into tornado's secure
    cookie named 'session'. Mutations stay in memory until ``flush`` is
    called (View.flush does this on every response).
    """

    def __init__(self, request):
        self._request = request
        self._data = self.load()

    def __delitem__(self, key):
        del self._data[key]

    def __getitem__(self, key):
        # Missing keys yield None instead of raising KeyError.
        return self._data.get(key)

    def __setitem__(self, key, value):
        self._data[key] = value

    def load(self):
        """Read and deserialize the session cookie (empty dict if absent)."""
        _s = self._request.get_secure_cookie('session') or '{}'
        if isinstance(_s, bytes):
            # tornado on py3 returns the cookie value as bytes; decode it
            # before parsing. (Replaces a bare `try/except: pass` py2 shim
            # that silently swallowed *all* errors.)
            _s = _s.decode('utf-8')
        return json.loads(_s)

    def flush(self):
        """Serialize the session dict back into the secure cookie."""
        self._request.set_secure_cookie('session', json.dumps(self._data))
# 消息闪现支持
class Messages(object):
    """Flash-message collector (Django-style levels and tags)."""

    # Numeric severity levels; SUCCESS sits between INFO and WARNING.
    MESSAGE_LEVEL = JsDict(
        DEBUG=10,
        INFO=20,
        SUCCESS=25,
        WARNING=30,
        ERROR=40,
    )
    # CSS-friendly tag for each level, used when rendering.
    DEFAULT_TAGS = {
        MESSAGE_LEVEL.DEBUG: 'debug',
        MESSAGE_LEVEL.INFO: 'info',
        MESSAGE_LEVEL.SUCCESS: 'success',
        MESSAGE_LEVEL.WARNING: 'warning',
        MESSAGE_LEVEL.ERROR: 'error',
    }

    def __init__(self):
        # Each entry is a [level, text] pair (list, so it survives JSON
        # round-tripping through the session cookie).
        self.messages = []

    def _add_message(self, level, message):
        self.messages.append([level, message])

    def debug(self, message):
        self._add_message(self.MESSAGE_LEVEL.DEBUG, message)

    def info(self, message):
        self._add_message(self.MESSAGE_LEVEL.INFO, message)

    def success(self, message):
        self._add_message(self.MESSAGE_LEVEL.SUCCESS, message)

    def warning(self, message):
        self._add_message(self.MESSAGE_LEVEL.WARNING, message)

    def error(self, message):
        self._add_message(self.MESSAGE_LEVEL.ERROR, message)
class View(tornado.web.RequestHandler):
    """Base request handler wiring templates, flash messages and sessions."""

    def render(self, fn=None, **kwargs):
        """Render a template; defaults to /<module path>/<classname>.html."""
        if not fn:
            fn = ('/%s/%s.html' % (
                '/'.join(self.__module__.split('.')[1:-1]),
                self.__class__.__name__.lower()
            )).replace(r'//', r'/')
        # Context available to every template.
        kwargs.update({
            'req': self,
            'config': config,
            'static': self.static_url,
            'url_for': self.reverse_url,
            'get_messages': self.get_messages,
            'xsrf_token': self.xsrf_form_html(),
            'csrf_token': self.xsrf_form_html(),
        })
        if lookup:
            # mako/jinja2 path (module-level `lookup` selected by config).
            tmpl = lookup.get_template(fn)
            self.finish(tmpl.render(**kwargs))
        else:
            # Fall back to tornado's built-in renderer, which wants a
            # relative path.
            if fn.startswith('/'):
                fn = '.' + fn
            super(View, self).render(fn, config=config, **kwargs)

    def get_messages(self):
        """Drain current + session-carried flash messages for rendering."""
        msg_lst = self.messages.messages + (self.session['_messages'] or [])
        _messages = []
        for i in msg_lst:
            tag, txt = i
            try: txt = txt.decode('utf-8')  # py2 compat: bytes -> unicode
            except: pass
            _messages.append(JsDict(tag=Messages.DEFAULT_TAGS[tag], txt=txt))
        self.messages.messages = []
        return _messages

    def initialize(self):
        self.messages = Messages()
        self.session = SimpleSession(self)
        super(View, self).initialize()

    def flush(self, include_footers=False, callback=None):
        # Persist pending flash messages into the session cookie before the
        # response body goes out.
        self.session['_messages'] = self.messages.messages
        self.session.flush()
        super(View, self).flush(include_footers, callback)

    def current_user(self):
        # NOTE(review): defined as a method, shadowing tornado's
        # `current_user` property; all in-file callers invoke it as
        # self.current_user() — keep that calling convention.
        key = self.get_secure_cookie('u')
        return User.get_by_key(key)

    def is_admin(self):
        """Return the user if logged in and admin, else None (implicit)."""
        user = self.current_user()
        if user and user.is_admin():
            return user
class LoginView(View):
    """Base handler that requires an authenticated user."""

    def prepare(self):
        if not self.current_user():
            self.redirect(url_for('signin'))


class NoLoginView(View):
    """Base handler for anonymous-only pages (signin/signup)."""

    def prepare(self):
        if self.current_user():
            # User-facing Chinese text: "already signed in, sign out first".
            self.messages.error("您已登陆,请先退出")
            self.redirect(url_for('index'))


class AjaxView(View):
    """JSON endpoint base; responses are application/json."""

    def check_xsrf_cookie(self):
        # useless for json request
        pass

    def prepare(self):
        self.set_header('Content-Type', 'application/json')
        super(AjaxView, self).prepare()


class AjaxLoginView(LoginView):
    """JSON endpoint base that also requires login."""

    def check_xsrf_cookie(self):
        # useless for json request
        pass

    def prepare(self):
        self.set_header('Content-Type', 'application/json')
        super(AjaxLoginView, self).prepare()
# sugar
def url_for(name, *args):
    """Reverse a named URL on the global application object."""
    return config.app.reverse_url(name, *args)


def page_title(*args):
    """Join non-blank parts with ' » ', always ending with the site title."""
    no_blank = lambda x: x is not None and x != ''
    return ' » '.join(list(filter(no_blank, args)) + [config.TITLE])
| 25.386667
| 107
| 0.605042
|
import json
import random
import string
import tornado.web
import config
from lib.jsdict import JsDict
from model.user import User
class Route(object):
urls = []
def __call__(self, url, name=None):
def _(cls):
self.urls.append(tornado.web.URLSpec(url, cls, name=name))
return cls
return _
route = Route()
def get_lookup_mako():
import mako.lookup
_lookup = mako.lookup.TemplateLookup(
directories=['./templates'],
module_directory='/tmp/mako' + ''.join(random.sample(string.ascii_letters + string.digits, 8)),
input_encoding='utf-8',
)
return _lookup
def get_lookup_jinja2(_globals={}, extensions=[]):
from jinja2 import Environment, FileSystemLoader
_lookup = Environment(
loader=FileSystemLoader(['./templates'], encoding='utf-8'),
extensions=extensions
)
_lookup.globals['config'] = config
_lookup.globals.update(_globals)
return _lookup
if config.TEMPLATE == 'mako':
lookup = get_lookup_mako()
elif config.TEMPLATE == 'jinja2':
lookup = get_lookup_jinja2()
else:
lookup = None
class SimpleSession(object):
def __init__(self, request):
self._request = request
self._data = self.load()
def __delitem__(self, key):
del self._data[key]
def __getitem__(self, key):
return self._data.get(key)
def __setitem__(self, key, value):
self._data[key] = value
def load(self):
_s = self._request.get_secure_cookie('session') or '{}'
try: _s = _s.decode('utf-8')
except: pass
return json.loads(_s)
def flush(self):
self._request.set_secure_cookie('session', json.dumps(self._data))
class Messages(object):
MESSAGE_LEVEL = JsDict(
DEBUG=10,
INFO=20,
SUCCESS=25,
WARNING=30,
ERROR=40,
)
DEFAULT_TAGS = {
MESSAGE_LEVEL.DEBUG: 'debug',
MESSAGE_LEVEL.INFO: 'info',
MESSAGE_LEVEL.SUCCESS: 'success',
MESSAGE_LEVEL.WARNING: 'warning',
MESSAGE_LEVEL.ERROR: 'error',
}
def __init__(self):
self.messages = []
def _add_message(self, level, message):
self.messages.append([level, message])
def debug(self, message):
self._add_message(self.MESSAGE_LEVEL.DEBUG, message)
def info(self, message):
self._add_message(self.MESSAGE_LEVEL.INFO, message)
def success(self, message):
self._add_message(self.MESSAGE_LEVEL.SUCCESS, message)
def warning(self, message):
self._add_message(self.MESSAGE_LEVEL.WARNING, message)
def error(self, message):
self._add_message(self.MESSAGE_LEVEL.ERROR, message)
class View(tornado.web.RequestHandler):
def render(self, fn=None, **kwargs):
if not fn:
fn = ('/%s/%s.html' % (
'/'.join(self.__module__.split('.')[1:-1]),
self.__class__.__name__.lower()
)).replace(r'//', r'/')
kwargs.update({
'req': self,
'config': config,
'static': self.static_url,
'url_for': self.reverse_url,
'get_messages': self.get_messages,
'xsrf_token': self.xsrf_form_html(),
'csrf_token': self.xsrf_form_html(),
})
if lookup:
tmpl = lookup.get_template(fn)
self.finish(tmpl.render(**kwargs))
else:
if fn.startswith('/'):
fn = '.' + fn
super(View, self).render(fn, config=config, **kwargs)
def get_messages(self):
msg_lst = self.messages.messages + (self.session['_messages'] or [])
_messages = []
for i in msg_lst:
tag, txt = i
try: txt = txt.decode('utf-8')
except: pass
_messages.append(JsDict(tag=Messages.DEFAULT_TAGS[tag], txt=txt))
self.messages.messages = []
return _messages
def initialize(self):
self.messages = Messages()
self.session = SimpleSession(self)
super(View, self).initialize()
def flush(self, include_footers=False, callback=None):
self.session['_messages'] = self.messages.messages
self.session.flush()
super(View, self).flush(include_footers, callback)
def current_user(self):
key = self.get_secure_cookie('u')
return User.get_by_key(key)
def is_admin(self):
user = self.current_user()
if user and user.is_admin():
return user
class LoginView(View):
def prepare(self):
if not self.current_user():
self.redirect(url_for('signin'))
class NoLoginView(View):
def prepare(self):
if self.current_user():
self.messages.error("您已登陆,请先退出")
self.redirect(url_for('index'))
class AjaxView(View):
def check_xsrf_cookie(self):
pass
def prepare(self):
self.set_header('Content-Type', 'application/json')
super(AjaxView, self).prepare()
class AjaxLoginView(LoginView):
def check_xsrf_cookie(self):
pass
def prepare(self):
self.set_header('Content-Type', 'application/json')
super(AjaxLoginView, self).prepare()
def url_for(name, *args):
return config.app.reverse_url(name, *args)
def page_title(*args):
no_blank = lambda x: x is not None and x != ''
return ' » '.join(list(filter(no_blank, args)) + [config.TITLE])
| true
| true
|
f704c490c9753e311455f361ab6740be9fccc6f7
| 6,062
|
py
|
Python
|
sdks/python/apache_beam/coders/standard_coders_test.py
|
bschell/beam
|
5533acff51cf6157d62a63c60eb3f074f1958df5
|
[
"Apache-2.0"
] | 1
|
2018-12-03T09:37:01.000Z
|
2018-12-03T09:37:01.000Z
|
sdks/python/apache_beam/coders/standard_coders_test.py
|
bschell/beam
|
5533acff51cf6157d62a63c60eb3f074f1958df5
|
[
"Apache-2.0"
] | 2
|
2018-09-09T16:51:47.000Z
|
2018-09-16T15:55:50.000Z
|
sdks/python/apache_beam/coders/standard_coders_test.py
|
bschell/beam
|
5533acff51cf6157d62a63c60eb3f074f1958df5
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for coders that must be consistent across all Beam SDKs.
"""
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os.path
import sys
import unittest
from builtins import map
import yaml
from apache_beam.coders import coder_impl
from apache_beam.coders import coders
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import Timestamp
# Absolute path to the cross-SDK coder test spec shipped with the SDK.
STANDARD_CODERS_YAML = os.path.join(
    os.path.dirname(__file__), '..', 'testing', 'data', 'standard_coders.yaml')
def _load_test_cases(test_yaml):
    """Load test data from yaml file and yield [name, spec] test cases.

    See ``standard_coders.yaml`` for more details.

    Fixed: the file handle returned by ``open`` was never closed (the old
    code passed it straight to yaml and leaked it); a ``with`` block now
    keeps it open exactly for the lifetime of this generator.
    """
    if not os.path.exists(test_yaml):
        raise ValueError('Could not find the test spec: %s' % test_yaml)
    # NOTE(review): yaml.load_all without an explicit Loader can construct
    # arbitrary Python objects. The spec file is repo-trusted, but prefer
    # SafeLoader once the minimum PyYAML version supports it cleanly.
    with open(test_yaml) as f:
        for ix, spec in enumerate(yaml.load_all(f)):
            spec['index'] = ix
            name = spec.get('name', spec['coder']['urn'].split(':')[-2])
            yield [name, spec]
class StandardCodersTest(unittest.TestCase):
    """Runs every case in standard_coders.yaml against the Python coders."""

    # Coder URN -> coder class, or a factory taking component coders.
    _urn_to_coder_class = {
        'beam:coder:bytes:v1': coders.BytesCoder,
        'beam:coder:varint:v1': coders.VarIntCoder,
        'beam:coder:kv:v1': lambda k, v: coders.TupleCoder((k, v)),
        'beam:coder:interval_window:v1': coders.IntervalWindowCoder,
        'beam:coder:iterable:v1': lambda t: coders.IterableCoder(t),
        'beam:coder:global_window:v1': coders.GlobalWindowCoder,
        'beam:coder:windowed_value:v1':
            lambda v, w: coders.WindowedValueCoder(v, w)
    }
    # Coder URN -> parser turning the YAML-side JSON value into the Python
    # value that coder encodes (component parsers are passed positionally).
    _urn_to_json_value_parser = {
        'beam:coder:bytes:v1': lambda x: x,
        'beam:coder:varint:v1': lambda x: x,
        'beam:coder:kv:v1':
            lambda x, key_parser, value_parser: (key_parser(x['key']),
                                                 value_parser(x['value'])),
        'beam:coder:interval_window:v1':
            lambda x: IntervalWindow(
                start=Timestamp(micros=(x['end'] - x['span']) * 1000),
                end=Timestamp(micros=x['end'] * 1000)),
        'beam:coder:iterable:v1': lambda x, parser: list(map(parser, x)),
        'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
        'beam:coder:windowed_value:v1':
            lambda x, value_parser, window_parser: windowed_value.create(
                value_parser(x['value']), x['timestamp'] * 1000,
                tuple([window_parser(w) for w in x['windows']]))
    }

    def test_standard_coders(self):
        for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
            logging.info('Executing %s test.', name)
            self._run_standard_coder(name, spec)

    def _run_standard_coder(self, name, spec):
        """Round-trip each example through the coder built from the spec."""
        coder = self.parse_coder(spec['coder'])
        parse_value = self.json_value_parser(spec['coder'])
        # A spec may pin nestedness; otherwise test both nested and unnested.
        nested_list = [spec['nested']] if 'nested' in spec else [True, False]
        for nested in nested_list:
            for expected_encoded, json_value in spec['examples'].items():
                value = parse_value(json_value)
                # Examples store raw bytes as latin1 text in the YAML.
                expected_encoded = expected_encoded.encode('latin1')
                if not spec['coder'].get('non_deterministic', False):
                    actual_encoded = encode_nested(coder, value, nested)
                    if self.fix and actual_encoded != expected_encoded:
                        # In --fix mode, collect mismatches for tearDownClass
                        # to write back into the YAML instead of failing.
                        self.to_fix[spec['index'], expected_encoded] = actual_encoded
                    else:
                        self.assertEqual(expected_encoded, actual_encoded)
                        self.assertEqual(decode_nested(coder, expected_encoded, nested),
                                         value)
                else:
                    # Only verify decoding for a non-deterministic coder
                    self.assertEqual(decode_nested(coder, expected_encoded, nested),
                                     value)

    def parse_coder(self, spec):
        """Recursively build a coder from a spec's urn + components."""
        return self._urn_to_coder_class[spec['urn']](
            *[self.parse_coder(c) for c in spec.get('components', ())])

    def json_value_parser(self, coder_spec):
        """Recursively build the value parser matching a coder spec."""
        component_parsers = [
            self.json_value_parser(c) for c in coder_spec.get('components', ())]
        return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
            x, *component_parsers)

    # Used when --fix is passed.
    fix = False
    to_fix = {}

    @classmethod
    def tearDownClass(cls):
        # --fix mode: rewrite the expected encodings in the YAML in place.
        if cls.fix and cls.to_fix:
            print("FIXING", len(cls.to_fix), "TESTS")
            doc_sep = '\n---\n'
            docs = open(STANDARD_CODERS_YAML).read().split(doc_sep)
            def quote(s):
                return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
            for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
                print(quote(expected_encoded), "->", quote(actual_encoded))
                docs[doc_ix] = docs[doc_ix].replace(
                    quote(expected_encoded) + ':', quote(actual_encoded) + ':')
            open(STANDARD_CODERS_YAML, 'w').write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
    """Encode value through coder's impl into fresh bytes."""
    stream = coder_impl.create_OutputStream()
    coder.get_impl().encode_to_stream(value, stream, nested)
    return stream.get()
def decode_nested(coder, encoded, nested=True):
    """Decode bytes produced by encode_nested back into a value."""
    stream = coder_impl.create_InputStream(encoded)
    return coder.get_impl().decode_from_stream(stream, nested)
if __name__ == '__main__':
    # --fix is a custom flag (not understood by unittest): strip it from
    # argv and switch the test class into YAML-rewriting mode.
    if '--fix' in sys.argv:
        StandardCodersTest.fix = True
        sys.argv.remove('--fix')
    unittest.main()
| 37.8875
| 79
| 0.682118
|
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os.path
import sys
import unittest
from builtins import map
import yaml
from apache_beam.coders import coder_impl
from apache_beam.coders import coders
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import Timestamp
STANDARD_CODERS_YAML = os.path.join(
os.path.dirname(__file__), '..', 'testing', 'data', 'standard_coders.yaml')
def _load_test_cases(test_yaml):
if not os.path.exists(test_yaml):
raise ValueError('Could not find the test spec: %s' % test_yaml)
for ix, spec in enumerate(yaml.load_all(open(test_yaml))):
spec['index'] = ix
name = spec.get('name', spec['coder']['urn'].split(':')[-2])
yield [name, spec]
class StandardCodersTest(unittest.TestCase):
_urn_to_coder_class = {
'beam:coder:bytes:v1': coders.BytesCoder,
'beam:coder:varint:v1': coders.VarIntCoder,
'beam:coder:kv:v1': lambda k, v: coders.TupleCoder((k, v)),
'beam:coder:interval_window:v1': coders.IntervalWindowCoder,
'beam:coder:iterable:v1': lambda t: coders.IterableCoder(t),
'beam:coder:global_window:v1': coders.GlobalWindowCoder,
'beam:coder:windowed_value:v1':
lambda v, w: coders.WindowedValueCoder(v, w)
}
_urn_to_json_value_parser = {
'beam:coder:bytes:v1': lambda x: x,
'beam:coder:varint:v1': lambda x: x,
'beam:coder:kv:v1':
lambda x, key_parser, value_parser: (key_parser(x['key']),
value_parser(x['value'])),
'beam:coder:interval_window:v1':
lambda x: IntervalWindow(
start=Timestamp(micros=(x['end'] - x['span']) * 1000),
end=Timestamp(micros=x['end'] * 1000)),
'beam:coder:iterable:v1': lambda x, parser: list(map(parser, x)),
'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
'beam:coder:windowed_value:v1':
lambda x, value_parser, window_parser: windowed_value.create(
value_parser(x['value']), x['timestamp'] * 1000,
tuple([window_parser(w) for w in x['windows']]))
}
def test_standard_coders(self):
for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
logging.info('Executing %s test.', name)
self._run_standard_coder(name, spec)
def _run_standard_coder(self, name, spec):
coder = self.parse_coder(spec['coder'])
parse_value = self.json_value_parser(spec['coder'])
nested_list = [spec['nested']] if 'nested' in spec else [True, False]
for nested in nested_list:
for expected_encoded, json_value in spec['examples'].items():
value = parse_value(json_value)
expected_encoded = expected_encoded.encode('latin1')
if not spec['coder'].get('non_deterministic', False):
actual_encoded = encode_nested(coder, value, nested)
if self.fix and actual_encoded != expected_encoded:
self.to_fix[spec['index'], expected_encoded] = actual_encoded
else:
self.assertEqual(expected_encoded, actual_encoded)
self.assertEqual(decode_nested(coder, expected_encoded, nested),
value)
else:
self.assertEqual(decode_nested(coder, expected_encoded, nested),
value)
def parse_coder(self, spec):
return self._urn_to_coder_class[spec['urn']](
*[self.parse_coder(c) for c in spec.get('components', ())])
def json_value_parser(self, coder_spec):
component_parsers = [
self.json_value_parser(c) for c in coder_spec.get('components', ())]
return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
x, *component_parsers)
fix = False
to_fix = {}
@classmethod
def tearDownClass(cls):
if cls.fix and cls.to_fix:
print("FIXING", len(cls.to_fix), "TESTS")
doc_sep = '\n---\n'
docs = open(STANDARD_CODERS_YAML).read().split(doc_sep)
def quote(s):
return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
print(quote(expected_encoded), "->", quote(actual_encoded))
docs[doc_ix] = docs[doc_ix].replace(
quote(expected_encoded) + ':', quote(actual_encoded) + ':')
open(STANDARD_CODERS_YAML, 'w').write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
out = coder_impl.create_OutputStream()
coder.get_impl().encode_to_stream(value, out, nested)
return out.get()
def decode_nested(coder, encoded, nested=True):
return coder.get_impl().decode_from_stream(
coder_impl.create_InputStream(encoded), nested)
if __name__ == '__main__':
if '--fix' in sys.argv:
StandardCodersTest.fix = True
sys.argv.remove('--fix')
unittest.main()
| true
| true
|
f704c56086b8ba866a205345226d91d6b051226d
| 200
|
py
|
Python
|
RULEngine/Game/Field.py
|
wonwon0/RobocupStrategyIA
|
891028f616d476b05b23b40924d7c99502a718e3
|
[
"MIT"
] | null | null | null |
RULEngine/Game/Field.py
|
wonwon0/RobocupStrategyIA
|
891028f616d476b05b23b40924d7c99502a718e3
|
[
"MIT"
] | null | null | null |
RULEngine/Game/Field.py
|
wonwon0/RobocupStrategyIA
|
891028f616d476b05b23b40924d7c99502a718e3
|
[
"MIT"
] | null | null | null |
# Under MIT License, see LICENSE.txt
class Field():
    """Game field that owns the ball and relays movement commands to it."""

    def __init__(self, ball):
        # The ball object; expected to expose set_position(position, delta).
        self.ball = ball

    def move_ball(self, position, delta):
        """Delegate the move to the ball's own positioning logic."""
        self.ball.set_position(position, delta)
| 20
| 47
| 0.655
|
class Field():
def __init__(self, ball):
self.ball = ball
def move_ball(self, position, delta):
self.ball.set_position(position, delta)
| true
| true
|
f704c67d98e3a2a857e70406e7c669c763faa37b
| 3,213
|
py
|
Python
|
setup.py
|
altosaar/nomen
|
29170a2011decbc9aa4cae48bd5d8d291a6d9fb8
|
[
"MIT"
] | 4
|
2016-12-22T16:37:52.000Z
|
2017-05-31T11:12:57.000Z
|
setup.py
|
altosaar/nomen
|
29170a2011decbc9aa4cae48bd5d8d291a6d9fb8
|
[
"MIT"
] | 2
|
2016-12-23T06:15:31.000Z
|
2019-03-21T21:36:03.000Z
|
setup.py
|
altosaar/nomen
|
29170a2011decbc9aa4cae48bd5d8d291a6d9fb8
|
[
"MIT"
] | 1
|
2018-07-05T21:14:59.000Z
|
2018-07-05T21:14:59.000Z
|
#!/usr/bin/env python
# setup
# Setup script for installing nomen
##########################################################################
## Imports
##########################################################################
import os
import re
import codecs
from setuptools import setup
from setuptools import find_packages
##########################################################################
## Package Information
##########################################################################
## Basic information
# Package identity used throughout the setup configuration below.
NAME = "nomen"
DESCRIPTION = "YAML configuration tree with command line flags."
AUTHOR = "Jaan Altosaar"
EMAIL = "j@jaan.io"
LICENSE = "MIT"
REPOSITORY = "https://github.com/altosaar/nomen"
PACKAGE = "nomen"
## Define the keywords
KEYWORDS = (
    'nomen', 'python', 'option', 'tree', 'nested', 'dict', 'parameter', 'flags'
)
## Define the classifiers
## See https://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = (
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
)
## Important Paths
# Absolute path of the directory containing this setup.py.
PROJECT = os.path.abspath(os.path.dirname(__file__))
# Location of the module that computes the package version.
VERSION_PATH = os.path.join(PACKAGE, "version.py")
## Directories to ignore in find_packages
EXCLUDES = (
    "tests", "bin", "docs", "fixtures", "register", "notebooks",
)
## Requirements
REQUIREMENTS = ["pyyaml", "addict"]
##########################################################################
## Helper Functions
##########################################################################
def read(*parts):
    """
    Assume UTF-8 encoding and return the contents of the file located at the
    absolute path from the REPOSITORY joined with *parts.
    """
    path = os.path.join(PROJECT, *parts)
    with codecs.open(path, 'rb', 'utf-8') as handle:
        return handle.read()
def get_version(path=VERSION_PATH):
    """
    Reads the version.py defined in the VERSION_PATH to find the get_version
    function, and executes it to ensure that it is loaded correctly.
    """
    # exec on a repo-local file is safe here (trusted source tree); the
    # indirection avoids importing the package before it is installed.
    namespace = {}
    exec(read(path), namespace)
    return namespace['get_version']()
##########################################################################
## Define the configuration
##########################################################################
## Keyword arguments forwarded verbatim to setuptools.setup() below.
config = {
    "name": NAME,
    "version": get_version(),
    "description": DESCRIPTION,
    "long_description": DESCRIPTION,
    "license": LICENSE,
    "author": AUTHOR,
    "author_email": EMAIL,
    "maintainer": AUTHOR,
    "maintainer_email": EMAIL,
    "url": REPOSITORY,
    ## GitHub tarball URL for the tag matching this release's version.
    "download_url": "{}/tarball/v{}".format(REPOSITORY, get_version()),
    "packages": find_packages(where=PROJECT, exclude=EXCLUDES),
    "classifiers": CLASSIFIERS,
    "keywords": KEYWORDS,
    "zip_safe": False,
    "install_requires": REQUIREMENTS,
}
##########################################################################
## Run setup script
##########################################################################
if __name__ == '__main__':
    setup(**config)
| 28.945946
| 79
| 0.517585
| true
| true
|
|
f704c68356b63c2761fa7eec5a286419626cb51a
| 407
|
py
|
Python
|
confdgnmi/src/confd_gnmi_netconf_adapter.py
|
micnovak/ConfD-Demos
|
479499e7c5339ae77b611e17196e7516d1f1a1ce
|
[
"Apache-2.0"
] | 11
|
2019-12-07T20:15:57.000Z
|
2022-02-04T18:12:52.000Z
|
confdgnmi/src/confd_gnmi_netconf_adapter.py
|
micnovak/ConfD-Demos
|
479499e7c5339ae77b611e17196e7516d1f1a1ce
|
[
"Apache-2.0"
] | 2
|
2020-03-01T11:04:16.000Z
|
2021-02-03T14:17:23.000Z
|
confdgnmi/src/confd_gnmi_netconf_adapter.py
|
micnovak/ConfD-Demos
|
479499e7c5339ae77b611e17196e7516d1f1a1ce
|
[
"Apache-2.0"
] | 6
|
2019-10-18T15:26:03.000Z
|
2021-01-13T10:28:30.000Z
|
from confd_gnmi_adapter import GnmiServerAdapter
class GnmiNetconfServerAdapter(GnmiServerAdapter):
    """Skeleton gNMI server adapter backed by NETCONF.

    Every operation is currently an unimplemented stub: ``get``/``capabilities``
    report empty results and the remaining methods do nothing. Placeholder for
    a future NETCONF bridge.
    """
    @classmethod
    def get_adapter(cls):
        # Stub: should construct/return an adapter instance — not implemented.
        pass
    def set(self, prefix, path, val):
        # Stub: gNMI Set — not implemented.
        pass
    def get_subscription_handler(self, subscription_list):
        # Stub: gNMI Subscribe — not implemented.
        pass
    def capabilities(self):
        # Stub: gNMI Capabilities — no supported models reported yet.
        return []
    def get(self, prefix, paths, data_type, use_models):
        # Stub: gNMI Get — returns no notifications.
        return []
| 19.380952
| 58
| 0.670762
|
from confd_gnmi_adapter import GnmiServerAdapter
class GnmiNetconfServerAdapter(GnmiServerAdapter):
@classmethod
def get_adapter(cls):
pass
def set(self, prefix, path, val):
pass
def get_subscription_handler(self, subscription_list):
pass
def capabilities(self):
return []
def get(self, prefix, paths, data_type, use_models):
return []
| true
| true
|
f704c6f312d2240524282b8c816c45e6b8714e15
| 36,146
|
py
|
Python
|
lightseq/training/ops/pytorch/torch_transformer_layers.py
|
iRmantou/lightseq
|
9a617306fa711a3d6a25ef3eab9bfbe408692189
|
[
"Apache-2.0"
] | 1
|
2022-03-27T17:16:16.000Z
|
2022-03-27T17:16:16.000Z
|
lightseq/training/ops/pytorch/torch_transformer_layers.py
|
iRmantou/lightseq
|
9a617306fa711a3d6a25ef3eab9bfbe408692189
|
[
"Apache-2.0"
] | null | null | null |
lightseq/training/ops/pytorch/torch_transformer_layers.py
|
iRmantou/lightseq
|
9a617306fa711a3d6a25ef3eab9bfbe408692189
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The LightSeq Team
# Copyright Facebook Fairseq
# We use layers from Facebook Fairseq as our baseline
import math
import uuid
from typing import Dict, Optional, Tuple, List
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter, LayerNorm, Dropout, Linear
from lightseq.training.ops.pytorch import util
from lightseq.training.ops.pytorch.layer_base import (
TransformerEmbeddingLayerBase,
TransformerEncoderLayerBase,
TransformerDecoderLayerBase,
)
from .quantization import (
QuantLinear,
TensorQuantizer,
act_quant_config,
weight_quant_config,
)
class MultiheadAttention(nn.Module):
    """Multi-headed attention.
    See "Attention Is All You Need" for more details.

    Fairseq-derived implementation with LightSeq quantization hooks: the
    input projections are ``QuantLinear`` modules, self-attention uses one
    fused QKV projection, and decoder self-attention additionally
    fake-quantizes the fused QKV activations.
    """
    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        is_decoder=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        # Key/value input dims may differ from embed_dim (cross-attention).
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout_module = Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # Standard 1/sqrt(d_head) attention scaling, applied to q in forward().
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        self.is_decoder = is_decoder
        assert (
            not self.self_attention or self.qkv_same_dim
        ), "Self-attention requires query, key and value to be of the same size"
        self.attention_quant = None
        if self.self_attention:
            # Fused QKV projection; the 3*embed_dim output is split in forward().
            # self.qkv_proj = Linear(embed_dim, 3*embed_dim, bias=bias)
            self.qkv_proj = QuantLinear(embed_dim, 3 * embed_dim, bias=bias)
            self.attention_quant = (
                TensorQuantizer(act_quant_config) if self.is_decoder else None
            )
        elif self.encoder_decoder_attention and self.is_decoder:
            self.k_proj = QuantLinear(
                self.kdim, embed_dim, pre_activation="encoder_out", bias=bias
            )
            self.v_proj = QuantLinear(
                self.vdim, embed_dim, pre_activation="encoder_out", bias=bias
            )
            self.q_proj = QuantLinear(embed_dim, embed_dim, bias=bias)
        # NOTE(review): for any configuration other than the two above
        # (e.g. encoder_decoder_attention without is_decoder) no q/k/v
        # projections are created, so reset_parameters()/forward() would
        # raise AttributeError — confirm callers only use these two modes.
        self.out_proj = QuantLinear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        self.onnx_trace = False
        self.tpu = False
        self.init_incremental_state()
    def prepare_for_onnx_export_(self):
        # Switch forward() onto ONNX-tracing-friendly code paths.
        self.onnx_trace = True
    def prepare_for_tpu_(self, **kwargs):
        # TPU path avoids masked_fill with a broadcast bool mask in forward().
        self.tpu = True
    def reset_parameters(self):
        """Xavier-initialize projections (scaled init when q/k/v share dims)."""
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            if self.self_attention:
                nn.init.xavier_uniform_(self.qkv_proj.weight, gain=1 / math.sqrt(2))
            else:
                nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
                nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
                nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        need_weights: bool = True,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        before_softmax: bool = False,
        need_head_weights: bool = False,
    ):
        """Input shape: Time x Batch x Channel
        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        # Fetch any keys/values cached from previous decoding steps.
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None
        if self.self_attention:
            # Fused projection, then split into equal q/k/v chunks.
            qkv = self.qkv_proj(query)
            if self.attention_quant is not None:
                qkv = self.attention_quant(qkv)
            q, k, v = qkv.split(self.embed_dim, dim=-1)
            # q = self.q_proj(query)
            # k = self.k_proj(query)
            # v = self.v_proj(query)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        # Pre-scale queries by 1/sqrt(head_dim).
        q = q * self.scaling
        if self.bias_k is not None:
            # Learned bias key/value appended as an extra source position.
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )
        # Fold heads into the batch dim: (bsz * num_heads, len, head_dim).
        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if "prev_key" in saved_state:
                _prev_key = saved_state["prev_key"]
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert k is not None and v is not None
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k.size(1),
                static_kv=static_kv,
            )
            # Write the (possibly extended) cache back for the next step.
            saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            # In this branch incremental_state is never None
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        src_len = k.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            # Append an all-zero key/value position (and widen the masks).
            assert v is not None
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        torch.zeros(key_padding_mask.size(0), 1).type_as(
                            key_padding_mask
                        ),
                    ],
                    dim=1,
                )
        # Raw attention scores: (bsz * num_heads, tgt_len, src_len).
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            # attn_mask is additive (e.g. -inf for disallowed positions).
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            if not self.tpu:
                attn_weights = attn_weights.masked_fill(
                    key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
                    float("-inf"),
                )
            else:
                attn_weights = attn_weights.transpose(0, 2)
                attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
                attn_weights = attn_weights.transpose(0, 2)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        attn_weights_float = util.softmax(
            attn_weights, dim=-1, onnx_trace=self.onnx_trace
        )
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = self.dropout_module(attn_weights)
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        # Re-purpose the name for the (optional) returned weights.
        attn_weights: Optional[Tensor] = None
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        return attn, attn_weights
    @staticmethod
    def _append_prev_key_padding_mask(
        key_padding_mask: Optional[Tensor],
        prev_key_padding_mask: Optional[Tensor],
        batch_size: int,
        src_len: int,
        static_kv: bool,
    ) -> Optional[Tensor]:
        """Combine the cached padding mask with the current step's mask,
        zero-filling whichever side is missing so widths match src_len."""
        # saved key padding masks have shape (bsz, seq_len)
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
            )
        # During incremental decoding, as the padding token enters and
        # leaves the frame, there will be a time when prev or current
        # is None
        elif prev_key_padding_mask is not None:
            filler = torch.zeros(
                (batch_size, src_len - prev_key_padding_mask.size(1)),
                device=prev_key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), filler.float()], dim=1
            )
        elif key_padding_mask is not None:
            filler = torch.zeros(
                (batch_size, src_len - key_padding_mask.size(1)),
                device=key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [filler.float(), key_padding_mask.float()], dim=1
            )
        else:
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask
    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        new_order: Tensor,
    ):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    # For encoder-decoder attention whose cache already has the
                    # target batch size, skip reordering entirely (presumably
                    # the cache was already reordered upstream — fairseq
                    # convention; confirm against the caller).
                    if self.encoder_decoder_attention and input_buffer_k.size(
                        0
                    ) == new_order.size(0):
                        break
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state
    def _get_input_buffer(
        self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
    ) -> Dict[str, Optional[Tensor]]:
        """Return this module's cached attn state, or an empty dict."""
        result = self.get_incremental_state(incremental_state, "attn_state")
        if result is not None:
            return result
        else:
            empty_result: Dict[str, Optional[Tensor]] = {}
            return empty_result
    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        """Store this module's attn state into the shared incremental dict."""
        return self.set_incremental_state(incremental_state, "attn_state", buffer)
    def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
        # Hook for sparse-attention subclasses; identity in this base class.
        return attn_weights
    def upgrade_state_dict_named(self, state_dict, name):
        """Split legacy fused `in_proj_weight`/`in_proj_bias` checkpoint
        entries into separate q/k/v projection entries, in place."""
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
                keys_to_remove.append(k)
                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim : 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
                    keys_to_remove.append(prefix + "in_proj_bias")
        for k in keys_to_remove:
            del state_dict[k]
        for key, value in items_to_add.items():
            state_dict[key] = value
    def init_incremental_state(self):
        # Unique per-module id so several attention modules can share one
        # incremental_state dict without key collisions.
        self._incremental_state_id = str(uuid.uuid4())
    def _get_full_incremental_state_key(self, key: str) -> str:
        return "{}.{}".format(self._incremental_state_id, key)
    def get_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
    ) -> Optional[Dict[str, Optional[Tensor]]]:
        """Helper for getting incremental state for an nn.Module."""
        full_key = self._get_full_incremental_state_key(key)
        if incremental_state is None or full_key not in incremental_state:
            return None
        return incremental_state[full_key]
    def set_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
        value: Dict[str, Optional[Tensor]],
    ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
        """Helper for setting incremental state for an nn.Module."""
        if incremental_state is not None:
            full_key = self._get_full_incremental_state_key(key)
            incremental_state[full_key] = value
        return incremental_state
class TransformerEncoderLayer(TransformerEncoderLayerBase):
    """Fairseq-derived Transformer encoder layer, driven by a config object.

    Each sub-block (self-attention, then feed-forward) is post-processed as
    ``dropout -> residual add``, with LayerNorm applied either before the
    sub-block (pre-LN, when ``config.pre_layer_norm`` is set) or after it
    (post-LN, the "Attention Is All You Need" arrangement).
    """

    def __init__(self, config, initial_weights=None, initial_biases=None):
        super().__init__()
        hidden = config.hidden_size
        self.embed_dim = hidden
        self.self_attn = self.build_self_attention(
            hidden, config.nhead, config.attn_prob_dropout_ratio
        )
        self.self_attn_layer_norm = LayerNorm(hidden)
        self.dropout_module = Dropout(config.hidden_dropout_ratio)
        self.activation_fn = util.get_activation_fn(activation=config.activation_fn)
        self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))
        self.normalize_before = config.pre_layer_norm
        # Two-layer position-wise FFN (quantized linears).
        self.fc1 = QuantLinear(hidden, config.intermediate_size)
        self.fc2 = QuantLinear(config.intermediate_size, hidden, pre_activation="relu")
        self.final_layer_norm = LayerNorm(hidden)

    def build_self_attention(self, embed_dim, nhead, attn_dropout):
        """Construct the (quantized) multi-head self-attention module."""
        return MultiheadAttention(
            embed_dim,
            nhead,
            dropout=attn_dropout,
            self_attention=True,
        )

    def residual_connection(self, x, residual):
        """Residual add; kept as a method so subclasses may override it."""
        return x + residual

    def upgrade_state_dict_named(self, state_dict, name):
        """Migrate legacy checkpoints: rename `...layer_norms.0.*` to
        `...self_attn_layer_norm.*` and `...layer_norms.1.*` to
        `...final_layer_norm.*`, in place."""
        renames = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
        for idx, target in renames.items():
            for param in ("weight", "bias"):
                old_key = "{}.layer_norms.{}.{}".format(name, idx, param)
                if old_key in state_dict:
                    state_dict["{}.{}.{}".format(name, target, param)] = state_dict[
                        old_key
                    ]
                    del state_dict[old_key]

    def forward(self, x, encoder_padding_mask):
        """Run one encoder layer.

        Args:
            x (Tensor): input of shape `(batch, seq_len, embed_dim)`.
            encoder_padding_mask (ByteTensor): `(batch, seq_len)`, where
                padding elements are indicated by ``1``.
        Returns:
            Tensor of shape `(batch, seq_len, embed_dim)`.
        """
        # Internals run time-first, as MultiheadAttention expects.
        x = x.transpose(0, 1)
        # --- self-attention sub-block ---
        shortcut = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, _ = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=encoder_padding_mask,
        )
        x = self.residual_connection(self.dropout_module(x), shortcut)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # --- feed-forward sub-block ---
        shortcut = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_dropout_module(self.activation_fn(self.fc1(x)))
        x = self.dropout_module(self.fc2(x))
        x = self.residual_connection(x, shortcut)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        # Back to batch-first for the caller.
        return x.transpose(0, 1)
class TransformerDecoderLayer(TransformerDecoderLayerBase):
    """Decoder layer implemented by fairseq.
    This version only removes the "args" parameter, no other changes

    Sub-blocks in order: masked self-attention, encoder-decoder (cross)
    attention, position-wise FFN. Each is wrapped as
    `dropout -> residual add`, with LayerNorm applied before (pre-LN) or
    after (post-LN) depending on ``config.pre_layer_norm``.
    """
    def __init__(self, config, initial_weights=None, initial_biases=None):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.dropout_module = Dropout(config.hidden_dropout_ratio)
        # When True, self-attention would also attend over encoder states;
        # hard-wired off in this implementation.
        self.cross_self_attention = False
        self.self_attn = self.build_self_attention(
            self.embed_dim,
            config.nhead,
            config.attn_prob_dropout_ratio,
        )
        self.activation_fn = util.get_activation_fn(activation=config.activation_fn)
        self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))
        self.normalize_before = config.pre_layer_norm
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.encoder_attn = self.build_encoder_attention(
            self.embed_dim,
            config.hidden_size,
            config.attn_prob_dropout_ratio,
            config.nhead,
        )
        self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
        # Two-layer position-wise FFN (quantized linears).
        self.fc1 = QuantLinear(
            self.embed_dim,
            config.intermediate_size,
        )
        self.fc2 = QuantLinear(
            config.intermediate_size,
            self.embed_dim,
            pre_activation="relu",
        )
        self.final_layer_norm = LayerNorm(self.embed_dim)
        self.need_attn = True
        self.onnx_trace = False
    def build_self_attention(
        self, embed_dim, nhead, attn_dropout, add_bias_kv=False, add_zero_attn=False
    ):
        """Construct the (quantized) decoder self-attention module."""
        return MultiheadAttention(
            embed_dim,
            nhead,
            dropout=attn_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=not self.cross_self_attention,
            is_decoder=True,
        )
    def build_encoder_attention(
        self, embed_dim, encoder_embed_dim, attn_dropout, nhead
    ):
        """Construct the (quantized) encoder-decoder cross-attention module."""
        return MultiheadAttention(
            embed_dim,
            nhead,
            kdim=encoder_embed_dim,
            vdim=encoder_embed_dim,
            dropout=attn_dropout,
            encoder_decoder_attention=True,
            is_decoder=True,
        )
    def prepare_for_onnx_export_(self):
        # Switch forward() onto ONNX-tracing-friendly code paths.
        self.onnx_trace = True
    def residual_connection(self, x, residual):
        # Plain residual add; kept as a method so subclasses may override.
        return residual + x
    def forward(
        self,
        x,
        encoder_out: Optional[torch.Tensor] = None,
        encoder_padding_mask: Optional[torch.Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        prev_self_attn_state: Optional[List[torch.Tensor]] = None,
        prev_attn_state: Optional[List[torch.Tensor]] = None,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
        need_attn: bool = False,
        need_head_weights: bool = False,
    ):
        """
        Args:
            x (Tensor): input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_padding_mask (ByteTensor, optional): binary
                ByteTensor of shape `(batch, src_len)` where padding
                elements are indicated by ``1``.
            need_attn (bool, optional): return attention weights
            need_head_weights (bool, optional): return attention weights
                for each head (default: return average over heads).
        Returns:
            encoded output of shape `(seq_len, batch, embed_dim)`
        """
        if need_head_weights:
            need_attn = True
        # Internals run time-first, as MultiheadAttention expects.
        x = x.transpose(0, 1)
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # Restore externally supplied self-attention cache, if any.
        if prev_self_attn_state is not None:
            prev_key, prev_value = prev_self_attn_state[:2]
            saved_state: Dict[str, Optional[Tensor]] = {
                "prev_key": prev_key,
                "prev_value": prev_value,
            }
            if len(prev_self_attn_state) >= 3:
                saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
            assert incremental_state is not None
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
        # cross_self_attention mode (dead branch here, since the flag is
        # hard-wired False in __init__): prepend encoder states/masks so
        # self-attention can also attend over the source.
        if self.cross_self_attention and not (
            incremental_state is not None
            and _self_attn_input_buffer is not None
            and "prev_key" in _self_attn_input_buffer
        ):
            if self_attn_mask is not None:
                assert encoder_out is not None
                self_attn_mask = torch.cat(
                    (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
                )
            if self_attn_padding_mask is not None:
                if encoder_padding_mask is None:
                    assert encoder_out is not None
                    encoder_padding_mask = self_attn_padding_mask.new_zeros(
                        encoder_out.size(1), encoder_out.size(0)
                    )
                self_attn_padding_mask = torch.cat(
                    (encoder_padding_mask, self_attn_padding_mask), dim=1
                )
            assert encoder_out is not None
            y = torch.cat((encoder_out, x), dim=0)
        else:
            y = x
        x, attn = self.self_attn(
            query=x,
            key=y,
            value=y,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        if self.encoder_attn is not None and encoder_out is not None:
            # Beam search: expand encoder outputs to match a beam-expanded
            # decoder batch (x batch is a multiple of the encoder batch).
            # NOTE(review): assumes encoder_padding_mask is not None whenever
            # this resize triggers — confirm against callers.
            if (
                encoder_out.shape[1] != x.shape[1]
                and x.shape[1] % encoder_out.shape[1] == 0
            ):
                beam_size = int(x.shape[1] / encoder_out.shape[1])
                encoder_out = encoder_out.repeat_interleave(beam_size, 1)
                encoder_padding_mask = encoder_padding_mask.repeat_interleave(
                    beam_size, 0
                )
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            # Restore externally supplied cross-attention cache, if any.
            if prev_attn_state is not None:
                prev_key, prev_value = prev_attn_state[:2]
                saved_state: Dict[str, Optional[Tensor]] = {
                    "prev_key": prev_key,
                    "prev_value": prev_value,
                }
                if len(prev_attn_state) >= 3:
                    saved_state["prev_key_padding_mask"] = prev_attn_state[2]
                assert incremental_state is not None
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            x, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=need_attn or (not self.training and self.need_attn),
                need_head_weights=need_head_weights,
            )
            x = self.dropout_module(x)
            x = self.residual_connection(x, residual)
            if not self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        # ONNX tracing additionally returns the self-attention cache.
        if self.onnx_trace and incremental_state is not None:
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            assert saved_state is not None
            if self_attn_padding_mask is not None:
                self_attn_state = [
                    saved_state["prev_key"],
                    saved_state["prev_value"],
                    saved_state["prev_key_padding_mask"],
                ]
            else:
                self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
            return x, attn, self_attn_state
        x = x.transpose(0, 1)
        return x, attn, None
    def make_generation_fast_(self, need_attn: bool = False, **kwargs):
        # Toggle whether cross-attention weights are computed during inference.
        self.need_attn = need_attn
class TransformerEmbeddingLayer(TransformerEmbeddingLayerBase):
    """Token embedding + sinusoidal positions + dropout.

    The embedding output is fake-quantized (``TensorQuantizer``) and scaled
    by sqrt(embedding_dim) before positions are added, matching the usual
    Transformer input pipeline.
    """

    def __init__(self, config):
        super().__init__()
        dim = config.embedding_dim
        pad = config.padding_idx
        self.emb_lookup = nn.Embedding(config.vocab_size, dim, padding_idx=pad)
        self.emb_lookup.to(dtype=(torch.half if config.fp16 else torch.float))
        self.embeddings = self.emb_lookup.weight
        # Scaled-normal init; the padding row is forced to zero.
        nn.init.normal_(self.embeddings, mean=0, std=dim ** -0.5)
        nn.init.constant_(self.embeddings[pad], 0)
        self.embed_positions = SinusoidalPositionalEmbedding(
            dim, pad, config.max_seq_len, config.fp16
        )
        self.embedding_dim = dim
        self.dropout = Dropout(config.dropout)
        self.emb_quant = TensorQuantizer(weight_quant_config)
        self.config = config

    def forward(self, input, step=0):
        """Embed token ids, scale by sqrt(dim), add positions, apply dropout."""
        scaled = math.sqrt(self.embedding_dim) * self.emb_quant(self.emb_lookup(input))
        scaled += self.embed_positions(input, step)
        return self.dropout(scaled)
class SinusoidalPositionalEmbedding(nn.Module):
    """Fixed (non-learned) sinusoidal positional embeddings of any length.

    Positions are counted over non-padding symbols only, and padding
    positions receive an all-zero embedding via masking in ``forward``.
    """

    def __init__(self, embedding_dim, padding_idx, init_size=1024, fp16=False):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        table = SinusoidalPositionalEmbedding.get_embedding(
            init_size, embedding_dim, padding_idx
        )
        self.weights = table.to(torch.half) if fp16 else table

    @staticmethod
    def get_embedding(
        num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
    ):
        """Build the `(num_embeddings, embedding_dim)` sinusoid table.

        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need":
        all sines come first, then all cosines.
        """
        half = embedding_dim // 2
        log_step = math.log(10000) / (half - 1)
        inv_freq = torch.exp(torch.arange(half, dtype=torch.float) * -log_step)
        angles = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
            1
        ) * inv_freq.unsqueeze(0)
        table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1).view(
            num_embeddings, -1
        )
        if embedding_dim % 2 == 1:
            # Odd embedding dims get one zero-padded trailing column.
            table = torch.cat([table, torch.zeros(num_embeddings, 1)], dim=1)
        return table

    def make_positions(self, tensor, padding_idx, step):
        """Position 0 is the first non-pad token; pads are pinned to 0."""
        non_pad = tensor.ne(padding_idx).int()
        running = torch.cumsum(non_pad, dim=1).type_as(non_pad)
        return ((running - 1 + step) * non_pad).long()

    def forward(
        self,
        input,
        step=0,
        incremental_state=None,
        timestep=None,
        positions=None,
    ):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = input.size(0), input.size(1)
        pos = self.make_positions(input, self.padding_idx, step)
        keep = (
            torch.ne(input, self.padding_idx)
            .unsqueeze(2)
            .expand(bsz, seq_len, self.embedding_dim)
        )
        table = self.weights.to(input.device)
        gathered = table.index_select(0, pos.view(-1)).view(bsz, seq_len, -1)
        # Detach: these embeddings are fixed, never trained.
        return (gathered * keep).detach()
| 38.290254
| 88
| 0.592099
|
import math
import uuid
from typing import Dict, Optional, Tuple, List
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import Parameter, LayerNorm, Dropout, Linear
from lightseq.training.ops.pytorch import util
from lightseq.training.ops.pytorch.layer_base import (
TransformerEmbeddingLayerBase,
TransformerEncoderLayerBase,
TransformerDecoderLayerBase,
)
from .quantization import (
QuantLinear,
TensorQuantizer,
act_quant_config,
weight_quant_config,
)
class MultiheadAttention(nn.Module):
    """Multi-head scaled dot-product attention with quantization hooks.

    Adapted from fairseq's MultiheadAttention. Differences from the
    original: projections use ``QuantLinear``, and decoder self-attention
    fuses Q/K/V into a single ``qkv_proj`` whose activations are
    quantized. Inputs and outputs are time-major: (seq_len, batch,
    embed_dim). Incremental (step-wise) decoding is supported through a
    per-instance key/value cache stored in ``incremental_state`` under a
    UUID-derived key (see ``init_incremental_state``).
    """
    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        is_decoder=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        # Key/value input dims may differ from embed_dim only for
        # encoder-decoder attention; they default to embed_dim.
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
        self.num_heads = num_heads
        self.dropout_module = Dropout(dropout)
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # Queries are pre-scaled by 1/sqrt(head_dim) before the dot product.
        self.scaling = self.head_dim ** -0.5
        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention
        self.is_decoder = is_decoder
        assert (
            not self.self_attention or self.qkv_same_dim
        ), "Self-attention requires query, key and value to be of the same size"
        self.attention_quant = None
        if self.self_attention:
            # Fused Q/K/V projection; split into thirds in forward().
            self.qkv_proj = QuantLinear(embed_dim, 3 * embed_dim, bias=bias)
            # Quantize the fused projection output only in the decoder.
            self.attention_quant = (
                TensorQuantizer(act_quant_config) if self.is_decoder else None
            )
        elif self.encoder_decoder_attention and self.is_decoder:
            # K/V are projected from the encoder output (static across steps).
            self.k_proj = QuantLinear(
                self.kdim, embed_dim, pre_activation="encoder_out", bias=bias
            )
            self.v_proj = QuantLinear(
                self.vdim, embed_dim, pre_activation="encoder_out", bias=bias
            )
            self.q_proj = QuantLinear(embed_dim, embed_dim, bias=bias)
        # NOTE(review): no q/k/v projections are created when the module is
        # neither self-attention nor decoder-side encoder-decoder attention,
        # yet forward() and reset_parameters() reference them in that case —
        # presumably those configurations are never instantiated; verify.
        self.out_proj = QuantLinear(embed_dim, embed_dim, bias=bias)
        if add_bias_kv:
            # Optional learned bias vectors appended as an extra K/V slot.
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None
        self.add_zero_attn = add_zero_attn
        self.reset_parameters()
        self.onnx_trace = False
        self.tpu = False
        self.init_incremental_state()
    def prepare_for_onnx_export_(self):
        """Switch to ONNX-trace-friendly code paths in forward()."""
        self.onnx_trace = True
    def prepare_for_tpu_(self, **kwargs):
        """Switch to TPU-friendly masking (no masked_fill with -inf on bools)."""
        self.tpu = True
    def reset_parameters(self):
        """Initialize projection weights (Xavier) and biases (zeros).

        When Q/K/V share a dimension, gain 1/sqrt(2) compensates for the
        later concatenation/summation scale, following fairseq.
        """
        if self.qkv_same_dim:
            if self.self_attention:
                nn.init.xavier_uniform_(self.qkv_proj.weight, gain=1 / math.sqrt(2))
            else:
                nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
                nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
                nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        need_weights: bool = True,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        before_softmax: bool = False,
        need_head_weights: bool = False,
    ):
        """Compute multi-head attention.

        Args:
            query: (tgt_len, bsz, embed_dim).
            key, value: (src_len, bsz, kdim/vdim); may be None when a cached
                static key/value is reused during incremental decoding.
            key_padding_mask: (bsz, src_len); True/nonzero marks padding
                positions to exclude from attention.
            incremental_state: step-wise decoding cache of previous K/V.
            need_weights: also return attention weights (averaged over heads
                unless need_head_weights).
            static_kv: K/V do not change across decoding steps (encoder-
                decoder attention), so the cached projection is reused as-is.
            attn_mask: (tgt_len, src_len) additive mask (e.g. causal),
                broadcast over batch and heads.
            before_softmax: return raw pre-softmax weights and values.
            need_head_weights: return per-head weights (implies need_weights).

        Returns:
            (attn, attn_weights): attn is (tgt_len, bsz, embed_dim);
            attn_weights is None unless requested.
        """
        if need_head_weights:
            need_weights = True
        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # Static K/V was cached on a previous step: skip recomputing
                # the key/value projections entirely.
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None
        if self.self_attention:
            # Single fused projection, optionally quantized, then split
            # into equal Q/K/V chunks along the feature dim.
            qkv = self.qkv_proj(query)
            if self.attention_quant is not None:
                qkv = self.attention_quant(qkv)
            q, k, v = qkv.split(self.embed_dim, dim=-1)
        elif self.encoder_decoder_attention:
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                # NOTE: v is projected from `key` (the encoder output) here,
                # mirroring how this module is always called with key==value.
                k = self.k_proj(key)
                v = self.v_proj(key)
        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        q = q * self.scaling
        if self.bias_k is not None:
            # Append the learned bias K/V slot and grow masks by one column.
            assert self.bias_v is not None
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )
        # Reshape to (bsz * num_heads, seq_len, head_dim) for batched bmm.
        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if saved_state is not None:
            # Incremental decoding: splice cached K/V in front of (or instead
            # of, when static_kv) the freshly computed K/V, then refresh the
            # cache. saved_state shapes are (bsz, num_heads, seq_len, head_dim).
            if "prev_key" in saved_state:
                _prev_key = saved_state["prev_key"]
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert k is not None and v is not None
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k.size(1),
                static_kv=static_kv,
            )
            saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        s
        src_len = k.size(1)
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None
        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len
        if self.add_zero_attn:
            # Append an all-zero K/V slot so attention can "attend to nothing".
            assert v is not None
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        torch.zeros(key_padding_mask.size(0), 1).type_as(
                            key_padding_mask
                        ),
                    ],
                    dim=1,
                )
        # Scaled dot-product scores: (bsz * num_heads, tgt_len, src_len).
        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask
        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            if not self.tpu:
                attn_weights = attn_weights.masked_fill(
                    key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
                    float("-inf"),
                )
            else:
                # TPU path: transpose so the mask broadcasts without bool ops.
                attn_weights = attn_weights.transpose(0, 2)
                attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
                attn_weights = attn_weights.transpose(0, 2)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if before_softmax:
            return attn_weights, v
        attn_weights_float = util.softmax(
            attn_weights, dim=-1, onnx_trace=self.onnx_trace
        )
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = self.dropout_module(attn_weights)
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        attn_weights: Optional[Tensor] = None
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)
        return attn, attn_weights
    @staticmethod
    def _append_prev_key_padding_mask(
        key_padding_mask: Optional[Tensor],
        prev_key_padding_mask: Optional[Tensor],
        batch_size: int,
        src_len: int,
        static_kv: bool,
    ) -> Optional[Tensor]:
        """Combine the cached padding mask with the current step's mask.

        Returns a (batch_size, src_len) float mask, or None when both inputs
        are None. Missing halves are filled with zeros (i.e. "not padding").
        """
        # saved key padding masks have shape (bsz, seq_len)
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
            )
        # During incremental decoding, as the padding token enters and
        # leaves the frame, there will be a time when prev or current
        # is None
        elif prev_key_padding_mask is not None:
            filler = torch.zeros(
                (batch_size, src_len - prev_key_padding_mask.size(1)),
                device=prev_key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), filler.float()], dim=1
            )
        elif key_padding_mask is not None:
            filler = torch.zeros(
                (batch_size, src_len - key_padding_mask.size(1)),
                device=key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [filler.float(), key_padding_mask.float()], dim=1
            )
        else:
            # Both None: propagate None (no padding anywhere).
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask
    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        new_order: Tensor,
    ):
        """Reorder the cached K/V along the batch dim for beam search.

        For encoder-decoder attention whose buffer already matches the new
        batch size, the buffer is static and needs no reordering (break).
        """
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    if self.encoder_decoder_attention and input_buffer_k.size(
                        0
                    ) == new_order.size(0):
                        break
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state
    def _get_input_buffer(
        self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
    ) -> Dict[str, Optional[Tensor]]:
        """Fetch this module's K/V cache dict; empty dict when absent."""
        result = self.get_incremental_state(incremental_state, "attn_state")
        if result is not None:
            return result
        else:
            empty_result: Dict[str, Optional[Tensor]] = {}
            return empty_result
    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        """Store this module's K/V cache dict into incremental_state."""
        return self.set_incremental_state(incremental_state, "attn_state", buffer)
    def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
        """Hook for subclasses to sparsify attention scores; identity here."""
        return attn_weights
    def upgrade_state_dict_named(self, state_dict, name):
        """Split legacy fused ``in_proj_weight``/``in_proj_bias`` checkpoint
        entries into separate q/k/v projection parameters in-place."""
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
                keys_to_remove.append(k)
                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim : 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
                    keys_to_remove.append(prefix + "in_proj_bias")
        for k in keys_to_remove:
            del state_dict[k]
        for key, value in items_to_add.items():
            state_dict[key] = value
    def init_incremental_state(self):
        """Give this instance a unique key prefix in shared incremental_state."""
        self._incremental_state_id = str(uuid.uuid4())
    def _get_full_incremental_state_key(self, key: str) -> str:
        """Namespace `key` with this instance's UUID."""
        return "{}.{}".format(self._incremental_state_id, key)
    def get_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
    ) -> Optional[Dict[str, Optional[Tensor]]]:
        """Read this instance's entry for `key`; None when state/entry absent."""
        full_key = self._get_full_incremental_state_key(key)
        if incremental_state is None or full_key not in incremental_state:
            return None
        return incremental_state[full_key]
    def set_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
        value: Dict[str, Optional[Tensor]],
    ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
        """Write this instance's entry for `key` (no-op when state is None)."""
        if incremental_state is not None:
            full_key = self._get_full_incremental_state_key(key)
            incremental_state[full_key] = value
        return incremental_state
class TransformerEncoderLayer(TransformerEncoderLayerBase):
    """Transformer encoder layer with quantized linear projections.

    Two residual sub-layers: multi-head self-attention, then a two-layer
    feed-forward network. ``config.pre_layer_norm`` selects pre-LN
    (normalize before each sub-layer) vs post-LN (normalize after the
    residual add). Operates batch-first externally; internally time-major.
    """

    def __init__(self, config, initial_weights=None, initial_biases=None):
        # initial_weights/initial_biases are accepted for interface
        # compatibility with sibling layer classes but are not used here.
        super().__init__()
        dim = config.hidden_size
        self.embed_dim = dim
        self.self_attn = self.build_self_attention(
            dim, config.nhead, config.attn_prob_dropout_ratio
        )
        self.self_attn_layer_norm = LayerNorm(dim)
        self.dropout_module = Dropout(config.hidden_dropout_ratio)
        self.activation_fn = util.get_activation_fn(activation=config.activation_fn)
        self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))
        self.normalize_before = config.pre_layer_norm
        self.fc1 = QuantLinear(dim, config.intermediate_size)
        self.fc2 = QuantLinear(config.intermediate_size, dim, pre_activation="relu")
        self.final_layer_norm = LayerNorm(dim)

    def build_self_attention(self, embed_dim, nhead, attn_dropout):
        """Construct the self-attention module for this layer."""
        return MultiheadAttention(
            embed_dim, nhead, dropout=attn_dropout, self_attention=True
        )

    def residual_connection(self, x, residual):
        """Residual add: sub-layer output plus its input."""
        return residual + x

    def upgrade_state_dict_named(self, state_dict, name):
        """Rename legacy ``layer_norms.{0,1}`` checkpoint keys in-place to
        ``self_attn_layer_norm`` / ``final_layer_norm``."""
        renames = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
        for old_idx, new_name in renames.items():
            for param in ("weight", "bias"):
                old_key = "{}.layer_norms.{}.{}".format(name, old_idx, param)
                if old_key in state_dict:
                    new_key = "{}.{}.{}".format(name, new_name, param)
                    state_dict[new_key] = state_dict[old_key]
                    del state_dict[old_key]

    def forward(self, x, encoder_padding_mask):
        """Run one encoder layer.

        Args:
            x: (batch, seq_len, embed_dim) input.
            encoder_padding_mask: (batch, seq_len); nonzero/True marks
                padding tokens excluded from attention.

        Returns:
            (batch, seq_len, embed_dim) output.
        """
        x = x.transpose(0, 1)  # batch-first -> time-major for attention

        # --- self-attention sub-layer ---
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, _ = self.self_attn(
            query=x, key=x, value=x, key_padding_mask=encoder_padding_mask
        )
        x = self.residual_connection(self.dropout_module(x), residual)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)

        # --- feed-forward sub-layer ---
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_dropout_module(self.activation_fn(self.fc1(x)))
        x = self.dropout_module(self.fc2(x))
        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.final_layer_norm(x)

        return x.transpose(0, 1)  # back to batch-first
class TransformerDecoderLayer(TransformerDecoderLayerBase):
    """Transformer decoder layer with quantized projections.

    Three residual sub-layers: causal self-attention, encoder-decoder
    attention, and a two-layer feed-forward network, each with pre- or
    post-LayerNorm per ``config.pre_layer_norm``. Supports incremental
    decoding via each attention module's cached state.
    """
    def __init__(self, config, initial_weights=None, initial_biases=None):
        # initial_weights/initial_biases kept for interface compatibility;
        # not consumed here.
        super().__init__()
        self.embed_dim = config.hidden_size
        self.dropout_module = Dropout(config.hidden_dropout_ratio)
        self.cross_self_attention = False
        self.self_attn = self.build_self_attention(
            self.embed_dim,
            config.nhead,
            config.attn_prob_dropout_ratio,
        )
        self.activation_fn = util.get_activation_fn(activation=config.activation_fn)
        self.activation_dropout_module = Dropout(float(config.activation_dropout_ratio))
        self.normalize_before = config.pre_layer_norm
        self.self_attn_layer_norm = LayerNorm(self.embed_dim)
        self.encoder_attn = self.build_encoder_attention(
            self.embed_dim,
            config.hidden_size,
            config.attn_prob_dropout_ratio,
            config.nhead,
        )
        self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
        self.fc1 = QuantLinear(
            self.embed_dim,
            config.intermediate_size,
        )
        self.fc2 = QuantLinear(
            config.intermediate_size,
            self.embed_dim,
            pre_activation="relu",
        )
        self.final_layer_norm = LayerNorm(self.embed_dim)
        self.need_attn = True
        self.onnx_trace = False
    def build_self_attention(
        self, embed_dim, nhead, attn_dropout, add_bias_kv=False, add_zero_attn=False
    ):
        """Construct decoder self-attention (fused QKV, quantized)."""
        return MultiheadAttention(
            embed_dim,
            nhead,
            dropout=attn_dropout,
            add_bias_kv=add_bias_kv,
            add_zero_attn=add_zero_attn,
            self_attention=not self.cross_self_attention,
            is_decoder=True,
        )
    def build_encoder_attention(
        self, embed_dim, encoder_embed_dim, attn_dropout, nhead
    ):
        """Construct encoder-decoder (cross) attention over encoder output."""
        return MultiheadAttention(
            embed_dim,
            nhead,
            kdim=encoder_embed_dim,
            vdim=encoder_embed_dim,
            dropout=attn_dropout,
            encoder_decoder_attention=True,
            is_decoder=True,
        )
    def prepare_for_onnx_export_(self):
        """Switch to ONNX-trace-friendly code paths in forward()."""
        self.onnx_trace = True
    def residual_connection(self, x, residual):
        """Residual add: sub-layer output plus its input."""
        return residual + x
    def forward(
        self,
        x,
        encoder_out: Optional[torch.Tensor] = None,
        encoder_padding_mask: Optional[torch.Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        prev_self_attn_state: Optional[List[torch.Tensor]] = None,
        prev_attn_state: Optional[List[torch.Tensor]] = None,
        self_attn_mask: Optional[torch.Tensor] = None,
        self_attn_padding_mask: Optional[torch.Tensor] = None,
        need_attn: bool = False,
        need_head_weights: bool = False,
    ):
        """Run one decoder layer.

        Args:
            x: (batch, tgt_len, embed_dim) decoder input.
            encoder_out: (src_len, batch, embed_dim) encoder output for
                cross-attention — assumed time-major here; TODO confirm
                against the caller.
            encoder_padding_mask: padding mask over encoder positions.
            incremental_state: step-wise decoding cache shared with the
                attention submodules.
            prev_self_attn_state / prev_attn_state: externally supplied
                [prev_key, prev_value(, prev_key_padding_mask)] to seed the
                respective attention caches.
            self_attn_mask: additive (e.g. causal) mask for self-attention.
            self_attn_padding_mask: padding mask over decoder positions.
            need_attn / need_head_weights: request cross-attention weights
                (per-head when need_head_weights).

        Returns:
            (x, attn, self_attn_state) — self_attn_state is only populated
            on the ONNX-trace incremental path, otherwise None.
        """
        if need_head_weights:
            need_attn = True
        x = x.transpose(0, 1)
        # --- self-attention sub-layer ---
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        if prev_self_attn_state is not None:
            # Seed the self-attention cache from externally provided state.
            prev_key, prev_value = prev_self_attn_state[:2]
            saved_state: Dict[str, Optional[Tensor]] = {
                "prev_key": prev_key,
                "prev_value": prev_value,
            }
            if len(prev_self_attn_state) >= 3:
                saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
            assert incremental_state is not None
            self.self_attn._set_input_buffer(incremental_state, saved_state)
        _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
        if self.cross_self_attention and not (
            incremental_state is not None
            and _self_attn_input_buffer is not None
            and "prev_key" in _self_attn_input_buffer
        ):
            # Cross-self-attention: let self-attention also attend over the
            # encoder output by prepending it to keys/values, padding the
            # masks accordingly. (Unreachable here: cross_self_attention is
            # hard-coded False in __init__.)
            if self_attn_mask is not None:
                assert encoder_out is not None
                self_attn_mask = torch.cat(
                    (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
                )
            if self_attn_padding_mask is not None:
                if encoder_padding_mask is None:
                    assert encoder_out is not None
                    encoder_padding_mask = self_attn_padding_mask.new_zeros(
                        encoder_out.size(1), encoder_out.size(0)
                    )
                self_attn_padding_mask = torch.cat(
                    (encoder_padding_mask, self_attn_padding_mask), dim=1
                )
            assert encoder_out is not None
            y = torch.cat((encoder_out, x), dim=0)
        else:
            y = x
        x, attn = self.self_attn(
            query=x,
            key=y,
            value=y,
            key_padding_mask=self_attn_padding_mask,
            incremental_state=incremental_state,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        # --- encoder-decoder attention sub-layer ---
        if self.encoder_attn is not None and encoder_out is not None:
            if (
                encoder_out.shape[1] != x.shape[1]
                and x.shape[1] % encoder_out.shape[1] == 0
            ):
                # Beam search: decoder batch is beam_size * encoder batch;
                # tile the encoder output/mask to match.
                # NOTE(review): encoder_padding_mask may be None here, which
                # would raise on repeat_interleave — presumably callers always
                # pass a mask in this situation; verify.
                beam_size = int(x.shape[1] / encoder_out.shape[1])
                encoder_out = encoder_out.repeat_interleave(beam_size, 1)
                encoder_padding_mask = encoder_padding_mask.repeat_interleave(
                    beam_size, 0
                )
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            if prev_attn_state is not None:
                # Seed the cross-attention cache from externally provided state.
                prev_key, prev_value = prev_attn_state[:2]
                saved_state: Dict[str, Optional[Tensor]] = {
                    "prev_key": prev_key,
                    "prev_value": prev_value,
                }
                if len(prev_attn_state) >= 3:
                    saved_state["prev_key_padding_mask"] = prev_attn_state[2]
                assert incremental_state is not None
                self.encoder_attn._set_input_buffer(incremental_state, saved_state)
            x, attn = self.encoder_attn(
                query=x,
                key=encoder_out,
                value=encoder_out,
                key_padding_mask=encoder_padding_mask,
                incremental_state=incremental_state,
                static_kv=True,
                need_weights=need_attn or (not self.training and self.need_attn),
                need_head_weights=need_head_weights,
            )
            x = self.dropout_module(x)
            x = self.residual_connection(x, residual)
            if not self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
        # --- feed-forward sub-layer ---
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        x = self.activation_fn(self.fc1(x))
        x = self.activation_dropout_module(x)
        x = self.fc2(x)
        x = self.dropout_module(x)
        x = self.residual_connection(x, residual)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        if self.onnx_trace and incremental_state is not None:
            # ONNX incremental path additionally returns the self-attention
            # cache so the exporter can thread it between steps.
            # NOTE(review): this path returns x without the transpose(0, 1)
            # applied on the normal path below — confirm intended.
            saved_state = self.self_attn._get_input_buffer(incremental_state)
            assert saved_state is not None
            if self_attn_padding_mask is not None:
                self_attn_state = [
                    saved_state["prev_key"],
                    saved_state["prev_value"],
                    saved_state["prev_key_padding_mask"],
                ]
            else:
                self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
            return x, attn, self_attn_state
        x = x.transpose(0, 1)
        return x, attn, None
    def make_generation_fast_(self, need_attn: bool = False, **kwargs):
        """Toggle whether cross-attention weights are computed at inference."""
        self.need_attn = need_attn
class TransformerEmbeddingLayer(TransformerEmbeddingLayerBase):
    """Token embedding with sinusoidal positions and weight quantization.

    forward() computes: dropout(quantize(lookup(tokens)) * sqrt(d) + pos),
    where pos comes from a fixed SinusoidalPositionalEmbedding.
    """

    def __init__(self, config):
        super().__init__()
        self.emb_lookup = nn.Embedding(
            config.vocab_size, config.embedding_dim, padding_idx=config.padding_idx
        )
        self.emb_lookup.to(dtype=(torch.half if config.fp16 else torch.float))
        self.embeddings = self.emb_lookup.weight
        # Standard transformer init: N(0, d^-0.5); padding row forced to zero.
        nn.init.normal_(self.embeddings, mean=0, std=config.embedding_dim ** -0.5)
        nn.init.constant_(self.embeddings[config.padding_idx], 0)
        self.embed_positions = SinusoidalPositionalEmbedding(
            config.embedding_dim, config.padding_idx, config.max_seq_len, config.fp16
        )
        self.embedding_dim = config.embedding_dim
        self.dropout = Dropout(config.dropout)
        # Quantizer applied to the looked-up embeddings before scaling.
        self.emb_quant = TensorQuantizer(weight_quant_config)
        self.config = config

    def forward(self, input, step=0):
        """Embed `input` token ids (batch, seq); `step` offsets positions
        during incremental decoding."""
        embedded = self.emb_quant(self.emb_lookup(input))
        scaled = embedded * math.sqrt(self.embedding_dim)
        out = scaled + self.embed_positions(input, step)
        return self.dropout(out)
class SinusoidalPositionalEmbedding(nn.Module):
    """Fixed (non-learned) sinusoidal positional embeddings.

    Positions are derived from the token tensor itself: non-pad tokens get
    0-based positions (offset by ``step``), padding tokens get position 0
    and their output rows are zeroed by a mask in forward().
    """

    def __init__(self, embedding_dim, padding_idx, init_size=1024, fp16=False):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        table = SinusoidalPositionalEmbedding.get_embedding(
            init_size, embedding_dim, padding_idx
        )
        # Precomputed for `init_size` positions; a plain tensor attribute,
        # so it is neither trained nor saved in state_dict.
        self.weights = table.to(torch.half) if fp16 else table

    @staticmethod
    def get_embedding(
        num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
    ):
        """Build the (num_embeddings, embedding_dim) sin/cos table.

        First half of each row is sin, second half cos, following
        "Attention Is All You Need". `padding_idx` is accepted for API
        compatibility but unused: padding rows are masked in forward().
        """
        half_dim = embedding_dim // 2
        log_step = math.log(10000) / (half_dim - 1)
        inv_freq = torch.exp(torch.arange(half_dim, dtype=torch.float) * -log_step)
        positions = torch.arange(num_embeddings, dtype=torch.float)
        angles = positions.unsqueeze(1) * inv_freq.unsqueeze(0)
        table = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
        table = table.view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # Odd dim: append a zero column so rows have embedding_dim entries.
            table = torch.cat([table, torch.zeros(num_embeddings, 1)], dim=1)
        return table

    def make_positions(self, tensor, padding_idx, step):
        """0-based positions of non-pad tokens offset by `step`; pads -> 0."""
        nonpad = tensor.ne(padding_idx).int()
        return ((torch.cumsum(nonpad, dim=1).type_as(nonpad) - 1 + step) * nonpad).long()

    def forward(
        self,
        input,
        step=0,
        incremental_state=None,
        timestep=None,
        positions=None,
    ):
        """Look up positional embeddings for `input` (batch, seq) token ids.

        `incremental_state`, `timestep` and `positions` exist only for
        interface compatibility and are ignored: positions are always
        recomputed from `input` and `step`.
        """
        bsz, seq_len = input.size(0), input.size(1)
        positions = self.make_positions(input, self.padding_idx, step)
        pad_mask = (
            torch.ne(input, self.padding_idx)
            .unsqueeze(2)
            .expand(bsz, seq_len, self.embedding_dim)
        )
        gathered = (
            self.weights.to(input.device)
            .index_select(0, positions.view(-1))
            .view(bsz, seq_len, -1)
        )
        # Zero out padding rows; detach — positional embeddings carry no grad.
        return (gathered * pad_mask).detach()
| true
| true
|
f704c7064a6678cc5c17790c675482e38ef55a1b
| 2,392
|
py
|
Python
|
api/server/swagger_server/models/api_generate_code_response.py
|
srishtipithadia/mlx
|
2fb61a8840696c7ede77cd600caa8922178ec8b0
|
[
"Apache-2.0"
] | null | null | null |
api/server/swagger_server/models/api_generate_code_response.py
|
srishtipithadia/mlx
|
2fb61a8840696c7ede77cd600caa8922178ec8b0
|
[
"Apache-2.0"
] | 1
|
2021-09-21T23:31:13.000Z
|
2021-09-21T23:31:13.000Z
|
api/server/swagger_server/models/api_generate_code_response.py
|
srishtipithadia/mlx
|
2fb61a8840696c7ede77cd600caa8922178ec8b0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ApiGenerateCodeResponse(Model):
    """Swagger model carrying a generated pipeline script.

    NOTE: originally produced by the swagger code generator; kept
    hand-maintained in the same shape the (de)serialization helpers expect.
    """

    def __init__(self, script: str = None):  # noqa: E501
        """ApiGenerateCodeResponse - a model defined in Swagger.

        :param script: The script of this ApiGenerateCodeResponse.  # noqa: E501
        :type script: str
        """
        # Attribute name -> swagger type, and attribute name -> JSON field
        # name; both are consumed by swagger_server.util during
        # (de)serialization.
        self.swagger_types = {'script': str}
        self.attribute_map = {'script': 'script'}
        self._script = script

    @classmethod
    def from_dict(cls, dikt) -> 'ApiGenerateCodeResponse':
        """Build a model instance from a plain dict.

        :param dikt: A dict.
        :type: dict
        :return: The apiGenerateCodeResponse of this ApiGenerateCodeResponse.  # noqa: E501
        :rtype: ApiGenerateCodeResponse
        """
        return util.deserialize_model(dikt, cls)

    @property
    def script(self) -> str:
        """The script source code to run the component in a pipeline.  # noqa: E501

        :return: The script of this ApiGenerateCodeResponse.
        :rtype: str
        """
        return self._script

    @script.setter
    def script(self, script: str):
        """Set the script source code to run the component in a pipeline.

        :param script: The script of this ApiGenerateCodeResponse.
        :type script: str
        """
        self._script = script
| 29.9
| 91
| 0.669314
|
from __future__ import absolute_import
from datetime import date, datetime
from typing import List, Dict
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ApiGenerateCodeResponse(Model):
def __init__(self, script: str=None):
self.swagger_types = {
'script': str
}
self.attribute_map = {
'script': 'script'
}
self._script = script
@classmethod
def from_dict(cls, dikt) -> 'ApiGenerateCodeResponse':
return util.deserialize_model(dikt, cls)
@property
def script(self) -> str:
return self._script
@script.setter
def script(self, script: str):
self._script = script
| true
| true
|
f704c748c77fe552c4d56bef1c5dbb0e85cd8b5f
| 917
|
py
|
Python
|
abc231/c/main.py
|
nakamuloud/atcoder
|
aa986bc31ed050bac983888ec500c47f9d12ad2a
|
[
"MIT"
] | null | null | null |
abc231/c/main.py
|
nakamuloud/atcoder
|
aa986bc31ed050bac983888ec500c47f9d12ad2a
|
[
"MIT"
] | null | null | null |
abc231/c/main.py
|
nakamuloud/atcoder
|
aa986bc31ed050bac983888ec500c47f9d12ad2a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
from bisect import bisect, bisect_left, bisect_right, insort, insort_left, insort_right # type: ignore
from collections import Counter, defaultdict, deque # type: ignore
from fractions import gcd # type: ignore
from heapq import heapify, heappop, heappush, heappushpop, heapreplace, merge # type: ignore
from itertools import accumulate, combinations, permutations, product # type: ignore
def count_at_least(sorted_heights, threshold):
    """Return how many values in ascending-sorted `sorted_heights` are >= threshold.

    Uses bisect_left: everything from the first index whose value is >=
    threshold to the end counts, i.e. len - bisect_left.
    """
    return len(sorted_heights) - bisect_left(sorted_heights, threshold)


def main():
    """Read N heights and Q queries from stdin; print, for each query x,
    the number of heights >= x (ABC231 problem C).

    Fixes the original draft, which never reset its binary-search bounds
    between queries, updated the bounds in the wrong direction for its
    descending sort, and printed debug output instead of answers.
    """
    _n, q = map(int, input().split())  # _n == len(heights); kept for the input format
    heights = list(map(int, input().split()))
    queries = [int(input()) for _ in range(q)]
    heights.sort()  # ascending, so bisect_left applies directly
    for x in queries:
        print(count_at_least(heights, x))


if __name__ == "__main__":
    main()
| 26.970588
| 103
| 0.61614
|
import sys
from bisect import bisect, bisect_left, bisect_right, insort, insort_left, insort_right
from collections import Counter, defaultdict, deque
from fractions import gcd
from heapq import heapify, heappop, heappush, heappushpop, heapreplace, merge
from itertools import accumulate, combinations, permutations, product
N, Q = map(int, input().split())
A = list(map(int, input().split()))
x = []
for i in range(Q):
x.append(int(input()))
A.sort(reverse=True)
left = 0
right = N - 1
num = 0
for j in range(Q):
while right - left > 0:
print("left", left, "right", right)
mid = int((left + right) / 2)
if A[mid] < x[j]:
right = mid + 1
elif A[mid] > x[j]:
left = mid - 1
else:
break
num = mid
print("mid", num)
| true
| true
|
f704c7ac312e7babdba94522a4ea34a2966ab06b
| 4,574
|
py
|
Python
|
test/dpt_tests/dpt_time_test.py
|
cyberjunky/xknx
|
c708ed6a2ca6449b74c6cea197d658e3399b99d1
|
[
"MIT"
] | 1
|
2020-12-09T16:17:49.000Z
|
2020-12-09T16:17:49.000Z
|
test/dpt_tests/dpt_time_test.py
|
cyberjunky/xknx
|
c708ed6a2ca6449b74c6cea197d658e3399b99d1
|
[
"MIT"
] | null | null | null |
test/dpt_tests/dpt_time_test.py
|
cyberjunky/xknx
|
c708ed6a2ca6449b74c6cea197d658e3399b99d1
|
[
"MIT"
] | null | null | null |
"""Unit test for KNX time objects."""
import unittest
from xknx.dpt import DPTTime, DPTWeekday
from xknx.exceptions import ConversionError
class TestDPTTime(unittest.TestCase):
    """Test class for KNX DPT 10.001 time objects.

    Each KNX time value is three bytes: byte 0 packs the weekday (upper
    3 bits) with the hours (lower 5 bits); bytes 1 and 2 are minutes and
    seconds.
    """
    #
    # TEST NORMAL TIME
    #
    def test_from_knx(self):
        """Test parsing of DPTTime object from binary values. Example 1."""
        # 0x4D = weekday 2 (Tuesday) << 5 | 13 hours; 0x17 = 23 min; 0x2A = 42 s.
        self.assertEqual(DPTTime().from_knx((0x4D, 0x17, 0x2A)),
                         {'weekday': DPTWeekday.TUESDAY,
                          'hours': 13,
                          'minutes': 23,
                          'seconds': 42})
    def test_to_knx(self):
        """Testing KNX/Byte representation of DPTTime object."""
        raw = DPTTime().to_knx(
            {'weekday': DPTWeekday.TUESDAY,
             'hours': 13,
             'minutes': 23,
             'seconds': 42})
        self.assertEqual(raw, (0x4D, 0x17, 0x2A))
    #
    # TEST MAXIMUM TIME
    #
    def test_to_knx_max(self):
        """Testing KNX/Byte representation of DPTTime object. Maximum values."""
        raw = DPTTime().to_knx(
            {'weekday': DPTWeekday.SUNDAY,
             'hours': 23,
             'minutes': 59,
             'seconds': 59})
        self.assertEqual(raw, (0xF7, 0x3b, 0x3b))
    def test_from_knx_max(self):
        """Test parsing of DPTTime object from binary values. Maximum values."""
        self.assertEqual(DPTTime().from_knx((0xF7, 0x3b, 0x3b)),
                         {'weekday': DPTWeekday.SUNDAY,
                          'hours': 23,
                          'minutes': 59,
                          'seconds': 59})
    #
    # TEST MINIMUM TIME
    #
    def test_to_knx_min(self):
        """Testing KNX/Byte representation of DPTTime object. Minimum values."""
        raw = DPTTime().to_knx(
            {'weekday': DPTWeekday.NONE,
             'hours': 0,
             'minutes': 0,
             'seconds': 0})
        self.assertEqual(raw, (0x0, 0x0, 0x0))
    def test_from_knx_min(self):
        """Test parsing of DPTTime object from binary values. Minimum values."""
        self.assertEqual(DPTTime().from_knx((0x0, 0x0, 0x0)),
                         {'weekday': DPTWeekday.NONE,
                          'hours': 0,
                          'minutes': 0,
                          'seconds': 0})
    #
    # TEST INITIALIZATION
    #
    def test_to_knx_default(self):
        """Testing default initialization of DPTTime object (empty dict)."""
        self.assertEqual(DPTTime().to_knx({}), (0x0, 0x0, 0x0))
    def test_from_knx_wrong_size(self):
        """Test parsing from DPTTime object from wrong binary values (wrong size)."""
        with self.assertRaises(ConversionError):
            DPTTime().from_knx((0xF8, 0x23))
    def test_from_knx_wrong_bytes(self):
        """Test parsing from DPTTime object from wrong binary values (wrong bytes)."""
        with self.assertRaises(ConversionError):
            # third parameter (seconds, 0x3c == 60) exceeds the valid range
            DPTTime().from_knx((0xF7, 0x3b, 0x3c))
    def test_from_knx_wrong_type(self):
        """Test parsing from DPTTime object from wrong binary values (wrong type)."""
        with self.assertRaises(ConversionError):
            DPTTime().from_knx((0xF8, "0x23"))
    def test_to_knx_wrong_parameter(self):
        """Test parsing from DPTTime object from wrong string value."""
        with self.assertRaises(ConversionError):
            DPTTime().to_knx("fnord")
    def test_to_knx_wrong_seconds(self):
        """Test parsing from DPTTime object from wrong seconds value (> 59)."""
        with self.assertRaises(ConversionError):
            DPTTime().to_knx({
                'hours': 12,
                'minutes': 42,
                'seconds': 61
            })
    def test_to_knx_wrong_minutes(self):
        """Test parsing from DPTTime object from wrong minutes value (> 59)."""
        with self.assertRaises(ConversionError):
            DPTTime().to_knx({
                'hours': 12,
                'minutes': 61,
                'seconds': 53
            })
    def test_to_knx_wrong_hours(self):
        """Test parsing from DPTTime object from wrong hours value (> 23)."""
        with self.assertRaises(ConversionError):
            DPTTime().to_knx({
                'hours': 24,
                'minutes': 42,
                'seconds': 53
            })
    def test_test_range_wrong_weekday(self):
        """Test range testing with wrong weekday (Cant be tested with normal from_/to_knx)."""
        # pylint: disable=protected-access
        self.assertFalse(DPTTime._test_range(8, 0, 0, 0))
| 35.184615
| 94
| 0.555969
|
import unittest
from xknx.dpt import DPTTime, DPTWeekday
from xknx.exceptions import ConversionError
class TestDPTTime(unittest.TestCase):
def test_from_knx(self):
self.assertEqual(DPTTime().from_knx((0x4D, 0x17, 0x2A)),
{'weekday': DPTWeekday.TUESDAY,
'hours': 13,
'minutes': 23,
'seconds': 42})
def test_to_knx(self):
raw = DPTTime().to_knx(
{'weekday': DPTWeekday.TUESDAY,
'hours': 13,
'minutes': 23,
'seconds': 42})
self.assertEqual(raw, (0x4D, 0x17, 0x2A))
def test_to_knx_max(self):
raw = DPTTime().to_knx(
{'weekday': DPTWeekday.SUNDAY,
'hours': 23,
'minutes': 59,
'seconds': 59})
self.assertEqual(raw, (0xF7, 0x3b, 0x3b))
def test_from_knx_max(self):
self.assertEqual(DPTTime().from_knx((0xF7, 0x3b, 0x3b)),
{'weekday': DPTWeekday.SUNDAY,
'hours': 23,
'minutes': 59,
'seconds': 59})
def test_to_knx_min(self):
raw = DPTTime().to_knx(
{'weekday': DPTWeekday.NONE,
'hours': 0,
'minutes': 0,
'seconds': 0})
self.assertEqual(raw, (0x0, 0x0, 0x0))
def test_from_knx_min(self):
self.assertEqual(DPTTime().from_knx((0x0, 0x0, 0x0)),
{'weekday': DPTWeekday.NONE,
'hours': 0,
'minutes': 0,
'seconds': 0})
def test_to_knx_default(self):
self.assertEqual(DPTTime().to_knx({}), (0x0, 0x0, 0x0))
def test_from_knx_wrong_size(self):
with self.assertRaises(ConversionError):
DPTTime().from_knx((0xF8, 0x23))
def test_from_knx_wrong_bytes(self):
with self.assertRaises(ConversionError):
DPTTime().from_knx((0xF7, 0x3b, 0x3c))
def test_from_knx_wrong_type(self):
with self.assertRaises(ConversionError):
DPTTime().from_knx((0xF8, "0x23"))
def test_to_knx_wrong_parameter(self):
with self.assertRaises(ConversionError):
DPTTime().to_knx("fnord")
def test_to_knx_wrong_seconds(self):
with self.assertRaises(ConversionError):
DPTTime().to_knx({
'hours': 12,
'minutes': 42,
'seconds': 61
})
def test_to_knx_wrong_minutes(self):
with self.assertRaises(ConversionError):
DPTTime().to_knx({
'hours': 12,
'minutes': 61,
'seconds': 53
})
def test_to_knx_wrong_hours(self):
with self.assertRaises(ConversionError):
DPTTime().to_knx({
'hours': 24,
'minutes': 42,
'seconds': 53
})
def test_test_range_wrong_weekday(self):
self.assertFalse(DPTTime._test_range(8, 0, 0, 0))
| true
| true
|
f704c9482c2b74b28c4faceee71e9f4dcabea3a3
| 316
|
py
|
Python
|
johann/__init__.py
|
lobotmcj/johann
|
c188c6f31446907a5d6a237191540856f02a91a0
|
[
"BSD-3-Clause"
] | 11
|
2020-08-27T18:33:09.000Z
|
2022-03-18T03:09:03.000Z
|
johann/__init__.py
|
johannsdg/johann
|
c188c6f31446907a5d6a237191540856f02a91a0
|
[
"BSD-3-Clause"
] | null | null | null |
johann/__init__.py
|
johannsdg/johann
|
c188c6f31446907a5d6a237191540856f02a91a0
|
[
"BSD-3-Clause"
] | 2
|
2020-09-04T03:07:35.000Z
|
2020-11-06T19:08:03.000Z
|
# Copyright (c) 2019-present, The Johann Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE file. See the AUTHORS file for names of contributors.
"""Johann, lightweight and flexible scenario orchestration"""
__version__ = "0.3.0-alpha"
| 39.5
| 79
| 0.759494
|
__version__ = "0.3.0-alpha"
| true
| true
|
f704cb298b4264480818031b4a5dc27b92ebb46c
| 2,543
|
py
|
Python
|
o3seespy/command/layer.py
|
vijaypolimeru/o3seespy
|
c9ef0c27f685de705721b10eb1ea81c3a3c24c4e
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
o3seespy/command/layer.py
|
vijaypolimeru/o3seespy
|
c9ef0c27f685de705721b10eb1ea81c3a3c24c4e
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2021-06-25T15:33:31.000Z
|
2021-06-25T15:33:31.000Z
|
o3seespy/command/layer.py
|
millen1m/o3seespy
|
7eead6aef8055f73af39b969e0d3499a67e1737f
|
[
"MIT",
"BSD-3-Clause"
] | 1
|
2020-12-12T21:01:42.000Z
|
2020-12-12T21:01:42.000Z
|
from o3seespy.base_model import OpenSeesObject
class LayerBase(OpenSeesObject):
op_base_type = "layer"
class Straight(LayerBase):
"""
The Straight Layer Class
The layer command is used to generate a number of fibers along a line or a circular arc.
"""
op_type = 'straight'
def __init__(self, osi, mat, num_fiber, area_fiber, start, end):
"""
Initial method for Straight
Parameters
----------
mat: obj
Material tag associated with this fiber (uniaxialmaterial tag for a fibersection and ndmaterial tag for use
in an ndfibersection).
num_fiber: int
Number of fibers along line
area_fiber: float
Area of each fiber
start: list
Y & z-coordinates of first fiber in line (local coordinate system)
end: list
Y & z-coordinates of last fiber in line (local coordinate system)
"""
self.mat = mat
self.num_fiber = int(num_fiber)
self.area_fiber = float(area_fiber)
self.start = start
self.end = end
self._parameters = [self.op_type, self.mat.tag, self.num_fiber, self.area_fiber, *self.start, *self.end]
self.to_process(osi)
class Circ(LayerBase):
"""
The Circ Layer Class
This command is used to construct a line of fibers along a circular arc
"""
op_type = 'circ'
def __init__(self, osi, mat, num_fiber, area_fiber, center, radius, ang=None):
"""
Initial method for Circ
Parameters
----------
mat: obj
Material tag associated with this fiber (uniaxialmaterial tag for a fibersection and ndmaterial tag for use
in an ndfibersection).
num_fiber: int
Number of fibers along line
area_fiber: float
Area of each fiber
center: listf
Y & z-coordinates of center of circular arc
radius: float
Radius of circlular arc
ang: listf
Starting and ending angle (optional) [0.0, 360.0-360/num_fibres]
"""
self.mat = mat
self.num_fiber = int(num_fiber)
self.area_fiber = float(area_fiber)
self.center = center
self.radius = float(radius)
self.ang = ang
self._parameters = [self.op_type, self.mat.tag, self.num_fiber, self.area_fiber, *self.center, self.radius]
if self.ang is not None:
self._parameters += self.ang
self.to_process(osi)
| 31.012195
| 119
| 0.604404
|
from o3seespy.base_model import OpenSeesObject
class LayerBase(OpenSeesObject):
op_base_type = "layer"
class Straight(LayerBase):
op_type = 'straight'
def __init__(self, osi, mat, num_fiber, area_fiber, start, end):
self.mat = mat
self.num_fiber = int(num_fiber)
self.area_fiber = float(area_fiber)
self.start = start
self.end = end
self._parameters = [self.op_type, self.mat.tag, self.num_fiber, self.area_fiber, *self.start, *self.end]
self.to_process(osi)
class Circ(LayerBase):
op_type = 'circ'
def __init__(self, osi, mat, num_fiber, area_fiber, center, radius, ang=None):
self.mat = mat
self.num_fiber = int(num_fiber)
self.area_fiber = float(area_fiber)
self.center = center
self.radius = float(radius)
self.ang = ang
self._parameters = [self.op_type, self.mat.tag, self.num_fiber, self.area_fiber, *self.center, self.radius]
if self.ang is not None:
self._parameters += self.ang
self.to_process(osi)
| true
| true
|
f704ccc2e536cf7aaa02ae1d2d184594cab08683
| 14,888
|
py
|
Python
|
analyze.py
|
davidmam/BirdNET-Pi
|
873c8f4c56b30edb9297134a92a7c5a178c390e4
|
[
"Apache-2.0"
] | null | null | null |
analyze.py
|
davidmam/BirdNET-Pi
|
873c8f4c56b30edb9297134a92a7c5a178c390e4
|
[
"Apache-2.0"
] | null | null | null |
analyze.py
|
davidmam/BirdNET-Pi
|
873c8f4c56b30edb9297134a92a7c5a178c390e4
|
[
"Apache-2.0"
] | null | null | null |
# BirdWeather edits by @timsterc
# Other edits by @CaiusX and @mcguirepr89
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
try:
import tflite_runtime.interpreter as tflite
except:
from tensorflow import lite as tflite
import argparse
import operator
import librosa
import numpy as np
import math
import time
from decimal import Decimal
import json
###############################################################################
import requests
import mysql.connector
###############################################################################
import datetime
import pytz
from tzlocal import get_localzone
from pathlib import Path
def loadModel():
global INPUT_LAYER_INDEX
global OUTPUT_LAYER_INDEX
global MDATA_INPUT_INDEX
global CLASSES
print('LOADING TF LITE MODEL...', end=' ')
# Load TFLite model and allocate tensors.
interpreter = tflite.Interpreter(model_path='model/BirdNET_6K_GLOBAL_MODEL.tflite',num_threads=2)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Get input tensor index
INPUT_LAYER_INDEX = input_details[0]['index']
MDATA_INPUT_INDEX = input_details[1]['index']
OUTPUT_LAYER_INDEX = output_details[0]['index']
# Load labels
CLASSES = []
with open('model/labels.txt', 'r') as lfile:
for line in lfile.readlines():
CLASSES.append(line.replace('\n', ''))
print('DONE!')
return interpreter
def loadCustomSpeciesList(path):
slist = []
if os.path.isfile(path):
with open(path, 'r') as csfile:
for line in csfile.readlines():
slist.append(line.replace('\r', '').replace('\n', ''))
return slist
def splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5):
# Split signal with overlap
sig_splits = []
for i in range(0, len(sig), int((seconds - overlap) * rate)):
split = sig[i:i + int(seconds * rate)]
# End of signal?
if len(split) < int(minlen * rate):
break
# Signal chunk too short? Fill with zeros.
if len(split) < int(rate * seconds):
temp = np.zeros((int(rate * seconds)))
temp[:len(split)] = split
split = temp
sig_splits.append(split)
return sig_splits
def readAudioData(path, overlap, sample_rate=48000):
print('READING AUDIO DATA...', end=' ', flush=True)
# Open file with librosa (uses ffmpeg or libav)
sig, rate = librosa.load(path, sr=sample_rate, mono=True, res_type='kaiser_fast')
# Split audio into 3-second chunks
chunks = splitSignal(sig, rate, overlap)
print('DONE! READ', str(len(chunks)), 'CHUNKS.')
return chunks
def convertMetadata(m):
# Convert week to cosine
if m[2] >= 1 and m[2] <= 48:
m[2] = math.cos(math.radians(m[2] * 7.5)) + 1
else:
m[2] = -1
# Add binary mask
mask = np.ones((3,))
if m[0] == -1 or m[1] == -1:
mask = np.zeros((3,))
if m[2] == -1:
mask[2] = 0.0
return np.concatenate([m, mask])
def custom_sigmoid(x, sensitivity=1.0):
return 1 / (1.0 + np.exp(-sensitivity * x))
def predict(sample, interpreter, sensitivity):
# Make a prediction
interpreter.set_tensor(INPUT_LAYER_INDEX, np.array(sample[0], dtype='float32'))
interpreter.set_tensor(MDATA_INPUT_INDEX, np.array(sample[1], dtype='float32'))
interpreter.invoke()
prediction = interpreter.get_tensor(OUTPUT_LAYER_INDEX)[0]
# Apply custom sigmoid
p_sigmoid = custom_sigmoid(prediction, sensitivity)
# Get label and scores for pooled predictions
p_labels = dict(zip(CLASSES, p_sigmoid))
# Sort by score
p_sorted = sorted(p_labels.items(), key=operator.itemgetter(1), reverse=True)
# Remove species that are on blacklist
for i in range(min(10, len(p_sorted))):
if p_sorted[i][0] in ['Human_Human', 'Non-bird_Non-bird', 'Noise_Noise']:
p_sorted[i] = (p_sorted[i][0], 0.0)
# Only return first the top ten results
return p_sorted[:10]
def analyzeAudioData(chunks, lat, lon, week, sensitivity, overlap, interpreter):
detections = {}
start = time.time()
print('ANALYZING AUDIO...', end=' ', flush=True)
# Convert and prepare metadata
mdata = convertMetadata(np.array([lat, lon, week]))
mdata = np.expand_dims(mdata, 0)
# Parse every chunk
pred_start = 0.0
for c in chunks:
# Prepare as input signal
sig = np.expand_dims(c, 0)
# Make prediction
p = predict([sig, mdata], interpreter, sensitivity)
# Save result and timestamp
pred_end = pred_start + 3.0
detections[str(pred_start) + ';' + str(pred_end)] = p
pred_start = pred_end - overlap
print('DONE! Time', int((time.time() - start) * 10) / 10.0, 'SECONDS')
return detections
def writeResultsToFile(detections, min_conf, path):
print('WRITING RESULTS TO', path, '...', end=' ')
rcnt = 0
with open(path, 'w') as rfile:
rfile.write('Start (s);End (s);Scientific name;Common name;Confidence\n')
for d in detections:
for entry in detections[d]:
if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):
rfile.write(d + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) + '\n')
rcnt += 1
print('DONE! WROTE', rcnt, 'RESULTS.')
def main():
global WHITE_LIST
# Parse passed arguments
parser = argparse.ArgumentParser()
parser.add_argument('--i', help='Path to input file.')
parser.add_argument('--o', default='result.csv', help='Path to output file. Defaults to result.csv.')
parser.add_argument('--lat', type=float, default=-1, help='Recording location latitude. Set -1 to ignore.')
parser.add_argument('--lon', type=float, default=-1, help='Recording location longitude. Set -1 to ignore.')
parser.add_argument('--week', type=int, default=-1, help='Week of the year when the recording was made. Values in [1, 48] (4 weeks per month). Set -1 to ignore.')
parser.add_argument('--overlap', type=float, default=0.0, help='Overlap in seconds between extracted spectrograms. Values in [0.0, 2.9]. Defaults tp 0.0.')
parser.add_argument('--sensitivity', type=float, default=1.0, help='Detection sensitivity; Higher values result in higher sensitivity. Values in [0.5, 1.5]. Defaults to 1.0.')
parser.add_argument('--min_conf', type=float, default=0.1, help='Minimum confidence threshold. Values in [0.01, 0.99]. Defaults to 0.1.')
parser.add_argument('--custom_list', default='', help='Path to text file containing a list of species. Not used if not provided.')
parser.add_argument('--birdweather_id', default='99999', help='Private Station ID for BirdWeather.')
args = parser.parse_args()
# Load model
interpreter = loadModel()
# Load custom species list
if not args.custom_list == '':
WHITE_LIST = loadCustomSpeciesList(args.custom_list)
else:
WHITE_LIST = []
birdweather_id = args.birdweather_id
# Read audio data
audioData = readAudioData(args.i, args.overlap)
# Get Date/Time from filename in case Pi gets behind
#now = datetime.now()
full_file_name = args.i
file_name = Path(full_file_name).stem
file_date = file_name.split('-birdnet-')[0]
file_time = file_name.split('-birdnet-')[1]
date_time_str = file_date + ' ' + file_time
date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
#print('Date:', date_time_obj.date())
#print('Time:', date_time_obj.time())
print('Date-time:', date_time_obj)
now = date_time_obj
current_date = now.strftime("%Y/%m/%d")
current_time = now.strftime("%H:%M:%S")
current_iso8601 = now.astimezone(get_localzone()).isoformat()
week_number = int(now.strftime("%V"))
week = max(1, min(week_number, 48))
sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))
# Process audio data and get detections
detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap, interpreter)
# Write detections to output file
min_conf = max(0.01, min(args.min_conf, 0.99))
writeResultsToFile(detections, min_conf, args.o)
###############################################################################
###############################################################################
soundscape_uploaded = False
# Write detections to Database
for i in detections:
print("\n", detections[i][0],"\n")
with open('BirdDB.txt', 'a') as rfile:
for d in detections:
print("\n", "Database Entry", "\n")
for entry in detections[d]:
if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):
rfile.write(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' \
+ str(entry[1]) +";" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' \
+ str(sensitivity) +';' + str(args.overlap) + '\n')
def insert_variables_into_table(Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap):
try:
connection = mysql.connector.connect(host='localhost',
database='birds',
user='birder',
password='birdnet')
cursor = connection.cursor()
mySql_insert_query = """INSERT INTO detections (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """
record = (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)
cursor.execute(mySql_insert_query, record)
connection.commit()
print("Record inserted successfully into detections table")
except mysql.connector.Error as error:
print("Failed to insert record into detections table {}".format(error))
finally:
if connection.is_connected():
connection.close()
print("MySQL connection is closed")
species = entry[0]
sci_name,com_name = species.split('_')
insert_variables_into_table(str(current_date), str(current_time), sci_name, com_name, \
str(entry[1]), str(args.lat), str(args.lon), str(min_conf), str(week), \
str(args.sensitivity), str(args.overlap))
print(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) +";" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' + str(args.sensitivity) +';' + str(args.overlap) + '\n')
if birdweather_id != "99999":
if soundscape_uploaded is False:
# POST soundscape to server
soundscape_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/soundscapes" + "?timestamp=" + current_iso8601
with open(args.i, 'rb') as f:
wav_data = f.read()
response = requests.post(url=soundscape_url, data=wav_data, headers={'Content-Type': 'application/octet-stream'})
print("Soundscape POST Response Status - ", response.status_code)
sdata = response.json()
soundscape_id = sdata['soundscape']['id']
soundscape_uploaded = True
# POST detection to server
detection_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/detections"
start_time = d.split(';')[0]
end_time = d.split(';')[1]
post_begin = "{ "
now_p_start = now + datetime.timedelta(seconds=float(start_time))
current_iso8601 = now_p_start.astimezone(get_localzone()).isoformat()
post_timestamp = "\"timestamp\": \"" + current_iso8601 + "\","
post_lat = "\"lat\": " + str(args.lat) + ","
post_lon = "\"lon\": " + str(args.lon) + ","
post_soundscape_id = "\"soundscapeId\": " + str(soundscape_id) + ","
post_soundscape_start_time = "\"soundscapeStartTime\": " + start_time + ","
post_soundscape_end_time = "\"soundscapeEndTime\": " + end_time + ","
post_commonName = "\"commonName\": \"" + entry[0].split('_')[1] + "\","
post_scientificName = "\"scientificName\": \"" + entry[0].split('_')[0] + "\","
post_algorithm = "\"algorithm\": " + "\"alpha\"" + ","
post_confidence = "\"confidence\": " + str(entry[1])
post_end = " }"
post_json = post_begin + post_timestamp + post_lat + post_lon + post_soundscape_id + post_soundscape_start_time + post_soundscape_end_time + post_commonName + post_scientificName + post_algorithm + post_confidence + post_end
print(post_json)
response = requests.post(detection_url, json=json.loads(post_json))
print("Detection POST Response Status - ", response.status_code)
#time.sleep(3)
###############################################################################
###############################################################################
if __name__ == '__main__':
main()
# Example calls
# python3 analyze.py --i 'example/XC558716 - Soundscape.mp3' --lat 35.4244 --lon -120.7463 --week 18
# python3 analyze.py --i 'example/XC563936 - Soundscape.mp3' --lat 47.6766 --lon -122.294 --week 11 --overlap 1.5 --min_conf 0.25 --sensitivity 1.25 --custom_list 'example/custom_species_list.txt'
| 42.056497
| 272
| 0.558907
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
try:
import tflite_runtime.interpreter as tflite
except:
from tensorflow import lite as tflite
import argparse
import operator
import librosa
import numpy as np
import math
import time
from decimal import Decimal
import json
ate = now.strftime("%Y/%m/%d")
current_time = now.strftime("%H:%M:%S")
current_iso8601 = now.astimezone(get_localzone()).isoformat()
week_number = int(now.strftime("%V"))
week = max(1, min(week_number, 48))
sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))
detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap, interpreter)
min_conf = max(0.01, min(args.min_conf, 0.99))
writeResultsToFile(detections, min_conf, args.o)
| true
| true
|
f704ccdab8daddc07843c80260a004c3a4b58cc3
| 40,273
|
py
|
Python
|
tests/unit_test/action/action_test.py
|
Anitej/kairon
|
61d6bd7f230a744303abab42e3b54b0381fee7da
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_test/action/action_test.py
|
Anitej/kairon
|
61d6bd7f230a744303abab42e3b54b0381fee7da
|
[
"Apache-2.0"
] | null | null | null |
tests/unit_test/action/action_test.py
|
Anitej/kairon
|
61d6bd7f230a744303abab42e3b54b0381fee7da
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
os.environ["system_file"] = "./tests/testing_data/system.yaml"
from typing import Dict, Text, Any, List
import pytest
import responses
from mongoengine import connect, disconnect
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from kairon.action_server.data_objects import HttpActionRequestBody, HttpActionConfig, HttpActionLog
from kairon.action_server.actions import ActionUtility, HttpAction
from kairon.action_server.exception import HttpActionFailure
from kairon.utils import Utility
def pytest_configure():
return {
'db_url': None,
}
class TestActions:
@pytest.fixture(autouse=True)
def setup(self):
os.environ["system_file"] = "./tests/testing_data/system.yaml"
Utility.load_evironment()
db_url = Utility.environment['database']["url"]
pytest.db_url = db_url
connect(host=db_url)
@pytest.fixture
def mock_get_http_action_exception(self, monkeypatch):
def _raise_excep(*arge, **kwargs):
raise HttpActionFailure("No HTTP action found for bot and action")
monkeypatch.setattr(ActionUtility, "get_http_action_config", _raise_excep)
@responses.activate
def test_execute_http_request_getWith_auth_token(self):
http_url = 'http://localhost:8080/mock'
# file deepcode ignore HardcodedNonCryptoSecret: Random string for testing
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_get_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_post_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_post_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_put_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_put_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_delete_with_request_body_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_with_auth_token_no_request_body(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=None)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[
responses.json_params_matcher(request_params)
]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
def test_get_http_action_config(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "http_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_config_deleted_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="",
action_name="test_get_http_action_config_deleted_action",
response="${RESPONSE}",
http_url="http://www.digite.com",
request_method="POST",
params_list=http_params,
bot="bot",
user="user",
status=False
).save().to_mongo().to_dict()
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="test_get_http_action_config_deleted_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "test_get_http_action_config_deleted_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_no_bot(self):
try:
ActionUtility.get_http_action_config(bot=None, action_name="http_action")
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_no_http_action(self):
try:
ActionUtility.get_http_action_config(bot="bot", action_name=None)
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_invalid_bot(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot1", "http_action")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_invalid_http_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_no_request_body(self):
http_params = []
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_prepare_request(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "slot_name": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="param2", value="slot_name", parameter_type="slot")]
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
actual_request_body = ActionUtility.prepare_request(tracker=tracker,
http_action_config_params=http_action_config_params)
assert actual_request_body
assert actual_request_body['param1'] == 'value1'
assert actual_request_body['param2'] == 'param2value'
def test_prepare_request_empty_slot(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="param3", value="", parameter_type="slot")]
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
assert request_params['param1'] == "value1"
assert not request_params['param3']
def test_prepare_request_sender_id(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="user_id", value="", parameter_type="sender_id")]
tracker = Tracker(sender_id="kairon_user@digite.com", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
assert request_params['param1'] == "value1"
assert request_params['user_id'] == "kairon_user@digite.com"
def test_prepare_request_no_request_params(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events: List[Dict] = None
http_action_config_params: List[HttpActionRequestBody] = None
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
actual_request_body = ActionUtility.prepare_request(tracker=tracker,
http_action_config_params=http_action_config_params)
# deepcode ignore C1801: empty request body for http request with no request body params
assert len(actual_request_body) == 0
@pytest.mark.asyncio
async def test_name(self):
assert await HttpAction().name() == "kairon_http_action"
def test_is_empty(self):
assert ActionUtility.is_empty("")
assert ActionUtility.is_empty(" ")
assert ActionUtility.is_empty(None)
assert not ActionUtility.is_empty("None")
    def test_prepare_response(self):
        """Dotted ${...} placeholders in the template resolve against the JSON response."""
        json1 = json.dumps({
            "a": {
                "b": {
                    "3": 2,
                    "43": 30,
                    "c": [],
                    "d": ['red', 'buggy', 'bumpers'],
                }
            }
        })
        response = ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json1)
        assert response == 'The value of 2 in red is []'
        # List elements are addressed by numeric index; whole sub-documents
        # render as JSON text in the substituted output.
        json2 = json.dumps({
            "data": [
                {"a": {
                    "b": {
                        "43": 30,
                        "c": [],
                        "d": ['red', 'buggy', 'bumpers'],
                    }}},
                {"a": {
                    "b": {
                        "43": 5,
                        "c": [1, 2],
                        "d": ['buggy', 'bumpers'],
                    }}}
            ]
        })
        response = ActionUtility.prepare_response("The value of ${data.0.a} in ${data.0.a.b} is ${data.0.a.b.d}", json2)
        assert response == 'The value of {"b": {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}} in {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]} is [\'red\', \'buggy\', \'bumpers\']'
def test_prepare_response_key_not_present(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
assert False
except HttpActionFailure:
assert True
def test_prepare_response_string_response(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
response = ActionUtility.prepare_response("The value of red is 0", json1)
assert response == "The value of red is 0"
def test_prepare_response_string_empty_response_string(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
response = ActionUtility.prepare_response("", json1)
assert response == '{"a": {"b": {"3": 2, "43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}}}'
def test_prepare_response_string_empty_request_output(self):
json1 = json.dumps("{}")
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
assert False
except HttpActionFailure:
assert True
def test_prepare_response_invalid_response_json(self):
json_as_string = "Not a json string"
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json_as_string)
assert False
except HttpActionFailure as e:
assert str(e) == 'Could not find value for keys in response'
def test_prepare_response_as_json_and_expected_as_plain_string(self):
json_as_string = "Not a json string"
response = ActionUtility.prepare_response("The value of 2 in red is []", json_as_string)
assert response == 'The value of 2 in red is []'
def test_prepare_response_as_string_and_expected_as_none(self):
response = ActionUtility.prepare_response("The value of 2 in red is []", None)
assert response == 'The value of 2 in red is []'
@pytest.mark.asyncio
async def test_run_invalid_http_action(self, mock_get_http_action_exception):
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_http_action": "test_run_invalid_http_action",
"param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="test_run_invalid_http_action1",
response="json",
http_url="http://www.google.com",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
).save()
dispatcher: CollectingDispatcher = CollectingDispatcher()
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
await HttpAction().run(dispatcher, tracker, domain)
str(dispatcher.messages[0]['text']).__contains__(
"I have failed to process your request: No HTTP action found for bot")
log = HttpActionLog.objects(sender="sender1",
bot="5f50fd0a56b698ca10d35d2e",
status="FAILURE").get()
assert log['exception'].__contains__('No HTTP action found for bot')
    @pytest.mark.asyncio
    async def test_run_no_bot(self):
        """Without a bot id in the slots, run() fails fast and logs FAILURE."""
        slots = {"bot": None, "http_action_config_http_action": "new_http_action", "param2": "param2value"}
        events = [{"event1": "hello"}, {"event2": "how are you"}]
        dispatcher: CollectingDispatcher = CollectingDispatcher()
        latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
        tracker = Tracker(sender_id="sender2", slots=slots, events=events, paused=False, latest_message=latest_message,
                          followup_action=None, active_loop=None, latest_action_name=None)
        domain: Dict[Text, Any] = None
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        assert actual is not None
        assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
        assert str(actual[0]['value']) == 'I have failed to process your request'
        log = HttpActionLog.objects(sender="sender2",
                                    status="FAILURE").get()
        assert log['exception'] == 'Bot id and HTTP action configuration name not found in slot'
    @pytest.mark.asyncio
    async def test_run_no_http_action(self):
        """Without an action name in the slots, run() returns the generic failure response."""
        slots = {"bot": "jhgfsjgfausyfgus", "http_action_config_http_action": None, "param2": "param2value"}
        events = [{"event1": "hello"}, {"event2": "how are you"}]
        dispatcher: CollectingDispatcher = CollectingDispatcher()
        latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
        tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                          followup_action=None, active_loop=None, latest_action_name=None)
        domain: Dict[Text, Any] = None
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        assert actual is not None
        assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
        assert str(actual[0]['value']) == 'I have failed to process your request'
    @pytest.mark.asyncio
    async def test_run(self, monkeypatch):
        """Happy path: run() returns the configured static response and logs SUCCESS."""
        action = HttpActionConfig(
            auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
            action_name="http_action",
            response="This should be response",
            http_url="http://www.google.com",
            request_method="GET",
            params_list=None,
            bot="5f50fd0a56b698ca10d35d2e",
            user="user"
        )
        def _get_action(*arge, **kwargs):
            # Bypass the DB lookup and serve this test's config directly.
            return action.to_mongo().to_dict()
        monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
        slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "http_action",
                 "param2": "param2value"}
        events = [{"event1": "hello"}, {"event2": "how are you"}]
        dispatcher: CollectingDispatcher = CollectingDispatcher()
        latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
        tracker = Tracker(sender_id="sender_test_run", slots=slots, events=events, paused=False,
                          latest_message=latest_message,
                          followup_action=None, active_loop=None, latest_action_name=None)
        domain: Dict[Text, Any] = None
        action.save().to_mongo().to_dict()
        # NOTE(review): this performs a real GET to http://www.google.com (no
        # responses mock is registered here) — consider mocking for isolation.
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        assert actual is not None
        assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
        assert str(actual[0]['value']) == 'This should be response'
        log = HttpActionLog.objects(sender="sender_test_run",
                                    status="SUCCESS").get()
        assert not log['exception']
        assert log['timestamp']
        assert log['intent']
        assert log['action']
        assert log['bot_response']
        assert log['api_response']
@pytest.mark.asyncio
async def test_run_with_post(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_post",
response="Data added successfully, id:${RESPONSE}",
http_url="http://localhost:8080/mock",
request_method="POST",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
def _get_action(*arge, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8080/mock'
resp_msg = "5000"
responses.start()
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert actual[0]['name'] == 'KAIRON_ACTION_RESPONSE'
assert actual[0]['value'] == 'Data added successfully, id:5000'
    @pytest.mark.asyncio
    async def test_run_with_post_and_parameters(self, monkeypatch):
        """POST with configured static params: ${RESPONSE} is filled and the
        SUCCESS log captures the request params and both responses."""
        request_params = [HttpActionRequestBody(key='key1', value="value1"),
                          HttpActionRequestBody(key='key2', value="value2")]
        action = HttpActionConfig(
            auth_token="",
            action_name="test_run_with_post",
            response="Data added successfully, id:${RESPONSE}",
            http_url="http://localhost:8080/mock",
            request_method="POST",
            params_list=request_params,
            bot="5f50fd0a56b698ca10d35d2e",
            user="user"
        )
        def _get_action(*arge, **kwargs):
            # Bypass the DB lookup and serve this test's config directly.
            return action.to_mongo().to_dict()
        monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
        http_url = 'http://localhost:8080/mock'
        resp_msg = "5000"
        responses.start()
        responses.add(
            method=responses.POST,
            url=http_url,
            body=resp_msg,
            status=200,
        )
        slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
        events = [{"event1": "hello"}, {"event2": "how are you"}]
        dispatcher: CollectingDispatcher = CollectingDispatcher()
        latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
        tracker = Tracker(sender_id="sender_test_run_with_post", slots=slots, events=events, paused=False,
                          latest_message=latest_message,
                          followup_action=None, active_loop=None, latest_action_name=None)
        domain: Dict[Text, Any] = None
        action.save().to_mongo().to_dict()
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        responses.stop()
        assert actual is not None
        assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
        assert str(actual[0]['value']) == 'Data added successfully, id:5000'
        log = HttpActionLog.objects(sender="sender_test_run_with_post",
                                    action="test_run_with_post",
                                    status="SUCCESS").get()
        assert not log['exception']
        assert log['timestamp']
        assert log['intent'] == "test_run"
        assert log['action'] == "test_run_with_post"
        assert log['request_params'] == {"key1": "value1", "key2": "value2"}
        assert log['api_response'] == '5000'
        assert log['bot_response'] == 'Data added successfully, id:5000'
    @pytest.mark.asyncio
    async def test_run_with_get(self, monkeypatch):
        """GET action whose response template resolves placeholders against the
        mocked JSON payload."""
        action = HttpActionConfig(
            auth_token="",
            action_name="test_run_with_get",
            response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
            http_url="http://localhost:8081/mock",
            request_method="GET",
            params_list=None,
            bot="5f50fd0a56b698ca10d35d2e",
            user="user"
        )
        def _get_action(*arge, **kwargs):
            # Bypass the DB lookup and serve this test's config directly.
            return action.to_mongo().to_dict()
        monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
        http_url = 'http://localhost:8081/mock'
        resp_msg = json.dumps({
            "a": {
                "b": {
                    "3": 2,
                    "43": 30,
                    "c": [],
                    "d": ['red', 'buggy', 'bumpers'],
                }
            }
        })
        responses.start()
        responses.add(
            method=responses.GET,
            url=http_url,
            body=resp_msg,
            status=200,
        )
        slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
        events = [{"event1": "hello"}, {"event2": "how are you"}]
        dispatcher: CollectingDispatcher = CollectingDispatcher()
        latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
        tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                          followup_action=None, active_loop=None, latest_action_name=None)
        domain: Dict[Text, Any] = None
        action.save().to_mongo().to_dict()
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        responses.stop()
        assert actual is not None
        assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
        assert str(actual[0]['value']) == 'The value of 2 in red is [\'red\', \'buggy\', \'bumpers\']'
    @pytest.mark.asyncio
    async def test_run_no_connection(self, monkeypatch):
        """When the target endpoint is unreachable, run() degrades to the
        generic failure message instead of raising."""
        action = HttpActionConfig(
            auth_token="",
            action_name="test_run_with_post",
            response="This should be response",
            http_url="http://localhost:8085/mock",
            request_method="GET",
            params_list=None,
            bot="5f50fd0a56b698ca10d35d2e",
            user="user"
        )
        def _get_action(*arge, **kwargs):
            # Bypass the DB lookup and serve this test's config directly.
            return action.to_mongo().to_dict()
        monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
        slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
        events = [{"event1": "hello"}, {"event2": "how are you"}]
        dispatcher: CollectingDispatcher = CollectingDispatcher()
        latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
        tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                          followup_action=None, active_loop=None, latest_action_name=None)
        domain: Dict[Text, Any] = None
        action.save()
        # No mock is registered for port 8085, so the request fails to connect.
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        assert actual is not None
        assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
        assert str(actual[0]['value']).__contains__('I have failed to process your request')
    @pytest.mark.asyncio
    async def test_run_with_get_placeholder_vs_string_response(self, monkeypatch):
        """A plain-string HTTP response cannot satisfy a placeholder template,
        so run() falls back to the generic failure message."""
        action = HttpActionConfig(
            auth_token="",
            action_name="test_run_with_get_string_http_response_placeholder_required",
            response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
            http_url="http://localhost:8080/mock",
            request_method="GET",
            params_list=None,
            bot="5f50fd0a56b698ca10d35d2e",
            user="user"
        )
        def _get_action(*arge, **kwargs):
            # Bypass the DB lookup and serve this test's config directly.
            return action.to_mongo().to_dict()
        monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
        # NOTE(review): the mock is registered on port 8082 while the action
        # targets port 8080 — confirm whether the mismatch is intentional;
        # either path ends in the failure message asserted below.
        http_url = 'http://localhost:8082/mock'
        resp_msg = "This is string http response"
        responses.start()
        responses.add(
            method=responses.GET,
            url=http_url,
            body=resp_msg,
            status=200,
        )
        slots = {"bot": "5f50fd0a56b698ca10d35d2e",
                 "http_action_config_test_run": "test_run_with_get_string_http_response_placeholder_required"}
        events = [{"event1": "hello"}, {"event2": "how are you"}]
        dispatcher: CollectingDispatcher = CollectingDispatcher()
        latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
        tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                          followup_action=None, active_loop=None, latest_action_name=None)
        domain: Dict[Text, Any] = None
        action.save().to_mongo().to_dict()
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        responses.stop()
        assert actual is not None
        assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
        assert str(
            actual[0]['value']) == 'I have failed to process your request'
def test_attach_response_no_placeholder(self):
output = ActionUtility.attach_response("This has no placeholder", {"a": "b"})
assert output == "This has no placeholder"
def test_attach_response(self):
output = ActionUtility.attach_response("I want $${RESPONSE}", {"dollars": "51"})
assert output == 'I want ${\'dollars\': \'51\'}'
def test_attach_response_int(self):
output = ActionUtility.attach_response("I want $${RESPONSE}", 51)
assert output == 'I want $51'
def test_retrieve_value_from_response(self):
keys = ["a.b.3", 'a.b']
resp_msg = {
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
}
key_values = ActionUtility.retrieve_value_from_response(keys, resp_msg)
assert key_values is not None
assert key_values['${a.b.3}'] == 2
assert key_values['${a.b}'] is not None
assert key_values['${a.b}']['3'] == 2
assert key_values['${a.b}']['d'][0] == 'red'
def test_retrieve_value_from_response_invalid_key(self):
keys = ["d.e.f", 'g.h']
resp_msg = {
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
}
try:
ActionUtility.retrieve_value_from_response(keys, resp_msg)
assert False
except HttpActionFailure as e:
assert str(e) == 'Unable to retrieve value for key from HTTP response: \'d\''
| 45.098544
| 196
| 0.596653
|
import json
import os
os.environ["system_file"] = "./tests/testing_data/system.yaml"
from typing import Dict, Text, Any, List
import pytest
import responses
from mongoengine import connect, disconnect
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from kairon.action_server.data_objects import HttpActionRequestBody, HttpActionConfig, HttpActionLog
from kairon.action_server.actions import ActionUtility, HttpAction
from kairon.action_server.exception import HttpActionFailure
from kairon.utils import Utility
def pytest_configure():
return {
'db_url': None,
}
class TestActions:
@pytest.fixture(autouse=True)
def setup(self):
os.environ["system_file"] = "./tests/testing_data/system.yaml"
Utility.load_evironment()
db_url = Utility.environment['database']["url"]
pytest.db_url = db_url
connect(host=db_url)
@pytest.fixture
def mock_get_http_action_exception(self, monkeypatch):
def _raise_excep(*arge, **kwargs):
raise HttpActionFailure("No HTTP action found for bot and action")
monkeypatch.setattr(ActionUtility, "get_http_action_config", _raise_excep)
@responses.activate
def test_execute_http_request_getWith_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_get_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_post_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_post_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_put_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_put_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_delete_with_request_body_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_with_auth_token_no_request_body(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=None)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[
responses.json_params_matcher(request_params)
]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
def test_get_http_action_config(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "http_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_config_deleted_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="",
action_name="test_get_http_action_config_deleted_action",
response="${RESPONSE}",
http_url="http://www.digite.com",
request_method="POST",
params_list=http_params,
bot="bot",
user="user",
status=False
).save().to_mongo().to_dict()
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="test_get_http_action_config_deleted_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "test_get_http_action_config_deleted_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_no_bot(self):
try:
ActionUtility.get_http_action_config(bot=None, action_name="http_action")
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_no_http_action(self):
try:
ActionUtility.get_http_action_config(bot="bot", action_name=None)
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_invalid_bot(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot1", "http_action")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_invalid_http_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_no_request_body(self):
http_params = []
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_prepare_request(self):
    """Slot-typed params are resolved from tracker slots; value-typed ones pass through."""
    slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "slot_name": "param2value"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
                                 HttpActionRequestBody(key="param2", value="slot_name", parameter_type="slot")]
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
                      followup_action=None, active_loop=None, latest_action_name=None)
    actual_request_body = ActionUtility.prepare_request(tracker=tracker,
                                                        http_action_config_params=http_action_config_params)
    assert actual_request_body
    assert actual_request_body['param1'] == 'value1'
    # "param2" is slot-typed: its value comes from slots["slot_name"].
    assert actual_request_body['param2'] == 'param2value'

def test_prepare_request_empty_slot(self):
    """A slot-typed param whose slot is absent resolves to an empty/falsy value."""
    slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
                                 HttpActionRequestBody(key="param3", value="", parameter_type="slot")]
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
                      followup_action=None, active_loop=None, latest_action_name=None)
    request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
    assert request_params['param1'] == "value1"
    assert not request_params['param3']

def test_prepare_request_sender_id(self):
    """A sender_id-typed param is filled with the tracker's sender_id."""
    slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
                                 HttpActionRequestBody(key="user_id", value="", parameter_type="sender_id")]
    tracker = Tracker(sender_id="kairon_user@digite.com", slots=slots, events=events, paused=False, latest_message=None,
                      followup_action=None, active_loop=None, latest_action_name=None)
    request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
    assert request_params['param1'] == "value1"
    assert request_params['user_id'] == "kairon_user@digite.com"

def test_prepare_request_no_request_params(self):
    """With no configured params the prepared request body is empty."""
    slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
    events: List[Dict] = None
    http_action_config_params: List[HttpActionRequestBody] = None
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
                      followup_action=None, active_loop=None, latest_action_name=None)
    actual_request_body = ActionUtility.prepare_request(tracker=tracker,
                                                        http_action_config_params=http_action_config_params)
    assert len(actual_request_body) == 0
@pytest.mark.asyncio
async def test_name(self):
    """The custom action registers under the fixed name 'kairon_http_action'."""
    assert await HttpAction().name() == "kairon_http_action"

def test_is_empty(self):
    """is_empty treats '', whitespace-only strings and None as empty; other text is not."""
    assert ActionUtility.is_empty("")
    assert ActionUtility.is_empty("  ")
    assert ActionUtility.is_empty(None)
    assert not ActionUtility.is_empty("None")
def test_prepare_response(self):
    """${dot.path} placeholders in the response template are substituted from the JSON payload."""
    json1 = json.dumps({
        "a": {
            "b": {
                "3": 2,
                "43": 30,
                "c": [],
                "d": ['red', 'buggy', 'bumpers'],
            }
        }
    })
    response = ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json1)
    assert response == 'The value of 2 in red is []'
    # Numeric path segments also index into lists (data.0 -> first list element).
    json2 = json.dumps({
        "data": [
            {"a": {
                "b": {
                    "43": 30,
                    "c": [],
                    "d": ['red', 'buggy', 'bumpers'],
                }}},
            {"a": {
                "b": {
                    "43": 5,
                    "c": [1, 2],
                    "d": ['buggy', 'bumpers'],
                }}}
        ]
    })
    response = ActionUtility.prepare_response("The value of ${data.0.a} in ${data.0.a.b} is ${data.0.a.b.d}", json2)
    assert response == 'The value of {"b": {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}} in {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]} is [\'red\', \'buggy\', \'bumpers\']'

def test_prepare_response_key_not_present(self):
    """A placeholder key missing from the payload raises HttpActionFailure."""
    json1 = json.dumps({
        "a": {
            "b": {
                "3": 2,
                "43": 30,
                "c": [],
                "d": ['red', 'buggy', 'bumpers'],
            }
        }
    })
    try:
        ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
        assert False
    except HttpActionFailure:
        assert True

def test_prepare_response_string_response(self):
    """A template with no placeholders is returned unchanged."""
    json1 = json.dumps({
        "a": {
            "b": {
                "3": 2,
                "43": 30,
                "c": [],
                "d": ['red', 'buggy', 'bumpers'],
            }
        }
    })
    response = ActionUtility.prepare_response("The value of red is 0", json1)
    assert response == "The value of red is 0"

def test_prepare_response_string_empty_response_string(self):
    """An empty template falls back to returning the whole HTTP payload."""
    json1 = json.dumps({
        "a": {
            "b": {
                "3": 2,
                "43": 30,
                "c": [],
                "d": ['red', 'buggy', 'bumpers'],
            }
        }
    })
    response = ActionUtility.prepare_response("", json1)
    assert response == '{"a": {"b": {"3": 2, "43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}}}'

def test_prepare_response_string_empty_request_output(self):
    """Placeholders cannot be resolved against an effectively empty payload."""
    json1 = json.dumps("{}")
    try:
        ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
        assert False
    except HttpActionFailure:
        assert True

def test_prepare_response_invalid_response_json(self):
    """A non-JSON payload combined with placeholders raises HttpActionFailure."""
    json_as_string = "Not a json string"
    try:
        ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json_as_string)
        assert False
    except HttpActionFailure as e:
        assert str(e) == 'Could not find value for keys in response'

def test_prepare_response_as_json_and_expected_as_plain_string(self):
    """A placeholder-free template is returned as-is even for non-JSON payloads."""
    json_as_string = "Not a json string"
    response = ActionUtility.prepare_response("The value of 2 in red is []", json_as_string)
    assert response == 'The value of 2 in red is []'

def test_prepare_response_as_string_and_expected_as_none(self):
    """A placeholder-free template is returned as-is even when the payload is None."""
    response = ActionUtility.prepare_response("The value of 2 in red is []", None)
    assert response == 'The value of 2 in red is []'
@pytest.mark.asyncio
async def test_run_invalid_http_action(self, mock_get_http_action_exception):
    """A config lookup failure is reported to the user and logged with FAILURE status."""
    slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_http_action": "test_run_invalid_http_action",
             "param2": "param2value"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
    # Stored under a different name than the slot requests, so lookup fails
    # (the fixture also mocks get_http_action_config to raise).
    HttpActionConfig(
        auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
        action_name="test_run_invalid_http_action1",
        response="json",
        http_url="http://www.google.com",
        request_method="GET",
        params_list=None,
        bot="5f50fd0a56b698ca10d35d2e",
        user="user"
    ).save()
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    await HttpAction().run(dispatcher, tracker, domain)
    str(dispatcher.messages[0]['text']).__contains__(
        "I have failed to process your request: No HTTP action found for bot")
    log = HttpActionLog.objects(sender="sender1",
                                bot="5f50fd0a56b698ca10d35d2e",
                                status="FAILURE").get()
    assert log['exception'].__contains__('No HTTP action found for bot')

@pytest.mark.asyncio
async def test_run_no_bot(self):
    """A missing bot slot yields the generic failure response and a FAILURE log entry."""
    slots = {"bot": None, "http_action_config_http_action": "new_http_action", "param2": "param2value"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
    tracker = Tracker(sender_id="sender2", slots=slots, events=events, paused=False, latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    assert actual is not None
    assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
    assert str(actual[0]['value']) == 'I have failed to process your request'
    log = HttpActionLog.objects(sender="sender2",
                                status="FAILURE").get()
    assert log['exception'] == 'Bot id and HTTP action configuration name not found in slot'

@pytest.mark.asyncio
async def test_run_no_http_action(self):
    """A missing http action config slot yields the generic failure response."""
    slots = {"bot": "jhgfsjgfausyfgus", "http_action_config_http_action": None, "param2": "param2value"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    assert actual is not None
    assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
    assert str(actual[0]['value']) == 'I have failed to process your request'
@pytest.mark.asyncio
async def test_run(self, monkeypatch):
    """A successful GET run returns the configured static response and logs SUCCESS."""
    action = HttpActionConfig(
        auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
        action_name="http_action",
        response="This should be response",
        http_url="http://www.google.com",
        request_method="GET",
        params_list=None,
        bot="5f50fd0a56b698ca10d35d2e",
        user="user"
    )

    def _get_action(*arge, **kwargs):
        # Bypass the DB lookup and serve the config defined above.
        return action.to_mongo().to_dict()

    monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
    slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "http_action",
             "param2": "param2value"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
    tracker = Tracker(sender_id="sender_test_run", slots=slots, events=events, paused=False,
                      latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    action.save().to_mongo().to_dict()
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    assert actual is not None
    assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
    assert str(actual[0]['value']) == 'This should be response'
    log = HttpActionLog.objects(sender="sender_test_run",
                                status="SUCCESS").get()
    assert not log['exception']
    assert log['timestamp']
    assert log['intent']
    assert log['action']
    assert log['bot_response']
    assert log['api_response']

@pytest.mark.asyncio
async def test_run_with_post(self, monkeypatch):
    """A POST run substitutes ${RESPONSE} in the template with the mocked HTTP body."""
    action = HttpActionConfig(
        auth_token="",
        action_name="test_run_with_post",
        response="Data added successfully, id:${RESPONSE}",
        http_url="http://localhost:8080/mock",
        request_method="POST",
        params_list=None,
        bot="5f50fd0a56b698ca10d35d2e",
        user="user"
    )

    def _get_action(*arge, **kwargs):
        # Bypass the DB lookup and serve the config defined above.
        return action.to_mongo().to_dict()

    monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
    http_url = 'http://localhost:8080/mock'
    resp_msg = "5000"
    # Mock the outbound POST so no real server is required.
    responses.start()
    responses.add(
        method=responses.POST,
        url=http_url,
        body=resp_msg,
        status=200,
    )
    slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    action.save().to_mongo().to_dict()
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    assert actual is not None
    assert actual[0]['name'] == 'KAIRON_ACTION_RESPONSE'
    assert actual[0]['value'] == 'Data added successfully, id:5000'
@pytest.mark.asyncio
async def test_run_with_post_and_parameters(self, monkeypatch):
    """A POST run with configured params logs request params, API response and bot response."""
    request_params = [HttpActionRequestBody(key='key1', value="value1"),
                      HttpActionRequestBody(key='key2', value="value2")]
    action = HttpActionConfig(
        auth_token="",
        action_name="test_run_with_post",
        response="Data added successfully, id:${RESPONSE}",
        http_url="http://localhost:8080/mock",
        request_method="POST",
        params_list=request_params,
        bot="5f50fd0a56b698ca10d35d2e",
        user="user"
    )

    def _get_action(*arge, **kwargs):
        # Bypass the DB lookup and serve the config defined above.
        return action.to_mongo().to_dict()

    monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
    http_url = 'http://localhost:8080/mock'
    resp_msg = "5000"
    # Mock the outbound POST so no real server is required.
    responses.start()
    responses.add(
        method=responses.POST,
        url=http_url,
        body=resp_msg,
        status=200,
    )
    slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
    tracker = Tracker(sender_id="sender_test_run_with_post", slots=slots, events=events, paused=False,
                      latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    action.save().to_mongo().to_dict()
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    responses.stop()
    assert actual is not None
    assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
    assert str(actual[0]['value']) == 'Data added successfully, id:5000'
    log = HttpActionLog.objects(sender="sender_test_run_with_post",
                                action="test_run_with_post",
                                status="SUCCESS").get()
    assert not log['exception']
    assert log['timestamp']
    assert log['intent'] == "test_run"
    assert log['action'] == "test_run_with_post"
    assert log['request_params'] == {"key1": "value1", "key2": "value2"}
    assert log['api_response'] == '5000'
    assert log['bot_response'] == 'Data added successfully, id:5000'

@pytest.mark.asyncio
async def test_run_with_get(self, monkeypatch):
    """A GET run resolves ${dot.path} placeholders against the mocked JSON body."""
    action = HttpActionConfig(
        auth_token="",
        action_name="test_run_with_get",
        response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
        http_url="http://localhost:8081/mock",
        request_method="GET",
        params_list=None,
        bot="5f50fd0a56b698ca10d35d2e",
        user="user"
    )

    def _get_action(*arge, **kwargs):
        # Bypass the DB lookup and serve the config defined above.
        return action.to_mongo().to_dict()

    monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
    http_url = 'http://localhost:8081/mock'
    resp_msg = json.dumps({
        "a": {
            "b": {
                "3": 2,
                "43": 30,
                "c": [],
                "d": ['red', 'buggy', 'bumpers'],
            }
        }
    })
    responses.start()
    responses.add(
        method=responses.GET,
        url=http_url,
        body=resp_msg,
        status=200,
    )
    slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    action.save().to_mongo().to_dict()
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    responses.stop()
    assert actual is not None
    assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
    assert str(actual[0]['value']) == 'The value of 2 in red is [\'red\', \'buggy\', \'bumpers\']'
@pytest.mark.asyncio
async def test_run_no_connection(self, monkeypatch):
    """An unreachable HTTP endpoint produces the generic failure response."""
    action = HttpActionConfig(
        auth_token="",
        action_name="test_run_with_post",
        response="This should be response",
        http_url="http://localhost:8085/mock",
        request_method="GET",
        params_list=None,
        bot="5f50fd0a56b698ca10d35d2e",
        user="user"
    )

    def _get_action(*arge, **kwargs):
        # Bypass the DB lookup and serve the config defined above.
        return action.to_mongo().to_dict()

    monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
    slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    action.save()
    # No mock registered for port 8085, so the request must fail to connect.
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    assert actual is not None
    assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
    assert str(actual[0]['value']).__contains__('I have failed to process your request')

@pytest.mark.asyncio
async def test_run_with_get_placeholder_vs_string_response(self, monkeypatch):
    """Placeholders in the template against a plain-string HTTP body yield the failure response."""
    action = HttpActionConfig(
        auth_token="",
        action_name="test_run_with_get_string_http_response_placeholder_required",
        response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
        http_url="http://localhost:8080/mock",
        request_method="GET",
        params_list=None,
        bot="5f50fd0a56b698ca10d35d2e",
        user="user"
    )

    def _get_action(*arge, **kwargs):
        # Bypass the DB lookup and serve the config defined above.
        return action.to_mongo().to_dict()

    monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
    # NOTE(review): the mock is registered on port 8082 while the config targets
    # 8080 — the failure path is exercised either way; confirm intent.
    http_url = 'http://localhost:8082/mock'
    resp_msg = "This is string http response"
    responses.start()
    responses.add(
        method=responses.GET,
        url=http_url,
        body=resp_msg,
        status=200,
    )
    slots = {"bot": "5f50fd0a56b698ca10d35d2e",
             "http_action_config_test_run": "test_run_with_get_string_http_response_placeholder_required"}
    events = [{"event1": "hello"}, {"event2": "how are you"}]
    dispatcher: CollectingDispatcher = CollectingDispatcher()
    latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
    tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
                      followup_action=None, active_loop=None, latest_action_name=None)
    domain: Dict[Text, Any] = None
    action.save().to_mongo().to_dict()
    actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
    responses.stop()
    assert actual is not None
    assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
    assert str(
        actual[0]['value']) == 'I have failed to process your request'
def test_attach_response_no_placeholder(self):
    """attach_response leaves templates without ${RESPONSE} untouched."""
    output = ActionUtility.attach_response("This has no placeholder", {"a": "b"})
    assert output == "This has no placeholder"

def test_attach_response(self):
    """${RESPONSE} is replaced by the repr of a dict payload."""
    output = ActionUtility.attach_response("I want $${RESPONSE}", {"dollars": "51"})
    assert output == 'I want ${\'dollars\': \'51\'}'

def test_attach_response_int(self):
    """${RESPONSE} is replaced by the string form of an int payload."""
    output = ActionUtility.attach_response("I want $${RESPONSE}", 51)
    assert output == 'I want $51'

def test_retrieve_value_from_response(self):
    """Dot-path keys resolve to nested values; results are keyed as ${path}."""
    keys = ["a.b.3", 'a.b']
    resp_msg = {
        "a": {
            "b": {
                "3": 2,
                "43": 30,
                "c": [],
                "d": ['red', 'buggy', 'bumpers'],
            }
        }
    }
    key_values = ActionUtility.retrieve_value_from_response(keys, resp_msg)
    assert key_values is not None
    assert key_values['${a.b.3}'] == 2
    assert key_values['${a.b}'] is not None
    assert key_values['${a.b}']['3'] == 2
    assert key_values['${a.b}']['d'][0] == 'red'

def test_retrieve_value_from_response_invalid_key(self):
    """An unresolvable dot-path raises HttpActionFailure naming the missing key."""
    keys = ["d.e.f", 'g.h']
    resp_msg = {
        "a": {
            "b": {
                "3": 2,
                "43": 30,
                "c": [],
                "d": ['red', 'buggy', 'bumpers'],
            }
        }
    }
    try:
        ActionUtility.retrieve_value_from_response(keys, resp_msg)
        assert False
    except HttpActionFailure as e:
        assert str(e) == 'Unable to retrieve value for key from HTTP response: \'d\''
| true
| true
|
f704ce218171769e1c7e83c8096eabe16908d3e6
| 848
|
py
|
Python
|
des039.py
|
LeonardoPereirajr/Curso_em_video_Python
|
9d8a97ba3389c8e86b37dfd089fab5d04adc146d
|
[
"MIT"
] | null | null | null |
des039.py
|
LeonardoPereirajr/Curso_em_video_Python
|
9d8a97ba3389c8e86b37dfd089fab5d04adc146d
|
[
"MIT"
] | null | null | null |
des039.py
|
LeonardoPereirajr/Curso_em_video_Python
|
9d8a97ba3389c8e86b37dfd089fab5d04adc146d
|
[
"MIT"
] | null | null | null |
from datetime import date


def classifica(idade):
    """Return the swim category (str) for a given age in years.

    Categories: MIRIM up to 9, INFANTIL 10-14, JUNIOR 15-19,
    SENIOR exactly 20, MASTER above 20.

    Bug fixed: the original chain used ``cont >= 19`` then ``cont >= 10``,
    which misclassified ages 15-18 as INFANTIL instead of JUNIOR.
    """
    if idade <= 9:
        return 'MIRIM'
    elif idade <= 14:
        return 'INFANTIL'
    elif idade <= 19:
        return 'JUNIOR'
    elif idade == 20:
        return 'SENIOR'
    return 'MASTER'


if __name__ == '__main__':
    # Interactive entry point: ask for birth year, compute the age this year.
    ano = int(input('ANO de nascimento : '))
    ano_hoje = date.today().year
    cont = ano_hoje - ano
    print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
    print(' Sua classificação é {}. '.format(classifica(cont)))
| 44.631579
| 82
| 0.597877
|
from datetime import date
ano = int(input('ANO de nascimento : '))
ano_hoje = date.today().year
cont = ano_hoje - ano
if cont > 20 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é MASTER. ')
elif cont == 20 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é SENIOR. ')
elif cont >= 19 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é JUNIOR. ')
elif cont >=10 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é INFANTIL. ')
elif cont <= 9 :
print(' Quem nasceu em {} tem {} anos em {} . '.format(ano, cont, ano_hoje))
print(' Sua classificação é MIRIM. ')
| true
| true
|
f704ce6b1a36073883d29f973825dd98381f1c4b
| 443
|
py
|
Python
|
setup.py
|
msikma/upprint
|
b617eb7d0a661fb3107d85471520270647766906
|
[
"MIT"
] | null | null | null |
setup.py
|
msikma/upprint
|
b617eb7d0a661fb3107d85471520270647766906
|
[
"MIT"
] | null | null | null |
setup.py
|
msikma/upprint
|
b617eb7d0a661fb3107d85471520270647766906
|
[
"MIT"
] | null | null | null |
"""Packaging script for upprint.

Switched from ``distutils.core.setup`` to the drop-in ``setuptools.setup``:
distutils is deprecated (PEP 632) and removed from the standard library in
Python 3.12, while setuptools accepts the same keyword arguments.
"""
from setuptools import setup

setup(
    name='upprint',
    packages=['upprint'],
    version='0.1',
    description='Modified version of pprint with better Unicode output',
    author='Michiel Sikma',
    author_email='michiel@sikma.org',
    url='https://github.com/msikma/upprint',
    download_url='https://github.com/msikma/upprint/tarball/0.1',
    keywords=['pprint', 'debugging', 'print'],
    classifiers=[],
    license='MIT'
)
| 27.6875
| 72
| 0.670429
|
from distutils.core import setup
setup(
name='upprint',
packages=['upprint'],
version='0.1',
description='Modified version of pprint with better Unicode output',
author='Michiel Sikma',
author_email='michiel@sikma.org',
url='https://github.com/msikma/upprint',
download_url='https://github.com/msikma/upprint/tarball/0.1',
keywords=['pprint', 'debugging', 'print'],
classifiers=[],
license='MIT'
)
| true
| true
|
f704ce98035e4ea27201a97a216cc694ba65d79b
| 4,375
|
py
|
Python
|
tests/test_tokenizers.py
|
DLPerf/gretel-synthetics
|
58a820327e283ecc224de3686aa035b7e32bfaa6
|
[
"Apache-2.0"
] | 252
|
2020-03-02T16:41:11.000Z
|
2022-03-28T20:57:15.000Z
|
tests/test_tokenizers.py
|
DLPerf/gretel-synthetics
|
58a820327e283ecc224de3686aa035b7e32bfaa6
|
[
"Apache-2.0"
] | 39
|
2020-03-16T18:33:48.000Z
|
2021-11-10T19:13:53.000Z
|
tests/test_tokenizers.py
|
DLPerf/gretel-synthetics
|
58a820327e283ecc224de3686aa035b7e32bfaa6
|
[
"Apache-2.0"
] | 36
|
2020-05-21T14:45:27.000Z
|
2022-03-01T01:32:58.000Z
|
from pathlib import Path
from copy import deepcopy
import pytest
from gretel_synthetics.config import BaseConfig
import gretel_synthetics.tokenizers as tok
class SimpleConfig(BaseConfig):
    """Minimal BaseConfig implementation for simple tokenization tests."""

    def get_generator_class(self):
        # Generation is irrelevant for tokenizer-only tests.
        return None

    def get_training_callable(self):
        # Training is irrelevant for tokenizer-only tests.
        return None
@pytest.fixture(scope="module")
def input_data_path():
    """Absolute path (str) to the small text corpus used by the tokenizer tests."""
    return str(
        (Path(__file__).parent / "data" / "smol.txt").resolve()
    )

# First line of the corpus; reused verbatim in the char-tokenizer assertions.
L1 = "Once upon a midnight dreary, while I pondered, weak and weary,\n"
def test_single_char(input_data_path, tmpdir):
    """Train a char tokenizer and verify round-trip encode/decode plus vocab size."""
    # NOTE: Here the line delim should not matter for this char tokenizer
    config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir, field_delimiter=",")
    trainer = tok.CharTokenizerTrainer(config=config)

    # We need this for batch mode, so verify it can be copied
    deepcopy(trainer)

    line_iter = trainer.annotate_data()

    # Assert that we didn't do any annotation
    line_one = next(line_iter)
    assert line_one == L1

    # Let's train the tokenizer, and now reload it back in
    trainer.train()

    tokenizer = tok.CharTokenizer.load(tmpdir)
    assert tokenizer.total_vocab_size == 32

    # NOTE: this is because we default to using this token as a delim
    # in the main config, but this tokenizer doesn't do anything with it anyway
    assert tokenizer.field_delimiter == ","
    assert tokenizer.field_delimiter_token == "<d>"

    # Expected per-character ids for line one of the corpus.
    l1_ids = [6, 21, 11, 13, 1, 28, 23, 22, 21, 1, 9, 1, 20, 17, 12, 21, 17, 15, 16, 27, 1, 12, 25, 13, 9, 25, 31, 2, 1, 30, 16, 17, 19, 13, 1, 5, 1, 23, 22, 21, 12, 13, 25, 13, 12, 2, 1, 30, 13, 9, 18, 1, 9, 21, 12, 1, 30, 13, 9, 25, 31, 2, 0]
    assert tokenizer.encode_to_ids(L1) == l1_ids
    assert tokenizer.decode_from_ids(l1_ids) == L1

    # Check the factory
    assert isinstance(
        tok.tokenizer_from_model_dir(tmpdir),
        tok.CharTokenizer
    )
def test_single_char_small_vocab(input_data_path, tmpdir):
    """A vocab too small for the corpus must raise TokenizerError on encode/decode."""
    config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir)
    trainer = tok.CharTokenizerTrainer(config=config, vocab_size=10)
    trainer.annotate_data()
    trainer.train()
    tokenizer = tok.CharTokenizer.load(tmpdir)
    assert tokenizer.total_vocab_size == 10

    # Too small of a vocab...
    with pytest.raises(tok.TokenizerError):
        tokenizer.encode_to_ids("Once upon")

    with pytest.raises(tok.TokenizerError):
        tokenizer.decode_from_ids([11])
def test_sp(input_data_path, tmpdir):
    """SentencePiece trainer annotates lines with <n> and round-trips encode/decode."""
    config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir)
    trainer = tok.SentencePieceTokenizerTrainer(config=config)
    # Must be copyable for batch mode.
    deepcopy(trainer)
    line_iter = trainer.annotate_data()
    line_one = next(line_iter)
    # Annotation appends the <n> newline token to each line.
    assert line_one == "Once upon a midnight dreary, while I pondered, weak and weary,<n>\n"
    trainer.train()
    tokenizer = tok.SentencePieceTokenizer.load(tmpdir)
    # Expected subword ids for the annotated first line.
    ids = [41, 54, 8, 5, 11, 36, 10, 14, 16, 13, 17, 16, 22, 20, 15, 5, 13, 25, 32, 7, 6, 51, 42, 9, 8, 5, 23, 5, 36, 13, 48, 13, 6, 49, 62, 10, 28, 49, 25, 7, 6, 3]
    assert tokenizer.encode_to_ids("Once upon a midnight dreary, while I pondered, weak and weary,<n>\n") == ids
    assert tokenizer.decode_from_ids(ids) == "Once upon a midnight dreary, while I pondered, weak and weary,<n>"
def test_sp_field_delim(input_data_path, tmpdir):
    """With a field delimiter, commas are annotated as <d> and decoded back to commas."""
    config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir, field_delimiter=",")
    trainer = tok.SentencePieceTokenizerTrainer(config=config)
    line_iter = trainer.annotate_data()
    line_one = next(line_iter)
    # Annotation replaces "," with <d> and appends <n>.
    assert line_one == "Once upon a midnight dreary<d> while I pondered<d> weak and weary<d><n>\n"
    trainer.train()
    tokenizer = tok.SentencePieceTokenizer.load(tmpdir)
    # Expected subword ids for the annotated first line.
    ids = [40, 53, 7, 5, 10, 35, 9, 13, 15, 12, 16, 15, 21, 19, 14, 5, 12, 24, 30, 6, 4, 51, 41, 8, 7, 5, 23, 5, 35, 12, 47, 12, 4, 48, 61, 9, 27, 48, 24, 6, 4, 3]
    assert tokenizer.encode_to_ids("Once upon a midnight dreary<d> while I pondered<d> weak and weary<d><n>\n") == ids
    # Decoding restores the real delimiter character.
    assert tokenizer.decode_from_ids(ids) == "Once upon a midnight dreary, while I pondered, weak and weary,<n>"

    # Check the factory
    assert isinstance(
        tok.tokenizer_from_model_dir(tmpdir),
        tok.SentencePieceTokenizer
    )
| 36.157025
| 244
| 0.688229
|
from pathlib import Path
from copy import deepcopy
import pytest
from gretel_synthetics.config import BaseConfig
import gretel_synthetics.tokenizers as tok
class SimpleConfig(BaseConfig):
def get_generator_class(self):
return None
def get_training_callable(self):
return None
@pytest.fixture(scope="module")
def input_data_path():
return str(
(Path(__file__).parent / "data" / "smol.txt").resolve()
)
L1 = "Once upon a midnight dreary, while I pondered, weak and weary,\n"
def test_single_char(input_data_path, tmpdir):
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir, field_delimiter=",")
trainer = tok.CharTokenizerTrainer(config=config)
deepcopy(trainer)
line_iter = trainer.annotate_data()
line_one = next(line_iter)
assert line_one == L1
# Let's train the tokenizer, and now reload it back in
trainer.train()
tokenizer = tok.CharTokenizer.load(tmpdir)
assert tokenizer.total_vocab_size == 32
assert tokenizer.field_delimiter == ","
assert tokenizer.field_delimiter_token == "<d>"
l1_ids = [6, 21, 11, 13, 1, 28, 23, 22, 21, 1, 9, 1, 20, 17, 12, 21, 17, 15, 16, 27, 1, 12, 25, 13, 9, 25, 31, 2, 1, 30, 16, 17, 19, 13, 1, 5, 1, 23, 22, 21, 12, 13, 25, 13, 12, 2, 1, 30, 13, 9, 18, 1, 9, 21, 12, 1, 30, 13, 9, 25, 31, 2, 0]
assert tokenizer.encode_to_ids(L1) == l1_ids
assert tokenizer.decode_from_ids(l1_ids) == L1
# Check the factory
assert isinstance(
tok.tokenizer_from_model_dir(tmpdir),
tok.CharTokenizer
)
def test_single_char_small_vocab(input_data_path, tmpdir):
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir)
trainer = tok.CharTokenizerTrainer(config=config, vocab_size=10)
trainer.annotate_data()
trainer.train()
tokenizer = tok.CharTokenizer.load(tmpdir)
assert tokenizer.total_vocab_size == 10
# Too small of a vocab...
with pytest.raises(tok.TokenizerError):
tokenizer.encode_to_ids("Once upon")
with pytest.raises(tok.TokenizerError):
tokenizer.decode_from_ids([11])
def test_sp(input_data_path, tmpdir):
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir)
trainer = tok.SentencePieceTokenizerTrainer(config=config)
deepcopy(trainer)
line_iter = trainer.annotate_data()
line_one = next(line_iter)
assert line_one == "Once upon a midnight dreary, while I pondered, weak and weary,<n>\n"
trainer.train()
tokenizer = tok.SentencePieceTokenizer.load(tmpdir)
ids = [41, 54, 8, 5, 11, 36, 10, 14, 16, 13, 17, 16, 22, 20, 15, 5, 13, 25, 32, 7, 6, 51, 42, 9, 8, 5, 23, 5, 36, 13, 48, 13, 6, 49, 62, 10, 28, 49, 25, 7, 6, 3]
assert tokenizer.encode_to_ids("Once upon a midnight dreary, while I pondered, weak and weary,<n>\n") == ids
assert tokenizer.decode_from_ids(ids) == "Once upon a midnight dreary, while I pondered, weak and weary,<n>"
def test_sp_field_delim(input_data_path, tmpdir):
config = SimpleConfig(input_data_path=input_data_path, checkpoint_dir=tmpdir, field_delimiter=",")
trainer = tok.SentencePieceTokenizerTrainer(config=config)
line_iter = trainer.annotate_data()
line_one = next(line_iter)
assert line_one == "Once upon a midnight dreary<d> while I pondered<d> weak and weary<d><n>\n"
trainer.train()
tokenizer = tok.SentencePieceTokenizer.load(tmpdir)
ids = [40, 53, 7, 5, 10, 35, 9, 13, 15, 12, 16, 15, 21, 19, 14, 5, 12, 24, 30, 6, 4, 51, 41, 8, 7, 5, 23, 5, 35, 12, 47, 12, 4, 48, 61, 9, 27, 48, 24, 6, 4, 3]
assert tokenizer.encode_to_ids("Once upon a midnight dreary<d> while I pondered<d> weak and weary<d><n>\n") == ids
assert tokenizer.decode_from_ids(ids) == "Once upon a midnight dreary, while I pondered, weak and weary,<n>"
# Check the factory
assert isinstance(
tok.tokenizer_from_model_dir(tmpdir),
tok.SentencePieceTokenizer
)
| true
| true
|
f704d041172d3126183353d0f84901d0084003b4
| 82,008
|
py
|
Python
|
aiida/backends/tests/export_and_import.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/tests/export_and_import.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/backends/tests/export_and_import.py
|
joepvd/aiida_core
|
6e9711046753332933f982971db1d7ac7e7ade58
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Tests for the export and import routines.
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import io
import six
from six.moves import range, zip
from aiida.backends.testbase import AiidaTestCase
from aiida.orm.importexport import import_data
from aiida import orm
class TestSpecificImport(AiidaTestCase):
def setUp(self):
    """Start each test from a clean database repopulated with the base fixtures."""
    super(TestSpecificImport, self).setUp()
    self.clean_db()
    self.insert_data()
def test_simple_import(self):
    """
    This is a very simple test which checks that an export file with nodes
    that are not associated to a computer is imported correctly. In Django
    when such nodes are exported, there is an empty set for computers
    in the export file. In SQLA there is such a set only when a computer is
    associated with the exported nodes. When an empty computer set is
    found at the export file (when imported to an SQLA profile), the SQLA
    import code used to crash. This test demonstrates this problem.
    """
    import tempfile
    from aiida.orm.data.parameter import ParameterData
    from aiida.orm.importexport import export, import_data
    from aiida.orm.node import Node
    from aiida.orm.querybuilder import QueryBuilder

    # A stored ParameterData with no computer association.
    parameters = ParameterData(dict={
        'Pr': {
            'cutoff': 50.0,
            'pseudo_type': 'Wentzcovitch',
            'dual': 8,
            'cutoff_units': 'Ry'
        },
        'Ru': {
            'cutoff': 40.0,
            'pseudo_type': 'SG15',
            'dual': 4,
            'cutoff_units': 'Ry'
        },
    }).store()

    with tempfile.NamedTemporaryFile() as handle:
        nodes = [parameters]
        export(nodes, outfile=handle.name, overwrite=True, silent=True)

        # Check that we have the expected number of nodes in the database
        self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))

        # Clean the database and verify there are no nodes left
        self.clean_db()
        self.assertEquals(QueryBuilder().append(Node).count(), 0)

        # After importing we should have the original number of nodes again
        import_data(handle.name, silent=True)
        self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))
    def test_cycle_structure_data(self):
        """
        Create an export with some Calculation and Data nodes and import it after having
        cleaned the database. Verify that the nodes and their attributes are restored
        properly after importing the created export archive
        """
        import tempfile
        from aiida.common.links import LinkType
        from aiida.orm.calculation import Calculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.data.remote import RemoteData
        from aiida.orm.importexport import export, import_data
        from aiida.orm.node import Node
        from aiida.orm.querybuilder import QueryBuilder
        # Reference values used both to build the structure and to verify it
        # after the export/import round trip.
        test_label = 'Test structure'
        test_cell = [
            [8.34, 0.0, 0.0],
            [0.298041701839357, 8.53479766274308, 0.0],
            [0.842650688117053, 0.47118495164127, 10.6965192730702]
        ]
        test_kinds = [
            {
                'symbols': [u'Fe'],
                'weights': [1.0],
                'mass': 55.845,
                'name': u'Fe'
            },
            {
                'symbols': [u'S'],
                'weights': [1.0],
                'mass': 32.065,
                'name': u'S'
            }
        ]
        structure = StructureData(cell=test_cell)
        structure.append_atom(symbols=['Fe'], position=[0, 0, 0])
        structure.append_atom(symbols=['S'], position=[2, 2, 2])
        structure.label = test_label
        structure.store()
        # Build the provenance graph:
        # parent_calculation -> remote_folder -> child_calculation -> structure
        parent_calculation = Calculation()
        parent_calculation._set_attr('key', 'value')
        parent_calculation.store()
        child_calculation = Calculation()
        child_calculation._set_attr('key', 'value')
        child_calculation.store()
        remote_folder = RemoteData(computer=self.computer, remote_path='/').store()
        remote_folder.add_link_from(parent_calculation, link_type=LinkType.CREATE)
        child_calculation.add_link_from(remote_folder, link_type=LinkType.INPUT)
        structure.add_link_from(child_calculation, link_type=LinkType.CREATE)
        with tempfile.NamedTemporaryFile() as handle:
            nodes = [structure, child_calculation, parent_calculation, remote_folder]
            export(nodes, outfile=handle.name, overwrite=True, silent=True)
            # Check that we have the expected number of nodes in the database
            self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))
            # Clean the database and verify there are no nodes left
            self.clean_db()
            self.assertEquals(QueryBuilder().append(Node).count(), 0)
            # After importing we should have the original number of nodes again
            import_data(handle.name, silent=True)
            self.assertEquals(QueryBuilder().append(Node).count(), len(nodes))
            # Verify that Calculations have non-empty attribute dictionaries
            qb = QueryBuilder().append(Calculation)
            for [calculation] in qb.iterall():
                self.assertIsInstance(calculation.get_attrs(), dict)
                self.assertNotEquals(len(calculation.get_attrs()), 0)
            # Verify that the structure data maintained its label, cell and kinds
            qb = QueryBuilder().append(StructureData)
            for [structure] in qb.iterall():
                self.assertEquals(structure.label, test_label)
                self.assertEquals(structure.cell, test_cell)
            qb = QueryBuilder().append(StructureData, project=['attributes.kinds'])
            for [kinds] in qb.iterall():
                self.assertEqual(len(kinds), 2)
                for kind in kinds:
                    self.assertIn(kind, test_kinds)
            # Check that there is a StructureData that is an output of a Calculation
            qb = QueryBuilder()
            qb.append(Calculation, project=['uuid'], tag='calculation')
            qb.append(StructureData, output_of='calculation')
            self.assertGreater(len(qb.all()), 0)
            # Check that there is a RemoteData that is a child and parent of a Calculation
            qb = QueryBuilder()
            qb.append(Calculation, tag='parent')
            qb.append(RemoteData, project=['uuid'], output_of='parent', tag='remote')
            qb.append(Calculation, output_of='remote')
            self.assertGreater(len(qb.all()), 0)
class TestSimple(AiidaTestCase):
def setUp(self):
self.clean_db()
self.insert_data()
    def tearDown(self):
        # No per-test cleanup needed: setUp resets the database before each test.
        pass
    def test_0(self):
        """Check that base-type nodes (Str, Int, Float, Bool) survive an export/import cycle."""
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.data.base import Str, Int, Float, Bool
        from aiida.orm.importexport import export
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # producing values for each base type
            values = ("Hello", 6, -1.2399834e12, False)  # , ["Bla", 1, 1e-10])
            filename = os.path.join(temp_folder, "export.tar.gz")
            # producing nodes:
            nodes = [cls(val).store() for val, cls in zip(values, (Str, Int, Float, Bool))]
            # my uuid - list to reload the node:
            uuids = [n.uuid for n in nodes]
            # exporting the nodes:
            export(nodes, outfile=filename, silent=True)
            # cleaning:
            self.clean_db()
            # Importing back the data:
            import_data(filename, silent=True)
            # Checking whether values are preserved:
            for uuid, refval in zip(uuids, values):
                self.assertEquals(load_node(uuid).value, refval)
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_1(self):
        """Check that node attributes survive an export/import round trip.

        A StructureData node linked as input of a JobCalculation is exported,
        the database is wiped, the archive re-imported, and every attribute of
        the original nodes is compared against the re-imported ones.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import DataFactory
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.importexport import export
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            StructureData = DataFactory('structure')
            sd = StructureData()
            sd.store()
            calc = JobCalculation()
            calc.set_computer(self.computer)
            calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            calc.store()
            calc.add_link_from(sd)
            pks = [sd.pk, calc.pk]
            # Snapshot all attributes of the two nodes, keyed by uuid, so they
            # can be compared after the import.
            attrs = {}
            for pk in pks:
                node = load_node(pk)
                attrs[node.uuid] = dict()
                for k in node.attrs():
                    attrs[node.uuid][k] = node.get_attr(k)
            filename = os.path.join(temp_folder, "export.tar.gz")
            export([calc], outfile=filename, silent=True)
            self.clean_db()
            # NOTE: it is better to load new nodes by uuid, rather than assuming
            # that they will have the first 3 pks. In fact, a recommended policy in
            # databases is that pk always increment, even if you've deleted elements
            import_data(filename, silent=True)
            for uuid in attrs.keys():
                node = load_node(uuid)
                for k in attrs[uuid].keys():
                    self.assertEquals(attrs[uuid][k], node.get_attr(k))
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_2(self):
        """
        Test the check for the export format version.

        An archive is created, unpacked, its metadata.json rewritten with a
        bogus export version (0.0), repacked, and the import is expected to
        raise IncompatibleArchiveVersionError.
        """
        import tarfile
        import os
        import shutil
        import tempfile
        from aiida.common import exceptions
        from aiida.orm import DataFactory
        from aiida.orm.importexport import export
        import aiida.utils.json as json
        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()
        try:
            StructureData = DataFactory('structure')
            sd = StructureData()
            sd.store()
            filename = os.path.join(export_file_tmp_folder, "export.tar.gz")
            export([sd], outfile=filename, silent=True)
            with tarfile.open(filename, "r:gz", format=tarfile.PAX_FORMAT) as tar:
                tar.extractall(unpack_tmp_folder)
            with io.open(os.path.join(unpack_tmp_folder,
                                      'metadata.json'), 'r', encoding='utf8') as fhandle:
                metadata = json.load(fhandle)
            # Corrupt the version so the import should refuse the archive
            metadata['export_version'] = 0.0
            # NOTE(review): file is opened in binary mode; this assumes
            # aiida.utils.json.dump can write to a binary handle -- confirm.
            with io.open(os.path.join(unpack_tmp_folder, 'metadata.json'),
                         'wb') as fhandle:
                json.dump(metadata, fhandle)
            with tarfile.open(filename, "w:gz", format=tarfile.PAX_FORMAT) as tar:
                tar.add(unpack_tmp_folder, arcname="")
            self.tearDownClass()
            self.setUpClass()
            with self.assertRaises(exceptions.IncompatibleArchiveVersionError):
                import_data(filename, silent=True)
        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
    def test_3(self):
        """
        Test importing of nodes, that have links to unknown nodes.

        A link to a non-existing uuid is injected into the archive's data.json:
        a plain import must raise ValueError, while passing
        ``ignore_unknown_nodes=True`` must succeed and preserve the node.
        """
        import tarfile
        import os
        import shutil
        import tempfile
        from aiida.orm.importexport import export
        from aiida.common.folders import SandboxFolder
        from aiida.orm.data.structure import StructureData
        from aiida.orm import load_node
        import aiida.utils.json as json
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            node_label = "Test structure data"
            sd = StructureData()
            sd.label = str(node_label)
            sd.store()
            filename = os.path.join(temp_folder, "export.tar.gz")
            export([sd], outfile=filename, silent=True)
            unpack = SandboxFolder()
            with tarfile.open(
                    filename, "r:gz", format=tarfile.PAX_FORMAT) as tar:
                tar.extractall(unpack.abspath)
            with io.open(unpack.get_abs_path('data.json'), 'r', encoding='utf8') as fhandle:
                metadata = json.load(fhandle)
            # Inject a link whose input uuid does not exist in the archive
            metadata['links_uuid'].append({
                'output': sd.uuid,
                'input': 'non-existing-uuid',
                'label': 'parent'
            })
            with io.open(unpack.get_abs_path('data.json'), 'wb') as fhandle:
                json.dump(metadata, fhandle)
            with tarfile.open(
                    filename, "w:gz", format=tarfile.PAX_FORMAT) as tar:
                tar.add(unpack.abspath, arcname="")
            self.clean_db()
            with self.assertRaises(ValueError):
                import_data(filename, silent=True)
            import_data(filename, ignore_unknown_nodes=True, silent=True)
            self.assertEquals(load_node(sd.uuid).label, node_label)
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_4(self):
        """
        Test control of licenses.

        Exercises ``allowed_licenses`` / ``forbidden_licenses`` both as lists
        and as filter callables; a filter that raises must surface as a
        LicensingException as well.
        """
        from aiida.common.exceptions import LicensingException
        from aiida.common.folders import SandboxFolder
        from aiida.orm.importexport import export_tree
        from aiida.orm import DataFactory
        StructureData = DataFactory('structure')
        sd = StructureData()
        sd.source = {'license': 'GPL'}
        sd.store()
        folder = SandboxFolder()
        export_tree([sd], folder=folder, silent=True,
                    allowed_licenses=['GPL'])
        # Folder should contain two files of metadata + nodes/
        self.assertEquals(len(folder.get_content_list()), 3)
        folder = SandboxFolder()
        export_tree([sd], folder=folder, silent=True,
                    forbidden_licenses=['Academic'])
        # Folder should contain two files of metadata + nodes/
        self.assertEquals(len(folder.get_content_list()), 3)
        folder = SandboxFolder()
        with self.assertRaises(LicensingException):
            export_tree([sd], folder=folder, silent=True,
                        allowed_licenses=['CC0'])
        folder = SandboxFolder()
        with self.assertRaises(LicensingException):
            export_tree([sd], folder=folder, silent=True,
                        forbidden_licenses=['GPL'])
        def cc_filter(license):
            # Accept only Creative Commons licenses
            return license.startswith('CC')
        def gpl_filter(license):
            return license == 'GPL'
        def crashing_filter(license):
            raise NotImplementedError("not implemented yet")
        folder = SandboxFolder()
        with self.assertRaises(LicensingException):
            export_tree([sd], folder=folder, silent=True,
                        allowed_licenses=cc_filter)
        folder = SandboxFolder()
        with self.assertRaises(LicensingException):
            export_tree([sd], folder=folder, silent=True,
                        forbidden_licenses=gpl_filter)
        folder = SandboxFolder()
        with self.assertRaises(LicensingException):
            export_tree([sd], folder=folder, silent=True,
                        allowed_licenses=crashing_filter)
        folder = SandboxFolder()
        with self.assertRaises(LicensingException):
            export_tree([sd], folder=folder, silent=True,
                        forbidden_licenses=crashing_filter)
    def test_5(self):
        """
        This test checks that nodes belonging to different users are correctly
        exported & imported.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.common.datastructures import calc_states
        from aiida.common.links import LinkType
        from aiida.common.utils import get_configured_user_email
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend).store()
            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.set_user(user)
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)
            # sd2 also belongs to the new user; jc2/sd3 below belong to the
            # default (configured) user
            sd2 = StructureData()
            sd2.set_user(user)
            sd2.label = 'sd2'
            sd2.store()
            sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE)  # I assume jc1 CREATED sd2
            jc2 = JobCalculation()
            jc2.set_computer(self.computer)
            jc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc2.label = 'jc2'
            jc2.store()
            jc2.add_link_from(sd2, label='l2')
            jc2._set_state(calc_states.PARSING)
            sd3 = StructureData()
            sd3.label = 'sd3'
            sd3.store()
            sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)
            uuids_u1 = [sd1.uuid, jc1.uuid, sd2.uuid]
            uuids_u2 = [jc2.uuid, sd3.uuid]
            filename = os.path.join(temp_folder, "export.tar.gz")
            # Exporting sd3 pulls in its whole provenance (all five nodes)
            export([sd3], outfile=filename, silent=True)
            self.clean_db()
            import_data(filename, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids_u1:
                node = load_node(uuid=uuid)
                self.assertEquals(node.get_user().email, new_email)
            for uuid in uuids_u2:
                self.assertEquals(load_node(uuid).get_user().email,
                                  get_configured_user_email())
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_6(self):
        """
        This test checks that nodes belonging to user A (which is not the
        default user) can be correctly exported, imported, enriched with nodes
        from the default user, re-exported & re-imported and that in the end
        all the nodes that have been finally imported belonging to the right
        users.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.common.datastructures import calc_states
        from aiida.common.links import LinkType
        from aiida.common.utils import get_configured_user_email
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend).store()
            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.set_user(user)
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)
            # Create some nodes from a different user
            sd2 = StructureData()
            sd2.set_user(user)
            sd2.label = 'sd2'
            sd2.store()
            sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE)
            # Set the jc1 to FINISHED
            jc1._set_state(calc_states.FINISHED)
            # At this point we export the generated data
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([sd2], outfile=filename1, silent=True)
            uuids1 = [sd1.uuid, jc1.uuid, sd2.uuid]
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids1:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            # Now we continue to generate more data based on the imported
            # data
            sd2_imp = load_node(sd2.uuid)
            jc2 = JobCalculation()
            jc2.set_computer(self.computer)
            jc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc2.label = 'jc2'
            jc2.store()
            jc2.add_link_from(sd2_imp, label='l2')
            jc2._set_state(calc_states.PARSING)
            sd3 = StructureData()
            sd3.label = 'sd3'
            sd3.store()
            sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)
            # Set the jc2 to FINISHED
            jc2._set_state(calc_states.FINISHED)
            # Store the UUIDs of the nodes that should be checked
            # if they can be imported correctly.
            uuids2 = [jc2.uuid, sd3.uuid]
            filename2 = os.path.join(temp_folder, "export2.tar.gz")
            export([sd3], outfile=filename2, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(filename2, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids1:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            for uuid in uuids2:
                self.assertEquals(load_node(uuid).get_user().email,
                                  get_configured_user_email())
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_7(self):
        """
        This test checks that nodes that belong to a specific group are
        correctly imported and exported.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.common.datastructures import calc_states
        from aiida.orm.querybuilder import QueryBuilder
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend)
            user.store()
            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.set_user(user)
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)
            # Create a group and add the data inside
            from aiida.orm.group import Group
            g1 = Group(name="node_group")
            g1.store()
            g1.add_nodes([sd1, jc1])
            g1_uuid = g1.uuid
            # At this point we export the generated data
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([sd1, jc1, g1], outfile=filename1,
                   silent=True)
            n_uuids = [sd1.uuid, jc1.uuid]
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in n_uuids:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            # Check that the exported group is imported correctly
            qb = QueryBuilder()
            qb.append(Group, filters={'uuid': {'==': g1_uuid}})
            self.assertEquals(qb.count(), 1, "The group was not found.")
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_group_export(self):
        """
        Test that when exporting just a group, its nodes are also exported
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend)
            user.store()
            # Create a structure data node
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            # Create a group and add the data inside
            from aiida.orm.group import Group
            g1 = Group(name="node_group")
            g1.store()
            g1.add_nodes([sd1])
            g1_uuid = g1.uuid
            # Export ONLY the group: its member node must come along implicitly
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([g1], outfile=filename1, silent=True)
            n_uuids = [sd1.uuid]
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in n_uuids:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            # Check that the exported group is imported correctly
            qb = QueryBuilder()
            qb.append(Group, filters={'uuid': {'==': g1_uuid}})
            self.assertEquals(qb.count(), 1, "The group was not found.")
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_workfunction_1(self):
        """Check that exporting a workfunction result pulls in its real inputs only.

        Nodes that were merely candidates in a selection (here: the losers of a
        ``max_`` pick) must NOT be part of the export of the final result.
        """
        import shutil, os, tempfile
        from aiida.work.workfunctions import workfunction
        from aiida.orm.data.float import Float
        from aiida.orm import load_node
        from aiida.orm.importexport import export
        from aiida.common.exceptions import NotExistent
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        @workfunction
        def add(a, b):
            """Add 2 numbers"""
            return {'res': Float(a + b)}
        def max_(**kwargs):
            """select the max value"""
            max_val = max([(v.value, v) for v in kwargs.values()])
            return {'res': max_val[1]}
        try:
            # I'm creating a bunch of numbers
            a, b, c, d, e = (Float(i) for i in range(5))
            # this adds the maximum number between bcde to a.
            res = add(a=a, b=max_(b=b, c=c, d=d, e=e)['res'])['res']
            # These are the uuids that would be exported as well (as parents) if I wanted the final result
            uuids_values = [(a.uuid, a.value), (e.uuid, e.value), (res.uuid, res.value)]
            # These are the uuids that shouldn't be exported since it's a selection.
            not_wanted_uuids = [v.uuid for v in (b, c, d)]
            # At this point we export the generated data
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([res], outfile=filename1, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            # Check that the imported nodes are correctly imported and that the value is preserved
            for uuid, value in uuids_values:
                self.assertEquals(load_node(uuid).value, value)
            for uuid in not_wanted_uuids:
                with self.assertRaises(NotExistent):
                    load_node(uuid)
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_workcalculation_2(self):
        """Check export/import of a WorkCalculation output across CALL/INPUT/CREATE links.

        Graph: input_1 -> master -CALL-> slave, input_2 -> slave,
        master -CREATE-> output_1; only output_1 is exported explicitly.
        """
        import shutil, os, tempfile
        from aiida.orm.calculation.work import WorkCalculation
        from aiida.orm.data.float import Float
        from aiida.orm.data.int import Int
        from aiida.orm import load_node
        from aiida.common.links import LinkType
        from aiida.orm.importexport import export
        from aiida.common.exceptions import NotExistent
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            master = WorkCalculation().store()
            slave = WorkCalculation().store()
            input_1 = Int(3).store()
            input_2 = Int(5).store()
            output_1 = Int(2).store()
            master.add_link_from(input_1, 'input_1', link_type=LinkType.INPUT)
            slave.add_link_from(master, 'CALL', link_type=LinkType.CALL)
            slave.add_link_from(input_2, 'input_2', link_type=LinkType.INPUT)
            output_1.add_link_from(master, 'CREATE', link_type=LinkType.CREATE)
            uuids_values = [(v.uuid, v.value) for v in (output_1,)]
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([output_1], outfile=filename1, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            for uuid, value in uuids_values:
                self.assertEquals(load_node(uuid).value, value)
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
def test_reexport(self):
"""
Export something, import and reexport and check if everything is valid.
The export is rather easy::
___ ___ ___
| | INP | | CREATE | |
| p | --> | c | -----> | a |
|___| |___| |___|
"""
import os, shutil, tempfile, numpy as np, string, random
from datetime import datetime
from aiida.orm import Calculation, load_node, Group
from aiida.orm.data.array import ArrayData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.importexport import export
from aiida.common.hashing import make_hash
from aiida.common.links import LinkType
def get_hash_from_db_content(groupname):
qb = QueryBuilder()
qb.append(ParameterData, tag='p', project='*')
qb.append(Calculation, tag='c', project='*', edge_tag='p2c', edge_project=('label', 'type'))
qb.append(ArrayData, tag='a', project='*', edge_tag='c2a', edge_project=('label', 'type'))
qb.append(Group, filters={'name': groupname}, project='*', tag='g', group_of='a')
# I want the query to contain something!
self.assertTrue(qb.count() > 0)
# The hash is given from the preservable entries in an export-import cycle,
# uuids, attributes, labels, descriptions, arrays, link-labels, link-types:
hash_ = make_hash([(
item['p']['*'].get_attrs(),
item['p']['*'].uuid,
item['p']['*'].label,
item['p']['*'].description,
item['c']['*'].uuid,
item['c']['*'].get_attrs(),
item['a']['*'].get_attrs(),
[item['a']['*'].get_array(name) for name in item['a']['*'].get_arraynames()],
item['a']['*'].uuid,
item['g']['*'].uuid,
item['g']['*'].name,
item['p2c']['label'],
item['p2c']['type'],
item['c2a']['label'],
item['c2a']['type'],
item['g']['*'].name,
) for item in qb.dict()])
return hash_
# Creating a folder for the import/export files
temp_folder = tempfile.mkdtemp()
chars = string.ascii_uppercase + string.digits
size = 10
groupname = 'test-group'
try:
nparr = np.random.random((4, 3, 2))
trial_dict = {}
# give some integers:
trial_dict.update({str(k): np.random.randint(100) for k in range(10)})
# give some floats:
trial_dict.update({str(k): np.random.random() for k in range(10, 20)})
# give some booleans:
trial_dict.update({str(k): bool(np.random.randint(1)) for k in range(20, 30)})
# give some datetime:
trial_dict.update({str(k): datetime(
year=2017,
month=np.random.randint(1, 12),
day=np.random.randint(1, 28)) for k in range(30, 40)})
# give some text:
trial_dict.update({str(k): ''.join(random.choice(chars) for _ in range(size)) for k in range(20, 30)})
p = ParameterData(dict=trial_dict)
p.label = str(datetime.now())
p.description = 'd_' + str(datetime.now())
p.store()
c = Calculation()
# setting also trial dict as attributes, but randomizing the keys)
(c._set_attr(str(int(k) + np.random.randint(10)), v) for k, v in trial_dict.items())
c.store()
a = ArrayData()
a.set_array('array', nparr)
a.store()
# LINKS
# the calculation has input the parameters-instance
c.add_link_from(p, label='input_parameters', link_type=LinkType.INPUT)
# I want the array to be an output of the calculation
a.add_link_from(c, label='output_array', link_type=LinkType.CREATE)
g = Group(name='test-group')
g.store()
g.add_nodes(a)
hash_from_dbcontent = get_hash_from_db_content(groupname)
# I export and reimport 3 times in a row:
for i in range(3):
# Always new filename:
filename = os.path.join(temp_folder, "export-{}.zip".format(i))
# Loading the group from the string
g = Group.get_from_string(groupname)
# exporting based on all members of the group
# this also checks if group memberships are preserved!
export([g] + [n for n in g.nodes], outfile=filename, silent=True)
# cleaning the DB!
self.clean_db()
# reimporting the data from the file
import_data(filename, silent=True, ignore_unknown_nodes=True)
# creating the hash from db content
new_hash = get_hash_from_db_content(groupname)
# I check for equality against the first hash created, which implies that hashes
# are equal in all iterations of this process
self.assertEqual(hash_from_dbcontent, new_hash)
finally:
# Deleting the created temporary folder
shutil.rmtree(temp_folder, ignore_errors=True)
class TestComplex(AiidaTestCase):
    def test_complex_graph_import_export(self):
        """
        This test checks that a small and bit complex graph can be correctly
        exported and imported.

        It will create the graph, store it to the database, export it to a file
        and import it. In the end it will check if the initial nodes are present
        at the imported graph.
        """
        import tempfile
        import shutil
        import os
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.folder import FolderData
        from aiida.orm.data.parameter import ParameterData
        from aiida.orm.data.remote import RemoteData
        from aiida.common.links import LinkType
        from aiida.orm.importexport import export, import_data
        from aiida.orm.utils import load_node
        from aiida.common.exceptions import NotExistent
        temp_folder = tempfile.mkdtemp()
        try:
            # Graph: calc1 -CREATE-> rd1; pd1, pd2, rd1 -INPUT-> calc2 -CREATE-> fd1
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            calc1.label = "calc1"
            calc1.store()
            calc1._set_state(u'RETRIEVING')
            pd1 = ParameterData()
            pd1.label = "pd1"
            pd1.store()
            pd2 = ParameterData()
            pd2.label = "pd2"
            pd2.store()
            rd1 = RemoteData()
            rd1.label = "rd1"
            rd1.set_remote_path("/x/y.py")
            rd1.set_computer(self.computer)
            rd1.store()
            rd1.add_link_from(calc1, link_type=LinkType.CREATE)
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            calc2.label = "calc2"
            calc2.store()
            calc2.add_link_from(pd1, link_type=LinkType.INPUT)
            calc2.add_link_from(pd2, link_type=LinkType.INPUT)
            calc2.add_link_from(rd1, link_type=LinkType.INPUT)
            calc2._set_state(u'SUBMITTING')
            fd1 = FolderData()
            fd1.label = "fd1"
            fd1.store()
            fd1.add_link_from(calc2, link_type=LinkType.CREATE)
            node_uuids_labels = {calc1.uuid: calc1.label, pd1.uuid: pd1.label,
                                 pd2.uuid: pd2.label, rd1.uuid: rd1.label,
                                 calc2.uuid: calc2.label, fd1.uuid: fd1.label}
            filename = os.path.join(temp_folder, "export.tar.gz")
            # Exporting the terminal node fd1 must pull in the whole graph
            export([fd1], outfile=filename, silent=True)
            self.clean_db()
            import_data(filename, silent=True, ignore_unknown_nodes=True)
            for uuid, label in node_uuids_labels.items():
                try:
                    load_node(uuid)
                except NotExistent:
                    self.fail("Node with UUID {} and label {} was not "
                              "found.".format(uuid, label))
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
class TestComputer(AiidaTestCase):
def setUp(self):
self.clean_db()
self.insert_data()
    def tearDown(self):
        # No per-test cleanup needed: setUp resets the database before each test.
        pass
    def test_same_computer_import(self):
        """
        Test that you can import nodes in steps without any problems. In this
        test we will import a first calculation and then a second one. The
        import should work as expected and have in the end two job
        calculations.

        Each calculation is related to the same computer. In the end we should
        have only one computer
        """
        import os
        import shutil
        import tempfile
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computers import Computer
        from aiida.orm.calculation.job import JobCalculation
        # Creating a folder for the import/export files
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()
        try:
            # Store two job calculation related to the same computer
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_option('resources', {"num_machines": 1,
                                           "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_option('resources', {"num_machines": 2,
                                           "num_mpiprocs_per_machine": 2})
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')
            # Store locally the computer name
            comp_name = six.text_type(self.computer.name)
            comp_uuid = six.text_type(self.computer.uuid)
            # Export the first job calculation
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1], outfile=filename1, silent=True)
            # Export the second job calculation
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2], outfile=filename2, silent=True)
            # Clean the local database
            self.clean_db()
            # Check that there are no computers
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            # NOTE(review): the concatenated message below is missing a space
            # ("computersin"); harmless since it only shows on failure.
            self.assertEqual(qb.count(), 0, "There should not be any computers"
                                            "in the database at this point.")
            # Check that there are no calculations
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any "
                                            "calculations in the database at "
                                            "this point.")
            # Import the first calculation
            import_data(filename1, silent=True)
            # Check that the calculation computer is imported correctly.
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")
            # Check that the referenced computer is imported correctly.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")
            # Store the id of the computer
            comp_id = qb.first()[2]
            # Import the second calculation
            import_data(filename2, silent=True)
            # Check that the number of computers remains the same and its data
            # did not change.
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")
            self.assertEqual(qb.first()[2], comp_id,
                             "The computer id is not correct.")
            # Check that now you have two calculations attached to the same
            # computer.
            qb = QueryBuilder()
            qb.append(Computer, tag='comp')
            qb.append(JobCalculation, has_computer='comp', project=['label'])
            self.assertEqual(qb.count(), 2, "Two calculations should be "
                                            "found.")
            ret_labels = set(_ for [_] in qb.all())
            self.assertEqual(ret_labels, set([calc1_label, calc2_label]),
                             "The labels of the calculations are not correct.")
        finally:
            # Deleting the created temporary folders
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_same_computer_different_name_import(self):
    """
    Check that re-importing a computer under a different name does not
    rename the computer already present in the database.

    Two calculations are exported against the same computer, which is
    renamed between the two exports.  After cleaning the database and
    importing both archives, exactly one computer must exist and it must
    keep the name it had in the *first* import (no rename on collision by
    UUID).
    """
    import os
    import shutil
    import tempfile
    from aiida.orm.importexport import export
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.computers import Computer
    from aiida.orm.calculation.job import JobCalculation
    # Creating a folder for the import/export files
    export_file_tmp_folder = tempfile.mkdtemp()
    unpack_tmp_folder = tempfile.mkdtemp()
    try:
        # Store a calculation
        calc1_label = "calc1"
        calc1 = JobCalculation()
        calc1.set_computer(self.computer)
        calc1.set_option('resources', {"num_machines": 1,
                                       "num_mpiprocs_per_machine": 1})
        calc1.label = calc1_label
        calc1.store()
        calc1._set_state(u'RETRIEVING')
        # Store locally the computer name
        comp1_name = six.text_type(self.computer.name)
        # Export the first job calculation
        filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
        export([calc1], outfile=filename1, silent=True)
        # Rename the computer
        self.computer.set_name(comp1_name + "_updated")
        # Store a second calculation
        calc2_label = "calc2"
        calc2 = JobCalculation()
        calc2.set_computer(self.computer)
        calc2.set_option('resources', {"num_machines": 2,
                                       "num_mpiprocs_per_machine": 2})
        calc2.label = calc2_label
        calc2.store()
        calc2._set_state(u'RETRIEVING')
        # Export the second job calculation (computer now has the new name)
        filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
        export([calc2], outfile=filename2, silent=True)
        # Clean the local database
        self.clean_db()
        # Check that there are no computers
        qb = QueryBuilder()
        qb.append(Computer, project=['*'])
        self.assertEqual(qb.count(), 0, "There should not be any computers"
                                        "in the database at this point.")
        # Check that there are no calculations
        qb = QueryBuilder()
        qb.append(JobCalculation, project=['*'])
        self.assertEqual(qb.count(), 0, "There should not be any "
                                        "calculations in the database at "
                                        "this point.")
        # Import the first calculation
        import_data(filename1, silent=True)
        # Check that the calculation computer is imported correctly.
        qb = QueryBuilder()
        qb.append(JobCalculation, project=['label'])
        self.assertEqual(qb.count(), 1, "Only one calculation should be "
                                        "found.")
        self.assertEqual(six.text_type(qb.first()[0]), calc1_label,
                         "The calculation label is not correct.")
        # Check that the referenced computer is imported correctly.
        qb = QueryBuilder()
        qb.append(Computer, project=['name', 'uuid', 'id'])
        self.assertEqual(qb.count(), 1, "Only one computer should be "
                                        "found.")
        self.assertEqual(six.text_type(qb.first()[0]), comp1_name,
                         "The computer name is not correct.")
        # Import the second calculation
        import_data(filename2, silent=True)
        # Check that the number of computers remains the same and its data
        # did not change (the rename in the archive must be ignored).
        qb = QueryBuilder()
        qb.append(Computer, project=['name'])
        self.assertEqual(qb.count(), 1, "Only one computer should be "
                                        "found.")
        self.assertEqual(six.text_type(qb.first()[0]), comp1_name,
                         "The computer name is not correct.")
    finally:
        # Deleting the created temporary folders
        shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
        shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_different_computer_same_name_import(self):
    """
    Check that importing *different* computers (distinct UUIDs) that share
    the same name triggers the rename-on-collision behaviour: the second
    and third imported computers get the COMP_DUPL_SUFFIX appended.
    """
    import os
    import shutil
    import tempfile
    from aiida.orm.importexport import export
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.computers import Computer
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm.importexport import COMP_DUPL_SUFFIX
    # Creating a folder for the import/export files
    export_file_tmp_folder = tempfile.mkdtemp()
    unpack_tmp_folder = tempfile.mkdtemp()
    try:
        # Set the computer name
        comp1_name = "localhost_1"
        self.computer.set_name(comp1_name)
        # Store a calculation
        calc1_label = "calc1"
        calc1 = JobCalculation()
        calc1.set_computer(self.computer)
        calc1.set_option('resources', {"num_machines": 1,
                                       "num_mpiprocs_per_machine": 1})
        calc1.label = calc1_label
        calc1.store()
        calc1._set_state(u'RETRIEVING')
        # Export the first job calculation
        filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
        export([calc1], outfile=filename1, silent=True)
        # Reset the database: insert_data() creates a *new* computer
        # (new UUID), which we give the same name as before.
        self.clean_db()
        self.insert_data()
        # Set the computer name to the same name as before
        self.computer.set_name(comp1_name)
        # Store a second calculation
        calc2_label = "calc2"
        calc2 = JobCalculation()
        calc2.set_computer(self.computer)
        calc2.set_option('resources', {"num_machines": 2,
                                       "num_mpiprocs_per_machine": 2})
        calc2.label = calc2_label
        calc2.store()
        calc2._set_state(u'RETRIEVING')
        # Export the second job calculation
        filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
        export([calc2], outfile=filename2, silent=True)
        # Reset the database
        self.clean_db()
        self.insert_data()
        # Set the computer name to the same name as before
        self.computer.set_name(comp1_name)
        # Store a third calculation
        calc3_label = "calc3"
        calc3 = JobCalculation()
        calc3.set_computer(self.computer)
        calc3.set_option('resources', {"num_machines": 2,
                                       "num_mpiprocs_per_machine": 2})
        calc3.label = calc3_label
        calc3.store()
        calc3._set_state(u'RETRIEVING')
        # Export the third job calculation
        filename3 = os.path.join(export_file_tmp_folder, "export3.tar.gz")
        export([calc3], outfile=filename3, silent=True)
        # Clean the local database
        self.clean_db()
        # Check that there are no computers
        qb = QueryBuilder()
        qb.append(Computer, project=['*'])
        self.assertEqual(qb.count(), 0, "There should not be any computers"
                                        "in the database at this point.")
        # Check that there are no calculations
        qb = QueryBuilder()
        qb.append(JobCalculation, project=['*'])
        self.assertEqual(qb.count(), 0, "There should not be any "
                                        "calculations in the database at "
                                        "this point.")
        # Import all the calculations
        import_data(filename1, silent=True)
        import_data(filename2, silent=True)
        import_data(filename3, silent=True)
        # Retrieve the calculation-computer pairs
        qb = QueryBuilder()
        qb.append(JobCalculation, project=['label'], tag='jcalc')
        qb.append(Computer, project=['name'],
                  computer_of='jcalc')
        self.assertEqual(qb.count(), 3, "Three combinations expected.")
        res = qb.all()
        # First import keeps the name; the two colliding ones get suffixes.
        self.assertIn([calc1_label, comp1_name], res,
                      "Calc-Computer combination not found.")
        self.assertIn([calc2_label,
                       comp1_name + COMP_DUPL_SUFFIX.format(0)], res,
                      "Calc-Computer combination not found.")
        self.assertIn([calc3_label,
                       comp1_name + COMP_DUPL_SUFFIX.format(1)], res,
                      "Calc-Computer combination not found.")
    finally:
        # Deleting the created temporary folders
        shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
        shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_correct_import_of_computer_json_params(self):
    """
    Check that a computer's metadata and transport params (stored as JSON
    columns) survive an export/import round trip on both backends.
    """
    import os
    import shutil
    import tempfile
    from aiida.orm.importexport import export
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.computers import Computer
    from aiida.orm.calculation.job import JobCalculation
    # Creating a folder for the import/export files
    export_file_tmp_folder = tempfile.mkdtemp()
    unpack_tmp_folder = tempfile.mkdtemp()
    try:
        # Set the computer name, metadata and transport params
        comp1_name = "localhost_1"
        comp1_metadata = {
            u'workdir': u'/tmp/aiida'
        }
        comp1_transport_params = {
            u'key1': u'value1',
            u'key2': 2
        }
        self.computer.set_name(comp1_name)
        self.computer._set_metadata(comp1_metadata)
        self.computer.set_transport_params(comp1_transport_params)
        # Store a calculation (exporting it drags the computer along)
        calc1_label = "calc1"
        calc1 = JobCalculation()
        calc1.set_computer(self.computer)
        calc1.set_option('resources', {"num_machines": 1,
                                       "num_mpiprocs_per_machine": 1})
        calc1.label = calc1_label
        calc1.store()
        calc1._set_state(u'RETRIEVING')
        # Export the first job calculation
        filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
        export([calc1], outfile=filename1, silent=True)
        # Clean the local database
        self.clean_db()
        # Import the data
        import_data(filename1, silent=True)
        qb = QueryBuilder()
        qb.append(Computer, project=['transport_params', '_metadata'],
                  tag="comp")
        self.assertEqual(qb.count(), 1, "Expected only one computer")
        res = qb.dict()[0]
        self.assertEqual(res['comp']['transport_params'],
                         comp1_transport_params,
                         "Not the expected transport parameters "
                         "were found")
        self.assertEqual(res['comp']['_metadata'],
                         comp1_metadata,
                         "Not the expected metadata were found")
    finally:
        # Deleting the created temporary folders
        shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
        shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_import_of_django_sqla_export_file(self):
    """
    Check that archive fixtures created by *both* backends (Django and
    SQLAlchemy) import correctly into the current backend, preserving the
    computer metadata and transport params.
    """
    from aiida.backends.tests.utils.fixtures import import_archive_fixture
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.computers import Computer
    for archive in ['export/compare/django.aiida', 'export/compare/sqlalchemy.aiida']:
        # Clean the database
        self.clean_db()
        # Import the needed data
        import_archive_fixture(archive)
        # The expected metadata & transport parameters
        # (these values are baked into the archive fixtures)
        comp1_metadata = {
            u'workdir': u'/tmp/aiida'
        }
        comp1_transport_params = {
            u'key1': u'value1',
            u'key2': 2
        }
        # Check that we got the correct metadata & transport parameters
        qb = QueryBuilder()
        qb.append(Computer, project=['transport_params', '_metadata'], tag="comp")
        self.assertEqual(qb.count(), 1, "Expected only one computer")
        res = qb.dict()[0]
        self.assertEqual(res['comp']['transport_params'], comp1_transport_params)
        self.assertEqual(res['comp']['_metadata'], comp1_metadata)
class TestLinks(AiidaTestCase):
def setUp(self):
    # Start every test from a pristine database with the default fixtures.
    self.clean_db()
    self.insert_data()
def tearDown(self):
    # Nothing to clean up here; setUp resets the database for the next test.
    pass
def get_all_node_links(self):
    """Return every node-to-node link currently in the database.

    Each row has the form ``[input_uuid, output_uuid, link_label,
    link_type]``, as produced by the QueryBuilder edge projection.
    """
    from aiida.orm import load_node, Node
    from aiida.orm.querybuilder import QueryBuilder
    builder = QueryBuilder()
    builder.append(Node, project='uuid', tag='input')
    builder.append(
        Node,
        project='uuid',
        tag='output',
        output_of='input',
        edge_project=['label', 'type'],
    )
    return builder.all()
def test_input_and_create_links(self):
    """
    Simple test that will verify that INPUT and CREATE links are properly exported and
    correctly recreated upon import.
    """
    import os, shutil, tempfile
    from aiida.orm.data.int import Int
    from aiida.orm.importexport import export
    from aiida.orm.calculation.work import WorkCalculation
    from aiida.common.links import LinkType
    tmp_folder = tempfile.mkdtemp()
    try:
        # Minimal graph: Int --INPUT--> WorkCalculation --CREATE--> Int
        node_work = WorkCalculation().store()
        node_input = Int(1).store()
        node_output = Int(2).store()
        node_work.add_link_from(node_input, 'input', link_type=LinkType.INPUT)
        node_output.add_link_from(node_work, 'output', link_type=LinkType.CREATE)
        # Snapshot the links before wiping the database
        export_links = self.get_all_node_links()
        export_file = os.path.join(tmp_folder, 'export.tar.gz')
        export([node_output], outfile=export_file, silent=True)
        self.clean_db()
        self.insert_data()
        import_data(export_file, silent=True)
        import_links = self.get_all_node_links()
        # Compare as sets: link ordering is not guaranteed
        export_set = [tuple(_) for _ in export_links]
        import_set = [tuple(_) for _ in import_links]
        self.assertEquals(set(export_set), set(import_set))
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
def construct_complex_graph(self, export_combination=0):
    """
    Create a "complex" graph exercising all available link types
    (INPUT, CREATE, RETURN and CALL) and return its nodes.

    :param export_combination: index (0-8) selecting which node is to be
        exported and which closure of nodes is expected after import.
    :return: a tuple ``(graph_nodes, (export_node, expected_nodes))``
        where ``graph_nodes`` is the full list of created nodes.
    :raises ValueError: if ``export_combination`` is out of range.
        (Previously an out-of-range value silently returned ``None``,
        which made callers fail with an opaque unpacking TypeError.)
    """
    from aiida.orm.data.base import Int
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm.calculation.work import WorkCalculation
    from aiida.common.datastructures import calc_states
    from aiida.common.links import LinkType
    if export_combination < 0 or export_combination > 8:
        raise ValueError(
            'export_combination must be in the range [0, 8], got {}'
            .format(export_combination))
    # Node creation
    d1 = Int(1).store()
    d2 = Int(1).store()
    wc1 = WorkCalculation().store()
    wc2 = WorkCalculation().store()
    pw1 = JobCalculation()
    pw1.set_computer(self.computer)
    pw1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
    pw1.store()
    d3 = Int(1).store()
    d4 = Int(1).store()
    pw2 = JobCalculation()
    pw2.set_computer(self.computer)
    pw2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
    pw2.store()
    d5 = Int(1).store()
    d6 = Int(1).store()
    # Link creation: wc2 calls wc1 and pw1; pw1 creates d3/d4 which wc2
    # also returns; d4 feeds pw2 which creates d5/d6.
    wc1.add_link_from(d1, 'input1', link_type=LinkType.INPUT)
    wc1.add_link_from(d2, 'input2', link_type=LinkType.INPUT)
    wc2.add_link_from(d1, 'input', link_type=LinkType.INPUT)
    wc2.add_link_from(wc1, 'call', link_type=LinkType.CALL)
    pw1.add_link_from(d1, 'input', link_type=LinkType.INPUT)
    pw1.add_link_from(wc2, 'call', link_type=LinkType.CALL)
    pw1._set_state(calc_states.PARSING)
    d3.add_link_from(pw1, 'create', link_type=LinkType.CREATE)
    d3.add_link_from(wc2, 'return', link_type=LinkType.RETURN)
    d4.add_link_from(pw1, 'create', link_type=LinkType.CREATE)
    d4.add_link_from(wc2, 'return', link_type=LinkType.RETURN)
    pw2.add_link_from(d4, 'input', link_type=LinkType.INPUT)
    pw2._set_state(calc_states.PARSING)
    d5.add_link_from(pw2, 'create', link_type=LinkType.CREATE)
    d6.add_link_from(pw2, 'create', link_type=LinkType.CREATE)
    # Return the generated nodes
    graph_nodes = [d1, d2, d3, d4, d5, d6, pw1, pw2, wc1, wc2]
    # Create various combinations of nodes that should be exported
    # and the final set of nodes that are exported in each case, following
    # predecessor/successor links.
    export_list = [
        (wc1, [d1, d2, d3, d4, pw1, wc1, wc2]),
        (wc2, [d1, d3, d4, pw1, wc2]),
        (d3, [d1, d3, d4, pw1]),
        (d4, [d1, d3, d4, pw1]),
        (d5, [d1, d3, d4, d5, d6, pw1, pw2]),
        (d6, [d1, d3, d4, d5, d6, pw1, pw2]),
        (pw2, [d1, d3, d4, d5, d6, pw1, pw2]),
        (d1, [d1]),
        (d2, [d2])
    ]
    return graph_nodes, export_list[export_combination]
def test_data_create_reversed_false(self):
    """Verify that create_reversed = False is respected when only exporting Data nodes."""
    import os
    import shutil
    import tempfile
    from aiida.common.datastructures import calc_states
    from aiida.orm import Data, Group
    from aiida.orm.data.base import Int
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm.importexport import export
    from aiida.common.links import LinkType
    from aiida.orm.querybuilder import QueryBuilder
    tmp_folder = tempfile.mkdtemp()
    try:
        # Graph: data_input --INPUT--> calc --CREATE--> data_output
        data_input = Int(1).store()
        data_output = Int(2).store()
        calc = JobCalculation()
        calc.set_computer(self.computer)
        calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
        calc.store()
        calc.add_link_from(data_input, 'input', link_type=LinkType.INPUT)
        calc._set_state(calc_states.PARSING)
        data_output.add_link_from(calc, 'create', link_type=LinkType.CREATE)
        group = Group.create(name='test_group')
        group.add_nodes(data_output)
        export_file = os.path.join(tmp_folder, 'export.tar.gz')
        # With create_reversed=False the creating calculation (and hence
        # its inputs) must NOT be pulled into the export set.
        export([group], outfile=export_file, silent=True, create_reversed=False)
        self.clean_db()
        self.insert_data()
        import_data(export_file, silent=True)
        builder = QueryBuilder()
        builder.append(Data)
        self.assertEqual(builder.count(), 1, 'Expected a single Data node but got {}'.format(builder.count()))
        self.assertEqual(builder.all()[0][0].uuid, data_output.uuid)
        builder = QueryBuilder()
        builder.append(JobCalculation)
        self.assertEqual(builder.count(), 0, 'Expected no Calculation nodes')
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
def test_complex_workflow_graph_links(self):
    """
    This test checks that all the needed links are correctly exported and
    imported. More precisely, it checks that INPUT, CREATE, RETURN and CALL
    links connecting Data nodes, JobCalculations and WorkCalculations are
    exported and imported correctly.
    """
    import os, shutil, tempfile
    from aiida.orm import Node
    from aiida.orm.importexport import export
    from aiida.common.links import LinkType
    from aiida.orm.querybuilder import QueryBuilder
    tmp_folder = tempfile.mkdtemp()
    try:
        graph_nodes, _ = self.construct_complex_graph()
        # Getting the input, create, return and call links
        qb = QueryBuilder()
        qb.append(Node, project='uuid')
        qb.append(Node, project='uuid',
                  edge_project=['label', 'type'],
                  edge_filters={'type': {'in': (LinkType.INPUT.value,
                                                LinkType.CREATE.value,
                                                LinkType.RETURN.value,
                                                LinkType.CALL.value)}})
        export_links = qb.all()
        export_file = os.path.join(tmp_folder, 'export.tar.gz')
        # Export the full graph, then round-trip through a clean database
        export(graph_nodes, outfile=export_file, silent=True)
        self.clean_db()
        self.insert_data()
        import_data(export_file, silent=True)
        import_links = self.get_all_node_links()
        # Compare as sets: link ordering is not guaranteed
        export_set = [tuple(_) for _ in export_links]
        import_set = [tuple(_) for _ in import_links]
        self.assertEquals(set(export_set), set(import_set))
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
def test_complex_workflow_graph_export_set_expansion(self):
    """
    Verify that exporting a single node pulls in exactly the expected
    closure of predecessor/successor nodes, for every export combination
    defined by ``construct_complex_graph``.
    """
    import os
    import shutil
    import tempfile
    from aiida.orm.importexport import export
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm import Node
    # construct_complex_graph defines 9 combinations (indices 0-8); the
    # previous range(0, 8) silently skipped the last one, (d2, [d2]).
    for export_conf in range(0, 9):
        graph_nodes, (export_node, export_target) = (
            self.construct_complex_graph(export_conf))
        tmp_folder = tempfile.mkdtemp()
        try:
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export([export_node], outfile=export_file, silent=True)
            export_node_str = str(export_node)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            # Get all the nodes of the database
            qb = QueryBuilder()
            qb.append(Node, project='uuid')
            imported_node_uuids = set(str(_[0]) for _ in qb.all())
            export_target_uuids = set(str(_.uuid) for _ in export_target)
            from aiida.orm.utils import load_node
            self.assertEquals(
                export_target_uuids,
                imported_node_uuids,
                "Problem in comparison of export node: " +
                str(export_node_str) + "\n" +
                "Expected set: " + str(export_target_uuids) + "\n" +
                "Imported set: " + str(imported_node_uuids) + "\n" +
                "Difference: " + str([load_node(_) for _ in
                                      export_target_uuids.symmetric_difference(
                                          imported_node_uuids)])
            )
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
def test_recursive_export_input_and_create_links_proper(self):
    """
    Check that CALL, INPUT, RETURN and CREATE links are followed
    recursively.
    """
    import os, shutil, tempfile
    from aiida.orm import Node
    from aiida.orm.data.base import Int
    from aiida.orm.importexport import export
    from aiida.orm.calculation.inline import InlineCalculation
    from aiida.orm.calculation.work import WorkCalculation
    from aiida.common.links import LinkType
    from aiida.orm.querybuilder import QueryBuilder
    tmp_folder = tempfile.mkdtemp()
    try:
        # Graph: wc2 --CALL--> wc1 --CALL--> c1, with two shared inputs
        # (ni1, ni2) and two outputs (no1 RETURNed by wc1 / CREATEd by c1,
        # same for no2).
        wc2 = WorkCalculation().store()
        wc1 = WorkCalculation().store()
        c1 = InlineCalculation().store()
        ni1 = Int(1).store()
        ni2 = Int(2).store()
        no1 = Int(1).store()
        no2 = Int(2).store()
        # Create the connections between workcalculations and calculations
        wc1.add_link_from(wc2, 'call', link_type=LinkType.CALL)
        c1.add_link_from(wc1, 'call', link_type=LinkType.CALL)
        # Connect the first data node to wc1 & c1
        wc1.add_link_from(ni1, 'ni1-to-wc1',
                          link_type=LinkType.INPUT)
        c1.add_link_from(ni1, 'ni1-to-c1',
                         link_type=LinkType.INPUT)
        # Connect the second data node to wc1 & c1
        wc1.add_link_from(ni2, 'ni2-to-wc1',
                          link_type=LinkType.INPUT)
        c1.add_link_from(ni2, 'ni2-to-c1',
                         link_type=LinkType.INPUT)
        # Connecting the first output node to wc1 & c1
        no1.add_link_from(wc1, 'output',
                          link_type=LinkType.RETURN)
        no1.add_link_from(c1, 'output',
                          link_type=LinkType.CREATE)
        # Connecting the second output node to wc1 & c1
        no2.add_link_from(wc1, 'output',
                          link_type=LinkType.RETURN)
        no2.add_link_from(c1, 'output',
                          link_type=LinkType.CREATE)
        # Getting the input, create, return and call links
        qb = QueryBuilder()
        qb.append(Node, project='uuid')
        qb.append(Node, project='uuid',
                  edge_project=['label', 'type'],
                  edge_filters={'type': {'in': (LinkType.INPUT.value,
                                                LinkType.CREATE.value,
                                                LinkType.RETURN.value,
                                                LinkType.CALL.value)}})
        export_links = qb.all()
        export_file = os.path.join(tmp_folder, 'export.tar.gz')
        # Exporting only the top caller must recursively drag everything in
        export([wc2], outfile=export_file, silent=True)
        self.clean_db()
        self.insert_data()
        import_data(export_file, silent=True)
        import_links = self.get_all_node_links()
        # Compare as sets: link ordering is not guaranteed
        export_set = [tuple(_) for _ in export_links]
        import_set = [tuple(_) for _ in import_links]
        self.assertEquals(set(export_set), set(import_set))
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
def test_links_for_workflows(self):
    """
    Check that CALL links are not followed in the export procedure, and the only creation
    is followed for data::
        ____        ____        ____
       |    | INP  |    | CALL |    |
       | i1 | -->  | w1 | <--- | w2 |
       |____|      |____|      |____|
                    |   |
            CREATE  v   v RETURN
                    ____
                   |    |
                   | o1 |
                   |____|
    """
    import os, shutil, tempfile
    from aiida.orm.data.base import Int
    from aiida.orm.importexport import export
    from aiida.orm.calculation.work import WorkCalculation
    from aiida.common.links import LinkType
    tmp_folder = tempfile.mkdtemp()
    try:
        w1 = WorkCalculation().store()
        w2 = WorkCalculation().store()
        i1 = Int(1).store()
        o1 = Int(2).store()
        w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)
        w1.add_link_from(w2, 'call', link_type=LinkType.CALL)
        o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)
        o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)
        # Expected links after import: everything except the CALL link
        # (w2 is not exported, so its link cannot be recreated)
        links_wanted = [l for l in self.get_all_node_links() if l[3] in
                        (LinkType.CREATE.value,
                         LinkType.INPUT.value,
                         LinkType.RETURN.value)]
        export_file_1 = os.path.join(tmp_folder, 'export-1.tar.gz')
        export_file_2 = os.path.join(tmp_folder, 'export-2.tar.gz')
        export([o1], outfile=export_file_1, silent=True)
        export([w1], outfile=export_file_2, silent=True)
        # Both archives must recreate the same (CALL-free) link set
        self.clean_db()
        self.insert_data()
        import_data(export_file_1, silent=True)
        links_in_db = self.get_all_node_links()
        self.assertEquals(sorted(links_wanted), sorted(links_in_db))
        self.clean_db()
        self.insert_data()
        import_data(export_file_2, silent=True)
        links_in_db = self.get_all_node_links()
        self.assertEquals(sorted(links_wanted), sorted(links_in_db))
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
def test_double_return_links_for_workflows(self):
    """
    This test checks that double return links to a node can be exported
    and imported without problems,
    """
    import os, shutil, tempfile
    from aiida.orm.data.base import Int
    from aiida.orm.importexport import export
    from aiida.orm.calculation.work import WorkCalculation
    from aiida.common.links import LinkType
    from aiida.orm.querybuilder import QueryBuilder
    from aiida.orm.node import Node
    tmp_folder = tempfile.mkdtemp()
    try:
        w1 = WorkCalculation().store()
        w2 = WorkCalculation().store()
        i1 = Int(1).store()
        o1 = Int(2).store()
        w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)
        w1.add_link_from(w2, 'call', link_type=LinkType.CALL)
        o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)
        o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)
        # o1 is RETURNed by both w1 and w2: the "double return" under test
        o1.add_link_from(w2, 'return', link_type=LinkType.RETURN)
        uuids_wanted = set(_.uuid for _ in (w1, o1, i1, w2))
        # NOTE(review): these are hard-coded legacy link-type strings,
        # unlike test_links_for_workflows which uses LinkType.*.value —
        # verify they still match the stored 'type' column.
        links_wanted = [l for l in self.get_all_node_links() if l[3] in (
            'createlink', 'inputlink', 'returnlink', 'calllink')]
        export_file = os.path.join(tmp_folder, 'export.tar.gz')
        export([o1, w1, w2, i1],
               outfile=export_file, silent=True)
        self.clean_db()
        self.insert_data()
        import_data(export_file, silent=True)
        uuids_in_db = [str(uuid) for [uuid] in
                       QueryBuilder().append(Node, project='uuid').all()]
        self.assertEquals(sorted(uuids_wanted), sorted(uuids_in_db))
        links_in_db = self.get_all_node_links()
        self.assertEquals(sorted(links_wanted), sorted(links_in_db))
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
def test_that_solo_code_is_exported_correctly(self):
    """
    Check that a Code node can be exported on its own (no calculation
    attached) and that its label survives the import.
    """
    import os, shutil, tempfile
    from aiida.orm.utils import load_node
    from aiida.orm.importexport import export
    from aiida.orm.code import Code
    tmp_folder = tempfile.mkdtemp()
    try:
        code_label = 'test_code1'
        code = Code()
        code.set_remote_computer_exec((self.computer, '/bin/true'))
        code.label = code_label
        code.store()
        code_uuid = code.uuid
        export_file = os.path.join(tmp_folder, 'export.tar.gz')
        export([code], outfile=export_file, silent=True)
        self.clean_db()
        self.insert_data()
        import_data(export_file, silent=True)
        self.assertEquals(load_node(code_uuid).label, code_label)
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
def test_that_input_code_is_exported_correctly(self):
    """
    This test checks that when a calculation is exported then the
    corresponding code is also exported. It also checks that the links
    are also in place after the import.
    """
    import os, shutil, tempfile
    from aiida.orm.utils import load_node
    from aiida.orm.importexport import export
    from aiida.common.links import LinkType
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm.code import Code
    from aiida.orm.querybuilder import QueryBuilder
    tmp_folder = tempfile.mkdtemp()
    try:
        code_label = 'test_code1'
        code = Code()
        code.set_remote_computer_exec((self.computer, '/bin/true'))
        code.label = code_label
        code.store()
        code_uuid = code.uuid
        jc = JobCalculation()
        jc.set_computer(self.computer)
        jc.set_option('resources',
                      {"num_machines": 1, "num_mpiprocs_per_machine": 1})
        jc.store()
        jc.add_link_from(code, 'code', link_type=LinkType.INPUT)
        export_file = os.path.join(tmp_folder, 'export.tar.gz')
        # Export only the calculation; the code must be dragged along
        export([jc], outfile=export_file, silent=True)
        self.clean_db()
        self.insert_data()
        import_data(export_file, silent=True)
        # Check that the node is there
        self.assertEquals(load_node(code_uuid).label, code_label)
        # Check that the link is in place
        qb = QueryBuilder()
        qb.append(Code, project='uuid')
        qb.append(JobCalculation, project='uuid',
                  edge_project=['label', 'type'],
                  edge_filters={'type': {'==': LinkType.INPUT.value}})
        self.assertEquals(qb.count(), 1,
                          "Expected to find one and only one link from "
                          "code to the calculation node. {} found."
                          .format(qb.count()))
    finally:
        shutil.rmtree(tmp_folder, ignore_errors=True)
| 39.257061
| 114
| 0.574005
|
t tempfile
from aiida.orm import load_node
from aiida.orm.data.base import Str, Int, Float, Bool
from aiida.orm.importexport import export
temp_folder = tempfile.mkdtemp()
try:
values = ("Hello", 6, -1.2399834e12, False)
filename = os.path.join(temp_folder, "export.tar.gz")
nodes = [cls(val).store() for val, cls in zip(values, (Str, Int, Float, Bool))]
uuids = [n.uuid for n in nodes]
export(nodes, outfile=filename, silent=True)
self.clean_db()
import_data(filename, silent=True)
for uuid, refval in zip(uuids, values):
self.assertEquals(load_node(uuid).value, refval)
finally:
shutil.rmtree(temp_folder, ignore_errors=True)
def test_1(self):
    """Check that node attributes survive an export/import round trip."""
    import os
    import shutil
    import tempfile
    from aiida.orm import DataFactory
    from aiida.orm import load_node
    from aiida.orm.calculation.job import JobCalculation
    from aiida.orm.importexport import export
    temp_folder = tempfile.mkdtemp()
    try:
        StructureData = DataFactory('structure')
        sd = StructureData()
        sd.store()
        calc = JobCalculation()
        calc.set_computer(self.computer)
        calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
        calc.store()
        calc.add_link_from(sd)
        pks = [sd.pk, calc.pk]
        # Snapshot every attribute of both nodes, keyed by uuid
        # (uuids, unlike pks, are stable across export/import)
        attrs = {}
        for pk in pks:
            node = load_node(pk)
            attrs[node.uuid] = dict()
            for k in node.attrs():
                attrs[node.uuid][k] = node.get_attr(k)
        filename = os.path.join(temp_folder, "export.tar.gz")
        export([calc], outfile=filename, silent=True)
        self.clean_db()
        import_data(filename, silent=True)
        # Every recorded attribute must be identical after the round trip
        for uuid in attrs.keys():
            node = load_node(uuid)
            for k in attrs[uuid].keys():
                self.assertEquals(attrs[uuid][k], node.get_attr(k))
    finally:
        # Deleting the created temporary folder
        shutil.rmtree(temp_folder, ignore_errors=True)
def test_2(self):
    """
    Check that importing an archive whose metadata declares an
    incompatible export version raises IncompatibleArchiveVersionError.
    The archive is doctored by unpacking it, rewriting
    ``metadata.json['export_version']`` to 0.0 and repacking.
    """
    import tarfile
    import os
    import shutil
    import tempfile
    from aiida.common import exceptions
    from aiida.orm import DataFactory
    from aiida.orm.importexport import export
    import aiida.utils.json as json
    # Creating a folder for the import/export files
    export_file_tmp_folder = tempfile.mkdtemp()
    unpack_tmp_folder = tempfile.mkdtemp()
    try:
        StructureData = DataFactory('structure')
        sd = StructureData()
        sd.store()
        filename = os.path.join(export_file_tmp_folder, "export.tar.gz")
        export([sd], outfile=filename, silent=True)
        with tarfile.open(filename, "r:gz", format=tarfile.PAX_FORMAT) as tar:
            tar.extractall(unpack_tmp_folder)
        with io.open(os.path.join(unpack_tmp_folder,
                                  'metadata.json'), 'r', encoding='utf8') as fhandle:
            metadata = json.load(fhandle)
        # Force a version no importer supports
        metadata['export_version'] = 0.0
        with io.open(os.path.join(unpack_tmp_folder, 'metadata.json'),
                     'wb') as fhandle:
            json.dump(metadata, fhandle)
        with tarfile.open(filename, "w:gz", format=tarfile.PAX_FORMAT) as tar:
            tar.add(unpack_tmp_folder, arcname="")
        self.tearDownClass()
        self.setUpClass()
        with self.assertRaises(exceptions.IncompatibleArchiveVersionError):
            import_data(filename, silent=True)
    finally:
        # Deleting the created temporary folders
        shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
        shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
def test_3(self):
    """
    Check the handling of links pointing to unknown nodes: importing an
    archive whose ``data.json`` references a non-existing uuid must raise
    ValueError, unless ``ignore_unknown_nodes=True`` is passed, in which
    case the import succeeds and the known node is loaded normally.
    """
    import tarfile
    import os
    import shutil
    import tempfile
    from aiida.orm.importexport import export
    from aiida.common.folders import SandboxFolder
    from aiida.orm.data.structure import StructureData
    from aiida.orm import load_node
    import aiida.utils.json as json
    # Creating a folder for the import/export files
    temp_folder = tempfile.mkdtemp()
    try:
        node_label = "Test structure data"
        sd = StructureData()
        sd.label = str(node_label)
        sd.store()
        filename = os.path.join(temp_folder, "export.tar.gz")
        export([sd], outfile=filename, silent=True)
        unpack = SandboxFolder()
        with tarfile.open(
                filename, "r:gz", format=tarfile.PAX_FORMAT) as tar:
            tar.extractall(unpack.abspath)
        with io.open(unpack.get_abs_path('data.json'), 'r', encoding='utf8') as fhandle:
            metadata = json.load(fhandle)
        # Inject a link whose input endpoint does not exist in the archive
        metadata['links_uuid'].append({
            'output': sd.uuid,
            'input': 'non-existing-uuid',
            'label': 'parent'
        })
        with io.open(unpack.get_abs_path('data.json'), 'wb') as fhandle:
            json.dump(metadata, fhandle)
        with tarfile.open(
                filename, "w:gz", format=tarfile.PAX_FORMAT) as tar:
            tar.add(unpack.abspath, arcname="")
        self.clean_db()
        with self.assertRaises(ValueError):
            import_data(filename, silent=True)
        import_data(filename, ignore_unknown_nodes=True, silent=True)
        self.assertEquals(load_node(sd.uuid).label, node_label)
    finally:
        # Deleting the created temporary folder
        shutil.rmtree(temp_folder, ignore_errors=True)
def test_4(self):
    """
    Check license filtering in export_tree: a node exports when its
    license is in ``allowed_licenses`` (or not in ``forbidden_licenses``),
    and LicensingException is raised otherwise.  Both list-based and
    callable-based filters are exercised; a filter that itself raises
    must surface as LicensingException too.
    """
    from aiida.common.exceptions import LicensingException
    from aiida.common.folders import SandboxFolder
    from aiida.orm.importexport import export_tree
    from aiida.orm import DataFactory
    StructureData = DataFactory('structure')
    sd = StructureData()
    sd.source = {'license': 'GPL'}
    sd.store()
    folder = SandboxFolder()
    export_tree([sd], folder=folder, silent=True,
                allowed_licenses=['GPL'])
    # Folder should contain two files of metadata + nodes/
    self.assertEquals(len(folder.get_content_list()), 3)
    folder = SandboxFolder()
    export_tree([sd], folder=folder, silent=True,
                forbidden_licenses=['Academic'])
    # Folder should contain two files of metadata + nodes/
    self.assertEquals(len(folder.get_content_list()), 3)
    folder = SandboxFolder()
    with self.assertRaises(LicensingException):
        export_tree([sd], folder=folder, silent=True,
                    allowed_licenses=['CC0'])
    folder = SandboxFolder()
    with self.assertRaises(LicensingException):
        export_tree([sd], folder=folder, silent=True,
                    forbidden_licenses=['GPL'])
    # Callable filters: return True to accept/reject a license string
    def cc_filter(license):
        return license.startswith('CC')
    def gpl_filter(license):
        return license == 'GPL'
    def crashing_filter(license):
        raise NotImplementedError("not implemented yet")
    folder = SandboxFolder()
    with self.assertRaises(LicensingException):
        export_tree([sd], folder=folder, silent=True,
                    allowed_licenses=cc_filter)
    folder = SandboxFolder()
    with self.assertRaises(LicensingException):
        export_tree([sd], folder=folder, silent=True,
                    forbidden_licenses=gpl_filter)
    folder = SandboxFolder()
    with self.assertRaises(LicensingException):
        export_tree([sd], folder=folder, silent=True,
                    allowed_licenses=crashing_filter)
    folder = SandboxFolder()
    with self.assertRaises(LicensingException):
        export_tree([sd], folder=folder, silent=True,
                    forbidden_licenses=crashing_filter)
    def test_5(self):
        """Test that node ownership survives an export/import round trip.

        Builds a chain sd1 -> jc1 -> sd2 -> jc2 -> sd3 where the first three
        nodes belong to a newly created user and the last two to the default
        user, exports from sd3, wipes the DB, reimports, and verifies each
        node is assigned to the correct user email.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.common.datastructures import calc_states
        from aiida.common.links import LinkType
        from aiida.common.utils import get_configured_user_email
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend).store()
            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.set_user(user)
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)
            # Create some nodes from a different user
            sd2 = StructureData()
            sd2.set_user(user)
            sd2.label = 'sd2'
            sd2.store()
            sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE) # I assume jc1 CREATED sd2
            # jc2 and sd3 are NOT assigned to `user`, so they belong to the
            # default (configured) user.
            jc2 = JobCalculation()
            jc2.set_computer(self.computer)
            jc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc2.label = 'jc2'
            jc2.store()
            jc2.add_link_from(sd2, label='l2')
            jc2._set_state(calc_states.PARSING)
            sd3 = StructureData()
            sd3.label = 'sd3'
            sd3.store()
            sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)
            uuids_u1 = [sd1.uuid, jc1.uuid, sd2.uuid]
            uuids_u2 = [jc2.uuid, sd3.uuid]
            filename = os.path.join(temp_folder, "export.tar.gz")
            export([sd3], outfile=filename, silent=True)
            self.clean_db()
            import_data(filename, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids_u1:
                node = load_node(uuid=uuid)
                self.assertEquals(node.get_user().email, new_email)
            for uuid in uuids_u2:
                self.assertEquals(load_node(uuid).get_user().email,
                                  get_configured_user_email())
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_6(self):
        """Test incremental export/import cycles preserve node ownership.

        First exports sd1 -> jc1 -> sd2 (owned by a new user), reimports into
        a clean DB, then extends the imported graph with jc2 -> sd3 (default
        user), exports and reimports again, and checks that after the second
        round trip each node still belongs to the expected user.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.common.datastructures import calc_states
        from aiida.common.links import LinkType
        from aiida.common.utils import get_configured_user_email
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend).store()
            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.set_user(user)
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)
            # Create some nodes from a different user
            sd2 = StructureData()
            sd2.set_user(user)
            sd2.label = 'sd2'
            sd2.store()
            sd2.add_link_from(jc1, label='l1', link_type=LinkType.CREATE)
            # Set the jc1 to FINISHED
            jc1._set_state(calc_states.FINISHED)
            # At this point we export the generated data
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([sd2], outfile=filename1, silent=True)
            uuids1 = [sd1.uuid, jc1.uuid, sd2.uuid]
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids1:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            # Now we continue to generate more data based on the imported
            # data
            sd2_imp = load_node(sd2.uuid)
            jc2 = JobCalculation()
            jc2.set_computer(self.computer)
            jc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc2.label = 'jc2'
            jc2.store()
            jc2.add_link_from(sd2_imp, label='l2')
            jc2._set_state(calc_states.PARSING)
            sd3 = StructureData()
            sd3.label = 'sd3'
            sd3.store()
            sd3.add_link_from(jc2, label='l3', link_type=LinkType.CREATE)
            # Set the jc2 to FINISHED
            jc2._set_state(calc_states.FINISHED)
            # Store the UUIDs of the nodes that should be checked
            # if they can be imported correctly.
            uuids2 = [jc2.uuid, sd3.uuid]
            filename2 = os.path.join(temp_folder, "export2.tar.gz")
            export([sd3], outfile=filename2, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(filename2, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in uuids1:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            for uuid in uuids2:
                self.assertEquals(load_node(uuid).get_user().email,
                                  get_configured_user_email())
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_7(self):
        """Test export/import of nodes together with the group holding them.

        Creates sd1 -> jc1 owned by a new user, puts both in a group, exports
        nodes and group explicitly, reimports into a clean DB, and verifies
        both the node ownership and the presence of the group (by UUID).
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.common.datastructures import calc_states
        from aiida.orm.querybuilder import QueryBuilder
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend)
            user.store()
            # Create a structure data node that has a calculation as output
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            jc1 = JobCalculation()
            jc1.set_computer(self.computer)
            jc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc1.set_user(user)
            jc1.label = 'jc1'
            jc1.store()
            jc1.add_link_from(sd1)
            jc1._set_state(calc_states.PARSING)
            # Create a group and add the data inside
            from aiida.orm.group import Group
            g1 = Group(name="node_group")
            g1.store()
            g1.add_nodes([sd1, jc1])
            g1_uuid = g1.uuid
            # At this point we export the generated data
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([sd1, jc1, g1], outfile=filename1,
                   silent=True)
            n_uuids = [sd1.uuid, jc1.uuid]
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in n_uuids:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            # Check that the exported group is imported correctly
            qb = QueryBuilder()
            qb.append(Group, filters={'uuid': {'==': g1_uuid}})
            self.assertEquals(qb.count(), 1, "The group was not found.")
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_group_export(self):
        """Test that exporting only a group also exports its member nodes.

        Exports a group containing a single StructureData (without listing
        the node itself), reimports into a clean DB, and verifies both the
        node (with correct owner) and the group are present.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm import load_node
        from aiida.orm.data.structure import StructureData
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        try:
            # Create another user
            new_email = "newuser@new.n"
            user = orm.User(email=new_email, backend=self.backend)
            user.store()
            # Create a structure data node
            sd1 = StructureData()
            sd1.set_user(user)
            sd1.label = 'sd1'
            sd1.store()
            # Create a group and add the data inside
            from aiida.orm.group import Group
            g1 = Group(name="node_group")
            g1.store()
            g1.add_nodes([sd1])
            g1_uuid = g1.uuid
            # At this point we export the generated data
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([g1], outfile=filename1, silent=True)
            n_uuids = [sd1.uuid]
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            # Check that the imported nodes are correctly imported and that
            # the user assigned to the nodes is the right one
            for uuid in n_uuids:
                self.assertEquals(load_node(uuid).get_user().email, new_email)
            # Check that the exported group is imported correctly
            qb = QueryBuilder()
            qb.append(Group, filters={'uuid': {'==': g1_uuid}})
            self.assertEquals(qb.count(), 1, "The group was not found.")
        finally:
            # Deleting the created temporary folder
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_workfunction_1(self):
        """Test that exporting a workfunction result keeps only its provenance.

        ``max_`` is a plain function (not a workfunction), so only the nodes
        actually linked through the ``add`` workfunction call (a, e and the
        result) should survive the export/import round trip; b, c and d must
        not exist afterwards.
        """
        import shutil, os, tempfile
        from aiida.work.workfunctions import workfunction
        from aiida.orm.data.float import Float
        from aiida.orm import load_node
        from aiida.orm.importexport import export
        from aiida.common.exceptions import NotExistent
        # Creating a folder for the import/export files
        temp_folder = tempfile.mkdtemp()
        @workfunction
        def add(a, b):
            return {'res': Float(a + b)}
        def max_(**kwargs):
            # Returns the node with the largest .value among the inputs
            max_val = max([(v.value, v) for v in kwargs.values()])
            return {'res': max_val[1]}
        try:
            # I'm creating a bunch of numbers
            a, b, c, d, e = (Float(i) for i in range(5))
            res = add(a=a, b=max_(b=b, c=c, d=d, e=e)['res'])['res']
            uuids_values = [(a.uuid, a.value), (e.uuid, e.value), (res.uuid, res.value)]
            not_wanted_uuids = [v.uuid for v in (b, c, d)]
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([res], outfile=filename1, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            for uuid, value in uuids_values:
                self.assertEquals(load_node(uuid).value, value)
            for uuid in not_wanted_uuids:
                with self.assertRaises(NotExistent):
                    load_node(uuid)
        finally:
            shutil.rmtree(temp_folder, ignore_errors=True)
    def test_workcalculation_2(self):
        """Test export/import of a WorkCalculation output.

        Builds a master/slave WorkCalculation pair with INPUT, CALL and
        CREATE links, exports the CREATE-linked output, reimports into a
        clean DB and checks the output's value is preserved.
        """
        import shutil, os, tempfile
        from aiida.orm.calculation.work import WorkCalculation
        from aiida.orm.data.float import Float
        from aiida.orm.data.int import Int
        from aiida.orm import load_node
        from aiida.common.links import LinkType
        from aiida.orm.importexport import export
        from aiida.common.exceptions import NotExistent
        temp_folder = tempfile.mkdtemp()
        try:
            master = WorkCalculation().store()
            slave = WorkCalculation().store()
            input_1 = Int(3).store()
            input_2 = Int(5).store()
            output_1 = Int(2).store()
            master.add_link_from(input_1, 'input_1', link_type=LinkType.INPUT)
            slave.add_link_from(master, 'CALL', link_type=LinkType.CALL)
            slave.add_link_from(input_2, 'input_2', link_type=LinkType.INPUT)
            output_1.add_link_from(master, 'CREATE', link_type=LinkType.CREATE)
            uuids_values = [(v.uuid, v.value) for v in (output_1,)]
            filename1 = os.path.join(temp_folder, "export1.tar.gz")
            export([output_1], outfile=filename1, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(filename1, silent=True)
            for uuid, value in uuids_values:
                self.assertEquals(load_node(uuid).value, value)
        finally:
            shutil.rmtree(temp_folder, ignore_errors=True)
def test_reexport(self):
import os, shutil, tempfile, numpy as np, string, random
from datetime import datetime
from aiida.orm import Calculation, load_node, Group
from aiida.orm.data.array import ArrayData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.querybuilder import QueryBuilder
from aiida.orm.importexport import export
from aiida.common.hashing import make_hash
from aiida.common.links import LinkType
def get_hash_from_db_content(groupname):
qb = QueryBuilder()
qb.append(ParameterData, tag='p', project='*')
qb.append(Calculation, tag='c', project='*', edge_tag='p2c', edge_project=('label', 'type'))
qb.append(ArrayData, tag='a', project='*', edge_tag='c2a', edge_project=('label', 'type'))
qb.append(Group, filters={'name': groupname}, project='*', tag='g', group_of='a')
self.assertTrue(qb.count() > 0)
hash_ = make_hash([(
item['p']['*'].get_attrs(),
item['p']['*'].uuid,
item['p']['*'].label,
item['p']['*'].description,
item['c']['*'].uuid,
item['c']['*'].get_attrs(),
item['a']['*'].get_attrs(),
[item['a']['*'].get_array(name) for name in item['a']['*'].get_arraynames()],
item['a']['*'].uuid,
item['g']['*'].uuid,
item['g']['*'].name,
item['p2c']['label'],
item['p2c']['type'],
item['c2a']['label'],
item['c2a']['type'],
item['g']['*'].name,
) for item in qb.dict()])
return hash_
temp_folder = tempfile.mkdtemp()
chars = string.ascii_uppercase + string.digits
size = 10
groupname = 'test-group'
try:
nparr = np.random.random((4, 3, 2))
trial_dict = {}
trial_dict.update({str(k): np.random.randint(100) for k in range(10)})
trial_dict.update({str(k): np.random.random() for k in range(10, 20)})
trial_dict.update({str(k): bool(np.random.randint(1)) for k in range(20, 30)})
trial_dict.update({str(k): datetime(
year=2017,
month=np.random.randint(1, 12),
day=np.random.randint(1, 28)) for k in range(30, 40)})
trial_dict.update({str(k): ''.join(random.choice(chars) for _ in range(size)) for k in range(20, 30)})
p = ParameterData(dict=trial_dict)
p.label = str(datetime.now())
p.description = 'd_' + str(datetime.now())
p.store()
c = Calculation()
(c._set_attr(str(int(k) + np.random.randint(10)), v) for k, v in trial_dict.items())
c.store()
a = ArrayData()
a.set_array('array', nparr)
a.store()
c.add_link_from(p, label='input_parameters', link_type=LinkType.INPUT)
a.add_link_from(c, label='output_array', link_type=LinkType.CREATE)
g = Group(name='test-group')
g.store()
g.add_nodes(a)
hash_from_dbcontent = get_hash_from_db_content(groupname)
for i in range(3):
filename = os.path.join(temp_folder, "export-{}.zip".format(i))
g = Group.get_from_string(groupname)
export([g] + [n for n in g.nodes], outfile=filename, silent=True)
self.clean_db()
import_data(filename, silent=True, ignore_unknown_nodes=True)
new_hash = get_hash_from_db_content(groupname)
self.assertEqual(hash_from_dbcontent, new_hash)
finally:
shutil.rmtree(temp_folder, ignore_errors=True)
class TestComplex(AiidaTestCase):
    """Export/import tests on a graph mixing several node types."""
    def test_complex_graph_import_export(self):
        """Test export from a FolderData and reimport of the whole graph.

        Builds calc1 -> rd1 -> calc2 -> fd1 with ParameterData inputs, exports
        starting from fd1 only, wipes the DB, reimports, and fails the test if
        any of the six nodes cannot be loaded by UUID afterwards.
        """
        import tempfile
        import shutil
        import os
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.data.folder import FolderData
        from aiida.orm.data.parameter import ParameterData
        from aiida.orm.data.remote import RemoteData
        from aiida.common.links import LinkType
        from aiida.orm.importexport import export, import_data
        from aiida.orm.utils import load_node
        from aiida.common.exceptions import NotExistent
        temp_folder = tempfile.mkdtemp()
        try:
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            calc1.label = "calc1"
            calc1.store()
            calc1._set_state(u'RETRIEVING')
            pd1 = ParameterData()
            pd1.label = "pd1"
            pd1.store()
            pd2 = ParameterData()
            pd2.label = "pd2"
            pd2.store()
            rd1 = RemoteData()
            rd1.label = "rd1"
            rd1.set_remote_path("/x/y.py")
            rd1.set_computer(self.computer)
            rd1.store()
            rd1.add_link_from(calc1, link_type=LinkType.CREATE)
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            calc2.label = "calc2"
            calc2.store()
            calc2.add_link_from(pd1, link_type=LinkType.INPUT)
            calc2.add_link_from(pd2, link_type=LinkType.INPUT)
            calc2.add_link_from(rd1, link_type=LinkType.INPUT)
            calc2._set_state(u'SUBMITTING')
            fd1 = FolderData()
            fd1.label = "fd1"
            fd1.store()
            fd1.add_link_from(calc2, link_type=LinkType.CREATE)
            node_uuids_labels = {calc1.uuid: calc1.label, pd1.uuid: pd1.label,
                                 pd2.uuid: pd2.label, rd1.uuid: rd1.label,
                                 calc2.uuid: calc2.label, fd1.uuid: fd1.label}
            filename = os.path.join(temp_folder, "export.tar.gz")
            export([fd1], outfile=filename, silent=True)
            self.clean_db()
            import_data(filename, silent=True, ignore_unknown_nodes=True)
            for uuid, label in node_uuids_labels.items():
                try:
                    load_node(uuid)
                except NotExistent:
                    self.fail("Node with UUID {} and label {} was not "
                              "found.".format(uuid, label))
        finally:
            shutil.rmtree(temp_folder, ignore_errors=True)
class TestComputer(AiidaTestCase):
    """Tests on how computers are handled across export/import cycles."""
    def setUp(self):
        # Start each test from a pristine fixture database.
        self.clean_db()
        self.insert_data()
    def tearDown(self):
        # Nothing to clean up: setUp fully resets the database.
        pass
    def test_same_computer_import(self):
        """Test that importing two archives sharing one computer does not
        duplicate the computer: name, UUID and DB id must stay identical and
        both calculations must end up attached to the single computer.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computers import Computer
        from aiida.orm.calculation.job import JobCalculation
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()
        try:
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_option('resources', {"num_machines": 1,
                                           "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_option('resources', {"num_machines": 2,
                                           "num_mpiprocs_per_machine": 2})
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')
            comp_name = six.text_type(self.computer.name)
            comp_uuid = six.text_type(self.computer.uuid)
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1], outfile=filename1, silent=True)
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2], outfile=filename2, silent=True)
            self.clean_db()
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any computers"
                                            "in the database at this point.")
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any "
                                            "calculations in the database at "
                                            "this point.")
            import_data(filename1, silent=True)
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")
            # Remember the DB id: the second import must reuse this computer.
            comp_id = qb.first()[2]
            import_data(filename2, silent=True)
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), comp_name,
                             "The computer name is not correct.")
            self.assertEqual(six.text_type(qb.first()[1]), comp_uuid,
                             "The computer uuid is not correct.")
            self.assertEqual(qb.first()[2], comp_id,
                             "The computer id is not correct.")
            qb = QueryBuilder()
            qb.append(Computer, tag='comp')
            qb.append(JobCalculation, has_computer='comp', project=['label'])
            self.assertEqual(qb.count(), 2, "Two calculations should be "
                                            "found.")
            ret_labels = set(_ for [_] in qb.all())
            self.assertEqual(ret_labels, set([calc1_label, calc2_label]),
                             "The labels of the calculations are not correct.")
        finally:
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
    def test_same_computer_different_name_import(self):
        """Test that a computer renamed between two exports is still matched
        (by UUID) on import: only one computer should exist afterwards,
        keeping the name from the first import.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computers import Computer
        from aiida.orm.calculation.job import JobCalculation
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()
        try:
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_option('resources', {"num_machines": 1,
                                           "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')
            comp1_name = six.text_type(self.computer.name)
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1], outfile=filename1, silent=True)
            # Rename the computer before the second export
            self.computer.set_name(comp1_name + "_updated")
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_option('resources', {"num_machines": 2,
                                           "num_mpiprocs_per_machine": 2})
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2], outfile=filename2, silent=True)
            self.clean_db()
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any computers"
                                            "in the database at this point.")
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any "
                                            "calculations in the database at "
                                            "this point.")
            import_data(filename1, silent=True)
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'])
            self.assertEqual(qb.count(), 1, "Only one calculation should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), calc1_label,
                             "The calculation label is not correct.")
            qb = QueryBuilder()
            qb.append(Computer, project=['name', 'uuid', 'id'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")
            import_data(filename2, silent=True)
            qb = QueryBuilder()
            qb.append(Computer, project=['name'])
            self.assertEqual(qb.count(), 1, "Only one computer should be "
                                            "found.")
            self.assertEqual(six.text_type(qb.first()[0]), comp1_name,
                             "The computer name is not correct.")
        finally:
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
    def test_different_computer_same_name_import(self):
        """Test that importing three distinct computers that share a name
        results in deduplicated names via COMP_DUPL_SUFFIX, with each
        calculation attached to its own computer.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computers import Computer
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.importexport import COMP_DUPL_SUFFIX
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()
        try:
            comp1_name = "localhost_1"
            self.computer.set_name(comp1_name)
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_option('resources', {"num_machines": 1,
                                           "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1], outfile=filename1, silent=True)
            # Reset the DB: insert_data creates a NEW computer (different
            # UUID) which is then given the same name.
            self.clean_db()
            self.insert_data()
            self.computer.set_name(comp1_name)
            calc2_label = "calc2"
            calc2 = JobCalculation()
            calc2.set_computer(self.computer)
            calc2.set_option('resources', {"num_machines": 2,
                                           "num_mpiprocs_per_machine": 2})
            calc2.label = calc2_label
            calc2.store()
            calc2._set_state(u'RETRIEVING')
            filename2 = os.path.join(export_file_tmp_folder, "export2.tar.gz")
            export([calc2], outfile=filename2, silent=True)
            self.clean_db()
            self.insert_data()
            self.computer.set_name(comp1_name)
            calc3_label = "calc3"
            calc3 = JobCalculation()
            calc3.set_computer(self.computer)
            calc3.set_option('resources', {"num_machines": 2,
                                           "num_mpiprocs_per_machine": 2})
            calc3.label = calc3_label
            calc3.store()
            calc3._set_state(u'RETRIEVING')
            filename3 = os.path.join(export_file_tmp_folder, "export3.tar.gz")
            export([calc3], outfile=filename3, silent=True)
            self.clean_db()
            qb = QueryBuilder()
            qb.append(Computer, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any computers"
                                            "in the database at this point.")
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['*'])
            self.assertEqual(qb.count(), 0, "There should not be any "
                                            "calculations in the database at "
                                            "this point.")
            import_data(filename1, silent=True)
            import_data(filename2, silent=True)
            import_data(filename3, silent=True)
            qb = QueryBuilder()
            qb.append(JobCalculation, project=['label'], tag='jcalc')
            qb.append(Computer, project=['name'],
                      computer_of='jcalc')
            self.assertEqual(qb.count(), 3, "Three combinations expected.")
            res = qb.all()
            self.assertIn([calc1_label, comp1_name], res,
                          "Calc-Computer combination not found.")
            self.assertIn([calc2_label,
                           comp1_name + COMP_DUPL_SUFFIX.format(0)], res,
                          "Calc-Computer combination not found.")
            self.assertIn([calc3_label,
                           comp1_name + COMP_DUPL_SUFFIX.format(1)], res,
                          "Calc-Computer combination not found.")
        finally:
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
    def test_correct_import_of_computer_json_params(self):
        """Test that a computer's transport_params and _metadata (JSON
        fields) survive the export/import round trip unchanged.
        """
        import os
        import shutil
        import tempfile
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computers import Computer
        from aiida.orm.calculation.job import JobCalculation
        export_file_tmp_folder = tempfile.mkdtemp()
        unpack_tmp_folder = tempfile.mkdtemp()
        try:
            comp1_name = "localhost_1"
            comp1_metadata = {
                u'workdir': u'/tmp/aiida'
            }
            comp1_transport_params = {
                u'key1': u'value1',
                u'key2': 2
            }
            self.computer.set_name(comp1_name)
            self.computer._set_metadata(comp1_metadata)
            self.computer.set_transport_params(comp1_transport_params)
            calc1_label = "calc1"
            calc1 = JobCalculation()
            calc1.set_computer(self.computer)
            calc1.set_option('resources', {"num_machines": 1,
                                           "num_mpiprocs_per_machine": 1})
            calc1.label = calc1_label
            calc1.store()
            calc1._set_state(u'RETRIEVING')
            filename1 = os.path.join(export_file_tmp_folder, "export1.tar.gz")
            export([calc1], outfile=filename1, silent=True)
            self.clean_db()
            import_data(filename1, silent=True)
            qb = QueryBuilder()
            qb.append(Computer, project=['transport_params', '_metadata'],
                      tag="comp")
            self.assertEqual(qb.count(), 1, "Expected only one computer")
            res = qb.dict()[0]
            self.assertEqual(res['comp']['transport_params'],
                             comp1_transport_params,
                             "Not the expected transport parameters "
                             "were found")
            self.assertEqual(res['comp']['_metadata'],
                             comp1_metadata,
                             "Not the expected metadata were found")
        finally:
            shutil.rmtree(export_file_tmp_folder, ignore_errors=True)
            shutil.rmtree(unpack_tmp_folder, ignore_errors=True)
    def test_import_of_django_sqla_export_file(self):
        """Test that the bundled Django and SQLAlchemy archive fixtures both
        import correctly, preserving the computer's JSON fields.
        """
        from aiida.backends.tests.utils.fixtures import import_archive_fixture
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.computers import Computer
        for archive in ['export/compare/django.aiida', 'export/compare/sqlalchemy.aiida']:
            self.clean_db()
            import_archive_fixture(archive)
            # Expected JSON fields of the computer stored in the fixtures
            comp1_metadata = {
                u'workdir': u'/tmp/aiida'
            }
            comp1_transport_params = {
                u'key1': u'value1',
                u'key2': 2
            }
            qb = QueryBuilder()
            qb.append(Computer, project=['transport_params', '_metadata'], tag="comp")
            self.assertEqual(qb.count(), 1, "Expected only one computer")
            res = qb.dict()[0]
            self.assertEqual(res['comp']['transport_params'], comp1_transport_params)
            self.assertEqual(res['comp']['_metadata'], comp1_metadata)
class TestLinks(AiidaTestCase):
    def setUp(self):
        # Start each test from a pristine fixture database.
        self.clean_db()
        self.insert_data()
    def tearDown(self):
        # Nothing to clean up: setUp fully resets the database.
        pass
def get_all_node_links(self):
from aiida.orm import load_node, Node
from aiida.orm.querybuilder import QueryBuilder
qb = QueryBuilder()
qb.append(Node, project='uuid', tag='input')
qb.append(Node, project='uuid', tag='output',
edge_project=['label', 'type'], output_of='input')
return qb.all()
    def test_input_and_create_links(self):
        """Test that INPUT and CREATE links survive an export/import cycle.

        Builds input -> WorkCalculation -> output, records all links, exports
        from the output, reimports into a clean DB and checks the link set is
        unchanged.
        """
        import os, shutil, tempfile
        from aiida.orm.data.int import Int
        from aiida.orm.importexport import export
        from aiida.orm.calculation.work import WorkCalculation
        from aiida.common.links import LinkType
        tmp_folder = tempfile.mkdtemp()
        try:
            node_work = WorkCalculation().store()
            node_input = Int(1).store()
            node_output = Int(2).store()
            node_work.add_link_from(node_input, 'input', link_type=LinkType.INPUT)
            node_output.add_link_from(node_work, 'output', link_type=LinkType.CREATE)
            export_links = self.get_all_node_links()
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export([node_output], outfile=export_file, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            import_links = self.get_all_node_links()
            export_set = [tuple(_) for _ in export_links]
            import_set = [tuple(_) for _ in import_links]
            self.assertEquals(set(export_set), set(import_set))
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
    def construct_complex_graph(self, export_combination=0):
        """Build a workflow graph and return one export test combination.

        The graph mixes data nodes (d1-d6), job calculations (pw1, pw2) and
        work calculations (wc1, wc2) connected via INPUT, CALL, CREATE and
        RETURN links.

        :param export_combination: index 0-8 selecting one entry of the
            internal export list; out-of-range values return None.
        :return: tuple ``(graph_nodes, (export_root, expected_node_set))``,
            i.e. all created nodes and the pair of (node to export from,
            nodes expected after reimport).
        """
        from aiida.orm.data.base import Int
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.calculation.work import WorkCalculation
        from aiida.common.datastructures import calc_states
        from aiida.common.links import LinkType
        if export_combination < 0 or export_combination > 8:
            return None
        d1 = Int(1).store()
        d2 = Int(1).store()
        wc1 = WorkCalculation().store()
        wc2 = WorkCalculation().store()
        pw1 = JobCalculation()
        pw1.set_computer(self.computer)
        pw1.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
        pw1.store()
        d3 = Int(1).store()
        d4 = Int(1).store()
        pw2 = JobCalculation()
        pw2.set_computer(self.computer)
        pw2.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
        pw2.store()
        d5 = Int(1).store()
        d6 = Int(1).store()
        wc1.add_link_from(d1, 'input1', link_type=LinkType.INPUT)
        wc1.add_link_from(d2, 'input2', link_type=LinkType.INPUT)
        wc2.add_link_from(d1, 'input', link_type=LinkType.INPUT)
        wc2.add_link_from(wc1, 'call', link_type=LinkType.CALL)
        pw1.add_link_from(d1, 'input', link_type=LinkType.INPUT)
        pw1.add_link_from(wc2, 'call', link_type=LinkType.CALL)
        pw1._set_state(calc_states.PARSING)
        d3.add_link_from(pw1, 'create', link_type=LinkType.CREATE)
        d3.add_link_from(wc2, 'return', link_type=LinkType.RETURN)
        d4.add_link_from(pw1, 'create', link_type=LinkType.CREATE)
        d4.add_link_from(wc2, 'return', link_type=LinkType.RETURN)
        pw2.add_link_from(d4, 'input', link_type=LinkType.INPUT)
        pw2._set_state(calc_states.PARSING)
        d5.add_link_from(pw2, 'create', link_type=LinkType.CREATE)
        d6.add_link_from(pw2, 'create', link_type=LinkType.CREATE)
        graph_nodes = [d1, d2, d3, d4, d5, d6, pw1, pw2, wc1, wc2]
        # (export root, nodes expected in the DB after export+reimport)
        export_list = [
            (wc1, [d1, d2, d3, d4, pw1, wc1, wc2]),
            (wc2, [d1, d3, d4, pw1, wc2]),
            (d3, [d1, d3, d4, pw1]),
            (d4, [d1, d3, d4, pw1]),
            (d5, [d1, d3, d4, d5, d6, pw1, pw2]),
            (d6, [d1, d3, d4, d5, d6, pw1, pw2]),
            (pw2, [d1, d3, d4, d5, d6, pw1, pw2]),
            (d1, [d1]),
            (d2, [d2])
        ]
        return graph_nodes, export_list[export_combination]
    def test_data_create_reversed_false(self):
        """Test export with ``create_reversed=False``.

        With reversed CREATE traversal disabled, exporting a group containing
        only the calculation's output data node must export the data node but
        NOT the calculation that created it.
        """
        import os
        import shutil
        import tempfile
        from aiida.common.datastructures import calc_states
        from aiida.orm import Data, Group
        from aiida.orm.data.base import Int
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.importexport import export
        from aiida.common.links import LinkType
        from aiida.orm.querybuilder import QueryBuilder
        tmp_folder = tempfile.mkdtemp()
        try:
            data_input = Int(1).store()
            data_output = Int(2).store()
            calc = JobCalculation()
            calc.set_computer(self.computer)
            calc.set_option('resources', {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            calc.store()
            calc.add_link_from(data_input, 'input', link_type=LinkType.INPUT)
            calc._set_state(calc_states.PARSING)
            data_output.add_link_from(calc, 'create', link_type=LinkType.CREATE)
            group = Group.create(name='test_group')
            group.add_nodes(data_output)
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export([group], outfile=export_file, silent=True, create_reversed=False)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            builder = QueryBuilder()
            builder.append(Data)
            self.assertEqual(builder.count(), 1, 'Expected a single Data node but got {}'.format(builder.count()))
            self.assertEqual(builder.all()[0][0].uuid, data_output.uuid)
            builder = QueryBuilder()
            builder.append(JobCalculation)
            self.assertEqual(builder.count(), 0, 'Expected no Calculation nodes')
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
    def test_complex_workflow_graph_links(self):
        """Test that all link types of the complex graph survive reimport.

        Exports the full complex graph, records all INPUT/CREATE/RETURN/CALL
        links beforehand, reimports into a clean DB and checks the link set
        is identical.
        """
        import os, shutil, tempfile
        from aiida.orm import Node
        from aiida.orm.importexport import export
        from aiida.common.links import LinkType
        from aiida.orm.querybuilder import QueryBuilder
        tmp_folder = tempfile.mkdtemp()
        try:
            graph_nodes, _ = self.construct_complex_graph()
            qb = QueryBuilder()
            qb.append(Node, project='uuid')
            qb.append(Node, project='uuid',
                      edge_project=['label', 'type'],
                      edge_filters={'type': {'in': (LinkType.INPUT.value,
                                                    LinkType.CREATE.value,
                                                    LinkType.RETURN.value,
                                                    LinkType.CALL.value)}})
            export_links = qb.all()
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export(graph_nodes, outfile=export_file, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            import_links = self.get_all_node_links()
            export_set = [tuple(_) for _ in export_links]
            import_set = [tuple(_) for _ in import_links]
            self.assertEquals(set(export_set), set(import_set))
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
    def test_complex_workflow_graph_export_set_expansion(self):
        """For each export configuration of the complex graph, exporting a
        single node must pull in exactly the expected closure of linked nodes.
        """
        import os, shutil, tempfile
        from aiida.orm.importexport import export
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm import Node
        # construct_complex_graph() supports 8 (node, expected-closure) cases.
        for export_conf in range(0, 8):
            graph_nodes, (export_node, export_target) = (
                self.construct_complex_graph(export_conf))
            tmp_folder = tempfile.mkdtemp()
            try:
                export_file = os.path.join(tmp_folder, 'export.tar.gz')
                export([export_node], outfile=export_file, silent=True)
                export_node_str = str(export_node)
                self.clean_db()
                self.insert_data()
                import_data(export_file, silent=True)
                # The only nodes left in the DB must be precisely the target set.
                qb = QueryBuilder()
                qb.append(Node, project='uuid')
                imported_node_uuids = set(str(_[0]) for _ in qb.all())
                export_target_uuids = set(str(_.uuid) for _ in export_target)
                from aiida.orm.utils import load_node
                self.assertEquals(
                    export_target_uuids,
                    imported_node_uuids,
                    "Problem in comparison of export node: " +
                    str(export_node_str) + "\n" +
                    "Expected set: " + str(export_target_uuids) + "\n" +
                    "Imported set: " + str(imported_node_uuids) + "\n" +
                    "Difference: " + str([load_node(_) for _ in
                                          export_target_uuids.symmetric_difference(
                                              imported_node_uuids)])
                )
            finally:
                shutil.rmtree(tmp_folder, ignore_errors=True)
    def test_recursive_export_input_and_create_links_proper(self):
        """Exporting the top workflow (wc2) must recursively include the
        called workflow, the inline calculation, shared inputs, and both
        outputs, preserving all CALL/INPUT/RETURN/CREATE links.
        """
        import os, shutil, tempfile
        from aiida.orm import Node
        from aiida.orm.data.base import Int
        from aiida.orm.importexport import export
        from aiida.orm.calculation.inline import InlineCalculation
        from aiida.orm.calculation.work import WorkCalculation
        from aiida.common.links import LinkType
        from aiida.orm.querybuilder import QueryBuilder
        tmp_folder = tempfile.mkdtemp()
        try:
            # Graph: wc2 -CALL-> wc1 -CALL-> c1; ni1/ni2 are inputs of both
            # wc1 and c1; no1/no2 are RETURNed by wc1 and CREATEd by c1.
            wc2 = WorkCalculation().store()
            wc1 = WorkCalculation().store()
            c1 = InlineCalculation().store()
            ni1 = Int(1).store()
            ni2 = Int(2).store()
            no1 = Int(1).store()
            no2 = Int(2).store()
            wc1.add_link_from(wc2, 'call', link_type=LinkType.CALL)
            c1.add_link_from(wc1, 'call', link_type=LinkType.CALL)
            wc1.add_link_from(ni1, 'ni1-to-wc1',
                              link_type=LinkType.INPUT)
            c1.add_link_from(ni1, 'ni1-to-c1',
                             link_type=LinkType.INPUT)
            wc1.add_link_from(ni2, 'ni2-to-wc1',
                              link_type=LinkType.INPUT)
            c1.add_link_from(ni2, 'ni2-to-c1',
                             link_type=LinkType.INPUT)
            no1.add_link_from(wc1, 'output',
                              link_type=LinkType.RETURN)
            no1.add_link_from(c1, 'output',
                              link_type=LinkType.CREATE)
            no2.add_link_from(wc1, 'output',
                              link_type=LinkType.RETURN)
            no2.add_link_from(c1, 'output',
                              link_type=LinkType.CREATE)
            # Snapshot all tracked links before wiping the DB.
            qb = QueryBuilder()
            qb.append(Node, project='uuid')
            qb.append(Node, project='uuid',
                      edge_project=['label', 'type'],
                      edge_filters={'type': {'in': (LinkType.INPUT.value,
                                                    LinkType.CREATE.value,
                                                    LinkType.RETURN.value,
                                                    LinkType.CALL.value)}})
            export_links = qb.all()
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export([wc2], outfile=export_file, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            import_links = self.get_all_node_links()
            export_set = [tuple(_) for _ in export_links]
            import_set = [tuple(_) for _ in import_links]
            self.assertEquals(set(export_set), set(import_set))
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
    def test_links_for_workflows(self):
        """Whether exporting from the output node (o1) or the workflow (w1),
        the CREATE/INPUT/RETURN links must be reproduced on import.
        """
        import os, shutil, tempfile
        from aiida.orm.data.base import Int
        from aiida.orm.importexport import export
        from aiida.orm.calculation.work import WorkCalculation
        from aiida.common.links import LinkType
        tmp_folder = tempfile.mkdtemp()
        try:
            w1 = WorkCalculation().store()
            w2 = WorkCalculation().store()
            i1 = Int(1).store()
            o1 = Int(2).store()
            w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)
            w1.add_link_from(w2, 'call', link_type=LinkType.CALL)
            o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)
            o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)
            # Expected links: only the three persisted types (CALL excluded).
            links_wanted = [l for l in self.get_all_node_links() if l[3] in
                            (LinkType.CREATE.value,
                             LinkType.INPUT.value,
                             LinkType.RETURN.value)]
            export_file_1 = os.path.join(tmp_folder, 'export-1.tar.gz')
            export_file_2 = os.path.join(tmp_folder, 'export-2.tar.gz')
            export([o1], outfile=export_file_1, silent=True)
            export([w1], outfile=export_file_2, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(export_file_1, silent=True)
            links_in_db = self.get_all_node_links()
            self.assertEquals(sorted(links_wanted), sorted(links_in_db))
            self.clean_db()
            self.insert_data()
            import_data(export_file_2, silent=True)
            links_in_db = self.get_all_node_links()
            self.assertEquals(sorted(links_wanted), sorted(links_in_db))
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
    def test_double_return_links_for_workflows(self):
        """A node RETURNed by two different workflows must keep both RETURN
        links (plus all other links and nodes) across export/import.
        """
        import os, shutil, tempfile
        from aiida.orm.data.base import Int
        from aiida.orm.importexport import export
        from aiida.orm.calculation.work import WorkCalculation
        from aiida.common.links import LinkType
        from aiida.orm.querybuilder import QueryBuilder
        from aiida.orm.node import Node
        tmp_folder = tempfile.mkdtemp()
        try:
            w1 = WorkCalculation().store()
            w2 = WorkCalculation().store()
            i1 = Int(1).store()
            o1 = Int(2).store()
            w1.add_link_from(i1, 'input-i1', link_type=LinkType.INPUT)
            w1.add_link_from(w2, 'call', link_type=LinkType.CALL)
            o1.add_link_from(w1, 'output', link_type=LinkType.CREATE)
            o1.add_link_from(w1, 'return', link_type=LinkType.RETURN)
            # Second RETURN onto the same output node, from the other workflow.
            o1.add_link_from(w2, 'return', link_type=LinkType.RETURN)
            uuids_wanted = set(_.uuid for _ in (w1, o1, i1, w2))
            # NOTE(review): filters use legacy string link-type names here,
            # unlike the LinkType.*.value enums used elsewhere — confirm both
            # spellings match what get_all_node_links() returns.
            links_wanted = [l for l in self.get_all_node_links() if l[3] in (
                'createlink', 'inputlink', 'returnlink', 'calllink')]
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export([o1, w1, w2, i1],
                   outfile=export_file, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            uuids_in_db = [str(uuid) for [uuid] in
                           QueryBuilder().append(Node, project='uuid').all()]
            self.assertEquals(sorted(uuids_wanted), sorted(uuids_in_db))
            links_in_db = self.get_all_node_links()
            self.assertEquals(sorted(links_wanted), sorted(links_in_db))
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
    def test_that_solo_code_is_exported_correctly(self):
        """A Code node exported on its own keeps its label after import."""
        import os, shutil, tempfile
        from aiida.orm.utils import load_node
        from aiida.orm.importexport import export
        from aiida.orm.code import Code
        tmp_folder = tempfile.mkdtemp()
        try:
            code_label = 'test_code1'
            code = Code()
            code.set_remote_computer_exec((self.computer, '/bin/true'))
            code.label = code_label
            code.store()
            code_uuid = code.uuid
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export([code], outfile=export_file, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            self.assertEquals(load_node(code_uuid).label, code_label)
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
    def test_that_input_code_is_exported_correctly(self):
        """Exporting a calculation must drag along its input Code node and
        recreate exactly one INPUT link between them on import.
        """
        import os, shutil, tempfile
        from aiida.orm.utils import load_node
        from aiida.orm.importexport import export
        from aiida.common.links import LinkType
        from aiida.orm.calculation.job import JobCalculation
        from aiida.orm.code import Code
        from aiida.orm.querybuilder import QueryBuilder
        tmp_folder = tempfile.mkdtemp()
        try:
            code_label = 'test_code1'
            code = Code()
            code.set_remote_computer_exec((self.computer, '/bin/true'))
            code.label = code_label
            code.store()
            code_uuid = code.uuid
            jc = JobCalculation()
            jc.set_computer(self.computer)
            jc.set_option('resources',
                          {"num_machines": 1, "num_mpiprocs_per_machine": 1})
            jc.store()
            jc.add_link_from(code, 'code', link_type=LinkType.INPUT)
            export_file = os.path.join(tmp_folder, 'export.tar.gz')
            export([jc], outfile=export_file, silent=True)
            self.clean_db()
            self.insert_data()
            import_data(export_file, silent=True)
            self.assertEquals(load_node(code_uuid).label, code_label)
            # Exactly one INPUT edge from the code to the calculation.
            qb = QueryBuilder()
            qb.append(Code, project='uuid')
            qb.append(JobCalculation, project='uuid',
                      edge_project=['label', 'type'],
                      edge_filters={'type': {'==': LinkType.INPUT.value}})
            self.assertEquals(qb.count(), 1,
                              "Expected to find one and only one link from "
                              "code to the calculation node. {} found."
                              .format(qb.count()))
        finally:
            shutil.rmtree(tmp_folder, ignore_errors=True)
| true
| true
|
f704d12c29a0d4090d125d43a95bf1b2c7c9f9ab
| 1,372
|
py
|
Python
|
mvfy/visual/utils/streamer.py
|
erwingforerocastro/mvfy_visual_py
|
8740f21ffa68d0cfced0d0684251b2198488cb0e
|
[
"MIT"
] | null | null | null |
mvfy/visual/utils/streamer.py
|
erwingforerocastro/mvfy_visual_py
|
8740f21ffa68d0cfced0d0684251b2198488cb0e
|
[
"MIT"
] | null | null | null |
mvfy/visual/utils/streamer.py
|
erwingforerocastro/mvfy_visual_py
|
8740f21ffa68d0cfced0d0684251b2198488cb0e
|
[
"MIT"
] | null | null | null |
import asyncio
import base64
import threading
import cv2
import numpy as np
from flask_socketio import SocketIO, emit
from flask import Flask, render_template
import multiprocessing
class Streamer():
    """Helpers for streaming OpenCV frames, either to Socket.IO clients or
    to a local preview window.
    """

    def __init__(self) -> None:
        """Constructor
        """

    @staticmethod
    async def stream_socket(
        url_server: str,
        app: 'Flask' = None,
        socket_options: 'dict' = None,
        socket_msg: 'str' = "mvfy_visual_img",
    )-> 'function':
        """Start a Socket.IO server in a background thread and return an
        async callable that JPEG-encodes an image and emits it to clients.

        :param url_server: value passed to ``socketio.run``.
            NOTE(review): flask_socketio's ``run`` takes the Flask app as
            its first argument — confirm this call is what is intended.
        :param app: existing Flask app; a fresh one is created when ``None``.
        :param socket_options: extra kwargs for ``SocketIO``; may be ``None``.
        :param socket_msg: Socket.IO event name used for every frame.
        :return: ``wraper_function(img, extension, size)`` coroutine.
        """
        app = Flask(__name__) if app is None else app
        # Bug fix: ``**None`` raised TypeError when socket_options was left
        # at its default; substitute an empty dict.
        socketio = SocketIO(app, **(socket_options or {}))
        # Bug fix: Thread(...).run() executed the server in the *current*
        # thread (blocking the caller forever); start() runs it in the
        # background. daemon=True lets the process exit cleanly.
        threading.Thread(
            target=lambda: socketio.run(url_server), daemon=True
        ).start()

        async def wraper_function(img, extension: str = ".jpg", size: tuple = (1920, 1080)):
            # Bug fix: when size is None the original referenced an
            # undefined local ``frame`` (NameError); use the raw image.
            frame = cv2.resize(img, size) if size is not None else img
            _, buffer = cv2.imencode(extension, frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
            data = base64.b64encode(buffer)
            socketio.emit(socket_msg, {
                "data": data
            })
        return wraper_function

    @staticmethod
    async def stream_local(
        img: np.array,
        size: tuple = (1920, 1080),
        title: str = "title"
    ) -> None:
        """Show ``img`` in a local OpenCV window, resized when ``size`` given.

        NOTE(review): there is no cv2.waitKey here — the window may not
        repaint unless the caller pumps the GUI event loop; confirm usage.
        """
        if size is not None:
            img = cv2.resize(img, size)
        cv2.imshow(title, img)
| 25.886792
| 92
| 0.573615
|
import asyncio
import base64
import threading
import cv2
import numpy as np
from flask_socketio import SocketIO, emit
from flask import Flask, render_template
import multiprocessing
class Streamer():
def __init__(self) -> None:
@staticmethod
async def stream_socket(
url_server: str,
app: 'Flask' = None,
socket_options: 'dict' = None,
socket_msg: 'str' = "mvfy_visual_img",
)-> 'function':
app = Flask(__name__) if app is None else app
socketio = SocketIO(app, **socket_options)
threading.Thread(target=lambda: socketio.run(url_server)).run()
async def wraper_function(img, extension: str = ".jpg", size: tuple = (1920, 1080)):
if size is not None:
frame = cv2.resize(img, size)
_, buffer = cv2.imencode(extension, frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
data = base64.b64encode(buffer)
socketio.emit(socket_msg, {
"data": data
})
return wraper_function
@staticmethod
async def stream_local(
img: np.array,
size: tuple = (1920, 1080),
title: str = "title"
) -> None:
if size is not None:
img = cv2.resize(img, size)
cv2.imshow(title, img)
| true
| true
|
f704d16684d90b723369d286ddc555a9a4d93ac8
| 4,271
|
py
|
Python
|
flask_tutorial/marshmallow_demo/flask_exmaple.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:07:56.000Z
|
2018-12-19T22:07:56.000Z
|
marshmallow_demo/flask_exmaple.py
|
ftconan/flask-tutorial
|
d5164c93b5e6a6e3d2b8980e4b846adb7cb21aee
|
[
"MIT"
] | 12
|
2020-03-14T05:32:26.000Z
|
2022-03-12T00:08:49.000Z
|
marshmallow_demo/flask_exmaple.py
|
ftconan/flask-tutorial
|
d5164c93b5e6a6e3d2b8980e4b846adb7cb21aee
|
[
"MIT"
] | 1
|
2018-12-19T22:08:00.000Z
|
2018-12-19T22:08:00.000Z
|
# coding=utf-8
"""
@author: magician
@date: 2018/9/14
"""
import datetime
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, jsonify, request
from sqlalchemy.exc import IntegrityError
from marshmallow import Schema, fields, ValidationError, pre_load
app = Flask(__name__)
# In-memory SQLite database: contents are lost when the process exits.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
# MODELS
class Author(db.Model):
    """Quote author; referenced by Quote.author_id."""
    id = db.Column(db.Integer, primary_key=True)
    first = db.Column(db.String(80))
    last = db.Column(db.String(80))
class Quote(db.Model):
    """A quotation with its author and the time it was posted."""
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String, nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
    # Lazy backref: author.quotes is a query, not a loaded list.
    author = db.relationship(
        'Author',
        backref=db.backref('quotes', lazy='dynamic'),
    )
    posted_at = db.Column(db.DateTime)
# SCHEMAS
class AuthorSchema(Schema):
    """Serialization schema for Author rows."""
    id = fields.Int(dump_only=True)
    first = fields.Str()
    last = fields.Str()
    # Derived, output-only "Last, First" field.
    formatted_name = fields.Method('format_name', dump_only=True)

    def format_name(self, author):
        """Return the author's name as 'Last, First'."""
        return '{}, {}'.format(author.last, author.first)
# Custom validator
def must_not_be_blank(data):
    """Field validator: reject falsy values (None, '', empty containers)."""
    if data:
        return
    raise ValidationError('Data not provided.')
class QuoteSchema(Schema):
    """Serialization schema for Quote rows; nests the author."""
    id = fields.Int(dump_only=True)
    author = fields.Nested(AuthorSchema, validate=must_not_be_blank)
    content = fields.Str(required=True, validate=must_not_be_blank)
    posted_at = fields.DateTime(dump_only=True)

    # Allow client to pass author's full name in request body
    # e.g. {"author': 'Tim Peters"} rather than {"first": "Tim", "last": "Peters"}
    @pre_load
    def process_author(self, data, **kwargs):
        """Split a full-name 'author' string into {'first', 'last'}.

        Bug fix: accepts **kwargs because marshmallow 3 invokes pre_load
        hooks with extra keyword arguments (``many``, ``partial``), which
        made the original two-argument signature raise TypeError.
        """
        author_name = data.get('author')
        if author_name:
            # partition instead of split: a name with more than two words
            # no longer raises ValueError; everything after the first space
            # becomes the last name.
            first, _, last = author_name.partition(' ')
            author_dict = dict(first=first, last=last)
        else:
            author_dict = {}
        data['author'] = author_dict
        return data
# Module-level schema singletons reused by all the view functions below;
# quotes_schema serializes lists and exposes only id and content.
author_schema = AuthorSchema()
authors_schema = AuthorSchema(many=True)
quote_schema = QuoteSchema()
quotes_schema = QuoteSchema(many=True, only=('id', 'content'))
# API
@app.route('/authors')
def get_authors():
    """Return every author, serialized, as {'authors': [...]}."""
    all_authors = Author.query.all()
    serialized = authors_schema.dump(all_authors)
    return jsonify({'authors': serialized})
@app.route('/authors/<int:pk>')
def get_author(pk):
    """Return one author and all of their quotes, or 400 on DB error."""
    try:
        author = Author.query.get(pk)
    except IntegrityError:
        return jsonify({'message': 'Author could not be found.'}), 400
    serialized_author = author_schema.dump(author)
    serialized_quotes = quotes_schema.dump(author.quotes.all())
    return jsonify({'author': serialized_author, 'quotes': serialized_quotes})
@app.route('/quotes/', methods=['GET'])
def get_quotes():
    """List every quote (id and content only)."""
    all_quotes = Quote.query.all()
    serialized = quotes_schema.dump(all_quotes, many=True)
    return jsonify({'quotes': serialized})
@app.route('/quotes/<int:pk>')
def get_quote(pk):
    """Return a single quote by primary key, or 400 on DB error."""
    try:
        quote = Quote.query.get(pk)
    except IntegrityError:
        return jsonify({'message': 'Quote could not be found.'}), 400
    serialized = quote_schema.dump(quote)
    return jsonify({'quote': serialized})
@app.route('/quotes/', methods=['POST'])
def new_quote():
    """Create a quote (and its author, if unknown) from posted JSON.

    Returns 400 on missing body, 422 on schema validation failure.
    """
    json_data = request.get_json()
    if not json_data:
        return jsonify({'message': 'No input data provided'}), 400
    # Validate and deserialize input
    try:
        data = quote_schema.load(json_data)
    except ValidationError as err:
        return jsonify(err.messages), 422
    first, last = data['author']['first'], data['author']['last']
    author = Author.query.filter_by(first=first, last=last).first()
    if author is None:
        # Create a new author
        author = Author(first=first, last=last)
        db.session.add(author)
    # Create new quote
    quote = Quote(
        content=data['content'],
        author=author,
        posted_at=datetime.datetime.utcnow(),
    )
    db.session.add(quote)
    db.session.commit()
    # Re-fetch so the dump includes the DB-assigned id and committed state.
    result = quote_schema.dump(Quote.query.get(quote.id))
    return jsonify({
        'message': 'Created new quote.',
        'quote': result,
    })
if __name__ == '__main__':
    # Create the tables in the in-memory DB, then serve on port 5000.
    db.create_all()
    app.run(debug=True, port=5000)
| 27.733766
| 82
| 0.661203
|
import datetime
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, jsonify, request
from sqlalchemy.exc import IntegrityError
from marshmallow import Schema, fields, ValidationError, pre_load
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class Author(db.Model):
id = db.Column(db.Integer, primary_key=True)
first = db.Column(db.String(80))
last = db.Column(db.String(80))
class Quote(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('author.id'))
author = db.relationship(
'Author',
backref=db.backref('quotes', lazy='dynamic'),
)
posted_at = db.Column(db.DateTime)
class AuthorSchema(Schema):
id = fields.Int(dump_only=True)
first = fields.Str()
last = fields.Str()
formatted_name = fields.Method('format_name', dump_only=True)
def format_name(self, author):
return '{}, {}'.format(author.last, author.first)
def must_not_be_blank(data):
if not data:
raise ValidationError('Data not provided.')
class QuoteSchema(Schema):
id = fields.Int(dump_only=True)
author = fields.Nested(AuthorSchema, validate=must_not_be_blank)
content = fields.Str(required=True, validate=must_not_be_blank)
posted_at = fields.DateTime(dump_only=True)
# e.g. {"author': 'Tim Peters"} rather than {"first": "Tim", "last": "Peters"}
@pre_load
def process_author(self, data):
author_name = data.get('author')
if author_name:
first, last = author_name.split(' ')
author_dict = dict(first=first, last=last)
else:
author_dict = {}
data['author'] = author_dict
return data
author_schema = AuthorSchema()
authors_schema = AuthorSchema(many=True)
quote_schema = QuoteSchema()
quotes_schema = QuoteSchema(many=True, only=('id', 'content'))
# API
@app.route('/authors')
def get_authors():
authors = Author.query.all()
# Serialize the queryset
result = authors_schema.dump(authors)
return jsonify({'authors': result})
@app.route('/authors/<int:pk>')
def get_author(pk):
try:
author = Author.query.get(pk)
except IntegrityError:
return jsonify({'message': 'Author could not be found.'}), 400
author_result = author_schema.dump(author)
quotes_result = quotes_schema.dump(author.quotes.all())
return jsonify({'author': author_result, 'quotes': quotes_result})
@app.route('/quotes/', methods=['GET'])
def get_quotes():
quotes = Quote.query.all()
result = quotes_schema.dump(quotes, many=True)
return jsonify({'quotes': result})
@app.route('/quotes/<int:pk>')
def get_quote(pk):
try:
quote = Quote.query.get(pk)
except IntegrityError:
return jsonify({'message': 'Quote could not be found.'}), 400
result = quote_schema.dump(quote)
return jsonify({'quote': result})
@app.route('/quotes/', methods=['POST'])
def new_quote():
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
# Validate and deserialize input
try:
data = quote_schema.load(json_data)
except ValidationError as err:
return jsonify(err.messages), 422
first, last = data['author']['first'], data['author']['last']
author = Author.query.filter_by(first=first, last=last).first()
if author is None:
# Create a new author
author = Author(first=first, last=last)
db.session.add(author)
# Create new quote
quote = Quote(
content=data['content'],
author=author,
posted_at=datetime.datetime.utcnow(),
)
db.session.add(quote)
db.session.commit()
result = quote_schema.dump(Quote.query.get(quote.id))
return jsonify({
'message': 'Created new quote.',
'quote': result,
})
if __name__ == '__main__':
db.create_all()
app.run(debug=True, port=5000)
| true
| true
|
f704d2308a4a0e9874c346c623e3850cac9abfa5
| 6,064
|
py
|
Python
|
frappe/tests/test_twofactor.py
|
snehapatil1/frappe
|
dd2c33e34ad120e6305a2fa230a72d23a7a03e98
|
[
"MIT"
] | 1
|
2020-12-07T22:35:21.000Z
|
2020-12-07T22:35:21.000Z
|
frappe/tests/test_twofactor.py
|
snehapatil1/frappe
|
dd2c33e34ad120e6305a2fa230a72d23a7a03e98
|
[
"MIT"
] | 11
|
2018-04-01T18:36:05.000Z
|
2018-10-04T07:56:07.000Z
|
frappe/tests/test_twofactor.py
|
snehapatil1/frappe
|
dd2c33e34ad120e6305a2fa230a72d23a7a03e98
|
[
"MIT"
] | 3
|
2018-01-16T17:59:55.000Z
|
2019-09-24T16:02:10.000Z
|
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest, frappe, pyotp
from werkzeug.wrappers import Request
from werkzeug.test import EnvironBuilder
from frappe.auth import HTTPRequest
from frappe.utils import cint
from frappe.twofactor import (should_run_2fa, authenticate_for_2factor, get_cached_user_pass,
two_factor_is_enabled_for_, confirm_otp_token, get_otpsecret_for_, get_verification_obj,
render_string_template, two_factor_is_enabled)
import time
class TestTwoFactor(unittest.TestCase):
	"""Integration tests for frappe's two-factor authentication helpers."""
	def setUp(self):
		# Build a fake login HTTP request so frappe.local.login_manager exists.
		self.http_requests = create_http_request()
		self.login_manager = frappe.local.login_manager
		self.user = self.login_manager.user
	def tearDown(self):
		# Reset 2FA state and caches so tests stay independent.
		frappe.local.response['verification'] = None
		frappe.local.response['tmp_id'] = None
		disable_2fa()
		frappe.clear_cache(user=self.user)
	def test_should_run_2fa(self):
		'''Should return true if enabled.'''
		toggle_2fa_all_role(state=True)
		self.assertTrue(should_run_2fa(self.user))
		toggle_2fa_all_role(state=False)
		self.assertFalse(should_run_2fa(self.user))
	def test_get_cached_user_pass(self):
		'''Cached data should not contain user and pass before 2fa.'''
		user,pwd = get_cached_user_pass()
		self.assertTrue(all([not user, not pwd]))
	def test_authenticate_for_2factor(self):
		'''Verification obj and tmp_id should be set in frappe.local.'''
		authenticate_for_2factor(self.user)
		verification_obj = frappe.local.response['verification']
		tmp_id = frappe.local.response['tmp_id']
		self.assertTrue(verification_obj)
		self.assertTrue(tmp_id)
		# Credentials and OTP secret must be cached under the tmp_id prefix.
		for k in ['_usr','_pwd','_otp_secret']:
			self.assertTrue(frappe.cache().get('{0}{1}'.format(tmp_id,k)),
							'{} not available'.format(k))
	def test_two_factor_is_enabled(self):
		'''
		1. Should return true, if enabled and not bypass_2fa_for_retricted_ip_users
		2. Should return false, if not enabled
		3. Should return true, if enabled and bypass_2fa_for_retricted_ip_users and not user.restricted_ip
		4. Should return false, if enabled and bypass_2fa_for_retricted_ip_users and user.restricted_ip
		'''
		#Scenario 1
		disable_2fa()
		self.assertFalse(two_factor_is_enabled(self.user))
		#Scenario 2
		enable_2fa()
		self.assertTrue(two_factor_is_enabled(self.user))
		#Scenario 3
		enable_2fa()
		user = frappe.get_doc('User', self.user)
		user.restrict_ip = frappe.local.request_ip
		user.save()
		self.assertTrue(two_factor_is_enabled(self.user))
		#Scenario 4
		user = frappe.get_doc('User', self.user)
		user.restrict_ip = ""
		user.save()
		enable_2fa(1)
		self.assertTrue(two_factor_is_enabled(self.user))
		#Scenario 5
		user = frappe.get_doc('User', self.user)
		user.restrict_ip = frappe.local.request_ip
		user.save()
		enable_2fa(1)
		self.assertFalse(two_factor_is_enabled(self.user))
	def test_two_factor_is_enabled_for_user(self):
		'''Should return true if enabled for user.'''
		toggle_2fa_all_role(state=True)
		self.assertTrue(two_factor_is_enabled_for_(self.user))
		self.assertFalse(two_factor_is_enabled_for_("Administrator"))
		toggle_2fa_all_role(state=False)
		self.assertFalse(two_factor_is_enabled_for_(self.user))
	def test_get_otpsecret_for_user(self):
		'''OTP secret should be set for user.'''
		self.assertTrue(get_otpsecret_for_(self.user))
		self.assertTrue(frappe.db.get_default(self.user + '_otpsecret'))
	def test_confirm_otp_token(self):
		'''Ensure otp is confirmed'''
		authenticate_for_2factor(self.user)
		tmp_id = frappe.local.response['tmp_id']
		otp = 'wrongotp'
		with self.assertRaises(frappe.AuthenticationError):
			confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
		otp = get_otp(self.user)
		self.assertTrue(confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id))
		# TOTP tokens rotate: waiting should invalidate the previous one.
		if frappe.flags.tests_verbose:
			print('Sleeping for 30secs to confirm token expires..')
		time.sleep(30)
		with self.assertRaises(frappe.AuthenticationError):
			confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
	def test_get_verification_obj(self):
		'''Confirm verification object is returned.'''
		otp_secret = get_otpsecret_for_(self.user)
		token = int(pyotp.TOTP(otp_secret).now())
		self.assertTrue(get_verification_obj(self.user,token,otp_secret))
	def test_render_string_template(self):
		'''String template renders as expected with variables.'''
		args = {'issuer_name':'Frappe Technologies'}
		_str = 'Verification Code from {{issuer_name}}'
		_str = render_string_template(_str,args)
		self.assertEqual(_str,'Verification Code from Frappe Technologies')
def set_request(**kwargs):
	"""Install a fake WSGI request (built from kwargs) into frappe.local."""
	frappe.local.request = Request(EnvironBuilder(**kwargs).get_environ())
def create_http_request():
	'''Get http request object.'''
	# POST /login with test credentials; instantiating HTTPRequest runs
	# frappe's auth pipeline and populates frappe.local.login_manager.
	set_request(method='POST', path='login')
	enable_2fa()
	frappe.form_dict['usr'] = 'test@erpnext.com'
	frappe.form_dict['pwd'] = 'test'
	frappe.local.form_dict['cmd'] = 'login'
	http_requests = HTTPRequest()
	return http_requests
def enable_2fa(bypass_two_factor_auth=0):
	'''Enable Two factor in system settings.'''
	# bypass_two_factor_auth: when truthy, restricted-IP users skip 2FA.
	system_settings = frappe.get_doc('System Settings')
	system_settings.enable_two_factor_auth = 1
	system_settings.bypass_2fa_for_retricted_ip_users = cint(bypass_two_factor_auth)
	system_settings.two_factor_method = 'OTP App'
	system_settings.save(ignore_permissions=True)
	frappe.db.commit()
def disable_2fa():
	"""Turn two-factor auth (and its IP bypass) off in System Settings."""
	settings = frappe.get_doc('System Settings')
	settings.enable_two_factor_auth = 0
	settings.bypass_2fa_for_retricted_ip_users = 0
	settings.save(ignore_permissions=True)
	frappe.db.commit()
def toggle_2fa_all_role(state=None):
	'''Enable or disable 2fa for 'all' role on the system.

	When ``state`` is None the current value is inverted. Bug fix: the
	original computed ``False if ... else False`` — always False — so a
	bare toggle could never turn 2FA on. Non-boolean states are ignored.
	'''
	all_role = frappe.get_doc('Role','All')
	if state == None:
		state = not all_role.two_factor_auth
	if state not in [True,False]:return
	all_role.two_factor_auth = state
	all_role.save(ignore_permissions=True)
	frappe.db.commit()
def get_otp(user):
	"""Return the current TOTP token for `user`'s stored OTP secret."""
	return pyotp.TOTP(get_otpsecret_for_(user)).now()
| 35.255814
| 100
| 0.774571
|
from __future__ import unicode_literals
import unittest, frappe, pyotp
from werkzeug.wrappers import Request
from werkzeug.test import EnvironBuilder
from frappe.auth import HTTPRequest
from frappe.utils import cint
from frappe.twofactor import (should_run_2fa, authenticate_for_2factor, get_cached_user_pass,
two_factor_is_enabled_for_, confirm_otp_token, get_otpsecret_for_, get_verification_obj,
render_string_template, two_factor_is_enabled)
import time
class TestTwoFactor(unittest.TestCase):
def setUp(self):
self.http_requests = create_http_request()
self.login_manager = frappe.local.login_manager
self.user = self.login_manager.user
def tearDown(self):
frappe.local.response['verification'] = None
frappe.local.response['tmp_id'] = None
disable_2fa()
frappe.clear_cache(user=self.user)
def test_should_run_2fa(self):
toggle_2fa_all_role(state=True)
self.assertTrue(should_run_2fa(self.user))
toggle_2fa_all_role(state=False)
self.assertFalse(should_run_2fa(self.user))
def test_get_cached_user_pass(self):
user,pwd = get_cached_user_pass()
self.assertTrue(all([not user, not pwd]))
def test_authenticate_for_2factor(self):
authenticate_for_2factor(self.user)
verification_obj = frappe.local.response['verification']
tmp_id = frappe.local.response['tmp_id']
self.assertTrue(verification_obj)
self.assertTrue(tmp_id)
for k in ['_usr','_pwd','_otp_secret']:
self.assertTrue(frappe.cache().get('{0}{1}'.format(tmp_id,k)),
'{} not available'.format(k))
def test_two_factor_is_enabled(self):
disable_2fa()
self.assertFalse(two_factor_is_enabled(self.user))
enable_2fa()
self.assertTrue(two_factor_is_enabled(self.user))
enable_2fa()
user = frappe.get_doc('User', self.user)
user.restrict_ip = frappe.local.request_ip
user.save()
self.assertTrue(two_factor_is_enabled(self.user))
user = frappe.get_doc('User', self.user)
user.restrict_ip = ""
user.save()
enable_2fa(1)
self.assertTrue(two_factor_is_enabled(self.user))
user = frappe.get_doc('User', self.user)
user.restrict_ip = frappe.local.request_ip
user.save()
enable_2fa(1)
self.assertFalse(two_factor_is_enabled(self.user))
def test_two_factor_is_enabled_for_user(self):
toggle_2fa_all_role(state=True)
self.assertTrue(two_factor_is_enabled_for_(self.user))
self.assertFalse(two_factor_is_enabled_for_("Administrator"))
toggle_2fa_all_role(state=False)
self.assertFalse(two_factor_is_enabled_for_(self.user))
def test_get_otpsecret_for_user(self):
self.assertTrue(get_otpsecret_for_(self.user))
self.assertTrue(frappe.db.get_default(self.user + '_otpsecret'))
def test_confirm_otp_token(self):
authenticate_for_2factor(self.user)
tmp_id = frappe.local.response['tmp_id']
otp = 'wrongotp'
with self.assertRaises(frappe.AuthenticationError):
confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
otp = get_otp(self.user)
self.assertTrue(confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id))
if frappe.flags.tests_verbose:
print('Sleeping for 30secs to confirm token expires..')
time.sleep(30)
with self.assertRaises(frappe.AuthenticationError):
confirm_otp_token(self.login_manager,otp=otp,tmp_id=tmp_id)
def test_get_verification_obj(self):
otp_secret = get_otpsecret_for_(self.user)
token = int(pyotp.TOTP(otp_secret).now())
self.assertTrue(get_verification_obj(self.user,token,otp_secret))
def test_render_string_template(self):
args = {'issuer_name':'Frappe Technologies'}
_str = 'Verification Code from {{issuer_name}}'
_str = render_string_template(_str,args)
self.assertEqual(_str,'Verification Code from Frappe Technologies')
def set_request(**kwargs):
builder = EnvironBuilder(**kwargs)
frappe.local.request = Request(builder.get_environ())
def create_http_request():
set_request(method='POST', path='login')
enable_2fa()
frappe.form_dict['usr'] = 'test@erpnext.com'
frappe.form_dict['pwd'] = 'test'
frappe.local.form_dict['cmd'] = 'login'
http_requests = HTTPRequest()
return http_requests
def enable_2fa(bypass_two_factor_auth=0):
system_settings = frappe.get_doc('System Settings')
system_settings.enable_two_factor_auth = 1
system_settings.bypass_2fa_for_retricted_ip_users = cint(bypass_two_factor_auth)
system_settings.two_factor_method = 'OTP App'
system_settings.save(ignore_permissions=True)
frappe.db.commit()
def disable_2fa():
system_settings = frappe.get_doc('System Settings')
system_settings.enable_two_factor_auth = 0
system_settings.bypass_2fa_for_retricted_ip_users = 0
system_settings.save(ignore_permissions=True)
frappe.db.commit()
def toggle_2fa_all_role(state=None):
all_role = frappe.get_doc('Role','All')
if state == None:
state = False if all_role.two_factor_auth == True else False
if state not in [True,False]:return
all_role.two_factor_auth = state
all_role.save(ignore_permissions=True)
frappe.db.commit()
def get_otp(user):
otp_secret = get_otpsecret_for_(user)
otp = pyotp.TOTP(otp_secret)
return otp.now()
| true
| true
|
f704d2c864bad63628fb027f82d866b3cfbf5677
| 6,290
|
py
|
Python
|
conanfile.py
|
madebr/conan-swig_installer-1
|
20cd423f4a5e6e1b9e8a7633fa22ad429096c499
|
[
"MIT"
] | null | null | null |
conanfile.py
|
madebr/conan-swig_installer-1
|
20cd423f4a5e6e1b9e8a7633fa22ad429096c499
|
[
"MIT"
] | null | null | null |
conanfile.py
|
madebr/conan-swig_installer-1
|
20cd423f4a5e6e1b9e8a7633fa22ad429096c499
|
[
"MIT"
] | null | null | null |
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
from contextlib import contextmanager
import os
import shutil
class SwigConan(ConanFile):
    # Installer recipe: builds the swig executable for the build platform,
    # so the package id is keyed on os_build/arch_build (see package_id).
    name = "swig_installer"
    version = "4.0.1"
    description = "SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages."
    topics = ("conan", "swig", "python", "java", "wrapper")
    url = "https://github.com/bincrafters/conan-swig_installer"
    homepage = "http://www.swig.org"
    author = "Bincrafters <bincrafters@gmail.com>"
    license = "GPL-3.0-or-later"
    exports = ["LICENSE.md"]
    settings = "os_build", "arch_build", "compiler", "os", "arch"
    _source_subfolder = "source_subfolder"
def configure(self):
# Verify build configuration
if str(self.settings.os_build) != str(self.settings.os):
raise ConanInvalidConfiguration("settings.os_build must be equal to settings.os")
if str(self.settings.arch_build) != str(self.settings.arch_build):
raise ConanInvalidConfiguration("settings.arch_build must be equal to settings.arch_build")
    def package_id(self):
        # The produced binary depends only on the build platform: drop
        # compiler/os/arch from the id and key it on os_build/arch_build.
        del self.info.settings.compiler
        del self.info.settings.os
        del self.info.settings.arch
        self.info.include_build_settings()
    def build_requirements(self):
        # msys2 supplies the unix shell that autotools needs on Windows.
        if tools.os_info.is_windows:
            self.build_requires("msys2/20161025")
        # bison: winflexbison on Windows hosts, plain bison elsewhere.
        if self.settings.os_build == "Windows":
            self.build_requires("winflexbison/2.5.18@bincrafters/stable")
        else:
            self.build_requires("bison_installer/3.3.2@bincrafters/stable")
        self.build_requires("pcre/8.41")
        # cccl wraps MSVC's cl.exe behind a gcc-like CLI for ./configure.
        if self.settings.compiler == "Visual Studio":
            self.build_requires("cccl_installer/1.0@bincrafters/stable")
def system_requirements(self):
if self.develop:
if tools.os_info.with_yum:
installer = tools.SystemPackageTool()
packages = [
"autoconf",
"automake",
]
for package in packages:
installer.install(package)
def source(self):
url = "https://github.com/swig/swig/archive/rel-{}.tar.gz".format(self.version)
sha256 = "2eaf6fb89d071d1be280bf995c63360b3729860c0da64948123b5d7e4cfb6cb7"
foldername = "swig-rel-{}".format(self.version)
tools.get(url, sha256=sha256)
os.rename(foldername, self._source_subfolder)
@contextmanager
def _build_environment(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
yield
else:
yield
def _patch_sources(self):
tools.replace_in_file(os.path.join(self._source_subfolder, "configure.ac"),
"AC_DEFINE_UNQUOTED(SWIG_LIB_WIN_UNIX",
"SWIG_LIB_WIN_UNIX=""\nAC_DEFINE_UNQUOTED(SWIG_LIB_WIN_UNIX")
def build(self):
self._patch_sources()
with tools.chdir(os.path.join(self.build_folder, self._source_subfolder)):
self.run('./autogen.sh', win_bash=tools.os_info.is_windows)
env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
deps_libpaths = env_build.library_paths
deps_libs = env_build.libs
deps_defines = env_build.defines
if self.settings.os_build == "Windows" and self.settings.compiler != "Visual Studio":
env_build.link_flags.append("-static")
libargs = list("-L\"{}\"".format(p) for p in deps_libpaths) + list("-l\"{}\"".format(l) for l in deps_libs)
args = [
"PCRE_LIBS={}".format(" ".join(libargs)),
"PCRE_CPPFLAGS={}".format(" ".join("-D{}".format(define) for define in deps_defines)),
"--host={}".format(tools.detected_architecture()),
]
if self.settings.compiler == "Visual Studio":
self.output.warn("Visual Studio compiler cannot create ccache-swig. Disabling ccache-swig.")
args.append("--disable-ccache")
with self._build_environment():
env_build.configure(configure_dir=os.path.join(self.build_folder, self._source_subfolder), args=args)
with tools.environment_append({"CONAN_CPU_COUNT": "1" if self.settings.compiler == "Visual Studio" else str(tools.cpu_count())}):
env_build.make()
def package(self):
self.copy(pattern="LICENSE*", dst="licenses", src=self._source_subfolder)
self.copy(pattern="COPYRIGHT", dst="licenses", src=self._source_subfolder)
with tools.chdir(self.build_folder):
env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
env_build.install()
if self.settings.os == "Windows":
shutil.move(os.path.join(self.package_folder, "share", "swig", self.version),
os.path.join(self.package_folder, "bin", "Lib"))
shutil.rmtree(os.path.join(self.package_folder, "share"))
if self.settings.compiler != "Visual Studio":
with tools.chdir(os.path.join(self.package_folder, "bin")):
ext = ".exe" if tools.os_info.is_windows else ""
self.run("strip swig{}".format(ext), win_bash=tools.os_info.is_windows)
self.run("strip ccache-swig{}".format(ext), win_bash=tools.os_info.is_windows)
def package_info(self):
bindir = os.path.join(self.package_folder, "bin")
self.output.info('Appending PATH environment variable: {}'.format(bindir))
self.env_info.PATH.append(bindir)
if self.settings.os == "Windows":
swig_lib_path = os.path.join(self.package_folder, "bin", "Lib")
else:
swig_lib_path = os.path.join(self.package_folder, "share", "swig", self.version)
self.output.info('Setting SWIG_LIB environment variable: {}'.format(swig_lib_path))
self.env_info.SWIG_LIB = swig_lib_path
self.output.info('Setting SWIG_INSTALLER_ROOT to {}'.format(self.package_folder))
self.env_info.SWIG_INSTALLER_ROOT = self.package_folder
| 46.592593
| 151
| 0.641017
|
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanInvalidConfiguration
from contextlib import contextmanager
import os
import shutil
class SwigConan(ConanFile):
name = "swig_installer"
version = "4.0.1"
description = "SWIG is a software development tool that connects programs written in C and C++ with a variety of high-level programming languages."
topics = ("conan", "swig", "python", "java", "wrapper")
url = "https://github.com/bincrafters/conan-swig_installer"
homepage = "http://www.swig.org"
author = "Bincrafters <bincrafters@gmail.com>"
license = "GPL-3.0-or-later"
exports = ["LICENSE.md"]
settings = "os_build", "arch_build", "compiler", "os", "arch"
_source_subfolder = "source_subfolder"
def configure(self):
if str(self.settings.os_build) != str(self.settings.os):
raise ConanInvalidConfiguration("settings.os_build must be equal to settings.os")
if str(self.settings.arch_build) != str(self.settings.arch_build):
raise ConanInvalidConfiguration("settings.arch_build must be equal to settings.arch_build")
def package_id(self):
del self.info.settings.compiler
del self.info.settings.os
del self.info.settings.arch
self.info.include_build_settings()
def build_requirements(self):
if tools.os_info.is_windows:
self.build_requires("msys2/20161025")
if self.settings.os_build == "Windows":
self.build_requires("winflexbison/2.5.18@bincrafters/stable")
else:
self.build_requires("bison_installer/3.3.2@bincrafters/stable")
self.build_requires("pcre/8.41")
if self.settings.compiler == "Visual Studio":
self.build_requires("cccl_installer/1.0@bincrafters/stable")
def system_requirements(self):
if self.develop:
if tools.os_info.with_yum:
installer = tools.SystemPackageTool()
packages = [
"autoconf",
"automake",
]
for package in packages:
installer.install(package)
def source(self):
url = "https://github.com/swig/swig/archive/rel-{}.tar.gz".format(self.version)
sha256 = "2eaf6fb89d071d1be280bf995c63360b3729860c0da64948123b5d7e4cfb6cb7"
foldername = "swig-rel-{}".format(self.version)
tools.get(url, sha256=sha256)
os.rename(foldername, self._source_subfolder)
@contextmanager
def _build_environment(self):
if self.settings.compiler == "Visual Studio":
with tools.vcvars(self.settings):
yield
else:
yield
def _patch_sources(self):
tools.replace_in_file(os.path.join(self._source_subfolder, "configure.ac"),
"AC_DEFINE_UNQUOTED(SWIG_LIB_WIN_UNIX",
"SWIG_LIB_WIN_UNIX=""\nAC_DEFINE_UNQUOTED(SWIG_LIB_WIN_UNIX")
def build(self):
self._patch_sources()
with tools.chdir(os.path.join(self.build_folder, self._source_subfolder)):
self.run('./autogen.sh', win_bash=tools.os_info.is_windows)
env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
deps_libpaths = env_build.library_paths
deps_libs = env_build.libs
deps_defines = env_build.defines
if self.settings.os_build == "Windows" and self.settings.compiler != "Visual Studio":
env_build.link_flags.append("-static")
libargs = list("-L\"{}\"".format(p) for p in deps_libpaths) + list("-l\"{}\"".format(l) for l in deps_libs)
args = [
"PCRE_LIBS={}".format(" ".join(libargs)),
"PCRE_CPPFLAGS={}".format(" ".join("-D{}".format(define) for define in deps_defines)),
"--host={}".format(tools.detected_architecture()),
]
if self.settings.compiler == "Visual Studio":
self.output.warn("Visual Studio compiler cannot create ccache-swig. Disabling ccache-swig.")
args.append("--disable-ccache")
with self._build_environment():
env_build.configure(configure_dir=os.path.join(self.build_folder, self._source_subfolder), args=args)
with tools.environment_append({"CONAN_CPU_COUNT": "1" if self.settings.compiler == "Visual Studio" else str(tools.cpu_count())}):
env_build.make()
def package(self):
self.copy(pattern="LICENSE*", dst="licenses", src=self._source_subfolder)
self.copy(pattern="COPYRIGHT", dst="licenses", src=self._source_subfolder)
with tools.chdir(self.build_folder):
env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
env_build.install()
if self.settings.os == "Windows":
shutil.move(os.path.join(self.package_folder, "share", "swig", self.version),
os.path.join(self.package_folder, "bin", "Lib"))
shutil.rmtree(os.path.join(self.package_folder, "share"))
if self.settings.compiler != "Visual Studio":
with tools.chdir(os.path.join(self.package_folder, "bin")):
ext = ".exe" if tools.os_info.is_windows else ""
self.run("strip swig{}".format(ext), win_bash=tools.os_info.is_windows)
self.run("strip ccache-swig{}".format(ext), win_bash=tools.os_info.is_windows)
def package_info(self):
bindir = os.path.join(self.package_folder, "bin")
self.output.info('Appending PATH environment variable: {}'.format(bindir))
self.env_info.PATH.append(bindir)
if self.settings.os == "Windows":
swig_lib_path = os.path.join(self.package_folder, "bin", "Lib")
else:
swig_lib_path = os.path.join(self.package_folder, "share", "swig", self.version)
self.output.info('Setting SWIG_LIB environment variable: {}'.format(swig_lib_path))
self.env_info.SWIG_LIB = swig_lib_path
self.output.info('Setting SWIG_INSTALLER_ROOT to {}'.format(self.package_folder))
self.env_info.SWIG_INSTALLER_ROOT = self.package_folder
| true
| true
|
f704d309b2d0f6c310b68088e4d0a88caea2c3aa
| 1,168
|
py
|
Python
|
pv/wsgi.py
|
Dumbaz/autoradio-pv
|
8aae293e58b2e79a05956c535bb109f74edc89c3
|
[
"BSD-3-Clause"
] | null | null | null |
pv/wsgi.py
|
Dumbaz/autoradio-pv
|
8aae293e58b2e79a05956c535bb109f74edc89c3
|
[
"BSD-3-Clause"
] | null | null | null |
pv/wsgi.py
|
Dumbaz/autoradio-pv
|
8aae293e58b2e79a05956c535bb109f74edc89c3
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WSGI config for pv project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
# Make the project package importable when deployed under /srv/pv.
sys.path.append('/srv/pv/pv')
# Default the settings module; a pre-set DJANGO_SETTINGS_MODULE wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pv.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 36.5
| 79
| 0.80137
|
import os
import sys
sys.path.append('/srv/pv/pv')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pv.settings")
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| true
| true
|
f704d3fbb8a78fe4a901efbbfcc7672e4820d792
| 2,293
|
py
|
Python
|
python/ray/tune/examples/zoopt_example.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 3
|
2020-12-03T17:48:45.000Z
|
2022-01-22T08:09:46.000Z
|
python/ray/tune/examples/zoopt_example.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 84
|
2021-03-06T08:02:56.000Z
|
2022-03-05T08:07:19.000Z
|
python/ray/tune/examples/zoopt_example.py
|
firebolt55439/ray
|
215300b070628c06f0106906fc6c03bd70ebf140
|
[
"Apache-2.0"
] | 2
|
2020-05-22T15:36:27.000Z
|
2020-05-22T15:52:03.000Z
|
"""This example demonstrates the usage of ZOOptSearch.
It also checks that it is usable with a separate scheduler.
"""
import time
from ray import tune
from ray.tune.suggest.zoopt import ZOOptSearch
from ray.tune.schedulers import AsyncHyperBandScheduler
from zoopt import ValueType # noqa: F401
def evaluation_fn(step, width, height):
    """Toy objective: loss shrinks as width*step grows, plus a height term."""
    # Simulate a non-trivial per-step evaluation cost.
    time.sleep(0.1)
    denominator = 0.1 + width * step / 100
    return denominator**(-1) + height * 0.1
def easy_objective(config):
    """Tune trainable: runs config["steps"] iterations and reports the loss."""
    # Hyperparameters come in through the config dict.
    width = config["width"]
    height = config["height"]
    for step in range(config["steps"]):
        # Any arbitrary training procedure could stand in for this call.
        score = evaluation_fn(step, width, height)
        # Feed the score back to Tune after every step.
        tune.report(iterations=step, mean_loss=score)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    # Smoke test keeps the search budget tiny so CI finishes quickly.
    num_samples = 10 if args.smoke_test else 1000
    # Optional: Pass the parameter space yourself
    # space = {
    #     # for continuous dimensions: (continuous, search_range, precision)
    #     "height": (ValueType.CONTINUOUS, [-10, 10], 1e-2),
    #     # for discrete dimensions: (discrete, search_range, has_order)
    #     "width": (ValueType.DISCRETE, [0, 10], True)
    #     # for grid dimensions: (grid, grid_list)
    #     "layers": (ValueType.GRID, [4, 8, 16])
    # }
    zoopt_search_config = {
        "parallel_num": 8,
    }
    zoopt_search = ZOOptSearch(
        algo="Asracos",  # only support ASRacos currently
        budget=num_samples,
        # dim_dict=space,  # If you want to set the space yourself
        **zoopt_search_config)
    # Pair the ZOOpt searcher with an early-stopping scheduler to show the
    # two can be combined.
    scheduler = AsyncHyperBandScheduler()
    analysis = tune.run(
        easy_objective,
        metric="mean_loss",
        mode="min",
        search_alg=zoopt_search,
        name="zoopt_search",
        scheduler=scheduler,
        num_samples=num_samples,
        config={
            "steps": 10,
            "height": tune.quniform(-10, 10, 1e-2),
            "width": tune.randint(0, 10)
        })
    print("Best config found: ", analysis.best_config)
| 30.573333
| 79
| 0.642826
|
import time
from ray import tune
from ray.tune.suggest.zoopt import ZOOptSearch
from ray.tune.schedulers import AsyncHyperBandScheduler
from zoopt import ValueType
def evaluation_fn(step, width, height):
time.sleep(0.1)
return (0.1 + width * step / 100)**(-1) + height * 0.1
def easy_objective(config):
width, height = config["width"], config["height"]
for step in range(config["steps"]):
intermediate_score = evaluation_fn(step, width, height)
tune.report(iterations=step, mean_loss=intermediate_score)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
num_samples = 10 if args.smoke_test else 1000
um_samples,
heduler = AsyncHyperBandScheduler()
analysis = tune.run(
easy_objective,
metric="mean_loss",
mode="min",
search_alg=zoopt_search,
name="zoopt_search",
scheduler=scheduler,
num_samples=num_samples,
config={
"steps": 10,
"height": tune.quniform(-10, 10, 1e-2),
"width": tune.randint(0, 10)
})
print("Best config found: ", analysis.best_config)
| true
| true
|
f704d464d0afc50d7816616dbcf9d13d85e6185a
| 3,897
|
py
|
Python
|
tests/unit/backend/chalice/api_server/mock_auth.py
|
isabella232/corpora-data-portal
|
09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9
|
[
"MIT"
] | null | null | null |
tests/unit/backend/chalice/api_server/mock_auth.py
|
isabella232/corpora-data-portal
|
09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9
|
[
"MIT"
] | 1
|
2021-02-23T22:56:13.000Z
|
2021-02-23T22:56:13.000Z
|
tests/unit/backend/chalice/api_server/mock_auth.py
|
isabella232/corpora-data-portal
|
09ed3cad3165f8b0db854b76404e0d5d0ea0b7d9
|
[
"MIT"
] | null | null | null |
import urllib
import jose.jwt
import time
import random
import sys
import requests
from flask import Flask, request, redirect, make_response, jsonify
import subprocess
# seconds until the token expires
TOKEN_EXPIRES = 2
# A mocked out oauth server, which serves all the endpoints needed by the oauth type.
class MockOauthApp:
    """Flask app that mocks the OAuth/OIDC endpoints the tests depend on."""
    def __init__(self, port):
        """Create the Flask app and register all mocked routes.

        :param port: port the server will listen on; it is baked into the
            jwks_uri advertised by the OIDC discovery endpoint.
        """
        self.port = port
        # mock flask app
        self.app = Flask("mock_oauth_app")
        self.app.add_url_rule("/authorize", view_func=self.api_authorize)
        self.app.add_url_rule("/oauth/token", view_func=self.api_oauth_token, methods=["POST"])
        self.app.add_url_rule("/v2/logout", view_func=self.api_logout)
        self.app.add_url_rule("/.well-known/openid-configuration", view_func=self.api_openid_configuration)
        self.app.add_url_rule("/.well-known/jwks.json", view_func=self.api_jwks)
    def api_authorize(self):
        """Skip real auth: redirect straight back with a fake auth code."""
        callback = request.args.get("redirect_uri")
        state = request.args.get("state")
        return redirect(callback + f"?code=fakecode&state={state}")
    def api_oauth_token(self):
        """Return a token response whose id_token is already at its expiry."""
        expires_at = time.time()
        # NOTE(review): the JWT header advertises RS256 but the token is
        # signed with HS256/"mysecret"; fine for tests that don't verify
        # the signature, but worth confirming.
        headers = dict(alg="RS256", kid="fake_kid")
        payload = dict(
            name="Fake User", sub="test_user_id", email="fake_user@email.com", email_verified=True, exp=expires_at
        )
        jwt = jose.jwt.encode(claims=payload, key="mysecret", algorithm="HS256", headers=headers)
        r = {
            "access_token": f"access-{time.time()}",
            "id_token": jwt,
            "refresh_token": f"random-{time.time()}",
            "scope": "openid profile email offline",
            "expires_in": TOKEN_EXPIRES,
            "token_type": "Bearer",
            "expires_at": expires_at,
        }
        return make_response(jsonify(r))
    def api_logout(self):
        """Redirect directly to the URL given in the returnTo parameter."""
        return_to = request.args.get("returnTo")
        return redirect(return_to)
    def api_openid_configuration(self):
        """Serve a minimal OIDC discovery document pointing at our JWKS."""
        data = dict(jwks_uri=f"http://localhost:{self.port}/.well-known/jwks.json")
        return make_response(jsonify(data))
    def api_jwks(self):
        """Serve one fake signing key whose kid matches the token header."""
        data = dict(
            alg="RS256",
            kty="RSA",
            use="sig",
            kid="fake_kid",
        )
        return make_response(jsonify(dict(keys=[data])))
class MockOauthServer:
    """Runs the mock OAuth Flask app in a child Python process."""

    def __init__(self):
        self.process = None
        self.port = None
        self.server_okay = False

    def start(self):
        """Spawn the server on a random port and wait until it responds."""
        self.port = random.randint(10000, 20000)
        self.process = subprocess.Popen([sys.executable, __file__, str(self.port)])
        # Poll the JWKS endpoint until the server accepts requests, giving
        # up after five one-second attempts.
        self.server_okay = False
        jwks_url = f"http://localhost:{self.port}/.well-known/jwks.json"
        for _attempt in range(5):
            ready = False
            try:
                ready = requests.get(jwks_url).status_code == 200
            except Exception:
                ready = False
            if ready:
                self.server_okay = True
                break
            time.sleep(1)

    def terminate(self):
        """Stop the child process hosting the mock server."""
        self.process.terminate()
def get_auth_token(app):
    """Generate an auth token for testing.

    :param app: a chalice test app.
    :return: the Set-Cookie header value holding the session token.
    """
    login_response = app.get("/dp/v1/login", headers=dict(host="localhost"))
    redirect_target = login_response.headers["Location"]
    query = urllib.parse.urlsplit(redirect_target).query
    params = dict(urllib.parse.parse_qsl(query))
    # Follow the redirect back through the oauth callback with a fake code.
    callback_url = f"/dp/v1/oauth2/callback?code=fakecode&state={params['state']}"
    callback_response = app.get(
        callback_url,
        headers=dict(host="localhost", Cookie=login_response.headers["Set-Cookie"]),
    )
    return callback_response.headers["Set-Cookie"]
if __name__ == "__main__":
    # Entry point used by MockOauthServer.start(): serve the mock OAuth app
    # on the port passed as the first CLI argument.
    port = int(sys.argv[1])
    mock_app = MockOauthApp(port)
    mock_app.app.run(port=port, debug=True)
| 32.747899
| 114
| 0.625353
|
import urllib
import jose.jwt
import time
import random
import sys
import requests
from flask import Flask, request, redirect, make_response, jsonify
import subprocess
TOKEN_EXPIRES = 2
class MockOauthApp:
def __init__(self, port):
self.port = port
self.app = Flask("mock_oauth_app")
self.app.add_url_rule("/authorize", view_func=self.api_authorize)
self.app.add_url_rule("/oauth/token", view_func=self.api_oauth_token, methods=["POST"])
self.app.add_url_rule("/v2/logout", view_func=self.api_logout)
self.app.add_url_rule("/.well-known/openid-configuration", view_func=self.api_openid_configuration)
self.app.add_url_rule("/.well-known/jwks.json", view_func=self.api_jwks)
def api_authorize(self):
callback = request.args.get("redirect_uri")
state = request.args.get("state")
return redirect(callback + f"?code=fakecode&state={state}")
def api_oauth_token(self):
expires_at = time.time()
headers = dict(alg="RS256", kid="fake_kid")
payload = dict(
name="Fake User", sub="test_user_id", email="fake_user@email.com", email_verified=True, exp=expires_at
)
jwt = jose.jwt.encode(claims=payload, key="mysecret", algorithm="HS256", headers=headers)
r = {
"access_token": f"access-{time.time()}",
"id_token": jwt,
"refresh_token": f"random-{time.time()}",
"scope": "openid profile email offline",
"expires_in": TOKEN_EXPIRES,
"token_type": "Bearer",
"expires_at": expires_at,
}
return make_response(jsonify(r))
def api_logout(self):
return_to = request.args.get("returnTo")
return redirect(return_to)
def api_openid_configuration(self):
data = dict(jwks_uri=f"http://localhost:{self.port}/.well-known/jwks.json")
return make_response(jsonify(data))
def api_jwks(self):
data = dict(
alg="RS256",
kty="RSA",
use="sig",
kid="fake_kid",
)
return make_response(jsonify(dict(keys=[data])))
class MockOauthServer:
def __init__(self):
self.process = None
self.port = None
self.server_okay = False
def start(self):
self.port = random.randint(10000, 20000)
self.process = subprocess.Popen([sys.executable, __file__, str(self.port)])
self.server_okay = False
for _ in range(5):
try:
response = requests.get(f"http://localhost:{self.port}/.well-known/jwks.json")
if response.status_code == 200:
self.server_okay = True
break
except Exception:
pass
time.sleep(1)
def terminate(self):
self.process.terminate()
def get_auth_token(app):
headers = dict(host="localhost")
response = app.get("/dp/v1/login", headers=headers)
location = response.headers["Location"]
split = urllib.parse.urlsplit(location)
args = dict(urllib.parse.parse_qsl(split.query))
url = f"/dp/v1/oauth2/callback?code=fakecode&state={args['state']}"
response = app.get(url, headers=dict(host="localhost", Cookie=response.headers["Set-Cookie"]))
return response.headers["Set-Cookie"]
if __name__ == "__main__":
port = int(sys.argv[1])
mock_app = MockOauthApp(port)
mock_app.app.run(port=port, debug=True)
| true
| true
|
f704d4961f151943c889d1c38e2afd4fdd7bde3f
| 4,440
|
py
|
Python
|
topi/python/topi/x86/reduction.py
|
jheo4/incubator-tvm
|
c4c61cb766608fb2f0fd8c9facc480a43afed3f5
|
[
"Apache-2.0"
] | 3
|
2021-02-23T22:06:01.000Z
|
2021-09-30T09:59:17.000Z
|
topi/python/topi/x86/reduction.py
|
jheo4/incubator-tvm
|
c4c61cb766608fb2f0fd8c9facc480a43afed3f5
|
[
"Apache-2.0"
] | 4
|
2021-03-30T11:59:59.000Z
|
2022-03-12T00:40:23.000Z
|
topi/python/topi/x86/reduction.py
|
jheo4/incubator-tvm
|
c4c61cb766608fb2f0fd8c9facc480a43afed3f5
|
[
"Apache-2.0"
] | 3
|
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""x86 declaration and schedules."""
from __future__ import absolute_import as _abs
import tvm
from .. import tag
from .. import generic
from ..util import get_const_tuple
def _schedule_reduce(sch, op, is_idx_reduce=False):
    """Fuse and parallelize the output stage of a reduction op on x86.

    Parameters
    ----------
    sch : Schedule
        The schedule mutated in place.
    op : Operation
        The reduction operation being scheduled.
    is_idx_reduce : bool
        True for argmin/argmax-style reductions, where the real output
        wraps the underlying reduce tensor (op.input_tensors[0]).
    """
    if is_idx_reduce:
        real_out = op.output(0)
        # NOTE(review): this fused axis is unused afterwards; the fuse call
        # still mutates the wrapper stage's schedule.
        fused = sch[real_out].fuse(*sch[real_out].op.axis)
        out = op.input_tensors[0]
    else:
        out = op.output(0)
    # Determine whether the output shape is fully static.
    const_shape = True
    out_shape = get_const_tuple(out.shape)
    for d in out_shape:
        if not isinstance(d, int):
            const_shape = False
            break
    if const_shape:
        # Static shape: fuse just enough leading axes to expose parallelism.
        naxes = len(sch[out].op.axis)
        parallelism = 1
        fuse_axes = []
        # We choose a heuristic number 128 to limit the maximum parallelism
        while len(fuse_axes) < naxes and parallelism < 128:
            ivar = sch[out].op.axis[len(fuse_axes)]
            parallelism *= int(ivar.dom.extent)
            fuse_axes.append(ivar)
        fused = sch[out].fuse(*fuse_axes)
        sch[out].parallel(fused)
    else:
        if len(sch[out].op.axis) >= 5:
            # avoid too many parallelism
            fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1], sch[out].op.axis[2])
            sch[out].parallel(fused)
        else:
            # Dynamic shape with few axes: fuse everything and parallelize.
            fused = sch[out].fuse(*sch[out].op.axis)
            sch[out].parallel(fused)
@generic.schedule_reduce.register(["cpu"])
def schedule_reduce(outs):
    """X86 schedule for reduction op.
    Parameters
    ----------
    outs: Array of Tensor
        The computation graph description of injective in the format
        of an array of tensors.
    Returns
    -------
    sch: Schedule
        The computation schedule for the op.
    """
    outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
    sch = tvm.create_schedule([x.op for x in outs])
    scheduled_ops = []
    def traverse_before_reduce(operator):
        """Internal traverse function"""
        # Producers feeding the reduction: inline every injective stage.
        if isinstance(operator, tvm.tensor.PlaceholderOp):
            return
        if tag.is_injective(operator.tag):
            sch[operator].compute_inline()
            for tensor in operator.input_tensors:
                if tensor.op not in scheduled_ops:
                    traverse_before_reduce(tensor.op)
        else:
            raise RuntimeError("Unsupported operator: %s" % operator.tag)
        scheduled_ops.append(operator)
    def traverse_after_reduce(operator):
        """Internal traverse function"""
        # Walk down from the outputs until the reduction stage is found,
        # then switch to the pre-reduce traversal for its inputs.
        if tag.is_broadcast(operator.tag):
            if operator not in scheduled_ops:
                generic.schedule_injective_from_existing(sch, operator)
            for tensor in operator.input_tensors:
                traverse_after_reduce(tensor.op)
        elif operator.tag == 'comm_reduce':
            _schedule_reduce(sch, operator, is_idx_reduce=False)
            for tensor in operator.input_tensors:
                if tensor.op not in scheduled_ops:
                    traverse_before_reduce(tensor.op)
        elif operator.tag == 'comm_reduce_idx':
            _schedule_reduce(sch, operator, is_idx_reduce=True)
            # For arg-reductions the reduce inputs sit one level deeper.
            input_tensors = operator.input_tensors[0].op.input_tensors
            for tensor in input_tensors:
                if tensor.op not in scheduled_ops:
                    traverse_before_reduce(tensor.op)
        elif isinstance(operator, tvm.tensor.PlaceholderOp):
            pass
        else:
            raise RuntimeError("Unsupported operator: %s (tag: %s)" % (operator, operator.tag))
        scheduled_ops.append(operator)
    traverse_after_reduce(outs[0].op)
    return sch
| 36.694215
| 96
| 0.643919
|
from __future__ import absolute_import as _abs
import tvm
from .. import tag
from .. import generic
from ..util import get_const_tuple
def _schedule_reduce(sch, op, is_idx_reduce=False):
if is_idx_reduce:
real_out = op.output(0)
fused = sch[real_out].fuse(*sch[real_out].op.axis)
out = op.input_tensors[0]
else:
out = op.output(0)
const_shape = True
out_shape = get_const_tuple(out.shape)
for d in out_shape:
if not isinstance(d, int):
const_shape = False
break
if const_shape:
naxes = len(sch[out].op.axis)
parallelism = 1
fuse_axes = []
while len(fuse_axes) < naxes and parallelism < 128:
ivar = sch[out].op.axis[len(fuse_axes)]
parallelism *= int(ivar.dom.extent)
fuse_axes.append(ivar)
fused = sch[out].fuse(*fuse_axes)
sch[out].parallel(fused)
else:
if len(sch[out].op.axis) >= 5:
fused = sch[out].fuse(sch[out].op.axis[0], sch[out].op.axis[1], sch[out].op.axis[2])
sch[out].parallel(fused)
else:
fused = sch[out].fuse(*sch[out].op.axis)
sch[out].parallel(fused)
@generic.schedule_reduce.register(["cpu"])
def schedule_reduce(outs):
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
sch = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse_before_reduce(operator):
if isinstance(operator, tvm.tensor.PlaceholderOp):
return
if tag.is_injective(operator.tag):
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
def traverse_after_reduce(operator):
if tag.is_broadcast(operator.tag):
if operator not in scheduled_ops:
generic.schedule_injective_from_existing(sch, operator)
for tensor in operator.input_tensors:
traverse_after_reduce(tensor.op)
elif operator.tag == 'comm_reduce':
_schedule_reduce(sch, operator, is_idx_reduce=False)
for tensor in operator.input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif operator.tag == 'comm_reduce_idx':
_schedule_reduce(sch, operator, is_idx_reduce=True)
input_tensors = operator.input_tensors[0].op.input_tensors
for tensor in input_tensors:
if tensor.op not in scheduled_ops:
traverse_before_reduce(tensor.op)
elif isinstance(operator, tvm.tensor.PlaceholderOp):
pass
else:
raise RuntimeError("Unsupported operator: %s (tag: %s)" % (operator, operator.tag))
scheduled_ops.append(operator)
traverse_after_reduce(outs[0].op)
return sch
| true
| true
|
f704d5a672c42774980b2e28551ba3d1cd02079a
| 741
|
py
|
Python
|
micropython/boot.py
|
tinytux/sensor
|
2aa2a9ac34335c0b8579018f670b29455cfd47df
|
[
"MIT"
] | 9
|
2015-01-16T17:12:20.000Z
|
2021-02-26T19:39:44.000Z
|
micropython/boot.py
|
tinytux/sensor
|
2aa2a9ac34335c0b8579018f670b29455cfd47df
|
[
"MIT"
] | null | null | null |
micropython/boot.py
|
tinytux/sensor
|
2aa2a9ac34335c0b8579018f670b29455cfd47df
|
[
"MIT"
] | 2
|
2017-02-14T05:15:03.000Z
|
2017-05-25T10:48:51.000Z
|
# boot.py -- runs on boot-up
# Hold the USER switch during the 2 s window after power-up to boot into
# USB mass-storage/debug mode; otherwise the sensor display script runs.
import pyb
pyb.LED(3).on()                 # indicate we are waiting for switch press
pyb.delay(2000)                 # wait for user to maybe press the switch
switch_value = pyb.Switch()()   # sample the switch at end of delay
pyb.LED(3).off()                # indicate that we finished waiting for the switch
pyb.LED(4).on()                 # indicate that we are selecting the mode
if switch_value:
    # button pressed, mount SD card as usb storage
    pyb.usb_mode('CDC+MSC')
    pyb.main('debug.py')
else:
    # no button pressed, SD card can be used by script
    pyb.usb_mode('CDC+HID')
    pyb.main('displaytemp.py')
pyb.LED(4).off()                # indicate that we finished selecting the mode
| 30.875
| 82
| 0.624831
|
import pyb
pyb.LED(3).on()
pyb.delay(2000)
switch_value = pyb.Switch()()
pyb.LED(3).off()
pyb.LED(4).on()
if switch_value:
pyb.usb_mode('CDC+MSC')
pyb.main('debug.py')
else:
pyb.usb_mode('CDC+HID')
pyb.main('displaytemp.py')
pyb.LED(4).off()
| true
| true
|
f704d5d4ac02cfcd81c719c22d91a83721a4f86f
| 1,151
|
py
|
Python
|
Milestone2/L2_5_p5_test_vertical.py
|
4ntLu0/1051-Project
|
93ae9b8d312bd6e79949d878d3fb422282de703b
|
[
"Unlicense"
] | null | null | null |
Milestone2/L2_5_p5_test_vertical.py
|
4ntLu0/1051-Project
|
93ae9b8d312bd6e79949d878d3fb422282de703b
|
[
"Unlicense"
] | null | null | null |
Milestone2/L2_5_p5_test_vertical.py
|
4ntLu0/1051-Project
|
93ae9b8d312bd6e79949d878d3fb422282de703b
|
[
"Unlicense"
] | null | null | null |
from Cimpl import *
image = load_image(choose_file())
def flip_vertical(image: Image) -> Image:
    """Return a copy of image mirrored across the vertical axis.

    Side effect: displays the flipped image via show().
    BUGFIX: the parameter was annotated ``image: image``, which referenced
    the module-level variable instead of the Cimpl ``Image`` type.
    """
    vertical_image = copy(image)
    for x in range(get_width(image)):
        for y in range(get_height(image)):
            # Negative x wraps around to the opposite side of the picture.
            # NOTE(review): column 0 maps to itself (-0 == 0); a strict
            # mirror would sample get_width(image) - 1 - x — confirm intent.
            flipped_color = get_color(image, -x, y)
            set_color(vertical_image, x, y, flipped_color)
    show(vertical_image)
    return vertical_image
def test_flip_vertical(image: Image) -> None:
    """Check every pixel of flip_vertical's output against the original.

    BUGFIX: the original loops were not nested as intended, so
    ``original_colour`` only ever held the colour of the last pixel of the
    original image, and a verdict was printed for every pixel of the
    flipped image.  Now each coordinate is compared pairwise against the
    same ``-x`` mapping flip_vertical uses, and a single verdict is
    printed.
    """
    vertical_image = flip_vertical(image)
    for x in range(get_width(image)):
        for y in range(get_height(image)):
            expected = get_color(image, -x, y)
            actual = get_color(vertical_image, x, y)
            if expected != actual:
                print('Test Failed')
                return
    print('Test Passed')
| 28.073171
| 201
| 0.636838
|
from Cimpl import *
image = load_image(choose_file())
def flip_vertical(image: image) -> Image:
vertical_image = copy(image)
for x in range(get_width(image)):
for y in range(get_height(image)):
flipped_color = get_color(image, -x, y)
set_color(vertical_image, x, y, flipped_color)
show(vertical_image)
return vertical_image
def test_flip_vertical(image: Image) -> Image:
vertical_image = flip_vertical(image)
for x in range(get_width(image)):
for y in range(get_height(image)):
original_colour = get_color(image, x, y)
for x in range(get_width(vertical_image)):
for y in range(get_height(vertical_image)):
vertical_colour = get_color(vertical_image, -x, y)
if original_colour == vertical_colour:
print('Test Passed')
else: print('Test Failed')
| true
| true
|
f704d62fcf2ec086efa3468b0494a0f1c3b01a19
| 2,675
|
py
|
Python
|
sc-actions-provider/app.py
|
Sage-Bionetworks-IT/cfn-cr-sc-actions-provider
|
f0550c6b810fbb437e24048de73d429b017750b4
|
[
"Apache-2.0"
] | null | null | null |
sc-actions-provider/app.py
|
Sage-Bionetworks-IT/cfn-cr-sc-actions-provider
|
f0550c6b810fbb437e24048de73d429b017750b4
|
[
"Apache-2.0"
] | 4
|
2020-04-28T20:24:30.000Z
|
2021-08-17T01:21:17.000Z
|
sc-actions-provider/app.py
|
Sage-Bionetworks-IT/cfn-cr-sc-actions-provider
|
f0550c6b810fbb437e24048de73d429b017750b4
|
[
"Apache-2.0"
] | 1
|
2020-04-28T18:42:41.000Z
|
2020-04-28T18:42:41.000Z
|
import boto3
import json
import logging
from crhelper import CfnResource
logger = logging.getLogger(__name__)
helper = CfnResource(
json_logging=False, log_level='DEBUG', boto_level='CRITICAL')
try:
sc = boto3.client("servicecatalog")
except Exception as e:
helper.init_failure(e)
def get_parameters(event):
    """Extract the custom-resource parameters from a CloudFormation event.

    Returns the tuple (aws_account_id, name, ssm_doc_name, ssm_doc_version,
    assume_role).
    """
    # The stack ARN carries the owning account id in the 5th ':' field.
    account_id = event['StackId'].split(':')[4]
    props = event['ResourceProperties']
    return (
        account_id,
        props['Name'],
        props['SsmDocName'],
        props['SsmDocVersion'],
        props['AssumeRole'],
    )
def create_provider(aws_account_id, name, ssm_doc_name, ssm_doc_version, assume_role):
    """Create a Service Catalog self-service action backed by an SSM
    Automation document and return the new action's id.

    NOTE(review): aws_account_id is accepted but never used here.
    """
    response = sc.create_service_action(
        Name=name,
        Description=name,
        DefinitionType='SSM_AUTOMATION',
        Definition= {
            "Name": ssm_doc_name,
            "Version": ssm_doc_version,
            "AssumeRole": assume_role,
            # The action targets the instance the end user invokes it on.
            "Parameters": "[{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}]"
        }
    )
    id = response['ServiceActionDetail']['ServiceActionSummary']['Id']
    logger.info("created sc action " + id)
    return id
@helper.create
def create(event, context):
    """Handle CloudFormation Create: provision the SC action; the returned
    id becomes the resource's PhysicalResourceId."""
    logger.debug("Received event: " + json.dumps(event, sort_keys=False))
    return create_provider(*get_parameters(event))
@helper.delete
def delete(event, context):
    """Handle CloudFormation Delete: remove the Service Catalog action whose
    id was stored as PhysicalResourceId by create()."""
    logger.debug("Received event: " + json.dumps(event, sort_keys=False))
    id = event['PhysicalResourceId']
    logger.info("deleting sc action " + id)
    sc.delete_service_action(
        Id=id
    )
@helper.update
def update(event, context):
    """Handle CloudFormation Update: push changed properties to the existing
    Service Catalog action and return the (unchanged) action id."""
    logger.debug("Received event: " + json.dumps(event, sort_keys=False))
    new_properties = event['ResourceProperties']
    old_properties = event['OldResourceProperties']
    id = event['PhysicalResourceId']
    # Only call the API when something actually changed.
    if new_properties != old_properties:
        response = sc.update_service_action(
            Id=id,
            Name=new_properties['Name'],
            Description=new_properties['Name'],
            Definition= {
                "Name": new_properties['SsmDocName'],
                "Version": new_properties['SsmDocVersion'],
                "AssumeRole": new_properties['AssumeRole'],
                "Parameters": "[{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}]"
            }
        )
        id = response['ServiceActionDetail']['ServiceActionSummary']['Id']
        logger.info("updated sc action = " + id)
    return id
def lambda_handler(event, context):
    # Entry point: crhelper dispatches to the registered create/update/delete
    # handlers and reports the result back to CloudFormation.
    helper(event, context)
| 34.294872
| 86
| 0.644486
|
import boto3
import json
import logging
from crhelper import CfnResource
logger = logging.getLogger(__name__)
helper = CfnResource(
json_logging=False, log_level='DEBUG', boto_level='CRITICAL')
try:
sc = boto3.client("servicecatalog")
except Exception as e:
helper.init_failure(e)
def get_parameters(event):
aws_account_id = event['StackId'].split(':')[4]
name = event['ResourceProperties']['Name']
ssm_doc_name = event['ResourceProperties']['SsmDocName']
ssm_doc_version = event['ResourceProperties']['SsmDocVersion']
assume_role = event['ResourceProperties']['AssumeRole']
return aws_account_id, name, ssm_doc_name, ssm_doc_version, assume_role
def create_provider(aws_account_id, name, ssm_doc_name, ssm_doc_version, assume_role):
response = sc.create_service_action(
Name=name,
Description=name,
DefinitionType='SSM_AUTOMATION',
Definition= {
"Name": ssm_doc_name,
"Version": ssm_doc_version,
"AssumeRole": assume_role,
"Parameters": "[{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}]"
}
)
id = response['ServiceActionDetail']['ServiceActionSummary']['Id']
logger.info("created sc action " + id)
return id
@helper.create
def create(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=False))
return create_provider(*get_parameters(event))
@helper.delete
def delete(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=False))
id = event['PhysicalResourceId']
logger.info("deleting sc action " + id)
sc.delete_service_action(
Id=id
)
@helper.update
def update(event, context):
logger.debug("Received event: " + json.dumps(event, sort_keys=False))
new_properties = event['ResourceProperties']
old_properties = event['OldResourceProperties']
id = event['PhysicalResourceId']
if new_properties != old_properties:
response = sc.update_service_action(
Id=id,
Name=new_properties['Name'],
Description=new_properties['Name'],
Definition= {
"Name": new_properties['SsmDocName'],
"Version": new_properties['SsmDocVersion'],
"AssumeRole": new_properties['AssumeRole'],
"Parameters": "[{\"Name\":\"InstanceId\",\"Type\":\"TARGET\"}]"
}
)
id = response['ServiceActionDetail']['ServiceActionSummary']['Id']
logger.info("updated sc action = " + id)
return id
def lambda_handler(event, context):
helper(event, context)
| true
| true
|
f704d69bd77a243b1e77d53e8cb7b0fdb8daaf27
| 7,384
|
py
|
Python
|
src/secondaires/rapport/editeurs/bugedit_p/__init__.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 14
|
2015-08-21T19:15:21.000Z
|
2017-11-26T13:59:17.000Z
|
src/secondaires/rapport/editeurs/bugedit_p/__init__.py
|
vincent-lg/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 20
|
2015-09-29T20:50:45.000Z
|
2018-06-21T12:58:30.000Z
|
src/secondaires/rapport/editeurs/bugedit_p/__init__.py
|
vlegoff/tsunami
|
36b3b974f6eefbf15cd5d5f099fc14630e66570b
|
[
"BSD-3-Clause"
] | 3
|
2015-05-02T19:42:03.000Z
|
2018-09-06T10:55:00.000Z
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant l'éditeur 'bugedit'.
Si des redéfinitions de contexte-éditeur standard doivent être faites, elles
seront placées dans ce package
Note importante : ce package contient la définition d'un éditeur, mais
celui-ci peut très bien être étendu par d'autres modules. Au quel cas,
les extensions n'apparaîtront pas ici.
"""
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.description import Description
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.choix import Choix
from primaires.interpreteur.editeur.flag import Flag
from .edt_assigne import EdtAssigne
from .supprimer import NSupprimer
from secondaires.rapport.constantes import *
class EdtBugeditP(Presentation):

    """Report editor 'bugedit+'.

    A Presentation menu exposing one sub-editor per editable report field:
    title, type, category, priority, description, public flag, status,
    progress, assignee and deletion.
    """

    nom = "bugedit+"

    def __init__(self, personnage, rapport):
        """Constructor; *personnage* may be None (e.g. when unpickled)."""
        if personnage:
            instance_connexion = personnage.instance_connexion
        else:
            instance_connexion = None
        Presentation.__init__(self, instance_connexion, rapport)
        if personnage and rapport:
            self.construire(rapport)

    def __getnewargs__(self):
        # Pickled editors are re-created empty; construire() runs again later.
        return (None, None)

    def construire(self, rapport):
        """Build the menu: one entry per report field."""
        # Title
        titre = self.ajouter_choix("titre", "t", Uniligne, rapport, "titre")
        titre.parent = self
        titre.prompt = "Titre du rapport : "
        titre.apercu = "{objet.titre}"
        titre.aide_courte = \
            "Entrez le |ent|titre|ff| du rapport ou |cmd|/|ff| pour revenir " \
            "à la fenêtre parente.\n\nTitre actuel : |bc|{objet.titre}|ff|"

        # Type
        # BUGFIX: the help string read "\n\Types" / "\n\Type" (missing 'n'),
        # printing a stray backslash instead of the blank line every other
        # entry uses.
        types = sorted(TYPES)
        type = self.ajouter_choix("type", "y", Choix, rapport,
                "type", types)
        type.parent = self
        type.prompt = "Type de rapport : "
        type.apercu = "{objet.type}"
        type.aide_courte = \
            "Entrez le |ent|type|ff| de rapport ou |cmd|/|ff| pour " \
            "revenir à la fenêtre parente.\n\nTypes disponibles : " \
            "{}.\n\nType actuel : |bc|{{objet.type}}|ff|".format(
            ", ".join(types))

        # Category
        categories = sorted(CATEGORIES)
        categorie = self.ajouter_choix("catégorie", "c", Choix, rapport,
                "categorie", categories)
        categorie.parent = self
        categorie.prompt = "Catégorie du rapport : "
        categorie.apercu = "{objet.categorie}"
        categorie.aide_courte = \
            "Entrez la |ent|catégorie|ff| du rapport ou |cmd|/|ff| pour " \
            "revenir à la fenêtre parente.\n\nCatégories disponibles : " \
            "{}.\n\nCatégorie actuelle : |bc|{{objet.categorie}}|ff|".format(
            ", ".join(categories))

        # Priority
        priorites = sorted(PRIORITES)
        priorite = self.ajouter_choix("priorité", "p", Choix, rapport,
                "priorite", priorites)
        priorite.parent = self
        priorite.prompt = "Priorité du rapport : "
        priorite.apercu = "{objet.priorite}"
        priorite.aide_courte = \
            "Entrez la |ent|priorité|ff| du rapport ou |cmd|/|ff| pour " \
            "revenir à la fenêtre parente.\n\nPriorités disponibles : " \
            "{}.\n\nPriorité actuelle : |bc|{{objet.priorite}}|ff|".format(
            ", ".join(priorites))

        # Description
        description = self.ajouter_choix("description", "d", Description, \
                rapport)
        description.parent = self
        description.apercu = "{objet.description.paragraphes_indentes}"
        description.aide_courte = \
            "| |tit|" + "Description du rapport #{}".format(
            rapport.id).ljust(74) + \
            "|ff||\n" + self.opts.separateur

        # Public flag (no prompt: Flag toggles on selection)
        public = self.ajouter_choix("public", "b", Flag, rapport, "public")
        public.parent = self

        # Status
        statut = self.ajouter_choix("statut", "s", Choix, rapport,
                "statut", STATUTS)
        statut.parent = self
        statut.prompt = "Statut du rapport : "
        statut.apercu = "{objet.statut}"
        statut.aide_courte = \
            "Entrez le |ent|statut|ff| du rapport ou |cmd|/|ff| pour " \
            "revenir à la fenêtre parente.\n\nStatuts disponibles : " \
            "{}.\n\nStatut actuel : |bc|{{objet.statut}}|ff|".format(
            ", ".join(STATUTS))

        # Progress (integer 0-100, rendered with a '%' suffix)
        avancement = self.ajouter_choix("avancement", "a", Entier, rapport,
                "avancement", 0, 100, "%")
        avancement.parent = self
        avancement.prompt = "Avancement de la tâche : "
        avancement.apercu = "{objet.avancement}"
        avancement.aide_courte = \
            "Entrez l'|ent|avancement|ff| en pourcent de la tâche ou " \
            "|cmd|/|ff| pour revenir à la\nfenêtre parente.\n\n" \
            "Avancement actuel : |bc|{valeur}|ff|"

        # Assignee
        assigne_a = self.ajouter_choix("assigné à", "i", EdtAssigne, rapport)
        assigne_a.parent = self
        assigne_a.prompt = "Entrez un nom de joueur : "
        assigne_a.apercu = "{objet.aff_assigne_a}"
        assigne_a.aide_courte = \
            "Entrez un |ent|Immortel|ff| à qui assigner ce rapport, ou " \
            "|cmd|/|ff| pour revenir à la\nfenêtre parente.\n\n" \
            "Actuellement assigné à : {objet.aff_assigne_a}"

        # Deletion (asks for confirmation)
        sup = self.ajouter_choix("supprimer", "sup", NSupprimer,
                rapport)
        sup.parent = self
        sup.aide_courte = "Souhaitez-vous réellement supprimer " \
                "le rapport #{} ?".format(rapport.id)
| 41.717514
| 79
| 0.641522
|
from primaires.interpreteur.editeur.presentation import Presentation
from primaires.interpreteur.editeur.description import Description
from primaires.interpreteur.editeur.uniligne import Uniligne
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.choix import Choix
from primaires.interpreteur.editeur.flag import Flag
from .edt_assigne import EdtAssigne
from .supprimer import NSupprimer
from secondaires.rapport.constantes import *
class EdtBugeditP(Presentation):
nom = "bugedit+"
def __init__(self, personnage, rapport):
if personnage:
instance_connexion = personnage.instance_connexion
else:
instance_connexion = None
Presentation.__init__(self, instance_connexion, rapport)
if personnage and rapport:
self.construire(rapport)
def __getnewargs__(self):
return (None, None)
def construire(self, rapport):
titre = self.ajouter_choix("titre", "t", Uniligne, rapport, "titre")
titre.parent = self
titre.prompt = "Titre du rapport : "
titre.apercu = "{objet.titre}"
titre.aide_courte = \
"Entrez le |ent|titre|ff| du rapport ou |cmd|/|ff| pour revenir " \
"à la fenêtre parente.\n\nTitre actuel : |bc|{objet.titre}|ff|"
types = sorted(TYPES)
type = self.ajouter_choix("type", "y", Choix, rapport,
"type", types)
type.parent = self
type.prompt = "Type de rapport : "
type.apercu = "{objet.type}"
type.aide_courte = \
"Entrez le |ent|type|ff| de rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\Types disponibles : " \
"{}.\n\Type actuel : |bc|{{objet.type}}|ff|".format(
", ".join(types))
categories = sorted(CATEGORIES)
categorie = self.ajouter_choix("catégorie", "c", Choix, rapport,
"categorie", categories)
categorie.parent = self
categorie.prompt = "Catégorie du rapport : "
categorie.apercu = "{objet.categorie}"
categorie.aide_courte = \
"Entrez la |ent|catégorie|ff| du rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nCatégories disponibles : " \
"{}.\n\nCatégorie actuelle : |bc|{{objet.categorie}}|ff|".format(
", ".join(categories))
priorites = sorted(PRIORITES)
priorite = self.ajouter_choix("priorité", "p", Choix, rapport,
"priorite", priorites)
priorite.parent = self
priorite.prompt = "Priorité du rapport : "
priorite.apercu = "{objet.priorite}"
priorite.aide_courte = \
"Entrez la |ent|priorité|ff| du rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nPriorités disponibles : " \
"{}.\n\nPriorité actuelle : |bc|{{objet.priorite}}|ff|".format(
", ".join(priorites))
description = self.ajouter_choix("description", "d", Description, \
rapport)
description.parent = self
description.apercu = "{objet.description.paragraphes_indentes}"
description.aide_courte = \
"| |tit|" + "Description du rapport #{}".format(
rapport.id).ljust(74) + \
"|ff||\n" + self.opts.separateur
public = self.ajouter_choix("public", "b", Flag, rapport, "public")
public.parent = self
statut = self.ajouter_choix("statut", "s", Choix, rapport,
"statut", STATUTS)
statut.parent = self
statut.prompt = "Statut du rapport : "
statut.apercu = "{objet.statut}"
statut.aide_courte = \
"Entrez le |ent|statut|ff| du rapport ou |cmd|/|ff| pour " \
"revenir à la fenêtre parente.\n\nStatuts disponibles : " \
"{}.\n\nStatut actuel : |bc|{{objet.statut}}|ff|".format(
", ".join(STATUTS))
avancement = self.ajouter_choix("avancement", "a", Entier, rapport,
"avancement", 0, 100, "%")
avancement.parent = self
avancement.prompt = "Avancement de la tâche : "
avancement.apercu = "{objet.avancement}"
avancement.aide_courte = \
"Entrez l'|ent|avancement|ff| en pourcent de la tâche ou " \
"|cmd|/|ff| pour revenir à la\nfenêtre parente.\n\n" \
"Avancement actuel : |bc|{valeur}|ff|"
# Assigné à
assigne_a = self.ajouter_choix("assigné à", "i", EdtAssigne, rapport)
assigne_a.parent = self
assigne_a.prompt = "Entrez un nom de joueur : "
assigne_a.apercu = "{objet.aff_assigne_a}"
assigne_a.aide_courte = \
"Entrez un |ent|Immortel|ff| à qui assigner ce rapport, ou " \
"|cmd|/|ff| pour revenir à la\nfenêtre parente.\n\n" \
"Actuellement assigné à : {objet.aff_assigne_a}"
# Supprimer
sup = self.ajouter_choix("supprimer", "sup", NSupprimer,
rapport)
sup.parent = self
sup.aide_courte = "Souhaitez-vous réellement supprimer " \
"le rapport #{} ?".format(rapport.id)
| true
| true
|
f704d794aec6ae8e930cc4290ff291c234e2423c
| 16,527
|
py
|
Python
|
privatmarket_service.py
|
dimonklas/robotForPull
|
71485ba9612be4cb8916aae4ed6ca183a0f490ba
|
[
"Apache-2.0"
] | null | null | null |
privatmarket_service.py
|
dimonklas/robotForPull
|
71485ba9612be4cb8916aae4ed6ca183a0f490ba
|
[
"Apache-2.0"
] | null | null | null |
privatmarket_service.py
|
dimonklas/robotForPull
|
71485ba9612be4cb8916aae4ed6ca183a0f490ba
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import os
import sys
from dateutil import parser
from datetime import datetime
from pytz import timezone
import re
import datetime
import dateutil.parser
from datetime import timedelta
def modify_test_data(initial_data):
    """Overwrite a tender fixture in place with one fixed procuring entity /
    buyer (ТОВ "СФ "РУБІЖНЕ", id 38580144) and shift the tender period start
    one day forward. Returns the same (mutated) dict."""
    # Procuring entity: fixed display name, contacts and identifier.
    initial_data['procuringEntity']['name'] = u'ТОВ \"СФ \"РУБІЖНЕ\"'
    if 'contactPoint' in initial_data['procuringEntity']:
        initial_data['procuringEntity']['contactPoint']['telephone'] = u'+380670444580'
        initial_data['procuringEntity']['contactPoint']['url'] = u'https://dadadad.com'
    initial_data['procuringEntity']['identifier']['legalName'] = u'ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"СІЛЬСЬКОГОСПОДАРСЬКА ФІРМА \"РУБІЖНЕ\"'
    initial_data['procuringEntity']['identifier']['id'] = u'38580144'
    # First buyer mirrors the procuring entity.
    initial_data['buyers'][0]['identifier']['id'] = u'38580144'
    initial_data['buyers'][0]['identifier']['legalName'] = u'ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"СІЛЬСЬКОГОСПОДАРСЬКА ФІРМА \"РУБІЖНЕ\"'
    initial_data['buyers'][0]['name'] = u'ТОВ \"СФ \"РУБІЖНЕ\"'
    # Push the tender start one day forward so the fixture stays current.
    initial_data['tender']['tenderPeriod']['startDate'] = add_day_to_date(initial_data['tender']['tenderPeriod']['startDate'])
    return initial_data
def add_day_to_date(date):
    """Parse a date string, add one day, and return it in ISO 8601 format.

    NOTE(review): the intermediate strftime('%Y-%m-%dT%H:%M:%S%z') drops any
    fractional seconds, and '%z' renders empty for naive datetimes — confirm
    callers pass timezone-aware strings when the offset must survive.
    """
    dat = parser.parse(date)
    new_date = (dat + timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S%z')
    new = parser.parse(new_date).isoformat()
    return new
def get_currency_type(currency):
    """Map a localized currency label to its ISO 4217 code.

    Unknown labels are returned unchanged.
    """
    # Decode raw byte input so the lookup compares text to text. Checking for
    # bytes (not str) also covers Python 2 `str` while keeping Python 3 safe —
    # the original called .decode() on any str, which raises on Python 3.
    if isinstance(currency, bytes):
        currency = currency.decode("utf-8")
    currency_dictionary = {
        u'грн': 'UAH'
    }
    return currency_dictionary.get(currency, currency)
def get_month_number(month_name):
    """Return the month number (1-12) for a Russian or Ukrainian month word.

    Four aligned spelling groups of twelve are recognised: Russian genitive,
    Russian abbreviation, Ukrainian abbreviation, Ukrainian genitive.
    Raises ValueError for anything else.
    """
    known_spellings = (
        u"января", u"февраля", u"марта", u"апреля", u"мая", u"июня",
        u"июля", u"августа", u"сентября", u"октября", u"ноября", u"декабря",
        u"янв.", u"февр.", u"мар.", u"апр.", u"мая.", u"июн.",
        u"июл.", u"авг.", u"сент.", u"окт.", u"нояб.", u"дек.",
        u"січ.", u"лют.", u"бер.", u"квіт.", u"трав.", u"черв.",
        u"лип.", u"серп.", u"вер.", u"жовт.", u"лист.", u"груд.",
        u"січня", u"лютого", u"березня", u"квітня", u"травня", u"червня",
        u"липня", u"серпня", u"вересня", u"жовтня", u"листопада", u"грудня",
    )
    # Position within the matching 12-entry group is the month number.
    return known_spellings.index(month_name) % 12 + 1
def get_time_with_offset(date):
    """Interpret a naive 'YYYY-MM-DD HH:MM' string as Europe/Kiev local time
    and return it formatted with microseconds and the UTC offset."""
    date_obj = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M")
    time_zone = timezone('Europe/Kiev')
    # pytz requires localize() (not tzinfo=) so the DST-correct offset is used.
    localized_date = time_zone.localize(date_obj)
    return localized_date.strftime('%Y-%m-%d %H:%M:%S.%f%z')
# def get_time_with_offset_formatted(date, input_format_date, output_format):
# date_obj = datetime.datetime.strptime(date, input_format_date)
# time_zone = timezone('Europe/Kiev')
# localized_date = time_zone.localize(date_obj)
# return localized_date.strftime(output_format)
def get_time_with_offset_formatted(date, input_format_date):
    """Parse *date* with the caller-supplied strptime format, attach the
    Europe/Kiev timezone, and return the ISO 8601 representation."""
    tz = timezone('Europe/Kiev')
    date_obj = datetime.datetime.strptime(date, input_format_date)
    res = tz.localize(date_obj)
    result = res.isoformat()
    return result
def get_current_date():
    """Return today's date formatted as DD-MM-YYYY.

    Fix: in this module the later `import datetime` rebinds the name to the
    module (shadowing the earlier `from datetime import datetime`), so the
    original `datetime.now()` raised AttributeError; the class must be
    qualified.
    """
    now = datetime.datetime.now()
    return now.strftime('%d-%m-%Y')
def get_unit_code(name):
    """Translate a Ukrainian unit name to its unit code.

    Names without a known code are returned unchanged.
    """
    unit_codes = {
        u'кілограми': u'KGM',
        u'пара': u'PR',
        u'літр': u'LTR',
        u'набір': u'SET',
        u'пачок': u'NMP',
        u'метри': u'MTR',
        u'лот': u'LO',
        u'послуга': u'E48',
        u'метри кубічні': u'MTQ',
        u'ящик': u'BX',
        u'рейс': u'E54',
        u'тони': u'TNE',
        u'метри квадратні': u'MTK',
        u'кілометри': u'KMT',
        u'штуки': u'H87',
        u'місяць': u'MON',
        u'пачка': u'RM',
        u'упаковка': u'PK',
        u'гектар': u'HAR',
        u'блок': u'D64',
        u'Флакон': u'VI'
    }
    return unit_codes.get(name, name)
def get_unit_name(current_name):
    """Normalise a Russian unit spelling to the canonical Ukrainian unit name.

    Unknown spellings are returned unchanged. Fixes Python 3 breakage in the
    original (`str.decode` on text, dict.iteritems) while staying Python 2
    compatible, and drops a dead `dictionary.get(...)` call.
    """
    # Decode only raw bytes (covers Python 2 `str` too); Python 3 text has
    # no .decode().
    if isinstance(current_name, bytes):
        current_name = current_name.decode("utf-8")
    dictionary = {
        u'кілограми': {u'килограмм', u'килограмма', u'килограммов'},
        u'пара': {u'пара', u'пары', u'пар'},
        u'літр': {u'литр', u'литра', u'литров'},
        u'набір': {u'набор', u'набора', u'наборов'},
        u'пачок': {u'пачка', u'пачек', u'пачки'},
        u'метри': {u'метр', u'метра', u'метров'},
        u'лот': {u'лот', u'лоты', u'лотов'},
        u'послуга': {u'услуга', u'услуг', u'услуги'},
        u'метри кубічні': {u'метр кубический', u'метра кубического', u'метров кубических'},
        u'ящик': {u'ящик', u'ящика', u'ящиков'},
        u'рейс': {u'рейс', u'рейса', u'рейсов'},
        u'тони': {u'тонна', u'тонны', u'тонн'},
        u'метри квадратні': {u'метр квадратный', u'метра квадратного', u'метров квадратных'},
        u'кілометри': {u'километр', u'километров', u'километра'},
        u'штуки': {u'штука', u'штуки', u'штук', u'Штуки'},
        u'місяць': {u'месяц', u'месяца', u'месяцев'},
        u'пачка': {u'пачка', u'пачек', u'пачкики'},
        u'упаковка': {u'упаковка', u'упаковок', u'упаковки'},
        u'гектар': {u'гектар', u'гектара', u'гектаров'},
        u'блок': {u'блок', u'блока', u'блоков'}
    }
    expected_name = None
    # Keep the *last* match: some variant sets overlap (e.g. u'пачка') and
    # the original implementation let later entries win.
    for name, variants in dictionary.items():
        if current_name in variants:
            expected_name = name
    if expected_name:
        return expected_name
    else:
        return current_name
def get_unit_name_ru(current_name):
    """Normalise a unit spelling to the canonical Russian unit name.

    Unknown spellings are returned unchanged. Same Python 3 fixes as
    get_unit_name: decode bytes only, use .items() instead of the
    Python-2-only .iteritems(), drop the dead `dictionary.get(...)` call.
    """
    if isinstance(current_name, bytes):
        current_name = current_name.decode("utf-8")
    dictionary = {
        u'килограмм': {u'килограмм', u'килограмма', u'килограммов', u'кілограми'},
        u'пара': {u'пара', u'пары', u'пар'},
        u'литр': {u'литр', u'литра', u'литров'},
        u'набора': {u'набір', u'набора', u'наборов'},
        u'пачек': {u'пачка', u'пачек', u'пачки'},
        u'метр': {u'метр', u'метра', u'метров'},
        u'лот': {u'лот', u'лоты', u'лотов'},
        u'услуга': {u'услуга', u'услуг', u'услуги'},
        u'метр .куб.': {u'метр кубический', u'метра кубического', u'метров кубических'},
        u'ящик': {u'ящик', u'ящика', u'ящиков'},
        u'рейс': {u'рейс', u'рейса', u'рейсов'},
        u'тонны': {u'тонна', u'тонны', u'тонн'},
        u'метр квадратный': {u'метр квадратный', u'метра квадратного', u'метров квадратных'},
        u'километры': {u'километр', u'километров', u'километра'},
        u'штуки': {u'штука', u'штуки', u'штук'},
        u'месяц': {u'месяц', u'месяца', u'месяцев'},
        u'пачка': {u'пачка', u'пачек', u'пачкики'},
        u'упаковка': {u'упаковка', u'упаковок', u'упаковки'},
        u'гектар': {u'гектар', u'гектара', u'гектаров'},
        u'блок': {u'блок', u'блока', u'блоков'}
    }
    expected_name = None
    # Keep the *last* match — variant sets overlap (e.g. u'пачка'), and the
    # original let later entries win.
    for name, variants in dictionary.items():
        if current_name in variants:
            expected_name = name
    if expected_name:
        return expected_name
    else:
        return current_name
def get_classification_type(classifications):
classifications_dictionary = {
u'ДК 016:2010': u'ДКПП',
u'ДК 021:2015': u'CPV',
u'ДК 18-2000': u'ДК018',
u'ДК003: 2010': u'ДК003',
u'ДК003:2010': u'ДК003',
u'ДК 015-97': u'ДК015',
u'ДК021': u'CPV'
}
classifications_type = classifications_dictionary.get(classifications)
if classifications_type:
return classifications_type
else:
return classifications
def get_status_type(status_name):
    """Map a localized (RU/UK) tender status label to its OpenProcurement
    status code. Surrounding whitespace is ignored; unknown labels yield None.
    """
    status_map = {
        u'Период уточнений': 'active.enquiries',
        u'Період уточнень': 'active.enquiries',
        u'Период уточнений завершен': 'active.enquiries.ended',
        u'Період уточнень завершено': 'active.enquiries.ended',
        u'Подача предложений': 'active.tendering',
        u'Подача пропозицій': 'active.tendering',
        u'Торги': 'active.auction',
        u'Квалификация победителя': 'active.qualification',
        u'Квалификація переможця': 'active.qualification',
        u'Предложения рассмотрены': 'active.awarded',
        u'Пропозиції розглянуті': 'active.awarded',
        u'Закупка не состоялась': 'unsuccessful',
        u'Закупівля не відбулась': 'unsuccessful',
        u'Завершено': 'complete',
        u'Отменено': 'cancelled',
        u'Відмінено': 'cancelled',
        u'Розглядається': 'pending',
        u'Кваліфікація учасника': 'active.pre-qualification',
        u'Пауза перед аукціоном': 'active.pre-qualification.stand-still',
        u'Прекваліфікація': 'active.pre-qualification',
        u'Преквалификация': 'active.pre-qualification'
    }
    return status_map.get(status_name.strip())
def convert_float_to_string(number):
    """Render a float with exactly two decimal places; everything else is
    passed through unchanged.

    Uses `type(...) is float` (not isinstance) on purpose so bools, ints and
    float subclasses keep their original representation.
    """
    if type(number) is float:
        return '{:.2f}'.format(number)
    return number
def get_claim_status(status):
    """Map a localized complaint/claim status label to its OpenProcurement
    code; unknown labels yield None.

    NOTE(review): some keys mix Latin 'i' into Cyrillic words (as in the
    original) — presumably matching UI text verbatim; confirm upstream.
    """
    claim_codes = {
        u'Вiдправлено': 'claim',
        u'Отримано вiдповiдь': 'answered',
        u'Задоволено': 'resolved',
        u'Скасована': 'cancelled',
        u'Не вирiшена, обробляється': 'pending',
        u'Залишена без відповіді': 'ignored',
        u'Не задоволено': 'declined',
        u'Вимога відхилена': 'invalid',
        u'Запит для пiдтверждения скасування': 'stopping'
    }
    return claim_codes.get(status)
def get_procurementMethod_Type(type):
    """Map a localized procurement procedure name to its procurementMethodType
    code; unknown names yield None."""
    method_types = {
        u'Конкурентний діалог з публікацією англійською мовою 1-ий етап': 'competitiveDialogueEU',
        u'Конкурентний діалог 1-ий етап': 'competitiveDialogueUA',
        u'Переговорна процедура для потреб оборони': 'aboveThresholdUA.defense',
        u'Укладання рамкової угоди': 'closeFrameworkAgreementUA',
        u'Допорогові закупівлі': 'belowThreshold',
        u'Переговорна процедура': 'negotiation',
        u'Звіт про укладений договір': 'reporting',
        u'Відкриті торги': 'aboveThresholdUA',
        u'Відкриті торги з публікацією англійською мовою': 'aboveThresholdEU',
        u'Відкриті торги для закупівлі енергосервісу': 'esco'
    }
    return method_types.get(type)
def sum_of_numbers(number, value):
    """Return the integer sum of two int-coercible values."""
    return int(number) + int(value)
def abs_number(number):
    """Return the absolute value of *number* after coercing it to int."""
    value = int(number)
    return value if value >= 0 else -value
def get_abs_item_index(lot_index, item_index, items_count):
    """Flatten a 1-based (lot, item) position into a 1-based absolute item
    index, given *items_count* items per lot."""
    lots_before = int(lot_index) - 1
    return lots_before * int(items_count) + int(item_index)
def get_match_from_string(string, pattern, group):
    """Return capture group *group* of the first *pattern* match in *string*.

    Returns the string 'null' (a sentinel, not None) when nothing matches.
    Fix: the original compiled and then searched the pattern twice; one
    re.search suffices (the re module caches compiled patterns).
    """
    match = re.search(pattern, string)
    if match:
        return match.group(int(group))
    return 'null'
def get_percent(value):
    """Convert a ratio (e.g. 0.25) into a whole-percent string (e.g. '25')."""
    return format(value * 100, '.0f')
def get_conversion_to_int(value):
    """Coerce a numeric string or number to int, truncating toward zero."""
    as_float = float(value)
    return int(as_float)
def get_cause(cause_text):
cause_dictionary = {
u'Закупівля творів мистецтва або закупівля, пов’язана із захистом прав інтелектуальної власності, або укладення договору про закупівлю з переможцем архітектурного чи мистецького конкурсу': u'artContestIP',
u'Відсутність конкуренції (у тому числі з технічних причин) на відповідному ринку, внаслідок чого договір про закупівлю може бути укладено лише з одним постачальником, завідсутності при цьому альтернативи': u'noCompetition',
u'Нагальна потреба у здійсненні закупівлі у зв’язку з виникненням особливих економічних чи соціальних обставин, яка унеможливлює дотримання замовниками строків для проведення тендеру, а саме пов’язаних з негайною ліквідацією наслідків надзвичайних ситуацій, а також наданням у встановленому порядку Україною гуманітарної допомоги іншим державам. Застосування переговорної процедури закупівлі в таких випадках здійснюється за рішенням замовника щодо кожної процедури': u'quick',
u'Якщо замовником було двічі відмінено тендер через відсутність достатньої кількостіучасників,прицьому предмет закупівлі, його технічні та якісніхарактеристики, атакож вимогидо учасника не повинні відрізнятисявід вимог, що були визначені замовникому тедерній документації': u'twiceUnsuccessful',
u'Потреба здійснити додаткову закупівлю в того самого постачальника з метою уніфікації, стандартизації або забезпечення сумісності з наявними товарами, технологіями, роботами чи послугами, якщо заміна попереднього постачальника (виконавця робіт, надавача послуг) може призвести до несумісності або виникнення проблем технічного характеру,пов’язаних з експлуатацією та обслуговуванням': u'additionalPurchase',
u'Необхідність проведення додаткових будівельних робіт, не зазначених у початковому проекті, але які стали через непередбачувані обставини необхідними для виконання проекту за сукупності таких умов: договір буде укладено з попереднім виконавцем цих робіт, такі роботи технічно чи економічно пов’язані з головним (первинним) договором; загальна вартість додаткових робіт не перевищує 50 відсотків вартості головного (первинного) договору': u'additionalConstruction',
u'Закупівля юридичних послуг, пов’язаних із захистом прав та інтересів України, у тому числі з метою захисту національної безпеки і оборони, під час врегулювання спорів, розгляду в закордонних юрисдикційних органах справ за участю іноземного суб’єкта та України, на підставі рішення Кабінету Міністрів України або введених в дію відповідно до закону рішень Ради національної безпеки і оборони України': u'stateLegalServices'
}
cause_type = cause_dictionary.get(cause_text)
if cause_type:
return cause_type
else:
return cause_text
def get_items_from_lot(items, lot_id):
    """Return the items whose 'relatedLot' equals *lot_id*, preserving order."""
    return [item for item in items if item['relatedLot'] == lot_id]
def get_ECP_key(path):
    """Resolve *path* (an ECP key location) against the current working
    directory."""
    base = os.getcwd()
    return os.path.join(base, path)
def get_date_formatting(date, format_day):
    """Parse any dateutil-readable date string and render its date part with
    the given strftime format."""
    return dateutil.parser.parse(date).date().strftime(format_day)
def get_scenarios_name():
    """Return the last command-line argument containing 'txt', or '' when
    none is present."""
    matches = [arg for arg in sys.argv if 'txt' in arg]
    return matches[-1] if matches else ''
def is_click_button(item_index, items_count, lot_index):
    """Return the string 'true' when the item is not the last in its lot and
    the lot index is greater than 1; otherwise 'false'.

    Fix: lot_index is now int-converted like the other two arguments — the
    original compared a possibly-string lot_index against 1, which raises
    TypeError on Python 3 (and compared by type name on Python 2).
    """
    if int(item_index) < int(items_count) and int(lot_index) > 1:
        return 'true'
    return 'false'
def get_milestones_title(title):
    """Map a localized milestone event name to its OCDS title code; unknown
    names yield None."""
    title_codes = {
        u'підписання договору': 'signingTheContract',
        u'поставка товару': 'deliveryOfGoods',
        u'дата подання заявки': 'submissionDateOfApplications',
        u'дата закінчення звітного періоду': 'endDateOfTheReportingPeriod',
        u'дата виставлення рахунку': 'dateOfInvoicing',
        u'виконання робіт': 'executionOfWorks',
        u'надання послуг': 'submittingServices',
        u'інша подія': 'anotherEvent'
    }
    return title_codes.get(title)
def get_milestones_code(code):
    """Map a localized payment-type label to its code ('prepayment' /
    'postpayment'); unknown labels yield None."""
    payment_codes = {
        u'Аванс': 'prepayment',
        u'Пiсляоплата': 'postpayment'
    }
    return payment_codes.get(code)
def get_milestones_duration_type(type):
    """Map a localized day-counting label to its duration type code; unknown
    labels yield None."""
    duration_types = {
        u'робочих': 'working',
        u'банківськіх': 'banking',
        u'календарних': 'calendar'
    }
    return duration_types.get(type)
def get_rationaleType(type):
    """Map a localized contract-change rationale to its rationaleType code;
    unknown rationales yield None."""
    rationale_types = {
        u'Зменшення обсягів закупівлі': 'volumeCuts',
        u'Зміна сторонніх показників (курсу, тарифів...)': 'thirdParty',
        u'Зміна ціни у зв’язку із зміною ставок податків і зборів': 'taxRate',
        u'Покращення якості предмета закупівлі': 'qualityImprovement',
        u'Узгоджене зменшення ціни': 'priceReduction',
        u'Зміна ціни за одиницю товару': 'itemPriceVariation',
        u'Продовження строку дії договору на наступний рік': 'fiscalYearExtension',
        u'Продовження строку дії договору (черездокументально підтверджені об’єктивні обставини)': 'durationExtension',
    }
    return rationale_types.get(type)
def change_fake_date():
    """Return 'now + 3 days' in Europe/Kiev local time, formatted as
    '%Y-%m-%dT%H:%M:%S.%f%z'."""
    return (datetime.datetime.now(timezone('Europe/Kiev')) + timedelta(days=3)).strftime('%Y-%m-%dT%H:%M:%S.%f%z')
| 39.538278
| 485
| 0.66104
|
import os
import sys
from dateutil import parser
from datetime import datetime
from pytz import timezone
import re
import datetime
import dateutil.parser
from datetime import timedelta
def modify_test_data(initial_data):
initial_data['procuringEntity']['name'] = u'ТОВ \"СФ \"РУБІЖНЕ\"'
if 'contactPoint' in initial_data['procuringEntity']:
initial_data['procuringEntity']['contactPoint']['telephone'] = u'+380670444580'
initial_data['procuringEntity']['contactPoint']['url'] = u'https://dadadad.com'
initial_data['procuringEntity']['identifier']['legalName'] = u'ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"СІЛЬСЬКОГОСПОДАРСЬКА ФІРМА \"РУБІЖНЕ\"'
initial_data['procuringEntity']['identifier']['id'] = u'38580144'
initial_data['buyers'][0]['identifier']['id'] = u'38580144'
initial_data['buyers'][0]['identifier']['legalName'] = u'ТОВАРИСТВО З ОБМЕЖЕНОЮ ВІДПОВІДАЛЬНІСТЮ \"СІЛЬСЬКОГОСПОДАРСЬКА ФІРМА \"РУБІЖНЕ\"'
initial_data['buyers'][0]['name'] = u'ТОВ \"СФ \"РУБІЖНЕ\"'
initial_data['tender']['tenderPeriod']['startDate'] = add_day_to_date(initial_data['tender']['tenderPeriod']['startDate'])
return initial_data
def add_day_to_date(date):
dat = parser.parse(date)
new_date = (dat + timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S%z')
new = parser.parse(new_date).isoformat()
return new
def get_currency_type(currency):
    """Map a localized currency label to its ISO 4217 code.

    Unknown labels pass through unchanged.  The original called
    ``str.decode`` on any ``str``, which only works on Python 2; checking
    for ``bytes`` keeps the Py2 behaviour while also working on Python 3.
    """
    if isinstance(currency, bytes):
        currency = currency.decode("utf-8")
    currency_dictionary = {
        u'грн': 'UAH'
    }
    return currency_dictionary.get(currency, currency)
def get_month_number(month_name):
    """Return the month number (1-12) for a Russian/Ukrainian month name or abbreviation.

    Raises ValueError for unrecognized names (index() semantics preserved).
    """
    # Four blocks of 12 names: ru full, ru abbreviated, uk abbreviated, uk full.
    known_names = [u"января", u"февраля", u"марта", u"апреля", u"мая", u"июня",
                   u"июля", u"августа", u"сентября", u"октября", u"ноября", u"декабря",
                   u"янв.", u"февр.", u"мар.", u"апр.", u"мая.", u"июн.",
                   u"июл.", u"авг.", u"сент.", u"окт.", u"нояб.", u"дек.",
                   u"січ.", u"лют.", u"бер.", u"квіт.", u"трав.", u"черв.",
                   u"лип.", u"серп.", u"вер.", u"жовт.", u"лист.", u"груд.",
                   u"січня", u"лютого", u"березня", u"квітня", u"травня", u"червня",
                   u"липня", u"серпня", u"вересня", u"жовтня", u"листопада", u"грудня"]
    position = known_names.index(month_name)
    return position % 12 + 1
def get_time_with_offset(date):
    """Localize a naive 'YYYY-MM-DD HH:MM' string to Kiev time, with UTC offset."""
    naive = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M")
    localized = timezone('Europe/Kiev').localize(naive)
    return localized.strftime('%Y-%m-%d %H:%M:%S.%f%z')
def get_time_with_offset_formatted(date, input_format_date):
    """Parse *date* with *input_format_date*, localize to Kiev time, return ISO string."""
    naive = datetime.datetime.strptime(date, input_format_date)
    return timezone('Europe/Kiev').localize(naive).isoformat()
def get_current_date():
    """Return today's local date formatted as DD-MM-YYYY.

    Bug fix: the module does ``import datetime`` after
    ``from datetime import datetime``, so the module object shadows the
    class and the original ``datetime.now()`` raised AttributeError.
    """
    return datetime.datetime.now().strftime('%d-%m-%Y')
def get_unit_code(name):
    """Map a Ukrainian unit-of-measure label to its UN/CEFACT code.

    Unknown labels are returned unchanged.
    """
    code_by_name = {
        u'кілограми': u'KGM',
        u'пара': u'PR',
        u'літр': u'LTR',
        u'набір': u'SET',
        u'пачок': u'NMP',
        u'метри': u'MTR',
        u'лот': u'LO',
        u'послуга': u'E48',
        u'метри кубічні': u'MTQ',
        u'ящик': u'BX',
        u'рейс': u'E54',
        u'тони': u'TNE',
        u'метри квадратні': u'MTK',
        u'кілометри': u'KMT',
        u'штуки': u'H87',
        u'місяць': u'MON',
        u'пачка': u'RM',
        u'упаковка': u'PK',
        u'гектар': u'HAR',
        u'блок': u'D64',
        u'Флакон': u'VI'
    }
    return code_by_name.get(name, name)
def get_unit_name(current_name):
    """Normalize a Russian unit-of-measure word form to its canonical Ukrainian name.

    Unknown labels are returned unchanged.  Fixes vs. the original:
    accepts bytes instead of calling ``str.decode`` (Py2-only), drops a
    dead ``dictionary.get`` call whose result was discarded, and uses
    ``items()`` instead of Py2-only ``iteritems()``.  The last-match-wins
    scan is kept so ambiguous forms resolve exactly as before.
    """
    if isinstance(current_name, bytes):
        current_name = current_name.decode("utf-8")
    dictionary = {
        u'кілограми': {u'килограмм', u'килограмма', u'килограммов'},
        u'пара': {u'пара', u'пары', u'пар'},
        u'літр': {u'литр', u'литра', u'литров'},
        u'набір': {u'набор', u'набора', u'наборов'},
        u'пачок': {u'пачка', u'пачек', u'пачки'},
        u'метри': {u'метр', u'метра', u'метров'},
        u'лот': {u'лот', u'лоты', u'лотов'},
        u'послуга': {u'услуга', u'услуг', u'услуги'},
        u'метри кубічні': {u'метр кубический', u'метра кубического', u'метров кубических'},
        u'ящик': {u'ящик', u'ящика', u'ящиков'},
        u'рейс': {u'рейс', u'рейса', u'рейсов'},
        u'тони': {u'тонна', u'тонны', u'тонн'},
        u'метри квадратні': {u'метр квадратный', u'метра квадратного', u'метров квадратных'},
        u'кілометри': {u'километр', u'километров', u'километра'},
        u'штуки': {u'штука', u'штуки', u'штук', u'Штуки'},
        u'місяць': {u'месяц', u'месяца', u'месяцев'},
        u'пачка': {u'пачка', u'пачек', u'пачкики'},
        u'упаковка': {u'упаковка', u'упаковок', u'упаковки'},
        u'гектар': {u'гектар', u'гектара', u'гектаров'},
        u'блок': {u'блок', u'блока', u'блоков'}
    }
    expected_name = None
    for name, variants in dictionary.items():
        if current_name in variants:
            expected_name = name  # keep scanning: last match wins (legacy behaviour)
    if expected_name:
        return expected_name
    return current_name
def get_unit_name_ru(current_name):
    """Normalize a unit-of-measure word form to its canonical Russian name.

    Unknown labels are returned unchanged.  Fixes vs. the original:
    accepts bytes instead of calling ``str.decode`` (Py2-only), drops a
    dead ``dictionary.get`` call whose result was discarded, and uses
    ``items()`` instead of Py2-only ``iteritems()``.  The last-match-wins
    scan is kept so ambiguous forms resolve exactly as before.
    """
    if isinstance(current_name, bytes):
        current_name = current_name.decode("utf-8")
    dictionary = {
        u'килограмм': {u'килограмм', u'килограмма', u'килограммов', u'кілограми'},
        u'пара': {u'пара', u'пары', u'пар'},
        u'литр': {u'литр', u'литра', u'литров'},
        u'набора': {u'набір', u'набора', u'наборов'},
        u'пачек': {u'пачка', u'пачек', u'пачки'},
        u'метр': {u'метр', u'метра', u'метров'},
        u'лот': {u'лот', u'лоты', u'лотов'},
        u'услуга': {u'услуга', u'услуг', u'услуги'},
        u'метр .куб.': {u'метр кубический', u'метра кубического', u'метров кубических'},
        u'ящик': {u'ящик', u'ящика', u'ящиков'},
        u'рейс': {u'рейс', u'рейса', u'рейсов'},
        u'тонны': {u'тонна', u'тонны', u'тонн'},
        u'метр квадратный': {u'метр квадратный', u'метра квадратного', u'метров квадратных'},
        u'километры': {u'километр', u'километров', u'километра'},
        u'штуки': {u'штука', u'штуки', u'штук'},
        u'месяц': {u'месяц', u'месяца', u'месяцев'},
        u'пачка': {u'пачка', u'пачек', u'пачкики'},
        u'упаковка': {u'упаковка', u'упаковок', u'упаковки'},
        u'гектар': {u'гектар', u'гектара', u'гектаров'},
        u'блок': {u'блок', u'блока', u'блоков'}
    }
    expected_name = None
    for name, variants in dictionary.items():
        if current_name in variants:
            expected_name = name  # keep scanning: last match wins (legacy behaviour)
    if expected_name:
        return expected_name
    return current_name
def get_classification_type(classifications):
    """Map a classification-scheme label to its canonical scheme id.

    Unknown labels are returned unchanged.
    """
    scheme_by_label = {
        u'ДК 016:2010': u'ДКПП',
        u'ДК 021:2015': u'CPV',
        u'ДК 18-2000': u'ДК018',
        u'ДК003: 2010': u'ДК003',
        u'ДК003:2010': u'ДК003',
        u'ДК 015-97': u'ДК015',
        u'ДК021': u'CPV'
    }
    return scheme_by_label.get(classifications, classifications)
def get_status_type(status_name):
    """Map a localized (ru/uk) tender-status label to its API status code.

    Whitespace around the label is ignored; unknown labels yield None.
    """
    status_by_label = {
        u'Период уточнений': 'active.enquiries',
        u'Період уточнень': 'active.enquiries',
        u'Период уточнений завершен': 'active.enquiries.ended',
        u'Період уточнень завершено': 'active.enquiries.ended',
        u'Подача предложений': 'active.tendering',
        u'Подача пропозицій': 'active.tendering',
        u'Торги': 'active.auction',
        u'Квалификация победителя': 'active.qualification',
        u'Квалификація переможця': 'active.qualification',
        u'Предложения рассмотрены': 'active.awarded',
        u'Пропозиції розглянуті': 'active.awarded',
        u'Закупка не состоялась': 'unsuccessful',
        u'Закупівля не відбулась': 'unsuccessful',
        u'Завершено': 'complete',
        u'Отменено': 'cancelled',
        u'Відмінено': 'cancelled',
        u'Розглядається': 'pending',
        u'Кваліфікація учасника': 'active.pre-qualification',
        u'Пауза перед аукціоном': 'active.pre-qualification.stand-still',
        u'Прекваліфікація': 'active.pre-qualification',
        u'Преквалификация': 'active.pre-qualification'
    }
    return status_by_label.get(status_name.strip())
def convert_float_to_string(number):
    """Format a float as a two-decimal string; any other value passes through."""
    # Exact type check (not isinstance) deliberately preserved: subclasses
    # and non-float numerics are returned untouched.
    if type(number) is float:
        return format(number, '.2f')
    return number
def get_claim_status(status):
    """Map a localized claim-status label to its API code (None if unknown)."""
    code_by_label = {
        u'Вiдправлено': 'claim',
        u'Отримано вiдповiдь': 'answered',
        u'Задоволено': 'resolved',
        u'Скасована': 'cancelled',
        u'Не вирiшена, обробляється': 'pending',
        u'Залишена без відповіді': 'ignored',
        u'Не задоволено': 'declined',
        u'Вимога відхилена': 'invalid',
        u'Запит для пiдтверждения скасування': 'stopping'
    }
    return code_by_label.get(status)
def get_procurementMethod_Type(type):
    """Map a localized procurement-method label to its API code (None if unknown)."""
    method_by_label = {
        u'Конкурентний діалог з публікацією англійською мовою 1-ий етап': 'competitiveDialogueEU',
        u'Конкурентний діалог 1-ий етап': 'competitiveDialogueUA',
        u'Переговорна процедура для потреб оборони': 'aboveThresholdUA.defense',
        u'Укладання рамкової угоди': 'closeFrameworkAgreementUA',
        u'Допорогові закупівлі': 'belowThreshold',
        u'Переговорна процедура': 'negotiation',
        u'Звіт про укладений договір': 'reporting',
        u'Відкриті торги': 'aboveThresholdUA',
        u'Відкриті торги з публікацією англійською мовою': 'aboveThresholdEU',
        u'Відкриті торги для закупівлі енергосервісу': 'esco'
    }
    return method_by_label.get(type)
def sum_of_numbers(number, value):
    """Return the integer sum of two int-coercible values (e.g. numeric strings)."""
    return int(number) + int(value)
def abs_number(number):
    """Return the absolute value of *number* after coercing it to int."""
    return abs(int(number))
def get_abs_item_index(lot_index, item_index, items_count):
    """Translate a 1-based (lot, item) pair into a 1-based absolute item index."""
    full_lots_before = int(lot_index) - 1
    return full_lots_before * int(items_count) + int(item_index)
def get_match_from_string(string, pattern, group):
    """Return capture *group* of the first *pattern* match in *string*.

    Returns the literal string 'null' when the pattern does not match (kept
    for backward compatibility with existing keyword callers).  The original
    ran ``search`` twice; one search is enough.
    """
    match = re.compile(pattern).search(string)
    if match:
        return match.group(int(group))
    return 'null'
def get_percent(value):
    """Convert a fraction (e.g. 0.25) into a whole-percent string ('25')."""
    return format(value * 100, '.0f')
def get_conversion_to_int(value):
    """Parse a (possibly fractional) numeric string and truncate it to int."""
    return int(float(value))
def get_cause(cause_text):
    """Map the full localized text of a negotiation-procedure cause to its API
    cause code; an unrecognized text is returned unchanged.

    The keys are verbatim UI strings (including their original typos), so they
    must not be edited.
    """
    cause_dictionary = {
        u'Закупівля творів мистецтва або закупівля, пов’язана із захистом прав інтелектуальної власності, або укладення договору про закупівлю з переможцем архітектурного чи мистецького конкурсу': u'artContestIP',
        u'Відсутність конкуренції (у тому числі з технічних причин) на відповідному ринку, внаслідок чого договір про закупівлю може бути укладено лише з одним постачальником, завідсутності при цьому альтернативи': u'noCompetition',
        u'Нагальна потреба у здійсненні закупівлі у зв’язку з виникненням особливих економічних чи соціальних обставин, яка унеможливлює дотримання замовниками строків для проведення тендеру, а саме пов’язаних з негайною ліквідацією наслідків надзвичайних ситуацій, а також наданням у встановленому порядку Україною гуманітарної допомоги іншим державам. Застосування переговорної процедури закупівлі в таких випадках здійснюється за рішенням замовника щодо кожної процедури': u'quick',
        u'Якщо замовником було двічі відмінено тендер через відсутність достатньої кількостіучасників,прицьому предмет закупівлі, його технічні та якісніхарактеристики, атакож вимогидо учасника не повинні відрізнятисявід вимог, що були визначені замовникому тедерній документації': u'twiceUnsuccessful',
        u'Потреба здійснити додаткову закупівлю в того самого постачальника з метою уніфікації, стандартизації або забезпечення сумісності з наявними товарами, технологіями, роботами чи послугами, якщо заміна попереднього постачальника (виконавця робіт, надавача послуг) може призвести до несумісності або виникнення проблем технічного характеру,пов’язаних з експлуатацією та обслуговуванням': u'additionalPurchase',
        u'Необхідність проведення додаткових будівельних робіт, не зазначених у початковому проекті, але які стали через непередбачувані обставини необхідними для виконання проекту за сукупності таких умов: договір буде укладено з попереднім виконавцем цих робіт, такі роботи технічно чи економічно пов’язані з головним (первинним) договором; загальна вартість додаткових робіт не перевищує 50 відсотків вартості головного (первинного) договору': u'additionalConstruction',
        u'Закупівля юридичних послуг, пов’язаних із захистом прав та інтересів України, у тому числі з метою захисту національної безпеки і оборони, під час врегулювання спорів, розгляду в закордонних юрисдикційних органах справ за участю іноземного суб’єкта та України, на підставі рішення Кабінету Міністрів України або введених в дію відповідно до закону рішень Ради національної безпеки і оборони України': u'stateLegalServices'
    }
    cause_type = cause_dictionary.get(cause_text)
    if cause_type:
        return cause_type
    else:
        return cause_text
def get_items_from_lot(items, lot_id):
    """Return the sub-list of *items* whose 'relatedLot' equals *lot_id*."""
    return [item for item in items if item['relatedLot'] == lot_id]
def get_ECP_key(path):
    """Resolve the ECP key *path* relative to the current working directory."""
    working_dir = os.getcwd()
    return os.path.join(working_dir, path)
def get_date_formatting(date, format_day):
    """Parse a datetime string and re-format its date part with *format_day*."""
    parsed = dateutil.parser.parse(date)
    return parsed.date().strftime(format_day)
def get_scenarios_name():
    """Return the last command-line argument containing 'txt', or '' if none."""
    matching = [argument for argument in sys.argv if 'txt' in argument]
    return matching[-1] if matching else ''
def is_click_button(item_index, items_count, lot_index):
    """Return 'true' when more items remain and the lot is not the first one.

    Indexes arrive as strings from the keyword layer, so everything is
    coerced to int; the original compared ``lot_index > 1`` without
    coercion, which raises TypeError (str vs int) on Python 3.
    """
    if int(item_index) < int(items_count) and int(lot_index) > 1:
        return 'true'
    return 'false'
def get_milestones_title(title):
    """Map a localized milestone title to its API code (None if unknown)."""
    code_by_title = {
        u'підписання договору': 'signingTheContract',
        u'поставка товару': 'deliveryOfGoods',
        u'дата подання заявки': 'submissionDateOfApplications',
        u'дата закінчення звітного періоду': 'endDateOfTheReportingPeriod',
        u'дата виставлення рахунку': 'dateOfInvoicing',
        u'виконання робіт': 'executionOfWorks',
        u'надання послуг': 'submittingServices',
        u'інша подія': 'anotherEvent'
    }
    return code_by_title.get(title)
def get_milestones_code(code):
    """Map a localized milestone payment label to its API code (None if unknown)."""
    code_by_label = {
        u'Аванс': 'prepayment',
        u'Пiсляоплата': 'postpayment'
    }
    return code_by_label.get(code)
def get_milestones_duration_type(type):
    """Map a localized day-count label to its duration type (None if unknown)."""
    duration_by_label = {
        u'робочих': 'working',
        u'банківськіх': 'banking',
        u'календарних': 'calendar'
    }
    return duration_by_label.get(type)
def get_rationaleType(type):
    """Map a localized contract-change rationale label to its API code (None if unknown)."""
    rationale_by_label = {
        u'Зменшення обсягів закупівлі': 'volumeCuts',
        u'Зміна сторонніх показників (курсу, тарифів...)': 'thirdParty',
        u'Зміна ціни у зв’язку із зміною ставок податків і зборів': 'taxRate',
        u'Покращення якості предмета закупівлі': 'qualityImprovement',
        u'Узгоджене зменшення ціни': 'priceReduction',
        u'Зміна ціни за одиницю товару': 'itemPriceVariation',
        u'Продовження строку дії договору на наступний рік': 'fiscalYearExtension',
        u'Продовження строку дії договору (черездокументально підтверджені об’єктивні обставини)': 'durationExtension',
    }
    return rationale_by_label.get(type)
def change_fake_date():
    """Return a timestamp three days from now in Kiev time, ISO-like with offset."""
    future = datetime.datetime.now(timezone('Europe/Kiev')) + timedelta(days=3)
    return future.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
| true
| true
|
f704d8c0caad083f0db74121655533699125a844
| 22,237
|
py
|
Python
|
test/input_gen/genModelsRecurrent_v2.py
|
corner4world/nntrainer
|
0f342e8f2a1ec95b4e712aa3390b21cf0ea4efae
|
[
"Apache-2.0"
] | null | null | null |
test/input_gen/genModelsRecurrent_v2.py
|
corner4world/nntrainer
|
0f342e8f2a1ec95b4e712aa3390b21cf0ea4efae
|
[
"Apache-2.0"
] | null | null | null |
test/input_gen/genModelsRecurrent_v2.py
|
corner4world/nntrainer
|
0f342e8f2a1ec95b4e712aa3390b21cf0ea4efae
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
##
# Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
#
# @file genModelsRecurrent_v2.py
# @date 19 October 2021
# @brief Generate recurrent model tcs
# @author Jihoon lee <jhoon.it.lee@samsung.com>
from recorder_v2 import record_v2, inspect_file
from zoneout import Zoneout
import torch
class FCUnroll(torch.nn.Module):
    """A chain of Linear(1, 1) layers applied repeatedly ``unroll_for`` times.

    ``loss`` is an Identity so the recorder still receives a tensor even
    though no real loss is computed.
    """

    def __init__(self, unroll_for=1, num_fc=1):
        super().__init__()
        self.fcs = torch.nn.ModuleList([torch.nn.Linear(1, 1) for i in range(num_fc)])
        self.unroll_for = unroll_for
        self.loss = torch.nn.Identity()

    def forward(self, inputs, labels):
        out = inputs[0]
        for _ in range(self.unroll_for):
            for layer in self.fcs:
                out = layer(out)
        return out, self.loss(out)
class RNNCellStacked(torch.nn.Module):
    """Stack of RNNCells unrolled ``unroll_for`` steps.

    Returns the per-step outputs stacked on dim 1 and their MSE loss
    against ``labels[0]``.  Hidden states start at zero.
    """

    def __init__(self, unroll_for=1, num_rnn=1, input_size=1, hidden_size=1):
        super().__init__()
        self.rnns = torch.nn.ModuleList(
            [torch.nn.RNNCell(input_size, hidden_size) for _ in range(num_rnn)]
        )
        self.unroll_for = unroll_for
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        out = inputs[0]
        # One zero hidden state per cell, shaped like the input batch.
        hidden = [torch.zeros_like(out) for _ in self.rnns]
        step_outputs = []
        for _ in range(self.unroll_for):
            for idx, cell in enumerate(self.rnns):
                hidden[idx] = cell(out, hidden[idx])
                out = hidden[idx]
            step_outputs.append(out)
        stacked = torch.stack(step_outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
class LSTMStacked(torch.nn.Module):
    """Stack of (optionally bidirectional) LSTM layers with zero initial states.

    ``inputs[0]`` is the (batch, seq, 2) sequence; any extra entries in
    ``inputs`` are accepted but ignored — the model always starts from zero
    states, matching the recorded golden data.  Returns the last layer's
    output sequence and its MSE loss against ``labels[0]``.

    Improvement: the original hard-coded the zero-state shape to batch
    size 3 (``torch.zeros((2, 3, 2))``); the shape is now derived from the
    actual input batch, which is identical at batch 3 and also works for
    other batch sizes.
    """

    def __init__(self, num_lstm=1, bidirectional=False):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.num_lstm = num_lstm
        self.bidirectional = bidirectional
        self.lstms = torch.nn.ModuleList(
            [
                # Layers after the first see the doubled feature size when
                # the stack is bidirectional.
                torch.nn.LSTM(
                    self.input_size if self.bidirectional == False or i == 0 else 2 * self.input_size,
                    self.hidden_size,
                    batch_first=True,
                    bidirectional=bidirectional,
                )
                for i in range(num_lstm)
            ]
        )
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        out = inputs[0]
        states = inputs[1:]  # accepted but unused (zero initial states)
        # (num_directions, batch, hidden) — batch taken from the input.
        num_directions = 2 if self.bidirectional else 1
        state_shape = (num_directions, out.size(0), self.hidden_size)
        hs = [torch.zeros(state_shape) for _ in range(self.num_lstm)]
        cs = [torch.zeros(state_shape) for _ in range(self.num_lstm)]
        for i, (lstm, h, c) in enumerate(zip(self.lstms, hs, cs)):
            out, (hs[i], cs[i]) = lstm(out, (h, c))
        loss = self.loss(out, labels[0])
        return out, loss
class LSTMCellStacked(torch.nn.Module):
    """Stack of LSTMCells unrolled ``unroll_for`` steps.

    ``inputs`` is ``[x, h0_0, c0_0, h0_1, c0_1, ...]`` — one (h, c) pair per
    cell.  Returns the per-step outputs stacked on dim 1 and their MSE loss
    against ``labels[0]``.
    """

    def __init__(self, unroll_for=2, num_lstmcell=1):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.lstmcells = torch.nn.ModuleList(
            [torch.nn.LSTMCell(self.input_size, self.hidden_size) for _ in range(num_lstmcell)]
        )
        self.unroll_for = unroll_for
        self.num_lstmcell = num_lstmcell
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        out = inputs[0]
        states = inputs[1:]
        # Interleaved (h, c) pairs, one pair per cell.
        hidden = [states[2 * i] for i in range(self.num_lstmcell)]
        cell_state = [states[2 * i + 1] for i in range(self.num_lstmcell)]
        step_outputs = []
        for _ in range(self.unroll_for):
            for idx, cell in enumerate(self.lstmcells):
                hidden[idx], cell_state[idx] = cell(out, (hidden[idx], cell_state[idx]))
                out = hidden[idx]
            step_outputs.append(out)
        stacked = torch.stack(step_outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
class ZoneoutLSTMStacked(torch.nn.Module):
    """Stack of project ``Zoneout`` LSTM cells unrolled ``unroll_for`` steps.

    ``inputs`` is ``[x, h0_0, c0_0, h0_1, c0_1, ...]``.  Each cell also
    receives the current unroll step so its zoneout masks line up with the
    recorded golden data.  Returns the stacked per-step outputs and their
    MSE loss against ``labels[0]``.
    """

    def __init__(self, batch_size=3, unroll_for=2, num_lstm=1, hidden_state_zoneout_rate=1, cell_state_zoneout_rate=1):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.cell_state_zoneout_rate = cell_state_zoneout_rate
        self.zoneout_lstms = torch.nn.ModuleList(
            [
                Zoneout(batch_size, self.input_size, self.hidden_size, unroll_for, hidden_state_zoneout_rate, cell_state_zoneout_rate)
                for _ in range(num_lstm)
            ]
        )
        self.unroll_for = unroll_for
        self.num_lstm = num_lstm
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        out = inputs[0]
        states = inputs[1:]
        hidden = [states[2 * i] for i in range(self.num_lstm)]
        cell_state = [states[2 * i + 1] for i in range(self.num_lstm)]
        step_outputs = []
        for step in range(self.unroll_for):
            for idx, zoneout_cell in enumerate(self.zoneout_lstms):
                hidden[idx], cell_state[idx] = zoneout_cell(out, (hidden[idx], cell_state[idx], step))
                out = hidden[idx]
            step_outputs.append(out)
        stacked = torch.stack(step_outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
class GRUCellStacked(torch.nn.Module):
    """Stack of GRUCells unrolled ``unroll_for`` steps.

    ``inputs`` is ``[x, h0_0, h0_1, ...]`` — one initial hidden state per
    cell.  Returns the per-step outputs stacked on dim 1 and their MSE loss
    against ``labels[0]``.
    """

    def __init__(self, unroll_for=2, num_grucell=1):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.grus = torch.nn.ModuleList(
            [torch.nn.GRUCell(self.input_size, self.hidden_size, bias=True) for _ in range(num_grucell)]
        )
        self.unroll_for = unroll_for
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        out = inputs[0]
        hidden = list(inputs[1:])
        step_outputs = []
        for _ in range(self.unroll_for):
            for idx, cell in enumerate(self.grus):
                hidden[idx] = cell(out, hidden[idx])
                out = hidden[idx]
            step_outputs.append(out)
        stacked = torch.stack(step_outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
if __name__ == "__main__":
    # Record golden data for every recurrent-layer test case.  Each record_v2
    # call builds the model, runs `iteration` train steps on random inputs of
    # the given dims, and dumps tensors to "<name>.nnmodelgolden".
    # --- fully-connected unroll cases ---
    record_v2(
        FCUnroll(unroll_for=5),
        iteration=2,
        input_dims=[(1,)],
        label_dims=[(1,)],
        name="fc_unroll_single",
    )
    record_v2(
        FCUnroll(unroll_for=2, num_fc=2),
        iteration=2,
        input_dims=[(1,)],
        label_dims=[(1,)],
        name="fc_unroll_stacked",
    )
    record_v2(
        FCUnroll(unroll_for=2, num_fc=2),
        iteration=2,
        input_dims=[(1,)],
        label_dims=[(1,)],
        name="fc_unroll_stacked_clipped",
        clip=True
    )
    # --- RNNCell cases ---
    record_v2(
        RNNCellStacked(unroll_for=2, num_rnn=1, input_size=2, hidden_size=2),
        iteration=2,
        input_dims=[(3, 2)],
        label_dims=[(3, 2, 2)],
        name="rnncell_single",
    )
    record_v2(
        RNNCellStacked(unroll_for=2, num_rnn=2, input_size=2, hidden_size=2),
        iteration=2,
        input_dims=[(3, 2)],
        label_dims=[(3, 2, 2)],
        name="rnncell_stacked",
    )
    # --- LSTM cases (uni- and bidirectional, single and stacked) ---
    unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, False]
    record_v2(
        LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
        iteration=iteration,
        input_dims=[(batch_size, unroll_for, feature_size)],
        # input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="lstm_single",
    )
    unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, False]
    record_v2(
        LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
        iteration=iteration,
        input_dims=[(batch_size, unroll_for, feature_size)],
        # input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="lstm_stacked",
    )
    unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, True]
    record_v2(
        LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
        iteration=iteration,
        input_dims=[(batch_size, unroll_for, feature_size)],
        # input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)],
        label_dims=[(batch_size, unroll_for, 2 * unit)],
        name="bidirectional_lstm_single",
    )
    unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, True]
    record_v2(
        LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
        iteration=iteration,
        input_dims=[(batch_size, unroll_for, feature_size)],
        # input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)],
        label_dims=[(batch_size, unroll_for, 2 * unit)],
        name="bidirectional_lstm_stacked",
    )
    # --- LSTMCell cases ---
    unroll_for, num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 1, 2, 3, 2, 2, 2]
    record_v2(
        LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="lstmcell_single",
    )
    unroll_for, num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 2, 2, 3, 2, 2, 2]
    record_v2(
        LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="lstmcell_stacked",
    )
    # --- zoneout LSTM cases: single/stacked x hidden-rate {0, .5, 1} x cell-rate {0, .5, 1};
    #     the name suffix encodes the two rates as percentages ---
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_000_000",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_000_000",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_050_000",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_050_000",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_100_000",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_100_000",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.5]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_000_050",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.5]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_000_050",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.5]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_050_050",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.5]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_050_050",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.5]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_100_050",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.5]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_100_050",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 1.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_000_100",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 1.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_000_100",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 1.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_050_100",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 1.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_050_100",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 1.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_single_100_100",
    )
    unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 1.0]
    record_v2(
        ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="zoneout_lstm_stacked_100_100",
    )
    # --- GRUCell cases ---
    unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 1, 3, 2, 2, 2]
    record_v2(
        GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="grucell_single",
    )
    unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 2, 3, 2, 2, 2]
    record_v2(
        GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell),
        iteration=iteration,
        input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)],
        label_dims=[(batch_size, unroll_for, unit)],
        name="grucell_stacked",
    )
    # inspect_file("lstm_single.nnmodelgolden")
| 48.765351
| 200
| 0.672168
|
from recorder_v2 import record_v2, inspect_file
from zoneout import Zoneout
import torch
class FCUnroll(torch.nn.Module):
    """Stack of 1x1 fully-connected layers applied repeatedly for a fixed unroll count."""

    def __init__(self, unroll_for=1, num_fc=1):
        super().__init__()
        # One Linear(1, 1) per stacked layer; the same stack is reused on every
        # unroll step.
        self.fcs = torch.nn.ModuleList(torch.nn.Linear(1, 1) for _ in range(num_fc))
        self.unroll_for = unroll_for
        # Identity "loss": the recorded loss is just the forward output.
        self.loss = torch.nn.Identity()

    def forward(self, inputs, labels):
        """Feed inputs[0] through the whole FC stack `unroll_for` times; labels are unused."""
        out = inputs[0]
        for _ in range(self.unroll_for):
            for layer in self.fcs:
                out = layer(out)
        return out, self.loss(out)
class RNNCellStacked(torch.nn.Module):
    """Stack of RNN cells unrolled over time; hidden states start at zero."""

    def __init__(self, unroll_for=1, num_rnn=1, input_size=1, hidden_size=1):
        super().__init__()
        self.rnns = torch.nn.ModuleList(
            torch.nn.RNNCell(input_size, hidden_size) for _ in range(num_rnn)
        )
        self.unroll_for = unroll_for
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        """Unroll the cell stack over inputs[0]; MSE of stacked outputs vs labels[0].

        NOTE(review): initial hidden states are ``zeros_like(inputs[0])``, which
        assumes hidden_size == feature size of inputs[0] — confirm with callers.
        """
        feed = inputs[0]
        hidden = [torch.zeros_like(feed) for _ in self.rnns]
        outputs = []
        for _ in range(self.unroll_for):
            for idx, cell in enumerate(self.rnns):
                hidden[idx] = cell(feed, hidden[idx])
                feed = hidden[idx]
            outputs.append(feed)
        stacked = torch.stack(outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
class LSTMStacked(torch.nn.Module):
    """Stack of (optionally bidirectional) LSTM layers over a batch-first sequence.

    Fix vs. original: the initial hidden/cell states were hard-coded to batch
    size 3 (``torch.zeros((1 or 2, 3, 2))``), so any other batch size crashed.
    They are now derived from the input batch, which is backward-compatible
    for the original batch-3 usage.
    """

    def __init__(self, num_lstm=1, bidirectional=False):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.num_lstm = num_lstm
        self.bidirectional = bidirectional
        # Layer 0 consumes the raw features; deeper layers consume the previous
        # layer's output, which is twice as wide when bidirectional.
        self.lstms = torch.nn.ModuleList(
            [
                torch.nn.LSTM(
                    self.input_size if not self.bidirectional or i == 0 else 2 * self.input_size,
                    self.hidden_size,
                    batch_first=True,
                    bidirectional=bidirectional,
                )
                for i in range(num_lstm)
            ]
        )
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        """Run inputs[0] (batch, seq, feature) through the stack; MSE vs labels[0]."""
        out = inputs[0]
        num_directions = 2 if self.bidirectional else 1
        # Zero initial states shaped (num_directions, batch, hidden) per layer
        # (each stack entry is a single-layer torch.nn.LSTM).
        state_shape = (num_directions, out.size(0), self.hidden_size)
        hs = [torch.zeros(state_shape) for _ in range(self.num_lstm)]
        cs = [torch.zeros(state_shape) for _ in range(self.num_lstm)]
        for i, (lstm, h, c) in enumerate(zip(self.lstms, hs, cs)):
            out, (hs[i], cs[i]) = lstm(out, (h, c))
        loss = self.loss(out, labels[0])
        return out, loss
class LSTMCellStacked(torch.nn.Module):
    """Stack of LSTM cells unrolled over time with caller-supplied initial states."""

    def __init__(self, unroll_for=2, num_lstmcell=1):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.lstmcells = torch.nn.ModuleList(
            torch.nn.LSTMCell(self.input_size, self.hidden_size)
            for _ in range(num_lstmcell)
        )
        self.unroll_for = unroll_for
        self.num_lstmcell = num_lstmcell
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        """inputs = [x, h0, c0, h1, c1, ...]; MSE of stacked per-step outputs vs labels[0]."""
        feed = inputs[0]
        states = inputs[1:]
        # States arrive interleaved as (h, c) pairs, one pair per stacked cell.
        hidden = [states[2 * idx] for idx in range(self.num_lstmcell)]
        cell_state = [states[2 * idx + 1] for idx in range(self.num_lstmcell)]
        outputs = []
        for _ in range(self.unroll_for):
            for idx, cell in enumerate(self.lstmcells):
                hidden[idx], cell_state[idx] = cell(feed, (hidden[idx], cell_state[idx]))
                feed = hidden[idx]
            outputs.append(feed)
        stacked = torch.stack(outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
class ZoneoutLSTMStacked(torch.nn.Module):
    """Stack of zoneout-regularized LSTM cells (project `Zoneout`) unrolled over time."""

    def __init__(self, batch_size=3, unroll_for=2, num_lstm=1, hidden_state_zoneout_rate=1, cell_state_zoneout_rate=1):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.cell_state_zoneout_rate = cell_state_zoneout_rate
        self.zoneout_lstms = torch.nn.ModuleList(
            Zoneout(batch_size, self.input_size, self.hidden_size, unroll_for,
                    hidden_state_zoneout_rate, cell_state_zoneout_rate)
            for _ in range(num_lstm)
        )
        self.unroll_for = unroll_for
        self.num_lstm = num_lstm
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        """inputs = [x, h0, c0, ...]; each cell also receives the current unroll index.

        NOTE(review): the `(h, c, step)` tuple convention comes from the project
        Zoneout cell — confirm against zoneout.py.
        """
        feed = inputs[0]
        states = inputs[1:]
        hidden = [states[2 * idx] for idx in range(self.num_lstm)]
        cell_state = [states[2 * idx + 1] for idx in range(self.num_lstm)]
        outputs = []
        for step in range(self.unroll_for):
            for idx, zcell in enumerate(self.zoneout_lstms):
                hidden[idx], cell_state[idx] = zcell(feed, (hidden[idx], cell_state[idx], step))
                feed = hidden[idx]
            outputs.append(feed)
        stacked = torch.stack(outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
class GRUCellStacked(torch.nn.Module):
    """Stack of GRU cells unrolled over time with caller-supplied initial states."""

    def __init__(self, unroll_for=2, num_grucell=1):
        super().__init__()
        self.input_size = self.hidden_size = 2
        self.grus = torch.nn.ModuleList(
            torch.nn.GRUCell(self.input_size, self.hidden_size, bias=True)
            for _ in range(num_grucell)
        )
        self.unroll_for = unroll_for
        self.loss = torch.nn.MSELoss()

    def forward(self, inputs, labels):
        """inputs = [x, h0, h1, ...] (one hidden state per cell); MSE vs labels[0]."""
        feed = inputs[0]
        hidden = list(inputs[1:])
        outputs = []
        for _ in range(self.unroll_for):
            for idx, cell in enumerate(self.grus):
                hidden[idx] = cell(feed, hidden[idx])
                feed = hidden[idx]
            outputs.append(feed)
        stacked = torch.stack(outputs, dim=1)
        return stacked, self.loss(stacked, labels[0])
if __name__ == "__main__":
    # Golden-data generation driver. Each record_v2 call (from recorder_v2)
    # presumably writes a golden file for the named configuration — the 18
    # zoneout cases and the paired single/stacked cases were copy-pasted in
    # the original; they are generated by loops here with identical arguments
    # in the identical order.

    # Fully-connected unroll cases (scalar feature, identity loss).
    record_v2(
        FCUnroll(unroll_for=5),
        iteration=2,
        input_dims=[(1,)],
        label_dims=[(1,)],
        name="fc_unroll_single",
    )
    record_v2(
        FCUnroll(unroll_for=2, num_fc=2),
        iteration=2,
        input_dims=[(1,)],
        label_dims=[(1,)],
        name="fc_unroll_stacked",
    )
    record_v2(
        FCUnroll(unroll_for=2, num_fc=2),
        iteration=2,
        input_dims=[(1,)],
        label_dims=[(1,)],
        name="fc_unroll_stacked_clipped",
        clip=True,
    )

    # Shared dimensions for the recurrent cases below.
    unroll_for, batch_size, unit, feature_size, iteration = 2, 3, 2, 2, 2

    # RNN cell stacks (hidden states created internally, so only x is fed).
    for num_rnn, depth in ((1, "single"), (2, "stacked")):
        record_v2(
            RNNCellStacked(unroll_for=unroll_for, num_rnn=num_rnn,
                           input_size=feature_size, hidden_size=unit),
            iteration=iteration,
            input_dims=[(batch_size, feature_size)],
            label_dims=[(batch_size, unroll_for, unit)],
            name="rnncell_" + depth,
        )

    # LSTM stacks, uni- and bidirectional (label width doubles when bidirectional).
    for bidirectional in (False, True):
        width = 2 * unit if bidirectional else unit
        prefix = "bidirectional_lstm" if bidirectional else "lstm"
        for num_lstm, depth in ((1, "single"), (2, "stacked")):
            record_v2(
                LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional),
                iteration=iteration,
                input_dims=[(batch_size, unroll_for, feature_size)],
                label_dims=[(batch_size, unroll_for, width)],
                name=prefix + "_" + depth,
            )

    state_num = 2  # each (zoneout-)LSTM cell takes an (h, c) state pair

    # LSTM cell stacks (initial h/c supplied as extra inputs).
    for num_lstmcell, depth in ((1, "single"), (2, "stacked")):
        record_v2(
            LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell),
            iteration=iteration,
            input_dims=[(batch_size, feature_size)]
                       + [(batch_size, unit) for _ in range(state_num * num_lstmcell)],
            label_dims=[(batch_size, unroll_for, unit)],
            name="lstmcell_" + depth,
        )

    # Zoneout LSTM stacks: full grid over hidden/cell zoneout rates
    # {0.0, 0.5, 1.0} x {0.0, 0.5, 1.0}, single and stacked, batch size 1.
    # Names encode the rates as percentages: zoneout_lstm_<depth>_<hhh>_<ccc>.
    batch_size = 1
    for cell_rate in (0.0, 0.5, 1.0):
        for hidden_rate in (0.0, 0.5, 1.0):
            for num_lstm, depth in ((1, "single"), (2, "stacked")):
                record_v2(
                    ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for,
                                       num_lstm=num_lstm,
                                       hidden_state_zoneout_rate=hidden_rate,
                                       cell_state_zoneout_rate=cell_rate),
                    iteration=iteration,
                    input_dims=[(batch_size, feature_size)]
                               + [(batch_size, unit) for _ in range(state_num * num_lstm)],
                    label_dims=[(batch_size, unroll_for, unit)],
                    name="zoneout_lstm_%s_%03d_%03d"
                         % (depth, int(hidden_rate * 100), int(cell_rate * 100)),
                )

    # GRU cell stacks (one initial hidden state per cell), batch size 3 again.
    batch_size = 3
    for num_grucell, depth in ((1, "single"), (2, "stacked")):
        record_v2(
            GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell),
            iteration=iteration,
            input_dims=[(batch_size, feature_size)]
                       + [(batch_size, unit) for _ in range(num_grucell)],
            label_dims=[(batch_size, unroll_for, unit)],
            name="grucell_" + depth,
        )
| true
| true
|
f704d925011146519241475ec5938c87167815a6
| 1,196
|
py
|
Python
|
main.py
|
nikolabebic95/MaterialUiColorsScss
|
8f03999534424a84baca5bfd0031b90cf0cca0ae
|
[
"MIT"
] | null | null | null |
main.py
|
nikolabebic95/MaterialUiColorsScss
|
8f03999534424a84baca5bfd0031b90cf0cca0ae
|
[
"MIT"
] | null | null | null |
main.py
|
nikolabebic95/MaterialUiColorsScss
|
8f03999534424a84baca5bfd0031b90cf0cca0ae
|
[
"MIT"
] | null | null | null |
import json
import pathlib
import urllib.request
def main():
    """Download the Material UI color palette and generate SCSS partials.

    Writes one ``_<color>.scss`` per color family (a variable per shade plus
    a default alias pointing at shade 500) and a ``_main.scss`` importing all
    of them, under ``material_ui_colors/``.

    Fixes vs. original: files are opened with an explicit UTF-8 encoding
    (the platform default is not portable) and output paths use pathlib
    instead of string concatenation.
    """
    # Palette source: https://gist.github.com/kawanet/a880c83f06d6baf742e45ac9ac52af96
    url = ('https://gist.githubusercontent.com/kawanet/a880c83f06d6baf742e45ac9ac52af96/raw'
           '/b4fbc9a730394eb977277e73cc37b60955463f21/material-colors.json')
    json_file_name = 'material-colors.json'
    # Keeps a local copy of the palette JSON as a side effect, as before.
    urllib.request.urlretrieve(url, json_file_name)

    # The palette JSON maps color name -> {shade: hex value}.
    with open(json_file_name, 'r', encoding='utf-8') as json_file:
        colors = json.load(json_file)

    out_dir = pathlib.Path('material_ui_colors')
    out_dir.mkdir(exist_ok=True)

    for color, shades in colors.items():
        lines = ['$material_ui_' + color + '_' + shade + ': ' + value + ';\n'
                 for shade, value in shades.items()]
        # Shade 500 acts as the default for each color family.
        lines.append('$material_ui_' + color + ': $material_ui_' + color + '_500;')
        with open(out_dir / ('_' + color + '.scss'), 'w', encoding='utf-8') as out_file:
            out_file.writelines(lines)

    with open(out_dir / '_main.scss', 'w', encoding='utf-8') as out_main_file:
        out_main_file.writelines(['@import "' + color + '";\n' for color in colors])


if __name__ == '__main__':
    main()
| 37.375
| 116
| 0.637124
|
import json
import pathlib
import urllib.request
def main():
    """Fetch the Material UI palette JSON and emit one SCSS partial per color."""
    # Raw gist holding the Material Design palette as JSON (name -> shades).
    url = ('https://gist.githubusercontent.com/kawanet/a880c83f06d6baf742e45ac9ac52af96/raw'
           '/b4fbc9a730394eb977277e73cc37b60955463f21/material-colors.json')
    json_file_name = 'material-colors.json'
    urllib.request.urlretrieve(url, json_file_name)
    with open(json_file_name, 'r') as json_file:
        colors = json.load(json_file)
    out_dir_name = 'material_ui_colors'
    pathlib.Path(out_dir_name).mkdir(exist_ok=True)
    for color, shades in colors.items():
        scss_path = out_dir_name + '/_' + color + '.scss'
        with open(scss_path, 'w') as out_file:
            lines = [
                '$material_ui_' + color + '_' + shade + ': ' + value + ';\n'
                for shade, value in shades.items()
            ]
            # Alias each family name to its 500 shade.
            lines.append('$material_ui_' + color + ': $material_ui_' + color + '_500;')
            out_file.writelines(lines)
    with open(out_dir_name + '/_main.scss', 'w') as out_main_file:
        out_main_file.writelines(['@import "' + color + '";\n' for color in colors])


if __name__ == '__main__':
    main()
| true
| true
|
f704d9ac6d70bd3e4025d4307a92b148575602ac
| 345
|
py
|
Python
|
bnn_mcmc_examples/examples/mlp/pima/setting1/hmc/sampler.py
|
papamarkou/bnn_mcmc_examples
|
7bb4ecfb33db4c30a8e61e31f528bda0efb24e3d
|
[
"MIT"
] | 1
|
2021-09-09T15:55:37.000Z
|
2021-09-09T15:55:37.000Z
|
bnn_mcmc_examples/examples/mlp/pima/setting1/hmc/sampler.py
|
kushagragpt99/bnn_mcmc_examples
|
297cdb1e74335860989bebdb4ff6f6322b6adc06
|
[
"MIT"
] | null | null | null |
bnn_mcmc_examples/examples/mlp/pima/setting1/hmc/sampler.py
|
kushagragpt99/bnn_mcmc_examples
|
297cdb1e74335860989bebdb4ff6f6322b6adc06
|
[
"MIT"
] | 1
|
2021-10-05T06:38:57.000Z
|
2021-10-05T06:38:57.000Z
|
# %% Import packages
from eeyore.samplers import HMC
from bnn_mcmc_examples.examples.mlp.pima.setting1.dataloaders import training_dataloader
from bnn_mcmc_examples.examples.mlp.pima.setting1.model import model
# %% Setup HMC sampler
# Hamiltonian Monte Carlo over the Pima MLP model, started from a prior draw.
# step=0.125 / num_steps=6 are presumably the leapfrog step size and number of
# integration steps per proposal — confirm against the eeyore HMC API.
sampler = HMC(model, theta0=model.prior.sample(), dataloader=training_dataloader, step=0.125, num_steps=6)
| 31.363636
| 106
| 0.811594
|
from eeyore.samplers import HMC
from bnn_mcmc_examples.examples.mlp.pima.setting1.dataloaders import training_dataloader
from bnn_mcmc_examples.examples.mlp.pima.setting1.model import model
# HMC sampler for the Pima MLP example: initial parameters drawn from the
# model prior; step/num_steps are presumably leapfrog settings (eeyore API).
sampler = HMC(model, theta0=model.prior.sample(), dataloader=training_dataloader, step=0.125, num_steps=6)
| true
| true
|
f704d9e98bd2692af546a133176acf66958374b4
| 866
|
py
|
Python
|
heltour/tournament/migrations/0049_auto_20160804_0509.py
|
zbidwell/heltour
|
3895142695096a81cc65c3fefb7d4501ed796f46
|
[
"MIT"
] | 41
|
2016-08-17T19:58:42.000Z
|
2021-11-08T10:52:07.000Z
|
heltour/tournament/migrations/0049_auto_20160804_0509.py
|
zbidwell/heltour
|
3895142695096a81cc65c3fefb7d4501ed796f46
|
[
"MIT"
] | 257
|
2016-08-17T22:29:05.000Z
|
2022-01-13T00:42:05.000Z
|
heltour/tournament/migrations/0049_auto_20160804_0509.py
|
zbidwell/heltour
|
3895142695096a81cc65c3fefb7d4501ed796f46
|
[
"MIT"
] | 31
|
2016-09-23T23:36:14.000Z
|
2022-01-14T17:05:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-04 05:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Link each Alternate row to its SeasonPlayer via a new OneToOne field.

    Adds a nullable ``season_player`` column, drops the old uniqueness
    constraint on Alternate, then backfills the column from the existing
    (season_id, player_id) pair with raw SQL.
    """
    dependencies = [
        ('tournament', '0048_auto_20160803_0311'),
    ]
    operations = [
        # Nullable so the column can be created before the SQL backfill below.
        migrations.AddField(
            model_name='alternate',
            name='season_player',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.SeasonPlayer'),
        ),
        migrations.AlterUniqueTogether(
            name='alternate',
            unique_together=set([]),
        ),
        # Backfill: resolve each alternate's (season_id, player_id) to the
        # matching tournament_seasonplayer row.
        migrations.RunSQL('''
            UPDATE tournament_alternate alt SET season_player_id = (SELECT id FROM tournament_seasonplayer sp WHERE sp.season_id = alt.season_id AND sp.player_id = alt.player_id)
        ''')
    ]
| 29.862069
| 174
| 0.643187
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Attach a nullable SeasonPlayer OneToOne to Alternate and backfill it.

    The raw-SQL step copies the link implied by the existing
    (season_id, player_id) columns into the new ``season_player_id`` column.
    """
    dependencies = [
        ('tournament', '0048_auto_20160803_0311'),
    ]
    operations = [
        # Must be nullable: rows exist before the backfill runs.
        migrations.AddField(
            model_name='alternate',
            name='season_player',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.SeasonPlayer'),
        ),
        migrations.AlterUniqueTogether(
            name='alternate',
            unique_together=set([]),
        ),
        # Backfill the new FK from the legacy season/player pair.
        migrations.RunSQL('''
            UPDATE tournament_alternate alt SET season_player_id = (SELECT id FROM tournament_seasonplayer sp WHERE sp.season_id = alt.season_id AND sp.player_id = alt.player_id)
        ''')
    ]
| true
| true
|
f704dab721959161224d66936f270ee7bed32f72
| 12,634
|
py
|
Python
|
reconstructPointwise.py
|
LLNL/ferdinand
|
af47b415ea1e9cb21a45b20d1f3854bc7f3a4d70
|
[
"Apache-2.0"
] | null | null | null |
reconstructPointwise.py
|
LLNL/ferdinand
|
af47b415ea1e9cb21a45b20d1f3854bc7f3a4d70
|
[
"Apache-2.0"
] | null | null | null |
reconstructPointwise.py
|
LLNL/ferdinand
|
af47b415ea1e9cb21a45b20d1f3854bc7f3a4d70
|
[
"Apache-2.0"
] | null | null | null |
##############################################
# #
# Ferdinand 0.40, Ian Thompson, LLNL #
# #
# gnd,endf,fresco,azure,hyrma #
# #
##############################################
import os
import math
from write_fresco import write_fresco
import fudge.sums as sumsModule
import fudge.styles as stylesModule
import fudge.reactionData.crossSection as crossSectionModule
import fudge.productData.distributions as distributionsModule
############################################## write_fresco
def reconstructPointwise(gnd,base,verbose,debug,egrid,angles,thin,reconstyle):
projectile = gnd.PoPs[gnd.projectile]
target = gnd.PoPs[gnd.target]
if hasattr(projectile, 'nucleus'): projectile = projectile.nucleus
if hasattr(target, 'nucleus'): target = target.nucleus
pZ = projectile.charge[0].value; tZ = target.charge[0].value
charged = pZ*tZ != 0
identicalParticles = gnd.projectile == gnd.target
rStyle = reconstyle.label
if debug: print("Charged-particle elastic:",charged,", identical:",identicalParticles,' rStyle:',rStyle)
if charged and angles is not None:
from fudge.reactionData.doubleDifferentialCrossSection.chargedParticleElastic import CoulombPlusNuclearElastic as CoulombPlusNuclearElasticModule
from fudge.reactionData.doubleDifferentialCrossSection.chargedParticleElastic import nuclearPlusInterference as nuclearPlusInterferenceModule
# from fudge.reactionData.doubleDifferentialCrossSection.chargedParticleElastic import RutherfordScattering as RutherfordScatteringModule
from fudge.productData.distributions import reference as referenceModule
thmin = angles[0]
pi = 3.1415826536
muCutoff = math.cos(thmin*pi/180.)
fresco_base = base + '.fresco_recon'
channels = write_fresco(gnd,fresco_base,verbose,debug,True,None,None,False,egrid,angles)
name_frin = fresco_base + '.frin' # must be same as in write_fresco
name_frout= fresco_base + '.frout'
accuracy = None
cmd = "frescox < "+name_frin+" > "+name_frout
print(cmd)
os.system(cmd) # Run FRESCO
f239 = open('fort.239','r')
egrid = []
totalxs = []; elasticxs = []; fissionxs = []; absorbtionxs = []
chanxs =[];
# lastzero = [ 0 for i in range(len(channels))]
for rreac in gnd.resonances.resolved.evaluated.resonanceReactions:
if not rreac.eliminated:
chanxs.append([])
if len(channels) != len(chanxs):
print("Only getting",channels," data channels, not",len(chanxs))
exit()
if debug: print("Fresco channel order:",channels)
mb = 1e-3
for line in f239:
if 'NaN' not in line:
data = line.split()
try:
elab,absorbtion,reaction,total,elastic = [float(d) for d in data[:5]]
sigr = [float(d) for d in data[5:]]
#print elab,absorbtion,reaction,total,elastic,sigr
egrid.append(elab)
totalxs.append(total*mb)
elasticxs.append(elastic*mb)
fissionxs.append(0.0)
absorbtionxs.append(absorbtion*mb)
for c in range(len(channels)):
chanxs[c].append(sigr[c]*mb)
# if sigr[c]== 0.: lastzero[c] = elab
except:
pass
crossSectionAxes = crossSectionModule.defaultAxes( 'MeV' )
total = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, totalxs), dataForm="XsAndYs" )
elastic = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, elasticxs), dataForm="XsAndYs" )
fission = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, fissionxs), dataForm="XsAndYs" )
absorbtion = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, absorbtionxs), dataForm="XsAndYs" )
if not isinstance( reconstyle, stylesModule.crossSectionReconstructed ):
raise TypeError("style must be an instance of crossSectionReconstructed, not %s" % type(reconstyle))
haveEliminated = False
for rreac in gnd.resonances.resolved.evaluated.resonanceReactions:
reaction = rreac.reactionLink.link
haveEliminated = haveEliminated or rreac.eliminated
# elastic or capture
if reaction == gnd.getReaction('capture'): rreac.tag = 'capture'
elif reaction == gnd.getReaction('elastic'): rreac.tag = 'elastic'
elif 'fission' in rreac.label: rreac.tag = rreac.label
else: rreac.tag = 'competitive'
xsecs = {'total':total, 'elastic':elastic, 'fission':fission, 'nonelastic':absorbtion}
for c in range(1,len(channels)): # skip c=1 elastic !! FIXME
#print channels[c],':',len(egrid),len(chanxs[c])
xsecs[channels[c]] = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, chanxs[c]), dataForm="XsAndYs" )
# print 'xsecs[channels[c]]',xsecs[channels[c]].toString()
if haveEliminated:
eliminatedReaction = [rr for rr in gnd.resonances.resolved.evaluated.resonanceReactions if rr.eliminated]
if len(eliminatedReaction) != 1:
raise TypeError("Only 1 reaction can be eliminated in Reich-Moore approximation!")
xsecs[eliminatedReaction[0].tag] = absorbtion - fission
epsilon = 1e-8 # for joining multiple regions together
# for each reaction, add tabulated pointwise data (ENDF MF=3) to reconstructed resonances:
possibleChannels = { 'elastic' : True, 'capture' : True, 'fission' : True, 'total' : False, 'nonelastic' : False }
elasticChannel = gnd.getReaction('elastic')
derivedFromLabel = ''
for reaction in gnd :
if isinstance( reaction, sumsModule.multiplicitySum ): continue
iselastic = reaction is elasticChannel
evaluatedCrossSection = reaction.crossSection.evaluated
if not isinstance( evaluatedCrossSection, crossSectionModule.resonancesWithBackground ):
continue
# which reconstructed cross section corresponds to this reaction?
if( derivedFromLabel == '' ) : derivedFromLabel = evaluatedCrossSection.label
if( derivedFromLabel != evaluatedCrossSection.label ) :
print(('WARNING derivedFromLabel = "%s" != "%s"' % (derivedFromLabel, evaluatedCrossSection.label)))
RRxsec = None
if str( reaction ) in xsecs:
RRxsec = xsecs[ str( reaction ) ]
# print 'Assign to ',str(reaction),'\n',RRxsec.toString()
else :
for possibleChannel in possibleChannels :
if( possibleChannels[possibleChannel] ) :
if( possibleChannel in str( reaction ) ) :
RRxsec = xsecs[possibleChannel]
# print 'Assign to ',str(reaction),'\n',RRxsec.toString()
if( RRxsec is None ) :
if( reaction is gnd.getReaction( possibleChannel ) ) :
RRxsec = xsecs[possibleChannel]
# print 'Assign to ',str(reaction),'\n',RRxsec.toString()
if( RRxsec is not None ) : break
if( RRxsec is None ) :
if verbose:
print(( "Warning: couldn't find appropriate reconstructed cross section to add to reaction %s" % reaction ))
continue
background = evaluatedCrossSection.background
background = background.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = epsilon, upperEps = epsilon )
RRxsec = RRxsec.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = epsilon, upperEps = epsilon )
RRxsec.convertUnits( {RRxsec.domainUnit: background.domainUnit, RRxsec.rangeUnit: background.rangeUnit } )
background, RRxsec = background.mutualify(0,0,0, RRxsec, -epsilon,epsilon,True)
RRxsec = background + RRxsec # result is a crossSection.XYs1d instance
if thin:
RRx = RRxsec.thin( accuracy or .001 )
else:
RRx = RRxsec
RRx.label = rStyle
reaction.crossSection.add( RRx )
# print "Channels ",reaction.label,iselastic,":\n",RRxsec.toString(),"\n&\n",RRx.toString()
if iselastic:
effXsc = RRxsec
gnd.styles.add( reconstyle )
# print "Last energies of zero cross section:",lastzero
if angles is None: return
f241 = open('fort.241','r')
sigdd = {}
for rr in channels: sigdd[rr] = []
for line in f241:
if '# Elab =' in line:
elab,ich = float(line[9:9+15]),int(line[9+15:9+15+4])-1 # Elab = 1.00000000E-06 1
line1 = line
dist = []
elif "&" in line:
rr = channels[ich]
sigdd[rr].append([elab,dist])
# if elab<1.0001: print '\n',ich,rr,sigdd[rr]
elif "NaN" in line:
continue
else:
mu,p = line.split()
try:
mu,p = float(mu),float(p)
dist.insert(0,p)
dist.insert(0,mu)
except:
pass
angularAxes = distributionsModule.angular.defaultAxes( 'MeV' )
for rreac in gnd.resonances.resolved.evaluated.resonanceReactions:
if not rreac.eliminated:
productName = rreac.ejectile
residName = rreac.residual
elastic = productName == gnd.projectile and residName == gnd.target
print("Add angular distribution for",productName," in",rreac.label,"channel (elastic=",elastic,")")
reaction = rreac.reactionLink.link
firstProduct = reaction.outputChannel.getProductWithName(productName)
effDist = distributionsModule.angular.XYs2d( axes = angularAxes )
elab_max = 0.; elab_min = 1e10; nangles=0
ne = 0
for elab,dist in sigdd[rreac.label]:
if debug: print('E=',elab,'has',len(dist),' angles')
if len(dist) <= 3:
print(' E=',elab,'has',len(dist),' angles')
continue
angdist = distributionsModule.angular.XYs1d( data = dist, outerDomainValue = elab, axes = angularAxes, dataForm = 'list' )
if thin:
angdist = angdist.thin( accuracy or .001 )
norm = angdist.integrate()
if norm != 0.0:
if debug: print(rreac.label,elab,norm)
effDist.append( angdist )
elab_max = max(elab,elab_max); elab_min = min(elab,elab_min); nangles = max(len(dist),nangles)
ne += 1
print(" Angles reconstructed at %i energies from %s to %s MeV with up to %i angles at each energy" % (ne,elab_min,elab_max,nangles))
newForm = distributionsModule.angular.twoBodyForm( label = reconstyle.label,
productFrame = firstProduct.distribution.evaluated.productFrame, angularSubform = effDist )
firstProduct.distribution.add( newForm )
if elastic and charged: # dCrossSection_dOmega for charged-particle elastics:
NCPI = nuclearPlusInterferenceModule.nuclearPlusInterference( muCutoff=muCutoff,
crossSection=nuclearPlusInterferenceModule.crossSection( effXsc),
distribution=nuclearPlusInterferenceModule.distribution( effDist)
)
# Rutherford = RutherfordScatteringModule.RutherfordScattering()
CoulombElastic = CoulombPlusNuclearElasticModule.form( gnd.projectile, rStyle, nuclearPlusInterference = NCPI, identicalParticles=identicalParticles )
reaction.doubleDifferentialCrossSection.add( CoulombElastic )
reaction.crossSection.remove( rStyle )
reaction.crossSection.add( crossSectionModule.CoulombPlusNuclearElastic( link = reaction.doubleDifferentialCrossSection[rStyle],
label = rStyle, relative = True ) )
firstProduct.distribution.remove( rStyle )
firstProduct.distribution.add( referenceModule.CoulombPlusNuclearElastic( link = reaction.doubleDifferentialCrossSection[rStyle],
label = rStyle, relative = True ) )
secondProduct = reaction.outputChannel[1]
# secondProduct.distribution[rStyle].angularSubform.link = firstProduct.distribution[rStyle] ## Fails
# give 'recoil' distribution!
return
| 48.779923
| 166
| 0.616115
|
'capture'): rreac.tag = 'capture'
elif reaction == gnd.getReaction('elastic'): rreac.tag = 'elastic'
elif 'fission' in rreac.label: rreac.tag = rreac.label
else: rreac.tag = 'competitive'
xsecs = {'total':total, 'elastic':elastic, 'fission':fission, 'nonelastic':absorbtion}
for c in range(1,len(channels)):
xsecs[channels[c]] = crossSectionModule.XYs1d( axes = crossSectionAxes, data=(egrid, chanxs[c]), dataForm="XsAndYs" )
if haveEliminated:
eliminatedReaction = [rr for rr in gnd.resonances.resolved.evaluated.resonanceReactions if rr.eliminated]
if len(eliminatedReaction) != 1:
raise TypeError("Only 1 reaction can be eliminated in Reich-Moore approximation!")
xsecs[eliminatedReaction[0].tag] = absorbtion - fission
epsilon = 1e-8
possibleChannels = { 'elastic' : True, 'capture' : True, 'fission' : True, 'total' : False, 'nonelastic' : False }
elasticChannel = gnd.getReaction('elastic')
derivedFromLabel = ''
for reaction in gnd :
if isinstance( reaction, sumsModule.multiplicitySum ): continue
iselastic = reaction is elasticChannel
evaluatedCrossSection = reaction.crossSection.evaluated
if not isinstance( evaluatedCrossSection, crossSectionModule.resonancesWithBackground ):
continue
if( derivedFromLabel == '' ) : derivedFromLabel = evaluatedCrossSection.label
if( derivedFromLabel != evaluatedCrossSection.label ) :
print(('WARNING derivedFromLabel = "%s" != "%s"' % (derivedFromLabel, evaluatedCrossSection.label)))
RRxsec = None
if str( reaction ) in xsecs:
RRxsec = xsecs[ str( reaction ) ]
else :
for possibleChannel in possibleChannels :
if( possibleChannels[possibleChannel] ) :
if( possibleChannel in str( reaction ) ) :
RRxsec = xsecs[possibleChannel]
if( RRxsec is None ) :
if( reaction is gnd.getReaction( possibleChannel ) ) :
RRxsec = xsecs[possibleChannel]
if( RRxsec is not None ) : break
if( RRxsec is None ) :
if verbose:
print(( "Warning: couldn't find appropriate reconstructed cross section to add to reaction %s" % reaction ))
continue
background = evaluatedCrossSection.background
background = background.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = epsilon, upperEps = epsilon )
RRxsec = RRxsec.toPointwise_withLinearXYs( accuracy = 1e-3, lowerEps = epsilon, upperEps = epsilon )
RRxsec.convertUnits( {RRxsec.domainUnit: background.domainUnit, RRxsec.rangeUnit: background.rangeUnit } )
background, RRxsec = background.mutualify(0,0,0, RRxsec, -epsilon,epsilon,True)
RRxsec = background + RRxsec # result is a crossSection.XYs1d instance
if thin:
RRx = RRxsec.thin( accuracy or .001 )
else:
RRx = RRxsec
RRx.label = rStyle
reaction.crossSection.add( RRx )
# print "Channels ",reaction.label,iselastic,":\n",RRxsec.toString(),"\n&\n",RRx.toString()
if iselastic:
effXsc = RRxsec
gnd.styles.add( reconstyle )
# print "Last energies of zero cross section:",lastzero
if angles is None: return
f241 = open('fort.241','r')
sigdd = {}
for rr in channels: sigdd[rr] = []
for line in f241:
if '
elab,ich = float(line[9:9+15]),int(line[9+15:9+15+4])-1 # Elab = 1.00000000E-06 1
line1 = line
dist = []
elif "&" in line:
rr = channels[ich]
sigdd[rr].append([elab,dist])
# if elab<1.0001: print '\n',ich,rr,sigdd[rr]
elif "NaN" in line:
continue
else:
mu,p = line.split()
try:
mu,p = float(mu),float(p)
dist.insert(0,p)
dist.insert(0,mu)
except:
pass
angularAxes = distributionsModule.angular.defaultAxes( 'MeV' )
for rreac in gnd.resonances.resolved.evaluated.resonanceReactions:
if not rreac.eliminated:
productName = rreac.ejectile
residName = rreac.residual
elastic = productName == gnd.projectile and residName == gnd.target
print("Add angular distribution for",productName," in",rreac.label,"channel (elastic=",elastic,")")
reaction = rreac.reactionLink.link
firstProduct = reaction.outputChannel.getProductWithName(productName)
effDist = distributionsModule.angular.XYs2d( axes = angularAxes )
elab_max = 0.; elab_min = 1e10; nangles=0
ne = 0
for elab,dist in sigdd[rreac.label]:
if debug: print('E=',elab,'has',len(dist),' angles')
if len(dist) <= 3:
print(' E=',elab,'has',len(dist),' angles')
continue
angdist = distributionsModule.angular.XYs1d( data = dist, outerDomainValue = elab, axes = angularAxes, dataForm = 'list' )
if thin:
angdist = angdist.thin( accuracy or .001 )
norm = angdist.integrate()
if norm != 0.0:
if debug: print(rreac.label,elab,norm)
effDist.append( angdist )
elab_max = max(elab,elab_max); elab_min = min(elab,elab_min); nangles = max(len(dist),nangles)
ne += 1
print(" Angles reconstructed at %i energies from %s to %s MeV with up to %i angles at each energy" % (ne,elab_min,elab_max,nangles))
newForm = distributionsModule.angular.twoBodyForm( label = reconstyle.label,
productFrame = firstProduct.distribution.evaluated.productFrame, angularSubform = effDist )
firstProduct.distribution.add( newForm )
if elastic and charged: # dCrossSection_dOmega for charged-particle elastics:
NCPI = nuclearPlusInterferenceModule.nuclearPlusInterference( muCutoff=muCutoff,
crossSection=nuclearPlusInterferenceModule.crossSection( effXsc),
distribution=nuclearPlusInterferenceModule.distribution( effDist)
)
# Rutherford = RutherfordScatteringModule.RutherfordScattering()
CoulombElastic = CoulombPlusNuclearElasticModule.form( gnd.projectile, rStyle, nuclearPlusInterference = NCPI, identicalParticles=identicalParticles )
reaction.doubleDifferentialCrossSection.add( CoulombElastic )
reaction.crossSection.remove( rStyle )
reaction.crossSection.add( crossSectionModule.CoulombPlusNuclearElastic( link = reaction.doubleDifferentialCrossSection[rStyle],
label = rStyle, relative = True ) )
firstProduct.distribution.remove( rStyle )
firstProduct.distribution.add( referenceModule.CoulombPlusNuclearElastic( link = reaction.doubleDifferentialCrossSection[rStyle],
label = rStyle, relative = True ) )
secondProduct = reaction.outputChannel[1]
# secondProduct.distribution[rStyle].angularSubform.link = firstProduct.distribution[rStyle] ## Fails
# give 'recoil' distribution!
return
| true
| true
|
f704dae35a632084b188c064b1c868e00c367228
| 12,829
|
py
|
Python
|
pylabnet/scripts/counter/monitor_counts.py
|
wi11dey/pylabnet
|
a6e3362f727c45aaa60e61496e858ae92e85574d
|
[
"MIT"
] | null | null | null |
pylabnet/scripts/counter/monitor_counts.py
|
wi11dey/pylabnet
|
a6e3362f727c45aaa60e61496e858ae92e85574d
|
[
"MIT"
] | null | null | null |
pylabnet/scripts/counter/monitor_counts.py
|
wi11dey/pylabnet
|
a6e3362f727c45aaa60e61496e858ae92e85574d
|
[
"MIT"
] | null | null | null |
""" Generic script for monitoring counts from a counter """
import numpy as np
import time
import pyqtgraph as pg
from pylabnet.gui.pyqt.external_gui import Window
from pylabnet.utils.logging.logger import LogClient
from pylabnet.scripts.pause_script import PauseService
from pylabnet.network.core.generic_server import GenericServer
from pylabnet.network.client_server import si_tt
from pylabnet.utils.helper_methods import load_script_config, get_ip, unpack_launcher, load_config, get_gui_widgets, get_legend_from_graphics_view, find_client, load_script_config
# Static methods
# def generate_widgets():
# """Static method to return systematically named gui widgets for 4ch wavemeter monitor"""
# graphs, legends, numbers = [], [], []
# for i in range(2):
# graphs.append('graph_widget_' + str(i + 1))
# legends.append('legend_widget_' + str(i + 1))
# numbers.append('number_label_' + str(i + 1))
# for i in range(2, 8):
# numbers.append('number_label_' + str(i + 1))
# return graphs, legends, numbers
class CountMonitor:
    """ Script class that monitors count traces from a counter and plots them live.

    Pulls count traces from an si_tt counter client and displays them in a
    pyqtgraph-based GUI window. Optionally shows one additional plot with the
    summed ("combined") counts over all monitored channels.
    """

    def __init__(self, ctr_client: si_tt.Client, ui='count_monitor', logger_client=None, server_port=None, combined_channel=False, config=None):
        """ Constructor for CountMonitor script

        :param ctr_client: instance of hardware client for counter
        :param ui: (str) name of GUI template; kept for interface compatibility,
            but overridden below depending on combined_channel
        :param logger_client: (obj) instance of logger client.
        :param server_port: (int) port number of script server
        :param combined_channel: (bool) If true, show additional trace with summed counts.
        :param config: (str) name of a config file to load, or None for defaults
        """

        self._ctr = ctr_client
        self.log = logger_client
        self.combined_channel = combined_channel
        self._bin_width = None
        self._n_bins = None
        self._ch_list = None
        self._plot_list = None  # List of channels to assign to each plot (e.g. [[1,2], [3,4]])
        self._plots_assigned = []  # List of plots on the GUI that have been assigned

        # Combined view uses a dedicated template with one extra graph widget.
        if self.combined_channel:
            ui = 'count_monitor_combined'
        else:
            ui = 'count_monitor'

        # Instantiate GUI window
        self.gui = Window(
            gui_template=ui,
            host=get_ip(),
            port=server_port,
            log=self.log
        )

        # Setup stylesheet.
        self.gui.apply_stylesheet()

        if self.combined_channel:
            num_plots = 3
        else:
            num_plots = 2

        # Get all GUI widgets
        self.widgets = get_gui_widgets(
            self.gui,
            graph_widget=num_plots,
            number_label=8,
            event_button=num_plots,
            legend_widget=num_plots
        )

        # Load config
        self.config = {}
        if config is not None:
            self.config = load_script_config(
                script='monitor_counts',
                config=config,
                logger=self.log  # BUGFIX: was self.logger_client, an undefined attribute
            )

        # Ensure the trace has a name for the counter service.
        if 'name' not in self.config:
            self.config.update({'name': f'monitor{np.random.randint(1000)}'})

    def set_hardware(self, ctr):
        """ Sets hardware client for this script

        :param ctr: instance of count monitor hardware client
        """

        # Initialize counter instance
        self._ctr = ctr

    def set_params(self, bin_width=1e9, n_bins=1e4, ch_list=[1], plot_list=None):
        """ Sets counter parameters

        :param bin_width: bin width in ps
        :param n_bins: number of bins to display on graph
        :param ch_list: (list) channels to record (default [1] is never mutated,
            so the shared-default pitfall does not apply here)
        :param plot_list: list of channels to assign to each plot (e.g. [[1,2], [3,4]])
        """

        # Save params to internal variables
        self._bin_width = int(bin_width)
        self._n_bins = int(n_bins)
        self._ch_list = ch_list
        self._plot_list = plot_list

    def run(self):
        """ Runs the counter from scratch and updates the GUI until paused. """

        try:
            # Configure all plots/curves before starting the trace
            self._initialize_display()

            self._is_running = True
            self._ctr.start_trace(
                name=self.config['name'],
                ch_list=self._ch_list,
                bin_width=self._bin_width,
                n_bins=self._n_bins
            )

            # Continuously update data until paused
            while self._is_running:
                self._update_output()
                self.gui.force_update()

        except Exception:
            self._is_running = False
            raise  # bare re-raise preserves the original traceback

    def pause(self):
        """ Pauses the counter (stops the update loop on its next iteration). """

        self._is_running = False

    def resume(self):
        """ Resumes the counter.

        To be used to resume after the counter has been paused.
        """

        try:
            self._is_running = True

            # Clear counter and resume plotting
            self._ctr.clear_ctr(name=self.config['name'])
            while self._is_running:
                self._update_output()

        except Exception:
            self._is_running = False
            raise

    # Technical methods

    def _initialize_display(self):
        """ Initializes the display (configures all plots and legend entries). """

        plot_index = 0
        for index in range(len(self.widgets['graph_widget'])):

            # Configure and return legend widgets
            self.widgets['legend_widget'][index] = get_legend_from_graphics_view(
                self.widgets['legend_widget'][index]
            )

        for color, channel in enumerate(self._ch_list):

            # Figure out which plot to assign this channel to
            if self._plot_list is not None:
                for index, channel_set in enumerate(self._plot_list):
                    if channel in channel_set:
                        plot_index = index
                        break

            # Create a curve and store the widget in our dictionary
            self.widgets[f'curve_{channel}'] = self.widgets['graph_widget'][plot_index].plot(
                pen=pg.mkPen(color=self.gui.COLOR_LIST[color])
            )

            self.widgets['legend_widget'][plot_index].addItem(
                self.widgets[f'curve_{channel}'],
                ' - ' + f'Channel {channel}'
            )

        # Handle button pressing: each clear button clears its own plot
        from functools import partial
        for plot_index, clear_button in enumerate(self.widgets['event_button']):
            clear_button.clicked.connect(partial(lambda plot_index: self._clear_plot(plot_index), plot_index=plot_index))

        if self.combined_channel:
            # NOTE(review): this reuses `index` and `color` left over from the
            # loops above, assuming the last channel lands on the last regular
            # plot so that graph_widget[index + 1] is the combined-counts graph.
            # Confirm against the plot_list configurations actually used.
            self.widgets['curve_combo'] = self.widgets['graph_widget'][index + 1].plot(
                pen=pg.mkPen(color=self.gui.COLOR_LIST[color + 1])
            )
            self.widgets['legend_widget'][index + 1].addItem(
                self.widgets['curve_combo'],
                ' - ' + 'Combined Counts'
            )

    def _clear_plot(self, plot_index):
        """ Clears the curves on a particular plot

        Each curve is flattened to a constant trace at its last value, and the
        counter itself is cleared.

        :param plot_index: (int) index of plot to clear
        """

        # First, handle case where the combined-counts plot is cleared: it sits
        # one past the regular plots and has the single pseudo-channel 'combo'.
        if self.combined_channel and plot_index == len(self._plot_list):
            channel = 'combo'

            # Set the curve to constant with last point for all entries
            self.widgets[f'curve_{channel}'].setData(
                np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]
            )
        else:
            # Find all curves in this plot
            for channel in self._plot_list[plot_index]:

                # Set the curve to constant with last point for all entries
                self.widgets[f'curve_{channel}'].setData(
                    np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]
                )

        self._ctr.clear_ctr(name=self.config['name'])

    def _update_output(self):
        """ Updates the output to all current values"""

        # Fetch raw counts and convert bins (width in ps) to counts per second
        counts = self._ctr.get_counts(name=self.config['name'])
        counts_per_sec = counts * (1e12 / self._bin_width)
        summed_counts = np.sum(counts_per_sec, axis=0)

        for index, count_array in enumerate(counts_per_sec):

            # Channels are returned in the same order they were requested
            channel = self._ch_list[index]
            self.widgets[f'curve_{channel}'].setData(count_array)
            # NOTE: number labels are indexed by channel number, assuming
            # channels are 1-based and at most 8.
            self.widgets['number_label'][channel - 1].setText(str(count_array[-1]))

        if self.combined_channel:
            self.widgets['curve_combo'].setData(summed_counts)
def launch(**kwargs):
    """ Launches the count monitor script

    :param kwargs: relevant launcher keyword arguments:
        :logger: logger client instance
        :clients: (dict) connected hardware clients
        :config: (str) name of the config file to load
        :server_port: (int) port number for the script server
    """

    logger = kwargs['logger']
    clients = kwargs['clients']
    config = load_script_config(
        'monitor_counts',
        kwargs['config'],
        logger
    )

    # Config files may store the flag as the string 'True'; also accept a
    # genuine boolean for robustness (backward compatible).
    combined_channel = config['combined_channel'] in (True, 'True')

    # Instantiate CountMonitor
    try:
        monitor = CountMonitor(
            ctr_client=find_client(
                clients,
                config,
                client_type='si_tt',
                client_config='standard_ctr',
                logger=logger
            ),
            logger_client=logger,
            server_port=kwargs['server_port'],
            combined_channel=combined_channel
        )
    except KeyError:
        # Leave the message on screen long enough to be read before the
        # launcher window closes.
        print('Please make sure the module names for required servers and GUIS are correct.')
        time.sleep(15)
        raise

    # Set parameters
    monitor.set_params(**config['params'])

    # Run
    monitor.run()
| 34.029178
| 179
| 0.581651
|
import numpy as np
import time
import pyqtgraph as pg
from pylabnet.gui.pyqt.external_gui import Window
from pylabnet.utils.logging.logger import LogClient
from pylabnet.scripts.pause_script import PauseService
from pylabnet.network.core.generic_server import GenericServer
from pylabnet.network.client_server import si_tt
from pylabnet.utils.helper_methods import load_script_config, get_ip, unpack_launcher, load_config, get_gui_widgets, get_legend_from_graphics_view, find_client, load_script_config
class CountMonitor:
    """Monitors count traces from an si_tt counter client and plots them in a GUI.

    Optionally shows one extra trace with the summed counts over all channels.
    """
    def __init__(self, ctr_client: si_tt.Client, ui='count_monitor', logger_client=None, server_port=None, combined_channel=False, config=None):
        """Set up the GUI window, fetch widget handles and load the config.

        :param ctr_client: hardware client for the counter
        :param ui: GUI template name (overwritten below based on combined_channel)
        :param logger_client: logger client instance
        :param server_port: port number of the script server
        :param combined_channel: if True, show additional trace with summed counts
        :param config: name of config file to load, or None
        """
        self._ctr = ctr_client
        self.log = logger_client
        self.combined_channel = combined_channel
        self._bin_width = None
        self._n_bins = None
        self._ch_list = None
        self._plot_list = None  # channels assigned to each plot, e.g. [[1,2], [3,4]]
        self._plots_assigned = []  # plots on the GUI that have been assigned
        if self.combined_channel:
            ui = 'count_monitor_combined'
        else:
            ui = 'count_monitor'
        # Instantiate GUI window
        self.gui = Window(
            gui_template=ui,
            host=get_ip(),
            port=server_port,
            log=self.log
        )
        self.gui.apply_stylesheet()
        # Combined view needs one extra graph for the summed trace.
        if self.combined_channel:
            num_plots = 3
        else:
            num_plots = 2
        self.widgets = get_gui_widgets(
            self.gui,
            graph_widget=num_plots,
            number_label=8,
            event_button=num_plots,
            legend_widget=num_plots
        )
        self.config = {}
        if config is not None:
            self.config = load_script_config(
                script='monitor_counts',
                config=config,
                # NOTE(review): self.logger_client is never defined (the
                # attribute set above is self.log), so this raises
                # AttributeError whenever config is not None — confirm and
                # change to logger=self.log.
                logger=self.logger_client
            )
        # Ensure the trace has a name for the counter service.
        if not 'name' in self.config:
            self.config.update({'name': f'monitor{np.random.randint(1000)}'})
    def set_hardware(self, ctr):
        """Assign a (new) counter hardware client."""
        self._ctr = ctr
    def set_params(self, bin_width=1e9, n_bins=1e4, ch_list=[1], plot_list=None):
        """Store counter parameters: bin width (ps), number of bins, channels
        to record and the channel-to-plot assignment."""
        self._bin_width = int(bin_width)
        self._n_bins = int(n_bins)
        self._ch_list = ch_list
        self._plot_list = plot_list
    def run(self):
        """Start the counter trace and update the GUI until paused."""
        try:
            self._initialize_display()
            self._is_running = True
            self._ctr.start_trace(
                name=self.config['name'],
                ch_list=self._ch_list,
                bin_width=self._bin_width,
                n_bins=self._n_bins
            )
            while self._is_running:
                self._update_output()
                self.gui.force_update()
        except Exception as exc_obj:
            self._is_running = False
            raise exc_obj
    def pause(self):
        """Stop the update loop on its next iteration."""
        self._is_running = False
    def resume(self):
        """Clear the counter and resume plotting after a pause."""
        try:
            self._is_running = True
            self._ctr.clear_ctr(name=self.config['name'])
            while self._is_running:
                self._update_output()
        except Exception as exc_obj:
            self._is_running = False
            raise exc_obj
    def _initialize_display(self):
        """Configure legends, create one curve per channel and wire the
        clear buttons."""
        plot_index = 0
        for index in range(len(self.widgets['graph_widget'])):
            self.widgets['legend_widget'][index] = get_legend_from_graphics_view(
                self.widgets['legend_widget'][index]
            )
        for color, channel in enumerate(self._ch_list):
            # Find which plot this channel is assigned to.
            if self._plot_list is not None:
                for index, channel_set in enumerate(self._plot_list):
                    if channel in channel_set:
                        plot_index = index
                        break
            self.widgets[f'curve_{channel}'] = self.widgets['graph_widget'][plot_index].plot(
                pen=pg.mkPen(color=self.gui.COLOR_LIST[color])
            )
            self.widgets['legend_widget'][plot_index].addItem(
                self.widgets[f'curve_{channel}'],
                ' - ' + f'Channel {channel}'
            )
        from functools import partial
        for plot_index, clear_button in enumerate(self.widgets['event_button']):
            clear_button.clicked.connect(partial(lambda plot_index: self._clear_plot(plot_index), plot_index=plot_index))
        if self.combined_channel:
            # NOTE(review): relies on `index`/`color` left over from the loops
            # above to locate the combined-counts graph — confirm.
            self.widgets['curve_combo'] = self.widgets['graph_widget'][index + 1].plot(
                pen=pg.mkPen(color=self.gui.COLOR_LIST[color + 1])
            )
            self.widgets['legend_widget'][index + 1].addItem(
                self.widgets['curve_combo'],
                ' - ' + 'Combined Counts'
            )
    def _clear_plot(self, plot_index):
        """Flatten every curve on the given plot to its last value and clear
        the counter. The combined plot sits one index past the regular plots."""
        if self.combined_channel and plot_index == len(self._plot_list):
            channel = 'combo'
            self.widgets[f'curve_{channel}'].setData(
                np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]
            )
        else:
            for channel in self._plot_list[plot_index]:
                self.widgets[f'curve_{channel}'].setData(
                    np.ones(self._n_bins) * self.widgets[f'curve_{channel}'].yData[-1]
                )
        self._ctr.clear_ctr(name=self.config['name'])
    def _update_output(self):
        """Fetch counts, convert to counts/s (bin width is in ps) and refresh
        all curves and number labels."""
        counts = self._ctr.get_counts(name=self.config['name'])
        counts_per_sec = counts * (1e12 / self._bin_width)
        summed_counts = np.sum(counts_per_sec, axis=0)
        for index, count_array in enumerate(counts_per_sec):
            channel = self._ch_list[index]
            self.widgets[f'curve_{channel}'].setData(count_array)
            # Labels indexed by channel number (assumes 1-based, at most 8).
            self.widgets[f'number_label'][channel - 1].setText(str(count_array[-1]))
        if self.combined_channel:
            self.widgets['curve_combo'].setData(summed_counts)
def launch(**kwargs):
    """Launch the count monitor script.

    Expects launcher kwargs: 'logger' (logger client), 'clients' (dict of
    hardware clients), 'config' (config file name) and 'server_port' (int).
    """
    logger = kwargs['logger']
    clients = kwargs['clients']
    config = load_script_config(
        'monitor_counts',
        kwargs['config'],
        logger
    )
    # Config stores the flag as the string 'True'/'False'.
    if config['combined_channel'] == 'True':
        combined_channel = True
    else:
        combined_channel = False
    try:
        monitor = CountMonitor(
            ctr_client=find_client(
                clients,
                config,
                client_type='si_tt',
                client_config='standard_ctr',
                logger=logger
            ),
            logger_client=logger,
            server_port=kwargs['server_port'],
            combined_channel=combined_channel
        )
    except KeyError:
        # Keep the message visible before the launcher window closes.
        print('Please make sure the module names for required servers and GUIS are correct.')
        time.sleep(15)
        raise
    monitor.set_params(**config['params'])
    monitor.run()
| true
| true
|
f704db4f9f14c36b7a4a23d49d7aeb53b61d9f65
| 1,723
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
lucaspyproj/learn-app-api
|
77d1216c9520c21785c7f56f84d8681a2990a6cf
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
lucaspyproj/learn-app-api
|
77d1216c9520c21785c7f56f84d8681a2990a6cf
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
lucaspyproj/learn-app-api
|
77d1216c9520c21785c7f56f84d8681a2990a6cf
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-02-16 11:10
# flake8: noqa
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the custom ``User`` model.

    Auto-generated by Django (see file header). The fields follow the pattern
    of ``AbstractBaseUser`` + ``PermissionsMixin`` with ``email`` (unique) as
    the login identifier instead of a username.
    """
    initial = True
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.676471
| 266
| 0.639582
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration creating the custom ``User`` model (email login).

    Auto-generated by Django; do not edit field definitions by hand.
    """
    initial = True
    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| true
| true
|
f704db7738c33468077ceb775a163c773f08d47d
| 11,918
|
py
|
Python
|
Notebooks/lib/Message_passing_BN.py
|
olmosUC3M/Inference-and-Learning-in-discrete-Bayesian-Networks
|
12e08f2e3f34146638806212be54837cc22c0516
|
[
"MIT"
] | 3
|
2019-05-19T11:26:42.000Z
|
2021-07-31T07:28:35.000Z
|
Notebooks/lib/Message_passing_BN.py
|
olmosUC3M/Inference-and-Learning-in-discrete-Bayesian-Networks
|
12e08f2e3f34146638806212be54837cc22c0516
|
[
"MIT"
] | null | null | null |
Notebooks/lib/Message_passing_BN.py
|
olmosUC3M/Inference-and-Learning-in-discrete-Bayesian-Networks
|
12e08f2e3f34146638806212be54837cc22c0516
|
[
"MIT"
] | null | null | null |
## Message passing over a discrete BN ##
## Library created by Pablo Martínez Olmos, University Carlos III Madrid ##
## olmos@tsc.uc3m.es ##
## Last modification 15/11/2016 ##
import numpy as np
## Messages are stored in the logaritmic domain ##
## Global constants (to control numerical issues)
inf_log=100 #To impose hard constraints (i.e. an observed variable); plays the role of +inf in the log domain
constant_log=50 #Used to improve stability in the Check Node (CN) operation (log-domain normalization offset)
## Function definitions
def create_var_node(ID,cardinality,neighbor_order,observed_value_index=-1):
    """Build a variable node of the factor graph as a plain dict.

    The dict carries the node ID, its cardinality, the ordered IDs of its
    check-node neighbors, one input-message slot per neighbor, and an
    internal log-domain factor used to clamp the variable when observed.
    """
    inner = np.zeros([cardinality, 1])
    if observed_value_index != -1:
        # Hard-clamp the observed value: log-domain [~0 ... ~0 1 ~0 ...]
        inner -= inf_log
        inner[observed_value_index] = inf_log
    node = {
        'ID': ID,
        'node_type': 0,  # 0 -> variable node, 1 -> check node
        'cardinality': cardinality,
        'neighbor_order': np.array(neighbor_order),
        'observed': observed_value_index,  # -1 means "not observed"
        'inner_factor': inner,
    }
    # One (initially empty) input-message slot per neighboring check node.
    node['input_msgs'] = [0] * len(node['neighbor_order'])
    return node
def create_message(input_node, output_node, table):
    """Wrap one message as a dict: sender node, receiver node and value table."""
    return {
        'input_node': input_node,
        'output_node': output_node,
        'table': table,
    }
def create_factor_node(ID, neighbors, CPD):
    """Build a check (factor) node of the factor graph.

    ``neighbors`` must be already-created variable nodes: their inner
    factors seed the initial input messages, and their IDs/cardinalities
    fix the ordering used to index the flat CPD table.
    """
    flat_cpd = np.array(CPD)
    flat_cpd = flat_cpd.reshape(flat_cpd.shape[0],)  # force shape (n,)
    factor = {
        'ID': ID,
        'node_type': 1,  # 1 -> check node
        'input_msgs': [],
        'CPD': np.array(flat_cpd),
        'CPD_order': np.zeros([len(neighbors), 1]).astype(int),
        'cardinalities': np.zeros([len(neighbors), 1]).astype(int),
    }
    # Seed input messages from each neighbor's inner factor and record the
    # neighbor order / cardinalities in lock-step.
    for pos, var in enumerate(neighbors):
        factor['input_msgs'].append(
            create_message(input_node=var, output_node=factor,
                           table=var['inner_factor']))
        factor['cardinalities'][pos] = var['cardinality']
        factor['CPD_order'][pos] = var['ID']
    return factor
def initialize_variable(var_node, observed_value_index=-1):
    """Reset a variable node so message passing can be run again.

    Clears the stored inner factor and re-applies the observation clamp
    when ``observed_value_index`` is given.
    """
    var_node['inner_factor'] = np.zeros([var_node['cardinality'], 1])
    var_node['observed'] = observed_value_index
    if observed_value_index == -1:
        return
    # Re-clamp the observed value in the log domain.
    var_node['inner_factor'] -= inf_log
    var_node['inner_factor'][observed_value_index] = inf_log
def initialize_factor_msgs(factor_node, neighbors):
    """Reset a factor node's input messages to its neighbors' inner factors.

    Required before re-running message passing on the same graph, because
    factor nodes keep the messages from the previous run.
    """
    fresh = []
    for var in neighbors:
        fresh.append(create_message(input_node=var,
                                    output_node=factor_node,
                                    table=var['inner_factor']))
    factor_node['input_msgs'] = fresh
#The next two routines are used to encode and decode positions to store CPD values in a
#vector form. We use a tree-encoding determined by the order of variables and their cardinalities
#See First Example Message Passing.ipynb for an illustration
def CPD_position_to_variable_index(position, v_card, CPD_size):
    """Decode a flat CPD position into per-variable value indexes.

    The encoding is a mixed-radix tree walk: variables are ordered, each
    with cardinality ``v_card[i]``, over a table of ``CPD_size`` entries.
    Returns an int column vector with one index per variable.
    """
    v_card = np.array(v_card)
    var_index = np.zeros([v_card.shape[0], 1]).astype(int)
    block = CPD_size
    for i, card in enumerate(v_card):
        # Size of the sub-block spanned by one value of variable i.
        block //= card
        var_index[i], position = divmod(position, block)
    return var_index
def variable_index_to_CPD_position(var_index, v_card, CPD_size):
    """Encode per-variable value indexes into a flat CPD position.

    Inverse of :func:`CPD_position_to_variable_index`: a mixed-radix
    encoding over variables with cardinalities ``v_card`` and a table of
    ``CPD_size`` entries.
    """
    var_index = np.array(var_index)
    v_card = np.array(v_card)
    stride = CPD_size
    position = 0
    for i, card in enumerate(v_card):
        stride //= card
        position += var_index[i] * stride
    return position
def update_var_to_factor(var_node):
    """Send a message from ``var_node`` to every neighboring factor node.

    Each outgoing message is the log-domain product (sum) of all incoming
    messages plus the node's inner factor, minus the message received on
    the edge being replied to.  Observed variables always emit their
    hard-clamped distribution instead.  Replies are written in place into
    the receiving factor node's ``input_msgs`` slot.
    """
    prod_table=np.zeros([var_node['cardinality'],1])
    #Multiply all input messages (sums in the log domain)
    for msg in var_node['input_msgs']:
        prod_table+=msg['table']
    #Also account for the node's inner_factor: if the variable is observed,
    #this forces the output messages to be consistent with the observation.
    prod_table+=var_node['inner_factor']
    #For every output message, subtract from prod_table the message received
    #through the corresponding edge (standard sum-product exclusion).
    for msg in var_node['input_msgs']:
        if(var_node['observed']==-1):
            reply_table=prod_table-msg['table']
        else:
            #Observed: emit the hard clamp regardless of incoming messages.
            reply_table=np.ones([var_node['cardinality'],1])*(-inf_log)
            reply_table[var_node['observed']]=inf_log
        #Clip message magnitudes to exp(+/-inf_log) for numerical stability.
        reply_table[reply_table>inf_log]=inf_log
        reply_table[reply_table<-inf_log]=-inf_log
        #Store the output message inside the corresponding neighbor.
        factor_rx=msg['input_node']
        reply_msg=create_message(input_node=var_node,output_node=factor_rx,table=reply_table)
        #Short loop to place the message at the factor's matching edge slot.
        for index,v in enumerate(factor_rx['CPD_order']):
            if(v==var_node['ID']):
                factor_rx['input_msgs'][index]=reply_msg
                break
def compute_var_marginal(var_node):
    """Return the normalized marginal pmf of ``var_node``.

    The marginal is the log-domain sum of every incoming message plus the
    node's inner factor, exponentiated and normalized to sum to one.
    """
    log_marginal = np.zeros([var_node['cardinality'], 1])
    for incoming in var_node['input_msgs']:
        log_marginal = log_marginal + incoming['table']
    log_marginal = log_marginal + var_node['inner_factor']
    marginal = np.exp(log_marginal)
    return marginal / sum(marginal)
def update_factor_to_var(factor_node):
    """Send a message from ``factor_node`` to every neighboring variable.

    The most involved routine in the library: for each neighbor it
    marginalizes the CPD over all other arguments, weighting each CPD
    entry by the product of the other neighbors' incoming messages.
    Results are written in place into each variable node's ``input_msgs``.
    """
    output_tables=[]
    #One output table per neighbor, initialized to zero.
    for card in factor_node['cardinalities']:
        output_tables.append(np.zeros([card,1]))
    #Single pass over the flat CPD table: each entry is multiplied by the
    #relevant input messages and accumulated into every output table.
    for CPD_entry,CPD_val in enumerate(factor_node['CPD']):
        values=CPD_position_to_variable_index(
            position=CPD_entry,v_card=factor_node['cardinalities'],CPD_size=factor_node['CPD'].shape[0])
        #The CPD value is multiplied by all incoming messages but one and
        #added to that neighbor's output table.  The marginalization cannot
        #stay in the log domain, so constant_log is subtracted from each
        #exponent here and added back after the final log below, to avoid
        #overflow/underflow.
        for index in range(factor_node['cardinalities'].shape[0]):
            aux=CPD_val
            for index2 in range(factor_node['cardinalities'].shape[0]):
                if(index2!=index):
                    aux*=np.exp(factor_node['input_msgs'][index2]['table'][values[index2]]-constant_log)
            output_tables[index][values[index]]+=aux
    #Convert the accumulated tables back to the log domain, clip them for
    #stability, and store each message in the corresponding variable node.
    for index,msg in enumerate(factor_node['input_msgs']):
        output=output_tables[index]
        output=np.log(output)+constant_log
        output[output>inf_log]=inf_log
        output[output<-inf_log]=-inf_log
        var_rx=msg['input_node']
        reply_msg=create_message(input_node=factor_node,output_node=var_rx,table=output)
        #Short loop to place the message at the variable's matching edge slot.
        for index2,f in enumerate(var_rx['neighbor_order']):
            if(f==factor_node['ID']):
                var_rx['input_msgs'][index2]=reply_msg
                break
def create_joint_node(ID,node_members,neighbor_order,observed_values_indexes=-1):
    """Build a joint variable node merging several variable nodes.

    Joining variables removes cycles from the factor graph so that exact
    inference is possible.  Only fully-observed joint nodes are supported
    (no partial observation of individual members).  The corresponding
    joint factor nodes must be created separately via create_factor_node
    with a pre-computed joint CPD.

    Bug fix: the observed branch previously read ``var_node['values']``,
    a key that was never assigned, so observing a joint node raised
    ``KeyError``.  The member cardinalities are now collected explicitly
    and used to encode the observed configuration.
    """
    var_node={}
    var_node['ID']=ID
    var_node['node_type']=0
    var_node['input_msgs']=[]
    var_node['observed']=-1
    var_node['neighbor_order']=np.array(neighbor_order)
    # Cardinality of the joint node is the product of member cardinalities.
    member_cards=[member['cardinality'] for member in node_members]
    card=1
    for c in member_cards:
        card*=c
    var_node['cardinality']=card
    var_node['inner_factor']=np.zeros([card,1])
    if(observed_values_indexes!=-1):
        # Encode the per-member observed indexes as one flat position and
        # hard-clamp the joint distribution on it (log domain).
        var_node['observed']=variable_index_to_CPD_position(observed_values_indexes,member_cards,card)
        var_node['inner_factor']-=inf_log
        var_node['inner_factor'][var_node['observed']]=inf_log
    # One (initially empty) input-message slot per neighboring factor.
    for index,f in enumerate(var_node['neighbor_order']):
        var_node['input_msgs'].append(0)
    return var_node
| 37.360502
| 133
| 0.698439
|
or_order)
var_node['input_msgs']=[] #List to store input messages
var_node['observed']=observed_value_index #-1 if the variable is not observed
var_node['inner_factor']=np.zeros([cardinality,1]) #Internal vector used to imposed hard messages when variable is observed
#If variable is observed, then the inner_factor vector is log[0 0 ... 0 1 0 ...]
if(observed_value_index!=-1):
var_node['inner_factor']-=inf_log
var_node['inner_factor'][observed_value_index]=inf_log
#Initialize input msgs by filling with zeros
for index,f in enumerate(var_node['neighbor_order']):
var_node['input_msgs'].append(0)
return var_node
def create_message(input_node,output_node,table):
#Messages are defined by a dictionary with three keys: input node (sender node), output_node (receiver node), and table of values
message={}
message['input_node']=input_node
message['output_node']=output_node
message['table']=table
return message
def create_factor_node(ID,neighbors,CPD):
# Check Nodes are defined by a dictionary with several fields
factor_node={}
factor_node['ID']=ID
factor_node['node_type']=1
factor_node['input_msgs']=[]
CPD=np.array(CPD)
CPD=CPD.reshape(CPD.shape[0],) #Just to make sure that CPD is a np. array vector of dim. (n,)
factor_node['CPD']=np.array(CPD) #CPD table associated to the factor
factor_node['CPD_order']=np.zeros([len(neighbors),1]).astype(int) #Ordered array of the neighbor's IDs (neighbors are CNs!)
factor_node['cardinalities']=np.zeros([len(neighbors),1]).astype(int)
for index,node in enumerate(neighbors):
card=node['cardinality']
factor_node['input_msgs'].append(
create_message(input_node=node,output_node=factor_node,table=node['inner_factor']))
factor_node['cardinalities'][index]=card
factor_node['CPD_order'][index]=node['ID']
return factor_node
def initialize_variable(var_node,observed_value_index=-1):
var_node['inner_factor']=np.zeros([var_node['cardinality'],1])
var_node['observed']=observed_value_index
if(observed_value_index!=-1):
var_node['inner_factor']-=inf_log
var_node['inner_factor'][observed_value_index]=inf_log
def initialize_factor_msgs(factor_node,neighbors):
factor_node['input_msgs']=[]
for index,node in enumerate(neighbors):
factor_node['input_msgs'].append(
create_message(input_node=node,output_node=factor_node,table=node['inner_factor']))
def CPD_position_to_variable_index(position,v_card,CPD_size):
v_card=np.array(v_card)
var_index=np.zeros([v_card.shape[0],1]).astype(int)
remaining=CPD_size
for i,card in enumerate(v_card):
remaining=remaining//card
index_i=position//remaining
position=position-index_i*(remaining)
var_index[i]=index_i
return var_index
def variable_index_to_CPD_position(var_index,v_card,CPD_size):
var_index=np.array(var_index)
v_card=np.array(v_card)
position=0
offset=CPD_size
for i,card in enumerate(v_card):
offset=offset//card
position+=var_index[i]*offset
return position
def update_var_to_factor(var_node):
prod_table=np.zeros([var_node['cardinality'],1])
for msg in var_node['input_msgs']:
prod_table+=msg['table']
prod_table+=var_node['inner_factor']
for msg in var_node['input_msgs']:
if(var_node['observed']==-1):
reply_table=prod_table-msg['table']
else:
reply_table=np.ones([var_node['cardinality'],1])*(-inf_log)
reply_table[var_node['observed']]=inf_log
reply_table[reply_table>inf_log]=inf_log
reply_table[reply_table<-inf_log]=-inf_log
factor_rx=msg['input_node']
reply_msg=create_message(input_node=var_node,output_node=factor_rx,table=reply_table)
for index,v in enumerate(factor_rx['CPD_order']):
if(v==var_node['ID']):
factor_rx['input_msgs'][index]=reply_msg
break
def compute_var_marginal(var_node):
marg_table=np.zeros([var_node['cardinality'],1])
for msg in var_node['input_msgs']:
marg_table+=msg['table']
marg_table+=var_node['inner_factor']
marg_table=np.exp(marg_table)
marg_table/=sum(marg_table)
return marg_table
def update_factor_to_var(factor_node):
output_tables=[]
for card in factor_node['cardinalities']:
output_tables.append(np.zeros([card,1]))
for CPD_entry,CPD_val in enumerate(factor_node['CPD']):
values=CPD_position_to_variable_index(
position=CPD_entry,v_card=factor_node['cardinalities'],CPD_size=factor_node['CPD'].shape[0])
for index in range(factor_node['cardinalities'].shape[0]):
aux=CPD_val
for index2 in range(factor_node['cardinalities'].shape[0]):
if(index2!=index):
aux*=np.exp(factor_node['input_msgs'][index2]['table'][values[index2]]-constant_log)
output_tables[index][values[index]]+=aux
for index,msg in enumerate(factor_node['input_msgs']):
output=output_tables[index]
output=np.log(output)+constant_log
output[output>inf_log]=inf_log
output[output<-inf_log]=-inf_log
var_rx=msg['input_node']
reply_msg=create_message(input_node=factor_node,output_node=var_rx,table=output)
for index2,f in enumerate(var_rx['neighbor_order']):
if(f==factor_node['ID']):
var_rx['input_msgs'][index2]=reply_msg
break
def create_joint_node(ID,node_members,neighbor_order,observed_values_indexes=-1):
var_node={}
var_node['ID']=ID
var_node['node_type']=0
var_node['input_msgs']=[]
var_node['observed']=-1
var_node['neighbor_order']=np.array(neighbor_order)
card=1
for member in node_members:
card*=member['cardinality']
var_node['cardinality']=card
var_node['inner_factor']=np.zeros([card,1])
if(observed_values_indexes!=-1):
var_node['observed']=variable_index_to_CPD_position(observed_values_indexes,var_node['values'],card)
var_node['inner_factor']-=inf_log
var_node['inner_factor'][var_node['observed']]=inf_log
for index,f in enumerate(var_node['neighbor_order']):
var_node['input_msgs'].append(0)
return var_node
| true
| true
|
f704dbc440727caa33f14c21d525c911a2a366fb
| 2,262
|
py
|
Python
|
atlassian_connect_django/rest_framework/authentication.py
|
gerasev-kirill/atlassian-connect-django
|
cd44232df512691d9ec14722c38785cf802862e9
|
[
"MIT"
] | null | null | null |
atlassian_connect_django/rest_framework/authentication.py
|
gerasev-kirill/atlassian-connect-django
|
cd44232df512691d9ec14722c38785cf802862e9
|
[
"MIT"
] | null | null | null |
atlassian_connect_django/rest_framework/authentication.py
|
gerasev-kirill/atlassian-connect-django
|
cd44232df512691d9ec14722c38785cf802862e9
|
[
"MIT"
] | null | null | null |
from six import text_type
from rest_framework import HTTP_HEADER_ENCODING, exceptions
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from atlassian_connect_django.models.connect import AtlassianUser
from atlassian_connect_django import helpers
from .models import SecurityContextToken
def get_atlassian_security_context_and_user_from_request(request, raise_exceptions=True):
    """Resolve the Atlassian security context and user from a request.

    Parses the ``x-jira-security-context`` header (``token <key>``), looks
    up the matching SecurityContextToken and returns the tuple
    ``(security_context, atlassian_user)``.  On failure the behavior is
    controlled by ``raise_exceptions``: falsy -> return ``(None, None)``;
    ``'rest_framework'`` -> raise ``AuthenticationFailed``; any other
    truthy value -> raise ``PermissionDenied``.
    """
    def fail(message):
        if not raise_exceptions:
            return None, None
        if raise_exceptions == 'rest_framework':
            raise exceptions.AuthenticationFailed(message)
        raise PermissionDenied(message)

    header = request.META.get('HTTP_X_JIRA_SECURITY_CONTEXT', b'')
    if isinstance(header, text_type):
        # Work around django test client oddness: headers may arrive as str.
        header = header.encode(HTTP_HEADER_ENCODING)
    parts = header.split()
    # Not our scheme at all -> silently report "no context".
    if not parts or parts[0].lower() != b'token':
        return None, None
    if len(parts) == 1:
        return fail(_('Invalid x-jira-security-context token header. No credentials provided.'))
    if len(parts) > 2:
        return fail(_('Invalid x-jira-security-context token header. Token string should not contain spaces.'))
    try:
        key = parts[1].decode()
    except UnicodeError:
        return fail(_('Invalid x-jira-security-context token header. Token string should not contain invalid characters.'))
    try:
        token = SecurityContextToken.objects.select_related('security_context').get(key=key)
    except SecurityContextToken.DoesNotExist:
        return fail(_('Invalid x-jira-security-context token.'))
    if not token.security_context.is_plugin_enabled:
        return fail(_('Security context inactive or deleted.'))
    # When the request is bound to a site, it must match the token's site.
    site = helpers.get_current_site(request=request)
    if site and site != token.security_context.site:
        return fail(_('Invalid x-jira-security-context token header. SecurityContext site "%s" not equals to "%s"' % (token.security_context.site.name, site.name)))
    atlassian_user = AtlassianUser(accountId=token.atlassian_user_account_id)
    atlassian_user.set_secutiry_context(security_context=token.security_context)
    return token.security_context, atlassian_user
| 41.127273
| 169
| 0.741821
|
from six import text_type
from rest_framework import HTTP_HEADER_ENCODING, exceptions
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from atlassian_connect_django.models.connect import AtlassianUser
from atlassian_connect_django import helpers
from .models import SecurityContextToken
def get_atlassian_security_context_and_user_from_request(request, raise_exceptions=True):
def exception(msg):
if not raise_exceptions:
return None, None
if raise_exceptions == 'rest_framework':
raise exceptions.AuthenticationFailed(msg)
raise PermissionDenied(msg)
auth = request.META.get('HTTP_X_JIRA_SECURITY_CONTEXT', b'')
if isinstance(auth, text_type):
auth = auth.encode(HTTP_HEADER_ENCODING)
auth = auth.split()
if not auth or auth[0].lower() != b'token':
return None, None
if len(auth) == 1:
return exception(_('Invalid x-jira-security-context token header. No credentials provided.'))
elif len(auth) > 2:
return exception(_('Invalid x-jira-security-context token header. Token string should not contain spaces.'))
try:
token = auth[1].decode()
except UnicodeError:
return exception(_('Invalid x-jira-security-context token header. Token string should not contain invalid characters.'))
try:
token = SecurityContextToken.objects.select_related('security_context').get(key=token)
except SecurityContextToken.DoesNotExist:
return exception(_('Invalid x-jira-security-context token.'))
if not token.security_context.is_plugin_enabled:
return exception(_('Security context inactive or deleted.'))
site = helpers.get_current_site(request=request)
if site and site != token.security_context.site:
return exception(_('Invalid x-jira-security-context token header. SecurityContext site "%s" not equals to "%s"' % (token.security_context.site.name, site.name)))
atlassian_user = AtlassianUser(accountId=token.atlassian_user_account_id)
atlassian_user.set_secutiry_context(security_context=token.security_context)
return token.security_context, atlassian_user
| true
| true
|
f704dd4818c3b8d3017ae2937945bb191f230b62
| 2,054
|
py
|
Python
|
python_smaclient/smapi_request.py
|
jloehel/python_smaclient
|
ed18efb4e19728f3644eb1e510262def57a1767b
|
[
"0BSD"
] | null | null | null |
python_smaclient/smapi_request.py
|
jloehel/python_smaclient
|
ed18efb4e19728f3644eb1e510262def57a1767b
|
[
"0BSD"
] | null | null | null |
python_smaclient/smapi_request.py
|
jloehel/python_smaclient
|
ed18efb4e19728f3644eb1e510262def57a1767b
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
import uuid
from construct import Container
class SMAPI_Request(object):
    '''
    Implementation of an ICUV (SMAPI) request.

    Stores the call parameters and pre-computes the total input length:
    each of the four leading fields (function name, authenticated userid,
    password, target identifier) is preceded on the wire by a 4-byte
    length word, hence the ``+ 4`` terms.
    '''
    def __init__(self, function_name, target_identifier,
                 authenticated_userid=b"", password=b"", additional_parameters=b""):
        # All fields are expected as bytes; lengths are byte counts.
        self._function_name = function_name
        self._function_name_length = len(function_name)
        self._authenticated_userid = authenticated_userid
        self._authenticated_userid_length = len(authenticated_userid)
        self._password = password
        self._password_length = len(password)
        self._target_identifier = target_identifier
        self._target_identifier_length = len(target_identifier)
        self._additional_parameters = additional_parameters
        self._additional_parameters_length = len(additional_parameters)
        # Total payload length: four length-prefixed fields + trailing params.
        self._input_length = (self._function_name_length + 4 +
                              self._authenticated_userid_length + 4 +
                              self._password_length + 4 +
                              self._target_identifier_length + 4 +
                              self._additional_parameters_length)

    def get_container(self):
        """Return the request as a ``construct.Container`` ready for building."""
        return Container(input_length = self._input_length,
                         function_name_length = self._function_name_length,
                         function_name = self._function_name,
                         authenticated_userid_length = self._authenticated_userid_length,
                         authenticated_userid = self._authenticated_userid,
                         password_length = self._password_length,
                         password = self._password,
                         target_identifier_length = self._target_identifier_length,
                         target_identifier = self._target_identifier,
                         additional_parameters = self._additional_parameters)

    def __repr__(self):
        # Bug fix: the format string was built but never returned, so
        # repr(obj) raised "TypeError: __repr__ returned non-string (None)".
        return "<{} (container={})>".format(
            self.__class__.__name__,
            self.get_container())
| 44.652174
| 89
| 0.624635
|
import uuid
from construct import Container
class SMAPI_Request(object):
def __init__(self, function_name, target_identifier,
authenticated_userid=b"", password=b"", additional_parameters=b""):
self._function_name = function_name
self._function_name_length = len(function_name)
self._authenticated_userid = authenticated_userid
self._authenticated_userid_length = len(authenticated_userid)
self._password = password
self._password_length = len(password)
self._target_identifier = target_identifier
self._target_identifier_length = len(target_identifier)
self._additional_parameters = additional_parameters
self._additional_parameters_length = len(additional_parameters)
self._input_length = (self._function_name_length + 4 +
self._authenticated_userid_length + 4 +
self._password_length + 4 +
self._target_identifier_length + 4 +
self._additional_parameters_length)
def get_container(self):
return Container(input_length = self._input_length,
function_name_length = self._function_name_length,
function_name = self._function_name,
authenticated_userid_length = self._authenticated_userid_length,
authenticated_userid = self._authenticated_userid,
password_length = self._password_length,
password = self._password,
target_identifier_length = self._target_identifier_length,
target_identifier = self._target_identifier,
additional_parameters = self._additional_parameters)
def __repr__(self):
"<{} (container={})>".format(
self.__class__.__name__,
self.get_container())
| true
| true
|
f704ddb7dba518e0334a017e32b36881a1730110
| 22,096
|
py
|
Python
|
ppmessage/dispatcher/policy.py
|
augustand/ppmessage
|
73beac9c75f751d5026ff7defff23732c7419b43
|
[
"Apache-2.0"
] | 6
|
2017-11-03T17:31:52.000Z
|
2020-06-14T09:14:36.000Z
|
ppmessage/dispatcher/policy.py
|
augustand/ppmessage
|
73beac9c75f751d5026ff7defff23732c7419b43
|
[
"Apache-2.0"
] | null | null | null |
ppmessage/dispatcher/policy.py
|
augustand/ppmessage
|
73beac9c75f751d5026ff7defff23732c7419b43
|
[
"Apache-2.0"
] | 16
|
2017-08-08T01:25:47.000Z
|
2019-09-17T16:32:06.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
#
from ppmessage.core.constant import IOS_FAKE_TOKEN
from ppmessage.core.constant import CONVERSATION_TYPE
from ppmessage.core.constant import MESSAGE_SUBTYPE
from ppmessage.core.constant import MESSAGE_STATUS
from ppmessage.core.constant import MESSAGE_TYPE
from ppmessage.core.constant import TASK_STATUS
from ppmessage.core.constant import REDIS_DISPATCHER_NOTIFICATION_KEY
from ppmessage.core.constant import REDIS_PUSH_NOTIFICATION_KEY
from ppmessage.core.constant import REDIS_MQTTPUSH_KEY
from ppmessage.core.constant import REDIS_GCMPUSH_KEY
from ppmessage.core.constant import REDIS_IOSPUSH_KEY
from ppmessage.core.constant import REDIS_JPUSH_KEY
from ppmessage.core.constant import PPCOM_OFFLINE
from ppmessage.core.constant import YVOBJECT
from ppmessage.core.constant import DIS_SRV
from ppmessage.core.constant import OS
from ppmessage.db.models import OrgGroup
from ppmessage.db.models import DeviceUser
from ppmessage.db.models import DeviceInfo
from ppmessage.db.models import OrgGroupUserData
from ppmessage.db.models import AppUserData
from ppmessage.db.models import MessagePush
from ppmessage.db.models import MessagePushTask
from ppmessage.db.models import PCSocketInfo
from ppmessage.db.models import PCSocketDeviceData
from ppmessage.db.models import ConversationUserData
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.core.utils.datetimestring import datetime_to_timestamp
from ppmessage.core.utils.datetimestring import datetime_to_microsecond_timestamp
from operator import itemgetter
import uuid
import time
import json
import logging
class Meta(type):
    # Minimal metaclass hook; currently just delegates to ``type`` so all
    # dispatch-policy classes share one metaclass (kept for extension).
    def __init__(cls, name, bases, dict_):
        type.__init__(cls, name, bases, dict_)
        return
# Common base class created through the metaclass (py2/py3-neutral idiom);
# every dispatch policy derives from it.
Policy = Meta("Policy", (object,), {})
class AbstractPolicy(Policy):
def __init__(self, dis):
self._dis = dis
self._task = dis._task
self._redis = dis.application.redis
self._online_users = set()
self._offline_users = set()
self._devices = set()
self._devices_hash = {}
self._users_hash = {}
self._is_service_user = {}
self._conversation_users = set()
self._conversation_user_datas_uuid = {}
self._conversation_user_datas_hash = {}
self._users = set()
return
@classmethod
def conversation_users(cls, _app_uuid, _conversation_uuid, _redis):
_key = ConversationUserData.__tablename__ + ".conversation_uuid." + _conversation_uuid
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def conversation_datas(cls, _app_uuid, _conversation_uuid, _users, _redis):
_pi = _redis.pipeline()
_pre = ConversationUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid."
_pos = ".conversation_uuid." + _conversation_uuid
for _user_uuid in _users:
_key = _pre + _user_uuid + _pos
_pi.get(_key)
_datas = _pi.execute()
return _datas
    @classmethod
    def create_conversation_users(cls, _app_uuid, _group_uuid, _redis):
        # Hook for subclasses: return the users a newly created conversation
        # should include.  The base policy adds none.
        return []
@classmethod
def app_users(cls, _app_uuid, _is_service_user, _redis):
if _app_uuid == None:
return []
_key = AppUserData.__tablename__ + \
".app_uuid." + _app_uuid + \
".is_service_user." + str(_is_service_user)
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def distributor_users(cls, _app_uuid, _redis):
# is_service_user == True
if _app_uuid == None:
return []
_key = AppUserData.__tablename__ + \
".app_uuid." + _app_uuid + \
".is_service_user.True"
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def group_users(cls, _group_uuid, _redis):
_pattern = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid
_keys = _redis.smembers(_pattern)
return list(_keys)
    @classmethod
    def get_service_care_users(cls, _app_uuid, _user_uuid, _redis):
        # Hook for subclasses: which service-side users should be notified
        # for this user.  The base policy selects none.
        return None
    @classmethod
    def get_portal_care_users(cls, _app_uuid, _user_uuid, _redis):
        # Hook for subclasses: which portal-side users should be notified
        # for this user.  The base policy selects none.
        return None
def _android_token(self, _user_uuid, _device_uuid):
_token = _user_uuid + "/" + _device_uuid + "/" + self._task["message_type"] + "/" + self._task["uuid"]
return _token
    def _body(self):
        """Build the wire message body from the task and persist it.

        Fills the abbreviated-key message dict (id/from/to/types/
        conversation/title/body/timestamp), stores it on the task, and
        marks the MessagePushTask row PROCESSED with the JSON body.

        NOTE(review): the ``unicode`` checks make this Python-2-only code;
        under Python 3 they would raise NameError.
        """
        _message = {}
        _message["id"] = self._task.get("uuid")
        _message["fi"] = self._task.get("from_uuid")
        _message["ti"] = self._task.get("to_uuid")
        _message["ft"] = self._task.get("from_type")
        _message["tt"] = self._task.get("to_type")
        _message["mt"] = self._task.get("message_type")
        _message["ms"] = self._task.get("message_subtype")
        _message["ci"] = self._task.get("conversation_uuid")
        _message["ct"] = self._task.get("conversation_type")
        _message["tl"] = self._task.get("title")
        _message["bo"] = self._task.get("body")
        # Service-to-portal conversations are addressed to the app itself.
        if _message["ct"] == CONVERSATION_TYPE.S2P:
            _message["ti"] = self._task["app_uuid"]
            _message["tt"] = YVOBJECT.AP
        # Normalize title/body to utf-8 byte strings (Python 2 semantics).
        if isinstance(self._task.get("title"), unicode):
            _message["tl"] = self._task.get("title").encode("utf-8")
        if isinstance(self._task.get("body"), unicode):
            _message["bo"] = self._task.get("body").encode("utf-8")
        _message["ts"] = datetime_to_microsecond_timestamp(self._task["createtime"])
        self._task["message_body"] = _message
        _message_body = json.dumps(self._task["message_body"])
        if isinstance(_message_body, unicode):
            _message_body = _message_body.encode("utf-8")
        # Mark the task processed and attach the serialized body.
        _values = {
            "uuid": self._task["uuid"],
            "task_status": TASK_STATUS.PROCESSED,
            "message_body": _message_body,
        }
        _row = MessagePushTask(**_values)
        _row.async_update(self._redis)
        _row.update_redis_keys(self._redis)
        return
    def _user_devices(self, _user_uuid):
        """Load one user's devices and classify the user online/offline.

        Reads the device-uuid fields of the cached user record (ppcom
        fields for non-service users), caches each device in
        ``_devices_hash``, records online devices on the user, and adds
        the user to ``_online_users`` or ``_offline_users``.
        """
        _user = self._users_hash.get(_user_uuid)
        _is_service_user = self._is_service_user.get(_user_uuid)
        if _user == None or _is_service_user == None:
            logging.error("no user or is_service_user in hash: %s" % _user_uuid)
            return
        _user["_online_devices"] = {}
        # Service users use the generic device fields; end users (ppcom)
        # have their own device-uuid fields.
        _device_name = ["mobile_device_uuid", "browser_device_uuid"]
        if _is_service_user == False:
            _device_name = ["ppcom_mobile_device_uuid", "ppcom_browser_device_uuid"]
        for _i in _device_name:
            _device_uuid = self._users_hash[_user_uuid][_i]
            if _device_uuid == None or len(_device_uuid) == 0:
                continue
            _device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
            if _device == None:
                continue
            self._devices_hash[_device_uuid] = _device
            self._devices.add(_device_uuid)
            if _device.get("device_is_online") == True:
                _user["_online_devices"][_device_uuid] = _device
        # A user is "online" iff at least one of their devices is online.
        if len(_user["_online_devices"]) > 0:
            self._online_users.add(_user_uuid)
        else:
            self._offline_users.add(_user_uuid)
        return
def _users_devices(self):
for _i in self._users:
self._users_hash[_i] = redis_hash_to_dict(self._redis, DeviceUser, _i)
for _i in self._users:
self._user_devices(_i)
logging.info("online : %d, %s" % (len(self._online_users), self._online_users))
logging.info("offline : %d, %s" % (len(self._offline_users), self._offline_users))
return
def _pcsocket_data(self, _device_uuid):
_redis = self._redis
_key = PCSocketDeviceData.__tablename__ + ".device_uuid." + _device_uuid
_pc_socket_uuid = _redis.get(_key)
if _pc_socket_uuid == None:
logging.error("device no pcsocket %s" % _device_uuid)
return None
_info = redis_hash_to_dict(_redis, PCSocketInfo, _pc_socket_uuid)
if _info == None:
logging.error("dispatcher cant not find pcsocket %s" % str(_pc_socket_uuid))
return None
_d = {"host": _info["host"], "port": _info["port"], "device_uuid": _device_uuid}
return _d
def _push_to_db(self, _user_uuid, _status=MESSAGE_STATUS.PUSHED):
_values = {
"uuid": str(uuid.uuid1()),
"app_uuid": self._task["app_uuid"],
"task_uuid": self._task["uuid"],
"user_uuid": _user_uuid,
"status": _status
}
_row = MessagePush(**_values)
_row.async_add(self._redis)
_row.create_redis_keys(self._redis)
return _row.uuid
def _push_to_ios(self, _user_uuid, _device_uuid):
logging.info("push ios %s:%s" % (_user_uuid, _device_uuid))
_app_uuid = self._task["app_uuid"]
_user = self._users_hash.get(_user_uuid)
_device = self._devices_hash.get(_device_uuid)
_conversation_data = self._conversation_user_datas_hash.get(_user_uuid)
if _user == None:
logging.error("push ios failed for no user")
return
if _device == None:
logging.error("push ios failed for no device")
return
_token = _device.get("device_ios_token")
if _token == None or len(_token) == 0:
logging.error("push ios failed for no ios token")
return
if _device["device_ios_token"] == IOS_FAKE_TOKEN:
logging.error("push ios failed for fake token")
return
if _conversation_data != None and _conversation_data["user_mute_notification"] == True:
# user only do not want recv push for this conversation
logging.error("push ios failed for silence required")
return
_count = 0
if _user.get("user_show_badge") == True:
_key = MessagePush.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid
_count = self._redis.zcard(_key)
_is_dev = bool(_device.get("is_development"))
_config = {
"is_development": _is_dev,
"user_language": _user.get("user_language"),
"device_ios_token": _token,
"unacked_notification_count": _count,
"user_silence_notification": _user.get("user_silence_notification")
}
_push = {
"config": _config,
"body": self._task.get("message_body"),
"app_uuid": _app_uuid
}
logging.info("push ios: %s" % str(_push))
self._redis.rpush(REDIS_IOSPUSH_KEY, json.dumps(_push))
return
def _push_to_android(self, _user_uuid, _device_uuid):
_app_uuid = self._task["app_uuid"]
_device = self._devices_hash.get(_device_uuid)
_user = self._users_hash.get(_user_uuid)
_conversation_data = self._conversation_user_datas_hash.get(_user_uuid)
_count = 0
if _user.get("user_show_badge") == True:
_key = MessagePush.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid
_count = self._redis.zcard(_key)
_config = {
"user_language": _user.get("user_language"),
"unacked_notification_count": _count,
"user_silence_notification": _user.get("user_silence_notification")
}
_push = {
"config": _config,
"body": self._task.get("message_body"),
"app_uuid": _app_uuid
}
logging.error("try push for android: %s" % str(_push))
if self._task["_app"].get("enable_jpush"):
_config["device_android_jpush_registrationid"] = _device.get("device_android_jpush_registrationid")
self._redis.rpush(REDIS_JPUSH_KEY, json.dumps(_push))
elif self._task["_app"].get("enable_gcm_push"):
_config["device_android_gcmtoken"] = _device.get("device_android_gcmtoken")
self._redis.rpush(REDIS_GCMPUSH_KEY, json.dumps(_push))
else:
logging.error("no push enable for android: %s" % str(_push))
return
def _push_to_socket(self, _user_uuid, _device_uuid):
_pcsocket = self._pcsocket_data(_device_uuid)
if _pcsocket == None:
logging.error("no pcsocket data for: %s" % _device_uuid)
return
_device = self._devices_hash.get(_device_uuid)
# if _device == None:
# logging.error("no device hash for: %s" % _device_uuid)
# return
_from_user = {}
_from_type = self._task.get("from_type")
_fields = [
"uuid",
"user_icon",
"user_email",
"user_fullname",
"updatetime",
]
if _from_type == YVOBJECT.DU:
for _i in _fields:
_from_user[_i] = self._task["_user"].get(_i)
_from_user["updatetime"] = datetime_to_timestamp(_from_user["updatetime"])
if _from_type == YVOBJECT.OG:
_from_user = self._task["_group"]
if _from_type == YVOBJECT.AP:
_from_user = self._task["_app"]
_body = self._task.get("message_body")
_body["pid"] = _device.get("push_uuid")
_body["from_user"] = _from_user
_push = {
"pcsocket": _pcsocket,
"body": _body
}
_key = REDIS_PUSH_NOTIFICATION_KEY + ".host." + _pcsocket["host"] + ".port." + _pcsocket["port"]
self._redis.rpush(_key, json.dumps(_push))
return
def _push_to_mobile(self, _user_uuid, _device_uuid):
_device = self._devices_hash[_device_uuid]
if _device["device_ostype"] == OS.IOS:
self._push_to_ios(_user_uuid, _device_uuid)
return
if _device["device_ostype"] == OS.AND:
self._push_to_android(_user_uuid, _device_uuid)
return
return
def _push(self):
if len(self._online_users) == 0:
self.no_online_user()
return
for _user_uuid in self._online_users:
_user = self._users_hash[_user_uuid]
_online_devices = _user.get("_online_devices")
_real_push = not _user.get("user_mute_notification")
_pid = self._push_to_db(_user_uuid)
for _device_uuid in _online_devices:
self._devices_hash[_device_uuid]["push_uuid"] = _pid
self._push_to_socket(_user_uuid, _device_uuid)
if _real_push == True:
self._push_to_mobile(_user_uuid, _device_uuid)
return
def _other_device(self):
"""
the other device uuid belong to same user uuid
"""
if self._task.get("from_device_uuid") == None:
return
if self._task.get("from_type") != YVOBJECT.DU:
return
if self._task.get("_user") == None:
return
if self._task["conversation_type"] == CONVERSATION_TYPE.P2S:
if self._task["_user"]["ppcom_mobile_device_uuid"] == None or \
self._task["_user"]["ppcom_browser_device_uuid"] == None:
return
if self._task["conversation_type"] == CONVERSATION_TYPE.S2S or \
self._task["conversation_type"] == CONVERSATION_TYPE.S2P:
if self._task["_user"]["mobile_device_uuid"] == None or \
self._task["_user"]["browser_device_uuid"] == None:
return
_device_uuid = None
if self._task["conversation_type"] == CONVERSATION_TYPE.P2S:
_device_uuid = self._task["_user"]["ppcom_mobile_device_uuid"]
if self._task["from_device_uuid"] == self._task["_user"]["ppcom_mobile_device_uuid"]:
_device_uuid = self._task["_user"]["ppcom_browser_device_uuid"]
else:
_device_uuid = self._task["_user"]["mobile_device_uuid"]
if self._task["from_device_uuid"] == self._task["_user"]["mobile_device_uuid"]:
_device_uuid = self._task["_user"]["browser_device_uuid"]
if _device_uuid not in self._devices_hash:
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None or _device["device_is_online"] != True:
return
self._devices_hash[_device_uuid] = _device
_user_uuid = self._task["from_uuid"]
if _user_uuid not in self._users_hash:
self._users_hash[_user_uuid] = self._task["_user"]
_pid = self._push_to_db(_user_uuid)
self._devices_hash[_device_uuid]["push_uuid"] = _pid
self._push_to_socket(_user_uuid, _device_uuid)
return
def _explicit(self):
"""
explicit message SYS type
"""
_device_uuid = self._task.get("to_device_uuid")
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None:
logging.error("no device:%s" % _device_uuid)
return
_user_uuid = self._task.get("from_uuid")
self._users_hash[_user_uuid] = self._task["_user"]
self._devices_hash[_device_uuid] = _device
# not save db for explicit message
self._push_to_socket(_user_uuid, _device_uuid)
return
def _send_apologize(self, _text):
_task = {
"uuid": str(uuid.uuid1()),
"app_uuid": self._task["app_uuid"],
"conversation_uuid": self._task["conversation_uuid"],
"conversation_type": CONVERSATION_TYPE.S2P,
"message_type": MESSAGE_TYPE.NOTI,
"message_subtype": MESSAGE_SUBTYPE.TEXT,
"from_uuid": self._task["to_uuid"],
"from_type": self._task["to_type"],
"to_uuid": self._task["to_uuid"],
"to_type": self._task["to_type"],
"body": _text,
"task_status": TASK_STATUS.PENDING,
}
_row = MessagePushTask(**_task)
_row.async_add(self._redis)
_row.create_redis_keys(self._redis)
_m = {"task_uuid": _row.uuid}
self._redis.rpush(REDIS_DISPATCHER_NOTIFICATION_KEY, json.dumps(_m))
return
def _get_app_apologize(self):
_text = None
_lang = self._task["_user"]["user_language"]
if _lang == None or len(_lang) == 0:
_lang = "zh_cn"
_offline = "offline_" + _lang
_text = self._task["_app"][_offline]
if _text == None:
_text = PPCOM_OFFLINE[_lang]
return _text
def no_online_user(self):
if self._task["conversation_type"] != CONVERSATION_TYPE.P2S:
return
if self._task["_app"].get("return_offline_message") != True:
logging.info("return_offline_message is not set")
return
_text = self._get_app_apologize()
if _text == None:
return
self._send_apologize(_text)
return
def users(self):
_app_uuid = self._task["app_uuid"]
_conversation_uuid = self._task["conversation_uuid"]
_users = AbstractPolicy.conversation_users(_app_uuid, _conversation_uuid, self._redis)
_datas = AbstractPolicy.conversation_datas(_app_uuid, _conversation_uuid, _users, self._redis)
_datas = dict(zip(_users, _datas))
# the is_service_user include the sender user_uuid
_table = AppUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid."
_pi = self._redis.pipeline()
for _user_uuid in _users:
_key = _table + _user_uuid
_pi.get(_key)
_is = _pi.execute()
_is_list = []
for _i in _is:
if _i == None or len(_i) == 0:
_is_list.append(False)
continue
_d = json.loads(_i)
_is_list.append(_d.get("is_service_user"))
self._is_service_user = dict(zip(_users, _is_list))
# remove the sender self
if self._task["from_type"] == YVOBJECT.DU:
_user_uuid = self._task["from_uuid"]
if _user_uuid in _users:
_users.remove(_user_uuid)
if _user_uuid in _datas:
del _datas[_user_uuid]
self._users = _users
self._conversation_users = _users
self._conversation_user_datas_uuid = _datas
return
def dispatch(self):
self._body()
if self._task.get("to_device_uuid") != None:
self._explicit()
return
if self._task.get("conversation_uuid") == None:
logging.error("no conversation should be explicit")
return
self.users()
self._users_devices()
self._push()
self._other_device()
return
class BroadcastPolicy(AbstractPolicy):
def __init__(self, dis):
super(BroadcastPolicy, self).__init__(dis)
return
def users(self):
super(BroadcastPolicy, self).users()
return
@classmethod
def create_conversation_users(cls, _app_uuid, _group_uuid, _redis):
return AbstractPolicy.distributor_users(_app_uuid, _redis)
@classmethod
def get_service_care_users(cls, _app_uuid, _user_uuid, _redis):
_a_users = AbstractPolicy.app_users(_app_uuid, True, _redis)
_b_users = AbstractPolicy.app_users(_app_uuid, False, _redis)
return _a_users + _b_users
@classmethod
def get_portal_care_users(cls, _app_uuid, _user_uuid, _redis):
_a_users = AbstractPolicy.app_users(_app_uuid, True, _redis)
return _a_users
| 36.342105
| 111
| 0.613052
|
from ppmessage.core.constant import IOS_FAKE_TOKEN
from ppmessage.core.constant import CONVERSATION_TYPE
from ppmessage.core.constant import MESSAGE_SUBTYPE
from ppmessage.core.constant import MESSAGE_STATUS
from ppmessage.core.constant import MESSAGE_TYPE
from ppmessage.core.constant import TASK_STATUS
from ppmessage.core.constant import REDIS_DISPATCHER_NOTIFICATION_KEY
from ppmessage.core.constant import REDIS_PUSH_NOTIFICATION_KEY
from ppmessage.core.constant import REDIS_MQTTPUSH_KEY
from ppmessage.core.constant import REDIS_GCMPUSH_KEY
from ppmessage.core.constant import REDIS_IOSPUSH_KEY
from ppmessage.core.constant import REDIS_JPUSH_KEY
from ppmessage.core.constant import PPCOM_OFFLINE
from ppmessage.core.constant import YVOBJECT
from ppmessage.core.constant import DIS_SRV
from ppmessage.core.constant import OS
from ppmessage.db.models import OrgGroup
from ppmessage.db.models import DeviceUser
from ppmessage.db.models import DeviceInfo
from ppmessage.db.models import OrgGroupUserData
from ppmessage.db.models import AppUserData
from ppmessage.db.models import MessagePush
from ppmessage.db.models import MessagePushTask
from ppmessage.db.models import PCSocketInfo
from ppmessage.db.models import PCSocketDeviceData
from ppmessage.db.models import ConversationUserData
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.core.utils.datetimestring import datetime_to_timestamp
from ppmessage.core.utils.datetimestring import datetime_to_microsecond_timestamp
from operator import itemgetter
import uuid
import time
import json
import logging
class Meta(type):
def __init__(cls, name, bases, dict_):
type.__init__(cls, name, bases, dict_)
return
Policy = Meta("Policy", (object,), {})
class AbstractPolicy(Policy):
def __init__(self, dis):
self._dis = dis
self._task = dis._task
self._redis = dis.application.redis
self._online_users = set()
self._offline_users = set()
self._devices = set()
self._devices_hash = {}
self._users_hash = {}
self._is_service_user = {}
self._conversation_users = set()
self._conversation_user_datas_uuid = {}
self._conversation_user_datas_hash = {}
self._users = set()
return
@classmethod
def conversation_users(cls, _app_uuid, _conversation_uuid, _redis):
_key = ConversationUserData.__tablename__ + ".conversation_uuid." + _conversation_uuid
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def conversation_datas(cls, _app_uuid, _conversation_uuid, _users, _redis):
_pi = _redis.pipeline()
_pre = ConversationUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid."
_pos = ".conversation_uuid." + _conversation_uuid
for _user_uuid in _users:
_key = _pre + _user_uuid + _pos
_pi.get(_key)
_datas = _pi.execute()
return _datas
@classmethod
def create_conversation_users(cls, _app_uuid, _group_uuid, _redis):
return []
@classmethod
def app_users(cls, _app_uuid, _is_service_user, _redis):
if _app_uuid == None:
return []
_key = AppUserData.__tablename__ + \
".app_uuid." + _app_uuid + \
".is_service_user." + str(_is_service_user)
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def distributor_users(cls, _app_uuid, _redis):
if _app_uuid == None:
return []
_key = AppUserData.__tablename__ + \
".app_uuid." + _app_uuid + \
".is_service_user.True"
_users = _redis.smembers(_key)
return list(_users)
@classmethod
def group_users(cls, _group_uuid, _redis):
_pattern = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid
_keys = _redis.smembers(_pattern)
return list(_keys)
@classmethod
def get_service_care_users(cls, _app_uuid, _user_uuid, _redis):
return None
@classmethod
def get_portal_care_users(cls, _app_uuid, _user_uuid, _redis):
return None
def _android_token(self, _user_uuid, _device_uuid):
_token = _user_uuid + "/" + _device_uuid + "/" + self._task["message_type"] + "/" + self._task["uuid"]
return _token
def _body(self):
_message = {}
_message["id"] = self._task.get("uuid")
_message["fi"] = self._task.get("from_uuid")
_message["ti"] = self._task.get("to_uuid")
_message["ft"] = self._task.get("from_type")
_message["tt"] = self._task.get("to_type")
_message["mt"] = self._task.get("message_type")
_message["ms"] = self._task.get("message_subtype")
_message["ci"] = self._task.get("conversation_uuid")
_message["ct"] = self._task.get("conversation_type")
_message["tl"] = self._task.get("title")
_message["bo"] = self._task.get("body")
if _message["ct"] == CONVERSATION_TYPE.S2P:
_message["ti"] = self._task["app_uuid"]
_message["tt"] = YVOBJECT.AP
if isinstance(self._task.get("title"), unicode):
_message["tl"] = self._task.get("title").encode("utf-8")
if isinstance(self._task.get("body"), unicode):
_message["bo"] = self._task.get("body").encode("utf-8")
_message["ts"] = datetime_to_microsecond_timestamp(self._task["createtime"])
self._task["message_body"] = _message
_message_body = json.dumps(self._task["message_body"])
if isinstance(_message_body, unicode):
_message_body = _message_body.encode("utf-8")
_values = {
"uuid": self._task["uuid"],
"task_status": TASK_STATUS.PROCESSED,
"message_body": _message_body,
}
_row = MessagePushTask(**_values)
_row.async_update(self._redis)
_row.update_redis_keys(self._redis)
return
def _user_devices(self, _user_uuid):
_user = self._users_hash.get(_user_uuid)
_is_service_user = self._is_service_user.get(_user_uuid)
if _user == None or _is_service_user == None:
logging.error("no user or is_service_user in hash: %s" % _user_uuid)
return
_user["_online_devices"] = {}
_device_name = ["mobile_device_uuid", "browser_device_uuid"]
if _is_service_user == False:
_device_name = ["ppcom_mobile_device_uuid", "ppcom_browser_device_uuid"]
for _i in _device_name:
_device_uuid = self._users_hash[_user_uuid][_i]
if _device_uuid == None or len(_device_uuid) == 0:
continue
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None:
continue
self._devices_hash[_device_uuid] = _device
self._devices.add(_device_uuid)
if _device.get("device_is_online") == True:
_user["_online_devices"][_device_uuid] = _device
if len(_user["_online_devices"]) > 0:
self._online_users.add(_user_uuid)
else:
self._offline_users.add(_user_uuid)
return
def _users_devices(self):
for _i in self._users:
self._users_hash[_i] = redis_hash_to_dict(self._redis, DeviceUser, _i)
for _i in self._users:
self._user_devices(_i)
logging.info("online : %d, %s" % (len(self._online_users), self._online_users))
logging.info("offline : %d, %s" % (len(self._offline_users), self._offline_users))
return
def _pcsocket_data(self, _device_uuid):
_redis = self._redis
_key = PCSocketDeviceData.__tablename__ + ".device_uuid." + _device_uuid
_pc_socket_uuid = _redis.get(_key)
if _pc_socket_uuid == None:
logging.error("device no pcsocket %s" % _device_uuid)
return None
_info = redis_hash_to_dict(_redis, PCSocketInfo, _pc_socket_uuid)
if _info == None:
logging.error("dispatcher cant not find pcsocket %s" % str(_pc_socket_uuid))
return None
_d = {"host": _info["host"], "port": _info["port"], "device_uuid": _device_uuid}
return _d
def _push_to_db(self, _user_uuid, _status=MESSAGE_STATUS.PUSHED):
_values = {
"uuid": str(uuid.uuid1()),
"app_uuid": self._task["app_uuid"],
"task_uuid": self._task["uuid"],
"user_uuid": _user_uuid,
"status": _status
}
_row = MessagePush(**_values)
_row.async_add(self._redis)
_row.create_redis_keys(self._redis)
return _row.uuid
def _push_to_ios(self, _user_uuid, _device_uuid):
logging.info("push ios %s:%s" % (_user_uuid, _device_uuid))
_app_uuid = self._task["app_uuid"]
_user = self._users_hash.get(_user_uuid)
_device = self._devices_hash.get(_device_uuid)
_conversation_data = self._conversation_user_datas_hash.get(_user_uuid)
if _user == None:
logging.error("push ios failed for no user")
return
if _device == None:
logging.error("push ios failed for no device")
return
_token = _device.get("device_ios_token")
if _token == None or len(_token) == 0:
logging.error("push ios failed for no ios token")
return
if _device["device_ios_token"] == IOS_FAKE_TOKEN:
logging.error("push ios failed for fake token")
return
if _conversation_data != None and _conversation_data["user_mute_notification"] == True:
logging.error("push ios failed for silence required")
return
_count = 0
if _user.get("user_show_badge") == True:
_key = MessagePush.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid
_count = self._redis.zcard(_key)
_is_dev = bool(_device.get("is_development"))
_config = {
"is_development": _is_dev,
"user_language": _user.get("user_language"),
"device_ios_token": _token,
"unacked_notification_count": _count,
"user_silence_notification": _user.get("user_silence_notification")
}
_push = {
"config": _config,
"body": self._task.get("message_body"),
"app_uuid": _app_uuid
}
logging.info("push ios: %s" % str(_push))
self._redis.rpush(REDIS_IOSPUSH_KEY, json.dumps(_push))
return
def _push_to_android(self, _user_uuid, _device_uuid):
_app_uuid = self._task["app_uuid"]
_device = self._devices_hash.get(_device_uuid)
_user = self._users_hash.get(_user_uuid)
_conversation_data = self._conversation_user_datas_hash.get(_user_uuid)
_count = 0
if _user.get("user_show_badge") == True:
_key = MessagePush.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid
_count = self._redis.zcard(_key)
_config = {
"user_language": _user.get("user_language"),
"unacked_notification_count": _count,
"user_silence_notification": _user.get("user_silence_notification")
}
_push = {
"config": _config,
"body": self._task.get("message_body"),
"app_uuid": _app_uuid
}
logging.error("try push for android: %s" % str(_push))
if self._task["_app"].get("enable_jpush"):
_config["device_android_jpush_registrationid"] = _device.get("device_android_jpush_registrationid")
self._redis.rpush(REDIS_JPUSH_KEY, json.dumps(_push))
elif self._task["_app"].get("enable_gcm_push"):
_config["device_android_gcmtoken"] = _device.get("device_android_gcmtoken")
self._redis.rpush(REDIS_GCMPUSH_KEY, json.dumps(_push))
else:
logging.error("no push enable for android: %s" % str(_push))
return
def _push_to_socket(self, _user_uuid, _device_uuid):
_pcsocket = self._pcsocket_data(_device_uuid)
if _pcsocket == None:
logging.error("no pcsocket data for: %s" % _device_uuid)
return
_device = self._devices_hash.get(_device_uuid)
_from_user = {}
_from_type = self._task.get("from_type")
_fields = [
"uuid",
"user_icon",
"user_email",
"user_fullname",
"updatetime",
]
if _from_type == YVOBJECT.DU:
for _i in _fields:
_from_user[_i] = self._task["_user"].get(_i)
_from_user["updatetime"] = datetime_to_timestamp(_from_user["updatetime"])
if _from_type == YVOBJECT.OG:
_from_user = self._task["_group"]
if _from_type == YVOBJECT.AP:
_from_user = self._task["_app"]
_body = self._task.get("message_body")
_body["pid"] = _device.get("push_uuid")
_body["from_user"] = _from_user
_push = {
"pcsocket": _pcsocket,
"body": _body
}
_key = REDIS_PUSH_NOTIFICATION_KEY + ".host." + _pcsocket["host"] + ".port." + _pcsocket["port"]
self._redis.rpush(_key, json.dumps(_push))
return
def _push_to_mobile(self, _user_uuid, _device_uuid):
_device = self._devices_hash[_device_uuid]
if _device["device_ostype"] == OS.IOS:
self._push_to_ios(_user_uuid, _device_uuid)
return
if _device["device_ostype"] == OS.AND:
self._push_to_android(_user_uuid, _device_uuid)
return
return
def _push(self):
if len(self._online_users) == 0:
self.no_online_user()
return
for _user_uuid in self._online_users:
_user = self._users_hash[_user_uuid]
_online_devices = _user.get("_online_devices")
_real_push = not _user.get("user_mute_notification")
_pid = self._push_to_db(_user_uuid)
for _device_uuid in _online_devices:
self._devices_hash[_device_uuid]["push_uuid"] = _pid
self._push_to_socket(_user_uuid, _device_uuid)
if _real_push == True:
self._push_to_mobile(_user_uuid, _device_uuid)
return
def _other_device(self):
if self._task.get("from_device_uuid") == None:
return
if self._task.get("from_type") != YVOBJECT.DU:
return
if self._task.get("_user") == None:
return
if self._task["conversation_type"] == CONVERSATION_TYPE.P2S:
if self._task["_user"]["ppcom_mobile_device_uuid"] == None or \
self._task["_user"]["ppcom_browser_device_uuid"] == None:
return
if self._task["conversation_type"] == CONVERSATION_TYPE.S2S or \
self._task["conversation_type"] == CONVERSATION_TYPE.S2P:
if self._task["_user"]["mobile_device_uuid"] == None or \
self._task["_user"]["browser_device_uuid"] == None:
return
_device_uuid = None
if self._task["conversation_type"] == CONVERSATION_TYPE.P2S:
_device_uuid = self._task["_user"]["ppcom_mobile_device_uuid"]
if self._task["from_device_uuid"] == self._task["_user"]["ppcom_mobile_device_uuid"]:
_device_uuid = self._task["_user"]["ppcom_browser_device_uuid"]
else:
_device_uuid = self._task["_user"]["mobile_device_uuid"]
if self._task["from_device_uuid"] == self._task["_user"]["mobile_device_uuid"]:
_device_uuid = self._task["_user"]["browser_device_uuid"]
if _device_uuid not in self._devices_hash:
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None or _device["device_is_online"] != True:
return
self._devices_hash[_device_uuid] = _device
_user_uuid = self._task["from_uuid"]
if _user_uuid not in self._users_hash:
self._users_hash[_user_uuid] = self._task["_user"]
_pid = self._push_to_db(_user_uuid)
self._devices_hash[_device_uuid]["push_uuid"] = _pid
self._push_to_socket(_user_uuid, _device_uuid)
return
def _explicit(self):
_device_uuid = self._task.get("to_device_uuid")
_device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)
if _device == None:
logging.error("no device:%s" % _device_uuid)
return
_user_uuid = self._task.get("from_uuid")
self._users_hash[_user_uuid] = self._task["_user"]
self._devices_hash[_device_uuid] = _device
self._push_to_socket(_user_uuid, _device_uuid)
return
def _send_apologize(self, _text):
_task = {
"uuid": str(uuid.uuid1()),
"app_uuid": self._task["app_uuid"],
"conversation_uuid": self._task["conversation_uuid"],
"conversation_type": CONVERSATION_TYPE.S2P,
"message_type": MESSAGE_TYPE.NOTI,
"message_subtype": MESSAGE_SUBTYPE.TEXT,
"from_uuid": self._task["to_uuid"],
"from_type": self._task["to_type"],
"to_uuid": self._task["to_uuid"],
"to_type": self._task["to_type"],
"body": _text,
"task_status": TASK_STATUS.PENDING,
}
_row = MessagePushTask(**_task)
_row.async_add(self._redis)
_row.create_redis_keys(self._redis)
_m = {"task_uuid": _row.uuid}
self._redis.rpush(REDIS_DISPATCHER_NOTIFICATION_KEY, json.dumps(_m))
return
def _get_app_apologize(self):
_text = None
_lang = self._task["_user"]["user_language"]
if _lang == None or len(_lang) == 0:
_lang = "zh_cn"
_offline = "offline_" + _lang
_text = self._task["_app"][_offline]
if _text == None:
_text = PPCOM_OFFLINE[_lang]
return _text
def no_online_user(self):
if self._task["conversation_type"] != CONVERSATION_TYPE.P2S:
return
if self._task["_app"].get("return_offline_message") != True:
logging.info("return_offline_message is not set")
return
_text = self._get_app_apologize()
if _text == None:
return
self._send_apologize(_text)
return
def users(self):
_app_uuid = self._task["app_uuid"]
_conversation_uuid = self._task["conversation_uuid"]
_users = AbstractPolicy.conversation_users(_app_uuid, _conversation_uuid, self._redis)
_datas = AbstractPolicy.conversation_datas(_app_uuid, _conversation_uuid, _users, self._redis)
_datas = dict(zip(_users, _datas))
_table = AppUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid."
_pi = self._redis.pipeline()
for _user_uuid in _users:
_key = _table + _user_uuid
_pi.get(_key)
_is = _pi.execute()
_is_list = []
for _i in _is:
if _i == None or len(_i) == 0:
_is_list.append(False)
continue
_d = json.loads(_i)
_is_list.append(_d.get("is_service_user"))
self._is_service_user = dict(zip(_users, _is_list))
if self._task["from_type"] == YVOBJECT.DU:
_user_uuid = self._task["from_uuid"]
if _user_uuid in _users:
_users.remove(_user_uuid)
if _user_uuid in _datas:
del _datas[_user_uuid]
self._users = _users
self._conversation_users = _users
self._conversation_user_datas_uuid = _datas
return
def dispatch(self):
self._body()
if self._task.get("to_device_uuid") != None:
self._explicit()
return
if self._task.get("conversation_uuid") == None:
logging.error("no conversation should be explicit")
return
self.users()
self._users_devices()
self._push()
self._other_device()
return
class BroadcastPolicy(AbstractPolicy):
def __init__(self, dis):
super(BroadcastPolicy, self).__init__(dis)
return
def users(self):
super(BroadcastPolicy, self).users()
return
@classmethod
def create_conversation_users(cls, _app_uuid, _group_uuid, _redis):
return AbstractPolicy.distributor_users(_app_uuid, _redis)
@classmethod
def get_service_care_users(cls, _app_uuid, _user_uuid, _redis):
_a_users = AbstractPolicy.app_users(_app_uuid, True, _redis)
_b_users = AbstractPolicy.app_users(_app_uuid, False, _redis)
return _a_users + _b_users
@classmethod
def get_portal_care_users(cls, _app_uuid, _user_uuid, _redis):
_a_users = AbstractPolicy.app_users(_app_uuid, True, _redis)
return _a_users
| true
| true
|
f704e0229ff68105dd98edffdadbd49a9cb411c6
| 1,607
|
py
|
Python
|
hard-gists/6623972/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/6623972/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/6623972/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
#!/usr/bin/env python
#
# author: syl20bnr (2013)
# goal: Focus the nth window in the current workspace (limited to 10 firsts)
#
# Example of usage in i3 config:
#
# bindsym $mod+0 exec focus_win.py -n 0
# bindsym $mod+1 exec focus_win.py -n 1
# ... ...
# bindsym $mod+8 exec focus_win.py -n 8
# bindsym $mod+9 exec focus_win.py -n 9
import argparse
from subprocess import Popen
import i3
PARSER = argparse.ArgumentParser(prog='focus_win')
PARSER.add_argument('-n', '--number',
required=True,
type=int,
choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
help='Window number (limited to [0,9]).')
def focus_nth_window(nth):
''' Roughly focus the nth window in the hierarchy (limited to 10 first) '''
wins = get_windows_from_current_workspace()
if nth == 0:
nth = 10
cmd = 'i3-msg [con_id={0}] focus'.format(wins[nth-1])
Popen(cmd, shell=True)
def get_windows_from_current_workspace():
res = []
ws = get_current_workspace()
workspace = i3.filter(name=ws)
if workspace:
workspace = workspace[0]
windows = i3.filter(workspace, nodes=[])
for window in windows:
res.append(window['id'])
return res
def get_current_workspace():
''' Returns the current workspace '''
workspaces = i3.msg('get_workspaces')
workspace = i3.filter(tree=workspaces, focused=True)
if workspace:
return workspace[0]['name']
return ''
if __name__ == '__main__':
args = PARSER.parse_args()
focus_nth_window(args.number)
| 26.783333
| 79
| 0.616677
|
import argparse
from subprocess import Popen
import i3
PARSER = argparse.ArgumentParser(prog='focus_win')
PARSER.add_argument('-n', '--number',
required=True,
type=int,
choices=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
help='Window number (limited to [0,9]).')
def focus_nth_window(nth):
wins = get_windows_from_current_workspace()
if nth == 0:
nth = 10
cmd = 'i3-msg [con_id={0}] focus'.format(wins[nth-1])
Popen(cmd, shell=True)
def get_windows_from_current_workspace():
res = []
ws = get_current_workspace()
workspace = i3.filter(name=ws)
if workspace:
workspace = workspace[0]
windows = i3.filter(workspace, nodes=[])
for window in windows:
res.append(window['id'])
return res
def get_current_workspace():
workspaces = i3.msg('get_workspaces')
workspace = i3.filter(tree=workspaces, focused=True)
if workspace:
return workspace[0]['name']
return ''
if __name__ == '__main__':
args = PARSER.parse_args()
focus_nth_window(args.number)
| true
| true
|
f704e2908192786114878cd4a17cb8a00a35bca0
| 1,437
|
py
|
Python
|
tests/test_normalizers.py
|
gobadiah/jasonpi
|
c0dc504cb4be5880d3d2dcedeebd9513a8123569
|
[
"MIT"
] | null | null | null |
tests/test_normalizers.py
|
gobadiah/jasonpi
|
c0dc504cb4be5880d3d2dcedeebd9513a8123569
|
[
"MIT"
] | null | null | null |
tests/test_normalizers.py
|
gobadiah/jasonpi
|
c0dc504cb4be5880d3d2dcedeebd9513a8123569
|
[
"MIT"
] | 1
|
2019-03-05T09:35:06.000Z
|
2019-03-05T09:35:06.000Z
|
import datetime
from jasonpi.normalizers import facebook_profile, google_profile
def test_facebook_profile():
"""
Test that facebook_profile computes
a correct profile received from facebook oauth.
"""
data = {
'email': 'some@email.com',
'first_name': 'Alfred',
'last_name': 'Dupont',
'gender': 'male',
'birthday': '02/25/1970'
}
profile = facebook_profile(data)
assert profile['email'] == data['email']
assert profile['first_name'] == data['first_name']
assert profile['last_name'] == data['last_name']
assert profile['gender'] == data['gender']
assert profile['birthday'] == datetime.date(1970, 2, 25)
def test_google_profile():
"""
Test that google_profile computes
a correct profile received from google oauth.
"""
data = {
'emailAddresses': [{'value': 'some@email.com'}],
'names': [{'givenName': 'Alfred', 'familyName': 'Dupont'}],
'genders': [{'value': 'male'}],
'birthdays': [{'date': {'year': 1970, 'month': 2, 'day': 25}}]
}
profile = google_profile(data)
assert profile['email'] == data['emailAddresses'][0]['value']
assert profile['first_name'] == data['names'][0]['givenName']
assert profile['last_name'] == data['names'][0]['familyName']
assert profile['gender'] == data['genders'][0]['value']
assert profile['birthday'] == datetime.date(1970, 2, 25)
| 33.418605
| 70
| 0.610299
|
import datetime
from jasonpi.normalizers import facebook_profile, google_profile
def test_facebook_profile():
data = {
'email': 'some@email.com',
'first_name': 'Alfred',
'last_name': 'Dupont',
'gender': 'male',
'birthday': '02/25/1970'
}
profile = facebook_profile(data)
assert profile['email'] == data['email']
assert profile['first_name'] == data['first_name']
assert profile['last_name'] == data['last_name']
assert profile['gender'] == data['gender']
assert profile['birthday'] == datetime.date(1970, 2, 25)
def test_google_profile():
    """Verify google_profile normalizes a raw Google People API payload."""
    data = {
        'emailAddresses': [{'value': 'some@email.com'}],
        'names': [{'givenName': 'Alfred', 'familyName': 'Dupont'}],
        'genders': [{'value': 'male'}],
        'birthdays': [{'date': {'year': 1970, 'month': 2, 'day': 25}}]
    }
    profile = google_profile(data)
    # Each field comes from the first entry of its respective list.
    assert profile['email'] == data['emailAddresses'][0]['value']
    assert profile['first_name'] == data['names'][0]['givenName']
    assert profile['last_name'] == data['names'][0]['familyName']
    assert profile['gender'] == data['genders'][0]['value']
    # The structured birthday dict becomes a datetime.date.
    assert profile['birthday'] == datetime.date(1970, 2, 25)
| true
| true
|
f704e2ba80b1a17cddedcb87bc5118b0363446b9
| 441
|
py
|
Python
|
_draft/x_6_8.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
_draft/x_6_8.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | 1
|
2021-11-13T08:03:04.000Z
|
2021-11-13T08:03:04.000Z
|
_draft/x_6_8.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
# x_6_8
#
#
# Error raised when the requested quantity exceeds available stock.
class StockError(Exception):
    pass


# Error raised when the supplied card number is not accepted.
class NumberError(Exception):
    pass


# Prompt for an order quantity and a card number (prompts are in Japanese).
order_count = input('きび団子を何個注文しますか?:')
card_number = input('カード番号を入力してください?(例、0000-0000-0000-0000):')
try:
    # More than 100 items cannot be fulfilled from stock.
    if int(order_count) > 100:
        raise StockError
    # Only this exact card number is accepted in this exercise.
    if card_number != '1111-1111-1111-1111':
        raise NumberError
except StockError:
    # "Out of stock" message.
    print('在庫切れです')
except NumberError:
    # "Card error" message.
    print('カードエラー')
else:
    # "Thank you for your purchase" message.
    print('ご購入ありがとうございます')
| 16.333333
| 62
| 0.673469
|
# Raised when the order quantity is larger than the available stock.
class StockError(Exception):
    pass


# Raised when the entered card number does not match the expected one.
class NumberError(Exception):
    pass


# Read the order quantity and card number from the user (Japanese prompts).
order_count = input('きび団子を何個注文しますか?:')
card_number = input('カード番号を入力してください?(例、0000-0000-0000-0000):')
try:
    # Orders above 100 units are rejected as out of stock.
    if int(order_count) > 100:
        raise StockError
    # A single hard-coded card number is considered valid here.
    if card_number != '1111-1111-1111-1111':
        raise NumberError
except StockError:
    # "Out of stock"
    print('在庫切れです')
except NumberError:
    # "Card error"
    print('カードエラー')
else:
    # "Thank you for your purchase"
    print('ご購入ありがとうございます')
| true
| true
|
f704e2ec4bedf7f9433b07f9722cc1c5b621e5ed
| 10,014
|
py
|
Python
|
electrumx/server/daemon.py
|
bitcoin-global/global-electrumx
|
077dc2c3a5bfd9e82dc11784a98f986b8b726336
|
[
"MIT"
] | 1
|
2020-06-30T18:50:22.000Z
|
2020-06-30T18:50:22.000Z
|
electrumx/server/daemon.py
|
bitcoin-global/global-electrumx
|
077dc2c3a5bfd9e82dc11784a98f986b8b726336
|
[
"MIT"
] | null | null | null |
electrumx/server/daemon.py
|
bitcoin-global/global-electrumx
|
077dc2c3a5bfd9e82dc11784a98f986b8b726336
|
[
"MIT"
] | 1
|
2020-12-18T17:13:31.000Z
|
2020-12-18T17:13:31.000Z
|
# Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling asynchronous connections to a blockchain
daemon.'''
import asyncio
import itertools
import json
import time
import aiohttp
from aiorpcx import JSONRPC
from electrumx.lib.util import hex_to_bytes, class_logger
class DaemonError(Exception):
    """The daemon's JSON-RPC reply carried an error in its results."""
class WarmingUpError(Exception):
    """Internal marker: the daemon reported it is still warming up."""
class ServiceRefusedError(Exception):
    """Internal marker: the daemon answered with a plain HTTP error
    rather than a JSON payload, for some reason."""
class Daemon(object):
    '''Handles connections to a daemon at the given URL.

    Supports a comma-separated list of URLs with fail-over, bounded
    request concurrency, and retry with exponential backoff.
    '''

    # Error code bitcoind returns while still starting up.
    WARMING_UP = -28
    # Monotonically increasing JSON-RPC request ids, shared by all instances.
    id_counter = itertools.count()

    def __init__(self, coin, url, *, max_workqueue=10, init_retry=0.25, max_retry=4.0):
        self.coin = coin
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.url_index = None
        self.urls = []
        self.set_url(url)
        # Limit concurrent RPC calls to this number.
        # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
        self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
        # Initial and maximum backoff delays (seconds) between retries.
        self.init_retry = init_retry
        self.max_retry = max_retry
        self._height = None
        # Cache of RPC method name -> availability; see _is_rpc_available().
        self.available_rpcs = {}
        self.session = None

    async def __aenter__(self):
        # Open the HTTP session when entered as an async context manager.
        self.session = aiohttp.ClientSession(connector=self.connector())
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self.session.close()
        self.session = None

    def connector(self):
        # Subclass hook for a custom aiohttp connector; None means default.
        return None

    def set_url(self, url):
        '''Set the URLS to the given comma-separated list, and switch to
        the first one.'''
        urls = url.split(',')
        urls = [self.coin.sanitize_url(url) for url in urls]
        for n, url in enumerate(urls):
            status = '' if n else ' (current)'
            logged_url = self.logged_url(url)
            self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
        self.url_index = 0
        self.urls = urls

    def current_url(self):
        '''Returns the current daemon URL.'''
        return self.urls[self.url_index]

    def logged_url(self, url=None):
        '''The host and port part, for logging.

        Strips everything up to the '@' so credentials are never logged.
        '''
        url = url or self.current_url()
        return url[url.rindex('@') + 1:]

    def failover(self):
        '''Call to fail-over to the next daemon URL.
        Returns False if there is only one, otherwise True.
        '''
        if len(self.urls) > 1:
            self.url_index = (self.url_index + 1) % len(self.urls)
            self.logger.info(f'failing over to {self.logged_url()}')
            return True
        return False

    async def _send_data(self, data):
        # POST the serialized payload, bounded by the workqueue semaphore.
        async with self.workqueue_semaphore:
            async with self.session.post(self.current_url(), data=data) as resp:
                kind = resp.headers.get('Content-Type', None)
                if kind == 'application/json':
                    return await resp.json()
                # Non-JSON reply: surface the body (or HTTP reason) as a refusal.
                text = await resp.text()
                text = text.strip() or resp.reason
                raise ServiceRefusedError(text)

    async def _send(self, payload, processor):
        '''Send a payload to be converted to JSON.

        Handles temporary connection issues by retrying with exponential
        backoff.  Daemon response errors are raised through DaemonError.
        '''
        def log_error(error):
            # Log at most once a minute, and fail over once the backoff
            # has reached its maximum.
            nonlocal last_error_log, retry
            now = time.time()
            if now - last_error_log > 60:
                last_error_log = now
                self.logger.error(f'{error}. Retrying occasionally...')
            if retry == self.max_retry and self.failover():
                retry = 0

        on_good_message = None
        last_error_log = 0
        data = json.dumps(payload)
        retry = self.init_retry
        while True:
            try:
                result = await self._send_data(data)
                result = processor(result)
                if on_good_message:
                    self.logger.info(on_good_message)
                return result
            except asyncio.TimeoutError:
                log_error('timeout error')
            except aiohttp.ServerDisconnectedError:
                log_error('disconnected')
                on_good_message = 'connection restored'
            except ConnectionResetError:
                log_error('connection reset')
                on_good_message = 'connection restored'
            except aiohttp.ClientConnectionError:
                log_error('connection problem - check your daemon is running')
                on_good_message = 'connection restored'
            except aiohttp.ClientError as e:
                log_error(f'daemon error: {e}')
                on_good_message = 'running normally'
            except ServiceRefusedError as e:
                log_error(f'daemon service refused: {e}')
                on_good_message = 'running normally'
            except WarmingUpError:
                log_error('starting up checking blocks')
                on_good_message = 'running normally'
            # Back off before retrying, doubling up to max_retry.
            await asyncio.sleep(retry)
            retry = max(min(self.max_retry, retry * 2), self.init_retry)

    async def _send_single(self, method, params=None):
        '''Send a single request to the daemon and return its result.'''
        def processor(result):
            err = result['error']
            if not err:
                return result['result']
            if err.get('code') == self.WARMING_UP:
                raise WarmingUpError
            raise DaemonError(err)

        payload = {'method': method, 'id': next(self.id_counter)}
        if params:
            payload['params'] = params
        return await self._send(payload, processor)

    async def _send_vector(self, method, params_iterable, replace_errs=False):
        '''Send several requests of the same method.

        The result will be an array of the same length as params_iterable.
        If replace_errs is true, any item with an error is returned as None,
        otherwise an exception is raised.'''
        def processor(result):
            errs = [item['error'] for item in result if item['error']]
            if any(err.get('code') == self.WARMING_UP for err in errs):
                raise WarmingUpError
            if not errs or replace_errs:
                return [item['result'] for item in result]
            raise DaemonError(errs)

        payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
                   for p in params_iterable]
        if payload:
            return await self._send(payload, processor)
        return []

    async def _is_rpc_available(self, method):
        '''Return whether given RPC method is available in the daemon.

        Results are cached and the daemon will generally not be queried with
        the same method more than once.'''
        available = self.available_rpcs.get(method)
        if available is None:
            available = True
            try:
                await self._send_single(method)
            except DaemonError as e:
                # Unavailable only if the daemon says the method is unknown;
                # any other error implies the method exists.
                err = e.args[0]
                error_code = err.get("code")
                available = error_code != JSONRPC.METHOD_NOT_FOUND
            self.available_rpcs[method] = available
        return available

    async def block_hex_hashes(self, first, count):
        '''Return the hex hashes of count block starting at height first.'''
        params_iterable = ((h, ) for h in range(first, first + count))
        return await self._send_vector('getblockhash', params_iterable)

    async def deserialised_block(self, hex_hash):
        '''Return the deserialised block with the given hex hash.'''
        return await self._send_single('getblock', (hex_hash, True))

    async def raw_blocks(self, hex_hashes):
        '''Return the raw binary blocks with the given hex hashes.'''
        params_iterable = ((h, False) for h in hex_hashes)
        blocks = await self._send_vector('getblock', params_iterable)
        # Convert hex string to bytes
        return [hex_to_bytes(block) for block in blocks]

    async def mempool_hashes(self):
        '''Update our record of the daemon's mempool hashes.'''
        return await self._send_single('getrawmempool')

    async def getnetworkinfo(self):
        '''Return the result of the 'getnetworkinfo' RPC call.'''
        return await self._send_single('getnetworkinfo')

    async def getrawtransaction(self, hex_hash, verbose=False):
        '''Return the serialized raw transaction with the given hash.'''
        # Cast to int because some coin daemons are old and require it
        return await self._send_single('getrawtransaction',
                                       (hex_hash, int(verbose)))

    async def getrawtransactions(self, hex_hashes, replace_errs=True):
        '''Return the serialized raw transactions with the given hashes.

        Replaces errors with None by default.'''
        params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
        txs = await self._send_vector('getrawtransaction', params_iterable,
                                      replace_errs=replace_errs)
        # Convert hex strings to bytes
        return [hex_to_bytes(tx) if tx else None for tx in txs]

    async def broadcast_transaction(self, raw_tx):
        '''Broadcast a transaction to the network.'''
        return await self._send_single('sendrawtransaction', (raw_tx, ))

    async def height(self):
        '''Query the daemon for its current height and cache it.'''
        self._height = await self._send_single('getblockcount')
        return self._height

    def cached_height(self):
        '''Return the cached daemon height.

        If the daemon has not been queried yet this returns None.'''
        return self._height
| 37.931818
| 90
| 0.61454
|
import asyncio
import itertools
import json
import time
import aiohttp
from aiorpcx import JSONRPC
from electrumx.lib.util import hex_to_bytes, class_logger
class DaemonError(Exception):
    """Raised when the daemon returns an error in its results."""
class WarmingUpError(Exception):
    """Internal - raised while the daemon reports it is warming up."""
class ServiceRefusedError(Exception):
    """Internal - raised when the daemon replies with an HTTP error
    instead of a JSON payload."""
class Daemon(object):
    """Manages JSON-RPC connections to a blockchain daemon at the given URL,
    with fail-over across multiple URLs and retry with exponential backoff."""

    # Error code bitcoind returns while it is still warming up.
    WARMING_UP = -28
    # Monotonically increasing JSON-RPC request ids.
    id_counter = itertools.count()

    def __init__(self, coin, url, *, max_workqueue=10, init_retry=0.25, max_retry=4.0):
        self.coin = coin
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.url_index = None
        self.urls = []
        self.set_url(url)
        # Semaphore bounding the number of in-flight RPC calls.
        self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
        # Initial and maximum backoff delays (seconds) between retries.
        self.init_retry = init_retry
        self.max_retry = max_retry
        self._height = None
        # Cache of RPC method name -> availability (see _is_rpc_available).
        self.available_rpcs = {}
        self.session = None

    async def __aenter__(self):
        # Open the HTTP session when used as an async context manager.
        self.session = aiohttp.ClientSession(connector=self.connector())
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self.session.close()
        self.session = None

    def connector(self):
        # Subclasses may return a custom aiohttp connector; None = default.
        return None

    def set_url(self, url):
        # Accept a comma-separated list of URLs; start with the first one.
        urls = url.split(',')
        urls = [self.coin.sanitize_url(url) for url in urls]
        for n, url in enumerate(urls):
            status = '' if n else ' (current)'
            logged_url = self.logged_url(url)
            self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
        self.url_index = 0
        self.urls = urls

    def current_url(self):
        # The daemon URL currently in use.
        return self.urls[self.url_index]

    def logged_url(self, url=None):
        # Strip everything before the '@' so credentials are never logged.
        url = url or self.current_url()
        return url[url.rindex('@') + 1:]

    def failover(self):
        # Rotate to the next configured URL; False if there is only one.
        if len(self.urls) > 1:
            self.url_index = (self.url_index + 1) % len(self.urls)
            self.logger.info(f'failing over to {self.logged_url()}')
            return True
        return False

    async def _send_data(self, data):
        # POST the serialized payload, bounded by the workqueue semaphore.
        async with self.workqueue_semaphore:
            async with self.session.post(self.current_url(), data=data) as resp:
                kind = resp.headers.get('Content-Type', None)
                if kind == 'application/json':
                    return await resp.json()
                # Non-JSON reply: surface the body (or HTTP reason) as a refusal.
                text = await resp.text()
                text = text.strip() or resp.reason
                raise ServiceRefusedError(text)

    async def _send(self, payload, processor):
        # Send `payload` as JSON, retrying transient failures with
        # exponential backoff; daemon-level errors raise DaemonError.
        def log_error(error):
            # Log at most once a minute; fail over once backoff is maxed.
            nonlocal last_error_log, retry
            now = time.time()
            if now - last_error_log > 60:
                last_error_log = now
                self.logger.error(f'{error}. Retrying occasionally...')
            if retry == self.max_retry and self.failover():
                retry = 0

        on_good_message = None
        last_error_log = 0
        data = json.dumps(payload)
        retry = self.init_retry
        while True:
            try:
                result = await self._send_data(data)
                result = processor(result)
                if on_good_message:
                    self.logger.info(on_good_message)
                return result
            except asyncio.TimeoutError:
                log_error('timeout error')
            except aiohttp.ServerDisconnectedError:
                log_error('disconnected')
                on_good_message = 'connection restored'
            except ConnectionResetError:
                log_error('connection reset')
                on_good_message = 'connection restored'
            except aiohttp.ClientConnectionError:
                log_error('connection problem - check your daemon is running')
                on_good_message = 'connection restored'
            except aiohttp.ClientError as e:
                log_error(f'daemon error: {e}')
                on_good_message = 'running normally'
            except ServiceRefusedError as e:
                log_error(f'daemon service refused: {e}')
                on_good_message = 'running normally'
            except WarmingUpError:
                log_error('starting up checking blocks')
                on_good_message = 'running normally'
            # Back off before retrying, doubling up to max_retry.
            await asyncio.sleep(retry)
            retry = max(min(self.max_retry, retry * 2), self.init_retry)

    async def _send_single(self, method, params=None):
        # One request; returns its 'result' or raises on 'error'.
        def processor(result):
            err = result['error']
            if not err:
                return result['result']
            if err.get('code') == self.WARMING_UP:
                raise WarmingUpError
            raise DaemonError(err)

        payload = {'method': method, 'id': next(self.id_counter)}
        if params:
            payload['params'] = params
        return await self._send(payload, processor)

    async def _send_vector(self, method, params_iterable, replace_errs=False):
        # Batched requests of one method; with replace_errs, failed items
        # come back as None instead of raising DaemonError.
        def processor(result):
            errs = [item['error'] for item in result if item['error']]
            if any(err.get('code') == self.WARMING_UP for err in errs):
                raise WarmingUpError
            if not errs or replace_errs:
                return [item['result'] for item in result]
            raise DaemonError(errs)

        payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
                   for p in params_iterable]
        if payload:
            return await self._send(payload, processor)
        return []

    async def _is_rpc_available(self, method):
        # Probe (once) whether the daemon supports `method`; result cached.
        available = self.available_rpcs.get(method)
        if available is None:
            available = True
            try:
                await self._send_single(method)
            except DaemonError as e:
                # Unavailable only when the daemon reports METHOD_NOT_FOUND.
                err = e.args[0]
                error_code = err.get("code")
                available = error_code != JSONRPC.METHOD_NOT_FOUND
            self.available_rpcs[method] = available
        return available

    async def block_hex_hashes(self, first, count):
        # Hex hashes of `count` blocks starting at height `first`.
        params_iterable = ((h, ) for h in range(first, first + count))
        return await self._send_vector('getblockhash', params_iterable)

    async def deserialised_block(self, hex_hash):
        # Block as a decoded JSON object (verbose getblock).
        return await self._send_single('getblock', (hex_hash, True))

    async def raw_blocks(self, hex_hashes):
        # Raw binary blocks for the given hex hashes.
        params_iterable = ((h, False) for h in hex_hashes)
        blocks = await self._send_vector('getblock', params_iterable)
        return [hex_to_bytes(block) for block in blocks]

    async def mempool_hashes(self):
        # Current mempool transaction hashes from the daemon.
        return await self._send_single('getrawmempool')

    async def getnetworkinfo(self):
        # Result of the daemon's 'getnetworkinfo' RPC call.
        return await self._send_single('getnetworkinfo')

    async def getrawtransaction(self, hex_hash, verbose=False):
        # int() cast: some older daemons reject a boolean verbose flag.
        return await self._send_single('getrawtransaction',
                                       (hex_hash, int(verbose)))

    async def getrawtransactions(self, hex_hashes, replace_errs=True):
        # Raw transactions as bytes; errors become None when replace_errs.
        params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
        txs = await self._send_vector('getrawtransaction', params_iterable,
                                      replace_errs=replace_errs)
        return [hex_to_bytes(tx) if tx else None for tx in txs]

    async def broadcast_transaction(self, raw_tx):
        # Submit a raw transaction to the network.
        return await self._send_single('sendrawtransaction', (raw_tx, ))

    async def height(self):
        # Query and cache the daemon's current block height.
        self._height = await self._send_single('getblockcount')
        return self._height

    def cached_height(self):
        # Last height returned by height(); None if never queried.
        return self._height
| true
| true
|
f704e31e46f9b00192e8725b26646591bb78db65
| 644
|
py
|
Python
|
TODO_LIST/TODO_APP/migrations/0001_initial.py
|
Amit89499/TODO-APP-DJANGO
|
082a4ffb803778378c6a8077ca47cf868bc55ef8
|
[
"Apache-2.0"
] | 4
|
2020-06-29T16:00:39.000Z
|
2021-05-22T03:40:38.000Z
|
TODO_LIST/TODO_APP/migrations/0001_initial.py
|
Amit89499/TODO-APP-DJANGO
|
082a4ffb803778378c6a8077ca47cf868bc55ef8
|
[
"Apache-2.0"
] | null | null | null |
TODO_LIST/TODO_APP/migrations/0001_initial.py
|
Amit89499/TODO-APP-DJANGO
|
082a4ffb803778378c6a8077ca47cf868bc55ef8
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.2.4 on 2020-06-21 18:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the TODO app: creates the ``task`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='task',
            fields=[
                # Auto-incrementing surrogate primary key added by Django.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                # Whether the task is done; new tasks start as open.
                ('complete', models.BooleanField(default=False)),
                # Creation timestamp, set automatically on insert.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| 26.833333
| 115
| 0.555901
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """First schema migration: defines the ``task`` model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='task',
            fields=[
                # Django-generated auto primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                # Completion flag; defaults to not completed.
                ('complete', models.BooleanField(default=False)),
                # Set once, automatically, when the row is created.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| true
| true
|
f704e383be902dc878749e1e1053016142dfdbac
| 457
|
py
|
Python
|
sportsbet/datasets/__init__.py
|
ItzBraveNoob/sports-betting
|
521d4ef6bd0e079d508f40609681124edc2c6805
|
[
"MIT"
] | 49
|
2020-12-27T15:23:23.000Z
|
2022-03-30T19:21:13.000Z
|
sportsbet/datasets/__init__.py
|
ItzBraveNoob/sports-betting
|
521d4ef6bd0e079d508f40609681124edc2c6805
|
[
"MIT"
] | 5
|
2021-04-23T17:41:30.000Z
|
2022-02-02T14:03:37.000Z
|
sportsbet/datasets/__init__.py
|
ItzBraveNoob/sports-betting
|
521d4ef6bd0e079d508f40609681124edc2c6805
|
[
"MIT"
] | 15
|
2021-02-13T02:01:49.000Z
|
2022-03-07T01:09:15.000Z
|
"""
The :mod:`sportsbet.datasets` module provides the
tools to download and transform sports betting data.
"""
from ._base import load
from ._soccer._combined import SoccerDataLoader
from ._soccer._fd import FDSoccerDataLoader
from ._soccer._fte import FTESoccerDataLoader
from ._soccer._dummy import DummySoccerDataLoader
# Public names exported by `from sportsbet.datasets import *`.
__all__ = [
    'SoccerDataLoader',
    'FDSoccerDataLoader',
    'FTESoccerDataLoader',
    'DummySoccerDataLoader',
    'load',
]
| 24.052632
| 52
| 0.770241
|
from ._base import load
from ._soccer._combined import SoccerDataLoader
from ._soccer._fd import FDSoccerDataLoader
from ._soccer._fte import FTESoccerDataLoader
from ._soccer._dummy import DummySoccerDataLoader
# Declares the package's public API for star-imports and documentation.
__all__ = [
    'SoccerDataLoader',
    'FDSoccerDataLoader',
    'FTESoccerDataLoader',
    'DummySoccerDataLoader',
    'load',
]
| true
| true
|
f704e3f96f343e3769531df6f0c6d75bfe0aa5ca
| 66,444
|
py
|
Python
|
lib/galaxy/webapps/galaxy/controllers/dataset.py
|
Galaxyinternship/Galaxy
|
204be086a8c16d6684584cefa9053ed7c86a1784
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/webapps/galaxy/controllers/dataset.py
|
Galaxyinternship/Galaxy
|
204be086a8c16d6684584cefa9053ed7c86a1784
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/webapps/galaxy/controllers/dataset.py
|
Galaxyinternship/Galaxy
|
204be086a8c16d6684584cefa9053ed7c86a1784
|
[
"CC-BY-3.0"
] | null | null | null |
import logging
import os
import urllib
from markupsafe import escape
import paste.httpexceptions
from six import string_types, text_type
from sqlalchemy import false, true
from galaxy import datatypes, model, util, web
from galaxy import managers
from galaxy.datatypes.display_applications.util import decode_dataset_user, encode_dataset_user
from galaxy.model.item_attrs import UsesAnnotations, UsesItemRatings
from galaxy.util import inflector, smart_str
from galaxy.util.sanitize_html import sanitize_html
from galaxy.web.base.controller import BaseUIController, ERROR, SUCCESS, url_for, UsesExtendedMetadataMixin
from galaxy.web.framework.helpers import grids, iff, time_ago, to_unicode
from galaxy.tools.errors import EmailErrorReporter
log = logging.getLogger( __name__ )

# Archive formats supported for dataset downloads; 'zip' is offered only
# when zlib is importable in this interpreter.
comptypes = []
try:
    import zlib  # noqa: F401
    comptypes.append( 'zip' )
except ImportError:
    pass
class HistoryDatasetAssociationListGrid( grids.Grid ):
    """Grid listing the current user's saved (visible, non-deleted) HDAs."""

    # Custom columns for grid.
    class HistoryColumn( grids.GridColumn ):
        # Renders the (escaped) name of the history owning the dataset.
        def get_value( self, trans, grid, hda):
            return escape(hda.history.name)

    class StatusColumn( grids.GridColumn ):
        # Shows "deleted" for deleted datasets, empty otherwise.
        def get_value( self, trans, grid, hda ):
            if hda.deleted:
                return "deleted"
            return ""

        def get_accepted_filters( self ):
            """ Returns a list of accepted filters for this column. """
            accepted_filter_labels_and_vals = { "Active" : "False", "Deleted" : "True", "All": "All" }
            accepted_filters = []
            for label, val in accepted_filter_labels_and_vals.items():
                args = { self.key: val }
                accepted_filters.append( grids.GridColumnFilter( label, args) )
            return accepted_filters

    # Grid definition
    title = "Saved Datasets"
    model_class = model.HistoryDatasetAssociation
    template = '/dataset/grid.mako'
    default_sort_key = "-update_time"
    columns = [
        grids.TextColumn( "Name", key="name",
                          # Link name to dataset's history.
                          link=( lambda item: iff( item.history.deleted, None, dict( operation="switch", id=item.id ) ) ), filterable="advanced", attach_popup=True ),
        HistoryColumn( "History", key="history", sortable=False, target="inbound",
                       link=( lambda item: iff( item.history.deleted, None, dict( operation="switch_history", id=item.id ) ) ) ),
        # NOTE(review): grid_name below is misspelled ("Assocation") but it is a
        # persisted lookup key — confirm before correcting it anywhere.
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryDatasetAssociationTagAssociation, filterable="advanced", grid_name="HistoryDatasetAssocationListGrid" ),
        StatusColumn( "Status", key="deleted", attach_popup=False ),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
    ]
    # Free-text search across the Name and Tags columns.
    columns.append(
        grids.MulticolFilterColumn(
            "Search",
            cols_to_filter=[ columns[0], columns[2] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
    operations = [
        grids.GridOperation( "Copy to current history", condition=( lambda item: not item.deleted ), async_compatible=True ),
    ]
    standard_filters = []
    default_filter = dict( name="All", deleted="False", tags="All" )
    preserve_state = False
    use_async = True
    use_paging = True
    num_rows_per_page = 50

    def build_initial_query( self, trans, **kwargs ):
        # Show user's datasets that are not deleted, not in deleted histories, and not hidden.
        # To filter HDAs by user, need to join model class/HDA and History table so that it is
        # possible to filter by user. However, for dictionary-based filtering to work, need a
        # primary table for the query.
        return trans.sa_session.query( self.model_class ).select_from( self.model_class.table.join( model.History.table ) ) \
            .filter( model.History.user == trans.user ) \
            .filter( self.model_class.deleted == false() ) \
            .filter( model.History.deleted == false() ) \
            .filter( self.model_class.visible == true() )
class DatasetInterface( BaseUIController, UsesAnnotations, UsesItemRatings, UsesExtendedMetadataMixin ):
stored_list_grid = HistoryDatasetAssociationListGrid()
def __init__( self, app ):
super( DatasetInterface, self ).__init__( app )
self.history_manager = managers.histories.HistoryManager( app )
self.hda_manager = managers.hdas.HDAManager( app )
def _get_job_for_dataset( self, trans, dataset_id ):
'''
Return the job for the given dataset. This will throw an error if the
dataset is either nonexistent or inaccessible to the user.
'''
hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( dataset_id ) )
assert hda and self._can_access_dataset( trans, hda )
return hda.creating_job
def _can_access_dataset( self, trans, dataset_association, allow_admin=True, additional_roles=None ):
roles = trans.get_current_user_roles()
if additional_roles:
roles = roles + additional_roles
return ( allow_admin and trans.user_is_admin() ) or trans.app.security_agent.can_access_dataset( roles, dataset_association.dataset )
@web.expose
def errors( self, trans, id ):
hda = trans.sa_session.query( model.HistoryDatasetAssociation ).get( self.decode_id( id ) )
if not hda or not self._can_access_dataset( trans, hda ):
return trans.show_error_message( "Either this dataset does not exist or you do not have permission to access it." )
return trans.fill_template( "dataset/errors.mako", hda=hda )
@web.expose
def stdout( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
stdout = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
stdout = job.stdout
except:
stdout = "Invalid dataset ID or you are not allowed to access this dataset"
return smart_str( stdout )
@web.expose
# TODO: Migrate stderr and stdout to use _get_job_for_dataset; it wasn't tested.
def stderr( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
stderr = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
stderr = job.stderr
except:
stderr = "Invalid dataset ID or you are not allowed to access this dataset"
return smart_str( stderr )
@web.expose
def exit_code( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
exit_code = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
exit_code = job.exit_code
except:
exit_code = "Invalid dataset ID or you are not allowed to access this dataset"
return exit_code
@web.expose
def report_error( self, trans, id, email='', message="", **kwd ):
biostar_report = 'biostar' in str( kwd.get( 'submit_error_report') ).lower()
if biostar_report:
return trans.response.send_redirect( url_for( controller='biostar', action='biostar_tool_bug_report', hda=id, email=email, message=message ) )
try:
error_reporter = EmailErrorReporter( id, trans.app )
error_reporter.send_report( user=trans.user, email=email, message=message )
return trans.show_ok_message( "Your error report has been sent" )
except Exception as e:
return trans.show_error_message( "An error occurred sending the report by email: %s" % str( e ) )
@web.expose
def default(self, trans, dataset_id=None, **kwd):
return 'This link may not be followed from within Galaxy.'
@web.expose
def get_metadata_file(self, trans, hda_id, metadata_name):
""" Allows the downloading of metadata files associated with datasets (eg. bai index for bam files) """
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( hda_id ) )
if not data or not self._can_access_dataset( trans, data ):
return trans.show_error_message( "You are not allowed to access this dataset" )
fname = ''.join(c in util.FILENAME_VALID_CHARS and c or '_' for c in data.name)[0:150]
file_ext = data.metadata.spec.get(metadata_name).get("file_ext", metadata_name)
trans.response.headers["Content-Type"] = "application/octet-stream"
trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (data.hid, fname, file_ext)
return open(data.metadata.get(metadata_name).file_name)
def _check_dataset(self, trans, hda_id):
# DEPRECATION: We still support unencoded ids for backward compatibility
try:
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( hda_id) )
if data is None:
raise ValueError( 'Invalid reference dataset id: %s.' % hda_id)
except:
try:
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( int( hda_id ) )
except:
data = None
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( hda_id ) )
if not self._can_access_dataset( trans, data ):
return trans.show_error_message( "You are not allowed to access this dataset" )
if data.purged:
return trans.show_error_message( "The dataset you are attempting to view has been purged." )
if data.deleted and not ( trans.user_is_admin() or ( data.history and trans.get_user() == data.history.user ) ):
return trans.show_error_message( "The dataset you are attempting to view has been deleted." )
if data.state == trans.model.Dataset.states.UPLOAD:
return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to view it." )
return data
@web.expose
@web.json
def transfer_status(self, trans, dataset_id, filename=None):
""" Primarily used for the S3ObjectStore - get the status of data transfer
if the file is not in cache """
data = self._check_dataset(trans, dataset_id)
if isinstance( data, string_types ):
return data
log.debug( "Checking transfer status for dataset %s..." % data.dataset.id )
# Pulling files in extra_files_path into cache is not handled via this
# method but that's primarily because those files are typically linked to
# through tool's output page anyhow so tying a JavaScript event that will
# call this method does not seem doable?
if data.dataset.external_filename:
return True
else:
return trans.app.object_store.file_ready(data.dataset)
@web.expose
def display(self, trans, dataset_id=None, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd):
data = self._check_dataset(trans, dataset_id)
if not isinstance( data, trans.app.model.DatasetInstance ):
return data
# Ensure offset is an integer before passing through to datatypes.
if offset:
offset = int(offset)
# Ensure ck_size is an integer before passing through to datatypes.
if ck_size:
ck_size = int(ck_size)
return data.datatype.display_data(trans, data, preview, filename, to_ext, offset=offset, ck_size=ck_size, **kwd)
@web.expose
    def edit(self, trans, dataset_id=None, filename=None, hid=None, **kwd):
        """Allows user to modify parameters of an HDA.

        Dispatches on which form button arrived in ``kwd`` (``change`` datatype,
        ``save`` attributes, ``detect`` metadata, ``convert_data``,
        ``update_roles_button``) and otherwise renders the edit-attributes form.
        The HDA is located either by ``hid`` (position in current history) or by
        encoded ``dataset_id``.
        """
        message = None
        status = 'done'
        refresh_frames = []
        error = False
        def __ok_to_edit_metadata( dataset_id ):
            # Prevent modifying metadata when the dataset is queued or running
            # as a job input/output: only jobs in OK/ERROR/DELETED state are safe.
            # This code could be more efficient, i.e. by using mappers, but to
            # prevent slowing down loading a History panel, we'll leave the code here for now.
            for job_to_dataset_association in trans.sa_session.query(
                    self.app.model.JobToInputDatasetAssociation ) \
                    .filter_by( dataset_id=dataset_id ) \
                    .all() \
                    + trans.sa_session.query( self.app.model.JobToOutputDatasetAssociation ) \
                    .filter_by( dataset_id=dataset_id ) \
                    .all():
                if job_to_dataset_association.job.state not in [ job_to_dataset_association.job.states.OK, job_to_dataset_association.job.states.ERROR, job_to_dataset_association.job.states.DELETED ]:
                    return False
            return True
        # Locate the HDA: by hid within the current history, or by encoded id.
        if hid is not None:
            history = trans.get_history()
            # TODO: hid handling
            data = history.datasets[ int( hid ) - 1 ]
            id = None
        elif dataset_id is not None:
            id = self.decode_id( dataset_id )
            data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
        else:
            trans.log_event( "dataset_id and hid are both None, cannot load a dataset to edit" )
            return trans.show_error_message( "You must provide a history dataset id to edit" )
        if data is None:
            trans.log_event( "Problem retrieving dataset (encoded: %s, decoded: %s) with history id %s." % ( str( dataset_id ), str( id ), str( hid ) ) )
            return trans.show_error_message( "History dataset id is invalid" )
        if dataset_id is not None and data.history.user is not None and data.history.user != trans.user:
            trans.log_event( "User attempted to edit an HDA they do not own (encoded: %s, decoded: %s)" % ( dataset_id, id ) )
            # Do not reveal the dataset's existence
            return trans.show_error_message( "History dataset id is invalid" )
        current_user_roles = trans.get_current_user_roles()
        if data.history.user and not data.dataset.has_manage_permissions_roles( trans ):
            # Permission setting related to DATASET_MANAGE_PERMISSIONS was broken for a period of time,
            # so it is possible that some Datasets have no roles associated with the DATASET_MANAGE_PERMISSIONS
            # permission. In this case, we'll reset this permission to the hda user's private role.
            manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
            permissions = { manage_permissions_action : [ trans.app.security_agent.get_private_user_role( data.history.user ) ] }
            trans.app.security_agent.set_dataset_permission( data.dataset, permissions )
        if self._can_access_dataset( trans, data ):
            if data.state == trans.model.Dataset.states.UPLOAD:
                return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to edit its metadata." )
            params = util.Params( kwd, sanitize=False )
            if params.change:
                # The user clicked the Save button on the 'Change data type' form
                if data.datatype.allow_datatype_change and trans.app.datatypes_registry.get_datatype_by_extension( params.datatype ).allow_datatype_change:
                    # prevent modifying datatype when dataset is queued or running as input/output
                    if not __ok_to_edit_metadata( data.id ):
                        message = "This dataset is currently being used as input or output. You cannot change datatype until the jobs have completed or you have canceled them."
                        error = True
                    else:
                        trans.app.datatypes_registry.change_datatype( data, params.datatype )
                        trans.sa_session.flush()
                        trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={ 'input1': data }, overwrite=False )  # overwrite is False as per existing behavior
                        message = "Changed the type of dataset '%s' to %s" % ( to_unicode( data.name ), params.datatype )
                        refresh_frames = ['history']
                else:
                    message = "You are unable to change datatypes in this manner. Changing %s to %s is not allowed." % ( data.extension, params.datatype )
                    error = True
            elif params.save:
                # The user clicked the Save button on the 'Edit Attributes' form
                data.name = params.name if params.name else ''
                data.info = params.info if params.info else ''
                message = ''
                if __ok_to_edit_metadata( data.id ):
                    # The following for loop will save all metadata_spec items
                    for name, spec in data.datatype.metadata_spec.items():
                        if spec.get("readonly"):
                            continue
                        # Form fields: "is_<name>" is the optional-checkbox value,
                        # "or_<name>" is a free-text alternative to the main field.
                        optional = params.get("is_" + name, None)
                        other = params.get("or_" + name, None)
                        if optional and optional == '__NOTHING__':
                            # optional element... == '__NOTHING__' actually means it is NOT checked (and therefore omitted)
                            setattr(data.metadata, name, None)
                        else:
                            if other:
                                setattr( data.metadata, name, other )
                            else:
                                setattr( data.metadata, name, spec.unwrap( params.get(name, None) ) )
                    data.datatype.after_setting_metadata( data )
                    # Sanitize annotation before adding it.
                    if params.annotation:
                        annotation = sanitize_html( params.annotation, 'utf-8', 'text/html' )
                        self.add_item_annotation( trans.sa_session, trans.get_user(), data, annotation )
                    # This block on controller code is inactive until the 'extended_metadata' edit box is added back into the UI
                    # Add or delete extended metadata
                    # if params.extended_metadata:
                    #     em_string = params.extended_metadata
                    #     if len(em_string):
                    #         em_payload = None
                    #         try:
                    #             em_payload = loads(em_string)
                    #         except Exception as e:
                    #             message = 'Invalid JSON input'
                    #             error = True
                    #         if em_payload is not None:
                    #             if data is not None:
                    #                 ex_obj = self.get_item_extended_metadata_obj(trans, data)
                    #                 if ex_obj is not None:
                    #                     self.unset_item_extended_metadata_obj(trans, data)
                    #                     self.delete_extended_metadata(trans, ex_obj)
                    #                 ex_obj = self.create_extended_metadata(trans, em_payload)
                    #                 self.set_item_extended_metadata_obj(trans, data, ex_obj)
                    #                 message = "Updated Extended metadata '%s'." % data.name
                    #                 status = 'done'
                    #             else:
                    #                 message = "data not found"
                    #                 error = True
                    # else:
                    #     if data is not None:
                    #         ex_obj = self.get_item_extended_metadata_obj(trans, data)
                    #         if ex_obj is not None:
                    #             self.unset_item_extended_metadata_obj(trans, data)
                    #             self.delete_extended_metadata(trans, ex_obj)
                    #     message = "Deleted Extended metadata '%s'." % data.name
                    #     status = 'done'
                    # If setting metadata previously failed and all required elements have now been set, clear the failed state.
                    if data._state == trans.model.Dataset.states.FAILED_METADATA and not data.missing_meta():
                        data._state = None
                    trans.sa_session.flush()
                    message = "Attributes updated%s" % message
                    refresh_frames = ['history']
                else:
                    trans.sa_session.flush()
                    message = "Attributes updated, but metadata could not be changed because this dataset is currently being used as input or output. You must cancel or wait for these jobs to complete before changing metadata."
                    status = "warning"
                    refresh_frames = ['history']
            elif params.detect:
                # The user clicked the Auto-detect button on the 'Edit Attributes' form
                # prevent modifying metadata when dataset is queued or running as input/output
                if not __ok_to_edit_metadata( data.id ):
                    message = "This dataset is currently being used as input or output. You cannot change metadata until the jobs have completed or you have canceled them."
                    error = True
                else:
                    for name, spec in data.metadata.spec.items():
                        # We need to be careful about the attributes we are resetting
                        if name not in [ 'name', 'info', 'dbkey', 'base_name' ]:
                            if spec.get( 'default' ):
                                setattr( data.metadata, name, spec.unwrap( spec.get( 'default' ) ) )
                    message = 'Attributes have been queued to be updated'
                    trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={ 'input1': data } )
                    trans.sa_session.flush()
                    refresh_frames = ['history']
            elif params.convert_data:
                target_type = kwd.get("target_type", None)
                if target_type:
                    message = data.datatype.convert_dataset(trans, data, target_type)
                    refresh_frames = ['history']
            elif params.update_roles_button:
                if not trans.user:
                    return trans.show_error_message( "You must be logged in if you want to change permissions." )
                if trans.app.security_agent.can_manage_dataset( current_user_roles, data.dataset ):
                    access_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action )
                    manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
                    # The user associated the DATASET_ACCESS permission on the dataset with 1 or more roles. We
                    # need to ensure that they did not associate roles that would cause accessibility problems.
                    permissions, in_roles, error, message = \
                        trans.app.security_agent.derive_roles_from_access( trans, data.dataset.id, 'root', **kwd )
                    if error:
                        # Keep the original role associations for the DATASET_ACCESS permission on the dataset.
                        permissions[ access_action ] = data.dataset.get_access_roles( trans )
                        status = 'error'
                    else:
                        error = trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
                        if error:
                            message += error
                            status = 'error'
                        else:
                            message = 'Your changes completed successfully.'
                    trans.sa_session.refresh( data.dataset )
                else:
                    message = "You are not authorized to change this dataset's permissions"
                    error = True
            else:
                if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
                    # Copy dbkey into metadata, for backwards compatability
                    # This looks like it does nothing, but getting the dbkey
                    # returns the metadata dbkey unless it is None, in which
                    # case it resorts to the old dbkey. Setting the dbkey
                    # sets it properly in the metadata
                    # This is likely no longer required, since the dbkey exists entirely within metadata (the old_dbkey field is gone): REMOVE ME?
                    data.metadata.dbkey = data.dbkey
            # let's not overwrite the imported datatypes module with the variable datatypes?
            # the built-in 'id' is overwritten in lots of places as well
            ldatatypes = [ dtype_name for dtype_name, dtype_value in trans.app.datatypes_registry.datatypes_by_extension.iteritems() if dtype_value.allow_datatype_change ]
            ldatatypes.sort()
            all_roles = trans.app.security_agent.get_legitimate_roles( trans, data.dataset, 'root' )
            if error:
                status = 'error'
            return trans.fill_template( "/dataset/edit_attributes.mako",
                                        data=data,
                                        data_annotation=self.get_item_annotation_str( trans.sa_session, trans.user, data ),
                                        datatypes=ldatatypes,
                                        current_user_roles=current_user_roles,
                                        all_roles=all_roles,
                                        message=message,
                                        status=status,
                                        dataset_id=dataset_id,
                                        refresh_frames=refresh_frames )
        else:
            return trans.show_error_message( "You do not have permission to edit this dataset's ( id: %s ) information." % str( dataset_id ) )
@web.expose
@web.require_login( "see all available datasets" )
    def list( self, trans, **kwargs ):
        """List all available datasets.

        Supports two grid operations on selected HDA ids: "switch"/
        "switch_history" (change current history to the one containing the
        dataset) and "copy to current history". Renders the stored list grid.
        """
        status = message = None
        if 'operation' in kwargs:
            operation = kwargs['operation'].lower()
            hda_ids = util.listify( kwargs.get( 'id', [] ) )
            # Display no message by default
            status, message = None, None
            # Load the hdas and ensure they all belong to the current user
            hdas = []
            for encoded_hda_id in hda_ids:
                hda_id = self.decode_id( encoded_hda_id )
                hda = trans.sa_session.query( model.HistoryDatasetAssociation ).filter_by( id=hda_id ).first()
                if hda:
                    # Ensure history is owned by current user
                    if hda.history.user_id is not None and trans.user:
                        assert trans.user.id == hda.history.user_id, "HistoryDatasetAssocation does not belong to current user"
                    hdas.append( hda )
                else:
                    # Unknown ids are skipped rather than failing the whole operation.
                    log.warning( "Invalid history_dataset_association id '%r' passed to list", hda_id )
            if hdas:
                if operation == "switch" or operation == "switch_history":
                    # Switch to a history that the HDA resides in.
                    # Convert hda to histories.
                    histories = []
                    for hda in hdas:
                        histories.append( hda.history )
                    # Use history controller to switch the history. TODO: is this reasonable?
                    status, message = trans.webapp.controllers['history']._list_switch( trans, histories )
                    # Current history changed, refresh history frame; if switching to a dataset, set hda seek.
                    trans.template_context['refresh_frames'] = ['history']
                    if operation == "switch":
                        hda_ids = [ trans.security.encode_id( hda.id ) for hda in hdas ]
                        trans.template_context[ 'seek_hda_ids' ] = hda_ids
                elif operation == "copy to current history":
                    #
                    # Copy datasets to the current history.
                    #
                    target_histories = [ trans.get_history() ]
                    # Reverse HDAs so that they appear in the history in the order they are provided.
                    hda_ids.reverse()
                    status, message = self._copy_datasets( trans, hda_ids, target_histories )
                    # Current history changed, refresh history frame.
                    trans.template_context['refresh_frames'] = ['history']
        # Render the list view
        return self.stored_list_grid( trans, status=status, message=message, **kwargs )
@web.expose
def imp( self, trans, dataset_id=None, **kwd ):
""" Import another user's dataset via a shared URL; dataset is added to user's current history. """
# Set referer message.
referer = trans.request.referer
if referer:
referer_message = "<a href='%s'>return to the previous page</a>" % escape(referer)
else:
referer_message = "<a href='%s'>go to Galaxy's start page</a>" % url_for( '/' )
# Error checking.
if not dataset_id:
return trans.show_error_message( "You must specify a dataset to import. You can %s." % referer_message, use_panels=True )
# Do import.
cur_history = trans.get_history( create=True )
status, message = self._copy_datasets( trans, [ dataset_id ], [ cur_history ], imported=True )
message = "Dataset imported. <br>You can <a href='%s'>start using the dataset</a> or %s." % ( url_for('/'), referer_message )
return trans.show_message( message, type=status, use_panels=True )
@web.expose
@web.json
@web.require_login( "use Galaxy datasets" )
def get_name_and_link_async( self, trans, id=None ):
""" Returns dataset's name and link. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
return_dict = { "name" : dataset.name, "link" : url_for( controller='dataset', action="display_by_username_and_slug", username=dataset.history.user.username, slug=trans.security.encode_id( dataset.id ) ) }
return return_dict
@web.expose
def get_embed_html_async( self, trans, id ):
""" Returns HTML for embedding a dataset in a page. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset:
return "Embedded Dataset '%s'" % dataset.name
@web.expose
@web.require_login( "use Galaxy datasets" )
def set_accessible_async( self, trans, id=None, accessible=False ):
""" Does nothing because datasets do not have an importable/accessible attribute. This method could potentially set another attribute. """
return
@web.expose
@web.require_login( "rate items" )
@web.json
def rate_async( self, trans, id, rating ):
""" Rate a dataset asynchronously and return updated community data. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
return trans.show_error_message( "The specified dataset does not exist." )
# Rate dataset.
self.rate_item( trans.sa_session, trans.get_user(), dataset, rating )
return self.get_ave_item_rating_data( trans.sa_session, dataset )
@web.expose
def display_by_username_and_slug( self, trans, username, slug, filename=None, preview=True ):
""" Display dataset by username and slug; because datasets do not yet have slugs, the slug is the dataset's id. """
id = slug
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset:
# Filename used for composite types.
if filename:
return self.display( trans, dataset_id=slug, filename=filename)
truncated, dataset_data = self.hda_manager.text_data( dataset, preview )
dataset.annotation = self.get_item_annotation_str( trans.sa_session, dataset.history.user, dataset )
# If dataset is chunkable, get first chunk.
first_chunk = None
if dataset.datatype.CHUNKABLE:
first_chunk = dataset.datatype.get_chunk(trans, dataset, 0)
# If data is binary or an image, stream without template; otherwise, use display template.
# TODO: figure out a way to display images in display template.
if isinstance(dataset.datatype, datatypes.binary.Binary) or isinstance(dataset.datatype, datatypes.images.Image) or isinstance(dataset.datatype, datatypes.text.Html):
trans.response.set_content_type( dataset.get_mime() )
return open( dataset.file_name )
else:
# Get rating data.
user_item_rating = 0
if trans.get_user():
user_item_rating = self.get_user_item_rating( trans.sa_session, trans.get_user(), dataset )
if user_item_rating:
user_item_rating = user_item_rating.rating
else:
user_item_rating = 0
ave_item_rating, num_ratings = self.get_ave_item_rating_data( trans.sa_session, dataset )
return trans.fill_template_mako( "/dataset/display.mako", item=dataset, item_data=dataset_data,
truncated=truncated, user_item_rating=user_item_rating,
ave_item_rating=ave_item_rating, num_ratings=num_ratings,
first_chunk=first_chunk )
else:
raise web.httpexceptions.HTTPNotFound()
@web.expose
def get_item_content_async( self, trans, id ):
""" Returns item content in HTML format. """
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset is None:
raise web.httpexceptions.HTTPNotFound()
truncated, dataset_data = self.hda_manager.text_data( dataset, preview=True )
# Get annotation.
dataset.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, dataset )
return trans.stream_template_mako( "/dataset/item_content.mako", item=dataset, item_data=dataset_data, truncated=truncated )
@web.expose
def annotate_async( self, trans, id, new_annotation=None, **kwargs ):
# TODO:?? why is this an access check only?
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
web.httpexceptions.HTTPNotFound()
if dataset and new_annotation:
# Sanitize annotation before adding it.
new_annotation = sanitize_html( new_annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), dataset, new_annotation )
trans.sa_session.flush()
return new_annotation
@web.expose
def get_annotation_async( self, trans, id ):
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
web.httpexceptions.HTTPNotFound()
annotation = self.get_item_annotation_str( trans.sa_session, trans.user, dataset )
if annotation and isinstance( annotation, text_type ):
annotation = annotation.encode( 'ascii', 'replace' ) # paste needs ascii here
return annotation
@web.expose
def display_at( self, trans, dataset_id, filename=None, **kwd ):
"""Sets up a dataset permissions so it is viewable at an external site"""
if not trans.app.config.enable_old_display_applications:
return trans.show_error_message( "This method of accessing external display applications has been disabled by a Galaxy administrator." )
site = filename
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataset_id )
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
if 'display_url' not in kwd or 'redirect_url' not in kwd:
return trans.show_error_message( 'Invalid parameters specified for "display at" link, please contact a Galaxy administrator' )
try:
redirect_url = kwd['redirect_url'] % urllib.quote_plus( kwd['display_url'] )
except:
redirect_url = kwd['redirect_url'] # not all will need custom text
if trans.app.security_agent.dataset_is_public( data.dataset ):
return trans.response.send_redirect( redirect_url ) # anon access already permitted by rbac
if self._can_access_dataset( trans, data ):
trans.app.host_security_agent.set_dataset_permissions( data, trans.user, site )
return trans.response.send_redirect( redirect_url )
else:
return trans.show_error_message( "You are not allowed to view this dataset at external sites. Please contact your Galaxy administrator to acquire management permissions for this dataset." )
@web.expose
    def display_application( self, trans, dataset_id=None, user_id=None, app_name=None, link_name=None, app_action=None, action_param=None, action_param_extra=None, **kwds ):
        """Access to external display applications.

        Dispatches on ``app_action``: 'data'/'param' serve a parameter value
        (raw file content or its string form), None redirects to the display
        link's URL (or triggers preparation when not yet ready); any other
        value is an error.
        """
        # Build list of parameters to pass in to display application logic (app_kwds)
        app_kwds = {}
        for name, value in dict(kwds).iteritems():  # clone kwds because we remove stuff as we go.
            if name.startswith( "app_" ):
                app_kwds[ name[ len( "app_" ): ] ] = value
                del kwds[ name ]
        if kwds:
            log.debug( "Unexpected Keywords passed to display_application: %s" % kwds )  # route memory?
        # decode ids
        data, user = decode_dataset_user( trans, dataset_id, user_id )
        if not data:
            raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
        if user is None:
            user = trans.user
        if user:
            user_roles = user.all_roles()
        else:
            user_roles = []
        # Decode application name and link name
        app_name = urllib.unquote_plus( app_name )
        link_name = urllib.unquote_plus( link_name )
        if None in [ app_name, link_name ]:
            return trans.show_error_message( "A display application name and link name must be provided." )
        if self._can_access_dataset( trans, data, additional_roles=user_roles ):
            msg = []
            preparable_steps = []
            refresh = False
            display_app = trans.app.datatypes_registry.display_applications.get( app_name )
            if not display_app:
                log.debug( "Unknown display application has been requested: %s", app_name )
                return paste.httpexceptions.HTTPNotFound( "The requested display application (%s) is not available." % ( app_name ) )
            dataset_hash, user_hash = encode_dataset_user( trans, data, user )
            try:
                display_link = display_app.get_link( link_name, data, dataset_hash, user_hash, trans, app_kwds )
            except Exception as e:
                log.debug( "Error generating display_link: %s", e )
                # User can sometimes recover from, e.g. conversion errors by fixing input metadata, so use conflict
                return paste.httpexceptions.HTTPConflict( "Error generating display_link: %s" % e )
            if not display_link:
                log.debug( "Unknown display link has been requested: %s", link_name )
                return paste.httpexceptions.HTTPNotFound( "Unknown display link has been requested: %s" % link_name )
            # Dataset state gates access: ERROR/deleted are fatal, non-OK waits.
            if data.state == data.states.ERROR:
                msg.append( ( 'This dataset is in an error state, you cannot view it at an external display application.', 'error' ) )
            elif data.deleted:
                msg.append( ( 'This dataset has been deleted, you cannot view it at an external display application.', 'error' ) )
            elif data.state != data.states.OK:
                msg.append( ( 'You must wait for this dataset to be created before you can view it at an external display application.', 'info' ) )
                refresh = True
            else:
                # We have permissions, dataset is not deleted and is in OK state, allow access
                if display_link.display_ready():
                    if app_action in [ 'data', 'param' ]:
                        assert action_param, "An action param must be provided for a data or param action"
                        # data is used for things with filenames that could be passed off to a proxy
                        # in case some display app wants all files to be in the same 'directory',
                        # data can be forced to param, but not the other way (no filename for other direction)
                        # get param name from url param name
                        try:
                            action_param = display_link.get_param_name_by_url( action_param )
                        except ValueError as e:
                            log.debug( e )
                            return paste.httpexceptions.HTTPNotFound( str( e ) )
                        value = display_link.get_param_value( action_param )
                        assert value, "An invalid parameter name was provided: %s" % action_param
                        assert value.parameter.viewable, "This parameter is not viewable."
                        if value.parameter.type == 'data':
                            # Serve raw file content (optionally an extra file of the dataset).
                            try:
                                if action_param_extra:
                                    assert value.parameter.allow_extra_files_access, "Extra file content requested (%s), but allow_extra_files_access is False." % ( action_param_extra )
                                    file_name = os.path.join( value.extra_files_path, action_param_extra )
                                else:
                                    file_name = value.file_name
                                content_length = os.path.getsize( file_name )
                                rval = open( file_name )
                            except OSError as e:
                                log.debug( "Unable to access requested file in display application: %s", e )
                                return paste.httpexceptions.HTTPNotFound( "This file is no longer available." )
                        else:
                            rval = str( value )
                            content_length = len( rval )
                        trans.response.set_content_type( value.mime_type( action_param_extra=action_param_extra ) )
                        trans.response.headers[ 'Content-Length' ] = content_length
                        return rval
                    elif app_action is None:
                        # redirect user to url generated by display link
                        # Fix for Safari caching display links, which can change if the underlying dataset has an attribute change, e.g. name, metadata, etc
                        trans.response.headers[ 'Cache-Control' ] = [ 'no-cache', 'max-age=0', 'no-store', 'must-revalidate' ]
                        return trans.response.send_redirect( display_link.display_url() )
                    else:
                        msg.append( ( 'Invalid action provided: %s' % app_action, 'error' ) )
                else:
                    if app_action is None:
                        if trans.history != data.history:
                            msg.append( ( 'You must import this dataset into your current history before you can view it at the desired display application.', 'error' ) )
                        else:
                            refresh = True
                            msg.append( ( 'Launching this display application required additional datasets to be generated, you can view the status of these jobs below. ', 'info' ) )
                            if not display_link.preparing_display():
                                display_link.prepare_display()
                            preparable_steps = display_link.get_prepare_steps()
                    else:
                        raise Exception( 'Attempted a view action (%s) on a non-ready display application' % app_action )
            return trans.fill_template_mako( "dataset/display_application/display.mako",
                                             msg=msg,
                                             display_app=display_app,
                                             display_link=display_link,
                                             refresh=refresh,
                                             preparable_steps=preparable_steps )
        return trans.show_error_message( 'You do not have permission to view this dataset at an external display application.' )
def _delete( self, trans, dataset_id ):
message = None
status = 'done'
id = None
try:
id = self.decode_id( dataset_id )
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
assert hda, 'Invalid HDA: %s' % id
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in trans.history.datasets, "Data does not belong to current history"
# Mark deleted and cleanup
hda.mark_deleted()
hda.clear_associated_files()
trans.log_event( "Dataset id %s marked as deleted" % str(id) )
self.hda_manager.stop_creating_job( hda )
trans.sa_session.flush()
except Exception as e:
msg = 'HDA deletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
log.exception( msg + ': ' + str( e ) )
trans.log_event( msg )
message = 'Dataset deletion failed'
status = 'error'
return ( message, status )
def _undelete( self, trans, dataset_id ):
message = None
status = 'done'
id = None
try:
id = self.decode_id( dataset_id )
history = trans.get_history()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
assert hda and hda.undeletable, 'Invalid HDA: %s' % id
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark undeleted
hda.mark_undeleted()
trans.sa_session.flush()
trans.log_event( "Dataset id %s has been undeleted" % str(id) )
except Exception:
msg = 'HDA undeletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
log.exception( msg )
trans.log_event( msg )
message = 'Dataset undeletion failed'
status = 'error'
return ( message, status )
def _unhide( self, trans, dataset_id ):
try:
id = self.decode_id( dataset_id )
except:
return False
history = trans.get_history()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
if hda:
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark undeleted
hda.mark_unhidden()
trans.sa_session.flush()
trans.log_event( "Dataset id %s has been unhidden" % str(id) )
return True
return False
def _purge( self, trans, dataset_id ):
message = None
status = 'done'
try:
id = self.decode_id( dataset_id )
user = trans.get_user()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
# Invalid HDA
assert hda, 'Invalid history dataset ID'
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
# If the user is anonymous, make sure the HDA is owned by the current session.
if not user:
current_history_id = trans.galaxy_session.current_history_id
assert topmost_parent.history.id == current_history_id, 'Data does not belong to current user'
# If the user is known, make sure the HDA is owned by the current user.
else:
assert topmost_parent.history.user == user, 'Data does not belong to current user'
# Ensure HDA is deleted
hda.deleted = True
# HDA is purgeable
# Decrease disk usage first
if user:
user.adjust_total_disk_usage(-hda.quota_amount(user))
# Mark purged
hda.purged = True
trans.sa_session.add( hda )
trans.log_event( "HDA id %s has been purged" % hda.id )
trans.sa_session.flush()
# Don't delete anything if there are active HDAs or any LDDAs, even if
# the LDDAs are deleted. Let the cleanup scripts get it in the latter
# case.
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
trans.sa_session.add( hda.dataset )
except:
log.exception( 'Unable to purge dataset (%s) on purge of HDA (%s):' % ( hda.dataset.id, hda.id ) )
trans.sa_session.flush()
except Exception as exc:
msg = 'HDA purge failed (encoded: %s, decoded: %s): %s' % ( dataset_id, id, exc )
log.exception( msg )
trans.log_event( msg )
message = 'Dataset removal from disk failed'
status = 'error'
return ( message, status )
@web.expose
def delete( self, trans, dataset_id, filename, show_deleted_on_refresh=False ):
message, status = self._delete( trans, dataset_id )
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )
@web.expose
def delete_async( self, trans, dataset_id, filename ):
message, status = self._delete( trans, dataset_id )
if status == 'done':
return "OK"
else:
raise Exception( message )
@web.expose
def undelete( self, trans, dataset_id, filename ):
message, status = self._undelete( trans, dataset_id )
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=True, message=message, status=status ) )
@web.expose
def undelete_async( self, trans, dataset_id, filename ):
message, status = self._undelete( trans, dataset_id )
if status == 'done':
return "OK"
else:
raise Exception( message )
@web.expose
def unhide( self, trans, dataset_id, filename ):
if self._unhide( trans, dataset_id ):
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_hidden=True ) )
raise Exception( "Error unhiding" )
@web.expose
def purge( self, trans, dataset_id, filename, show_deleted_on_refresh=False ):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge( trans, dataset_id )
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )
@web.expose
def purge_async( self, trans, dataset_id, filename ):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge( trans, dataset_id )
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
if status == 'done':
return "OK"
else:
raise Exception( message )
    @web.expose
    def show_params( self, trans, dataset_id=None, from_noframe=None, **kwd ):
        """
        Show the parameters used for the job associated with an HDA
        """
        try:
            hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( dataset_id ) )
        except ValueError:
            # decode_id raises ValueError for a malformed encoded id.
            hda = None
        if not hda:
            raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % escape( str( dataset_id ) ) )
        if not self._can_access_dataset( trans, hda ):
            return trans.show_error_message( "You are not allowed to access this dataset" )
        # Get the associated job, if any. If this hda was copied from another,
        # we need to find the job that created the origial dataset association.
        params_objects = None
        job = None
        tool = None
        upgrade_messages = {}
        has_parameter_errors = False
        inherit_chain = hda.source_dataset_chain
        if inherit_chain:
            # Walk back to the original HDA so we report the job that actually ran.
            job_dataset_association = inherit_chain[-1][0]
        else:
            job_dataset_association = hda
        if job_dataset_association.creating_job_associations:
            job = job_dataset_association.creating_job_associations[0].job
        if job:
            # Get the tool object
            try:
                # Load the tool
                toolbox = self.get_toolbox()
                tool = toolbox.get_tool( job.tool_id )
                assert tool is not None, 'Requested tool has not been loaded.'
                # Load parameter objects, if a parameter type has changed, it's possible for the value to no longer be valid
                try:
                    params_objects = job.get_param_values( trans.app, ignore_errors=False )
                except:
                    # Strict loading failed: fall back to best-effort values and
                    # collect upgrade messages for display.
                    params_objects = job.get_param_values( trans.app, ignore_errors=True )
                    # use different param_objects in the following line, since we want to display original values as much as possible
                    upgrade_messages = tool.check_and_update_param_values( job.get_param_values( trans.app, ignore_errors=True ),
                                                                          trans,
                                                                          update_values=False )
                    has_parameter_errors = True
            except:
                # NOTE(review): deliberate best-effort — any failure above leaves
                # tool/params empty and the template renders what it can.
                pass
        if job is None:
            return trans.show_error_message( "Job information is not available for this dataset." )
        # TODO: we should provide the basic values along with the objects, in order to better handle reporting of old values during upgrade
        return trans.fill_template( "show_params.mako",
                                    inherit_chain=inherit_chain,
                                    history=trans.get_history(),
                                    hda=hda,
                                    job=job,
                                    tool=tool,
                                    params_objects=params_objects,
                                    upgrade_messages=upgrade_messages,
                                    has_parameter_errors=has_parameter_errors )
    @web.expose
    def copy_datasets( self, trans, source_history=None, source_content_ids="", target_history_id=None, target_history_ids="", new_history_name="", do_copy=False, **kwd ):
        """
        Copy history contents (datasets and dataset collections) from a source
        history into one or more target histories, optionally creating a new
        target history named ``new_history_name``.  Renders the copy form; the
        copy itself only happens when ``do_copy`` is set.
        """
        user = trans.get_user()
        if source_history is not None:
            decoded_source_history_id = self.decode_id( source_history )
            history = self.history_manager.get_owned( decoded_source_history_id, trans.user, current_history=trans.history )
            current_history = trans.get_history()
        else:
            history = current_history = trans.get_history()
        refresh_frames = []
        if source_content_ids:
            if not isinstance( source_content_ids, list ):
                source_content_ids = source_content_ids.split(",")
            # Content ids arrive prefixed as "dataset|<id>" or "dataset_collection|<id>".
            encoded_dataset_collection_ids = [ s[ len("dataset_collection|"): ] for s in source_content_ids if s.startswith("dataset_collection|") ]
            encoded_dataset_ids = [ s[ len("dataset|"): ] for s in source_content_ids if s.startswith("dataset|") ]
            decoded_dataset_collection_ids = set(map( self.decode_id, encoded_dataset_collection_ids ))
            decoded_dataset_ids = set(map( self.decode_id, encoded_dataset_ids ))
        else:
            decoded_dataset_collection_ids = []
            decoded_dataset_ids = []
        if new_history_name:
            # A new history will be created below; ignore any selected targets.
            target_history_ids = []
        else:
            if target_history_id:
                target_history_ids = [ self.decode_id(target_history_id) ]
            elif target_history_ids:
                if not isinstance( target_history_ids, list ):
                    target_history_ids = target_history_ids.split(",")
                target_history_ids = list(set([ self.decode_id(h) for h in target_history_ids if h ]))
            else:
                target_history_ids = []
        done_msg = error_msg = ""
        new_history = None
        if do_copy:
            invalid_contents = 0
            if not ( decoded_dataset_ids or decoded_dataset_collection_ids ) or not ( target_history_ids or new_history_name ):
                error_msg = "You must provide both source datasets and target histories. "
            else:
                if new_history_name:
                    new_history = trans.app.model.History()
                    new_history.name = new_history_name
                    new_history.user = user
                    trans.sa_session.add( new_history )
                    trans.sa_session.flush()
                    target_history_ids.append( new_history.id )
                if user:
                    # Only histories actually owned by the user are valid targets.
                    target_histories = [ hist for hist in map( trans.sa_session.query( trans.app.model.History ).get, target_history_ids ) if hist is not None and hist.user == user ]
                else:
                    target_histories = [ history ]
                if len( target_histories ) != len( target_history_ids ):
                    error_msg = error_msg + "You do not have permission to add datasets to %i requested histories. " % ( len( target_history_ids ) - len( target_histories ) )
                # NOTE: Python 2 `map` returns a list here; `.extend`/`.sort` rely on that.
                source_contents = map( trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get, decoded_dataset_ids )
                source_contents.extend( map( trans.sa_session.query( trans.app.model.HistoryDatasetCollectionAssociation ).get, decoded_dataset_collection_ids ) )
                source_contents.sort(key=lambda content: content.hid)
                for content in source_contents:
                    if content is None:
                        error_msg = error_msg + "You tried to copy a dataset that does not exist. "
                        invalid_contents += 1
                    elif content.history != history:
                        error_msg = error_msg + "You tried to copy a dataset which is not in your current history. "
                        invalid_contents += 1
                    else:
                        for hist in target_histories:
                            if content.history_content_type == "dataset":
                                hist.add_dataset( content.copy( copy_children=True ) )
                            else:
                                # Collections: copy the collection and its member datasets
                                # into the destination history.
                                copy_collected_datasets = True
                                copy_kwds = {}
                                if copy_collected_datasets:
                                    copy_kwds["element_destination"] = hist
                                hist.add_dataset_collection( content.copy( **copy_kwds ) )
                if current_history in target_histories:
                    refresh_frames = ['history']
                trans.sa_session.flush()
                hist_names_str = ", ".join( ['<a href="%s" target="_top">%s</a>' %
                                             ( url_for( controller="history", action="switch_to_history",
                                                        hist_id=trans.security.encode_id( hist.id ) ), escape(hist.name) )
                                             for hist in target_histories ] )
                num_source = len( source_content_ids ) - invalid_contents
                num_target = len(target_histories)
                done_msg = "%i %s copied to %i %s: %s." % (num_source, inflector.cond_plural(num_source, "dataset"), num_target, inflector.cond_plural(num_target, "history"), hist_names_str )
                trans.sa_session.refresh( history )
        # Re-query state for rendering the form (whether or not a copy happened).
        source_contents = history.active_contents
        target_histories = [history]
        if user:
            target_histories = user.active_histories
        return trans.fill_template( "/dataset/copy_view.mako",
                                    source_history=history,
                                    current_history=current_history,
                                    source_content_ids=source_content_ids,
                                    target_history_id=target_history_id,
                                    target_history_ids=target_history_ids,
                                    source_contents=source_contents,
                                    target_histories=target_histories,
                                    new_history_name=new_history_name,
                                    done_msg=done_msg,
                                    error_msg=error_msg,
                                    refresh_frames=refresh_frames )
    def _copy_datasets( self, trans, dataset_ids, target_histories, imported=False ):
        """ Helper method for copying datasets.

        Copies each (encoded) dataset id in ``dataset_ids`` into every history
        in ``target_histories``; when ``imported`` is set the copies are
        prefixed with "imported: ".  Returns a ``(status, message)`` tuple.
        """
        user = trans.get_user()
        done_msg = error_msg = ""
        invalid_datasets = 0
        if not dataset_ids or not target_histories:
            error_msg = "You must provide both source datasets and target histories."
        else:
            # User must own target histories to copy datasets to them.
            for history in target_histories:
                if user != history.user:
                    error_msg = error_msg + "You do not have permission to add datasets to %i requested histories. " % ( len( target_histories ) )
            for dataset_id in dataset_ids:
                decoded_id = self.decode_id( dataset_id )
                data = self.hda_manager.get_accessible( decoded_id, trans.user )
                data = self.hda_manager.error_if_uploading( data )
                if data is None:
                    error_msg = error_msg + "You tried to copy a dataset that does not exist or that you do not have access to. "
                    invalid_datasets += 1
                else:
                    for hist in target_histories:
                        dataset_copy = data.copy( copy_children=True )
                        if imported:
                            dataset_copy.name = "imported: " + dataset_copy.name
                        hist.add_dataset( dataset_copy )
            trans.sa_session.flush()
            num_datasets_copied = len( dataset_ids ) - invalid_datasets
            done_msg = "%i dataset%s copied to %i histor%s." % \
                ( num_datasets_copied, iff( num_datasets_copied == 1, "", "s"), len( target_histories ), iff( len( target_histories ) == 1, "y", "ies") )
            # NOTE(review): `history` here is the leaked loop variable from the
            # ownership check above, so only the *last* target history is
            # refreshed — presumably intentional shortcut; confirm before changing.
            trans.sa_session.refresh( history )
        if error_msg != "":
            status = ERROR
            message = error_msg
        else:
            status = SUCCESS
            message = done_msg
        return status, message
| 56.548085
| 258
| 0.59438
|
import logging
import os
import urllib
from markupsafe import escape
import paste.httpexceptions
from six import string_types, text_type
from sqlalchemy import false, true
from galaxy import datatypes, model, util, web
from galaxy import managers
from galaxy.datatypes.display_applications.util import decode_dataset_user, encode_dataset_user
from galaxy.model.item_attrs import UsesAnnotations, UsesItemRatings
from galaxy.util import inflector, smart_str
from galaxy.util.sanitize_html import sanitize_html
from galaxy.web.base.controller import BaseUIController, ERROR, SUCCESS, url_for, UsesExtendedMetadataMixin
from galaxy.web.framework.helpers import grids, iff, time_ago, to_unicode
from galaxy.tools.errors import EmailErrorReporter
# Module-level logger for this controller.
log = logging.getLogger( __name__ )
# Archive types supported for dataset downloads; populated below based on
# which compression libraries are importable in this environment.
comptypes = []
try:
    # zlib availability is the proxy for being able to produce zip archives.
    import zlib  # noqa: F401
    comptypes.append( 'zip' )
except ImportError:
    pass
class HistoryDatasetAssociationListGrid( grids.Grid ):
    """Grid listing a user's saved (non-deleted, visible) datasets across histories."""

    # Custom columns for the grid.
    class HistoryColumn( grids.GridColumn ):
        # Renders the (escaped) name of the history containing the HDA.
        def get_value( self, trans, grid, hda):
            return escape(hda.history.name)

    class StatusColumn( grids.GridColumn ):
        # Renders "deleted" for deleted HDAs, empty string otherwise.
        def get_value( self, trans, grid, hda ):
            if hda.deleted:
                return "deleted"
            return ""

        def get_accepted_filters( self ):
            """ Returns a list of accepted filters for this column. """
            accepted_filter_labels_and_vals = { "Active" : "False", "Deleted" : "True", "All": "All" }
            accepted_filters = []
            for label, val in accepted_filter_labels_and_vals.items():
                args = { self.key: val }
                accepted_filters.append( grids.GridColumnFilter( label, args) )
            return accepted_filters

    # Grid definition.
    title = "Saved Datasets"
    model_class = model.HistoryDatasetAssociation
    template = '/dataset/grid.mako'
    default_sort_key = "-update_time"
    columns = [
        grids.TextColumn( "Name", key="name",
                          # Name links to the dataset unless its history is deleted.
                          link=( lambda item: iff( item.history.deleted, None, dict( operation="switch", id=item.id ) ) ), filterable="advanced", attach_popup=True ),
        HistoryColumn( "History", key="history", sortable=False, target="inbound",
                       link=( lambda item: iff( item.history.deleted, None, dict( operation="switch_history", id=item.id ) ) ) ),
        # NOTE(review): grid_name below misspells "Association"; it is a persisted
        # runtime key (saved filter/tag preferences), so renaming it would orphan
        # existing user preferences — leave as-is.
        grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryDatasetAssociationTagAssociation, filterable="advanced", grid_name="HistoryDatasetAssocationListGrid" ),
        StatusColumn( "Status", key="deleted", attach_popup=False ),
        grids.GridColumn( "Last Updated", key="update_time", format=time_ago ),
    ]
    # Free-text search across the Name and Tags columns (indexes 0 and 2 above).
    columns.append(
        grids.MulticolFilterColumn(
            "Search",
            cols_to_filter=[ columns[0], columns[2] ],
            key="free-text-search", visible=False, filterable="standard" )
    )
    operations = [
        grids.GridOperation( "Copy to current history", condition=( lambda item: not item.deleted ), async_compatible=True ),
    ]
    standard_filters = []
    default_filter = dict( name="All", deleted="False", tags="All" )
    preserve_state = False
    use_async = True
    use_paging = True
    num_rows_per_page = 50

    def build_initial_query( self, trans, **kwargs ):
        # Show user's datasets that are not deleted, not in deleted histories, and not hidden.
        return trans.sa_session.query( self.model_class ).select_from( self.model_class.table.join( model.History.table ) ) \
            .filter( model.History.user == trans.user ) \
            .filter( self.model_class.deleted == false() ) \
            .filter( model.History.deleted == false() ) \
            .filter( self.model_class.visible == true() )
class DatasetInterface( BaseUIController, UsesAnnotations, UsesItemRatings, UsesExtendedMetadataMixin ):
    # Shared grid instance used by the `list` action below.
    stored_list_grid = HistoryDatasetAssociationListGrid()

    def __init__( self, app ):
        """Wire up the history/HDA manager facades used throughout this controller."""
        super( DatasetInterface, self ).__init__( app )
        self.history_manager = managers.histories.HistoryManager( app )
        self.hda_manager = managers.hdas.HDAManager( app )
def _get_job_for_dataset( self, trans, dataset_id ):
hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( dataset_id ) )
assert hda and self._can_access_dataset( trans, hda )
return hda.creating_job
def _can_access_dataset( self, trans, dataset_association, allow_admin=True, additional_roles=None ):
roles = trans.get_current_user_roles()
if additional_roles:
roles = roles + additional_roles
return ( allow_admin and trans.user_is_admin() ) or trans.app.security_agent.can_access_dataset( roles, dataset_association.dataset )
@web.expose
def errors( self, trans, id ):
hda = trans.sa_session.query( model.HistoryDatasetAssociation ).get( self.decode_id( id ) )
if not hda or not self._can_access_dataset( trans, hda ):
return trans.show_error_message( "Either this dataset does not exist or you do not have permission to access it." )
return trans.fill_template( "dataset/errors.mako", hda=hda )
@web.expose
def stdout( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
stdout = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
stdout = job.stdout
except:
stdout = "Invalid dataset ID or you are not allowed to access this dataset"
return smart_str( stdout )
@web.expose
def stderr( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
stderr = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
stderr = job.stderr
except:
stderr = "Invalid dataset ID or you are not allowed to access this dataset"
return smart_str( stderr )
@web.expose
def exit_code( self, trans, dataset_id=None, **kwargs ):
trans.response.set_content_type( 'text/plain' )
exit_code = ""
try:
job = self._get_job_for_dataset( trans, dataset_id )
exit_code = job.exit_code
except:
exit_code = "Invalid dataset ID or you are not allowed to access this dataset"
return exit_code
@web.expose
def report_error( self, trans, id, email='', message="", **kwd ):
biostar_report = 'biostar' in str( kwd.get( 'submit_error_report') ).lower()
if biostar_report:
return trans.response.send_redirect( url_for( controller='biostar', action='biostar_tool_bug_report', hda=id, email=email, message=message ) )
try:
error_reporter = EmailErrorReporter( id, trans.app )
error_reporter.send_report( user=trans.user, email=email, message=message )
return trans.show_ok_message( "Your error report has been sent" )
except Exception as e:
return trans.show_error_message( "An error occurred sending the report by email: %s" % str( e ) )
    @web.expose
    def default(self, trans, dataset_id=None, **kwd):
        # Catch-all action: direct links into this controller are not supported.
        return 'This link may not be followed from within Galaxy.'
@web.expose
def get_metadata_file(self, trans, hda_id, metadata_name):
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( hda_id ) )
if not data or not self._can_access_dataset( trans, data ):
return trans.show_error_message( "You are not allowed to access this dataset" )
fname = ''.join(c in util.FILENAME_VALID_CHARS and c or '_' for c in data.name)[0:150]
file_ext = data.metadata.spec.get(metadata_name).get("file_ext", metadata_name)
trans.response.headers["Content-Type"] = "application/octet-stream"
trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (data.hid, fname, file_ext)
return open(data.metadata.get(metadata_name).file_name)
def _check_dataset(self, trans, hda_id):
# DEPRECATION: We still support unencoded ids for backward compatibility
try:
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( hda_id) )
if data is None:
raise ValueError( 'Invalid reference dataset id: %s.' % hda_id)
except:
try:
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( int( hda_id ) )
except:
data = None
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( hda_id ) )
if not self._can_access_dataset( trans, data ):
return trans.show_error_message( "You are not allowed to access this dataset" )
if data.purged:
return trans.show_error_message( "The dataset you are attempting to view has been purged." )
if data.deleted and not ( trans.user_is_admin() or ( data.history and trans.get_user() == data.history.user ) ):
return trans.show_error_message( "The dataset you are attempting to view has been deleted." )
if data.state == trans.model.Dataset.states.UPLOAD:
return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to view it." )
return data
@web.expose
@web.json
def transfer_status(self, trans, dataset_id, filename=None):
data = self._check_dataset(trans, dataset_id)
if isinstance( data, string_types ):
return data
log.debug( "Checking transfer status for dataset %s..." % data.dataset.id )
# Pulling files in extra_files_path into cache is not handled via this
# method but that's primarily because those files are typically linked to
# call this method does not seem doable?
if data.dataset.external_filename:
return True
else:
return trans.app.object_store.file_ready(data.dataset)
@web.expose
def display(self, trans, dataset_id=None, preview=False, filename=None, to_ext=None, offset=None, ck_size=None, **kwd):
data = self._check_dataset(trans, dataset_id)
if not isinstance( data, trans.app.model.DatasetInstance ):
return data
# Ensure offset is an integer before passing through to datatypes.
if offset:
offset = int(offset)
# Ensure ck_size is an integer before passing through to datatypes.
if ck_size:
ck_size = int(ck_size)
return data.datatype.display_data(trans, data, preview, filename, to_ext, offset=offset, ck_size=ck_size, **kwd)
    @web.expose
    def edit(self, trans, dataset_id=None, filename=None, hid=None, **kwd):
        """Allows user to modify parameters of an HDA.

        Dispatches on which form button was submitted (``change`` datatype,
        ``save`` attributes, ``detect`` metadata, ``convert_data``,
        ``update_roles_button``) and re-renders the edit form.
        """
        message = None
        status = 'done'
        refresh_frames = []
        error = False

        def __ok_to_edit_metadata( dataset_id ):
            # prevent modifying metadata when dataset is queued or running as input/output
            # This code could be more efficient, i.e. by using mappers, but to prevent slowing down loading a History panel, we'll leave the code here for now
            for job_to_dataset_association in trans.sa_session.query(
                    self.app.model.JobToInputDatasetAssociation ) \
                    .filter_by( dataset_id=dataset_id ) \
                    .all() \
                    + trans.sa_session.query( self.app.model.JobToOutputDatasetAssociation ) \
                    .filter_by( dataset_id=dataset_id ) \
                    .all():
                if job_to_dataset_association.job.state not in [ job_to_dataset_association.job.states.OK, job_to_dataset_association.job.states.ERROR, job_to_dataset_association.job.states.DELETED ]:
                    return False
            return True

        # Resolve the HDA either by position in the current history (hid) or by
        # encoded dataset id.
        if hid is not None:
            history = trans.get_history()
            data = history.datasets[ int( hid ) - 1 ]
            id = None
        elif dataset_id is not None:
            id = self.decode_id( dataset_id )
            data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
        else:
            trans.log_event( "dataset_id and hid are both None, cannot load a dataset to edit" )
            return trans.show_error_message( "You must provide a history dataset id to edit" )
        if data is None:
            trans.log_event( "Problem retrieving dataset (encoded: %s, decoded: %s) with history id %s." % ( str( dataset_id ), str( id ), str( hid ) ) )
            return trans.show_error_message( "History dataset id is invalid" )
        if dataset_id is not None and data.history.user is not None and data.history.user != trans.user:
            # Report "invalid id" rather than "not yours" to avoid leaking existence.
            trans.log_event( "User attempted to edit an HDA they do not own (encoded: %s, decoded: %s)" % ( dataset_id, id ) )
            return trans.show_error_message( "History dataset id is invalid" )
        current_user_roles = trans.get_current_user_roles()
        if data.history.user and not data.dataset.has_manage_permissions_roles( trans ):
            # Permission setting related to DATASET_MANAGE_PERMISSIONS was broken for a period of time,
            # so it is possible that some Datasets have no roles associated with the DATASET_MANAGE_PERMISSIONS
            # permission. In this case, we'll reset this permission to the hda user's private role.
            manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
            permissions = { manage_permissions_action : [ trans.app.security_agent.get_private_user_role( data.history.user ) ] }
            trans.app.security_agent.set_dataset_permission( data.dataset, permissions )
        if self._can_access_dataset( trans, data ):
            if data.state == trans.model.Dataset.states.UPLOAD:
                return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to edit its metadata." )
            params = util.Params( kwd, sanitize=False )
            if params.change:
                # The user clicked the Save button on the 'Change data type' form
                if data.datatype.allow_datatype_change and trans.app.datatypes_registry.get_datatype_by_extension( params.datatype ).allow_datatype_change:
                    # prevent modifying datatype when dataset is queued or running as input/output
                    if not __ok_to_edit_metadata( data.id ):
                        message = "This dataset is currently being used as input or output. You cannot change datatype until the jobs have completed or you have canceled them."
                        error = True
                    else:
                        trans.app.datatypes_registry.change_datatype( data, params.datatype )
                        trans.sa_session.flush()
                        trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={ 'input1': data }, overwrite=False )  # overwrite is False as per existing behavior
                        message = "Changed the type of dataset '%s' to %s" % ( to_unicode( data.name ), params.datatype )
                        refresh_frames = ['history']
                else:
                    message = "You are unable to change datatypes in this manner. Changing %s to %s is not allowed." % ( data.extension, params.datatype )
                    error = True
            elif params.save:
                # The user clicked the Save button on the 'Edit Attributes' form
                data.name = params.name if params.name else ''
                data.info = params.info if params.info else ''
                message = ''
                if __ok_to_edit_metadata( data.id ):
                    # The following for loop will save all metadata_spec items
                    for name, spec in data.datatype.metadata_spec.items():
                        if spec.get("readonly"):
                            continue
                        optional = params.get("is_" + name, None)
                        other = params.get("or_" + name, None)
                        if optional and optional == '__NOTHING__':
                            # optional element... == '__NOTHING__' actually means it is NOT checked (and therefore omitted)
                            setattr(data.metadata, name, None)
                        else:
                            if other:
                                setattr( data.metadata, name, other )
                            else:
                                setattr( data.metadata, name, spec.unwrap( params.get(name, None) ) )
                    data.datatype.after_setting_metadata( data )
                    # Sanitize annotation before adding it.
                    if params.annotation:
                        annotation = sanitize_html( params.annotation, 'utf-8', 'text/html' )
                        self.add_item_annotation( trans.sa_session, trans.get_user(), data, annotation )
                    # This block on controller code is inactive until the 'extended_metadata' edit box is added back into the UI
                    # Add or delete extended metadata
                    # if params.extended_metadata:
                    #     em_string = params.extended_metadata
                    #     if len(em_string):
                    #         em_payload = None
                    #         try:
                    #             em_payload = loads(em_string)
                    #         except Exception as e:
                    #             message = 'Invalid JSON input'
                    #             error = True
                    #         if em_payload is not None:
                    #             if data is not None:
                    #                 ex_obj = self.get_item_extended_metadata_obj(trans, data)
                    #                 if ex_obj is not None:
                    #                     self.unset_item_extended_metadata_obj(trans, data)
                    #                     self.delete_extended_metadata(trans, ex_obj)
                    #                 ex_obj = self.create_extended_metadata(trans, em_payload)
                    #                 self.set_item_extended_metadata_obj(trans, data, ex_obj)
                    #                 message = "Updated Extended metadata '%s'." % data.name
                    #                 status = 'done'
                    #             else:
                    #                 message = "data not found"
                    #                 error = True
                    # else:
                    #     if data is not None:
                    #         ex_obj = self.get_item_extended_metadata_obj(trans, data)
                    #         if ex_obj is not None:
                    #             self.unset_item_extended_metadata_obj(trans, data)
                    #             self.delete_extended_metadata(trans, ex_obj)
                    #     message = "Deleted Extended metadata '%s'." % data.name
                    #     status = 'done'
                    # If setting metadata previously failed and all required elements have now been set, clear the failed state.
                    if data._state == trans.model.Dataset.states.FAILED_METADATA and not data.missing_meta():
                        data._state = None
                    trans.sa_session.flush()
                    message = "Attributes updated%s" % message
                    refresh_frames = ['history']
                else:
                    trans.sa_session.flush()
                    message = "Attributes updated, but metadata could not be changed because this dataset is currently being used as input or output. You must cancel or wait for these jobs to complete before changing metadata."
                    status = "warning"
                    refresh_frames = ['history']
            elif params.detect:
                # The user clicked the Auto-detect button on the 'Edit Attributes' form
                # prevent modifying metadata when dataset is queued or running as input/output
                if not __ok_to_edit_metadata( data.id ):
                    message = "This dataset is currently being used as input or output. You cannot change metadata until the jobs have completed or you have canceled them."
                    error = True
                else:
                    for name, spec in data.metadata.spec.items():
                        # We need to be careful about the attributes we are resetting
                        if name not in [ 'name', 'info', 'dbkey', 'base_name' ]:
                            if spec.get( 'default' ):
                                setattr( data.metadata, name, spec.unwrap( spec.get( 'default' ) ) )
                    message = 'Attributes have been queued to be updated'
                    # Re-detection runs as a job via the external metadata tool.
                    trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming={ 'input1': data } )
                    trans.sa_session.flush()
                    refresh_frames = ['history']
            elif params.convert_data:
                target_type = kwd.get("target_type", None)
                if target_type:
                    message = data.datatype.convert_dataset(trans, data, target_type)
                    refresh_frames = ['history']
            elif params.update_roles_button:
                if not trans.user:
                    return trans.show_error_message( "You must be logged in if you want to change permissions." )
                if trans.app.security_agent.can_manage_dataset( current_user_roles, data.dataset ):
                    access_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action )
                    manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
                    # The user associated the DATASET_ACCESS permission on the dataset with 1 or more roles. We
                    # need to ensure that they did not associate roles that would cause accessibility problems.
                    permissions, in_roles, error, message = \
                        trans.app.security_agent.derive_roles_from_access( trans, data.dataset.id, 'root', **kwd )
                    if error:
                        # Keep the original role associations for the DATASET_ACCESS permission on the dataset.
                        permissions[ access_action ] = data.dataset.get_access_roles( trans )
                        status = 'error'
                    else:
                        error = trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
                        if error:
                            message += error
                            status = 'error'
                        else:
                            message = 'Your changes completed successfully.'
                    trans.sa_session.refresh( data.dataset )
                else:
                    message = "You are not authorized to change this dataset's permissions"
                    error = True
            else:
                # No button pressed: initial render of the edit form.
                if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
                    data.metadata.dbkey = data.dbkey
            # the built-in 'id' is overwritten in lots of places as well
            ldatatypes = [ dtype_name for dtype_name, dtype_value in trans.app.datatypes_registry.datatypes_by_extension.iteritems() if dtype_value.allow_datatype_change ]
            ldatatypes.sort()
            all_roles = trans.app.security_agent.get_legitimate_roles( trans, data.dataset, 'root' )
            if error:
                status = 'error'
            return trans.fill_template( "/dataset/edit_attributes.mako",
                                        data=data,
                                        data_annotation=self.get_item_annotation_str( trans.sa_session, trans.user, data ),
                                        datatypes=ldatatypes,
                                        current_user_roles=current_user_roles,
                                        all_roles=all_roles,
                                        message=message,
                                        status=status,
                                        dataset_id=dataset_id,
                                        refresh_frames=refresh_frames )
        else:
            return trans.show_error_message( "You do not have permission to edit this dataset's ( id: %s ) information." % str( dataset_id ) )
    @web.expose
    @web.require_login( "see all available datasets" )
    def list( self, trans, **kwargs ):
        """Render the saved-datasets grid, optionally applying a grid operation
        ('switch', 'switch_history', or 'copy to current history') first."""
        status = message = None
        if 'operation' in kwargs:
            operation = kwargs['operation'].lower()
            hda_ids = util.listify( kwargs.get( 'id', [] ) )
            status, message = None, None
            # Load the HDAs and ensure they all belong to the current user.
            hdas = []
            for encoded_hda_id in hda_ids:
                hda_id = self.decode_id( encoded_hda_id )
                hda = trans.sa_session.query( model.HistoryDatasetAssociation ).filter_by( id=hda_id ).first()
                if hda:
                    # NOTE(review): ownership enforced via assert — disabled under
                    # `python -O`; confirm whether that is acceptable here.
                    if hda.history.user_id is not None and trans.user:
                        assert trans.user.id == hda.history.user_id, "HistoryDatasetAssocation does not belong to current user"
                    hdas.append( hda )
                else:
                    log.warning( "Invalid history_dataset_association id '%r' passed to list", hda_id )
            if hdas:
                if operation == "switch" or operation == "switch_history":
                    # Switch to the history containing each HDA, via the history controller.
                    histories = []
                    for hda in hdas:
                        histories.append( hda.history )
                    status, message = trans.webapp.controllers['history']._list_switch( trans, histories )
                    # Current history changed; refresh the history frame, and for
                    # "switch" also seek to the selected datasets.
                    trans.template_context['refresh_frames'] = ['history']
                    if operation == "switch":
                        hda_ids = [ trans.security.encode_id( hda.id ) for hda in hdas ]
                        trans.template_context[ 'seek_hda_ids' ] = hda_ids
                elif operation == "copy to current history":
                    # Copy the selected datasets into the user's current history.
                    target_histories = [ trans.get_history() ]
                    hda_ids.reverse()
                    status, message = self._copy_datasets( trans, hda_ids, target_histories )
                    trans.template_context['refresh_frames'] = ['history']
        return self.stored_list_grid( trans, status=status, message=message, **kwargs )
@web.expose
def imp( self, trans, dataset_id=None, **kwd ):
referer = trans.request.referer
if referer:
referer_message = "<a href='%s'>return to the previous page</a>" % escape(referer)
else:
referer_message = "<a href='%s'>go to Galaxy's start page</a>" % url_for( '/' )
# Error checking.
if not dataset_id:
return trans.show_error_message( "You must specify a dataset to import. You can %s." % referer_message, use_panels=True )
# Do import.
cur_history = trans.get_history( create=True )
status, message = self._copy_datasets( trans, [ dataset_id ], [ cur_history ], imported=True )
message = "Dataset imported. <br>You can <a href='%s'>start using the dataset</a> or %s." % ( url_for('/'), referer_message )
return trans.show_message( message, type=status, use_panels=True )
@web.expose
@web.json
@web.require_login( "use Galaxy datasets" )
def get_name_and_link_async( self, trans, id=None ):
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
return_dict = { "name" : dataset.name, "link" : url_for( controller='dataset', action="display_by_username_and_slug", username=dataset.history.user.username, slug=trans.security.encode_id( dataset.id ) ) }
return return_dict
@web.expose
def get_embed_html_async( self, trans, id ):
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset:
return "Embedded Dataset '%s'" % dataset.name
    @web.expose
    @web.require_login( "use Galaxy datasets" )
    def set_accessible_async( self, trans, id=None, accessible=False ):
        # Intentional no-op: HDAs have no importable/accessible flag to set;
        # the endpoint exists for interface parity with other sharable items.
        return
@web.expose
@web.require_login( "rate items" )
@web.json
def rate_async( self, trans, id, rating ):
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
return trans.show_error_message( "The specified dataset does not exist." )
# Rate dataset.
self.rate_item( trans.sa_session, trans.get_user(), dataset, rating )
return self.get_ave_item_rating_data( trans.sa_session, dataset )
    @web.expose
    def display_by_username_and_slug( self, trans, username, slug, filename=None, preview=True ):
        """Display a shared dataset by owner username and slug.

        Datasets do not yet have real slugs, so ``slug`` is the encoded
        dataset id; ``username`` is accepted for URL symmetry with other
        sharable items but is not used for the lookup.
        """
        id = slug
        decoded_id = self.decode_id( id )
        dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
        dataset = self.hda_manager.error_if_uploading( dataset )
        if dataset:
            # Filename used for composite types.
            if filename:
                return self.display( trans, dataset_id=slug, filename=filename)
            truncated, dataset_data = self.hda_manager.text_data( dataset, preview )
            dataset.annotation = self.get_item_annotation_str( trans.sa_session, dataset.history.user, dataset )
            # If dataset is chunkable, get first chunk.
            first_chunk = None
            if dataset.datatype.CHUNKABLE:
                first_chunk = dataset.datatype.get_chunk(trans, dataset, 0)
            # If data is binary or an image, stream without template; otherwise, use display template.
            # TODO: figure out a way to display images in display template.
            if isinstance(dataset.datatype, datatypes.binary.Binary) or isinstance(dataset.datatype, datatypes.images.Image) or isinstance(dataset.datatype, datatypes.text.Html):
                trans.response.set_content_type( dataset.get_mime() )
                return open( dataset.file_name )
            else:
                # Get rating data.
                user_item_rating = 0
                if trans.get_user():
                    user_item_rating = self.get_user_item_rating( trans.sa_session, trans.get_user(), dataset )
                    if user_item_rating:
                        user_item_rating = user_item_rating.rating
                    else:
                        user_item_rating = 0
                ave_item_rating, num_ratings = self.get_ave_item_rating_data( trans.sa_session, dataset )
                return trans.fill_template_mako( "/dataset/display.mako", item=dataset, item_data=dataset_data,
                                                 truncated=truncated, user_item_rating=user_item_rating,
                                                 ave_item_rating=ave_item_rating, num_ratings=num_ratings,
                                                 first_chunk=first_chunk )
        else:
            raise web.httpexceptions.HTTPNotFound()
@web.expose
def get_item_content_async( self, trans, id ):
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if dataset is None:
raise web.httpexceptions.HTTPNotFound()
truncated, dataset_data = self.hda_manager.text_data( dataset, preview=True )
# Get annotation.
dataset.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, dataset )
return trans.stream_template_mako( "/dataset/item_content.mako", item=dataset, item_data=dataset_data, truncated=truncated )
@web.expose
def annotate_async( self, trans, id, new_annotation=None, **kwargs ):
# TODO:?? why is this an access check only?
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
web.httpexceptions.HTTPNotFound()
if dataset and new_annotation:
# Sanitize annotation before adding it.
new_annotation = sanitize_html( new_annotation, 'utf-8', 'text/html' )
self.add_item_annotation( trans.sa_session, trans.get_user(), dataset, new_annotation )
trans.sa_session.flush()
return new_annotation
@web.expose
def get_annotation_async( self, trans, id ):
decoded_id = self.decode_id( id )
dataset = self.hda_manager.get_accessible( decoded_id, trans.user )
dataset = self.hda_manager.error_if_uploading( dataset )
if not dataset:
web.httpexceptions.HTTPNotFound()
annotation = self.get_item_annotation_str( trans.sa_session, trans.user, dataset )
if annotation and isinstance( annotation, text_type ):
annotation = annotation.encode( 'ascii', 'replace' ) # paste needs ascii here
return annotation
@web.expose
def display_at( self, trans, dataset_id, filename=None, **kwd ):
if not trans.app.config.enable_old_display_applications:
return trans.show_error_message( "This method of accessing external display applications has been disabled by a Galaxy administrator." )
site = filename
data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( dataset_id )
if not data:
raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
if 'display_url' not in kwd or 'redirect_url' not in kwd:
return trans.show_error_message( 'Invalid parameters specified for "display at" link, please contact a Galaxy administrator' )
try:
redirect_url = kwd['redirect_url'] % urllib.quote_plus( kwd['display_url'] )
except:
redirect_url = kwd['redirect_url'] # not all will need custom text
if trans.app.security_agent.dataset_is_public( data.dataset ):
return trans.response.send_redirect( redirect_url ) # anon access already permitted by rbac
if self._can_access_dataset( trans, data ):
trans.app.host_security_agent.set_dataset_permissions( data, trans.user, site )
return trans.response.send_redirect( redirect_url )
else:
return trans.show_error_message( "You are not allowed to view this dataset at external sites. Please contact your Galaxy administrator to acquire management permissions for this dataset." )
    @web.expose
    def display_application( self, trans, dataset_id=None, user_id=None, app_name=None, link_name=None, app_action=None, action_param=None, action_param_extra=None, **kwds ):
        """Dispatch a request for an external display application link.

        Depending on ``app_action`` this either streams a parameter's data
        file to the caller ('data'/'param'), redirects to the external site's
        URL (no action), or renders a status page while preparation jobs run.
        """
        # Build list of parameters to pass in to display application logic (app_kwds)
        app_kwds = {}
        for name, value in dict(kwds).iteritems(): # clone kwds because we remove stuff as we go.
            if name.startswith( "app_" ):
                app_kwds[ name[ len( "app_" ): ] ] = value
                del kwds[ name ]
        if kwds:
            log.debug( "Unexpected Keywords passed to display_application: %s" % kwds ) # route memory?
        # decode ids
        data, user = decode_dataset_user( trans, dataset_id, user_id )
        if not data:
            raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) )
        if user is None:
            user = trans.user
        if user:
            user_roles = user.all_roles()
        else:
            user_roles = []
        # Decode application name and link name
        app_name = urllib.unquote_plus( app_name )
        link_name = urllib.unquote_plus( link_name )
        if None in [ app_name, link_name ]:
            return trans.show_error_message( "A display application name and link name must be provided." )
        if self._can_access_dataset( trans, data, additional_roles=user_roles ):
            msg = []
            preparable_steps = []
            refresh = False
            display_app = trans.app.datatypes_registry.display_applications.get( app_name )
            if not display_app:
                log.debug( "Unknown display application has been requested: %s", app_name )
                return paste.httpexceptions.HTTPNotFound( "The requested display application (%s) is not available." % ( app_name ) )
            # Hashes authenticate the dataset/user pair to the external app.
            dataset_hash, user_hash = encode_dataset_user( trans, data, user )
            try:
                display_link = display_app.get_link( link_name, data, dataset_hash, user_hash, trans, app_kwds )
            except Exception as e:
                log.debug( "Error generating display_link: %s", e )
                # User can sometimes recover from, e.g. conversion errors by fixing input metadata, so use conflict
                return paste.httpexceptions.HTTPConflict( "Error generating display_link: %s" % e )
            if not display_link:
                log.debug( "Unknown display link has been requested: %s", link_name )
                return paste.httpexceptions.HTTPNotFound( "Unknown display link has been requested: %s" % link_name )
            if data.state == data.states.ERROR:
                msg.append( ( 'This dataset is in an error state, you cannot view it at an external display application.', 'error' ) )
            elif data.deleted:
                msg.append( ( 'This dataset has been deleted, you cannot view it at an external display application.', 'error' ) )
            elif data.state != data.states.OK:
                msg.append( ( 'You must wait for this dataset to be created before you can view it at an external display application.', 'info' ) )
                refresh = True
            else:
                # We have permissions, dataset is not deleted and is in OK state, allow access
                if display_link.display_ready():
                    if app_action in [ 'data', 'param' ]:
                        assert action_param, "An action param must be provided for a data or param action"
                        # data is used for things with filenames that could be passed off to a proxy
                        # in case some display app wants all files to be in the same 'directory',
                        # data can be forced to param, but not the other way (no filename for other direction)
                        # get param name from url param name
                        try:
                            action_param = display_link.get_param_name_by_url( action_param )
                        except ValueError as e:
                            log.debug( e )
                            return paste.httpexceptions.HTTPNotFound( str( e ) )
                        value = display_link.get_param_value( action_param )
                        assert value, "An invalid parameter name was provided: %s" % action_param
                        assert value.parameter.viewable, "This parameter is not viewable."
                        if value.parameter.type == 'data':
                            try:
                                if action_param_extra:
                                    assert value.parameter.allow_extra_files_access, "Extra file content requested (%s), but allow_extra_files_access is False." % ( action_param_extra )
                                    file_name = os.path.join( value.extra_files_path, action_param_extra )
                                else:
                                    file_name = value.file_name
                                content_length = os.path.getsize( file_name )
                                # The open file object is handed to the framework for streaming.
                                rval = open( file_name )
                            except OSError as e:
                                log.debug( "Unable to access requested file in display application: %s", e )
                                return paste.httpexceptions.HTTPNotFound( "This file is no longer available." )
                        else:
                            rval = str( value )
                            content_length = len( rval )
                        trans.response.set_content_type( value.mime_type( action_param_extra=action_param_extra ) )
                        trans.response.headers[ 'Content-Length' ] = content_length
                        return rval
                    elif app_action is None:
                        # redirect user to url generated by display link
                        # Fix for Safari caching display links, which can change if the underlying dataset has an attribute change, e.g. name, metadata, etc
                        trans.response.headers[ 'Cache-Control' ] = [ 'no-cache', 'max-age=0', 'no-store', 'must-revalidate' ]
                        return trans.response.send_redirect( display_link.display_url() )
                    else:
                        msg.append( ( 'Invalid action provided: %s' % app_action, 'error' ) )
                else:
                    if app_action is None:
                        if trans.history != data.history:
                            msg.append( ( 'You must import this dataset into your current history before you can view it at the desired display application.', 'error' ) )
                        else:
                            refresh = True
                            msg.append( ( 'Launching this display application required additional datasets to be generated, you can view the status of these jobs below. ', 'info' ) )
                            # Kick off preparation jobs only once.
                            if not display_link.preparing_display():
                                display_link.prepare_display()
                            preparable_steps = display_link.get_prepare_steps()
                    else:
                        raise Exception( 'Attempted a view action (%s) on a non-ready display application' % app_action )
            return trans.fill_template_mako( "dataset/display_application/display.mako",
                                             msg=msg,
                                             display_app=display_app,
                                             display_link=display_link,
                                             refresh=refresh,
                                             preparable_steps=preparable_steps )
        return trans.show_error_message( 'You do not have permission to view this dataset at an external display application.' )
    def _delete( self, trans, dataset_id ):
        """Mark an HDA in the current history as deleted and stop its creating job.

        Returns a ( message, status ) tuple: ( None, 'done' ) on success,
        ( 'Dataset deletion failed', 'error' ) on any failure.
        """
        message = None
        status = 'done'
        id = None
        try:
            id = self.decode_id( dataset_id )
            hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
            assert hda, 'Invalid HDA: %s' % id
            # Walk up parent datasets to find the containing history
            topmost_parent = hda
            while topmost_parent.parent:
                topmost_parent = topmost_parent.parent
            assert topmost_parent in trans.history.datasets, "Data does not belong to current history"
            # Mark deleted and cleanup
            hda.mark_deleted()
            hda.clear_associated_files()
            trans.log_event( "Dataset id %s marked as deleted" % str(id) )
            self.hda_manager.stop_creating_job( hda )
            trans.sa_session.flush()
        except Exception as e:
            msg = 'HDA deletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
            log.exception( msg + ': ' + str( e ) )
            trans.log_event( msg )
            message = 'Dataset deletion failed'
            status = 'error'
        return ( message, status )
def _undelete( self, trans, dataset_id ):
message = None
status = 'done'
id = None
try:
id = self.decode_id( dataset_id )
history = trans.get_history()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
assert hda and hda.undeletable, 'Invalid HDA: %s' % id
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark undeleted
hda.mark_undeleted()
trans.sa_session.flush()
trans.log_event( "Dataset id %s has been undeleted" % str(id) )
except Exception:
msg = 'HDA undeletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
log.exception( msg )
trans.log_event( msg )
message = 'Dataset undeletion failed'
status = 'error'
return ( message, status )
def _unhide( self, trans, dataset_id ):
try:
id = self.decode_id( dataset_id )
except:
return False
history = trans.get_history()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
if hda:
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
assert topmost_parent in history.datasets, "Data does not belong to current history"
# Mark undeleted
hda.mark_unhidden()
trans.sa_session.flush()
trans.log_event( "Dataset id %s has been unhidden" % str(id) )
return True
return False
def _purge( self, trans, dataset_id ):
message = None
status = 'done'
try:
id = self.decode_id( dataset_id )
user = trans.get_user()
hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
# Invalid HDA
assert hda, 'Invalid history dataset ID'
# Walk up parent datasets to find the containing history
topmost_parent = hda
while topmost_parent.parent:
topmost_parent = topmost_parent.parent
# If the user is anonymous, make sure the HDA is owned by the current session.
if not user:
current_history_id = trans.galaxy_session.current_history_id
assert topmost_parent.history.id == current_history_id, 'Data does not belong to current user'
# If the user is known, make sure the HDA is owned by the current user.
else:
assert topmost_parent.history.user == user, 'Data does not belong to current user'
# Ensure HDA is deleted
hda.deleted = True
# HDA is purgeable
# Decrease disk usage first
if user:
user.adjust_total_disk_usage(-hda.quota_amount(user))
# Mark purged
hda.purged = True
trans.sa_session.add( hda )
trans.log_event( "HDA id %s has been purged" % hda.id )
trans.sa_session.flush()
# Don't delete anything if there are active HDAs or any LDDAs, even if
if hda.dataset.user_can_purge:
try:
hda.dataset.full_delete()
trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
trans.sa_session.add( hda.dataset )
except:
log.exception( 'Unable to purge dataset (%s) on purge of HDA (%s):' % ( hda.dataset.id, hda.id ) )
trans.sa_session.flush()
except Exception as exc:
msg = 'HDA purge failed (encoded: %s, decoded: %s): %s' % ( dataset_id, id, exc )
log.exception( msg )
trans.log_event( msg )
message = 'Dataset removal from disk failed'
status = 'error'
return ( message, status )
@web.expose
def delete( self, trans, dataset_id, filename, show_deleted_on_refresh=False ):
message, status = self._delete( trans, dataset_id )
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )
@web.expose
def delete_async( self, trans, dataset_id, filename ):
message, status = self._delete( trans, dataset_id )
if status == 'done':
return "OK"
else:
raise Exception( message )
@web.expose
def undelete( self, trans, dataset_id, filename ):
message, status = self._undelete( trans, dataset_id )
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=True, message=message, status=status ) )
@web.expose
def undelete_async( self, trans, dataset_id, filename ):
message, status = self._undelete( trans, dataset_id )
if status == 'done':
return "OK"
else:
raise Exception( message )
@web.expose
def unhide( self, trans, dataset_id, filename ):
if self._unhide( trans, dataset_id ):
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_hidden=True ) )
raise Exception( "Error unhiding" )
@web.expose
def purge( self, trans, dataset_id, filename, show_deleted_on_refresh=False ):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge( trans, dataset_id )
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )
@web.expose
def purge_async( self, trans, dataset_id, filename ):
if trans.app.config.allow_user_dataset_purge:
message, status = self._purge( trans, dataset_id )
else:
message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
status = 'error'
if status == 'done':
return "OK"
else:
raise Exception( message )
    @web.expose
    def show_params( self, trans, dataset_id=None, from_noframe=None, **kwd ):
        """Render the tool parameters used by the job that created an HDA."""
        try:
            hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( self.decode_id( dataset_id ) )
        except ValueError:
            hda = None
        if not hda:
            raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % escape( str( dataset_id ) ) )
        if not self._can_access_dataset( trans, hda ):
            return trans.show_error_message( "You are not allowed to access this dataset" )
        params_objects = None
        job = None
        tool = None
        upgrade_messages = {}
        has_parameter_errors = False
        inherit_chain = hda.source_dataset_chain
        # If this HDA was copied/converted, report on the job of the original.
        if inherit_chain:
            job_dataset_association = inherit_chain[-1][0]
        else:
            job_dataset_association = hda
        if job_dataset_association.creating_job_associations:
            job = job_dataset_association.creating_job_associations[0].job
        if job:
            try:
                toolbox = self.get_toolbox()
                tool = toolbox.get_tool( job.tool_id )
                assert tool is not None, 'Requested tool has not been loaded.'
                # First try strict loading; fall back to lenient loading and
                # report which parameters no longer validate.
                try:
                    params_objects = job.get_param_values( trans.app, ignore_errors=False )
                except:
                    params_objects = job.get_param_values( trans.app, ignore_errors=True )
                    # use different param_objects in the following line, since we want to display original values as much as possible
                    upgrade_messages = tool.check_and_update_param_values( job.get_param_values( trans.app, ignore_errors=True ),
                                                                          trans,
                                                                          update_values=False )
                    has_parameter_errors = True
            except:
                # NOTE(review): bare except silently drops tool/param load
                # failures; the template then renders with tool=None.
                pass
        if job is None:
            return trans.show_error_message( "Job information is not available for this dataset." )
        # TODO: we should provide the basic values along with the objects, in order to better handle reporting of old values during upgrade
        return trans.fill_template( "show_params.mako",
                                    inherit_chain=inherit_chain,
                                    history=trans.get_history(),
                                    hda=hda,
                                    job=job,
                                    tool=tool,
                                    params_objects=params_objects,
                                    upgrade_messages=upgrade_messages,
                                    has_parameter_errors=has_parameter_errors )
    @web.expose
    def copy_datasets( self, trans, source_history=None, source_content_ids="", target_history_id=None, target_history_ids="", new_history_name="", do_copy=False, **kwd ):
        """Render (and optionally perform) copying of datasets/collections
        from a source history into one or more target histories.

        ``source_content_ids`` is a comma-separated list of
        'dataset|<encoded>' and 'dataset_collection|<encoded>' tokens. When
        ``do_copy`` is set the copy is executed; otherwise only the form is
        rendered.
        """
        user = trans.get_user()
        if source_history is not None:
            decoded_source_history_id = self.decode_id( source_history )
            history = self.history_manager.get_owned( decoded_source_history_id, trans.user, current_history=trans.history )
            current_history = trans.get_history()
        else:
            history = current_history = trans.get_history()
        refresh_frames = []
        if source_content_ids:
            if not isinstance( source_content_ids, list ):
                source_content_ids = source_content_ids.split(",")
            # Split the mixed token list into collections vs. plain datasets.
            encoded_dataset_collection_ids = [ s[ len("dataset_collection|"): ] for s in source_content_ids if s.startswith("dataset_collection|") ]
            encoded_dataset_ids = [ s[ len("dataset|"): ] for s in source_content_ids if s.startswith("dataset|") ]
            decoded_dataset_collection_ids = set(map( self.decode_id, encoded_dataset_collection_ids ))
            decoded_dataset_ids = set(map( self.decode_id, encoded_dataset_ids ))
        else:
            decoded_dataset_collection_ids = []
            decoded_dataset_ids = []
        # A new-history name overrides any explicit target history selection.
        if new_history_name:
            target_history_ids = []
        else:
            if target_history_id:
                target_history_ids = [ self.decode_id(target_history_id) ]
            elif target_history_ids:
                if not isinstance( target_history_ids, list ):
                    target_history_ids = target_history_ids.split(",")
                target_history_ids = list(set([ self.decode_id(h) for h in target_history_ids if h ]))
            else:
                target_history_ids = []
        done_msg = error_msg = ""
        new_history = None
        if do_copy:
            invalid_contents = 0
            if not ( decoded_dataset_ids or decoded_dataset_collection_ids ) or not ( target_history_ids or new_history_name ):
                error_msg = "You must provide both source datasets and target histories. "
            else:
                if new_history_name:
                    new_history = trans.app.model.History()
                    new_history.name = new_history_name
                    new_history.user = user
                    trans.sa_session.add( new_history )
                    trans.sa_session.flush()
                    target_history_ids.append( new_history.id )
                if user:
                    # Only histories the user owns are valid copy targets.
                    target_histories = [ hist for hist in map( trans.sa_session.query( trans.app.model.History ).get, target_history_ids ) if hist is not None and hist.user == user ]
                else:
                    target_histories = [ history ]
                if len( target_histories ) != len( target_history_ids ):
                    error_msg = error_msg + "You do not have permission to add datasets to %i requested histories.  " % ( len( target_history_ids ) - len( target_histories ) )
                source_contents = map( trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get, decoded_dataset_ids )
                source_contents.extend( map( trans.sa_session.query( trans.app.model.HistoryDatasetCollectionAssociation ).get, decoded_dataset_collection_ids ) )
                source_contents.sort(key=lambda content: content.hid)
                for content in source_contents:
                    if content is None:
                        error_msg = error_msg + "You tried to copy a dataset that does not exist. "
                        invalid_contents += 1
                    elif content.history != history:
                        error_msg = error_msg + "You tried to copy a dataset which is not in your current history. "
                        invalid_contents += 1
                    else:
                        for hist in target_histories:
                            if content.history_content_type == "dataset":
                                hist.add_dataset( content.copy( copy_children=True ) )
                            else:
                                copy_collected_datasets = True
                                copy_kwds = {}
                                if copy_collected_datasets:
                                    copy_kwds["element_destination"] = hist
                                hist.add_dataset_collection( content.copy( **copy_kwds ) )
                if current_history in target_histories:
                    refresh_frames = ['history']
                trans.sa_session.flush()
                hist_names_str = ", ".join( ['<a href="%s" target="_top">%s</a>' %
                                             ( url_for( controller="history", action="switch_to_history",
                                                        hist_id=trans.security.encode_id( hist.id ) ), escape(hist.name) )
                                             for hist in target_histories ] )
                num_source = len( source_content_ids ) - invalid_contents
                num_target = len(target_histories)
                done_msg = "%i %s copied to %i %s: %s." % (num_source, inflector.cond_plural(num_source, "dataset"), num_target, inflector.cond_plural(num_target, "history"), hist_names_str )
                trans.sa_session.refresh( history )
        source_contents = history.active_contents
        target_histories = [history]
        if user:
            target_histories = user.active_histories
        return trans.fill_template( "/dataset/copy_view.mako",
                                    source_history=history,
                                    current_history=current_history,
                                    source_content_ids=source_content_ids,
                                    target_history_id=target_history_id,
                                    target_history_ids=target_history_ids,
                                    source_contents=source_contents,
                                    target_histories=target_histories,
                                    new_history_name=new_history_name,
                                    done_msg=done_msg,
                                    error_msg=error_msg,
                                    refresh_frames=refresh_frames )
    def _copy_datasets( self, trans, dataset_ids, target_histories, imported=False ):
        """Copy the given (encoded) dataset ids into each target history.

        When ``imported`` is True, copies are renamed with an "imported: "
        prefix. Returns a ( status, message ) tuple using the module-level
        ERROR/SUCCESS constants.
        """
        user = trans.get_user()
        done_msg = error_msg = ""
        invalid_datasets = 0
        if not dataset_ids or not target_histories:
            error_msg = "You must provide both source datasets and target histories."
        else:
            # User must own target histories to copy datasets to them.
            for history in target_histories:
                if user != history.user:
                    error_msg = error_msg + "You do not have permission to add datasets to %i requested histories.  " % ( len( target_histories ) )
            for dataset_id in dataset_ids:
                decoded_id = self.decode_id( dataset_id )
                data = self.hda_manager.get_accessible( decoded_id, trans.user )
                data = self.hda_manager.error_if_uploading( data )
                if data is None:
                    error_msg = error_msg + "You tried to copy a dataset that does not exist or that you do not have access to.  "
                    invalid_datasets += 1
                else:
                    for hist in target_histories:
                        dataset_copy = data.copy( copy_children=True )
                        if imported:
                            dataset_copy.name = "imported: " + dataset_copy.name
                        hist.add_dataset( dataset_copy )
            trans.sa_session.flush()
            num_datasets_copied = len( dataset_ids ) - invalid_datasets
            done_msg = "%i dataset%s copied to %i histor%s." % \
                ( num_datasets_copied, iff( num_datasets_copied == 1, "", "s"), len( target_histories ), iff( len( target_histories ) == 1, "y", "ies") )
            # NOTE(review): `history` here is the last value of the loop
            # variable above — presumably refreshing the final target history;
            # confirm this is intended rather than a stale-variable bug.
            trans.sa_session.refresh( history )
        if error_msg != "":
            status = ERROR
            message = error_msg
        else:
            status = SUCCESS
            message = done_msg
        return status, message
| true
| true
|
f704e406d6064c1b07ddb3fa9237f28659c4ec07
| 7,122
|
py
|
Python
|
daal4py/sklearn/neighbors/_classification.py
|
Alexsandruss/daal4py
|
6e5a02d3fd46095585e618edba24fc258e8b0052
|
[
"Apache-2.0"
] | null | null | null |
daal4py/sklearn/neighbors/_classification.py
|
Alexsandruss/daal4py
|
6e5a02d3fd46095585e618edba24fc258e8b0052
|
[
"Apache-2.0"
] | null | null | null |
daal4py/sklearn/neighbors/_classification.py
|
Alexsandruss/daal4py
|
6e5a02d3fd46095585e618edba24fc258e8b0052
|
[
"Apache-2.0"
] | null | null | null |
#===============================================================================
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# daal4py KNN classification scikit-learn-compatible classes
from ._base import NeighborsBase, KNeighborsMixin
from ._base import parse_auto_method, prediction_algorithm
from sklearn.base import ClassifierMixin as BaseClassifierMixin
from .._utils import (
getFPType,
sklearn_check_version,
get_patch_message,
PatchingConditionsChain)
from .._device_offload import support_usm_ndarray
from sklearn.utils.validation import check_array
import numpy as np
from scipy import sparse as sp
import logging
# Import version-appropriate sklearn internals: 0.22+ moved these modules
# under underscore-prefixed names and introduced _deprecate_positional_args;
# for older releases a no-op shim decorator is provided instead.
if sklearn_check_version("0.22"):
    from sklearn.neighbors._classification import KNeighborsClassifier as \
        BaseKNeighborsClassifier
    from sklearn.neighbors._base import _check_weights
    from sklearn.utils.validation import _deprecate_positional_args
else:
    from sklearn.neighbors.classification import KNeighborsClassifier as \
        BaseKNeighborsClassifier
    from sklearn.neighbors.base import _check_weights
    def _deprecate_positional_args(f):
        # No-op stand-in: the real decorator does not exist before 0.22.
        return f
def daal4py_classifier_predict(estimator, X, base_predict):
    """Predict class labels for X, offloading to oneDAL when the input qualifies.

    Falls back to the stock scikit-learn implementation (``base_predict``)
    whenever the oneDAL model is missing, the dtype is unsupported, or the
    input is sparse.
    """
    if sklearn_check_version('1.0'):
        estimator._check_feature_names(X, reset=False)
    X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
    daal_model = getattr(estimator, '_daal_model', None)
    n_features = getattr(estimator, 'n_features_in_', None)
    shape = getattr(X, 'shape', None)
    if n_features and shape and len(shape) > 1 and shape[1] != n_features:
        raise ValueError((f'X has {X.shape[1]} features, '
                          f'but KNNClassifier is expecting '
                          f'{n_features} features as input'))
    try:
        fptype = getFPType(X)
    except ValueError:
        fptype = None
    # Record why (or why not) the oneDAL path is taken; the log must be
    # written before branching.
    patching_status = PatchingConditionsChain(
        "sklearn.neighbors.KNeighborsClassifier.predict")
    dal_ready = patching_status.and_conditions([
        (daal_model is not None, "oneDAL model was not trained."),
        (fptype is not None, "Unable to get dtype."),
        (not sp.issparse(X), "X is sparse. Sparse input is not supported.")])
    patching_status.write_log()
    if not dal_ready:
        return base_predict(estimator, X)
    vote_weights = 'voteUniform' if estimator.weights == 'uniform' else 'voteDistance'
    params = {
        'method': 'defaultDense',
        'k': estimator.n_neighbors,
        'nClasses': len(estimator.classes_),
        'voteWeights': vote_weights,
        'resultsToEvaluate': 'computeClassLabels',
        'resultsToCompute': ''
    }
    method = parse_auto_method(
        estimator, estimator.algorithm, estimator.n_samples_fit_, n_features)
    predict_alg = prediction_algorithm(method, fptype, params)
    raw_labels = predict_alg.compute(X, daal_model).prediction.ravel()
    # Map oneDAL's integer labels back onto the estimator's class values.
    return estimator.classes_.take(np.asarray(raw_labels, dtype=np.intp))
# Assemble a version-appropriate base class: 0.24+ dropped
# SupervisedIntegerMixin, while older releases still require it (imported
# from underscore-prefixed modules on 0.22/0.23, public modules before that).
if sklearn_check_version("0.24"):
    class KNeighborsClassifier_(KNeighborsMixin, BaseClassifierMixin, NeighborsBase):
        @_deprecate_positional_args
        def __init__(self, n_neighbors=5, *,
                     weights='uniform', algorithm='auto', leaf_size=30,
                     p=2, metric='minkowski', metric_params=None, n_jobs=None,
                     **kwargs):
            super().__init__(
                n_neighbors=n_neighbors,
                algorithm=algorithm,
                leaf_size=leaf_size, metric=metric, p=p,
                metric_params=metric_params,
                n_jobs=n_jobs, **kwargs)
            # sklearn 1.0+ validates weights itself, so skip the deprecated helper.
            self.weights = \
                weights if sklearn_check_version("1.0") else _check_weights(weights)
elif sklearn_check_version("0.22"):
    from sklearn.neighbors._base import SupervisedIntegerMixin as \
        BaseSupervisedIntegerMixin
    class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,
                                BaseSupervisedIntegerMixin, BaseClassifierMixin):
        @_deprecate_positional_args
        def __init__(self, n_neighbors=5, *,
                     weights='uniform', algorithm='auto', leaf_size=30,
                     p=2, metric='minkowski', metric_params=None, n_jobs=None,
                     **kwargs):
            super().__init__(
                n_neighbors=n_neighbors,
                algorithm=algorithm,
                leaf_size=leaf_size, metric=metric, p=p,
                metric_params=metric_params,
                n_jobs=n_jobs, **kwargs)
            self.weights = _check_weights(weights)
else:
    from sklearn.neighbors.base import SupervisedIntegerMixin as \
        BaseSupervisedIntegerMixin
    class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,
                                BaseSupervisedIntegerMixin, BaseClassifierMixin):
        @_deprecate_positional_args
        def __init__(self, n_neighbors=5, *,
                     weights='uniform', algorithm='auto', leaf_size=30,
                     p=2, metric='minkowski', metric_params=None, n_jobs=None,
                     **kwargs):
            super().__init__(
                n_neighbors=n_neighbors,
                algorithm=algorithm,
                leaf_size=leaf_size, metric=metric, p=p,
                metric_params=metric_params,
                n_jobs=n_jobs, **kwargs)
            self.weights = _check_weights(weights)
class KNeighborsClassifier(KNeighborsClassifier_):
    """k-nearest-neighbors classifier that offloads work to oneDAL (daal4py)
    where supported and falls back to stock scikit-learn otherwise."""
    @_deprecate_positional_args
    def __init__(self, n_neighbors=5, *,
                 weights='uniform', algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, n_jobs=None,
                 **kwargs):
        super().__init__(
            n_neighbors=n_neighbors,
            weights=weights,
            algorithm=algorithm,
            leaf_size=leaf_size, metric=metric, p=p,
            metric_params=metric_params,
            n_jobs=n_jobs, **kwargs)
    @support_usm_ndarray()
    def fit(self, X, y):
        """Fit the k-nearest neighbors classifier from training data X, y."""
        return NeighborsBase._fit(self, X, y)
    @support_usm_ndarray()
    def predict(self, X):
        """Predict class labels for X (oneDAL path when eligible)."""
        return daal4py_classifier_predict(self, X, BaseKNeighborsClassifier.predict)
    @support_usm_ndarray()
    def predict_proba(self, X):
        """Return class probability estimates via stock scikit-learn."""
        if sklearn_check_version('1.0'):
            self._check_feature_names(X, reset=False)
        return BaseKNeighborsClassifier.predict_proba(self, X)
| 40.697143
| 85
| 0.641252
|
from ._base import NeighborsBase, KNeighborsMixin
from ._base import parse_auto_method, prediction_algorithm
from sklearn.base import ClassifierMixin as BaseClassifierMixin
from .._utils import (
getFPType,
sklearn_check_version,
get_patch_message,
PatchingConditionsChain)
from .._device_offload import support_usm_ndarray
from sklearn.utils.validation import check_array
import numpy as np
from scipy import sparse as sp
import logging
if sklearn_check_version("0.22"):
from sklearn.neighbors._classification import KNeighborsClassifier as \
BaseKNeighborsClassifier
from sklearn.neighbors._base import _check_weights
from sklearn.utils.validation import _deprecate_positional_args
else:
from sklearn.neighbors.classification import KNeighborsClassifier as \
BaseKNeighborsClassifier
from sklearn.neighbors.base import _check_weights
def _deprecate_positional_args(f):
return f
def daal4py_classifier_predict(estimator, X, base_predict):
if sklearn_check_version('1.0'):
estimator._check_feature_names(X, reset=False)
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
daal_model = getattr(estimator, '_daal_model', None)
n_features = getattr(estimator, 'n_features_in_', None)
shape = getattr(X, 'shape', None)
if n_features and shape and len(shape) > 1 and shape[1] != n_features:
raise ValueError((f'X has {X.shape[1]} features, '
f'but KNNClassifier is expecting '
f'{n_features} features as input'))
try:
fptype = getFPType(X)
except ValueError:
fptype = None
_patching_status = PatchingConditionsChain(
"sklearn.neighbors.KNeighborsClassifier.predict")
_dal_ready = _patching_status.and_conditions([
(daal_model is not None, "oneDAL model was not trained."),
(fptype is not None, "Unable to get dtype."),
(not sp.issparse(X), "X is sparse. Sparse input is not supported.")])
_patching_status.write_log()
if _dal_ready:
params = {
'method': 'defaultDense',
'k': estimator.n_neighbors,
'nClasses': len(estimator.classes_),
'voteWeights': 'voteUniform'
if estimator.weights == 'uniform' else 'voteDistance',
'resultsToEvaluate': 'computeClassLabels',
'resultsToCompute': ''
}
method = parse_auto_method(
estimator, estimator.algorithm, estimator.n_samples_fit_, n_features)
predict_alg = prediction_algorithm(method, fptype, params)
prediction_result = predict_alg.compute(X, daal_model)
result = estimator.classes_.take(
np.asarray(prediction_result.prediction.ravel(), dtype=np.intp))
else:
result = base_predict(estimator, X)
return result
if sklearn_check_version("0.24"):
class KNeighborsClassifier_(KNeighborsMixin, BaseClassifierMixin, NeighborsBase):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = \
weights if sklearn_check_version("1.0") else _check_weights(weights)
elif sklearn_check_version("0.22"):
from sklearn.neighbors._base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,
BaseSupervisedIntegerMixin, BaseClassifierMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
else:
from sklearn.neighbors.base import SupervisedIntegerMixin as \
BaseSupervisedIntegerMixin
class KNeighborsClassifier_(NeighborsBase, KNeighborsMixin,
BaseSupervisedIntegerMixin, BaseClassifierMixin):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
class KNeighborsClassifier(KNeighborsClassifier_):
@_deprecate_positional_args
def __init__(self, n_neighbors=5, *,
weights='uniform', algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=None,
**kwargs):
super().__init__(
n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params,
n_jobs=n_jobs, **kwargs)
@support_usm_ndarray()
def fit(self, X, y):
return NeighborsBase._fit(self, X, y)
@support_usm_ndarray()
def predict(self, X):
return daal4py_classifier_predict(self, X, BaseKNeighborsClassifier.predict)
@support_usm_ndarray()
def predict_proba(self, X):
if sklearn_check_version('1.0'):
self._check_feature_names(X, reset=False)
return BaseKNeighborsClassifier.predict_proba(self, X)
| true
| true
|
f704e4bde1c29b09b13e9c055dbbdaff730de2a4
| 383
|
py
|
Python
|
other/dingding/dingtalk/api/rest/OapiAtsChannelAccountAddRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiAtsChannelAccountAddRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiAtsChannelAccountAddRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
'''
Created by auto_sdk on 2020.08.19
'''
from dingtalk.api.base import RestApi
class OapiAtsChannelAccountAddRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.biz_code = None
self.channel_user_identify = None
self.userid = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.ats.channel.account.add'
| 22.529412
| 48
| 0.75718
|
from dingtalk.api.base import RestApi
class OapiAtsChannelAccountAddRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.biz_code = None
self.channel_user_identify = None
self.userid = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.ats.channel.account.add'
| true
| true
|
f704e67f0939a74be44c84c108717c2e056598e9
| 1,634
|
py
|
Python
|
tests/test_web_flask/test_c_route.py
|
RodrigoSierraV/AirBnB_clone_v4
|
314a2f20ea3f1de89255317d0b52a4289b36ccbc
|
[
"MIT"
] | 5
|
2017-09-12T18:23:55.000Z
|
2021-07-27T18:05:37.000Z
|
tests/test_web_flask/test_c_route.py
|
petehwu/AirBnB_clone_v4
|
bf0528c99662285139aa56fe8e752d239e2f7b2a
|
[
"MIT"
] | 8
|
2019-09-27T17:23:04.000Z
|
2019-09-30T23:31:31.000Z
|
tests/test_web_flask/test_c_route.py
|
petehwu/AirBnB_clone_v4
|
bf0528c99662285139aa56fe8e752d239e2f7b2a
|
[
"MIT"
] | 10
|
2017-09-20T18:50:07.000Z
|
2022-02-17T20:58:23.000Z
|
#!/usr/bin/python3
"""
Unit Test for api v1 Flask App
"""
import inspect
import pep8
import web_flask
import unittest
from os import stat
web_flask = __import__('web_flask.2-c_route', globals(), locals(), ['*'])
class TestCRouteDocs(unittest.TestCase):
"""Class for testing Hello Route docs"""
all_funcs = inspect.getmembers(web_flask, inspect.isfunction)
@classmethod
def setUpClass(cls):
print('\n\n.................................')
print('..... Testing Documentation .....')
print('............ C Route ...........')
print('.................................\n\n')
def test_doc_file(self):
"""... documentation for the file"""
actual = web_flask.__doc__
self.assertIsNotNone(actual)
def test_all_function_docs(self):
"""... tests for ALL DOCS for all functions"""
all_functions = TestCRouteDocs.all_funcs
for function in all_functions:
self.assertIsNotNone(function[1].__doc__)
def test_pep8(self):
"""... tests if file conforms to PEP8 Style"""
pep8style = pep8.StyleGuide(quiet=True)
errors = pep8style.check_files(['web_flask/2-c_route.py'])
self.assertEqual(errors.total_errors, 0, errors.messages)
def test_file_is_executable(self):
"""... tests if file has correct permissions so user can execute"""
file_stat = stat('web_flask/2-c_route.py')
permissions = str(oct(file_stat[0]))
actual = int(permissions[5:-2]) >= 5
self.assertTrue(actual)
if __name__ == '__main__':
"""
MAIN TESTS
"""
unittest.main
| 29.709091
| 75
| 0.601591
|
import inspect
import pep8
import web_flask
import unittest
from os import stat
web_flask = __import__('web_flask.2-c_route', globals(), locals(), ['*'])
class TestCRouteDocs(unittest.TestCase):
all_funcs = inspect.getmembers(web_flask, inspect.isfunction)
@classmethod
def setUpClass(cls):
print('\n\n.................................')
print('..... Testing Documentation .....')
print('............ C Route ...........')
print('.................................\n\n')
def test_doc_file(self):
actual = web_flask.__doc__
self.assertIsNotNone(actual)
def test_all_function_docs(self):
all_functions = TestCRouteDocs.all_funcs
for function in all_functions:
self.assertIsNotNone(function[1].__doc__)
def test_pep8(self):
pep8style = pep8.StyleGuide(quiet=True)
errors = pep8style.check_files(['web_flask/2-c_route.py'])
self.assertEqual(errors.total_errors, 0, errors.messages)
def test_file_is_executable(self):
file_stat = stat('web_flask/2-c_route.py')
permissions = str(oct(file_stat[0]))
actual = int(permissions[5:-2]) >= 5
self.assertTrue(actual)
if __name__ == '__main__':
unittest.main
| true
| true
|
f704e6c02a08c79f1e412b550fbc5e2ad285adf4
| 11,273
|
py
|
Python
|
multiqc_clarity/multiqc_clarity.py
|
MultiQC/MultiQC_Clarity
|
9ac177dffa8c9a5a5d57ec0c6739a74a974b8ab3
|
[
"MIT"
] | 1
|
2018-06-18T15:31:10.000Z
|
2018-06-18T15:31:10.000Z
|
multiqc_clarity/multiqc_clarity.py
|
MultiQC/MultiQC_Clarity
|
9ac177dffa8c9a5a5d57ec0c6739a74a974b8ab3
|
[
"MIT"
] | 5
|
2017-03-13T17:21:45.000Z
|
2019-02-14T12:34:11.000Z
|
multiqc_clarity/multiqc_clarity.py
|
MultiQC/MultiQC_Clarity
|
9ac177dffa8c9a5a5d57ec0c6739a74a974b8ab3
|
[
"MIT"
] | 4
|
2017-03-06T09:47:42.000Z
|
2019-02-13T13:19:23.000Z
|
from genologics.lims import Lims
from genologics.config import BASEURI, USERNAME, PASSWORD
from multiqc.utils import report, config
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import table
from collections import OrderedDict
import logging
import re
class MultiQC_clarity_metadata(BaseMultiqcModule):
def __init__(self):
self.log = logging.getLogger('multiqc')
# Check that this plugin hasn't been disabled
if config.kwargs.get('disable_clarity', False) is True:
self.log.info("Skipping MultiQC_Clarity as disabled on command line")
return None
if getattr(config, 'disable_clarity', False) is True:
self.log.debug("Skipping MultiQC_Clarity as specified in config file")
return None
super(MultiQC_clarity_metadata, self).__init__(name='Clarity LIMS', anchor='clarity')
self.intro = '''<p>The <a href="https://github.com/MultiQC/MultiQC_Clarity" target="_blank">MultiQC_Clarity</a>
plugin fetches data from a specified
<a href="https://www.genologics.com/clarity-lims/" target="_blank">Basespace Clarity LIMS</a> instance.</p>'''
self.lims = Lims(BASEURI, USERNAME, PASSWORD)
self.metadata = {}
self.header_metadata = {}
self.general_metadata = {}
self.tab_metadata = {}
self.samples = []
self.schema = getattr(config, 'clarity', None)
if self.schema is None:
self.log.debug("No config found for MultiQC_Clarity")
return None
self.name_edit_regex = self.schema.get("name_edit_regex")
self.get_samples()
self.get_metadata('report_header_info')
self.get_metadata('general_stats')
self.get_metadata('clarity_module')
self.update_multiqc_report()
self.make_sections()
report.modules_output.append(self)
def get_samples(self):
if config.kwargs.get('clarity_project'):
pj = self.lims.get_projects(name=config.kwargs['clarity_project'])
if len(pj) > 1:
self.log.error("Found multiple match projects in Clarity.")
elif len(pj) < 1:
self.log.error("Could not identify project in Clarity.")
else:
self.samples = self.lims.get_samples(projectlimsid=pj[0].id)
else:
names = set()
for x in report.general_stats_data:
names.update(x.keys())
for d in report.saved_raw_data.values():
try:
self.names.update(d.keys())
except AttributeError:
pass
if not config.kwargs.get('clarity_skip_edit_names'):
names = self.edit_names(names)
self.log.info("Looking into Clarity for samples {}".format(", ".join(names)))
found = 0
try:
for name in names:
matching_samples = self.lims.get_samples(name=name)
if not matching_samples:
self.log.error("Could not find a sample matching {0}, skipping.".format(name))
continue
if len(matching_samples) > 1:
self.log.error("Found multiple samples matching {0}, skipping".format(name))
continue
found += 1
self.samples.append(matching_samples[0])
except Exception as e:
self.log.warn("Could not connect to Clarity LIMS: {}".format(e))
return None
self.log.info("Found {} out of {} samples in LIMS.".format(found, len(names)))
def edit_names(self, names):
if self.name_edit_regex:
return self.edit_names_with_regex(names)
edited=[]
for name in names:
if name.endswith("_1") or name.endswith("_2"):
edited.append(name[:-2])
elif name.endswith("_R1") or name.endswith("_R2"):
edited.append(name[:-3])
else:
edited.append(name)
return edited
def edit_names_with_regex(self, names):
edited = []
for name in names:
matches = re.search(re.compile(self.name_edit_regex), name)
edited.append(matches.group(1))
return edited
def flatten_metadata(self, metadata):
for first_level in metadata:
for second_level in metadata[first_level]:
if isinstance(metadata[first_level][second_level], set) or isinstance(metadata[first_level][second_level], list):
metadata[first_level][second_level] = ", ".join(metadata[first_level][second_level])
return metadata
def get_project_metadata(self, udfs):
project_metadata={}
for sample in self.samples:
project_metadata[sample.project.name]={}
for udf in udfs:
if udf in sample.project.udf:
try:
project_metadata[sample.project.name][udf].add(str(sample.project.udf[udf]))
except:
project_metadata[sample.project.name][udf] = set()
project_metadata[sample.project.name][udf].add(str(sample.project.udf[udf]))
return self.flatten_metadata(project_metadata)
def get_sample_metadata(self, udfs):
sample_metadata={}
for sample in self.samples:
sample_metadata[sample.name]={}
for udf in udfs:
if udf in sample.udf:
try:
sample_metadata[sample.name][udf].add(str(sample.udf[udf]))
except:
sample_metadata[sample.name][udf] = set()
sample_metadata[sample.name][udf].add(str(sample.udf[udf]))
return self.flatten_metadata(sample_metadata)
def get_metadata(self, part):
for key in self.schema[part]:
if key == 'Project':
metadata = self.get_project_metadata(self.schema[part]['Project'])
elif key == 'Sample':
metadata =self.get_sample_metadata(self.schema[part]['Sample'])
else:
metadata = self.get_artifact_metadata(self.schema[part])
if part == "report_header_info":
self.header_metadata.update(metadata)
elif part == "general_stats":
self.general_metadata.update(metadata)
else:
self.tab_metadata.update(metadata)
def get_artifact_metadata(self, pt_to_udfs):
artifact_metadata={}
for sample in self.samples:
artifact_metadata[sample.name]={}
for process_type in pt_to_udfs:
if process_type == 'Sample':
continue
if process_type == 'Project':
continue
artifacts = self.lims.get_artifacts(sample_name=sample.name, process_type=process_type)
for udf_name in pt_to_udfs[process_type].get("outputs", []):
values = []
for artifact in artifacts:
if udf_name in artifact.udf:
values.append(str(artifact.udf[udf_name]))
artifact_metadata[sample.name][udf_name]=values
processes = set([art.parent_process for art in artifacts])
inputs=[]
for p in processes:
inputs.extend([art for art in p.all_inputs() if sample.name in [s.name for s in art.samples]])
for udf_name in pt_to_udfs[process_type].get("inputs", []):
values = []
for artifact in inputs:
if udf_name in artifact.udf:
values.append(str(artifact.udf[udf_name]))
artifact_metadata[sample.name][udf_name]=values
return self.flatten_metadata(artifact_metadata)
def update_multiqc_report(self):
if config.report_header_info is None:
config.report_header_info = []
for first_level in self.header_metadata:
d = {}
for key in self.header_metadata[first_level]:
d[key] = self.header_metadata[first_level][key]
config.report_header_info.append(d)
headers = {}
for first_level in self.schema["general_stats"]:
for header in self.schema["general_stats"][first_level]:
headers[header] = {}
if isinstance(self.schema["general_stats"][first_level][header], dict):
for subsubkey, cfg in self.schema["general_stats"][first_level][header].items():
if subsubkey == 'multiply_by':
mby = str(cfg)[:]
headers[header]['modify'] = lambda x: float(x) * float(mby)
else:
headers[header][subsubkey] = cfg
headers[header]['description'] = headers[header].get('description', '{} - {}'.format(first_level, header))
headers[header]['namespace'] = headers[header].get('namespace', 'Clarity LIMS')
headers[header]['scale'] = headers[header].get('scale', 'YlGn')
report.general_stats_headers.append(headers)
report.general_stats_data.append(self.general_metadata)
def make_sections(self):
headers = OrderedDict()
for first_level in self.tab_metadata:
for header in self.tab_metadata[first_level]:
desc = header
if header not in headers:
headers[header] = {}
for key in self.schema['clarity_module']:
if header in self.schema['clarity_module'][key]:
desc = key
elif isinstance(self.schema['clarity_module'][key], dict):
for subkey, val in self.schema['clarity_module'][key].items():
# print(val)
if val is None:
break
elif header in val:
desc = key
if isinstance(val[header], dict):
for subsubkey, cfg in val[header].items():
if subsubkey == 'multiply_by':
mby = str(cfg)[:]
headers[header]['modify'] = lambda x: float(x) * float(mby)
else:
headers[header][subsubkey] = cfg
headers[header]['namespace'] = headers[header].get('namespace', desc)
headers[header]['title'] = headers[header].get('title', header)
headers[header]['description'] = headers[header].get('description', header)
self.intro += table.plot(self.tab_metadata, headers)
| 42.863118
| 129
| 0.548213
|
from genologics.lims import Lims
from genologics.config import BASEURI, USERNAME, PASSWORD
from multiqc.utils import report, config
from multiqc.modules.base_module import BaseMultiqcModule
from multiqc.plots import table
from collections import OrderedDict
import logging
import re
class MultiQC_clarity_metadata(BaseMultiqcModule):
def __init__(self):
self.log = logging.getLogger('multiqc')
if config.kwargs.get('disable_clarity', False) is True:
self.log.info("Skipping MultiQC_Clarity as disabled on command line")
return None
if getattr(config, 'disable_clarity', False) is True:
self.log.debug("Skipping MultiQC_Clarity as specified in config file")
return None
super(MultiQC_clarity_metadata, self).__init__(name='Clarity LIMS', anchor='clarity')
self.intro = '''<p>The <a href="https://github.com/MultiQC/MultiQC_Clarity" target="_blank">MultiQC_Clarity</a>
plugin fetches data from a specified
<a href="https://www.genologics.com/clarity-lims/" target="_blank">Basespace Clarity LIMS</a> instance.</p>'''
self.lims = Lims(BASEURI, USERNAME, PASSWORD)
self.metadata = {}
self.header_metadata = {}
self.general_metadata = {}
self.tab_metadata = {}
self.samples = []
self.schema = getattr(config, 'clarity', None)
if self.schema is None:
self.log.debug("No config found for MultiQC_Clarity")
return None
self.name_edit_regex = self.schema.get("name_edit_regex")
self.get_samples()
self.get_metadata('report_header_info')
self.get_metadata('general_stats')
self.get_metadata('clarity_module')
self.update_multiqc_report()
self.make_sections()
report.modules_output.append(self)
def get_samples(self):
if config.kwargs.get('clarity_project'):
pj = self.lims.get_projects(name=config.kwargs['clarity_project'])
if len(pj) > 1:
self.log.error("Found multiple match projects in Clarity.")
elif len(pj) < 1:
self.log.error("Could not identify project in Clarity.")
else:
self.samples = self.lims.get_samples(projectlimsid=pj[0].id)
else:
names = set()
for x in report.general_stats_data:
names.update(x.keys())
for d in report.saved_raw_data.values():
try:
self.names.update(d.keys())
except AttributeError:
pass
if not config.kwargs.get('clarity_skip_edit_names'):
names = self.edit_names(names)
self.log.info("Looking into Clarity for samples {}".format(", ".join(names)))
found = 0
try:
for name in names:
matching_samples = self.lims.get_samples(name=name)
if not matching_samples:
self.log.error("Could not find a sample matching {0}, skipping.".format(name))
continue
if len(matching_samples) > 1:
self.log.error("Found multiple samples matching {0}, skipping".format(name))
continue
found += 1
self.samples.append(matching_samples[0])
except Exception as e:
self.log.warn("Could not connect to Clarity LIMS: {}".format(e))
return None
self.log.info("Found {} out of {} samples in LIMS.".format(found, len(names)))
def edit_names(self, names):
if self.name_edit_regex:
return self.edit_names_with_regex(names)
edited=[]
for name in names:
if name.endswith("_1") or name.endswith("_2"):
edited.append(name[:-2])
elif name.endswith("_R1") or name.endswith("_R2"):
edited.append(name[:-3])
else:
edited.append(name)
return edited
def edit_names_with_regex(self, names):
edited = []
for name in names:
matches = re.search(re.compile(self.name_edit_regex), name)
edited.append(matches.group(1))
return edited
def flatten_metadata(self, metadata):
for first_level in metadata:
for second_level in metadata[first_level]:
if isinstance(metadata[first_level][second_level], set) or isinstance(metadata[first_level][second_level], list):
metadata[first_level][second_level] = ", ".join(metadata[first_level][second_level])
return metadata
def get_project_metadata(self, udfs):
project_metadata={}
for sample in self.samples:
project_metadata[sample.project.name]={}
for udf in udfs:
if udf in sample.project.udf:
try:
project_metadata[sample.project.name][udf].add(str(sample.project.udf[udf]))
except:
project_metadata[sample.project.name][udf] = set()
project_metadata[sample.project.name][udf].add(str(sample.project.udf[udf]))
return self.flatten_metadata(project_metadata)
def get_sample_metadata(self, udfs):
sample_metadata={}
for sample in self.samples:
sample_metadata[sample.name]={}
for udf in udfs:
if udf in sample.udf:
try:
sample_metadata[sample.name][udf].add(str(sample.udf[udf]))
except:
sample_metadata[sample.name][udf] = set()
sample_metadata[sample.name][udf].add(str(sample.udf[udf]))
return self.flatten_metadata(sample_metadata)
def get_metadata(self, part):
for key in self.schema[part]:
if key == 'Project':
metadata = self.get_project_metadata(self.schema[part]['Project'])
elif key == 'Sample':
metadata =self.get_sample_metadata(self.schema[part]['Sample'])
else:
metadata = self.get_artifact_metadata(self.schema[part])
if part == "report_header_info":
self.header_metadata.update(metadata)
elif part == "general_stats":
self.general_metadata.update(metadata)
else:
self.tab_metadata.update(metadata)
def get_artifact_metadata(self, pt_to_udfs):
artifact_metadata={}
for sample in self.samples:
artifact_metadata[sample.name]={}
for process_type in pt_to_udfs:
if process_type == 'Sample':
continue
if process_type == 'Project':
continue
artifacts = self.lims.get_artifacts(sample_name=sample.name, process_type=process_type)
for udf_name in pt_to_udfs[process_type].get("outputs", []):
values = []
for artifact in artifacts:
if udf_name in artifact.udf:
values.append(str(artifact.udf[udf_name]))
artifact_metadata[sample.name][udf_name]=values
processes = set([art.parent_process for art in artifacts])
inputs=[]
for p in processes:
inputs.extend([art for art in p.all_inputs() if sample.name in [s.name for s in art.samples]])
for udf_name in pt_to_udfs[process_type].get("inputs", []):
values = []
for artifact in inputs:
if udf_name in artifact.udf:
values.append(str(artifact.udf[udf_name]))
artifact_metadata[sample.name][udf_name]=values
return self.flatten_metadata(artifact_metadata)
def update_multiqc_report(self):
if config.report_header_info is None:
config.report_header_info = []
for first_level in self.header_metadata:
d = {}
for key in self.header_metadata[first_level]:
d[key] = self.header_metadata[first_level][key]
config.report_header_info.append(d)
headers = {}
for first_level in self.schema["general_stats"]:
for header in self.schema["general_stats"][first_level]:
headers[header] = {}
if isinstance(self.schema["general_stats"][first_level][header], dict):
for subsubkey, cfg in self.schema["general_stats"][first_level][header].items():
if subsubkey == 'multiply_by':
mby = str(cfg)[:]
headers[header]['modify'] = lambda x: float(x) * float(mby)
else:
headers[header][subsubkey] = cfg
headers[header]['description'] = headers[header].get('description', '{} - {}'.format(first_level, header))
headers[header]['namespace'] = headers[header].get('namespace', 'Clarity LIMS')
headers[header]['scale'] = headers[header].get('scale', 'YlGn')
report.general_stats_headers.append(headers)
report.general_stats_data.append(self.general_metadata)
def make_sections(self):
headers = OrderedDict()
for first_level in self.tab_metadata:
for header in self.tab_metadata[first_level]:
desc = header
if header not in headers:
headers[header] = {}
for key in self.schema['clarity_module']:
if header in self.schema['clarity_module'][key]:
desc = key
elif isinstance(self.schema['clarity_module'][key], dict):
for subkey, val in self.schema['clarity_module'][key].items():
# print(val)
if val is None:
break
elif header in val:
desc = key
if isinstance(val[header], dict):
for subsubkey, cfg in val[header].items():
if subsubkey == 'multiply_by':
mby = str(cfg)[:]
headers[header]['modify'] = lambda x: float(x) * float(mby)
else:
headers[header][subsubkey] = cfg
headers[header]['namespace'] = headers[header].get('namespace', desc)
headers[header]['title'] = headers[header].get('title', header)
headers[header]['description'] = headers[header].get('description', header)
self.intro += table.plot(self.tab_metadata, headers)
| true
| true
|
f704e7c24f57a177cdaf904a0e365c964a9eeb80
| 1,308
|
py
|
Python
|
setup.py
|
ScyThe1289/cgen
|
e644d790fce3a06457eff30044c31eb549d8e2f8
|
[
"MIT"
] | null | null | null |
setup.py
|
ScyThe1289/cgen
|
e644d790fce3a06457eff30044c31eb549d8e2f8
|
[
"MIT"
] | null | null | null |
setup.py
|
ScyThe1289/cgen
|
e644d790fce3a06457eff30044c31eb549d8e2f8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
with open("README.rst", "rt") as inf:
readme = inf.read()
ver_dic = {}
with open("cgen/version.py") as version_file:
version_file_contents = version_file.read()
exec(compile(version_file_contents, "cgen/version.py", 'exec'), ver_dic)
setup(
name="cgen",
version=ver_dic["VERSION_TEXT"],
description="C/C++ source generation from an AST",
long_description=readme,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
author="Andreas Kloeckner",
author_email="inform@tiker.net",
license="MIT",
url="http://documen.tician.de/cgen/",
packages=["cgen"],
python_requires="~=3.6",
install_requires=[
"pytools>=2015.1.2",
"numpy>=1.6",
])
| 29.727273
| 72
| 0.563456
|
from setuptools import setup
with open("README.rst", "rt") as inf:
readme = inf.read()
ver_dic = {}
with open("cgen/version.py") as version_file:
version_file_contents = version_file.read()
exec(compile(version_file_contents, "cgen/version.py", 'exec'), ver_dic)
setup(
name="cgen",
version=ver_dic["VERSION_TEXT"],
description="C/C++ source generation from an AST",
long_description=readme,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
],
author="Andreas Kloeckner",
author_email="inform@tiker.net",
license="MIT",
url="http://documen.tician.de/cgen/",
packages=["cgen"],
python_requires="~=3.6",
install_requires=[
"pytools>=2015.1.2",
"numpy>=1.6",
])
| true
| true
|
f704e89b073258465eda8a5b1cc588d46c48769f
| 1,044
|
py
|
Python
|
src/robotide/utils/versioncomparator.py
|
ludovicurbain/SWIFT-RIDE
|
ab72df08a57101c433bfa5ad44949d9983e4e611
|
[
"ECL-2.0",
"Apache-2.0"
] | 775
|
2015-01-12T06:54:09.000Z
|
2022-03-25T05:18:05.000Z
|
src/robotide/utils/versioncomparator.py
|
ludovicurbain/SWIFT-RIDE
|
ab72df08a57101c433bfa5ad44949d9983e4e611
|
[
"ECL-2.0",
"Apache-2.0"
] | 2,191
|
2015-05-19T16:49:09.000Z
|
2022-03-28T21:38:34.000Z
|
src/robotide/utils/versioncomparator.py
|
ludovicurbain/SWIFT-RIDE
|
ab72df08a57101c433bfa5ad44949d9983e4e611
|
[
"ECL-2.0",
"Apache-2.0"
] | 382
|
2015-01-24T08:41:44.000Z
|
2022-03-13T10:14:20.000Z
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkg_resources import parse_version
def cmp_versions(version1, version2):
if version1 is None:
if version2 is None:
return 0
else:
return -1
if version2 is None:
return 1
if parse_version(version1) == parse_version(version2):
return 0
elif parse_version(version1) > parse_version(version2):
return 1
return -1
| 32.625
| 75
| 0.703065
|
from pkg_resources import parse_version
def cmp_versions(version1, version2):
if version1 is None:
if version2 is None:
return 0
else:
return -1
if version2 is None:
return 1
if parse_version(version1) == parse_version(version2):
return 0
elif parse_version(version1) > parse_version(version2):
return 1
return -1
| true
| true
|
f704e942f03f28034ae8717e0065554e929f93f4
| 4,009
|
py
|
Python
|
zer018_perception/lane_vision/src/homography.py
|
ZEROSNU/zer018
|
c469cf22fa1fdf731b02c79f296ee96d35dccb25
|
[
"MIT"
] | 1
|
2019-01-05T07:01:46.000Z
|
2019-01-05T07:01:46.000Z
|
zer018_perception/lane_vision/src/homography.py
|
ZEROSNU/zer018
|
c469cf22fa1fdf731b02c79f296ee96d35dccb25
|
[
"MIT"
] | null | null | null |
zer018_perception/lane_vision/src/homography.py
|
ZEROSNU/zer018
|
c469cf22fa1fdf731b02c79f296ee96d35dccb25
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import time
'''
TEST FILE using 1000, 1000 output image.
Actual code will have an output image of 200,200, which also means a different homography
'''
#recalculated homography
# homography_front = np.array([[3.12570133882145e-05, 0.000286172662353515, -0.680179732686621],
# [0.000967963380750764,-0.00220708598330688,-0.733040431894039],
# [9.31003590466217e-08,-7.28146482745869e-06,-0.00116847956395974]])
# homography_left = np.array([[-0.000710128671370178, 6.65307627276203e-05, -0.0692689783742822],
# [0.000516381003921171, -0.00181011134155597, -0.997595526929844],
# [-2.51074118905076e-08, -6.83854860981181e-06, -0.000959883483255739]])
# homography_right = np.array([[-0.000926831714971124,-7.57332958427531e-05,0.994215703860414],
# [-0.000923137149283102,0.00327126641381199,0.107337667969103],
# [-2.77833313194565e-07,1.03110471009649e-05,0.00115801865068319]])
# Original
homography_front = np.array([[4.62227601649053e-05, 0.000243520884225642, -0.678748083960862],
[0.000969465596108860, -0.00207033488113324, -0.734366621126640],
[1.58512860546350e-07, -6.83048800828728e-06, -0.00119023476366804]])
homography_left = np.array([[-0.000759672412515488, 2.34075591542924e-05, -0.0699936817773495],
[0.000483107853918350, -0.00189886717269873, -0.997544805245074],
[-1.49265515027449e-07, -7.08702713960990e-06, -0.000910631508297557]])
homography_right = np.array([[-0.000908962187561903, -3.67579540055241e-05, 0.994837127281325],
[-0.000886484342219692, 0.00317263543314027, 0.101420799019439],
[-1.14460320494404e-07, 9.99234254412552e-06, 0.00111021419224332]])
#LARGER RANGE OF VIEW
translation = np.array([[1, 0, 0],[0,1,100],[0,0,1]])
def warp_image(image, homography):
im_out = cv2.warpPerspective(image, np.matmul(translation,homography), (600, 800))
# cv2.imshow('warped', im_out)
# cv2.waitKey(0)
#cv2.imshow('image', im_out)
return im_out
def left_hom(image):
im_out = cv2.warp
# Create mask of front image. im_mask indicates black pixel area
def find_mask(image):
black_range1 = np.array([0,0,0])
im_mask = (cv2.inRange(image, black_range1, black_range1)).astype('bool')
im_mask_inv = (1-im_mask).astype('bool')
im_mask_inv = np.dstack((im_mask_inv, im_mask_inv, im_mask_inv))
im_mask= np.dstack((im_mask, im_mask, im_mask))
return im_mask_inv, im_mask
if __name__ == "__main__":
count = 0
while True:
img_front = cv2.imread('../collected_images/5/center/'+ str(count)+'.jpg')
img_left = cv2.imread('../collected_images/5/left/'+ str(count)+'.jpg')
img_right = cv2.imread('../collected_images/5/right/'+ str(count)+'.jpg')
im_front = warp_image(img_front, homography_front).astype('uint8')
im_left = warp_image(img_left, homography_left).astype('uint8')
im_right = warp_image(img_right, homography_right).astype('uint8')
init_time = time.time()
im_side = im_left + im_right
im_mask_inv, im_mask = find_mask(im_side)
front_masked = np.multiply(im_front, im_mask).astype('uint8')
side_masked = np.multiply(im_side, im_mask_inv).astype('uint8')
print("Masking Time: ", time.time()-init_time)
summed_image = front_masked + side_masked
#Gaussian Blurring?
#summed_image = cv2.GaussianBlur(summed_image, (5,5), 0)
# cv2.imshow('front', front_masked)
# cv2.imshow('left', im_left)
# cv2.imshow('right', im_right)
# cv2.imshow('front', im_front)
cv2.imshow('summed', summed_image)
cv2.imwrite('../collected_images/5/mosaic_full/'+str(count) + '.jpg', summed_image)
#summed_image_cropped = summed_image[200:800, :500, :]
print("Time elapsed: ", (time.time() - init_time))
#cv2.imshow('summed cropped', summed_image_cropped)
count +=1
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
| 40.494949
| 97
| 0.687204
|
import cv2
import numpy as np
import time
homography_front = np.array([[4.62227601649053e-05, 0.000243520884225642, -0.678748083960862],
[0.000969465596108860, -0.00207033488113324, -0.734366621126640],
[1.58512860546350e-07, -6.83048800828728e-06, -0.00119023476366804]])
homography_left = np.array([[-0.000759672412515488, 2.34075591542924e-05, -0.0699936817773495],
[0.000483107853918350, -0.00189886717269873, -0.997544805245074],
[-1.49265515027449e-07, -7.08702713960990e-06, -0.000910631508297557]])
homography_right = np.array([[-0.000908962187561903, -3.67579540055241e-05, 0.994837127281325],
[-0.000886484342219692, 0.00317263543314027, 0.101420799019439],
[-1.14460320494404e-07, 9.99234254412552e-06, 0.00111021419224332]])
translation = np.array([[1, 0, 0],[0,1,100],[0,0,1]])
def warp_image(image, homography):
im_out = cv2.warpPerspective(image, np.matmul(translation,homography), (600, 800))
return im_out
def left_hom(image):
im_out = cv2.warp
def find_mask(image):
black_range1 = np.array([0,0,0])
im_mask = (cv2.inRange(image, black_range1, black_range1)).astype('bool')
im_mask_inv = (1-im_mask).astype('bool')
im_mask_inv = np.dstack((im_mask_inv, im_mask_inv, im_mask_inv))
im_mask= np.dstack((im_mask, im_mask, im_mask))
return im_mask_inv, im_mask
if __name__ == "__main__":
count = 0
while True:
img_front = cv2.imread('../collected_images/5/center/'+ str(count)+'.jpg')
img_left = cv2.imread('../collected_images/5/left/'+ str(count)+'.jpg')
img_right = cv2.imread('../collected_images/5/right/'+ str(count)+'.jpg')
im_front = warp_image(img_front, homography_front).astype('uint8')
im_left = warp_image(img_left, homography_left).astype('uint8')
im_right = warp_image(img_right, homography_right).astype('uint8')
init_time = time.time()
im_side = im_left + im_right
im_mask_inv, im_mask = find_mask(im_side)
front_masked = np.multiply(im_front, im_mask).astype('uint8')
side_masked = np.multiply(im_side, im_mask_inv).astype('uint8')
print("Masking Time: ", time.time()-init_time)
summed_image = front_masked + side_masked
cv2.imshow('summed', summed_image)
cv2.imwrite('../collected_images/5/mosaic_full/'+str(count) + '.jpg', summed_image)
print("Time elapsed: ", (time.time() - init_time))
count +=1
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
| true
| true
|
f704e9b9373cb103702b97b1698ac5e81ca15084
| 2,543
|
py
|
Python
|
app/forms/user.py
|
Kbman99/Flask-Startup
|
e3fea9c7e16a650abd9768ea1e9aee845dcecfda
|
[
"MIT"
] | null | null | null |
app/forms/user.py
|
Kbman99/Flask-Startup
|
e3fea9c7e16a650abd9768ea1e9aee845dcecfda
|
[
"MIT"
] | null | null | null |
app/forms/user.py
|
Kbman99/Flask-Startup
|
e3fea9c7e16a650abd9768ea1e9aee845dcecfda
|
[
"MIT"
] | null | null | null |
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import (Required, Length, Email, ValidationError,
EqualTo)
from app.models import User
class Unique(object):
'''
Custom validator to check an object's attribute
is unique. For example users should not be able
to create an account if the account's email
address is already in the database. This class
supposes you are using SQLAlchemy to query the
database.
'''
def __init__(self, model, field, message):
self.model = model
self.field = field
self.message = message
def __call__(self, form, field):
check = self.model.query.filter(self.field == field.data).first()
if check:
raise ValidationError(self.message)
class Forgot(Form):
''' User forgot password form. '''
email = TextField(validators=[Required(), Email()],
description='Email address')
class Resend(Form):
''' User forgot password form. '''
email = TextField(validators=[Required(), Email()],
description='Email address')
class Reset(Form):
''' User reset password form. '''
password = PasswordField(validators=[
Required(), Length(min=6),
EqualTo('confirm', message='Passwords must match.')
], description='Password')
confirm = PasswordField(description='Confirm password')
class Login(Form):
''' User login form. '''
email = TextField(validators=[Required(), Email()],
description='Email address')
password = PasswordField(validators=[Required()],
description='Password')
class SignUp(Form):
''' User sign up form. '''
first_name = TextField(validators=[Required(), Length(min=2)],
description='Name')
last_name = TextField(validators=[Required(), Length(min=2)],
description='Surname')
email = TextField(validators=[Required(), Email(),
Unique(User, User.email,
'This email address is ' +
'already linked to an account.')],
description='Email address')
password = PasswordField(validators=[
Required(), Length(min=6),
EqualTo('confirm', message='Passwords must match.')
], description='Password')
confirm = PasswordField(description='Confirm password')
| 29.917647
| 75
| 0.594573
|
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import (Required, Length, Email, ValidationError,
EqualTo)
from app.models import User
class Unique(object):
def __init__(self, model, field, message):
self.model = model
self.field = field
self.message = message
def __call__(self, form, field):
check = self.model.query.filter(self.field == field.data).first()
if check:
raise ValidationError(self.message)
class Forgot(Form):
email = TextField(validators=[Required(), Email()],
description='Email address')
class Resend(Form):
email = TextField(validators=[Required(), Email()],
description='Email address')
class Reset(Form):
password = PasswordField(validators=[
Required(), Length(min=6),
EqualTo('confirm', message='Passwords must match.')
], description='Password')
confirm = PasswordField(description='Confirm password')
class Login(Form):
email = TextField(validators=[Required(), Email()],
description='Email address')
password = PasswordField(validators=[Required()],
description='Password')
class SignUp(Form):
first_name = TextField(validators=[Required(), Length(min=2)],
description='Name')
last_name = TextField(validators=[Required(), Length(min=2)],
description='Surname')
email = TextField(validators=[Required(), Email(),
Unique(User, User.email,
'This email address is ' +
'already linked to an account.')],
description='Email address')
password = PasswordField(validators=[
Required(), Length(min=6),
EqualTo('confirm', message='Passwords must match.')
], description='Password')
confirm = PasswordField(description='Confirm password')
| true
| true
|
f704e9eac8de03f01fa77d9579930caec71292b1
| 22
|
py
|
Python
|
luvio/externals/domain_api/api.py
|
nguyenhailong253/luvio-server
|
8e75bea4171fc2367cc6d7ebd5a19382932840d5
|
[
"MIT"
] | null | null | null |
luvio/externals/domain_api/api.py
|
nguyenhailong253/luvio-server
|
8e75bea4171fc2367cc6d7ebd5a19382932840d5
|
[
"MIT"
] | null | null | null |
luvio/externals/domain_api/api.py
|
nguyenhailong253/luvio-server
|
8e75bea4171fc2367cc6d7ebd5a19382932840d5
|
[
"MIT"
] | null | null | null |
# Call Domain api here
| 22
| 22
| 0.772727
| true
| true
|
|
f704ea950fc187b21bea1b7de6bc90411e2926da
| 845
|
py
|
Python
|
adm/makesedonac.py
|
AndreySV/sedona
|
9fe9e800ba3454b725d96355abee172591ceca1f
|
[
"AFL-3.0"
] | 26
|
2015-02-16T18:35:06.000Z
|
2021-12-22T03:10:32.000Z
|
adm/makesedonac.py
|
AndreySV/sedona
|
9fe9e800ba3454b725d96355abee172591ceca1f
|
[
"AFL-3.0"
] | 40
|
2015-09-29T11:19:16.000Z
|
2021-07-12T02:53:35.000Z
|
adm/makesedonac.py
|
AndreySV/sedona
|
9fe9e800ba3454b725d96355abee172591ceca1f
|
[
"AFL-3.0"
] | 34
|
2015-12-10T02:53:21.000Z
|
2022-01-13T16:28:30.000Z
|
#! /usr/bin/env python3
#
# makesedonac.py
#
# Compile sedonac.jar
#
# Author: Brian Frank
# Creation: 7 Dec 07
#
from __future__ import print_function
import os
import env
import compilejar
depends = [env.sedonaJar]
srcDir = os.path.join(env.src, "sedonac", "src")
jarFile = env.sedonacJar
packages = [
"sedonac",
"sedonac.analysis",
"sedonac.asm",
"sedonac.ast",
"sedonac.gen",
"sedonac.ir",
"sedonac.namespace",
"sedonac.parser",
"sedonac.platform",
"sedonac.scode",
"sedonac.steps",
"sedonac.test",
"sedonac.translate",
"sedonac.util",
]
# Make
def compile():
try:
compilejar.compile(srcDir, depends, packages, jarFile)
except env.BuildError:
print("**")
print("** FAILED [" + jarFile + "]")
print("**")
return 1
# Main
if __name__ == '__main__':
compile()
| 17.244898
| 58
| 0.631953
|
from __future__ import print_function
import os
import env
import compilejar
depends = [env.sedonaJar]
srcDir = os.path.join(env.src, "sedonac", "src")
jarFile = env.sedonacJar
packages = [
"sedonac",
"sedonac.analysis",
"sedonac.asm",
"sedonac.ast",
"sedonac.gen",
"sedonac.ir",
"sedonac.namespace",
"sedonac.parser",
"sedonac.platform",
"sedonac.scode",
"sedonac.steps",
"sedonac.test",
"sedonac.translate",
"sedonac.util",
]
def compile():
try:
compilejar.compile(srcDir, depends, packages, jarFile)
except env.BuildError:
print("**")
print("** FAILED [" + jarFile + "]")
print("**")
return 1
if __name__ == '__main__':
compile()
| true
| true
|
f704eb8f47624f4a65229a7680567e12eb42eb47
| 2,619
|
py
|
Python
|
test/test.py
|
RLSwanepoel/edsa_packages
|
6258dd20c2508c5bcc298b3de86cec9e6a1403d2
|
[
"MIT"
] | null | null | null |
test/test.py
|
RLSwanepoel/edsa_packages
|
6258dd20c2508c5bcc298b3de86cec9e6a1403d2
|
[
"MIT"
] | null | null | null |
test/test.py
|
RLSwanepoel/edsa_packages
|
6258dd20c2508c5bcc298b3de86cec9e6a1403d2
|
[
"MIT"
] | null | null | null |
from edsa_packages import recursion, sorting
#Recursion tests
def test_sum_array():
'''
Make sure sum_array works
'''
assert recursion.sum_array([8, 3, 2, 7, 4]) == 24, 'incorrect'
assert recursion.sum_array([5, 7, 8, 8, 6, 3, 4]) == 41, 'incorrect'
assert recursion.sum_array([25, 14, 2, 3, 5]) == 49, 'incorrect'
def test_fibonacci():
'''
Make sure fibonacci works
'''
assert recursion.fibonacci(8) == 22, 'incorrect'
assert recursion.fibonacci(10) == 55, 'incorrect'
assert recursion.fibonacci(5) == 5, 'incorrect'
def test_factorial():
'''
Make sure factorial works
'''
assert recursion.factorial(4) == 24, 'incorrect'
assert recursion.factorial(8) == 40320, 'incorrect'
assert recursion.factorial(3) == 6, 'incorrect'
def test_reverse():
'''
Make sure reverse works
'''
assert recursion.reverse('apple') == 'elppa', 'incorrect'
assert recursion.reverse('test') == 'tset', 'incorrect'
assert recursion.reverse('peanut') == 'tunaep', 'incorrect'
#Sorting tests
def test_bubble_sort():
'''
Make sure bubble_sort works
'''
assert sorting.bubble_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.bubble_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.bubble_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
def test_merge_sort():
'''
Make sure merge_sort works
'''
assert sorting.merge_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.merge_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.merge_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
def test_quick_sort():
'''
Make sure quick_sort works
'''
assert sorting.quick_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.quick_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.quick_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
| 42.241935
| 175
| 0.613975
|
from edsa_packages import recursion, sorting
def test_sum_array():
assert recursion.sum_array([8, 3, 2, 7, 4]) == 24, 'incorrect'
assert recursion.sum_array([5, 7, 8, 8, 6, 3, 4]) == 41, 'incorrect'
assert recursion.sum_array([25, 14, 2, 3, 5]) == 49, 'incorrect'
def test_fibonacci():
assert recursion.fibonacci(8) == 22, 'incorrect'
assert recursion.fibonacci(10) == 55, 'incorrect'
assert recursion.fibonacci(5) == 5, 'incorrect'
def test_factorial():
assert recursion.factorial(4) == 24, 'incorrect'
assert recursion.factorial(8) == 40320, 'incorrect'
assert recursion.factorial(3) == 6, 'incorrect'
def test_reverse():
assert recursion.reverse('apple') == 'elppa', 'incorrect'
assert recursion.reverse('test') == 'tset', 'incorrect'
assert recursion.reverse('peanut') == 'tunaep', 'incorrect'
def test_bubble_sort():
assert sorting.bubble_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.bubble_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.bubble_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
def test_merge_sort():
assert sorting.merge_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.merge_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.merge_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
def test_quick_sort():
assert sorting.quick_sort(['apple', 'pear', 'orange', 'pineapple', 'strawberry', 'lemon']) == ['apple', 'lemon', 'orange', 'pear', 'pineapple', 'strawberry'], 'incorrect'
assert sorting.quick_sort(['horse', 'cat', 'aardvark', 'dog', 'fish', 'bird']) == ['aardvark', 'bird', 'cat', 'dog', 'fish', 'horse'], 'incorrect'
assert sorting.quick_sort(['Ford', 'Mitsubishi', 'BMW', 'VW']) == ['BMW', 'Ford', 'Mitsubishi', 'VW'], 'incorrect'
| true
| true
|
f704ec3011295133131ca2780e725d89200d860c
| 6,746
|
py
|
Python
|
bin/3rdparty/awscli/customizations/ec2/bundleinstance.py
|
gayatrisingh31/grand_central
|
eae635d865549b8002a42d051d9af69e8688e129
|
[
"MIT"
] | 36
|
2019-11-06T20:49:07.000Z
|
2021-07-07T02:26:52.000Z
|
bin/3rdparty/awscli/customizations/ec2/bundleinstance.py
|
gayatrisingh31/grand_central
|
eae635d865549b8002a42d051d9af69e8688e129
|
[
"MIT"
] | 21
|
2019-11-10T05:38:06.000Z
|
2022-03-10T15:07:48.000Z
|
bin/3rdparty/awscli/customizations/ec2/bundleinstance.py
|
gayatrisingh31/grand_central
|
eae635d865549b8002a42d051d9af69e8688e129
|
[
"MIT"
] | 7
|
2020-02-13T22:56:46.000Z
|
2022-01-22T05:57:34.000Z
|
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from hashlib import sha1
import hmac
import base64
import datetime
from awscli.compat import six
from awscli.arguments import CustomArgument
logger = logging.getLogger('ec2bundleinstance')
# This customization adds the following scalar parameters to the
# bundle-instance operation:
# --bucket:
BUCKET_DOCS = ('The bucket in which to store the AMI. '
'You can specify a bucket that you already own or '
'a new bucket that Amazon EC2 creates on your behalf. '
'If you specify a bucket that belongs to someone else, '
'Amazon EC2 returns an error.')
# --prefix:
PREFIX_DOCS = ('The prefix for the image component names being stored '
'in Amazon S3.')
# --owner-akid
OWNER_AKID_DOCS = 'The access key ID of the owner of the Amazon S3 bucket.'
# --policy
POLICY_DOCS = (
"An Amazon S3 upload policy that gives "
"Amazon EC2 permission to upload items into Amazon S3 "
"on the user's behalf. If you provide this parameter, "
"you must also provide "
"your secret access key, so we can create a policy "
"signature for you (the secret access key is not passed "
"to Amazon EC2). If you do not provide this parameter, "
"we generate an upload policy for you automatically. "
"For more information about upload policies see the "
"sections about policy construction and signatures in the "
'<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev'
'/HTTPPOSTForms.html">'
'Amazon Simple Storage Service Developer Guide</a>.')
# --owner-sak
OWNER_SAK_DOCS = ('The AWS secret access key for the owner of the '
'Amazon S3 bucket specified in the --bucket '
'parameter. This parameter is required so that a '
'signature can be computed for the policy.')
def _add_params(argument_table, **kwargs):
# Add the scalar parameters and also change the complex storage
# param to not be required so the user doesn't get an error from
# argparse if they only supply scalar params.
storage_arg = argument_table['storage']
storage_arg.required = False
arg = BundleArgument(storage_param='Bucket',
name='bucket',
help_text=BUCKET_DOCS)
argument_table['bucket'] = arg
arg = BundleArgument(storage_param='Prefix',
name='prefix',
help_text=PREFIX_DOCS)
argument_table['prefix'] = arg
arg = BundleArgument(storage_param='AWSAccessKeyId',
name='owner-akid',
help_text=OWNER_AKID_DOCS)
argument_table['owner-akid'] = arg
arg = BundleArgument(storage_param='_SAK',
name='owner-sak',
help_text=OWNER_SAK_DOCS)
argument_table['owner-sak'] = arg
arg = BundleArgument(storage_param='UploadPolicy',
name='policy',
help_text=POLICY_DOCS)
argument_table['policy'] = arg
def _check_args(parsed_args, **kwargs):
# This function checks the parsed args. If the user specified
# the --ip-permissions option with any of the scalar options we
# raise an error.
logger.debug(parsed_args)
arg_dict = vars(parsed_args)
if arg_dict['storage']:
for key in ('bucket', 'prefix', 'owner_akid',
'owner_sak', 'policy'):
if arg_dict[key]:
msg = ('Mixing the --storage option '
'with the simple, scalar options is '
'not recommended.')
raise ValueError(msg)
POLICY = ('{{"expiration": "{expires}",'
'"conditions": ['
'{{"bucket": "{bucket}"}},'
'{{"acl": "ec2-bundle-read"}},'
'["starts-with", "$key", "{prefix}"]'
']}}'
)
def _generate_policy(params):
# Called if there is no policy supplied by the user.
# Creates a policy that provides access for 24 hours.
delta = datetime.timedelta(hours=24)
expires = datetime.datetime.utcnow() + delta
expires_iso = expires.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
policy = POLICY.format(expires=expires_iso,
bucket=params['Bucket'],
prefix=params['Prefix'])
params['UploadPolicy'] = policy
def _generate_signature(params):
# If we have a policy and a sak, create the signature.
policy = params.get('UploadPolicy')
sak = params.get('_SAK')
if policy and sak:
policy = base64.b64encode(six.b(policy)).decode('utf-8')
new_hmac = hmac.new(sak.encode('utf-8'), digestmod=sha1)
new_hmac.update(six.b(policy))
ps = base64.encodestring(new_hmac.digest()).strip().decode('utf-8')
params['UploadPolicySignature'] = ps
del params['_SAK']
def _check_params(params, **kwargs):
# Called just before call but prior to building the params.
# Adds information not supplied by the user.
storage = params['Storage']['S3']
if 'UploadPolicy' not in storage:
_generate_policy(storage)
if 'UploadPolicySignature' not in storage:
_generate_signature(storage)
EVENTS = [
('building-argument-table.ec2.bundle-instance', _add_params),
('operation-args-parsed.ec2.bundle-instance', _check_args),
('before-parameter-build.ec2.BundleInstance', _check_params),
]
def register_bundleinstance(event_handler):
# Register all of the events for customizing BundleInstance
for event, handler in EVENTS:
event_handler.register(event, handler)
class BundleArgument(CustomArgument):
def __init__(self, storage_param, *args, **kwargs):
super(BundleArgument, self).__init__(*args, **kwargs)
self._storage_param = storage_param
def _build_storage(self, params, value):
# Build up the Storage data structure
if 'Storage' not in params:
params['Storage'] = {'S3': {}}
params['Storage']['S3'][self._storage_param] = value
def add_to_params(self, parameters, value):
if value:
self._build_storage(parameters, value)
| 37.270718
| 75
| 0.641121
|
import logging
from hashlib import sha1
import hmac
import base64
import datetime
from awscli.compat import six
from awscli.arguments import CustomArgument
logger = logging.getLogger('ec2bundleinstance')
BUCKET_DOCS = ('The bucket in which to store the AMI. '
'You can specify a bucket that you already own or '
'a new bucket that Amazon EC2 creates on your behalf. '
'If you specify a bucket that belongs to someone else, '
'Amazon EC2 returns an error.')
PREFIX_DOCS = ('The prefix for the image component names being stored '
'in Amazon S3.')
OWNER_AKID_DOCS = 'The access key ID of the owner of the Amazon S3 bucket.'
POLICY_DOCS = (
"An Amazon S3 upload policy that gives "
"Amazon EC2 permission to upload items into Amazon S3 "
"on the user's behalf. If you provide this parameter, "
"you must also provide "
"your secret access key, so we can create a policy "
"signature for you (the secret access key is not passed "
"to Amazon EC2). If you do not provide this parameter, "
"we generate an upload policy for you automatically. "
"For more information about upload policies see the "
"sections about policy construction and signatures in the "
'<a href="http://docs.aws.amazon.com/AmazonS3/latest/dev'
'/HTTPPOSTForms.html">'
'Amazon Simple Storage Service Developer Guide</a>.')
# --owner-sak
OWNER_SAK_DOCS = ('The AWS secret access key for the owner of the '
'Amazon S3 bucket specified in the --bucket '
'parameter. This parameter is required so that a '
'signature can be computed for the policy.')
def _add_params(argument_table, **kwargs):
# Add the scalar parameters and also change the complex storage
# param to not be required so the user doesn't get an error from
storage_arg = argument_table['storage']
storage_arg.required = False
arg = BundleArgument(storage_param='Bucket',
name='bucket',
help_text=BUCKET_DOCS)
argument_table['bucket'] = arg
arg = BundleArgument(storage_param='Prefix',
name='prefix',
help_text=PREFIX_DOCS)
argument_table['prefix'] = arg
arg = BundleArgument(storage_param='AWSAccessKeyId',
name='owner-akid',
help_text=OWNER_AKID_DOCS)
argument_table['owner-akid'] = arg
arg = BundleArgument(storage_param='_SAK',
name='owner-sak',
help_text=OWNER_SAK_DOCS)
argument_table['owner-sak'] = arg
arg = BundleArgument(storage_param='UploadPolicy',
name='policy',
help_text=POLICY_DOCS)
argument_table['policy'] = arg
def _check_args(parsed_args, **kwargs):
logger.debug(parsed_args)
arg_dict = vars(parsed_args)
if arg_dict['storage']:
for key in ('bucket', 'prefix', 'owner_akid',
'owner_sak', 'policy'):
if arg_dict[key]:
msg = ('Mixing the --storage option '
'with the simple, scalar options is '
'not recommended.')
raise ValueError(msg)
POLICY = ('{{"expiration": "{expires}",'
'"conditions": ['
'{{"bucket": "{bucket}"}},'
'{{"acl": "ec2-bundle-read"}},'
'["starts-with", "$key", "{prefix}"]'
']}}'
)
def _generate_policy(params):
delta = datetime.timedelta(hours=24)
expires = datetime.datetime.utcnow() + delta
expires_iso = expires.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
policy = POLICY.format(expires=expires_iso,
bucket=params['Bucket'],
prefix=params['Prefix'])
params['UploadPolicy'] = policy
def _generate_signature(params):
policy = params.get('UploadPolicy')
sak = params.get('_SAK')
if policy and sak:
policy = base64.b64encode(six.b(policy)).decode('utf-8')
new_hmac = hmac.new(sak.encode('utf-8'), digestmod=sha1)
new_hmac.update(six.b(policy))
ps = base64.encodestring(new_hmac.digest()).strip().decode('utf-8')
params['UploadPolicySignature'] = ps
del params['_SAK']
def _check_params(params, **kwargs):
storage = params['Storage']['S3']
if 'UploadPolicy' not in storage:
_generate_policy(storage)
if 'UploadPolicySignature' not in storage:
_generate_signature(storage)
EVENTS = [
('building-argument-table.ec2.bundle-instance', _add_params),
('operation-args-parsed.ec2.bundle-instance', _check_args),
('before-parameter-build.ec2.BundleInstance', _check_params),
]
def register_bundleinstance(event_handler):
for event, handler in EVENTS:
event_handler.register(event, handler)
class BundleArgument(CustomArgument):
def __init__(self, storage_param, *args, **kwargs):
super(BundleArgument, self).__init__(*args, **kwargs)
self._storage_param = storage_param
def _build_storage(self, params, value):
if 'Storage' not in params:
params['Storage'] = {'S3': {}}
params['Storage']['S3'][self._storage_param] = value
def add_to_params(self, parameters, value):
if value:
self._build_storage(parameters, value)
| true
| true
|
f704ecdfca4d6662c111ca95bb581c2a4f67afb6
| 891
|
py
|
Python
|
loadtests/locustfile.py
|
javieraviles/quarkus-github-flow
|
38ecb9cc626b21e621e20cd77a9638780245047c
|
[
"MIT"
] | 8
|
2020-09-04T02:16:18.000Z
|
2022-01-23T18:40:21.000Z
|
loadtests/locustfile.py
|
javieraviles/quarkus-github-flow
|
38ecb9cc626b21e621e20cd77a9638780245047c
|
[
"MIT"
] | null | null | null |
loadtests/locustfile.py
|
javieraviles/quarkus-github-flow
|
38ecb9cc626b21e621e20cd77a9638780245047c
|
[
"MIT"
] | 3
|
2020-12-14T19:32:10.000Z
|
2022-01-17T15:06:20.000Z
|
import datetime
from http import HTTPStatus
from locust import HttpUser, task, between
# This test can be run after installing locust through the cli as "locust --host=http://<deployed_host>:<port>"
# Then url http://localhost:8089/ should be access to start the test.
# Can also be run using no UI mode as "locust --no-web -c <number_of_clients> -r <clients_per_second> --run-time <time e.g. 1h30m> --host=http://<deployed_host>:<port>"
class QuickstartUser(HttpUser):
wait_time = between(1, 2)
@task(1)
def get_developers(self):
r = self.client.get("/developers")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
@task(1)
def get_developers_search(self):
r = self.client.get("/developers/search/james")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
| 40.5
| 168
| 0.699214
|
import datetime
from http import HTTPStatus
from locust import HttpUser, task, between
class QuickstartUser(HttpUser):
wait_time = between(1, 2)
@task(1)
def get_developers(self):
r = self.client.get("/developers")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
@task(1)
def get_developers_search(self):
r = self.client.get("/developers/search/james")
assert r.status_code == HTTPStatus.OK, "Unexpected response code: " + str(r.status_code)
| true
| true
|
f704ed56e109cde4331623e75aa489ba21777f29
| 2,861
|
py
|
Python
|
ipynb_py_convert/__main__.py
|
max-yue/ipynb-py-convert
|
77dc636240560892aedbc8a36532784dee408cfa
|
[
"MIT"
] | null | null | null |
ipynb_py_convert/__main__.py
|
max-yue/ipynb-py-convert
|
77dc636240560892aedbc8a36532784dee408cfa
|
[
"MIT"
] | null | null | null |
ipynb_py_convert/__main__.py
|
max-yue/ipynb-py-convert
|
77dc636240560892aedbc8a36532784dee408cfa
|
[
"MIT"
] | null | null | null |
import json
import sys
from os import path
header_comment = '# %%\n'
def nb2py(notebook):
result = []
cells = notebook['cells']
for cell in cells:
cell_type = cell['cell_type']
if cell_type == 'markdown':
result.append('%s"""\n%s\n"""'%
(header_comment, ''.join(cell['source'])))
if cell_type == 'code':
result.append("%s%s" % (header_comment, ''.join(cell['source'])))
return '\n\n'.join(result)
def py2nb(py_str):
# remove leading header comment
if py_str.startswith(header_comment):
py_str = py_str[len(header_comment):]
cells = []
chunks = py_str.split('\n\n%s' % header_comment)
for chunk in chunks:
cell_type = 'code'
if chunk.startswith("'''"):
chunk = chunk.strip("'\n")
cell_type = 'markdown'
elif chunk.startswith('"""'):
chunk = chunk.strip('"\n')
cell_type = 'markdown'
cell = {
'cell_type': cell_type,
'metadata': {},
'source': chunk.splitlines(True),
}
if cell_type == 'code':
cell.update({'outputs': [], 'execution_count': None})
cells.append(cell)
notebook = {
'cells': cells,
'metadata': {
'anaconda-cloud': {},
'kernelspec': {
'display_name': 'Python 3',
'language': 'python',
'name': 'python3'},
'language_info': {
'codemirror_mode': {'name': 'ipython', 'version': 3},
'file_extension': '.py',
'mimetype': 'text/x-python',
'name': 'python',
'nbconvert_exporter': 'python',
'pygments_lexer': 'ipython3',
'version': '3.6.1'}},
'nbformat': 4,
'nbformat_minor': 1
}
return notebook
def convert(in_file, out_file):
_, in_ext = path.splitext(in_file)
_, out_ext = path.splitext(out_file)
if in_ext == '.ipynb' and out_ext == '.py':
with open(in_file, 'r') as f:
notebook = json.load(f)
py_str = nb2py(notebook)
with open(out_file, 'w') as f:
f.write(py_str)
elif in_ext == '.py' and out_ext == '.ipynb':
with open(in_file, 'r') as f:
py_str = f.read()
notebook = py2nb(py_str)
with open(out_file, 'w') as f:
json.dump(notebook, f, indent=2)
else:
raise(Exception('Extensions must be .ipynb and .py or vice versa'))
def main():
argv = sys.argv
if len(argv) < 3:
print('Usage: ipynb-py-convert in.ipynb out.py')
print('or: ipynb-py-convert in.py out.ipynb')
sys.exit(1)
convert(in_file=argv[1], out_file=argv[2])
if __name__ == '__main__':
main()
| 26.009091
| 77
| 0.510311
|
import json
import sys
from os import path
header_comment = '# %%\n'
def nb2py(notebook):
result = []
cells = notebook['cells']
for cell in cells:
cell_type = cell['cell_type']
if cell_type == 'markdown':
result.append('%s"""\n%s\n"""'%
(header_comment, ''.join(cell['source'])))
if cell_type == 'code':
result.append("%s%s" % (header_comment, ''.join(cell['source'])))
return '\n\n'.join(result)
def py2nb(py_str):
    """Parse a ``# %%``-delimited .py source string into a notebook dict."""
    # Drop the leading header comment so the first chunk carries no prefix.
    if py_str.startswith(header_comment):
        py_str = py_str[len(header_comment):]
    cell_list = []
    for piece in py_str.split('\n\n%s' % header_comment):
        # Triple-quoted chunks were markdown cells; strip the quoting.
        kind = 'code'
        if piece.startswith("'''"):
            piece = piece.strip("'\n")
            kind = 'markdown'
        elif piece.startswith('"""'):
            piece = piece.strip('"\n')
            kind = 'markdown'
        cell = {
            'cell_type': kind,
            'metadata': {},
            'source': piece.splitlines(True),
        }
        if kind == 'code':
            cell['outputs'] = []
            cell['execution_count'] = None
        cell_list.append(cell)
    # Static nbformat-4 metadata matching what this tool has always emitted.
    return {
        'cells': cell_list,
        'metadata': {
            'anaconda-cloud': {},
            'kernelspec': {
                'display_name': 'Python 3',
                'language': 'python',
                'name': 'python3'},
            'language_info': {
                'codemirror_mode': {'name': 'ipython', 'version': 3},
                'file_extension': '.py',
                'mimetype': 'text/x-python',
                'name': 'python',
                'nbconvert_exporter': 'python',
                'pygments_lexer': 'ipython3',
                'version': '3.6.1'}},
        'nbformat': 4,
        'nbformat_minor': 1
    }
def convert(in_file, out_file):
    """Convert in_file to out_file; the direction is chosen by extensions."""
    in_ext = path.splitext(in_file)[1]
    out_ext = path.splitext(out_file)[1]
    if (in_ext, out_ext) == ('.ipynb', '.py'):
        with open(in_file, 'r') as src:
            nb = json.load(src)
        text = nb2py(nb)
        with open(out_file, 'w') as dst:
            dst.write(text)
    elif (in_ext, out_ext) == ('.py', '.ipynb'):
        with open(in_file, 'r') as src:
            text = src.read()
        nb = py2nb(text)
        with open(out_file, 'w') as dst:
            json.dump(nb, dst, indent=2)
    else:
        raise(Exception('Extensions must be .ipynb and .py or vice versa'))
def main():
    """CLI entry point; prints usage and exits non-zero on bad arguments."""
    if len(sys.argv) < 3:
        print('Usage: ipynb-py-convert in.ipynb out.py')
        print('or: ipynb-py-convert in.py out.ipynb')
        sys.exit(1)
    convert(in_file=sys.argv[1], out_file=sys.argv[2])
if __name__ == '__main__':
    main()
| true
| true
|
f704ee4619176809e6126c4c9fe27a817e363dc3
| 580
|
py
|
Python
|
day_02/day_02.py
|
aclima93/AdventOfCode
|
73cc2c194b5ffc27e4d275a3693c148d690bca1f
|
[
"WTFPL"
] | null | null | null |
day_02/day_02.py
|
aclima93/AdventOfCode
|
73cc2c194b5ffc27e4d275a3693c148d690bca1f
|
[
"WTFPL"
] | null | null | null |
day_02/day_02.py
|
aclima93/AdventOfCode
|
73cc2c194b5ffc27e4d275a3693c148d690bca1f
|
[
"WTFPL"
] | null | null | null |
import sys
# Advent of Code 2015, day 2: total wrapping paper and ribbon needed.
# Each input line holds a present's dimensions formatted as "LxWxH".
# Fix: the input file was previously opened and never closed.
with open(sys.argv[1]) as input_file:
    input_lines = input_file.readlines()

total_wrapping = 0
total_ribbon = 0
for line in input_lines:
    l, w, h = line.split("x")
    l = int(l)
    w = int(w)
    h = int(h)

    # The two smallest sides bound both the paper slack and the ribbon loop.
    dimensions = [l, w, h]
    min_1 = min(dimensions)
    dimensions.remove(min_1)
    min_2 = min(dimensions)

    # Box surface area plus slack (area of the smallest face).
    total_wrapping += (2 * l * w) + (2 * w * h) + (2 * h * l) + (min_1 * min_2)
    # Smallest face perimeter plus the bow (volume of the box).
    total_ribbon += ((min_1 * 2) + (min_2 * 2)) + (l * w * h)

# first half
print("total_wrapping", total_wrapping)
# second half
print("total_ribbon", total_ribbon)
| 20
| 79
| 0.6
|
import sys
# Advent of Code 2015, day 2: sum up paper and ribbon over all presents.
input_file = open(sys.argv[1])
input_lines = input_file.readlines()

total_wrapping = 0
total_ribbon = 0
for line in input_lines:
    l, w, h = (int(part) for part in line.split("x"))
    # Sorting yields the two smallest sides directly.
    smallest, second = sorted((l, w, h))[:2]
    total_wrapping += 2 * (l * w + w * h + h * l) + smallest * second
    total_ribbon += 2 * (smallest + second) + l * w * h
print("total_wrapping", total_wrapping)
print("total_ribbon", total_ribbon)
| true
| true
|
f704ee9eba83abf2786c8b03afb6216b6afd600a
| 8,786
|
py
|
Python
|
responder/models.py
|
repodevs/responder
|
4d15dbc4654038130f43bff48f51627dfd4b5df7
|
[
"Apache-2.0"
] | null | null | null |
responder/models.py
|
repodevs/responder
|
4d15dbc4654038130f43bff48f51627dfd4b5df7
|
[
"Apache-2.0"
] | null | null | null |
responder/models.py
|
repodevs/responder
|
4d15dbc4654038130f43bff48f51627dfd4b5df7
|
[
"Apache-2.0"
] | null | null | null |
import io
import json
import gzip
from base64 import b64decode
from http.cookies import SimpleCookie
import chardet
import rfc3986
import graphene
import yaml
from requests.structures import CaseInsensitiveDict
from requests.cookies import RequestsCookieJar
from starlette.datastructures import MutableHeaders
from starlette.requests import Request as StarletteRequest
from starlette.responses import Response as StarletteResponse
from urllib.parse import parse_qs
from .status_codes import HTTP_200
from .statics import DEFAULT_ENCODING
class QueryDict(dict):
    """Dict over parsed query-string data.

    Storage keeps the full list of values per key (as produced by
    ``parse_qs``); plain lookups return the *last* submitted value.
    """

    def __init__(self, query_string):
        self.update(parse_qs(query_string))

    def __getitem__(self, key):
        """Return the last value for ``key`` ([] for an empty list);
        raise KeyError when the key is absent."""
        values = super().__getitem__(key)
        if not values:
            return []
        return values[-1]

    def get(self, key, default=None):
        """Return the last value for ``key``; ``default`` when the key is
        missing or its value list is empty."""
        try:
            value = self[key]
        except KeyError:
            return default
        return default if value == [] else value

    def _get_list(self, key, default=None, force_list=False):
        """Return the stored value list for ``key``; a fresh copy when
        ``force_list`` is set; ``default`` (or []) when missing."""
        try:
            stored = super().__getitem__(key)
        except KeyError:
            return [] if default is None else default
        if force_list:
            stored = list(stored) if stored is not None else None
        return stored

    def get_list(self, key, default=None):
        """Return a copy of the value list for ``key`` (or ``default``)."""
        return self._get_list(key, default, force_list=True)

    def items(self):
        """Yield (key, last-value) pairs."""
        for key in self:
            yield key, self[key]

    def items_list(self):
        """Yield (key, value-list) pairs."""
        yield from super().items()
class Request:
    """An incoming HTTP request, wrapping a Starlette request object."""

    __slots__ = ["_starlette", "formats", "_headers", "_encoding", "api", "_content"]

    def __init__(self, scope, receive, api=None):
        self._starlette = StarletteRequest(scope, receive)
        self.formats = None
        self._encoding = None
        self.api = api
        self._content = None

        # Copy the Starlette headers once into a case-insensitive mapping so
        # later lookups do not depend on header-name casing.
        headers = CaseInsensitiveDict()
        for key, value in self._starlette.headers.items():
            headers[key] = value

        self._headers = headers

    @property
    def session(self):
        """The session data, in dict form, from the Request."""
        # Fix: use the configured cookie name for the membership test too.
        # The old code tested the hard-coded "Responder-Session" name but
        # then read ``self.api.session_cookie``, so a custom cookie name
        # silently lost the session. Also guard against ``api is None``.
        if self.api is not None and self.api.session_cookie in self.cookies:
            data = self.cookies[self.api.session_cookie]
            data = self.api._signer.unsign(data)
            data = b64decode(data)
            return json.loads(data)
        return {}

    @property
    def headers(self):
        """A case-insensitive dictionary, containing all headers sent in the Request."""
        return self._headers

    @property
    def mimetype(self):
        """The Content-Type header value, or '' when absent."""
        return self.headers.get("Content-Type", "")

    @property
    def method(self):
        """The incoming HTTP method used for the request, lower-cased."""
        return self._starlette.method.lower()

    @property
    def full_url(self):
        """The full URL of the Request, query parameters and all."""
        return str(self._starlette.url)

    @property
    def url(self):
        """The parsed URL of the Request."""
        return rfc3986.urlparse(self.full_url)

    @property
    def cookies(self):
        """The cookies sent in the Request, as a dictionary."""
        cookies = RequestsCookieJar()
        cookie_header = self.headers.get("Cookie", "")

        bc = SimpleCookie(cookie_header)
        for k, v in bc.items():
            cookies[k] = v

        return cookies.get_dict()

    @property
    def params(self):
        """A dictionary of the parsed query parameters used for the Request."""
        try:
            return QueryDict(self.url.query)
        except AttributeError:
            return QueryDict({})

    @property
    async def encoding(self):
        """The encoding of the Request's body. Can be set, manually. Must be awaited."""
        # Use the user-set encoding first.
        if self._encoding:
            return self._encoding

        # Then try what's declared by the Request. Fix: the old code
        # returned the *un-awaited* ``declared_encoding`` coroutine here
        # instead of its value.
        declared = await self.declared_encoding
        if declared:
            return declared

        # Then, automatically detect the encoding.
        return await self.apparent_encoding

    @encoding.setter
    def encoding(self, value):
        self._encoding = value

    @property
    async def content(self):
        """The Request body, as bytes. Must be awaited."""
        if not self._content:
            self._content = await self._starlette.body()
        return self._content

    @property
    async def text(self):
        """The Request body, as unicode. Must be awaited."""
        return (await self.content).decode(await self.encoding)

    @property
    async def declared_encoding(self):
        """The encoding declared via the Encoding header, if any. Must be awaited."""
        if "Encoding" in self.headers:
            return self.headers["Encoding"]

    @property
    async def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library. Must be awaited."""
        declared_encoding = await self.declared_encoding

        if declared_encoding:
            return declared_encoding
        else:
            return chardet.detect(await self.content)["encoding"]

    @property
    def is_secure(self):
        """True when the request URL scheme is https."""
        return self.url.scheme == "https"

    def accepts(self, content_type):
        """Returns ``True`` if the incoming Request accepts the given ``content_type``."""
        return content_type in self.headers.get("Accept", [])

    async def media(self, format=None):
        """Renders incoming json/yaml/form data as Python objects. Must be awaited.

        :param format: The name of the format being used. Alternatively accepts a custom callable for the format type.
        """
        if format is None:
            # Fix: parenthesize so the substring test applies to the mimetype.
            # The old ``"yaml" in self.mimetype or ""`` parsed as
            # ``("yaml" in self.mimetype) or ""``, making the ``or ""`` dead.
            format = "yaml" if "yaml" in (self.mimetype or "") else "json"
            format = "form" if "form" in (self.mimetype or "") else format

        if format in self.formats:
            return await self.formats[format](self)
        else:
            return await format(self)
class Response:
    """An outgoing HTTP response under construction for a given Request."""

    __slots__ = [
        "req",
        "status_code",
        "text",
        "content",
        "encoding",
        "media",
        "headers",
        "formats",
        "cookies",
        "session",
    ]

    def __init__(self, req, *, formats):
        self.req = req
        #: The HTTP Status Code to use for the Response.
        self.status_code = None
        #: A unicode representation of the response body.
        self.text = None
        #: A bytes representation of the response body.
        self.content = None
        self.encoding = DEFAULT_ENCODING
        #: A Python object that will be content-negotiated and sent back to
        #: the client. Typically, in JSON formatting.
        self.media = None
        #: A Python dictionary of ``{key: value}``, representing the headers
        #: of the response.
        self.headers = {}
        self.formats = formats
        #: The cookies set in the Response, as a dictionary.
        self.cookies = {}
        #: The cookie-based session data, in dict form, to add to the Response.
        self.session = req.session.copy()

    @property
    async def body(self):
        """Resolve ``(body_bytes, extra_headers)`` from the set fields.

        Raw ``content`` wins, then encoded ``text``, then the first format
        the request accepts; JSON is the final fallback.
        """
        if self.content:
            return self.content, {}
        if self.text:
            return self.text.encode(self.encoding), {"Encoding": self.encoding}
        for fmt in self.formats:
            if self.req.accepts(fmt):
                return (await self.formats[fmt](self, encode=True)), {}
        # Default to JSON anyway.
        return (
            await self.formats["json"](self, encode=True),
            {"Content-Type": "application/json"},
        )

    async def __call__(self, receive, send):
        payload, hdrs = await self.body
        # Explicit headers set on the Response override the derived ones.
        if self.headers:
            hdrs.update(self.headers)
        response = StarletteResponse(
            payload, status_code=self.status_code, headers=hdrs
        )
        await response(receive, send)
| 29.884354
| 121
| 0.600842
|
import io
import json
import gzip
from base64 import b64decode
from http.cookies import SimpleCookie
import chardet
import rfc3986
import graphene
import yaml
from requests.structures import CaseInsensitiveDict
from requests.cookies import RequestsCookieJar
from starlette.datastructures import MutableHeaders
from starlette.requests import Request as StarletteRequest
from starlette.responses import Response as StarletteResponse
from urllib.parse import parse_qs
from .status_codes import HTTP_200
from .statics import DEFAULT_ENCODING
class QueryDict(dict):
def __init__(self, query_string):
self.update(parse_qs(query_string))
def __getitem__(self, key):
list_ = super().__getitem__(key)
try:
return list_[-1]
except IndexError:
return []
def get(self, key, default=None):
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def _get_list(self, key, default=None, force_list=False):
try:
values = super().__getitem__(key)
except KeyError:
if default is None:
return []
return default
else:
if force_list:
values = list(values) if values is not None else None
return values
def get_list(self, key, default=None):
return self._get_list(key, default, force_list=True)
def items(self):
for key in self:
yield key, self[key]
def items_list(self):
yield from super().items()
class Request:
__slots__ = ["_starlette", "formats", "_headers", "_encoding", "api", "_content"]
def __init__(self, scope, receive, api=None):
self._starlette = StarletteRequest(scope, receive)
self.formats = None
self._encoding = None
self.api = api
self._content = None
headers = CaseInsensitiveDict()
for key, value in self._starlette.headers.items():
headers[key] = value
self._headers = headers
@property
def session(self):
if "Responder-Session" in self.cookies:
data = self.cookies[self.api.session_cookie]
data = self.api._signer.unsign(data)
data = b64decode(data)
return json.loads(data)
return {}
@property
def headers(self):
return self._headers
@property
def mimetype(self):
return self.headers.get("Content-Type", "")
@property
def method(self):
return self._starlette.method.lower()
@property
def full_url(self):
return str(self._starlette.url)
@property
def url(self):
return rfc3986.urlparse(self.full_url)
@property
def cookies(self):
cookies = RequestsCookieJar()
cookie_header = self.headers.get("Cookie", "")
bc = SimpleCookie(cookie_header)
for k, v in bc.items():
cookies[k] = v
return cookies.get_dict()
@property
def params(self):
try:
return QueryDict(self.url.query)
except AttributeError:
return QueryDict({})
@property
async def encoding(self):
if self._encoding:
return self._encoding
elif await self.declared_encoding:
return self.declared_encoding
# Then, automatically detect the encoding.
else:
return await self.apparent_encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
@property
async def content(self):
if not self._content:
self._content = await self._starlette.body()
return self._content
@property
async def text(self):
return (await self.content).decode(await self.encoding)
@property
async def declared_encoding(self):
if "Encoding" in self.headers:
return self.headers["Encoding"]
@property
async def apparent_encoding(self):
declared_encoding = await self.declared_encoding
if declared_encoding:
return declared_encoding
else:
return chardet.detect(await self.content)["encoding"]
@property
def is_secure(self):
return self.url.scheme == "https"
def accepts(self, content_type):
return content_type in self.headers.get("Accept", [])
async def media(self, format=None):
if format is None:
format = "yaml" if "yaml" in self.mimetype or "" else "json"
format = "form" if "form" in self.mimetype or "" else format
if format in self.formats:
return await self.formats[format](self)
else:
return await format(self)
class Response:
__slots__ = [
"req",
"status_code",
"text",
"content",
"encoding",
"media",
"headers",
"formats",
"cookies",
"session",
]
def __init__(self, req, *, formats):
self.req = req
self.status_code = None #: The HTTP Status Code to use for the Response.
self.text = None #: A unicode representation of the response body.
self.content = None #: A bytes representation of the response body.
self.encoding = DEFAULT_ENCODING
self.media = (
None
) #: A Python object that will be content-negotiated and sent back to the client. Typically, in JSON formatting.
self.headers = (
{}
) #: A Python dictionary of ``{key: value}``, representing the headers of the response.
self.formats = formats
self.cookies = {} #: The cookies set in the Response, as a dictionary
self.session = (
req.session.copy()
) #: The cookie-based session data, in dict form, to add to the Response.
@property
async def body(self):
if self.content:
return (self.content, {})
if self.text:
return (self.text.encode(self.encoding), {"Encoding": self.encoding})
for format in self.formats:
if self.req.accepts(format):
return (await self.formats[format](self, encode=True)), {}
# Default to JSON anyway.
return (
await self.formats["json"](self, encode=True),
{"Content-Type": "application/json"},
)
async def __call__(self, receive, send):
body, headers = await self.body
if self.headers:
headers.update(self.headers)
response = StarletteResponse(
body, status_code=self.status_code, headers=headers
)
await response(receive, send)
| true
| true
|
f704ef2c8233458ae3f08016c0e7150a60a3e915
| 6,863
|
py
|
Python
|
aae.py
|
anonymous-iclr-2019/acai-iclr-2019
|
233058a8330e8162e199933ee22b8e5fcac22072
|
[
"Apache-2.0"
] | 20
|
2018-12-25T05:05:11.000Z
|
2021-06-21T02:27:53.000Z
|
aae.py
|
anonymous-iclr-2019/acai-iclr-2019
|
233058a8330e8162e199933ee22b8e5fcac22072
|
[
"Apache-2.0"
] | 1
|
2021-02-08T23:40:40.000Z
|
2021-02-08T23:40:40.000Z
|
aae.py
|
anonymous-iclr-2019/acai-iclr-2019
|
233058a8330e8162e199933ee22b8e5fcac22072
|
[
"Apache-2.0"
] | 5
|
2018-11-04T22:11:45.000Z
|
2019-09-11T12:57:15.000Z
|
# Copyright 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Adversarial autoencoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
import tensorflow as tf
from lib import data, layers, train, utils, classifiers, eval
FLAGS = flags.FLAGS
class AAE(train.AE):
    # Adversarial autoencoder: an autoencoder whose latent codes are pushed
    # towards a Gaussian prior by a discriminator trained alongside it.
    def model(self, latent, depth, scales, adversary_lr, disc_layer_sizes):
        """Build the AAE training graph and return the assembled ops.

        latent: channel depth of the latent tensor.
        depth: base depth of the first convolution.
        scales: number of 2x down-scalings between image and latent.
        adversary_lr: learning rate for the discriminator optimizer.
        disc_layer_sizes: comma-separated hidden sizes of the discriminator MLP.
        """
        # Placeholders: input images, one-hot labels, and externally fed latents.
        x = tf.placeholder(tf.float32,
                           [None, self.height, self.width, self.colors], 'x')
        l = tf.placeholder(tf.float32, [None, self.nclass], 'label')
        h = tf.placeholder(
            tf.float32,
            [None, self.height >> scales, self.width >> scales, latent], 'h')

        def encoder(x):
            # Shared conv encoder under the 'ae_enc' scope.
            return layers.encoder(x, scales, depth, latent, 'ae_enc')

        def decoder(h):
            # Shared conv decoder under the 'ae_dec' scope.
            return layers.decoder(h, scales, depth, self.colors, 'ae_dec')

        def discriminator(h):
            # MLP over the flattened latent; reused for real and prior samples.
            with tf.variable_scope('disc', reuse=tf.AUTO_REUSE):
                h = tf.layers.flatten(h)
                for size in [int(s) for s in disc_layer_sizes.split(',')]:
                    h = tf.layers.dense(h, size, tf.nn.leaky_relu)
                return tf.layers.dense(h, 1)

        encode = encoder(x)
        decode = decoder(h)
        ae = decoder(encode)
        loss_ae = tf.losses.mean_squared_error(x, ae)

        # Discriminator sees encoder outputs (label 0) vs Gaussian prior
        # samples (label 1); the encoder is trained to fool it (label 1).
        prior_samples = tf.random_normal(tf.shape(encode), dtype=encode.dtype)
        adversary_logit_latent = discriminator(encode)
        adversary_logit_prior = discriminator(prior_samples)
        adversary_loss_latents = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adversary_logit_latent,
                labels=tf.zeros_like(adversary_logit_latent)))
        adversary_loss_prior = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adversary_logit_prior,
                labels=tf.ones_like(adversary_logit_prior)))
        autoencoder_loss_latents = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=adversary_logit_latent,
                labels=tf.ones_like(adversary_logit_latent)))

        def _accuracy(logits, label):
            # Fraction of logits classified as ``label`` (True/False).
            labels = tf.logical_and(label, tf.ones_like(logits, dtype=bool))
            correct = tf.equal(tf.greater(logits, 0), labels)
            return tf.reduce_mean(tf.to_float(correct))
        latent_accuracy = _accuracy(adversary_logit_latent, False)
        prior_accuracy = _accuracy(adversary_logit_prior, True)
        adversary_accuracy = (latent_accuracy + prior_accuracy)/2
        utils.HookReport.log_tensor(loss_ae, 'loss_ae')
        utils.HookReport.log_tensor(adversary_loss_latents, 'loss_adv_latent')
        utils.HookReport.log_tensor(adversary_loss_prior, 'loss_adv_prior')
        utils.HookReport.log_tensor(autoencoder_loss_latents, 'loss_ae_latent')
        utils.HookReport.log_tensor(adversary_accuracy, 'adversary_accuracy')

        # Linear probe on frozen latents to track how classifiable they are.
        xops = classifiers.single_layer_classifier(
            tf.stop_gradient(encode), l, self.nclass)
        xloss = tf.reduce_mean(xops.loss)
        utils.HookReport.log_tensor(xloss, 'classify_latent')

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        ae_vars = tf.global_variables('ae_')
        disc_vars = tf.global_variables('disc')
        xl_vars = tf.global_variables('single_layer_classifier')
        # Three separate optimizers over disjoint variable sets, grouped below
        # so one train step updates autoencoder, discriminator, and probe.
        with tf.control_dependencies(update_ops):
            train_ae = tf.train.AdamOptimizer(FLAGS.lr).minimize(
                loss_ae + autoencoder_loss_latents, var_list=ae_vars)
            train_disc = tf.train.AdamOptimizer(adversary_lr).minimize(
                adversary_loss_prior + adversary_loss_latents,
                var_list=disc_vars)
            train_xl = tf.train.AdamOptimizer(FLAGS.lr).minimize(
                xloss, tf.train.get_global_step(), var_list=xl_vars)
        ops = train.AEOps(x, h, l, encode, decode, ae,
                          tf.group(train_ae, train_disc, train_xl),
                          classify_latent=xops.output)

        n_interpolations = 16
        n_images_per_interpolation = 16

        def gen_images():
            # Sample grids (reconstructions, interpolations, ...) via py_func.
            return self.make_sample_grid_and_save(
                ops, interpolation=n_interpolations,
                height=n_images_per_interpolation)

        recon, inter, slerp, samples = tf.py_func(
            gen_images, [], [tf.float32]*4)
        tf.summary.image('reconstruction', tf.expand_dims(recon, 0))
        tf.summary.image('interpolation', tf.expand_dims(inter, 0))
        tf.summary.image('slerp', tf.expand_dims(slerp, 0))
        tf.summary.image('samples', tf.expand_dims(samples, 0))

        # Extra interpolation-quality metrics for the synthetic lines dataset.
        if FLAGS.dataset == 'lines32':
            batched = (n_interpolations, 32, n_images_per_interpolation, 32, 1)
            batched_interp = tf.transpose(
                tf.reshape(inter, batched), [0, 2, 1, 3, 4])
            mean_distance, mean_smoothness = tf.py_func(
                eval.line_eval, [batched_interp], [tf.float32, tf.float32])
            tf.summary.scalar('mean_distance', mean_distance)
            tf.summary.scalar('mean_smoothness', mean_smoothness)

        return ops
def main(argv):
    """Entry point: construct the AAE for the chosen dataset and train it."""
    del argv  # Unused.
    dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=FLAGS.batch))
    # Number of 2x down-scalings between the image width and the latent width.
    scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))
    aae = AAE(
        dataset,
        FLAGS.train_dir,
        latent=FLAGS.latent,
        depth=FLAGS.depth,
        scales=scales,
        adversary_lr=FLAGS.adversary_lr,
        disc_layer_sizes=FLAGS.disc_layer_sizes)
    aae.train()
if __name__ == '__main__':
    # Flags are only registered when the module runs as a script.
    flags.DEFINE_integer('depth', 64, 'Depth of first for convolution.')
    flags.DEFINE_integer(
        'latent', 16,
        'Latent space depth, the total latent size is the depth multiplied by '
        'latent_width ** 2.')
    flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')
    flags.DEFINE_float('adversary_lr', 1e-4,
                       'Learning rate for discriminator.')
    flags.DEFINE_string('disc_layer_sizes', '100,100',
                        'Comma-separated list of discriminator layer sizes.')
    app.run(main)
| 40.85119
| 79
| 0.652339
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
import tensorflow as tf
from lib import data, layers, train, utils, classifiers, eval
FLAGS = flags.FLAGS
class AAE(train.AE):
def model(self, latent, depth, scales, adversary_lr, disc_layer_sizes):
x = tf.placeholder(tf.float32,
[None, self.height, self.width, self.colors], 'x')
l = tf.placeholder(tf.float32, [None, self.nclass], 'label')
h = tf.placeholder(
tf.float32,
[None, self.height >> scales, self.width >> scales, latent], 'h')
def encoder(x):
return layers.encoder(x, scales, depth, latent, 'ae_enc')
def decoder(h):
return layers.decoder(h, scales, depth, self.colors, 'ae_dec')
def discriminator(h):
with tf.variable_scope('disc', reuse=tf.AUTO_REUSE):
h = tf.layers.flatten(h)
for size in [int(s) for s in disc_layer_sizes.split(',')]:
h = tf.layers.dense(h, size, tf.nn.leaky_relu)
return tf.layers.dense(h, 1)
encode = encoder(x)
decode = decoder(h)
ae = decoder(encode)
loss_ae = tf.losses.mean_squared_error(x, ae)
prior_samples = tf.random_normal(tf.shape(encode), dtype=encode.dtype)
adversary_logit_latent = discriminator(encode)
adversary_logit_prior = discriminator(prior_samples)
adversary_loss_latents = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_latent,
labels=tf.zeros_like(adversary_logit_latent)))
adversary_loss_prior = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_prior,
labels=tf.ones_like(adversary_logit_prior)))
autoencoder_loss_latents = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=adversary_logit_latent,
labels=tf.ones_like(adversary_logit_latent)))
def _accuracy(logits, label):
labels = tf.logical_and(label, tf.ones_like(logits, dtype=bool))
correct = tf.equal(tf.greater(logits, 0), labels)
return tf.reduce_mean(tf.to_float(correct))
latent_accuracy = _accuracy(adversary_logit_latent, False)
prior_accuracy = _accuracy(adversary_logit_prior, True)
adversary_accuracy = (latent_accuracy + prior_accuracy)/2
utils.HookReport.log_tensor(loss_ae, 'loss_ae')
utils.HookReport.log_tensor(adversary_loss_latents, 'loss_adv_latent')
utils.HookReport.log_tensor(adversary_loss_prior, 'loss_adv_prior')
utils.HookReport.log_tensor(autoencoder_loss_latents, 'loss_ae_latent')
utils.HookReport.log_tensor(adversary_accuracy, 'adversary_accuracy')
xops = classifiers.single_layer_classifier(
tf.stop_gradient(encode), l, self.nclass)
xloss = tf.reduce_mean(xops.loss)
utils.HookReport.log_tensor(xloss, 'classify_latent')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
ae_vars = tf.global_variables('ae_')
disc_vars = tf.global_variables('disc')
xl_vars = tf.global_variables('single_layer_classifier')
with tf.control_dependencies(update_ops):
train_ae = tf.train.AdamOptimizer(FLAGS.lr).minimize(
loss_ae + autoencoder_loss_latents, var_list=ae_vars)
train_disc = tf.train.AdamOptimizer(adversary_lr).minimize(
adversary_loss_prior + adversary_loss_latents,
var_list=disc_vars)
train_xl = tf.train.AdamOptimizer(FLAGS.lr).minimize(
xloss, tf.train.get_global_step(), var_list=xl_vars)
ops = train.AEOps(x, h, l, encode, decode, ae,
tf.group(train_ae, train_disc, train_xl),
classify_latent=xops.output)
n_interpolations = 16
n_images_per_interpolation = 16
def gen_images():
return self.make_sample_grid_and_save(
ops, interpolation=n_interpolations,
height=n_images_per_interpolation)
recon, inter, slerp, samples = tf.py_func(
gen_images, [], [tf.float32]*4)
tf.summary.image('reconstruction', tf.expand_dims(recon, 0))
tf.summary.image('interpolation', tf.expand_dims(inter, 0))
tf.summary.image('slerp', tf.expand_dims(slerp, 0))
tf.summary.image('samples', tf.expand_dims(samples, 0))
if FLAGS.dataset == 'lines32':
batched = (n_interpolations, 32, n_images_per_interpolation, 32, 1)
batched_interp = tf.transpose(
tf.reshape(inter, batched), [0, 2, 1, 3, 4])
mean_distance, mean_smoothness = tf.py_func(
eval.line_eval, [batched_interp], [tf.float32, tf.float32])
tf.summary.scalar('mean_distance', mean_distance)
tf.summary.scalar('mean_smoothness', mean_smoothness)
return ops
def main(argv):
del argv
batch = FLAGS.batch
dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=batch))
scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))
model = AAE(
dataset,
FLAGS.train_dir,
latent=FLAGS.latent,
depth=FLAGS.depth,
scales=scales,
adversary_lr=FLAGS.adversary_lr,
disc_layer_sizes=FLAGS.disc_layer_sizes)
model.train()
if __name__ == '__main__':
flags.DEFINE_integer('depth', 64, 'Depth of first for convolution.')
flags.DEFINE_integer(
'latent', 16,
'Latent space depth, the total latent size is the depth multiplied by '
'latent_width ** 2.')
flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')
flags.DEFINE_float('adversary_lr', 1e-4,
'Learning rate for discriminator.')
flags.DEFINE_string('disc_layer_sizes', '100,100',
'Comma-separated list of discriminator layer sizes.')
app.run(main)
| true
| true
|
f704ef3af73f982bb2a2835418342e4bbbf74ec9
| 17,036
|
py
|
Python
|
django/contrib/admin/utils.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/admin/utils.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
django/contrib/admin/utils.py
|
devops2014/djangosite
|
db77915c9fd35a203edd8206f702ee4082f04d4a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
    """
    Returns True if 'distinct()' should be used to query the given lookup path.
    """
    # Only the first lookup segment names a field on this model.
    first_part = lookup_path.split('__', 1)[0]
    field = opts.get_field(first_part)
    if not hasattr(field, 'get_path_info'):
        return False
    # Any many-to-many hop along the relation path can duplicate rows.
    return any(path.m2m for path in field.get_path_info())
def prepare_lookup_value(key, value):
    """
    Returns a lookup value prepared to be used in queryset filtering.
    """
    # A __in lookup takes a comma-separated parameter as a list of values.
    if key.endswith('__in'):
        return value.split(',')
    # For __isnull, '' and the literals 'false'/'0' mean False; else True.
    if key.endswith('__isnull'):
        return value.lower() not in ('', 'false', '0')
    return value
def quote(s):
    """
    Ensure that primary key values do not confuse the admin URLs by escaping
    any '/', '_' and ':' and similarly problematic characters.
    Similar to urllib.quote, except that the quoting is slightly different so
    that it doesn't get automatically unquoted by the Web browser.
    """
    # Non-string primary keys (e.g. integers) pass through untouched.
    if not isinstance(s, six.string_types):
        return s
    return ''.join(
        '_%02X' % ord(c) if c in """:/_#?;@&=+$,"[]<>%\\""" else c
        for c in s)
def unquote(s):
    """
    Undo the effects of quote(). Based heavily on urllib.unquote().

    Each '_XX' sequence (XX = two hex digits) is decoded back to the
    character it encodes; malformed sequences are left as-is.
    """
    # Fix: the old implementation shadowed the builtin ``list`` with a local
    # variable and used needless ``mychr``/``myatoi`` aliases.
    parts = s.split('_')
    res = [parts[0]]
    for item in parts[1:]:
        if item[1:2]:
            try:
                # The first two characters after '_' form a hex byte.
                res.append(chr(int(item[:2], 16)) + item[2:])
            except ValueError:
                # Not valid hex: keep the underscore literally.
                res.append('_' + item)
        else:
            res.append('_' + item)
    return ''.join(res)
def flatten(fields):
    """Returns a list which is a single level of flattening of the
    original list."""
    # Lists and tuples are expanded one level; everything else is kept as-is.
    return [item
            for entry in fields
            for item in (entry if isinstance(entry, (list, tuple)) else (entry,))]
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    names = []
    for _name, opts in fieldsets:
        names += flatten(opts['fields'])
    return names
def get_deleted_objects(objs, opts, user, admin_site, using):
    """
    Find all objects related to ``objs`` that should also be deleted. ``objs``
    must be a homogeneous iterable of objects (e.g. a QuerySet).
    Returns a nested list of strings suitable for display in the
    template with the ``unordered_list`` filter.
    """
    collector = NestedObjects(using=using)
    collector.collect(objs)
    # Verbose names of models the user lacks delete permission for; filled
    # in as a side effect of format_callback below.
    perms_needed = set()
    def format_callback(obj):
        # Render one collected object, as a change-page link when the model
        # is registered with this admin site and the URL resolves.
        has_admin = obj.__class__ in admin_site._registry
        opts = obj._meta
        no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
                                   force_text(obj))
        if has_admin:
            try:
                admin_url = reverse('%s:%s_%s_change'
                                    % (admin_site.name,
                                       opts.app_label,
                                       opts.model_name),
                                    None, (quote(obj._get_pk_val()),))
            except NoReverseMatch:
                # Change url doesn't exist -- don't display link to edit
                return no_edit_link
            p = '%s.%s' % (opts.app_label,
                           get_permission_codename('delete', opts))
            if not user.has_perm(p):
                perms_needed.add(opts.verbose_name)
            # Display a link to the admin page.
            return format_html('{}: <a href="{}">{}</a>',
                               capfirst(opts.verbose_name),
                               admin_url,
                               obj)
        else:
            # Don't display link to edit, because it either has no
            # admin or is edited inline.
            return no_edit_link
    to_delete = collector.nested(format_callback)
    # Objects that would block the deletion via PROTECT relations.
    protected = [format_callback(obj) for obj in collector.protected]
    return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
    # Deletion collector that additionally records parent->child edges so the
    # set of objects to delete can be rendered as a nested tree.
    def __init__(self, *args, **kwargs):
        super(NestedObjects, self).__init__(*args, **kwargs)
        self.edges = {}  # {from_instance: [to_instances]}
        # Objects that a PROTECT relation prevents from being deleted.
        self.protected = set()
        # Per-model tally of collected objects, keyed by verbose_name_plural.
        self.model_count = defaultdict(int)
    def add_edge(self, source, target):
        # Record that deleting ``source`` also deletes ``target``; a source
        # of None marks ``target`` as a root of the tree.
        self.edges.setdefault(source, []).append(target)
    def collect(self, objs, source=None, source_attr=None, **kwargs):
        for obj in objs:
            if source_attr and not source_attr.endswith('+'):
                # Resolve %(class)s / %(app_label)s placeholders in the
                # related accessor name before following it back to the parent.
                related_name = source_attr % {
                    'class': source._meta.model_name,
                    'app_label': source._meta.app_label,
                }
                self.add_edge(getattr(obj, related_name), obj)
            else:
                self.add_edge(None, obj)
            self.model_count[obj._meta.verbose_name_plural] += 1
        try:
            return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
        except models.ProtectedError as e:
            # Remember the blockers instead of propagating, so the confirm
            # page can list them.
            self.protected.update(e.protected_objects)
    def related_objects(self, related, objs):
        qs = super(NestedObjects, self).related_objects(related, objs)
        # Prefetch the parent so add_edge's getattr doesn't hit the DB.
        return qs.select_related(related.field.name)
    def _nested(self, obj, seen, format_callback):
        # Depth-first render of ``obj`` and its children; ``seen`` guards
        # against cycles and duplicates.
        if obj in seen:
            return []
        seen.add(obj)
        children = []
        for child in self.edges.get(obj, ()):
            children.extend(self._nested(child, seen, format_callback))
        if format_callback:
            ret = [format_callback(obj)]
        else:
            ret = [obj]
        if children:
            ret.append(children)
        return ret
    def nested(self, format_callback=None):
        """
        Return the graph as a nested list.
        """
        seen = set()
        roots = []
        for root in self.edges.get(None, ()):
            roots.extend(self._nested(root, seen, format_callback))
        return roots
    def can_fast_delete(self, *args, **kwargs):
        """
        We always want to load the objects into memory so that we can display
        them to the user in confirm page.
        """
        return False
def model_format_dict(obj):
    """
    Build a dict with keys 'verbose_name' and 'verbose_name_plural' for use
    with string formatting.

    ``obj`` may be a ``Model`` instance, ``Model`` subclass, ``QuerySet``,
    or already an options/_meta-like object.
    """
    if isinstance(obj, (models.Model, models.base.ModelBase)):
        meta = obj._meta
    elif isinstance(obj, models.query.QuerySet):
        meta = obj.model._meta
    else:
        meta = obj
    return dict(
        verbose_name=force_text(meta.verbose_name),
        verbose_name_plural=force_text(meta.verbose_name_plural),
    )
def model_ngettext(obj, n=None):
    """
    Pick the singular or plural verbose name for ``obj`` based on ``n``.

    ``obj`` may be a ``Model`` instance, ``Model`` subclass, or ``QuerySet``;
    for a ``QuerySet``, ``n`` defaults to its count.
    """
    if isinstance(obj, models.query.QuerySet):
        if n is None:
            n = obj.count()
        obj = obj.model
    names = model_format_dict(obj)
    return ungettext(names["verbose_name"], names["verbose_name_plural"], n or 0)
def lookup_field(name, obj, model_admin=None):
    """
    Resolve ``name`` against ``obj`` (and optionally ``model_admin``).

    Returns a ``(field, attr, value)`` triple: when ``name`` is a model
    field, ``field`` is that field and ``attr`` is None; otherwise
    ``field`` is None and ``attr`` is the callable/attribute that
    produced ``value``.
    """
    opts = obj._meta
    try:
        f = _get_non_gfk_field(opts, name)
    except FieldDoesNotExist:
        # For non-field values, the value is either a method, property or
        # returned via a callable.
        if callable(name):
            attr = name
            value = attr(obj)
        elif (model_admin is not None and
                hasattr(model_admin, name) and
                not name == '__str__' and
                not name == '__unicode__'):
            attr = getattr(model_admin, name)
            value = attr(obj)
        else:
            attr = getattr(obj, name)
            if callable(attr):
                value = attr()
            else:
                value = attr
        f = None
    else:
        attr = None
        value = getattr(obj, name)
    return f, attr, value
def _get_non_gfk_field(opts, name):
    """
    Fetch field ``name`` from ``opts`` while treating GenericForeignKeys
    as missing: for historical reasons the admin relies on them being
    "not found" by get_field().
    """
    candidate = opts.get_field(name)
    looks_like_gfk = (candidate.is_relation and candidate.one_to_many
                      and not candidate.related_model)
    if looks_like_gfk:
        raise FieldDoesNotExist()
    return candidate
def label_for_field(name, model, model_admin=None, return_attr=False):
    """
    Returns a sensible label for a field name. The name can be a callable,
    property (but not created with @property decorator) or the name of an
    object's attribute, as well as a genuine fields. If return_attr is
    True, the resolved attribute (which could be a callable) is also returned.
    This will be None if (and only if) the name refers to a field.
    """
    attr = None
    try:
        field = _get_non_gfk_field(model._meta, name)
        try:
            label = field.verbose_name
        except AttributeError:
            # field is likely a ForeignObjectRel
            label = field.opts.verbose_name
    except FieldDoesNotExist:
        # Special-case the default string representations of the model.
        if name == "__unicode__":
            label = force_text(model._meta.verbose_name)
            attr = six.text_type
        elif name == "__str__":
            label = force_str(model._meta.verbose_name)
            attr = bytes
        else:
            # Not a field: resolve against a callable, the ModelAdmin, or
            # the model itself, in that order.
            if callable(name):
                attr = name
            elif model_admin is not None and hasattr(model_admin, name):
                attr = getattr(model_admin, name)
            elif hasattr(model, name):
                attr = getattr(model, name)
            else:
                message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
                if model_admin:
                    message += " or %s" % (model_admin.__class__.__name__,)
                raise AttributeError(message)
            # An explicit short_description wins; otherwise derive the
            # label from the callable/attribute name.
            if hasattr(attr, "short_description"):
                label = attr.short_description
            elif (isinstance(attr, property) and
                  hasattr(attr, "fget") and
                  hasattr(attr.fget, "short_description")):
                label = attr.fget.short_description
            elif callable(attr):
                if attr.__name__ == "<lambda>":
                    label = "--"
                else:
                    label = pretty_name(attr.__name__)
            else:
                label = pretty_name(name)
    if return_attr:
        return (label, attr)
    else:
        return label
def help_text_for_field(name, model):
    """Return the help_text of field ``name`` on ``model``, or '' if none."""
    try:
        field = _get_non_gfk_field(model._meta, name)
    except FieldDoesNotExist:
        return smart_text("")
    return smart_text(getattr(field, 'help_text', ""))
def display_for_field(value, field):
    """
    Render ``value`` for display according to the model ``field`` it belongs
    to (choices, booleans, dates/times, numbers, files), falling back to
    plain text.
    """
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
    if field.flatchoices:
        return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
    # NullBooleanField needs special-case null-handling, so it is tested
    # before the general None check below.
    if isinstance(field, (models.BooleanField, models.NullBooleanField)):
        return _boolean_icon(value)
    if value is None:
        return EMPTY_CHANGELIST_VALUE
    if isinstance(field, models.DateTimeField):
        return formats.localize(timezone.template_localtime(value))
    if isinstance(field, (models.DateField, models.TimeField)):
        return formats.localize(value)
    if isinstance(field, models.DecimalField):
        return formats.number_format(value, field.decimal_places)
    if isinstance(field, models.FloatField):
        return formats.number_format(value)
    if isinstance(field, models.FileField):
        link = conditional_escape(value.url)
        text = conditional_escape(value)
        return mark_safe('<a href="%s">%s</a>' % (link, text))
    return smart_text(value)
def display_for_value(value, boolean=False):
    """
    Render an arbitrary ``value`` for display when no model field is
    available; ``boolean`` forces the boolean icon.
    """
    from django.contrib.admin.templatetags.admin_list import _boolean_icon
    from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
    if boolean:
        return _boolean_icon(value)
    if value is None:
        return EMPTY_CHANGELIST_VALUE
    if isinstance(value, datetime.datetime):
        return formats.localize(timezone.template_localtime(value))
    if isinstance(value, (datetime.date, datetime.time)):
        return formats.localize(value)
    if isinstance(value, six.integer_types + (decimal.Decimal, float)):
        return formats.number_format(value)
    return smart_text(value)
class NotRelationField(Exception):
    """Raised by get_model_from_relation() for non-relational fields."""
    pass
def get_model_from_relation(field):
    """Return the model at the far end of a relational ``field``.

    Raises NotRelationField when the field carries no relation path info.
    """
    if not hasattr(field, 'get_path_info'):
        raise NotRelationField
    return field.get_path_info()[-1].to_opts.model
def reverse_field_path(model, path):
    """ Create a reversed field path.
    E.g. Given (Order, "user__groups"),
    return (Group, "user__order").
    Final field must be a related model, not a data field.
    """
    reversed_path = []
    parent = model
    pieces = path.split(LOOKUP_SEP)
    for piece in pieces:
        field = parent._meta.get_field(piece)
        # skip trailing data field if extant:
        if len(reversed_path) == len(pieces) - 1:  # final iteration
            try:
                get_model_from_relation(field)
            except NotRelationField:
                break
        # Field should point to another model
        if field.is_relation and not (field.auto_created and not field.concrete):
            # Forward relation: follow it and reverse via its query name.
            related_name = field.related_query_name()
            parent = field.rel.to
        else:
            # Reverse relation: reverse via the originating field's name.
            related_name = field.field.name
            parent = field.related_model
        reversed_path.insert(0, related_name)
    return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
    """
    Resolve a LOOKUP_SEP-separated ``path`` starting at ``model`` into the
    list of successive Field instances it traverses, e.g.
    (ModelX, "user__groups__name") -> [ForeignKey, ManyToManyField, CharField].
    """
    resolved = []
    current_model = model
    for part in path.split(LOOKUP_SEP):
        if resolved:
            # Hop to the model the previous field relates to.
            current_model = get_model_from_relation(resolved[-1])
        resolved.append(current_model._meta.get_field(part))
    return resolved
def remove_trailing_data_field(fields):
    """Drop the final entry of ``fields`` when it is not a relation."""
    try:
        get_model_from_relation(fields[-1])
    except NotRelationField:
        return fields[:-1]
    return fields
def get_limit_choices_to_from_path(model, path):
    """ Return Q object for limiting choices if applicable.
    If final model in path is linked via a ForeignKey or ManyToManyField which
    has a ``limit_choices_to`` attribute, return it as a Q object.
    """
    fields = get_fields_from_path(model, path)
    fields = remove_trailing_data_field(fields)
    # Bound accessor on the final relation's rel, when present (falsy
    # otherwise, via the and-chain short-circuit).
    get_limit_choices_to = (
        fields and hasattr(fields[-1], 'rel') and
        getattr(fields[-1].rel, 'get_limit_choices_to', None))
    if not get_limit_choices_to:
        return models.Q()  # empty Q
    limit_choices_to = get_limit_choices_to()
    if isinstance(limit_choices_to, models.Q):
        return limit_choices_to  # already a Q
    else:
        return models.Q(**limit_choices_to)  # convert dict to Q
| 33.535433
| 94
| 0.617751
|
from __future__ import unicode_literals
import datetime
import decimal
from collections import defaultdict
from django.contrib.auth import get_permission_codename
from django.core.exceptions import FieldDoesNotExist
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.forms.forms import pretty_name
from django.utils import formats, six, timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils.html import conditional_escape, format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ungettext
def lookup_needs_distinct(opts, lookup_path):
    """
    Return True if the first field of ``lookup_path`` spans a many-to-many
    join (filtering across it can yield duplicate rows, so the queryset
    needs .distinct()).
    """
    field_name = lookup_path.split('__', 1)[0]
    field = opts.get_field(field_name)
    if hasattr(field, 'get_path_info') and any(path.m2m for path in field.get_path_info()):
        return True
    return False
def prepare_lookup_value(key, value):
    """
    Coerce a raw string lookup ``value`` based on the suffix of ``key``:
    '__in' lookups become a list split on commas, and '__isnull' lookups
    become a boolean ('', 'false' and '0' map to False).
    """
    if key.endswith('__in'):
        value = value.split(',')
    if key.endswith('__isnull'):
        value = value.lower() not in ('', 'false', '0')
    return value
def quote(s):
    """
    Escape characters that would confuse URL routing in a primary-key
    value: each character in the special set (colon, slash, underscore,
    '#?;@&=+$,"[]<>%' and backslash) is replaced by '_' followed by its
    two-digit uppercase hex code.

    Non-string values are returned unchanged.  The inverse is unquote().
    """
    if not isinstance(s, six.string_types):
        return s
    special = """:/_#?;@&=+$,"[]<>%\\"""
    # Single-pass join instead of the old index-loop that mutated a
    # per-character list in place.
    return ''.join('_%02X' % ord(c) if c in special else c for c in s)
def unquote(s):
    """
    Undo the effects of quote(): each '_XX' escape (two hex digits) is
    converted back to chr(0xXX); any '_' not followed by two hex digits
    is preserved verbatim.
    """
    # Renamed the old local ``list`` (shadowed the builtin) and dropped
    # the ``mychr``/``myatoi``/``myappend`` micro-optimization aliases.
    parts = s.split('_')
    result = [parts[0]]
    for item in parts[1:]:
        if item[1:2]:
            # At least two characters follow the '_': try hex-decoding.
            try:
                result.append(chr(int(item[:2], 16)) + item[2:])
            except ValueError:
                result.append('_' + item)
        else:
            result.append('_' + item)
    return "".join(result)
def flatten(fields):
flat = []
for field in fields:
if isinstance(field, (list, tuple)):
flat.extend(field)
else:
flat.append(field)
return flat
def flatten_fieldsets(fieldsets):
field_names = []
for name, opts in fieldsets:
field_names.extend(
flatten(opts['fields'])
)
return field_names
def get_deleted_objects(objs, opts, user, admin_site, using):
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
no_edit_link = '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
if has_admin:
try:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.model_name),
None, (quote(obj._get_pk_val()),))
except NoReverseMatch:
# Change url doesn't exist -- don't display link to edit
return no_edit_link
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{}: <a href="{}">{}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return no_edit_link
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, collector.model_count, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
self.model_count = defaultdict(int)
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source=None, source_attr=None, **kwargs):
for obj in objs:
if source_attr and not source_attr.endswith('+'):
related_name = source_attr % {
'class': source._meta.model_name,
'app_label': source._meta.app_label,
}
self.add_edge(getattr(obj, related_name), obj)
else:
self.add_edge(None, obj)
self.model_count[obj._meta.verbose_name_plural] += 1
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
return False
def model_format_dict(obj):
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = _get_non_gfk_field(opts, name)
except FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and
hasattr(model_admin, name) and
not name == '__str__' and
not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def _get_non_gfk_field(opts, name):
field = opts.get_field(name)
if field.is_relation and field.one_to_many and not field.related_model:
raise FieldDoesNotExist()
return field
def label_for_field(name, model, model_admin=None, return_attr=False):
attr = None
try:
field = _get_non_gfk_field(model._meta, name)
try:
label = field.verbose_name
except AttributeError:
# field is likely a ForeignObjectRel
label = field.opts.verbose_name
except FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif (isinstance(attr, property) and
hasattr(attr, "fget") and
hasattr(attr.fget, "short_description")):
label = attr.fget.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
help_text = ""
try:
field = _get_non_gfk_field(model._meta, name)
except FieldDoesNotExist:
pass
else:
if hasattr(field, 'help_text'):
help_text = field.help_text
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
elif isinstance(field, models.FileField):
return mark_safe('<a href="%s">%s</a>' % (
conditional_escape(value.url),
conditional_escape(value),
))
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if hasattr(field, 'get_path_info'):
return field.get_path_info()[-1].to_opts.model
else:
raise NotRelationField
def reverse_field_path(model, path):
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field = parent._meta.get_field(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces) - 1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
# Field should point to another model
if field.is_relation and not (field.auto_created and not field.concrete):
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.related_model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field(piece))
return fields
def remove_trailing_data_field(fields):
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
get_limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'get_limit_choices_to', None))
if not get_limit_choices_to:
return models.Q() # empty Q
limit_choices_to = get_limit_choices_to()
if isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q
| true
| true
|
f704effc26992be20ba41471a7122694d4e7fcf5
| 41,290
|
py
|
Python
|
numba/core/dispatcher.py
|
blair1306/numba
|
3b9647d17d653abac15363da604eeb804dbdd15a
|
[
"BSD-2-Clause"
] | 2
|
2018-04-09T18:50:16.000Z
|
2019-06-11T15:19:51.000Z
|
numba/core/dispatcher.py
|
blair1306/numba
|
3b9647d17d653abac15363da604eeb804dbdd15a
|
[
"BSD-2-Clause"
] | 2
|
2015-04-15T20:25:48.000Z
|
2021-03-03T12:32:59.000Z
|
numba/core/dispatcher.py
|
blair1306/numba
|
3b9647d17d653abac15363da604eeb804dbdd15a
|
[
"BSD-2-Clause"
] | 1
|
2021-05-12T07:29:28.000Z
|
2021-05-12T07:29:28.000Z
|
# -*- coding: utf-8 -*-
import collections
import functools
import os
import struct
import sys
import types as pytypes
import uuid
import weakref
from copy import deepcopy
from numba import _dispatcher
from numba.core import utils, types, errors, typing, serialize, config, compiler, sigutils
from numba.core.compiler_lock import global_compiler_lock
from numba.core.typeconv.rules import default_type_manager
from numba.core.typing.templates import fold_arguments
from numba.core.typing.typeof import Purpose, typeof
from numba.core.bytecode import get_code_object
from numba.core.caching import NullCache, FunctionCache
from numba.core import entrypoints
class OmittedArg(object):
    """
    A placeholder standing in for an argument the caller did not supply,
    remembering the default value it would take.
    """

    def __init__(self, value):
        # The default value of the omitted argument.
        self.value = value

    def __repr__(self):
        return "omitted arg({!r})".format(self.value)

    @property
    def _numba_type_(self):
        # Hook recognized by numba's typeof machinery.
        return types.Omitted(self.value)
class _FunctionCompiler(object):
    # Compiles specializations of a plain @jit function on behalf of a
    # dispatcher, caching typing failures so they aren't retried.
    def __init__(self, py_func, targetdescr, targetoptions, locals,
                 pipeline_class):
        self.py_func = py_func
        self.targetdescr = targetdescr
        self.targetoptions = targetoptions
        self.locals = locals
        self.pysig = utils.pysignature(self.py_func)
        self.pipeline_class = pipeline_class
        # Remember key=(args, return_type) combinations that will fail
        # compilation to avoid compilation attempt on them. The values are
        # the exceptions.
        self._failed_cache = {}
    def fold_argument_types(self, args, kws):
        """
        Given positional and named argument types, fold keyword arguments
        and resolve defaults by inserting types.Omitted() instances.
        A (pysig, argument types) tuple is returned.
        """
        def normal_handler(index, param, value):
            return value
        def default_handler(index, param, default):
            return types.Omitted(default)
        def stararg_handler(index, param, values):
            return types.StarArgTuple(values)
        # For now, we take argument values from the @jit function, even
        # in the case of generated jit.
        args = fold_arguments(self.pysig, args, kws,
                              normal_handler,
                              default_handler,
                              stararg_handler)
        return self.pysig, args
    def compile(self, args, return_type):
        # Compile one signature, re-raising any previously cached failure.
        status, retval = self._compile_cached(args, return_type)
        if status:
            return retval
        else:
            raise retval
    def _compile_cached(self, args, return_type):
        # Returns (True, compile result) on success, or (False, the
        # TypingError) when this signature failed now or previously.
        key = tuple(args), return_type
        try:
            return False, self._failed_cache[key]
        except KeyError:
            pass
        try:
            retval = self._compile_core(args, return_type)
        except errors.TypingError as e:
            self._failed_cache[key] = e
            return False, e
        else:
            return True, retval
    def _compile_core(self, args, return_type):
        # Run the actual compiler pipeline for one (args, return_type).
        flags = compiler.Flags()
        self.targetdescr.options.parse_as_flags(flags, self.targetoptions)
        flags = self._customize_flags(flags)
        impl = self._get_implementation(args, {})
        cres = compiler.compile_extra(self.targetdescr.typing_context,
                                      self.targetdescr.target_context,
                                      impl,
                                      args=args, return_type=return_type,
                                      flags=flags, locals=self.locals,
                                      pipeline_class=self.pipeline_class)
        # Check typing error if object mode is used
        if cres.typing_error is not None and not flags.enable_pyobject:
            raise cres.typing_error
        return cres
    def get_globals_for_reduction(self):
        # Globals needed to serialize the wrapped function.
        return serialize._get_function_globals_for_reduction(self.py_func)
    def _get_implementation(self, args, kws):
        # The function to compile; overridden by the generated-jit variant.
        return self.py_func
    def _customize_flags(self, flags):
        # Hook for subclasses to tweak compiler flags.
        return flags
class _GeneratedFunctionCompiler(_FunctionCompiler):
    # Variant of _FunctionCompiler for generated jit: the wrapped Python
    # function is called with the argument types and returns the actual
    # implementation to compile.
    def __init__(self, py_func, targetdescr, targetoptions, locals,
                 pipeline_class):
        super(_GeneratedFunctionCompiler, self).__init__(
            py_func, targetdescr, targetoptions, locals, pipeline_class)
        # Implementations produced so far (kept alive for serialization).
        self.impls = set()
    def get_globals_for_reduction(self):
        # This will recursively get the globals used by any nested
        # implementation function.
        return serialize._get_function_globals_for_reduction(self.py_func)
    def _get_implementation(self, args, kws):
        impl = self.py_func(*args, **kws)
        # Check the generating function and implementation signatures are
        # compatible, otherwise compiling would fail later.
        pysig = utils.pysignature(self.py_func)
        implsig = utils.pysignature(impl)
        ok = len(pysig.parameters) == len(implsig.parameters)
        if ok:
            for pyparam, implparam in zip(pysig.parameters.values(),
                                          implsig.parameters.values()):
                # We allow the implementation to omit default values, but
                # if it mentions them, they should have the same value...
                if (pyparam.name != implparam.name or
                    pyparam.kind != implparam.kind or
                    (implparam.default is not implparam.empty and
                     implparam.default != pyparam.default)):
                    ok = False
        if not ok:
            raise TypeError("generated implementation %s should be compatible "
                            "with signature '%s', but has signature '%s'"
                            % (impl, pysig, implsig))
        self.impls.add(impl)
        return impl
# Per-dispatcher compilation-cache statistics (path, hit count, miss count).
_CompileStats = collections.namedtuple(
    '_CompileStats', ('cache_path', 'cache_hits', 'cache_misses'))
class _CompilingCounter(object):
"""
A simple counter that increment in __enter__ and decrement in __exit__.
"""
def __init__(self):
self.counter = 0
def __enter__(self):
assert self.counter >= 0
self.counter += 1
def __exit__(self, *args, **kwargs):
self.counter -= 1
assert self.counter >= 0
def __bool__(self):
return self.counter > 0
__nonzero__ = __bool__
class _DispatcherBase(_dispatcher.Dispatcher):
"""
Common base class for dispatcher Implementations.
"""
__numba__ = "py_func"
    def __init__(self, arg_count, py_func, pysig, can_fallback,
                 exact_match_required):
        """
        Set up Python-side bookkeeping for ``py_func``, then initialize
        the C-level dispatcher with its argument layout (names, defaults,
        *args presence).
        """
        self._tm = default_type_manager
        # A mapping of signatures to compile results
        self.overloads = collections.OrderedDict()
        self.py_func = py_func
        # other parts of Numba assume the old Python 2 name for code object
        self.func_code = get_code_object(py_func)
        # but newer python uses a different name
        self.__code__ = self.func_code
        argnames = tuple(pysig.parameters)
        default_values = self.py_func.__defaults__ or ()
        defargs = tuple(OmittedArg(val) for val in default_values)
        try:
            lastarg = list(pysig.parameters.values())[-1]
        except IndexError:
            # No parameters at all, hence no *args.
            has_stararg = False
        else:
            has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL
        _dispatcher.Dispatcher.__init__(self, self._tm.get_pointer(),
                                        arg_count, self._fold_args,
                                        argnames, defargs,
                                        can_fallback,
                                        has_stararg,
                                        exact_match_required)
        self.doc = py_func.__doc__
        self._compiling_counter = _CompilingCounter()
        # Release compiled-function references when this dispatcher dies.
        weakref.finalize(self, self._make_finalizer())
def _compilation_chain_init_hook(self):
"""
This will be called ahead of any part of compilation taking place (this
even includes being ahead of working out the types of the arguments).
This permits activities such as initialising extension entry points so
that the compiler knows about additional externally defined types etc
before it does anything.
"""
entrypoints.init_all()
    def _reset_overloads(self):
        # Drop all compiled specializations, both from the C-level
        # dispatcher and from the Python-side signature map.
        self._clear()
        self.overloads.clear()
    def _make_finalizer(self):
        """
        Return a finalizer function that will release references to
        related compiled functions.
        """
        overloads = self.overloads
        targetctx = self.targetctx
        # Early-bind utils.shutting_down() into the function's local namespace
        # (see issue #689)
        def finalizer(shutting_down=utils.shutting_down):
            # The finalizer may crash at shutdown, skip it (resources
            # will be cleared by the process exiting, anyway).
            if shutting_down():
                return
            # This function must *not* hold any reference to self:
            # we take care to bind the necessary objects in the closure.
            for cres in overloads.values():
                try:
                    targetctx.remove_user_function(cres.entry_point)
                except KeyError:
                    # Already removed; nothing left to release.
                    pass
        return finalizer
@property
def signatures(self):
"""
Returns a list of compiled function signatures.
"""
return list(self.overloads)
@property
def nopython_signatures(self):
return [cres.signature for cres in self.overloads.values()
if not cres.objectmode and not cres.interpmode]
def disable_compile(self, val=True):
"""Disable the compilation of new signatures at call time.
"""
# If disabling compilation then there must be at least one signature
assert (not val) or len(self.signatures) > 0
self._can_compile = not val
    def add_overload(self, cres):
        # Register a compile result: encode its argument types for the
        # C-level dispatcher, and remember the full result by signature.
        args = tuple(cres.signature.args)
        sig = [a._code for a in args]
        self._insert(sig, cres.entry_point, cres.objectmode, cres.interpmode)
        self.overloads[args] = cres
def fold_argument_types(self, args, kws):
return self._compiler.fold_argument_types(args, kws)
def get_call_template(self, args, kws):
"""
Get a typing.ConcreteTemplate for this dispatcher and the given
*args* and *kws* types. This allows to resolve the return type.
A (template, pysig, args, kws) tuple is returned.
"""
# XXX how about a dispatcher template class automating the
# following?
# Fold keyword arguments and resolve default values
pysig, args = self._compiler.fold_argument_types(args, kws)
kws = {}
# Ensure an overload is available
if self._can_compile:
self.compile(tuple(args))
# Create function type for typing
func_name = self.py_func.__name__
name = "CallTemplate({0})".format(func_name)
# The `key` isn't really used except for diagnosis here,
# so avoid keeping a reference to `cfunc`.
call_template = typing.make_concrete_template(
name, key=func_name, signatures=self.nopython_signatures)
return call_template, pysig, args, kws
def get_overload(self, sig):
"""
Return the compiled function for the given signature.
"""
args, return_type = sigutils.normalize_signature(sig)
return self.overloads[tuple(args)].entry_point
@property
def is_compiling(self):
"""
Whether a specialization is currently being compiled.
"""
return self._compiling_counter
    def _compile_for_args(self, *args, **kws):
        """
        For internal use.  Compile a specialized version of the function
        for the given *args* and *kws*, and return the resulting callable.
        """
        assert not kws
        # call any initialisation required for the compilation chain (e.g.
        # extension point registration).
        self._compilation_chain_init_hook()
        def error_rewrite(e, issue_type):
            """
            Rewrite and raise Exception `e` with help supplied based on the
            specified issue_type.
            """
            if config.SHOW_HELP:
                help_msg = errors.error_extras[issue_type]
                e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
            if config.FULL_TRACEBACKS:
                raise e
            else:
                # Suppress the traceback to keep error output short.
                raise e.with_traceback(None)
        # Resolve the Numba type of every concrete argument value.
        argtypes = []
        for a in args:
            if isinstance(a, OmittedArg):
                argtypes.append(types.Omitted(a.value))
            else:
                argtypes.append(self.typeof_pyval(a))
        try:
            return self.compile(tuple(argtypes))
        except errors.ForceLiteralArg as e:
            # Received request for compiler re-entry with the list of arguments
            # indicated by e.requested_args.
            # First, check if any of these args are already Literal-ized
            already_lit_pos = [i for i in e.requested_args
                               if isinstance(args[i], types.Literal)]
            if already_lit_pos:
                # Abort compilation if any argument is already a Literal.
                # Letting this continue will cause infinite compilation loop.
                m = ("Repeated literal typing request.\n"
                     "{}.\n"
                     "This is likely caused by an error in typing. "
                     "Please see nested and suppressed exceptions.")
                info = ', '.join('Arg #{} is {}'.format(i, args[i])
                                 for i in sorted(already_lit_pos))
                raise errors.CompilerError(m.format(info))
            # Convert requested arguments into a Literal.
            args = [(types.literal
                     if i in e.requested_args
                     else lambda x: x)(args[i])
                    for i, v in enumerate(args)]
            # Re-enter compilation with the Literal-ized arguments
            return self._compile_for_args(*args)
        except errors.TypingError as e:
            # Intercept typing error that may be due to an argument
            # that failed inferencing as a Numba type
            failed_args = []
            for i, arg in enumerate(args):
                val = arg.value if isinstance(arg, OmittedArg) else arg
                try:
                    tp = typeof(val, Purpose.argument)
                except ValueError as typeof_exc:
                    failed_args.append((i, str(typeof_exc)))
                else:
                    if tp is None:
                        failed_args.append(
                            (i,
                             "cannot determine Numba type of value %r" % (val,)))
            if failed_args:
                # Patch error message to ease debugging
                msg = str(e).rstrip() + (
                    "\n\nThis error may have been caused by the following argument(s):\n%s\n"
                    % "\n".join("- argument %d: %s" % (i, err)
                                for i, err in failed_args))
                e.patch_message(msg)
            error_rewrite(e, 'typing')
        except errors.UnsupportedError as e:
            # Something unsupported is present in the user code, add help info
            error_rewrite(e, 'unsupported_error')
        except (errors.NotDefinedError, errors.RedefinedError,
                errors.VerificationError) as e:
            # These errors are probably from an issue with either the code supplied
            # being syntactically or otherwise invalid
            error_rewrite(e, 'interpreter')
        except errors.ConstantInferenceError as e:
            # this is from trying to infer something as constant when it isn't
            # or isn't supported as a constant
            error_rewrite(e, 'constant_inference')
        except Exception as e:
            if config.SHOW_HELP:
                if hasattr(e, 'patch_message'):
                    help_msg = errors.error_extras['reportable']
                    e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
            # ignore the FULL_TRACEBACKS config, this needs reporting!
            raise e
def inspect_llvm(self, signature=None):
"""Get the LLVM intermediate representation generated by compilation.
Parameters
----------
signature : tuple of numba types, optional
Specify a signature for which to obtain the LLVM IR. If None, the
IR is returned for all available signatures.
Returns
-------
llvm : dict[signature, str] or str
Either the LLVM IR string for the specified signature, or, if no
signature was given, a dictionary mapping signatures to LLVM IR
strings.
"""
if signature is not None:
lib = self.overloads[signature].library
return lib.get_llvm_str()
return dict((sig, self.inspect_llvm(sig)) for sig in self.signatures)
def inspect_asm(self, signature=None):
"""Get the generated assembly code.
Parameters
----------
signature : tuple of numba types, optional
Specify a signature for which to obtain the assembly code. If
None, the assembly code is returned for all available signatures.
Returns
-------
asm : dict[signature, str] or str
Either the assembly code for the specified signature, or, if no
signature was given, a dictionary mapping signatures to assembly
code.
"""
if signature is not None:
lib = self.overloads[signature].library
return lib.get_asm_str()
return dict((sig, self.inspect_asm(sig)) for sig in self.signatures)
    def inspect_types(self, file=None, signature=None,
                      pretty=False, style='default', **kwargs):
        """Print/return Numba intermediate representation (IR)-annotated code.
        Parameters
        ----------
        file : file-like object, optional
            File to which to print. Defaults to sys.stdout if None. Must be
            None if ``pretty=True``.
        signature : tuple of numba types, optional
            Print/return the intermediate representation for only the given
            signature. If None, the IR is printed for all available signatures.
        pretty : bool, optional
            If True, an Annotate object will be returned that can render the
            IR with color highlighting in Jupyter and IPython. ``file`` must
            be None if ``pretty`` is True. Additionally, the ``pygments``
            library must be installed for ``pretty=True``.
        style : str, optional
            Choose a style for rendering. Ignored if ``pretty`` is ``False``.
            This is directly consumed by ``pygments`` formatters. To see a
            list of available styles, import ``pygments`` and run
            ``list(pygments.styles.get_all_styles())``.
        Returns
        -------
        annotated : Annotate object, optional
            Only returned if ``pretty=True``, otherwise this function is only
            used for its printing side effect. If ``pretty=True``, an Annotate
            object is returned that can render itself in Jupyter and IPython.
        """
        overloads = self.overloads
        if signature is not None:
            # Restrict output to the single requested overload.
            overloads = {signature: self.overloads[signature]}
        if not pretty:
            if file is None:
                file = sys.stdout
            for ver, res in overloads.items():
                print("%s %s" % (self.py_func.__name__, ver), file=file)
                print('-' * 80, file=file)
                print(res.type_annotation, file=file)
                print('=' * 80, file=file)
        else:
            if file is not None:
                raise ValueError("`file` must be None if `pretty=True`")
            # Imported lazily: pretty rendering needs optional dependencies.
            from numba.core.annotations.pretty_annotate import Annotate
            return Annotate(self, signature=signature, style=style)
def inspect_cfg(self, signature=None, show_wrapper=None):
"""
For inspecting the CFG of the function.
By default the CFG of the user function is shown. The *show_wrapper*
option can be set to "python" or "cfunc" to show the python wrapper
function or the *cfunc* wrapper function, respectively.
"""
if signature is not None:
cres = self.overloads[signature]
lib = cres.library
if show_wrapper == 'python':
fname = cres.fndesc.llvm_cpython_wrapper_name
elif show_wrapper == 'cfunc':
fname = cres.fndesc.llvm_cfunc_wrapper_name
else:
fname = cres.fndesc.mangled_name
return lib.get_function_cfg(fname)
return dict((sig, self.inspect_cfg(sig, show_wrapper=show_wrapper))
for sig in self.signatures)
def inspect_disasm_cfg(self, signature=None):
"""
For inspecting the CFG of the disassembly of the function.
Requires python package: r2pipe
Requires radare2 binary on $PATH.
Notebook rendering requires python package: graphviz
signature : tuple of Numba types, optional
Print/return the disassembly CFG for only the given signatures.
If None, the IR is printed for all available signatures.
"""
if signature is not None:
cres = self.overloads[signature]
lib = cres.library
return lib.get_disasm_cfg()
return dict((sig, self.inspect_disasm_cfg(sig))
for sig in self.signatures)
    def get_annotation_info(self, signature=None):
        """
        Gets the annotation information for the function specified by
        signature. If no signature is supplied a dictionary of signature to
        annotation information is returned.
        """
        signatures = self.signatures if signature is None else [signature]
        out = collections.OrderedDict()
        for sig in signatures:
            cres = self.overloads[sig]
            ta = cres.type_annotation
            # Key matches the one used by TypeAnnotation.annotate_raw():
            # "<filename>:<first line>" paired with the typed signature.
            key = (ta.func_id.filename + ':' + str(ta.func_id.firstlineno + 1),
                   ta.signature)
            out[key] = ta.annotate_raw()[key]
        return out
    def _explain_ambiguous(self, *args, **kws):
        """
        Callback for the C _Dispatcher object.
        """
        assert not kws, "kwargs not handled"
        args = tuple([self.typeof_pyval(a) for a in args])
        # The order here must be deterministic for testing purposes, which
        # is ensured by the OrderedDict.
        sigs = self.nopython_signatures
        # This will raise
        self.typingctx.resolve_overload(self.py_func, sigs, args, kws,
                                        allow_ambiguous=False)
    def _explain_matching_error(self, *args, **kws):
        """
        Callback for the C _Dispatcher object.
        """
        assert not kws, "kwargs not handled"
        # Report the resolved Numba types of the offending arguments.
        args = [self.typeof_pyval(a) for a in args]
        msg = ("No matching definition for argument type(s) %s"
               % ', '.join(map(str, args)))
        raise TypeError(msg)
    def _search_new_conversions(self, *args, **kws):
        """
        Callback for the C _Dispatcher object.
        Search for approximately matching signatures for the given arguments,
        and ensure the corresponding conversions are registered in the C++
        type manager.
        """
        assert not kws, "kwargs not handled"
        args = [self.typeof_pyval(a) for a in args]
        found = False
        # Deliberately no early exit: every signature is visited so that
        # install_possible_conversions() can register conversions for all
        # of them, not just the first match.
        for sig in self.nopython_signatures:
            conv = self.typingctx.install_possible_conversions(args, sig.args)
            if conv:
                found = True
        return found
    def __repr__(self):
        # e.g. "Dispatcher(<function foo at 0x...>)"
        return "%s(%s)" % (type(self).__name__, self.py_func)
    def typeof_pyval(self, val):
        """
        Resolve the Numba type of Python value *val*.
        This is called from numba._dispatcher as a fallback if the native code
        cannot decide the type.
        """
        # Not going through the resolve_argument_type() indirection
        # can save a couple µs.
        try:
            tp = typeof(val, Purpose.argument)
        except ValueError:
            # Untypable value: fall back to generic object mode handling.
            tp = types.pyobject
        else:
            if tp is None:
                tp = types.pyobject
        return tp
class _MemoMixin:
    """Mixin giving each instance a lazily-created UUID and memoising
    instances in a global weak-value map so that deserialization can
    return the already-live instance instead of building a duplicate."""
    # Private per-instance UUID (name-mangled); None until first use.
    __uuid = None
    # A {uuid -> instance} mapping, for deserialization
    _memo = weakref.WeakValueDictionary()
    # hold refs to last N functions deserialized, retaining them in _memo
    # regardless of whether there is another reference
    _recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE)
    @property
    def _uuid(self):
        """
        An instance-specific UUID, to avoid multiple deserializations of
        a given instance.
        Note: this is lazily-generated, for performance reasons.
        """
        u = self.__uuid
        if u is None:
            u = str(uuid.uuid1())
            self._set_uuid(u)
        return u
    def _set_uuid(self, u):
        # A UUID may only be assigned once; assigning also memoises the
        # instance and pins it in the recent-use ring.
        assert self.__uuid is None
        self.__uuid = u
        self._memo[u] = self
        self._recent.append(self)
class Dispatcher(serialize.ReduceMixin, _MemoMixin, _DispatcherBase):
    """
    Implementation of user-facing dispatcher objects (i.e. created using
    the @jit decorator).
    This is an abstract base class. Subclasses should define the targetdescr
    class attribute.
    """
    _fold_args = True
    # Maps the `impl_kind` constructor argument to the compiler helper class.
    _impl_kinds = {
        'direct': _FunctionCompiler,
        'generated': _GeneratedFunctionCompiler,
    }
    __numba__ = 'py_func'
    def __init__(self, py_func, locals={}, targetoptions={},
                 impl_kind='direct', pipeline_class=compiler.Compiler):
        """
        Parameters
        ----------
        py_func: function object to be compiled
        locals: dict, optional
            Mapping of local variable names to Numba types. Used to override
            the types deduced by the type inference engine.
        targetoptions: dict, optional
            Target-specific config options.
        impl_kind: str
            Select the compiler mode for `@jit` and `@generated_jit`
        pipeline_class: type numba.compiler.CompilerBase
            The compiler pipeline type.
        NOTE(review): `locals`/`targetoptions` are mutable default
        arguments; this code only reads and stores them — confirm no
        caller mutates the shared defaults.
        """
        self.typingctx = self.targetdescr.typing_context
        self.targetctx = self.targetdescr.target_context
        pysig = utils.pysignature(py_func)
        arg_count = len(pysig.parameters)
        # Falling back to object mode is only allowed without nopython=True.
        can_fallback = not targetoptions.get('nopython', False)
        _DispatcherBase.__init__(self, arg_count, py_func, pysig, can_fallback,
                                 exact_match_required=False)
        functools.update_wrapper(self, py_func)
        self.targetoptions = targetoptions
        self.locals = locals
        # Caching is off by default; enable_caching() swaps in a real cache.
        self._cache = NullCache()
        compiler_class = self._impl_kinds[impl_kind]
        self._impl_kind = impl_kind
        self._compiler = compiler_class(py_func, self.targetdescr,
                                        targetoptions, locals, pipeline_class)
        self._cache_hits = collections.Counter()
        self._cache_misses = collections.Counter()
        self._type = types.Dispatcher(self)
        # Register this dispatcher as a global so it can be called from
        # other jitted functions.
        self.typingctx.insert_global(self, self._type)
    def dump(self, tab=''):
        """Print a debug dump of this dispatcher and all its overloads."""
        print(f'{tab}DUMP {type(self).__name__}[{self.py_func.__name__}, type code={self._type._code}]')
        for cres in self.overloads.values():
            cres.dump(tab = tab + '  ')
        print(f'{tab}END DUMP {type(self).__name__}[{self.py_func.__name__}]')
    @property
    def _numba_type_(self):
        return types.Dispatcher(self)
    def enable_caching(self):
        """Switch from the no-op cache to an on-disk function cache."""
        self._cache = FunctionCache(self.py_func)
    def __get__(self, obj, objtype=None):
        '''Allow a JIT function to be bound as a method to an object'''
        if obj is None:  # Unbound method
            return self
        else:  # Bound method
            return pytypes.MethodType(self, obj)
    def _reduce_states(self):
        """
        Reduce the instance for pickling.  This will serialize
        the original function as well the compilation options and
        compiled signatures, but not the compiled code itself.
        NOTE: part of ReduceMixin protocol
        """
        # Only pin signatures when compilation is disabled; otherwise the
        # rebuilt dispatcher can recompile on demand.
        if self._can_compile:
            sigs = []
        else:
            sigs = [cr.signature for cr in self.overloads.values()]
        return dict(
            uuid=str(self._uuid),
            py_func=self.py_func,
            locals=self.locals,
            targetoptions=self.targetoptions,
            impl_kind=self._impl_kind,
            can_compile=self._can_compile,
            sigs=sigs,
        )
    @classmethod
    def _rebuild(cls, uuid, py_func, locals, targetoptions, impl_kind,
                 can_compile, sigs):
        """
        Rebuild an Dispatcher instance after it was __reduce__'d.
        NOTE: part of ReduceMixin protocol
        """
        # Return the live instance when it is still memoised.
        try:
            return cls._memo[uuid]
        except KeyError:
            pass
        self = cls(py_func, locals, targetoptions, impl_kind)
        # Make sure this deserialization will be merged with subsequent ones
        self._set_uuid(uuid)
        for sig in sigs:
            self.compile(sig)
        self._can_compile = can_compile
        return self
    @global_compiler_lock
    def compile(self, sig):
        # Compile (or fetch from cache) the overload for signature *sig*
        # and return its entry point.
        if not self._can_compile:
            raise RuntimeError("compilation disabled")
        # Use counter to track recursion compilation depth
        with self._compiling_counter:
            args, return_type = sigutils.normalize_signature(sig)
            # Don't recompile if signature already exists
            existing = self.overloads.get(tuple(args))
            if existing is not None:
                return existing.entry_point
            # Try to load from disk cache
            cres = self._cache.load_overload(sig, self.targetctx)
            if cres is not None:
                self._cache_hits[sig] += 1
                # XXX fold this in add_overload()? (also see compiler.py)
                if not cres.objectmode and not cres.interpmode:
                    self.targetctx.insert_user_function(cres.entry_point,
                                                        cres.fndesc, [cres.library])
                self.add_overload(cres)
                return cres.entry_point
            self._cache_misses[sig] += 1
            try:
                cres = self._compiler.compile(args, return_type)
            except errors.ForceLiteralArg as e:
                def folded(args, kws):
                    return self._compiler.fold_argument_types(args, kws)[1]
                raise e.bind_fold_arguments(folded)
            self.add_overload(cres)
            self._cache.save_overload(sig, cres)
            return cres.entry_point
    def get_compile_result(self, sig):
        """Compile (if needed) and return the compilation result with the
        given signature.
        """
        atypes = tuple(sig.args)
        if atypes not in self.overloads:
            self.compile(atypes)
        return self.overloads[atypes]
    def recompile(self):
        """
        Recompile all signatures afresh.
        """
        sigs = list(self.overloads)
        old_can_compile = self._can_compile
        # Ensure the old overloads are disposed of, including compiled functions.
        self._make_finalizer()()
        self._reset_overloads()
        self._cache.flush()
        self._can_compile = True
        try:
            for sig in sigs:
                self.compile(sig)
        finally:
            # Restore the previous compile-enabled state even on failure.
            self._can_compile = old_can_compile
    @property
    def stats(self):
        # Snapshot of cache activity for diagnostics.
        return _CompileStats(
            cache_path=self._cache.cache_path,
            cache_hits=self._cache_hits,
            cache_misses=self._cache_misses,
        )
    def parallel_diagnostics(self, signature=None, level=1):
        """
        Print parallel diagnostic information for the given signature. If no
        signature is present it is printed for all known signatures. level is
        used to adjust the verbosity, level=1 (default) is minimal verbosity,
        and 2, 3, and 4 provide increasing levels of verbosity.
        """
        def dump(sig):
            ol = self.overloads[sig]
            pfdiag = ol.metadata.get('parfor_diagnostics', None)
            if pfdiag is None:
                msg = "No parfors diagnostic available, is 'parallel=True' set?"
                raise ValueError(msg)
            pfdiag.dump(level)
        if signature is not None:
            dump(signature)
        else:
            [dump(sig) for sig in self.signatures]
    def get_metadata(self, signature=None):
        """
        Obtain the compilation metadata for a given signature.
        """
        if signature is not None:
            return self.overloads[signature].metadata
        else:
            return dict((sig, self.overloads[sig].metadata) for sig in self.signatures)
    def get_function_type(self):
        """Return unique function type of dispatcher when possible, otherwise
        return None.
        A Dispatcher instance has unique function type when it
        contains exactly one compilation result and its compilation
        has been disabled (via its disable_compile method).
        """
        if not self._can_compile and len(self.overloads) == 1:
            cres = tuple(self.overloads.values())[0]
            return types.FunctionType(cres.signature)
class LiftedCode(serialize.ReduceMixin, _MemoMixin, _DispatcherBase):
    """
    Implementation of the hidden dispatcher objects used for lifted code
    (a lifted loop is really compiled as a separate function).
    """
    _fold_args = False
    can_cache = False
    def __init__(self, func_ir, typingctx, targetctx, flags, locals):
        # Unlike Dispatcher, lifted code is compiled from already-built
        # Numba IR rather than from a Python function.
        self.func_ir = func_ir
        self.lifted_from = None
        self.typingctx = typingctx
        self.targetctx = targetctx
        self.flags = flags
        self.locals = locals
        _DispatcherBase.__init__(self, self.func_ir.arg_count,
                                 self.func_ir.func_id.func,
                                 self.func_ir.func_id.pysig,
                                 can_fallback=True,
                                 exact_match_required=False)
    def _reduce_states(self):
        """
        Reduce the instance for pickling.  This will serialize
        the original function as well the compilation options and
        compiled signatures, but not the compiled code itself.
        NOTE: part of ReduceMixin protocol
        """
        return dict(
            uuid=self._uuid, func_ir=self.func_ir, flags=self.flags,
            locals=self.locals, extras=self._reduce_extras(),
        )
    def _reduce_extras(self):
        """
        NOTE: sub-class can override to add extra states
        """
        return {}
    @classmethod
    def _rebuild(cls, uuid, func_ir, flags, locals, extras):
        """
        Rebuild an Dispatcher instance after it was __reduce__'d.
        NOTE: part of ReduceMixin protocol
        """
        try:
            return cls._memo[uuid]
        except KeyError:
            pass
        # NOTE: We are assuming that this is must be cpu_target, which is true
        # for now.
        # TODO: refactor this to not assume on `cpu_target`
        from numba.core import registry
        typingctx = registry.cpu_target.typing_context
        targetctx = registry.cpu_target.target_context
        self = cls(func_ir, typingctx, targetctx, flags, locals, **extras)
        self._set_uuid(uuid)
        return self
    def get_source_location(self):
        """Return the starting line number of the loop.
        """
        return self.func_ir.loc.line
    def _pre_compile(self, args, return_type, flags):
        """Pre-compile actions
        """
        pass
    @global_compiler_lock
    def compile(self, sig):
        # Use counter to track recursion compilation depth
        with self._compiling_counter:
            # XXX this is mostly duplicated from Dispatcher.
            flags = self.flags
            args, return_type = sigutils.normalize_signature(sig)
            # Don't recompile if signature already exists
            # (e.g. if another thread compiled it before we got the lock)
            existing = self.overloads.get(tuple(args))
            if existing is not None:
                return existing.entry_point
            self._pre_compile(args, return_type, flags)
            # Clone IR to avoid (some of the) mutation in the rewrite pass
            cloned_func_ir = self.func_ir.copy()
            cres = compiler.compile_ir(typingctx=self.typingctx,
                                       targetctx=self.targetctx,
                                       func_ir=cloned_func_ir,
                                       args=args, return_type=return_type,
                                       flags=flags, locals=self.locals,
                                       lifted=(),
                                       lifted_from=self.lifted_from,
                                       is_lifted_loop=True,)
            # Check typing error if object mode is used
            if cres.typing_error is not None and not flags.enable_pyobject:
                raise cres.typing_error
            self.add_overload(cres)
            return cres.entry_point
class LiftedLoop(LiftedCode):
    """Dispatcher for a loop that was lifted out of an objectmode function."""
    def _pre_compile(self, args, return_type, flags):
        # The loop body itself must not be loop-lifted again,
        # presumably to avoid recursive lifting — see flags setup upstream.
        assert not flags.enable_looplift, "Enable looplift flags is on"
class LiftedWith(LiftedCode):
    """Dispatcher for code lifted out of a ``with``-context block."""
    can_cache = True
    def _reduce_extras(self):
        # NOTE(review): `output_types` is only assigned by the
        # ObjModeLiftedWith subclass (see below); this assumes `self`
        # is such an instance — confirm for other subclasses.
        return dict(output_types=self.output_types)
    @property
    def _numba_type_(self):
        return types.Dispatcher(self)
    def get_call_template(self, args, kws):
        """
        Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types. This enables the resolving of the return type.
        A (template, pysig, args, kws) tuple is returned.
        """
        # Ensure an overload is available
        if self._can_compile:
            self.compile(tuple(args))
        pysig = None
        # Create function type for typing
        func_name = self.py_func.__name__
        name = "CallTemplate({0})".format(func_name)
        # The `key` isn't really used except for diagnosis here,
        # so avoid keeping a reference to `cfunc`.
        call_template = typing.make_concrete_template(
            name, key=func_name, signatures=self.nopython_signatures)
        return call_template, pysig, args, kws
class ObjModeLiftedWith(LiftedWith):
    """Dispatcher for a ``with objmode(...)`` block: the body is always
    compiled in object mode and its outputs coerced to declared types."""
    def __init__(self, *args, **kwargs):
        self.output_types = kwargs.pop('output_types', None)
        # Starts MRO lookup *after* LiftedWith; since LiftedWith defines
        # no __init__ this resolves to LiftedCode.__init__.
        super(LiftedWith, self).__init__(*args, **kwargs)
        if not self.flags.force_pyobject:
            raise ValueError("expecting `flags.force_pyobject`")
        if self.output_types is None:
            raise TypeError('`output_types` must be provided')
    @property
    def _numba_type_(self):
        return types.ObjModeDispatcher(self)
    def get_call_template(self, args, kws):
        """
        Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types. This enables the resolving of the return type.
        A (template, pysig, args, kws) tuple is returned.
        """
        assert not kws
        self._legalize_arg_types(args)
        # Coerce to object mode
        args = [types.ffi_forced_object] * len(args)
        if self._can_compile:
            self.compile(tuple(args))
        # The return type is the declared output_types, not inferred.
        signatures = [typing.signature(self.output_types, *args)]
        pysig = None
        func_name = self.py_func.__name__
        name = "CallTemplate({0})".format(func_name)
        call_template = typing.make_concrete_template(
            name, key=func_name, signatures=signatures)
        return call_template, pysig, args, kws
    def _legalize_arg_types(self, args):
        # Reject argument types that cannot cross the objmode boundary.
        for i, a in enumerate(args, start=1):
            if isinstance(a, types.List):
                msg = (
                    'Does not support list type inputs into '
                    'with-context for arg {}'
                )
                raise errors.TypingError(msg.format(i))
            elif isinstance(a, types.Dispatcher):
                msg = (
                    'Does not support function type inputs into '
                    'with-context for arg {}'
                )
                raise errors.TypingError(msg.format(i))
# Initialize typeof machinery
# Registers OmittedArg and the {type name -> type code} table for all
# numeric types with the C-level dispatcher at import time.
_dispatcher.typeof_init(
    OmittedArg,
    dict((str(t), t._code) for t in types.number_domain))
| 37.265343
| 104
| 0.598377
|
import collections
import functools
import os
import struct
import sys
import types as pytypes
import uuid
import weakref
from copy import deepcopy
from numba import _dispatcher
from numba.core import utils, types, errors, typing, serialize, config, compiler, sigutils
from numba.core.compiler_lock import global_compiler_lock
from numba.core.typeconv.rules import default_type_manager
from numba.core.typing.templates import fold_arguments
from numba.core.typing.typeof import Purpose, typeof
from numba.core.bytecode import get_code_object
from numba.core.caching import NullCache, FunctionCache
from numba.core import entrypoints
class OmittedArg(object):
    """Wrapper marking a call argument whose value comes from the
    function's default; carries the default value so typing can treat
    it as ``types.Omitted``."""
    def __init__(self, value):
        # Keep the wrapped default value accessible to typing code.
        self.value = value
    def __repr__(self):
        text = "omitted arg(%r)" % (self.value,)
        return text
    @property
    def _numba_type_(self):
        # Numba typing protocol hook used by typeof().
        return types.Omitted(self.value)
class _FunctionCompiler(object):
    """Drives compilation of a single Python function for a Dispatcher,
    remembering typing failures so they are not retried."""
    def __init__(self, py_func, targetdescr, targetoptions, locals,
                 pipeline_class):
        self.py_func = py_func
        self.targetdescr = targetdescr
        self.targetoptions = targetoptions
        self.locals = locals
        self.pysig = utils.pysignature(self.py_func)
        self.pipeline_class = pipeline_class
        # Remember key=(args, return_type) where compilation failed with
        # a TypingError, mapped to that error.
        self._failed_cache = {}
    def fold_argument_types(self, args, kws):
        """Fold keyword args and defaults into the positional argument
        types; returns ``(pysig, folded_arg_types)``."""
        def normal_handler(index, param, value):
            return value
        def default_handler(index, param, default):
            # Defaulted parameters become Omitted types.
            return types.Omitted(default)
        def stararg_handler(index, param, values):
            return types.StarArgTuple(values)
        args = fold_arguments(self.pysig, args, kws,
                              normal_handler,
                              default_handler,
                              stararg_handler)
        return self.pysig, args
    def compile(self, args, return_type):
        """Compile for the given argument/return types, re-raising a
        previously cached typing failure if there was one."""
        status, retval = self._compile_cached(args, return_type)
        if status:
            return retval
        else:
            raise retval
    def _compile_cached(self, args, return_type):
        """Return (True, CompileResult) on success, or (False, TypingError)
        on a (possibly previously cached) typing failure."""
        key = tuple(args), return_type
        try:
            return False, self._failed_cache[key]
        except KeyError:
            pass
        try:
            retval = self._compile_core(args, return_type)
        except errors.TypingError as e:
            # Cache the failure so repeated calls fail fast.
            self._failed_cache[key] = e
            return False, e
        else:
            return True, retval
    def _compile_core(self, args, return_type):
        """Run the actual compiler pipeline on the implementation."""
        flags = compiler.Flags()
        self.targetdescr.options.parse_as_flags(flags, self.targetoptions)
        flags = self._customize_flags(flags)
        impl = self._get_implementation(args, {})
        cres = compiler.compile_extra(self.targetdescr.typing_context,
                                      self.targetdescr.target_context,
                                      impl,
                                      args=args, return_type=return_type,
                                      flags=flags, locals=self.locals,
                                      pipeline_class=self.pipeline_class)
        # A typing error is only fatal when object mode is not allowed.
        if cres.typing_error is not None and not flags.enable_pyobject:
            raise cres.typing_error
        return cres
    def get_globals_for_reduction(self):
        return serialize._get_function_globals_for_reduction(self.py_func)
    def _get_implementation(self, args, kws):
        # Plain @jit: the implementation is the decorated function itself.
        return self.py_func
    def _customize_flags(self, flags):
        # Hook for subclasses; no-op here.
        return flags
class _GeneratedFunctionCompiler(_FunctionCompiler):
    """Compiler helper for @generated_jit: the decorated function is called
    with the argument *types* and must return the actual implementation."""
    def __init__(self, py_func, targetdescr, targetoptions, locals,
                 pipeline_class):
        super(_GeneratedFunctionCompiler, self).__init__(
            py_func, targetdescr, targetoptions, locals, pipeline_class)
        # Implementations returned by the generator, kept alive here.
        self.impls = set()
    def get_globals_for_reduction(self):
        return serialize._get_function_globals_for_reduction(self.py_func)
    def _get_implementation(self, args, kws):
        # The generator produces the concrete implementation for these types.
        impl = self.py_func(*args, **kws)
        # Check the generated implementation's signature matches the
        # generator's (same parameter names, kinds and compatible defaults).
        pysig = utils.pysignature(self.py_func)
        implsig = utils.pysignature(impl)
        ok = len(pysig.parameters) == len(implsig.parameters)
        if ok:
            for pyparam, implparam in zip(pysig.parameters.values(),
                                          implsig.parameters.values()):
                if (pyparam.name != implparam.name or
                    pyparam.kind != implparam.kind or
                    (implparam.default is not implparam.empty and
                     implparam.default != pyparam.default)):
                    ok = False
        if not ok:
            raise TypeError("generated implementation %s should be compatible "
                            "with signature '%s', but has signature '%s'"
                            % (impl, pysig, implsig))
        self.impls.add(impl)
        return impl
# Summary of compilation-cache activity (exposed via Dispatcher.stats).
_CompileStats = collections.namedtuple(
    '_CompileStats', ('cache_path', 'cache_hits', 'cache_misses'))
class _CompilingCounter(object):
def __init__(self):
self.counter = 0
def __enter__(self):
assert self.counter >= 0
self.counter += 1
def __exit__(self, *args, **kwargs):
self.counter -= 1
assert self.counter >= 0
def __bool__(self):
return self.counter > 0
__nonzero__ = __bool__
class _DispatcherBase(_dispatcher.Dispatcher):
__numba__ = "py_func"
    def __init__(self, arg_count, py_func, pysig, can_fallback,
                 exact_match_required):
        """Set up Python-side state and initialise the C-level dispatcher.

        arg_count: number of parameters of *py_func*.
        pysig: inspect.Signature of the function.
        can_fallback: whether object-mode fallback is permitted.
        exact_match_required: whether argument types must match exactly.
        """
        self._tm = default_type_manager
        # Insertion-ordered {arg types tuple -> CompileResult}.
        self.overloads = collections.OrderedDict()
        self.py_func = py_func
        # other parts of Numba assume the old Python 2 name for code object
        self.func_code = get_code_object(py_func)
        # but newer python uses a different name
        self.__code__ = self.func_code
        argnames = tuple(pysig.parameters)
        default_values = self.py_func.__defaults__ or ()
        # Wrap default values so the C dispatcher can identify them.
        defargs = tuple(OmittedArg(val) for val in default_values)
        try:
            lastarg = list(pysig.parameters.values())[-1]
        except IndexError:
            # Zero-parameter function.
            has_stararg = False
        else:
            has_stararg = lastarg.kind == lastarg.VAR_POSITIONAL
        _dispatcher.Dispatcher.__init__(self, self._tm.get_pointer(),
                                        arg_count, self._fold_args,
                                        argnames, defargs,
                                        can_fallback,
                                        has_stararg,
                                        exact_match_required)
        self.doc = py_func.__doc__
        self._compiling_counter = _CompilingCounter()
        # Dispose of compiled overloads when this dispatcher is collected.
        weakref.finalize(self, self._make_finalizer())
    def _compilation_chain_init_hook(self):
        """Run initialisation required before compiling, e.g. registering
        setuptools entry-point extensions."""
        entrypoints.init_all()
    def _reset_overloads(self):
        """Drop all compiled overloads, both C-side and Python-side."""
        self._clear()
        self.overloads.clear()
    def _make_finalizer(self):
        """
        Return a finalizer function that will release references to
        related compiled functions.
        """
        overloads = self.overloads
        targetctx = self.targetctx
        # (see issue #689)
        def finalizer(shutting_down=utils.shutting_down):
            # The finalizer may crash at shutdown, skip it (resources
            # will be cleared by the process exiting, anyway).
            if shutting_down():
                return
            # This function must *not* hold any reference to self:
            # we take care to bind the necessary objects in the closure.
            for cres in overloads.values():
                try:
                    targetctx.remove_user_function(cres.entry_point)
                except KeyError:
                    pass
        return finalizer
    @property
    def signatures(self):
        """
        Returns a list of compiled function signatures.
        """
        return list(self.overloads)
    @property
    def nopython_signatures(self):
        # Signatures of overloads compiled in nopython mode only.
        return [cres.signature for cres in self.overloads.values()
                if not cres.objectmode and not cres.interpmode]
    def disable_compile(self, val=True):
        """Disable the compilation of new signatures at call time.
        """
        # If disabling compilation then there must be at least one signature
        assert (not val) or len(self.signatures) > 0
        self._can_compile = not val
    def add_overload(self, cres):
        """Register compile result *cres* under its argument types in both
        the C-level dispatcher table and ``self.overloads``."""
        args = tuple(cres.signature.args)
        sig = [a._code for a in args]
        self._insert(sig, cres.entry_point, cres.objectmode, cres.interpmode)
        self.overloads[args] = cres
    def fold_argument_types(self, args, kws):
        """Delegate folding of keyword/default argument types to the
        function compiler; returns (pysig, folded_args)."""
        return self._compiler.fold_argument_types(args, kws)
    def get_call_template(self, args, kws):
        """
        Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types.  This allows resolution of the return type.
        A (template, pysig, args, kws) tuple is returned.
        """
        # XXX how about a dispatcher template class automating the
        # following?
        # Fold keyword arguments and resolve default values
        pysig, args = self._compiler.fold_argument_types(args, kws)
        kws = {}
        # Ensure an overload is available
        if self._can_compile:
            self.compile(tuple(args))
        # Create function type for typing
        func_name = self.py_func.__name__
        name = "CallTemplate({0})".format(func_name)
        # The `key` isn't really used except for diagnosis here,
        call_template = typing.make_concrete_template(
            name, key=func_name, signatures=self.nopython_signatures)
        return call_template, pysig, args, kws
    def get_overload(self, sig):
        """
        Return the compiled function for the given signature.
        """
        args, return_type = sigutils.normalize_signature(sig)
        return self.overloads[tuple(args)].entry_point
    @property
    def is_compiling(self):
        """
        Whether a specialization is currently being compiled.
        """
        return self._compiling_counter
    def _compile_for_args(self, *args, **kws):
        """
        For internal use.  Compile a specialized version of the function
        for the given *args* and *kws*, and return the resulting callable.
        """
        assert not kws
        # Run any initialisation required for the compilation chain.
        self._compilation_chain_init_hook()
        def error_rewrite(e, issue_type):
            """
            Rewrite and raise Exception `e` with help supplied based on the
            specified issue_type.
            """
            if config.SHOW_HELP:
                help_msg = errors.error_extras[issue_type]
                e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
            if config.FULL_TRACEBACKS:
                raise e
            else:
                raise e.with_traceback(None)
        # Resolve the Numba type of every concrete argument value.
        argtypes = []
        for a in args:
            if isinstance(a, OmittedArg):
                argtypes.append(types.Omitted(a.value))
            else:
                argtypes.append(self.typeof_pyval(a))
        try:
            return self.compile(tuple(argtypes))
        except errors.ForceLiteralArg as e:
            # Compiler requested re-entry with the arguments listed in
            # e.requested_args converted to Literal types.
            already_lit_pos = [i for i in e.requested_args
                               if isinstance(args[i], types.Literal)]
            if already_lit_pos:
                # An argument that is already Literal means a repeated
                # request: abort to avoid an infinite compilation loop.
                m = ("Repeated literal typing request.\n"
                     "{}.\n"
                     "This is likely caused by an error in typing. "
                     "Please see nested and suppressed exceptions.")
                info = ', '.join('Arg #{} is {}'.format(i, args[i])
                                 for i in sorted(already_lit_pos))
                raise errors.CompilerError(m.format(info))
            # Convert the requested arguments into Literals and re-enter.
            args = [(types.literal
                     if i in e.requested_args
                     else lambda x: x)(args[i])
                    for i, v in enumerate(args)]
            return self._compile_for_args(*args)
        except errors.TypingError as e:
            # The typing error may be due to an argument that failed
            # inferencing as a Numba type; collect those for the message.
            failed_args = []
            for i, arg in enumerate(args):
                val = arg.value if isinstance(arg, OmittedArg) else arg
                try:
                    tp = typeof(val, Purpose.argument)
                except ValueError as typeof_exc:
                    failed_args.append((i, str(typeof_exc)))
                else:
                    if tp is None:
                        failed_args.append(
                            (i,
                             "cannot determine Numba type of value %r" % (val,)))
            if failed_args:
                # Patch error message to ease debugging.
                msg = str(e).rstrip() + (
                    "\n\nThis error may have been caused by the following argument(s):\n%s\n"
                    % "\n".join("- argument %d: %s" % (i, err)
                                for i, err in failed_args))
                e.patch_message(msg)
            error_rewrite(e, 'typing')
        except errors.UnsupportedError as e:
            # Something unsupported is present in the user code; add help info.
            error_rewrite(e, 'unsupported_error')
        except (errors.NotDefinedError, errors.RedefinedError,
                errors.VerificationError) as e:
            # Likely syntactically or otherwise invalid supplied code.
            error_rewrite(e, 'interpreter')
        except errors.ConstantInferenceError as e:
            # From trying to infer something as constant when it isn't
            # or isn't supported as a constant
            error_rewrite(e, 'constant_inference')
        except Exception as e:
            if config.SHOW_HELP:
                if hasattr(e, 'patch_message'):
                    help_msg = errors.error_extras['reportable']
                    e.patch_message('\n'.join((str(e).rstrip(), help_msg)))
            # Ignore the FULL_TRACEBACKS config: this needs reporting.
            raise e
def inspect_llvm(self, signature=None):
if signature is not None:
lib = self.overloads[signature].library
return lib.get_llvm_str()
return dict((sig, self.inspect_llvm(sig)) for sig in self.signatures)
def inspect_asm(self, signature=None):
if signature is not None:
lib = self.overloads[signature].library
return lib.get_asm_str()
return dict((sig, self.inspect_asm(sig)) for sig in self.signatures)
    def inspect_types(self, file=None, signature=None,
                      pretty=False, style='default', **kwargs):
        """Print/return Numba intermediate representation (IR)-annotated code.

        Prints to *file* (default sys.stdout) unless ``pretty=True``, in
        which case an Annotate object is returned and *file* must be None.
        If *signature* is given, only that overload is shown.  *style* is
        consumed by pygments formatters when ``pretty=True``.
        """
        overloads = self.overloads
        if signature is not None:
            # Restrict output to the single requested overload.
            overloads = {signature: self.overloads[signature]}
        if not pretty:
            if file is None:
                file = sys.stdout
            for ver, res in overloads.items():
                print("%s %s" % (self.py_func.__name__, ver), file=file)
                print('-' * 80, file=file)
                print(res.type_annotation, file=file)
                print('=' * 80, file=file)
        else:
            if file is not None:
                raise ValueError("`file` must be None if `pretty=True`")
            # Imported lazily: pretty rendering needs optional dependencies.
            from numba.core.annotations.pretty_annotate import Annotate
            return Annotate(self, signature=signature, style=style)
def inspect_cfg(self, signature=None, show_wrapper=None):
if signature is not None:
cres = self.overloads[signature]
lib = cres.library
if show_wrapper == 'python':
fname = cres.fndesc.llvm_cpython_wrapper_name
elif show_wrapper == 'cfunc':
fname = cres.fndesc.llvm_cfunc_wrapper_name
else:
fname = cres.fndesc.mangled_name
return lib.get_function_cfg(fname)
return dict((sig, self.inspect_cfg(sig, show_wrapper=show_wrapper))
for sig in self.signatures)
def inspect_disasm_cfg(self, signature=None):
if signature is not None:
cres = self.overloads[signature]
lib = cres.library
return lib.get_disasm_cfg()
return dict((sig, self.inspect_disasm_cfg(sig))
for sig in self.signatures)
def get_annotation_info(self, signature=None):
signatures = self.signatures if signature is None else [signature]
out = collections.OrderedDict()
for sig in signatures:
cres = self.overloads[sig]
ta = cres.type_annotation
key = (ta.func_id.filename + ':' + str(ta.func_id.firstlineno + 1),
ta.signature)
out[key] = ta.annotate_raw()[key]
return out
def _explain_ambiguous(self, *args, **kws):
assert not kws, "kwargs not handled"
args = tuple([self.typeof_pyval(a) for a in args])
sigs = self.nopython_signatures
self.typingctx.resolve_overload(self.py_func, sigs, args, kws,
allow_ambiguous=False)
def _explain_matching_error(self, *args, **kws):
assert not kws, "kwargs not handled"
args = [self.typeof_pyval(a) for a in args]
msg = ("No matching definition for argument type(s) %s"
% ', '.join(map(str, args)))
raise TypeError(msg)
def _search_new_conversions(self, *args, **kws):
assert not kws, "kwargs not handled"
args = [self.typeof_pyval(a) for a in args]
found = False
for sig in self.nopython_signatures:
conv = self.typingctx.install_possible_conversions(args, sig.args)
if conv:
found = True
return found
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self.py_func)
def typeof_pyval(self, val):
try:
tp = typeof(val, Purpose.argument)
except ValueError:
tp = types.pyobject
else:
if tp is None:
tp = types.pyobject
return tp
class _MemoMixin:
    """Mixin adding a UUID-keyed, process-wide memo of live instances.

    `_memo` is weak-valued so entries disappear when unreferenced;
    `_recent` keeps strong references to the most recently registered
    instances so they are not collected prematurely.
    """
    __uuid = None
    # uuid -> instance; entries vanish once the instance is unreferenced.
    _memo = weakref.WeakValueDictionary()
    # Bounded strong-reference queue of recently registered instances.
    _recent = collections.deque(maxlen=config.FUNCTION_CACHE_SIZE)

    @property
    def _uuid(self):
        """Lazily allocate, register and return this object's unique id."""
        if self.__uuid is None:
            self._set_uuid(str(uuid.uuid1()))
        return self.__uuid

    def _set_uuid(self, u):
        """Assign *u* as this object's id and register it in the memo."""
        assert self.__uuid is None
        self.__uuid = u
        self._memo[u] = self
        self._recent.append(self)
class Dispatcher(serialize.ReduceMixin, _MemoMixin, _DispatcherBase):
    """User-facing dispatcher object (as created by the @jit decorator).

    Lazily compiles one specialization per distinct argument-type tuple and
    caches the results in ``self.overloads``.
    """
    # Whether to fold named arguments / default values into the positional
    # argument list before dispatching.
    _fold_args = True
    # Compiler helper class per implementation kind.
    _impl_kinds = {
        'direct': _FunctionCompiler,
        'generated': _GeneratedFunctionCompiler,
    }
    # Name of the attribute that holds the wrapped Python function
    # (consumed by Numba's introspection machinery).
    __numba__ = 'py_func'
    def __init__(self, py_func, locals={}, targetoptions={},
                 impl_kind='direct', pipeline_class=compiler.Compiler):
        """
        Parameters
        ----------
        py_func: function object to be compiled
        locals: dict, optional
            Mapping of local variable names to Numba types. Used to override
            the types deduced by the type inference engine.
        targetoptions: dict, optional
            Target-specific config options.
        impl_kind: str
            Select the compiler mode for `py_func`, one of the keys of
            ``_impl_kinds``.
        pipeline_class: type numba.compiler.CompilerBase
            The compiler pipeline type for customizing the compilation stages.

        NOTE(review): `locals={}` / `targetoptions={}` are mutable defaults
        shared across calls; safe only while never mutated — verify.
        """
        self.typingctx = self.targetdescr.typing_context
        self.targetctx = self.targetdescr.target_context
        pysig = utils.pysignature(py_func)
        arg_count = len(pysig.parameters)
        # Object-mode fallback is allowed unless nopython=True was requested.
        can_fallback = not targetoptions.get('nopython', False)
        _DispatcherBase.__init__(self, arg_count, py_func, pysig, can_fallback,
                                 exact_match_required=False)
        functools.update_wrapper(self, py_func)
        self.targetoptions = targetoptions
        self.locals = locals
        # Disk caching is off until enable_caching() is called.
        self._cache = NullCache()
        compiler_class = self._impl_kinds[impl_kind]
        self._impl_kind = impl_kind
        self._compiler = compiler_class(py_func, self.targetdescr,
                                        targetoptions, locals, pipeline_class)
        self._cache_hits = collections.Counter()
        self._cache_misses = collections.Counter()
        self._type = types.Dispatcher(self)
        self.typingctx.insert_global(self, self._type)
    def dump(self, tab=''):
        """Print a debug dump of this dispatcher and all its overloads."""
        print(f'{tab}DUMP {type(self).__name__}[{self.py_func.__name__}, type code={self._type._code}]')
        for cres in self.overloads.values():
            cres.dump(tab = tab + ' ')
        print(f'{tab}END DUMP {type(self).__name__}[{self.py_func.__name__}]')
    @property
    def _numba_type_(self):
        # Numba type of this dispatcher when used as a value in jitted code.
        return types.Dispatcher(self)
    def enable_caching(self):
        """Turn on on-disk caching of compilation results."""
        self._cache = FunctionCache(self.py_func)
    def __get__(self, obj, objtype=None):
        """Descriptor protocol: allow a dispatcher to be used as a method."""
        if obj is None:  # Unbound method
            return self
        else:  # Bound method
            return pytypes.MethodType(self, obj)
    def _reduce_states(self):
        """Collect the state needed to pickle this dispatcher.

        NOTE: part of ReduceMixin protocol. Compiled signatures are only
        recorded when compilation is disabled (closed dispatcher).
        """
        if self._can_compile:
            sigs = []
        else:
            sigs = [cr.signature for cr in self.overloads.values()]
        return dict(
            uuid=str(self._uuid),
            py_func=self.py_func,
            locals=self.locals,
            targetoptions=self.targetoptions,
            impl_kind=self._impl_kind,
            can_compile=self._can_compile,
            sigs=sigs,
        )
    @classmethod
    def _rebuild(cls, uuid, py_func, locals, targetoptions, impl_kind,
                 can_compile, sigs):
        """Rebuild an instance during unpickling (ReduceMixin protocol).

        Reuses a live instance from the memo when the uuid is known.
        """
        try:
            return cls._memo[uuid]
        except KeyError:
            pass
        self = cls(py_func, locals, targetoptions, impl_kind)
        # Make sure this deserialization will be merged with subsequent ones.
        self._set_uuid(uuid)
        for sig in sigs:
            self.compile(sig)
        self._can_compile = can_compile
        return self
    @global_compiler_lock
    def compile(self, sig):
        """Compile (or fetch a cached build of) the overload for *sig* and
        return its entry point."""
        if not self._can_compile:
            raise RuntimeError("compilation disabled")
        # Use counter to track recursion compilation depth
        with self._compiling_counter:
            args, return_type = sigutils.normalize_signature(sig)
            # Don't recompile if signature already exists
            existing = self.overloads.get(tuple(args))
            if existing is not None:
                return existing.entry_point
            # Try to load from disk cache
            cres = self._cache.load_overload(sig, self.targetctx)
            if cres is not None:
                self._cache_hits[sig] += 1
                # XXX fold this in add_overload()? (also see compiler.py)
                if not cres.objectmode and not cres.interpmode:
                    self.targetctx.insert_user_function(cres.entry_point,
                                                        cres.fndesc, [cres.library])
                self.add_overload(cres)
                return cres.entry_point
            self._cache_misses[sig] += 1
            try:
                cres = self._compiler.compile(args, return_type)
            except errors.ForceLiteralArg as e:
                def folded(args, kws):
                    return self._compiler.fold_argument_types(args, kws)[1]
                raise e.bind_fold_arguments(folded)
            self.add_overload(cres)
            self._cache.save_overload(sig, cres)
            return cres.entry_point
    def get_compile_result(self, sig):
        """Return the CompileResult for *sig*, compiling it if needed."""
        atypes = tuple(sig.args)
        if atypes not in self.overloads:
            self.compile(atypes)
        return self.overloads[atypes]
    def recompile(self):
        """Recompile all existing signatures from scratch."""
        sigs = list(self.overloads)
        old_can_compile = self._can_compile
        # Ensure the old overloads are disposed of, including compiled functions.
        self._make_finalizer()()
        self._reset_overloads()
        self._cache.flush()
        self._can_compile = True
        try:
            for sig in sigs:
                self.compile(sig)
        finally:
            self._can_compile = old_can_compile
    @property
    def stats(self):
        # Cache-related statistics for introspection.
        return _CompileStats(
            cache_path=self._cache.cache_path,
            cache_hits=self._cache_hits,
            cache_misses=self._cache_misses,
        )
    def parallel_diagnostics(self, signature=None, level=1):
        """Print parallel diagnostics information for the given signature.

        If no signature is present it is printed for all known signatures.
        `level` is used to adjust the verbosity; `level=1` (default) is the
        least verbose.
        """
        def dump(sig):
            ol = self.overloads[sig]
            pfdiag = ol.metadata.get('parfor_diagnostics', None)
            if pfdiag is None:
                msg = "No parfors diagnostic available, is 'parallel=True' set?"
                raise ValueError(msg)
            pfdiag.dump(level)
        if signature is not None:
            dump(signature)
        else:
            [dump(sig) for sig in self.signatures]
    def get_metadata(self, signature=None):
        """Obtain the compilation metadata for a given signature, or for all
        signatures when none is given."""
        if signature is not None:
            return self.overloads[signature].metadata
        else:
            return dict((sig, self.overloads[sig].metadata) for sig in self.signatures)
    def get_function_type(self):
        """Return unique function type of dispatcher when possible, otherwise
        return None.

        A Dispatcher instance has a unique function type when it contains
        exactly one compilation result and its compilation has been disabled.
        """
        if not self._can_compile and len(self.overloads) == 1:
            cres = tuple(self.overloads.values())[0]
            return types.FunctionType(cres.signature)
class LiftedCode(serialize.ReduceMixin, _MemoMixin, _DispatcherBase):
    """Dispatcher for lifted code: a fragment (e.g. a loop or with-block)
    extracted from a function and compiled as a separate unit from its IR.
    """
    # Lifted fragments do not fold named arguments.
    _fold_args = False
    # Disk caching disabled by default; subclasses may opt in.
    can_cache = False
    def __init__(self, func_ir, typingctx, targetctx, flags, locals):
        # IR of the lifted fragment; compiled lazily per argument types.
        self.func_ir = func_ir
        # Set later to the dispatcher the fragment was lifted from.
        self.lifted_from = None
        self.typingctx = typingctx
        self.targetctx = targetctx
        self.flags = flags
        self.locals = locals
        _DispatcherBase.__init__(self, self.func_ir.arg_count,
                                 self.func_ir.func_id.func,
                                 self.func_ir.func_id.pysig,
                                 can_fallback=True,
                                 exact_match_required=False)
    def _reduce_states(self):
        """Collect the state needed to pickle this object
        (ReduceMixin protocol)."""
        return dict(
            uuid=self._uuid, func_ir=self.func_ir, flags=self.flags,
            locals=self.locals, extras=self._reduce_extras(),
        )
    def _reduce_extras(self):
        """Return a dict of extra keyword arguments for _rebuild();
        subclasses override to add state."""
        return {}
    @classmethod
    def _rebuild(cls, uuid, func_ir, flags, locals, extras):
        """Rebuild an instance during unpickling (ReduceMixin protocol)."""
        try:
            return cls._memo[uuid]
        except KeyError:
            pass
        # NOTE: We are assuming that this is must be cpu_target, which is true
        #       for now.
        # TODO: refactor this to not assume on `cpu_target`
        from numba.core import registry
        typingctx = registry.cpu_target.typing_context
        targetctx = registry.cpu_target.target_context
        self = cls(func_ir, typingctx, targetctx, flags, locals, **extras)
        self._set_uuid(uuid)
        return self
    def get_source_location(self):
        """Return the starting line number of the loop."""
        return self.func_ir.loc.line
    def _pre_compile(self, args, return_type, flags):
        """Pre-compile hook; subclasses may validate flags/arguments here."""
        pass
    @global_compiler_lock
    def compile(self, sig):
        """Compile the lifted IR for the given signature and return the
        entry point; reuses an existing overload when available."""
        # Use counter to track recursion compilation depth
        with self._compiling_counter:
            # XXX this is mostly duplicated from Dispatcher.
            flags = self.flags
            args, return_type = sigutils.normalize_signature(sig)
            # Don't recompile if signature already exists
            # (e.g. if another thread compiled it before we got the lock)
            existing = self.overloads.get(tuple(args))
            if existing is not None:
                return existing.entry_point
            self._pre_compile(args, return_type, flags)
            # Clone IR to avoid (some of the) mutation in the rewrite pass
            cloned_func_ir = self.func_ir.copy()
            cres = compiler.compile_ir(typingctx=self.typingctx,
                                       targetctx=self.targetctx,
                                       func_ir=cloned_func_ir,
                                       args=args, return_type=return_type,
                                       flags=flags, locals=self.locals,
                                       lifted=(),
                                       lifted_from=self.lifted_from,
                                       is_lifted_loop=True,)
            # Check typing error if object mode is used
            if cres.typing_error is not None and not flags.enable_pyobject:
                raise cres.typing_error
            self.add_overload(cres)
            return cres.entry_point
class LiftedLoop(LiftedCode):
    """Dispatcher for a loop lifted out of its enclosing function."""
    def _pre_compile(self, args, return_type, flags):
        # A lifted loop must itself be compiled without loop-lifting,
        # otherwise lifting would recurse.
        assert not flags.enable_looplift, "Enable looplift flags is on"
class LiftedWith(LiftedCode):
    """Dispatcher for a with-block lifted out of its enclosing function and
    compiled in nopython mode."""
    can_cache = True

    def _reduce_extras(self):
        # Extra keyword state for _rebuild() during unpickling.
        return dict(output_types=self.output_types)

    @property
    def _numba_type_(self):
        # Numba type of this object when referenced from jitted code.
        return types.Dispatcher(self)

    def get_call_template(self, args, kws):
        """Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types.  May compile a new specialization."""
        if self._can_compile:
            # Compile an overload for these argument types if allowed.
            self.compile(tuple(args))
        func_name = self.py_func.__name__
        # so avoid keeping a reference to `cfunc`.
        template = typing.make_concrete_template(
            "CallTemplate({0})".format(func_name), key=func_name,
            signatures=self.nopython_signatures)
        return template, None, args, kws
class ObjModeLiftedWith(LiftedWith):
    """Dispatcher for a lifted with-block forced to run in object mode.

    Requires `flags.force_pyobject` and explicit `output_types`, since the
    outputs cannot be type-inferred in object mode.
    """

    def __init__(self, *args, **kwargs):
        self.output_types = kwargs.pop('output_types', None)
        # NOTE(review): intentionally starts the super() chain at
        # LiftedWith's parent — confirm before changing.
        super(LiftedWith, self).__init__(*args, **kwargs)
        if not self.flags.force_pyobject:
            raise ValueError("expecting `flags.force_pyobject`")
        if self.output_types is None:
            raise TypeError('`output_types` must be provided')

    @property
    def _numba_type_(self):
        # Distinct dispatcher type marking object-mode execution.
        return types.ObjModeDispatcher(self)

    def get_call_template(self, args, kws):
        """Get a typing.ConcreteTemplate for this dispatcher and the given
        *args* and *kws* types.  All arguments are coerced to pyobjects and
        the declared output types are used for the returned signature."""
        assert not kws
        self._legalize_arg_types(args)
        # Coerce to object mode
        args = [types.ffi_forced_object] * len(args)
        if self._can_compile:
            self.compile(tuple(args))
        func_name = self.py_func.__name__
        call_template = typing.make_concrete_template(
            "CallTemplate({0})".format(func_name), key=func_name,
            signatures=[typing.signature(self.output_types, *args)])
        return call_template, None, args, kws

    def _legalize_arg_types(self, args):
        """Reject argument types that an objmode with-block cannot accept."""
        for i, a in enumerate(args, start=1):
            if isinstance(a, types.List):
                raise errors.TypingError(
                    'Does not support list type inputs into '
                    'with-context for arg {}'.format(i))
            if isinstance(a, types.Dispatcher):
                raise errors.TypingError(
                    'Does not support function type inputs into '
                    'with-context for arg {}'.format(i))
# Initialize typeof machinery
# Registers OmittedArg and the numeric type codes with the C-level
# _dispatcher module so it can resolve argument types quickly.
_dispatcher.typeof_init(
    OmittedArg,
    dict((str(t), t._code) for t in types.number_domain))
| true
| true
|
f704f208405e343692080fea1f8d229afeb2ecb7
| 90,552
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/test_show_interface.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/test_show_interface.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/test_show_interface.py
|
Drey/genieparser
|
f16649efabf1f3c892bcaad340ae24ce5403ba6b
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/env python
import sys
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from textwrap import dedent
# Mock out the `ats` package while importing genie.parsergen, so the
# imports below do not require a full pyATS installation at collection
# time.
# Fix: `patch.dict` merges any extra keyword arguments into the patched
# dictionary, so the previous `autospec=True` wrongly inserted a bogus
# 'autospec' entry into sys.modules; `autospec` is not a valid
# patch.dict option and has been removed.
ats_mock = Mock()
with patch.dict('sys.modules',
                {'ats' : ats_mock}):
    import genie.parsergen
    from genie.parsergen import oper_fill
    from genie.parsergen import oper_check
    from genie.parsergen import oper_fill_tabular
    from genie.parsergen.examples.parsergen.pyAts import parsergen_demo_mkpg

import xml.etree.ElementTree as ET

from ats.topology import Device

from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.iosxe.show_interface import ShowInterfacesSwitchport,\
                                        ShowIpInterfaceBriefPipeVlan,\
                                        ShowInterfaces, ShowIpInterface,\
                                        ShowIpv6Interface, \
                                        ShowInterfacesTrunk, \
                                        ShowInterfacesCounters, \
                                        ShowInterfacesAccounting, \
                                        ShowIpInterfaceBriefPipeIp
class test_show_interface_parsergen(unittest.TestCase):
    """Exercise genie.parsergen tabular parsing of `show ip interface brief`.

    Fix: the header field "OK\\?" was previously written as "OK\?", an
    invalid escape sequence in a non-raw string literal (SyntaxWarning in
    modern Python, slated to become an error); it is now a raw string.
    """

    def test_tabular_parser(self):
        """oper_fill_tabular parses the mocked CLI output into the expected dict."""
        self.showCommandOutput='''
            R1#show ip interface brief
            Interface              IP-Address      OK? Method Status                Protocol
            GigabitEthernet0/0     10.1.10.20      YES NVRAM  up                    up
            GigabitEthernet1/0/1   unassigned      YES unset  up                    up
            GigabitEthernet1/0/10  unassigned      YES unset  down                  down
'''

        self.outputDict = {'GigabitEthernet0/0': {'IP-Address': '10.1.10.20',
                                                  'Interface': 'GigabitEthernet0/0',
                                                  'Method': 'NVRAM',
                                                  'OK?': 'YES',
                                                  'Protocol': 'up',
                                                  'Status': 'up'},
                           'GigabitEthernet1/0/1': {'IP-Address': 'unassigned',
                                                    'Interface': 'GigabitEthernet1/0/1',
                                                    'Method': 'unset',
                                                    'OK?': 'YES',
                                                    'Protocol': 'up',
                                                    'Status': 'up'},
                           'GigabitEthernet1/0/10': {'IP-Address': 'unassigned',
                                                     'Interface': 'GigabitEthernet1/0/10',
                                                     'Method': 'unset',
                                                     'OK?': 'YES',
                                                     'Protocol': 'down',
                                                     'Status': 'down'}}

        # Define how device stub will behave when accessed by production parser.
        device_kwargs = {'is_connected.return_value':True,
                         'execute.return_value':dedent(self.showCommandOutput)}
        device1 = Mock(**device_kwargs)
        device1.name='router3'

        result = genie.parsergen.oper_fill_tabular(device=device1,
                                                   show_command="show ip interface brief",
                                                   refresh_cache=True,
                                                   header_fields=
                                                   [ "Interface",
                                                     "IP-Address",
                                                     r"OK\?",
                                                     "Method",
                                                     "Status",
                                                     "Protocol" ],
                                                   label_fields=
                                                   [ "Interface",
                                                     "IP-Address",
                                                     "OK?",
                                                     "Method",
                                                     "Status",
                                                     "Protocol" ],
                                                   index=[0])

        self.assertEqual(result.entries, self.outputDict)
        args, kwargs = device1.execute.call_args
        self.assertTrue('show ip interface brief' in args,
                        msg='The expected command was not sent to the router')
#############################################################################
#                   unittest for ShowIpInterfaceBriefPipeIp
#############################################################################
class test_show_ip_interfaces_brief_pipe_ip(unittest.TestCase):
    """Tests for ShowIpInterfaceBriefPipeIp (`show ip int brief | i <ip>`)."""

    device = Device(name='aDevice')
    empty_output = {'execute.return_value': ''}

    # Expected structure after parsing the golden CLI sample below.
    golden_parsed_output = {
        'interface': {
            'GigabitEthernet0/0': {
                'interface_ok': 'YES',
                'interface_status': 'up',
                'ip_address': '10.1.18.80',
                'method': 'manual',
                'protocol_status': 'up',
            },
        },
    }

    golden_output = {'execute.return_value': '''
        R1#sh ip int brief | i 10.1.18.80
        GigabitEthernet0/0     10.1.18.80      YES manual up                    up    
'''}

    def test_golden(self):
        """The golden CLI sample parses into the expected dictionary."""
        self.device = Mock(**self.golden_output)
        parser = ShowIpInterfaceBriefPipeIp(device=self.device)
        self.assertEqual(parser.parse(ip='10.1.18.80'),
                         self.golden_parsed_output)

    def test_empty(self):
        """Empty CLI output must raise SchemaEmptyParserError."""
        self.device1 = Mock(**self.empty_output)
        parser = ShowIpInterfaceBriefPipeIp(device=self.device1)
        with self.assertRaises(SchemaEmptyParserError):
            parser.parse(ip='10.1.18.80')
# Comment out due to old version of yang, will enhance it
# class test_show_interface_brief_pipe_vlan_yang(unittest.TestCase):
# device = Device(name='aDevice')
# device1 = Device(name='bDevice')
# golden_parsed_output = {'interface': {'Vlan1': {'vlan_id': {'1': {'ip_address': 'unassigned'}}},
# 'Vlan100': {'vlan_id': {'100': {'ip_address': '201.0.12.1'}}}}}
# class etree_holder():
# def __init__(self):
# self.data = ET.fromstring('''
# <data>
# <native xmlns="http://cisco.com/ns/yang/ned/ios">
# <interface>
# <Vlan>
# <name>1</name>
# <ip>
# <no-address>
# <address>false</address>
# </no-address>
# </ip>
# <shutdown/>
# </Vlan>
# <Vlan>
# <name>100</name>
# <ip>
# <address>
# <primary>
# <address>201.0.12.1</address>
# <mask>255.255.255.0</mask>
# </primary>
# </address>
# </ip>
# <ipv6>
# <address>
# <prefix-list>
# <prefix>2001::12:30/128</prefix>
# </prefix-list>
# </address>
# </ipv6>
# </Vlan>
# </interface>
# </native>
# </data>
# ''')
# golden_output = {'get.return_value': etree_holder()}
# def test_golden(self):
# self.device = Mock(**self.golden_output)
# intf_obj = ShowIpInterfaceBriefPipeVlan(device=self.device)
# intf_obj.context = Context.yang.value
# parsed_output = intf_obj.parse()
# self.assertEqual(parsed_output,self.golden_parsed_output)
# empty_parsed_output = {'interface': {}}
# class empty_etree_holder():
# def __init__(self):
# self.data = ET.fromstring('''
# <data>
# <native xmlns="http://cisco.com/ns/yang/ned/ios">
# <interface>
# <Vlan>
# </Vlan>
# </interface>
# </native>
# </data>
# ''')
# empty_output = {'get.return_value': empty_etree_holder()}
# def test_empty(self):
# self.device1 = Mock(**self.empty_output)
# intf_obj = ShowIpInterfaceBriefPipeVlan(device=self.device1)
# intf_obj.context = Context.yang.value
# parsed_output = intf_obj.parse()
# self.assertEqual(parsed_output,self.empty_parsed_output)
#############################################################################
#                   unittest for ShowInterfacesSwitchport
#############################################################################
class test_show_interfaces_switchport(unittest.TestCase):
    """Tests for ShowInterfacesSwitchport (`show interfaces switchport`)."""

    device = Device(name='aDevice')
    # Simulates a device that returned no output.
    empty_output = {'execute.return_value': ''}

    # Expected structure after parsing the golden CLI sample below.
    golden_parsed_output = {
        "GigabitEthernet1/0/4": {
            "switchport_mode": "trunk",
            "pruning_vlans": "2-1001",
            'operational_mode': 'trunk',
            "switchport_enable": True,
            "trunk_vlans": "200-211",
            "capture_mode": False,
            "private_vlan": {
                "native_vlan_tagging": True,
                "encapsulation": "dot1q"
            },
            "access_vlan": "1",
            "unknown_unicast_blocked": False,
            "native_vlan_tagging": True,
            "unknown_multicast_blocked": False,
            "protected": False,
            "negotiation_of_trunk": True,
            "capture_vlans": "all",
            "encapsulation": {
                "operational_encapsulation": "dot1q",
                "native_vlan": "1",
                "administrative_encapsulation": "dot1q"
            }
        },
        "GigabitEthernet1/0/2": {
            "pruning_vlans": "2-1001",
            "switchport_enable": True,
            "unknown_multicast_blocked": False,
            "trunk_vlans": "100-110",
            "port_channel": {
                "port_channel_int": "Port-channel12",
                "port_channel_member": True
            },
            "access_vlan": "1",
            "operational_mode": "trunk",
            "unknown_unicast_blocked": False,
            "capture_mode": False,
            "private_vlan": {
                "native_vlan_tagging": True,
                "encapsulation": "dot1q",
                "operational": "10 (VLAN0010) 100 (VLAN0100)",
                "trunk_mappings": "10 (VLAN0010) 100 (VLAN0100)"
            },
            "encapsulation": {
                "operational_encapsulation": "dot1q",
                "native_vlan": "1",
                "administrative_encapsulation": "dot1q"
            },
            "protected": False,
            "native_vlan_tagging": True,
            "negotiation_of_trunk": True,
            "capture_vlans": "all",
            "switchport_mode": "trunk"
        },
        "GigabitEthernet1/0/5": {
            "switchport_mode": "static access",
            "pruning_vlans": "2-1001",
            "switchport_enable": True,
            "trunk_vlans": "all",
            'operational_mode': 'down',
            "capture_mode": False,
            "private_vlan": {
                "native_vlan_tagging": True,
                "encapsulation": "dot1q"
            },
            "access_vlan": "1",
            "unknown_unicast_blocked": False,
            "native_vlan_tagging": True,
            "unknown_multicast_blocked": False,
            "protected": False,
            "negotiation_of_trunk": False,
            "capture_vlans": "all",
            "encapsulation": {
                "native_vlan": "1",
                "administrative_encapsulation": "dot1q"
            }
        },
        "Port-channel12": {
            "switchport_enable": True,
            "private_vlan": {
                "encapsulation": "dot1q",
                "native_vlan_tagging": True
            },
            "native_vlan_tagging": False,
            "negotiation_of_trunk": True,
            "unknown_unicast_blocked": False,
            "protected": False,
            "encapsulation": {
                "administrative_encapsulation": "dot1q",
                "native_vlan": "0"
            },
            "switchport_mode": "trunk",
            "unknown_multicast_blocked": False,
            "trunk_vlans": "100-110",
            "operational_mode": "down",
            "pruning_vlans": "2-1001",
            "port_channel": {
                "port_channel_member": True,
                "port_channel_member_intfs": [
                    "GigabitEthernet1/0/2"
                ]
            }
        }
    }

    # Raw CLI sample covering trunk, static-access and port-channel cases.
    golden_output = {'execute.return_value': '''
        Name: Gi1/0/2
        Switchport: Enabled
        Administrative Mode: trunk
        Operational Mode: trunk (member of bundle Po12)
        Administrative Trunking Encapsulation: dot1q
        Operational Trunking Encapsulation: dot1q
        Negotiation of Trunking: On
        Access Mode VLAN: 1 (default)
        Trunking Native Mode VLAN: 1 (default)
        Administrative Native VLAN tagging: enabled
        Voice VLAN: none
        Administrative private-vlan host-association: none
        Administrative private-vlan mapping: none
        Administrative private-vlan trunk native VLAN: none
        Administrative private-vlan trunk Native VLAN tagging: enabled
        Administrative private-vlan trunk encapsulation: dot1q
        Administrative private-vlan trunk normal VLANs: none
        Administrative private-vlan trunk associations: none
        Administrative private-vlan trunk mappings:
        10 (VLAN0010) 100 (VLAN0100)
        Operational private-vlan:
        10 (VLAN0010) 100 (VLAN0100)
        Trunking VLANs Enabled: 100-110
        Pruning VLANs Enabled: 2-1001
        Capture Mode Disabled
        Capture VLANs Allowed: ALL

        Protected: false
        Unknown unicast blocked: disabled
        Unknown multicast blocked: disabled
        Appliance trust: none

        Name: Gi1/0/4
        Switchport: Enabled
        Administrative Mode: trunk
        Operational Mode: trunk
        Administrative Trunking Encapsulation: dot1q
        Operational Trunking Encapsulation: dot1q
        Negotiation of Trunking: On
        Access Mode VLAN: 1 (default)
        Trunking Native Mode VLAN: 1 (default)
        Administrative Native VLAN tagging: enabled
        Voice VLAN: none
        Administrative private-vlan host-association: none
        Administrative private-vlan mapping: none
        Administrative private-vlan trunk native VLAN: none
        Administrative private-vlan trunk Native VLAN tagging: enabled
        Administrative private-vlan trunk encapsulation: dot1q
        Administrative private-vlan trunk normal VLANs: none
        Administrative private-vlan trunk associations: none
        Administrative private-vlan trunk mappings: none
        Operational private-vlan: none
        Trunking VLANs Enabled: 200-211
        Pruning VLANs Enabled: 2-1001
        Capture Mode Disabled
        Capture VLANs Allowed: ALL

        Protected: false
        Unknown unicast blocked: disabled
        Unknown multicast blocked: disabled
        Appliance trust: none

        Name: Gi1/0/5
        Switchport: Enabled
        Administrative Mode: static access
        Operational Mode: down
        Administrative Trunking Encapsulation: dot1q
        Negotiation of Trunking: Off
        Access Mode VLAN: 1 (default)
        Trunking Native Mode VLAN: 1 (default)
        Administrative Native VLAN tagging: enabled
        Voice VLAN: none
        Administrative private-vlan host-association: none
        Administrative private-vlan mapping: none
        Administrative private-vlan trunk native VLAN: none
        Administrative private-vlan trunk Native VLAN tagging: enabled
        Administrative private-vlan trunk encapsulation: dot1q
        Administrative private-vlan trunk normal VLANs: none
        Administrative private-vlan trunk associations: none
        Administrative private-vlan trunk mappings: none
        Operational private-vlan: none
        Trunking VLANs Enabled: ALL
        Pruning VLANs Enabled: 2-1001
        Capture Mode Disabled
        Capture VLANs Allowed: ALL

        Protected: false
        Unknown unicast blocked: disabled
        Unknown multicast blocked: disabled
        Appliance trust: none

        Name: Po12
        Switchport: Enabled
        Administrative Mode: trunk
        Operational Mode: down
        Administrative Trunking Encapsulation: dot1q
        Negotiation of Trunking: On
        Access Mode VLAN: unassigned
        Trunking Native Mode VLAN: 0 (Inactive)
        Administrative Native VLAN tagging: disabled
        Voice VLAN: none
        Administrative private-vlan host-association: none
        Administrative private-vlan mapping: none
        Administrative private-vlan trunk native VLAN: none
        Administrative private-vlan trunk Native VLAN tagging: enabled
        Administrative private-vlan trunk encapsulation: dot1q
        Administrative private-vlan trunk normal VLANs: none
        Administrative private-vlan trunk associations: none
        Administrative private-vlan trunk mappings: none
        Operational private-vlan: none
        Trunking VLANs Enabled: 100-110
        Pruning VLANs Enabled: 2-1001

        Protected: false
        Unknown unicast blocked: disabled
        Unknown multicast blocked: disabled
        Appliance trust: none
    '''}

    def test_golden(self):
        """The golden CLI sample parses into the expected dictionary."""
        self.device = Mock(**self.golden_output)
        intf_obj = ShowInterfacesSwitchport(device=self.device)
        parsed_output = intf_obj.parse()
        self.maxDiff = None
        self.assertEqual(parsed_output,self.golden_parsed_output)

    def test_empty(self):
        """Empty CLI output must raise SchemaEmptyParserError."""
        self.device1 = Mock(**self.empty_output)
        intf_obj = ShowInterfacesSwitchport(device=self.device1)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = intf_obj.parse()
#############################################################################
#                       unittest for ShowInterfaces
#############################################################################
class test_show_interfaces(unittest.TestCase):
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"Port-channel12": {
"flow_control": {
"send": False,
"receive": False
},
"type": "EtherChannel",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d23h",
"out_interface_resets": 2,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 2000,
"in_rate_pkts": 2
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 961622,
"in_multicast_pkts": 4286699522,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 72614643,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 944788,
"out_pkts": 39281,
"out_late_collision": 0,
"out_octets": 6235318,
"in_overrun": 0,
"out_babble": 0
},
"auto_negotiate": True,
"phys_address": "0057.d228.1a02",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"oper_status": "up",
"arp_type": "arpa",
"rxload": "1/255",
"duplex_mode": "full",
"link_type": "auto",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 2000,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 0,
"queue_strategy": "fifo"
},
"encapsulations": {
"encapsulation": "qinq virtual lan",
"first_dot1q": "10",
"second_dot1q": "20",
},
"last_input": "never",
"last_output": "1d22h",
"line_protocol": "up",
"mac_address": "0057.d228.1a02",
"connected": True,
"port_channel": {
"port_channel_member": True,
"port_channel_member_intfs": ['GigabitEthernet1/0/2'],
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"port_speed": "1000",
"enabled": True,
"mtu": 1500,
"delay": 10,
"reliability": "255/255"
},
"GigabitEthernet1/0/1": {
"flow_control": {
"send": False,
"receive": False
},
"type": "Gigabit Ethernet",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d02h",
"out_interface_resets": 2,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 30,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 12127,
"in_multicast_pkts": 4171,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 2297417,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 0,
"out_pkts": 12229,
"out_late_collision": 0,
"out_octets": 2321107,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "0057.d228.1a64",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"description": "desc",
"oper_status": "down",
"arp_type": "arpa",
"rxload": "1/255",
"duplex_mode": "auto",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"ipv4": {
"10.1.1.1/24": {
"prefix_length": "24",
"ip": "10.1.1.1"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_input": "never",
"last_output": "04:39:18",
"line_protocol": "down",
"mac_address": "0057.d228.1a64",
"connected": False,
"port_channel": {
"port_channel_member": False
},
"media_type": "10/100/1000BaseTX",
"bandwidth": 768,
"port_speed": "1000",
"enabled": False,
"arp_timeout": "04:00:00",
"mtu": 1500,
"delay": 3330,
"reliability": "255/255"
},
"GigabitEthernet3": {
"flow_control": {
"send": False,
"receive": False
},
"type": "CSR vNIC",
'auto_negotiate': True,
'duplex_mode': 'full',
'link_type': 'auto',
'media_type': 'RJ45',
'port_speed': '1000',
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "never",
"out_interface_resets": 1,
"in_mac_pause_frames": 0,
"out_collision": 0,
"in_crc_errors": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 6,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 480,
"out_unknown_protocl_drops": 0,
"out_no_carrier": 0,
"out_lost_carrier": 0,
"in_broadcast_pkts": 0,
"out_pkts": 28,
"out_late_collision": 0,
"out_octets": 7820,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "5254.0072.9b0c",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"ipv4": {
"200.2.1.1/24": {
"prefix_length": "24",
"ip": "200.2.1.1"
},
"unnumbered": {
"interface_ref": "Loopback0"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_output": "00:00:27",
"line_protocol": "up",
"mac_address": "5254.0072.9b0c",
"oper_status": "up",
"port_channel": {
"port_channel_member": False
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never"
},
"Loopback0": {
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 75,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 0,
"queue_strategy": "fifo"
},
"mtu": 1514,
"encapsulations": {
"encapsulation": "loopback"
},
"last_output": "never",
"type": "Loopback",
"line_protocol": "up",
"oper_status": "up",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_pkts": 0,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 0,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5760,
"in_overrun": 0,
"in_abort": 0
},
"reliability": "255/255",
"bandwidth": 8000000,
"port_channel": {
"port_channel_member": False
},
"enabled": True,
"ipv4": {
"200.2.1.1/24": {
"prefix_length": "24",
"ip": "200.2.1.1"
}
},
"rxload": "1/255",
"delay": 5000,
"last_input": "1d02h"
},
"Vlan100": {
"type": "Ethernet SVI",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_pkts": 50790,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 3657594,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5526,
"in_overrun": 0
},
"phys_address": "0057.d228.1a51",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"output_hang": "never",
"ipv4": {
"201.0.12.1/24": {
"prefix_length": "24",
"ip": "201.0.12.1"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_output": "1d03h",
"line_protocol": "up",
"mac_address": "0057.d228.1a51",
"oper_status": "up",
"port_channel": {
"port_channel_member": False
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never"
},
"GigabitEthernet1/0/2": {
"flow_control": {
"send": False,
"receive": False
},
"type": "Gigabit Ethernet",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d02h",
"out_interface_resets": 5,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 3000,
"in_rate_pkts": 5
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 545526,
"in_multicast_pkts": 535961,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 41210298,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 535961,
"out_pkts": 23376,
"out_late_collision": 0,
"out_octets": 3642296,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "0057.d228.1a02",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"oper_status": "up",
"arp_type": "arpa",
"media_type": "10/100/1000BaseTX",
"rxload": "1/255",
"duplex_mode": "full",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 2000,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_input": "never",
"last_output": "00:00:02",
"line_protocol": "up",
"mac_address": "0057.d228.1a02",
"connected": True,
"port_channel": {
"port_channel_member": True,
'port_channel_int': 'Port-channel12',
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"port_speed": "1000",
"enabled": True,
"mtu": 1500,
"delay": 10,
"reliability": "255/255"
},
"GigabitEthernet0/0/4": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"bandwidth": 1000000,
"counters": {
"in_broadcast_pkts": 0,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 0,
"in_no_buffer": 0,
"in_octets": 0,
"in_overrun": 0,
"in_pkts": 0,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 1,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 0,
"out_pkts": 0,
"out_underruns": 0,
"out_unknown_protocl_drops": 0,
"rate": {
"in_rate": 0,
"in_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"out_rate_pkts": 0
}
},
"delay": 10,
"enabled": False,
"encapsulations": {
"encapsulation": "arpa"
},
"flow_control": {
"receive": False, "send": False
},
"last_input": "never",
"last_output": "never",
"line_protocol": "down",
"mac_address": "380e.4d6c.7006",
"phys_address": "380e.4d6c.7006",
"mtu": 1500,
"oper_status": "down",
"output_hang": "never",
"port_channel": {
"port_channel_member": False
},
"queues": {
"input_queue_drops": 0,
"input_queue_flushes": 0,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "fifo",
"total_output_drop": 0
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-2T+6X1GE"
}
}
golden_output = {'execute.return_value': '''
GigabitEthernet1/0/1 is administratively down, line protocol is down (disabled)
Hardware is Gigabit Ethernet, address is 0057.d228.1a64 (bia 0057.d228.1a64)
Description: desc
Internet address is 10.1.1.1/24
MTU 1500 bytes, BW 768 Kbit/sec, DLY 3330 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Auto-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
input flow-control is off, output flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 04:39:18, output hang never
Last clearing of "show interface" counters 1d02h
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
30 second input rate 0 bits/sec, 0 packets/sec
30 second output rate 0 bits/sec, 0 packets/sec
12127 packets input, 2297417 bytes, 0 no buffer
Received 4173 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 4171 multicast, 0 pause input
0 input packets with dribble condition detected
12229 packets output, 2321107 bytes, 0 underruns
0 output errors, 0 collisions, 2 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet1/0/2 is up, line protocol is up (connected)
Hardware is Gigabit Ethernet, address is 0057.d228.1a02 (bia 0057.d228.1a02)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
input flow-control is off, output flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 00:00:02, output hang never
Last clearing of "show interface" counters 1d02h
Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 3000 bits/sec, 5 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
545526 packets input, 41210298 bytes, 0 no buffer
Received 535996 broadcasts (535961 multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 535961 multicast, 0 pause input
0 input packets with dribble condition detected
23376 packets output, 3642296 bytes, 0 underruns
0 output errors, 0 collisions, 5 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet3 is up, line protocol is up
Hardware is CSR vNIC, address is 5254.0072.9b0c (bia 5254.0072.9b0c)
Interface is unnumbered. Using address of Loopback0 (200.2.1.1)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full Duplex, 1000Mbps, link type is auto, media type is RJ45
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 00:00:27, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
6 packets input, 480 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
28 packets output, 7820 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
Loopback0 is up, line protocol is up
Hardware is Loopback
Internet address is 200.2.1.1/24
MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation LOOPBACK, loopback not set
Keepalive set (10 sec)
Last input 1d02h, output never, output hang never
Last clearing of "show interface" counters 1d04h
Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/0 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
0 packets input, 0 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
72 packets output, 5760 bytes, 0 underruns
0 output errors, 0 collisions, 0 interface resets
0 unknown protocol drops
0 output buffer failures, 0 output buffers swapped out
Vlan100 is up, line protocol is up
Hardware is Ethernet SVI, address is 0057.d228.1a51 (bia 0057.d228.1a51)
Internet address is 201.0.12.1/24
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive not supported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 1d03h, output hang never
Last clearing of "show interface" counters 1d04h
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
50790 packets input, 3657594 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
72 packets output, 5526 bytes, 0 underruns
0 output errors, 0 interface resets
0 unknown protocol drops
0 output buffer failures, 0 output buffers swapped out
Port-channel12 is up, line protocol is up (connected)
Hardware is EtherChannel, address is 0057.d228.1a02 (bia 0057.d228.1a02)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation QinQ Virtual LAN, outer ID 10, inner ID 20
Keepalive set (10 sec)
Full-duplex, 1000Mb/s, link type is auto, media type is
input flow-control is off, output flow-control is unsupported
Members in this channel: Gi1/0/2
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 1d22h, output hang never
Last clearing of "show interface" counters 1d23h
Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/0 (size/max)
5 minute input rate 2000 bits/sec, 2 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
961622 packets input, 72614643 bytes, 0 no buffer
Received 944818 broadcasts (944788 multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 4286699522 multicast, 0 pause input
0 input packets with dribble condition detected
39281 packets output, 6235318 bytes, 0 underruns
0 output errors, 0 collisions, 2 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet0/0/4 is administratively down, line protocol is down
Hardware is BUILT-IN-2T+6X1GE, address is 380e.4d6c.7006 (bia 380e.4d6c.7006)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive not supported
Full Duplex, 1000Mbps, link type is auto, media type is unknown media type
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output never, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
0 packets input, 0 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
0 packets output, 0 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
'''}
golden_interface_output = {'execute.return_value': '''
CE1#show interfaces GigabitEthernet1
GigabitEthernet1 is up, line protocol is up
Hardware is CSR vNIC, address is 5e00.0001.0000 (bia 5e00.0001.0000)
Internet address is 172.16.1.243/24
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full Duplex, 1000Mbps, link type is auto, media type is Virtual
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input 00:00:02, output 00:00:25, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 32000 bits/sec, 28 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
7658 packets input, 1125842 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
44 packets output, 4324 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
'''
}
golden_parsed_interface_output={
"GigabitEthernet1": {
"rxload": "1/255",
"phys_address": "5e00.0001.0000",
"flow_control": {
"send": False,
"receive": False
},
"arp_type": "arpa",
"type": "CSR vNIC",
"enabled": True,
"media_type": "Virtual",
"last_input": "00:00:02",
"link_type": "auto",
"last_output": "00:00:25",
"counters": {
"in_errors": 0,
"in_frame": 0,
"in_watchdog": 0,
"out_babble": 0,
"in_overrun": 0,
"out_collision": 0,
"out_buffer_failure": 0,
"out_no_carrier": 0,
"in_runts": 0,
"out_late_collision": 0,
"in_mac_pause_frames": 0,
"out_underruns": 0,
"out_pkts": 44,
"in_ignored": 0,
"in_pkts": 7658,
"out_buffers_swapped": 0,
"out_interface_resets": 1,
"rate": {
"out_rate": 0,
"load_interval": 300,
"in_rate_pkts": 28,
"out_rate_pkts": 0,
"in_rate": 32000
},
"out_mac_pause_frames": 0,
"in_broadcast_pkts": 0,
"in_no_buffer": 0,
"out_deferred": 0,
"in_crc_errors": 0,
"out_octets": 4324,
"out_lost_carrier": 0,
"in_octets": 1125842,
"out_unknown_protocl_drops": 0,
"last_clear": "never",
"in_throttles": 0,
"in_multicast_pkts": 0,
"out_errors": 0,
"in_giants": 0
},
"keepalive": 10,
"mtu": 1500,
"delay": 10,
"encapsulations": {
"encapsulation": "arpa"
},
"ipv4": {
"172.16.1.243/24": {
"ip": "172.16.1.243",
"prefix_length": "24"
}
},
"queues": {
"output_queue_size": 0,
"input_queue_size": 0,
"input_queue_flushes": 0,
"queue_strategy": "fifo",
"total_output_drop": 0,
"output_queue_max": 40,
"input_queue_drops": 0,
"input_queue_max": 375
},
"auto_negotiate": True,
"line_protocol": "up",
"oper_status": "up",
"duplex_mode": "full",
"bandwidth": 1000000,
"arp_timeout": "04:00:00",
"port_speed": "1000",
"port_channel": {
"port_channel_member": False
},
"output_hang": "never",
"txload": "1/255",
"mac_address": "5e00.0001.0000",
"reliability": "255/255"
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowInterfaces(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowInterfaces(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_show_interfaces(self):
self.device = Mock(**self.golden_interface_output)
interface_obj = ShowInterfaces(device=self.device)
parsed_output = interface_obj.parse(interface='GigabitEthernet1')
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_interface_output)
#############################################################################
# unitest For Show ip interface
#############################################################################
class test_show_ip_interface(unittest.TestCase):
    """Unit tests for the ShowIpInterface parser ('show ip interface').

    Checks the empty-output error path, the full multi-interface output,
    and the single-interface variant.
    """
    device = Device(name='aDevice')
    # Simulates a device returning no output at all.
    empty_output = {'execute.return_value': ''}
    # Expected parse of golden_output below.  NOTE(review): the key
    # "sevurity_level" (and "broadcase_address" further down) look like
    # typos but presumably mirror the parser's actual schema keys —
    # confirm against the ShowIpInterface schema before renaming.
    golden_parsed_output = {
        "Vlan211": {
            "sevurity_level": "default",
            "ip_route_cache_flags": [
                "CEF",
                "Fast"
            ],
            "enabled": True,
            "oper_status": "up",
            "address_determined_by": "configuration file",
            "router_discovery": False,
            "ip_multicast_fast_switching": False,
            "split_horizon": True,
            "bgp_policy_mapping": False,
            "ip_output_packet_accounting": False,
            "mtu": 1500,
            "policy_routing": False,
            "local_proxy_arp": False,
            "proxy_arp": True,
            "network_address_translation": False,
            "ip_cef_switching_turbo_vector": True,
            "icmp": {
                "redirects": "always sent",
                "mask_replies": "never sent",
                "unreachables": "always sent",
            },
            "ipv4": {
                "201.11.14.1/24": {
                    "prefix_length": "24",
                    "ip": "201.11.14.1",
                    "secondary": False,
                    "broadcase_address": "255.255.255.255"
                }
            },
            "ip_access_violation_accounting": False,
            "ip_cef_switching": True,
            "unicast_routing_topologies": {
                "topology": {
                    "base": {
                        "status": "up"
                    }
                },
            },
            "ip_null_turbo_vector": True,
            "probe_proxy_name_replies": False,
            "ip_fast_switching": True,
            "ip_multicast_distributed_fast_switching": False,
            "tcp_ip_header_compression": False,
            "rtp_ip_header_compression": False,
            "input_features": ["MCI Check"],
            "directed_broadcast_forwarding": False,
            "ip_flow_switching": False
        },
        # Management interface: identical shape plus a "vrf" entry.
        "GigabitEthernet0/0": {
            "sevurity_level": "default",
            'address_determined_by': 'setup command',
            "ip_route_cache_flags": [
                "CEF",
                "Fast"
            ],
            "enabled": True,
            "oper_status": "up",
            "router_discovery": False,
            "ip_multicast_fast_switching": False,
            "split_horizon": True,
            "bgp_policy_mapping": False,
            "ip_output_packet_accounting": False,
            "mtu": 1500,
            "policy_routing": False,
            "local_proxy_arp": False,
            "vrf": "Mgmt-vrf",
            "proxy_arp": True,
            "network_address_translation": False,
            "ip_cef_switching_turbo_vector": True,
            "icmp": {
                "redirects": "always sent",
                "mask_replies": "never sent",
                "unreachables": "always sent",
            },
            "ipv4": {
                "10.1.8.134/24": {
                    "prefix_length": "24",
                    "ip": "10.1.8.134",
                    "secondary": False,
                    "broadcase_address": "255.255.255.255"
                }
            },
            "ip_access_violation_accounting": False,
            "ip_cef_switching": True,
            "unicast_routing_topologies": {
                "topology": {
                    "base": {
                        "status": "up"
                    }
                },
            },
            "ip_null_turbo_vector": True,
            "probe_proxy_name_replies": False,
            "ip_fast_switching": True,
            "ip_multicast_distributed_fast_switching": False,
            "tcp_ip_header_compression": False,
            "rtp_ip_header_compression": False,
            "input_features": ["MCI Check"],
            "directed_broadcast_forwarding": False,
            "ip_flow_switching": False
        },
        # Interface with IP processing disabled: minimal entry.
        "GigabitEthernet2": {
            "enabled": False,
            "oper_status": "down"
        },
        # Admin-down interface with a secondary address and WCCP flags.
        "GigabitEthernet1/0/1": {
            "sevurity_level": "default",
            'address_determined_by': 'setup command',
            "ip_route_cache_flags": [
                "CEF",
                "Fast"
            ],
            "enabled": False,
            "oper_status": "down",
            "router_discovery": False,
            "ip_multicast_fast_switching": False,
            "split_horizon": True,
            "bgp_policy_mapping": False,
            "ip_output_packet_accounting": False,
            "mtu": 1500,
            "policy_routing": False,
            "local_proxy_arp": False,
            "proxy_arp": True,
            "network_address_translation": False,
            "ip_cef_switching_turbo_vector": True,
            "icmp": {
                "redirects": "always sent",
                "mask_replies": "never sent",
                "unreachables": "always sent",
            },
            "ipv4": {
                "10.1.1.1/24": {
                    "prefix_length": "24",
                    "ip": "10.1.1.1",
                    "secondary": False,
                    "broadcase_address": "255.255.255.255"
                },
                "10.2.2.2/24": {
                    "prefix_length": "24",
                    "ip": "10.2.2.2",
                    "secondary": True
                },
            },
            "ip_access_violation_accounting": False,
            "ip_cef_switching": True,
            "unicast_routing_topologies": {
                "topology": {
                    "base": {
                        "status": "up"
                    }
                },
            },
            'wccp': {
                'redirect_outbound': False,
                'redirect_inbound': False,
                'redirect_exclude': False,
            },
            "ip_null_turbo_vector": True,
            "probe_proxy_name_replies": False,
            "ip_fast_switching": True,
            "ip_multicast_distributed_fast_switching": False,
            "tcp_ip_header_compression": False,
            "rtp_ip_header_compression": False,
            "directed_broadcast_forwarding": False,
            "ip_flow_switching": False,
            "input_features": ["MCI Check", "QoS Classification", "QoS Marking"],
        }
    }
    # Raw 'show ip interface' output matching golden_parsed_output.
    # NOTE(review): leading whitespace inside this CLI capture may have
    # been lost in transit — confirm against the parser's expectations.
    golden_output = {'execute.return_value': '''
Vlan211 is up, line protocol is up
Internet address is 201.11.14.1/24
Broadcast address is 255.255.255.255
Address determined by configuration file
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet0/0 is up, line protocol is up
Internet address is 10.1.8.134/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
VPN Routing/Forwarding "Mgmt-vrf"
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet1/0/1 is administratively down, line protocol is down
Internet address is 10.1.1.1/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Secondary address 10.2.2.2/24
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: QoS Classification, QoS Marking, MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
GigabitEthernet2 is administratively down, line protocol is down
Internet protocol processing disabled
'''}
    # Raw output for the single-interface variant
    # 'show ip interface GigabitEthernet1'.
    golden_interface_output = {'execute.return_value':'''
CE1#show ip interface GigabitEthernet1
GigabitEthernet1 is up, line protocol is up
Internet address is 172.16.1.243/24
Broadcast address is 255.255.255.255
Address determined by DHCP
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
'''
    }
    # Expected parse of golden_interface_output above.
    golden_parsed_interface_output = {
        "GigabitEthernet1": {
            "ip_multicast_fast_switching": True,
            "oper_status": "up",
            "ip_output_packet_accounting": False,
            "address_determined_by": "DHCP",
            "rtp_ip_header_compression": False,
            "ip_multicast_distributed_fast_switching": False,
            "wccp": {
                "redirect_exclude": False,
                "redirect_outbound": False,
                "redirect_inbound": False
            },
            "unicast_routing_topologies": {
                "topology": {
                    "base": {
                        "status": "up"
                    }
                }
            },
            "router_discovery": False,
            "tcp_ip_header_compression": False,
            "probe_proxy_name_replies": False,
            "local_proxy_arp": False,
            "policy_routing": False,
            "mtu": 1500,
            "icmp": {
                "mask_replies": "never sent",
                "unreachables": "always sent",
                "redirects": "always sent"
            },
            "enabled": True,
            "ip_route_cache_flags": [
                "CEF",
                "Fast"
            ],
            "ip_cef_switching": True,
            "ip_fast_switching": True,
            "sevurity_level": "default",
            "directed_broadcast_forwarding": False,
            "proxy_arp": True,
            "ip_null_turbo_vector": True,
            "network_address_translation": False,
            "input_features": [
                "MCI Check"
            ],
            "bgp_policy_mapping": False,
            "split_horizon": True,
            "ip_access_violation_accounting": False,
            "ip_cef_switching_turbo_vector": True,
            "ipv4": {
                "172.16.1.243/24": {
                    "ip": "172.16.1.243",
                    "prefix_length": "24",
                    "broadcase_address": "255.255.255.255",
                    "secondary": False
                }
            },
            "ip_flow_switching": False
        }
    }
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        interface_obj = ShowIpInterface(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = interface_obj.parse()
    def test_golden(self):
        """Full 'show ip interface' output parses to the golden structure."""
        self.device = Mock(**self.golden_output)
        interface_obj = ShowIpInterface(device=self.device)
        parsed_output = interface_obj.parse()
        self.maxDiff = None
        self.assertEqual(parsed_output,self.golden_parsed_output)
    def test_interface_golden(self):
        """Single-interface parse ('GigabitEthernet1') matches its golden data."""
        self.device = Mock(**self.golden_interface_output)
        interface_obj = ShowIpInterface(device=self.device)
        parsed_output = interface_obj.parse(interface='GigabitEthernet1')
        self.maxDiff = None
        self.assertEqual(parsed_output, self.golden_parsed_interface_output)
#############################################################################
# unitest For show ipv6 interface
#############################################################################
class test_show_ipv6_interface(unittest.TestCase):
    """Unit tests for the ShowIpv6Interface parser ('show ipv6 interface').

    Checks the empty-output error path and a golden multi-interface
    output covering tentative/anycast/EUI-64 addresses, stateless
    autoconfig, and an unnumbered interface.
    """
    device = Device(name='aDevice')
    # Simulates a device returning no output at all.
    empty_output = {'execute.return_value': ''}
    # Expected parse of golden_output below.
    golden_parsed_output = {
        "GigabitEthernet1/0/1": {
            "joined_group_addresses": [
                "FF02::1"
            ],
            "ipv6": {
                "2001:DB8:2:2::2/64": {
                    "ip": "2001:DB8:2:2::2",
                    "prefix_length": "64",
                    "status": "tentative"
                },
                "2000::1/126": {
                    "ip": "2000::1",
                    "prefix_length": "126",
                    "status": "tentative"
                },
                "2001:DB8:1:1::1/64": {
                    "ip": "2001:DB8:1:1::1",
                    "prefix_length": "64",
                    "status": "tentative"
                },
                "2001:DB8:4:4:257:D2FF:FE28:1A64/64": {
                    "ip": "2001:DB8:4:4:257:D2FF:FE28:1A64",
                    "prefix_length": "64",
                    "status": "tentative",
                    "eui_64": True
                },
                "2001:DB8:3:3::3/64": {
                    "ip": "2001:DB8:3:3::3",
                    "prefix_length": "64",
                    "status": "tentative",
                    "anycast": True
                },
                "FE80::257:D2FF:FE28:1A64": {
                    "ip": "FE80::257:D2FF:FE28:1A64",
                    "status": "tentative",
                    "origin": "link_layer",
                },
                "enabled": True,
                "nd": {
                    "dad_attempts": 1,
                    "ns_retransmit_interval": 1000,
                    "dad_enabled": True,
                    "reachable_time": 30000,
                    "using_time": 30000
                },
                "icmp": {
                    "error_messages_limited": 100,
                    "redirects": True,
                    "unreachables": "sent"
                },
            },
            "oper_status": "down",
            "enabled": False,
            "mtu": 1500
        },
        "Vlan211": {
            "joined_group_addresses": [
                "FF02::1",
                "FF02::1:FF14:1",
                "FF02::1:FF28:1A71"
            ],
            "ipv6": {
                "2001:10::14:1/112": {
                    "ip": "2001:10::14:1",
                    "prefix_length": "112",
                    "status": "valid",
                    'autoconf': {
                        'preferred_lifetime': 604711,
                        'valid_lifetime': 2591911,
                    },
                },
                "FE80::257:D2FF:FE28:1A71": {
                    "ip": "FE80::257:D2FF:FE28:1A71",
                    "status": "valid",
                    "origin": "link_layer",
                },
                "enabled": True,
                "nd": {
                    "dad_attempts": 1,
                    "ns_retransmit_interval": 1000,
                    "dad_enabled": True,
                    "reachable_time": 30000,
                    "using_time": 30000
                },
                "icmp": {
                    "error_messages_limited": 100,
                    "redirects": True,
                    "unreachables": "sent"
                },
            },
            "oper_status": "up",
            "enabled": True,
            "autoconf": True,
            "mtu": 1500
        },
        "GigabitEthernet3": {
            "enabled": True,
            "joined_group_addresses": [
                "FF02::1",
                "FF02::1:FF1E:4F2",
                "FF02::2"
            ],
            "ipv6": {
                "enabled": False,
                "FE80::5054:FF:FE1E:4F2": {
                    "ip": "FE80::5054:FF:FE1E:4F2",
                    "status": "valid",
                    "origin": "link_layer",
                },
                "unnumbered": {
                    "interface_ref": "Loopback0",
                },
                # Fix: the original literal defined "nd" twice; the first
                # (smaller) dict was dead code silently overridden by this
                # one, so only the full entry is kept.
                "icmp": {
                    "unreachables": "sent",
                    "redirects": True,
                    "error_messages_limited": 100
                },
                "nd": {
                    "dad_attempts": 1,
                    "dad_enabled": True,
                    "reachable_time": 30000,
                    "using_time": 30000,
                    "advertised_reachable_time": 0,
                    "advertised_retransmit_interval": 0,
                    "router_advertisements_interval": 200,
                    "router_advertisements_live": 1800,
                    "advertised_default_router_preference": 'Medium',
                    "advertised_reachable_time_unspecified": True,
                    "advertised_retransmit_interval_unspecified": True,
                },
            },
            "oper_status": "up",
            "mtu": 1500,
            "addresses_config_method": 'stateless autoconfig',
        }
    }
    # Raw 'show ipv6 interface' output matching golden_parsed_output.
    # NOTE(review): leading whitespace inside this CLI capture may have
    # been lost in transit — confirm against the parser's expectations.
    golden_output = {'execute.return_value': '''
Vlan211 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::257:D2FF:FE28:1A71
No Virtual link-local address(es):
Stateless address autoconfig enabled
Global unicast address(es):
2001:10::14:1, subnet is 2001:10::14:0/112
valid lifetime 2591911 preferred lifetime 604711
Joined group address(es):
FF02::1
FF02::1:FF14:1
FF02::1:FF28:1A71
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet1/0/1 is administratively down, line protocol is down
IPv6 is tentative, link-local address is FE80::257:D2FF:FE28:1A64 [TEN]
No Virtual link-local address(es):
Description: desc
Global unicast address(es):
2000::1, subnet is 2000::/126 [TEN]
2001:DB8:1:1::1, subnet is 2001:DB8:1:1::/64 [TEN]
2001:DB8:2:2::2, subnet is 2001:DB8:2:2::/64 [TEN]
2001:DB8:3:3::3, subnet is 2001:DB8:3:3::/64 [ANY/TEN]
2001:DB8:4:4:257:D2FF:FE28:1A64, subnet is 2001:DB8:4:4::/64 [EUI/TEN]
Joined group address(es):
FF02::1
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet3 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::5054:FF:FE1E:4F2
No Virtual link-local address(es):
Interface is unnumbered. Using address of Loopback0
No global unicast address is configured
Joined group address(es):
FF02::1
FF02::2
FF02::1:FF1E:4F2
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements are sent every 200 seconds
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
Hosts use stateless autoconfig for addresses.
'''}
    def test_empty(self):
        """Empty device output must raise SchemaEmptyParserError."""
        self.device = Mock(**self.empty_output)
        interface_obj = ShowIpv6Interface(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = interface_obj.parse()
    def test_golden(self):
        """Full 'show ipv6 interface' output parses to the golden structure."""
        self.device = Mock(**self.golden_output)
        interface_obj = ShowIpv6Interface(device=self.device)
        parsed_output = interface_obj.parse()
        self.maxDiff = None
        self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# unit tests for show interfaces trunk
#############################################################################
class test_show_interfaces_trunk(unittest.TestCase):
    """Unit tests for the ShowInterfacesTrunk parser ('show interfaces trunk')."""

    device = Device(name='aDevice')
    # Simulated empty CLI response; used to verify the parser raises on no data.
    empty_output = {'execute.return_value': ''}
    # Expected parser result for the golden CLI output fixture below.
    golden_parsed_output = {
        "interface": {
            "GigabitEthernet1/0/4": {
                "vlans_allowed_active_in_mgmt_domain": '200-211',
                "vlans_allowed_on_trunk": '200-211',
                "mode": "on",
                "native_vlan": "1",
                "status": "trunking",
                "vlans_in_stp_forwarding_not_pruned": '200-211',
                "name": "GigabitEthernet1/0/4",
                "encapsulation": "802.1q"
            },
            "GigabitEthernet1/0/23": {
                "vlans_allowed_active_in_mgmt_domain": '200-211',
                "vlans_allowed_on_trunk": '200-211',
                "mode": "on",
                "native_vlan": "1",
                "status": "trunking",
                "vlans_in_stp_forwarding_not_pruned": '200-211',
                "name": "GigabitEthernet1/0/23",
                "encapsulation": "802.1q"
            },
            "Port-channel12": {
                "vlans_allowed_active_in_mgmt_domain": '100-110',
                "vlans_allowed_on_trunk": '100-110',
                "mode": "on",
                "native_vlan": "1",
                "status": "trunking",
                "vlans_in_stp_forwarding_not_pruned": '100-110',
                "name": "Port-channel12",
                "encapsulation": "802.1q"
            },
            "Port-channel14": {
                # Note: the mgmt-domain VLAN list differs from the trunk list here,
                # which exercises the parser's handling of distinct VLAN sections.
                "vlans_allowed_active_in_mgmt_domain": '200-211, 300-302',
                "vlans_allowed_on_trunk": '200-211',
                "mode": "on",
                "native_vlan": "1",
                "status": "trunking",
                "vlans_in_stp_forwarding_not_pruned": '200-211',
                "name": "Port-channel14",
                "encapsulation": "802.1q"
            }
        }
    }

    # Canned device output fed to the parser. The exact column layout matters:
    # the parser matches these lines with whitespace-sensitive regexes.
    golden_output = {'execute.return_value': '''
        Port        Mode             Encapsulation  Status        Native vlan
        Gi1/0/4     on               802.1q         trunking      1
        Gi1/0/23    on               802.1q         trunking      1
        Po12        on               802.1q         trunking      1
        Po14        on               802.1q         trunking      1
        Port        Vlans allowed on trunk
        Gi1/0/4     200-211
        Gi1/0/23    200-211
        Po12        100-110
        Po14        200-211
        Port        Vlans allowed and active in management domain
        Gi1/0/4     200-211
        Gi1/0/23    200-211
        Po12        100-110
        Po14        200-211, 300-302
        Port        Vlans in spanning tree forwarding state and not pruned
        Gi1/0/4     200-211
        Gi1/0/23    200-211
        Po12        100-110
        Port        Vlans in spanning tree forwarding state and not pruned
        Po14        200-211
    '''}

    def test_empty(self):
        # Empty output must raise rather than return a partial structure.
        self.device = Mock(**self.empty_output)
        interface_obj = ShowInterfacesTrunk(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = interface_obj.parse()

    def test_golden(self):
        # Golden output must parse into exactly the expected dictionary.
        self.device = Mock(**self.golden_output)
        interface_obj = ShowInterfacesTrunk(device=self.device)
        parsed_output = interface_obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# unit tests for show interfaces <WORD> counters
#############################################################################
class test_show_interfaces_counters(unittest.TestCase):
    """Unit tests for ShowInterfacesCounters ('show interfaces <WORD> counters')."""

    device = Device(name='aDevice')
    # Simulated empty CLI response; drives the SchemaEmptyParserError path.
    empty_output = {'execute.return_value': ''}
    # Expected parse result: per-interface in/out packet and octet counters.
    golden_parsed_output = {
        "interface": {
            "GigabitEthernet1/0/1": {
                "out": {
                    "mcast_pkts": 188396,
                    "bcast_pkts": 0,
                    "ucast_pkts": 124435064,
                    "name": "GigabitEthernet1/0/1",
                    "octets": 24884341205
                },
                "in": {
                    "mcast_pkts": 214513,
                    "bcast_pkts": 0,
                    "ucast_pkts": 15716712,
                    "name": "GigabitEthernet1/0/1",
                    "octets": 3161931167
                }
            }
        }
    }

    # Canned device output; two tables (In*/Out*) for the same port.
    golden_output = {'execute.return_value': '''
        Port            InOctets    InUcastPkts    InMcastPkts    InBcastPkts
        Gi1/0/1       3161931167       15716712         214513              0
        Port           OutOctets   OutUcastPkts   OutMcastPkts   OutBcastPkts
        Gi1/0/1      24884341205      124435064         188396              0
    '''}

    def test_empty(self):
        # Empty output must raise regardless of the interface argument.
        self.device = Mock(**self.empty_output)
        interface_obj = ShowInterfacesCounters(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = interface_obj.parse(interface='Gi1/0/1')

    def test_golden(self):
        # The full interface name is accepted even though the CLI shows 'Gi1/0/1'.
        self.device = Mock(**self.golden_output)
        interface_obj = ShowInterfacesCounters(device=self.device)
        parsed_output = interface_obj.parse(interface='GigabitEthernet1/0/1')
        self.assertEqual(parsed_output,self.golden_parsed_output)
#############################################################################
# unit tests for show interfaces <interface> accounting
#############################################################################
class test_show_interfaces_accounting(unittest.TestCase):
    """Unit tests for ShowInterfacesAccounting ('show interfaces <intf> accounting')."""

    device = Device(name='aDevice')
    # Simulated empty CLI response; drives the SchemaEmptyParserError path.
    empty_output = {'execute.return_value': ''}
    # Expected parse result: per-interface, per-protocol packet/char accounting.
    # Interfaces with no traffic (Loopback0/1 in the fixture) are omitted.
    golden_parsed_output = \
    {
      "GigabitEthernet1": {
        "accounting": {
          "arp": {
            "chars_in": 4590030,
            "chars_out": 120,
            "pkts_in": 109280,
            "pkts_out": 2
          },
          "ip": {
            "chars_in": 2173570,
            "chars_out": 2167858,
            "pkts_in": 22150,
            "pkts_out": 22121
          },
          "ipv6": {
            "chars_in": 1944,
            "chars_out": 0,
            "pkts_in": 24,
            "pkts_out": 0
          },
          "other": {
            "chars_in": 5306164,
            "chars_out": 120,
            "pkts_in": 112674,
            "pkts_out": 2
          }
        }
      },
      "GigabitEthernet2": {
        "accounting": {
          "arp": {
            "chars_in": 5460,
            "chars_out": 5520,
            "pkts_in": 91,
            "pkts_out": 92
          },
          "ip": {
            "chars_in": 968690,
            "chars_out": 1148402,
            "pkts_in": 11745,
            "pkts_out": 10821
          },
          "ipv6": {
            "chars_in": 70,
            "chars_out": 0,
            "pkts_in": 1,
            "pkts_out": 0
          },
          "other": {
            "chars_in": 741524,
            "chars_out": 5520,
            "pkts_in": 3483,
            "pkts_out": 92
          }
        }
      },
      "GigabitEthernet3": {
        "accounting": {
          "arp": {
            "chars_in": 5460,
            "chars_out": 5520,
            "pkts_in": 91,
            "pkts_out": 92
          },
          "ip": {
            "chars_in": 1190691,
            "chars_out": 1376253,
            "pkts_in": 15271,
            "pkts_out": 14382
          },
          "ipv6": {
            "chars_in": 70,
            "chars_out": 0,
            "pkts_in": 1,
            "pkts_out": 0
          },
          "other": {
            "chars_in": 741524,
            "chars_out": 5520,
            "pkts_in": 3483,
            "pkts_out": 92
          }
        }
      }
    }
    # Canned device output. Includes two loopbacks with no traffic to verify
    # the parser skips the "No traffic sent or received" sections.
    golden_output = {'execute.return_value': '''
show interface accounting
GigabitEthernet1
                Protocol    Pkts In   Chars In   Pkts Out  Chars Out
                   Other     112674    5306164          2        120
                      IP      22150    2173570      22121    2167858
                     ARP     109280    4590030          2        120
                    IPv6         24       1944          0          0
GigabitEthernet2
                Protocol    Pkts In   Chars In   Pkts Out  Chars Out
                   Other       3483     741524         92       5520
                      IP      11745     968690      10821    1148402
                     ARP         91       5460         92       5520
                    IPv6          1         70          0          0
GigabitEthernet3
                Protocol    Pkts In   Chars In   Pkts Out  Chars Out
                   Other       3483     741524         92       5520
                      IP      15271    1190691      14382    1376253
                     ARP         91       5460         92       5520
                    IPv6          1         70          0          0
Loopback0
                Protocol    Pkts In   Chars In   Pkts Out  Chars Out
No traffic sent or received on this interface.
Loopback1
                Protocol    Pkts In   Chars In   Pkts Out  Chars Out
No traffic sent or received on this interface.
    '''}

    def test_empty(self):
        # Empty output must raise rather than return an empty dict.
        self.device = Mock(**self.empty_output)
        obj = ShowInterfacesAccounting(device=self.device)
        with self.assertRaises(SchemaEmptyParserError):
            parsed_output = obj.parse()

    def test_golden(self):
        # Golden output must parse into exactly the expected dictionary.
        self.device = Mock(**self.golden_output)
        obj = ShowInterfacesAccounting(device=self.device)
        parsed_output = obj.parse()
        self.assertEqual(parsed_output,self.golden_parsed_output)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 39.115335
| 107
| 0.489774
|
import sys
import unittest
from unittest.mock import Mock
from unittest.mock import patch
from textwrap import dedent
ats_mock = Mock()
with patch.dict('sys.modules',
{'ats' : ats_mock}, autospec=True):
import genie.parsergen
from genie.parsergen import oper_fill
from genie.parsergen import oper_check
from genie.parsergen import oper_fill_tabular
from genie.parsergen.examples.parsergen.pyAts import parsergen_demo_mkpg
import xml.etree.ElementTree as ET
from ats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError
from genie.libs.parser.iosxe.show_interface import ShowInterfacesSwitchport,\
ShowIpInterfaceBriefPipeVlan,\
ShowInterfaces, ShowIpInterface,\
ShowIpv6Interface, \
ShowInterfacesTrunk, \
ShowInterfacesCounters, \
ShowInterfacesAccounting, \
ShowIpInterfaceBriefPipeIp
class test_show_interface_parsergen(unittest.TestCase):
    """Unit test for the generic parsergen tabular parser against
    'show ip interface brief' output."""

    def test_tabular_parser(self):
        """oper_fill_tabular must parse the table into a per-interface dict
        and must send exactly the expected command to the device."""
        # Canned CLI output; dedent() strips the common leading indentation
        # before it is handed to the mocked device.
        self.showCommandOutput='''
            R1#show ip interface brief
            Interface              IP-Address      OK? Method Status                Protocol
            GigabitEthernet0/0     10.1.10.20      YES NVRAM  up                    up
            GigabitEthernet1/0/1   unassigned      YES unset  up                    up
            GigabitEthernet1/0/10  unassigned      YES unset  down                  down
'''
        # Expected entries keyed by interface name (index column 0).
        self.outputDict = {'GigabitEthernet0/0': {'IP-Address': '10.1.10.20',
                                                  'Interface': 'GigabitEthernet0/0',
                                                  'Method': 'NVRAM',
                                                  'OK?': 'YES',
                                                  'Protocol': 'up',
                                                  'Status': 'up'},
                           'GigabitEthernet1/0/1': {'IP-Address': 'unassigned',
                                                    'Interface': 'GigabitEthernet1/0/1',
                                                    'Method': 'unset',
                                                    'OK?': 'YES',
                                                    'Protocol': 'up',
                                                    'Status': 'up'},
                           'GigabitEthernet1/0/10': {'IP-Address': 'unassigned',
                                                     'Interface': 'GigabitEthernet1/0/10',
                                                     'Method': 'unset',
                                                     'OK?': 'YES',
                                                     'Protocol': 'down',
                                                     'Status': 'down'}}

        device_kwargs = {'is_connected.return_value':True,
                         'execute.return_value':dedent(self.showCommandOutput)}
        device1 = Mock(**device_kwargs)
        device1.name='router3'

        # header_fields are regexes, so '?' must be escaped; use a raw string
        # (plain "OK\?" is an invalid escape sequence and a SyntaxWarning in
        # Python 3.12+). label_fields carry the literal key names instead.
        result = genie.parsergen.oper_fill_tabular(device=device1,
                                                   show_command="show ip interface brief",
                                                   refresh_cache=True,
                                                   header_fields=
                                                   [ "Interface",
                                                     "IP-Address",
                                                     r"OK\?",
                                                     "Method",
                                                     "Status",
                                                     "Protocol" ],
                                                   label_fields=
                                                   [ "Interface",
                                                     "IP-Address",
                                                     "OK?",
                                                     "Method",
                                                     "Status",
                                                     "Protocol" ],
                                                   index=[0])

        self.assertEqual(result.entries, self.outputDict)
        # assertIn reports both operands on failure, unlike assertTrue(x in y).
        args, kwargs = device1.execute.call_args
        self.assertIn('show ip interface brief', args,
                      msg='The expected command was not sent to the router')
arse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_empty(self):
self.device1 = Mock(**self.empty_output)
intf_obj = ShowInterfacesSwitchport(device=self.device1)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = intf_obj.parse()
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_input": "never",
"last_output": "04:39:18",
"line_protocol": "down",
"mac_address": "0057.d228.1a64",
"connected": False,
"port_channel": {
"port_channel_member": False
},
"media_type": "10/100/1000BaseTX",
"bandwidth": 768,
"port_speed": "1000",
"enabled": False,
"arp_timeout": "04:00:00",
"mtu": 1500,
"delay": 3330,
"reliability": "255/255"
},
"GigabitEthernet3": {
"flow_control": {
"send": False,
"receive": False
},
"type": "CSR vNIC",
'auto_negotiate': True,
'duplex_mode': 'full',
'link_type': 'auto',
'media_type': 'RJ45',
'port_speed': '1000',
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "never",
"out_interface_resets": 1,
"in_mac_pause_frames": 0,
"out_collision": 0,
"in_crc_errors": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 6,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 480,
"out_unknown_protocl_drops": 0,
"out_no_carrier": 0,
"out_lost_carrier": 0,
"in_broadcast_pkts": 0,
"out_pkts": 28,
"out_late_collision": 0,
"out_octets": 7820,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "5254.0072.9b0c",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"ipv4": {
"200.2.1.1/24": {
"prefix_length": "24",
"ip": "200.2.1.1"
},
"unnumbered": {
"interface_ref": "Loopback0"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_output": "00:00:27",
"line_protocol": "up",
"mac_address": "5254.0072.9b0c",
"oper_status": "up",
"port_channel": {
"port_channel_member": False
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never"
},
"Loopback0": {
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 75,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 0,
"queue_strategy": "fifo"
},
"mtu": 1514,
"encapsulations": {
"encapsulation": "loopback"
},
"last_output": "never",
"type": "Loopback",
"line_protocol": "up",
"oper_status": "up",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_pkts": 0,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 0,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5760,
"in_overrun": 0,
"in_abort": 0
},
"reliability": "255/255",
"bandwidth": 8000000,
"port_channel": {
"port_channel_member": False
},
"enabled": True,
"ipv4": {
"200.2.1.1/24": {
"prefix_length": "24",
"ip": "200.2.1.1"
}
},
"rxload": "1/255",
"delay": 5000,
"last_input": "1d02h"
},
"Vlan100": {
"type": "Ethernet SVI",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d04h",
"out_interface_resets": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 0,
"in_rate_pkts": 0
},
"in_pkts": 50790,
"in_multicast_pkts": 0,
"in_runts": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 3657594,
"in_crc_errors": 0,
"out_unknown_protocl_drops": 0,
"in_broadcast_pkts": 0,
"out_pkts": 72,
"out_octets": 5526,
"in_overrun": 0
},
"phys_address": "0057.d228.1a51",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 375,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"txload": "1/255",
"reliability": "255/255",
"arp_type": "arpa",
"rxload": "1/255",
"output_hang": "never",
"ipv4": {
"201.0.12.1/24": {
"prefix_length": "24",
"ip": "201.0.12.1"
}
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_output": "1d03h",
"line_protocol": "up",
"mac_address": "0057.d228.1a51",
"oper_status": "up",
"port_channel": {
"port_channel_member": False
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"enabled": True,
"mtu": 1500,
"delay": 10,
"last_input": "never"
},
"GigabitEthernet1/0/2": {
"flow_control": {
"send": False,
"receive": False
},
"type": "Gigabit Ethernet",
"counters": {
"out_buffer_failure": 0,
"out_underruns": 0,
"in_giants": 0,
"in_throttles": 0,
"in_frame": 0,
"in_ignored": 0,
"last_clear": "1d02h",
"out_interface_resets": 5,
"in_mac_pause_frames": 0,
"out_collision": 0,
"rate": {
"out_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"in_rate": 3000,
"in_rate_pkts": 5
},
"in_watchdog": 0,
"out_deferred": 0,
"out_mac_pause_frames": 0,
"in_pkts": 545526,
"in_multicast_pkts": 535961,
"in_runts": 0,
"out_unknown_protocl_drops": 0,
"in_no_buffer": 0,
"out_buffers_swapped": 0,
"out_lost_carrier": 0,
"out_errors": 0,
"in_errors": 0,
"in_octets": 41210298,
"in_crc_errors": 0,
"out_no_carrier": 0,
"in_with_dribble": 0,
"in_broadcast_pkts": 535961,
"out_pkts": 23376,
"out_late_collision": 0,
"out_octets": 3642296,
"in_overrun": 0,
"out_babble": 0
},
"phys_address": "0057.d228.1a02",
"keepalive": 10,
"output_hang": "never",
"txload": "1/255",
"oper_status": "up",
"arp_type": "arpa",
"media_type": "10/100/1000BaseTX",
"rxload": "1/255",
"duplex_mode": "full",
"queues": {
"input_queue_size": 0,
"total_output_drop": 0,
"input_queue_drops": 0,
"input_queue_max": 2000,
"output_queue_size": 0,
"input_queue_flushes": 0,
"output_queue_max": 40,
"queue_strategy": "fifo"
},
"encapsulations": {
"encapsulation": "arpa"
},
"last_input": "never",
"last_output": "00:00:02",
"line_protocol": "up",
"mac_address": "0057.d228.1a02",
"connected": True,
"port_channel": {
"port_channel_member": True,
'port_channel_int': 'Port-channel12',
},
"arp_timeout": "04:00:00",
"bandwidth": 1000000,
"port_speed": "1000",
"enabled": True,
"mtu": 1500,
"delay": 10,
"reliability": "255/255"
},
"GigabitEthernet0/0/4": {
"arp_timeout": "04:00:00",
"arp_type": "arpa",
"bandwidth": 1000000,
"counters": {
"in_broadcast_pkts": 0,
"in_crc_errors": 0,
"in_errors": 0,
"in_frame": 0,
"in_giants": 0,
"in_ignored": 0,
"in_mac_pause_frames": 0,
"in_multicast_pkts": 0,
"in_no_buffer": 0,
"in_octets": 0,
"in_overrun": 0,
"in_pkts": 0,
"in_runts": 0,
"in_throttles": 0,
"in_watchdog": 0,
"last_clear": "never",
"out_babble": 0,
"out_collision": 0,
"out_deferred": 0,
"out_errors": 0,
"out_interface_resets": 1,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_mac_pause_frames": 0,
"out_no_carrier": 0,
"out_octets": 0,
"out_pkts": 0,
"out_underruns": 0,
"out_unknown_protocl_drops": 0,
"rate": {
"in_rate": 0,
"in_rate_pkts": 0,
"load_interval": 300,
"out_rate": 0,
"out_rate_pkts": 0
}
},
"delay": 10,
"enabled": False,
"encapsulations": {
"encapsulation": "arpa"
},
"flow_control": {
"receive": False, "send": False
},
"last_input": "never",
"last_output": "never",
"line_protocol": "down",
"mac_address": "380e.4d6c.7006",
"phys_address": "380e.4d6c.7006",
"mtu": 1500,
"oper_status": "down",
"output_hang": "never",
"port_channel": {
"port_channel_member": False
},
"queues": {
"input_queue_drops": 0,
"input_queue_flushes": 0,
"input_queue_max": 375,
"input_queue_size": 0,
"output_queue_max": 40,
"output_queue_size": 0,
"queue_strategy": "fifo",
"total_output_drop": 0
},
"reliability": "255/255",
"rxload": "1/255",
"txload": "1/255",
"type": "BUILT-IN-2T+6X1GE"
}
}
golden_output = {'execute.return_value': '''
GigabitEthernet1/0/1 is administratively down, line protocol is down (disabled)
Hardware is Gigabit Ethernet, address is 0057.d228.1a64 (bia 0057.d228.1a64)
Description: desc
Internet address is 10.1.1.1/24
MTU 1500 bytes, BW 768 Kbit/sec, DLY 3330 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Auto-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
input flow-control is off, output flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 04:39:18, output hang never
Last clearing of "show interface" counters 1d02h
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
30 second input rate 0 bits/sec, 0 packets/sec
30 second output rate 0 bits/sec, 0 packets/sec
12127 packets input, 2297417 bytes, 0 no buffer
Received 4173 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 4171 multicast, 0 pause input
0 input packets with dribble condition detected
12229 packets output, 2321107 bytes, 0 underruns
0 output errors, 0 collisions, 2 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet1/0/2 is up, line protocol is up (connected)
Hardware is Gigabit Ethernet, address is 0057.d228.1a02 (bia 0057.d228.1a02)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full-duplex, 1000Mb/s, media type is 10/100/1000BaseTX
input flow-control is off, output flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 00:00:02, output hang never
Last clearing of "show interface" counters 1d02h
Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 3000 bits/sec, 5 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
545526 packets input, 41210298 bytes, 0 no buffer
Received 535996 broadcasts (535961 multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 535961 multicast, 0 pause input
0 input packets with dribble condition detected
23376 packets output, 3642296 bytes, 0 underruns
0 output errors, 0 collisions, 5 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet3 is up, line protocol is up
Hardware is CSR vNIC, address is 5254.0072.9b0c (bia 5254.0072.9b0c)
Interface is unnumbered. Using address of Loopback0 (200.2.1.1)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full Duplex, 1000Mbps, link type is auto, media type is RJ45
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 00:00:27, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
6 packets input, 480 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
28 packets output, 7820 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
Loopback0 is up, line protocol is up
Hardware is Loopback
Internet address is 200.2.1.1/24
MTU 1514 bytes, BW 8000000 Kbit/sec, DLY 5000 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation LOOPBACK, loopback not set
Keepalive set (10 sec)
Last input 1d02h, output never, output hang never
Last clearing of "show interface" counters 1d04h
Input queue: 0/75/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/0 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
0 packets input, 0 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored, 0 abort
72 packets output, 5760 bytes, 0 underruns
0 output errors, 0 collisions, 0 interface resets
0 unknown protocol drops
0 output buffer failures, 0 output buffers swapped out
Vlan100 is up, line protocol is up
Hardware is Ethernet SVI, address is 0057.d228.1a51 (bia 0057.d228.1a51)
Internet address is 201.0.12.1/24
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive not supported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 1d03h, output hang never
Last clearing of "show interface" counters 1d04h
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
50790 packets input, 3657594 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
72 packets output, 5526 bytes, 0 underruns
0 output errors, 0 interface resets
0 unknown protocol drops
0 output buffer failures, 0 output buffers swapped out
Port-channel12 is up, line protocol is up (connected)
Hardware is EtherChannel, address is 0057.d228.1a02 (bia 0057.d228.1a02)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation QinQ Virtual LAN, outer ID 10, inner ID 20
Keepalive set (10 sec)
Full-duplex, 1000Mb/s, link type is auto, media type is
input flow-control is off, output flow-control is unsupported
Members in this channel: Gi1/0/2
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output 1d22h, output hang never
Last clearing of "show interface" counters 1d23h
Input queue: 0/2000/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/0 (size/max)
5 minute input rate 2000 bits/sec, 2 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
961622 packets input, 72614643 bytes, 0 no buffer
Received 944818 broadcasts (944788 multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 4286699522 multicast, 0 pause input
0 input packets with dribble condition detected
39281 packets output, 6235318 bytes, 0 underruns
0 output errors, 0 collisions, 2 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
GigabitEthernet0/0/4 is administratively down, line protocol is down
Hardware is BUILT-IN-2T+6X1GE, address is 380e.4d6c.7006 (bia 380e.4d6c.7006)
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive not supported
Full Duplex, 1000Mbps, link type is auto, media type is unknown media type
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input never, output never, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 0 bits/sec, 0 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
0 packets input, 0 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
0 packets output, 0 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
'''}
golden_interface_output = {'execute.return_value': '''
CE1#show interfaces GigabitEthernet1
GigabitEthernet1 is up, line protocol is up
Hardware is CSR vNIC, address is 5e00.0001.0000 (bia 5e00.0001.0000)
Internet address is 172.16.1.243/24
MTU 1500 bytes, BW 1000000 Kbit/sec, DLY 10 usec,
reliability 255/255, txload 1/255, rxload 1/255
Encapsulation ARPA, loopback not set
Keepalive set (10 sec)
Full Duplex, 1000Mbps, link type is auto, media type is Virtual
output flow-control is unsupported, input flow-control is unsupported
ARP type: ARPA, ARP Timeout 04:00:00
Last input 00:00:02, output 00:00:25, output hang never
Last clearing of "show interface" counters never
Input queue: 0/375/0/0 (size/max/drops/flushes); Total output drops: 0
Queueing strategy: fifo
Output queue: 0/40 (size/max)
5 minute input rate 32000 bits/sec, 28 packets/sec
5 minute output rate 0 bits/sec, 0 packets/sec
7658 packets input, 1125842 bytes, 0 no buffer
Received 0 broadcasts (0 IP multicasts)
0 runts, 0 giants, 0 throttles
0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored
0 watchdog, 0 multicast, 0 pause input
44 packets output, 4324 bytes, 0 underruns
0 output errors, 0 collisions, 1 interface resets
0 unknown protocol drops
0 babbles, 0 late collision, 0 deferred
0 lost carrier, 0 no carrier, 0 pause output
0 output buffer failures, 0 output buffers swapped out
'''
}
golden_parsed_interface_output={
"GigabitEthernet1": {
"rxload": "1/255",
"phys_address": "5e00.0001.0000",
"flow_control": {
"send": False,
"receive": False
},
"arp_type": "arpa",
"type": "CSR vNIC",
"enabled": True,
"media_type": "Virtual",
"last_input": "00:00:02",
"link_type": "auto",
"last_output": "00:00:25",
"counters": {
"in_errors": 0,
"in_frame": 0,
"in_watchdog": 0,
"out_babble": 0,
"in_overrun": 0,
"out_collision": 0,
"out_buffer_failure": 0,
"out_no_carrier": 0,
"in_runts": 0,
"out_late_collision": 0,
"in_mac_pause_frames": 0,
"out_underruns": 0,
"out_pkts": 44,
"in_ignored": 0,
"in_pkts": 7658,
"out_buffers_swapped": 0,
"out_interface_resets": 1,
"rate": {
"out_rate": 0,
"load_interval": 300,
"in_rate_pkts": 28,
"out_rate_pkts": 0,
"in_rate": 32000
},
"out_mac_pause_frames": 0,
"in_broadcast_pkts": 0,
"in_no_buffer": 0,
"out_deferred": 0,
"in_crc_errors": 0,
"out_octets": 4324,
"out_lost_carrier": 0,
"in_octets": 1125842,
"out_unknown_protocl_drops": 0,
"last_clear": "never",
"in_throttles": 0,
"in_multicast_pkts": 0,
"out_errors": 0,
"in_giants": 0
},
"keepalive": 10,
"mtu": 1500,
"delay": 10,
"encapsulations": {
"encapsulation": "arpa"
},
"ipv4": {
"172.16.1.243/24": {
"ip": "172.16.1.243",
"prefix_length": "24"
}
},
"queues": {
"output_queue_size": 0,
"input_queue_size": 0,
"input_queue_flushes": 0,
"queue_strategy": "fifo",
"total_output_drop": 0,
"output_queue_max": 40,
"input_queue_drops": 0,
"input_queue_max": 375
},
"auto_negotiate": True,
"line_protocol": "up",
"oper_status": "up",
"duplex_mode": "full",
"bandwidth": 1000000,
"arp_timeout": "04:00:00",
"port_speed": "1000",
"port_channel": {
"port_channel_member": False
},
"output_hang": "never",
"txload": "1/255",
"mac_address": "5e00.0001.0000",
"reliability": "255/255"
}
}
def test_empty(self):
self.device = Mock(**self.empty_output)
interface_obj = ShowInterfaces(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = interface_obj.parse()
def test_golden(self):
self.device = Mock(**self.golden_output)
interface_obj = ShowInterfaces(device=self.device)
parsed_output = interface_obj.parse()
self.maxDiff = None
self.assertEqual(parsed_output,self.golden_parsed_output)
def test_show_interfaces(self):
    """Parsing a single named interface must match the golden fixture."""
    self.maxDiff = None
    self.device = Mock(**self.golden_interface_output)
    parser = ShowInterfaces(device=self.device)
    self.assertEqual(parser.parse(interface='GigabitEthernet1'),
                     self.golden_parsed_interface_output)
t_outbound': False,
'redirect_inbound': False,
'redirect_exclude': False,
},
"ip_null_turbo_vector": True,
"probe_proxy_name_replies": False,
"ip_fast_switching": True,
"ip_multicast_distributed_fast_switching": False,
"tcp_ip_header_compression": False,
"rtp_ip_header_compression": False,
"directed_broadcast_forwarding": False,
"ip_flow_switching": False,
"input_features": ["MCI Check", "QoS Classification", "QoS Marking"],
}
}
golden_output = {'execute.return_value': '''
Vlan211 is up, line protocol is up
Internet address is 201.11.14.1/24
Broadcast address is 255.255.255.255
Address determined by configuration file
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet0/0 is up, line protocol is up
Internet address is 10.1.8.134/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
VPN Routing/Forwarding "Mgmt-vrf"
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
GigabitEthernet1/0/1 is administratively down, line protocol is down
Internet address is 10.1.1.1/24
Broadcast address is 255.255.255.255
Address determined by setup command
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Secondary address 10.2.2.2/24
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is disabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: QoS Classification, QoS Marking, MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
GigabitEthernet2 is administratively down, line protocol is down
Internet protocol processing disabled
'''}
golden_interface_output = {'execute.return_value':'''
CE1#show ip interface GigabitEthernet1
GigabitEthernet1 is up, line protocol is up
Internet address is 172.16.1.243/24
Broadcast address is 255.255.255.255
Address determined by DHCP
MTU is 1500 bytes
Helper address is not set
Directed broadcast forwarding is disabled
Outgoing Common access list is not set
Outgoing access list is not set
Inbound Common access list is not set
Inbound access list is not set
Proxy ARP is enabled
Local Proxy ARP is disabled
Security level is default
Split horizon is enabled
ICMP redirects are always sent
ICMP unreachables are always sent
ICMP mask replies are never sent
IP fast switching is enabled
IP Flow switching is disabled
IP CEF switching is enabled
IP CEF switching turbo vector
IP Null turbo vector
Associated unicast routing topologies:
Topology "base", operation state is UP
IP multicast fast switching is enabled
IP multicast distributed fast switching is disabled
IP route-cache flags are Fast, CEF
Router Discovery is disabled
IP output packet accounting is disabled
IP access violation accounting is disabled
TCP/IP header compression is disabled
RTP/IP header compression is disabled
Probe proxy name replies are disabled
Policy routing is disabled
Network address translation is disabled
BGP Policy Mapping is disabled
Input features: MCI Check
IPv4 WCCP Redirect outbound is disabled
IPv4 WCCP Redirect inbound is disabled
IPv4 WCCP Redirect exclude is disabled
'''
}
# Expected parse of 'show ip interface GigabitEthernet1' (see
# golden_interface_output above). Key spellings below intentionally mirror
# the parser's schema, including its typos — do not "correct" them here.
golden_parsed_interface_output = {
    "GigabitEthernet1": {
        "ip_multicast_fast_switching": True,
        "oper_status": "up",
        "ip_output_packet_accounting": False,
        "address_determined_by": "DHCP",
        "rtp_ip_header_compression": False,
        "ip_multicast_distributed_fast_switching": False,
        "wccp": {
            "redirect_exclude": False,
            "redirect_outbound": False,
            "redirect_inbound": False
        },
        "unicast_routing_topologies": {
            "topology": {
                "base": {
                    "status": "up"
                }
            }
        },
        "router_discovery": False,
        "tcp_ip_header_compression": False,
        "probe_proxy_name_replies": False,
        "local_proxy_arp": False,
        "policy_routing": False,
        "mtu": 1500,
        "icmp": {
            "mask_replies": "never sent",
            "unreachables": "always sent",
            "redirects": "always sent"
        },
        "enabled": True,
        "ip_route_cache_flags": [
            "CEF",
            "Fast"
        ],
        "ip_cef_switching": True,
        "ip_fast_switching": True,
        # NB: "sevurity" is the parser's own key spelling — must match schema.
        "sevurity_level": "default",
        "directed_broadcast_forwarding": False,
        "proxy_arp": True,
        "ip_null_turbo_vector": True,
        "network_address_translation": False,
        "input_features": [
            "MCI Check"
        ],
        "bgp_policy_mapping": False,
        "split_horizon": True,
        "ip_access_violation_accounting": False,
        "ip_cef_switching_turbo_vector": True,
        "ipv4": {
            "172.16.1.243/24": {
                "ip": "172.16.1.243",
                "prefix_length": "24",
                # NB: "broadcase" is the parser's own key spelling.
                "broadcase_address": "255.255.255.255",
                "secondary": False
            }
        },
        "ip_flow_switching": False
    }
}
def test_empty(self):
    """Parsing empty device output must raise SchemaEmptyParserError."""
    self.device = Mock(**self.empty_output)
    parser = ShowIpInterface(device=self.device)
    with self.assertRaises(SchemaEmptyParserError):
        parser.parse()
def test_golden(self):
    """Full 'show ip interface' output must parse into the golden fixture."""
    self.maxDiff = None
    self.device = Mock(**self.golden_output)
    parser = ShowIpInterface(device=self.device)
    self.assertEqual(parser.parse(), self.golden_parsed_output)
def test_interface_golden(self):
    """Parsing a single named interface must match the golden fixture."""
    self.maxDiff = None
    self.device = Mock(**self.golden_interface_output)
    parser = ShowIpInterface(device=self.device)
    self.assertEqual(parser.parse(interface='GigabitEthernet1'),
                     self.golden_parsed_interface_output)
02::1
FF02::1:FF14:1
FF02::1:FF28:1A71
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet1/0/1 is administratively down, line protocol is down
IPv6 is tentative, link-local address is FE80::257:D2FF:FE28:1A64 [TEN]
No Virtual link-local address(es):
Description: desc
Global unicast address(es):
2000::1, subnet is 2000::/126 [TEN]
2001:DB8:1:1::1, subnet is 2001:DB8:1:1::/64 [TEN]
2001:DB8:2:2::2, subnet is 2001:DB8:2:2::/64 [TEN]
2001:DB8:3:3::3, subnet is 2001:DB8:3:3::/64 [ANY/TEN]
2001:DB8:4:4:257:D2FF:FE28:1A64, subnet is 2001:DB8:4:4::/64 [EUI/TEN]
Joined group address(es):
FF02::1
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND NS retransmit interval is 1000 milliseconds
GigabitEthernet3 is up, line protocol is up
IPv6 is enabled, link-local address is FE80::5054:FF:FE1E:4F2
No Virtual link-local address(es):
Interface is unnumbered. Using address of Loopback0
No global unicast address is configured
Joined group address(es):
FF02::1
FF02::2
FF02::1:FF1E:4F2
MTU is 1500 bytes
ICMP error messages limited to one every 100 milliseconds
ICMP redirects are enabled
ICMP unreachables are sent
ND DAD is enabled, number of DAD attempts: 1
ND reachable time is 30000 milliseconds (using 30000)
ND advertised reachable time is 0 (unspecified)
ND advertised retransmit interval is 0 (unspecified)
ND router advertisements are sent every 200 seconds
ND router advertisements live for 1800 seconds
ND advertised default router preference is Medium
Hosts use stateless autoconfig for addresses.
'''}
def test_empty(self):
    """Parsing empty device output must raise SchemaEmptyParserError."""
    self.device = Mock(**self.empty_output)
    parser = ShowIpv6Interface(device=self.device)
    with self.assertRaises(SchemaEmptyParserError):
        parser.parse()
def test_golden(self):
    """Full 'show ipv6 interface' output must parse into the golden fixture."""
    self.maxDiff = None
    self.device = Mock(**self.golden_output)
    parser = ShowIpv6Interface(device=self.device)
    self.assertEqual(parser.parse(), self.golden_parsed_output)
| true
| true
|
f704f29d917dfbd51e439f0dd5292f602da50c6f
| 8,655
|
py
|
Python
|
modelci/experimental/model/model_structure.py
|
FerdinandZhong/ML-Model-CI
|
90fa2de056dca05031f0787b96c520dc57dc664d
|
[
"Apache-2.0"
] | 170
|
2020-06-08T18:30:52.000Z
|
2022-03-28T12:08:11.000Z
|
modelci/experimental/model/model_structure.py
|
FerdinandZhong/ML-Model-CI
|
90fa2de056dca05031f0787b96c520dc57dc664d
|
[
"Apache-2.0"
] | 146
|
2020-06-14T18:56:27.000Z
|
2022-02-27T21:15:59.000Z
|
modelci/experimental/model/model_structure.py
|
FerdinandZhong/ML-Model-CI
|
90fa2de056dca05031f0787b96c520dc57dc664d
|
[
"Apache-2.0"
] | 36
|
2020-06-08T18:30:56.000Z
|
2022-03-07T18:10:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: Li Yuanming
Email: yli056@e.ntu.edu.sg
Date: 1/27/2021
ML model structure definitions.
"""
import abc
import inspect
from enum import Enum
from typing import Optional, Union, Tuple, Dict, OrderedDict
from pydantic import BaseModel, PositiveInt, conint, PositiveFloat, Field, validator
from typing_extensions import Literal
class Operation(Enum):
    """
    Operation enum applied to a layer or connection. There are four kinds of
    operations: ``'A'`` adds the specific layer / connection, ``'D'`` deletes
    it, ``'M'`` modifies it, and ``'E'`` is a no-op placeholder.
    """
    ADD = 'A'     # add the layer / connection
    DELETE = 'D'  # delete the layer / connection
    MODIFY = 'M'  # modify the layer / connection
    EMPTY = 'E'   # no operation
class LayerType(Enum):
    """
    Enum of the supported layer types. The value is the fully qualified
    ``torch.nn`` class name; it hints which :class:`ModelLayer` subclass the
    provided data is converted to.
    """
    LINEAR = 'torch.nn.Linear'
    CONV_1D = 'torch.nn.Conv1d'
    CONV_2D = 'torch.nn.Conv2d'
    RELU = 'torch.nn.ReLU'
    TANH = 'torch.nn.Tanh'
    BN_1D = 'torch.nn.BatchNorm1d'
    BN_2D = 'torch.nn.BatchNorm2d'
    MP_1D = 'torch.nn.MaxPool1d'
    MP_2D = 'torch.nn.MaxPool2d'
    AAP_1D = 'torch.nn.AdaptiveAvgPool1d'
    AAP_2D = 'torch.nn.AdaptiveAvgPool2d'
class ModelLayer(BaseModel, abc.ABC):
    # noinspection PyUnresolvedReferences
    """
    Abstract base for a layer of the model structure.

    For layer attributes that need to be set to :code:`None`, use the string
    :code:`'null'` instead: updated parameters whose value is :code:`None`
    are treated as "not set", so a genuine :code:`None` must be spelled
    :code:`'null'` to survive an update.

    Attributes:
        op_ (Operation): Operation to apply to the layer.
        type_ (LayerType): The type of this layer. This field also provides
            the hint for :class:`pydantic` model conversion.
        __required_type__ (LayerType): Subclasses override this so that
            :meth:`check_layer_type` can validate ``type_``.
    """
    op_: Operation
    type_: LayerType
    __required_type__: LayerType

    @classmethod
    def parse_layer_obj(cls, layer_obj):
        """
        Parse from an ML layer object.

        Inspects the parameters of ``layer_obj.__init__`` and reads each one
        off the object. The default parameter parser is plain :code:`getattr`
        (same-named attribute of the layer object). A parameter that cannot be
        parsed that way is handled by a class-level function named
        :code:`__{parameter_name}_parser__(layer_obj: Any) -> Any` which
        receives the layer object and returns the parsed value.

        TODO:
            Signature checking for __{parameter_name}_parser__
        """
        kwargs = {'op_': Operation.EMPTY, 'type_': cls.__required_type__}
        signature = inspect.signature(layer_obj.__init__)
        for param in signature.parameters:
            # NOTE: the fallback lambda closes over the loop variable `param`;
            # this is safe only because the parser is invoked immediately
            # below, within the same iteration.
            parser = getattr(cls, f'__{param}_parser__', lambda obj: getattr(obj, param))
            kwargs[param] = parser(layer_obj)
        return cls(**kwargs)

    @validator('type_')
    def check_layer_type(cls, layer_type: LayerType) -> LayerType:  # noqa
        """
        Check that the provided layer type equals the subclass's required type.

        This generates the ``type_`` field validator for every subclass of
        :class:`ModelLayer`.
        """
        if layer_type != cls.__required_type__:
            raise ValueError(f'Expected {cls.__required_type__} but got {layer_type}')
        return layer_type
class Linear(ModelLayer):
    # Mirrors the constructor parameters of torch.nn.Linear.
    in_features: Optional[PositiveInt]
    out_features: Optional[PositiveInt]
    bias: Optional[bool]
    __required_type__ = LayerType.LINEAR

    @staticmethod
    def __bias_parser__(layer_obj):
        # The layer object stores bias as a tensor (or None); the schema only
        # records whether a bias term is present.
        return layer_obj.bias is not None
class _ConvNd(ModelLayer, abc.ABC):
    # Shared constructor parameters of torch.nn.Conv1d / Conv2d.
    in_channels: Optional[PositiveInt]
    out_channels: Optional[PositiveInt]
    kernel_size: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
    stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
    padding: Optional[Union[conint(ge=0), Tuple[conint(ge=0), ...]]]
    dilation: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
    groups: PositiveInt
    bias: bool
    padding_mode: Literal['zeros', 'reflect', 'replicate', 'circular']

    @staticmethod
    def __bias_parser__(layer_obj):
        # The layer object stores bias as a tensor (or None); the schema only
        # records whether a bias term is present.
        return layer_obj.bias is not None
class Conv1d(_ConvNd):
    # 1-D convolution; all parameters inherited from _ConvNd.
    __required_type__ = LayerType.CONV_1D


class Conv2d(_ConvNd):
    # 2-D convolution; all parameters inherited from _ConvNd.
    __required_type__ = LayerType.CONV_2D
class ReLU(ModelLayer):
    # Mirrors torch.nn.ReLU (single `inplace` flag).
    inplace: Optional[bool]
    __required_type__ = LayerType.RELU


class Tanh(ModelLayer):
    # torch.nn.Tanh takes no constructor parameters.
    __required_type__ = LayerType.TANH
class _BatchNorm(ModelLayer, abc.ABC):
    # Shared constructor parameters of torch.nn.BatchNorm1d / BatchNorm2d.
    num_features: Optional[PositiveInt]
    eps: Optional[PositiveFloat]
    # 'null' is the stand-in for a literal None momentum (see ModelLayer).
    momentum: Optional[Union[PositiveFloat, Literal['null']]]
    affine: Optional[bool]
    track_running_stats: Optional[bool]
class BatchNorm1d(_BatchNorm):
    # 1-D batch normalization; parameters inherited from _BatchNorm.
    __required_type__ = LayerType.BN_1D


class BatchNorm2d(_BatchNorm):
    # 2-D batch normalization; parameters inherited from _BatchNorm.
    __required_type__ = LayerType.BN_2D
class _MaxPool(ModelLayer, abc.ABC):
    # Shared constructor parameters of torch.nn.MaxPool1d / MaxPool2d.
    # Defaults presumably mirror torch's own — confirm against the torch
    # version this project pins.
    kernel_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
    stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]] = None
    padding: Union[conint(ge=0), Tuple[conint(ge=0), ...]] = 0
    dilation: Union[PositiveInt, Tuple[PositiveInt, ...]] = 1
    return_indices: bool = False
    ceil_mode: bool = False
class MaxPool1d(_MaxPool):
    # 1-D max pooling; parameters inherited from _MaxPool.
    __required_type__ = LayerType.MP_1D


class MaxPool2d(_MaxPool):
    # 2-D max pooling; parameters inherited from _MaxPool.
    __required_type__ = LayerType.MP_2D
class _AdaptiveAvgPool(ModelLayer, abc.ABC):
    # Shared parameter of torch.nn.AdaptiveAvgPool1d / AdaptiveAvgPool2d.
    output_size: Union[PositiveInt, Tuple[PositiveInt, ...]]


class AdaptiveAvgPool1d(_AdaptiveAvgPool):
    # 1-D adaptive average pooling.
    __required_type__ = LayerType.AAP_1D


class AdaptiveAvgPool2d(_AdaptiveAvgPool):
    # 2-D adaptive average pooling.
    __required_type__ = LayerType.AAP_2D
# Union of every concrete layer model; pydantic uses it to resolve the
# concrete class when parsing Structure.layer values (the `type_` field is
# the discriminating hint).
_LayerType = Union[Linear, Conv1d, Conv2d, ReLU, Tanh, BatchNorm1d, BatchNorm2d, MaxPool1d, MaxPool2d,
                   AdaptiveAvgPool1d, AdaptiveAvgPool2d]
class Structure(BaseModel):
    # noinspection PyUnresolvedReferences
    """
    Indicate a ML model structure using a graph data structure.

    :attr:`layer` is the graph node, representing a layer of the model.
    :attr:`connection` is the graph edge, representing which two layers are
    connected, and the direction of the tensor pass.

    Attributes:
        layer (OrderedDict[str, _LayerType]): Layer mapping; the key is the
            layer name and the value is the layer attributes. See
            :class:`ModelLayer` for reference.
        connection (Optional[Dict[str, Dict[str, Operation]]]): Maps the
            starting layer name to the ending layer name with a connection
            operation.

    Examples::
        >>> from collections import OrderedDict
        >>> # add a nn.Linear layer named 'fc1' with in_features=1024, out_features=10
        >>> layer_mapping = OrderedDict({
        ...     'fc1': Linear(in_features=1024, out_features=10, type_=LayerType.LINEAR, op_=Operation.ADD),
        ... })
        >>> # connection example for add connection from 'conv1' to 'fc1'
        >>> connection_mapping = {'conv1': {'fc1': Operation.ADD}}
        >>> struct = Structure(layer=layer_mapping, connection=connection_mapping)
        >>> print(struct)
        layer={'fc1': Linear(in_features=1024, out_features=10, bias=None)}
        connection={'conv1': {'fc1': <Operation.ADD: 'A'>}}
        >>> # Other than using the model object, we can pass in a plain dictionary,
        ... # and utilize `Structure.parse_obj`.
        >>> structure_data = {
        ...     'layer': {'fc': {'in_features': 1024, 'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'A'}},
        ...     'connection': {'conv1': {'fc1': 'A'}}
        ... }
        >>> Structure.parse_obj(structure_data)
        Structure(layer={'fc': Linear(in_features=1024, out_features=10, bias=None)},
        connection={'conv1': {'fc1': <Operation.ADD: 'A'>}})
    """
    # Graph nodes: layer name -> layer attributes (insertion order preserved).
    layer: OrderedDict[str, _LayerType] = Field(
        default_factory=OrderedDict,
        example={'fc': {'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'M'}}
    )
    # Graph edges: start layer name -> {end layer name: connection Operation}.
    connection: Optional[Dict[str, Dict[str, Operation]]] = Field(
        default_factory=dict,
        example={'conv1': {'fc1': 'A'}}
    )
| 34.209486
| 115
| 0.669324
|
import abc
import inspect
from enum import Enum
from typing import Optional, Union, Tuple, Dict, OrderedDict
from pydantic import BaseModel, PositiveInt, conint, PositiveFloat, Field, validator
from typing_extensions import Literal
class Operation(Enum):
ADD = 'A'
DELETE = 'D'
MODIFY = 'M'
EMPTY = 'E'
class LayerType(Enum):
LINEAR = 'torch.nn.Linear'
CONV_1D = 'torch.nn.Conv1d'
CONV_2D = 'torch.nn.Conv2d'
RELU = 'torch.nn.ReLU'
TANH = 'torch.nn.Tanh'
BN_1D = 'torch.nn.BatchNorm1d'
BN_2D = 'torch.nn.BatchNorm2d'
MP_1D = 'torch.nn.MaxPool1d'
MP_2D = 'torch.nn.MaxPool2d'
AAP_1D = 'torch.nn.AdaptiveAvgPool1d'
AAP_2D = 'torch.nn.AdaptiveAvgPool2d'
class ModelLayer(BaseModel, abc.ABC):
op_: Operation
type_: LayerType
__required_type__: LayerType
@classmethod
def parse_layer_obj(cls, layer_obj):
kwargs = {'op_': Operation.EMPTY, 'type_': cls.__required_type__}
signature = inspect.signature(layer_obj.__init__)
for param in signature.parameters:
parser = getattr(cls, f'__{param}_parser__', lambda obj: getattr(obj, param))
kwargs[param] = parser(layer_obj)
return cls(**kwargs)
@validator('type_')
def check_layer_type(cls, layer_type: LayerType) -> LayerType:
if layer_type != cls.__required_type__:
raise ValueError(f'Expected {cls.__required_type__} but got {layer_type}')
return layer_type
class Linear(ModelLayer):
in_features: Optional[PositiveInt]
out_features: Optional[PositiveInt]
bias: Optional[bool]
__required_type__ = LayerType.LINEAR
@staticmethod
def __bias_parser__(layer_obj):
return layer_obj.bias is not None
class _ConvNd(ModelLayer, abc.ABC):
in_channels: Optional[PositiveInt]
out_channels: Optional[PositiveInt]
kernel_size: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
padding: Optional[Union[conint(ge=0), Tuple[conint(ge=0), ...]]]
dilation: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]]
groups: PositiveInt
bias: bool
padding_mode: Literal['zeros', 'reflect', 'replicate', 'circular']
@staticmethod
def __bias_parser__(layer_obj):
return layer_obj.bias is not None
class Conv1d(_ConvNd):
__required_type__ = LayerType.CONV_1D
class Conv2d(_ConvNd):
__required_type__ = LayerType.CONV_2D
class ReLU(ModelLayer):
inplace: Optional[bool]
__required_type__ = LayerType.RELU
class Tanh(ModelLayer):
__required_type__ = LayerType.TANH
class _BatchNorm(ModelLayer, abc.ABC):
num_features: Optional[PositiveInt]
eps: Optional[PositiveFloat]
momentum: Optional[Union[PositiveFloat, Literal['null']]]
affine: Optional[bool]
track_running_stats: Optional[bool]
class BatchNorm1d(_BatchNorm):
__required_type__ = LayerType.BN_1D
class BatchNorm2d(_BatchNorm):
__required_type__ = LayerType.BN_2D
class _MaxPool(ModelLayer, abc.ABC):
kernel_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
stride: Optional[Union[PositiveInt, Tuple[PositiveInt, ...]]] = None
padding: Union[conint(ge=0), Tuple[conint(ge=0), ...]] = 0
dilation: Union[PositiveInt, Tuple[PositiveInt, ...]] = 1
return_indices: bool = False
ceil_mode: bool = False
class MaxPool1d(_MaxPool):
__required_type__ = LayerType.MP_1D
class MaxPool2d(_MaxPool):
__required_type__ = LayerType.MP_2D
class _AdaptiveAvgPool(ModelLayer, abc.ABC):
output_size: Union[PositiveInt, Tuple[PositiveInt, ...]]
class AdaptiveAvgPool1d(_AdaptiveAvgPool):
__required_type__ = LayerType.AAP_1D
class AdaptiveAvgPool2d(_AdaptiveAvgPool):
__required_type__ = LayerType.AAP_2D
_LayerType = Union[Linear, Conv1d, Conv2d, ReLU, Tanh, BatchNorm1d, BatchNorm2d, MaxPool1d, MaxPool2d,
AdaptiveAvgPool1d, AdaptiveAvgPool2d]
class Structure(BaseModel):
layer: OrderedDict[str, _LayerType] = Field(
default_factory=OrderedDict,
example={'fc': {'out_features': 10, 'type_': 'torch.nn.Linear', 'op_': 'M'}}
)
connection: Optional[Dict[str, Dict[str, Operation]]] = Field(
default_factory=dict,
example={'conv1': {'fc1': 'A'}}
)
| true
| true
|
f704f2ca519f891d9320e62953ff55261e97b68a
| 5,431
|
py
|
Python
|
src/thexb/STAGE_topobinner.py
|
harris-2374/THEx
|
04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a
|
[
"MIT"
] | null | null | null |
src/thexb/STAGE_topobinner.py
|
harris-2374/THEx
|
04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a
|
[
"MIT"
] | null | null | null |
src/thexb/STAGE_topobinner.py
|
harris-2374/THEx
|
04c4f56eb2cf86b8f55ddd6edd3f48029296bf5a
|
[
"MIT"
] | null | null | null |
"""
Author: Andrew Harris
Python 3.8
"""
import logging
import os
import pandas as pd
from ete3 import Tree
from tqdm import tqdm
############################### Set up logger #################################
def set_logger_level(WORKING_DIR, LOG_LEVEL):
    """Configure and return the module logger for the topobinner stage.

    Records go both to WORKING_DIR/logs/topobin.log (timestamped) and to the
    console (bare messages). Any previous log file is removed so each run
    starts fresh.

    Args:
        WORKING_DIR: pathlib.Path of the working directory; must contain a
            'logs' subdirectory.
        LOG_LEVEL: logging level for the stage logger (e.g. logging.INFO).

    Returns:
        logging.Logger: the configured module-level logger.
    """
    logger = logging.getLogger(__name__)
    # Remove existing log file if present
    if os.path.exists(WORKING_DIR / 'logs/topobin.log'):
        os.remove(WORKING_DIR / 'logs/topobin.log')
    # Drop handlers left over from a previous call: getLogger(__name__)
    # returns the same object every time, so re-adding handlers on each call
    # duplicated every log record (fix).
    logger.handlers.clear()
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler(WORKING_DIR / 'logs/topobin.log')
    file_handler.setFormatter(formatter)
    # Console handler deliberately left unformatted (raw messages), as before.
    stream_handler = logging.StreamHandler()
    logger.addHandler(file_handler)
    logger.addHandler(stream_handler)
    logger.setLevel(LOG_LEVEL)
    return logger
############################## Helper Functions ###############################
def remove_heterotachy_info(l):
    """Strip bracketed heterotachy annotations from a newick string.

    ete3 does not support newick strings containing "[...]" blocks, so every
    bracketed span (brackets included) is removed.

    Args:
        l: newick string, possibly containing "[...]" annotations.

    Returns:
        str: the string with all bracketed spans removed; returned unchanged
        when no brackets are present.
    """
    if ("[" not in l) and ("]" not in l):
        return l
    # Single pass: copy characters, skipping anything inside brackets.
    # (Also tolerates unbalanced brackets, which previously raised
    # IndexError via mismatched open/close index lists.)
    kept = []
    inside = False
    for ch in l:
        if ch == "[":
            inside = True
        elif ch == "]":
            inside = False
        elif not inside:
            kept.append(ch)
    return "".join(kept)
def tv_header_validation(df):
    """Return True if the first four column headers are the required ones.

    Required order: ["Chromosome", "Window", "NewickTree", "TopologyID"].

    Fix: the previous implementation tested the headers with ``assert``,
    which is stripped under ``python -O`` and would have made validation
    always succeed; the comparison is now returned directly.

    Args:
        df: Tree Viewer DataFrame to validate.

    Returns:
        bool: True when the first four headers match, else False.
    """
    required_cols = ["Chromosome", "Window", "NewickTree", "TopologyID"]
    return list(df.columns[:4]) == required_cols
############################### Main Function ################################
def topobinner(TREEVIEWER_FN, UPDATED_TV_FILENAME, TOPOBIN_ROOTED, WORKING_DIR, MULTIPROCESS, LOG_LEVEL):
    """Bin the window trees of a Tree Viewer file into identical topologies.

    Trees are compared pairwise with ete3's Robinson-Foulds distance
    (rf == 0 means same topology). Each row's TopologyID is filled with a
    rank-based name ("Tree001" = most common topology, ...) and the updated
    file is written to UPDATED_TV_FILENAME.

    Args:
        TREEVIEWER_FN: input Tree Viewer Excel file.
        UPDATED_TV_FILENAME: output Excel file with TopologyID filled in.
        TOPOBIN_ROOTED: "Y" to compare trees as rooted, anything else unrooted.
        WORKING_DIR: working directory (log file goes to logs/topobin.log).
        MULTIPROCESS: accepted for interface compatibility; currently unused.
        LOG_LEVEL: logging level for the stage logger.

    Raises:
        AssertionError: if the first four column headers are not
            ["Chromosome", "Window", "NewickTree", "TopologyID"].
    """
    logger = set_logger_level(WORKING_DIR, LOG_LEVEL)  # Setup log file level
    # Load in Tree Viewer excel file
    df = pd.read_excel(TREEVIEWER_FN, engine='openpyxl')
    df = df.reset_index(drop=True)
    # Validate headers
    if not tv_header_validation(df):
        raise AssertionError("Input file headers are not valid, please ensure required headers are correct.")
    df['TopologyID'] = ['NULL'] * len(df)
    trees = df['NewickTree']
    topologies = dict()
    logger.info(f"{len(trees):,} trees to run")
    # ete3's Tree.compare(unrooted=...) flag is the inverse of the user's
    # "rooted?" answer, hence the apparent inversion below.
    if TOPOBIN_ROOTED == "Y":
        TOPOBIN_ROOTED = False
    else:
        TOPOBIN_ROOTED = True
    # Bin trees. `topologies` maps the row index of the first tree seen with
    # a given topology to {'count': n_trees, 'idx': [row indices]}.
    tqdm_text = "#" + "{}".format("run1").zfill(3)
    with tqdm(total=len(trees), desc=tqdm_text, ascii=True) as pbar:
        for n, t in enumerate(trees):
            # Skip windows with no tree
            if t == "NoTree":
                pbar.update(1)
                continue
            # First usable tree seeds the collection
            if len(topologies.keys()) == 0:
                topologies[n] = {'count': 1, 'idx': [n]}
                pbar.update(1)
                continue
            # Parse the candidate tree once: it is invariant across all the
            # comparisons below (it was previously re-parsed per comparison).
            t1 = Tree(remove_heterotachy_info(t))
            new_topology = True
            for idx in topologies.keys():
                if df.at[idx, 'NewickTree'] == "NoTree":
                    continue
                t2 = Tree(remove_heterotachy_info(df.at[idx, 'NewickTree']))
                comparison = t1.compare(t2, unrooted=TOPOBIN_ROOTED)
                if comparison['rf'] == 0:
                    # Same topology as an existing bin
                    topologies[idx]['count'] += 1
                    topologies[idx]['idx'].append(n)
                    new_topology = False
                    break
            if new_topology:
                topologies[n] = {'count': 1, 'idx': [n]}
            pbar.update(1)
    # Sort bins by descending count so Tree001 is the most common topology
    topologies = {k: v for k, v in sorted(topologies.items(), key=lambda item: item[1]['count'], reverse=True)}
    num_topologies = len(topologies.keys())
    # Zero-pad width for topology names.
    # Fix: the old `100 < n < 1000` test excluded exactly 100 topologies,
    # which fell through to the widest padding.
    if num_topologies < 100:
        zfillnum = 3
    elif num_topologies < 1000:
        zfillnum = 4
    else:
        zfillnum = 5
    # Summary table logged at the end (rank / name / tree count per topology)
    overview_df = pd.DataFrame(
        {
            "TopologyID": [("Tree" + "{}".format(str(i)).zfill(zfillnum)) for i in range(1, num_topologies + 1)],
            "Count": [topologies[i]["count"] for i in topologies.keys()],
            "Rank": [i for i in range(1, num_topologies + 1)],
        }
    )
    # Write the rank-based names back into the DataFrame
    for topoCount, topo in enumerate(topologies.keys(), start=1):
        topoName = "Tree" + "{}".format(topoCount).zfill(zfillnum)
        for i in topologies[topo]['idx']:
            df.at[i, 'TopologyID'] = topoName
    # Output updated Tree Viewer file
    df.to_excel(UPDATED_TV_FILENAME, index=False, engine='openpyxl')
    logger.info(f"{overview_df}")
    return
| 38.51773
| 119
| 0.558645
|
import logging
import os
import pandas as pd
from ete3 import Tree
from tqdm import tqdm
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.