input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
"""
A video controller for ai2thor
Basic example:
from ai2thor.controller import VideoController
with VideoController() as vc:
vc.play(vc.MoveAhead())
vc.wait(5)
vc.play(vc.MoveAhead())
vc.export_video('thor.mp4')
Known issues:
- Multi agent rotations don't work (since TeleportFull breaks when passing in an AgentID)
"""
from ai2thor.controller import Controller
import cv2
import os
from PIL import Image
import math
from math import erf, sqrt
class VideoController(Controller):
def __init__(
    self,
    cam_rot=None,
    cam_pos=None,
    cam_fov=60,
    **controller_kwargs,
):
    """Start the controller and attach a third party camera used for recording.

    Params
    - cam_rot (dict)=None: initial camera rotation; defaults to dict(x=85, y=225, z=0).
    - cam_pos (dict)=None: initial camera position; defaults to dict(x=-1.25, y=7.0, z=-1.0).
    - cam_fov (float)=60: initial camera field of view in degrees.
    - controller_kwargs: forwarded to ai2thor.controller.Controller.
    """
    # frames recorded by play(); consumed by export_video()/export_frames()
    self.saved_frames = []
    self.ceiling_off = False
    # avoid mutable default arguments: the historical defaults are built
    # fresh per call instead of being shared module-level dict objects
    self.initial_cam_rot = dict(x=85, y=225, z=0) if cam_rot is None else cam_rot.copy()
    self.initial_cam_pos = dict(x=-1.25, y=7.0, z=-1.0) if cam_pos is None else cam_pos.copy()
    self.initial_cam_fov = cam_fov
    super().__init__(continuous=True, **controller_kwargs)
    self.step(
        action="AddThirdPartyCamera",
        rotation=self.initial_cam_rot,
        position=self.initial_cam_pos,
        fieldOfView=self.initial_cam_fov,
    )
def reset(self, scene=None, **init_params):
    """Changes the scene and adds a new third party camera to the initial position.

    Params
    - scene (str)=None: name of the scene to load; None reloads the current scene.
    - init_params: forwarded to Controller.reset().

    Returns the event from re-adding the third party camera (reset destroys
    the camera added in __init__, so it must be re-created here).
    """
    super().reset(scene, **init_params)
    return self.step(
        action="AddThirdPartyCamera",
        rotation=self.initial_cam_rot,
        position=self.initial_cam_pos,
        fieldOfView=self.initial_cam_fov,
    )
def play(self, *action_generators):
    """Apply multiple actions at the same time (e.g., move multiple agents,
    and pan the camera around the scene), recording one camera frame per tick.

    Examples
        vc.play(vc.MoveAhead())
        vc.wait(60)
        vc.play(vc.MoveAhead(), vc.OrbitCameraAnimation(0, 0, 0))

    Params
    - action_generators: generators (e.g., MoveAhead(<params>)) whose next()
      executes one incremental step each.
    """
    while True:
        # advance every generator by one action; exhausted generators
        # return the False sentinel instead of raising StopIteration
        next_actions = [next(generator, False) for generator in action_generators]
        # use identity comparison: an event object should never be
        # mistaken for the False sentinel
        next_actions = [action for action in next_actions if action is not False]
        if not next_actions:
            # all generators finished; no action ran this tick, so do NOT
            # record a frame (the original recorded one extra duplicate
            # frame here after the last real action)
            break
        # record the third party camera frame after all actions executed
        self.saved_frames.append(self.last_event.third_party_camera_frames[0])
def _wait(self, frames=60):
    """Generator of no-op `Pass` actions; consumed by self.wait()."""
    emitted = 0
    while emitted < frames:
        emitted += 1
        yield self.step(action="Pass")
def wait(self, frames=60):
    """Do absolutely nothing to the agent. Keep the current frame still, as is.

    Params
    - frames (int)=60: The duration of the do nothing action.
      Note: videos are typically 30fps, so 60 frames is about 2 seconds.

    Example: vc.wait(60)"""
    self.play(self._wait(frames))
def ToggleCeiling(self):
    """Hides the ceiling. This method is greatly preferred over calling
    step(action='ToggleMapView') directly, since it allows for automatic
    ceiling toggles in the future. (e.g., if the camera is above the
    height of the room, toggle off the ceiling, and vice versa.

    Tracks the current state in self.ceiling_off, which other methods
    (e.g., AbsoluteCameraAnimation) read to decide whether to toggle back."""
    self.ceiling_off = not self.ceiling_off
    return self.step(action="ToggleMapView")
def _cdf(self, x, std_dev=0.5, mean=0.0):
    """Cumulative distribution function of a normal N(mean, std_dev^2) at x."""
    z = (x - mean) / sqrt(2.0 * std_dev ** 2)
    return (1.0 + erf(z)) / 2.0
def _linear_to_smooth(self, curr_frame, total_frames, std_dev=0.5, min_val=3):
    """Map linear animation progress onto a smoothed (sigmoid-like) curve.

    Walks from -min_val to +min_val standard deviations along a normal
    CDF, so the animation eases in and out instead of moving linearly.
    curr_frame should be 1-indexed and end with total_frames.
    """
    # use a real exception: `assert` statements are stripped under `python -O`
    if min_val <= 0:
        raise ValueError("min_val should be > 0")
    if curr_frame == total_frames:
        # pin the final frame to exactly 1 to remove drifting
        return 1
    return self._cdf(
        -min_val + 2 * min_val * (curr_frame / total_frames), std_dev=std_dev
    )
def _move(self, actionName, moveMagnitude, frames, smoothAnimation, agentId=None):
    """Yields a generator full of move commands to move the agent incrementally.
    Used as a general move command for MoveAhead, MoveRight, MoveLeft, MoveBack.

    Params
    - actionName (str): the ai2thor move action to step.
    - moveMagnitude (float): total distance to cover over the animation.
    - frames (int): number of incremental steps.
    - smoothAnimation (bool): eased (gaussian CDF) vs. linear increments.
    - agentId: optional agent to move in multi-agent scenes.
    """
    # build the optional agentId kwarg once instead of duplicating every
    # yield for the single- and multi-agent cases
    extra = {} if agentId is None else {"agentId": agentId}
    last_magnitude = 0
    for i in range(frames):
        if smoothAnimation:
            # cumulative eased magnitude; step by the delta so the total
            # displacement still sums to moveMagnitude
            next_magnitude = (
                self._linear_to_smooth(i + 1, frames, std_dev=1) * moveMagnitude
            )
            yield self.step(
                action=actionName,
                moveMagnitude=next_magnitude - last_magnitude,
                **extra,
            )
            last_magnitude = next_magnitude
        else:
            # smoothAnimation = False => linear animation in equal increments
            yield self.step(
                action=actionName, moveMagnitude=moveMagnitude / frames, **extra
            )
def _rotate(self, direction, rotateDegrees, frames, smoothAnimation, agentId=None):
    """Yields a generator full of step(action='TeleportFull') commands to rotate
    the agent incrementally.

    Params
    - direction (str): 'left' or 'right' (case-insensitive).
    - rotateDegrees (float): total degrees to rotate over the animation.
    - frames (int): number of incremental teleports.
    - smoothAnimation (bool): eased vs. linear rotation progress.
    - agentId: unsupported; TeleportFull breaks when passed an agentId.
    """
    if agentId is not None:
        raise ValueError("rotations do not yet work with multiple agents")
    direction = direction.lower()
    # raise instead of assert so the check survives `python -O`
    if direction not in ("left", "right"):
        raise ValueError("direction must be 'left' or 'right'")
    if direction == "left":
        rotateDegrees *= -1
    # get the initial rotation
    y0 = self.last_event.metadata["agent"]["rotation"]["y"]
    for i in range(frames):
        # keep the position the same while only the rotation changes
        p = self.last_event.metadata["agent"]["position"]
        # single yield with the progress fraction factored out of the
        # previously duplicated smooth/linear branches
        fraction = (
            self._linear_to_smooth(i + 1, frames, std_dev=1)
            if smoothAnimation
            else (i + 1) / frames
        )
        yield self.step(
            action="TeleportFull",
            rotation=y0 + rotateDegrees * fraction,
            **p,
        )
def MoveAhead(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
    """Move the agent forward `moveMagnitude` over `frames` incremental steps."""
    return self._move(
        "MoveAhead", moveMagnitude, frames, smoothAnimation, agentId=agentId
    )
def MoveBack(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
    """Move the agent backward `moveMagnitude` over `frames` incremental steps."""
    return self._move(
        "MoveBack", moveMagnitude, frames, smoothAnimation, agentId=agentId
    )
def MoveLeft(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
    """Strafe the agent left `moveMagnitude` over `frames` incremental steps."""
    return self._move(
        "MoveLeft", moveMagnitude, frames, smoothAnimation, agentId=agentId
    )
def MoveRight(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
    """Strafe the agent right `moveMagnitude` over `frames` incremental steps."""
    return self._move(
        "MoveRight", moveMagnitude, frames, smoothAnimation, agentId=agentId
    )
def RotateRight(
    self, rotateDegrees=90, frames=60, smoothAnimation=True, agentId=None
):
    """Rotate the agent right `rotateDegrees` via incremental teleporting."""
    # do incremental teleporting
    return self._rotate(
        "right", rotateDegrees, frames, smoothAnimation, agentId=agentId
    )
def RotateLeft(
    self, rotateDegrees=90, frames=60, smoothAnimation=True, agentId=None
):
    """Rotate the agent left `rotateDegrees` via incremental teleporting."""
    # do incremental teleporting
    return self._rotate(
        "left", rotateDegrees, frames, smoothAnimation, agentId=agentId
    )
def OrbitCameraAnimation(
    self,
    centerX,
    centerZ,
    posY,
    dx=6,
    dz=6,
    xAngle=55,
    frames=60,
    orbit_degrees_per_frame=0.5,
):
    """Orbits the third party camera around (centerX, centerZ) at height posY.

    The camera circles on an ellipse with radii dx/dz, pitched down by xAngle,
    advancing orbit_degrees_per_frame degrees each frame.
    Example: https://www.youtube.com/watch?v=KcELPpdN770&feature=youtu.be&t=14"""
    total_degrees = frames * orbit_degrees_per_frame
    # starting yaw of the third party camera
    start_yaw = self.last_event.metadata["thirdPartyCameras"][0]["rotation"]["y"]
    for step_index in range(1, frames + 1):
        yAngle = start_yaw + total_degrees * step_index / frames
        theta = math.radians(yAngle)
        yield self.step(
            action="UpdateThirdPartyCamera",
            thirdPartyCameraId=0,
            rotation={"x": xAngle, "y": yAngle, "z": 0},
            position={
                "x": centerX - dx * math.sin(theta),
                "y": posY,
                "z": centerZ - dz * math.cos(theta),
            },
        )
def RelativeCameraAnimation(self, px=0, py=0, pz=0, rx=0, ry=0, rz=0, frames=60):
    """Linear interpolation between the current camera position and rotation
    and the final camera position, given by deltas to the current values.

    Params
    - px (int)=0: x offset from the current camera position.
    - py (int)=0: y offset from the current camera position.
    - pz (int)=0: z offset from the current camera position.
    - rx (int)=0: x offset from the current camera rotation.
    - ry (int)=0: y offset from the current camera rotation.
    - rz (int)=0: z offset from the current camera rotation.
    - frames (int)=60: The duration of the animation.
      Note: videos are typically 30fps."""
    for _ in range(frames):
        # re-read the camera each frame so each step adds 1/frames of the
        # delta on top of the camera's latest actual state
        cam = self.last_event.metadata["thirdPartyCameras"][0]
        pos, rot = cam["position"], cam["rotation"]
        yield self.step(
            action="UpdateThirdPartyCamera",
            thirdPartyCameraId=0,
            rotation={
                "x": rot["x"] + rx / frames,
                "y": rot["y"] + ry / frames,
                "z": rot["z"] + rz / frames,
            },
            position={
                "x": pos["x"] + px / frames,
                "y": pos["y"] + py / frames,
                "z": pos["z"] + pz / frames,
            },
        )
def AbsoluteCameraAnimation(
    self,
    px,
    py,
    pz,
    rx,
    ry,
    rz,
    frames=60,
    smartSkybox=True,
    FOVstart=None,
    FOVend=None,
    visibleAgents=True,
):
    """Linearly animate the third party camera to an absolute pose.

    Params
    - px, py, pz: target camera position.
    - rx, ry, rz: target camera rotation.
    - frames (int)=60: animation duration.
    - smartSkybox (bool)=True: detect the ceiling height and paint the skybox
      black only while the camera is above the ceiling.
    - FOVstart/FOVend: when both given, linearly animate the field of view.
    - visibleAgents (bool)=True: currently unused by this method.
    """
    cam = self.last_event.metadata["thirdPartyCameras"][0]
    p0, r0 = cam["position"], cam["rotation"]
    maxY = None  # ceiling height; only known when smartSkybox is enabled
    if smartSkybox:
        # toggles on and off (to give the same final result) to find the
        # height of the ceiling from either event's actionReturn
        event0 = self.step(action="ToggleMapView")
        event1 = self.step(action="ToggleMapView")
        if event0.metadata["actionReturn"]:
            maxY = event0.metadata["actionReturn"]["y"]
        else:
            maxY = event1.metadata["actionReturn"]["y"]
    for i in range(1, frames + 1):
        cam_y = p0["y"] + (py - p0["y"]) / frames * i
        # bug fix: guard on smartSkybox -- the original read maxY here even
        # when smartSkybox=False, raising NameError if the ceiling was off
        if smartSkybox and self.ceiling_off and maxY > cam_y:
            # camera dropped back below the ceiling: turn the ceiling on
            self.ToggleCeiling()
        kwargs = {
            "action": "UpdateThirdPartyCamera",
            "thirdPartyCameraId": 0,
            "rotation": {
                "x": r0["x"] + (rx - r0["x"]) / frames * i,
                "y": r0["y"] + (ry - r0["y"]) / frames * i,
                "z": r0["z"] + (rz - r0["z"]) / frames * i,
            },
            "position": {
                "x": p0["x"] + (px - p0["x"]) / frames * i,
                "y": cam_y,
                "z": p0["z"] + (pz - p0["z"]) / frames * i,
            },
        }
        # enables linear animation changes to the camera FOV
        if FOVstart is not None and FOVend is not None:
            kwargs["fieldOfView"] = FOVstart + (FOVend - FOVstart) / frames * i
        if not (smartSkybox and maxY > cam_y):
            kwargs["skyboxColor"] = "black"
        yield self.step(**kwargs)
def LookUp(self):
    """Tilt the agent's view upward. Not yet implemented."""
    raise NotImplementedError()
def LookDown(self):
    """Tilt the agent's view downward. Not yet implemented."""
    raise NotImplementedError()
def Stand(self):
    """Note: have not found an easy way to move the agent in-between
    stand and crouch."""
    raise NotImplementedError()
def Crouch(self):
    """Note: have not found an easy way to move the agent in-between
    stand and crouch."""
    raise NotImplementedError()
def export_video(self, path):
    """Merges all the saved frames into a 30fps .mp4 video and saves it to `path`.

    Does nothing if no frames have been recorded yet. An existing file at
    `path` is overwritten.
    """
    if self.saved_frames:
        # bug fix: the original checked path[:-4] (everything BUT the last
        # four characters) instead of the extension, so ".mp4" was appended
        # to every path, producing e.g. "thor.mp4.mp4"
        path = path if path.endswith(".mp4") else path + ".mp4"
        if os.path.exists(path):
            os.remove(path)
        # frame shape is (height, width, channels); VideoWriter wants (width, height)
        height, width = self.saved_frames[0].shape[0], self.saved_frames[0].shape[1]
        video = cv2.VideoWriter(
            path,
            cv2.VideoWriter_fourcc(*"DIVX"),
            30,  # frames per second
            (width, height),
        )
        for frame in self.saved_frames:
            # frames are stored as RGB; OpenCV writes BGR (the conversion
            # constant simply swaps the channel order either way)
            video.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        cv2.destroyAllWindows()
        video.release()
def export_frames(self, path, file_type=".png"):
"""Exports all of the presently frames to the `path` directory.
The frames are numbered in sequential order (starting with 0)."""
for i | |
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 15:42:02 2019
####################################################################
#### ####
#### (married to the) game module ####
#### ####
####################################################################
This module houses the Search subclass Game.
Like Search, a Game object can be created with or without parameters:
g = Game()
or
g = Game(GameID='1041900405', LeagueID='10', ...etc...)
or
params = {'GameID': '1041900405', 'LeagueID': '10', ...etc...}
g = Game(**params)
Perform a search by calling any Game class method, with or without params.
Game's methods may also be used without creating a League object:
Game().method(**params)
If no parameters are passed, default parameters are used and data
is returned relevant to the series-clinching 5th Game of the 2019
WNBA Finals.
For information on the search history capabilities inherited from the Search
class, please see the search module documentation.
"""
from wnbAPI.search import Search, currentSeason, requests, headers, DEBUG
class Game(Search):
'''
Extends Search class with methods to access endpoints for Game data.
Game Methods:
pbp() *** Does not use core search method
http://data.wnba.com/data/5s/v2015/json/mobile_teams/wnba/2019/scores/pbp/1041900405_1_pbp.json
scoreboard()
https://stats.wnba.com/stats/scoreboard
scoreboardv2()
https://stats.wnba.com/stats/scoreboardv2
playbyplay()
https://stats.wnba.com/stats/playbyplay
uses: playbyplayv2()
https://stats.wnba.com/stats/playbyplayv2
Game default required params:
requiredParams = {
'GameID': '1041900405',# requires int or string GameID
'LeagueID': '10' # 10 is WNBA. 00 is NBA and 30 is G-League
}
'''
def __init__(self, **params):
    """Initialize a Game search, forwarding any query parameters to Search.

    Sets the endpoint map used by the class methods and the default
    required parameters applied when a method is called without params.
    """
    Search.__init__(self, **params)
    # create dictionary of the endpoints assigned to each method.
    # these could also have been stored inside each method,
    # and that might have been a better choice, but having a list of the
    # accessible endpoints built into the object is convenient.
    self.endpoints = {
        'scoreboard':
            'https://stats.wnba.com/stats/scoreboard',
        'scoreboardv2':
            'https://stats.wnba.com/stats/scoreboardv2',
        'playbyplay':'https://stats.wnba.com/stats/playbyplay',
        'playbyplayv2':'https://stats.wnba.com/stats/playbyplayv2',
    }
    # set default required Parameters. These parameters are required for the
    # majority of the methods in the class. When not required, they can
    # be overridden at the method level, and most of them can be passed
    # without affecting the return for the methods where they are not required.
    self.requiredParams = {
        'GameID': '1041900405',  # series-clinching Game 5 of the 2019 WNBA Finals
        'LeagueID': '10'         # 10 is WNBA. 00 is NBA and 30 is G-League
    }
def pbp(self, **params):
'''
Does not use core search method but is the most detailed play by
play data, and is the only dataset currently known to me that
provides the locations of plays on the court.
uses:
http://data.wnba.com/data/5s/v2015/json/mobile_teams/wnba/2019/scores/pbp/1041900405_1_pbp.json
AFFECTED ONLY BY GameID, Period, and Season.
Unlike the other endpoints it affects, the Period parameter
for this endpoint requires the string 'full' rather than an
empty string to query data for the entire game.
**** THIS ENDPOINT RETURNS A COMPLETELY DIFFERENT FORMAT THAN ****
**** ANY OTHER ENDPOINT IN THE PACKAGE, BUT IS INDISPENSABLE ****
**** BECAUSE OF THE UNIQUE INFORMATION IT INCLUDES. NO CHANGE TO ****
**** DATAFRAME HAS BEEN MADE TO ACCOMMODATE THE FORMATTING ****
**** OF THESE RESULTS. UNTIL THAT IS FIXED THIS METHOD SHOULD NOT****
**** BE USED IN CONJUNCTION WITH THE DATAFRAME METHOD ****
SAMPLE DATA STRUCTURE:
if 'full'
{'g':
{ 'mid': integer. not sure of meaning.,
'gid': string of numbers representing unique GameID,
'gcode': string made of 8 digit date + / + away team abbrev.
+ home team abbrev. i.e. '20191010/CONWAS',
'next': in theory returns URL string for pbp endpoint of the
next game (chronologically) of the WNBA season.
Unfortunately, and HIGHLY disappointingly, this
uses an inherited system from the NBA's version
of the same endpoint and does NOT return a functional
URL for WNBA games
'pd': a list of periods dictionaries, each representing
one period of the game.
[{'p': period number as int,
'pla': a chronological list of dicts, each
representing a play or game event, for
all plays recorded during the period
[{'evt': int representing # of events
in the game up to present, meaning
that evt 30 is the 30th event.
Does not always increment by one,
which may be caused missing plays
but as yet no other evidence of
missing plays has been found.
'cl': MM:SS - string representing time
remaining in period when
the event occurred,
'de': Short string description of event,
'locX': Int between -250 and 250. This int
describes the x value of a coordinate
on half-court grid, where -250 and
250 represent the side-lines, 0
represents the center of the court
and 10 units is equivalent to 1 foot
'locY': Int between -40 and 860, OR -80.
locY describes the locations of the
event between the two baselines,
where 0 represents the defense's
basket, -40 represents the baseline
below that basket, 860 represents
the opposite baseline, and 10 units
is equivalent to 1 foot. -80 is a
standard value for events which
do not necessarily have a location
on the court, such as timeout calls
or player substitutions.
All events with locY -80 have a locX
of 0, and should be confidently omitted
from any charts created using these
coordinates.
********BIG NOTE**********
locY is ALWAYS relative to the
defensive teams basket, meaning
that mosts are plotted on
the same half-court grid. The only
events with locY values over 430
(which represents the half-court
line) are those that occur in the
backcourt, such as backcourt
fouls & turnovers, fullcourt heaves,
etc. The data seems designed to
work mostcomfortably with half-court
charts, like those made by tha legend
<NAME> (if you're
unfamiliar with Mr. Goldsberry's
work, you have no business trying
to use this package. Shame. Shame).
'opt1': int. unknown meaning. defaults to 0
'opt2': int. unknown meaning. defaults to 0
'mtype':int. unknown meaning. defaults to 0,
'etype': int code describing the type of
event. Known values:
1 is a made shot,
2 is a missed shot,
3 is a free throw
(made or missed),
4 is a rebound,
5 is a turnover,
6 is a personal foul,
7 is a team penalty,
8 is a substition,
9 is a team timeout,
13 is the end of a period
if an event fits two etypes,
such as a turnover caused by
an offensive foul, it is listed
twice, with a separate 'evt' value
for each entry.
'opid': '' or int representing player id of
the opposing player (ie the defender
on a jump shot). If no opposing
player was recorded, ''.
'tid': int representing the TeamID of the
primary team in the event (the
offensive team on a shot, the
team calling timeout on a timeout).
Lists 0 if the event is not specific
to either team
'pid': int representing the PlayerID of the
primary player in the event (shooter,
rebounder, player committing foul)
Lists 0 if the event is not specific
to any player,
'hs': int score of the home team at the time
of the event,
'vs': int score of the visiting team at the
time of the event,
'epid': int representing the PlayerID of
a secondary player involved in
the event (such as the player
who assisted on a made shot)
Lists '' if no secondary player
was credited on the play,
'oftid': int representing the TeamID of the
team that was on offense at the
time of the event. For all shots,
the 'oftid' value will match the
'tid' value,
'ord': int with length between 5 and 7.
the leftmost 3 digits seem to
correspond with 'evt', but are
usually slightly lower. These may
represent # of possessions in the
game. The 4 rightmost digits are
almost always 0000. Observed
exceptions so far have always
incremented the prior event's 'ord'
value by 1, leaving the leftmost
digits unchanged. | |
account on category of this product are same.'))
if acc_src == acc_valuation:
raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Input Account of this product and Valuation account on category of this product are same.'))
if not acc_src:
raise osv.except_osv(_('Error!'), _('Please define stock input account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not acc_dest:
raise osv.except_osv(_('Error!'), _('Please define stock output account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not journal_id:
raise osv.except_osv(_('Error!'), _('Please define journal on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
if not acc_valuation:
raise osv.except_osv(_('Error!'), _('Please define inventory valuation account on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
return journal_id, acc_src, acc_dest, acc_valuation
def _get_reference_accounting_values_for_valuation(self, cr, uid, move, context=None):
    """
    Return the reference amount and reference currency representing the inventory valuation for this move.
    These reference values should possibly be converted before being posted in Journals to adapt to the primary
    and secondary currencies of the relevant accounts.

    :param move: browse record of the stock.move being valued
    :return: (reference_amount, reference_currency_id) tuple
    """
    product_uom_obj = self.pool.get('product.uom')
    # by default the reference currency is that of the move's company
    reference_currency_id = move.company_id.currency_id.id
    # convert the moved quantity into the product's default UoM so unit
    # prices (expressed in that UoM) can be applied directly
    default_uom = move.product_id.uom_id.id
    qty = product_uom_obj._compute_qty(cr, uid, move.product_uom.id, move.product_qty, default_uom)
    # if product is set to average price and a specific value was entered in the picking wizard,
    # we use it
    if move.location_dest_id.usage != 'internal' and move.product_id.cost_method == 'average':
        # outgoing average-cost move: value at the current standard price
        reference_amount = qty * move.product_id.standard_price
    elif move.product_id.cost_method == 'average' and move.price_unit:
        # an explicit unit price was provided (e.g. via the picking wizard)
        reference_amount = qty * move.price_unit
        reference_currency_id = move.price_currency_id.id or reference_currency_id
    # Otherwise we default to the company's valuation price type, considering that the values of the
    # valuation field are expressed in the default currency of the move's company.
    else:
        if context is None:
            context = {}
        # force the price lookup into the company currency
        currency_ctx = dict(context, currency_id = move.company_id.currency_id.id)
        amount_unit = move.product_id.price_get('standard_price', context=currency_ctx)[move.product_id.id]
        reference_amount = amount_unit * qty
    return reference_amount, reference_currency_id
def _create_product_valuation_moves(self, cr, uid, move, context=None):
    """
    Generate the appropriate accounting moves if the product being moves is subject
    to real_time valuation tracking, and the source or destination location is
    a transit location or is outside of the company.
    """
    if move.product_id.valuation == 'real_time': # FIXME: product valuation should perhaps be a property?
        if context is None:
            context = {}
        # accounting data must be resolved under the company that owns each location
        src_company_ctx = dict(context,force_company=move.location_id.company_id.id)
        dest_company_ctx = dict(context,force_company=move.location_dest_id.company_id.id)
        account_moves = []
        # Outgoing moves (or cross-company output part)
        if move.location_id.company_id \
            and (move.location_id.usage == 'internal' and move.location_dest_id.usage != 'internal'\
                 or move.location_id.company_id != move.location_dest_id.company_id):
            journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, src_company_ctx)
            reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
            #returning goods to supplier
            if move.location_dest_id.usage == 'supplier':
                # credit valuation, debit the stock input account
                account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_src, reference_amount, reference_currency_id, context))]
            else:
                account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_dest, reference_amount, reference_currency_id, context))]
        # Incoming moves (or cross-company input part)
        if move.location_dest_id.company_id \
            and (move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal'\
                 or move.location_id.company_id != move.location_dest_id.company_id):
            journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, dest_company_ctx)
            # NOTE(review): the reference values here are computed with
            # src_company_ctx while the accounting data above uses
            # dest_company_ctx -- looks inconsistent; confirm whether this
            # is intentional before changing it
            reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
            #goods return from customer
            if move.location_id.usage == 'customer':
                # credit the stock output account, debit valuation
                account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_dest, acc_valuation, reference_amount, reference_currency_id, context))]
            else:
                account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_src, acc_valuation, reference_amount, reference_currency_id, context))]
        # post one account.move per (journal, lines) pair collected above
        move_obj = self.pool.get('account.move')
        for j_id, move_lines in account_moves:
            move_obj.create(cr, uid,
                    {
                     'journal_id': j_id,
                     'line_id': move_lines,
                     'ref': move.picking_id and move.picking_id.name}, context=context)
def action_done(self, cr, uid, ids, context=None):
    """ Makes the move done and if all moves are done, it will finish the picking.

    Confirms any draft moves first, triggers downstream moves when this is
    the last pending upstream move, posts valuation entries, then marks the
    moves done and pokes the workflow for both moves and pickings.
    @return: True
    """
    picking_ids = []
    move_ids = []
    wf_service = netsvc.LocalService("workflow")
    if context is None:
        context = {}
    todo = []
    # first pass: draft moves must be confirmed before they can be done
    for move in self.browse(cr, uid, ids, context=context):
        if move.state=="draft":
            todo.append(move.id)
    if todo:
        self.action_confirm(cr, uid, todo, context=context)
        todo = []
    for move in self.browse(cr, uid, ids, context=context):
        if move.state in ['done','cancel']:
            # already finished or cancelled: nothing to do
            continue
        move_ids.append(move.id)
        if move.picking_id:
            picking_ids.append(move.picking_id.id)
        if move.move_dest_id.id and (move.state != 'done'):
            # Downstream move should only be triggered if this move is the last pending upstream move
            other_upstream_move_ids = self.search(cr, uid, [('id','!=',move.id),('state','not in',['done','cancel']),
                                ('move_dest_id','=',move.move_dest_id.id)], context=context)
            if not other_upstream_move_ids:
                self.write(cr, uid, [move.id], {'move_history_ids': [(4, move.move_dest_id.id)]})
                if move.move_dest_id.state in ('waiting', 'confirmed'):
                    # reserve the downstream move and wake its picking workflow
                    self.force_assign(cr, uid, [move.move_dest_id.id], context=context)
                    if move.move_dest_id.picking_id:
                        wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
                    if move.move_dest_id.auto_validate:
                        self.action_done(cr, uid, [move.move_dest_id.id], context=context)
        # post real-time valuation entries for this move (no-op otherwise)
        self._create_product_valuation_moves(cr, uid, move, context=context)
        if move.state not in ('confirmed','done','assigned'):
            todo.append(move.id)
    if todo:
        self.action_confirm(cr, uid, todo, context=context)
    # mark everything done with the current server timestamp
    self.write(cr, uid, move_ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
    # notify the workflow engine for each move and each affected picking
    for id in move_ids:
        wf_service.trg_trigger(uid, 'stock.move', id, cr)
    for pick_id in picking_ids:
        wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
    return True
def _create_account_move_line(self, cr, uid, move, src_account_id, dest_account_id, reference_amount, reference_currency_id, context=None):
    """
    Generate the account.move.line values to post to track the stock valuation difference due to the
    processing of the given stock move.

    :return: list of (0, 0, vals) tuples: one debit line on dest_account_id
             and one credit line on src_account_id for reference_amount.
    """
    # prepare default values considering that the destination accounts have the reference_currency_id as their main currency
    partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
    debit_line_vals = {
                'name': move.name,
                'product_id': move.product_id and move.product_id.id or False,
                'quantity': move.product_qty,
                'ref': move.picking_id and move.picking_id.name or False,
                'date': time.strftime('%Y-%m-%d'),
                'partner_id': partner_id,
                'debit': reference_amount,
                'account_id': dest_account_id,
    }
    credit_line_vals = {
                'name': move.name,
                'product_id': move.product_id and move.product_id.id or False,
                'quantity': move.product_qty,
                'ref': move.picking_id and move.picking_id.name or False,
                'date': time.strftime('%Y-%m-%d'),
                'partner_id': partner_id,
                'credit': reference_amount,
                'account_id': src_account_id,
    }
    # if we are posting to accounts in a different currency, provide correct values in both currencies correctly
    # when compatible with the optional secondary currency on the account.
    # Financial Accounts only accept amounts in secondary currencies if there's no secondary currency on the account
    # or if it's the same as that of the secondary amount being posted.
    account_obj = self.pool.get('account.account')
    src_acct, dest_acct = account_obj.browse(cr, uid, [src_account_id, dest_account_id], context=context)
    src_main_currency_id = src_acct.company_id.currency_id.id
    dest_main_currency_id = dest_acct.company_id.currency_id.id
    cur_obj = self.pool.get('res.currency')
    if reference_currency_id != src_main_currency_id:
        # fix credit line: convert the amount into the account's company currency
        credit_line_vals['credit'] = cur_obj.compute(cr, uid, reference_currency_id, src_main_currency_id, reference_amount, context=context)
        if (not src_acct.currency_id) or src_acct.currency_id.id == reference_currency_id:
            # record the original amount as a (negative) secondary-currency amount
            credit_line_vals.update(currency_id=reference_currency_id, amount_currency=-reference_amount)
    if reference_currency_id != dest_main_currency_id:
        # fix debit line: convert the amount into the account's company currency
        debit_line_vals['debit'] = cur_obj.compute(cr, uid, reference_currency_id, dest_main_currency_id, reference_amount, context=context)
        if (not dest_acct.currency_id) or dest_acct.currency_id.id == reference_currency_id:
            debit_line_vals.update(currency_id=reference_currency_id, amount_currency=reference_amount)
    return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
def unlink(self, cr, uid, ids, context=None):
    """Delete stock moves.

    Only draft moves may be deleted, unless the `call_unlink` context flag
    is set (internal bypass used by other workflows).
    """
    if context is None:
        context = {}
    ctx = context.copy()
    bypass = ctx.get('call_unlink', False)
    for move in self.browse(cr, uid, ids, context=context):
        if not bypass and move.state != 'draft':
            raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
    return super(stock_move, self).unlink(cr, uid, ids, context=ctx)
# _create_lot function is not used anywhere
def _create_lot(self, cr, uid, ids, product_id, prefix=False):
    """ Creates production lot
    @return: Production lot id
    """
    lot_pool = self.pool.get('stock.production.lot')
    return lot_pool.create(cr, uid, {'prefix': prefix, 'product_id': product_id})
def action_scrap(self, cr, uid, ids, quantity, location_id, context=None):
    """ Move the scrap/damaged product into scrap location
    @param cr: the database cursor
    @param uid: the user id
    @param ids: ids of stock move object to be scrapped
    @param quantity : specify scrap qty
    @param location_id : specify scrap location
    @param context: context arguments
    @return: Scraped lines
    """
    #quantity should in MOVE UOM
    if quantity <= 0:
        raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
    res = []
    for move in self.browse(cr, uid, ids, context=context):
        source_location = move.location_id
        if move.state == 'done':
            # the goods already arrived: scrap them from the destination
            source_location = move.location_dest_id
        if source_location.usage != 'internal':
            #restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
            raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
        move_qty = move.product_qty
        # scale the UoS quantity proportionally to the scrapped fraction
        # NOTE(review): divides by move_qty -- assumes product_qty is never
        # zero here; confirm upstream guarantees before relying on it
        uos_qty = quantity / move_qty * move.product_uos_qty
        # duplicate the move into the scrap location, preserving lot/tracking
        default_val = {
            'location_id': source_location.id,
            'product_qty': quantity,
            'product_uos_qty': uos_qty,
            'state': move.state,
            'scrapped': True,
            'location_dest_id': location_id,
            'tracking_id': move.tracking_id.id,
            'prodlot_id': move.prodlot_id.id,
        }
        new_move = self.copy(cr, uid, move.id, default_val)
        res += [new_move]
        product_obj = self.pool.get('product.product')
        for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
            if move.picking_id:
                # log the scrap on the picking's message thread
                uom = product.uom_id.name if product.uom_id else ''
                message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
                move.picking_id.message_post(body=message)
    # immediately process the newly created scrap moves
    self.action_done(cr, uid, res, context=context)
    return res
# action_split function is not used anywhere
# FIXME: deprecate this method
def action_split(self, cr, uid, ids, quantity, split_by_qty=1, prefix=False, with_lot=True, context=None):
""" | |
"""
Test the pipeline module.
"""
from distutils.version import LooseVersion
from tempfile import mkdtemp
import shutil
import time
import pytest
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.dummy import DummyRegressor
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils import Memory
from sklearn.utils._joblib import __version__ as joblib_version
# Tiny toy corpus shared by the text-feature tests (CountVectorizer etc.).
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
class NoFit(object):
    """Minimal object carrying ``a``/``b`` attributes.

    It deliberately has no ``fit`` method so Pipeline construction
    checks can reject it.
    """

    def __init__(self, a=None, b=None):
        self.a, self.b = a, b
class NoTrans(NoFit):
    """Estimator exposing fit/get_params/set_params but no ``transform``."""

    def fit(self, X, y):
        return self

    def get_params(self, deep=False):
        return dict(a=self.a, b=self.b)

    def set_params(self, **params):
        # Only 'a' is honoured, which is all the tests rely on.
        self.a = params['a']
        return self
class NoInvTransf(NoTrans):
    """Identity transformer that deliberately lacks ``inverse_transform``."""

    def transform(self, X):
        return X
class Transf(NoInvTransf):
    """Identity transformer supporting both transform directions."""

    def transform(self, X):
        return X

    def inverse_transform(self, X):
        return X
class TransfFitParams(Transf):
    """Identity transformer that records the fit params it was given."""

    def fit(self, X, y, **fit_params):
        self.fit_params = fit_params
        return self
class Mult(BaseEstimator):
    """Scale inputs by a constant factor; doubles as a mock predictor."""

    def __init__(self, mult=1):
        self.mult = mult

    def fit(self, X, y):
        return self

    def transform(self, X):
        return self.mult * np.asarray(X)

    def inverse_transform(self, X):
        return np.asarray(X) / self.mult

    def predict(self, X):
        # Row-wise sum of the scaled input.
        return self.transform(X).sum(axis=1)

    predict_proba = predict_log_proba = decision_function = predict

    def score(self, X, y=None):
        return np.sum(X)
class FitParamT(BaseEstimator):
    """Mock classifier whose prediction is whatever ``fit`` was told."""

    def __init__(self):
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        # Remember the flag; predict() just echoes it back.
        self.successful = should_succeed

    def predict(self, X):
        return self.successful

    def fit_predict(self, X, y, should_succeed=False):
        self.fit(X, y, should_succeed=should_succeed)
        return self.predict(X)

    def score(self, X, y=None, sample_weight=None):
        if sample_weight is None:
            return np.sum(X)
        return np.sum(X * sample_weight)
class DummyTransf(Transf):
    """Identity transformer that memorises column means when fitted."""

    def fit(self, X, y):
        self.means_ = np.mean(X, axis=0)
        # The timestamp lets tests tell a fresh fit from a memory-cached one.
        self.timestamp_ = time.time()
        return self
class DummyEstimatorParams(BaseEstimator):
    """Mock classifier recording the keyword it receives at predict time."""

    def fit(self, X, y):
        return self

    def predict(self, X, got_attribute=False):
        self.got_attribute = got_attribute
        return self
def test_pipeline_init():
    """Exercise Pipeline construction, get/set_params, repr and cloning."""
    # Test the various init parameters of the pipeline.
    assert_raises(TypeError, Pipeline)
    # Check that we can't instantiate pipelines with objects without fit
    # method
    assert_raises_regex(TypeError,
                        'Last step of Pipeline should implement fit. '
                        '.*NoFit.*',
                        Pipeline, [('clf', NoFit())])
    # Smoke test with only an estimator
    clf = NoTrans()
    pipe = Pipeline([('svc', clf)])
    assert_equal(pipe.get_params(deep=True),
                 dict(svc__a=None, svc__b=None, svc=clf,
                      **pipe.get_params(deep=False)))
    # Check that params are set
    pipe.set_params(svc__a=0.1)
    assert_equal(clf.a, 0.1)
    assert_equal(clf.b, None)
    # Smoke test the repr:
    repr(pipe)
    # Test with two objects
    clf = SVC()
    filter1 = SelectKBest(f_classif)
    pipe = Pipeline([('anova', filter1), ('svc', clf)])
    # Check that we can't instantiate with non-transformers on the way
    # Note that NoTrans implements fit, but not transform
    assert_raises_regex(TypeError,
                        'All intermediate steps should be transformers'
                        '.*\\bNoTrans\\b.*',
                        Pipeline, [('t', NoTrans()), ('svc', clf)])
    # Check that params are set
    pipe.set_params(svc__C=0.1)
    assert_equal(clf.C, 0.1)
    # Smoke test the repr:
    repr(pipe)
    # Check that params are not set when naming them wrong
    assert_raises(ValueError, pipe.set_params, anova__C=0.1)
    # Test clone
    pipe2 = assert_no_warnings(clone, pipe)
    assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
    # Check that apart from estimators, the parameters are the same
    params = pipe.get_params(deep=True)
    params2 = pipe2.get_params(deep=True)
    for x in pipe.get_params(deep=False):
        params.pop(x)
    for x in pipe2.get_params(deep=False):
        params2.pop(x)
    # Remove estimators that where copied
    params.pop('svc')
    params.pop('anova')
    params2.pop('svc')
    params2.pop('anova')
    assert_equal(params, params2)
def test_pipeline_init_tuple():
    """Smoke-test that Pipeline accepts its steps as a tuple, not only a list."""
    data = np.array([[1, 2]])
    pipeline = Pipeline((('transf', Transf()), ('clf', FitParamT())))
    pipeline.fit(data, y=None)
    pipeline.score(data)
    # Disabling a step via set_params must also work with tuple-initialised steps.
    pipeline.set_params(transf=None)
    pipeline.fit(data, y=None)
    pipeline.score(data)
@pytest.mark.filterwarnings('ignore: Default solver will be changed')  # 0.22
@pytest.mark.filterwarnings('ignore: Default multi_class will')  # 0.22
def test_pipeline_methods_anova():
    """Smoke-test every prediction method of an anova + logistic pipeline."""
    # Test the various methods of the pipeline (anova).
    iris = load_iris()
    X = iris.data
    y = iris.target
    # Test with Anova + LogisticRegression
    clf = LogisticRegression()
    filter1 = SelectKBest(f_classif, k=2)
    pipe = Pipeline([('anova', filter1), ('logistic', clf)])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_fit_params():
    """Fit params prefixed with a step name must reach that step only."""
    # Test that the pipeline can take fit parameters
    pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
    pipe.fit(X=None, y=None, clf__should_succeed=True)
    # classifier should return True
    assert_true(pipe.predict(None))
    # and transformer params should not be changed
    assert_true(pipe.named_steps['transf'].a is None)
    assert_true(pipe.named_steps['transf'].b is None)
    # invalid parameters should raise an error message
    assert_raise_message(
        TypeError,
        "fit() got an unexpected keyword argument 'bad'",
        pipe.fit, None, None, clf__bad=True
    )
def test_pipeline_sample_weight_supported():
    """``score`` must forward ``sample_weight`` to a final step accepting it."""
    data = np.array([[1, 2]])
    pipeline = Pipeline([('transf', Transf()), ('clf', FitParamT())])
    pipeline.fit(data, y=None)
    # Unweighted: FitParamT.score sums the entries of [[1, 2]].
    assert_equal(pipeline.score(data), 3)
    assert_equal(pipeline.score(data, y=None), 3)
    assert_equal(pipeline.score(data, y=None, sample_weight=None), 3)
    # Weighted: sum of [1 * 2, 2 * 3].
    assert_equal(pipeline.score(data, sample_weight=np.array([2, 3])), 8)
def test_pipeline_sample_weight_unsupported():
    """Passing sample_weight must fail when the final step cannot accept it."""
    # When sample_weight is None it shouldn't be passed
    X = np.array([[1, 2]])
    pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
    pipe.fit(X, y=None)
    assert_equal(pipe.score(X), 3)
    assert_equal(pipe.score(X, sample_weight=None), 3)
    assert_raise_message(
        TypeError,
        "score() got an unexpected keyword argument 'sample_weight'",
        pipe.score, X, sample_weight=np.array([2, 3])
    )
def test_pipeline_raise_set_params_error():
    """set_params with an unknown (possibly nested) name must raise ValueError."""
    # Test pipeline raises set params error message for nested models.
    pipe = Pipeline([('cls', LinearRegression())])
    # expected error message
    error_msg = ('Invalid parameter %s for estimator %s. '
                 'Check the list of available parameters '
                 'with `estimator.get_params().keys()`.')
    assert_raise_message(ValueError,
                         error_msg % ('fake', pipe),
                         pipe.set_params,
                         fake='nope')
    # nested model check
    assert_raise_message(ValueError,
                         error_msg % ("fake", pipe),
                         pipe.set_params,
                         fake__estimator='nope')
def test_pipeline_methods_pca_svm():
    """Smoke-test every prediction method of a PCA + SVC pipeline."""
    iris = load_iris()
    features = iris.data
    labels = iris.target
    classifier = SVC(gamma='scale', probability=True, random_state=0)
    reducer = PCA(svd_solver='full', n_components='mle', whiten=True)
    pipeline = Pipeline([('pca', reducer), ('svc', classifier)])
    pipeline.fit(features, labels)
    # Each prediction method must run without error on the training data.
    for method in (pipeline.predict, pipeline.predict_proba,
                   pipeline.predict_log_proba):
        method(features)
    pipeline.score(features, labels)
def test_pipeline_methods_preprocessing_svm():
    """Check prediction-output shapes for scaler/PCA + SVC pipelines."""
    # Test the various methods of the pipeline (preprocessing + svm).
    iris = load_iris()
    X = iris.data
    y = iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    scaler = StandardScaler()
    pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
    clf = SVC(gamma='scale', probability=True, random_state=0,
              decision_function_shape='ovr')
    for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
        pipe.fit(X, y)
        # check shapes of various prediction functions
        predict = pipe.predict(X)
        assert_equal(predict.shape, (n_samples,))
        proba = pipe.predict_proba(X)
        assert_equal(proba.shape, (n_samples, n_classes))
        log_proba = pipe.predict_log_proba(X)
        assert_equal(log_proba.shape, (n_samples, n_classes))
        decision_function = pipe.decision_function(X)
        assert_equal(decision_function.shape, (n_samples, n_classes))
        pipe.score(X, y)
def test_fit_predict_on_pipeline():
    """fit_predict via a pipeline must match running the steps by hand."""
    # test that the fit_predict method is implemented on a pipeline
    # test that the fit_predict on pipeline yields same results as applying
    # transform and clustering steps separately
    iris = load_iris()
    scaler = StandardScaler()
    km = KMeans(random_state=0)
    # As pipeline doesn't clone estimators on construction,
    # it must have its own estimators
    scaler_for_pipeline = StandardScaler()
    km_for_pipeline = KMeans(random_state=0)
    # first compute the transform and clustering step separately
    scaled = scaler.fit_transform(iris.data)
    separate_pred = km.fit_predict(scaled)
    # use a pipeline to do the transform and clustering in one step
    pipe = Pipeline([
        ('scaler', scaler_for_pipeline),
        ('Kmeans', km_for_pipeline)
    ])
    pipeline_pred = pipe.fit_predict(iris.data)
    assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
    """A pipeline only exposes ``fit_predict`` when its final step does."""
    scaler = StandardScaler()
    pca = PCA(svd_solver='full')
    pipeline = Pipeline([('scaler', scaler), ('pca', pca)])
    # PCA defines no fit_predict, so the attribute lookup must fail
    # with an error naming the offending class.
    assert_raises_regex(AttributeError,
                        "'PCA' object has no attribute 'fit_predict'",
                        getattr, pipeline, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
    """fit_predict must route step-prefixed fit params to the right steps."""
    # tests that Pipeline passes fit_params to intermediate steps
    # when fit_predict is invoked
    pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
    pipe.fit_predict(X=None,
                     y=None,
                     transf__should_get_this=True,
                     clf__should_succeed=True)
    assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
    assert_true(pipe.named_steps['clf'].successful)
    assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)
def test_predict_with_predict_params():
    """Keyword arguments given to ``predict`` must reach the final estimator."""
    pipeline = Pipeline([('transf', Transf()), ('clf', DummyEstimatorParams())])
    pipeline.fit(None, None)
    pipeline.predict(X=None, got_attribute=True)
    # DummyEstimatorParams records the keyword it received.
    assert_true(pipeline.named_steps['clf'].got_attribute)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# Test clone
fs2 = assert_no_warnings(clone, fs)
assert_false(fs.transformer_list[0][1] is fs2.transformer_list[0][1])
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], | |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def batch_pairwise_squared_distance(x, squared=False):
    """Pairwise distances between all token pairs of each batch element.

    Args:
        x: tensor of shape (batch, length, dim).
        squared: if False (default) return squared Euclidean distances;
            if True return their square roots.  NOTE(review): the flag
            name reads inverted -- semantics kept for caller compatibility.

    Returns:
        Tensor of shape (batch, length, length) with a zeroed diagonal.
    """
    bsz, max_len, dim = x.size()
    x_sq = (x ** 2).sum(dim=2)
    prod = torch.bmm(x, x.transpose(1, 2))
    # ||a||^2 + ||b||^2 - 2 a.b; the clamp guards against tiny negative
    # values produced by floating-point cancellation.
    dist = (x_sq.unsqueeze(1) + x_sq.unsqueeze(2) - 2 * prod).clamp(min=1e-12)
    if squared:
        # sqrt already allocates a fresh tensor; the old .clone() was redundant.
        dist = torch.sqrt(dist)
    dist[:, range(max_len), range(max_len)] = 0
    return dist
def central_squared_distance(x, squared=False):
    """Squared distance from every window slot to the window's center token.

    Args:
        x: tensor of shape (batch, length, window, dim).
        squared: if False (default) return squared distances; if True
            return their square roots (flag semantics kept as-is for
            compatibility with batch_pairwise_squared_distance).

    Returns:
        Tensor of shape (batch, length, window); the center column is 0.
    """
    bsz, max_len, w_size, dim = x.size()
    center = (w_size - 1) // 2  # integer arithmetic, no float round-trip
    x_sq = (x ** 2).sum(dim=3)
    prod = torch.bmm(x.view(-1, w_size, dim), x.view(-1, w_size, dim).transpose(1, 2))
    # Keep only the inner products against the center slot.
    prod = prod.view(bsz, max_len, w_size, w_size)[:, :, center, :]
    dist = (x_sq + x_sq[:, :, center].unsqueeze(2) - 2 * prod).clamp(min=1e-12)
    if squared:
        dist = torch.sqrt(dist)
    dist[:, range(max_len), center] = 0
    return dist
def window_index(w_size, bsz, length):
    """Flat gather indices of a centered window around every position.

    Args:
        w_size: odd window width.
        bsz: batch size.
        length: sequence length.

    Returns:
        Long tensor of shape (bsz, length, w_size); entry [b, i, :] is
        b * length + clamp(i - half .. i + half, 0, length - 1), suitable
        for indexing a (bsz * length, dim) flattened embedding table.
    """
    # // keeps the arithmetic integral, and the deprecated, float-returning
    # torch.range is replaced by the half-open torch.arange (same values,
    # already long dtype, so callers' .long() becomes a no-op).
    half = (w_size - 1) // 2
    idx = torch.arange(0, length).unsqueeze(0).unsqueeze(2).repeat(bsz, 1, w_size)
    offsets = torch.arange(-half, half + 1).expand_as(idx)
    idx = torch.clamp(idx + offsets, 0, length - 1)
    idx_base = torch.arange(0, bsz).view(-1, 1, 1) * length
    return idx + idx_base
class DistillKL(nn.Module):
    """Temperature-scaled distillation loss between student and teacher outputs."""

    def __init__(self, T):
        super(DistillKL, self).__init__()
        self.T = T

    def forward(self, y_s, y_t, mode="classification"):
        scaled_s = y_s / self.T
        scaled_t = y_t / self.T
        if mode == "regression":
            # Plain MSE over the flattened, temperature-scaled outputs.
            return F.mse_loss(scaled_s.view(-1), scaled_t.view(-1))
        # Cross-entropy of teacher probabilities against student log-probs.
        log_p_s = F.log_softmax(scaled_s, dim=-1)
        p_t = F.softmax(scaled_t, dim=-1)
        return -torch.sum(p_t * log_p_s, dim=-1).mean()
class PKD_loss(nn.Module):
    """Patient-KD loss: MSE between (optionally normalized) hidden states.

    Inputs are stacked hidden states of shape (batch, n_layer, hidden) or
    (batch, n_layer, seq, hidden); normalization runs over the last axis.
    """

    def __init__(self, p, normalize=False):
        super(PKD_loss, self).__init__()
        self.p = p
        self.normalize = normalize

    def forward(self, teacher_patience, student_patience):
        if self.normalize:
            # Normalize over the hidden axis (the trailing dimension) for
            # the two supported ranks; other ranks pass through unchanged.
            last_axis = len(teacher_patience.size()) - 1
            if last_axis in (2, 3):
                teacher_patience = F.normalize(teacher_patience, p=self.p, dim=last_axis)
                student_patience = F.normalize(student_patience, p=self.p, dim=last_axis)
        return F.mse_loss(teacher_patience.float(), student_patience.float())
class WR_Dist(nn.Module):
    """Word-relation distance distillation loss.

    Matches the student's pairwise token-to-token distance (or cosine
    similarity) structure to the teacher's, per layer.
    """

    def __init__(self):
        super(WR_Dist, self).__init__()

    def forward(self, t_embed, s_embed, attention_mask, distance, lossfunc, normalize=False, squard=False):
        # t_embed/s_embed: (batch, layers, seq_len, dim) hidden states.
        # distance: "cos" (softmax over cosine similarities) or "l2"
        # (pairwise squared distances, optionally mean-normalized).
        bsz, layer_num, max_len, dim = t_embed.size()
        _, _, _, sdim = s_embed.size()
        t_embed = t_embed.view(-1, max_len, dim)
        s_embed = s_embed.view(-1, max_len, sdim)
        # Pairwise validity mask, replicated per layer.
        mask = self.make_mask(attention_mask, layer_num)
        mask = mask.view(-1, max_len, max_len)
        # Teacher targets carry no gradient.
        with torch.no_grad():
            if distance == "cos":
                t_norm = F.normalize(t_embed, p=2, dim=2)
                t_d = torch.bmm(t_norm, t_norm.transpose(1,2))
                t_d = t_d * mask
                # Exclude self-similarity and padding from the softmax.
                diagonal = (torch.ones(max_len, max_len) - torch.eye(max_len, max_len)).to(t_embed.device)
                t_d = t_d.masked_fill(diagonal == 0, -np.inf)
                t_d = t_d.masked_fill(mask == 0, -np.inf)
                t_d = F.softmax(t_d, dim=-1)
                t_d = t_d * mask
            elif distance=="l2":
                t_d = batch_pairwise_squared_distance(t_embed, squared=False)
                if normalize:
                    # Scale by the mean over non-zero entries per (layer, batch).
                    t_d = t_d * mask
                    nonzero = torch.sum((t_d.view(bsz*layer_num, -1) > 0), dim=-1)
                    mean_td = t_d.view(bsz*layer_num, -1).sum(dim=-1) / nonzero
                    t_d = t_d / mean_td.unsqueeze(1).unsqueeze(2)
                else:
                    t_d = t_d * mask
        # Student side (gradients flow from here).
        if distance == "cos":
            s_norm = F.normalize(s_embed, p=2, dim=2)
            s_d = torch.bmm(s_norm, s_norm.transpose(1,2))
            s_d = s_d * mask
            s_d = s_d.masked_fill(diagonal == 0, -np.inf)
            s_d = s_d.masked_fill(mask == 0, -np.inf)
            # Log-probabilities, matching what F.kl_div expects as input.
            s_d = F.log_softmax(s_d, dim=-1)
            s_d = s_d * mask
        elif distance=="l2":
            s_d = batch_pairwise_squared_distance(s_embed, squared=False)
            if normalize:
                s_d = s_d * mask
                nonzero = torch.sum((s_d.view(bsz*layer_num, -1) > 0), dim=-1)
                mean_sd = s_d.view(bsz*layer_num, -1).sum(dim=-1) / nonzero
                s_d = s_d / mean_sd.unsqueeze(1).unsqueeze(2)
            else:
                s_d = s_d * mask
        # Sum-reduced loss averaged over valid (unmasked) pairs.
        # NOTE(review): returns None for an unrecognised lossfunc.
        if lossfunc == "kldiv":
            return F.kl_div(s_d, t_d, reduction="sum") / mask.sum().item()
        elif lossfunc == "l1loss":
            return F.l1_loss(s_d, t_d, reduction='sum') / mask.sum().item()
        elif lossfunc == "l2loss":
            return F.mse_loss(s_d, t_d, reduction='sum') / mask.sum().item()
        elif lossfunc =='smoothl1':
            return F.smooth_l1_loss(s_d, t_d, reduction='sum') / mask.sum().item()

    def make_mask(self, attention_mask, layers):
        # (batch, len) -> (batch, layers, len, len) pairwise validity mask.
        mask = attention_mask.unsqueeze(2) * attention_mask.unsqueeze(1)
        return mask.unsqueeze(1).repeat(1,layers,1,1).float()
##Update WRDIST with window
class WR_Angle(nn.Module):
    """Word-relation angle distillation loss.

    For every token pair, unit difference vectors are formed; their inner
    products against all other tokens give triple-wise angular relations,
    matched between student and teacher.  Memory is O(len^3) per layer.
    """

    def __init__(self):
        super(WR_Angle, self).__init__()

    def forward(self, t_embed, s_embed, attention_mask, lossfunc):
        bsz, layer_num, max_len, dim = t_embed.size()
        bsz, layer_num, max_len, sdim = s_embed.size()
        t_embed = t_embed.view(-1, max_len, dim)
        s_embed = s_embed.view(-1, max_len, sdim)
        mask = self.make_mask(attention_mask, layer_num)
        mask = mask.view(-1, max_len, max_len, max_len)
        # Teacher targets carry no gradient.
        with torch.no_grad():
            #1441
            t_sub = (t_embed.unsqueeze(1) - t_embed.unsqueeze(2)) #1873
            t_sub = F.normalize(t_sub, p=2, dim=3).view(-1,max_len,dim) #2305
            t_angle = torch.bmm(t_sub, t_sub.transpose(1,2)).view(-1, max_len, max_len, max_len)
            t_angle = t_angle * mask
        s_sub = (s_embed.unsqueeze(1) - s_embed.unsqueeze(2)) #2737
        s_sub = F.normalize(s_sub, p=2, dim=3).view(-1, max_len, sdim) #3169
        s_angle = torch.bmm(s_sub, s_sub.transpose(1,2)).view(-1, max_len, max_len, max_len)
        s_angle = s_angle * mask
        # NOTE(review): returns None for an unrecognised lossfunc.
        if lossfunc == "l1loss":
            return F.l1_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()
        elif lossfunc == "l2loss":
            return F.mse_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()
        elif lossfunc == "smoothl1":
            return F.smooth_l1_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()

    def make_mask(self, attention_mask, layers):
        # Triple-wise validity mask: (batch, layers, len, len, len).
        mask = attention_mask.unsqueeze(2).unsqueeze(3) * attention_mask.unsqueeze(1).unsqueeze(3) * attention_mask.unsqueeze(1).unsqueeze(2)
        return mask.unsqueeze(1).repeat(1,layers,1,1,1).float()
class WR_Angle_window(nn.Module):
    """Windowed variant of WR_Angle: angles only within a local window.

    Cuts the O(len^3) memory of WR_Angle down to O(len * window^2).
    """

    def __init__(self):
        super(WR_Angle_window, self).__init__()

    def forward(self, t_embed, s_embed, attention_mask, lossfunc, window=5):
        # Window must be odd so each position sits at its window's center.
        assert (window % 2) == 1
        bsz, layer_num, max_len, dim = t_embed.size()
        bsz, layer_num, max_len, sdim = s_embed.size()
        t_embed = t_embed.view(-1, max_len, dim)
        s_embed = s_embed.view(-1, max_len, sdim)
        new_bsz = bsz * layer_num
        # Flat gather indices of each position's local window.
        idx = window_index(window, new_bsz, max_len)
        #idx = idx.long().unsqueeze(1).repeat(1, layer_num,1,1).view(-1, max_len, window)
        idx = idx.long()
        t_round_emb = t_embed.view(new_bsz*max_len, -1)[idx, :]
        s_round_emb = s_embed.view(new_bsz*max_len, -1)[idx, :]
        mask = self.make_mask(attention_mask, layer_num, window)
        mask = mask.view(-1, max_len, window, window)
        # Teacher targets carry no gradient.
        with torch.no_grad():
            t_sub = (t_embed.unsqueeze(2) - t_round_emb)
            # bsz, len, window, window, dim
            t_sub = F.normalize(t_sub, p=2, dim=3).view(-1, window, dim)
            t_angle = torch.bmm(t_sub, t_sub.transpose(1,2)).view(new_bsz, max_len, window, window)
            t_angle = t_angle * mask
        s_sub = (s_embed.unsqueeze(2) - s_round_emb) #2737
        s_sub = F.normalize(s_sub, p=2, dim=3).view(-1, window, sdim) #3169
        s_angle = torch.bmm(s_sub, s_sub.transpose(1,2)).view(new_bsz, max_len, window, window)
        s_angle = s_angle * mask
        # NOTE(review): returns None for an unrecognised lossfunc.
        if lossfunc == "l1loss":
            return F.l1_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()
        elif lossfunc == "l2loss":
            return F.mse_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()
        elif lossfunc == "smoothl1":
            return F.smooth_l1_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()

    def make_mask(self, attention_mask, layers, window):
        # NOTE(review): unlike WR_Angle, this mask reflects only the anchor
        # token's validity, not its window neighbours' -- confirm intended.
        mask = attention_mask.unsqueeze(2).unsqueeze(3).repeat(1,1,window,window)
        return mask.unsqueeze(1).repeat(1,layers,1,1,1).float()
class LTR_Dist(nn.Module):
    """Layer-transforming-relation distance loss.

    Same idea as WR_Dist, but relations are computed across layers for
    each token position instead of across tokens for each layer.
    """

    def __init__(self):
        super(LTR_Dist, self).__init__()

    def forward(self, t_embed, s_embed, attention_mask, distance, lossfunc, normalize=False, squard=False):
        # t_embed/s_embed: (batch, layers, seq_len, dim); tokens are folded
        # into the batch so relations run over the layer axis.
        bsz, layer_num, max_len, dim = t_embed.size()
        bsz, layer_num, max_len, sdim = s_embed.size()
        t_embed = t_embed.transpose(1,2).reshape(-1, layer_num, dim)
        s_embed = s_embed.transpose(1,2).reshape(-1, layer_num, sdim)
        mask = self.make_mask(attention_mask, layer_num).view(-1, layer_num, layer_num)
        mask = mask.view(-1, layer_num, layer_num)
        # Teacher targets carry no gradient.
        with torch.no_grad():
            if distance == "cos":
                t_norm = F.normalize(t_embed, p=2, dim=2)
                t_d = torch.bmm(t_norm, t_norm.transpose(1,2))
                t_d = t_d * mask
                # Exclude self-similarity and padding from the softmax.
                diagonal = (torch.ones(layer_num, layer_num) - torch.eye(layer_num, layer_num)).to(t_embed.device)
                t_d = t_d.masked_fill(diagonal == 0, -np.inf)
                t_d = t_d.masked_fill(mask == 0, -np.inf)
                #t_d = t_d.masked_fill(t_d == 1.0, -np.inf)
                t_d = F.softmax(t_d, dim=-1)
                t_d = t_d * mask
            elif distance == "l2":
                t_d = batch_pairwise_squared_distance(t_embed, squared=False)
                if normalize:
                    t_d = t_d * mask
                    # Guard against division by zero on fully padded rows.
                    nonzero = torch.sum((t_d.view(bsz*max_len, -1) > 0), dim=-1)
                    nonzero[nonzero==0] = 1
                    mean_td = t_d.view(bsz*max_len, -1).sum(dim=-1) / nonzero
                    mean_td[mean_td==0] = 1
                    t_d = t_d / mean_td.unsqueeze(1).unsqueeze(2)
                else:
                    t_d = t_d * mask
        # Student side (gradients flow from here).
        if distance == "cos":
            s_norm = F.normalize(s_embed, p=2, dim=2)
            s_d = torch.bmm(s_norm, s_norm.transpose(1,2))
            s_d = s_d * mask
            s_d = s_d.masked_fill(diagonal == 0, -np.inf)
            s_d = s_d.masked_fill(mask == 0, -np.inf)
            #s_d = s_d.masked_fill(s_d == 1.0, -np.inf)
            s_d = F.log_softmax(s_d, dim=-1)
            s_d = s_d * mask
        elif distance == "l2":
            s_d = batch_pairwise_squared_distance(s_embed, squared=False)
            if normalize:
                s_d = s_d * mask
                nonzero = torch.sum((s_d.view(bsz*max_len, -1) > 0), dim=-1)
                nonzero[nonzero==0] = 1
                mean_sd = s_d.view(bsz*max_len, -1).sum(dim=-1) / nonzero
                mean_sd[mean_sd==0] = 1
                s_d = s_d / mean_sd.unsqueeze(1).unsqueeze(2)
            else:
                s_d = s_d * mask
        # NOTE(review): returns None for an unrecognised lossfunc.
        if lossfunc == "kldiv":
            return F.kl_div(s_d, t_d, reduction="sum") / mask.sum().item()
        elif lossfunc == "l1loss":
            return F.l1_loss(s_d, t_d, reduction='sum') / mask.sum().item()
        elif lossfunc == "l2loss":
            return F.mse_loss(s_d, t_d, reduction='sum') / mask.sum().item()
        elif lossfunc =='smoothl1':
            return F.smooth_l1_loss(s_d, t_d, reduction='sum') / mask.sum().item()

    def make_mask(self, attention_mask, layer):
        #attention mask -> b, len
        # mask -> b, len, 6, 6
        mask = attention_mask.unsqueeze(2).unsqueeze(3)
        return mask.repeat(1, 1, layer, layer).float()
class LTR_Angle(nn.Module):
    """Layer-transforming-relation angle loss (angles across layers per token)."""

    def __init__(self):
        super(LTR_Angle, self).__init__()

    def forward(self, t_embed, s_embed, attention_mask, loss):
        bsz, layer_num, max_len, dim = t_embed.size()
        bsz, layer_num, max_len, sdim = s_embed.size()
        # Fold tokens into the batch; relations run over the layer axis.
        t_embed = t_embed.transpose(1,2).reshape(-1, layer_num, dim)
        s_embed = s_embed.transpose(1,2).reshape(-1, layer_num, sdim)
        mask = self.make_mask(attention_mask, layer_num)
        mask = mask.view(-1, layer_num, layer_num, layer_num)
        # Teacher targets carry no gradient.
        with torch.no_grad():
            #1441
            t_sub = (t_embed.unsqueeze(1) - t_embed.unsqueeze(2)) #1873
            t_sub = F.normalize(t_sub, p=2, dim=3).view(-1,layer_num,dim) #2305
            t_angle = torch.bmm(t_sub, t_sub.transpose(1,2)).view(-1, layer_num, layer_num, layer_num)
            t_angle = t_angle * mask
        s_sub = (s_embed.unsqueeze(1) - s_embed.unsqueeze(2)) #2737
        s_sub = F.normalize(s_sub, p=2, dim=3).view(-1, layer_num, sdim) #3169
        s_angle = torch.bmm(s_sub, s_sub.transpose(1,2)).view(-1, layer_num,layer_num,layer_num) #3385
        s_angle = s_angle * mask
        # NOTE(review): returns None for an unrecognised loss name.
        if loss == "l1loss":
            return F.l1_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()
        elif loss == "l2loss":
            return F.mse_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()
        elif loss == "smoothl1":
            return F.smooth_l1_loss(s_angle, t_angle, reduction='sum') / mask.sum().item()

    def make_mask(self, attention_mask, layers):
        # (batch, len) -> (batch, len, layers, layers, layers) validity mask.
        mask = attention_mask.unsqueeze(2).unsqueeze(3).unsqueeze(4)
        return mask.repeat(1,1,layers,layers,layers).float()
class Hidden_mse(nn.Module):
def __init__(self, student_size, teacher_size):
super(Hidden_mse, self).__init__()
#self.fit_dense = nn.Linear(student_size, teacher_size)
def forward(self, s_embed, t_embed):
bsz, layer_num, max_len, tdim = t_embed.size()
bsz, layer_num, max_len, sdim = s_embed.size()
t_embed = t_embed.view(-1, max_len, tdim)
s_embed = | |
<gh_stars>0
#Embedded file name: ACEStream\Core\NATFirewall\UDPPuncture.pyo
import guessip
import time
import socket
import sys
import errno
import random
from collections import deque
import TimeoutFinder
import os
# Verbose protocol tracing switch.
DEBUG = False
# errno for "send would block": Winsock's WSAEWOULDBLOCK (10035) on
# Windows, the platform's EWOULDBLOCK elsewhere.
if sys.platform == 'win32':
    SOCKET_BLOCK_ERRORCODE = 10035
else:
    SOCKET_BLOCK_ERRORCODE = errno.EWOULDBLOCK
class UDPHandler():
    # Hostname of the bootstrap tracker (resolved in bootstrap()).
    TRACKER_ADDRESS = 'm23trial-udp.tribler.org'
    # Message type bytes (first byte of every datagram).
    CONNECT = chr(0)
    YOUR_IP = chr(1)
    FW_CONNECT_REQ = chr(2)
    REV_CONNECT = chr(3)
    PEX_ADD = chr(4)
    PEX_DEL = chr(5)
    CLOSE = chr(6)
    UPDATE_NATFW_STATE = chr(7)
    PEER_UNKNOWN = chr(8)
    KEEP_ALIVE = chr(9)
    # Reason bytes carried in the payload of CLOSE messages.
    CLOSE_NORMAL = chr(0)
    CLOSE_TOO_MANY = chr(1)
    CLOSE_LEN = chr(2)
    # Fixed stray comma: 'CLOSE_PROTO_VER, = chr(3)' only worked by accident
    # because a one-character string unpacks to itself.
    CLOSE_PROTO_VER = chr(3)
    CLOSE_GARBAGE = chr(4)
    CLOSE_NOT_CONNECTED = chr(5)
    CLOSE_STATE_CORRUPT = chr(6)
    # NAT mapping / filtering classification states.
    NAT_UNKNOWN, NAT_NONE, NAT_APDM = range(0, 3)
    FILTER_UNKNOWN, FILTER_NONE, FILTER_APDF = range(0, 3)
    # Heuristic thresholds for the NAT/filter detection counters.
    RECV_CONNECT_THRESHOLD = 4
    RECV_CONNECT_SCALE_THRESHOLD = 64
    FIXED_THRESHOLD = 7
def __init__(self, rawserver, check_crawler, port = 0):
self.connections = {}
if check_crawler:
from ACEStream.Core.Statistics.Crawler import Crawler
crawler = Crawler.get_instance()
if crawler.am_crawler():
return
self.connections = {}
self.rawserver = rawserver
self.socket = rawserver.create_udpsocket(port, '0.0.0.0')
self.known_peers = {}
self.nat_type = UDPHandler.NAT_UNKNOWN
self.filter_type = UDPHandler.FILTER_UNKNOWN
current_file_path = os.path.dirname(os.path.realpath(__file__))
maxconnections_file = os.path.join(os.path.split(os.path.split(current_file_path)[0])[0],"values","maxconnections.txt")
f = open(maxconnections_file, "r")
string = f.read()
self.max_connections = int(string)
self.connect_threshold = 75
self.recv_unsolicited = 0
self.recv_connect_total = 0
self.recv_address = 0
self.recv_different_address = 0
self.sendqueue = deque([])
self.last_connect = 0
self.last_info_dump = time.time()
self.natfw_version = 1
self.keepalive_intvl = 100
self.done = False
self.reporter = None
self.last_sends = {}
rawserver.start_listening_udp(self.socket, self)
if port == 9473:
self.tracker = True
self.id = '\x00\x00\x00\x00'
self.max_connections = 1000
rawserver.add_task(self.check_for_timeouts, 10)
else:
self.tracker = False
self.id = chr(random.getrandbits(8)) + chr(random.getrandbits(8)) + chr(random.getrandbits(8)) + chr(random.getrandbits(8))
if DEBUG:
debug('My ID: %s' % self.id.encode('hex'))
rawserver.add_task(self.bootstrap, 5)
TimeoutFinder.TimeoutFinder(rawserver, False, self.timeout_report)
TimeoutFinder.TimeoutFinder(rawserver, True, self.timeout_report)
if not DEBUG:
if check_crawler:
from ACEStream.Core.Statistics.PunctureCrawler import get_reporter_instance
self.reporter = get_reporter_instance()
if self.reporter:
my_wan_ip = guessip.get_my_wan_ip()
if my_wan_ip == None and sys.platform == 'win32':
try:
import os
for line in os.popen('netstat -nr').readlines():
words = line.split()
if words[0] == '0.0.0.0':
my_wan_ip = words[3]
break
except:
pass
if my_wan_ip == None:
my_wan_ip = 'Unknown'
self.reporter.add_event('UDPPuncture', 'ID:%s;IP:%s' % (self.id.encode('hex'), my_wan_ip))
def shutdown(self):
self.done = True
for connection in self.connections.values():
self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NORMAL, connection.address)
self.delete_closed_connection(connection)
    def data_came_in(self, packets):
        """rawserver callback: dispatch each received UDP datagram.

        packets is a list of (address, data) tuples; the first byte of
        data is the message type.  Unknown senders must open with a
        well-formed CONNECT or they are refused with a CLOSE reason.
        """
        for address, data in packets:
            if DEBUG:
                debug('Data came (%d) in from address %s:%d' % (ord(data[0]), address[0], address[1]))
            connection = self.connections.get(address)
            if not connection:
                if data[0] == UDPHandler.CLOSE:
                    # Ignore stray CLOSE messages to avoid reply loops.
                    continue
                if data[0] != UDPHandler.CONNECT:
                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NOT_CONNECTED, address)
                    continue
                if len(data) != 8:
                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_LEN, address)
                    continue
                if data[1] != chr(0):
                    # Only protocol version 0 is spoken.
                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_PROTO_VER, address)
                    continue
                if self.check_connection_count():
                    # Table is full and nothing could be evicted.
                    if self.reporter:
                        self.reporter.add_event('UDPPuncture', 'OCTM:%s,%d,%s' % (address[0], address[1], data[2:6].encode('hex')))
                    self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_TOO_MANY, address)
                    continue
                id = data[2:6]
                connection = self.known_peers.get(id)
                if not connection:
                    connection = UDPConnection(address, id, self)
                    self.known_peers[id] = connection
                elif connection.address != address:
                    # Known peer ID arriving from a new address.
                    if connection.connection_state == UDPConnection.CONNECT_ESTABLISHED:
                        self.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_STATE_CORRUPT, address)
                        continue
                    try:
                        del self.connections[connection.address]
                    except:
                        pass
                    connection.address = address
                if address not in self.last_sends:
                    # Truly unsolicited connect: feeds the filter heuristics.
                    self.incoming_connect(address, True)
                self.connections[address] = connection
            if not connection.handle_msg(data):
                self.delete_closed_connection(connection)
    def check_connection_count(self):
        """Return True when the table is full and no connection can be evicted.

        At capacity, tries to evict the oldest non-tracker connection;
        non-tracker instances refuse to evict links younger than 5 minutes.
        """
        if len(self.connections) < self.max_connections:
            return False
        if DEBUG:
            debug(' Connection threshold reached, trying to find an old connection')
        oldest = None
        oldest_time = 1e+308
        for connection in self.connections.itervalues():
            if not connection.tracker and connection.connected_since < oldest_time:
                oldest_time = connection.connected_since
                oldest = connection
        if not oldest:
            return True
        if not self.tracker and oldest.connected_since > time.time() - 300:
            if DEBUG:
                debug(' All connections are under 5 minutes old')
            return True
        if DEBUG:
            debug(' Closing connection to %s %s:%d' % (oldest.id.encode('hex'), oldest.address[0], oldest.address[1]))
        oldest.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NORMAL)
        self.delete_closed_connection(oldest)
        return False
    def incoming_connect(self, address, unsolicited):
        """Update filter-type heuristics from an incoming CONNECT.

        Enough unsolicited connects imply no address/port-dependent
        filtering (FILTER_NONE, and thus NAT_NONE); otherwise FILTER_APDF
        is assumed.  Trackers keep no such state.
        """
        if self.tracker:
            return
        if unsolicited:
            self.recv_unsolicited += 1
        self.recv_connect_total += 1
        # Halving both counters keeps the estimate adaptive (decay).
        if self.recv_connect_total > UDPHandler.RECV_CONNECT_SCALE_THRESHOLD:
            self.recv_connect_total >>= 1
            self.recv_unsolicited >>= 1
        if self.recv_connect_total > UDPHandler.RECV_CONNECT_THRESHOLD:
            if DEBUG:
                debug('Setting filter state (recv total %d, recv unsol %d)' % (self.recv_connect_total, self.recv_unsolicited))
            update_filter = False
            if self.recv_unsolicited > self.recv_connect_total / 2 or self.recv_unsolicited > UDPHandler.FIXED_THRESHOLD:
                if self.filter_type != UDPHandler.FILTER_NONE or self.nat_type != UDPHandler.NAT_NONE:
                    update_filter = True
                    self.filter_type = UDPHandler.FILTER_NONE
                    self.nat_type = UDPHandler.NAT_NONE
            elif self.filter_type != UDPHandler.FILTER_APDF:
                update_filter = True
                self.filter_type = UDPHandler.FILTER_APDF
            if update_filter:
                # Version is a modulo-256 counter advertised to peers.
                self.natfw_version += 1
                if self.natfw_version > 255:
                    self.natfw_version = 0
                if self.reporter:
                    self.reporter.add_event('UDPPuncture', 'UNAT:%d,%d,%d' % (self.nat_type, self.filter_type, self.natfw_version))
                map(lambda x: x.readvertise_nat(), self.connections.itervalues())
    def incoming_ip(self, address):
        """Update NAT-type heuristics from the WAN address a peer reports.

        Peers repeatedly seeing us at different addresses indicates an
        address/port-dependent-mapping NAT (NAT_APDM).
        """
        if self.tracker:
            return
        self.recv_address += 1
        if self.recv_address == 1:
            # First report: just remember the address, nothing to compare.
            self.reported_wan_address = address
            return
        # Halving both counters keeps the estimate adaptive (decay).
        if self.recv_address > UDPHandler.RECV_CONNECT_SCALE_THRESHOLD:
            self.recv_address >>= 1
            self.recv_different_address >>= 1
        if self.reported_wan_address != address:
            self.reported_wan_address = address
            self.recv_different_address += 1
        if self.recv_address > UDPHandler.RECV_CONNECT_THRESHOLD:
            if DEBUG:
                debug('Setting nat state (recv addr %d, recv diff %d)' % (self.recv_address, self.recv_different_address))
            update_nat = False
            if self.recv_different_address > self.recv_address / 2:
                if self.nat_type != UDPHandler.NAT_APDM:
                    update_nat = True
                    self.nat_type = UDPHandler.NAT_APDM
                    self.filter_type = UDPHandler.FILTER_APDF
            elif self.nat_type != UDPHandler.NAT_NONE:
                update_nat = True
                self.nat_type = UDPHandler.NAT_NONE
            if update_nat:
                # Version is a modulo-256 counter advertised to peers.
                self.natfw_version += 1
                if self.natfw_version > 255:
                    self.natfw_version = 0
                if self.reporter:
                    self.reporter.add_event('UDPPuncture', 'UNAT:%d,%d,%d' % (self.nat_type, self.filter_type, self.natfw_version))
                map(lambda x: x.readvertise_nat(), self.connections.itervalues())
    def bootstrap(self):
        """Resolve the bootstrap tracker and kick off the connect/timeout loop."""
        if DEBUG:
            debug('Starting bootstrap')
        try:
            address = socket.gethostbyname(UDPHandler.TRACKER_ADDRESS)
        except:
            # DNS failure: give up silently (no retry scheduled here).
            return
        if address == '172.16.58.3':
            # NOTE(review): hard-coded address treated as "do not use";
            # the reason is not documented anywhere visible -- confirm.
            return
        tracker = UDPConnection((address, 9473), '\x00\x00\x00\x00', self)
        # Huge advertisement timestamp so the tracker entry is never pruned.
        tracker.advertised_by[('0.0.0.0', 0)] = 1e+308
        tracker.nat_type = UDPHandler.NAT_NONE
        tracker.filter_type = UDPHandler.FILTER_NONE
        tracker.tracker = True
        self.known_peers[tracker.id] = tracker
        self.check_for_timeouts()
    def sendto(self, data, address):
        """Send a datagram, queueing it when the socket would block.

        Once anything is queued, later packets are appended too so that
        ordering is preserved until process_sendqueue drains the queue.
        """
        if DEBUG:
            debug('Sending data (%d) to address %s:%d' % (ord(data[0]), address[0], address[1]))
        if len(self.sendqueue) > 0:
            self.sendqueue.append((data, address))
            return
        try:
            self.socket.sendto(data, address)
        except socket.error as error:
            # NOTE(review): send errors other than EWOULDBLOCK are silently
            # dropped here -- presumably deliberate best-effort UDP; confirm.
            if error[0] == SOCKET_BLOCK_ERRORCODE:
                self.sendqueue.append((data, address))
                self.rawserver.add_task(self.process_sendqueue, 0.1)
def process_sendqueue(self):
    """Drain queued datagrams in FIFO order; reschedule when the socket
    blocks again."""
    queue = self.sendqueue
    while queue:
        payload, destination = queue[0]
        try:
            self.socket.sendto(payload, destination)
        except socket.error as err:
            if err[0] == SOCKET_BLOCK_ERRORCODE:
                # Still blocked: leave the head entry in place and retry.
                self.rawserver.add_task(self.process_sendqueue, 0.1)
                return
            # Any other send error: fall through and drop this datagram.
        queue.popleft()
def check_nat_compatible(self, peer):
    """Return False only for the one unconnectable pairing: our mapping is
    address/port dependent (APDM) while the peer filters on address/port
    (APDF); every other combination is considered reachable."""
    incompatible = (self.nat_type == UDPHandler.NAT_APDM
                    and peer.filter_type == UDPHandler.FILTER_APDF)
    return not incompatible
def check_for_timeouts(self):
    """Periodic maintenance task; reschedules itself every 10 seconds.

    - prunes last_sends entries older than 5 minutes
    - above connect_threshold: closes non-tracker connections older than
      10 minutes until the count drops below threshold / 1.5
    - below connect_threshold: tries to open one connection to a random
      compatible known peer (at most one attempt per 20 seconds)
    - drops connections whose handshake or traffic timed out
    - sends keep-alives / PEX updates to idle connections
    - with DEBUG: dumps the peer table at most once a minute
    """
    if self.done:
        return
    now = time.time()
    # Prune stale last-send bookkeeping (> 5 minutes old).
    close_list = []
    for address in self.last_sends.iterkeys():
        if self.last_sends[address] < now - 300:
            close_list.append(address)
    for address in close_list:
        del self.last_sends[address]
    # Too many connections: shed old ones until comfortably below threshold.
    if not self.tracker and len(self.connections) >= self.connect_threshold:
        if DEBUG:
            debug('Closing connections older than 10 minutes')
        close_list = []
        for connection in self.connections.itervalues():
            if not connection.tracker and connection.connected_since < now - 600:
                if DEBUG:
                    debug(' Closing connection to %s %s:%d' % (connection.id.encode('hex'), connection.address[0], connection.address[1]))
                close_list.append(connection)
        for connection in close_list:
            connection.sendto(UDPHandler.CLOSE + UDPHandler.CLOSE_NORMAL)
            self.delete_closed_connection(connection)
            if len(self.connections) < self.connect_threshold / 1.5:
                break
    # Too few connections: attempt one new connection to a random known peer.
    if not self.tracker and len(self.connections) < self.connect_threshold and self.last_connect < now - 20:
        unconnected_peers = list(set(self.known_peers.iterkeys()) - set(ConnectionIteratorByID(self.connections)))
        random.shuffle(unconnected_peers)
        while len(unconnected_peers) > 0:
            peer = self.known_peers[unconnected_peers.pop()]
            if peer.connection_state != UDPConnection.CONNECT_NONE:
                continue
            if not self.check_nat_compatible(peer):
                continue
            if peer.last_comm > now - 300:
                # Skip peers we communicated with in the last 5 minutes.
                continue
            if not self.try_connect(peer):
                continue
            self.last_connect = now
            break
    need_advert_time = now - self.keepalive_intvl
    timeout_time = now - 250
    can_advert_time = now - 30
    close_list = []
    pex_only = 0
    for connection in self.connections.itervalues():
        if connection.connection_state == UDPConnection.CONNECT_SENT and connection.last_received < can_advert_time:
            # Handshake pending and quiet for > 30 s: retry or give up.
            if connection.connection_tries < 0:
                if DEBUG:
                    debug('Dropping connection with %s:%d (timeout)' % (connection.address[0], connection.address[1]))
                close_list.append(connection)
            elif not self.try_connect(connection):
                if DEBUG:
                    debug('Too many retries %s:%d' % (connection.address[0], connection.address[1]))
                close_list.append(connection)
        elif connection.last_received < timeout_time:
            if DEBUG:
                debug('Dropping connection with %s:%d (timeout)' % (connection.address[0], connection.address[1]))
            close_list.append(connection)
    for connection in close_list:
        self.delete_closed_connection(connection)
    for connection in self.connections.itervalues():
        if connection.last_send < need_advert_time:
            # Connection idle: piggy-back pending PEX/NAT info on the
            # keep-alive when there is any, else send a plain keep-alive.
            if connection.advertise_nat or len(connection.pex_add) != 0 or len(connection.pex_del) != 0:
                # Was `connection.send_pex() or connection.sendto(...)`: an
                # expression abused for control flow; same behaviour.
                if not connection.send_pex():
                    connection.sendto(UDPHandler.KEEP_ALIVE)
            else:
                connection.sendto(UDPHandler.KEEP_ALIVE)
        elif ((connection.advertise_nat or len(connection.pex_add) != 0 or len(connection.pex_del) != 0)
                and connection.last_advert < can_advert_time and pex_only < 35):
            # BUG FIX: the original condition read
            #   advertise_nat or (pex_add or pex_del) and last_advert < t and pex_only < 35
            # because `and` binds tighter than `or`, so a pending NAT
            # advertisement bypassed both the 30 s advert throttle and the
            # 35-messages-per-round cap. Parenthesized to match the grouping
            # used in the keep-alive branch above.
            if connection.send_pex():
                pex_only += 1
    self.rawserver.add_task(self.check_for_timeouts, 10)
    if DEBUG:
        if self.last_info_dump + 60 < now:
            self.last_info_dump = now
            for connection in self.known_peers.itervalues():
                msg = 'Peer %d %s %s:%d,%d,%d: Advertisers:' % (connection.connection_state,
                    connection.id.encode('hex'),
                    connection.address[0],
                    connection.address[1],
                    connection.nat_type,
                    connection.filter_type)
                for advertiser in connection.advertised_by.iterkeys():
                    msg += ' %s:%d' % (advertiser[0], advertiser[1])
                debug(msg)
def try_connect(self, peer):
    """Attempt to open a connection to `peer`; return True when an attempt
    was actually made.

    Gives up when a filtered (firewalled) peer has no advertisers left, or
    after three attempts. For filtered peers a randomly chosen advertiser
    we are connected to is asked to relay a rendez-vous request.
    """
    filtered = peer.filter_type != UDPHandler.FILTER_NONE
    if filtered and len(peer.advertised_by) == 0:
        return False  # nobody available to relay a rendez-vous
    if peer.connection_tries > 2:
        return False  # three attempts made already; give up
    peer.connection_tries += 1
    if DEBUG:
        debug('Found compatible peer at %s:%d attempt %d' % (peer.address[0], peer.address[1], peer.connection_tries))
    if self.reporter:
        self.reporter.add_event('UDPPuncture', 'OCON%d:%s,%d,%s,%d,%d,%d' % (peer.connection_tries,
            peer.address[0],
            peer.address[1],
            peer.id.encode('hex'),
            peer.nat_type,
            peer.filter_type,
            peer.natfw_version))
    handshake = (UDPHandler.CONNECT + chr(0) + self.id
        + natfilter_to_byte(self.nat_type, self.filter_type)
        + chr(self.natfw_version))
    peer.sendto(handshake)
    if filtered:
        if DEBUG:
            debug('Rendez-vous needed')
        # Pick one advertiser at random and ask it to relay, but only when
        # we currently hold a connection to it.
        candidates = list(peer.advertised_by.iterkeys())
        random.shuffle(candidates)
        relay = self.connections.get(candidates[0])
        if relay:
            if self.reporter:
                self.reporter.add_event('UDPPuncture', 'OFWC:%s,%d,%s,%s' % (relay.address[0],
                    relay.address[1],
                    relay.id.encode('hex'),
                    peer.id.encode('hex')))
            relay.sendto(UDPHandler.FW_CONNECT_REQ + peer.id)
    peer.connection_state = UDPConnection.CONNECT_SENT
    peer.last_received = time.time()
    self.connections[peer.address] = peer
    return True
def delete_closed_connection(self, connection):
del self.connections[connection.address]
orig_state = connection.connection_state
connection.connection_state = UDPConnection.CONNECT_NONE
connection.last_comm = | |
3)
# ----------------------------------------------------------------------
# Machine-generated crystallographic space-group table.
# Each symmetry operation is stored as a triplet
#     (rot, trans_num, trans_den)
# where `rot` is a 3x3 integer rotation matrix and the translation is
# the elementwise fraction trans_num / trans_den.
# NOTE(review): `N` is presumably Numeric/NumPy imported as N, and
# `SpaceGroup` / `space_groups` are defined earlier in the file --
# confirm at the file top. Do not hand-edit the numeric data; verify
# any change against a trusted space-group reference.
# ----------------------------------------------------------------------
# (The next three lines complete an operation whose `rot` was assigned
#  just above this chunk, finishing group 113 'P -4 21 m'.)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
# --- Space group 114, 'P -4 21 c' ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
# --- Space group 115, 'P -4 m 2' ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
# --- Space group 116, 'P -4 c 2' ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
# --- Space group 117, 'P -4 b 2' ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
# --- Space group 118, 'P -4 n 2' ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
# --- Space group 119, 'I -4 m 2' (body-centred: second half repeats the
#     first eight operations shifted by (1/2, 1/2, 1/2)) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
# --- Space group 120, 'I -4 c 2' ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
# NOTE(review): the next four operations use trans [1,1,1]/[2,2,1],
# i.e. (1/2, 1/2, 1) -- equivalent to (1/2, 1/2, 0) modulo the lattice;
# presumably intentional in the generated data, but verify if editing.
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
# --- Space group 121, 'I -4 2 m' ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
# --- Space group 122 (operation list continues past this chunk) ---
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den | |
""" LSPEAS proximal ring pulsatility and expansion module
Module for obtaining node to node pulsatility in ECG gated CT of Anaconda
Button interaction:
ENTER: calculate distances and pulsatility between selected nodes
ESCAPE: FINISH MODEL, GET MAX PULSATILITY OVERALL, STORE TO EXCEL
"""
import os
import visvis as vv
from stentseg.utils.datahandling import select_dir, loadvol, loadmodel
import numpy as np
from stentseg.utils import PointSet, _utils_GUI
from stentseg.stentdirect import stentgraph
from visvis import Pointset # for meshes
from stentseg.stentdirect.stentgraph import create_mesh
from visvis.processing import lineToMesh, combineMeshes
from visvis import ssdf
from stentseg.utils.picker import pick3d
from stentseg.utils.visualization import DrawModelAxes
def on_key(event):
    """Key handler for the pulsatility analysis figure.

    UP/DOWN  -- show/hide the node markers and the six result labels
    'n'      -- add a clickable node at the graph point closest to the last
                picked point (SHIFT+R-click)
    ENTER    -- compute pulsatility between the selected nodes:
                2 nodes: node-to-node; 3 nodes: midpoint(edge)-to-node;
                4 nodes: midpoint-to-midpoint. Result rows accumulate in
                the global storeOutput list.
    ESCAPE   -- final visualization and write storeOutput to Excel
    """
    global node_points
    if event.key == vv.KEY_DOWN:
        # hide nodes and labels
        t1.visible, t2.visible, t3.visible = False, False, False
        t4.visible, t5.visible, t6.visible = False, False, False
        for node_point in node_points:
            node_point.visible = False
    if event.key == vv.KEY_UP:
        # show nodes and labels
        t1.visible, t2.visible, t3.visible = True, True, True
        t4.visible, t5.visible, t6.visible = True, True, True
        for node_point in node_points:
            node_point.visible = True
    if event.text == 'n':
        # add clickable point: point on graph closest to picked point (SHIFT+R-click)
        view = a.GetView()
        for node_point in node_points:
            node_point.visible = False
        snapOut = _utils_GUI.snap_picked_point_to_graph(model, vol, label)  # x,y,z
        pickedOnGraph = snapOut[0]
        n1, n2 = snapOut[1]
        pickedOnGraphIndex = snapOut[2]
        pickedOnGraphDeforms = model.edge[n1][n2]['pathdeforms'][pickedOnGraphIndex]
        model.add_node(pickedOnGraph, deforms=pickedOnGraphDeforms)
        node_points = _utils_GUI.interactive_node_points(model, scale=0.7)
        _utils_GUI.node_points_callbacks(node_points, selected_nodes, t0=t0)
        # visualize the newly added node
        point = vv.plot(pickedOnGraph[0], pickedOnGraph[1], pickedOnGraph[2],
                        mc = 'y', ms = 'o', mw = 9, alpha=0.5)
        a.SetView(view)
    if event.key == vv.KEY_ENTER:
        # BUG FIX: was `assert len(selected_nodes) == 2 or 3 or 4`, which is
        # always true (`x == 2 or 3` evaluates as `(x == 2) or 3`); check
        # membership instead so an invalid selection actually fails.
        assert len(selected_nodes) in (2, 3, 4)
        # Node_to_node analysis
        if len(selected_nodes) == 2:
            # get nodes
            selectn1 = selected_nodes[0].node
            selectn2 = selected_nodes[1].node
            # get index of nodes which are in fixed order
            n1index = selected_nodes[0].nr
            n2index = selected_nodes[1].nr
            nindex = [n1index, n2index]
            # get deforms of nodes
            n1Deforms = model.node[selectn1]['deforms']
            n2Deforms = model.node[selectn2]['deforms']
            # get pulsatility
            output = point_to_point_pulsatility(selectn1, n1Deforms, selectn2, n2Deforms)
            # update labels
            t1.text = '\b{Node pair}: %i - %i' % (nindex[0], nindex[1])
            t2.text = 'Node-to-node Min: %1.2f mm' % output[0][0]
            t3.text = 'Node-to-node Max: %1.2f mm' % output[4][0]
            t4.text = 'Node-to-node Median: %1.2f mm' % output[2]
            t5.text = 'Node-to-node Q1 and Q3: %1.2f | %1.2f mm' % (output[1], output[3])
            t6.text = '\b{Node-to-node Pulsatility: %1.2f mm}' % (output[5][0] )
            t1.visible, t2.visible, t3.visible = True, True, True
            t4.visible, t5.visible, t6.visible = True, True, True
            # Store output including index/nr of nodes
            output.insert(0, [n1index])  # at the start
            output.insert(1, [n2index])
            output[8].insert(0, [n1index])
            output[9].insert(0, [n2index])
            if output not in storeOutput:
                storeOutput.append(output)
        # Midpoint_to_node analysis
        if len(selected_nodes) == 3:
            # find the edge selected to get midpoint
            selected_nodes2 = selected_nodes.copy()
            for node1 in selected_nodes:
                selected_nodes2.remove(node1)  # check combination once and not to self
                for node2 in selected_nodes2:
                    if model.has_edge(node1.node, node2.node):
                        # get midpoint of edge and its deforms
                        output = get_midpoint_deforms_edge(model, node1.node, node2.node)
                        break  # edge found, to first for loop
            # NOTE(review): if no pair of the three selected nodes shares an
            # edge, `output` is unbound here and a NameError follows --
            # presumably a user-selection error; confirm intended handling.
            # get index of nodepair and midpoint and its deforms
            nodepair1 = output[0]
            midpoint1IndexPath = output[1]
            midpoint1 = output[2]
            midpoint1Deforms = output[3]
            # get node
            for i, node in enumerate(selected_nodes):
                if node.nr not in nodepair1:
                    n3 = node
                    break
            # get deforms for node
            n3Deforms = model.node[n3.node]['deforms']
            # get pulsatility; first selected goes first in output
            if i > 0:  # single node was not selected first
                output2 = point_to_point_pulsatility(midpoint1,
                    midpoint1Deforms, n3.node, n3Deforms)
            else:
                output2 = point_to_point_pulsatility(n3.node, n3Deforms,
                    midpoint1, midpoint1Deforms)
            # visualize midpoint
            view = a.GetView()
            point = vv.plot(midpoint1[0], midpoint1[1], midpoint1[2],
                            mc = 'm', ms = 'o', mw = 8, alpha=0.5)
            a.SetView(view)
            # update labels
            t1.text = '\b{Node pairs}: (%i %i) - (%i)' % (nodepair1[0],nodepair1[1],n3.nr)
            t2.text = 'Midpoint-to-node Min: %1.2f mm' % output2[0][0]
            t3.text = 'Midpoint-to-node Max: %1.2f mm' % output2[4][0]
            t4.text = 'Midpoint-to-node Median: %1.2f mm' % output2[2]
            t5.text = 'Midpoint-to-node Q1 and Q3: %1.2f | %1.2f mm' % (output2[1], output2[3])
            t6.text = '\b{Midpoint-to-node Pulsatility: %1.2f mm}' % (output2[5][0])
            t1.visible, t2.visible, t3.visible = True, True, True
            t4.visible, t5.visible, t6.visible = True, True, True
            # Store output including index nodes
            if i > 0:
                output2.insert(0, nodepair1)  # at the start
                output2.insert(1, [n3.nr])
                output2[8].insert(0, midpoint1IndexPath)
                output2[9].insert(0, [n3.nr])
            else:
                output2.insert(0, [n3.nr])  # at the start
                output2.insert(1, nodepair1)
                output2[8].insert(0, [n3.nr])
                output2[9].insert(0, midpoint1IndexPath)
            if output2 not in storeOutput:
                storeOutput.append(output2)
        # Midpoint_to_midpoint analysis
        if len(selected_nodes) == 4:
            outputs = list()
            # get midpoints for the two edges; nodepairs from order selected
            for i in (0, 2):
                n1 = selected_nodes[i].node
                n2 = selected_nodes[i+1].node
                assert model.has_edge(n1, n2)
                # get midpoint of edge and its deforms
                output = get_midpoint_deforms_edge(model, n1, n2)
                midpoint = output[2]
                # store for both edges
                outputs.append(output)
                # visualize midpoint
                view = a.GetView()
                point = vv.plot(midpoint[0], midpoint[1], midpoint[2],
                                mc = 'm', ms = 'o', mw = 8, alpha=0.5)
                a.SetView(view)
            assert len(outputs) == 2  # two midpoints should be found
            # get midpoints and deforms
            nodepair1 = outputs[0][0]
            midpoint1IndexPath = outputs[0][1]
            midpoint1 = outputs[0][2]
            midpoint1Deforms = outputs[0][3]
            nodepair2 = outputs[1][0]
            midpoint2IndexPath = outputs[1][1]
            midpoint2 = outputs[1][2]
            midpoint2Deforms = outputs[1][3]
            # get pulsatility midp to midp
            output2 = point_to_point_pulsatility(midpoint1,
                midpoint1Deforms, midpoint2, midpoint2Deforms)
            # update labels
            t1.text = '\b{Node pairs}: (%i %i) - (%i %i)' % (nodepair1[0], nodepair1[1],
                                                             nodepair2[0], nodepair2[1])
            t2.text = 'Midpoint-to-midpoint Min: %1.2f mm' % output2[0][0]
            t3.text = 'Midpoint-to-midpoint Max: %1.2f mm' % output2[4][0]
            t4.text = 'Midpoint-to-midpoint Median: %1.2f mm' % output2[2]
            t5.text = 'Midpoint-to-midpoint Q1 and Q3: %1.2f | %1.2f mm' % (output2[1], output2[3])
            t6.text = '\b{Midpoint-to-midpoint Pulsatility: %1.2f mm}' % (output2[5][0])
            t1.visible, t2.visible, t3.visible = True, True, True
            t4.visible, t5.visible, t6.visible = True, True, True
            # Store output including nodepairs of the midpoints
            output2.insert(0, nodepair1)  # indices at the start
            output2.insert(1, nodepair2)
            output2[8].insert(0, midpoint1IndexPath)
            output2[9].insert(0, midpoint2IndexPath)
            if output2 not in storeOutput:
                storeOutput.append(output2)
        # Visualize analyzed nodes and deselect
        for node in selected_nodes:
            node.faceColor = (0,1,0,0.8)  # make green when analyzed
        selected_nodes.clear()
    if event.key == vv.KEY_ESCAPE:
        # FINISH, STORE TO EXCEL
        # visualize
        view = a.GetView()
        t = vv.volshow(vol, clim=clim, renderStyle='mip')
        # show mesh of model without deform coloring
        modelmesh = create_mesh(model, 0.4)  # Param is thickness
        m = vv.mesh(modelmesh)
        m.faceColor = (0,1,0,1)  # green
        a.SetView(view)
        # Store to EXCEL
        storeOutputToExcel(storeOutput, exceldir)
        for node_point in node_points:
            node_point.visible = False  # show that store is ready
def get_midpoint_deforms_edge(model, n1, n2):
    """ Get midpoint of a given edge.

    Parameters:
        model: graph model whose edges carry a 'path' (Nx3 point array) and
            'pathdeforms' (per-path-point deformation vectors).
            -- assumes model.edge[n1][n2] exists; TODO confirm caller guarantees this
        n1, n2: the two end nodes of the edge, as (x, y, z) coordinate tuples.

    Returns: [nindex, midpointIndex, midpoint, midpointDeforms] where
        nindex          - indices of n1 and n2 in sorted(model.nodes())
                          (sorted to match node_point numbering elsewhere),
        midpointIndex   - list with one path index, or two indices when the
                          midpoint was averaged from two neighboring points,
        midpoint        - midpoint position (plain array, even if the path
                          stores PointSet points),
        midpointDeforms - deformations at the midpoint (averaged when two
                          path points were used).
    """
    # get index of nodes which are in fixed order
    n1index = sorted(model.nodes()).index(n1) # sort to match node_point numbering
    n2index = sorted(model.nodes()).index(n2) # sort to match node_point numbering
    nindex = [n1index, n2index]
    # get path
    edge = model.edge[n1][n2]
    path = edge['path']
    # find point closest to mid of line n1 to n2
    mid = (n1[0]+n2[0])/2, (n1[1]+n2[1])/2, (n1[2]+n2[2])/2
    # define vector from points to mid
    v = path - mid
    # Euclidean distance of every path point to the straight-line midpoint
    dist_to_mid = ( (v[:,0]**2 + v[:,1]**2 + v[:,2]**2)**0.5 ).reshape(-1,1)
    # get point on path closest to mid
    midpointIndex = list(dist_to_mid).index(dist_to_mid.min() ) # index on path
    midpoint = path[midpointIndex]
    # get deforms of midpoint
    midpointDeforms = model.edge[n1][n2]['pathdeforms'][midpointIndex]
    if (len(path) % 2 == 0): #even; middle of 2 closest pathpoints to get actual mid
        # exclude the first hit so the next min() finds the second-closest point
        dist_to_mid[midpointIndex] = dist_to_mid.max()+100 # replace for distance > max
        midpointIndex2 = list(dist_to_mid).index(dist_to_mid.min() ) # index on path
        midpoint2 = path[midpointIndex2]
        # if neighbors on this path, find mid point and deforms
        if abs(midpointIndex - midpointIndex2) == 1:
            midpoint = (midpoint + midpoint2) / 2
            midpointDeforms2 = model.edge[n1][n2]['pathdeforms'][midpointIndex2]
            midpointDeforms = (midpointDeforms + midpointDeforms2) / 2
            midpointIndex = [midpointIndex,midpointIndex2]
        else: # take only closest as midpoint
            midpointIndex = [midpointIndex] # return as array, similar to when even pathlength
    else: # odd, expected one pathpoint closest to mid of line
        midpointIndex = [midpointIndex] # return as array, similar to when even pathlength
    midpoint = np.asarray(tuple(midpoint.flat)) if isinstance(midpoint, PointSet) else midpoint # if PointSet make array
    return [nindex, midpointIndex, midpoint, midpointDeforms]
def point_to_point_pulsatility(point1, point1Deforms,
point2, point2Deforms):
""" Analyze pulsatility peak_to_peak or valley_to_valley | |
<reponame>quantify-os/quantify-core<gh_stars>1-10
# ---
# jupyter:
# jupytext:
# cell_markers: '"""'
# formats: py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %%
rst_conf = {"jupyter_execute_options": [":hide-code:"]}
# pylint: disable=line-too-long
# pylint: disable=wrong-import-order
# pylint: disable=wrong-import-position
# pylint: disable=pointless-string-statement
# pylint: disable=pointless-statement
# pylint: disable=invalid-name
# pylint: disable=expression-not-assigned
# pylint: disable=duplicate-code
# %% [raw]
"""
.. _sec-dataset-examples:
Quantify dataset - examples
===========================
.. seealso::
The complete source code of this tutorial can be found in
.. NB .py is from notebook_to_sphinx_extension
:jupyter-download:notebook:`Quantify dataset - examples.py`
:jupyter-download:script:`Quantify dataset - examples.py`
.. admonition:: Imports and auxiliary utilities
:class: dropdown
"""
# %%
rst_conf = {"indent": " "}
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from rich import pretty
import quantify_core.data.dataset_attrs as dattrs
from quantify_core.analysis.calibration import rotate_to_calibrated_axis
from quantify_core.analysis.fitting_models import exp_decay_func
from quantify_core.data import handling as dh
from quantify_core.utilities import dataset_examples
from quantify_core.utilities.examples_support import (
mk_iq_shots,
mk_trace_for_iq_shot,
mk_trace_time,
round_trip_dataset,
)
from quantify_core.utilities.inspect_utils import display_source_code
from quantify_core.visualization.mpl_plotting import (
plot_complex_points,
plot_xr_complex,
plot_xr_complex_on_plane,
)
pretty.install()
dh.set_datadir(Path.home() / "quantify-data") # change me!
# %% [raw]
"""
In this page we explore a series of datasets that comply with the :ref:`Quantify dataset specification <dataset-spec>`.
2D dataset example
------------------
We use the :func:`~quantify_core.utilities.dataset_examples.mk_two_qubit_chevron_dataset`
to generate our exemplary dataset. Its source code is conveniently displayed in the
drop down below.
"""
# %% [raw]
"""
.. admonition:: Generate a 2D dataset
:class: dropdown
"""
# %%
rst_conf = {"indent": " "}
display_source_code(dataset_examples.mk_two_qubit_chevron_dataset)
# %%
dataset = dataset_examples.mk_two_qubit_chevron_dataset()
assert dataset == round_trip_dataset(dataset) # confirm read/write
dataset
# %% [raw]
"""
The data within this dataset can be easily visualized using xarray facilities,
however we first need to convert the Quantify dataset to a "gridded" version, as
shown below.
Since our dataset contains multiple repetitions of the same experiment, it is convenient
to visualize them on different plots.
"""
# %%
dataset_gridded = dh.to_gridded_dataset(
dataset,
dimension="main_dim",
coords_names=dattrs.get_main_coords(dataset),
)
dataset_gridded.pop_q0.plot.pcolormesh(x="amp", col="repetitions")
_ = dataset_gridded.pop_q1.plot.pcolormesh(x="amp", col="repetitions")
# %% [raw]
"""
In xarray, among other features, it is possible to average along a dimension which can
be very convenient to average out some of the noise:
"""
# %%
_ = dataset_gridded.pop_q0.mean(dim="repetitions").plot(x="amp")
# %% [raw]
"""
A repetitions dimension can be indexed by a coordinate such that we can have some
specific label for each of our repetitions. To showcase this, we will modify the previous
dataset by merging it with a dataset containing the relevant extra information.
"""
# %%
coord_dims = ("repetitions",)
coord_values = ["A", "B", "C", "D", "E"]
dataset_indexed_rep = xr.Dataset(coords=dict(repetitions=(coord_dims, coord_values)))
dataset_indexed_rep
# %%
# merge with the previous dataset
dataset_rep = dataset_gridded.merge(dataset_indexed_rep, combine_attrs="drop_conflicts")
assert dataset_rep == round_trip_dataset(dataset_rep) # confirm read/write
dataset_rep
# %% [raw]
"""
Now we can select a specific repetition by its coordinate, in this case a string label.
"""
# %%
_ = dataset_rep.pop_q0.sel(repetitions="E").plot(x="amp")
# %% [raw]
"""
T1 dataset examples
-------------------
The T1 experiment is one of the most common quantum computing experiments.
Here we explore how the datasets for such an experiment, for a transmon qubit, can be
stored using the Quantify dataset with increasing levels of data detail.
We start with the most simple format that contains only processed (averaged) measurements
and finish with a dataset containing the raw digitized signals from the transmon readout
during a T1 experiment.
"""
# %% [raw]
"""
.. admonition:: Mock data utilities
:class: dropdown
We use a few auxiliary functions to generate, manipulate and plot the data of the
examples that follow:
- :func:`quantify_core.utilities.examples_support.mk_iq_shots`
- :func:`quantify_core.utilities.examples_support.mk_trace_time`
- :func:`quantify_core.utilities.examples_support.mk_trace_for_iq_shot`
- :func:`quantify_core.analysis.fitting_models.exp_decay_func`
Below you can find the source-code of the most important ones and a few usage
examples in order to gain some intuition for the mock data.
"""
# %%
rst_conf = {"indent": " "}
for func in (mk_iq_shots, mk_trace_time, mk_trace_for_iq_shot):
display_source_code(func)
# %%
rst_conf = {"indent": " "}
ground = -0.2 + 0.65j
excited = 0.7 - 0.4j
centers = ground, excited
sigmas = [0.1] * 2
shots = mk_iq_shots(
num_shots=256,
sigmas=sigmas,
centers=centers,
probabilities=[0.4, 1 - 0.4],
)
plt.hexbin(shots.real, shots.imag)
plt.xlabel("I")
plt.ylabel("Q")
_ = plot_complex_points(centers, ax=plt.gca())
# %%
rst_conf = {"indent": " "}
time = mk_trace_time()
trace = mk_trace_for_iq_shot(shots[0])
fig, ax = plt.subplots(1, 1, figsize=(12, 12 / 1.61 / 2))
ax.plot(time * 1e6, trace.imag, ".-", label="I-quadrature")
ax.plot(time * 1e6, trace.real, ".-", label="Q-quadrature")
ax.set_xlabel("Time [µs]")
ax.set_ylabel("Amplitude [V]")
_ = ax.legend()
# %% [raw]
"""
First we define a few parameters of our mock qubit and mock data acquisition.
"""
# %%
# parameters of our qubit model
tau = 30e-6
ground = -0.2 + 0.65j # ground state on the IQ-plane
excited = 0.7 - 0.4j # excited state on the IQ-plane
centers = ground, excited
sigmas = [0.1] * 2 # sigma, NB in general not the same for both state
# mock of data acquisition configuration
# NB usually at least 1000+ shots are taken, here we use less for faster code execution
num_shots = 256
# time delays between exciting the qubit and measuring its state
t1_times = np.linspace(0, 120e-6, 30)
# NB these are the ideal probabilities from repeating the measurement many times for a
# qubit with a lifetime given by tau
probabilities = exp_decay_func(t=t1_times, tau=tau, offset=0, n_factor=1, amplitude=1)
# Ideal experiment result
plt.ylabel("|1> probability")
plt.suptitle("Typical processed data of a T1 experiment")
plt.plot(t1_times * 1e6, probabilities, ".-")
_ = plt.xlabel("Time [µs]")
# %%
# convenience dict with the mock parameters
mock_conf = dict(
num_shots=num_shots,
centers=centers,
sigmas=sigmas,
t1_times=t1_times,
probabilities=probabilities,
)
# %% [raw]
"""
T1 experiment averaged
~~~~~~~~~~~~~~~~~~~~~~
In this first example we generate the individual measurement shots and average it,
similar to what some instruments are capable of doing directly in the hardware.
Here is how we store this data in the dataset along with the coordinates of these
datapoints:
"""
# %% [raw]
"""
.. admonition:: Generate dataset
:class: dropdown, toggle-shown
"""
# %%
rst_conf = {"indent": " "}
display_source_code(dataset_examples.mk_t1_av_dataset)
# %%
dataset = dataset_examples.mk_t1_av_dataset(**mock_conf)
assert dataset == round_trip_dataset(dataset) # confirm read/write
dataset
# %%
dataset.q0_iq_av.shape, dataset.q0_iq_av.dtype
# %%
dataset_gridded = dh.to_gridded_dataset(
dataset,
dimension="main_dim",
coords_names=dattrs.get_main_coords(dataset),
)
dataset_gridded
# %% [raw]
"""
.. admonition:: Plotting utilities
:class: dropdown
"""
# %%
rst_conf = {"indent": " "}
display_source_code(plot_xr_complex)
display_source_code(plot_xr_complex_on_plane)
# %%
plot_xr_complex(dataset_gridded.q0_iq_av)
fig, ax = plot_xr_complex_on_plane(dataset_gridded.q0_iq_av)
_ = plot_complex_points(centers, ax=ax)
# %% [raw]
"""
T1 experiment averaged with calibration points
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is common for many experiments to require calibration data in order to interpret the
results. Often, these calibration datapoints have different array shapes. E.g. it can be
just two simple datapoints corresponding to the ground and excited states of our
transmon.
To accommodate this data in the dataset we make use of a secondary dimension along which
the variables and their coordinates will lie.
Additionally, since the secondary variable and coordinate used for calibration can have
arbitrary names and relate to other variables in more complex ways, we specify this
relationship in the dataset attributes
(see :class:`~quantify_core.data.dataset_attrs.QDatasetIntraRelationship`).
This information can be used later, for example, to run an appropriate analysis on this
dataset.
"""
# %% [raw]
"""
.. admonition:: Generate dataset
:class: dropdown, toggle-shown
"""
# %%
rst_conf = {"indent": " "}
display_source_code(dataset_examples.mk_t1_av_with_cal_dataset)
# %%
dataset = dataset_examples.mk_t1_av_with_cal_dataset(**mock_conf)
assert dataset == round_trip_dataset(dataset) # confirm read/write
dataset
# %%
dattrs.get_main_dims(dataset), dattrs.get_secondary_dims(dataset)
# %%
dataset.relationships
# %% [raw]
"""
As before the coordinates can be set to index the variables that lie along the same
dimensions:
"""
# %%
dataset_gridded = dh.to_gridded_dataset(
dataset,
dimension="main_dim",
coords_names=dattrs.get_main_coords(dataset),
)
dataset_gridded = dh.to_gridded_dataset(
dataset_gridded,
dimension="cal_dim",
coords_names=dattrs.get_secondary_coords(dataset_gridded),
)
dataset_gridded
# %%
fig = plt.figure(figsize=(8, 5))
ax = plt.subplot2grid((1, 10), (0, 0), colspan=9, fig=fig)
plot_xr_complex(dataset_gridded.q0_iq_av, ax=ax)
ax_calib = plt.subplot2grid((1, 10), (0, 9), colspan=1, fig=fig, sharey=ax)
for i, color in zip(
range(2), ["C0", "C1"]
): # plot each calibration point with same color
dataset_gridded.q0_iq_av_cal.real[i : i + 1].plot.line(
marker="o", ax=ax_calib, linestyle="", color=color
)
dataset_gridded.q0_iq_av_cal.imag[i : i + 1].plot.line(
marker="o", ax=ax_calib, linestyle="", color=color
)
ax_calib.yaxis.set_label_position("right")
ax_calib.yaxis.tick_right()
fig, ax = plot_xr_complex_on_plane(dataset_gridded.q0_iq_av)
_ = plot_complex_points(dataset_gridded.q0_iq_av_cal.values, ax=ax)
# %% [raw]
"""
We can use the calibration points to normalize the data and obtain the typical T1 decay.
"""
# %% [raw]
"""
.. admonition:: Data rotation and normalization utilities
:class: dropdown
The normalization to the calibration points can be achieved as follows.
Several of the
:mod:`single-qubit time-domain analyses <quantify_core.analysis.single_qubit_timedomain>`
provided use this under the hood.
The result is that most of the information will now be contained within the same
quadrature.
"""
# %%
rst_conf = {"indent": " "}
rotated_and_normalized = rotate_to_calibrated_axis(
dataset_gridded.q0_iq_av.values, *dataset_gridded.q0_iq_av_cal.values
)
rotated_and_normalized_da = xr.DataArray(dataset_gridded.q0_iq_av)
rotated_and_normalized_da.values = rotated_and_normalized
rotated_and_normalized_da.attrs["long_name"] = "|1> Population"
rotated_and_normalized_da.attrs["units"] = ""
_ = plot_xr_complex(rotated_and_normalized_da)
# %% [raw]
"""
T1 experiment storing all shots
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Now we will include in the dataset all the single qubit states (shot) for each
individual measurement.
"""
# %% [raw]
"""
.. admonition:: Generate dataset
:class: dropdown, toggle-shown
"""
# %%
rst_conf = {"indent": " "}
display_source_code(dataset_examples.mk_t1_shots_dataset)
# %%
dataset = dataset_examples.mk_t1_shots_dataset(**mock_conf)
dataset
# %%
dataset_gridded = dh.to_gridded_dataset(
dataset,
dimension="main_dim",
coords_names=dattrs.get_main_coords(dataset),
)
dataset_gridded = dh.to_gridded_dataset(
dataset_gridded,
dimension="cal_dim",
coords_names=dattrs.get_secondary_coords(dataset_gridded),
)
dataset_gridded
# %% [raw]
"""
In this dataset we have both the averaged values and all the shots. The averaged values
can be plotted in the same way as before.
"""
# %%
_ = plot_xr_complex(dataset_gridded.q0_iq_av)
_, ax = plot_xr_complex_on_plane(dataset_gridded.q0_iq_av)
_ = plot_complex_points(dataset_gridded.q0_iq_av_cal.values, ax=ax)
# %% [raw]
"""
Here we focus on inspecting how the individual shots are distributed on the IQ plane
for some particular `Time` values.
Note that we are plotting the calibration points as well.
"""
# %%
chosen_time_values = [
    t1_times[1],  # second value selected otherwise we won't see both centers
    t1_times[len(t1_times) // 5],  # NOTE(review): this index is ~1/5 into the sweep, not "close to the end" — confirm intended index
]
for t_example in chosen_time_values:
| |
to server and vice-versa",
"Upgrading OpenSSL to latest versions will mitigate this issue. This resource gives more information about the vulnerability and the associated remediation. http://ccsinjection.lepidum.co.jp/"],
[17, "With this vulnerability the attacker will be able to perform a MiTM attack and thus compromising the confidentiality factor.",
"Upgrading OpenSSL to latest version will mitigate this issue. Versions prior to 1.1.0 is prone to this vulnerability. More information can be found in this resource. https://bobcares.com/blog/how-to-fix-sweet32-birthday-attacks-vulnerability-cve-2016-2183/"],
[18, "With the LogJam attack, the attacker will be able to downgrade the TLS connection which allows the attacker to read and modify any data passed over the connection.",
"Make sure any TLS libraries you use are up-to-date, that servers you maintain use 2048-bit or larger primes, and that clients you maintain reject Diffie-Hellman primes smaller than 1024-bit. More information can be found in this resource. https://weakdh.org/"],
[19, "Allows remote attackers to cause a denial of service (crash), and possibly obtain sensitive information in applications that use OpenSSL, via a malformed ClientHello handshake message that triggers an out-of-bounds memory access.",
" OpenSSL versions 0.9.8h through 0.9.8q and 1.0.0 through 1.0.0c are vulnerable. It is recommended to upgrade the OpenSSL version. More resource and information can be found here. https://www.openssl.org/news/secadv/20110208.txt"],
[20, "Otherwise termed as BREACH atack, exploits the compression in the underlying HTTP protocol. An attacker will be able to obtain email addresses, session tokens, etc from the TLS encrypted web traffic.",
"Turning off TLS compression does not mitigate this vulnerability. First step to mitigation is to disable Zlib compression followed by other measures mentioned in this resource. http://breachattack.com/"],
[21, "Otherwise termed as Plain-Text Injection attack, which allows MiTM attackers to insert data into HTTPS sessions, and possibly other types of sessions protected by TLS or SSL, by sending an unauthenticated request that is processed retroactively by a server in a post-renegotiation context.",
"Detailed steps of remediation can be found from these resources. https://securingtomorrow.mcafee.com/technical-how-to/tips-securing-ssl-renegotiation/ https://www.digicert.com/news/2011-06-03-ssl-renego/ "],
[22, "This vulnerability allows attackers to steal existing TLS sessions from users.",
"Better advice is to disable session resumption. To harden session resumption, follow this resource that has some considerable information. https://wiki.crashtest-security.com/display/KB/Harden+TLS+Session+Resumption"],
[23, "This has nothing to do with security risks, however attackers may use this unavailability of load balancers as an advantage to leverage a denial of service attack on certain services or on the whole application itself.",
"Load-Balancers are highly encouraged for any web application. They improve performance times as well as data availability on during times of server outage. To know more information on load balancers and setup, check this resource. https://www.digitalocean.com/community/tutorials/what-is-load-balancing"],
[24, "An attacker can forwarded requests that comes to the legitimate URL or web application to a third party address or to the attacker's location that can serve malware and affect the end user's machine.",
"It is highly recommended to deploy DNSSec on the host target. Full deployment of DNSSEC will ensure the end user is connecting to the actual web site or other service corresponding to a particular domain name. For more information, check this resource. https://www.cloudflare.com/dns/dnssec/how-dnssec-works/"],
[25, "Attackers may find considerable amount of information from these files. There are even chances attackers may get access to critical information from these files.",
"It is recommended to block or restrict access to these files unless necessary."],
[26, "Attackers may find considerable amount of information from these directories. There are even chances attackers may get access to critical information from these directories.",
"It is recommended to block or restrict access to these directories unless necessary."],
[27, "May not be SQLi vulnerable. An attacker will be able to know that the host is using a backend for operation.",
"Banner Grabbing should be restricted and access to the services from outside would should be made minimum."],
[28, "An attacker will be able to steal cookies, deface web application or redirect to any third party address that can serve malware.",
"Input validation and Output Sanitization can completely prevent Cross Site Scripting (XSS) attacks. XSS attacks can be mitigated in future by properly following a secure coding methodology. The following comprehensive resource provides detailed information on fixing this vulnerability. https://www.owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet"],
[29, "SSL related vulnerabilities breaks the confidentiality factor. An attacker may perform a MiTM attack, intrepret and eavesdrop the communication.",
"Proper implementation and upgraded version of SSL and TLS libraries are very critical when it comes to blocking SSL related vulnerabilities."],
[30, "Particular Scanner found multiple vulnerabilities that an attacker may try to exploit the target.",
"Refer to Vulnerous-Vulnerability-Report to view the complete information of the vulnerability, once the scan gets completed."],
[31, "Attackers may gather more information from subdomains relating to the parent domain. Attackers may even find other services from the subdomains and try to learn the architecture of the target. There are even chances for the attacker to find vulnerabilities as the attack surface gets larger with more subdomains discovered.",
"It is sometimes wise to block sub domains like development, staging to the outside world, as it gives more information to the attacker about the tech stack. Complex naming practices also help in reducing the attack surface as attackers find hard to perform subdomain bruteforcing through dictionaries and wordlists."],
[32, "Through this deprecated protocol, an attacker may be able to perform MiTM and other complicated attacks.",
"It is highly recommended to stop using this service and it is far outdated. SSH can be used to replace TELNET. For more information, check this resource https://www.ssh.com/ssh/telnet"],
[33, "This protocol does not support secure communication and there are likely high chances for the attacker to eavesdrop the communication. Also, many FTP programs have exploits available in the web such that an attacker can directly crash the application or either get a SHELL access to that target.",
"Proper suggested fix is use an SSH protocol instead of FTP. It supports secure communication and chances for MiTM attacks are quite rare."],
[34, "The StuxNet is level-3 worm that exposes critical information of the target organization. It was a cyber weapon that was designed to thwart the nuclear intelligence of Iran. Seriously wonder how it got here? Hope this isn't a false positive Nmap ;)",
"It is highly recommended to perform a complete rootkit scan on the host. For more information refer to this resource. https://www.symantec.com/security_response/writeup.jsp?docid=2010-071400-3123-99&tabid=3"],
[35, "WebDAV is supposed to contain multiple vulnerabilities. In some case, an attacker may hide a malicious DLL file in the WebDAV share however, and upon convincing the user to open a perfectly harmless and legitimate file, execute code under the context of that user",
"It is recommended to disable WebDAV. Some critical resource regarding disbling WebDAV can be found on this URL. https://www.networkworld.com/article/2202909/network-security/-webdav-is-bad---says-security-researcher.html"],
[36, "Attackers always do a fingerprint( of any server before they launch an attack. Fingerprint(ing gives them information about the server type, content- they are serving, last modification times etc, this gives an attacker to learn more information about the target",
"A good practice is to obfuscate the information to outside world. Doing so, the attackers will have tough time understanding the server's tech stack and therefore leverage an attack."],
[37, "Attackers mostly try to render web applications or service useless by flooding the target, such that blocking access to legitimate users. This may affect the business of a company or organization as well as the reputation",
"By ensuring proper load balancers in place, configuring rate limits and multiple connection restrictions, such attacks can be drastically mitigated."],
[38, "Intruders will be able to remotely include shell files and will be able to access the core file system or they will be able to read all the files as well. There are even higher chances for the attacker to remote execute code on the file system.",
"Secure code practices will mostly prevent LFI, RFI and RCE | |
#! /usr/bin/python3.9
"Python script containing functions and classes for general cryptographic use."
# importing libraries
import os
import rsa
import string
import secrets
import hashlib as hash
from random import shuffle
from Crypto.Cipher import AES
from Crypto.Cipher import Blowfish
from Crypto.Util.strxor import strxor
from Crypto.Protocol.KDF import PBKDF2
from cryptography.fernet import Fernet
from platform import system as get_platform
from Crypto.Util.number import getPrime, isPrime, GCD
from Crypto.Util.RFC1751 import english_to_key, key_to_english
from Crypto.Cipher import Salsa20, ChaCha20, ChaCha20_Poly1305, DES, DES3, ARC2, ARC4, CAST
from .file_encryption import AES_encrypt_file, AES_decrypt_file, BLO_encrypt_file, BLO_decrypt_file
class NoKeyError(Exception):
    """Raised when a cipher object is used without any key having been provided."""
class BadKeyError(Exception):
    """Raised when the supplied key does not match the encryption key signature."""
class InvalidCipherArgument(Exception):
    """Raised when a required parameter for a cipher operation is missing."""
class InvalidKeyArgument(Exception):
    """Raised when a key-generation function receives an incorrect parameter,
    or a parameter that does not meet the requirements of the underlying
    key-generation routine."""
class UnknownError(Exception):
    """Raised when an unknown error occurs during encryption or decryption."""
def hash_(ToHash, hash_code:str, return_hex=True, return_length = 256): # tested
    """
    Miscellaneous function for implementing hash algorithms.
    Parameters:
        `ToHash`: The data to be hashed; if not bytes, it is converted to bytes
            (strings are UTF-8 encoded, other objects via `str()`).
        `hash_code`: A string (case-insensitive) indicating which hashing
            algorithm to use. Currently supported hashes are:
            `'SHA224'`, `'SHA256'`, `'SHA384'`, `'SHA512'`, `'MD5'`, `'SHA1'`,
            `'SHA3_224'`, `'SHA3_256'`, `'SHA3_384'`, `'SHA3_512'`,
            `'BLAKE2b'`, `'BLAKE2s'`, `'SHAKE_128'`, `'SHAKE_256'`.
        `return_hex`: Whether the digest is returned as a hexadecimal string
            (True, default) or as raw bytes (False).
        `return_length`: Number of bytes to produce; used only by the SHAKE
            (variable-length) algorithms.
    Returns: the digest in the requested representation, or None when
    `hash_code` names no supported algorithm (preserved legacy behavior of the
    original if/elif chain, which fell through without returning).
    """
    # Convert to bytes only when needed; the old unconditional
    # bytes(ToHash, 'utf-8') raised TypeError when ToHash was already bytes.
    if isinstance(ToHash, str):
        ToHash = ToHash.encode('utf-8')
    elif not isinstance(ToHash, (bytes, bytearray)):
        ToHash = str(ToHash).encode('utf-8')
    hash_code = hash_code.upper()
    # Dispatch tables replace the long if/elif chain.
    fixed_length = {
        "SHA224": hash.sha224,
        "SHA256": hash.sha256,
        "SHA384": hash.sha384,
        "SHA512": hash.sha512,
        "MD5": hash.md5,
        "SHA1": hash.sha1,
        "SHA3_224": hash.sha3_224,
        "SHA3_256": hash.sha3_256,
        "SHA3_384": hash.sha3_384,
        "SHA3_512": hash.sha3_512,
        "BLAKE2B": hash.blake2b,
        "BLAKE2S": hash.blake2s,
    }
    # SHAKE digests take an explicit output length.
    variable_length = {
        "SHAKE_128": hash.shake_128,
        "SHAKE_256": hash.shake_256,
    }
    if hash_code in fixed_length:
        hash_obj = fixed_length[hash_code](ToHash)
        if return_hex is False:
            return hash_obj.digest()
        return hash_obj.hexdigest()
    if hash_code in variable_length:
        hash_obj = variable_length[hash_code](ToHash)
        if return_hex is False:
            return hash_obj.digest(return_length)
        return hash_obj.hexdigest(return_length)
    return None  # unknown algorithm name — same observable result as before
def random_choice(given_list:list): # tested
    """
    Pick one item from `given_list` using a cryptographically secure RNG.
    Parameters:
        `given_list`: The list to choose from.
    Returns: the randomly chosen item.
    """
    return secrets.choice(given_list)
def compare_hashes(hash_1: str, hash_2: str) -> bool: # tested
    """
    Hash comparison function.
    Compares two strings in constant time (via `secrets.compare_digest`) to
    reduce the efficacy of timing attacks.
    Parameters:
        `hash_1`: The first hash string.
        `hash_2`: The hash string to compare against the first.
    Returns: True when the two strings are equal, False otherwise.
    """
    # NOTE: the original signature used `hash_1=str, hash_2=str`, which bound
    # the builtin `str` *type* as a default value (a botched annotation); any
    # call relying on those defaults raised TypeError inside compare_digest,
    # so requiring both arguments is backward-compatible.
    return secrets.compare_digest(hash_1, hash_2)
def token_generate(size:int, return_type="HEX"): # tested
    """
    Simplified wrapper around the `secrets` token helpers.
    Parameters:
        `size`: the number of random bytes in the token to be generated.
        `return_type`: token flavor, one of `'HEX'`, `'BYTES'`, or `'URL'`
            (case-insensitive).
    Returns: the generated token, or the integer 1 when `return_type` is not
    one of the recognized modes.
    """
    mode = return_type.upper()  # normalize once instead of per-branch
    if mode == "HEX":
        return secrets.token_hex(size)
    if mode == "BYTES":
        return secrets.token_bytes(size)
    if mode == "URL":
        return secrets.token_urlsafe(size)
    return 1  # unrecognized mode — legacy error code
def generate_password(length:int) -> str: # tested
    """
    Generate and return a random password of `length` characters.
    Characters are drawn from ASCII letters, digits and "!@#$%^&*()" using the
    cryptographically secure `secrets` module.
    Parameters:
        `length`: number of characters in the returned password.
    Returns: the generated password string.
    """
    characters = list(string.ascii_letters + string.digits + "!@#$%^&*()")
    shuffle(characters)
    # secrets.choice provides the security; the shuffles are harmless extra mixing
    password = [secrets.choice(characters) for _ in range(length)]
    shuffle(password)
    # BUG FIX: the original line `final_password = <PASSWORD>(password)` was
    # broken (unresolvable name); joining the chosen characters is the intent.
    return "".join(password)
def sec_delete(file_path:str, random_fill = True, null_fill = True, passes = 35) -> int: # tested
    """
    Secure file deletion function with overwriting and null filling.
    It is best practice to combine this with another secure file deletion protocol.
    Parameters:
        `file_path`: path of the file to destroy (must live under /home/ and
            not be a hidden file).
        `random_fill`: overwrite the file `passes` times with random bytes.
        `null_fill`: overwrite the file `passes` times with null bytes.
        `passes`: number of overwrite passes for each fill mode.
    Return codes:
        1: Attempting to access a path outside /home or a hidden file.
        2: Attempt to pass a dangerous command to the command line.
        3: Platform is not Linux.
        0: File successfully deleted.
    """
    file_path = os.path.abspath(file_path)
    if "/home/" not in file_path or "/." in file_path:
        return 1
    elif "sudo rm -rf /" in file_path:
        return 2
    # testing if platform is Linux
    if get_platform() != "Linux":
        return 3
    # size query instead of reading the whole file into memory just for len()
    length = os.path.getsize(file_path)
    if random_fill is True:
        for _ in range(passes):
            with open(file_path, "wb") as file:
                file.write(os.urandom(length))
    if null_fill is True:
        for _ in range(passes):
            with open(file_path, "wb") as file:
                file.write(b"\x00" * length)
    # BUG FIX: os.system("rm {}".format(path)) failed on names containing
    # spaces/shell metacharacters and was shell-injectable; os.remove is direct.
    os.remove(file_path)
    # NOTE(review): overwrites may be absorbed by the page cache; an fsync per
    # pass would strengthen the guarantee — confirm desired tradeoff.
    return 0
def delete(path:str) -> int: # tested
    """
    File deletion function.

    Parameters:
        `path`: The path to the file, in string format.
    Returns: An integer status code:
        0: File successfully deleted.
        1: Path is outside /home or refers to a hidden file.
        2: Path contains a dangerous shell command.
        3: Platform is not Linux.
    """
    path = os.path.abspath(path)
    if '/home/' not in path or "/." in path:
        return 1
    elif "sudo rm -rf /" in path:
        return 2
    # checking if the platform is Linux
    elif get_platform() == "Linux":
        # Security fix: os.remove avoids spawning a shell; the original
        # os.system("rm {}") was injectable via crafted file names.
        os.remove(path)
        return 0
    else:
        return 3
def XOR(bytes1:bytes, bytes2:bytes, write_to=None):
    """
    A function for performing XOR operations on two equal-length bytestrings.

    Thin wrapper around Crypto.Util.strxor.strxor.

    Parameters:
        `bytes1`, `bytes2`: byte strings of identical length.
        `write_to`: optional pre-allocated mutable buffer for the result.
    Returns: the XOR'ed bytes when `write_to` is None; otherwise the result
    is written into `write_to` and None is returned. (The original
    docstring stated these semantics the wrong way round.)
    """
    return strxor(bytes1, bytes2, output=write_to)
def one_time_pad_encrypt(message:str, key:str):
    """
    Performs one-time-pad-style encryption using a key stream derived from
    the SHA-256 digest of `key` (via this module's `hash_` helper).

    The message is null-padded to a multiple of the key length, split into
    key-sized chunks, and each chunk is XORed with the digest.
    NOTE(review): reusing one 32-byte digest for every chunk is not a true
    one-time pad; treat this as obfuscation, not strong encryption.

    Parameters:
        `message`: plaintext string to encrypt.
        `key`: keyphrase used to derive the key bytes.
    Returns: the encrypted payload as bytes.
    """
    # generating the key
    key_bytes = hash_(key, "SHA256", return_hex=False)
    key_length = len(key_bytes)
    message_bytes = message.encode()
    # Null-pad so the message splits evenly into key-sized chunks.
    # (Bug fix: the debug print of key/message lengths was removed.)
    remainder = len(message_bytes) % key_length
    if remainder != 0:
        message_bytes += b"\x00" * (key_length - remainder)
    # XOR each chunk against the key stream.
    # (Bug fix: the original `del XORed_chunks` raised NameError when the
    # message was empty, since the loop variable was never bound.)
    chunks = [
        XOR(message_bytes[i:i + key_length], key_bytes)
        for i in range(0, len(message_bytes), key_length)
    ]
    # returning the XORed string
    return b"".join(chunks)
def one_time_pad_decrypt(message:bytes, key:str):
    """
    Decrypts data produced by `one_time_pad_encrypt`, using a key stream
    derived from the SHA-256 digest of `key`.

    Parameters:
        `message`: ciphertext bytes (length a multiple of the key length).
        `key`: keyphrase used to derive the key bytes.
    Returns: the decrypted plaintext as bytes. Trailing null bytes are
    stripped on the assumption that they are encryption padding, so
    plaintexts that legitimately end in b"\\x00" do not round-trip.
    """
    # generating the key
    key_bytes = hash_(key, "SHA256", return_hex=False)
    key_length = len(key_bytes)
    # XOR each key-sized chunk against the key stream.
    # (Bug fix: the original `del XORed_chunks` raised NameError for empty
    # input, since the loop variable was never bound.)
    chunks = [
        XOR(message[i:i + key_length], key_bytes)
        for i in range(0, len(message), key_length)
    ]
    # Remove the null padding added during encryption.
    return b"".join(chunks).rstrip(b"\x00")
def is_prime_number(number:int) -> bool:
"""
A function for testing if a number is prime. Returns a boolean | |
# Source repository: emcknight/ChessEngineAASpr2022-
import math
import chess
from chess import *
# Maps algebraic square names ('a1'..'h8') to 0-based board indices,
# rank-major and file-minor: a1 = 0, h1 = 7, a8 = 56, h8 = 63.
chessToIndex = {
    "{}{}".format(file_char, rank): (rank - 1) * 8 + file_idx
    for rank in range(1, 9)
    for file_idx, file_char in enumerate("abcdefgh")
}
# Initialize evaluation with current move. (Maybe just make this a method for a parent object?)
# This algorithm assumes 'myColor' is the person whose turn it is.
# Evaluates in 4 parts: Material, King Safety, Control of Center, and possible Activity
def calculateRapid(board: chess.Board, color):
myColor = color
enemyColor = not color
allMyPieces = set()
allTheirPieces = set()
# ------------------------------------------------------------------------------------------------------------------
# Get all pieces on the board for each side. Create unions to group all into a general group.
# Kings
myKings = board.pieces(KING, myColor)
theirKings = board.pieces(KING, enemyColor)
allMyPieces = allMyPieces.union(myKings)
allTheirPieces = allTheirPieces.union(theirKings)
# Queens
myQueens = board.pieces(QUEEN, myColor)
theirQueens = board.pieces(QUEEN, enemyColor)
allMyPieces = allMyPieces.union(myQueens)
allTheirPieces = allTheirPieces.union(theirQueens)
# Rooks
myRooks = board.pieces(ROOK, myColor)
theirRooks = board.pieces(ROOK, enemyColor)
allMyPieces = allMyPieces.union(myRooks)
allTheirPieces = allTheirPieces.union(theirRooks)
# Bishops
myBishops = board.pieces(BISHOP, myColor)
theirBishops = board.pieces(BISHOP, enemyColor)
allMyPieces = allMyPieces.union(myBishops)
allTheirPieces = allTheirPieces.union(theirBishops)
# Knights
myKnights = board.pieces(KNIGHT, myColor)
theirKnights = board.pieces(KNIGHT, enemyColor)
allMyPieces = allMyPieces.union(myKnights)
allTheirPieces = allTheirPieces.union(theirKnights)
# Pawns
myPawns = board.pieces(PAWN, myColor)
theirPawns = board.pieces(PAWN, enemyColor)
allMyPieces = allMyPieces.union(myPawns)
allTheirPieces = allTheirPieces.union(theirPawns)
# ------------------------------------------------------------------------------------------------------------------
# Gets the material score.
kingWt = len(myKings) - len(theirKings)
queenWt = len(myQueens) - len(theirQueens)
rookWt = len(myRooks) - len(theirRooks)
bishWt = len(myBishops) - len(theirBishops)
kntWt = len(myKnights) - len(theirKnights)
pawnWt = len(myPawns) - len(theirPawns)
materialVal = (200 * kingWt) + (9 * queenWt) + (5 * rookWt) + (3 * (kntWt + bishWt)) + pawnWt
# ------------------------------------------------------------------------------------------------------------------
# Gets the activity score. Here I'm comparing the average number of moves per piece.
if board.turn == myColor:
myMoves = list(board.legal_moves)
theirMoves = list()
else:
theirMoves = list(board.legal_moves)
myMoves = list()
if board.turn == myColor:
if len(myMoves) > 0:
board.push(myMoves[0])
theirMoves = list(board.legal_moves)
board.pop()
else:
if len(theirMoves) > 0:
board.push(theirMoves[0])
myMoves = list(board.legal_moves)
board.pop()
activityVal = 0
if (allMyPieces == 0 and allTheirPieces == 0):
activityVal = 0
elif (allMyPieces == 0):
activityVal = -1 * len(theirMoves) / len(allTheirPieces)
elif (allTheirPieces == 0):
activityVal = len(myMoves) / len(allMyPieces)
else:
activityVal = (len(myMoves) / len(allMyPieces)) - (len(theirMoves) / len(allTheirPieces))
# ------------------------------------------------------------------------------------------------------------------
# Gets the center control. How many pawns are in a4 to h5.
myControl = 0
theirControl = 0
for x in range(16):
for piece in myPawns:
if piece == 16 + x:
myControl = myControl + 1
for piece in theirPawns:
if piece == 16 + x:
theirControl = theirControl + 1
controlVal = myControl - theirControl
# ------------------------------------------------------------------------------------------------------------------
# Gets the King safety. How safe is my King to the enemy King?
# This part is pretty big...
# To reduce time to process all these conditions - If I'm in check, my King is DEFINITELY not safe.
kingSafetyVal = 0
if board.is_check():
kingSafetyVal = -2
else:
# Pt.1 How mobile is my King to the enemy King?
myEscape = countMoves(myKings, myMoves)
theirEscape = countMoves(theirKings, theirMoves)
escapeVal = myEscape - theirEscape
# Pt.2 How many pawns are nearby my King?
# The 5x5 area around the King
fiveByFiveRange = [-18, -17, -16, -15, -14, -10, -9, -8, -7, -6, -2, -1, 1, 2, 6, 7, 8, 9, 10, 14, 15, 16, 17, 18]
myPawnShield = 0
theirPawnShield = 0
for dif in fiveByFiveRange:
for king in myKings:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and \
((dif > 0 and math.floor((king + dif)/8 - king/dif) <= 2) or (dif < 0 and math.ceil((king + dif)/8 - king/dif) >= -2)):
if king + dif in myPawns:
myPawnShield = myPawnShield + 1
for king in theirKings:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64 and \
((dif > 0 and math.floor((king + dif)/8 - king/dif) <= 2) or (dif < 0 and math.ceil((king + dif)/8 - king/dif) >= -2)):
if king + dif in theirPawns:
theirPawnShield = theirPawnShield + 1
pawnShieldVal = myPawnShield - theirPawnShield
# Pt.3 How many friendly pieces are nearby my King? (not counting pawns)
# The 5x8 area around the King. Ensure these differences don't extend outside 5x8 area
fiveByEightRange = [-23, -22, -21, -20, -19, -18, -17, -16, -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]
myDefenders = 0
theirDefenders = 0
for dif in fiveByEightRange:
for king in myKings:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if ((dif > 0 and math.floor((king + dif)/8 - king/dif) <= 3) or (dif < 0 and math.ceil((king + dif)/8 - king/dif) >= -3)):
# Don't count pawns.
if king + dif in allMyPieces and not king + dif in myPawns:
myDefenders = myDefenders + 1
for king in theirKings:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if ((dif > 0 and math.floor((king + dif)/8 - king/dif) <= 3) or (dif < 0 and math.ceil((king + dif)/8 - king/dif) >= -3)):
# Don't count pawns.
if king + dif in allTheirPieces and not king + dif in theirPawns:
theirDefenders = theirDefenders + 1
defenderVal = myDefenders - theirDefenders
# Pt. 4 How many enemy pieces are nearby my King?
myAttackers = 0
theirAttackers = 0
for dif in fiveByEightRange:
for king in myKings:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if ((dif > 0 and math.floor((king + dif)/8 - king/dif) <= 3) or (dif < 0 and math.ceil((king + dif)/8 - king/dif) >= -3)):
if king + dif in allTheirPieces:
myAttackers = myAttackers + 1
for king in theirKings:
# Ensure space difference is valid and doesn't extend outside desired range
if king + dif >= 0 and king + dif < 64:
if ((dif > 0 and math.floor((king + dif)/8 - king/dif) <= 3) or (dif < 0 and math.ceil((king + dif)/8 - king/dif) >= -3)):
# Don't count pawns.
if king + dif in allMyPieces:
theirAttackers = theirAttackers + 1
# This field we want the enemy to have more of
attackerValue = theirAttackers - myAttackers
# Pt. 5 How many Horizontal/Vertical/Diagonal lanes lack protection from one or more pieces
# The Row spaces the King is on
rowNegRange = [-1, -2, -3, -4, -5, -6, -7]
rowPosRange = [1, 2, 3, 4, 5, 6, 7]
# The Column spaces the king is on
colNegRange = [-8, -16, -24, -32, -40, -48, -56]
colPosRange = [8, 16, 24, 32, 40, 48, 56]
# The Diagonals the king is on
diagBLRange = [-9, -18, -27, -36, -45, -54, -63] | |
# Source repository: abdellaui/camera_construction
# Add-on metadata dictionary read by Blender's add-on manager.
bl_info = dict(
    name="Camera Construct (main)",
    description="Camera Construct",
    author="<NAME>",
    version=(0, 0, 2),
    blender=(2, 79, 0),
    location="3D View > Tools > Camera Construct",
    category="Render",
)
import bpy
import math
import os
from datetime import datetime
from bpy.app.handlers import persistent
from bpy.props import (StringProperty,
BoolProperty,
BoolVectorProperty,
IntProperty,
FloatProperty,
EnumProperty,
PointerProperty,
CollectionProperty,
FloatVectorProperty
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
UIList,
SplinePoint
)
# ------------------------------------------------------------------------
# Classes & Functions
# ------------------------------------------------------------------------
class DotDict(dict):
    """dict subclass whose items can be read, written and deleted as
    attributes. Missing attributes resolve to None (dict.get semantics)
    instead of raising AttributeError."""
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.
        return self.get(name)
    def __setattr__(self, name, value):
        self[name] = value
    def __delattr__(self, name):
        del self[name]
class Utils:
    """Stateless helper routines shared by the add-on.

    NOTE: the misspelled method names (lenghtOfPath,
    generatateGroundTruthString) are part of the public interface used by
    ConstructManager and are therefore kept as-is.
    """
    @staticmethod
    def lenghtOfPath(obj):
        """Return the approximate length of a CURVE object (rounded to 4
        decimals); 0 for anything that is not a curve."""
        total = 0
        if obj and obj.type == "CURVE":
            mesh = obj.to_mesh(bpy.context.scene, False, "PREVIEW")
            verts = mesh.vertices
            for edge in mesh.edges:
                total += (verts[edge.vertices[0]].co - verts[edge.vertices[1]].co).length
            total = round(total, 4)
            # Free the temporary evaluated mesh.
            bpy.data.meshes.remove(mesh)
        return total
    @staticmethod
    def rotationInDegToRad(x=0, y=0, z=0):
        """Convert an (x, y, z) rotation given in degrees to radians."""
        return (math.radians(x), math.radians(y), math.radians(z))
    @staticmethod
    def objectExists(obj):
        """True when obj is still registered in bpy.data.objects."""
        if not obj:
            return obj
        return obj.name in bpy.data.objects
    @staticmethod
    def generatateGroundTruthString(fileName, location, quaternion):
        """Format one dataset line: '<name>.png x y z w p q r', each value
        rounded to 6 decimal places."""
        values = [round(component, 6) for component in list(location) + list(quaternion)]
        return "{}.png {} {} {} {} {} {} {}".format(fileName, *values)
class CameraConstruct:
    """A camera rig: a NURBS path, an empty cube that follows the path, and
    a set of cameras parented to the cube.

    Instances are either built from existing scene objects (see
    ConstructManager.reinitalize) or created from scratch via generate().
    """
    def __init__(self, cameras=None, pathObj=None, cubeObj=None):
        """
        Parameters:
            cameras: list of camera objects; a fresh list when omitted.
            pathObj: the curve object the rig follows.
            cubeObj: the empty cube the cameras are parented to.
        """
        # Bug fix: the original signature used the mutable default
        # `cameras=[]`, which was shared by every default-constructed
        # instance and mutated by addCamera(), leaking cameras between rigs.
        self.cameras = cameras if cameras is not None else []
        self.pathObj = pathObj # holds the path
        self.cubeObj = cubeObj # holds the cameracuberoot element
        self.pathLength = 0
    @staticmethod
    def generate(currentPosition = (0, 0, 0)):
        """Create a complete new rig (path, cube, cameras) at the given
        scene position and return the CameraConstruct instance."""
        instance = CameraConstruct()
        # create path
        bpy.ops.curve.primitive_nurbs_path_add(location=currentPosition)
        instance.pathObj = bpy.context.object
        instance.pathObj.name = "CameraConstruct"
        # create cube
        bpy.ops.object.empty_add(location=currentPosition, type="CUBE", radius=0.2)
        instance.cubeObj = bpy.context.object
        instance.cubeObj.name = "CameraRootCube"
        instance.select()
        # the active object will be the parent of all selected objects
        bpy.context.scene.objects.active = instance.pathObj
        bpy.ops.object.parent_set(type="FOLLOW")
        # Limit-rotation constraint; which axes are actually limited is
        # decided later in configureCubeRotation().
        constraint = instance.cubeObj.constraints.new(type='LIMIT_ROTATION')
        constraint.name = "LIMIT_ROTATION"
        constraint.min_x = math.radians(0)
        constraint.max_x = math.radians(0)
        constraint.min_y = math.radians(-90)
        constraint.max_y = math.radians(-90)
        constraint.min_z = math.radians(-90)
        constraint.max_z = math.radians(-90)
        # correct the rotation and location of cubeObj inclusive cameras
        instance.cubeObj.location.x -= 2
        instance.createCameras()
        instance.configure()
        return instance
    def selectSplinePoint(self, index):
        """Select exactly one control point of the rig's spline (edit mode)."""
        bpy.ops.curve.select_all(action="DESELECT")
        self.pathObj.data.splines[0].points[index].select = True
    def changeSpline(self, points):
        """Rebuild the path's spline from the given point descriptors and
        recompute the cached path length."""
        self.pathObj.data.splines.clear()
        polyline = self.pathObj.data.splines.new("NURBS")
        polyline.use_endpoint_u = True
        polyline.points.add(len(points) - 1)
        # cant make a function: items at [i] are readonly, but props not
        for i, source in enumerate(points):
            polyline.points[i].co = source.co
            polyline.points[i].radius = source.radius
            polyline.points[i].tilt = source.tilt
            polyline.points[i].weight = source.weight
            polyline.points[i].weight_softbody = source.weight_softbody
        # workaround: toggling use_bezier_u forces Blender to refresh the spline
        polyline.use_bezier_u = True
        polyline.use_bezier_u = False
        self.calcPathLength()
    def copyPathPoints(self, path):
        """Copy the spline points of `path` onto this rig's path.

        When copying from this rig's own path the points are deep-copied
        first, because changeSpline() destroys the originals (the spline
        points are references into the data being cleared)."""
        points = list(path.data.splines[0].points)
        if path == self.pathObj:
            pointsHolder = []
            for source in points:
                copyPoint = {
                    "co" : source.co.copy(),
                    "radius" : source.radius,
                    "tilt" : source.tilt,
                    "weight" : source.weight,
                    "weight_softbody" : source.weight_softbody
                }
                pointsHolder.append(DotDict(copyPoint))
            points = pointsHolder
        self.changeSpline(points)
    def uiToPointList(self):
        """Apply the point list edited in the UI back onto the spline."""
        scene = bpy.context.scene
        points = list(scene.listOfPoints)
        self.changeSpline(points)
    def pointListToUI(self):
        """Mirror the spline's current points into the UI point list."""
        scene = bpy.context.scene
        scene.listIndex = 0
        points = list(self.pathObj.data.splines[0].points)
        scene.listOfPoints.clear()
        # CollectionProperty doesnt support add(amount)
        for i in range(len(points)):
            scene.listOfPoints.add()
        # cant make a function: items at [i] are readonly, but props not
        for i, source in enumerate(points):
            scene.listOfPoints[i].co = source.co
            scene.listOfPoints[i].radius = source.radius
            scene.listOfPoints[i].tilt = source.tilt
            scene.listOfPoints[i].weight = source.weight
            scene.listOfPoints[i].weight_softbody = source.weight_softbody
    def getPathLength(self):
        """Return the cached path length (set by calcPathLength())."""
        return self.pathLength
    def getName(self):
        """Return the path object's name, or "" when there is no path."""
        if self.pathObj and self.pathObj.name:
            return self.pathObj.name
        else:
            return ""
    def isValid(self):
        """True when path, cube and at least one camera still exist."""
        return Utils.objectExists(self.pathObj) and Utils.objectExists(self.cubeObj) and len(self.cameras) > 0
    def select(self):
        """Select only this rig's path and cube in the scene."""
        bpy.ops.object.select_all(action="DESELECT")
        self.pathObj.select = True
        self.cubeObj.select = True
    def addCamera(self, x=0, y=0, z=0):
        """Add one camera, rotated (x, y, z) degrees relative to the cube,
        optionally cloning the configuration of a sample camera."""
        scene = bpy.context.scene
        settings = scene.ccSettings
        cameraName = "Camera_x{}_y{}_z{}".format(x, y, z)
        print("Created: " + cameraName)
        camera = bpy.data.cameras.new(cameraName)
        obj = bpy.data.objects.new(cameraName, camera)
        # if sample of configured camera is setted, so copy configuration
        if settings.sampleOfCamera and scene.objects[settings.sampleOfCamera] and scene.objects[settings.sampleOfCamera].type == "CAMERA":
            obj.data = scene.objects[settings.sampleOfCamera].data
        obj.location = (0, 0, 0) # relative to cube
        obj.rotation_mode = "ZYX"
        obj.rotation_euler = Utils.rotationInDegToRad(x, y, z)
        obj.rotation_mode = "QUATERNION"
        obj.parent = self.cubeObj
        scene.objects.link(obj) # add camera to scene
        self.cameras.append(obj)
    def createCameras(self):
        """Create the grid of cameras defined by the add-on settings:
        either all axis combinations or one camera per single-axis step."""
        scene = bpy.context.scene
        settings = scene.ccSettings
        allowsAxes = list(settings.variationOfAxes)
        steps = settings.steps
        stepSize = settings.stepSize
        stepRange = steps * stepSize
        allowX = allowsAxes[0]
        allowY = allowsAxes[1]
        allowZ = allowsAxes[2]
        if settings.variationCombination:
            # Full cartesian product over the enabled axes.
            for x in range(-stepRange, stepRange + 1, stepSize):
                if x == 0 or allowX:
                    for y in range(-stepRange, stepRange + 1, stepSize):
                        if y == 0 or allowY:
                            for z in range(-stepRange, stepRange + 1, stepSize):
                                if z == 0 or allowZ:
                                    self.addCamera(x, y, z)
        else:
            # Center camera plus one camera per single-axis variation.
            self.addCamera(0, 0, 0)
            for x in range(-stepRange, stepRange + 1, stepSize):
                if x != 0 and allowX:
                    self.addCamera(x, 0, 0)
            for y in range(-stepRange, stepRange + 1, stepSize):
                if y != 0 and allowY:
                    self.addCamera(0, y, 0)
            for z in range(-stepRange, stepRange + 1, stepSize):
                if z != 0 and allowZ:
                    self.addCamera(0, 0, z)
    def calcPathLength(self):
        """Recompute and cache the path length."""
        self.pathLength = Utils.lenghtOfPath(self.pathObj)
    def configureCubeRotation(self):
        """Enable/disable the rotation limits according to the configured
        path-follow axes, and reset the cube's base orientation."""
        if not self.cubeObj or not self.pathObj:
            return None
        scene = bpy.context.scene
        settings = scene.ccSettings
        usePathFollowAxes = list(settings.usePathFollowAxes)
        self.cubeObj.constraints["LIMIT_ROTATION"].use_limit_x = not usePathFollowAxes[0]
        self.cubeObj.constraints["LIMIT_ROTATION"].use_limit_y = not usePathFollowAxes[1]
        self.cubeObj.constraints["LIMIT_ROTATION"].use_limit_z = not usePathFollowAxes[2]
        self.cubeObj.rotation_mode = "ZYX"
        self.cubeObj.rotation_euler = Utils.rotationInDegToRad(y=-90, z=-90)
    def getCameraAmount(self):
        """Return the number of cameras in this rig."""
        return len(self.cameras)
    def configure(self):
        """Apply the current ConstructManager settings to this rig."""
        self.pathObj.data.path_duration = ConstructManager.keypoints
        self.pathObj.data.use_path_follow = True
        self.configureCubeRotation()
        self.calcPathLength()
class ConstructManager:
    """Class-level (singleton-style) coordinator for the recording workflow.

    Holds the active CameraConstruct plus all recording state; every method
    is a classmethod and all state lives on the class itself. The render
    loop is driven by Blender's animation playback (see startRecord), with
    takePictures expected to be called per frame from a frame-change handler.
    """
    sceneKey = None             # key of the scene being rendered (set in reset())
    records = False             # True while a recording session is running
    iteratesOverCams = False    # True while takePictures is mid-iteration
    canChangePointList = False  # guard flag for UI point-list edits
    keypoints = 0               # number of animation frames along the path
    currentFrame = 0            # running image counter across all cameras
    file = None # holds the file handler
    pathToStore = ""            # output directory for dataset.txt and images
    lastPointIndex = -1         # last selected spline point index
    cc = CameraConstruct()      # the managed rig (replaced by reinitalize/reset)
    @classmethod
    def setCenterCamera(cls):
        """Make the middle camera of the rig the active scene camera."""
        index = int(len(cls.cc.cameras)/2);
        bpy.data.scenes[cls.sceneKey].camera = cls.cc.cameras[index]
    @classmethod
    def stopRecord(cls):
        """Stop a running recording: close the dataset file and cancel playback."""
        if cls.records:
            print("IMAGE RENDERING STOPPED")
            if cls.file:
                cls.file.close()
                cls.file = None
            cls.records = False
            bpy.ops.screen.animation_cancel()
    @classmethod
    def startRecord(cls):
        """Start (or resume) recording from the scene's current frame.

        Opens dataset.txt in append mode; the header is only written when
        starting from frame 0 (i.e. a fresh dataset).
        """
        cls.file = open( os.path.join(bpy.path.abspath(cls.pathToStore), "dataset.txt"), "a")
        _currentFrame = bpy.data.scenes[cls.sceneKey].frame_current
        # Image counter resumes at (frame * cameras-per-frame).
        cls.currentFrame = _currentFrame * cls.cc.getCameraAmount()
        cls.records = True
        cls.resetFrameSettings(_currentFrame)
        if cls.currentFrame == 0:
            now = datetime.now()
            header = "synthetic dataset created on {} \nImageFile, Camera Position [X Y Z W P Q R] \n\n".format(now)
            cls.file.write(header)
        # Restart playback cleanly; rendering happens via the frame handler.
        bpy.ops.screen.animation_cancel()
        bpy.ops.screen.animation_play()
    @classmethod
    def takePictures(cls, location):
        """Render one still per rig camera at the current path position and
        append a ground-truth line per image to the dataset file.

        `location` is the rig's current world position recorded for each image.
        """
        print("Taking pictures...")
        print("Using Scene[{}]".format(cls.sceneKey))
        amount = len(cls.cc.cameras) * cls.keypoints
        # Flag consumed elsewhere (e.g. frame handler) to avoid re-entry.
        cls.iteratesOverCams = True
        for obj in cls.cc.cameras:
            cls.currentFrame += 1
            print("{}% \t\t {:05} / {:05}".format(round(cls.currentFrame/amount*100,2), cls.currentFrame, amount))
            fileName = "{:05}".format(cls.currentFrame)
            fileName = os.path.join("img", fileName)
            groundtruth = Utils.generatateGroundTruthString(fileName, location, obj.matrix_world.to_quaternion())
            cls.file.write(groundtruth+"\n")
            # set scenes camera and output filename
            bpy.data.scenes[cls.sceneKey].camera = obj
            bpy.data.scenes[cls.sceneKey].render.image_settings.file_format = "PNG"
            bpy.data.scenes[cls.sceneKey].render.filepath = os.path.join(cls.pathToStore, fileName)
            # render scene and store the scene
            bpy.ops.render.render( write_still = True )
        cls.iteratesOverCams = False
        print("Done for location: {}".format(location))
    @classmethod
    def reinitalize(cls):
        """Adopt an existing construct selected in the UI settings.

        Validates the expected object hierarchy (curve -> empty cube ->
        cameras) and returns a (severity, message) tuple for the UI.
        """
        scene = bpy.context.scene
        settings = scene.ccSettings
        obj = settings.currentConstruct
        if obj and scene.objects[obj]:
            _pathObj = scene.objects[obj]
            # first obj is a curve and has childrens
            if _pathObj.type == "CURVE" and _pathObj.data.use_path and len(_pathObj.children) > 0:
                _cubeObj = _pathObj.children[0]
                # second obj is an empty and has childrens
                if _cubeObj.type == "EMPTY" and len(_cubeObj.children) > 0:
                    _cameras = _cubeObj.children
                    # third obj is a camera and has childrens
                    if _cameras[0].type == "CAMERA":
                        cls.reset()
                        cls.cc = CameraConstruct(list(_cameras), _pathObj, _cubeObj)
                        # for better vizualisation
                        cls.cc.select()
                        cls.cc.pointListToUI()
                        return ("Information", "Construct initilized")
                    else:
                        return ("Error", "No cameras aviable")
                else:
                    return ("Error", "CameraRootCube is missing")
            else:
                return ("Error", "Construct object doesnt seems similar an acceptable construct!")
        else:
            return ("Error", "Object doesnt exists!")
    @classmethod
    def generate(cls):
        """Create a brand-new rig at the position configured in the UI."""
        scene = bpy.context.scene
        settings = scene.ccSettings
        currentPosition = settings.position
        return CameraConstruct.generate(currentPosition)
    @classmethod
    def applySettings(cls):
        """Recompute keypoints/output path from the UI settings and push
        them onto the managed rig."""
        scene = bpy.context.scene
        settings = scene.ccSettings
        picturePerUnit = settings.picturePerUnit
        pathLength = cls.cc.getPathLength()
        # One keypoint per picturePerUnit of path length, rounded up.
        cls.keypoints = math.ceil(pathLength / picturePerUnit)
        cls.pathToStore = settings.pathToStore
        cls.refreshFrameEnd()
        cls.cc.configure()
    @classmethod
    def resetFrameSettings(cls, frame=0):
        """Reset path evaluation time and scene frame range to `frame`..keypoints."""
        cls.cc.pathObj.data.eval_time = frame
        bpy.data.scenes[cls.sceneKey].frame_start = 0
        cls.refreshFrameEnd()
    @classmethod
    def refreshFrameEnd(cls):
        """Keep the scene's end frame in sync with the keypoint count."""
        bpy.data.scenes[cls.sceneKey].frame_end = cls.keypoints
    @classmethod
    def reset(cls):
        """Return all class state to its defaults and close any open file."""
        if cls.file:
            cls.file.close()
        cls.file = None
        cls.pathToStore = ""
        cls.cc = CameraConstruct()
        cls.records = False
        cls.iteratesOverCams = False
        cls.canChangePointList = False
        cls.keypoints = 0
        cls.currentFrame = 0
        # NOTE(review): always uses the first scene key — confirm this is
        # intended for multi-scene .blend files.
        cls.sceneKey = bpy.data.scenes.keys()[0]
        cls.lastPointIndex = -1
    @classmethod
    def canTakePictures(cls):
        """True when no recording runs and the managed rig is still valid;
        an invalid rig triggers a full reset as a side effect."""
        if cls.records:
            return False
        elif not cls.cc.isValid():
            cls.reset()
            return False
        else:
            return True
def ShowMessageBox(message = "", title = "Message Box", icon = "INFO"):
    """Display a simple popup message box via Blender's window manager."""
    draw_fn = lambda popup, context: popup.layout.label(message)
    bpy.context.window_manager.popup_menu(draw_fn, title=title, icon=icon)
@persistent
def onFrameChanged(scene):
# on iterating over | |
if randomboss == True: #!
if randomweapons == False:
for y in range(8):
if posB[y][0] == "Airman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(AirShooterReceived)
AirShooterReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 3:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(AirShooterReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Bubbleman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(BubbleLeadReceived)
BubbleLeadReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(BubbleLeadReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Quickman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(QuickBoomerangReceived)
QuickBoomerangReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(QuickBoomerangReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Heatman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(AtomicFireReceived)
AtomicFireReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 6:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(AtomicFireReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
ROM.write(b'\x0A')
Pointer+=1
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
ROM.write(b'\x0A')
Pointer+=1
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Woodman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(LeafShieldReceived)
LeafShieldReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 4:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(LeafShieldReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Metalman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(MetalBladeReceived)
MetalBladeReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 5:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(MetalBladeReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Flashman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(TimeStopperReceived)
TimeStopperReceived.remove(b'\x20')
z -= 1
for x in range(z):#Writes second part
if x == 4:
ROM.write(b'\x0A')
Pointer+=1
ROM.write(TimeStopperReceived[x])
Pointer+=1
ROM.write(b'\x0B')
Pointer+=1
if y == 0: #Checks to see what palette value should be written based on position
Value = (b'\x30')
elif y == 1:
Value = (b'\x31')
elif y == 2:
Value = (b'\x32')
elif y == 3:
Value = (b'\x33')
elif y == 4:
Value = (b'\x34')
elif y == 5:
Value = (b'\x35')
elif y == 6:
Value = (b'\x36')
elif y == 7:
Value = (b'\x37')
ROM.write(Value)
Pointer+=1
if GiveMarine == y:
for x in range(13): #If they are supposed to give Item, write text
ROM.write(Item1text[x])
Pointer+=1
if GiveJet == y:
for x in range(13):
ROM.write(Item2text[x])
Pointer+=1
if GiveI3 == y:
for x in range(13):
ROM.write(Item3text[x])
Pointer+=1
if y == 7:
ROM.write(b'\x00') #If this is the last one, write the terminator at the end
End2.append(Pointer) #Used to recalculate offsets for text
elif posB[y][0] == "Crashman":
Seek = ROM.seek(Pointer,0)
for x in range(19): #Writes first part
ROM.write(GetEquippedtext[x])
Pointer+=1
z = len(CrashBomberReceived)
CrashBomberReceived.remove(b'\x20')
z -= 1
| |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Extension functionality around the os and path modules (os.path, posixpath, ntpath).
Implements the commons.resource_if interface based on local files.
Created on 21.05.2011
@author: SIGIESEC
'''
from __future__ import with_statement
from commons.core_if import ContentMetric
from commons.core_util import isinstance_or_duck, CollectionTools, StringTools
from commons.resource_if import (ResourceMetricProcessor, Resource,
ResourceAccessError, IllegalResourceIdentifierError, ResourceUnresolvable,
GenerationStrategy)
from commons.resource_util import ResourceUtil
from commons.v26compat_util import compatrelpath
from datetime import datetime
from itertools import imap, ifilter, permutations
import csv
import logging
import ntpath
import os.path
import posixpath
import re
import sys
import urllib
import urlparse
import warnings
import shelve
class AbsoluteFileMetricProcessor(ResourceMetricProcessor):
    """
    Abstract processor that applies a content metric to a file addressed by an
    absolute filesystem path.

    @deprecated: ResourceMetricProcessor is deprecated in the current form.
    """
    def apply_metric_to_resource(self, path):
        """
        Apply this processor's metric to the file at *path* and return the result
        (presumably a line count originally — the old wording was mangled by a
        rename; confirm against concrete subclasses).

        @param path: a path, which must be absolute; result is undefined if not
        @raise NotImplementedError: always; subclasses must override this method
        """
        raise NotImplementedError(self.__class__)
class LocalFileMetricProcessor(AbsoluteFileMetricProcessor):
    """
    Abstract processor that applies a content metric to a local file addressed
    relative to the current working directory.

    @deprecated: ResourceMetricProcessor is deprecated in the current form.
    """
    def apply_metric_to_resource(self, path):
        """
        Apply this processor's metric to the file at *path* and return the result.

        @param path: a path, evaluated relative to the current working directory
        @raise NotImplementedError: always; subclasses must override this method
        """
        raise NotImplementedError(self.__class__)
class ResolvingResourceMetricProcessor(ResourceMetricProcessor):
    """
    Applies a ContentMetric to resources that are first resolved from an input
    identifier by a ResourceResolver.

    @deprecated: ResourceMetricProcessor is deprecated in the current form.
    """
    def __init__(self, metric, resource_resolver):
        assert isinstance(metric, ContentMetric)
        assert isinstance(resource_resolver, ResourceResolver)
        self.__content_metric = metric
        self.__resolver = resource_resolver

    def get_metric(self):
        """Return the wrapped ContentMetric."""
        return self.__content_metric

    def apply_metric_to_resource(self, path):
        """Resolve *path* to a resource, open it, and apply the metric."""
        resource = self.__resolver.resolve(input_identifier=path)
        return self.__content_metric.apply_metric(resource.open())
class CachingResourceMetricProcessor(ResourceMetricProcessor):
    """
    Memoizing decorator for a ResourceMetricProcessor: caches metric results per
    resource and invalidates an entry when the resource's mtime changes.

    @deprecated: ResourceMetricProcessor is deprecated in the current form.
    """
    def __init__(self, decoratee):
        assert isinstance(decoratee, ResourceMetricProcessor)
        self.__inner = decoratee
        # resource -> (metric value, mtime at computation time)
        self.__entries = dict()

    def get_metric(self):
        return self.__inner.get_metric()

    def get_cached_lengths_items(self):
        # Python 2 dict API: lazily iterates (resource, (value, mtime)) pairs.
        return self.__entries.iteritems()

    def lines_cached(self, resource):
        """Return the cached metric value for *resource* (KeyError if absent)."""
        value, _mtime = self.__entries[resource]
        return value

    @classmethod
    def __mtime(cls, resource):
        return resource.stat().st_mtime

    def __put_entry(self, resource, lines):
        self.__entries[resource] = (lines, self.__mtime(resource))

    def modified(self, resource):
        """True if the resource changed on disk since its entry was cached."""
        _value, cached_mtime = self.__entries[resource]
        return cached_mtime != self.__mtime(resource)

    def apply_metric_to_resource(self, resource):
        if resource in self.__entries and not self.modified(resource):
            return self.lines_cached(resource)
        lines = self.__inner.apply_metric_to_resource(resource)
        self.__put_entry(resource, lines)
        return lines
#class PersistentCachingFileLengthCalculatorDecorator(AbsoluteFileMetricProcessor):
# pass
# TODO must be implemented
class NormalizedPathsIter(object):
    """
    Decorates an iterator of items which are strings or lists of strings and applies
    os.path.normcase and posixpath.normpath to each element which looks like a path.

    NOTE: Python 2 iterator (implements next(), not __next__()); the doctest
    output uses the Python 2 set repr.

    >>> test = [('./dyn', ), [], ['dyn', '1'], set(['dyn', 'foo/../dyn']), ('foo/../dyn', 'Hallo Welt'), ('foo/..', ), ('c:\\\\test.cpp', )]
    >>> for element in NormalizedPathsIter(test): print(element)
    ('dyn',)
    ['dyn', '1']
    set(['dyn'])
    ('dyn', 'Hallo Welt')
    ('.',)
    ('c:/test.cpp',)
    """
    def __init__(self, decoratee):
        """
        @param decoratee: The object that should be decorated.
        @type decoratee: types.Iterable
        """
        # NOTE(review): despite the name, __metric holds the decoratee's
        # iterator — looks like a copy/paste leftover; verify before renaming.
        self.__metric = decoratee.__iter__()
    def __iter__(self):
        return self
    def __get_normalized_item(self, item):
        # Normalize a single path-like element to posixpath convention.
        # TODO replace by using a LocalPathResolver?
        #item = os.path.normcase(item) #if item.find('.') != -1:
        item = PathTools.unix_normpath(item)
        return item
    def __get_normalized_iter(self, line):
        # NOTE(review): PathToolsConstants.anysep is a regex *string* such as
        # "[/\\]"; iterating it yields individual characters including '[' and
        # ']', so elements containing brackets are also treated as paths.
        # Presumably unintended, but deliberately preserved here — confirm
        # before changing.
        for x in line:
            if any(x.find(sep) != -1 for sep in PathToolsConstants.anysep):
                x = self.__get_normalized_item(x)
            yield x
    def next(self):
        # Skips entirely empty rows; rebuilds the row with the same container
        # type as the input row.
        # StopIteration exception is passed through
        line = ()
        while len(line) == 0:
            line = self.__metric.next()
        my_line_iter = self.__get_normalized_iter(line)
        return line.__class__(my_line_iter) # this works for tuple, list, set, ...?
    @staticmethod
    def create(filename, what, delimiter=',', allow_missing=False):
        # Factory: wrap a CSV reader over *filename* (see FileTools).
        return NormalizedPathsIter(FileTools.create_csv_reader(filename, what, delimiter, allow_missing))
class PathToolsConstants:
    """
    Shared constants for path handling.

    @cvar anysep: A regular expression fragment (a character class) that matches
        any possible separation character as defined by os.path.
    @type anysep: str
    """
    anysep = "[%s%s]" % (
        re.escape(os.path.sep),
        re.escape(os.path.altsep) if os.path.altsep else "",
    )
class PathTools(object):
    """
    This utility class bundles methods manipulating filesystem paths.
    Its methods use os.path, but the behaviour may be undefined if os.path is neither ntpath
    nor posixpath.
    @todo: Change static methods to instance methods and parametrise the object with the
    os.path module to use.
    """
    @staticmethod
    def fileinfo_str(filename):
        """
        Return "<filename> (<modification time>)" with the mtime formatted as
        "%Y-%m-%d %H:%M", e.g. for log output.
        """
        # TODO this is too specific for commons.os_util and should be hidden or moved
        return "%s (%s)" % (filename,
                            datetime.fromtimestamp(os.stat(filename).st_mtime).strftime("%Y-%m-%d %H:%M"))

    @staticmethod
    def replace_extension(filename, new_extension):
        """
        Replaces the extension within a path by another.
        @param filename: The original file name (path), optionally including an extension.
        @type filename: basestring
        @param new_extension: The new extension, including the leading dot.
        @type new_extension: basestring
        @return: type(filename)
        >>> PathTools.replace_extension(filename="/a/c.cpp", new_extension=".h")
        '/a/c.h'
        >>> PathTools.replace_extension(filename="/a/c", new_extension=".h")
        '/a/c.h'
        """
        return os.path.splitext(filename)[0] + new_extension

    @staticmethod
    def relative_path(path_name, relative_to, ignore_absolute=True, path_module=os.path):
        """
        Return path_name expressed relative to the *directory* of relative_to.
        >>> PathTools.relative_path('./inc/foo.df', ".\\\\inc\\\\bar.df", True, ntpath)
        'foo.df'
        >>> PathTools.relative_path('./_dyn/foo.h', '.\\\\inc\\\\bar.df', True, ntpath)
        '..\\\\_dyn\\\\foo.h'
        @param ignore_absolute: If true, absolute paths will be returned without modification
        """
        # TODO why should absolute paths be ignored at all? It might be more
        # useful to ignore paths that do not lie below relative_to.
        if not path_module.isabs(path_name) or not ignore_absolute:
            if hasattr(path_module, "relpath"):
                return path_module.relpath(path_name, path_module.dirname(relative_to))
            else:
                # Python < 2.6 path modules have no relpath; use the compat shim.
                warnings.warn("Using path module without relpath", DeprecationWarning)
                return compatrelpath(path_name, path_module.dirname(relative_to))
        else:
            return path_name

    @staticmethod
    def unix_normpath(path):
        """
        Transforms a ntpath or posixpath path to a (normalized) posixpath path.
        @todo: Should be renamed to to_posixpath_normpath.
        >>> PathTools.unix_normpath('/x/y/z')
        '/x/y/z'
        >>> PathTools.unix_normpath('\\\\x\\\\y\\\\..\\\\z')
        '/x/z'
        """
        return ntpath.normpath(path).replace(ntpath.sep, posixpath.sep)

    # Matches a leading "<sep>cygdrive" case-insensitively, with either separator.
    __cygwin_regex = re.compile("(?i)" + PathToolsConstants.anysep + "cygdrive")

    @staticmethod
    def is_cygwin_directory(path):
        """
        >>> PathTools.is_cygwin_directory("/cygdrive/D/x/y/z")
        True
        >>> PathTools.is_cygwin_directory("/x/y/z")
        False
        >>> PathTools.is_cygwin_directory("D:\\\\x\\\\y\\\\z")
        False
        """
        return PathTools.__cygwin_regex.match(path) is not None

    @staticmethod
    def cygwin_to_cmd_path(path):
        """Rewrite "/cygdrive/<x>/..." to "<x>:..." and normalize the result."""
        return os.path.normpath(os.path.normcase(re.sub(r'(?i)[/\\\\]cygdrive[/\\\\]([a-z])', r'\1:', path)))

    @staticmethod
    def windows_to_native(windows_path):
        """
        Convert a (relative) Windows path to a native path.
        If windows_path is not a Windows path, the result is undefined.
        #>>> PathTools.windows_to_native("D:\\\\foo")
        #'D:\\\\foo'
        #>>> PathTools.windows_to_native(".\\\\foo")
        #'.\\\\foo'
        #>>> PathTools.windows_to_native("D:\\\\foo")
        #ValueError on POSIX
        #>>> PathTools.windows_to_native("/foo")
        #'/foo'
        @raise ValueError: on POSIX, if windows_path is an absolute Windows path
        @raise NotImplementedError: if os.path is neither posixpath nor ntpath
        """
        if os.path == posixpath:
            if ntpath.isabs(windows_path):
                raise ValueError("Cannot convert an absolute Windows path to POSIX: %s" % windows_path)
            return windows_path.replace('\\', os.path.sep)
        elif os.path == ntpath:
            return windows_path
        else:
            raise NotImplementedError("Can only convert to posix or nt")

    @staticmethod
    def native_to_posix(native_path):
        """
        Convert a native path to POSIX separator convention.
        #>>> PathTools.native_to_posix("D:\\\\foo")
        #'D:/foo'
        #>>> PathTools.native_to_posix("/foo")
        #'/foo'
        """
        if os.path == posixpath:
            return native_path
        elif os.path == ntpath:
            return native_path.replace('\\', '/')
        else:
            raise NotImplementedError()

    @staticmethod
    def get_url_for_local_path(path):
        """
        Return a file:// URL for a local filesystem path.
        >>> PathTools.get_url_for_local_path('/foo')
        'file:///foo'
        #>>> PathTools.get_url_for_local_path('D:\\\\foo')
        #'file:///D:/foo'
        #>>> PathTools.get_url_for_local_path('D:\\\\foo bar')
        #'file:///D:/foo%20bar'
        """
        return urlparse.SplitResult("file", None, urllib.pathname2url(path), None, None).geturl()

    @staticmethod
    def splitall(path):
        """
        Split a path into all of its components (ntpath semantics).
        >>> PathTools.splitall('D:\\\\foo\\\\bar\\\\x.txt')
        ['D:\\\\', 'foo', 'bar', 'x.txt']
        """
        retval = list()
        restpath = path
        while restpath != '':
            (head, tail) = ntpath.split(restpath)
            restpath = ''
            if tail:
                retval = [tail] + retval
                restpath = head
            elif head:
                # A drive or root yields an empty tail and terminates the loop.
                retval = [head] + retval
        return retval

    @staticmethod
    def splitall_iter(path):
        """
        Generator variant of splitall.
        >>> list(PathTools.splitall_iter('D:\\\\foo\\\\bar\\\\x.txt'))
        ['D:\\\\', 'foo', 'bar', 'x.txt']
        """
        (head, tail) = ntpath.split(path)
        if head:
            if tail:
                for x in PathTools.splitall_iter(head):
                    yield x
                yield tail
            else:
                yield head
        else:
            if tail:
                yield tail

    @staticmethod
    def resource_to_relpath(canonic_resource, path_module=os.path):
        """
        Return the resource's name relative to its resolution root, or the
        unmodified name when the resource has no resolution root.
        """
        if canonic_resource.get_resolution_root():
            return PathTools.relative_path(path_name=canonic_resource.name(),
                                           relative_to=canonic_resource.get_resolution_root() + path_module.sep,
                                           ignore_absolute=False,
                                           path_module=path_module)
        else:
            return canonic_resource.name()

    @staticmethod
    def canonicalize_capitalization(path, pathmodule=os.path, strict=False):
        """
        Adjust the capitalization of each existing component of *path* to the
        capitalization actually found on disk.
        @param pathmodule: path module used for exists/split/join
        @param strict: if True, raise ValueError when *path* does not exist;
            otherwise *path* is returned unchanged
        @raise ValueError: if strict and the path does not exist
        """
        if not pathmodule.exists(path):
            if strict:
                raise ValueError("File %s cannot be found" % (path, ))
            else:
                return path
        (head, tail) = pathmodule.split(path)
        if (pathmodule.sep in head) and tail:
            # BUG FIX: the recursive call previously dropped pathmodule and
            # strict, silently falling back to os.path and non-strict mode.
            head = PathTools.canonicalize_capitalization(head, pathmodule, strict)
            for tail_cand in os.listdir(head):
                if tail_cand.lower() == tail.lower():
                    tail = tail_cand
                    break
        # BUG FIX: join with the same path module that was used for splitting
        # (previously this was hard-coded to os.path.join).
        return pathmodule.join(head, tail)

    @classmethod
    def common_base_directory(cls, file_paths, pathmodule=os.path):
        """
        Return the longest common string prefix of the normalized parent
        directories of *file_paths*.
        """
        # BUG FIX: normalize with the supplied pathmodule; previously the
        # pathmodule parameter only affected dirname while normpath was always
        # taken from os.path.
        return StringTools.get_common_prefix(imap(pathmodule.normpath, imap(pathmodule.dirname, file_paths)))
class WalkTools(object):
    @staticmethod
    def walk_extensions(top, extensions, onerror=None, skip_dirs=[]):
        """
        Recursively enumerate all files below *top* whose names end in one of
        the given extensions.
        @param top: base directory of the walk
        @param skip_dirs: names of subdirectories that must not be descended into
        @param extensions: A tuple (not a list) of extensions to look for, e.g. ('.cpp', '.h')
        @param onerror: unary function to call on an error, a OSError will be passed as the parameter
        @rtype: iterable of strings (paths starting with top)
        """
        for dirpath, dirnames, filenames in os.walk(top=top, onerror=onerror):
            # Prune unwanted subdirectories in place so os.walk skips them.
            for unwanted in skip_dirs:
                if unwanted in dirnames:
                    dirnames.remove(unwanted)
            matches = (name for name in filenames if name.endswith(extensions))
            for name in matches:
                yield os.path.join(dirpath, name)
class FileTools(object):
"""
A static utility class containing extension methods for files.
"""
__logger = None
@staticmethod
def file_len(fname):
"""
Returns the number of apply_metric_to_resource in a file.
@param fname: path of the file
@return: a non-negative integer
@raise OSError: if the file does not exist
@deprecated: Use commons.os_util.LocalFileMetricProcessor instead
"""
if os.path.exists(fname):
with open(fname) as f:
i = -1
for i, _l in enumerate(f):
pass
return i + 1
else:
raise OSError("file not found: %s" % fname)
#raise Warning()
@classmethod
def create_csv_dict_reader(cls, filename, what, fieldnames, delimiter=',', allow_missing=False):
if cls.__logger == None:
cls.__logger = logging.getLogger(cls.__module__)
if os.path.exists(filename):
cls.__logger.info("Reading %s from %s" | |
from PyQt5 import QtCore, QtGui, QtWidgets
from dialogError import Ui_DialogError
import yfinance as yf
import matplotlib as mpl
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import pandas as pd
import pandas_datareader as pdr
import numpy as np
import chart_studio.plotly as py
from datetime import datetime
from datetime import date
import pyfolio as pf
from pyfolio import plotting as pa
import icons
class Ui_TabWidgetPortfolio(object):
    def openError(self):
        """
        Build and show the error dialog (non-modal, via show()).

        The dialog and its UI object are stored on self so they are not
        garbage-collected while the dialog is visible.
        """
        self.dialog=QtWidgets.QDialog()
        self.uiError=Ui_DialogError()
        self.uiError.setupUi(self.dialog)
        self.dialog.show()
    def setupUi(self, TabWidgetPortfolio):
        """
        Build the widget tree for the two-tab portfolio window.

        NOTE(review): this method follows the structure of pyuic5-generated
        code (widget creation, then retranslateUi, then signal wiring); edit
        with care if the .ui file is regenerated, as manual changes (the
        signal connections at the end) would be lost.
        """
        # --- Top-level tab widget ---
        TabWidgetPortfolio.setObjectName("TabWidgetPortfolio")
        TabWidgetPortfolio.resize(1124, 854)
        # Fixed-size window: min and max sizes are identical.
        TabWidgetPortfolio.setMinimumSize(QtCore.QSize(1124, 854))
        TabWidgetPortfolio.setMaximumSize(QtCore.QSize(1124, 854))
        font = QtGui.QFont()
        font.setPointSize(18)
        TabWidgetPortfolio.setFont(font)
        TabWidgetPortfolio.setTabPosition(QtWidgets.QTabWidget.North)
        TabWidgetPortfolio.setIconSize(QtCore.QSize(26, 26))
        TabWidgetPortfolio.setElideMode(QtCore.Qt.ElideNone)
        # --- Tab 1: portfolio (symbol list, date/period selection, plot buttons) ---
        self.tab_portfolio = QtWidgets.QWidget()
        self.tab_portfolio.setObjectName("tab_portfolio")
        self.listWidget = QtWidgets.QListWidget(self.tab_portfolio)
        self.listWidget.setGeometry(QtCore.QRect(80, 210, 241, 341))
        self.listWidget.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.listWidget.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.listWidget.setObjectName("listWidget")
        # Editable combo box for entering/choosing a ticker symbol; item texts
        # are assigned in retranslateUi.
        self.comboBoxSymbol = QtWidgets.QComboBox(self.tab_portfolio)
        self.comboBoxSymbol.setGeometry(QtCore.QRect(80, 130, 241, 51))
        self.comboBoxSymbol.setStyleSheet("font: 16pt \"MS Shell Dlg 2\";")
        self.comboBoxSymbol.setEditable(True)
        self.comboBoxSymbol.setObjectName("comboBoxSymbol")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.comboBoxSymbol.addItem("")
        self.labelPeriod = QtWidgets.QLabel(self.tab_portfolio)
        self.labelPeriod.setGeometry(QtCore.QRect(610, 130, 111, 41))
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(18)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.labelPeriod.setFont(font)
        self.labelPeriod.setStyleSheet("")
        self.labelPeriod.setObjectName("labelPeriod")
        self.labelDateRange = QtWidgets.QLabel(self.tab_portfolio)
        self.labelDateRange.setGeometry(QtCore.QRect(640, 180, 171, 31))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(15)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.labelDateRange.setFont(font)
        self.labelDateRange.setStyleSheet("")
        self.labelDateRange.setObjectName("labelDateRange")
        self.label_3 = QtWidgets.QLabel(self.tab_portfolio)
        self.label_3.setGeometry(QtCore.QRect(640, 220, 81, 41))
        font = QtGui.QFont()
        font.setPointSize(15)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        # Date-range selection (start/end), used when radioButtonRange is active.
        self.dateStart = QtWidgets.QDateEdit(self.tab_portfolio)
        self.dateStart.setGeometry(QtCore.QRect(640, 260, 181, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.dateStart.setFont(font)
        self.dateStart.setStyleSheet("background-color: rgb(216, 216, 216);")
        self.dateStart.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
        self.dateStart.setDate(QtCore.QDate(2019, 1, 1))
        self.dateStart.setObjectName("dateStart")
        self.labelParameter = QtWidgets.QLabel(self.tab_portfolio)
        self.labelParameter.setGeometry(QtCore.QRect(930, 180, 161, 31))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(15)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.labelParameter.setFont(font)
        self.labelParameter.setStyleSheet("")
        self.labelParameter.setObjectName("labelParameter")
        # Period selection (e.g. 1d/5d/1mo ...), used when radioButtonPeriod is
        # active; disabled by default further below.
        self.comboBoxPeriod = QtWidgets.QComboBox(self.tab_portfolio)
        self.comboBoxPeriod.setGeometry(QtCore.QRect(930, 220, 81, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.comboBoxPeriod.setFont(font)
        self.comboBoxPeriod.setStyleSheet("background-color: rgb(216, 216, 216);")
        self.comboBoxPeriod.setObjectName("comboBoxPeriod")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.comboBoxPeriod.addItem("")
        self.labelInterval = QtWidgets.QLabel(self.tab_portfolio)
        self.labelInterval.setGeometry(QtCore.QRect(620, 400, 141, 41))
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(18)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.labelInterval.setFont(font)
        self.labelInterval.setStyleSheet("")
        self.labelInterval.setObjectName("labelInterval")
        self.comboBoxInterval = QtWidgets.QComboBox(self.tab_portfolio)
        self.comboBoxInterval.setGeometry(QtCore.QRect(640, 450, 81, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.comboBoxInterval.setFont(font)
        self.comboBoxInterval.setStyleSheet("background-color: rgb(216, 216, 216);")
        self.comboBoxInterval.setObjectName("comboBoxInterval")
        self.comboBoxInterval.addItem("")
        self.comboBoxInterval.addItem("")
        self.comboBoxInterval.addItem("")
        self.comboBoxInterval.addItem("")
        self.comboBoxInterval.addItem("")
        # Mutually exclusive mode selectors: date range vs. period parameter.
        self.radioButtonRange = QtWidgets.QRadioButton(self.tab_portfolio)
        self.radioButtonRange.setGeometry(QtCore.QRect(610, 180, 21, 20))
        self.radioButtonRange.setText("")
        self.radioButtonRange.setChecked(True)
        self.radioButtonRange.setObjectName("radioButtonRange")
        self.radioButtonPeriod = QtWidgets.QRadioButton(self.tab_portfolio)
        self.radioButtonPeriod.setGeometry(QtCore.QRect(900, 180, 16, 21))
        self.radioButtonPeriod.setText("")
        self.radioButtonPeriod.setObjectName("radioButtonPeriod")
        self.label_2 = QtWidgets.QLabel(self.tab_portfolio)
        self.label_2.setGeometry(QtCore.QRect(640, 300, 51, 31))
        font = QtGui.QFont()
        font.setPointSize(15)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.dateEnd = QtWidgets.QDateEdit(self.tab_portfolio)
        self.dateEnd.setGeometry(QtCore.QRect(640, 330, 181, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.dateEnd.setFont(font)
        self.dateEnd.setStyleSheet("background-color: rgb(216, 216, 216);")
        self.dateEnd.setDateTime(QtCore.QDateTime(QtCore.QDate(2020, 1, 1), QtCore.QTime(0, 0, 0)))
        self.dateEnd.setDate(QtCore.QDate(2020, 1, 1))
        self.dateEnd.setObjectName("dateEnd")
        # Buttons for managing the symbol list (add/remove).
        self.AddButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.AddButton.setGeometry(QtCore.QRect(350, 140, 91, 41))
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(15)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(10)
        self.AddButton.setFont(font)
        self.AddButton.setStyleSheet("font: 87 15pt \"Arial Black\";\n"
                                     "background-color: rgb(206, 206, 206);")
        self.AddButton.setText("")
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/add/add.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.AddButton.setIcon(icon)
        self.AddButton.setIconSize(QtCore.QSize(34, 34))
        self.AddButton.setObjectName("AddButton")
        self.DeleteButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.DeleteButton.setGeometry(QtCore.QRect(350, 210, 91, 41))
        self.DeleteButton.setStyleSheet("font: 87 12pt \"Arial Black\";\n"
                                        "background-color: rgba(206, 206, 206, 206);")
        self.DeleteButton.setText("")
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/delete/minus.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.DeleteButton.setIcon(icon1)
        self.DeleteButton.setIconSize(QtCore.QSize(34, 34))
        self.DeleteButton.setObjectName("DeleteButton")
        # Row of plot buttons along the bottom of the portfolio tab.
        self.candlestickButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.candlestickButton.setGeometry(QtCore.QRect(200, 760, 151, 41))
        self.candlestickButton.setStyleSheet("\n"
                                             "font: 87 8pt \"Arial Black\";")
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/candlestick/candlestick.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.candlestickButton.setIcon(icon2)
        self.candlestickButton.setIconSize(QtCore.QSize(34, 34))
        self.candlestickButton.setObjectName("candlestickButton")
        self.OHLCButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.OHLCButton.setGeometry(QtCore.QRect(360, 760, 141, 41))
        self.OHLCButton.setStyleSheet("\n"
                                      "font: 87 8pt \"Arial Black\";")
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/ohlc/ohlc.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.OHLCButton.setIcon(icon3)
        self.OHLCButton.setIconSize(QtCore.QSize(24, 24))
        self.OHLCButton.setObjectName("OHLCButton")
        self.timeSeriesDataButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.timeSeriesDataButton.setGeometry(QtCore.QRect(10, 760, 181, 41))
        self.timeSeriesDataButton.setStyleSheet("\n"
                                                "font: 87 8pt \"Arial Black\";")
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/timeseries/timeser.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.timeSeriesDataButton.setIcon(icon4)
        self.timeSeriesDataButton.setIconSize(QtCore.QSize(34, 34))
        self.timeSeriesDataButton.setObjectName("timeSeriesDataButton")
        self.dailyPercentageChangeButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.dailyPercentageChangeButton.setGeometry(QtCore.QRect(660, 760, 191, 41))
        self.dailyPercentageChangeButton.setStyleSheet("\n"
                                                       "font: 87 8pt \"Arial Black\";")
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(":/histogram/chart-histogram-512.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.dailyPercentageChangeButton.setIcon(icon5)
        self.dailyPercentageChangeButton.setIconSize(QtCore.QSize(34, 34))
        self.dailyPercentageChangeButton.setObjectName("dailyPercentageChangeButton")
        self.volatilityButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.volatilityButton.setGeometry(QtCore.QRect(510, 760, 141, 41))
        self.volatilityButton.setStyleSheet("\n"
                                            "font: 87 8pt \"Arial Black\";")
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(":/volatility/volatility-512.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.volatilityButton.setIcon(icon6)
        self.volatilityButton.setIconSize(QtCore.QSize(34, 34))
        self.volatilityButton.setObjectName("volatilityButton")
        self.volumeButton = QtWidgets.QPushButton(self.tab_portfolio)
        self.volumeButton.setGeometry(QtCore.QRect(860, 760, 141, 41))
        self.volumeButton.setStyleSheet("\n"
                                        "font: 87 8pt \"Arial Black\";")
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap(":/area/area.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.volumeButton.setIcon(icon7)
        self.volumeButton.setIconSize(QtCore.QSize(34, 34))
        self.volumeButton.setObjectName("volumeButton")
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap(":/portfolio/portfolio.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        TabWidgetPortfolio.addTab(self.tab_portfolio, icon8, "")
        # --- Tab 2: advisor (strategy toolbox: naive / MAC / turtle) ---
        self.tab_advisor = QtWidgets.QWidget()
        self.tab_advisor.setObjectName("tab_advisor")
        self.toolBoxAdvisor = QtWidgets.QToolBox(self.tab_advisor)
        self.toolBoxAdvisor.setGeometry(QtCore.QRect(0, 220, 1121, 581))
        self.toolBoxAdvisor.setObjectName("toolBoxAdvisor")
        # Page 1: naive threshold strategy.
        self.pageNaive = QtWidgets.QWidget()
        self.pageNaive.setGeometry(QtCore.QRect(0, 0, 1121, 428))
        self.pageNaive.setObjectName("pageNaive")
        self.comboBoxThreshold = QtWidgets.QComboBox(self.pageNaive)
        self.comboBoxThreshold.setGeometry(QtCore.QRect(160, 30, 111, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.comboBoxThreshold.setFont(font)
        self.comboBoxThreshold.setStyleSheet("")
        self.comboBoxThreshold.setObjectName("comboBoxThreshold")
        self.comboBoxThreshold.addItem("")
        self.comboBoxThreshold.addItem("")
        self.comboBoxThreshold.addItem("")
        self.comboBoxThreshold.addItem("")
        self.comboBoxThreshold.addItem("")
        self.labelThreshold = QtWidgets.QLabel(self.pageNaive)
        self.labelThreshold.setGeometry(QtCore.QRect(30, 20, 121, 51))
        font = QtGui.QFont()
        font.setFamily("MS Shell Dlg 2")
        font.setPointSize(15)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(50)
        self.labelThreshold.setFont(font)
        self.labelThreshold.setStyleSheet("")
        self.labelThreshold.setObjectName("labelThreshold")
        self.naiveButton = QtWidgets.QPushButton(self.pageNaive)
        self.naiveButton.setGeometry(QtCore.QRect(500, 100, 121, 41))
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(15)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(10)
        self.naiveButton.setFont(font)
        self.naiveButton.setStyleSheet("font: 87 15pt \"Arial Black\";")
        self.naiveButton.setObjectName("naiveButton")
        self.toolBoxAdvisor.addItem(self.pageNaive, "")
        # Page 2: moving-average crossover (MAC) strategy.
        self.pageMAC = QtWidgets.QWidget()
        self.pageMAC.setGeometry(QtCore.QRect(0, 0, 1121, 428))
        self.pageMAC.setObjectName("pageMAC")
        self.labelShort = QtWidgets.QLabel(self.pageMAC)
        self.labelShort.setGeometry(QtCore.QRect(30, 40, 261, 31))
        self.labelShort.setStyleSheet("font: 15pt \"MS Shell Dlg 2\";")
        self.labelShort.setObjectName("labelShort")
        self.comboBoxShort = QtWidgets.QComboBox(self.pageMAC)
        self.comboBoxShort.setGeometry(QtCore.QRect(290, 40, 81, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.comboBoxShort.setFont(font)
        self.comboBoxShort.setStyleSheet("")
        self.comboBoxShort.setObjectName("comboBoxShort")
        self.comboBoxShort.addItem("")
        self.comboBoxShort.addItem("")
        self.comboBoxShort.addItem("")
        self.labelLong = QtWidgets.QLabel(self.pageMAC)
        self.labelLong.setGeometry(QtCore.QRect(30, 100, 251, 31))
        self.labelLong.setStyleSheet("font: 15pt \"MS Shell Dlg 2\";")
        self.labelLong.setObjectName("labelLong")
        self.comboBoxLong = QtWidgets.QComboBox(self.pageMAC)
        self.comboBoxLong.setGeometry(QtCore.QRect(290, 100, 81, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.comboBoxLong.setFont(font)
        self.comboBoxLong.setStyleSheet("")
        self.comboBoxLong.setObjectName("comboBoxLong")
        self.comboBoxLong.addItem("")
        self.comboBoxLong.addItem("")
        self.comboBoxLong.addItem("")
        self.MACButton = QtWidgets.QPushButton(self.pageMAC)
        self.MACButton.setGeometry(QtCore.QRect(500, 190, 131, 31))
        self.MACButton.setStyleSheet("font: 87 15pt \"Arial Black\";")
        self.MACButton.setObjectName("MACButton")
        self.toolBoxAdvisor.addItem(self.pageMAC, "")
        # Page 3: turtle breakout strategy.
        self.pageTurtle = QtWidgets.QWidget()
        self.pageTurtle.setGeometry(QtCore.QRect(0, 0, 1121, 428))
        self.pageTurtle.setObjectName("pageTurtle")
        self.labelBreakout = QtWidgets.QLabel(self.pageTurtle)
        self.labelBreakout.setGeometry(QtCore.QRect(30, 30, 121, 41))
        font = QtGui.QFont()
        font.setPointSize(15)
        self.labelBreakout.setFont(font)
        self.labelBreakout.setObjectName("labelBreakout")
        self.comboBoxBreakout = QtWidgets.QComboBox(self.pageTurtle)
        self.comboBoxBreakout.setGeometry(QtCore.QRect(170, 40, 71, 31))
        font = QtGui.QFont()
        font.setPointSize(16)
        self.comboBoxBreakout.setFont(font)
        self.comboBoxBreakout.setObjectName("comboBoxBreakout")
        self.comboBoxBreakout.addItem("")
        self.comboBoxBreakout.addItem("")
        self.comboBoxBreakout.addItem("")
        self.comboBoxBreakout.addItem("")
        self.comboBoxBreakout.addItem("")
        self.turtleButton = QtWidgets.QPushButton(self.pageTurtle)
        self.turtleButton.setGeometry(QtCore.QRect(500, 100, 121, 41))
        font = QtGui.QFont()
        font.setFamily("Arial Black")
        font.setPointSize(15)
        font.setBold(False)
        font.setItalic(False)
        font.setWeight(10)
        self.turtleButton.setFont(font)
        self.turtleButton.setStyleSheet("font: 87 15pt \"Arial Black\";")
        self.turtleButton.setObjectName("turtleButton")
        self.toolBoxAdvisor.addItem(self.pageTurtle, "")
        # Shared advisor controls: initial capital and commission.
        self.labelInitialCapital = QtWidgets.QLabel(self.tab_advisor)
        self.labelInitialCapital.setGeometry(QtCore.QRect(30, 70, 241, 51))
        self.labelInitialCapital.setStyleSheet("font: 87 18pt \"Arial Black\";")
        self.labelInitialCapital.setObjectName("labelInitialCapital")
        self.comboBoxInitialCapital = QtWidgets.QComboBox(self.tab_advisor)
        self.comboBoxInitialCapital.setGeometry(QtCore.QRect(270, 80, 131, 31))
        self.comboBoxInitialCapital.setStyleSheet("font: 16pt \"MS Shell Dlg 2\";")
        self.comboBoxInitialCapital.setEditable(True)
        self.comboBoxInitialCapital.setObjectName("comboBoxInitialCapital")
        self.comboBoxInitialCapital.addItem("")
        self.comboBoxInitialCapital.addItem("")
        self.comboBoxInitialCapital.addItem("")
        self.comboBoxInitialCapital.addItem("")
        self.labelCommission = QtWidgets.QLabel(self.tab_advisor)
        self.labelCommission.setGeometry(QtCore.QRect(540, 70, 221, 41))
        self.labelCommission.setStyleSheet("font: 87 18pt \"Arial Black\";")
        self.labelCommission.setObjectName("labelCommission")
        self.comboBoxCommission = QtWidgets.QComboBox(self.tab_advisor)
        self.comboBoxCommission.setGeometry(QtCore.QRect(760, 80, 101, 31))
        self.comboBoxCommission.setStyleSheet("font: 16pt \"MS Shell Dlg 2\";")
        self.comboBoxCommission.setEditable(True)
        self.comboBoxCommission.setObjectName("comboBoxCommission")
        self.comboBoxCommission.addItem("")
        self.comboBoxCommission.addItem("")
        self.comboBoxCommission.addItem("")
        self.comboBoxCommission.addItem("")
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap(":/advisor/advisor.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        TabWidgetPortfolio.addTab(self.tab_advisor, icon9, "")
        # --- Translation, initial state, and signal wiring ---
        self.retranslateUi(TabWidgetPortfolio)
        TabWidgetPortfolio.setCurrentIndex(0)
        self.toolBoxAdvisor.setCurrentIndex(0)
        self.toolBoxAdvisor.layout().setSpacing(7)
        QtCore.QMetaObject.connectSlotsByName(TabWidgetPortfolio)
        # Manually added connections (not produced by connectSlotsByName).
        self.AddButton.clicked.connect(self.addToList)
        self.DeleteButton.clicked.connect(self.removeItems)
        # Range mode is the default, so the period combo starts disabled.
        self.comboBoxPeriod.setDisabled(True)
        self.timeSeriesDataButton.clicked.connect(self.plotTimeSeries)
        self.candlestickButton.clicked.connect(self.plotCandlestick)
        self.OHLCButton.clicked.connect(self.plotOHLC)
        self.volatilityButton.clicked.connect(self.plotVolatility)
        self.dailyPercentageChangeButton.clicked.connect(self.calculatePctChange)
        self.volumeButton.clicked.connect(self.plotVolume)
        self.naiveButton.clicked.connect(self.buildTableNaive)
        self.MACButton.clicked.connect(self.buildTableMAC)
        self.turtleButton.clicked.connect(self.turtleStrategy)
        self.radioButtonRange.clicked.connect(self.disablePeriod)
        self.radioButtonPeriod.clicked.connect(self.disableRange)
        self.comboBoxPeriod.currentIndexChanged.connect(self.changeIntervalOptionsForPeriod)
        self.radioButtonRange.clicked.connect(self.changeIntervalOptionsForRange)
        self.radioButtonPeriod.clicked.connect(self.changeIntervalOptionsForPeriod)
        self.dateEnd.dateChanged.connect(self.startDateLowerThenEnd)
def retranslateUi(self, TabWidgetPortfolio):
    """Install all user-visible strings on the widgets.

    Standard Qt Designer hook: called once from setupUi and again whenever
    the application language changes.
    """
    _translate = QtCore.QCoreApplication.translate
    TabWidgetPortfolio.setWindowTitle(_translate("TabWidgetPortfolio", "TabWidget"))
    # --- Symbol picker: item 0 is placeholder text, items 1-8 company names
    self.comboBoxSymbol.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p><span style=\" font-size:12pt;\">Type/ Select Symbol</span></p></body></html>"))
    self.comboBoxSymbol.setCurrentText(_translate("TabWidgetPortfolio", "Enter Symbol"))
    self.comboBoxSymbol.setItemText(0, _translate("TabWidgetPortfolio", "Enter Symbol"))
    self.comboBoxSymbol.setItemText(1, _translate("TabWidgetPortfolio", "Microsoft"))
    self.comboBoxSymbol.setItemText(2, _translate("TabWidgetPortfolio", "Apple"))
    self.comboBoxSymbol.setItemText(3, _translate("TabWidgetPortfolio", "Amazon"))
    self.comboBoxSymbol.setItemText(4, _translate("TabWidgetPortfolio", "Alphabet"))
    self.comboBoxSymbol.setItemText(5, _translate("TabWidgetPortfolio", "Alibaba"))
    self.comboBoxSymbol.setItemText(6, _translate("TabWidgetPortfolio", "Facebook"))
    self.comboBoxSymbol.setItemText(7, _translate("TabWidgetPortfolio", "Visa"))
    self.comboBoxSymbol.setItemText(8, _translate("TabWidgetPortfolio", "Walmart"))
    # --- Period / date-range selection widgets
    self.labelPeriod.setText(_translate("TabWidgetPortfolio", "Period:"))
    self.labelDateRange.setText(_translate("TabWidgetPortfolio", "By Date Range"))
    self.label_3.setText(_translate("TabWidgetPortfolio", "Start:"))
    self.dateStart.setDisplayFormat(_translate("TabWidgetPortfolio", "yyyy-MM-dd"))
    self.labelParameter.setText(_translate("TabWidgetPortfolio", "By Parameter"))
    # Period values mirror the strings accepted by yfinance's `period` argument
    self.comboBoxPeriod.setItemText(0, _translate("TabWidgetPortfolio", "1d"))
    self.comboBoxPeriod.setItemText(1, _translate("TabWidgetPortfolio", "5d"))
    self.comboBoxPeriod.setItemText(2, _translate("TabWidgetPortfolio", "7d"))
    self.comboBoxPeriod.setItemText(3, _translate("TabWidgetPortfolio", "1mo"))
    self.comboBoxPeriod.setItemText(4, _translate("TabWidgetPortfolio", "3mo"))
    self.comboBoxPeriod.setItemText(5, _translate("TabWidgetPortfolio", "6mo"))
    self.comboBoxPeriod.setItemText(6, _translate("TabWidgetPortfolio", "1y"))
    self.comboBoxPeriod.setItemText(7, _translate("TabWidgetPortfolio", "2y"))
    self.comboBoxPeriod.setItemText(8, _translate("TabWidgetPortfolio", "5y"))
    self.comboBoxPeriod.setItemText(9, _translate("TabWidgetPortfolio", "10y"))
    self.labelInterval.setText(_translate("TabWidgetPortfolio", "Interval:"))
    self.comboBoxInterval.setItemText(0, _translate("TabWidgetPortfolio", "1d"))
    self.comboBoxInterval.setItemText(1, _translate("TabWidgetPortfolio", "5d"))
    self.comboBoxInterval.setItemText(2, _translate("TabWidgetPortfolio", "1wk"))
    self.comboBoxInterval.setItemText(3, _translate("TabWidgetPortfolio", "1mo"))
    self.comboBoxInterval.setItemText(4, _translate("TabWidgetPortfolio", "3mo"))
    self.label_2.setText(_translate("TabWidgetPortfolio", "End:"))
    self.dateEnd.setDisplayFormat(_translate("TabWidgetPortfolio", "yyyy-MM-dd"))
    # --- Portfolio tab buttons
    self.AddButton.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p><span style=\" font-size:12pt;\">Add Symbol</span></p></body></html>"))
    self.DeleteButton.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p>Delete Symbol</p></body></html>"))
    self.candlestickButton.setText(_translate("TabWidgetPortfolio", "Candlestick"))
    self.OHLCButton.setText(_translate("TabWidgetPortfolio", "OHLC"))
    self.timeSeriesDataButton.setToolTip(_translate("TabWidgetPortfolio", "<html><head/><body><p><br/></p></body></html>"))
    self.timeSeriesDataButton.setText(_translate("TabWidgetPortfolio", "Time Series Data"))
    self.dailyPercentageChangeButton.setText(_translate("TabWidgetPortfolio", "Percentage Change "))
    self.volatilityButton.setText(_translate("TabWidgetPortfolio", "Volatility"))
    self.volumeButton.setText(_translate("TabWidgetPortfolio", "Volume"))
    TabWidgetPortfolio.setTabText(TabWidgetPortfolio.indexOf(self.tab_portfolio), _translate("TabWidgetPortfolio", "Portfolio"))
    # --- Trade Advisor tab: naive strategy
    self.comboBoxThreshold.setItemText(0, _translate("TabWidgetPortfolio", "5"))
    self.comboBoxThreshold.setItemText(1, _translate("TabWidgetPortfolio", "7"))
    self.comboBoxThreshold.setItemText(2, _translate("TabWidgetPortfolio", "10"))
    self.comboBoxThreshold.setItemText(3, _translate("TabWidgetPortfolio", "15"))
    self.comboBoxThreshold.setItemText(4, _translate("TabWidgetPortfolio", "20"))
    self.labelThreshold.setText(_translate("TabWidgetPortfolio", "Threshold:"))
    self.naiveButton.setText(_translate("TabWidgetPortfolio", "Advise"))
    self.toolBoxAdvisor.setItemText(self.toolBoxAdvisor.indexOf(self.pageNaive), _translate("TabWidgetPortfolio", "Naive Trading Strategy"))
    # --- Trade Advisor tab: moving-average crossover strategy
    self.labelShort.setText(_translate("TabWidgetPortfolio", "Short Moving Average:"))
    self.comboBoxShort.setItemText(0, _translate("TabWidgetPortfolio", "5"))
    self.comboBoxShort.setItemText(1, _translate("TabWidgetPortfolio", "10"))
    self.comboBoxShort.setItemText(2, _translate("TabWidgetPortfolio", "25"))
    self.labelLong.setText(_translate("TabWidgetPortfolio", "Long Moving Average:"))
    self.comboBoxLong.setItemText(0, _translate("TabWidgetPortfolio", "50"))
    self.comboBoxLong.setItemText(1, _translate("TabWidgetPortfolio", "100"))
    self.comboBoxLong.setItemText(2, _translate("TabWidgetPortfolio", "200"))
    self.MACButton.setText(_translate("TabWidgetPortfolio", "Advise"))
    self.toolBoxAdvisor.setItemText(self.toolBoxAdvisor.indexOf(self.pageMAC), _translate("TabWidgetPortfolio", "Two Moving Average Crossover Strategy"))
    # --- Trade Advisor tab: turtle strategy
    self.labelBreakout.setText(_translate("TabWidgetPortfolio", "Breakout:"))
    self.comboBoxBreakout.setCurrentText(_translate("TabWidgetPortfolio", "35"))
    self.comboBoxBreakout.setItemText(0, _translate("TabWidgetPortfolio", "35"))
    self.comboBoxBreakout.setItemText(1, _translate("TabWidgetPortfolio", "40"))
    self.comboBoxBreakout.setItemText(2, _translate("TabWidgetPortfolio", "45"))
    self.comboBoxBreakout.setItemText(3, _translate("TabWidgetPortfolio", "50"))
    self.comboBoxBreakout.setItemText(4, _translate("TabWidgetPortfolio", "55"))
    self.turtleButton.setText(_translate("TabWidgetPortfolio", "Advise"))
    self.toolBoxAdvisor.setItemText(self.toolBoxAdvisor.indexOf(self.pageTurtle), _translate("TabWidgetPortfolio", "Turtle Strategy"))
    self.labelInitialCapital.setText(_translate("TabWidgetPortfolio", "Initial Capital:"))
    self.comboBoxInitialCapital.setCurrentText(_translate("TabWidgetPortfolio", "100000"))
    self.comboBoxInitialCapital.setItemText(0, _translate("TabWidgetPortfolio", "100000"))
    self.comboBoxInitialCapital.setItemText(1, _translate("TabWidgetPortfolio", "150000"))
    self.comboBoxInitialCapital.setItemText(2, _translate("TabWidgetPortfolio", "200000"))
    self.comboBoxInitialCapital.setItemText(3, _translate("TabWidgetPortfolio", "250000"))
    self.labelCommission.setText(_translate("TabWidgetPortfolio", "Commission:"))
    self.comboBoxCommission.setItemText(0, _translate("TabWidgetPortfolio", "0.00"))
    self.comboBoxCommission.setItemText(1, _translate("TabWidgetPortfolio", "0.02"))
    self.comboBoxCommission.setItemText(2, _translate("TabWidgetPortfolio", "0.03"))
    self.comboBoxCommission.setItemText(3, _translate("TabWidgetPortfolio", "0.04"))
    TabWidgetPortfolio.setTabText(TabWidgetPortfolio.indexOf(self.tab_advisor), _translate("TabWidgetPortfolio", "Trade Advisor"))
    # --- Date limits: end date capped at today, start date at the end date.
    # NOTE(review): these mutate widget *state*, not text, and would
    # normally belong in setupUi rather than retranslateUi - confirm intent.
    self.dateEnd.setMaximumDate(date.today())
    self.dateStart.setMaximumDate(self.dateEnd.date())
def startDateLowerThenEnd(self):
    """Keep the start date from exceeding the end date (slot for
    dateEnd.dateChanged): cap dateStart at the selected end date."""
    end_date = self.dateEnd.date()
    self.dateStart.setMaximumDate(end_date)
def disablePeriod(self):
    """Switch input mode to date-range: grey out the period selector and
    enable both date pickers (slot for radioButtonRange.clicked)."""
    self.comboBoxPeriod.setDisabled(True)
    for picker in (self.dateEnd, self.dateStart):
        picker.setEnabled(True)
def disableRange(self):
    """Switch input mode to period: grey out both date pickers and enable
    the period selector (slot for radioButtonPeriod.clicked)."""
    for picker in (self.dateEnd, self.dateStart):
        picker.setDisabled(True)
    self.comboBoxPeriod.setEnabled(True)
def changeIntervalOptionsForPeriod(self):
    """Repopulate comboBoxInterval with the interval choices valid for the
    currently selected data period (intraday intervals are only offered for
    short periods, matching yfinance's restrictions).

    Slot for comboBoxPeriod.currentIndexChanged and
    radioButtonPeriod.clicked.
    """
    period = self.comboBoxPeriod.currentText()
    if period in ("1mo", "60d"):
        intervals = ["2m", "5m", "15m", "30m", "60m", "90m",
                     "1d", "5d", "1wk", "1mo", "3mo"]
    elif period in ("1d", "5d", "7d"):
        # Short periods additionally allow 1-minute data.
        intervals = ["1m", "2m", "5m", "15m", "30m", "60m", "90m",
                     "1d", "5d", "1wk", "1mo", "3mo"]
    elif period in ("3mo", "6mo", "1y", "2y", "5y", "10y"):
        # BUG FIX: "6mo" is offered in comboBoxPeriod but was not handled
        # here, leaving a stale interval list when selected.
        intervals = ["1d", "5d", "1wk", "1mo", "3mo"]
    else:
        # Unknown period: leave the current interval list untouched,
        # matching the original behavior.
        return
    self.comboBoxInterval.clear()
    for text in intervals:
        self.comboBoxInterval.addItem(text)
def changeIntervalOptionsForRange(self):
    """Offer only daily-or-coarser intervals when a date range is used
    (slot for radioButtonRange.clicked)."""
    self.comboBoxInterval.clear()
    for position, text in enumerate(("1d", "5d", "1wk", "1mo", "3mo")):
        self.comboBoxInterval.addItem("")
        self.comboBoxInterval.setItemText(position, text)
###################################DATA############################################################################################
def addToList(self):
    """Validate the symbol in the combo box against Yahoo Finance and
    prepend it to the portfolio list widget, or show the error dialog.

    A short 5-day / 1-minute download doubles as the existence check:
    yfinance returns an empty DataFrame for unknown tickers.
    """
    symbol = self.showSymbol(str(self.comboBoxSymbol.currentText()))
    history = yf.download(symbol, period="5d", interval="1m")
    # FIX: replaced the `x != True` comparison with a direct truthiness
    # check on DataFrame.empty.
    if not history.empty:
        self.listWidget.insertItem(0, symbol)
        self.comboBoxSymbol.setCurrentIndex(0)
    else:
        self.openError()
def showSymbol(self, item):
    """Translate a company display name to its ticker symbol; anything not
    in the known list is returned unchanged (assumed to already be a
    ticker)."""
    ticker_by_name = {
        'Microsoft': 'MSFT',
        'Apple': 'AAPL',
        'Amazon': 'AMZN',
        'Alphabet': 'GOOG',
        'Alibaba': 'BABA',
        'Facebook': 'FB',
        'Visa': 'V',
        'Walmart': 'WMT',
    }
    return ticker_by_name.get(item, item)
| |
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf8
import urllib.request, urllib.parse, urllib.error
import zipfile
from io import BytesIO
import json
import csv
import copy
import sys
import time
import math
from xml.etree import ElementTree as ET
import utm
version = "0.5.0"

# HTTP header sent with every download request.
header = {"User-Agent": "nkamapper/n50osm"}

# Rounding applied to all output longitude/latitude values.
coordinate_decimals = 7

island_size = 100000  # Minimum square meters for place=island vs place=islet

lake_ele_size = 2000  # Minimum square meters for fetching elevation

# N50 dataset categories that may be processed.
data_categories = ["AdministrativeOmrader", "Arealdekke", "BygningerOgAnlegg", "Hoyde", "Restriksjonsomrader", "Samferdsel", "Stedsnavn"]

avoid_objects = [  # Object types to exclude from output
    'ÅpentOmråde', 'Tregruppe',  # Arealdekke
    'GangSykkelveg', 'VegSenterlinje', 'Vegsperring',  # Samferdsel
    'Forsenkningskurve', 'Hjelpekurve', 'Høydekurve',  # Hoyde
    'PresentasjonTekst'  # Stedsnavn
]

# Helper geometries used only while building polygons, never output as-is.
auxiliary_objects = ['Arealbrukgrense', 'Dataavgrensning', 'FiktivDelelinje', \
    'InnsjøElvSperre', 'InnsjøInnsjøSperre', 'ElvBekkKant', 'Havflate', 'Innsjøkant', 'InnsjøkantRegulert', 'FerskvannTørrfallkant']

avoid_tags = [  # N50 properties to exclude from output (unless debug)
    'oppdateringsdato', 'datafangstdato',
    'målemetode', 'nøyaktighet'
]

# Static N50 feature type -> OSM tag mapping; special cases are handled in
# tag_object() before this table is consulted.
osm_tags = {
    # Arealdekke
    'Alpinbakke': { 'landuse': 'winter_sports', 'piste:type': 'downhill', 'area': 'yes' },
    'BymessigBebyggelse': { 'landuse': 'retail' }, #'landuse': 'residential', 'residential': 'urban' },
    'DyrketMark': { 'landuse': 'farmland' },
    'ElvBekk': { 'waterway': 'stream' },
    'FerskvannTørrfall': { 'waterway': 'riverbank', 'intermittent': 'yes' },
    'Foss': { 'waterway': 'waterfall' },
    'Golfbane': { 'leisure': 'golf_course' },
    'Gravplass': { 'landuse': 'cemetery' },
    'HavElvSperre': { 'natural': 'coastline' },
    'HavInnsjøSperre': { 'natural': 'coastline' },
    'Hyttefelt': { 'landuse': 'residential', 'residential': 'cabin' },
    'Industriområde': { 'landuse': 'industrial' },
    'Innsjø': { 'natural': 'water' },
    'InnsjøRegulert': { 'natural': 'water', 'water': 'reservoir' },
    'Kystkontur': { 'natural': 'coastline' },
    'Lufthavn': { 'aeroway': 'aerodrome' },
    'Myr': { 'natural': 'wetland', 'wetland': 'bog' },
    'Park': { 'leisure': 'park' },
    'Rullebane': { 'aeroway': 'runway' },
    'Skjær': { 'seamark:type': 'rock' },
    'Skog': { 'natural': 'wood' },
    'Skytefelt': { 'leisure': 'pitch', 'sport': 'shooting' },
    'SnøIsbre': { 'natural': 'glacier' },
    'SportIdrettPlass': { 'leisure': 'pitch' },
    'Steinbrudd': { 'landuse': 'quarry'},
    'Steintipp': { 'landuse': 'landfill' },
    'Tettbebyggelse': { 'landuse': 'residential' },
    # Samferdsel
    'Barmarksløype': { 'highway': 'track' },
    'Traktorveg': { 'highway': 'track' },
    'Sti': { 'highway': 'path' },
    # Hoyde
    'Terrengpunkt': { 'natural': 'hill' },
    'TrigonometriskPunkt': { 'natural': 'hill' },
    # Restriksjonsomrader
    'Naturvernområde': { 'boundary': 'protected_area' },
    'Allmenning': { 'boundary': 'protected_area', 'protect_class': '27'}, # Public land
    # BygningerOgAnlegg
    'Bygning': { 'building': 'yes' },
    'Campingplass': { 'tourism': 'camp_site' },
    'Dam': { 'waterway': 'dam' },
    'Flytebrygge': { 'man_made': 'pier', 'floating': 'yes' },
    'Gruve': { 'man_made': 'adit' }, # Could be shaft
    'Hoppbakke': { 'piste:type': 'ski_jump' },
    'KaiBrygge': { 'man_made': 'quay' },
    'Ledning': { 'power': 'line' },
    'LuftledningLH': { 'power': 'line' },
    'Lysløype': { 'highway': 'track', 'lit': 'yes', 'trailblazed': 'yes' },
    'MastTele': { 'man_made': 'mast', 'tower:type': 'communication' },
    'Molo': { 'man_made': 'breakwater' },
    'Navigasjonsinstallasjon': { 'man_made': 'lighthouse' }, # Only lighthouses, it seems
    'Parkeringsområde': { 'amenity': 'parking' },
    'Pir': { 'man_made': 'pier' },
    'Reingjerde': { 'barrier': 'fence' },
    'Rørgate': { 'man_made': 'pipeline' }, # Also "tømmerrenne"
    'Skitrekk': { 'aerialway': 'drag_lift' }, # Could be other aerialway values
    'Skytebaneinnretning': { 'leisure': 'pitch', 'sport': 'shooting' },
    'Tank': { 'man_made': 'tank' },
    'Taubane': { 'aerialway': 'cable_car' }, # Could be other aerial values, e.g. gondola, goods
    'Tårn': { 'man_made': 'tower' }, # Any massive or substantial tower
    'Vindkraftverk': { 'power': 'generator', 'generator:source': 'wind', 'generator:type': 'horizontal_axis' }
}
# OSM tagging; first special cases
def tag_object(feature_type, geometry_type, properties, feature):
    """Map one N50 feature to OSM tags.

    Returns (tags, missing_tags): `tags` is the OSM key/value dict,
    `missing_tags` a set containing the feature type when it could not be
    mapped. Relies on module globals osm_tags, auxiliary_objects,
    building_tags and data_category (the last two are defined elsewhere in
    this file).
    """
    tags = {}
    missing_tags = set()

    # First special object cases
    if feature_type == "ElvBekk":
        if geometry_type == "område":
            tags['waterway'] = "riverbank"
        elif "vannBredde" in properties and properties['vannBredde'] > "2":  # >3 meter
            tags['waterway'] = "river"
        else:
            tags['waterway'] = "stream"

    elif feature_type == "Skytefelt" and data_category == "Restriksjonsomrader":  # Exception to Arealdekke
        tags['landuse'] = "military"

    elif feature_type == "Bygning":
        if "bygningstype" in properties:
            if properties['bygningstype'] == "956":  # Tourist cabin
                if "betjeningsgrad" in properties:
                    if properties['betjeningsgrad'] == "B":  # Staffed
                        tags['tourism'] = "alpine_hut"
                    elif properties['betjeningsgrad'] == "S":  # Self-service
                        tags['tourism'] = "wilderness_hut"
                    # BUG FIX: was `== ["U", "D", "R"]`, comparing a string
                    # to a list (always False), so unstaffed/day-trip/rest
                    # cabins silently fell through untagged.
                    elif properties['betjeningsgrad'] in ["U", "D", "R"]:  # Unstaffed, day trip, rest cabin
                        tags['amenity'] = "shelter"
                        tags['shelter_type'] = "basic_hut"
                else:
                    tags['amenity'] = "shelter"
                    tags['shelter_type'] = "lean_to"
                if "hytteeier" in properties:
                    if properties['hytteeier'] == "1":
                        tags['operator'] = "DNT"
                    elif properties['hytteeier'] == "3":
                        tags['operator'] = "Fjellstyre"
                    elif properties['hytteeier'] == "4":
                        tags['operator'] = "Statskog"
            elif properties['bygningstype'] in building_tags:
                for key, value in iter(building_tags[ properties['bygningstype'] ].items()):
                    if geometry_type == "område" or key != "building" or len(building_tags[ properties['bygningstype'] ]) > 1:
                        tags[ key ] = value
        if geometry_type != "posisjon" and "building" not in tags:
            tags['building'] = "yes"  # No building tag for single nodes

    elif feature_type == "Lufthavn":
        if "lufthavntype" in properties and properties['lufthavntype'] == "H":
            tags['aeroway'] = "heliport"
        else:
            tags['aeroway'] = "aerodrome"
        if "trafikktype" in properties:
            if properties['trafikktype'] == "I":
                tags['aeroway:type'] = "international"
            elif properties['trafikktype'] == "N":
                tags['aeroway:type'] = "regional"
            elif properties['trafikktype'] == "A":
                tags['aeroway:type'] = "airfield"
        if "iataKode" in properties and properties['iataKode'] != "XXX":
            tags['iata'] = properties['iataKode']
        if "icaoKode" in properties and properties['icaoKode'] != "XXXX":
            tags['icao'] = properties['icaoKode']

    elif geometry_type == "område" and feature_type == "SportIdrettPlass":
        if len(feature['coordinates']) > 1:
            tags['leisure'] = "track"
            tags['area'] = "yes"
        else:
            tags['leisure'] = "pitch"

    # Then conversion dict
    elif feature_type in osm_tags:
        tags.update( osm_tags[feature_type] )

    # Collect set of remaining object types not handled
    elif feature_type not in auxiliary_objects:
        missing_tags.add(feature_type)

    # Additional tagging based on object properties from GML
    if "høyde" in properties:
        tags['ele'] = properties['høyde']
    if "lavesteRegulerteVannstand" in properties:
        tags['ele:min'] = properties['lavesteRegulerteVannstand']
    if "vatnLøpenummer" in properties:
        tags['ref:nve:vann'] = properties['vatnLøpenummer']
    if "navn" in properties:
        tags['name'] = properties['navn']
    if "fulltekst" in properties:
        tags['name'] = properties['fulltekst']
    if "stedsnummer" in properties:
        tags['ssr:stedsnr'] = properties['stedsnummer']
    if "merking" in properties and properties['merking'] == "JA":
        tags['trailblazed'] = "yes"
    if "verneform" in properties:
        if properties['verneform'] in ["NP", "NPS"]:
            tags['boundary'] = "national_park"
        elif properties['verneform'] in ["LVO", "NM"]:
            tags['boundary'] = "protected_area"
        else:
            tags['leisure'] = "nature_reserve"
    if "lengde" in properties and feature_type == "Hoppbakke":
        tags['ref'] = "K" + properties['lengde']

    return (tags, missing_tags)
# Output message
def message(output_text):
    """Write a progress message to stdout immediately (no trailing newline,
    flushed so it shows up even mid-line)."""
    print(output_text, end="", flush=True)
# Format time
def timeformat(sec):
    """Render a duration in seconds as 'H:MM:SS hours', 'M:SS minutes' or
    'S seconds' depending on magnitude."""
    if sec > 3600:
        hours, remainder = divmod(int(sec), 3600)
        minutes, seconds = divmod(remainder, 60)
        return "%i:%02i:%02i hours" % (hours, minutes, seconds)
    if sec > 60:
        minutes, seconds = divmod(int(sec), 60)
        return "%i:%02i minutes" % (minutes, seconds)
    return "%i seconds" % sec
def polygon_area(polygon):
    """Signed planar area of a closed (lon, lat) ring in square meters.

    Uses a simple equirectangular projection - accurate for small areas only.
    Sign convention: < 0 clockwise, > 0 counter-clockwise; returns 0 when the
    ring is not closed (first node != last node).
    """
    if polygon[0] != polygon[-1]:
        return 0
    lat_scale = math.pi * 6371009.0 / 180.0  # meters per degree of latitude
    projected = [
        (lon * lat_scale * math.cos(math.radians(lat)), lat * lat_scale)
        for lon, lat in polygon
    ]
    # Shoelace formula over consecutive edge pairs.
    doubled_area = 0.0
    for (x1, y1), (x2, y2) in zip(projected, projected[1:]):
        doubled_area += (y2 - y1) * (x2 + x1)
    return int(doubled_area / 2.0)
# Calculate coordinate area of multipolygon, i.e. excluding inner polygons
def multipolygon_area(multipolygon):
    """Net area of a multipolygon: outer ring minus every inner ring.

    Returns None when the structure is not a list of rings with a closed
    outer ring, or when any inner ring has zero/unclosed area.
    """
    looks_valid = (
        type(multipolygon) is list
        and len(multipolygon) > 0
        and type(multipolygon[0]) is list
        and multipolygon[0][0] == multipolygon[0][-1]
    )
    if not looks_valid:
        return None
    net_area = polygon_area(multipolygon[0])
    for inner_ring in multipolygon[1:]:
        hole_area = polygon_area(inner_ring)
        if not hole_area:
            # Unclosed (or degenerate) hole invalidates the whole result.
            return None
        net_area -= hole_area
    return net_area
# Calculate centroid of polygon
# Source: https://en.wikipedia.org/wiki/Centroid#Of_a_polygon
def polygon_centroid(polygon):
    """Area-weighted centroid (x, y) of a closed ring; None if the ring is
    not closed (first node != last node)."""
    if polygon[0] != polygon[-1]:
        return None
    centroid_x = 0.0
    centroid_y = 0.0
    determinant = 0.0
    for (x1, y1), (x2, y2) in zip(polygon, polygon[1:]):
        cross = x1 * y2 - x2 * y1  # x1*y2 - x2*y1
        determinant += cross
        centroid_x += (x1 + x2) * cross
        centroid_y += (y1 + y2) * cross
    return (centroid_x / (3.0 * determinant), centroid_y / (3.0 * determinant))
# Tests whether point (x,y) is inside a polygon
# Ray tracing method
def inside_polygon (point, polygon):
    # Classic ray-casting: cast a ray to the right from the point and count
    # edge crossings; an odd count means "inside". Returns None when the
    # ring is not closed (first node != last node).
    if polygon[0] == polygon[-1]:
        x, y = point
        n = len(polygon)
        inside = False
        p1x, p1y = polygon[0]
        for i in range(n):
            p2x, p2y = polygon[i]
            # Edge (p1 -> p2) can only cross the ray if the point's y lies
            # strictly above the lower endpoint and at or below the upper one,
            # and the point is not entirely to the right of the edge.
            if y > min(p1y, p2y):
                if y <= max(p1y, p2y):
                    if x <= max(p1x, p2x):
                        if p1y != p2y:
                            # x-coordinate where the edge meets the ray
                            xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                        if p1x == p2x or x <= xints:
                            inside = not inside
            p1x, p1y = p2x, p2y
        return inside
    else:
        return None
# Test whether point (x,y) is inside a multipolygon, i.e. not inside inner polygons
def inside_multipolygon(point, multipolygon):
    """True when point lies inside the outer ring but in none of the inner
    rings; None when the structure is not a valid list of rings."""
    looks_valid = (
        type(multipolygon) is list
        and len(multipolygon) > 0
        and type(multipolygon[0]) is list
        and multipolygon[0][0] == multipolygon[0][-1]
    )
    if not looks_valid:
        return None
    inside = inside_polygon(point, multipolygon[0])
    if inside:
        for hole in multipolygon[1:]:
            if inside_polygon(point, hole):
                return False
    return inside
# Calculate new node with given distance offset in meters
# Works over short distances
def coordinate_offset(node, distance):
    """Shift a (lon, lat) node diagonally by `distance` meters in both axes
    (negative distance shifts down/left). Small-distance approximation."""
    degrees_per_meter = 1 / ((math.pi / 180.0) * 6378137.0)
    lon, lat = node[0], node[1]
    new_lat = lat + (distance * degrees_per_meter)
    # Longitude degrees shrink with latitude, hence the cosine correction.
    new_lon = lon + (distance * degrees_per_meter) / math.cos(math.radians(lat))
    return (new_lon, new_lat)
# Identify bounds of line coordinates
# Returns lower left and upper right corners of square bounds + extra perimeter (in meters)
def get_bbox(coordinates, perimeter):
    """Axis-aligned bounding box of a point/line/polygon, optionally grown
    by `perimeter` meters on every side; returns [lower_left, upper_right]."""
    # Normalize input: a bare tuple is a single node, a list of tuples is a
    # line, otherwise take the first (outer) ring of a polygon.
    if type(coordinates) is tuple:
        nodes = [coordinates]
    elif type(coordinates[0]) is tuple:
        nodes = coordinates
    else:
        nodes = coordinates[0]
    lons = [node[0] for node in nodes]
    lats = [node[1] for node in nodes]
    lower_left = [min(lons), min(lats)]
    upper_right = [max(lons), max(lats)]
    if perimeter > 0:
        lower_left = coordinate_offset(lower_left, -perimeter)
        upper_right = coordinate_offset(upper_right, +perimeter)
    return [lower_left, upper_right]
# Create feature with one point
def create_point(node, gml_id, note):
    """In debug mode, append a point marker feature to the global `features`
    list (annotated with `note`); a no-op otherwise."""
    if not debug:
        return
    marker = {
        'object': 'Debug',
        'type': 'Point',
        'coordinates': node,
        'members': [],
        'tags': {},
        'extras': {
            'objekttype': 'Debug',
            'note': note,
        },
    }
    if gml_id:
        marker['gml_id'] = gml_id
    features.append(marker)
# Get list of coordinates from GML
# Each point is a tuple of (lon, lat), corresponding to GeoJSON format x,y
def parse_coordinates (coord_text):
global gml_id
parse_count = 0
split_coord = coord_text.split(" ")
coordinates = []
for i in range(0, len(split_coord) - 1, 2):
x = float(split_coord[i])
y = float(split_coord[i+1])
[lat, lon] = utm.UtmToLatLon (x, y, 33, "N")
node = ( round(lon, coordinate_decimals), round(lat, coordinate_decimals) )
parse_count += 1
if not coordinates or node != coordinates[-1] or json_output:
coordinates.append(node)
else:
message ("\t*** DELETED DUPLICATE NODE: %s %s\n" % (node, gml_id))
create_point(node, gml_id, "deleted duplicate")
# Remove single outlayer node
if not json_output:
i = 0
while i < len(coordinates):
if i > 1 and coordinates[i] == coordinates[i - 2]:
message ("\t*** DELETED ARTEFACT NODE: %s %s\n" % (coordinates[ i - 1 ], gml_id))
create_point(copy.deepcopy(coordinates[ i - 1 ]), gml_id, "deleted artefact")
coordinates.pop(i)
coordinates.pop(i - 1)
i -= 1
else:
i += 1
if len(coordinates) > 2 and coordinates[0] == | |
COMMA foreign ref
| expr RP
"""
p[0] = p[1]
p_list = list(p)
if p_list[-1] != "," and p_list[-1] != ")":
if "type" in p_list[-1] and "name" in p_list[-1]:
p[0]["columns"].append(p_list[-1])
elif "check" in p_list[-1]:
if isinstance(p_list[-1]["check"], list):
check = " ".join(p_list[-1]["check"])
if isinstance(check, str):
check = {"constraint_name": None, "statement": check}
else:
check = p_list[-1]["check"]
p[0] = self.set_constraint(
p[0], "checks", check, check["constraint_name"]
)
p[0]["checks"].append(check)
else:
p[0].update(p_list[-1])
if isinstance(p_list[-1], dict):
if "constraint" in p_list[-2]:
if p_list[-1].get("unique_statement"):
p[0] = self.set_constraint(
p[0],
"uniques",
{"columns": p_list[-1]["unique_statement"]},
p_list[-2]["constraint"]["name"],
)
else:
p[0] = self.set_constraint(
p[0],
"primary_keys",
{"columns": p_list[-1]["primary_key"]},
p_list[-2]["constraint"]["name"],
)
elif p_list[-1].get("references"):
p[0] = self.add_ref_information_to_table(p, p_list)
def add_ref_information_to_table(self, p, p_list):
    # Attach REFERENCES information parsed from the production to the table
    # dict in p[0]: either as a named table-level constraint, or as one
    # per-column entry in p[0]["ref_columns"] when a column list precedes
    # the references clause.
    if len(p_list) > 4 and "constraint" in p_list[3]:
        # Named FK constraint: record under constraints["references"].
        p[0] = self.set_constraint(
            p[0],
            "references",
            p_list[-1]["references"],
            p_list[3]["constraint"]["name"],
        )
    elif isinstance(p_list[-2], list):
        # Unnamed FK over an explicit column list: pair each local column
        # with the matching referenced column by position.
        if "ref_columns" not in p[0]:
            p[0]["ref_columns"] = []
        for num, column in enumerate(p_list[-2]):
            ref = deepcopy(p_list[-1]["references"])
            ref["column"] = ref["columns"][num]
            del ref["columns"]
            ref["name"] = column
            p[0]["ref_columns"].append(ref)
    return p[0]
@staticmethod
def set_constraint(target_dict, _type, constraint, constraint_name):
if not target_dict.get("constraints"):
target_dict["constraints"] = {}
if not target_dict["constraints"].get(_type):
target_dict["constraints"][_type] = []
constraint.update({"constraint_name": constraint_name})
target_dict["constraints"][_type].append(constraint)
return target_dict
def p_expression_like_table(self, p):
    """expr : table_name LIKE ID
    | table_name LIKE ID DOT ID
    """
    # Grammar docstring above is consumed by ply - do not reword it.
    # Attach a {"like": {...}} clause naming the template table.
    p_list = list(p)
    # FIX/robustness: default schema/table_name up front so the short
    # "table_name LIKE ID" production can never reference them unassigned;
    # the qualified production then overrides the schema.
    schema = None
    table_name = p_list[-1]
    if len(p) > 4 and "." in p:
        schema = p_list[-3]
    p[0] = p[1]
    p[0].update({"like": {"schema": schema, "table_name": table_name}})
def p_table_name(self, p):
    """table_name : create_table ID DOT ID
    | create_table ID
    | table_name LIKE ID
    | table_name DOT ID
    """
    # Grammar docstring above is consumed by ply - do not reword it.
    # Extract schema & table name and seed the table dict.
    p_list = list(p)
    p[0] = p[1]
    # FIX/robustness: default to "last symbol is the table name, no schema"
    # so short productions can never reference the variables unassigned;
    # schema-qualified names (a DOT in the symbols) override the schema.
    schema = None
    table_name = p_list[-1]
    if len(p) > 4 and "." in p:
        schema = p_list[-3]
    p[0].update(
        {"schema": schema, "table_name": table_name, "columns": [], "checks": []}
    )
def p_expression_seq(self, p):
    """expr : seq_name
    | expr INCREMENT ID
    | expr START ID
    | expr MINVALUE ID
    | expr MAXVALUE ID
    | expr CACHE ID
    """
    # Each sequence option keyword (INCREMENT/START/...) becomes a
    # lowercase key with its integer argument as the value.
    symbols = list(p)
    p[0] = p[1]
    if len(p) > 2:
        option_key = p[2].lower()
        p[0][option_key] = int(symbols[-1])
def p_seq_name(self, p):
    """seq_name : create_seq ID DOT ID
    | create_seq ID
    """
    # Grammar docstring above is consumed by ply - do not reword it.
    # Extract schema & sequence name.
    p_list = list(p)
    schema = None
    # FIX/robustness: default the sequence name to the last symbol so the
    # short "create_seq ID" production can never reference it unassigned;
    # schema-qualified names override the schema.
    seq_name = p_list[-1]
    if len(p) > 4 and "." in p:
        schema = p_list[-3]
    p[0] = {"schema": schema, "sequence_name": seq_name}
def p_create_seq(self, p):
    """create_seq : CREATE SEQUENCE IF NOT EXISTS
    | CREATE SEQUENCE
    """
    # Recognizer only: the CREATE SEQUENCE header carries no data of its
    # own, so no semantic value is attached (p[0] stays None); the name is
    # picked up by p_seq_name.
    pass
def p_tid(self, p):
    """tid : LT ID
    | tid ID
    | tid COMMAT
    | tid RT
    """
    # Accumulate the pieces of a parameterized/generic type (e.g. "<a,b>")
    # into a single-element list: the first symbol seeds the list, every
    # later symbol is concatenated onto element 0.
    if not isinstance(p[1], list):
        p[0] = [p[1]]
    else:
        p[0] = p[1]
    for i in list(p)[2:]:
        p[0][0] += i
@staticmethod
def get_complex_type(p, p_list):
    # Merge a parameterized type (tid, a single-element list - see p_tid)
    # into the column dict held in p[0]["type"].
    if len(p_list) == 4:
        # Base type plus tid in one production: join them with a space.
        p[0]["type"] = f"{p[2]} {p[3][0]}"
    elif p[0]["type"]:
        # A type is already present: unwrap a one-element list first, then
        # append the new tid fragment.
        if len(p[0]["type"]) == 1 and isinstance(p[0]["type"], list):
            p[0]["type"] = p[0]["type"][0]
        p[0]["type"] = f'{p[0]["type"]} {p_list[-1][0]}'
    else:
        # No type yet: the tid fragment becomes the type.
        p[0]["type"] = p_list[-1][0]
    return p[0]
def extract_references(self, p_list):
    """Build a reference dict (table/schema/columns plus empty action
    slots) from the production symbols following a REFERENCES keyword."""
    ref_index = p_list.index("REFERENCES")
    ref = {
        "table": None,
        "columns": [None],
        "schema": None,
        "on_delete": None,
        "on_update": None,
        "deferrable_initially": None,
    }
    if "." in p_list[ref_index:]:
        # Schema-qualified: REFERENCES schema . table (columns)
        ref["schema"] = p_list[ref_index + 1]
        ref["table"] = p_list[ref_index + 3]
        ref["columns"] = p_list[-1]
    else:
        ref["table"] = p_list[ref_index + 1]
        # A bare 3-symbol clause has no column list - keep the placeholder.
        if len(p_list) != 3:
            ref["columns"] = p_list[-1]
    return ref
def p_null(self, p):
    """null : NULL
    | NOT NULL
    """
    # "NOT NULL" -> not nullable; a bare "NULL" (or neither token) keeps
    # the column nullable.
    symbols = list(p)
    has_null = "NULL" in symbols or "null" in symbols
    has_not = "NOT" in symbols or "not" in symbols
    p[0] = {"nullable": not (has_null and has_not)}
def p_f_call(self, p):
    """f_call : ID LP RP
    | ID LP f_call RP
    | ID LP multi_id RP
    | ID LP pid RP
    """
    symbols = list(p)
    if isinstance(p[1], list):
        # Accumulating mode: extend the existing list with the new symbol.
        p[0] = p[1]
        p[0].append(symbols[-1])
    else:
        # Flatten the tokens into one call string, joining any argument
        # lists with commas: e.g. ID LP pid RP -> "f(a,b)".
        parts = []
        for symbol in symbols[1:]:
            parts.append(",".join(symbol) if isinstance(symbol, list) else symbol)
        p[0] = "".join(parts)
def p_multi_id(self, p):
    """multi_id : ID
    | multi_id ID
    | f_call
    | multi_id f_call
    """
    symbols = list(p)
    if isinstance(p[1], list):
        # Accumulating mode: extend the existing list with the new symbol.
        p[0] = p[1]
        p[0].append(symbols[-1])
    else:
        # String mode: join all symbols with single spaces.
        p[0] = " ".join(symbols[1:])
def p_funct_expr(self, p):
    """funct_expr : LP multi_id RP
    | multi_id
    """
    # Strip surrounding parentheses when present, otherwise pass through.
    p[0] = p[2] if len(p) > 2 else p[1]
def p_def(self, p):
    """def : DEFAULT ID
    | DEFAULT STRING
    | DEFAULT NULL
    | DEFAULT funct_expr
    | DEFAULT LP pid RP
    | def ID
    | def LP RP
    """
    # Build/extend the column DEFAULT clause. On the first DEFAULT token
    # p[0] becomes {"default": value}; continuation productions (def ID,
    # def LP RP) append the extra symbols onto the existing default string.
    p_list = list(p)
    if len(p_list) == 5 and isinstance(p[3], list):
        # DEFAULT LP pid RP: take the first parenthesized value.
        default = p[3][0]
    else:
        default = p[2]
    # NOTE(review): isinstance of str is assumed here - a non-string default
    # (e.g. already-parsed value) would break .isnumeric().
    if default.isnumeric():
        default = int(default)
    if isinstance(p[1], dict):
        # Continuation: concatenate remaining symbols onto the default,
        # keeping parentheses tight and other tokens space-separated.
        p[0] = p[1]
        for i in p[2:]:
            if isinstance(p[2], str):
                # Un-escape single quotes in the token text.
                p[2] = p[2].replace("\\'", "'")
            if i == ")" or i == "(":
                p[0]["default"] = str(p[0]["default"]) + f"{i}"
            else:
                p[0]["default"] = str(p[0]["default"]) + f" {i}"
        # Collapse doubled closing parens produced by the concatenation.
        p[0]["default"] = p[0]["default"].replace("))", ")")
    else:
        p[0] = {"default": default}
def p_constraint(self, p):
    """
    constraint : CONSTRAINT ID
    """
    # Capture the constraint's name; sibling rules attach the body.
    constraint_name = list(p)[-1]
    p[0] = {"constraint": {"name": constraint_name}}
def p_generated(self, p):
    """
    generated : gen_always funct_expr
    | gen_always funct_expr ID
    | gen_always LP multi_id RP
    | gen_always f_call
    """
    # GENERATED ALWAYS AS (<expr>) [STORED] column clause.
    p_list = list(p)
    stored = False
    # A trailing ID is expected to be the STORED keyword.
    # NOTE(review): p_list[-1] is assumed to be a string here; a list-valued
    # last symbol would break .lower() - confirm against the grammar.
    if len(p) > 3 and p_list[-1].lower() == "stored":
        stored = True
    # NOTE(review): for the "LP multi_id RP" production p[2] is the LP
    # token, not the expression - looks suspicious but left as-is.
    _as = p[2]
    p[0] = {"generated": {"always": True, "as": _as, "stored": stored}}
def p_gen_always(self, p):
    """
    gen_always : GENERATED ID AS
    """
    # Marker only - the generation expression is attached by p_generated.
    p[0] = {"generated": {"always": True}}
def p_check_st(self, p):
    """check_st : CHECK LP ID
    | check_st ID
    | check_st STRING
    | check_st ID RP
    | check_st STRING RP
    """
    # Accumulate the tokens of a CHECK (...) expression into p[0]["check"].
    # remove_par (helper defined elsewhere in this module) strips the LP/RP
    # tokens so only expression symbols are appended.
    p_list = remove_par(list(p))
    if isinstance(p[1], dict):
        # Continuation of an existing check clause.
        p[0] = p[1]
    else:
        p[0] = {"check": []}
    for item in p_list[2:]:
        p[0]["check"].append(item)
def p_expression_alter(self, p):
    """expr : alter_foreign ref
    | alter_check
    | alter_unique
    | alter_default
    """
    # Pass the alter dict through; the two-symbol alter_foreign form also
    # merges the parsed ref clause into it.
    p[0] = p[1]
    if len(p) == 3:
        p[0].update(p[2])
def p_alter_unique(self, p):
    """alter_unique : alt_table UNIQUE LP pid RP
    | alt_table constraint UNIQUE LP pid RP
    """
    # ALTER TABLE ... ADD [CONSTRAINT name] UNIQUE (columns).
    # remove_par (module helper) strips the LP/RP tokens; the column list
    # is then the last remaining symbol.
    p_list = remove_par(list(p))
    p[0] = p[1]
    p[0]["unique"] = {"constraint_name": None, "columns": p_list[-1]}
    # Named variant: p[2] is the constraint dict from p_constraint.
    if "constraint" in p[2]:
        p[0]["unique"]["constraint_name"] = p[2]["constraint"]["name"]
def p_alter_default(self, p):
    """alter_default : alt_table ID ID
    | alt_table constraint ID ID
    | alt_table ID STRING
    | alt_table constraint ID STRING
    | alter_default ID
    | alter_default FOR pid
    """
    # ALTER TABLE ... ADD [CONSTRAINT name] DEFAULT <value> [FOR column].
    # Continuation productions append further value tokens; the FOR clause
    # supplies the target column list.
    p_list = remove_par(list(p))
    p[0] = p[1]
    if "FOR" in p_list:
        # FOR clause: the last symbol names the column(s), no new value.
        column = p_list[-1]
        value = None
    elif p[0].get("default") and "value" in p[0]["default"]:
        # Continuation: extend the already-collected default value string.
        value = p[0]["default"]["value"] + " " + p_list[-1]
        column = None
    else:
        value = p_list[-1]
        column = None
    if "default" not in p[0]:
        p[0]["default"] = {
            "constraint_name": None,
            "columns": column,
            "value": value,
        }
    else:
        # NOTE(review): reads "column" (singular) but the dict key written
        # above is "columns" - the get() therefore always misses; left
        # as-is to preserve behavior.
        p[0]["default"].update(
            {
                "columns": p[0]["default"].get("column") or column,
                "value": value or p[0]["default"].get("value"),
            }
        )
    if "constraint" in p[2]:
        p[0]["default"]["constraint_name"] = p[2]["constraint"]["name"]
def p_alter_check(self, p):
    """alter_check : alt_table check_st
    | alt_table constraint check_st
    """
    # Attach a CHECK clause to an ALTER TABLE statement.
    p_list = remove_par(list(p))
    # p[1] is the alt_table dict; the original code re-assigned it a second
    # time under an isinstance guard, which was dead code (same object).
    p[0] = p[1]
    if not p[0].get("check"):
        p[0]["check"] = {"constraint_name": None, "statement": []}
    if isinstance(p[2], dict) and "constraint" in p[2]:
        # Named-constraint variant: record the constraint name.
        p[0]["check"]["constraint_name"] = p[2]["constraint"]["name"]
    # The CHECK expression tokens come from the trailing check_st dict.
    p[0]["check"]["statement"] = p_list[-1]["check"]
def p_pid_with_type(self, p):
    """pid_with_type : column
    | pid_with_type COMMA column
    """
    # Accumulate typed column definitions into a list.
    p_list = list(p)
    if not isinstance(p_list[1], list):
        # First column starts the list.
        p[0] = [p_list[1]]
    else:
        # "pid_with_type COMMA column": extend the accumulated list.
        p[0] = p_list[1]
        p[0].append(p_list[-1])
def p_pid(self, p):
    """pid : ID
    | STRING
    | pid ID
    | pid STRING
    | STRING LP RP
    | ID LP RP
    | pid COMMA ID
    | pid COMMA STRING
    """
    # Accumulate a list of identifiers (e.g. a column list); the
    # "name ( )" call forms are joined into a single string item.
    p_list = list(p)
    if len(p_list) == 4 and isinstance(p[1], str):
        # "ID LP RP" / "STRING LP RP": fuse tokens into one "name()" item.
        p[0] = ["".join(p[1:])]
    elif not isinstance(p_list[1], list):
        # First element starts a new list.
        p[0] = [p_list[1]]
    else:
        # Continuation: append the trailing token to the existing list.
        p[0] = p_list[1]
        p[0].append(p_list[-1])
def p_index_pid(self, p):
    """index_pid : ID
    | index_pid ID
    | index_pid COMMA index_pid
    """
    # Build the column list of an index definition, tracking per-column
    # sort order (ASC/DESC) and NULLS placement.
    p_list = list(p)
    if len(p_list) == 2:
        # Single column: defaults are ASC ordering with NULLS LAST.
        detailed_column = {"name": p_list[1], "order": "ASC", "nulls": "LAST"}
        column = p_list[1]
        p[0] = {"detailed_columns": [detailed_column], "columns": [column]}
    else:
        p[0] = p[1]
        if len(p) == 3:
            # "index_pid ID": the trailing keyword modifies the column.
            if p_list[-1] in ["DESC", "ASC"]:
                p[0]["detailed_columns"][0]["order"] = p_list[-1]
            else:
                p[0]["detailed_columns"][0]["nulls"] = p_list[-1]
            # NOTE(review): assigned but never used afterwards — confirm intent.
            column = p_list[2]
        elif isinstance(p_list[-1], dict):
            # "index_pid COMMA index_pid": merge the right-hand column set.
            for i in p_list[-1]["columns"]:
                p[0]["columns"].append(i)
            for i in p_list[-1]["detailed_columns"]:
                p[0]["detailed_columns"].append(i)
def p_alter_foreign(self, p):
"""alter_foreign : alt_table foreign
| alt_table constraint foreign
"""
p_list = list(p)
p[0] = p[1]
if isinstance(p_list[-1], | |
"""
This script extracts stack traces from txt files made available by Campbell 2016.
"""
import argparse
import codecs
import gzip
import logging
import math
import os
import re
import traceback
from datetime import datetime
from itertools import count
import sys
import json
import pymongo
import unicodedata
def fixline(line_raw, encoding_guess="utf-8"):
    """Decode one raw byte line and replace unprintable characters with '?'.

    Characters in any Unicode "C*" (control/other) category and U+FFFD
    replacement characters produced by lossy decoding are both mapped to
    '?'.  Note this includes the trailing newline, which also becomes '?'.

    Args:
        line_raw: bytes, one raw line from a stack-trace file.
        encoding_guess: codec used to decode; undecodable bytes are
            substituted (errors='replace') rather than raising.

    Returns:
        str: the sanitized line.
    """
    decoded = line_raw.decode(encoding=encoding_guess, errors='replace')
    # Build the result once with join instead of quadratic += concatenation.
    return u"".join(
        u'?' if (unicodedata.category(ch)[0] == 'C' or ch == u'\ufffd') else ch
        for ch in decoded
    )
# Regexes for single GDB backtrace frame lines, tried by load_from_strings()
# in roughly most- to least-specific order.  The variable names encode the
# captured fields: n = frame number, a = address, i = the literal "in",
# f = function, a = "(args)", f = "at <file>", l = "from <lib>".
# number address in function (args) at file from lib
naifafl = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s+\(([^\)]*)\)\s+at\s+(\S+)\sfrom\s+(\S+)\s*$')
# number address in function (args) from lib
naifal = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s+\(([^\)]*)\)\s+from\s+(.+?)\s*$')
# number address function (args) from lib (missing in)
nafal = re.compile(r'^#([\dx]+)\s+(\S+)\s+(.+?)\s+\(([^\)]*)\)\s+from\s+(.+?)\s*$')
# number address in function (args) at file
naifaf = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s+\((.*?)\)\s+at\s+(.+?)\s*$')
# number function (args) at file
nfaf = re.compile(r'^#([\dx]+)\s+(.+?)\s+\((.*?)\)\s+at\s+(\S+)\s*$')
# number address in function (args  -- closing paren may be missing/truncated
naifa = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s*\((.*?)\)?\s*$')
# number address in function
naif = re.compile(r'^#([\dx]+)\s+(\S+)\s+in\s+(.+?)\s*$')
# number function (args  -- closing paren may be missing/truncated
nfa = re.compile(r'^#([\dx]+)\s+(.+?)\s+\((.*?)\)?\s*$')
# number <function>  (e.g. "#3 <signal handler called>")
nf = re.compile(r'^#(\d+)\s+(<.*?>)\s*$')
# file: line
fl = re.compile(r'^([^:]+):(\d+)\s*$')
# at file: line  (continuation line giving the source location)
afl = re.compile(r'^\s*at\s+([^\s:]+):(\d+)\s*$')
def load_from_strings(line, extras=None):
    """Parse one GDB stack-frame line into a dict.

    Tries the module-level frame regexes from most specific (number,
    address, function, args, file, lib) down to least specific (number and
    function only); the first match wins.  Continuation lines in ``extras``
    are scanned for an "at file:line" location when the frame line itself
    carried no file; unrecognized extras are kept under the ``extra`` key.

    Args:
        line: sanitized frame line beginning with ``#<depth>``.
        extras: optional list of continuation lines for this frame.

    Returns:
        dict with a subset of the keys ``depth``, ``address``, ``function``,
        ``args``, ``file``, ``fileline``, ``dylib``, ``extra``.

    Raises:
        RuntimeError: if no known frame format matches ``line``.
        AssertionError: if a matched line also contains markers that a more
            specific pattern should have captured (format drift guard).
    """
    frame = {}
    matched = False
    try:
        if not matched:
            match = naifafl.match(line)
            if match is not None:
                frame['depth'] = int(match.group(1))
                frame['address'] = match.group(2)
                frame['function'] = match.group(3)
                frame['args'] = match.group(4)
                frame['file'] = match.group(5)
                frame['dylib'] = match.group(6)
                matched = True
        if not matched:
            match = naifal.match(line)
            if match is not None:
                frame['depth'] = int(match.group(1))
                frame['address'] = match.group(2)
                frame['function'] = match.group(3)
                frame['args'] = match.group(4)
                frame['dylib'] = match.group(5)
                matched = True
        if not matched:
            match = nafal.match(line)
            if match is not None:
                frame['depth'] = int(match.group(1))
                frame['address'] = match.group(2)
                frame['function'] = match.group(3)
                frame['args'] = match.group(4)
                frame['dylib'] = match.group(5)
                matched = True
        if not matched:
            match = naifaf.match(line)
            if match is not None:
                frame['depth'] = int(match.group(1))
                frame['address'] = match.group(2)
                frame['function'] = match.group(3)
                frame['args'] = match.group(4)
                frame['file'] = match.group(5)
                matched = True
        if not matched:
            match = nfaf.match(line)
            if match is not None:
                frame['depth'] = int(match.group(1))
                frame['function'] = match.group(2)
                frame['args'] = match.group(3)
                frame['file'] = match.group(4)
                matched = True
        if not matched:
            match = naifa.match(line)
            if match is not None:
                # Guard: an " at " marker (other than heap-dump text) means a
                # more specific pattern should have matched instead.
                assert ((not re.search(' at ', line))
                        or re.search('memory at ', line)
                        or re.search('at remote ', line)
                        ), line
                assert (not re.search(' from ', line))
                frame['depth'] = int(match.group(1))
                frame['address'] = match.group(2)
                frame['function'] = match.group(3)
                frame['args'] = match.group(4)
                matched = True
        if not matched:
            match = naif.match(line)
            if match is not None:
                assert (not re.search(' at ', line))
                assert (not re.search(' from ', line))
                # BUGFIX: raw string (was '\(.*?\)', an invalid escape
                # sequence that warns on modern Python).
                assert (not re.search(r'\(.*?\)', line))
                frame['depth'] = int(match.group(1))
                frame['address'] = match.group(2)
                frame['function'] = match.group(3)
                matched = True
        if not matched:
            match = nfa.match(line)
            if match is not None:
                assert ((not re.search(' at ', line))
                        or re.search('memory at ', line)
                        or re.search('at remote ', line)
                        ), line
                assert (not re.search(' from ', line))
                assert (not re.search(r' ()\s*$', line))
                frame['depth'] = int(match.group(1))
                frame['function'] = match.group(2)
                frame['args'] = match.group(3)
                matched = True
        if not matched:
            match = nf.match(line)
            if match is not None:
                assert (not re.search(' at ', line))
                assert (not re.search(' from ', line))
                assert (not re.search(r'\(.*?\)', line))
                frame['depth'] = int(match.group(1))
                frame['function'] = match.group(2)
                matched = True
    except Exception:
        # Log the offending line before re-raising so failures are traceable.
        logging.error(line)
        raise
    leftover_extras = []
    if 'file' in frame:
        # Split a "file:line" token captured as a single group.
        match = fl.match(frame['file'])
        if match is not None:
            frame['file'] = match.group(1)
            frame['fileline'] = match.group(2)
    elif extras is not None:
        # No file on the frame line itself: look for an "at file:line"
        # continuation line; anything unrecognized is preserved verbatim.
        for extra in extras:
            extra_matched = False
            if not extra_matched:
                match = afl.match(extra)
                if match is not None:
                    frame['file'] = match.group(1)
                    frame['fileline'] = match.group(2)
                    extra_matched = True
            if not extra_matched:
                leftover_extras.append(extra)
    if len(leftover_extras) > 0:
        frame['extra'] = leftover_extras
    if matched:
        return frame
    else:
        raise RuntimeError("Couldn't recognize stack frame format: %s" % (line.encode('unicode_escape')))
def load_from_file(path):
    """Read a (possibly gzipped) stack-trace file into a list of frame dicts.

    Frame lines start with '#'; non-frame lines are collected as "extras"
    for the preceding frame.  Returns None when a register-dump line
    (starting with 'rax') is encountered.
    """
    # Latin-1 never fails to decode, so it is a safe guess for text files.
    encoding_guess = 'ISO-8859-1'
    # NOTE(review): substring test — any path *containing* "gz" takes the
    # gzip branch; presumably intended for ".gz" suffixes. Confirm.
    if 'gz' in path:
        # gzip doesn't support encoding= ... this may need a workaround
        # NOTE(review): fixline() decodes with its default "utf-8" here,
        # while the plain-text branch uses ISO-8859-1 — confirm intended.
        with gzip.open(path) as stackfile:
            stacklines = [fixline(line) for line in stackfile.readlines()]
    else:
        with codecs.open(path, encoding=encoding_guess) as stackfile:
            stacklines = stackfile.readlines()
    extras = []
    prevline = None
    stack = []
    for line in stacklines:
        line = line.rstrip()
        # for ch in line.lstrip():
        # if ch != '\t' and unicodedata.category(ch)[0] == 'C':
        # raise ValueError("Bad encoding %s %s: %s" % (encoding_guess, ch.encode('unicode_escape'), line.encode('unicode_escape')))
        if re.match('^#', line):
            # New frame: flush the previous one together with its extras.
            if prevline is not None:
                stack.append(load_from_strings(prevline, extras))
            prevline = line
            extras = []
        if re.match('rax', line):
            # Register dump reached: treat the whole file as unparseable.
            return None
        else:
            # NOTE(review): '#' lines fall through here too and end up in
            # their own extras list — an elif chain may have been intended.
            extras.append(line.rstrip())
    # Flush the final frame.
    if prevline is not None:
        stack.append(load_from_strings(prevline, extras))
    return stack
def parse_stacktrace_file(text, filepath=""):
    """Parse GDB backtrace text into a list of {"function", "file"} dicts.

    Handles wrapped frame lines, keeps only the first trace when several
    are present, and pads the first gap in the frame numbering with
    placeholder frames so that list position matches frame depth.

    Example frame lines handled (from Stacktrace.txt.1):
        #12 0xb78a9b5d in IA__g_object_notify (object=0x9133590,
        #0 __GI___libc_free (mem=0x3) at malloc.c:2892
        #3 <signal handler called>
        #4 0x01c35211 in Gfx::opBeginImage(Object*, int) () from /usr/lib/libpoppler.so.12

    Args:
        text: raw backtrace text.
        filepath: origin of the text, used only in log messages.

    Returns:
        list of dicts with keys "function" and "file" (either may be None).

    Raises:
        Exception: if a frame matches but no function group was captured.
    """
    r = re.compile(
        r"^#([0-9]+) +(([a-zA-Z0-9]+) +in +)?((.+?) *[(].*?|(<sig[\w]+? handler [\w ]+?>)|([\w:<*,> \[\]-]+))(( +(at|from) +(\S+))|$)",
        re.MULTILINE)
    text = text.replace('\t', ' ')
    # Join wrapped frame lines: only a line starting a new frame keeps its
    # newline; indented continuations are folded into the previous line.
    text = re.sub("\n +", " ", text)
    call_numbers = []
    fc = []
    for m in r.finditer(text):
        call_number = int(m.group(1))
        if call_number == 0 and len(call_numbers) > 0:
            # Some files contain duplicate traces (e.g. one per thread).
            # Keep only the first one.
            logging.getLogger().warning("{} contains more than one stack trace.".format(filepath))
            break
        call_numbers.append(call_number)
        # The function name lands in group 5, 6 or 7 depending on which
        # alternative of the regex matched.
        method = None
        for i in range(5, 8):
            if m.group(i) is not None:
                method = m.group(i)
                break
        if method is None:
            raise Exception("One frame is None. {}\t{}.".format(m.group(), filepath))
        fc.append({
            "function": method.strip(),
            "file": m.group(11),
        })
    for idx, cn in enumerate(call_numbers):
        if cn != idx:
            logging.getLogger().warning("Stack Trace is incomplete. {}".format(text))
    # Some stack traces are missing frames; pad the first gap with None
    # placeholder frames so positions line up with frame depths.
    for idx, cn in enumerate(call_numbers):
        # BUGFIX: gap size is cn - idx.  The original computed cn - last
        # with `last` only updated inside the gap branch, which over-counted
        # the number of missing frames (e.g. inserted two placeholders for
        # a single missing frame when the gap appeared after frame 0).
        gap = cn - idx
        if gap != 0:
            for offset in range(gap):
                fc.insert(idx + offset, {"function": None, "file": None, })
            # Only the first gap is filled, matching the original behavior.
            break
    return fc
def parse_stacktrace_top(text):
    """Parse frames from trace text whose lines lack '#<n>' prefixes.

    Each line of the form "function (args) [at|from location]" yields a
    dict with the function name and (possibly None) location.
    """
    re_stack_top = re.compile(r"(.+?) *[(].*?(( +(at|from) +(\S+))|$)", re.MULTILINE)
    return [
        {"function": match.group(1).strip(), "file": match.group(5)}
        for match in re_stack_top.finditer(text)
    ]
def parse_thread_stacktrace(text, file_path=""):
    """Split a multi-thread GDB dump on "Thread N" headers and parse each.

    Returns one parsed stack (list of frame dicts) per thread section.
    """
    header_re = re.compile(r"^Thread +[0-9]+", flags=re.IGNORECASE | re.MULTILINE)
    headers = list(header_re.finditer(text))
    # Each thread's text runs from the end of its header to the start of
    # the next header (or the end of the whole dump for the last one).
    starts = [m.end() for m in headers]
    ends = [m.start() for m in headers[1:]] + [len(text)]
    return [
        parse_stacktrace_file(text[begin:stop], file_path)
        for begin, stop in zip(starts, ends)
    ]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('folder', help="Folder that contains all bug reports")
parser.add_argument('output', help="Json File")
parser.add_argument('date_file', help="Json File with date")
parser.add_argument('lp_json', help="Data used by Campbell")
logging.basicConfig(level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger()
args = parser.parse_args()
logger.info(args)
problem_re = re.compile(r"ProblemType: +[\w:. ]+\n")
field_re = re.compile(r"^[.\w]+:", flags=re.MULTILINE)
categorical_fields = set()
reports = []
oracle = {}
n_before_crashes = 0
n_crashes_buckets = | |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Construct the necessary state for the TVM graph executor
from a Relay expression.
"""
import warnings
import numpy as np
from tvm.ir import IRModule
from tvm.ir.transform import PassContext
from tvm.tir import expr as tvm_expr
from tvm.target import Target
from .. import nd as _nd, autotvm, register_func
from ..target import Target
from ..contrib import graph_executor as _graph_rt
from . import _build_module
from . import ty as _ty
from . import expr as _expr
from . import function as _function
from .transform import InferType
from .backend import graph_executor_factory as _graph_executor_factory
from .backend import interpreter as _interpreter
from .backend.vm import VMExecutor
def _update_target(target):
    """Normalize *target* into a {device_type IntImm: Target} mapping.

    Falls back to the target in the current scope when none is supplied.

    Raises:
        ValueError: if no target is given and none is set in the environment.
        TypeError: if *target* is neither str, Target, nor dict.
    """
    target = target or Target.current()
    if target is None:
        raise ValueError("Target is not set in env or passed as argument.")
    # Normalize the single-target form into the dict form first.
    if isinstance(target, (str, Target)):
        dev_to_tgt = {str(target): target}
    elif isinstance(target, dict):
        dev_to_tgt = target
    else:
        raise TypeError(
            "target is expected to be str or "
            + "tvm.target.Target, but received "
            + "{}".format(type(target))
        )
    tgts = {}
    for dev, tgt in dev_to_tgt.items():
        dev_type = tvm_expr.IntImm("int32", _nd.device(dev).device_type)
        tgts[dev_type] = Target(tgt)
    return tgts
def _convert_param_map(params):
    """Wrap each parameter value as a relay constant expression.

    numpy arrays are first converted to TVM NDArrays.
    """
    return {
        name: _expr.const(_nd.array(value) if isinstance(value, np.ndarray) else value)
        for name, value in params.items()
    }
class BuildModule(object):
    """Build an IR module to run on TVM graph executor. This class is used
    to expose the `RelayBuildModule` APIs implemented in C++.
    """

    def __init__(self):
        # Handle to the C++ RelayBuildModule; the dict-style lookups fetch
        # its packed functions once so later calls are cheap.
        self.mod = _build_module._BuildModule()
        self._get_graph_json = self.mod["get_graph_json"]
        self._get_module = self.mod["get_module"]
        self._build = self.mod["build"]
        self._optimize = self.mod["optimize"]
        self._set_params_func = self.mod["set_params"]
        self._get_params_func = self.mod["get_params"]

    def build(self, mod, target=None, target_host=None, params=None):
        """
        Parameters
        ----------
        mod : :py:class:`~tvm.IRModule`
            The IRModule to build.
        target : str, :any:`tvm.target.Target`, or dict of str(i.e.
            device/context name) to str/tvm.target.Target, optional
            For heterogeneous compilation, it is a dictionary indicating context
            to target mapping. For homogeneous compilation, it is a build target.
        target_host : str or :any:`tvm.target.Target`, optional
            Host compilation target, if target is device.
            When TVM compiles device specific program such as CUDA,
            we also need host(CPU) side code to interact with the driver
            to setup the dimensions and parameters correctly.
            target_host is used to specify the host side codegen target.
            By default, llvm is used if it is enabled,
            otherwise a stackvm intepreter is used.
        params : dict of str to NDArray
            Input parameters to the graph that do not change
            during inference time. Used for constant folding.

        Returns
        -------
        graph_json : str
            The json string that can be accepted by graph executor.
        mod : tvm.Module
            The module containing necessary libraries.
        params : dict
            The parameters of the final graph.
        """
        # DOCFIX: this method returns the (graph_json, mod, params) triple;
        # the factory wrapping happens in the free function `build` below.
        target = _update_target(target)
        target, target_host = Target.check_and_update_host_consist(
            target, target_host, target_is_dict_key=False
        )

        # Setup the params.
        if params:
            self._set_params(params)

        # Build the IR module. If auto_scheduler is not enabled,
        # then use the TOPI-defined schedule.
        use_auto_scheduler = PassContext.current().config.get(
            "relay.backend.use_auto_scheduler", False
        )

        # Turn off AutoTVM config not found warnings if auto_scheduler is enabled.
        old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
        autotvm.GLOBAL_SCOPE.silent = use_auto_scheduler
        try:
            self._build(mod, target, target_host)
        finally:
            # BUGFIX: restore the flag even when the C++ build raises, so a
            # failed build does not leave AutoTVM silenced globally.
            autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent

        # Get artifacts
        graph_json = self.get_json()
        mod = self.get_module()
        params = self.get_params()

        return graph_json, mod, params

    def optimize(self, mod, target=None, params=None):
        """
        Parameters
        ----------
        mod : :py:class:`~tvm.IRModule`
            The IR module to build.
        target : str, :any:`tvm.target.Target`, or dict of str(i.e.
            device/context name) to str/tvm.target.Target, optional
            For heterogeneous compilation, it is a dictionary indicating context
            to target mapping. For homogeneous compilation, it is a build target.
        params : dict of str to NDArray
            Input parameters to the graph that do not change
            during inference time. Used for constant folding.

        Returns
        -------
        mod : :py:class:`~tvm.IRModule`
            The optimized relay module.
        params : dict
            The parameters of the final graph.
        """
        target = _update_target(target)

        # Setup the params.
        if params:
            self._set_params(params)
        mod = self._optimize(mod, target)
        # Get artifacts
        params = self.get_params()

        return mod, params

    def _set_params(self, params):
        """Push *params* down to the C++ build module as relay constants."""
        self._set_params_func(_convert_param_map(params))

    def get_json(self):
        """Return the json file of the built program."""
        return self._get_graph_json()

    def get_module(self):
        """Return the built module."""
        return self._get_module()

    def get_params(self):
        """Return the updated weights."""
        params = self._get_params_func()
        ret = {}
        for key, value in params.items():
            ret[key] = value.data
        return ret
@register_func("tvm.relay.module_export_library")
def _module_export(module, file_name):  # fcompile, addons, kwargs?
    """Packed-func hook: export *module* as a shared library at *file_name*."""
    return module.export_library(file_name)
@register_func("tvm.relay.build")
def _build_module_no_factory(mod, target=None, target_host=None, params=None, mod_name="default"):
    """A wrapper around build which discards the Python GraphFactoryRuntime.
    This wrapper is suitable to be used from other programming languages as
    the runtime::Module can be freely passed between language boundaries.
    """
    # Merge/validate the host target into the device target before building.
    target, target_host = Target.check_and_update_host_consist(target, target_host)
    # Only the runtime module is returned; the Python factory is dropped.
    return build(mod, target, params=params, mod_name=mod_name).module
def build(ir_mod, target=None, target_host=None, params=None, mod_name="default"):
    # fmt: off
    # pylint: disable=line-too-long
    """Helper function that builds a Relay function to run on TVM graph executor.

    Parameters
    ----------
    ir_mod : :py:class:`~tvm.IRModule`
        The IR module to build. Using relay.Function is deprecated.

    target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context name) to str/tvm.target.Target, optional
        For heterogeneous compilation, it is a dictionary indicating context to
        target mapping. For homogeneous compilation, it is a build target.

    target_host : str or :any:`tvm.target.Target`, optional
        Host compilation target, if target is device.
        When TVM compiles device specific program such as CUDA,
        we also need host(CPU) side code to interact with the driver
        setup the dimensions and parameters correctly.
        target_host is used to specify the host side codegen target.
        By default, llvm is used if it is enabled,
        otherwise a stackvm intepreter is used.

    params : dict of str to NDArray
        Input parameters to the graph that do not change
        during inference time. Used for constant folding.

    mod_name: Optional[str]
        The module name we will build

    Returns
    -------
    graph_json : str
        The json string that can be accepted by graph executor.

    mod : tvm.Module
        The module containing necessary libraries.

    params : dict
        The parameters of the final graph.
    """
    # pylint: enable=line-too-long
    # fmt: on
    if not isinstance(ir_mod, (IRModule, _function.Function)):
        raise ValueError("Type of input parameter mod must be tvm.IRModule")

    if isinstance(ir_mod, _function.Function):
        # Deprecated path: accept a bare relay.Function and wrap it.
        if params:
            # NOTE(review): bind_params_by_name is not in the visible import
            # block — presumably defined elsewhere in this module; confirm.
            ir_mod = bind_params_by_name(ir_mod, params)
        ir_mod = IRModule.from_expr(ir_mod)
        warnings.warn(
            "Please use input parameter mod (tvm.IRModule) "
            "instead of deprecated parameter mod (tvm.relay.function.Function)",
            DeprecationWarning,
        )
    # Normalize target(s) into a device-type keyed mapping.
    target = _update_target(target)
    if isinstance(target_host, (str, Target)):
        target_host = Target(target_host)
    elif target_host:
        raise ValueError("target host must be the type of str, " + "tvm.target.Target, or None")

    # Fold the host target into each device target entry.
    target, target_host = Target.check_and_update_host_consist(
        target, target_host, target_is_dict_key=False
    )

    # If current dispatch context is fallback context (the default root context),
    # then load pre-tuned parameters from TopHub
    if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
        tophub_context = autotvm.tophub.context(list(target.values()))
    else:
        tophub_context = autotvm.utils.EmptyContext()

    with tophub_context:
        bld_mod = BuildModule()
        graph_json, runtime_mod, params = bld_mod.build(mod=ir_mod, target=target, params=params)
        # Wrap the build artifacts in an exportable factory module.
        executor_factory = _graph_executor_factory.GraphExecutorFactoryModule(
            ir_mod, target, graph_json, runtime_mod, mod_name, params
        )
        return executor_factory
def optimize(mod, target=None, params=None):
"""Helper function that optimizes a Relay module.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to build. Using relay.Function is deprecated.
target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context
name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context to
target mapping. For homogeneous compilation, it is a build target.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : :py:class:`~tvm.IRModule`
The optimized relay module.
params : dict
The parameters of the final graph.
"""
if not isinstance(mod, (IRModule, _function.Function)):
raise ValueError("Type of input parameter mod must be tvm.IRModule")
if isinstance(mod, _function.Function):
if params:
mod = bind_params_by_name(mod, params)
mod = IRModule.from_expr(mod)
warnings.warn(
"Please use input parameter mod (tvm.IRModule) "
"instead of deprecated parameter func (tvm.relay.function.Function)",
DeprecationWarning,
)
target = _update_target(target)
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(list(target.values()))
else:
tophub_context | |
values: the value to filter
Ex.
pp.subscript(dataArray, index, value)
"""
if not isinstance(dataArray, xr.DataArray):
raise ValueError(
"the 'dataArray' parameter must be of the type xr.DataArray")
if not isinstance(indexes, list):
indexes = [indexes]
if not isinstance(values, list):
values = [values]
res = dataArray
filterDic = {}
for _pos, indexItem in enumerate(indexes):
filterDic[indexItem.name] = values[_pos]
if len(filterDic) > 0:
res = res.sel(filterDic, drop=True)
return res
def change_index(self, dataArray, oldIndex, newIndex, compareMode=1, defaultValue=None):
    """Changes index of a dataArray object.
    compareMode: 1: by Value (default), 2: by pos
    Ex.
        pp.change_index(dataArray, oldIndex, newIndex)
    """
    if compareMode == 1:
        # Align by value: reindex on the new index's values, then swap the
        # dimension name over to the new index and drop the old coordinate.
        aligned = dataArray.reindex({oldIndex.name: newIndex.values})
        aligned[newIndex.name] = aligned[oldIndex.name]
        aligned = aligned.swap_dims(
            {oldIndex.name: newIndex.name}).drop(oldIndex.name)
        if defaultValue is not None:
            # Fill positions that did not exist under the old index.
            aligned = aligned.fillna(defaultValue)
        return aligned
    # Positional mode: only defined for same-length indices.
    if len(oldIndex.values) != len(newIndex.values):
        raise ValueError(
            "Changeindex by pos for indices of different size is not implemented")
    renamed = dataArray.copy()
    renamed = renamed.assign_coords({oldIndex.name: newIndex.values})
    return renamed.rename({oldIndex.name: newIndex.name})
def kind_to_string(self, kind):
    """Returns the data type on human-readable string
    """
    # Map numpy dtype.kind codes to category names; unknown kinds fall
    # through to None, matching the original if/elif chain.
    categories = {
        'U': "string", 'S': "string",
        'b': "boolean",
        'i': "numeric", 'u': "numeric", 'f': "numeric", 'c': "numeric",
        'm': "date", 'M': "date",
        'O': "object",
        'V': "void",
    }
    return categories.get(kind)
def pandas_from_excel(self, excel, sheetName=None, namedRange=None, cellRange=None, indexes=None, driver=""):
    """Returns a pandas DataFrame from Excel spreadsheet.
    excel: excel file path or openpyxl workbook object
    sheetName: sheet name to be read
    namedRange: range name to be read
    cellRange: used together with sheetName to read from single cell range
    indexes: List of columns names to be set as index of dataframe
    driver: unused; kept for backward compatibility of the signature
    Ex.
        pp.pandas_from_excel(excelNode,"Sheet 1")
        pp.pandas_from_excel(excelNode,namedRange="name_range")
        pp.pandas_from_excel(excelNode,"Sheet 1",cellRange="A1:H10")

    This function automatically generates pickles from every named range in excel file
    when excel parameter is a string.
    """
    # When excel param is a string, this function tries to read from automatically generated
    # pickles for every named range if they are newer than the Excel file (its modified date).
    # If they do not exist or are outdated, tries to generate one pickle for every named range in
    # the spreadsheet.
    # Requirements:
    #   - it must have writing permissions,
    #   - it must have named ranges.
    # Otherwise, it should load the spreadsheet using openpyxl library and then read the sheet,
    # range or cellrange.
    if isinstance(excel, str):
        # Resolve relative paths against the model's current path node.
        if not os.path.isfile(excel):
            excel = os.path.join(self.model.getNode(
                "current_path").result, excel)
        filepath = excel
        # Only read/generate pickles for named ranges
        if namedRange is not None:
            orig_dir, single_filename = os.path.split(filepath)
            filename, _ = os.path.splitext(single_filename)
            # NOTE(review): this literal looks garbled — it was presumably
            # an f-string derived from the workbook's file name (e.g. a
            # hidden ".{filename}" cache directory); confirm against VCS.
            target_dir = os.path.join(orig_dir, f".(unknown)")
            picklepath = os.path.join(target_dir, f"{namedRange}.pkl")
            # Read from pickle if it is newer than Excel file
            if os.path.isfile(picklepath) and os.path.getmtime(picklepath) >= os.path.getmtime(filepath):
                return self.__read_pickle_df(filepath=picklepath, indexes=indexes)
            else:
                wb = load_workbook(
                    filepath, data_only=True, read_only=True)
                named_ranges = [
                    r.name for r in wb.defined_names.definedName]
                # Check if user has writing permissions to generate new pickles and if namedRange exists
                if os.access(excel, os.W_OK) and namedRange in named_ranges:
                    flag_filename = 'flag.tmp'
                    flag_filepath = os.path.join(target_dir, flag_filename)
                    # Clean potentially old flag files
                    self.__remove_old_file(
                        filepath=flag_filepath, maxElapsedMinutes=60)
                    # If flag file exists (optimization is running), read directly from Excel
                    if os.path.isfile(flag_filepath):
                        return self.pandas_from_excel(wb, sheetName, namedRange, cellRange, indexes)
                    else:
                        self.__generate_pkl_from_excel(
                            workbook=wb, filepath=filepath, targetDir=target_dir,
                            maxFileSizeMB=100, flagFilename=flag_filename)
                        # Read file
                        if os.path.isfile(picklepath):
                            return self.__read_pickle_df(filepath=picklepath, indexes=indexes)
                        else:
                            return self.pandas_from_excel(wb, sheetName, namedRange, cellRange, indexes)
                # Read directly from Excel
                else:
                    return self.pandas_from_excel(wb, sheetName, namedRange, cellRange, indexes)
        else:
            wb = load_workbook(filepath, data_only=True, read_only=True)
            return self.pandas_from_excel(wb, sheetName, namedRange, cellRange, indexes)
    elif "openpyxl.workbook" in str(type(excel)):
        # Workbook object: locate the cell range to read.
        rangeToRead = None
        if not namedRange is None:
            the_range = excel.defined_names[namedRange]
            dests = the_range.destinations
            # If the name maps to several destinations, the last one wins.
            for title, coord in dests:
                ws = excel[title]
                rangeToRead = ws[coord]
        elif not cellRange is None:
            ws = excel[sheetName]
            rangeToRead = ws[cellRange]
        else:
            rangeToRead = excel[sheetName]

        # First row is treated as the header; the rest become data rows.
        cols = []
        values = []
        for index, row in enumerate(rangeToRead):
            if index == 0:
                cols = [str(c.value) for c in row]
            else:
                values.append([c.value for c in row])
        df = pd.DataFrame(values, None, cols)
        if not indexes is None:
            if isinstance(indexes, str):
                indexes = [indexes]
            # Only set as index the requested columns that actually exist.
            toIndex = []
            for indexColumn in indexes:
                if indexColumn in df.columns.values:
                    toIndex.append(indexColumn)
            if len(toIndex) > 0:
                df.set_index(toIndex, inplace=True)
        # Drop fully-empty rows before returning.
        return df.dropna(how="all")
    else:
        raise ValueError("excel must be a string or openpyxl workbook")
def index_from_pandas(self, dataframe, columnName=None, removeEmpty=True):
    """Returns a pandas.Index from a column of a pandas dataframe.
    dataframe: pandas dataframe
    columnName: dataframe column name used for create cp.index. By default is created using the first column
    removeEmpty: True for remove empty rows (NaN values and, for
        string/object columns, empty strings)
    Ex.
        pp.index_from_pandas(df)
        pp.index_from_pandas(df,"column10")
    """
    # Default to the first column when no explicit column is given.
    if columnName is None:
        _serie = dataframe[dataframe.columns[0]]
    else:
        _serie = dataframe[columnName]

    if removeEmpty:
        # BUGFIX: dropna(inplace=True) on a Series obtained from a
        # DataFrame is the deprecated chained-mutation pattern (warns and
        # breaks under pandas copy-on-write); reassign instead.
        _serie = _serie.dropna()
        if self.kind_to_string(_serie.dtype.kind) in ("string", "object"):
            # Text columns: empty strings count as empty too.
            _serie = _serie[_serie != ""]

    # unique() preserves first-seen order of the remaining values.
    return pd.Index(_serie.unique())
def index_from_excel(self, excel, sheetName=None, namedRange=None, cellRange=None, columnName=None, removeEmpty=True):
    """Returns a pandas.Index from an excel file.
    excel: pp.excel object
    sheetName: sheet name to be read
    namedRange: name of the range to be read
    cellRange: used with sheetname, for read from a simple range
    columnName: dataframe column name used for create pp.index. By default is created using the first column
    removeEmpty: True for remove empty rows
    Ex.
        pp.index_from_excel(excelNode,"Sheet 1")
        pp.index_from_excel(excelNode,namedRange="name_range")
        pp.index_from_excel(excelNode,namedRange="name_range", columnName="indicadores")
    """
    # Guard clause: accept a file path or an openpyxl workbook only.
    is_path = isinstance(excel, str)
    is_workbook = "openpyxl.workbook" in str(type(excel))
    if not (is_path or is_workbook):
        raise ValueError(
            "excel can be excel_connection object or a str path to the filename")
    # Load the range into a dataframe, then build the index from it.
    frame = self.pandas_from_excel(excel, sheetName, namedRange, cellRange)
    return self.index_from_pandas(frame, columnName, removeEmpty)
def dataarray_from_pandas(self, dataframe, domainDic, valueColumns, defaultValue=None, valueColumnsAsDim=True, sumDuplicateRecords=True):
"""Returns a DataArray (valueColumns is string or (valueColumns is pd.Index and valueColumnsAsDim is True))
or Dataset (valueColumns is a list or (valueColumns is a pd.Index and valueColumnsAsDim is False)) from
a Pandas dataframe applying the set_domain function.
dataframe: Pandas dataframe with no index columns.
domainDic: Dictionary of column names and index names. Ex. {'Column Name': index_name}.
valueColumns: String, list or pd.Index. Dataframe's value columns.
defaultValue: Default value when applying set_domain function.
valueColumnsAsDim: If True, valueColumns becomes a dimension of resulting DataArray. If False, each value
column becomes a variable of the resulting Dataset.
sumDuplicateRecords: If True, sums identical rows. Otherwise, removes duplicates (except the first one).
Ex.
pp.dataarray_from_pandas(sales_dataframe, {'Sales Channel': sales_channels, 'Month': time}, 'Sales', 0.)
"""
_index_value_columns = None
# Check if valueColumns is string, list, np.ndarray or pd.Index (transform to list) and indexes is dict.
if isinstance(valueColumns, pd.Index):
_index_value_columns = valueColumns.copy()
_index_value_columns_name = _index_value_columns.name
valueColumns = valueColumns.values.tolist()
elif isinstance(valueColumns, np.ndarray):
valueColumns = valueColumns.tolist()
elif not isinstance(valueColumns, str) and not isinstance(valueColumns, list):
raise ValueError(
"valueColumns must be a string, a list or a pd.Index")
if not isinstance(domainDic, dict):
raise ValueError("Indexes must be a dictionary")
# Transform indexes into list and create list with all columns.
_index_cols = list(domainDic.keys())
_cols = _index_cols.copy()
if isinstance(valueColumns, list):
_cols = _cols + valueColumns
else:
_cols.append(valueColumns)
# If valueColumnsAsDim is True, check if every column is in dataframe and filter it.
if (valueColumnsAsDim is True) and isinstance(_index_value_columns, pd.Index):
_df_columns = dataframe.columns.values.tolist()
_cols = [value for value in _df_columns if value in _cols]
_filtered_value_columns = [
value for value in _cols if value not in _index_cols]
# Filter dataframe by columns.
_df = dataframe[_cols]
# Sum identical rows or remove duplicates.
if sumDuplicateRecords is True:
_df = _df.groupby(_index_cols, as_index=False).sum()
else:
_duplicate_rows = _df.duplicated(_index_cols)
_df = _df[~_duplicate_rows]
# If valueColumnsAsDim is True, melt valueColumns.
if (valueColumnsAsDim is True) and isinstance(_index_value_columns, pd.Index):
# Unpivot dataframe from wide format to long format by valueColumns.
_df = pd.melt(_df, id_vars=_index_cols, value_vars=_filtered_value_columns,
var_name=_index_value_columns_name, value_name='values')
_index_cols = _index_cols + [_index_value_columns_name]
domainDic[_index_value_columns_name] = _index_value_columns
# Create DataArray
_data = _df.set_index(_index_cols)['values'].to_xarray()
# Appy set_domain function to DataArray / Dataset.
_data = self.set_domain(_data, domainDic, defaultValue)
else:
# Create DataArray / Dataset.
_data = _df.set_index(_index_cols)[valueColumns].to_xarray()
# Appy set_domain function to DataArray / Dataset.
_data = self.set_domain(_data, domainDic, defaultValue)
return _data
def dataarray_from_excel(self, excel, sheetName=None, namedRange=None, cellRange=None, indexes=None, valueColumns=None, indexColumnHeaders=None, replaceByIndex=None, defaultValue=0):
"""Returns a xr.DataArray from excel file.
excel: excel_connection object.
sheetName: sheet name to | |
def to_map(self):
result = {}
result['TimeStamp'] = self.time_stamp
result['StorageUtilization'] = self.storage_utilization
result['NetworkOut'] = self.network_out
return result
def from_map(self, map={}):
self.time_stamp = map.get('TimeStamp')
self.storage_utilization = map.get('StorageUtilization')
self.network_out = map.get('NetworkOut')
return self
class DescribeVodStorageDataResponseStorageData(TeaModel):
    """Container for the StorageDataItem list of DescribeVodStorageDataResponse."""

    def __init__(self, storage_data_item=None):
        # Bug fix: the constructor previously discarded its argument and
        # always reset the list to []; honor a caller-supplied list.
        self.storage_data_item = [] if storage_data_item is None else storage_data_item

    def validate(self):
        """storage_data_item is required; validate every non-empty element."""
        self.validate_required(self.storage_data_item, 'storage_data_item')
        if self.storage_data_item:
            for k in self.storage_data_item:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize element-wise; a None list serializes to None."""
        result = {}
        if self.storage_data_item is not None:
            result['StorageDataItem'] = [k.to_map() if k else None for k in self.storage_data_item]
        else:
            result['StorageDataItem'] = None
        return result

    def from_map(self, map={}):
        """Rebuild the item models from a wire-format dict and return self."""
        if map.get('StorageDataItem') is not None:
            self.storage_data_item = []
            for k in map.get('StorageDataItem'):
                temp_model = DescribeVodStorageDataResponseStorageDataStorageDataItem()
                self.storage_data_item.append(temp_model.from_map(k))
        else:
            self.storage_data_item = None
        return self
class DescribeVodAIDataRequest(TeaModel):
    """Request parameters for the DescribeVodAIData API."""

    def __init__(self, owner_id=None, start_time=None, end_time=None, region=None, aitype=None):
        self.owner_id = owner_id
        self.start_time = start_time
        self.end_time = end_time
        self.region = region
        self.aitype = aitype

    def validate(self):
        """Only the query window (start/end time) is mandatory."""
        self.validate_required(self.start_time, 'start_time')
        self.validate_required(self.end_time, 'end_time')

    def to_map(self):
        """Serialize to wire-format key names."""
        return {
            'OwnerId': self.owner_id,
            'StartTime': self.start_time,
            'EndTime': self.end_time,
            'Region': self.region,
            'AIType': self.aitype,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
                ('owner_id', 'OwnerId'), ('start_time', 'StartTime'),
                ('end_time', 'EndTime'), ('region', 'Region'),
                ('aitype', 'AIType')):
            setattr(self, attr, map.get(key))
        return self
class DescribeVodAIDataResponse(TeaModel):
    """Response model for DescribeVodAIData with a nested AIData payload."""

    def __init__(self, request_id=None, data_interval=None, aidata=None):
        self.request_id = request_id
        self.data_interval = data_interval
        self.aidata = aidata

    def validate(self):
        """All top-level fields are required; recurse into the nested model."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.data_interval, 'data_interval')
        self.validate_required(self.aidata, 'aidata')
        if self.aidata:
            self.aidata.validate()

    def to_map(self):
        """Serialize, flattening the nested aidata model (or None)."""
        aidata_map = self.aidata.to_map() if self.aidata is not None else None
        return {
            'RequestId': self.request_id,
            'DataInterval': self.data_interval,
            'AIData': aidata_map,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict, rebuilding the nested model."""
        self.request_id = map.get('RequestId')
        self.data_interval = map.get('DataInterval')
        raw = map.get('AIData')
        self.aidata = DescribeVodAIDataResponseAIData().from_map(raw) if raw is not None else None
        return self
class DescribeVodAIDataResponseAIDataAIDataItemDataDataItem(TeaModel):
    """A single Name/Value statistics pair of one AI data point."""

    def __init__(self, name=None, value=None):
        self.name = name
        self.value = value

    def validate(self):
        """Both name and value are required."""
        self.validate_required(self.name, 'name')
        self.validate_required(self.value, 'value')

    def to_map(self):
        """Serialize to {'Name': ..., 'Value': ...}."""
        return {'Name': self.name, 'Value': self.value}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.name = map.get('Name')
        self.value = map.get('Value')
        return self
class DescribeVodAIDataResponseAIDataAIDataItemData(TeaModel):
    """Wrapper around the list of Name/Value items of one AI data point."""

    def __init__(self, data_item=None):
        # Bug fix: the constructor previously discarded its argument and
        # always reset the list to []; honor a caller-supplied list.
        self.data_item = [] if data_item is None else data_item

    def validate(self):
        """data_item is required; validate every non-empty element."""
        self.validate_required(self.data_item, 'data_item')
        if self.data_item:
            for k in self.data_item:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize element-wise; a None list serializes to None."""
        result = {}
        if self.data_item is not None:
            result['DataItem'] = [k.to_map() if k else None for k in self.data_item]
        else:
            result['DataItem'] = None
        return result

    def from_map(self, map={}):
        """Rebuild the item models from a wire-format dict and return self."""
        if map.get('DataItem') is not None:
            self.data_item = []
            for k in map.get('DataItem'):
                temp_model = DescribeVodAIDataResponseAIDataAIDataItemDataDataItem()
                self.data_item.append(temp_model.from_map(k))
        else:
            self.data_item = None
        return self
class DescribeVodAIDataResponseAIDataAIDataItem(TeaModel):
    """One timestamped AI data point with its nested Data payload."""

    def __init__(self, time_stamp=None, data=None):
        self.time_stamp = time_stamp
        self.data = data

    def validate(self):
        """Both fields are required; recurse into the nested data model."""
        self.validate_required(self.time_stamp, 'time_stamp')
        self.validate_required(self.data, 'data')
        if self.data:
            self.data.validate()

    def to_map(self):
        """Serialize, flattening the nested data model (or None)."""
        data_map = self.data.to_map() if self.data is not None else None
        return {'TimeStamp': self.time_stamp, 'Data': data_map}

    def from_map(self, map={}):
        """Populate from a wire-format dict, rebuilding the nested model."""
        self.time_stamp = map.get('TimeStamp')
        raw = map.get('Data')
        self.data = DescribeVodAIDataResponseAIDataAIDataItemData().from_map(raw) if raw is not None else None
        return self
class DescribeVodAIDataResponseAIData(TeaModel):
    """Container for the AIDataItem list of DescribeVodAIDataResponse."""

    def __init__(self, aidata_item=None):
        # Bug fix: the constructor previously discarded its argument and
        # always reset the list to []; honor a caller-supplied list.
        self.aidata_item = [] if aidata_item is None else aidata_item

    def validate(self):
        """aidata_item is required; validate every non-empty element."""
        self.validate_required(self.aidata_item, 'aidata_item')
        if self.aidata_item:
            for k in self.aidata_item:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize element-wise; a None list serializes to None."""
        result = {}
        if self.aidata_item is not None:
            result['AIDataItem'] = [k.to_map() if k else None for k in self.aidata_item]
        else:
            result['AIDataItem'] = None
        return result

    def from_map(self, map={}):
        """Rebuild the item models from a wire-format dict and return self."""
        if map.get('AIDataItem') is not None:
            self.aidata_item = []
            for k in map.get('AIDataItem'):
                temp_model = DescribeVodAIDataResponseAIDataAIDataItem()
                self.aidata_item.append(temp_model.from_map(k))
        else:
            self.aidata_item = None
        return self
class DescribeVodTranscodeDataRequest(TeaModel):
    """Request parameters for the DescribeVodTranscodeData API."""

    def __init__(self, owner_id=None, start_time=None, end_time=None, region=None, interval=None, storage=None, specification=None):
        self.owner_id = owner_id
        self.start_time = start_time
        self.end_time = end_time
        self.region = region
        self.interval = interval
        self.storage = storage
        self.specification = specification

    def validate(self):
        """Only the query window (start/end time) is mandatory."""
        self.validate_required(self.start_time, 'start_time')
        self.validate_required(self.end_time, 'end_time')

    def to_map(self):
        """Serialize to wire-format key names."""
        return {
            'OwnerId': self.owner_id,
            'StartTime': self.start_time,
            'EndTime': self.end_time,
            'Region': self.region,
            'Interval': self.interval,
            'Storage': self.storage,
            'Specification': self.specification,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
                ('owner_id', 'OwnerId'), ('start_time', 'StartTime'),
                ('end_time', 'EndTime'), ('region', 'Region'),
                ('interval', 'Interval'), ('storage', 'Storage'),
                ('specification', 'Specification')):
            setattr(self, attr, map.get(key))
        return self
class DescribeVodTranscodeDataResponse(TeaModel):
    """Response model for DescribeVodTranscodeData with a nested TranscodeData payload."""

    def __init__(self, request_id=None, data_interval=None, transcode_data=None):
        self.request_id = request_id
        self.data_interval = data_interval
        self.transcode_data = transcode_data

    def validate(self):
        """All top-level fields are required; recurse into the nested model."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.data_interval, 'data_interval')
        self.validate_required(self.transcode_data, 'transcode_data')
        if self.transcode_data:
            self.transcode_data.validate()

    def to_map(self):
        """Serialize, flattening the nested transcode_data model (or None)."""
        data_map = self.transcode_data.to_map() if self.transcode_data is not None else None
        return {
            'RequestId': self.request_id,
            'DataInterval': self.data_interval,
            'TranscodeData': data_map,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict, rebuilding the nested model."""
        self.request_id = map.get('RequestId')
        self.data_interval = map.get('DataInterval')
        raw = map.get('TranscodeData')
        self.transcode_data = DescribeVodTranscodeDataResponseTranscodeData().from_map(raw) if raw is not None else None
        return self
class DescribeVodTranscodeDataResponseTranscodeDataTranscodeDataItemDataDataItem(TeaModel):
    """A single Name/Value statistics pair of one transcode data point."""

    def __init__(self, name=None, value=None):
        self.name = name
        self.value = value

    def validate(self):
        """Both name and value are required."""
        self.validate_required(self.name, 'name')
        self.validate_required(self.value, 'value')

    def to_map(self):
        """Serialize to {'Name': ..., 'Value': ...}."""
        return {'Name': self.name, 'Value': self.value}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.name = map.get('Name')
        self.value = map.get('Value')
        return self
class DescribeVodTranscodeDataResponseTranscodeDataTranscodeDataItemData(TeaModel):
    """Wrapper around the list of Name/Value items of one transcode data point."""

    def __init__(self, data_item=None):
        # Bug fix: the constructor previously discarded its argument and
        # always reset the list to []; honor a caller-supplied list.
        self.data_item = [] if data_item is None else data_item

    def validate(self):
        """data_item is required; validate every non-empty element."""
        self.validate_required(self.data_item, 'data_item')
        if self.data_item:
            for k in self.data_item:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize element-wise; a None list serializes to None."""
        result = {}
        if self.data_item is not None:
            result['DataItem'] = [k.to_map() if k else None for k in self.data_item]
        else:
            result['DataItem'] = None
        return result

    def from_map(self, map={}):
        """Rebuild the item models from a wire-format dict and return self."""
        if map.get('DataItem') is not None:
            self.data_item = []
            for k in map.get('DataItem'):
                temp_model = DescribeVodTranscodeDataResponseTranscodeDataTranscodeDataItemDataDataItem()
                self.data_item.append(temp_model.from_map(k))
        else:
            self.data_item = None
        return self
class DescribeVodTranscodeDataResponseTranscodeDataTranscodeDataItem(TeaModel):
    """One timestamped transcode data point with its nested Data payload."""

    def __init__(self, time_stamp=None, data=None):
        self.time_stamp = time_stamp
        self.data = data

    def validate(self):
        """Both fields are required; recurse into the nested data model."""
        self.validate_required(self.time_stamp, 'time_stamp')
        self.validate_required(self.data, 'data')
        if self.data:
            self.data.validate()

    def to_map(self):
        """Serialize, flattening the nested data model (or None)."""
        data_map = self.data.to_map() if self.data is not None else None
        return {'TimeStamp': self.time_stamp, 'Data': data_map}

    def from_map(self, map={}):
        """Populate from a wire-format dict, rebuilding the nested model."""
        self.time_stamp = map.get('TimeStamp')
        raw = map.get('Data')
        self.data = DescribeVodTranscodeDataResponseTranscodeDataTranscodeDataItemData().from_map(raw) if raw is not None else None
        return self
class DescribeVodTranscodeDataResponseTranscodeData(TeaModel):
    """Container for the TranscodeDataItem list of DescribeVodTranscodeDataResponse."""

    def __init__(self, transcode_data_item=None):
        # Bug fix: the constructor previously discarded its argument and
        # always reset the list to []; honor a caller-supplied list.
        self.transcode_data_item = [] if transcode_data_item is None else transcode_data_item

    def validate(self):
        """transcode_data_item is required; validate every non-empty element."""
        self.validate_required(self.transcode_data_item, 'transcode_data_item')
        if self.transcode_data_item:
            for k in self.transcode_data_item:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize element-wise; a None list serializes to None."""
        result = {}
        if self.transcode_data_item is not None:
            result['TranscodeDataItem'] = [k.to_map() if k else None for k in self.transcode_data_item]
        else:
            result['TranscodeDataItem'] = None
        return result

    def from_map(self, map={}):
        """Rebuild the item models from a wire-format dict and return self."""
        if map.get('TranscodeDataItem') is not None:
            self.transcode_data_item = []
            for k in map.get('TranscodeDataItem'):
                temp_model = DescribeVodTranscodeDataResponseTranscodeDataTranscodeDataItem()
                self.transcode_data_item.append(temp_model.from_map(k))
        else:
            self.transcode_data_item = None
        return self
class DeleteMultipartUploadRequest(TeaModel):
    """Request parameters for the DeleteMultipartUpload API."""

    def __init__(self, access_key_id=None, owner_id=None, resource_owner_id=None, resource_owner_account=None, owner_account=None, resource_real_owner_id=None, media_id=None, media_type=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_id = resource_owner_id
        self.resource_owner_account = resource_owner_account
        self.owner_account = owner_account
        self.resource_real_owner_id = resource_real_owner_id
        self.media_id = media_id
        self.media_type = media_type

    def validate(self):
        """The target media id and its type are mandatory."""
        self.validate_required(self.media_id, 'media_id')
        self.validate_required(self.media_type, 'media_type')

    def to_map(self):
        """Serialize to wire-format key names."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerId': self.resource_owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'OwnerAccount': self.owner_account,
            'ResourceRealOwnerId': self.resource_real_owner_id,
            'MediaId': self.media_id,
            'MediaType': self.media_type,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
                ('access_key_id', 'AccessKeyId'), ('owner_id', 'OwnerId'),
                ('resource_owner_id', 'ResourceOwnerId'),
                ('resource_owner_account', 'ResourceOwnerAccount'),
                ('owner_account', 'OwnerAccount'),
                ('resource_real_owner_id', 'ResourceRealOwnerId'),
                ('media_id', 'MediaId'), ('media_type', 'MediaType')):
            setattr(self, attr, map.get(key))
        return self
class DeleteMultipartUploadResponse(TeaModel):
    """Response model for DeleteMultipartUpload (request id only)."""

    def __init__(self, request_id=None):
        self.request_id = request_id

    def validate(self):
        """request_id is required."""
        self.validate_required(self.request_id, 'request_id')

    def to_map(self):
        """Serialize to {'RequestId': ...}."""
        return {'RequestId': self.request_id}

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        self.request_id = map.get('RequestId')
        return self
class GetAttachedMediaInfoRequest(TeaModel):
    """Request parameters for the GetAttachedMediaInfo API."""

    def __init__(self, access_key_id=None, owner_id=None, resource_owner_account=None, resource_owner_id=None, media_ids=None, auth_timeout=None, resource_real_owner_id=None, output_type=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.media_ids = media_ids
        self.auth_timeout = auth_timeout
        self.resource_real_owner_id = resource_real_owner_id
        self.output_type = output_type

    def validate(self):
        """media_ids is the only mandatory field."""
        self.validate_required(self.media_ids, 'media_ids')

    def to_map(self):
        """Serialize to wire-format key names."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'MediaIds': self.media_ids,
            'AuthTimeout': self.auth_timeout,
            'ResourceRealOwnerId': self.resource_real_owner_id,
            'OutputType': self.output_type,
        }

    def from_map(self, map={}):
        """Populate from a wire-format dict and return self."""
        for attr, key in (
                ('access_key_id', 'AccessKeyId'), ('owner_id', 'OwnerId'),
                ('resource_owner_account', 'ResourceOwnerAccount'),
                ('resource_owner_id', 'ResourceOwnerId'),
                ('media_ids', 'MediaIds'), ('auth_timeout', 'AuthTimeout'),
                ('resource_real_owner_id', 'ResourceRealOwnerId'),
                ('output_type', 'OutputType')):
            setattr(self, attr, map.get(key))
        return self
class GetAttachedMediaInfoResponse(TeaModel):
def __init__(self, request_id=None, attached_media_list=None, non_exist_media_ids=None):
self.request_id = request_id
self.attached_media_list = []
self.non_exist_media_ids = []
def validate(self):
self.validate_required(self.request_id, 'request_id')
self.validate_required(self.attached_media_list, 'attached_media_list')
if self.attached_media_list:
for k in self.attached_media_list:
if k :
k.validate()
self.validate_required(self.non_exist_media_ids, 'non_exist_media_ids')
def to_map(self):
result = {}
result['RequestId'] = self.request_id
result['AttachedMediaList'] = []
if self.attached_media_list is not None:
for k in self.attached_media_list:
result['AttachedMediaList'].append(k.to_map() if k else None)
else:
result['AttachedMediaList'] = None
result['NonExistMediaIds'] = []
if self.non_exist_media_ids is not None:
for k in self.non_exist_media_ids:
result['NonExistMediaIds'].append(k)
else:
result['NonExistMediaIds'] = None
return result
def from_map(self, map={}):
self.request_id = map.get('RequestId')
self.attached_media_list = []
if map.get('AttachedMediaList') is not None:
for k in map.get('AttachedMediaList'):
temp_model = GetAttachedMediaInfoResponseAttachedMediaList()
temp_model = temp_model.from_map(k)
self.attached_media_list.append(temp_model)
else:
self.attached_media_list | |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import Form, tagged
from odoo.addons.stock_account.tests.test_anglo_saxon_valuation_reconciliation_common import ValuationReconciliationTestCommon
from odoo.exceptions import UserError
@tagged('post_install', '-at_install')
class TestAngloSaxonValuation(ValuationReconciliationTestCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
cls.env.user.company_id.anglo_saxon_accounting = True
cls.product = cls.env['product.product'].create({
'name': 'product',
'type': 'product',
'categ_id': cls.stock_account_product_categ.id,
})
def _inv_adj_two_units(self):
inventory = self.env['stock.inventory'].create({
'name': 'test',
'location_ids': [(4, self.company_data['default_warehouse'].lot_stock_id.id)],
'product_ids': [(4, self.product.id)],
})
inventory.action_start()
self.env['stock.inventory.line'].create({
'inventory_id': inventory.id,
'location_id': self.company_data['default_warehouse'].lot_stock_id.id,
'product_id': self.product.id,
'product_qty': 2,
})
inventory.action_validate()
def _so_and_confirm_two_units(self):
sale_order = self.env['sale.order'].create({
'partner_id': self.partner_a.id,
'order_line': [
(0, 0, {
'name': self.product.name,
'product_id': self.product.id,
'product_uom_qty': 2.0,
'product_uom': self.product.uom_id.id,
'price_unit': 12,
'tax_id': False, # no love taxes amls
})],
})
sale_order.action_confirm()
return sale_order
def _fifo_in_one_eight_one_ten(self):
# Put two items in stock.
in_move_1 = self.env['stock.move'].create({
'name': 'a',
'product_id': self.product.id,
'location_id': self.env.ref('stock.stock_location_suppliers').id,
'location_dest_id': self.company_data['default_warehouse'].lot_stock_id.id,
'product_uom': self.product.uom_id.id,
'product_uom_qty': 1,
'price_unit': 8,
})
in_move_1._action_confirm()
in_move_1.quantity_done = 1
in_move_1._action_done()
in_move_2 = self.env['stock.move'].create({
'name': 'a',
'product_id': self.product.id,
'location_id': self.env.ref('stock.stock_location_suppliers').id,
'location_dest_id': self.company_data['default_warehouse'].lot_stock_id.id,
'product_uom': self.product.uom_id.id,
'product_uom_qty': 1,
'price_unit': 10,
})
in_move_2._action_confirm()
in_move_2.quantity_done = 1
in_move_2._action_done()
# -------------------------------------------------------------------------
# Standard Ordered
# -------------------------------------------------------------------------
def test_standard_ordered_invoice_pre_delivery(self):
"""Standard price set to 10. Get 2 units in stock. Sale order 2@12. Standard price set
to 14. Invoice 2 without delivering. The amount in Stock OUT and COGS should be 14*2.
"""
self.product.categ_id.property_cost_method = 'standard'
self.product.invoice_policy = 'order'
self.product.standard_price = 10.0
# Put two items in stock.
self._inv_adj_two_units()
# Create and confirm a sale order for 2@12
sale_order = self._so_and_confirm_two_units()
# standard price to 14
self.product.standard_price = 14.0
# Invoice the sale order.
invoice = sale_order._create_invoices()
invoice.action_post()
# Check the resulting accounting entries
amls = invoice.line_ids
self.assertEqual(len(amls), 4)
stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
self.assertEqual(stock_out_aml.debit, 0)
self.assertEqual(stock_out_aml.credit, 28)
cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
self.assertEqual(cogs_aml.debit, 28)
self.assertEqual(cogs_aml.credit, 0)
receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
self.assertEqual(receivable_aml.debit, 24)
self.assertEqual(receivable_aml.credit, 0)
income_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_revenue'])
self.assertEqual(income_aml.debit, 0)
self.assertEqual(income_aml.credit, 24)
def test_standard_ordered_invoice_post_partial_delivery_1(self):
"""Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, invoice 1,
change the standard price to 14, deliver one, change the standard price to 16, invoice 1.
The amounts used in Stock OUT and COGS should be 10 then 14."""
self.product.categ_id.property_cost_method = 'standard'
self.product.invoice_policy = 'order'
self.product.standard_price = 10.0
# Put two items in stock.
sale_order = self._so_and_confirm_two_units()
# Create and confirm a sale order for 2@12
sale_order = self._so_and_confirm_two_units()
# Deliver one.
sale_order.picking_ids.move_lines.quantity_done = 1
wiz = sale_order.picking_ids.button_validate()
wiz = Form(self.env[wiz['res_model']].with_context(wiz['context'])).save()
wiz.process()
# Invoice 1
invoice = sale_order._create_invoices()
invoice_form = Form(invoice)
with invoice_form.invoice_line_ids.edit(0) as invoice_line:
invoice_line.quantity = 1
invoice_form.save()
invoice.action_post()
# Check the resulting accounting entries
amls = invoice.line_ids
self.assertEqual(len(amls), 4)
stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
self.assertEqual(stock_out_aml.debit, 0)
self.assertEqual(stock_out_aml.credit, 10)
cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
self.assertEqual(cogs_aml.debit, 10)
self.assertEqual(cogs_aml.credit, 0)
receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
self.assertEqual(receivable_aml.debit, 12)
self.assertEqual(receivable_aml.credit, 0)
income_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_revenue'])
self.assertEqual(income_aml.debit, 0)
self.assertEqual(income_aml.credit, 12)
# change the standard price to 14
self.product.standard_price = 14.0
# deliver the backorder
sale_order.picking_ids[0].move_lines.quantity_done = 1
sale_order.picking_ids[0].button_validate()
# change the standard price to 16
self.product.standard_price = 16.0
# invoice 1
invoice2 = sale_order._create_invoices()
invoice2.action_post()
amls = invoice2.line_ids
self.assertEqual(len(amls), 4)
stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
self.assertEqual(stock_out_aml.debit, 0)
self.assertEqual(stock_out_aml.credit, 14)
cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
self.assertEqual(cogs_aml.debit, 14)
self.assertEqual(cogs_aml.credit, 0)
receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
self.assertEqual(receivable_aml.debit, 12)
self.assertEqual(receivable_aml.credit, 0)
income_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_revenue'])
self.assertEqual(income_aml.debit, 0)
self.assertEqual(income_aml.credit, 12)
    def test_standard_ordered_invoice_post_delivery(self):
        """Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, change the
        standard price to 14, deliver one, invoice 2. The amounts used in Stock OUT and COGS
        should be 10 + 14 = 24: each delivered unit is valued at the standard price in force
        when it shipped. (NOTE(review): the old docstring said "12*2", which is the sale
        price and only coincidentally also equals 24.)"""
        self.product.categ_id.property_cost_method = 'standard'
        self.product.invoice_policy = 'order'
        self.product.standard_price = 10
        # Put two items in stock.
        self._inv_adj_two_units()
        # Create and confirm a sale order for 2@12
        sale_order = self._so_and_confirm_two_units()
        # Deliver one; button_validate returns the backorder wizard action.
        sale_order.picking_ids.move_lines.quantity_done = 1
        wiz = sale_order.picking_ids.button_validate()
        wiz = Form(self.env[wiz['res_model']].with_context(wiz['context'])).save()
        wiz.process()
        # change the standard price to 14
        self.product.standard_price = 14.0
        # deliver the backorder
        sale_order.picking_ids.filtered('backorder_id').move_lines.quantity_done = 1
        sale_order.picking_ids.filtered('backorder_id').button_validate()
        # Invoice the sale order.
        invoice = sale_order._create_invoices()
        invoice.action_post()
        # Check the resulting accounting entries: 10 + 14 in stock out / COGS,
        # 2 * 12 in receivable / revenue.
        amls = invoice.line_ids
        self.assertEqual(len(amls), 4)
        stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
        self.assertEqual(stock_out_aml.debit, 0)
        self.assertEqual(stock_out_aml.credit, 24)
        cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
        self.assertEqual(cogs_aml.debit, 24)
        self.assertEqual(cogs_aml.credit, 0)
        receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
        self.assertEqual(receivable_aml.debit, 24)
        self.assertEqual(receivable_aml.credit, 0)
        income_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_revenue'])
        self.assertEqual(income_aml.debit, 0)
        self.assertEqual(income_aml.credit, 24)
# -------------------------------------------------------------------------
# Standard Delivered
# -------------------------------------------------------------------------
def test_standard_delivered_invoice_pre_delivery(self):
"""Not possible to invoice pre delivery."""
self.product.categ_id.property_cost_method = 'standard'
self.product.invoice_policy = 'delivery'
self.product.standard_price = 10
# Put two items in stock.
self._inv_adj_two_units()
# Create and confirm a sale order for 2@12
sale_order = self._so_and_confirm_two_units()
# Invoice the sale order.
# Nothing delivered = nothing to invoice.
with self.assertRaises(UserError):
sale_order._create_invoices()
def test_standard_delivered_invoice_post_partial_delivery(self):
"""Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, invoice 1,
change the standard price to 14, deliver one, change the standard price to 16, invoice 1.
The amounts used in Stock OUT and COGS should be 10 then 14."""
self.product.categ_id.property_cost_method = 'standard'
self.product.invoice_policy = 'delivery'
self.product.standard_price = 10
# Put two items in stock.
sale_order = self._so_and_confirm_two_units()
# Create and confirm a sale order for 2@12
sale_order = self._so_and_confirm_two_units()
# Deliver one.
sale_order.picking_ids.move_lines.quantity_done = 1
wiz = sale_order.picking_ids.button_validate()
wiz = Form(self.env[wiz['res_model']].with_context(wiz['context'])).save()
wiz.process()
# Invoice 1
invoice = sale_order._create_invoices()
invoice_form = Form(invoice)
with invoice_form.invoice_line_ids.edit(0) as invoice_line:
invoice_line.quantity = 1
invoice_form.save()
invoice.action_post()
# Check the resulting accounting entries
amls = invoice.line_ids
self.assertEqual(len(amls), 4)
stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
self.assertEqual(stock_out_aml.debit, 0)
self.assertEqual(stock_out_aml.credit, 10)
cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
self.assertEqual(cogs_aml.debit, 10)
self.assertEqual(cogs_aml.credit, 0)
receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
self.assertEqual(receivable_aml.debit, 12)
self.assertEqual(receivable_aml.credit, 0)
income_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_revenue'])
self.assertEqual(income_aml.debit, 0)
self.assertEqual(income_aml.credit, 12)
# change the standard price to 14
self.product.standard_price = 14.0
# deliver the backorder
sale_order.picking_ids[0].move_lines.quantity_done = 1
sale_order.picking_ids[0].button_validate()
# change the standard price to 16
self.product.standard_price = 16.0
# invoice 1
invoice2 = sale_order._create_invoices()
invoice2.action_post()
amls = invoice2.line_ids
self.assertEqual(len(amls), 4)
stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
self.assertEqual(stock_out_aml.debit, 0)
self.assertEqual(stock_out_aml.credit, 14)
cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
self.assertEqual(cogs_aml.debit, 14)
self.assertEqual(cogs_aml.credit, 0)
receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
self.assertEqual(receivable_aml.debit, 12)
self.assertEqual(receivable_aml.credit, 0)
income_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_revenue'])
self.assertEqual(income_aml.debit, 0)
self.assertEqual(income_aml.credit, 12)
    def test_standard_delivered_invoice_post_delivery(self):
        """Standard price set to 10. Get 2 units in stock. Sale order 2@12. Deliver 1, change the
        standard price to 14, deliver one, invoice 2. The amounts used in Stock OUT and COGS should
        be 12*2."""
        # NOTE(review): the assertions below expect 24 in stock-out/COGS, i.e.
        # 10 (cost at first delivery) + 14 (cost at second) -- which happens to
        # also equal 12*2; confirm which reading the docstring intends.
        self.product.categ_id.property_cost_method = 'standard'
        self.product.invoice_policy = 'delivery'
        self.product.standard_price = 10
        # Put two items in stock.
        self._inv_adj_two_units()
        # Create and confirm a sale order for 2@12
        sale_order = self._so_and_confirm_two_units()
        # Deliver one.
        sale_order.picking_ids.move_lines.quantity_done = 1
        # Validating a partial delivery returns the backorder wizard action;
        # processing it creates a backorder picking for the remaining unit.
        wiz = sale_order.picking_ids.button_validate()
        wiz = Form(self.env[wiz['res_model']].with_context(wiz['context'])).save()
        wiz.process()
        # change the standard price to 14
        self.product.standard_price = 14.0
        # deliver the backorder
        sale_order.picking_ids.filtered('backorder_id').move_lines.quantity_done = 1
        sale_order.picking_ids.filtered('backorder_id').button_validate()
        # Invoice the sale order.
        invoice = sale_order._create_invoices()
        invoice.action_post()
        # Check the resulting accounting entries
        amls = invoice.line_ids
        self.assertEqual(len(amls), 4)
        # Stock output is credited with the cost of both deliveries (10 + 14).
        stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
        self.assertEqual(stock_out_aml.debit, 0)
        self.assertEqual(stock_out_aml.credit, 24)
        cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
        self.assertEqual(cogs_aml.debit, 24)
        self.assertEqual(cogs_aml.credit, 0)
        # Receivable/revenue reflect 2 units invoiced at the 12 sale price.
        receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
        self.assertEqual(receivable_aml.debit, 24)
        self.assertEqual(receivable_aml.credit, 0)
        income_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_revenue'])
        self.assertEqual(income_aml.debit, 0)
        self.assertEqual(income_aml.credit, 24)
# -------------------------------------------------------------------------
# AVCO Ordered
# -------------------------------------------------------------------------
def test_avco_ordered_invoice_pre_delivery(self):
"""Standard price set to 10. Sale order 2@12. Invoice without delivering."""
self.product.categ_id.property_cost_method = 'average'
self.product.invoice_policy = 'order'
self.product.standard_price = 10
# Put two items in stock.
self._inv_adj_two_units()
# Create and confirm a sale order for 2@12
sale_order = self._so_and_confirm_two_units()
# Invoice the sale order.
invoice = sale_order._create_invoices()
invoice.action_post()
# Check the resulting accounting entries
amls = invoice.line_ids
self.assertEqual(len(amls), 4)
stock_out_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_stock_out'])
self.assertEqual(stock_out_aml.debit, 0)
self.assertEqual(stock_out_aml.credit, 20)
cogs_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_expense'])
self.assertEqual(cogs_aml.debit, 20)
self.assertEqual(cogs_aml.credit, 0)
receivable_aml = amls.filtered(lambda aml: aml.account_id == self.company_data['default_account_receivable'])
self.assertEqual(receivable_aml.debit, 24)
self.assertEqual(receivable_aml.credit, 0)
| |
between 0 and maxangle
:param angle2: angle scaled to be between 0 and maxangle
:param maxangle: max angle. defaults to 2 * pi.
:return: angle1 - angle2, shifted to be between -.5 and +.5 * maxangle
"""
if maxangle is None:
maxangle = np.pi * 2
return (((angle1 / maxangle)
- (angle2 / maxangle) + .5) % 1. - .5) * maxangle
def pconc2conc(pconc: np.ndarray) -> np.ndarray:
    """Map p in [0, 1) to a concentration 1 / (1 - p) - 1.

    Inverse of conc2pconc. Unbounded as p -> 1; no clipping is performed,
    so p == 1 yields inf and p > 1 yields negative values.
    (Removed stale commented-out clipping code.)
    """
    return 1. / (1. - pconc) - 1.
def conc2pconc(conc: np.ndarray) -> np.ndarray:
    """Map a concentration c >= 0 to p = 1 - 1 / (c + 1) in [0, 1).

    Inverse of pconc2conc (was undocumented).
    """
    return 1. - 1. / (conc + 1.)
def rotation_matrix(rad, dim=(-2, -1)):
    """Build 2x2 rotation matrices [[cos, -sin], [sin, cos]] for angle(s) `rad`.

    If `rad` has fewer than 2 dims it is broadcast up to 2 dims first, so a
    scalar yields a plain 2x2 matrix; `dim` selects the two axes along which
    the matrix rows/columns are laid out.
    """
    if np.ndim(rad) < 2:
        rad = np.asarray(rad)
        # Append enough trailing singleton axes to reach 2 dims.
        new_axes = list(-(np.arange(2 - rad.ndim) + 1))
        rad = np.expand_dims(rad, new_axes)
    c, s = np.cos(rad), np.sin(rad)
    top_row = np.concatenate((c, -s), dim[1])
    bottom_row = np.concatenate((s, c), dim[1])
    return np.concatenate((top_row, bottom_row), dim[0])
def rotate(v, rad: np.ndarray) -> np.ndarray:
    """
    Rotate 2-D vectors by the given angles (radians).

    :param v: [batch_dims, (x0, y0)]
    :param rad: [batch_dims]
    :return: [batch_dims, (x, y)]
    """
    # Build a batched [..., 2, 2] rotation matrix, then apply it with a
    # batched matrix-vector product ([..., 2, 2] @ [..., 2, 1]).
    rotmat = rotation_matrix(np.expand_dims(rad, (-1, -2)))
    return np.squeeze(rotmat @ np.expand_dims(v, -1), -1)
def ellipse2cov(th, long_axis, short_axis) -> np.array:
    """
    Covariance matrix whose principal axes match the given ellipse.

    :param th: radian; orientation of the long axis
    :param long_axis: standard deviation along the (pre-rotation) x axis
    :param short_axis: standard deviation along the (pre-rotation) y axis
    :return: covariance matrix
    """
    # R @ diag(long, short)**2 @ R.T rotates the axis-aligned covariance by th.
    rot = rotation_matrix(th)
    cov = rot @ np.diag([long_axis, short_axis]) ** 2 @ rot.T
    return cov
def ____GEOMETRY____():
    # Visual section divider (no-op): geometry helpers follow.
    pass
def lineseg_dists(p, a, b):
    """
    Cartesian distance from point to line segment

    Edited to support arguments as series, from:
    https://stackoverflow.com/a/54442561/11208892
    From: https://stackoverflow.com/a/58781995/2565317

    Args:
        - p: np.array of single point, shape (2,) or 2D array, shape (x, 2)
        - a: np.array of shape (x, 2): segment start points
        - b: np.array of shape (x, 2): segment end points
    """
    # Unit tangent vector of each segment.
    seg = b - a
    seg_len = np.hypot(seg[:, 0], seg[:, 1]).reshape(-1, 1)
    d = seg / seg_len

    # Signed parallel distances past each endpoint (row-wise dot products).
    s = ((a - p) * d).sum(axis=1)
    t = ((p - b) * d).sum(axis=1)

    # Distance along the segment direction, clamped to 0 inside the segment.
    h = np.maximum.reduce([s, t, np.zeros(len(s))])

    # Perpendicular component via the row-wise 2-D cross product.
    d_pa = p - a
    c = d_pa[:, 0] * d[:, 1] - d_pa[:, 1] * d[:, 0]

    return np.hypot(h, c)
def distance_point_line(
        point: np.ndarray,
        line_st: np.ndarray,
        line_en: np.ndarray) -> np.ndarray:
    """
    Signed distance from point(s) to the infinite line through line_st/line_en.

    Adapted from https://stackoverflow.com/a/48137604/2565317

    :param point: [index, (x, y)]
    :param line_st: [index, (x, y)]
    :param line_en: [index, (x, y)]
    :return: distance[index]
    """
    direction = line_en - line_st
    offset = point - line_st
    # |direction x offset| is the parallelogram area; dividing by the base
    # length gives the (signed) height, i.e. the distance to the line.
    return np.cross(direction, offset) / np.linalg.norm(direction)
def ____TRANSFORM____():
    # Visual section divider (no-op): scalar/vector transforms follow.
    pass
def logit(v):
    """Log-odds transform log(v) - log(1 - v); inverse of `logistic`."""
    log_p = np.log(v)
    log_q = np.log(1 - v)
    return log_p - log_q
def logistic(v):
    """Standard sigmoid 1 / (1 + exp(-v)); inverse of `logit`."""
    denom = np.exp(-v) + 1
    return 1 / denom
def softmax(dv):
    """Softmax of `dv`, normalized over all elements.

    Accepts both torch tensors and array-likes. The maximum is subtracted
    before exponentiation for numerical stability (fixes overflow -> nan for
    large scores); this does not change the result because softmax is
    invariant to a constant shift.

    :param dv: scores (any shape); normalization is over all elements.
    :return: probabilities with the same shape as the input.
    """
    if isinstance(dv, torch.Tensor):
        edv = torch.exp(dv - torch.max(dv))
        p = edv / torch.sum(edv)
    else:
        dv = np.asarray(dv)
        edv = np.exp(dv - np.max(dv))
        p = edv / np.sum(edv)
    return p
def softargmax(dv):
    """Sample an index with probability softmax(dv).

    Stochastic (uses np.random); despite the name, this draws a sample rather
    than computing the deterministic soft-argmax expectation.
    """
    p = softmax(dv)
    # Draw one multinomial sample and return the index of the chosen category.
    a = np.nonzero(np.random.multinomial(1, p))[0][0]
    return a
def project(a, b, axis=None, scalar_proj=False):
    """
    Project vector a onto b (vector dimensions are along axis).

    :type a: np.array
    :type b: np.array
    :type axis: None, int
    :param scalar_proj: if True, return the scalar coefficient instead of
        the projected vector.
    :rtype: np.array
    """
    coeff = np.sum(a * b, axis) / np.sum(b ** 2, axis)
    return coeff if scalar_proj else coeff * b
def inverse_transform(xy0: np.ndarray, xy1: np.ndarray) -> np.ndarray:
    """
    :param xy0: [(x, y), ix, iy]: original grid
    :param xy1: [(x, y), ix, iy]: transformed grid
    :return: xy2: [(x, y), ix, iy]: inverse-transformed original grid
    """
    from scipy.interpolate import griddata
    if xy0.ndim == 3:
        # Recurse 1-D slice by slice: x-coordinates along columns of the
        # x-plane, y-coordinates along rows of the y-plane.
        xy2 = np.stack([
            np.stack([
                inverse_transform(xy00, xy11)
                for xy00, xy11 in zip(xy0[0].T, xy1[0].T)
            ]).T,
            np.stack([
                inverse_transform(xy00, xy11)
                for xy00, xy11 in zip(xy0[1], xy1[1])
            ])
        ])
    elif xy0.ndim == 1:
        # 1-D base case: linear interpolation with points=xy1, values=xy0.
        # NOTE(review): the evaluation points (third argument) are xy0 --
        # confirm this is intended and not xy1.
        xy2 = griddata(xy1, xy0, xy0, method='linear')
    else:
        raise ValueError()
    return xy2
def ____BINARY_OPS____():
    # Visual section divider (no-op): binary operations follow.
    pass
def conv_circ(signal, ker):
    """Circular convolution of two equal-length real 1D arrays.

    Computed in the frequency domain (pointwise product of FFTs).
    From https://stackoverflow.com/a/38034801/2565317

    :param signal: real 1D array
    :param ker: real 1D array, same shape as `signal`
    """
    spectrum = np.fft.fft(signal) * np.fft.fft(ker)
    return np.real(np.fft.ifft(spectrum))
def ____COMPARISON____():
    # Visual section divider (no-op): comparison helpers follow.
    pass
def startswith(a: Sequence, b: Sequence) -> bool:
    """
    True if sequence `a` begins with sequence `b`.

    a and b should be the same type: tuple, list, np.ndarray, or torch.tensor

    EXAMPLE:
        startswith(np.array([1, 2, 3]), np.array([1, 2]))
        True

        startswith((1, 2), (1, 2, 3))
        False

        startswith((1, 2), (1,))
        True

    :param a: tuple, list, np.ndarray, or torch.tensor
    :param b: same type as a
    :return: True if a starts with b
    """
    if len(a) < len(b):
        return False
    head_matches = a[:len(b)] == b
    # Arrays compare elementwise; reduce with .all(). Plain sequences
    # already yield a bool and have no .all().
    try:
        return head_matches.all()
    except AttributeError:
        return head_matches
def ____IMAGE____():
    # Visual section divider (no-op): image/map helpers follow.
    pass
def nancrosscorr(
        fr1: np.ndarray,
        fr2: np.ndarray = None,
        thres_n=2,
        fillvalue=np.nan,
        processes=1,
) -> np.ndarray:
    """
    Normalized cross-correlation ignoring NaNs.
    As in Barry et al. 2007

    :param fr1: [x, y, batch]
    :param fr2: [x, y, batch]; defaults to fr1 (autocorrelation).
    :param fillvalue: value used where fewer than thres_n valid bins overlap.
    :param thres_n: Minimum number of non-NaN entries to compute crosscorr with.
    :param processes: number of worker processes for the offset loop.
    :return: cc[i_dx, i_dy, batch]
    """
    if fr2 is None:
        fr2 = fr1
    # Accept 2-D inputs by adding a singleton batch axis (removed on return).
    is_fr1_ndim2 = fr1.ndim == 2
    if is_fr1_ndim2:
        fr1 = fr1[..., None]
    is_fr2_ndim2 = fr2.ndim == 2
    if is_fr2_ndim2:
        fr2 = fr2[..., None]
    assert fr1.ndim == 3
    assert fr2.ndim == 3
    assert thres_n >= 2, 'to compute correlation thres_n needs to be >= 2'

    fsh1 = np.array(fr1.shape[:2])
    fsh2 = np.array(fr2.shape[:2])
    # NOTE: pad smaller of the two to match max_shape + 2,
    # + 2 to ensure both are padded on both sides to remove smoothing artifact
    max_sh0 = np.amax(np.stack([fsh1, fsh2], axis=0), axis=0)
    max_sh = max_sh0 + 2
    pad1 = max_sh - fsh1
    pad2 = max_sh - fsh2
    fr1 = np.pad(fr1, [
        (int(np.floor(pad1[0] / 2)),
         int(np.ceil(pad1[0] / 2))),
        (int(np.floor(pad1[1] / 2)),
         int(np.ceil(pad1[1] / 2))),
        (0, 0)
    ], constant_values=np.nan)
    fr2 = np.pad(fr2, [
        (int(np.floor(pad2[0] / 2)),
         int(np.ceil(pad2[0] / 2))),
        (int(np.floor(pad2[1] / 2)),
         int(np.ceil(pad2[1] / 2))),
        (0, 0)
    ], constant_values=np.nan)

    csh = max_sh0 * 2
    cc = np.zeros(tuple(csh) + fr1.shape[2:]) + fillvalue
    fsh = max_sh0
    dxs = np.arange(-fsh[0], fsh[0])
    # FIX: use the pool as a context manager so worker processes are always
    # reclaimed (previously the pool was created and never closed).
    with Pool(processes=processes) as pool:
        cc[fsh[0] + dxs] = np.array(pool.map(
            _ccorrs_given_dx,
            ((dx, csh, fillvalue, fr1, fr2, fsh, thres_n) for dx in dxs)
        ))
    if is_fr1_ndim2 and is_fr2_ndim2:
        assert cc.shape[-1] == 1
        cc = cc[..., 0]
    return cc
def _ccorrs_given_dx(inp):
    """
    Cross-correlations for one x-offset `dx`, across all y-offsets.

    :param inp: dx, csh, fillvalue, fr1, fr2, fsh, thres_n
        dx: int
        csh: [(x, y)] shape of the results (cross correlation)
        fillvalue: what to fill when the number of bins < thres_n
        fr1: [x, y, batch]
        fr2: [x, y, batch]
        fsh: [(x, y)]
        thres_n: min number of bins required
    :return: cross_correlation[y, batch] for this dx
    """
    # Single tuple argument so this picklable helper works with pool.map.
    dx, csh, fillvalue, fr1, fr2, fsh, thres_n = inp
    n_batch = fr1.shape[-1]
    cc0 = np.zeros([csh[1], n_batch]) + fillvalue
    # Shift fr1 relative to fr2 by dx along the first (x) axis.
    if dx == 0:
        f1 = fr1
        f2 = fr2
    elif dx > 0:
        f1 = fr1[dx:]
        f2 = fr2[:-dx]
    else:
        f1 = fr1[:dx]
        f2 = fr2[-dx:]
    for dy in range(-fsh[1], fsh[1]):
        # Shift by dy along the second (y) axis.
        if dy == 0:
            g1 = f1
            g2 = f2
        elif dy > 0:
            g1 = f1[:, dy:]
            g2 = f2[:, :-dy]
        else:
            g1 = f1[:, :dy]
            g2 = f2[:, -dy:]
        # Flatten the spatial axes, keeping the batch axis last.
        g1 = g1.reshape([np.prod(g1.shape[:2]), -1])
        g2 = g2.reshape([np.prod(g2.shape[:2]), -1])
        # Keep only bins that are non-NaN in every batch of both maps.
        incl = np.all(~np.isnan(g1), -1) & np.all(~np.isnan(g2), -1)
        if np.sum(incl) >= thres_n:
            cc0[dy + fsh[1]] = pearsonr(g1[incl].T, g2[incl].T)
    return cc0
def pearsonr(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
Same as scipy.stats.pearsonr, but works along dim=-1, w/o checks or pvalue.
:param a:
:param b:
:param dim:
:return:
"""
xmean = x.mean(axis=-1, keepdims=True)
ymean = y.mean(axis=-1, keepdims=True)
xm = x - xmean
ym = y - ymean
from scipy import linalg
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm, | |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
import os
import shutil
import fnmatch
import tempfile
from aiida.common.utils import get_repository_folder
# If True, tries to make everything (dirs, files) group-writable.
# Otherwise, tries to make everything only readable and writable by the user.
# TODO: put it in a global variable, and check if it really works!
group_writable = True

# NOTE(review): presumably the repository section names accepted elsewhere
# (cf. get_repository_folder) -- confirm against callers.
_valid_sections = ['node', 'workflow']
class Folder(object):
"""
A class to manage generic folders, avoiding to get out of
specific given folder borders.
.. todo::
fix this, os.path.commonprefix of /a/b/c and /a/b2/c will give
a/b, check if this is wanted or if we want to put trailing slashes.
(or if we want to use os.path.relpath and check for a string starting
with os.pardir?)
.. todo::
rethink whether the folder_limit option is still useful. If not, remove
it alltogether (it was a nice feature, but unfortunately all the calls
to os.path.abspath or normpath are quite slow).
"""
def __init__(self, abspath, folder_limit=None):
abspath = os.path.abspath(abspath)
if folder_limit is None:
folder_limit = abspath
else:
folder_limit = os.path.abspath(folder_limit)
# check that it is a subfolder
if not os.path.commonprefix([abspath,
folder_limit]) == folder_limit:
raise ValueError(
"The absolute path for this folder is not within the "
"folder_limit. abspath={}, folder_limit={}.".format(
abspath, folder_limit))
self._abspath = abspath
self._folder_limit = folder_limit
    @property
    def mode_dir(self):
        """
        Return the mode with which the folders should be created
        """
        # Module-level flag: group-writable installs get rwxrwx---, otherwise
        # directories are private to the owning user (rwx------).
        if group_writable:
            return 0o770
        else:
            return 0o700
    @property
    def mode_file(self):
        """
        Return the mode with which the files should be created
        """
        # rw-rw---- when group-writable, rw------- otherwise (no execute bit).
        if group_writable:
            return 0o660
        else:
            return 0o600
    def get_subfolder(self, subfolder, create=False, reset_limit=False):
        """
        Return a Folder object pointing to a subfolder.

        :param subfolder: a string with the relative path of the subfolder,
            relative to the absolute path of this object. Note that
            this may also contain '..' parts,
            as far as this does not go beyond the folder_limit.
        :param create: if True, the new subfolder is created, if it does not exist.
        :param reset_limit: when doing ``b = a.get_subfolder('xxx', reset_limit=False)``,
            the limit of b will be the same limit of a.
            if True, the limit will be set to the boundaries of folder b.

        :Returns: a Folder object pointing to the subfolder.
        """
        # NOTE: `unicode` makes this Python-2-only code.
        dest_abs_dir = os.path.abspath(os.path.join(
            self.abspath, unicode(subfolder)))

        if reset_limit:
            # Create a new Folder object, with a limit to itself (cannot go
            # back to this folder)
            folder_limit = None
        else:
            # Create a new Folder object, with the same limit of the parent
            folder_limit = self.folder_limit

        # Folder.__init__ validates that dest_abs_dir stays within the limit.
        new_folder = Folder(abspath=dest_abs_dir,
                            folder_limit=folder_limit)

        if create:
            # NOTE(review): relies on a `create()` method not shown in this
            # chunk -- presumably creates the directory on disk; confirm.
            new_folder.create()

        return new_folder
def get_content_list(self, pattern='*', only_paths=True):
"""
Return a list of files (and subfolders) in the folder,
matching a given pattern.
Example: If you want to exclude files starting with a dot, you can
call this method with ``pattern='[!.]*'``
:param pattern: a pattern for the file/folder names, using Unix filename
pattern matching (see Python standard module fnmatch).
By default, pattern is '*', matching all files and folders.
:param only_paths: if False (default), return pairs (name, is_file).
if True, return only a flat list.
:Returns:
a list of tuples of two elements, the first is the file name and
the second is True if the element is a file, False if it is a
directory.
"""
file_list = [fname for fname in os.listdir(self.abspath)
if fnmatch.fnmatch(fname, pattern)]
if only_paths:
return file_list
else:
return [(fname, not os.path.isdir(os.path.join(self.abspath, fname)))
for fname in file_list]
def create_symlink(self, src, name):
"""
Create a symlink inside the folder to the location 'src'.
:param src: the location to which the symlink must point. Can be
either a relative or an absolute path. Should, however,
be relative to work properly also when the repository is
moved!
:param name: the filename of the symlink to be created.
"""
dest_abs_path = self.get_abs_path(name)
os.symlink(src, dest_abs_path)
# For symlinks, permissions should not be set
    def insert_path(self, src, dest_name=None, overwrite=True):
        """
        Copy a file (or an entire directory tree) into the folder.

        :param src: the source filename to copy; must be an absolute path.
        :param dest_name: if None, the same basename of src is used. Otherwise,
            the destination filename will have this file name.
        :param overwrite: if ``False``, raises an error on existing destination;
            otherwise, delete it first.
        :return: the absolute destination path.
        """
        # NOTE: `unicode` makes this Python-2-only code.
        if dest_name is None:
            filename = unicode(os.path.basename(src))
        else:
            filename = unicode(dest_name)

        if not isinstance(src, unicode):
            src = unicode(src)

        # Also validates that the destination stays within folder_limit.
        dest_abs_path = self.get_abs_path(filename)

        if not os.path.isabs(src):
            raise ValueError("src must be an absolute path in insert_file")

        # In this way, the destination is always correct (i.e., if I copy to a
        # folder, I point to the correct location inside it)
        if os.path.isdir(dest_abs_path):
            dest_abs_path = os.path.join(dest_abs_path, os.path.basename(src))

        # The file and directory branches below mirror each other: remove an
        # existing destination if overwriting is allowed, then copy.
        if os.path.isfile(src):
            if os.path.exists(dest_abs_path):
                if overwrite:
                    if os.path.isdir(dest_abs_path):
                        shutil.rmtree(dest_abs_path)
                    else:
                        os.remove(dest_abs_path)
                    # This automatically overwrites files
                    shutil.copyfile(src, dest_abs_path)
                else:
                    raise IOError("destination already exists: {}".format(
                        os.path.join(dest_abs_path)))
            else:
                shutil.copyfile(src, dest_abs_path)
        elif os.path.isdir(src):
            if os.path.exists(dest_abs_path):
                if overwrite:
                    if os.path.isdir(dest_abs_path):
                        shutil.rmtree(dest_abs_path)
                    else:
                        os.remove(dest_abs_path)
                    # This automatically overwrites files
                    shutil.copytree(src, dest_abs_path)
                else:
                    raise IOError("destination already exists: {}".format(
                        os.path.join(dest_abs_path)))
            else:
                shutil.copytree(src, dest_abs_path)
        else:
            # Symlinks and special files are rejected.
            raise ValueError("insert_path can only insert files or paths, not symlinks or the like")

        return dest_abs_path
    def create_file_from_filelike(self, src_filelike, dest_name):
        """
        Create a file from a file-like object.

        :note: if the current file position in src_filelike is not 0,
            only the contents from the current file position to the end of the
            file will be copied in the new file.

        :param src_filelike: the file-like object (e.g., if you have
            a string called s, you can pass ``StringIO.StringIO(s)``)
        :param dest_name: the destination filename will have this file name.
        :return: the absolute path of the created file.
        """
        # NOTE: `unicode` makes this Python-2-only code. The destination is
        # opened in text mode ('w'), so src_filelike should yield text.
        filename = unicode(dest_name)

        # I get the full path of the filename, checking also that I don't
        # go beyond the folder limits
        dest_abs_path = self.get_abs_path(filename)
        with open(dest_abs_path, 'w') as f:
            shutil.copyfileobj(src_filelike, f)

        # Set the mode
        os.chmod(dest_abs_path, self.mode_file)
        return dest_abs_path
def remove_path(self, filename):
"""
Remove a file or folder from the folder.
:param filename: the relative path name to remove
"""
# I get the full path of the filename, checking also that I don't
# go beyond the folder limits
dest_abs_path = self.get_abs_path(filename, check_existence=True)
if os.path.isdir(dest_abs_path):
shutil.rmtree(dest_abs_path)
else:
os.remove(dest_abs_path)
def get_abs_path(self, relpath, check_existence=False):
"""
Return an absolute path for a file or folder in this folder.
The advantage of using this method is that it checks that filename
is a valid filename within this folder,
and not something e.g. containing slashes.
:param filename: The file or directory.
:param check_existence: if False, just return the file path.
Otherwise, also check if the file or directory actually exists.
Raise OSError if it does not.
"""
if os.path.isabs(relpath):
raise ValueError("relpath must be a relative path")
dest_abs_path = os.path.join(self.abspath, relpath)
if not os.path.commonprefix([dest_abs_path, self.folder_limit]) == self.folder_limit:
errstr = "You didn't specify a valid filename: {}".format(relpath)
raise ValueError(errstr)
if check_existence:
if not os.path.exists(dest_abs_path):
raise OSError("{} does not exist within the folder {}".format(
relpath, self.abspath))
return dest_abs_path
    def open(self, name, mode='r'):
        """
        Open a file in the current folder and return the corresponding
        file object.

        The caller is responsible for closing it (e.g. via ``with``).
        """
        return open(self.get_abs_path(name), mode)
    @property
    def abspath(self):
        """
        The absolute path of the folder (read-only; set in __init__).
        """
        return self._abspath
    @property
    def folder_limit(self):
        """
        The folder limit that cannot be crossed when creating files and folders.
        (Read-only; set in __init__.)
        """
        return self._folder_limit
    def exists(self):
        """
        Return True if the folder exists on disk, False otherwise.
        """
        return os.path.exists(self.abspath)
    def isfile(self, relpath):
        """
        Return True if 'relpath' exists inside the folder and is a file,
        False otherwise.
        """
        # Direct join (no folder_limit check) -- consistent with isdir below.
        return os.path.isfile(os.path.join(self.abspath, relpath))
    def isdir(self, relpath):
        """
        Return True if 'relpath' exists inside the folder and is a directory,
        False otherwise.
        """
        # Direct join (no folder_limit check) -- consistent with isfile above.
        return os.path.isdir(os.path.join(self.abspath, relpath))
def erase(self, create_empty_folder=False):
"""
Erases the folder. Should be called only in very specific cases,
in general folder should not be erased!
Doesn't complain if the folder does not exist.
:param create_empty_folder: if True, after erasing, creates an empty | |
#!/usr/bin/python2
"""Reads and plots the data in a simulation log protocol buffer file.
This script requires Python 2, because protobufs doesn't really work for Python 3 yet."""
import argparse
import os
import sys
from textwrap import wrap
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from remy_tool_runner import SenderLoggerRunner
from matplotlib.patches import Circle
from matplotlib.animation import FuncAnimation
import utils
import datautils
# Default directory plots are written into, and the name of a symlink that
# presumably points at the most recent plots directory (confirm at call site).
DEFAULT_PLOTS_DIR = "log-plots"
LAST_PLOTS_SYMLINK = "last-plots"
def pretty(name):
    """Human-readable label for a dotted attribute name: keep the last
    component, capitalize it, and replace underscores with spaces."""
    last_component = name.rsplit('.', 1)[-1]
    return last_component.capitalize().replace("_", " ")
class BaseFigureGenerator(object):
    """Abstract base class to generate figures.

    BasePlotGenerator and BaseAnimationGenerator both derive from this class."""

    title = None
    xlabel = None
    ylabel = None
    figfilename = None
    plotsdir = '.'
    plot_kwargs = {}
    file_extension = None

    def __init__(self, **kwargs):
        # Instance-level overrides for the class-level defaults.
        self._plotsdir = kwargs.pop('plotsdir', self.plotsdir)
        self._plot_kwargs = kwargs.pop('plot_kwargs', None)
        super(BaseFigureGenerator, self).__init__(**kwargs)

    def get_figfilename(self, extension=None):
        """Returns the file name to which the figure should be saved.

        Subclasses for which self.file_extension is a list should iterate over
        self.file_extension and call this with the `extension` argument."""
        extension = self.file_extension if extension is None else extension
        if not isinstance(extension, str):
            raise ValueError("Bad file extension: " + repr(extension))
        path = os.path.join(self._plotsdir, self.figfilename)
        suffix = "." + extension
        if not path.endswith(suffix):
            path += suffix
        return path

    def get_plot_kwargs(self, i=None):
        """Returns the keyword arguments that should be applied to plots, for
        the graph of index i."""
        merged = dict(self.plot_kwargs)
        overrides = self._plot_kwargs
        if isinstance(overrides, list):
            # Per-series overrides: an index is mandatory.
            if i is None:
                raise ValueError("For this generator, plot_kwargs must be a dict, not a list of dicts")
            merged.update(overrides[i])
        elif isinstance(overrides, dict):
            merged.update(overrides)
        elif overrides is not None:
            raise TypeError("plot_kwargs must be a list of dicts, or a dict")
        return merged

    def generate(self, run_data):
        raise NotImplementedError("Subclasses must implement generate()")

    def _print_generating_line(self):
        """Print a progress line naming the file(s) about to be generated."""
        exts = self.file_extension
        if isinstance(exts, (list, tuple)):
            extension = "(" + ",".join(exts) + ")"
        else:
            extension = None
        print("Generating {}...".format(self.get_figfilename(extension)))
class BasePlotGenerator(BaseFigureGenerator):
    """Abstract base class to generate plots."""

    legend_location = 'best'
    file_extension = ['svg', 'png']

    def iter_plot_data(self, run_data):
        """Iterates through data to be plotted. The default implementation just
        gives the single element `self.get_plot_data(run_data)`. Subclasses
        that need to plot more than one series should override this method.
        Each iteration should yield a 3-tuple (x, y, label)."""
        # Default: a single unlabeled series.
        yield self.get_plot_data(run_data) + (None,)

    def get_plot_data(self, run_data):
        """Either this or `iter_plot_data()` must be implemented by subclasses.
        Returns a tuple of two elements (x, y) each being a list of data points.
        The two lists must have the same length."""
        raise NotImplementedError("Subclasses must implement either get_plot_data() or iter_plot_data()")

    def generate(self, run_data, actions=None):
        """Generates the figure for `run_data`, which should be a
        SimulationRunData instance.

        :param actions: optional; stored on self for use by subclasses.
        """
        self._print_generating_line()
        self.actions = actions
        self.fig = plt.figure()
        self.generate_plot(run_data)
        # One output file per configured extension (svg and png by default).
        for ext in self.file_extension:
            self.fig.savefig(self.get_figfilename(ext), format=ext, bbox_inches='tight')
        plt.close(self.fig)

    def generate_plot(self, run_data):
        """Generates the plot for `run_data`, which should be a
        SimulationRunData instance."""
        self.ax = self.fig.add_subplot(111)
        for i, (x, y, label) in enumerate(self.iter_plot_data(run_data)):
            self.ax.plot(x, y, label=label, **self.get_plot_kwargs(i))
        self.ax.set_title(self.title)
        self.ax.set_xlabel(self.xlabel)
        self.ax.set_ylabel(self.ylabel)
        # Optional axis-limit hooks supplied by mixins/subclasses.
        if hasattr(self, 'get_xlim'):
            self.ax.set_xlim(self.get_xlim(run_data))
        if hasattr(self, 'get_ylim'):
            self.ax.set_ylim(self.get_ylim(run_data))
        # Only show a legend when there is more than one line.
        if len(self.ax.lines) > 1:
            self.ax.legend(loc=self.legend_location)
class BaseAnimationGenerator(BaseFigureGenerator):
    """Abstract base class to generate timed animations."""

    history = 20
    file_extension = 'mp4'
    dpi = 200
    plot_kwargs = {'linestyle': 'solid', 'linewidth': 0.25, 'color': (0.75, 0.75, 0.75),
                   'marker': '.', 'markersize': 12.0, 'markerfacecolor': 'blue', 'markeredgecolor': 'blue'}

    def __init__(self, **kwargs):
        super(BaseAnimationGenerator, self).__init__(**kwargs)

    def animate(self, i):
        """Draws frame `i`. This function is passed to FuncAnimation; see
        the matplotlib animations documentation for details."""
        raise NotImplementedError("Subclasses must implement animate()")

    def generate(self, run_data):
        """Render and save the animation for `run_data`.

        The subclass `initial()` hook must populate `self._times` before the
        animation is built."""
        self._print_generating_line()
        self.fig = plt.figure()
        self.initial(run_data)
        # BUG FIX: was `self._fig`, which is never assigned anywhere in this
        # hierarchy (subclasses set `self.fig`), causing an AttributeError.
        anim = FuncAnimation(self.fig, self.animate, frames=len(self._times),
                             interval=run_data.log_interval_ticks)
        anim.save(self.get_figfilename(), dpi=self.dpi)
        plt.close(self.fig)

    def initial(self, run_data):
        """Initializes the animation. This function is passed to FuncAnimation;
        see the matplotlib animations documentation for details."""
        raise NotImplementedError("Subclasses must implement initial()")
class BaseSingleAnimationGenerator(BaseAnimationGenerator):
    """Animation with a single trajectory plot plus per-sender status circles."""

    def animate(self, i):
        """Draw frame `i`: the last `self.history` trajectory points, the time
        caption, and one circle per sender (green if sending, red if not)."""
        sys.stdout.write("Up to frame {:d} of {:d}...\r".format(i, len(self._times)))
        sys.stdout.flush()
        # Sliding window: show at most the last `history` samples.
        if i < self.history:
            self._line.set_data(self._x[:i], self._y[:i])
        else:
            self._line.set_data(self._x[i-self.history:i], self._y[i-self.history:i])
        self._text.set_text('t = {:.2f} ({:d})'.format(self._times[i], i))
        for circle, sending in zip(self._circles, self._sending[i]):
            circle.set_facecolor('g' if sending else 'r')

    def initial(self, run_data):
        """Set up the axes, the (initially empty) trajectory line, the time
        text, and one status circle per sender."""
        # NOTE(review): get_times/get_sending are not defined in this chunk's
        # hierarchy -- presumably supplied by subclasses; confirm.
        self._times = self.get_times(run_data)
        self._sending = self.get_sending(run_data)
        self._x, self._y = self.get_plot_data(run_data)
        xmax = max(self._x)
        ymax = max(self._y)
        self.ax = self.fig.add_subplot(111)
        self.ax.set_title(self.title)
        self.ax.set_xlabel(self.xlabel)
        self.ax.set_ylabel(self.ylabel)
        self.ax.set_xlim([0, xmax])
        self.ax.set_ylim([0, ymax])
        self._line = self.ax.plot([], [], **self.get_plot_kwargs())[0]
        # Time caption and sender circles live in axes coordinates.
        self._text = self.ax.text(0.05, 0.95, '', transform=self.ax.transAxes)
        self._circles = []
        for i in range(run_data.config.num_senders):
            circle = Circle((0.05+i*0.05, 0.90), radius=0.02, facecolor='k', transform=self.ax.transAxes)
            self.ax.add_artist(circle)
            self._circles.append(circle)

    def get_plot_data(self, run_data):
        """Must be implemented by subclasses. Returns a tuple of two elements
        (x, y) each being a list of data points. The two lists must have the
        same length."""
        raise NotImplementedError("Subclasses must implement get_plot_data()")
class BaseGridAnimationGenerator(BaseAnimationGenerator):
    """Abstract base class to generate grid animations.

    Subclasses must implement get_plot_data(), which must return a tuple of
    lists. The animation will then draw one plot for each pair of lists.
    """

    ticklabelsize = 5
    axislabelsize = 8
    timetextsize = 9
    wrapwidth = 10
    plot_kwargs = {'linestyle': 'solid', 'linewidth': 0.25, 'color': (0.75, 0.75, 0.75),
                   'marker': '.', 'markersize': 4.0, 'markerfacecolor': 'blue', 'markeredgecolor': 'blue'}

    def animate(self, index):
        """Draw frame `index` in every cell of the nvars x nvars grid."""
        sys.stdout.write("Up to frame {:d} of {:d}...\r".format(index, len(self._times)))
        sys.stdout.flush()
        nvars = self._nvars
        for i in range(nvars):
            y = self._data[i]
            for j in range(nvars):
                x = self._data[j]
                # Sliding window of the last `history` samples.
                if index < self.history:
                    self._lines[i*nvars+j].set_data(x[:index], y[:index])
                else:
                    self._lines[i*nvars+j].set_data(x[index-self.history:index],
                                                    y[index-self.history:index])
        self._text.set_text('t = {:.2f} ({:d})'.format(self._times[index], index))
        for circle, sending in zip(self._circles, self._sending[index]):
            circle.set_facecolor('g' if sending else 'r')

    def initial(self, run_data):
        """Set up the nvars x nvars grid of axes, the time caption, and the
        per-sender status circles."""
        self._times = run_data.get_times()
        self._sending = run_data.get_sending()
        self._data = self.get_plot_data(run_data)
        nvars = self._nvars = len(self._data)
        maxes = [max(d) for d in self._data]
        # Flat list of lines; the plot for (row i, col j) is at index i*nvars+j.
        # (Comment fixed: this is not a 2D list.)
        self._lines = []
        self._text = self.fig.text(0.05, 0.95, '', size=self.timetextsize)
        self._circles = []
        # NOTE(review): uses run_data.num_senders, whereas
        # BaseSingleAnimationGenerator uses run_data.config.num_senders --
        # confirm which attribute is correct.
        for i in range(run_data.num_senders):
            circle = Circle((0.05+i*0.05, 0.92), radius=0.02, facecolor='k', transform=self.fig.transFigure)
            self.fig.patches.append(circle)
            self._circles.append(circle)
        for i in range(nvars):
            for j in range(nvars):
                ax = self.fig.add_subplot(nvars, nvars, i*nvars+j+1)
                line = ax.plot([], [], **self.get_plot_kwargs())[0]
                self._lines.append(line)
                ax.set_xlim([0, maxes[j]])
                ax.set_ylim([0, maxes[i]])
                # x tick labels apply to last row only
                if i == nvars-1:
                    for label in ax.get_xticklabels():
                        label.set_size(self.ticklabelsize)
                        label.set_rotation('vertical')
                else:
                    ax.set_xticklabels([])
                if i == 0:
                    # Column titles sit on top of the first row.
                    xlabeltext = '\n'.join(wrap(self.titles[j], self.wrapwidth))
                    ax.set_xlabel(xlabeltext, fontsize=self.axislabelsize)
                    ax.get_xaxis().set_label_position('top')
                # y tick labels apply to first column only
                if j == 0:
                    for label in ax.get_yticklabels():
                        label.set_size(self.ticklabelsize)
                    ylabeltext = '\n'.join(wrap(self.titles[i], self.wrapwidth))
                    ax.set_ylabel(ylabeltext, fontsize=self.axislabelsize)
                else:
                    ax.set_yticklabels([])

    def get_plot_data(self, run_data):
        """Must be implemented by subclasses. Returns a tuple of lists, each
        being a list of data points. The lists must all have the same length.
        The animation will plot one plot for each pair of lists."""
        raise NotImplementedError("Subclasses must implement get_plot_data()")
class TimePlotMixin(object):
    """Provides functions for plots where the x-axis is time."""

    xlabel = "Time (s)"
    overlay_actions = False

    def __init__(self, **kwargs):
        # Allow a per-instance override of the class default.
        self._overlay_actions = kwargs.pop('overlay_actions', self.overlay_actions)
        super(TimePlotMixin, self).__init__(**kwargs)

    def get_xlim(self, run_data):
        """x-axis limits spanning the full recorded time range."""
        x = run_data.get_times()
        return [min(x), max(x)]

    def plot_action_change_times(self, ax, run_data, index):
        """Adds dots for action changes times on the axes `ax`."""
        # NOTE(review): despite the docstring, this draws vertical lines
        # (axvline), not dots -- confirm which is intended.
        times = run_data.get_action_change_times(index)
        for time in times:
            ax.axvline(time, color=(0.5, 0.5, 0.5), linewidth=0.25)

    def plot_action_bounds(self, ax, run_data, index, attrname):
        """Shade, per action interval, the regions below the lower bound and
        above the upper bound of `attrname` in light grey."""
        lower, upper = run_data.get_action_bounds(index, attrname)
        t_start = run_data.get_action_change_times(index)
        # Each interval ends where the next begins; the last ends at the
        # final recorded time.
        t_end = t_start[1:] + [run_data.get_times()[-1]]
        ymin, ymax = ax.get_ylim()
        for t1, t2, l, u in zip(t_start, t_end, lower, upper):
            ax.fill([t1, t1, t2, t2], [ymin, l, l, ymin], color=(0.75, 0.75, 0.75))
            ax.fill([t1, t1, t2, t2], [u, ymax, ymax, u], color=(0.75, 0.75, 0.75))
class TimePlotGenerator(TimePlotMixin, BasePlotGenerator):
"""Generates plots where the x-axis is time and the y-axis is taken directly
from raw data."""
def __init__(self, *attrnames, **kwargs):
self.attrnames = attrnames
self.figfilename = "__".join(attrnames)
pretty_name = ", ".join([pretty(attrname) for attrname in attrnames])
unit = kwargs.pop('unit', None)
self.ylabel = pretty_name
if unit:
self.ylabel += " ({})".format(unit)
self.title = pretty_name
self.senders = kwargs.pop('senders', None)
if isinstance(self.senders, int):
self.senders = [self.senders]
if self.senders is not None:
if len(self.senders) == 1:
self.title += ": sender {}".format(self.senders[0])
self.figfilename += "_sender_{}".format(self.senders[0])
else:
self.title += ": senders {}".format(", ".join([str(x) for x in self.senders]))
self.figfilename += "_senders_{}".format("_".join([str(x) for x in self.senders]))
super(TimePlotGenerator, self).__init__(**kwargs)
def _senders(self, run_data):
return self.senders if self.senders is not None else range(run_data.num_senders)
def iter_plot_data(self, run_data):
label_attrname = len(self.attrnames) > 1
label_sender = len(self._senders(run_data)) > 1
for attrname in self.attrnames:
for i in self._senders(run_data):
x, y = run_data.get_time_data(i, attrname)
label = pretty(attrname) if label_attrname else "sender"
if label_sender:
label += " {:d}".format(i)
yield x, y, label
def generate_plot(self, run_data):
super(TimePlotGenerator, self).generate_plot(run_data)
if self._overlay_actions and self.senders is not None and len(self.senders) == 1:
sender = self.senders[0]
self.plot_action_change_times(self.ax, run_data, sender)
if len(self.attrnames) == 1 and "memory" in datautils.RunData.RAW_ATTRIBUTES[self.attrnames[0]]:
ylim = self.ax.get_ylim()
self.plot_action_bounds(self.ax, | |
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : WenRichard
# @Email : <EMAIL>
# @File : predict_bert_crf.py
# @Software: PyCharm
import pandas as pd
import tensorflow as tf
import numpy as np
import codecs
import pickle
import os
from datetime import time, timedelta, datetime
import json
import copy
from run_bert_wol import create_model, InputFeatures, InputExample
from bert import tokenization
from bert import modeling_bert
from public_tools.ner_utils import get_entity, get_result
from public_tools.tag_evaluating import Metrics
from public_tools.entity_evaluating import entity_metrics
from public_tools.cluener_score import get_f1_score
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
flags = tf.flags
FLAGS = flags.FLAGS

# NOTE: help strings below were corrected — do_dev_offline previously said
# "predict online" and do_predict_offline said "outline".
flags.DEFINE_bool(
    "do_dev_offline", True,
    "Whether to run offline evaluation on the dev set."
)
flags.DEFINE_bool(
    "do_predict_offline", False,
    "Whether to do predict offline."
)
flags.DEFINE_bool(
    "do_predict_online", False,
    "Whether to do predict online."
)

# Grow GPU memory on demand instead of reserving it all up front.
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.allow_growth = True
class Args():
    """Static configuration for the prediction / evaluation entry points.

    Attribute names (including the historical typo ``kflod``) are part of
    the public interface and are kept unchanged.
    """

    def __init__(self):
        # Cross-validation fold count (sic: typo kept for compatibility).
        self.kflod = 2
        # Inference-time model settings.
        self.is_training = False
        self.use_one_hot_embeddings = False
        self.batch_size = 1
        # Dev / test data locations and prediction output files.
        self.dev_file = './data/clue_ner/dev.txt'
        self.dev_file_json = './data/clue_ner/dev.json'
        self.dev_result_file = './data/clue_ner/submit/cluener_predict_dev.json'
        self.test_file = './data/clue_ner/test.json'
        self.test_result_file = './data/clue_ner/submit/cluener_predict.json'
        # Pretrained BERT assets and the checkpoint directory.
        self.bert_config_file = 'D:/Expriment/pretrain_model_tf/bert/bert_config.json'
        self.output_dir = 'D:/Expriment/model_output/ner_tool/bert_wol/single_task/clue_ner/runs/checkpoints'
        self.vocab_file = 'D:/Expriment/pretrain_model_tf/bert/vocab.txt'
args = Args()
# Load the label->id dictionary saved at training time, and invert it so
# predicted ids can be decoded back to tag strings.
with codecs.open(os.path.join(args.output_dir, 'label2id.pkl'), 'rb') as rf:
    label2id = pickle.load(rf)
    id2label = {value: key for key, value in label2id.items()}
num_labels = len(label2id)
# NOTE(review): 'global' at module level is a no-op; the graph/session are
# shared by all entry points below.
global graph
graph = tf.get_default_graph()
sess = tf.Session(config=gpu_config)
def parse_file(input_file):
    """Parse a character-level NER file into [text, labels] pairs.

    The file holds one tab-separated "char\\tlabel" pair per line, with
    sentences separated by blank lines. Each returned element is
    [w, l] where w is the space-joined characters (e.g. '中 共 中 央')
    and l the space-joined labels (e.g. 'B-ORG M-ORG M-ORG M-ORG').

    Fixes over the previous version: the final sentence is no longer
    dropped when the file does not end with a blank line, and malformed
    lines (non-blank, no tab) are skipped instead of re-appending stale
    tokens (which could also raise NameError on a malformed first line).

    :param input_file: path of the file to parse.
    :return: list of [text, labels] pairs.
    """
    lines = []
    words = []
    labels = []

    def flush():
        # Emit the accumulated sentence. An empty pair ['', ''] is still
        # emitted for a leading/duplicate blank line, matching the old
        # behaviour.
        l = ' '.join([label for label in labels if len(label) > 0])
        w = ' '.join([word for word in words if len(word) > 0])
        lines.append([w, l])

    with open(input_file, 'r', encoding='utf-8') as f:
        for line in f:
            contends = line.strip()
            tokens = contends.split('\t')
            if len(tokens) == 2:
                words.append(tokens[0])
                labels.append(tokens[1])
            elif len(contends) == 0:
                flush()
                words = []
                labels = []
            # else: malformed line (no tab) — skip it
    if words:
        # Flush the trailing sentence when there is no final blank line.
        flush()
    return lines
def dev_offline(file):
    """
    Offline evaluation on a labelled dev set.

    Restores the latest BERT-CRF checkpoint, predicts tags for every
    sentence in `file`, prints tag-level and entity-level metrics, and
    writes the predictions to args.dev_result_file in CLUE submission
    format (one JSON object per line).

    :param file: path of the dev file understood by parse_file
        ("char\\tlabel" lines, sentences separated by blank lines).
    :return: None
    """
    def convert(line, label):
        # Build (1, max_seq_length) int arrays for one example; the first
        # argument (2) is the ex_index used only for logging/debugging.
        feature = convert_single_example_dev(2, line, label, label2id, FLAGS.max_seq_length, tokenizer)
        input_ids = np.reshape([feature.input_ids], (1, FLAGS.max_seq_length))
        input_mask = np.reshape([feature.input_mask], (1, FLAGS.max_seq_length))
        segment_ids = np.reshape([feature.segment_ids], (1, FLAGS.max_seq_length))
        label_ids =np.reshape([feature.label_ids], (1, FLAGS.max_seq_length))
        return input_ids, input_mask, segment_ids, label_ids

    global graph
    with graph.as_default():
        # sess.run(tf.global_variables_initializer())
        # Fixed batch size of 1: this path evaluates one sentence at a time.
        input_ids_p = tf.placeholder(tf.int32, [1, FLAGS.max_seq_length], name="input_ids")
        input_mask_p = tf.placeholder(tf.int32, [1, FLAGS.max_seq_length], name="input_mask")
        label_ids_p = tf.placeholder(tf.int32, [1, FLAGS.max_seq_length], name="label_ids")
        segment_ids_p = tf.placeholder(tf.int32, [1, FLAGS.max_seq_length], name="segment_ids")
        bert_config = modeling_bert.BertConfig.from_json_file(args.bert_config_file)
        (total_loss, pred_ids) = create_model(
            bert_config, args.is_training, input_ids_p, input_mask_p, segment_ids_p, label2id,
            label_ids_p, args.use_one_hot_embeddings)
        # Restore the most recent checkpoint from the output directory.
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(args.output_dir))
        print(tf.train.latest_checkpoint(args.output_dir))
        tokenizer = tokenization.FullTokenizer(
            vocab_file=args.vocab_file, do_lower_case=FLAGS.do_lower_case)
        # Build the id -> char lookup from the tokenizer vocabulary.
        id2char = tokenizer.inv_vocab
        dev_texts, dev_labels = zip(*parse_file(file))
        start = datetime.now()
        # Per-sentence accumulators for metric computation and file output.
        pred_labels_all = []
        true_labels_all = []
        x_all = []
        sent_tags = []
        for index, text in enumerate(dev_texts):
            sentence = str(text)
            input_ids, input_mask, segment_ids, label_ids = convert(sentence, dev_labels[index])
            feed_dict = {input_ids_p: input_ids,
                         input_mask_p: input_mask,
                         segment_ids_p: segment_ids,
                         label_ids_p: label_ids}
            # run session get current feed_dict result
            y_pred = sess.run([pred_ids], feed_dict)
            # print(list(y_pred[0][0]))
            # print(len(list(y_pred[0][0])))
            sent_tag = []
            y_pred_clean = []
            input_ids_clean = []
            y_true_clean = []
            # Strip [CLS] and [SEP] to recover the valid tag span.
            for index_b, id in enumerate(list(np.reshape(input_ids, -1))):
                char = id2char[id]
                tag = id2label[list(y_pred[0][0])[index_b]]
                if char == "[CLS]":
                    continue
                if char == "[SEP]":
                    break
                input_ids_clean.append(id)
                sent_tag.append(tag)
                y_pred_clean.append(list(y_pred[0][0])[index_b])
                y_true_clean.append(label_ids[0][index_b])
            pred_labels_all.append(y_pred_clean)
            true_labels_all.append(y_true_clean)
            x_all.append(input_ids_clean)
            sent_tags.append(sent_tag)
        # Tag-level evaluation (predicted vs. gold label sequences).
        print('预测标签与真实标签评价结果......')
        print(pred_labels_all)
        print(len(pred_labels_all))
        print(true_labels_all)
        print(len(true_labels_all))
        metrics = Metrics(true_labels_all, pred_labels_all, id2label, remove_O=True)
        metrics.report_scores()
        # metrics.report_confusion_matrix()
        # Entity-level evaluation (predicted vs. gold entity spans).
        print('预测实体与真实实体评价结果......')
        precision, recall, f1 = entity_metrics(x_all, pred_labels_all, true_labels_all, id2char, id2label)
        print("Dev P/R/F1: {} / {} / {}".format(round(precision, 2), round(recall, 2), round(f1, 2)))
        print('Time used: {} sec'.format((datetime.now() - start).seconds))
        # Save predictions in the CLUE submission file format.
        with open(args.dev_result_file, 'w', encoding='utf-8') as fo:
            for index, text in enumerate(dev_texts):
                sentence = ''.join(text.split(' '))
                sent_tag = sent_tags[index]
                result_words, result_pos = get_result(sentence, sent_tag)
                # Group extracted spans by their predicted label.
                rs = {}
                for w, p in zip(result_words, result_pos):
                    rs[p] = rs.get(p, []) + [w]
                # label -> {entity text -> [span, ...]}
                preds = {}
                for p, ws in rs.items():
                    temp = {}
                    for w in ws:
                        word = sentence[w[0]: w[1] + 1]
                        temp[word] = temp.get(word, []) + [w]
                    preds[p] = temp
                output_line = json.dumps({'id': index, 'label': preds}, ensure_ascii=False) + '\n'
                fo.write(output_line)
def predict_online():
    """
    Interactive online prediction: read one sentence at a time from stdin
    and print its predicted tag sequence and extracted entities.
    Loops forever; each iteration makes a single-instance prediction.
    """
    def convert(line):
        # NOTE(review): convert_single_example is neither defined nor
        # imported in this module (only convert_single_example_dev is) —
        # verify this name resolves at runtime.
        feature = convert_single_example(line, label2id, FLAGS.max_seq_length, tokenizer)
        input_ids = np.reshape([feature.input_ids], (args.batch_size, FLAGS.max_seq_length))
        input_mask = np.reshape([feature.input_mask], (args.batch_size, FLAGS.max_seq_length))
        segment_ids = np.reshape([feature.segment_ids], (args.batch_size, FLAGS.max_seq_length))
        label_ids =np.reshape([feature.label_ids], (args.batch_size, FLAGS.max_seq_length))
        return input_ids, input_mask, segment_ids, label_ids

    global graph
    with graph.as_default():
        print("going to restore checkpoint")
        # sess.run(tf.global_variables_initializer())
        input_ids_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="input_ids")
        input_mask_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="input_mask")
        label_ids_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="label_ids")
        segment_ids_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="segment_ids")
        bert_config = modeling_bert.BertConfig.from_json_file(args.bert_config_file)
        (total_loss, pred_ids) = create_model(
            bert_config, args.is_training, input_ids_p, input_mask_p, segment_ids_p, label2id,
            label_ids_p, args.use_one_hot_embeddings)
        # Restore the most recent checkpoint from the output directory.
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(args.output_dir))
        tokenizer = tokenization.FullTokenizer(
            vocab_file=args.vocab_file, do_lower_case=FLAGS.do_lower_case)
        # Build the id -> char lookup from the tokenizer vocabulary.
        id2char = tokenizer.inv_vocab
        while True:
            print('input the test sentence:')
            sentence = str(input())
            start = datetime.now()
            # Skip inputs too short to tag meaningfully.
            if len(sentence) < 2:
                print(sentence)
                continue
            # print('your input is:{}'.format(sentence))
            input_ids, input_mask, segment_ids, label_ids = convert(sentence)
            feed_dict = {input_ids_p: input_ids,
                         input_mask_p: input_mask,
                         segment_ids_p:segment_ids,
                         label_ids_p:label_ids}
            # run session get current feed_dict result
            y_pred = sess.run([pred_ids], feed_dict)
            sent_tag = []
            y_pred_clean = []
            input_ids_clean = []
            # Strip [CLS] and [SEP] to recover the valid tag span.
            print([id2char[i] for i in list(np.reshape(input_ids, -1))])
            print(len(list(np.reshape(input_ids, -1))))
            print([id2label[i] for i in list(y_pred[0][0])])
            print(len(list(y_pred[0][0])))
            for index, id in enumerate(list(np.reshape(input_ids, -1))):
                char = id2char[id]
                tag = id2label[list(y_pred[0][0])[index]]
                if char == "[CLS]":
                    continue
                if char == "[SEP]":
                    break
                input_ids_clean.append(id)
                sent_tag.append(tag)
                y_pred_clean.append(list(y_pred[0][0])[index])
            sent_tag = ' '.join(sent_tag)
            print(sentence + '\n' + sent_tag)
            entity = get_entity([sentence], [y_pred_clean], id2label)
            print('predict_result:')
            print(entity)
            print('Time used: {} sec'.format((datetime.now() - start).seconds))
def predict_offline(in_file, out_file):
    """
    Batch offline prediction: read one JSON record ({'text': ...}) per line
    from in_file and stream predictions to out_file in CLUE submission
    format (one {'id': ..., 'label': ...} JSON object per line).

    :param in_file: input path, one JSON object with a 'text' field per line.
    :param out_file: output path for the prediction JSON lines.
    :return: None
    """
    # TODO: predict results from a file
    def convert(line):
        # NOTE(review): convert_single_example is neither defined nor
        # imported in this module (only convert_single_example_dev is) —
        # verify this name resolves at runtime.
        feature = convert_single_example(line, label2id, FLAGS.max_seq_length, tokenizer)
        input_ids = np.reshape([feature.input_ids], (args.batch_size, FLAGS.max_seq_length))
        input_mask = np.reshape([feature.input_mask], (args.batch_size, FLAGS.max_seq_length))
        segment_ids = np.reshape([feature.segment_ids], (args.batch_size, FLAGS.max_seq_length))
        label_ids = np.reshape([feature.label_ids], (args.batch_size, FLAGS.max_seq_length))
        return input_ids, input_mask, segment_ids, label_ids

    global graph
    with graph.as_default():
        print("going to restore checkpoint")
        # sess.run(tf.global_variables_initializer())
        input_ids_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="input_ids")
        input_mask_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="input_mask")
        label_ids_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="label_ids")
        segment_ids_p = tf.placeholder(tf.int32, [args.batch_size, FLAGS.max_seq_length], name="segment_ids")
        bert_config = modeling_bert.BertConfig.from_json_file(args.bert_config_file)
        (total_loss, pred_ids) = create_model(
            bert_config, args.is_training, input_ids_p, input_mask_p, segment_ids_p, label2id,
            label_ids_p, args.use_one_hot_embeddings)
        # Restore the most recent checkpoint from the output directory.
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(args.output_dir))
        tokenizer = tokenization.FullTokenizer(
            vocab_file=args.vocab_file, do_lower_case=FLAGS.do_lower_case)
        # Build the id -> char lookup from the tokenizer vocabulary.
        id2char = tokenizer.inv_vocab
        # Predict each input line and stream results to the output file.
        lines = []  # NOTE(review): never used — candidate for removal
        with open(in_file, 'r', encoding='utf-8') as fr, open(out_file, 'w', encoding='utf-8') as fo:
            count = 0
            for line in fr.readlines():
                json_line = json.loads(line.strip())
                sentence = str(json_line['text'])
                start = datetime.now()
                # print('your input is:{}'.format(sentence))
                input_ids, input_mask, segment_ids, label_ids = convert(sentence)
                feed_dict = {input_ids_p: input_ids,
                             input_mask_p: input_mask,
                             segment_ids_p: segment_ids,
                             label_ids_p: label_ids}
                # run session get current feed_dict result
                y_pred = sess.run([pred_ids], feed_dict)
                sent_tag = []
                y_pred_clean = []
                input_ids_clean = []
                # Strip [CLS] and [SEP] to recover the valid tag span.
                for index, id in enumerate(list(np.reshape(input_ids, -1))):
                    char = id2char[id]
                    tag = id2label[list(y_pred[0][0])[index]]
                    if char == "[CLS]":
                        continue
                    if char == "[SEP]":
                        break
                    input_ids_clean.append(id)
                    sent_tag.append(tag)
                    y_pred_clean.append(list(y_pred[0][0])[index])
                result_words, result_pos = get_result(sentence, sent_tag)
                # Group extracted spans by their predicted label.
                rs = {}
                for w, p in zip(result_words, result_pos):
                    rs[p] = rs.get(p, []) + [w]
                # label -> {entity text -> [span, ...]}
                preds = {}
                for p, ws in rs.items():
                    temp = {}
                    for w in ws:
                        word = sentence[w[0]: w[1] + 1]
                        temp[word] = temp.get(word, []) + [w]
                    preds[p] = temp
                output_line = json.dumps({'id': count, 'label': preds}, ensure_ascii=False) + '\n'
                count += 1
                fo.write(output_line)
def convert_single_example_dev(ex_index, text, label, label2id, max_seq_length,
tokenizer):
"""
将一个样本进行分析,然后将字转化为id, 标签转化为id,然后结构化到InputFeatures对象中
:param ex_index: index
:param example: 一个样本
:param label_list: 标签列表
:param max_seq_length:
:param tokenizer:
:param mode:
:return:
"""
O_index = label2id["O"]
# L: ['B-ORG', 'M-ORG', 'M-ORG', 'M-ORG']
# W: ['中', '共', '中', '央']
textlist = text.split(' ')
labellist = label.split(' ')
tokens = []
labels = []
for i, word in enumerate(textlist):
# 对每个字进行tokenize,返回list
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
else: # 一般不会出现else
labels.append("X")
# 序列截断
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)] # -2 的原因是因为序列需要加一个句首和句尾标志
labels = labels[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]") # 句子开始设置CLS 标志
segment_ids.append(0)
label_ids.append(label2id["[CLS]"]) #
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
label_ids.append(label2id[labels[i]])
ntokens.append("[SEP]") # 句尾添加[SEP] 标志
segment_ids.append(0)
label_ids.append(label2id["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens) # 将序列中的字(ntokens)转化为ID形式
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
| |
last node of the original list.
self_last.set_next(self, mscdll_first)
mscdll_first.set_back(self, self_last)
# if we are to unlink the nodes from the extension list, there is only one
# node, so, just delete its self list links.
if unlink_nodes:
mscdll_first._links.pop(mscdll._id)
# case 3: length of both lists is 1
else:
self_first.set_next(self, mscdll_first)
self_first.set_back(self, mscdll_first)
mscdll_first.set_next(self, self_first)
mscdll_first.set_back(self, self_first)
if unlink_nodes:
mscdll_first._links.pop(mscdll._id)
# in all cases, adjust the length of the original list
self._length += mscdll._length
# and if we unlinked, set the extension list's head to None and length
# to 0, which resets it since it already has no nodes.
if unlink_nodes:
mscdll._head = None
mscdll._length = 0
# done with cases 1, 2 and 3
return
# case 4: both lists have more than one node each
# set both next and back references of the first node in self and last node
# in mscdll's to point to their new correct fields
self_first.set_back(self, mscdll_last)
mscdll_last.set_next(self, self_first)
mscdll_last.set_back(self, mscdll_last.get_back(mscdll))
self_last.set_next(self, mscdll_first)
# do the same for the first node in mscdll and adjust the first node in self's
# back reference accordingly. We do so last as not to break any links.
mscdll_first.set_back(self, self_last)
mscdll_first.set_next(self, mscdll_first.get_next(mscdll))
self_first.set_back(self, mscdll_last)
# adjust length to be the combination of both lists
self._length += mscdll._length
current = mscdll._head.get_next(mscdll)
# the first and last nodes of mscdll already have their correct self
# references in their _links dictionary. All of the middle ones do not.
# Add them in this while loop.
# Also, if we are to unlink all nodes from mscdll, we remove the references
# to that list from all of its nodes.
while current is not mscdll_last:
current._links[self._id] = {}
current.set_next(self, current.get_next(mscdll))
current.set_back(self, current.get_back(mscdll))
next_node = current.get_next(mscdll)
if unlink_nodes:
current._links.pop(mscdll._id)
current = next_node
# if we chose to unlink nodes, head still contains the reference to mscdll.
# Remove that reference as well as the head pointer, and reset the length.
# This way, mscdll will be reset to be empty and all of its nodes will be
# bound to self as an extended list.
if unlink_nodes:
current._links.pop(mscdll._id)
mscdll._head._links.pop(mscdll._id)
mscdll._head = None
mscdll._length = 0
    def split(self, idx: int, splitted_list_name=None):
        """
        Splits the MSCDLL instance into two parts, where the second one starts from the
        node in the original list at the index passed as a parameter.
        The original list is modified. It will contain all of the nodes from index 0 to
        idx, not inclusive. The splitted list will hold all of the nodes from idx onwards.
        Both lists will be independent from each other.
        Parameters:
        idx (int): the node index from where the splitted list will begin.
        splitted_list_name (str|int): the splitted list name (id). If not given, it
        will default to its memory address id.
        Returns:
        (MSCDLL): a reference to the splitted list.
        Raises:
        IndexError: if idx addresses the head node (0 or -length), since
        splitting there would move the entire list.
        """
        if idx == 0 or idx == -self._length:
            raise IndexError('You can only split from node index 1 onwards.')
        # get the target node where we want to split the list at, the node that comes
        # before it and create an instance of MSCDLL to assign the split list to
        target_node = self._get_node_by_idx(idx)
        target_node_back = target_node.get_back(self)
        splitted_list = MSCDLL()
        # If a name for the splitted list was provided, set it
        if splitted_list_name:
            splitted_list._id = splitted_list_name
        while True:
            # once we reach self._head, the splitted list was traversed. Break out
            if target_node is self._head:
                break
            # add the splitted list's link key to _links dictionary in the node and
            # append it to the list
            target_node = splitted_list._nodify(target_node)
            splitted_list.append(target_node)
            # assign the head reference of the splitted_list on the first loop
            if not splitted_list._head:
                splitted_list._head = target_node
            # save a reference of the target node before erasing the original list's
            # dictionary. We need it later to follow the next and back fields
            # correctly while traversing. Once that list is popped, then reassign
            # the temporal target node to target node again to continue the loop.
            temp_target_node = target_node.get_next(self)
            target_node._links.pop(self._id)
            target_node = temp_target_node
        # Once finished with the splitted list, recompose the original one. Set the
        # previous node to the one where the list was split to have its next field
        # pointing to the original list's head. Set the head's back link to that
        # previous node, and adjust self._length accordingly
        target_node_back.set_next(self, self._head)
        self._head.set_back(self, target_node_back)
        if idx > 0:
            # equivalent to self._length = idx
            self._length -= self._length - idx
        else:
            self._length = self._length + idx
        # finally, return a reference to the splitted list
        return splitted_list
def insert(self, node_or_value, position, **kwargs):
"""
Inserts the node or value (which will be converted to a MSCDLLNode) passed as
parameter in the specified index position. If 'overwrite' is passed as a kwarg,
the node found at the given position will be replaced with the node or value
converted to a node instead.
Parameters:
node_or_value (MSCDLLNode|any): the node to insert at the given position,
or to replace with if you are to 'overwrite' instead. If a value is
passed instead of a MSCDLLNode, then it will be automatically converted
to one.
position (int): the index position where the node is to be inserted in, or
where the target node to replace is positioned. Negative indexing is
accepted, but keep in mind that -1 will insert the node before the last
one in the list. To position a node at the end of the list, use len(self)
or 'end'.
kwargs:
name (str|int): the name (id) of the node to be inserted, or to serve as a
replacement if 'overwrite' is set to True.
overwrite (bool): on True, the current node at the index targeted by position
parameter will be replaced with node_or_value. On False, node_or_value
will be inserted at that index instead, shifting all following nodes to
the right by one.
Returns:
(tuple): a tuple containing the index position where the node was inserted and
a reference to the inserted node. If kwargs['overwrite'] is True, then the
tuple will contain the index where the node was inserted, a reference to
the replaced node, and a reference to the replacement node now in the list.
"""
if type(position) is str:
if position.lower() == 'end':
position = self._length
assert type(position) is int, \
'position must be an integer value, or "end".'
# overwrite OFF will insert the node in the given position index, shifting
# all othernodes to the right.
# overwrite ON will replace the node in the given position index.
overwrite = False
# if node_or_value is anything but a MSCDLLNode instance, convert it to one.
node = self._nodify(node_or_value)
if 'name' in kwargs:
node._id = kwargs['name']
if 'overwrite' in kwargs:
if kwargs['overwrite']:
overwrite = True
# if the list is empty or we are appending to it
if not self._head or position == self._length or (position == -1 and overwrite):
# if the list is empty, set this node as its first one.
# head, next and back references will point to it.
if not self._head:
self._head = node
self._length +=1
node.set_back(self, self._head)
node.set_next(self, self._head)
# if it is not empty, we are either appending or overwriting at
# position -1
else:
# if we are overwriting at position -1
if (position == -1 and overwrite) or (position == self._length and overwrite):
# if there is only 1 node in the list, just replace it with
# this new node. Length of the list is unmodified.
if self._length < 2:
node_to_replace = self._head
self._head = node
self._head.set_next(self, node)
self._head.set_back(self, node)
# the list contains more than one node. Adjust the new node's
# next link to head, and its back to node at index -2. Then,
# unlink the node at | |
<filename>test/functional/feature_futures.py
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) DeFi Blockchain Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""Test Futures contract RPC."""
from test_framework.test_framework import DefiTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from decimal import Decimal
import time
def sort_history(entry):
    """Sort key: order future-swap history records by transaction index."""
    return entry['txn']
class FuturesTest(DefiTestFramework):
    def set_test_params(self):
        # Single regtest node on a clean chain. All forks activate at height 1
        # except fortcanningroad (which gates DFIP2203) at height 150, so the
        # test can exercise pre-activation failure paths first.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.extra_args = [['-txnotokens=0', '-amkheight=1', '-bayfrontheight=1', '-eunosheight=1', '-fortcanningheight=1', '-fortcanninghillheight=1', '-fortcanningroadheight=150', '-subsidytest=1']]
    def run_test(self):
        """Drive the full DFIP2203 (futures contract) test sequence."""
        # Mature the chain past coinbase maturity before any token setup.
        self.nodes[0].generate(101)
        # Set up oracles and tokens
        self.setup_test()
        # Test setting of futures Gov vars
        self.futures_setup()
        # Test dToken to DUSD
        self.test_dtoken_to_dusd()
        # Test DUSD to dToken
        self.test_dusd_to_dtoken()
        # Test futures block range
        self.check_swap_block_range()
        # Test multiple swaps per account
        self.check_multiple_swaps()
        # Test withdrawal
        self.check_withdrawals()
        # Test Satoshi swaps
        self.check_minimum_swaps()
        # Test changing Gov vars
        self.check_gov_var_change()
        # Test refunding of unpaid futures
        self.unpaid_contract()
        # Test list future swap history
        self.rpc_history()
def setup_test(self):
# Store addresses
self.address = self.nodes[0].get_genesis_keys().ownerAuthAddress
self.contract_address = 'bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpsqgljc'
# Store interval
self.futures_interval = 25
# RPC history checks
self.list_history = []
# Set token symbols
self.symbolDFI = 'DFI'
self.symbolDUSD = 'DUSD'
self.symbolTSLA = 'TSLA'
self.symbolGOOGL = 'GOOGL'
self.symbolTWTR = 'TWTR'
self.symbolMSFT = 'MSFT'
self.symbolBTC = 'BTC'
# Setup oracle
oracle_address = self.nodes[0].getnewaddress("", "legacy")
price_feeds = [
{"currency": "USD", "token": self.symbolDFI},
{"currency": "USD", "token": self.symbolTSLA},
{"currency": "USD", "token": self.symbolGOOGL},
{"currency": "USD", "token": self.symbolTWTR},
{"currency": "USD", "token": self.symbolMSFT}
]
self.oracle_id = self.nodes[0].appointoracle(oracle_address, price_feeds, 10)
self.nodes[0].generate(1)
# Create Oracle prices
self.price_tsla = 870
self.price_googl = 2600
self.price_twtr = 37
self.price_msft = 295
# Calculate future swap prices
self.prices = []
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_tsla)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_googl)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_twtr)) * Decimal('0.95000000')))
})
self.prices.append({
'premiumPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('1.05000000'))),
'discountPrice': Decimal(str(Decimal(str(self.price_msft)) * Decimal('0.95000000')))
})
# Feed oracle
oracle_prices = [
{"currency": "USD", "tokenAmount": f'{self.price_tsla}@{self.symbolTSLA}'},
{"currency": "USD", "tokenAmount": f'{self.price_googl}@{self.symbolGOOGL}'},
{"currency": "USD", "tokenAmount": f'{self.price_twtr}@{self.symbolTWTR}'},
{"currency": "USD", "tokenAmount": f'{self.price_msft}@{self.symbolMSFT}'},
]
self.nodes[0].setoracledata(self.oracle_id, int(time.time()), oracle_prices)
self.nodes[0].generate(10)
# Set up non-loan token for failure test
self.nodes[0].createtoken({
"symbol": self.symbolBTC,
"name": self.symbolBTC,
"isDAT": True,
"collateralAddress": self.address
})
self.nodes[0].generate(1)
# Setup loan tokens
self.nodes[0].setloantoken({
'symbol': self.symbolDUSD,
'name': self.symbolDUSD,
'fixedIntervalPriceId': f'{self.symbolDUSD}/USD',
'mintable': True,
'interest': 0})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTSLA,
'name': self.symbolTSLA,
'fixedIntervalPriceId': f'{self.symbolTSLA}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolGOOGL,
'name': self.symbolGOOGL,
'fixedIntervalPriceId': f'{self.symbolGOOGL}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolTWTR,
'name': self.symbolTWTR,
'fixedIntervalPriceId': f'{self.symbolTWTR}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
self.nodes[0].setloantoken({
'symbol': self.symbolMSFT,
'name': self.symbolMSFT,
'fixedIntervalPriceId': f'{self.symbolMSFT}/USD',
'mintable': True,
'interest': 1})
self.nodes[0].generate(1)
# Set token ids
self.idDUSD = list(self.nodes[0].gettoken(self.symbolDUSD).keys())[0]
self.idTSLA = list(self.nodes[0].gettoken(self.symbolTSLA).keys())[0]
self.idGOOGL = list(self.nodes[0].gettoken(self.symbolGOOGL).keys())[0]
self.idTWTR = list(self.nodes[0].gettoken(self.symbolTWTR).keys())[0]
self.idMSFT = list(self.nodes[0].gettoken(self.symbolMSFT).keys())[0]
self.idBTC = list(self.nodes[0].gettoken(self.symbolBTC).keys())[0]
# Mint tokens for swapping
self.nodes[0].minttokens([f'100000@{self.idDUSD}'])
self.nodes[0].minttokens([f'100000@{self.idTSLA}'])
self.nodes[0].minttokens([f'100000@{self.idGOOGL}'])
self.nodes[0].minttokens([f'<EMAIL>}'])
self.nodes[0].minttokens([f'<EMAIL>}'])
self.nodes[0].generate(1)
def futures_setup(self):
# Move to fork block
self.nodes[0].generate(150 - self.nodes[0].getblockcount())
# Create addresses for futures
address = self.nodes[0].getnewaddress("", "legacy")
# Try futureswap before feature is active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1<EMAIL>}')
# Set partial futures attributes
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Try futureswap before feature is fully active
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1@{self.<EMAIL>}')
# Set all futures attributes but set active to false
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'false','v0/params/dfip2203/reward_pct':'0.05','v0/params/dfip2203/block_period':f'{self.futures_interval}'}})
self.nodes[0].generate(1)
# Try futureswap with DFIP2203 active set to false
assert_raises_rpc_error(-32600, "DFIP2203 not currently active", self.nodes[0].futureswap, address, f'1<EMAIL>}')
# Fully enable DFIP2203
self.nodes[0].setgov({"ATTRIBUTES":{'v0/params/dfip2203/active':'true'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result['v0/params/dfip2203/active'], 'true')
assert_equal(result['v0/params/dfip2203/reward_pct'], '0.05')
assert_equal(result['v0/params/dfip2203/block_period'], str(self.futures_interval))
# Disable DUSD
self.nodes[0].setgov({"ATTRIBUTES":{f'v0/token/{str(self.idDUSD)}/dfip2203':'false'}})
self.nodes[0].generate(1)
# Verify Gov vars
result = self.nodes[0].getgov('ATTRIBUTES')['ATTRIBUTES']
assert_equal(result[f'v0/token/{self.idDUSD}/dfip2203'], 'false')
# Check futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
assert_equal(next_futures_block, self.nodes[0].getfutureswapblock())
def test_dtoken_to_dusd(self):
    """Swap one unit each of four dTokens (MSFT/GOOGL/TSLA/TWTR) into DUSD.

    Creates pending futureswaps, checks the pending/burn/Gov accounting
    before the swap block, advances to the next futures block, and then
    verifies minted DUSD, contract balances, account history ordering, and
    the resulting per-address DUSD payouts at the oracle discount price.

    NOTE(review): several token strings below look redacted/garbled
    ('<EMAIL>' placeholders) — restore them from upstream before running.
    """
    # Create addresses for futures
    address_msft = self.nodes[0].getnewaddress("", "legacy")
    address_googl = self.nodes[0].getnewaddress("", "legacy")
    address_tsla = self.nodes[0].getnewaddress("", "legacy")
    address_twtr = self.nodes[0].getnewaddress("", "legacy")
    # Fund addresses
    self.nodes[0].accounttoaccount(self.address, {address_msft: f'1@{self.symbolMSFT}'})
    self.nodes[0].accounttoaccount(self.address, {address_googl: f'1@{self.symbolGOOGL}'})
    self.nodes[0].accounttoaccount(self.address, {address_tsla: f'1@{self.symbolTSLA}'})
    self.nodes[0].accounttoaccount(self.address, {address_twtr: f'1@{self.symbolTWTR}'})
    self.nodes[0].generate(1)
    # Test futureswap failures
    assert_raises_rpc_error(-32600, f'Could not get source loan token {self.idBTC}', self.nodes[0].futureswap, self.address, f'1@{self.<EMAIL>BTC}')
    assert_raises_rpc_error(-32600, f'DFIP2203 currently disabled for token {self.idDUSD}', self.nodes[0].futureswap, self.address, f'1@{self.<EMAIL>}', int(self.idDUSD))
    assert_raises_rpc_error(-32600, f'Could not get destination loan token {self.idBTC}. Set valid destination.', self.nodes[0].futureswap, self.address, f'1@{self.<EMAIL>}', int(self.idBTC))
    assert_raises_rpc_error(-32600, 'Destination should not be set when source amount is a dToken', self.nodes[0].futureswap, self.address, f'1@{self.<EMAIL>A}', int(self.idBTC))
    assert_raises_rpc_error(-32600, 'amount 0.00000000 is less than 1.00000000', self.nodes[0].futureswap, address_twtr, f'1@{self.<EMAIL>A}')
    # Create user futures contracts
    self.nodes[0].futureswap(address_twtr, f'1@{self.<EMAIL>}')
    self.nodes[0].generate(1)
    self.nodes[0].futureswap(address_tsla, f'1@{self.<EMAIL>TSLA}')
    self.nodes[0].generate(1)
    self.nodes[0].futureswap(address_googl, f'1@{self.<EMAIL>GL}')
    self.nodes[0].generate(1)
    self.nodes[0].futureswap(address_msft, f'1@{self.<EMAIL>}')
    self.nodes[0].generate(1)
    # List user futures contracts
    result = self.nodes[0].listpendingfutureswaps()
    assert_equal(result[0]['owner'], address_msft)
    assert_equal(result[0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
    assert_equal(result[0]['destination'], self.symbolDUSD)
    assert_equal(result[1]['owner'], address_googl)
    assert_equal(result[1]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
    assert_equal(result[1]['destination'], self.symbolDUSD)
    assert_equal(result[2]['owner'], address_tsla)
    assert_equal(result[2]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
    assert_equal(result[2]['destination'], self.symbolDUSD)
    assert_equal(result[3]['owner'], address_twtr)
    assert_equal(result[3]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
    assert_equal(result[3]['destination'], self.symbolDUSD)
    # Get user MSFT futures swap by address
    result = self.nodes[0].getpendingfutureswaps(address_msft)
    assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolMSFT}')
    assert_equal(result['values'][0]['destination'], self.symbolDUSD)
    # Get user GOOGL futures contracts by address
    result = self.nodes[0].getpendingfutureswaps(address_googl)
    assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolGOOGL}')
    assert_equal(result['values'][0]['destination'], self.symbolDUSD)
    # Get user TSLA futures contracts by address
    result = self.nodes[0].getpendingfutureswaps(address_tsla)
    assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTSLA}')
    assert_equal(result['values'][0]['destination'], self.symbolDUSD)
    # Get user TWTR futures contracts by address
    result = self.nodes[0].getpendingfutureswaps(address_twtr)
    assert_equal(result['values'][0]['source'], f'{Decimal("1.00000000")}@{self.symbolTWTR}')
    assert_equal(result['values'][0]['destination'], self.symbolDUSD)
    # Check DFI2203 amounts do not show up as burns yet
    result = self.nodes[0].getburninfo()
    assert_equal(result['dfip2203'], [])
    # Check DFI2203 address on listgovs, current shows pending, burn should be empty.
    result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
    assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.00000000@{self.<EMAIL>SLA}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.0<EMAIL>}'])
    assert('v0/live/economy/dfip2203_burned' not in result)
    assert('v0/live/economy/dfip2203_minted' not in result)
    # Get token total minted before future swap
    total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
    # Move to next futures block
    next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
    self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
    # Check total minted incremented as expected
    new_total_dusd = Decimal(self.nodes[0].gettoken(self.idDUSD)[self.idDUSD]['minted'])
    assert_equal(total_dusd + self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"], new_total_dusd)
    # Check TXN ordering: swaps execute in descending txn order within the block
    txn_first = 4294967295
    result = self.nodes[0].listaccounthistory('all', {"maxBlockHeight":self.nodes[0].getblockcount(), 'depth':0, 'txtype':'q'})
    result.sort(key = sort_history, reverse = True)
    for result_entry in result:
        assert_equal(result_entry['txn'], txn_first)
        txn_first -= 1
    # Pending futures should now be empty
    result = self.nodes[0].listpendingfutureswaps()
    assert_equal(len(result), 0)
    result = self.nodes[0].getpendingfutureswaps(address_msft)
    assert_equal(len(result['values']), 0)
    result = self.nodes[0].getpendingfutureswaps(address_googl)
    assert_equal(len(result['values']), 0)
    result = self.nodes[0].getpendingfutureswaps(address_tsla)
    assert_equal(len(result['values']), 0)
    result = self.nodes[0].getpendingfutureswaps(address_twtr)
    assert_equal(len(result['values']), 0)
    # Check contract address
    result = self.nodes[0].getaccount(self.contract_address)
    assert_equal(result, [f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.symbolTWTR}', f'1.00000000@{self.<EMAIL>FT}'])
    # Check DFI2203 address on listgovs
    result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
    assert_equal(result['v0/live/economy/dfip2203_current'], [f'1.0000000<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}'])
    # Check DFI2203 address on getburninfo
    result = self.nodes[0].getburninfo()
    assert_equal(result['dfip2203'], [f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>GOOGL}', f'1.00000000@{self.<EMAIL>TWTR}', f'1.00000000@{self.symbolMSFT}'])
    # Check that futures have been executed
    result = self.nodes[0].getaccount(address_msft)
    assert_equal(result, [f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_googl)
    assert_equal(result, [f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_tsla)
    assert_equal(result, [f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'])
    result = self.nodes[0].getaccount(address_twtr)
    assert_equal(result, [f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'])
    # Populate RPC check
    self.list_history.append({'height': self.nodes[0].getblockcount(), 'swaps': [
        {'address': address_tsla, 'destination': f'{self.prices[0]["discountPrice"]}@{self.symbolDUSD}'},
        {'address': address_googl, 'destination': f'{self.prices[1]["discountPrice"]}@{self.symbolDUSD}'},
        {'address': address_twtr, 'destination': f'{self.prices[2]["discountPrice"]}@{self.symbolDUSD}'},
        {'address': address_msft, 'destination': f'{self.prices[3]["discountPrice"]}@{self.symbolDUSD}'},
    ]})
def test_dusd_to_dtoken(self):
# Create addresses for futures
address_tsla = self.nodes[0].getnewaddress("", "legacy")
address_twtr = self.nodes[0].getnewaddress("", "legacy")
address_googl = self.nodes[0].getnewaddress("", "legacy")
address_msft = self.nodes[0].getnewaddress("", "legacy")
# Fund addresses
self.nodes[0].accounttoaccount(self.address, {address_tsla: f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_googl: f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_twtr: f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].accounttoaccount(self.address, {address_msft: f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}'})
self.nodes[0].generate(1)
# Create user futures contracts
self.nodes[0].futureswap(address_msft, f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}', self.idMSFT)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_twtr, f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}', self.idTWTR)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_googl, f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}', self.symbolGOOGL)
self.nodes[0].generate(1)
self.nodes[0].futureswap(address_tsla, f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}', self.symbolTSLA)
self.nodes[0].generate(1)
# List user futures contracts
result = self.nodes[0].listpendingfutureswaps()
assert_equal(result[0]['owner'], address_tsla)
assert_equal(result[0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[0]['destination'], self.symbolTSLA)
assert_equal(result[1]['owner'], address_googl)
assert_equal(result[1]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[1]['destination'], self.symbolGOOGL)
assert_equal(result[2]['owner'], address_twtr)
assert_equal(result[2]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[2]['destination'], self.symbolTWTR)
assert_equal(result[3]['owner'], address_msft)
assert_equal(result[3]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result[3]['destination'], self.symbolMSFT)
# Get user TSLA futures swap by address
result = self.nodes[0].getpendingfutureswaps(address_tsla)
assert_equal(result['values'][0]['source'], f'{self.prices[0]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTSLA)
# Get user GOOGL futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_googl)
assert_equal(result['values'][0]['source'], f'{self.prices[1]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolGOOGL)
# Get user TWTR futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_twtr)
assert_equal(result['values'][0]['source'], f'{self.prices[2]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolTWTR)
# Get user MSFT futures contracts by address
result = self.nodes[0].getpendingfutureswaps(address_msft)
assert_equal(result['values'][0]['source'], f'{self.prices[3]["premiumPrice"]}@{self.symbolDUSD}')
assert_equal(result['values'][0]['destination'], self.symbolMSFT)
# Check new DFI2203 amounts do not show up as burns yet
result = self.nodes[0].getburninfo()
assert_equal(result['dfip2203'], [f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}'])
# Check DFI2203 address on listgovs, current shows pending if any, burned shows
# deposits from executed swaps and minted shows output from executed swaps.
result = self.nodes[0].listgovs()[8][0]['ATTRIBUTES']
assert_equal(result['v0/live/economy/dfip2203_current'], [f'3992.10000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}'])
assert_equal(result['v0/live/economy/dfip2203_burned'], [f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}', f'1.00000000@{self.<EMAIL>}'])
assert_equal(result['v0/live/economy/dfip2203_minted'], [f'{self.prices[0]["discountPrice"] + self.prices[1]["discountPrice"] + self.prices[2]["discountPrice"] + self.prices[3]["discountPrice"]}@{self.symbolDUSD}'])
# Get token total minted before future swap
total_tsla = Decimal(self.nodes[0].gettoken(self.idTSLA)[self.idTSLA]['minted'])
total_googl = Decimal(self.nodes[0].gettoken(self.idGOOGL)[self.idGOOGL]['minted'])
total_twtr = Decimal(self.nodes[0].gettoken(self.idTWTR)[self.idTWTR]['minted'])
total_msft = Decimal(self.nodes[0].gettoken(self.idMSFT)[self.idMSFT]['minted'])
# Move to next futures block
next_futures_block = self.nodes[0].getblockcount() + (self.futures_interval - (self.nodes[0].getblockcount() % self.futures_interval))
self.nodes[0].generate(next_futures_block - self.nodes[0].getblockcount())
| |
# H20.12.03/R02.02.11 by <NAME>
import math, sys
def get_file(channel, mode):
    """Return the Python file object for an ALGOL I/O channel number.

    Channels 0/1/2 are pre-mapped to stdin/stdout/stderr in `channels`;
    any other channel is treated as a raw OS file descriptor, wrapped
    with os.fdopen on first use and cached for subsequent calls.
    """
    f = channels.get(channel)
    if f is None:
        # Bug fix: this module only does `import math, sys` at the top,
        # so `os` was an unresolved name here. Import locally to keep
        # the fix self-contained.
        import os
        channels[channel] = f = os.fdopen(channel, mode)
    return f
channels = {0: sys.stdin, 1: sys.stdout, 2: sys.stderr}
def inchar(channel, s):
    """Read one character from `channel`.

    Returns the 1-based position of that character in `s`, or 0 when the
    character does not occur in `s` (ALGOL convention).
    """
    stream = get_file(channel, "r")
    first = stream.read(1)
    return s.find(first) + 1
def outchar(interp):
    """Build the native `outchar` callback bound to interpreter `interp`."""
    def _outchar(channel, s, i):
        # ALGOL string indices are 1-based, hence i - 1.
        out = get_file(channel, "w")
        c = s[i - 1]
        out.write(c)
        if channel == 1:
            # Track whether stdout currently sits at the start of a line.
            interp.in_new_line = (c == '\n')
    return _outchar
def maxint():
    """Largest native integer: sys.maxint on Python 2, sys.maxsize on Python 3."""
    return getattr(sys, "maxint", sys.maxsize)
def entier(e):
    """ALGOL `entier`: the largest integer not greater than e."""
    floored = math.floor(e)
    # int() keeps the result an int on Python 2, where floor returns float.
    return int(floored)
# The following is based on "Appendix 2 The environmental block" in
# "Modified Report on the Algorithmic Language ALGOL 60" (1976)
# with a correction to "ininteger" procedure.
PRELUDE = """
begin
comment Simple functions;
real procedure abs(E);
value E; real E;
abs := if E >= 0.0 then E else -E;
integer procedure iabs(E);
value E; integer E;
iabs := if E >= 0 then E else -E;
integer procedure sign(E);
value E; integer E;
sign := if E > 0.0 then 1
else if E < 0.0 then -1 else 0;
integer procedure entier(E);
value E; real E;
comment entier := largest integer not greater than E,
i.e. E - 1 < entier <= E;
entier := _nativecall(`Prelude.entier', E);
comment Mathematical functions;
real procedure sqrt(E);
value E; real E;
if E < 0.0 then
fault(`negative sqrt', E)
else
sqrt := E**0.5;
real procedure sin(E);
value E; real E;
comment sin := sine of E radians;
sin := _nativecall(`Prelude.math.sin', E);
real procedure cos(E);
value E; real E;
cos := _nativecall(`Prelude.math.cos', E);
real procedure arctan(E);
value E; real E;
arctan := _nativecall(`Prelude.math.atan', E);
real procedure ln(E);
value E; real E;
comment ln := natural logarithm of E;
if E <= 0.0 then
fault(`ln not positive', E)
else
ln := _nativecall(`Prelude.math.log', E);
real procedure exp(E);
value E; real E;
comment exp := exponential function of E;
if E > ln(maxreal) then
fault(`overflow on exp', E)
else
exp := _nativecall(`Prelude.math.exp', E);
comment Terminating procedures;
procedure stop;
go to _stoplabel;
procedure fault(str, r);
value r; string str; real r;
begin
print(`fault', `', str, `', r);
stop
end fault;
comment Input/output procedures;
procedure inchar(channel, str, int);
value channel;
integer channel, int; string str;
comment Set int to value corresponding to the first position in
str of current character on channel. Set int to zero if
character not in str. Move channel pointer to next character;
int := _nativecall(`Prelude.inchar', channel, str);
procedure outchar(channel, str, int);
value channel, int;
integer channel, int; string str;
comment Pass to channel the character in str, corresponding to
the value of int;
if int < 1 or int > length(str) then
fault(`character not in string', int)
else
_nativecall(`Prelude.outchar(self)', channel, str, int);
integer procedure length(str);
string str;
comment length := number of characters in the string;
length := _nativecall(`len', str);
procedure outstring(channel, str);
value channel;
integer channel; string str;
begin
integer m, n;
n := length(str);
for m := 1 step 1 until n do
outchar(channel, str, m)
end outstring;
procedure outterminator(channel);
value channel; integer channel;
comment outputs a terminator for use after a number;
outchar(channel, ` ', 1);
procedure ininteger(channel, int);
value channel; integer channel, int;
comment int takes the value of an integer;
begin
integer k, m;
Boolean b, d;
integer procedure ins;
begin
integer n;
comment read one character, converting newlines to spaces;
inchar(channel, `0123456789-+ ;`NL'', n);
ins := if n = 15 then 13 else n
end ins;
comment pass over initial spaces or newlines;
for k := ins while k = 13 do
;
comment fault anything except sign or digit;
if k = 0 or k > 13 then
fault(`invalid character', k);
if k > 10 then
begin
comment sign found, d indicates digit found, b
indicates the sign, m is value so far;
d := false;
b := k /= 11;
m := 0
end
else
begin
d := b := true;
m := k - 1
end;
for k := ins while k > 0 and k < 11 do
begin
comment deal with further digits;
m := 10 * m + k - 1;
d := true
end k loop;
comment fault if not digit has been found, or the terminator
was invalid;
if d impl k < 13 then
fault(`invalid character', k);
int := if b then m else -m
end ininteger;
procedure outinteger(channel, int);
value channel, int;
integer channel, int;
comment Passes to channel the characters representing the value
of int, followed by a terminator;
begin
procedure digits(int);
value int; integer int;
begin
integer j;
comment use recursion to evaluate digits from right to left,
but print them from left to right;
j := int div 10;
int := int - 10 * j;
if j /= 0 then
digits(j);
outchar(channel, `0123456789', int + 1)
end digits;
if int < 0 then
begin
outchar(channel, `-', 1);
int := -int
end;
digits(int); outterminator(channel)
end outinteger;
procedure inreal(channel, re);
value channel;
integer channel; real re;
begin
integer j, k, m;
real r, s;
Boolean b, d;
integer procedure ins;
begin
integer n;
comment read one character, converting newlines to spaces;
inchar(channel, `0123456789-+.e ;`NL'', n);
ins := if n = 17 then 15 else n
end ins;
comment pass over initial spaces or newlines;
for k := ins while k = 15 do
;
comment fault anything except sign, digit, point or ten;
if k = 0 or 15 < k then
fault(`invalid character', k);
b := k /= 11;
d := true;
m := 1;
j := if k < 11 then 2 else iabs(k + k - 23);
r := if k < 11 then k - 1 else 0.0;
if k /= 14 then
begin
comment ten not found, Continue until ten or terminator found;
for k := ins while k < 14 do
begin
comment fault for non-numerical character, sign or
second point;
if k = 0 or k = 11 or k = 12 or k = 13 and j > 2 then
fault(`invalid character', k);
comment deal with digit unless it cannot affect value;
if d then
begin
if k = 13 then
begin comment point found;
j := 3
end
else
begin
if j < 3 then
begin comment deal with digit before point;
r := 10.0 * r + k - 1
end
else
begin comment deal with digit after point;
s := 10.0 ** (-m);
m := m + 1;
r := r + s * (k - 1);
comment if r = r + s to machine accuracy,
further digits cannot affect value;
d := r /= r + s
end;
if j = 1 or j = 3 then j := j + 1
end
end if d
end k loop;
comment fault if no digit has been found;
if j = 1 and k /= 14 or j = 3 then
fault(`invalid character', k);
end;
if k = 14 then
begin comment deal with exponent part;
ininteger(channel, m);
r := (if j = 1 or j = 5 then 1.0 else r) * 10.0 ** m
end;
re := if b then r else -r
end inreal;
procedure outreal(channel, re);
value channel, re;
integer channel; real re;
comment Passes to channel the characters representing the value
of re, followed by a terminator;
begin
integer n;
comment n gives number of digits to print;
n := entier(1.0 - ln(epsilon) / ln(10.0));
if re < 0.0 then
begin
outchar(channel, `-', 1);
re := - re
end;
if re < minreal then
outstring(channel, 0.0)
else
begin
integer j, k, m, p;
Boolean float, nines;
comment m will hold number of places point must be moved to
standardise value of re to have one digit before point;
| |
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
The collection of utility functions/classes are inspired by their original
implementation of the Tensorflow Extended team, which can be found here:
https://github.com/tensorflow/tfx/blob/master/tfx/dsl/component/experimental
/decorators.py
This version is heavily adjusted to work with the Pipeline-Step paradigm which
is proposed by ZenML.
"""
from __future__ import absolute_import, division, print_function
import inspect
import json
import sys
import typing
from typing import (
Any,
Callable,
ClassVar,
Dict,
ItemsView,
Iterator,
KeysView,
List,
Optional,
Sequence,
Set,
Type,
ValuesView,
)
import pydantic
from tfx.dsl.component.experimental.decorators import _SimpleComponent
from tfx.dsl.components.base.base_executor import BaseExecutor
from tfx.dsl.components.base.executor_spec import ExecutorClassSpec
from tfx.orchestration.portable import outputs_utils
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import component_spec
from tfx.types.channel import Channel
from tfx.utils import json_utils
from zenml.artifacts.base_artifact import BaseArtifact
from zenml.exceptions import MissingStepParameterError, StepInterfaceError
from zenml.io import fileio
from zenml.logger import get_logger
from zenml.materializers.base_materializer import BaseMaterializer
from zenml.steps.base_step_config import BaseStepConfig
from zenml.steps.step_context import StepContext
from zenml.steps.step_environment import StepEnvironment
from zenml.steps.step_output import Output
from zenml.utils import source_utils
logger = get_logger(__name__)

# Well-known attribute / parameter names shared across the step machinery.
STEP_INNER_FUNC_NAME: str = "entrypoint"
# Output name used when a step returns a single, unnamed value.
SINGLE_RETURN_OUT_NAME: str = "output"
PARAM_STEP_NAME: str = "step_name"
PARAM_ENABLE_CACHE: str = "enable_cache"
PARAM_PIPELINE_PARAMETER_NAME: str = "pipeline_parameter_name"
PARAM_CREATED_BY_FUNCTIONAL_API: str = "created_by_functional_api"
PARAM_CUSTOM_STEP_OPERATOR: str = "custom_step_operator"
# Prefix marking execution parameters internal to ZenML; such parameters are
# stripped from exec_properties before the user function is invoked.
INTERNAL_EXECUTION_PARAMETER_PREFIX: str = "zenml-"
INSTANCE_CONFIGURATION: str = "INSTANCE_CONFIGURATION"
OUTPUT_SPEC: str = "OUTPUT_SPEC"
def do_types_match(type_a: Type[Any], type_b: Type[Any]) -> bool:
    """Check whether two types are considered matching.

    Args:
        type_a: First type to compare.
        type_b: Second type to compare.

    Returns:
        True if the types match, otherwise False.
    """
    # TODO [ENG-158]: Check more complicated cases where type_a can be a
    # sub-type of type_b
    types_equal = type_a == type_b
    return types_equal
def resolve_type_annotation(obj: Any) -> Any:
    """Return the concrete class behind a generic alias from `typing`.

    For example `typing.Dict[str, int]` resolves to the concrete class
    `dict`. Anything that is not a generic typing alias is returned
    unchanged.
    """
    is_alias = isinstance(obj, typing._GenericAlias)  # type: ignore[attr-defined]
    return obj.__origin__ if is_alias else obj
def generate_component_spec_class(
    step_name: str,
    input_spec: Dict[str, Type[BaseArtifact]],
    output_spec: Dict[str, Type[BaseArtifact]],
    execution_parameter_names: Set[str],
) -> Type[component_spec.ComponentSpec]:
    """Generates a TFX component spec class for a ZenML step.

    Args:
        step_name: Name of the step for which the component will be created.
        input_spec: Input artifacts of the step.
        output_spec: Output artifacts of the step.
        execution_parameter_names: Execution parameter names of the step.

    Returns:
        A TFX component spec class.
    """
    # Inputs/outputs become TFX channels; execution parameters are passed
    # through as JSON-encoded strings.
    spec_attributes = {
        "INPUTS": {
            name: component_spec.ChannelParameter(type=artifact_type)
            for name, artifact_type in input_spec.items()
        },
        "OUTPUTS": {
            name: component_spec.ChannelParameter(type=artifact_type)
            for name, artifact_type in output_spec.items()
        },
        "PARAMETERS": {
            name: component_spec.ExecutionParameter(type=str)  # type: ignore[no-untyped-call] # noqa
            for name in execution_parameter_names
        },
    }
    return type(
        f"{step_name}_Spec",
        (component_spec.ComponentSpec,),
        spec_attributes,
    )
def generate_component_class(
    step_name: str,
    step_module: str,
    input_spec: Dict[str, Type[BaseArtifact]],
    output_spec: Dict[str, Type[BaseArtifact]],
    execution_parameter_names: Set[str],
    step_function: Callable[..., Any],
    materializers: Dict[str, Type[BaseMaterializer]],
) -> Type["_ZenMLSimpleComponent"]:
    """Generates a TFX component class for a ZenML step.

    Args:
        step_name: Name of the step for which the component will be created.
        step_module: Module in which the step class is defined.
        input_spec: Input artifacts of the step.
        output_spec: Output artifacts of the step.
        execution_parameter_names: Execution parameter names of the step.
        step_function: The actual function to execute when running the step.
        materializers: Materializer classes for all outputs of the step.

    Returns:
        A TFX component class.
    """
    spec_class = generate_component_spec_class(
        step_name=step_name,
        input_spec=input_spec,
        output_spec=output_spec,
        execution_parameter_names=execution_parameter_names,
    )

    # Build an executor subclass carrying the user's function, the output
    # materializers and the step name.
    executor_name = f"{step_name}_Executor"
    executor_attrs = {
        "_FUNCTION": staticmethod(step_function),
        "__module__": step_module,
        "materializers": materializers,
        PARAM_STEP_NAME: step_name,
    }
    executor_cls = type(executor_name, (_FunctionExecutor,), executor_attrs)

    # Register the executor in the module that defined the step so it can be
    # resolved by import path later.
    setattr(sys.modules[step_module], executor_name, executor_cls)

    component_attrs = {
        "SPEC_CLASS": spec_class,
        "EXECUTOR_SPEC": ExecutorClassSpec(executor_class=executor_cls),
        "__module__": step_module,
    }
    return type(step_name, (_ZenMLSimpleComponent,), component_attrs)
class _PropertyDictWrapper(json_utils.Jsonable):
    """Read-only, dict-like wrapper around the inputs/outputs of TFX nodes.

    Setting properties is not implemented. Internal class: no backwards
    compatibility guarantees.

    Code Credit: https://github.com/tensorflow/tfx/blob
    /51946061ae3be656f1718a3d62cd47228b89b8f4/tfx/types/node_common.py
    """

    def __init__(
        self,
        data: Dict[str, Channel],
        compat_aliases: Optional[Dict[str, str]] = None,
    ):
        """Stores the wrapped data and optional compatibility aliases.

        Args:
            data: The data to be wrapped.
            compat_aliases: Compatibility aliases to support deprecated keys.
        """
        self._data = data
        self._compat_aliases = compat_aliases or {}

    def __iter__(self) -> Iterator[str]:
        """Iterates over the keys of the wrapped dictionary."""
        return iter(self._data)

    def __getitem__(self, key: str) -> Channel:
        """Looks up a value, translating deprecated keys via the aliases."""
        return self._data[self._compat_aliases.get(key, key)]

    def __getattr__(self, key: str) -> Channel:
        """Attribute-style access to the wrapped dictionary."""
        key = self._compat_aliases.get(key, key)
        try:
            return self._data[key]
        except KeyError:
            raise AttributeError

    def __repr__(self) -> str:
        """Mirrors the repr of the wrapped dictionary."""
        return repr(self._data)

    def get_all(self) -> Dict[str, Channel]:
        """Returns the underlying dictionary."""
        return self._data

    def keys(self) -> KeysView[str]:
        """Key view of the wrapped dictionary."""
        return self._data.keys()

    def values(self) -> ValuesView[Channel]:
        """Value view of the wrapped dictionary."""
        return self._data.values()

    def items(self) -> ItemsView[str, Channel]:
        """Item view of the wrapped dictionary."""
        return self._data.items()
class _ZenMLSimpleComponent(_SimpleComponent):
    """Simple ZenML TFX component with outputs overridden."""

    @property
    def outputs(self) -> _PropertyDictWrapper:  # type: ignore[override]
        """Returns the spec outputs wrapped in a read-only dict wrapper."""
        wrapped = _PropertyDictWrapper(self.spec.outputs)
        return wrapped
class _FunctionExecutor(BaseExecutor):
    """Base TFX Executor class which is compatible with ZenML steps"""

    # Placeholder; each generated executor subclass overrides this with the
    # user's step function (see generate_component_class).
    _FUNCTION = staticmethod(lambda: None)
    # Per-output materializer classes; set on generated subclasses by
    # generate_component_class.
    materializers: ClassVar[
        Optional[Dict[str, Type["BaseMaterializer"]]]
    ] = None
def resolve_materializer_with_registry(
    self, param_name: str, artifact: BaseArtifact
) -> Type[BaseMaterializer]:
    """Resolves the materializer for the given output parameter.

    Args:
        param_name: Name of the output parameter.
        artifact: A TFX artifact type.

    Returns:
        The materializer class registered for `param_name`.

    Raises:
        ValueError: If no materializers were set on this executor class.
    """
    if not self.materializers:
        # Fixed garbled error message (was "Materializers are missing is
        # not set!").
        raise ValueError("Materializers are not set!")
    materializer_class = self.materializers[param_name]
    return materializer_class
def resolve_input_artifact(
    self, artifact: BaseArtifact, data_type: Type[Any]
) -> Any:
    """Resolves an input artifact, i.e., reading it from the Artifact Store
    to a pythonic object.

    Args:
        artifact: A TFX artifact type.
        data_type: The type of data to be materialized.

    Returns:
        The output of `handle_input()` of the selected materializer.
    """
    if issubclass(data_type, BaseArtifact):
        # BaseArtifact (and subclasses) are handed over without
        # materialization; only warn when the declared type disagrees with
        # the upstream artifact type.
        if data_type != type(artifact):
            logger.warning(
                f"You specified the data_type `{data_type}` but the actual "
                f"artifact type from the previous step is "
                f"`{type(artifact)}`. Ignoring this for now, but please be "
                f"aware of this in your step code."
            )
        return artifact

    materializer_class = source_utils.load_source_path_class(
        artifact.materializer
    )
    materializer = materializer_class(artifact)
    # The materializer returns the resolved, pythonic input.
    return materializer.handle_input(data_type=data_type)
def resolve_output_artifact(
    self, param_name: str, artifact: BaseArtifact, data: Any
) -> None:
    """Resolves an output artifact, i.e., writing it to the Artifact Store.

    Calls `handle_return(data)` of the selected materializer after
    recording the materializer and data type on the artifact.

    Args:
        param_name: Name of the output parameter.
        artifact: A TFX artifact type.
        data: The object to be passed to `handle_return()`.
    """
    if isinstance(data, BaseArtifact):
        # Raw artifacts skip materialization entirely.
        return

    materializer_class = self.resolve_materializer_with_registry(
        param_name, artifact
    )
    artifact.materializer = source_utils.resolve_class(materializer_class)
    artifact.datatype = source_utils.resolve_class(type(data))
    materializer_class(artifact).handle_return(data)
def check_output_types_match(
    self, output_value: Any, specified_type: Type[Any]
) -> None:
    """Raises if the actual output type differs from the declared one.

    Args:
        output_value: Value of the output.
        specified_type: What the type of the output should be, as declared
            in the step signature.

    Raises:
        ValueError: If the types do not match.
    """
    # TODO [ENG-160]: Include this check when we figure out the logic of
    # slightly different subclasses.
    actual_type = type(output_value)
    if do_types_match(actual_type, specified_type):
        return
    raise ValueError(
        f"Output `{output_value}` of type {actual_type} does "
        f"not match specified return type {specified_type} in step "
        f"{getattr(self, PARAM_STEP_NAME)}"
    )
def Do(
self,
input_dict: Dict[str, List[BaseArtifact]],
output_dict: Dict[str, List[BaseArtifact]],
exec_properties: Dict[str, Any],
) -> None:
"""Main block for the execution of the step
Args:
input_dict: dictionary containing the input artifacts
output_dict: dictionary containing the output artifacts
exec_properties: dictionary containing the execution parameters
"""
step_name = getattr(self, PARAM_STEP_NAME)
# remove all ZenML internal execution properties
exec_properties = {
k: json.loads(v)
for k, v in exec_properties.items()
if not k.startswith(INTERNAL_EXECUTION_PARAMETER_PREFIX)
}
# Building the args for the entrypoint function
function_params = {}
# First, we parse the inputs, i.e., params and input artifacts.
spec | |
-> bool:...
@staticmethod
def UndoAutoNumbering() -> bool:...
@staticmethod
def Unstack() -> bool:...
# Generated stub for a .NET delegate with the standard Begin/End/Invoke
# pattern and no payload. NOTE(review): the obfuscated bases (_n_8_t_*) are
# tool-generated aliases — presumably MulticastDelegate and friends; verify
# against the stub generator's alias table.
class ThemeChangedEventHandler(_n_8_t_6, _n_8_t_7, _n_18_t_0):
    def __init__(self, A_0: object, A_1: _n_8_t_0) -> ThemeChangedEventHandler:...
    def BeginInvoke(self, callback: _n_8_t_9, obj: object) -> _n_8_t_8:...
    def EndInvoke(self, result: _n_8_t_8):...
    def Invoke(self):...
# Generated stub for a combo-box based color-picker control; the base list
# spells out the usual OLE/ActiveX control interfaces (IOleControl,
# IOleObject, IViewObject, IPersist*, IQuickActivate, ...).
class TrueColorPicker(ComboBoxWrapper, _n_13_t_0, _n_21_t_0IOleControl, _n_21_t_0IOleObject, _n_21_t_0IOleInPlaceObject, _n_21_t_0IOleInPlaceActiveObject, _n_21_t_0IOleWindow, _n_21_t_0IViewObject, _n_21_t_0IViewObject2, _n_21_t_0IPersist, _n_21_t_0IPersistStreamInit, _n_21_t_0IPersistPropertyBag, _n_21_t_0IPersistStorage, _n_21_t_0IQuickActivate, _n_21_t_1, _n_21_t_2, _n_13_t_1, _n_21_t_3, _n_22_t_0, _n_21_t_4):
    @property
    def CurrentSelectionIndex(self) -> int:"""CurrentSelectionIndex { get; set; } -> int"""
    def __init__(self) -> TrueColorPicker:...
    # Adds a named extra entry to the drop-down; return value is an int
    # (presumably the new item's index — verify against the .NET API).
    def AddOtherItemToList(self, name: str, cargo: int) -> int:...
    def GetCurrentItemColor(self) -> _n_1_t_0:...
    def SetCurrentByColor(self, mgColor: _n_1_t_0):...
class UiCallback(_n_8_t_6, _n_8_t_7, _n_18_t_0):
    # Auto-generated interop stub; Invoke/BeginInvoke/EndInvoke shape matches
    # the .NET delegate pattern (no arguments, no return value).
    def __init__(self, A_0: object, A_1: _n_8_t_0) -> UiCallback:...
    def BeginInvoke(self, callback: _n_8_t_9, obj: object) -> _n_8_t_8:...
    def EndInvoke(self, result: _n_8_t_8):...
    def Invoke(self):...
class UndoBoundaryEnum(_n_8_t_2, _n_8_t_3, _n_8_t_4, _n_8_t_5):
    # Auto-generated stub of an int-valued enum describing undo-boundary kinds.
    CommandBoundaryEnd: int
    CommandBoundaryStart: int
    SubCommandBoundary: int
    SubCommandWall: int
    value__: int  # raw underlying value slot emitted by the stub generator
class UnmanagedResource(object):
    # Auto-generated stub wrapping a native (unmanaged) resource module:
    # resolves a module handle by name and loads string resources by id.
    @property
    def AcadResourceHandle(self) -> _n_8_t_0:"""AcadResourceHandle { get; } -> IntPtr"""
    def __init__(self) -> UnmanagedResource:...
    @staticmethod
    def GetResourceHandle(moduleName: str) -> _n_8_t_0:...
    @staticmethod
    def LoadString(resourceHandle: _n_8_t_0, resourceId: int) -> str:...
class UpdateTitleBarEventHandler(_n_8_t_6, _n_8_t_7, _n_18_t_0):
    # Auto-generated interop stub; Invoke/BeginInvoke/EndInvoke shape matches
    # the .NET delegate pattern (no arguments, no return value).
    def __init__(self, A_0: object, A_1: _n_8_t_0) -> UpdateTitleBarEventHandler:...
    def BeginInvoke(self, callback: _n_8_t_9, obj: object) -> _n_8_t_8:...
    def EndInvoke(self, result: _n_8_t_8):...
    def Invoke(self):...
class UpdateUIEventHandler(_n_8_t_6, _n_8_t_7, _n_18_t_0):
    # Auto-generated interop stub; unlike the other handlers here, this
    # delegate carries (sender, IPEUpdateUIEventArgs) arguments.
    def __init__(self, A_0: object, A_1: _n_8_t_0) -> UpdateUIEventHandler:...
    def BeginInvoke(self, sender: object, e: IPEUpdateUIEventArgs, callback: _n_8_t_9, obj: object) -> _n_8_t_8:...
    def EndInvoke(self, result: _n_8_t_8):...
    def Invoke(self, sender: object, e: IPEUpdateUIEventArgs):...
class Utils(object):
@property
def ApplicationStatusBarMenu(self) -> _n_20_t_0:"""ApplicationStatusBarMenu { get; } -> MenuItem"""
@property
def ApplicationToolbarsMenu(self) -> _n_20_t_0:"""ApplicationToolbarsMenu { get; } -> MenuItem"""
@property
def CaptureOnLayoutSwitch(self) -> bool:"""CaptureOnLayoutSwitch { set; } -> bool"""
@property
def ImpliedSelectionIsActive(self) -> bool:"""ImpliedSelectionIsActive { get; } -> bool"""
@property
def IsCuiCommandEnabled(self) -> bool:"""IsCuiCommandEnabled { get; } -> bool"""
@property
def IsEditorReady(self) -> bool:"""IsEditorReady { get; } -> bool"""
@property
def IsOsThemed(self) -> bool:"""IsOsThemed { get; } -> bool"""
@property
def IsScriptActive(self) -> bool:"""IsScriptActive { get; } -> bool"""
@property
def SysVarInProgress(self) -> str:"""SysVarInProgress { get; } -> str"""
@property
def SavingStartupTools(self) -> _n_8_t_15[StartupToolsEventArgs]:
"""SavingStartupTools Event: EventHandler"""
def __init__(self) -> Utils:...
@staticmethod
def ActivateDocument(hWndDoc: _n_8_t_0):...
@staticmethod
def ActivateLayout(doc: _n_0_t_0, index: int):...
@staticmethod
def AddCommand(cmdGroupName: str, cmdGlobalName: str, cmdLocalName: str, cmdFlags: _n_6_t_2, func: CommandCallback):...
@staticmethod
def AngleToString(dValue: float) -> str:...
@staticmethod
def ApplyAcValueFormat(objValue: object, formatString: str) -> str:...
@staticmethod
def AreFilesSame(file1: str, file2: str) -> bool:...
@staticmethod
def CallButtonEditor(resId: str, packageName: str) -> str:...
@staticmethod
def CallButtonEditorWithBitmap(bitmap: _n_14_t_1, resId: str, packageName: str) -> str:...
@staticmethod
def CancelAndRunCmds(pStrCmd: str) -> bool:...
@staticmethod
def CloseCommandLine():...
@staticmethod
def ConvertBitmapToAcGiImageBGRA32(bmp: _n_14_t_1) -> _n_5_t_1:...
@staticmethod
def ConvertBitmapToAcGiImageBGRA32Ex(bmp: _n_14_t_1) -> _n_5_t_1:...
@staticmethod
def ConvertCMenuToMenuItem(pMenu: object, menuItem: _n_20_t_0):...
@staticmethod
def ConvertOSnapCMenuToMenuItem(pMenu: object, menuItem: _n_20_t_0, b3DOsnap: bool):...
@staticmethod
def CreateCommandToolTip(pTooltipInfo: _n_8_t_10) -> object:...
@staticmethod
def CreateNativeTrayItemInstance() -> _n_8_t_0:...
@staticmethod
def CUIEndTransferBitmaps():...
@staticmethod
def CUIIsUsingSmallIcon() -> bool:...
@staticmethod
def CUIRequestBitmap(resId: str):...
@staticmethod
def CUISaveMenuAndToolbarState():...
@staticmethod
def CUIStartTransferBitmaps():...
@staticmethod
def DeleteDimConstraints(db: _n_2_t_3, bKeepAnnotativeDimensions: bool) -> bool:...
@staticmethod
def DisableUndoRecording(db: _n_2_t_3, bDisable: bool) -> bool:...
@staticmethod
def DisplaySettingsDialog():...
@staticmethod
def Do_Cmd(menuString: str, IsSSetNeedRestore: bool, bMenuMacro: bool, IsHiddenCmd: bool, bSynchronous: bool):...
@staticmethod
def DoHelpForCommand(cmdName: str):...
@staticmethod
def DragDropLayoutTab(doc: _n_0_t_0, index: int, isMoveOrCopy: bool):...
@staticmethod
def DrawLineTypePattern(ltypeID: _n_2_t_0, left: int, top: int, right: int, bottom: int) -> _n_8_t_0:...
@staticmethod
def DrawLineWeightLine(lineWeight: _n_2_t_2, left: int, top: int, right: int, bottom: int) -> _n_8_t_0:...
@staticmethod
def DrawLineWeightSquare(lineWeight: _n_2_t_2, left: int, top: int, right: int, bottom: int) -> _n_8_t_0:...
@staticmethod
def DropOpenFile(file: str):...
@staticmethod
def EnableDockControlBars(bEnable: bool):...
@staticmethod
def EnableFloatingWindows(bEnable: bool):...
@staticmethod
def EnableSysButtons(bEnable: bool):...
@staticmethod
def EntFirst() -> _n_2_t_0:...
@staticmethod
def EntLast() -> _n_2_t_0:...
@staticmethod
def EntNext(entId: _n_2_t_0, skipSubEnt: bool) -> _n_2_t_0:...
@staticmethod
def EntNext(entId: _n_2_t_0) -> _n_2_t_0:...
@staticmethod
def EvaluateTopLevelNetwork(db: _n_2_t_3, bRelaxEvaluate: bool) -> bool:...
@staticmethod
def ExecuteApplicationStatusBarMenu(nId: int):...
@staticmethod
def FlushGraphics():...
@staticmethod
def ForceRunCommandAsTransparent(bEnabled: bool):...
@staticmethod
def GetAcadFrameHandle() -> _n_8_t_0:...
@staticmethod
def GetAcadResourceIcon(sResId: str) -> _n_14_t_2:...
@staticmethod
def GetActualIndex(index: int) -> int:...
@staticmethod
def GetApplicationFrameHWnd() -> _n_8_t_0:...
@staticmethod
def GetBlockImage(objectId: _n_2_t_0, nImgWidth: int, nImgHeight: int, backgroundColor: _n_1_t_0) -> _n_8_t_0:...
@staticmethod
def GetCommandAtLevelForDocument(level: int) -> str:...
@staticmethod
def GetCommandPromptString() -> str:...
@staticmethod
def GetCommandVersion() -> int:...
@staticmethod
def GetCurrentEditBlock() -> _n_2_t_0:...
@staticmethod
def GetCurrentFindingContent() -> str:...
@staticmethod
def GetCurrentObjectColor() -> _n_1_t_0:...
@staticmethod
def GetCurrentViewportVisualStyleId() -> _n_2_t_0:...
@staticmethod
def GetCustomSwatchImage(name: str, nWidth: int, nHeight: int) -> _n_8_t_0:...
@staticmethod
def GetDieselEvalString(text: str, bGrayed: bool, bChecked: bool) -> str:...
@staticmethod
def GetDieselEvalString(text: str) -> str:...
@staticmethod
def GetDimStyleImage(objectId: _n_2_t_0, nImgWidth: int, nImgHeight: int, backgroundColor: _n_1_t_0) -> _n_8_t_0:...
@staticmethod
def GetDockClientRect(excludeLayoutBar: bool) -> _n_19_t_1:...
@staticmethod
def GetDockClientRect() -> _n_19_t_1:...
@staticmethod
def GetDwgFrameIcon(doc: _n_0_t_0) -> _n_14_t_2:...
@staticmethod
def GetFontImage(fontID: _n_2_t_0) -> _n_8_t_0:...
@staticmethod
def GetGradientDisplayText(nHatchGradientNameEnum: int) -> str:...
@staticmethod
def GetGradientDisplayText(gradientName: str) -> str:...
@staticmethod
def GetGradientName(displayText: str) -> str:...
@staticmethod
def GetGradientSwatchImage(displayText: str, angle: float, bShifted: bool, startColor: _n_1_t_0, stopColor: _n_1_t_0, nWidth: int, nHeight: int) -> _n_8_t_0:...
@staticmethod
def GetGradientValue(displayText: str) -> int:...
@staticmethod
def GetHideWarningDialogs(nType: _n_8_t_12) -> bool:...
@staticmethod
def GetLastCommandLines(lastLines: int, ignoreNull: bool) -> _n_10_t_0[str]:...
@staticmethod
def GetLastInsertBlockData(specifyScaleOnScreen: bool, specifyRotationOnScreen: bool):...
@staticmethod
def GetLayoutThumbnail(doc: _n_0_t_0, layoutName: str) -> _n_14_t_1:...
@staticmethod
def GetMLeaderStyleImage(objectId: _n_2_t_0, nImgWidth: int, nImgHeight: int, backgroundColor: _n_1_t_0) -> _n_8_t_0:...
@staticmethod
def GetMoreHideWarningDialogs(nType: _n_8_t_12) -> bool:...
@staticmethod
def GetOpmWindow() -> _n_8_t_0:...
@staticmethod
def GetPatSwatchImage(name: str, patternColor: _n_1_t_0, backgroundColor: _n_1_t_0, nWidth: int, nHeight: int) -> _n_8_t_0:...
@staticmethod
def GetPredefinedVisualStyleGlobalName(localName: str) -> str:...
@staticmethod
def GetProductbrandingName() -> str:...
@staticmethod
def GetQpWindow() -> _n_8_t_0:...
@staticmethod
def GetRealHatchPreviewImage(nWidth: int, nHeight: int) -> _n_8_t_0:...
@staticmethod
def GetRedoHistory() -> _n_11_t_1[str]:...
@staticmethod
def GetRefEditName() -> str:...
@staticmethod
def GetStatusBarOsnapMenu(b3DOsnap: bool, isDarkTheme: bool) -> _n_20_t_0:...
@staticmethod
def GetTableCellStyleImage(cellStyleName: str, nImgWidth: int, nImgHeight: int, backgroundColor: _n_1_t_0) -> _n_8_t_0:...
@staticmethod
def GetTableStyleImage(objectId: _n_2_t_0, nImgWidth: int, nImgHeight: int, backgroundColor: _n_1_t_0) -> _n_8_t_0:...
@staticmethod
def GetTextExtents(styleId: _n_2_t_0, text: str, dHeight: float) -> _n_4_t_0:...
@staticmethod
def GetToolPaletteGroups(schemeName: str, pGroups: _n_12_t_0) -> bool:...
@staticmethod
def GetUndoHistory() -> _n_11_t_1[str]:...
@staticmethod
def GetUnitsConversion(fromUnits: _n_2_t_20, toUnits: _n_2_t_20) -> float:...
@staticmethod
def GetUnknownPatternSwatchImage(nWidth: int, nHeight: int) -> _n_8_t_0:...
@staticmethod
def GetUserDefinedSwatchImage(bDouble: bool, patternColor: _n_1_t_0, backgroundColor: _n_1_t_0, nWidth: int, nHeight: int) -> _n_8_t_0:...
@staticmethod
def GetVisualStyleEdgeColor(visualStyleId: _n_8_t_10) -> _n_1_t_0:...
@staticmethod
def GetVisualStyleImage(objectId: _n_2_t_0) -> _n_8_t_0:...
@staticmethod
def GetVisualStyleIntersectionEdgeColor(visualStyleId: _n_8_t_10) -> _n_1_t_0:...
@staticmethod
def GetVisualStyleObscuredEdgeColor(visualStyleId: _n_8_t_10) -> _n_1_t_0:...
@staticmethod
def GetVisualStyles(objectIds: _n_2_t_1, imageList: _n_10_t_0[_n_24_t_1]):...
@staticmethod
def GetWSUID(sWSName: str) -> str:...
@staticmethod
def HistoryStatus() -> bool:...
@staticmethod
def IconFilePath() -> str:...
@staticmethod
def InitDialog(bUseDialog: bool) -> bool:...
@staticmethod
def InitializeCommandLineFont():...
@staticmethod
def InvokeDataLinkManagerDialog(database: _n_2_t_3, dialogMode: int, parentForm: _n_21_t_10, objectId: _n_2_t_0) -> _n_21_t_9:...
@staticmethod
def InvokeDataTypeDialog(inDataType: _n_2_t_16, inUnitType: _n_2_t_17, sFormatIn: str, objValue: object, dialogOptions: int, parentForm: _n_21_t_10, sTitle: str, sHelp: str, outDataType: _n_2_t_16, outUnitType: _n_2_t_17, sFormatOut: str) -> _n_21_t_9:...
@staticmethod
def InvokeOpmSetPropertyValue(prop: object, val: object, objIds: _n_2_t_1, guid: _n_8_t_20, mode: OPMModeFlags) -> OPMStatus:...
@staticmethod
def InvokeOptionsDialog(strPos: str, bInvokedAsChildDlg: bool, defaultLabelIdInFileTab: _n_8_t_12, bExpanded: bool):...
@staticmethod
def InvokeStatusBarItemDeleted(ptr: _n_8_t_0):...
@staticmethod
def InvokeStatusBarItemMouseDown(ptr: _n_8_t_0, type: int, flag: int, point: _n_19_t_2):...
@staticmethod
def InvokeTableStyleDialog():...
@staticmethod
def InvokeTrayItemCloseBubbleWindow(trayItemPtr: _n_8_t_0):...
@staticmethod
def InvokeTrayItemShowBubbleWindow(title: str, description: str, iconType: Utils.BubbleWindowIconType, hyperlink: str, hypertext: str, text: str, trayItemPtr: _n_8_t_0):...
@staticmethod
def Is3dVisualStyle(styleName: str) -> bool:...
@staticmethod
def IsAssociativeArrayRibbonContextApplicable(type: str, dataItem: object) -> bool:...
@staticmethod
def IsCommandActive(name: str) -> bool:...
@staticmethod
def IsCommandDefined(cmdName: str) -> bool:...
@staticmethod
def IsCommandNameInUse(name: str) -> CommandTypeFlags:...
@staticmethod
def IsCommandReEntered(name: str) -> bool:...
@staticmethod
def IsCoreCommand(name: str) -> bool:...
@staticmethod
def IsCustSyncEnabled() -> bool:...
@staticmethod
def IsDiesel(text: str) -> bool:...
@staticmethod
def IsDocumentInBlockEditor(doc: _n_0_t_0) -> bool:...
@staticmethod
def IsDroppableExtension(extension: str) -> bool:...
@staticmethod
def IsFlagOn(id: _n_8_t_12, bDefault: bool) -> bool:...
@staticmethod
def IsInBlockEditor() -> bool:...
@staticmethod
def IsInCommandStack(flags: _n_6_t_2) -> bool:...
@staticmethod
def IsInCustomizeMode() -> bool:...
@staticmethod
def IsInPaperSpace() -> bool:...
@staticmethod
def IsInputPending() -> bool:...
@staticmethod
def IsInQuiescentState() -> bool:...
@staticmethod
def IsInStartup() -> bool:...
@staticmethod
def IsInTilemode() -> bool:...
@staticmethod
def IsLinkedObjectExist(doc: _n_0_t_0) -> bool:...
@staticmethod
def IsLispCommandDefined(name: str) -> bool:...
@staticmethod
def IsMultiRedoAvaliable() -> bool:...
@staticmethod
def IsNewTabCommandAllowed() -> bool:...
@staticmethod
def IsNonDrawingDocumentDisabled() -> bool:...
@staticmethod
def IsOEM() -> bool:...
@staticmethod
def IsOverrideActive() -> bool:...
@staticmethod
def IsPasteClipCommandAllowed() -> bool:...
@staticmethod
def IsPointOverToolbar(x: int, y: int) -> bool:...
@staticmethod
def IsT2P() -> bool:...
@staticmethod
def IsTextEditorActive() -> bool:...
@staticmethod
def IsUndoAvailable() -> bool:...
@staticmethod
| |
if fully anchored
if not self.verifyAnchor(serder=serder, seqner=seqner, diger=diger):
self.escrowALEvent(serder=serder)
raise MissingAnchorError("Failure verify event = {} "
"".format(serder.ked))
self.logEvent(pre=vci, sn=sn, serder=serder, seqner=seqner, diger=diger)
elif ilk is Ilks.brv: # backer revoke
if self.noBackers is True:
raise ValidationError("invalid backer issue evt {} against backerless registry {}".
format(ked, self.regk))
rtoad, baks = self.getBackerState(ked)
bigers = self.valAnchorBigs(serder=serder,
seqner=seqner,
diger=diger,
bigers=bigers,
toad=rtoad,
baks=baks)
self.logEvent(pre=vci, sn=sn, serder=serder, seqner=seqner, diger=diger, bigers=bigers)
else:
raise ValidationError("Unsupported ilk = {} for evt = {}.".format(ilk, ked))
def vcState(self, vcpre):
    """
    Look up the current state (issued/revoked) of a VC in the db.

    Parameters:
        vcpre: the VC identifier

    Returns:
        VcStates.issued when exactly one TEL event exists for the VC,
        VcStates.revoked when two exist, otherwise None (never issued
        from this Registry).
    """
    key = nsKey([self.prefixer.qb64, vcpre])
    count = self.reger.cntTels(key)
    # Map the TEL event count directly onto the two possible states.
    states = {1: VcStates.issued, 2: VcStates.revoked}
    return states.get(count)
def vcSn(self, vcpre):
    """
    Return the current sequence number of a VC in the db.

    Parameters:
        vcpre: the VC identifier

    Returns:
        int sequence number (TEL event count minus one), or None when the
        VC was never issued from this Registry.
    """
    key = nsKey([self.prefixer.qb64, vcpre])
    count = self.reger.cntTels(key)
    if not count:
        return None
    return count - 1
def logEvent(self, pre, sn, serder, seqner, diger, bigers=None, baks=None):
    """
    Update associated logs for verified event.
    Update is idempotent. Logs will not write dup at key if already exists.
    Parameters:
        pre (qb64): is event prefix
        sn (int): is event sequence number
        serder (Serder): is Serder instance of current event
        seqner (Seqner): issuing event sequence number from controlling KEL.
        diger (Diger): issuing event digest from controlling KEL.
        bigers is optional list of Siger instances of indexed backer sigs
        baks is optional list of qb64 non-transferable backer prefixes;
            when provided the previously stored backer list for this
            event is replaced.
    """
    dig = serder.diger.qb64b
    key = dgKey(pre, dig)
    # Anchor seal couplet: controlling KEL sequence number + digest.
    sealet = seqner.qb64b + diger.qb64b
    self.reger.putAnc(key, sealet)
    if bigers:
        self.reger.putTibs(key, [biger.qb64b for biger in bigers])
    if baks:
        # Replace, not append: delete any stale backer list first.
        self.reger.delBaks(key)
        self.reger.putBaks(key, [bak.encode("utf-8") for bak in baks])
    self.reger.putTvt(key, serder.raw)  # raw transaction event bytes
    self.reger.putTel(snKey(pre, sn), dig)  # sn -> digest index entry
    logger.info("Tever state: %s Added to KEL valid event=\n%s\n",
                pre, json.dumps(serder.ked, indent=1))
def valAnchorBigs(self, serder, seqner, diger, bigers, toad, baks):
    """
    Returns bigers, the unique signature-verified members of the input
    bigers list.
    Validates backer receipts by validating indexes, verifying backer
    signatures, and validating toad (backer threshold).
    Also requires the event to be anchored in the controlling KEL;
    otherwise it is escrowed. Backer validation is a function of
    .regk and .local.
    Parameters:
        serder is Serder instance of event
        seqner (Seqner): issuing event sequence number from controlling KEL.
        diger (Diger): issuing event digest from controlling KEL.
        bigers is list of Siger instances of indexed witness signatures.
            Index is offset into wits list of associated witness nontrans pre
            from which public key may be derived.
        toad is int or str hex of witness threshold
        baks is list of qb64 non-transferable prefixes of backers used to
            derive werfers for bigers
    Raises:
        MissingAnchorError: event not (yet) anchored in KEL; event escrowed.
        ValidationError: toad out of range for the given backer list.
        MissingWitnessSignatureError: fewer verified backer sigs than toad;
            event escrowed as partially witnessed.
    """
    berfers = [Verfer(qb64=bak) for bak in baks]
    # get unique verified bigers and bindices lists from bigers list
    bigers, bindices = verifySigs(serder=serder, sigers=bigers, verfers=berfers)
    # each biger now has werfer of corresponding wit
    # check if fully anchored
    if not self.verifyAnchor(serder=serder, seqner=seqner, diger=diger):
        # anchorless: park the event (with its verified sigs) for later
        self.escrowALEvent(serder=serder, bigers=bigers)
        raise MissingAnchorError("Failure verify event = {} "
                                 "".format(serder.ked))
    # Kevery .process event logic prevents this from seeing event when
    # not local and event pre is own pre
    if ((baks and not self.regk) or  # in promiscuous mode so assume must verify toad
            (baks and not self.local and self.regk and self.regk not in baks)):
        # validate that event is fully witnessed
        if isinstance(toad, str):
            toad = int(toad, 16)  # toad is serialized as hex in the event
        if toad < 0 or len(baks) < toad:
            raise ValidationError("Invalid toad = {} for wits = {} for evt"
                                  " = {}.".format(toad, baks, serder.ked))
        if len(bindices) < toad:  # not fully witnessed yet
            self.escrowPWEvent(serder=serder, seqner=seqner, diger=diger, bigers=bigers)
            raise MissingWitnessSignatureError("Failure satisfying toad = {} "
                                              "on witness sigs for {} for evt = {}.".format(toad,
                                                                                            [siger.qb64 for siger in bigers],
                                                                                            serder.ked))
    return bigers
def verifyAnchor(self, serder, seqner, diger):
    """
    Verify that the controlling-KEL event identified by (seqner, diger)
    carries a seal anchoring the given TEL event serder.

    Looks up the KEL event by first-seen ordinal, checks its digest against
    diger, then requires exactly one seal whose (i, s, d) match serder's
    prefix, sn, and digest. Returns True only when every check passes.
    """
    found = self.db.getFe(key=fnKey(pre=self.pre, sn=seqner.sn))
    if not found:
        return False
    anchorDig = bytes(found)
    # retrieve the anchoring event itself by its digest
    stored = self.db.getEvt(key=dgKey(pre=self.pre, dig=anchorDig))
    if not stored:
        return False
    anchoring = Serder(raw=bytes(stored))  # deserialize event raw
    if anchoring.dig != diger.qb64:
        return False
    seals = anchoring.ked["a"]
    if seals is None or len(seals) != 1:
        return False
    seal = seals[0]
    return (seal["i"] == serder.ked["i"]
            and seal["s"] == serder.ked["s"]
            and seal["d"] == serder.dig)
def escrowPWEvent(self, serder, seqner, diger, bigers=None):
    """
    Escrow a partially witnessed TEL event and update its associated logs.

    Parameters:
        serder is Serder instance of event
        seqner (Seqner): issuing event sequence number from controlling KEL.
        diger (Diger): issuing event digest from controlling KEL.
        bigers is list of Siger instance of indexed witness sigs
    """
    key = dgKey(serder.preb, serder.digb)
    # anchor seal couplet from the controlling KEL
    self.reger.putAnc(key, seqner.qb64b + diger.qb64b)
    self.reger.putTibs(key, [biger.qb64b for biger in bigers])
    self.reger.putTvt(key, serder.raw)
    self.reger.putTwe(snKey(serder.preb, serder.sn), serder.digb)
    logger.info("Tever state: Escrowed partially witnessed "
                "event = %s\n", serder.ked)
def escrowALEvent(self, serder, bigers=None):
    """
    Escrow an anchorless TEL event (not yet anchored in the controlling
    KEL) and update its associated logs.

    Parameters:
        serder is Serder instance of event
        bigers is optional list of Siger instances of indexed backer sigs
    """
    dgkey = dgKey(serder.preb, serder.digb)
    if bigers:
        self.reger.putTibs(dgkey, [biger.qb64b for biger in bigers])
    self.reger.putTvt(dgkey, serder.raw)
    self.reger.putTae(snKey(serder.preb, serder.sn), serder.digb)
    logger.info("Tever state: Escrowed anchorless event "
                "event = %s\n", serder.ked)
def getBackerState(self, ked):
    """
    Return duple (rtoad, baks) of backer threshold and backer prefix list
    in effect at the management TEL event referenced by the registry
    anchor seal in ked.

    Parameters:
        ked: key event dict of a backer issue/revoke event; its "ra" field
            holds the registry anchor seal with fields i (registry prefix)
            and d (event digest).

    Raises:
        ValidationError: when the seal's registry prefix does not match
            this Tever's registry prefix, or when the referenced
            management event is not (yet) present in the database.
    """
    rega = ked["ra"]
    regi = rega["i"]
    regd = rega["d"]
    if regi != self.prefixer.qb64:
        # Report the event's mismatching prefix (regi), not self.regk,
        # since regi is what failed the comparison.
        raise ValidationError("Mismatch event regk prefix = {} expecting"
                              " = {} for evt = {}.".format(regi,
                                                           self.prefixer.qb64,
                                                           ked))
    # load backer list and toad (via event) for specific event in registry from seal in event
    dgkey = dgKey(regi, regd)
    revt = self.reger.getTvt(dgkey)
    if revt is None:
        raise ValidationError("have to escrow this somewhere")
    rserder = Serder(raw=bytes(revt))
    # the backer threshold at this event in mgmt TEL
    rtoad = rserder.ked["bt"]
    baks = [bytes(bak) for bak in self.reger.getBaks(dgkey)]
    return rtoad, baks
class Tevery:
"""
Tevery (Transaction Event Message Processing Facility)
Currently placeholder
"""
def __init__(self, tevers=None, reger=None, db=None, regk=None, local=False):
    """
    Initialize instance:
    Parameters:
        tevers is dict of transaction event state instances keyed by
            registry prefix (defaults to empty dict)
        reger is Registry instance of TEL database (defaults to new Registry)
        db is Baser instance of KEL database (defaults to new Baser)
        regk is local or own registry prefix. Some restriction if present
        local is Boolean, True means only process msgs for own events if .regk
            False means only process msgs for not own events if .regk
    """
    self.tevers = tevers if tevers is not None else dict()
    self.cues = deque()  # outbound notifications/instructions queue
    if db is None:
        db = Baser()  # default name = "main"
    self.db = db
    if reger is None:
        reger = Registry()
    self.reger = reger
    self.regk = regk  # local prefix for restrictions on local events
    self.local = True if local else False  # local vs nonlocal restrictions
def processEvent(self, serder, seqner, diger, wigers=None):
"""
Process one event serder with attached indexd signatures sigers
Parameters:
serder (Serder): event to process
seqner (Seqner): issuing event sequence number from controlling KEL.
diger (Diger): issuing event digest from controlling KEL.
wigers (Siger): is optional list of Siger instances of attached witness indexed sigs
"""
ked = serder.ked
try: # see if code of pre is supported and matches size of pre
Prefixer(qb64b=serder.preb)
except Exception as ex: # if unsupported code or bad size raises error
raise ValidationError("Invalid pre = {} for evt = {}."
"".format(serder.pre, ked))
regk = self.registryKey(serder)
pre = serder.pre
ked = serder.ked
sn = ked["s"]
ilk = ked["t"]
inceptive = ilk in (Ilks.vcp, Ilks.iss, Ilks.bis)
# validate SN for
sn = validateSN(sn, inceptive=inceptive)
if self.regk:
if self.local:
if self.regk != regk: # nonlocal event when in local mode
raise ValueError("Nonlocal event regk={} when local mode for regk={}."
"".format(regk, self.regk))
else:
if self.regk == regk: # | |
# suitesparse_graphblas/create_headers.py
"""
Script to generate suitesparse_graphblas.h, suitesparse_graphblas_no_complex.h, and source.c files.
- Copy the SuiteSparse header file GraphBLAS.h to the local directory.
- Run the C preprocessor (cleans it up, but also loses #define values).
- Parse the processed header file using pycparser.
- Create the final files with and without complex types.
- Check #define values for sanity.
The generated files are then used by cffi to bind to SuiteSparse:GraphBLAS.
When running against new versions of SuiteSparse:GraphBLAS, the most likely
things that may need to change are:
- Update DEFINES, the integer #define constants defined by SuiteSparse.
- Update CHAR_DEFINES, the char* #defines.
- Update IGNORE_DEFINES, #defines that the script may mistakenly identify,
  but that we can safely ignore.
- Update DEPRECATED: deprecated names (including enum fields) to exclude.
Run `python create_headers.py --help` to see more help.
"""
import argparse
import os
import re
import shutil
import subprocess
import sys
import pycparser
from pycparser import c_ast, c_generator, parse_file
def sort_key(string):
    """Sort key padding a lone '8' so that e.g. 'INT8' orders before 'INT16'."""
    padded = string.replace("8", "08")
    return padded
def has_complex(string):
    """Return True when the name mentions a complex type (FC32 or FC64)."""
    return any(token in string for token in ("FC32", "FC64"))
def groupby(index, seq):
    """Group the items of *seq* by the key found at position *index* in each item.

    Args:
        index: index into each item (e.g. tuple position) used as the group key.
        seq: iterable of indexable items.

    Returns:
        dict mapping each key to the list of items carrying that key, with
        items kept in first-seen order.
    """
    grouped = {}
    for item in seq:
        # setdefault replaces the original's explicit membership check/append
        grouped.setdefault(item[index], []).append(item)
    return grouped
AUTO = "/* This file is automatically generated */"
DEPRECATED = {
# enums
"GxB_IS_HYPER",
"GrB_SCMP",
# functions
"GxB_kron",
"GxB_Matrix_resize",
"GxB_Vector_resize",
# UnaryOp
"GxB_ABS_BOOL",
"GxB_ABS_INT8",
"GxB_ABS_INT16",
"GxB_ABS_INT32",
"GxB_ABS_INT64",
"GxB_ABS_UINT8",
"GxB_ABS_UINT16",
"GxB_ABS_UINT32",
"GxB_ABS_UINT64",
"GxB_ABS_FP32",
"GxB_ABS_FP64",
# Monoids
"GxB_MIN_INT8_MONOID",
"GxB_MIN_INT16_MONOID",
"GxB_MIN_INT32_MONOID",
"GxB_MIN_INT64_MONOID",
"GxB_MIN_UINT8_MONOID",
"GxB_MIN_UINT16_MONOID",
"GxB_MIN_UINT32_MONOID",
"GxB_MIN_UINT64_MONOID",
"GxB_MIN_FP32_MONOID",
"GxB_MIN_FP64_MONOID",
"GxB_MAX_INT8_MONOID",
"GxB_MAX_INT16_MONOID",
"GxB_MAX_INT32_MONOID",
"GxB_MAX_INT64_MONOID",
"GxB_MAX_UINT8_MONOID",
"GxB_MAX_UINT16_MONOID",
"GxB_MAX_UINT32_MONOID",
"GxB_MAX_UINT64_MONOID",
"GxB_MAX_FP32_MONOID",
"GxB_MAX_FP64_MONOID",
"GxB_PLUS_INT8_MONOID",
"GxB_PLUS_INT16_MONOID",
"GxB_PLUS_INT32_MONOID",
"GxB_PLUS_INT64_MONOID",
"GxB_PLUS_UINT8_MONOID",
"GxB_PLUS_UINT16_MONOID",
"GxB_PLUS_UINT32_MONOID",
"GxB_PLUS_UINT64_MONOID",
"GxB_PLUS_FP32_MONOID",
"GxB_PLUS_FP64_MONOID",
"GxB_TIMES_INT8_MONOID",
"GxB_TIMES_INT16_MONOID",
"GxB_TIMES_INT32_MONOID",
"GxB_TIMES_INT64_MONOID",
"GxB_TIMES_UINT8_MONOID",
"GxB_TIMES_UINT16_MONOID",
"GxB_TIMES_UINT32_MONOID",
"GxB_TIMES_UINT64_MONOID",
"GxB_TIMES_FP32_MONOID",
"GxB_TIMES_FP64_MONOID",
"GxB_LOR_BOOL_MONOID",
"GxB_LAND_BOOL_MONOID",
"GxB_LXOR_BOOL_MONOID",
"GxB_LXNOR_BOOL_MONOID",
# "GxB_EQ_BOOL_MONOID", # XXX: I prefer this name to GrB_LXNOR_MONOID_BOOL
# Semirings
"GxB_PLUS_TIMES_INT8",
"GxB_PLUS_TIMES_INT16",
"GxB_PLUS_TIMES_INT32",
"GxB_PLUS_TIMES_INT64",
"GxB_PLUS_TIMES_UINT8",
"GxB_PLUS_TIMES_UINT16",
"GxB_PLUS_TIMES_UINT32",
"GxB_PLUS_TIMES_UINT64",
"GxB_PLUS_TIMES_FP32",
"GxB_PLUS_TIMES_FP64",
"GxB_PLUS_MIN_INT8",
"GxB_PLUS_MIN_INT16",
"GxB_PLUS_MIN_INT32",
"GxB_PLUS_MIN_INT64",
"GxB_PLUS_MIN_UINT8",
"GxB_PLUS_MIN_UINT16",
"GxB_PLUS_MIN_UINT32",
"GxB_PLUS_MIN_UINT64",
"GxB_PLUS_MIN_FP32",
"GxB_PLUS_MIN_FP64",
"GxB_MIN_PLUS_INT8",
"GxB_MIN_PLUS_INT16",
"GxB_MIN_PLUS_INT32",
"GxB_MIN_PLUS_INT64",
"GxB_MIN_PLUS_UINT8",
"GxB_MIN_PLUS_UINT16",
"GxB_MIN_PLUS_UINT32",
"GxB_MIN_PLUS_UINT64",
"GxB_MIN_PLUS_FP32",
"GxB_MIN_PLUS_FP64",
"GxB_MIN_TIMES_INT8",
"GxB_MIN_TIMES_INT16",
"GxB_MIN_TIMES_INT32",
"GxB_MIN_TIMES_INT64",
"GxB_MIN_TIMES_UINT8",
"GxB_MIN_TIMES_UINT16",
"GxB_MIN_TIMES_UINT32",
"GxB_MIN_TIMES_UINT64",
"GxB_MIN_TIMES_FP32",
"GxB_MIN_TIMES_FP64",
"GxB_MIN_FIRST_INT8",
"GxB_MIN_FIRST_INT16",
"GxB_MIN_FIRST_INT32",
"GxB_MIN_FIRST_INT64",
"GxB_MIN_FIRST_UINT8",
"GxB_MIN_FIRST_UINT16",
"GxB_MIN_FIRST_UINT32",
"GxB_MIN_FIRST_UINT64",
"GxB_MIN_FIRST_FP32",
"GxB_MIN_FIRST_FP64",
"GxB_MIN_SECOND_INT8",
"GxB_MIN_SECOND_INT16",
"GxB_MIN_SECOND_INT32",
"GxB_MIN_SECOND_INT64",
"GxB_MIN_SECOND_UINT8",
"GxB_MIN_SECOND_UINT16",
"GxB_MIN_SECOND_UINT32",
"GxB_MIN_SECOND_UINT64",
"GxB_MIN_SECOND_FP32",
"GxB_MIN_SECOND_FP64",
"GxB_MIN_MAX_INT8",
"GxB_MIN_MAX_INT16",
"GxB_MIN_MAX_INT32",
"GxB_MIN_MAX_INT64",
"GxB_MIN_MAX_UINT8",
"GxB_MIN_MAX_UINT16",
"GxB_MIN_MAX_UINT32",
"GxB_MIN_MAX_UINT64",
"GxB_MIN_MAX_FP32",
"GxB_MIN_MAX_FP64",
"GxB_MAX_PLUS_INT8",
"GxB_MAX_PLUS_INT16",
"GxB_MAX_PLUS_INT32",
"GxB_MAX_PLUS_INT64",
"GxB_MAX_PLUS_UINT8",
"GxB_MAX_PLUS_UINT16",
"GxB_MAX_PLUS_UINT32",
"GxB_MAX_PLUS_UINT64",
"GxB_MAX_PLUS_FP32",
"GxB_MAX_PLUS_FP64",
"GxB_MAX_TIMES_INT8",
"GxB_MAX_TIMES_INT16",
"GxB_MAX_TIMES_INT32",
"GxB_MAX_TIMES_INT64",
"GxB_MAX_TIMES_UINT8",
"GxB_MAX_TIMES_UINT16",
"GxB_MAX_TIMES_UINT32",
"GxB_MAX_TIMES_UINT64",
"GxB_MAX_TIMES_FP32",
"GxB_MAX_TIMES_FP64",
"GxB_MAX_FIRST_INT8",
"GxB_MAX_FIRST_INT16",
"GxB_MAX_FIRST_INT32",
"GxB_MAX_FIRST_INT64",
"GxB_MAX_FIRST_UINT8",
"GxB_MAX_FIRST_UINT16",
"GxB_MAX_FIRST_UINT32",
"GxB_MAX_FIRST_UINT64",
"GxB_MAX_FIRST_FP32",
"GxB_MAX_FIRST_FP64",
"GxB_MAX_SECOND_INT8",
"GxB_MAX_SECOND_INT16",
"GxB_MAX_SECOND_INT32",
"GxB_MAX_SECOND_INT64",
"GxB_MAX_SECOND_UINT8",
"GxB_MAX_SECOND_UINT16",
"GxB_MAX_SECOND_UINT32",
"GxB_MAX_SECOND_UINT64",
"GxB_MAX_SECOND_FP32",
"GxB_MAX_SECOND_FP64",
"GxB_MAX_MIN_INT8",
"GxB_MAX_MIN_INT16",
"GxB_MAX_MIN_INT32",
"GxB_MAX_MIN_INT64",
"GxB_MAX_MIN_UINT8",
"GxB_MAX_MIN_UINT16",
"GxB_MAX_MIN_UINT32",
"GxB_MAX_MIN_UINT64",
"GxB_MAX_MIN_FP32",
"GxB_MAX_MIN_FP64",
"GxB_LOR_LAND_BOOL",
"GxB_LAND_LOR_BOOL",
"GxB_LXOR_LAND_BOOL",
# "GxB_EQ_LOR_BOOL", # XXX: I prefer this name to GrB_LXNOR_LOR_SEMIRING_BOOL
# Old deprecated (probably already removed)
"GrB_eWiseMult_Vector_Semiring",
"GrB_eWiseMult_Vector_Monoid",
"GrB_eWiseMult_Vector_BinaryOp",
"GrB_eWiseMult_Matrix_Semiring",
"GrB_eWiseMult_Matrix_Monoid",
"GrB_eWiseMult_Matrix_BinaryOp",
"GrB_eWiseAdd_Vector_Semiring",
"GrB_eWiseAdd_Vector_Monoid",
"GrB_eWiseAdd_Vector_BinaryOp",
"GrB_eWiseAdd_Matrix_Semiring",
"GrB_eWiseAdd_Matrix_Monoid",
"GrB_eWiseAdd_Matrix_BinaryOp",
}
DEFINES = {
"GxB_STDC_VERSION",
"GxB_IMPLEMENTATION_MAJOR",
"GxB_IMPLEMENTATION_MINOR",
"GxB_IMPLEMENTATION_SUB",
"GxB_SPEC_MAJOR",
"GxB_SPEC_MINOR",
"GxB_SPEC_SUB",
"GxB_IMPLEMENTATION",
"GxB_SPEC_VERSION",
"GxB_INDEX_MAX",
"GRB_VERSION",
"GRB_SUBVERSION",
"GxB_NTHREADS",
"GxB_CHUNK",
"GxB_GPU_CONTROL",
"GxB_GPU_CHUNK",
"GxB_HYPERSPARSE",
"GxB_SPARSE",
"GxB_BITMAP",
"GxB_FULL",
"GxB_NBITMAP_SWITCH",
"GxB_ANY_SPARSITY",
"GxB_AUTO_SPARSITY",
"GxB_RANGE",
"GxB_STRIDE",
"GxB_BACKWARDS",
"GxB_BEGIN",
"GxB_END",
"GxB_INC",
}
CHAR_DEFINES = {
"GxB_IMPLEMENTATION_NAME",
"GxB_IMPLEMENTATION_DATE",
"GxB_SPEC_DATE",
"GxB_IMPLEMENTATION_ABOUT",
"GxB_IMPLEMENTATION_LICENSE",
"GxB_SPEC_ABOUT",
}
IGNORE_DEFINES = {
"GrB",
"GxB",
"CMPLX",
"CMPLXF",
"GB_PUBLIC",
"GRAPHBLAS_H",
"GrB_INVALID_HANDLE",
"GrB_NULL",
"GxB_SUITESPARSE_GRAPHBLAS",
"NMACRO",
# deprecated
"GxB_HYPER",
}
IGNORE_LINES = {
"GxB_cuda_calloc",
"GxB_cuda_malloc",
"GxB_cuda_free",
}
class VisitEnumTypedef(c_generator.CGenerator):
    """C-generator visitor that collects the source text of enum typedefs.

    After visiting an AST, ``self.results`` holds one string per
    ``typedef enum {...} Name;`` encountered, each re-terminated with a
    semicolon (the generator's rendering drops it).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # collected "typedef enum ... ;" source strings
        self.results = []

    def visit_Typedef(self, node):
        # Render the typedef back to C source; record it only when the
        # typedef'd type is an enum.
        rv = super().visit_Typedef(node)
        if isinstance(node.type.type, c_ast.Enum):
            self.results.append(rv + ";")
        return rv
def get_ast(filename):
    """Parse *filename* with the C preprocessor and pycparser, returning the AST.

    Points the preprocessor at pycparser's bundled fake libc headers so that
    standard includes resolve without needing real system headers.
    """
    # Bug fix: the original concatenated the package directory and
    # "utils/fake_libc_include" without a path separator, producing a
    # nonexistent include path; os.path.join inserts the separator.
    fake_include = os.path.join(
        os.path.dirname(pycparser.__file__), "utils", "fake_libc_include"
    )
    ast = parse_file(filename, cpp_args=f"-I{fake_include}")
    return ast
def get_groups(ast):
    """Partition the C declarations in *ast* into named groups.

    Renders the AST back to C source and buckets each declaration line by
    prefix: GxB/GrB/GB methods, objects, consts, typedefs, enum typedefs,
    and function-pointer typedefs.  Asserts that no extern/typedef line is
    left unclassified, and raises ValueError for leftover GrB/GxB lines not
    covered by IGNORE_LINES.

    Returns:
        dict mapping group name -> sorted list of declaration strings.
    """
    generator = c_generator.CGenerator()
    lines = generator.visit(ast).splitlines()
    seen = set()
    groups = {}
    # Function declarations, bucketed by namespace prefix; "- seen" keeps
    # each line in exactly one group.
    vals = {x for x in lines if "extern GrB_Info GxB" in x} - seen
    seen.update(vals)
    groups["GxB methods"] = sorted(vals, key=sort_key)
    vals = {x for x in lines if "extern GrB_Info GrB" in x} - seen
    seen.update(vals)
    groups["GrB methods"] = sorted(vals, key=sort_key)
    vals = {x for x in lines if "extern GrB_Info GB" in x} - seen
    seen.update(vals)
    groups["GB methods"] = sorted(vals, key=sort_key)
    missing_methods = {x for x in lines if "extern GrB_Info " in x} - seen
    assert not missing_methods
    # Global object declarations.
    vals = {x for x in lines if "extern GrB" in x} - seen
    seen.update(vals)
    groups["GrB objects"] = sorted(vals, key=sort_key)
    vals = {x for x in lines if "extern GxB" in x} - seen
    seen.update(vals)
    groups["GxB objects"] = sorted(vals, key=sort_key)
    # Constant declarations.
    vals = {x for x in lines if "extern const" in x and "GxB" in x} - seen
    seen.update(vals)
    groups["GxB const"] = sorted(vals, key=sort_key)
    vals = {x for x in lines if "extern const" in x and "GrB" in x} - seen
    seen.update(vals)
    groups["GrB const"] = sorted(vals, key=sort_key)
    missing_const = {x for x in lines if "extern const" in x} - seen
    assert not missing_const
    # Plain (non-function-pointer) typedefs; "(" excludes function pointers.
    vals = {x for x in lines if "typedef" in x and "GxB" in x and "(" not in x} - seen
    seen.update(vals)
    groups["GxB typedef"] = sorted(vals, key=sort_key)
    vals = {x for x in lines if "typedef" in x and "GrB" in x and "(" not in x} - seen
    seen.update(vals)
    groups["GrB typedef"] = sorted(vals, key=sort_key)
    missing_typedefs = {x for x in lines if "typedef" in x and "GB" in x and "(" not in x} - seen
    assert not missing_typedefs
    assert all(x.endswith(";") for x in seen)  # sanity check
    # Enum typedefs are multi-line; collect them via the dedicated visitor.
    g = VisitEnumTypedef()
    _ = g.visit(ast)
    enums = g.results
    vals = {x for x in enums if "} GrB" in x}
    for val in vals:
        seen.update(val.splitlines())
    groups["GrB typedef enums"] = sorted(vals, key=lambda x: sort_key(x.rsplit("}", 1)[-1]))
    vals = {x for x in enums if "} GxB" in x}
    for val in vals:
        seen.update(val.splitlines())
    groups["GxB typedef enums"] = sorted(vals, key=lambda x: sort_key(x.rsplit("}", 1)[-1]))
    missing_enums = set(enums) - set(groups["GrB typedef enums"]) - set(groups["GxB typedef enums"])
    assert not missing_enums
    # Whatever typedefs remain are function-pointer typedefs.
    vals = {x for x in lines if "typedef" in x and "GxB" in x} - seen
    seen.update(vals)
    groups["GxB typedef funcs"] = sorted(vals, key=sort_key)
    vals = {x for x in lines if "typedef" in x and "GrB" in x} - seen
    assert not vals
    groups["not seen"] = sorted(set(lines) - seen, key=sort_key)
    for group in groups["not seen"]:
        assert "extern" not in group, group
    # Any leftover GrB/GxB line must be explicitly whitelisted in IGNORE_LINES.
    unhandled = set()
    for line in groups["not seen"]:
        if "GrB" in line or "GxB" in line:
            for item in IGNORE_LINES:
                if item in line:
                    break
            else:
                unhandled.add(line)
    if unhandled:
        raise ValueError(
            "\n===================================\n"
            "Unhandled functions with GrB or GxB\n"
            "-----------------------------------\n "
            + "\n ".join(sorted(unhandled))
            + "\n==================================="
        )
    return groups
def get_group_info(groups, ast, *, skip_complex=False):
rv = {}
def handle_constants(group):
for line in group:
extern, const, ctype, name = line.split(" ")
assert name.endswith(";")
name = name[:-1].replace("(void)", "()")
assert extern == "extern"
assert const == "const"
if name in DEPRECATED:
continue
if skip_complex and has_complex(line):
continue
info = {
"text": line,
}
yield info
rv["GrB const"] = list(handle_constants(groups["GrB const"]))
rv["GxB const"] = list(handle_constants(groups["GxB const"]))
def handle_objects(group):
for line in group:
extern, ctype, name = line.split(" ")
assert name.endswith(";")
name = name[:-1]
assert extern == "extern"
if name in DEPRECATED:
continue
if skip_complex and has_complex(line):
continue
info = {
"text": line,
}
yield info
rv["GrB objects"] = list(handle_objects(groups["GrB objects"]))
rv["GxB objects"] = list(handle_objects(groups["GxB objects"]))
def handle_enums(group):
for text in group:
text = text.replace("enum \n", "enum\n")
typedef, bracket, *fields, name = text.splitlines()
assert typedef.strip() == "typedef enum"
assert bracket == "{"
assert name.startswith("}")
assert name.endswith(";")
name = name[1:-1].strip()
if name in DEPRECATED:
continue
if skip_complex and has_complex(name):
continue
# Break this open so we can remove unwanted deprecated fields.
# Instead of traversing the AST, munging string is good enough.
typedef, bracket, *fields, cname = text.splitlines()
typedef = typedef.strip()
assert typedef.strip() == "typedef enum"
assert bracket == "{"
assert cname.startswith("}")
assert cname.endswith(";")
new_fields = []
for field in fields:
if field.endswith(","):
field = field[:-1]
field = field.strip()
cfieldname, eq, val = field.split(" ")
assert eq == "="
if cfieldname in DEPRECATED:
continue
if skip_complex and has_complex(cfieldname):
continue
new_fields.append(field)
if not new_fields:
continue
lines = [typedef, bracket]
for field in new_fields:
lines.append(f" {field},")
lines[-1] = lines[-1][:-1] # remove last comma
lines.append(cname)
info = {
"orig_text": text,
"text": "\n".join(lines),
}
yield info
rv["GrB typedef enums"] = list(handle_enums(groups["GrB typedef enums"]))
rv["GxB typedef enums"] = list(handle_enums(groups["GxB typedef enums"]))
def handle_typedefs(group):
for line in group:
| |
adjacent bone
adj_bone = adjacent_2_bone[0]
T_2_dir = Vector(*r_2).cross((adj_bone.X[n_frame, 0] - adj_bone.XG[n_frame])).unit()
if len(adjacent_2_bone) == 0 or np.isnan(T_2_dir).any(): # if no adjacent, or if above calc causes error
T_2_dir = (0, 1, 0) # Improve later, for now say all torques about y axis
for dim in [0, 1, 2]:
# This loop essentially writes out the following equations into A and b for each dimension (x,y,z):
# r1 x F1 + r2 x F2 + T1 + T2 = T_net
# The cross product of r = (x,y,z) and F = (Fx, Fy, Fz) yields (Fz*y - Fy*z, ...)
# Take the x component, x -> Fz*y - Fy*z
# Notice that Fy is negative, and Fz is positive. This is always true, that, for the forces, one lower dimension than the current is positive, and one higher is negative (cyclical relations)
# use this below
# Get dim above and below, wrapping round for below x and above z
dim_below = (dim - 1) % 3
dim_above = (dim + 1) % 3
coeff_dict = {
get_index(j_1, dim): 0,
# eg no effect of F_x in the x directional torque (not relevant statement, only here for readability)
get_index(j_1, dim_above): - r_1[dim_below], # eg multiply - z by Fy in the x direction
get_index(j_1, dim_below): r_1[dim_above], # eg multiply y by Fz in the x direction
# Reversed polarity for joint 2 as the desired force is - F2
get_index(j_2, dim_above): r_2[dim_below],
get_index(j_2, dim_below): - r_2[dim_above],
# Add the torques on each joint
get_index(j_1, is_force=False): T_1_dir[dim],
get_index(j_2, is_force=False): -T_2_dir[dim]
}
A.append(A_row(coeff_dict))
b.append(tau_net[dim])
weights.append(equation_weighting["Rotational"])
### SOLVE FORCES ON BODY. Note body defined so all joint forces/torques on it are positive
body = self.body
F_net = body.F_net[n_frame]
# BODY INERTIAL FORCES
for dim in [0, 1, 2]:
A.append(A_row({get_index(j, dim): 1 for j in self.body.start_joints + self.body.end_joints}))
b.append((F_net - body.mass * g_vec)[dim])
weights.append(equation_weighting["Inertial"])
# BODY ROTATIONAL FORCES - same as for bones
x_g = body.XG[n_frame]
tau_net = body.tau_net[n_frame]
# Improve above later, for now say all torques about y axis
T_dir = (0, 1, 0)
for dim in [0, 1, 2]:
coeff_dict = {}
for joint in body.start_joints + body.end_joints:
x_j = self.joint_pos[n_frame, joint]
r_j = (x_j - x_g) # position vector to centre
# Get dim above and below, wrapping round for below x and above z
dim_below, dim_above = (dim - 1) % 3, (dim + 1) % 3
coeff_dict[get_index(joint, dim_above)] = -r_j[dim_below] # eg multiply - z by Fy in the x direction
coeff_dict[get_index(joint, dim_below)] = r_j[dim_above] # eg multiply y by Fz in the x direction
coeff_dict[get_index(joint, is_force=False)] = T_dir[dim] # Add pure torque of pin
A.append(A_row(coeff_dict))
b.append(tau_net[dim])
weights.append(equation_weighting["Rotational"])
# print each line of the equations defined by A, b, with the final result
# Only print variables with both non-zero values, and non-zero coefficients
if report_equations:
print(f"----Frame {n_frame}----")
params = []
for joint in range(self.n_joints):
for dim in "xyz":
params.append(F"F_{joint}_{dim}") # Add forces by joint
for joint in range(self.n_joints):
params.append(F"T_{joint}") # Add torques by joint
for n, (coeffs, result) in enumerate(zip(A, b)):
s = []
for j, (coeff, param) in enumerate(zip(coeffs, params)):
if coeff != 0:
s.append(f"{round(coeff, 3)} * {param}")
# b_actual = np.dot(A[n], D)
# pct_error = abs(100 * (b_actual - result) / b_actual)
if n <= 7:
print(f"{' + '.join(s)} = {round(result, 3)}") # ({round(b_actual, 3)}) [{round(pct_error, 2)}%]")
return A, b, weights, bounds
def solve_forces(self, report_equations=False, end_frames_disregarded=5, prefix="",
save=True):
"""Solves the forces at each frame for the system, collects them and saves them to .npy files.
Note: Currently, due to smoothing, the first 5 and last 5 frames are disregarded"""
self.get_dynamics()
n_joints = self.n_joints
if report_equations:
print("Solving system...")
print(f"Total mass {round(self.total_mass, 2)} kg.")
# If dir doesn't exist, make it
dir = path_join(DataSources.dynamics_data, self.name)
if self.name not in os.listdir(DataSources.dynamics_data):
os.mkdir(dir)
forces, torques = [], []
f_shape, t_shape = (self.n_joints, 3), (self.n_joints,)
# Add zeros either end due to not being able to calculate for the first or last 2 frames
for i in range(end_frames_disregarded):
forces.append(np.zeros(f_shape))
torques.append(np.zeros(t_shape))
calc_forces = []
calc_torques = []
progress = tqdm(total=self.n_frames - 2 * end_frames_disregarded)
for n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):
A, b, weights, bounds = self.calculate_forces(n_frame, report_equations=report_equations)
D = weighted_bound_least_squares(A, b, weights, bounds, rcond=None)
f, tau = D[:(3 * n_joints)], D[(3 * n_joints):]
f, tau = f.reshape((n_joints, 3)), tau.reshape((n_joints))
calc_forces.append(f)
calc_torques.append(tau)
progress.update()
forces[end_frames_disregarded: - end_frames_disregarded] = calc_forces
torques += calc_torques
for i in range(end_frames_disregarded):
forces.append(np.zeros(f_shape))
torques.append(np.zeros(t_shape))
if save:
np.save(path_join(dir, prefix + "forces.npy"), forces)
np.save(path_join(dir, prefix + "torques.npy"), torques)
return np.array(forces), np.array(torques)
def get_com_position(self):
"""Calculates the position of the centre of mass of the whole system at each timestep"""
return sum(b.XG * b.mass for b in self.target_bones + [self.body]) / self.total_mass
def return_equations(self, end_frames_disregarded=5):
"""For each frame, return the equation vector b"""
self.get_dynamics()
bs = []
for n_frame in range(end_frames_disregarded, self.n_frames - end_frames_disregarded):
A, b, weights, bounds = self.calculate_forces(n_frame, report_equations=False)
bs.append(b)
return np.array(bs)
    def set_paw_equilibrium(self):
        """Get paw equilibrium from mocap data by finding the drop of the paw.

        Populates self.paw_disps: for each foot joint, a per-frame displacement
        from equilibrium (eps) used by the paw spring model; frames where the
        paw is off the ground keep a displacement of 0.

        This method will work for the current dataset, but is likely not robust, so can be replaced with
        a better method of finding the paw equilibrium at a later date"""
        if self.is_mocap:
            paw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]
        else:
            # NOTE(review): both branches are currently identical — presumably a
            # placeholder for a non-mocap (SMAL) source of paw heights; confirm.
            paw_z_heights = self.unsmoothed_data[:, self.foot_joints, 2]
        self.paw_disps = {}  # paw joint: displacement over time, for paw spring model
        min_contacts_detected = 3  # minimum requirement to use peak detection mode (currently unused below)
        plot = True  # hard-coded debug flag; shows a 2x2 figure of paw traces
        if plot:
            fig, axes = plt.subplots(nrows=2, ncols=2)
        # Detector is trained per data source: index 0 -> "smal", 1 -> "mocap".
        footfall_detector = FootfallDetector(train=False, load=True, name=["smal", "mocap"][self.is_mocap])
        for n, paw in enumerate(self.foot_joints):
            contact_ends_failed = False  # NOTE(review): never read afterwards
            disp = np.zeros((self.n_frames))  # will give eps - the displacement of the paw from equilibrium
            # for when the paw is in contact with the ground
            Z = paw_z_heights[:, n]
            # Per-frame ground-contact mask (>0 means paw judged on the ground).
            on_ground = footfall_detector.process_clip(Z)
            on_ground_idxs = np.where(on_ground > 0)[0]
            if plot:
                axes[n // 2, n % 2].plot(Z.mean() * (on_ground), color="red", alpha=0.3)
            min_footfall_width = 3  # 3 frames long minimum to count as a footfall
            # Split the contact indices into consecutive runs (= footfalls).
            footfalls = consecutive(on_ground_idxs)
            # 25th percentile of per-footfall peak heights acts as the height
            # at which a contact is considered to begin.
            trigger_height = np.percentile(np.array([Z[ff].max() for ff in footfalls]), 25)  # mean trigger height
            for footfall in footfalls:
                if len(footfall) > min_footfall_width:
                    # disp[footfall] = Z[footfall].max() - Z[footfall] # old
                    # Displacement is how far the paw has sunk below the trigger
                    # height, clipped so it can never be negative.
                    disp[footfall] = np.clip(trigger_height - Z[footfall], a_min=0, a_max=None)
            self.paw_disps[paw] = disp
            if plot:
                ax = axes[n // 2, n % 2]
                ax.plot(Z)
                # Overlay the height trace only where contact was detected.
                Z_on_ground = Z.copy()
                Z_on_ground[disp == 0] = np.nan
                ax.plot(Z_on_ground, color="green")
                ax.plot(disp)
                Z_smoothed = self.joint_pos[:, paw, 2]  # NOTE(review): unused; kept for debugging?
                ax.set_title(n)
        if plot:
            # Non-blocking show so callers are not stalled by the debug figure.
            plt.show(block=False)
            plt.draw()
            plt.pause(1e-8)
    def view_ground_displacements(self, deriv=0):
        """Plot and show a graph of vertical displacement against frames for each paw - identifying L0 for each paw

        :param deriv: 0 plots smoothed + raw paw heights with the L0 reference
            line; 1 plots the vertical velocity instead. Other values plot nothing.
        """
        fig, axes = plt.subplots(nrows=4)  # one subplot per foot joint
        for n, j in enumerate(self.foot_joints):
            label = foot_joint_labels[n]
            ax = axes[n]
            if deriv == 0:
                X = self.joint_pos[:, j, 2]  # smoothed vertical position
                X_unsmoothed = self.unsmoothed_data[:, j, 2]
                ax.plot(X)
                ax.plot(X_unsmoothed, alpha=.6)
                # ax.axhline(self.paw_equilibrium_values[j], ls = "--")
                # L0 is stored per front/rear group: first word of the label.
                ax.axhline(self.L0_paws[label.split(" ")[0]])
            elif deriv == 1:
                ax.plot(self.joint_vel[:, j, 2])  # vertical velocity
            ax.set_title(label)
        plt.show()
def view_com_displacements(self, deriv=0):
"""Plot and show graph of X, Y, and Z motion of CoM of dog.
If deriv > 0, plot that derivative of the displacement"""
fig, ax = plt.subplots()
com_data = self.get_com_position()
if deriv > 0:
com_data = nth_time_deriv(com_data, 1 / self.freq, n=deriv)
for i in [0, 1, 2]:
ax.plot(com_data[:, i], label="xyz"[i])
ax.legend()
plt.show()
    def calc_leg_lengths(self):
        """Uses the compliant-legged walking model estimation to work out the average length of legs.
        Assume legs are undeformed while off ground. Work out avg distance from leg to COM

        Populates self.leg_disps (per-paw vertical spring compression over time)
        and declares self.leg_vecs (left empty here)."""
        self.leg_disps = {}  # length of leg over time for each paw
        self.leg_vecs = {}  # normalised vector of leg spring direction for each paw
        plot = True  # hard-coded debug flag; shows a 2x2 figure of leg traces
        if plot: fig, axes = plt.subplots(nrows=2, ncols=2, sharex="all", sharey="row")
        for n, paw in enumerate(self.foot_joints):
            is_front = n < 2  # Assumes order of f left, f right, r left, r right
            tol = 1e-3
            # Contact masks derived from the paw spring displacements.
            on_ground = self.paw_disps[paw] > tol
            off_ground = self.paw_disps[paw] <= tol
            # centre_of_rot = self.body.XG[:]#self.body.X[:, int(is_front)]
            # centre_of_rot = self.unsmoothed_data[:, self.body_joints[is_front][n%2]]
            if self.is_mocap:
                centre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]
                paw_pos = self.unsmoothed_data[:, paw]
            else:
                # NOTE(review): both branches are currently identical — confirm
                # whether the non-mocap path should use different joints.
                centre_of_rot = self.unsmoothed_data[:, self.leg_spring_joints[n]]
                paw_pos = self.unsmoothed_data[:, paw]
            # Work in the sagittal (X-Z) plane only.
            X, Z = np.swapaxes(centre_of_rot[:, [0, 2]], 0, 1)  # get X, Z position of CoM
            X_PAW, Z_PAW = np.swapaxes(paw_pos[:, [0, 2]], 0, 1)  # get X, Z position of CoM
            THETA = np.arctan((X_PAW - X) / (Z - Z_PAW))  # angle between spring and vertical
            L = ((X - X_PAW) ** 2 + (Z - Z_PAW) ** 2) ** .5  # instantaneous leg length
            L0 = (L).max()  # natural (undeformed) length: max observed length
            # Decompose the compression (L - L0 <= 0) into vertical/horizontal parts.
            z_disp = (L - L0) * np.cos(THETA)
            x_disp = (L - L0) * np.sin(THETA)
            # get z displacement by footfall
            disp = np.zeros(self.n_frames)
            # if self.is_mocap:
            for ff in consecutive(np.where(on_ground)[0]):
                if len(ff) < 3: continue  # min width of footfall required
                # Compression relative to the least-compressed frame of this footfall.
                disp[ff] = z_disp[ff].max() - z_disp[ff]
            # else:
            #     disp = -z_disp
            self.leg_disps[paw] = disp
            if plot:
                ax = axes[n // 2, n % 2]
                # ax.plot(L)
                ax.plot(L - L0)
                ax.plot(disp, color="green")
        if plot:
            plt.tight_layout()
            # plt.show()
            # Non-blocking show so callers are not stalled by the debug figure.
            plt.show(block=False)
            plt.draw()
            plt.pause(1e-8)
def norm_kin_data(kin_data, targ_markers=None):
    """Normalise kinematic data to a fixed reference height.

    First shifts the data so the minimum of every axis sits at zero (note:
    this shift mutates the input array in place), then rescales so that the
    overall maximum height — or, if targ_markers is given, the mean height of
    those markers — equals the reference height."""
    norm_height = 0.4  # 0.635 # fixed to Ally height for now
    # Translate so the minimum of each axis is at the origin (in-place).
    for axis in (0, 1, 2):
        kin_data[:, :, axis] -= kin_data[:, :, axis].min()
    if targ_markers is None:
        height_ref = np.max(kin_data[:, :, 2])
    else:
        height_ref = kin_data[:, targ_markers, 2].mean()
    return norm_height * kin_data / height_ref
def get_dyn_data(dynamic_src, clip_length, mass, is_mocap=True, target_freq=100):
"""Loads and returns kinematic data"""
force_plate_data, | |
# Source repository: xiexie1993/Tool_Sublime_Text3_for_Windows
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""
koXMLDatasetInfo ties together the use of koXMLTreeService and
XML Catalog/DTD support in koCatalog to supply data handlers for determining
valid elements/attributes for the current position in the tree.
All tree arguments are cElementTree elements and should be the root element
of an XMLDocument from koXMLTreeService.
Note: most of this logic moved out of koXMLCompletionBase.py in order to
allow testing outside of Komodo.
"""
import sys
import os
import logging
import koXMLTreeService
from koCatalog import CatalogResolver
log = logging.getLogger("koXMLDatasetInfo")
class EmptyDatasetHandler:
    """Fallback completion handler used when no dataset (DTD/catalog) is known.

    Suggestions are derived purely from the elements and attributes that
    already occur in the parsed tree."""

    def tagnames(self, tree, node=None):
        """Return the non-empty tag names already seen in *node*'s namespace."""
        target = tree.current if node is None else node
        namespace = tree.namespace(target) if target is not None else ""
        known = tree.tags.get(namespace, {})
        return [name for name in known.keys() if name]

    def attrs(self, tree, node=None):
        """Merge the attributes of every element whose tag matches *node*'s."""
        if node is None:
            node = tree.current
        wanted_tag = node.tag.lower()
        merged = {}
        # now, get all attributes from all the matching tags
        for candidate in tree.nodes:
            if candidate.tag.lower() == wanted_tag:
                merged.update(candidate.attrib)
        return merged.keys()

    def values(self, attrname, tree, node=None):
        """Without a dataset there is no way to suggest attribute values."""
        return []
class DataSetHandler(EmptyDatasetHandler):
    """Completion handler backed by a resolved dataset (DTD / XML catalog)."""

    def __init__(self, namespace, dataset):
        self.namespace = namespace
        self.dataset = dataset

    def getnamespace(self, tree):
        """Return the namespace to use for *tree*.

        When this handler was created without a namespace (e.g. from a doctype
        only), fall back to the document's top-level namespace. The fallback
        is never stored on self, since it can change from document to
        document."""
        if not self.namespace and tree.root is not None:
            return tree.root.ns
        return self.namespace

    def tagnames(self, tree, node=None):
        """Return the child tag names the dataset allows at *node*."""
        namespace = self.getnamespace(tree)
        if node is None:
            node = tree.current
        if node is None:
            # No context element: offer the possible root elements.
            return self.dataset.possible_children()
        start_node = node
        # Walk up the ancestry looking for an element the dataset knows,
        # restricted to our namespace (or namespace-less elements).
        while node is not None:
            ns = tree.namespace(node)
            if node.localName and (not ns or ns.lower() == namespace.lower()):
                if self.dataset.element_info(node.localName):
                    return self.dataset.possible_children(node.localName)
            node = tree.parent(node)
        # Nothing matched with the namespace filter; retry the starting node
        # ignoring namespaces before giving up entirely.
        if self.dataset.element_info(start_node.localName):
            return self.dataset.possible_children(start_node.localName)
        return self.dataset.all_element_types()

    def attrs(self, tree, node=None):
        """Return the attributes the dataset allows on *node*'s element."""
        if node is None:
            node = tree.current
        return self.dataset.possible_attributes(node.localName)

    def values(self, attrname, tree, node=None):
        """Return the values the dataset allows for *attrname* on *node*."""
        if node is None:
            node = tree.current
        return self.dataset.possible_attribute_values(node.localName, attrname)
class DatasetHandlerService:
    """Creates and caches dataset handlers keyed by namespace or (public, system) id.

    NOTE: the handler cache is a class attribute, shared by every instance;
    setCatalogs() resets it for all instances at once."""
    handlers = {}  # shared cache: namespace or (publicId, systemId) -> handler
    resolver = None

    def __init__(self):
        self._default_public_ids = {
            "HTML": "-//W3C//DTD HTML 5//EN",
        }
        self._default_namespace_ids = {}
        self.defaultHandler = EmptyDatasetHandler()
        self.resolver = CatalogResolver()

    def setCatalogs(self, catalogs):
        """Point the resolver at *catalogs* and drop every cached handler."""
        self.resolver.resetCatalogs(catalogs)
        DatasetHandlerService.handlers = {}

    def getDefaultPublicId(self, lang, env):
        """Return the default public id for *lang*, honouring an env pref if set."""
        public_id = self._default_public_ids.get(lang, None)
        if env:
            public_id = env.get_pref("default%sDecl" % (lang,), public_id)
        return public_id

    def setDefaultPublicId(self, lang, public_id):
        """Register *public_id* as the default declaration for *lang*."""
        self._default_public_ids[lang] = public_id

    def getDefaultNamespace(self, lang, env):
        """Return the default namespace for *lang*, honouring an env pref if set."""
        namespace = self._default_namespace_ids.get(lang, None)
        if env:
            namespace = env.get_pref("default%sNamespace" % (lang,), namespace)
        return namespace

    def setDefaultNamespace(self, lang, namespace):
        """Register *namespace* as the default namespace for *lang*."""
        self._default_namespace_ids[lang] = namespace

    def createDatasetHandler(self, publicId, systemId, namespace):
        """Resolve a dataset for the given ids and cache a handler for it."""
        dataset = self.resolver.getDataset(publicId, systemId, namespace)
        handler = DataSetHandler(namespace, dataset) if dataset else EmptyDatasetHandler()
        if namespace:
            self.handlers[namespace] = handler
        if publicId or systemId:
            self.handlers[(publicId, systemId)] = handler
        return handler

    def getDocumentHandler(self, publicId=None, systemId=None, namespace=None):
        """Return a cached or freshly created handler for the given ids.

        Lookup order: namespace first, then (publicId, systemId); an
        EmptyDatasetHandler is returned when nothing matches."""
        if namespace:
            handler = self.handlers.get(namespace)
            if handler is None:
                handler = self.createDatasetHandler(publicId, systemId, namespace)
            if handler:
                return handler
        if publicId or systemId:
            key = (publicId, systemId)
            handler = self.handlers.get(key)
            if handler is None:
                handler = self.createDatasetHandler(publicId, systemId, namespace)
            if handler:
                return handler
        return EmptyDatasetHandler()
__datasetSvc = None


def getService():
    """Return the module-wide DatasetHandlerService, creating it on first use."""
    global __datasetSvc
    if __datasetSvc is None:
        __datasetSvc = DatasetHandlerService()
    return __datasetSvc
def get_tree_handler(tree, node=None, default=None):
    """Return the document handler for *tree*.

    Prefers the namespace of *node* (or of the tree root when *node* is None);
    otherwise falls back to the doctype's public/system ids. When the document
    declares none of these, *default* — a (publicId, systemId, namespace)
    tuple — supplies the lookup keys.
    """
    # if we have a namespace, use it, otherwise, fallback to the doctype
    namespace = None
    if node is None:
        node = tree.root
    if node is not None:
        namespace = tree.namespace(node)
    # Lazy %-style arguments: the message is only formatted when INFO logging
    # is enabled (the original eagerly formatted the string on every call).
    log.info("getting handler for (%s,%s,%s)",
             tree.publicId, tree.systemId, namespace)
    publicId = tree.publicId
    systemId = tree.systemId
    if not (publicId or systemId or namespace) and default:
        publicId = default[0]
        systemId = default[1]
        namespace = default[2]
    return getService().getDocumentHandler(publicId, systemId, namespace)
if __name__ == "__main__":
import sys
import os
# basic logging configuration
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# utility functions for testing, these are *SIMILAR* to codeintel lang_xml
default_completion = {'HTML': ('-//W3C//DTD XHTML 1.0 Strict//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd',
'http://www.w3.org/1999/xhtml')}
def getDefaultCompletion(tree, node, lang):
if lang == "XSLT":
if node is not None and not tree.namespace(node):
# do we have an output element, if so, figure out if we're html
# cheap way to get the output element
output = tree.tags.get(
'http://www.w3.org/1999/XSL/Transform', []).get('output')
if output is not None:
lang = output.attrib.get('method').upper()
publicId = output.attrib.get('doctype-public')
systemId = output.attrib.get('doctype-system')
default_dataset_info = default_completion.get(lang)
if publicId or systemId:
default_dataset_info = (
publicId, systemId, default_dataset_info[2])
return default_dataset_info
return None
return default_completion.get(lang)
def getValidTagNames(text, uri=None, lang=None):
"""getValidTagNames
return a list of valid element names that can be inserted at the end
of the text segment
"""
tree = koXMLTreeService.getService().getTreeForURI(uri, text)
default_dataset_info = getDefaultCompletion(tree, tree.current, lang)
handlerclass = get_tree_handler(
tree, tree.current, default_dataset_info)
tagnames = handlerclass.tagnames(tree)
if not tagnames:
return None
tagnames.sort()
return tagnames
def getOpenTagName(text, uri=None):
"""getOpenTagName
return the current tag name
"""
tree = koXMLTreeService.getService().getTreeForURI(uri, text)
if tree.current is None:
return None
return tree.tagname(tree.current)
def getValidAttributes(text, uri=None, lang=None):
"""getValidAttributes
get the current tag, and return the attributes that are allowed in that
element
"""
tree = koXMLTreeService.getService().getTreeForURI(uri, text)
if tree.current is None:
return None
already_supplied = tree.current.attrib.keys()
handlerclass = get_tree_handler(
tree, tree.current, default_completion.get(lang))
attrs = handlerclass.attrs(tree)
if not attrs:
return None
attrs = [name for name in attrs if name not in already_supplied]
attrs.sort()
return attrs
def getValidAttributeValues(text, attr, uri=None, lang=None):
"""getValidAttributeValues
get the current attribute, and return the values that are allowed in that
attribute
"""
tree = koXMLTreeService.getService().getTreeForURI(uri, text)
if tree.current is None:
return None
handlerclass = get_tree_handler(
tree, tree.current, default_completion.get(lang))
values = handlerclass.values(attr, tree)
if not values:
return None
values.sort()
return values
# configure catalogs to use
basedir = os.path.dirname(os.path.dirname(os.getcwd()))
catalogs = os.path.join(basedir, "test", "stuff", "xml")
getService().setCatalogs([os.path.join(catalogs, "testcat.xml")])
from cElementTree import Element
tree = koXMLTreeService.XMLDocument()
tree.root = tree.current = Element('')
handlerclass = get_tree_handler(tree, tree.current)
assert handlerclass != None, "no handler class for empty tree"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="html" indent="yes"/>
<html> <
"""
tags = getValidTagNames(xml, lang="XSLT")
assert tags == ['body', 'head'], \
"invalid output tags for stylesheet"
xml = "<"
assert getValidTagNames(xml) == None, "invalid children for html"
xml = """<html>
<body>
<scr"""
assert "script" in getValidTagNames(
xml, lang="HTML"), "invalid children for body"
html = """<!DOCTYPE | |
= self._getTraverseReport.execute(params)
traverseNodes = []
#TraverseOder is a dict containing the members along the traverse path, including their linear pisition in the "position" attribute.
#traverseNeighbors contains everything that was noted as a neighbor of any node while traversing. Their linear position is "-1"
#In principle, the traverse order members should also exist in that dict as members as well
#We Want to merge traverseOrder into traverseNeighbors, without revertying any existing traverseOrder member positions to -1
for neighborKey in traverseNeighbors:
if neighborKey not in traverseOrder:
traverseOrder[neighborKey] = traverseNeighbors[neighborKey]
for nodeKey in traverseOrder.keys():
traverseNodes.append(traverseOrder[nodeKey])
fullReport = {"nodes" : traverseNodes, "links" : traverseLinks }
return fullReport
def getTraverseReportJSON(self, entityUUID, traversePath, isMeme = True, linkType = 0):
#getTraverseReport(self, splitPath, isMeme, lthLevel = 0, linkType = 0, excludeLinks = [], returnUniqueValuesOnly = True, excludeCluster = []):
returnUniqueValuesOnly = True
params = [entityUUID, traversePath, isMeme, linkType, returnUniqueValuesOnly]
traverseLinks, traverseNeighbors, traverseOrder = self._getTraverseReport.execute(params)
traverseNodes = []
#TraverseOder is a dict containing the members along the traverse path, including their linear pisition in the "position" attribute.
#traverseNeighbors contains everything that was noted as a neighbor of any node while traversing. Their linear position is "-1"
#In principle, the traverse order members should also exist in that dict as members as well
#We Want to merge traverseOrder into traverseNeighbors, without revertying any existing traverseOrder member positions to -1
for neighborKey in traverseNeighbors:
if neighborKey not in traverseOrder:
traverseOrder[neighborKey] = traverseNeighbors[neighborKey]
for nodeKey in traverseOrder.keys():
traverseNodes.append(traverseOrder[nodeKey])
fullReport = {"nodes" : traverseNodes, "links" : traverseLinks }
fullReportJSON = json.dumps(fullReport)
return fullReportJSON
def getMemeExists(self, memePath):
try:
params = [memePath]
memeExists = self._getMemeExists.execute(params)
return memeExists
except Exception as e:
exception = "getMemeExists(%s) error %s" %(memePath, e)
raise Exceptions.ScriptError(exception)
def instantiateEntity(self, entityUUID):
try:
params = [entityUUID]
unusedMemeExists = self._instantiateEntity.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: instantiateEntity(%s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, e)
except:
exception = "Action on entity of unknown type: instantiateEntity(%s) . Possible reason is that entity is not in database. traceback = %s" %(entityUUID, e)
raise Exceptions.ScriptError(exception)
def removeEntityLink(self, entityUUID, memberUUID):
try:
params = [entityUUID, memberUUID]
returnArray = self._removeEntityLink.execute(params)
return returnArray
except Exceptions.EventScriptFailure as e:
raise e
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: removeEntityLink(%s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, memberUUID, e)
except:
exception = "Action on entity of unknown type: removeEntityLink(%s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, memberUUID, e)
raise Exceptions.ScriptError(exception)
def removeAllCounterpartsOfType(self, entityUUID, memePath):
try:
params = [entityUUID, memePath]
unusedMemeExists = self._removeAllCounterpartsOfType.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: removeAllCounterpartsOfType(%s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, memePath, e)
except:
exception = "Action on entity of unknown type: removeAllCounterpartsOfType(%s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, memePath, e)
raise Exceptions.ScriptError(exception)
def removeAllCounterpartsOfTag(self, entityUUID, tag):
try:
params = [entityUUID, tag]
unusedMemeExists = self._removeAllCounterpartsOfTag.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: removeAllCounterpartsOfTag(%s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, tag, e)
except:
exception = "Action on entity of unknown type: removeAllCounterpartsOfTag(%s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, tag, e)
raise Exceptions.ScriptError(exception)
def removeAllCustomPropertiesFromEntity(self, entityUUID):
try:
params = [entityUUID]
unusedMemeExists = self._removeAllCustomPropertiesFromEntity.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: removeAllCustomPropertiesFromEntity(%s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, e)
except:
exception = "Action on entity of unknown type: removeAllCustomPropertiesFromEntity(%s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, e)
raise Exceptions.ScriptError(exception)
def removeEntityProperty(self, entityUUID, propertyName, drilldown = False):
try:
params = [entityUUID, propertyName, drilldown]
unusedMemeExists = self._removeEntityProperty.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: removeEntityProperty(%s, %s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, propertyName, drilldown, e)
except:
exception = "Action on entity of unknown type: removeEntityProperty(%s, %s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, propertyName, drilldown, e)
raise Exceptions.ScriptError(exception)
def removeEntityTaxonomy(self, entityUUID, taxonomy):
try:
params = [entityUUID, taxonomy]
unusedMemeExists = self._removeEntityTaxonomy.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: removeEntityTaxonomy(%s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, taxonomy, e)
except:
exception = "Action on entity of unknown type: removeEntityTaxonomy(%s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, taxonomy, e)
raise Exceptions.ScriptError(exception)
def revertEntity(self, entityUUID, drilldown = False):
try:
params = [entityUUID, drilldown]
unusedMemeExists = self._revertEntity.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: revertEntity(%s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, drilldown, e)
except:
exception = "Action on entity of unknown type: revertEntity(%s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, drilldown, e)
raise Exceptions.ScriptError(exception)
def revertEntityPropertyValues(self, entityUUID, drilldown = False):
try:
params = [entityUUID, drilldown]
unusedMemeExists = self._revertEntityPropertyValues.execute(params)
except Exception as e:
exception = None
try:
entity = self.getEntity(entityUUID)
exception = "Action on %s entity: revertEntityPropertyValues(%s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, drilldown, e)
except:
exception = "Action on entity of unknown type: revertEntityPropertyValues(%s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, drilldown, e)
raise Exceptions.ScriptError(exception)
def setEntityPropertyValue(self, entityUUID, propertyName, propertyValue):
    """Set a property value on the given entity and return the command's result.

    :param entityUUID: UUID of the entity to modify.
    :param propertyName: name of the property to set.
    :param propertyValue: new value for the property.
    :raises Exceptions.EventScriptFailure: re-raised unchanged from the command.
    :raises Exceptions.ScriptError: wrapping any other underlying failure.
    """
    try:
        params = [entityUUID, propertyName, propertyValue]
        return self._setEntityPropertyValue.execute(params)
    except Exceptions.EventScriptFailure as e:
        # Event-script failures carry their own context; propagate untouched.
        raise e
    except Exception as e:
        try:
            entity = self.getEntity(entityUUID)
            exception = "Action on %s entity: setEntityPropertyValue(%s, %s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, propertyName, propertyValue, e)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt pass through.
            exception = "Action on entity of unknown type: setEntityPropertyValue(%s, %s, %s). Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, propertyName, propertyValue, e)
        raise Exceptions.ScriptError(exception)
def setStateEventScript(self, entityUUID, scriptLocation):
    """Attach a state-event script, identified by location, to the given entity.

    :param entityUUID: UUID of the entity to modify.
    :param scriptLocation: location identifier of the script to attach.
    :raises Exceptions.ScriptError: wrapping any underlying failure.
    """
    try:
        params = [entityUUID, scriptLocation]
        self._setStateEventScript.execute(params)
    except Exception as e:
        try:
            entity = self.getEntity(entityUUID)
            exception = "Action on %s entity: setStateEventScript(%s, %s) traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, scriptLocation, e)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt pass through.
            exception = "Action on entity of unknown type: setStateEventScript(%s, %s) . Possible reason is that entity is not in repository. traceback = %s" %(entityUUID, scriptLocation, e)
        raise Exceptions.ScriptError(exception)
def installPythonExecutor(self, entityUUID, callableObject):
    """Install a Python callable as the executor object for the given entity.

    :param entityUUID: UUID of the entity to modify.
    :param callableObject: the callable to install. NOTE(review): callability
        is not enforced here (original had the check commented out); the
        underlying command is trusted to reject bad input.
    :raises Exceptions.ScriptError: wrapping any underlying failure.
    """
    try:
        params = [entityUUID, callableObject]
        self._installPythonExecutor.execute(params)
    except Exception as e:
        try:
            entity = self.getEntity(entityUUID)
            exception = "Action on %s entity %s: installPythonExecutor. Traceback = %s" %(entity.memePath.fullTemplatePath, entityUUID, e)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt pass through.
            exception = "Action on entity of unknown type %s: installPythonExecutor. Possible reason is that entity is not in repository. Traceback = %s" %(entityUUID, e)
        raise Exceptions.ScriptError(exception)
""" Passing either a UUID of a specific entity, or the fully resolved path of a singleton in entityUUID
If it is a fully resolved path that is passed and the entity does not exist yet (i.e. the meme is not a singleton),
then this method will also force its creation.
supressInit determines whether or not to initialize entities that are created for evaluation """
def evaluateEntity(self, entityUUID, runtimeVariables = {}, actionID = None, subjectID = None, objectID = (), supressInit = False):
try:
#ToDo - fully resolved path of singleton still broken here
params = {"entityID" : entityUUID, "runtimeVariables" : runtimeVariables, "actionID":actionID, "subjectID":subjectID, "objectID":objectID, "supressInit":supressInit}
evalResult = self._evaluateEntity.execute(entityUUID, params)
return evalResult
except Exceptions.EventScriptFailure as e:
raise e
except Exceptions.ScriptError as e:
raise | |
from graphistry.Plottable import Plottable
from typing import Any, Callable, List, Optional, Union, TYPE_CHECKING
import copy, hashlib, logging, numpy as np, pandas as pd, pyarrow as pa, sys, uuid
from functools import lru_cache
from weakref import WeakValueDictionary
from .util import (error, in_ipython, make_iframe, random_string, warn)
from .bolt_util import (
bolt_graph_to_edges_dataframe,
bolt_graph_to_nodes_dataframe,
node_id_key,
start_node_id_key,
end_node_id_key,
to_bolt_driver)
from .arrow_uploader import ArrowUploader
from .nodexlistry import NodeXLGraphistry
from .tigeristry import Tigeristry
maybe_cudf = None
try:
import cudf
maybe_cudf = cudf
except ImportError:
1
maybe_dask_dataframe = None
try:
import dask.dataframe
maybe_dask_dataframe = dask.dataframe
except ImportError:
1
maybe_dask_cudf = None
try:
import dask_cudf
maybe_dask_cudf = dask_cudf
except ImportError:
1
# Module-level logger shared by all Plotter instances.
logger = logging.getLogger('Plotter')

# Maximum number of recent coercion results kept alive by cache_coercion().
CACHE_COERCION_SIZE = 100

# Staging slot read by cache_coercion_helper() on a cache miss; written by
# cache_coercion() immediately before each lookup.
_cache_coercion_val = None
@lru_cache(maxsize=CACHE_COERCION_SIZE)
def cache_coercion_helper(k):
    # On a miss for key `k`, memoize and return whatever value was staged in
    # the module-level _cache_coercion_val; on a hit, the staged value is
    # ignored and the previously memoized result is returned. Only meaningful
    # when called through cache_coercion(), which stages the value first.
    return _cache_coercion_val
def cache_coercion(k, v):
    """
    Holds references to last 100 used coercions
    Use with weak key/value dictionaries for actual lookups

    Stages `v` in the module-level slot, then routes through the lru_cache'd
    helper keyed on `k`: a miss memoizes `v`, a hit returns the value cached
    for `k` earlier (the staged `v` is discarded). Keeping a strong reference
    here prevents the weak-dict entries from being collected.
    """
    global _cache_coercion_val
    _cache_coercion_val = v
    return cache_coercion_helper(k)
class WeakValueWrapper:
    """Minimal holder object so arbitrary values can be stored in a
    WeakValueDictionary (which requires weak-referenceable values)."""

    def __init__(self, v):
        """Wrap `v`, exposing it as the `.v` attribute."""
        self.v = v
class PlotterBase(Plottable):
"""Graph plotting class.
Created using ``Graphistry.bind()``.
Chained calls successively add data and visual encodings, and end with a plot call.
To streamline reuse and replayable notebooks, Plotter manipulations are immutable. Each chained call returns a new instance that derives from the previous one. The old plotter or the new one can then be used to create different graphs.
When using memoization, for .register(api=3) sessions with .plot(memoize=True), Pandas/cudf arrow coercions are memoized, and file uploads are skipped on same-hash dataframes.
The class supports convenience methods for mixing calls across Pandas, NetworkX, and IGraph.
"""
_defaultNodeId = '__nodeid__'
_pd_hash_to_arrow : WeakValueDictionary = WeakValueDictionary()
_cudf_hash_to_arrow : WeakValueDictionary = WeakValueDictionary()
def __init__(self, *args, **kwargs):
    """Initialize an empty plotter: no data bindings, default render settings,
    no metadata, and no integration drivers attached yet."""
    super(PlotterBase, self).__init__()
    # Bindings: column names (or data) to be resolved at plot time.
    self._edges : Any = None
    self._nodes : Any = None
    self._source : Optional[str] = None
    self._destination : Optional[str] = None
    self._node : Optional[str] = None
    self._edge_title : Optional[str] = None
    self._edge_label : Optional[str] = None
    self._edge_color : Optional[str] = None
    self._edge_source_color : Optional[str] = None
    self._edge_destination_color : Optional[str] = None
    self._edge_size : Optional[str] = None
    self._edge_weight : Optional[str] = None
    self._edge_icon : Optional[str] = None
    self._edge_opacity : Optional[str] = None
    self._point_title : Optional[str] = None
    self._point_label : Optional[str] = None
    self._point_color : Optional[str] = None
    self._point_size : Optional[str] = None
    self._point_weight : Optional[str] = None
    self._point_icon : Optional[str] = None
    self._point_opacity : Optional[str] = None
    self._point_x : Optional[str] = None
    self._point_y : Optional[str] = None
    # Settings
    self._height : int = 500
    self._render : bool = True
    self._url_params : dict = {'info': 'true'}
    # Metadata
    self._name : Optional[str] = None
    self._description : Optional[str] = None
    self._style : Optional[dict] = None
    self._complex_encodings : dict = {
        'node_encodings': {'current': {}, 'default': {} },
        'edge_encodings': {'current': {}, 'default': {} }
    }
    # Integrations (annotations fixed: were the builtin `any` function, not typing.Any)
    self._bolt_driver : Any = None
    self._tigergraph : Any = None
def __repr__(self):
    """Render current bindings and settings as a dict-shaped string,
    pretty-printed when running inside IPython."""
    binding_names = ['edges', 'nodes', 'source', 'destination', 'node',
            'edge_label', 'edge_color', 'edge_size', 'edge_weight', 'edge_title', 'edge_icon', 'edge_opacity',
            'edge_source_color', 'edge_destination_color',
            'point_label', 'point_color', 'point_size', 'point_weight', 'point_title', 'point_icon', 'point_opacity',
            'point_x', 'point_y']
    setting_names = ['height', 'url_params']
    # All bound values live on underscore-prefixed attributes.
    rep = {
        'bindings': {name: getattr(self, '_' + name) for name in binding_names},
        'settings': {name: getattr(self, '_' + name) for name in setting_names},
    }
    if in_ipython():
        from IPython.lib.pretty import pretty
        return pretty(rep)
    return str(rep)
def addStyle(self, fg=None, bg=None, page=None, logo=None):
    """Set general visual styles
    See .bind() and .settings(url_params={}) for additional styling options, and style() for another way to set the same attributes.
    To facilitate reuse and replayable notebooks, the addStyle() call is chainable. Invocation does not effect the old style: it instead returns a new Plotter instance with the new styles added to the existing ones. Both the old and new styles can then be used for different graphs.
    addStyle() will extend the existing style settings, while style() will replace any in the same group
    :param fg: Dictionary {'blendMode': str} of any valid CSS blend mode
    :type fg: dict
    :param bg: Nested dictionary of page background properties. {'color': str, 'gradient': {'kind': str, 'position': str, 'stops': list }, 'image': { 'url': str, 'width': int, 'height': int, 'blendMode': str }
    :type bg: dict
    :param logo: Nested dictionary of logo properties. { 'url': str, 'autoInvert': bool, 'position': str, 'dimensions': { 'maxWidth': int, 'maxHeight': int }, 'crop': { 'top': int, 'left': int, 'bottom': int, 'right': int }, 'padding': { 'top': int, 'left': int, 'bottom': int, 'right': int}, 'style': str}
    :type logo: dict
    :param page: Dictionary of page metadata settings. { 'favicon': str, 'title': str }
    :type page: dict
    :returns: Plotter
    :rtype: Plotter
    **Example: Chained merge - results in color, blendMode, and url being set**
    ::
    g2 = g.addStyle(bg={'color': 'black'}, fg={'blendMode': 'screen'})
    g3 = g2.addStyle(bg={'image': {'url': 'http://site.com/watermark.png'}})
    **Example: Overwrite - results in blendMode multiply**
    ::
    g2 = g.addStyle(fg={'blendMode': 'screen'})
    g3 = g2.addStyle(fg={'blendMode': 'multiply'})
    **Example: Gradient background**
    ::
    g.addStyle(bg={'gradient': {'kind': 'linear', 'position': 45, 'stops': [['rgb(0,0,0)', '0%'], ['rgb(255,255,255)', '100%']]}})
    **Example: Page settings**
    ::
    g.addStyle(page={'title': 'Site - {{ name }}', 'favicon': 'http://site.com/logo.ico'})
    """
    # Work on a deep copy so the original plotter's style is untouched.
    style = copy.deepcopy(self._style or {})
    for group, value in {'fg': fg, 'bg': bg, 'page': page, 'logo': logo}.items():
        if value is None:
            continue
        if isinstance(value, dict):
            # Merge dict groups key-by-key over any existing entries.
            merged = style.get(group) or {}
            merged.update(value)
            style[group] = merged
        else:
            style[group] = value
    res = self.bind()
    res._style = style
    return res
def style(self, fg=None, bg=None, page=None, logo=None):
    """Set general visual styles
    See .bind() and .settings(url_params={}) for additional styling options, and addStyle() for another way to set the same attributes.
    To facilitate reuse and replayable notebooks, the style() call is chainable. Invocation does not effect the old style: it instead returns a new Plotter instance with the new styles added to the existing ones. Both the old and new styles can then be used for different graphs.
    style() will fully replace any defined parameter in the existing style settings, while addStyle() will merge over previous values
    :param fg: Dictionary {'blendMode': str} of any valid CSS blend mode
    :type fg: dict
    :param bg: Nested dictionary of page background properties. {'color': str, 'gradient': {'kind': str, 'position': str, 'stops': list }, 'image': { 'url': str, 'width': int, 'height': int, 'blendMode': str }
    :type bg: dict
    :param logo: Nested dictionary of logo properties. { 'url': str, 'autoInvert': bool, 'position': str, 'dimensions': { 'maxWidth': int, 'maxHeight': int }, 'crop': { 'top': int, 'left': int, 'bottom': int, 'right': int }, 'padding': { 'top': int, 'left': int, 'bottom': int, 'right': int}, 'style': str}
    :type logo: dict
    :param page: Dictionary of page metadata settings. { 'favicon': str, 'title': str }
    :type page: dict
    :returns: Plotter
    :rtype: Plotter
    **Example: Chained merge - results in url and blendMode being set, while color is dropped**
    ::
    g2 = g.style(bg={'color': 'black'}, fg={'blendMode': 'screen'})
    g3 = g2.style(bg={'image': {'url': 'http://site.com/watermark.png'}})
    **Example: Gradient background**
    ::
    g.style(bg={'gradient': {'kind': 'linear', 'position': 45, 'stops': [['rgb(0,0,0)', '0%'], ['rgb(255,255,255)', '100%']]}})
    **Example: Page settings**
    ::
    g.style(page={'title': 'Site - {{ name }}', 'favicon': 'http://site.com/logo.ico'})
    """
    # Deep-copy, then wholesale replace each provided group (no per-key merge).
    style = copy.deepcopy(self._style or {})
    updates = {'fg': fg, 'bg': bg, 'page': page, 'logo': logo}
    style.update({group: value for group, value in updates.items() if value is not None})
    res = self.bind()
    res._style = style
    return res
def encode_point_color(self, column,
palette=None, as_categorical=None, as_continuous=None, categorical_mapping=None, default_mapping=None,
for_default=True, for_current=False):
"""Set point color with more control than bind()
:param column: Data column name
:type column: str
:param palette: Optional list of color-like strings. Ex: ["black, "#FF0", "rgb(255,255,255)" ]. Used as a gradient for continuous and round-robin for categorical.
:type palette: Optional[list]
:param as_categorical: Interpret column values as categorical. Ex: Uses palette via round-robin when more values than palette entries.
:type as_categorical: Optional[bool]
:param as_continuous: Interpret column values as continuous. Ex: Uses palette for an interpolation gradient when more values than palette entries.
:type as_continuous: Optional[bool]
:param categorical_mapping: Mapping from column values to color-like strings. Ex: {"car": "red", "truck": #000"}
:type categorical_mapping: Optional[dict]
:param default_mapping: Augment categorical_mapping with mapping for values not in categorical_mapping. Ex: default_mapping="gray".
:type default_mapping: Optional[str]
:param for_default: Use encoding for when no user override is set. Default on.
:type for_default: Optional[bool]
:param for_current: Use encoding as currently active. Clearing the active | |
self.validate_required(self.creation_time, 'creation_time')
self.validate_required(self.complete_time, 'complete_time')
self.validate_required(self.transcode_progress, 'transcode_progress')
self.validate_required(self.input_file_url, 'input_file_url')
self.validate_required(self.priority, 'priority')
self.validate_required(self.error_code, 'error_code')
self.validate_required(self.error_message, 'error_message')
self.validate_required(self.definition, 'definition')
self.validate_required(self.output_file, 'output_file')
if self.output_file:
self.output_file.validate()
def to_map(self):
    """Serialize this transcode job entry to its wire-format dictionary."""
    result = {
        'TranscodeJobId': self.transcode_job_id,
        'TranscodeTemplateId': self.transcode_template_id,
        'TranscodeJobStatus': self.transcode_job_status,
        'CreationTime': self.creation_time,
        'CompleteTime': self.complete_time,
        'TranscodeProgress': self.transcode_progress,
        'InputFileUrl': self.input_file_url,
        'Priority': self.priority,
        'ErrorCode': self.error_code,
        'ErrorMessage': self.error_message,
        'Definition': self.definition,
    }
    output_file = self.output_file
    result['OutputFile'] = output_file.to_map() if output_file is not None else None
    return result
def from_map(self, map={}):
    """Populate this transcode job entry from a wire-format dictionary and return self."""
    for attr, key in (
        ('transcode_job_id', 'TranscodeJobId'),
        ('transcode_template_id', 'TranscodeTemplateId'),
        ('transcode_job_status', 'TranscodeJobStatus'),
        ('creation_time', 'CreationTime'),
        ('complete_time', 'CompleteTime'),
        ('transcode_progress', 'TranscodeProgress'),
        ('input_file_url', 'InputFileUrl'),
        ('priority', 'Priority'),
        ('error_code', 'ErrorCode'),
        ('error_message', 'ErrorMessage'),
        ('definition', 'Definition'),
    ):
        setattr(self, attr, map.get(key))
    # Nested output-file payload gets its own model; absent/None stays None.
    if map.get('OutputFile') is not None:
        temp_model = GetTranscodeTaskResponseTranscodeTaskTranscodeJobInfoListOutputFile()
        self.output_file = temp_model.from_map(map['OutputFile'])
    else:
        self.output_file = None
    return self
class GetTranscodeTaskResponseTranscodeTask(TeaModel):
    """Transcode task details within the GetTranscodeTask response."""
    def __init__(self, transcode_task_id=None, video_id=None, task_status=None, creation_time=None, complete_time=None, trigger=None, transcode_template_group_id=None, transcode_job_info_list=None):
        self.transcode_task_id = transcode_task_id
        self.video_id = video_id
        self.task_status = task_status
        self.creation_time = creation_time
        self.complete_time = complete_time
        self.trigger = trigger
        self.transcode_template_group_id = transcode_template_group_id
        # Fix: honor the constructor argument (it was previously accepted but
        # silently discarded and always reset to []); default stays [].
        self.transcode_job_info_list = transcode_job_info_list if transcode_job_info_list is not None else []

    def validate(self):
        """Assert required fields are present, recursing into job entries."""
        self.validate_required(self.transcode_task_id, 'transcode_task_id')
        self.validate_required(self.video_id, 'video_id')
        self.validate_required(self.task_status, 'task_status')
        self.validate_required(self.creation_time, 'creation_time')
        self.validate_required(self.complete_time, 'complete_time')
        self.validate_required(self.trigger, 'trigger')
        self.validate_required(self.transcode_template_group_id, 'transcode_template_group_id')
        self.validate_required(self.transcode_job_info_list, 'transcode_job_info_list')
        if self.transcode_job_info_list:
            for k in self.transcode_job_info_list:
                if k:
                    k.validate()

    def to_map(self):
        """Serialize this task to its wire-format dictionary."""
        result = {}
        result['TranscodeTaskId'] = self.transcode_task_id
        result['VideoId'] = self.video_id
        result['TaskStatus'] = self.task_status
        result['CreationTime'] = self.creation_time
        result['CompleteTime'] = self.complete_time
        result['Trigger'] = self.trigger
        result['TranscodeTemplateGroupId'] = self.transcode_template_group_id
        result['TranscodeJobInfoList'] = []
        if self.transcode_job_info_list is not None:
            for k in self.transcode_job_info_list:
                result['TranscodeJobInfoList'].append(k.to_map() if k else None)
        else:
            result['TranscodeJobInfoList'] = None
        return result

    def from_map(self, map={}):
        """Populate this task from a wire-format dictionary and return self."""
        self.transcode_task_id = map.get('TranscodeTaskId')
        self.video_id = map.get('VideoId')
        self.task_status = map.get('TaskStatus')
        self.creation_time = map.get('CreationTime')
        self.complete_time = map.get('CompleteTime')
        self.trigger = map.get('Trigger')
        self.transcode_template_group_id = map.get('TranscodeTemplateGroupId')
        self.transcode_job_info_list = []
        if map.get('TranscodeJobInfoList') is not None:
            for k in map.get('TranscodeJobInfoList'):
                temp_model = GetTranscodeTaskResponseTranscodeTaskTranscodeJobInfoList()
                self.transcode_job_info_list.append(temp_model.from_map(k))
        else:
            self.transcode_job_info_list = None
        return self
class GetURLUploadInfosRequest(TeaModel):
    """Request model for the GetURLUploadInfos VOD API operation."""
    def __init__(self, owner_id=None, resource_owner_account=None, resource_owner_id=None, job_ids=None, upload_urls=None):
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.job_ids = job_ids
        self.upload_urls = upload_urls

    def validate(self):
        """No required fields for this request."""
        pass

    def to_map(self):
        """Serialize this request to its wire-format dictionary."""
        return {
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'JobIds': self.job_ids,
            'UploadURLs': self.upload_urls,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dictionary and return self."""
        for attr, key in (
            ('owner_id', 'OwnerId'),
            ('resource_owner_account', 'ResourceOwnerAccount'),
            ('resource_owner_id', 'ResourceOwnerId'),
            ('job_ids', 'JobIds'),
            ('upload_urls', 'UploadURLs'),
        ):
            setattr(self, attr, map.get(key))
        return self
class GetURLUploadInfosResponse(TeaModel):
    """Response model for the GetURLUploadInfos VOD API operation."""
    def __init__(self, request_id=None, urlupload_info_list=None, non_exists=None):
        self.request_id = request_id
        # Fix: honor the constructor arguments (they were previously accepted
        # but silently discarded and always reset to []); defaults stay [].
        self.urlupload_info_list = urlupload_info_list if urlupload_info_list is not None else []
        self.non_exists = non_exists if non_exists is not None else []

    def validate(self):
        """Assert required fields are present, recursing into upload entries."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.urlupload_info_list, 'urlupload_info_list')
        if self.urlupload_info_list:
            for k in self.urlupload_info_list:
                if k:
                    k.validate()
        self.validate_required(self.non_exists, 'non_exists')

    def to_map(self):
        """Serialize this response to its wire-format dictionary."""
        result = {}
        result['RequestId'] = self.request_id
        result['URLUploadInfoList'] = []
        if self.urlupload_info_list is not None:
            for k in self.urlupload_info_list:
                result['URLUploadInfoList'].append(k.to_map() if k else None)
        else:
            result['URLUploadInfoList'] = None
        if self.non_exists is not None:
            result['NonExists'] = list(self.non_exists)
        else:
            result['NonExists'] = None
        return result

    def from_map(self, map={}):
        """Populate this response from a wire-format dictionary and return self."""
        self.request_id = map.get('RequestId')
        self.urlupload_info_list = []
        if map.get('URLUploadInfoList') is not None:
            for k in map.get('URLUploadInfoList'):
                temp_model = GetURLUploadInfosResponseURLUploadInfoList()
                self.urlupload_info_list.append(temp_model.from_map(k))
        else:
            self.urlupload_info_list = None
        if map.get('NonExists') is not None:
            self.non_exists = list(map.get('NonExists'))
        else:
            self.non_exists = None
        return self
class GetURLUploadInfosResponseURLUploadInfoList(TeaModel):
    """A single URL upload job entry in the GetURLUploadInfos response."""
    def __init__(self, job_id=None, upload_url=None, media_id=None, file_size=None, status=None, user_data=None, error_code=None, error_message=None, creation_time=None, complete_time=None):
        self.job_id = job_id
        self.upload_url = upload_url
        self.media_id = media_id
        self.file_size = file_size
        self.status = status
        self.user_data = user_data
        self.error_code = error_code
        self.error_message = error_message
        self.creation_time = creation_time
        self.complete_time = complete_time

    def validate(self):
        """Every field of this entry is required."""
        for attr in ('job_id', 'upload_url', 'media_id', 'file_size', 'status',
                     'user_data', 'error_code', 'error_message',
                     'creation_time', 'complete_time'):
            self.validate_required(getattr(self, attr), attr)

    def to_map(self):
        """Serialize this entry to its wire-format dictionary."""
        return {
            'JobId': self.job_id,
            'UploadURL': self.upload_url,
            'MediaId': self.media_id,
            'FileSize': self.file_size,
            'Status': self.status,
            'UserData': self.user_data,
            'ErrorCode': self.error_code,
            'ErrorMessage': self.error_message,
            'CreationTime': self.creation_time,
            'CompleteTime': self.complete_time,
        }

    def from_map(self, map={}):
        """Populate this entry from a wire-format dictionary and return self."""
        for attr, key in (
            ('job_id', 'JobId'), ('upload_url', 'UploadURL'),
            ('media_id', 'MediaId'), ('file_size', 'FileSize'),
            ('status', 'Status'), ('user_data', 'UserData'),
            ('error_code', 'ErrorCode'), ('error_message', 'ErrorMessage'),
            ('creation_time', 'CreationTime'), ('complete_time', 'CompleteTime'),
        ):
            setattr(self, attr, map.get(key))
        return self
class UpdateTranscodeTemplateGroupRequest(TeaModel):
    """Request model for the UpdateTranscodeTemplateGroup VOD API operation."""
    def __init__(self, access_key_id=None, owner_id=None, resource_owner_account=None, resource_owner_id=None, name=None, transcode_template_list=None, locked=None, transcode_template_group_id=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.name = name
        self.transcode_template_list = transcode_template_list
        self.locked = locked
        self.transcode_template_group_id = transcode_template_group_id

    def validate(self):
        """Only the target group id is required."""
        self.validate_required(self.transcode_template_group_id, 'transcode_template_group_id')

    def to_map(self):
        """Serialize this request to its wire-format dictionary."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'Name': self.name,
            'TranscodeTemplateList': self.transcode_template_list,
            'Locked': self.locked,
            'TranscodeTemplateGroupId': self.transcode_template_group_id,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dictionary and return self."""
        for attr, key in (
            ('access_key_id', 'AccessKeyId'), ('owner_id', 'OwnerId'),
            ('resource_owner_account', 'ResourceOwnerAccount'),
            ('resource_owner_id', 'ResourceOwnerId'), ('name', 'Name'),
            ('transcode_template_list', 'TranscodeTemplateList'),
            ('locked', 'Locked'),
            ('transcode_template_group_id', 'TranscodeTemplateGroupId'),
        ):
            setattr(self, attr, map.get(key))
        return self
class UpdateTranscodeTemplateGroupResponse(TeaModel):
    """Response model for the UpdateTranscodeTemplateGroup VOD API operation."""
    def __init__(self, request_id=None, transcode_template_group_id=None):
        self.request_id = request_id
        self.transcode_template_group_id = transcode_template_group_id

    def validate(self):
        """Both response fields are required."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.transcode_template_group_id, 'transcode_template_group_id')

    def to_map(self):
        """Serialize this response to its wire-format dictionary."""
        return {
            'RequestId': self.request_id,
            'TranscodeTemplateGroupId': self.transcode_template_group_id,
        }

    def from_map(self, map={}):
        """Populate this response from a wire-format dictionary and return self."""
        self.request_id = map.get('RequestId')
        self.transcode_template_group_id = map.get('TranscodeTemplateGroupId')
        return self
class AddTranscodeTemplateGroupRequest(TeaModel):
    """Request model for the AddTranscodeTemplateGroup VOD API operation."""
    def __init__(self, access_key_id=None, owner_id=None, resource_owner_account=None, resource_owner_id=None, name=None, transcode_template_list=None, transcode_template_group_id=None, app_id=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.name = name
        self.transcode_template_list = transcode_template_list
        self.transcode_template_group_id = transcode_template_group_id
        self.app_id = app_id

    def validate(self):
        """No required fields for this request."""
        pass

    def to_map(self):
        """Serialize this request to its wire-format dictionary."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'Name': self.name,
            'TranscodeTemplateList': self.transcode_template_list,
            'TranscodeTemplateGroupId': self.transcode_template_group_id,
            'AppId': self.app_id,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dictionary and return self."""
        for attr, key in (
            ('access_key_id', 'AccessKeyId'), ('owner_id', 'OwnerId'),
            ('resource_owner_account', 'ResourceOwnerAccount'),
            ('resource_owner_id', 'ResourceOwnerId'), ('name', 'Name'),
            ('transcode_template_list', 'TranscodeTemplateList'),
            ('transcode_template_group_id', 'TranscodeTemplateGroupId'),
            ('app_id', 'AppId'),
        ):
            setattr(self, attr, map.get(key))
        return self
class AddTranscodeTemplateGroupResponse(TeaModel):
    """Response model for the AddTranscodeTemplateGroup VOD API operation."""
    def __init__(self, request_id=None, transcode_template_group_id=None):
        self.request_id = request_id
        self.transcode_template_group_id = transcode_template_group_id

    def validate(self):
        """Both response fields are required."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.transcode_template_group_id, 'transcode_template_group_id')

    def to_map(self):
        """Serialize this response to its wire-format dictionary."""
        return {
            'RequestId': self.request_id,
            'TranscodeTemplateGroupId': self.transcode_template_group_id,
        }

    def from_map(self, map={}):
        """Populate this response from a wire-format dictionary and return self."""
        self.request_id = map.get('RequestId')
        self.transcode_template_group_id = map.get('TranscodeTemplateGroupId')
        return self
class DeleteTranscodeTemplateGroupRequest(TeaModel):
    """Request model for the DeleteTranscodeTemplateGroup VOD API operation."""
    def __init__(self, access_key_id=None, owner_id=None, resource_owner_account=None, resource_owner_id=None, transcode_template_group_id=None, transcode_template_ids=None, force_del_group=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.transcode_template_group_id = transcode_template_group_id
        self.transcode_template_ids = transcode_template_ids
        self.force_del_group = force_del_group

    def validate(self):
        """Only the target group id is required."""
        self.validate_required(self.transcode_template_group_id, 'transcode_template_group_id')

    def to_map(self):
        """Serialize this request to its wire-format dictionary."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'TranscodeTemplateGroupId': self.transcode_template_group_id,
            'TranscodeTemplateIds': self.transcode_template_ids,
            'ForceDelGroup': self.force_del_group,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dictionary and return self."""
        for attr, key in (
            ('access_key_id', 'AccessKeyId'), ('owner_id', 'OwnerId'),
            ('resource_owner_account', 'ResourceOwnerAccount'),
            ('resource_owner_id', 'ResourceOwnerId'),
            ('transcode_template_group_id', 'TranscodeTemplateGroupId'),
            ('transcode_template_ids', 'TranscodeTemplateIds'),
            ('force_del_group', 'ForceDelGroup'),
        ):
            setattr(self, attr, map.get(key))
        return self
class DeleteTranscodeTemplateGroupResponse(TeaModel):
    """Response model for the DeleteTranscodeTemplateGroup VOD API operation."""
    def __init__(self, request_id=None, non_exist_transcode_template_ids=None):
        self.request_id = request_id
        # Fix: honor the constructor argument (it was previously accepted but
        # silently discarded and always reset to []); default stays [].
        self.non_exist_transcode_template_ids = non_exist_transcode_template_ids if non_exist_transcode_template_ids is not None else []

    def validate(self):
        """Both response fields are required."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.non_exist_transcode_template_ids, 'non_exist_transcode_template_ids')

    def to_map(self):
        """Serialize this response to its wire-format dictionary."""
        result = {}
        result['RequestId'] = self.request_id
        if self.non_exist_transcode_template_ids is not None:
            result['NonExistTranscodeTemplateIds'] = list(self.non_exist_transcode_template_ids)
        else:
            result['NonExistTranscodeTemplateIds'] = None
        return result

    def from_map(self, map={}):
        """Populate this response from a wire-format dictionary and return self."""
        self.request_id = map.get('RequestId')
        if map.get('NonExistTranscodeTemplateIds') is not None:
            self.non_exist_transcode_template_ids = list(map.get('NonExistTranscodeTemplateIds'))
        else:
            self.non_exist_transcode_template_ids = None
        return self
class GetTranscodeTemplateGroupRequest(TeaModel):
    """Request model for the GetTranscodeTemplateGroup VOD API operation."""
    def __init__(self, access_key_id=None, owner_id=None, resource_owner_account=None, resource_owner_id=None, transcode_template_group_id=None):
        self.access_key_id = access_key_id
        self.owner_id = owner_id
        self.resource_owner_account = resource_owner_account
        self.resource_owner_id = resource_owner_id
        self.transcode_template_group_id = transcode_template_group_id

    def validate(self):
        """Only the target group id is required."""
        self.validate_required(self.transcode_template_group_id, 'transcode_template_group_id')

    def to_map(self):
        """Serialize this request to its wire-format dictionary."""
        return {
            'AccessKeyId': self.access_key_id,
            'OwnerId': self.owner_id,
            'ResourceOwnerAccount': self.resource_owner_account,
            'ResourceOwnerId': self.resource_owner_id,
            'TranscodeTemplateGroupId': self.transcode_template_group_id,
        }

    def from_map(self, map={}):
        """Populate this request from a wire-format dictionary and return self."""
        for attr, key in (
            ('access_key_id', 'AccessKeyId'), ('owner_id', 'OwnerId'),
            ('resource_owner_account', 'ResourceOwnerAccount'),
            ('resource_owner_id', 'ResourceOwnerId'),
            ('transcode_template_group_id', 'TranscodeTemplateGroupId'),
        ):
            setattr(self, attr, map.get(key))
        return self
class GetTranscodeTemplateGroupResponse(TeaModel):
    """Response model for the GetTranscodeTemplateGroup VOD API operation."""
    def __init__(self, request_id=None, transcode_template_group=None):
        self.request_id = request_id
        self.transcode_template_group = transcode_template_group

    def validate(self):
        """Assert required fields, recursing into the nested group model."""
        self.validate_required(self.request_id, 'request_id')
        self.validate_required(self.transcode_template_group, 'transcode_template_group')
        if self.transcode_template_group:
            self.transcode_template_group.validate()

    def to_map(self):
        """Serialize this response to its wire-format dictionary."""
        group = self.transcode_template_group
        return {
            'RequestId': self.request_id,
            'TranscodeTemplateGroup': group.to_map() if group is not None else None,
        }

    def from_map(self, map={}):
        """Populate this response from a wire-format dictionary and return self."""
        self.request_id = map.get('RequestId')
        raw_group = map.get('TranscodeTemplateGroup')
        if raw_group is not None:
            self.transcode_template_group = GetTranscodeTemplateGroupResponseTranscodeTemplateGroup().from_map(raw_group)
        else:
            self.transcode_template_group = None
        return self
class GetTranscodeTemplateGroupResponseTranscodeTemplateGroupTranscodeTemplateList(TeaModel):
def __init__(self, transcode_template_id=None, video=None, audio=None, container=None, mux_config=None, trans_config=None, definition=None, encrypt_setting=None, package_setting=None, subtitle_list=None, opening_list=None, tail_slate_list=None, template_name=None, transcode_file_regular=None, clip=None, rotate=None, type=None, user_data=None, watermark_ids=None):
    """Initialize a transcode-template entry of the template-group response."""
    self.transcode_template_id = transcode_template_id
    self.video = video
    self.audio = audio
    self.container = container
    self.mux_config = mux_config
    self.trans_config = trans_config
    self.definition = definition
    self.encrypt_setting = encrypt_setting
    self.package_setting = package_setting
    self.subtitle_list = subtitle_list
    self.opening_list = opening_list
    self.tail_slate_list = tail_slate_list
    self.template_name = template_name
    self.transcode_file_regular = transcode_file_regular
    self.clip = clip
    self.rotate = rotate
    self.type = type
    self.user_data = user_data
    # Fix: honor the constructor argument (it was previously accepted but
    # silently discarded and always reset to []); default stays [].
    self.watermark_ids = watermark_ids if watermark_ids is not None else []
def validate(self):
self.validate_required(self.transcode_template_id, 'transcode_template_id')
self.validate_required(self.video, 'video')
self.validate_required(self.audio, 'audio')
self.validate_required(self.container, | |
import csv
import logging
import math
import shutil
import tempfile
import typing
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
import librosa
import torch
from phonemes2ids import phonemes2ids
from torch.utils.data import Dataset
from vits_train.config import TrainingConfig
from vits_train.mel_processing import spectrogram_torch
_LOGGER = logging.getLogger("vits_train.dataset")
@dataclass
class Utterance:
    """One dataset entry before any tensors are loaded."""

    # Utterance id; also used as the audio/cache file stem.
    id: str
    # Phoneme ids for the utterance text.
    phoneme_ids: typing.List[int]
    # Path to the source audio file (may carry an appended .wav extension).
    audio_path: Path
    # Base path for cached tensors (.audio.pt / .spec.pt suffixes are derived from it).
    cache_path: typing.Optional[Path]
    # Integer speaker id; None for single-speaker datasets.
    speaker_id: typing.Optional[int] = None
@dataclass
class UtteranceTensors:
    """Tensor view of a single utterance, as produced by the dataset's __getitem__."""

    # Utterance id.
    id: str
    # 1-D tensor of phoneme ids.
    phoneme_ids: torch.LongTensor
    # Spectrogram of shape (num_mels, num_frames).
    spectrogram: torch.FloatTensor
    # Normalized audio of shape (1, num_samples), values in [-1, 1].
    audio_norm: torch.FloatTensor
    # Number of spectrogram frames, i.e. spectrogram.size(1).
    spec_length: int
    # Speaker id tensor of shape (1,); None for single-speaker models.
    speaker_id: typing.Optional[torch.LongTensor] = None
@dataclass
class Batch:
    """Zero-padded training batch produced by UtteranceCollate."""

    # (batch, max_phoneme_len) padded phoneme ids.
    phoneme_ids: torch.LongTensor
    # (batch,) true phoneme lengths before padding.
    phoneme_lengths: torch.LongTensor
    # (batch, num_mels, max_spec_len) padded spectrograms.
    spectrograms: torch.FloatTensor
    # (batch,) true spectrogram frame counts.
    spectrogram_lengths: torch.LongTensor
    # (batch, 1, max_audio_len) padded normalized audio.
    audios: torch.FloatTensor
    # (batch,) true audio sample counts.
    audio_lengths: torch.LongTensor
    # (batch,) speaker ids, or None for single-speaker models.
    speaker_ids: typing.Optional[torch.LongTensor] = None
# Maps utterance id -> list of phoneme ids.
UTTERANCE_PHONEME_IDS = typing.Dict[str, typing.List[int]]
# Maps utterance id -> integer speaker id.
UTTERANCE_SPEAKER_IDS = typing.Dict[str, int]
# A collection of utterance ids (e.g. one train/val split).
UTTERANCE_IDS = typing.Collection[str]
@dataclass
class DatasetInfo:
    """Metadata for one named dataset: audio location, phoneme/speaker maps,
    and the utterance ids belonging to each split."""

    # Dataset name; also used as the cache subdirectory name.
    name: str
    # Directory containing this dataset's audio files.
    audio_dir: Path
    # utterance id -> phoneme ids
    utt_phoneme_ids: UTTERANCE_PHONEME_IDS
    # utterance id -> speaker id
    utt_speaker_ids: UTTERANCE_SPEAKER_IDS
    # split name (e.g. "train", "val") -> utterance ids in that split
    split_ids: typing.Mapping[str, UTTERANCE_IDS]
# -----------------------------------------------------------------------------
class PhonemeIdsAndMelsDataset(Dataset):
    """Torch Dataset yielding UtteranceTensors (phoneme ids, spectrogram,
    normalized audio) for one split across one or more datasets.

    Audio tensors and spectrograms are cached on disk under ``cache_dir``
    (or a temporary directory when none is given) so they are computed once.
    """

    def __init__(
        self,
        config: TrainingConfig,
        datasets: typing.Sequence[DatasetInfo],
        split: str,
        cache_dir: typing.Optional[typing.Union[str, Path]] = None,
    ):
        """Collect utterances for *split* from *datasets*, skipping (with a
        warning) any whose audio file cannot be found on disk."""
        super().__init__()
        self.config = config
        self.utterances = []
        self.split = split
        # Keep a reference to the TemporaryDirectory (if created) so it is not
        # cleaned up while the dataset is still alive.
        self.temp_dir: typing.Optional[tempfile.TemporaryDirectory] = None
        if cache_dir is None:
            # pylint: disable=consider-using-with
            self.temp_dir = tempfile.TemporaryDirectory(prefix="vits_train")
            self.cache_dir = Path(self.temp_dir.name)
        else:
            self.cache_dir = Path(cache_dir)
            self.cache_dir.mkdir(parents=True, exist_ok=True)
        # Check utterances
        speakers_with_data: typing.Set[int] = set()
        for dataset in datasets:
            for utt_id in dataset.split_ids.get(split, []):
                audio_path = dataset.audio_dir / utt_id
                if not audio_path.is_file():
                    # Try WAV extension
                    audio_path = dataset.audio_dir / f"{utt_id}.wav"
                if audio_path.is_file():
                    cache_path = self.cache_dir / dataset.name / utt_id
                    speaker_id = dataset.utt_speaker_ids.get(utt_id)
                    if config.model.is_multispeaker:
                        # A multispeaker model needs a speaker for every utterance.
                        assert speaker_id is not None, f"No speaker for {utt_id}"
                        speakers_with_data.add(speaker_id)
                    self.utterances.append(
                        Utterance(
                            id=utt_id,
                            phoneme_ids=dataset.utt_phoneme_ids[utt_id],
                            audio_path=audio_path,
                            cache_path=cache_path,
                            speaker_id=speaker_id,
                        )
                    )
                else:
                    _LOGGER.warning("Missing audio file: %s", audio_path)
        if config.model.is_multispeaker and (
            len(speakers_with_data) < config.model.n_speakers
        ):
            # Possibly missing data
            _LOGGER.warning(
                "Data was found for only %s/%s speakers",
                len(speakers_with_data),
                config.model.n_speakers,
            )

    def __getitem__(self, index):
        """Load (or compute and cache) the tensors for utterance *index*."""
        utterance = self.utterances[index]
        # Normalized audio
        audio_norm_path = utterance.cache_path.with_suffix(".audio.pt")
        if audio_norm_path.is_file():
            # Load from cache
            audio_norm = torch.load(str(audio_norm_path))
        else:
            # Load audio and resample
            audio, _sample_rate = librosa.load(
                str(utterance.audio_path), sr=self.config.audio.sample_rate
            )
            # NOTE: audio is already in [-1, 1] coming from librosa
            audio_norm = torch.FloatTensor(audio).unsqueeze(0)
            # Save to cache
            audio_norm_path.parent.mkdir(parents=True, exist_ok=True)
            # Use temporary file to avoid multiple processes writing at the same time.
            # (save to a private temp path first, then copy into the cache)
            with tempfile.NamedTemporaryFile(mode="wb") as audio_norm_file:
                torch.save(audio_norm, audio_norm_file.name)
                shutil.copy(audio_norm_file.name, audio_norm_path)
        # Mel spectrogram
        spectrogram_path = utterance.cache_path.with_suffix(".spec.pt")
        if spectrogram_path.is_file():
            # Load from cache
            spectrogram = torch.load(str(spectrogram_path))
        else:
            # Compute from the (possibly just-loaded) normalized audio.
            spectrogram = spectrogram_torch(
                y=audio_norm,
                n_fft=self.config.audio.filter_length,
                sampling_rate=self.config.audio.sample_rate,
                hop_size=self.config.audio.hop_length,
                win_size=self.config.audio.win_length,
                center=False,
            ).squeeze(0)
            # Save to cache
            spectrogram_path.parent.mkdir(parents=True, exist_ok=True)
            # Use temporary file to avoid multiple processes writing at the same time.
            with tempfile.NamedTemporaryFile(mode="wb") as spec_file:
                torch.save(spectrogram, spec_file.name)
                shutil.copy(spec_file.name, spectrogram_path)
        speaker_id = None
        if utterance.speaker_id is not None:
            speaker_id = torch.LongTensor([utterance.speaker_id])
        return UtteranceTensors(
            id=utterance.id,
            phoneme_ids=torch.LongTensor(utterance.phoneme_ids),
            audio_norm=audio_norm,
            spectrogram=spectrogram,
            spec_length=spectrogram.size(1),
            speaker_id=speaker_id,
        )

    def __len__(self):
        """Number of usable utterances found for this split."""
        return len(self.utterances)
class UtteranceCollate:
    """Collate a sequence of UtteranceTensors into a dense, padded Batch.

    Phoneme ids, spectrograms, and audio are zero-padded to the longest
    example, and utterances are ordered by decreasing spectrogram length.
    """

    def __call__(self, utterances: typing.Sequence[UtteranceTensors]) -> Batch:
        """Pad and stack *utterances* into a Batch.

        Raises AssertionError when *utterances* is empty or an utterance is
        missing its spectrogram/audio.
        """
        num_utterances = len(utterances)
        assert num_utterances > 0, "No utterances"
        max_phonemes_length = 0
        max_spec_length = 0
        max_audio_length = 0
        num_mels = 0
        multispeaker = False
        # First pass: determine padded dimensions (no index needed here,
        # so a plain loop replaces the previous unused enumerate()).
        for utt in utterances:
            assert utt.spectrogram is not None
            assert utt.audio_norm is not None
            phoneme_length = utt.phoneme_ids.size(0)
            spec_length = utt.spectrogram.size(1)
            audio_length = utt.audio_norm.size(1)
            max_phonemes_length = max(max_phonemes_length, phoneme_length)
            max_spec_length = max(max_spec_length, spec_length)
            max_audio_length = max(max_audio_length, audio_length)
            # NOTE(review): assumes every utterance shares the same mel
            # channel count; the last one seen wins — confirm upstream.
            num_mels = utt.spectrogram.size(0)
            if utt.speaker_id is not None:
                multispeaker = True
        # Create zero-initialized padded tensors
        phonemes_padded = torch.LongTensor(num_utterances, max_phonemes_length)
        spec_padded = torch.FloatTensor(num_utterances, num_mels, max_spec_length)
        audio_padded = torch.FloatTensor(num_utterances, 1, max_audio_length)
        phonemes_padded.zero_()
        spec_padded.zero_()
        audio_padded.zero_()
        phoneme_lengths = torch.LongTensor(num_utterances)
        spec_lengths = torch.LongTensor(num_utterances)
        audio_lengths = torch.LongTensor(num_utterances)
        speaker_ids: typing.Optional[torch.LongTensor] = None
        if multispeaker:
            speaker_ids = torch.LongTensor(num_utterances)
        # Sort by decreasing spectrogram length
        sorted_utterances = sorted(
            utterances, key=lambda u: u.spectrogram.size(1), reverse=True
        )
        # Second pass: copy each utterance into its padded slot.
        for utt_idx, utt in enumerate(sorted_utterances):
            phoneme_length = utt.phoneme_ids.size(0)
            spec_length = utt.spectrogram.size(1)
            audio_length = utt.audio_norm.size(1)
            phonemes_padded[utt_idx, :phoneme_length] = utt.phoneme_ids
            phoneme_lengths[utt_idx] = phoneme_length
            spec_padded[utt_idx, :, :spec_length] = utt.spectrogram
            spec_lengths[utt_idx] = spec_length
            audio_padded[utt_idx, :, :audio_length] = utt.audio_norm
            audio_lengths[utt_idx] = audio_length
            if utt.speaker_id is not None:
                assert speaker_ids is not None
                speaker_ids[utt_idx] = utt.speaker_id
        return Batch(
            phoneme_ids=phonemes_padded,
            phoneme_lengths=phoneme_lengths,
            spectrograms=spec_padded,
            spectrogram_lengths=spec_lengths,
            audios=audio_padded,
            audio_lengths=audio_lengths,
            speaker_ids=speaker_ids,
        )
class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):
    """
    Maintain similar input lengths in a batch.
    Length groups are specified by boundaries.
    Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <= b2} or {x | b2 < length(x) <= b3}.
    It removes samples which are not included in the boundaries.
    Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.
    """
    def __init__(
        self,
        dataset,
        batch_size,
        boundaries,
        num_replicas=None,
        rank=None,
        shuffle=True,
    ):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # Bucket key is the spectrogram frame count of each utterance.
        self.lengths = [utt.spec_length for utt in dataset]
        self.batch_size = batch_size
        self.boundaries = boundaries
        self.buckets, self.num_samples_per_bucket = self._create_buckets()
        # total_size counts padded samples across all replicas;
        # num_samples is this replica's share.
        self.total_size = sum(self.num_samples_per_bucket)
        self.num_samples = self.total_size // self.num_replicas
    def _create_buckets(self):
        # Assign each sample index to a length bucket, then compute the padded
        # per-bucket sample count (a multiple of num_replicas * batch_size).
        buckets = [[] for _ in range(len(self.boundaries) - 1)]
        for i in range(len(self.lengths)):
            length = self.lengths[i]
            idx_bucket = self._bisect(length)
            if idx_bucket != -1:
                buckets[idx_bucket].append(i)
        # Drop empty buckets (and their upper boundary), scanning from the end
        # so pops don't shift unvisited indices.
        # NOTE(review): bucket 0 is never checked here (range stops at 1) —
        # if it ends up empty, __iter__ would divide by zero; confirm inputs.
        for i in range(len(buckets) - 1, 0, -1):
            if len(buckets[i]) == 0:
                buckets.pop(i)
                self.boundaries.pop(i + 1)
        num_samples_per_bucket = []
        for i in range(len(buckets)):
            len_bucket = len(buckets[i])
            total_batch_size = self.num_replicas * self.batch_size
            # rem pads the bucket so it divides evenly across replicas/batches.
            rem = (
                total_batch_size - (len_bucket % total_batch_size)
            ) % total_batch_size
            num_samples_per_bucket.append(len_bucket + rem)
        return buckets, num_samples_per_bucket
    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)
        indices = []
        if self.shuffle:
            for bucket in self.buckets:
                indices.append(torch.randperm(len(bucket), generator=g).tolist())
        else:
            for bucket in self.buckets:
                indices.append(list(range(len(bucket))))
        batches = []
        for i in range(len(self.buckets)):
            bucket = self.buckets[i]
            len_bucket = len(bucket)
            ids_bucket = indices[i]
            num_samples_bucket = self.num_samples_per_bucket[i]
            # add extra samples to make it evenly divisible
            # (repeat the whole bucket rem // len_bucket times, then a prefix)
            rem = num_samples_bucket - len_bucket
            ids_bucket = (
                ids_bucket
                + ids_bucket * (rem // len_bucket)
                + ids_bucket[: (rem % len_bucket)]
            )
            # subsample: every num_replicas-th id, offset by this replica's rank
            ids_bucket = ids_bucket[self.rank :: self.num_replicas]
            # batching
            for j in range(len(ids_bucket) // self.batch_size):
                batch = [
                    bucket[idx]
                    for idx in ids_bucket[
                        j * self.batch_size : (j + 1) * self.batch_size
                    ]
                ]
                batches.append(batch)
        if self.shuffle:
            # Shuffle the order of batches; each batch stays within its bucket.
            batch_ids = torch.randperm(len(batches), generator=g).tolist()
            batches = [batches[i] for i in batch_ids]
        self.batches = batches
        assert len(self.batches) * self.batch_size == self.num_samples
        return iter(self.batches)
    def _bisect(self, x, lo=0, hi=None):
        # Binary search: return i with boundaries[i] < x <= boundaries[i + 1],
        # or -1 when x falls outside all boundaries.
        if hi is None:
            hi = len(self.boundaries) - 1
        if hi > lo:
            mid = (hi + lo) // 2
            if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:
                return mid
            elif x <= self.boundaries[mid]:
                return self._bisect(x, lo, mid)
            else:
                return self._bisect(x, mid + 1, hi)
        else:
            return -1
    def __len__(self):
        # Number of batches this replica yields per epoch.
        return self.num_samples // self.batch_size
# -----------------------------------------------------------------------------
def load_dataset(
config: TrainingConfig,
dataset_name: str,
metadata_dir: typing.Union[str, Path],
audio_dir: typing.Union[str, Path],
splits=("train", "val"),
speaker_id_map: typing.Optional[typing.Dict[str, int]] = None,
) -> DatasetInfo:
metadata_dir = Path(metadata_dir)
audio_dir = Path(audio_dir)
# Determine data paths
data_paths = defaultdict(dict)
for split in splits:
is_phonemes = False
csv_path = metadata_dir / f"{split}_ids.csv"
if not csv_path.is_file():
csv_path = metadata_dir / f"{split}_phonemes.csv"
is_phonemes = True
data_paths[split]["is_phonemes"] = is_phonemes
data_paths[split]["csv_path"] = csv_path
data_paths[split]["utt_ids"] = []
# train/val sets are required
for split in splits:
assert data_paths[split][
"csv_path"
].is_file(), (
f"Missing {split}_ids.csv or {split}_phonemes.csv in {metadata_dir}"
)
# Load utterances
phoneme_to_id = config.phonemes.phoneme_to_id
utt_phoneme_ids: typing.Dict[str, str] = {}
utt_speaker_ids: typing.Dict[str, int] = {}
for split in splits:
csv_path = data_paths[split]["csv_path"]
if not csv_path.is_file():
_LOGGER.debug("Skipping data for %s", split)
continue
is_phonemes = data_paths[split]["is_phonemes"]
utt_ids = data_paths[split]["utt_ids"]
with open(csv_path, "r", encoding="utf-8") as csv_file:
reader = csv.reader(csv_file, delimiter="|")
for row_idx, row in enumerate(reader):
assert len(row) > 1, f"{row} in {csv_path}:{row_idx+1}"
utt_id, phonemes_or_ids = row[0], row[-1]
if config.model.is_multispeaker:
if len(row) > 2:
speaker = row[1]
else:
speaker = dataset_name
if speaker not in speaker_id_map:
# Add to cross-datatset speaker id map
speaker_id_map[speaker] = len(speaker_id_map)
utt_speaker_ids[utt_id] = speaker_id_map[speaker]
if is_phonemes:
# Map phonemes with phonemes2ids
assert phoneme_to_id, "No phoneme to id map (missing phonemes.txt?)"
word_phonemes = config.phonemes.split_word_phonemes(phonemes_or_ids)
phoneme_ids = phonemes2ids(
word_phonemes=word_phonemes,
phoneme_to_id=phoneme_to_id,
pad=config.phonemes.pad,
bos=config.phonemes.bos,
eos=config.phonemes.eos,
blank=config.phonemes.blank,
blank_word=config.phonemes.blank_word,
blank_between=config.phonemes.blank_between,
blank_at_start=config.phonemes.blank_at_start,
blank_at_end=config.phonemes.blank_at_end,
simple_punctuation=config.phonemes.simple_punctuation,
punctuation_map=config.phonemes.punctuation_map,
separate=config.phonemes.separate,
separate_graphemes=config.phonemes.separate_graphemes,
separate_tones=config.phonemes.separate_tones,
tone_before=config.phonemes.tone_before,
)
else:
phoneme_ids = [int(p_id) for p_id in phonemes_or_ids.split()]
phoneme_ids = [
p_id
for p_id in phoneme_ids
if 0 <= p_id < config.model.num_symbols
]
if phoneme_ids:
utt_phoneme_ids[utt_id] = phoneme_ids
utt_ids.append(utt_id)
else:
_LOGGER.warning("No phoneme ids for %s (%s)", utt_id, csv_path)
_LOGGER.debug(
"Loaded %s utterance(s) for %s from %s", len(utt_ids), split, csv_path
)
# Filter utterances based on min/max settings in config
_LOGGER.debug("Filtering data")
drop_utt_ids: typing.Set[str] = set()
num_phonemes_too_small = 0
num_phonemes_too_large = 0
num_audio_missing = 0
num_spec_too_small = 0
num_spec_too_large = 0
for utt_id, phoneme_ids in utt_phoneme_ids.items():
# Check | |
# kalite_zim/management/commands/export2zim.py
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import json
import os
import shutil
import subprocess
import sys
import tempfile
from datetime import datetime
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from iso639 import languages as iso_languages
from kalite.topic_tools import settings as topic_tools_settings, \
get_content_cache, get_exercise_cache
from kalite.settings.base import CONTENT_ROOT
from kalite import i18n
from kalite_zim.utils import download_video, logger
from fle_utils.general import softload_json
from submarine.parser import parser as submarine_parser
from kalite_zim.anythumbnailer.thumbnail_ import create_thumbnail
from distutils.spawn import find_executable
YOUTUBE_URL = "https://www.youtube.com/watch?v={id}"
COPIED_FILES = {}
def compressor_init(input_dir):
    """Configure django-compressor: point COMPRESS_ROOT at *input_dir* and
    install the SCSS precompiler and compressor static-file finder."""
    overrides = {
        'COMPRESS_PRECOMPILERS': (
            ('text/x-scss', 'django_libsass.SassCompiler'),
        ),
        'STATICFILES_FINDERS': (
            'django.contrib.staticfiles.finders.FileSystemFinder',
            'django.contrib.staticfiles.finders.AppDirectoriesFinder',
            'compressor.finders.CompressorFinder',
        ),
        'COMPRESS_ROOT': input_dir,
        'COMPRESS_OUTPUT_DIR': '',
        'COMPRESS_CSS_FILTERS': [],
    }
    # The settings are independent, so assignment order does not matter.
    for name, value in overrides.items():
        setattr(settings, name, value)
def soft_link(src, dst):
""" create a link if filesystem supports it otherwise copy """
return os.symlink(src, dst)
def hard_link(src, dst):
try:
os.link(src, dst)
except OSError as exp:
# Operation not supported (filesystem cannot hard link?) (45) or Link from one device to another (cross-device link) is invalid (18)
if exp.errno == 45 or exp.errno == 18:
shutil.copyfile(src, dst)
else:
raise #Raise last exception we have not catch
def copy_file(src, dst):
    """Copy *src* to *dst*, deduplicating repeated sources.

    The first copy of a given *src* is a hard link (or real copy); later
    copies of the same *src* become symlinks to that first destination to
    save space. Uses the module-level COPIED_FILES registry.
    """
    # Membership test on the dict directly (no need for .keys()).
    if src not in COPIED_FILES:
        # copy it first properly
        hard_link(src, dst)
        COPIED_FILES[src] = dst
    else:
        # we already have a hard copy, let's symlink to it
        soft_link(COPIED_FILES[src], dst)
class Command(BaseCommand):
args = ('zimfile')
help = 'Export video and meta data of your KA Lite installation to OpenZim' # @ReservedAssignment
option_list = BaseCommand.option_list + (
make_option(
'--language', '-l',
action='store',
dest='language',
default='en',
help='Select which language (videos and meta data) to export'
),
make_option(
'--tmp-dir', '-t',
action='store',
dest='tmp_dir',
default='',
help='Directory for the temporary zim filesystem'
),
make_option(
'--test',
action='store_true',
dest='test',
help='Use test data'
),
make_option(
'--clear', '-c',
action='store_true',
dest='clear',
default=False,
help='Force clearing temporary fs and output destinations before write'
),
make_option(
'--resume', '-r',
action='store_true',
dest='resume',
default=False,
help='Resume writing into a dirty tmp-dir'
),
make_option(
'--download', '-d',
action='store_true',
dest='download',
default=False,
help='Instead of skipping videos that are not available, download them to KA Lite.'
),
make_option(
'--zimwriterfs', '-z',
action='store',
dest='zimwriterfs',
default=None,
help="Path to zimwriterfs if it's not on the shell path"
),
make_option(
'--publisher', '-p',
action='store',
dest='publisher',
default="Learning Equality",
help="Name of publisher"
),
make_option(
'--transcode2webm',
action='store_true',
dest='transcode2webm',
default=False,
help="Name of publisher"
),
)
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Takes exactly 1 argument")
dest_file = os.path.abspath(args[0])
logger.info("Starting up KA Lite export2zim command")
beginning = datetime.now()
logger.info("Begin: {}".format(beginning))
language = options.get('language')
if not language:
raise CommandError("Must specify a language!")
if not options.get('tmp_dir'):
tmp_dir = os.path.join(tempfile.gettempdir(), 'ka-lite-zim_{}'.format(language))
else:
tmp_dir = options.get('tmp_dir')
tmp_dir = os.path.abspath(tmp_dir)
if os.path.exists(tmp_dir) and os.listdir(tmp_dir):
if options['clear']:
logger.info("Clearing directory {}".format(tmp_dir))
shutil.rmtree(tmp_dir)
elif options['resume']:
logger.info("Resuming in dirty tmp directory {}".format(tmp_dir))
else:
raise CommandError(
"{} not empty, use the -c option to clean it, -r to resume, or use an empty destination directory.".format(
tmp_dir
)
)
zimwriterfs = options.get("zimwriterfs", None)
publisher = options.get("publisher")
transcode2webm = options.get("transcode2webm")
ffmpeg = find_executable("ffmpeg")
if not ffmpeg:
logger.warning("FFMpeg not found in your path, you won't be able to create missing thumbnails or transcode to webm.")
if not zimwriterfs:
zimwriterfs = find_executable("zimwriterfs")
if not zimwriterfs:
raise CommandError("Could not find zimwriterfs in your path, try specifying --zimwriterfs=/path")
if not os.path.exists(zimwriterfs):
raise CommandError("Invalid --zimwriterfs")
from kalite_zim import __file__ as base_path
base_path = os.path.abspath(os.path.dirname(base_path))
data_path = os.path.join(base_path, 'data')
# Where subtitles are found in KA Lite
subtitle_src_dir = i18n.get_srt_path(language)
logger.info("Will export videos for language: {}".format(language))
logger.info("Preparing KA Lite topic tree...")
# Use live data
if not options.get('test'):
# This way of doing things will be deprecated in KA Lite 0.16
topic_tree_json_path = topic_tools_settings.TOPICS_FILEPATHS.get('khan')
content_cache = get_content_cache(language=language, annotate=True)
exercise_cache = get_exercise_cache(language=language)
# Use test data
else:
topic_tree_json_path = os.path.join(data_path, 'test_topics.json')
content_cache = json.load(
open(os.path.join(data_path, 'test_content.json'))
)
exercise_cache = json.load(
open(os.path.join(data_path, 'test_exercise.json'))
)
topic_tree = softload_json(topic_tree_json_path, logger=logger.debug, raises=False)
content_json_output = {}
exercise_json_output = {}
def annotate_tree(topic, depth=0, parent=None):
"""
We need to recurse into the tree in order to annotate elements
with topic data and exercise data
"""
children = topic.get('children', [])
new_children = []
for child_topic in children:
if child_topic.get("kind") in ("Video", "Topic"):
annotate_tree(child_topic, depth=depth + 1, parent=topic)
new_children.append(child_topic)
topic["children"] = new_children
if topic.get("kind") == "Exercise":
topic['exercise'] = exercise_cache.get(topic.get("id"), {})
exercise_json_output[topic.get("id")] = topic['exercise']
elif topic.get("kind") == "Topic":
pass
else:
topic['exercise'] = None
topic['content'] = content_cache.get(topic.get("id"), {})
content_json_output[topic.get("id")] = topic['content']
if not topic['content']:
logger.error('No content!?, id is: {}'.format(topic.get('id')))
# Translate everything for good measure
with i18n.translate_block(language):
topic["title"] = _(topic.get("title", ""))
topic["description"] = _(topic.get("description", "")) if topic.get("description") else ""
topic["url"] = topic["id"] + ".html"
topic["parent"] = parent
topic["depth"] = depth
for key in ("child_data", "keywords", "hide", "contains"):
topic.pop(key, None)
# 1. Annotate a topic tree
annotate_tree(topic_tree)
# 2. Now go through the tree and copy each element into the destination
# zim file system
def copy_media(node):
if node['kind'] == 'Topic':
# Don't do anything if it's a topic
pass
elif node['kind'] == 'Exercise':
# Exercises cannot be displayed
node["content"]["available"] = False
elif node['kind'] == 'Video':
if node['content']['format'] == "webm":
logger.warning("Found a duplicate ID for {}, re-downloading".format(node['id']))
node['content']['format'] = "mp4"
# Available is False by default until we locate the file
node["content"]["available"] = False
subtitle_dir = os.path.join(tmp_dir, "subtitle")
if not os.path.exists(subtitle_dir):
os.makedirs(subtitle_dir)
videos_dir = os.path.join(tmp_dir, "videos")
if not os.path.exists(videos_dir):
os.makedirs(videos_dir)
if transcode2webm:
video_file_name = node['id'] + '.' + 'webm'
else:
video_file_name = node['id'] + '.' + node['content']['format']
video_file_src = os.path.join(CONTENT_ROOT, video_file_name)
video_file_dest = os.path.join(videos_dir, video_file_name)
thumb_dir = os.path.join(tmp_dir, "thumb")
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
thumb_file_name = node['id'] + '.png'
thumb_file_src = os.path.join(CONTENT_ROOT, thumb_file_name)
thumb_file_dest = os.path.join(thumb_dir, thumb_file_name)
if options['download'] and not os.path.exists(video_file_src):
logger.info("Video file being downloaded to: {}".format(video_file_src))
if transcode2webm:
yt_video_url = YOUTUBE_URL.format(
id=node['content']['youtube_id'])
cmd = ['youtube-dl', '-o', video_file_src,
'-f', 'webm/mp4',
'--recode-video', 'webm', '-k',
yt_video_url]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout_data, _stderr_data = process.communicate()
if process.returncode != 0:
logger.error("Error invoking ffmpeg: {}".format((_stderr_data or "") + (stdout_data or "")))
logger.error("Command was: {}".format(" ".join(cmd)))
raise CommandError("Could not complete transcoding")
node['content']['format'] = "webm"
else:
download_video(
node['content']['youtube_id'],
node['content']['format'],
CONTENT_ROOT,
)
if os.path.exists(video_file_src):
if not os.path.exists(video_file_dest):
copy_file(video_file_src, video_file_dest)
node["video_url"] = os.path.join(
"videos",
video_file_name
)
copy_media.videos_found += 1
logger.info("Videos processed: {}".format(copy_media.videos_found))
node["content"]["available"] = True
# Create thumbnail if it wasn't downloaded
if not os.path.exists(thumb_file_src):
fp = create_thumbnail(video_file_src, output_format="png")
if fp is None:
logger.error("Failed to create thumbnail for {}".format(video_file_src))
else:
logger.info("Successfully created thumbnail for {}".format(video_file_src))
file(thumb_file_src, 'wb').write(fp.read())
# Handle thumbnail
if os.path.exists(thumb_file_src):
node["thumbnail_url"] = os.path.join(
"thumb",
node['id'] + '.png'
)
if not os.path.exists(thumb_file_dest):
copy_file(thumb_file_src, thumb_file_dest)
else:
node["thumbnail_url"] = None
subtitle_srt = os.path.join(
subtitle_src_dir,
node['id'] + '.srt'
)
if os.path.isfile(subtitle_srt):
subtitle_vtt = os.path.join(
subtitle_dir,
node['id'] + '.vtt'
)
# Convert to .vtt because this format is understood
# by latest video.js and the old ones that read
# .srt don't work with newer jquery etc.
submarine_parser(subtitle_srt, subtitle_vtt)
if not os.path.exists(subtitle_vtt):
logger.warning("Subtitle not converted: {}".format(subtitle_srt))
else:
logger.info("Subtitle convert from SRT to VTT: {}".format(subtitle_vtt))
node["subtitle_url"] = os.path.join(
"subtitle",
node['id'] + '.vtt'
)
else:
if options['download']:
logger.error("File not found or downloaded: {}".format(video_file_src))
else:
logger.error("Invalid node, kind: {}".format(node.get("kind", None)))
# Exercises cannot be displayed
node["content"] = {"available": False}
new_children = []
for child in node.get('children', []):
copy_media(child)
empty_topic = child["kind"] == "Topic" and not child.get("children", [])
unavailable_video = child["kind"] == "Video" and not child.get("content", {}).get("available", False)
if not (empty_topic or unavailable_video):
new_children.append(child)
node['children'] = new_children
copy_media.videos_found = 0
def render_topic_pages(node):
parents = [node] if node.get("children") else []
parent = node["parent"]
while parent:
parents.append(parent)
parent = parent["parent"]
# Finally, render templates into the destination
template_context = {
"topic_tree": topic_tree,
"topic": node,
"parents": parents
}
with i18n.translate_block(language):
topic_html = render_to_string("kalite_zim/topic.html", template_context)
# Replace absolute references to '/static' with relative
topic_html = topic_html.replace("/static", "static")
dest_html = os.path.join(tmp_dir, node["id"] + ".html")
logger.info("Rendering {}".format(dest_html))
open(dest_html, "w").write(topic_html)
render_topic_pages.pages_rendered += 1
for child in node.get('children', []):
render_topic_pages(child)
render_topic_pages.pages_rendered = 0
logger.info("Hard linking video files from KA Lite...")
copy_media(topic_tree)
sys.stderr.write("\n")
logger.info("Done!")
# Configure django-compressor
compressor_init(os.path.join(base_path, 'static'))
# Finally, render templates into the destination
template_context = {
"topic_tree": topic_tree,
"welcome": True,
}
with i18n.translate_block(language):
welcome_html = render_to_string("kalite_zim/welcome.html", template_context)
about_html = render_to_string("kalite_zim/about.html", template_context)
# Replace absolute references to '/static' with relative
welcome_html = welcome_html.replace("/static", "static")
about_html = about_html.replace("/static", "static")
# Write the | |
"Type": "STRING"
}
]
}
},
"networkphysicaltable": {
"RelationalTable": {
"DataSourceArn": dataARN,
"Schema": "patch_compliance",
"Name": AWS_NETWORK_TABLE,
"InputColumns": [
{
"Name": "accountid",
"Type": "STRING"
},
{
"Name": "resourceid",
"Type": "STRING"
},
{
"Name": "ipv4",
"Type": "STRING"
},
{
"Name": "ipv6",
"Type": "STRING"
},
{
"Name": "dnsserver",
"Type": "STRING"
},
{
"Name": "name",
"Type": "STRING"
},
{
"Name": "dhcpserver",
"Type": "STRING"
},
{
"Name": "macaddress",
"Type": "STRING"
},
{
"Name": "region",
"Type": "STRING"
},
{
"Name": "subnetmask",
"Type": "STRING"
},
{
"Name": "resourcetype",
"Type": "STRING"
},
{
"Name": "gateway",
"Type": "STRING"
}
]
}
},
"configpcphysicaltable": {
"RelationalTable": {
"DataSourceArn": dataARN,
"Schema": "patch_compliance",
"Name": CONFIGPC_TABLE,
"InputColumns": [
{
"Name": "configpcinstanceid",
"Type": "STRING"
},
{
"Name": "configpcstatus",
"Type": "STRING"
},
{
"Name": "configpcdate",
"Type": "STRING"
}
]
}
},
"instancedetailedinformationphysicaltable": {
"RelationalTable": {
"DataSourceArn": dataARN,
"Schema": "patch_compliance",
"Name": AWS_INSTANCE_DETAILED_INFORMATION_TABLE,
"InputColumns": [
{
"Name": "resourceid",
"Type": "STRING"
},
{
"Name": "cpusockets",
"Type": "STRING"
},
{
"Name": "cpus",
"Type": "STRING"
},
{
"Name": "cpuhyperthreadenabled",
"Type": "STRING"
},
{
"Name": "cpumodel",
"Type": "STRING"
},
{
"Name": "schemaversion",
"Type": "STRING"
},
{
"Name": "resourcetype",
"Type": "STRING"
},
{
"Name": "cpucores",
"Type": "STRING"
},
{
"Name": "accountid",
"Type": "STRING"
},
{
"Name": "osservicepack",
"Type": "STRING"
},
{
"Name": "cpuspeedmhz",
"Type": "STRING"
},
{
"Name": "capturetime",
"Type": "STRING"
},
{
"Name": "region",
"Type": "STRING"
}
]
}
},
"patchsummaryphysicaltable": {
"RelationalTable": {
"DataSourceArn": dataARN,
"Schema": "patch_compliance",
"Name": AWS_PATCH_SUMMARY_TABLE,
"InputColumns": [
{
"Name": "installedothercount",
"Type": "STRING"
},
{
"Name": "resourceid",
"Type": "STRING"
},
{
"Name": "snapshotid",
"Type": "STRING"
},
{
"Name": "rebootoption",
"Type": "STRING"
},
{
"Name": "notapplicablecount",
"Type": "STRING"
},
{
"Name": "operationtype",
"Type": "STRING"
},
{
"Name": "schemaversion",
"Type": "STRING"
},
{
"Name": "resourcetype",
"Type": "STRING"
},
{
"Name": "baselineid",
"Type": "STRING"
},
{
"Name": "patchgroup",
"Type": "STRING"
},
{
"Name": "installedcount",
"Type": "STRING"
},
{
"Name": "accountid",
"Type": "STRING"
},
{
"Name": "installedpendingrebootcount",
"Type": "STRING"
},
{
"Name": "installedrejectedcount",
"Type": "STRING"
},
{
"Name": "missingcount",
"Type": "STRING"
},
{
"Name": "failedcount",
"Type": "STRING"
},
{
"Name": "capturetime",
"Type": "STRING"
},
{
"Name": "operationendtime",
"Type": "STRING"
},
{
"Name": "region",
"Type": "STRING"
},
{
"Name": "operationstarttime",
"Type": "STRING"
}
]
}
}
},
LogicalTableMap={
"configpcjoinlogicaltable": {
"Alias": "configpcjoin",
"DataTransforms": [
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "patchsummaryoperationenddatetime",
"ColumnId": "patchsummaryoperationenddatetime",
"Expression": "parseDate(replace(substring({patchsummaryoperationendtime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss')"
}
]
}
},
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "patchsummaryoperationstartdatetime",
"ColumnId": "patchsummaryoperationstartdatetime",
"Expression": "parseDate(replace(substring({patchsummaryoperationstarttime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss')"
}
]
}
},
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "patchsummarycapturedatetime",
"ColumnId": "patchsummarycapturedatetime",
"Expression": "parseDate(replace(substring({patchsummarycapturetime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss')"
}
]
}
},
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "instancedetailedcapturedatetime",
"ColumnId": "instancedetailedcapturedatetime",
"Expression": "parseDate(replace(substring({instancedetailedcapturetime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss')"
}
]
}
},
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "instanceinformationcapturedatetime",
"ColumnId": "instanceinformationcapturedatetime",
"Expression": "parseDate(replace(substring({instanceinformationcapturetime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss')"
}
]
}
},
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "patchexecutiondatetime",
"ColumnId": "patchexecutiondatetime",
"Expression": "parseDate(replace(substring({patchexecutiontime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss')"
}
]
}
},
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "patchcapturedatetime",
"ColumnId": "patchcapturedatetime",
"Expression": "parseDate(replace(substring({patchcapturetime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss')"
}
]
}
},
{
"CreateColumnsOperation": {
"Columns": [
{
"ColumnName": "patchinstalleddatetime",
"ColumnId": "patchinstalleddatetime",
"Expression": "ifelse(strlen({patchinstalledtime})>0,parseDate(replace(substring({patchinstalledtime},1,19),'T',\" \"),'yyyy-MM-dd HH:mm:ss'),NULL)"
}
]
}
},
{
"ProjectOperation": {
"ProjectedColumns": [
"configmicdate",
"configmicaccountid",
"configmicregion",
"configmicinstanceid",
"configmicname",
"configmicstate",
"configmiccompliancestatus",
"patchsummarybaselineid",
"patchsummarymissingcount",
"patchsummaryinstalledothercount",
"patchsummaryinstalledpendingrebootcount",
"patchsummarysnapshotid",
"patchsummarynotapplicablecount",
"patchsummaryrebootoption",
"patchsummaryfailedcount",
"patchsummaryoperationtype",
"patchsummaryinstalledcount",
"patchsummaryinstalledrejectedcount",
"patchsummarypatchgroup",
"patchsummaryresourceid",
"networkresourceid",
"networkname",
"networksubnetmask",
"networkgateway",
"networkdhcpserver",
"networkdnsserver",
"networkmacaddress",
"networkipv4",
"networkipv6",
"instancedetailedcpus",
"instancedetailedosservicepack",
"instancedetailedcpuhyperthreadenabled",
"instancedetailedcpuspeedmhz",
"instancedetailedcpusockets",
"instancedetailedcpucores",
"instancedetailedcpumodel",
"instancedetailedresourceid",
"instanceinformationplatformname",
"instanceinformationplatformversion",
"instanceinformationagenttype",
"instanceinformationagentversion",
"instanceinformationinstanceid",
"instanceinformationinstancestatus",
"instanceinformationcomputername",
"instanceinformationipaddress",
"instanceinformationplatformtype",
"instanceinformationresourceid",
"patchstatus",
"patchexecutiontype",
"patchpatchseverity",
"patchtitle",
"patchseverity",
"patchcompliancetype",
"patchclassification",
"patchdocumentversion",
"patchid",
"patchpatchstate",
"patchpatchbaselineid",
"patchdocumentname",
"patchpatchgroup",
"patchexecutionid",
"patchresourceid",
"configpcdate",
"configpcinstanceid",
"configpcstatus",
"patchsummaryoperationenddatetime",
"patchsummaryoperationstartdatetime",
"patchsummarycapturedatetime",
"instancedetailedcapturedatetime",
"instanceinformationcapturedatetime",
"patchexecutiondatetime",
"patchcapturedatetime",
"patchinstalleddatetime"
]
}
}
],
"Source": {
"JoinInstruction": {
"LeftOperand": "patchjoinlogicaltable",
"RightOperand": "configpclogicaltable",
"Type": "LEFT",
"OnClause": "{configmicinstanceid} = {configpcinstanceid}"
}
}
},
"instancedetailedjoinlogicaltable": {
"Alias": "instancedetailedjoin",
"Source": {
"JoinInstruction": {
"LeftOperand": "networkjoinlogicaltable",
"RightOperand": "instancedetailedinformationlogicaltable",
"Type": "LEFT",
"OnClause": "{configmicinstanceid} = {instancedetailedresourceid}"
}
}
},
"patchsummaryjoinlogicaltable": {
"Alias": "patchsummaryjoin",
"Source": {
"JoinInstruction": {
"LeftOperand": "configmiclogicaltable",
"RightOperand": "patchsummarylogicaltable",
"Type": "LEFT",
"OnClause": "{configmicinstanceid} = {patchsummaryresourceid}"
}
}
},
"patchjoinlogicaltable": {
"Alias": "patchjoin",
"Source": {
"JoinInstruction": {
"LeftOperand": "instanceinformationjoinlogicaltable",
"RightOperand": "patchlogicaltable",
"Type": "LEFT",
"OnClause": "{configmicinstanceid} = {patchresourceid}"
}
}
},
"networkjoinlogicaltable": {
"Alias": "networkjoin",
"Source": {
"JoinInstruction": {
"LeftOperand": "patchsummaryjoinlogicaltable",
"RightOperand": "networklogicaltable",
"Type": "LEFT",
"OnClause": "{configmicinstanceid} = {networkresourceid}"
}
}
},
"configmiclogicaltable": {
"Alias": "configmic",
"DataTransforms": [
{
"CastColumnTypeOperation": {
"ColumnName": "configmicdate",
"NewColumnType": "DATETIME",
"Format": "yyyy-MM-dd"
}
}
],
"Source": {
"PhysicalTableId": "configmicphysicaltable"
}
},
"configpclogicaltable": {
"Alias": "configpc",
"Source": {
"PhysicalTableId": "configpcphysicaltable"
}
},
"instanceinformationjoinlogicaltable": {
"Alias": "instanceinformationjoin",
"Source": {
"JoinInstruction": {
"LeftOperand": "instancedetailedjoinlogicaltable",
"RightOperand": "instanceinformationlogicaltable",
"Type": "LEFT",
"OnClause": "{configmicinstanceid} = {instanceinformationinstanceid}"
}
}
},
"instancedetailedinformationlogicaltable": {
"Alias": "awsinstancedetailedinformation",
"DataTransforms": [
{
"RenameColumnOperation": {
"ColumnName": "resourcetype",
"NewColumnName": "instancedetailedresourcetype"
}
},
{
"RenameColumnOperation": {
"ColumnName": "region",
"NewColumnName": "instancedetailedregion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "accountid",
"NewColumnName": "instancedetailedaccountid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "schemaversion",
"NewColumnName": "instancedetailedschemaversion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "capturetime",
"NewColumnName": "instancedetailedcapturetime"
}
},
{
"RenameColumnOperation": {
"ColumnName": "resourceid",
"NewColumnName": "instancedetailedresourceid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "cpumodel",
"NewColumnName": "instancedetailedcpumodel"
}
},
{
"RenameColumnOperation": {
"ColumnName": "cpucores",
"NewColumnName": "instancedetailedcpucores"
}
},
{
"RenameColumnOperation": {
"ColumnName": "cpusockets",
"NewColumnName": "instancedetailedcpusockets"
}
},
{
"RenameColumnOperation": {
"ColumnName": "cpuspeedmhz",
"NewColumnName": "instancedetailedcpuspeedmhz"
}
},
{
"RenameColumnOperation": {
"ColumnName": "cpuhyperthreadenabled",
"NewColumnName": "instancedetailedcpuhyperthreadenabled"
}
},
{
"RenameColumnOperation": {
"ColumnName": "osservicepack",
"NewColumnName": "instancedetailedosservicepack"
}
},
{
"RenameColumnOperation": {
"ColumnName": "cpus",
"NewColumnName": "instancedetailedcpus"
}
},
{
"CastColumnTypeOperation": {
"ColumnName": "instancedetailedcpus",
"NewColumnType": "INTEGER"
}
},
{
"CastColumnTypeOperation": {
"ColumnName": "instancedetailedcpusockets",
"NewColumnType": "INTEGER"
}
},
{
"CastColumnTypeOperation": {
"ColumnName": "instancedetailedcpucores",
"NewColumnType": "INTEGER"
}
}
],
"Source": {
"PhysicalTableId": "instancedetailedinformationphysicaltable"
}
},
"instanceinformationlogicaltable": {
"Alias": "awsinstanceinformation",
"DataTransforms": [
{
"RenameColumnOperation": {
"ColumnName": "resourcetype",
"NewColumnName": "instanceinformationresourcetype"
}
},
{
"RenameColumnOperation": {
"ColumnName": "region",
"NewColumnName": "instanceinformationregion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "accountid",
"NewColumnName": "instanceinformationaccountid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "schemaversion",
"NewColumnName": "instanceinformationschemaversion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "capturetime",
"NewColumnName": "instanceinformationcapturetime"
}
},
{
"RenameColumnOperation": {
"ColumnName": "resourceid",
"NewColumnName": "instanceinformationresourceid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "platformtype",
"NewColumnName": "instanceinformationplatformtype"
}
},
{
"RenameColumnOperation": {
"ColumnName": "ipaddress",
"NewColumnName": "instanceinformationipaddress"
}
},
{
"RenameColumnOperation": {
"ColumnName": "computername",
"NewColumnName": "instanceinformationcomputername"
}
},
{
"RenameColumnOperation": {
"ColumnName": "instancestatus",
"NewColumnName": "instanceinformationinstancestatus"
}
},
{
"RenameColumnOperation": {
"ColumnName": "instanceid",
"NewColumnName": "instanceinformationinstanceid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "agentversion",
"NewColumnName": "instanceinformationagentversion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "agenttype",
"NewColumnName": "instanceinformationagenttype"
}
},
{
"RenameColumnOperation": {
"ColumnName": "platformversion",
"NewColumnName": "instanceinformationplatformversion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "platformname",
"NewColumnName": "instanceinformationplatformname"
}
}
],
"Source": {
"PhysicalTableId": "instanceinformationphysicaltable"
}
},
"networklogicaltable": {
"Alias": "awsnetwork",
"DataTransforms": [
{
"RenameColumnOperation": {
"ColumnName": "resourcetype",
"NewColumnName": "networkresourcetype"
}
},
{
"RenameColumnOperation": {
"ColumnName": "region",
"NewColumnName": "networkregion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "accountid",
"NewColumnName": "networkaccountid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "ipv6",
"NewColumnName": "networkipv6"
}
},
{
"RenameColumnOperation": {
"ColumnName": "ipv4",
"NewColumnName": "networkipv4"
}
},
{
"RenameColumnOperation": {
"ColumnName": "macaddress",
"NewColumnName": "networkmacaddress"
}
},
{
"RenameColumnOperation": {
"ColumnName": "dnsserver",
"NewColumnName": "networkdnsserver"
}
},
{
"RenameColumnOperation": {
"ColumnName": "dhcpserver",
"NewColumnName": "networkdhcpserver"
}
},
{
"RenameColumnOperation": {
"ColumnName": "gateway",
"NewColumnName": "networkgateway"
}
},
{
"RenameColumnOperation": {
"ColumnName": "subnetmask",
"NewColumnName": "networksubnetmask"
}
},
{
"RenameColumnOperation": {
"ColumnName": "name",
"NewColumnName": "networkname"
}
},
{
"RenameColumnOperation": {
"ColumnName": "resourceid",
"NewColumnName": "networkresourceid"
}
}
],
"Source": {
"PhysicalTableId": "networkphysicaltable"
}
},
"patchlogicaltable": {
"Alias": "patch",
"Source": {
"PhysicalTableId": "patchphysicaltable"
}
},
"patchsummarylogicaltable": {
"Alias": "awspatchsummary",
"DataTransforms": [
{
"RenameColumnOperation": {
"ColumnName": "resourcetype",
"NewColumnName": "patchsummaryresourcetype"
}
},
{
"RenameColumnOperation": {
"ColumnName": "region",
"NewColumnName": "patchsummaryregion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "accountid",
"NewColumnName": "patchsummaryaccountid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "schemaversion",
"NewColumnName": "patchsummaryschemaversion"
}
},
{
"RenameColumnOperation": {
"ColumnName": "capturetime",
"NewColumnName": "patchsummarycapturetime"
}
},
{
"RenameColumnOperation": {
"ColumnName": "resourceid",
"NewColumnName": "patchsummaryresourceid"
}
},
{
"RenameColumnOperation": {
"ColumnName": "patchgroup",
"NewColumnName": "patchsummarypatchgroup"
}
},
{
"RenameColumnOperation": {
"ColumnName": "installedrejectedcount",
"NewColumnName": "patchsummaryinstalledrejectedcount"
}
},
{
"RenameColumnOperation": {
"ColumnName": "installedcount",
"NewColumnName": "patchsummaryinstalledcount"
}
},
{
"RenameColumnOperation": {
"ColumnName": "operationtype",
"NewColumnName": "patchsummaryoperationtype"
}
},
{
"RenameColumnOperation": {
"ColumnName": | |
for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
# This code block is copied from numpy.matrix_power().
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('input must be a square array')
# Get the number of rows and columns.
n, n = A.shape
# Triangularize the matrix if necessary,
# attempting to preserve dtype if possible.
if np.array_equal(A, np.triu(A)):
Z = None
T = A
else:
if np.isrealobj(A):
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T, Z)
else:
T, Z = schur(A, output='complex')
# Zeros on the diagonal of the triangular matrix are forbidden,
# because the inverse scaling and squaring cannot deal with it.
T_diag = np.diag(T)
if _count_nonzero(T_diag) != n:
raise FractionalMatrixPowerError(
'cannot use inverse scaling and squaring to find '
'the fractional matrix power of a singular matrix')
# If the triangular matrix is real and has a negative
# entry on the diagonal, then force the matrix to be complex.
if np.isrealobj(T) and np.min(T_diag) < 0:
T = T.astype(complex)
# Get the fractional power of the triangular matrix,
# and de-triangularize it if necessary.
U = _remainder_matrix_power_triu(T, t)
if Z is not None:
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
else:
return U
def fractional_matrix_power(A, p):
    """
    Compute the fractional power of a matrix.

    Proceeds according to the discussion in section (6) of [1]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    p : float
        Fractional power.

    Returns
    -------
    X : (N, N) ndarray
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798
    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')
    # Integer powers are exact and cheap; delegate straight to numpy.
    if p == int(p):
        return np.linalg.matrix_power(A, int(p))
    # Compute singular values.
    s = svdvals(A)
    # Inverse scaling and squaring cannot deal with a singular matrix,
    # because the process of repeatedly taking square roots
    # would not converge to the identity matrix.
    if s[-1]:
        # Compute the condition number relative to matrix inversion,
        # and use this to decide between floor(p) and ceil(p).
        k2 = s[0] / s[-1]
        p1 = p - np.floor(p)
        p2 = p - np.ceil(p)
        if p1 * k2 ** (1 - p1) <= -p2 * k2:
            a = int(np.floor(p))
            b = p1
        else:
            a = int(np.ceil(p))
            b = p2
        try:
            # Split A**p = A**a . A**b with integer a and |b| < 1;
            # the remainder b is handled by inverse scaling and squaring.
            R = _remainder_matrix_power(A, b)
            Q = np.linalg.matrix_power(A, a)
            return Q.dot(R)
        except np.linalg.LinAlgError:
            # Fall through to the generic funm-based fallback below.
            pass
    # If p is negative then we are going to give up.
    # If p is non-negative then we can fall back to generic funm.
    if p < 0:
        X = np.empty_like(A)
        X.fill(np.nan)
        return X
    else:
        p1 = p - np.floor(p)
        a = int(np.floor(p))
        b = p1
        R, _ = funm(A, lambda x: pow(x, b), disp=False)
        Q = np.linalg.matrix_power(A, a)
        return Q.dot(R)
def _logm_triu(T):
    """
    Compute matrix logarithm of an upper triangular matrix.

    The matrix logarithm is the inverse of
    expm: expm(logm(`T`)) == `T`

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose logarithm to evaluate

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `T`

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing Lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798
    """
    T = np.asarray(T)
    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T.shape
    # Construct T0 with the appropriate type,
    # depending on the dtype and the spectrum of T:
    # a real matrix with a negative eigenvalue on the diagonal
    # has a complex logarithm, so it must be promoted to complex.
    T_diag = np.diag(T)
    keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0
    if keep_it_real:
        T0 = T
    else:
        T0 = T.astype(complex)
    # Define bounds given in Table (2.1) of [1]_; theta[m] is the largest
    # allowed value of ||T - I|| for a degree-m Pade approximation.
    theta = (None,
            1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
            1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
            4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
            6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)
    # R = T0**(2**-s) - I after s square roots; m is the chosen Pade degree.
    R, s, m = _inverse_squaring_helper(T0, theta)

    # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
    # This requires the nodes and weights
    # corresponding to degree-m Gauss-Legendre quadrature.
    # These quadrature arrays need to be transformed from the [-1, 1] interval
    # to the [0, 1] interval.
    nodes, weights = scipy.special.p_roots(m)
    nodes = nodes.real
    if nodes.shape != (m,) or weights.shape != (m,):
        raise Exception('internal error')
    # Affine map from [-1, 1] to [0, 1].
    nodes = 0.5 + 0.5 * nodes
    weights = 0.5 * weights
    ident = np.identity(n)
    U = np.zeros_like(R)
    # Sum the partial fraction terms alpha_j * R * (I + beta_j * R)^-1.
    for alpha, beta in zip(weights, nodes):
        U += solve_triangular(ident + beta*R, alpha*R)
    # Undo the s square roots: logm(T) = 2**s * logm(T**(2**-s)).
    U *= np.exp2(s)

    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:
        # Recompute diagonal entries of U exactly from the eigenvalues.
        U[np.diag_indices(n)] = np.log(np.diag(T0))

        # Recompute superdiagonal entries of U.
        # This indexing of this code should be renovated
        # when newer np.diagonal() becomes available.
        for i in range(n-1):
            l1 = T0[i, i]
            l2 = T0[i+1, i+1]
            t12 = T0[i, i+1]
            U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)

    # Return the logm of the upper triangular matrix.
    if not np.array_equal(U, np.triu(U)):
        raise Exception('internal inconsistency')
    return U
def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
# The input matrix should be upper triangular.
# The eps is ad hoc and is not meant to be machine precision.
tri_eps = 1e-20
abs_diag = np.absolute(np.diag(T))
if np.any(abs_diag == 0):
exact_singularity_msg = 'The logm input matrix is exactly singular.'
warnings.warn(exact_singularity_msg, LogmExactlySingularWarning)
if not inplace:
T = T.copy()
n = T.shape[0]
for i in range(n):
if not T[i, i]:
T[i, i] = tri_eps
elif np.any(abs_diag < tri_eps):
near_singularity_msg = 'The logm input matrix may be nearly singular.'
warnings.warn(near_singularity_msg, LogmNearlySingularWarning)
return T
def logm(A):
"""
Compute matrix logarithm.
The matrix logarithm is the inverse of
expm: expm(logm(`A`)) == `A`
Parameters
----------
A : (N, N) array_like
Matrix whose logarithm to evaluate
Returns
-------
logm : (N, N) ndarray
Matrix logarithm of `A`
References
----------
.. [1] <NAME> and <NAME> (2012)
"Improved Inverse Scaling and Squaring Algorithms
for the Matrix Logarithm."
SIAM Journal on Scientific Computing, 34 (4). C152-C169.
ISSN 1095-7197
.. [2] <NAME> (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
.. [3] <NAME> and <NAME> (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
# In this function we look at triangular matrices that are similar
# to the input matrix. If any diagonal entry of such a triangular matrix
# is exactly zero then the original matrix is singular.
# The matrix logarithm does not exist for such matrices,
# but in such cases we will pretend that the diagonal entries that are zero
# are actually slightly positive by an ad-hoc amount, in the interest
# of returning something more useful than NaN. This will cause a warning.
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
n = A.shape[0]
# If the input matrix dtype is integer then copy to a float dtype matrix.
if issubclass(A.dtype.type, np.integer):
A = np.asarray(A, dtype=float)
keep_it_real = np.isrealobj(A)
try:
if np.array_equal(A, np.triu(A)):
A = _logm_force_nonsingular_triangular_matrix(A)
if np.min(np.diag(A)) < 0:
A = A.astype(complex)
return _logm_triu(A)
else:
if keep_it_real:
T, Z = schur(A)
if not np.array_equal(T, np.triu(T)):
T, Z = rsf2csf(T,Z)
else:
T, Z = schur(A, output='complex')
T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
U = _logm_triu(T)
ZH = np.conjugate(Z).T
return Z.dot(U).dot(ZH)
| |
0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'relative humidity fraction 20',
{'name': u'Relative Humidity Fraction 20',
'pyname': u'relative_humidity_fraction_20',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'water vapor diffusion resistance factor 20',
{'name': u'Water Vapor Diffusion Resistance Factor 20',
'pyname': u'water_vapor_diffusion_resistance_factor_20',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'relative humidity fraction 21',
{'name': u'Relative Humidity Fraction 21',
'pyname': u'relative_humidity_fraction_21',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'water vapor diffusion resistance factor 21',
{'name': u'Water Vapor Diffusion Resistance Factor 21',
'pyname': u'water_vapor_diffusion_resistance_factor_21',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'relative humidity fraction 22',
{'name': u'Relative Humidity Fraction 22',
'pyname': u'relative_humidity_fraction_22',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'water vapor diffusion resistance factor 22',
{'name': u'Water Vapor Diffusion Resistance Factor 22',
'pyname': u'water_vapor_diffusion_resistance_factor_22',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'relative humidity fraction 23',
{'name': u'Relative Humidity Fraction 23',
'pyname': u'relative_humidity_fraction_23',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'water vapor diffusion resistance factor 23',
{'name': u'Water Vapor Diffusion Resistance Factor 23',
'pyname': u'water_vapor_diffusion_resistance_factor_23',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'relative humidity fraction 24',
{'name': u'Relative Humidity Fraction 24',
'pyname': u'relative_humidity_fraction_24',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'water vapor diffusion resistance factor 24',
{'name': u'Water Vapor Diffusion Resistance Factor 24',
'pyname': u'water_vapor_diffusion_resistance_factor_24',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'relative humidity fraction 25',
{'name': u'Relative Humidity Fraction 25',
'pyname': u'relative_humidity_fraction_25',
'maximum': 1.0,
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'}),
(u'water vapor diffusion resistance factor 25',
{'name': u'Water Vapor Diffusion Resistance Factor 25',
'pyname': u'water_vapor_diffusion_resistance_factor_25',
'required-field': False,
'autosizable': False,
'minimum': 0.0,
'autocalculatable': False,
'type': 'real',
'unit': u'dimensionless'})]),
'format': None,
'group': u'Surface Construction Elements',
'min-fields': 0,
'name': u'MaterialProperty:HeatAndMoistureTransfer:Diffusion',
'pyname': u'MaterialPropertyHeatAndMoistureTransferDiffusion',
'required-object': False,
'unique-object': False}
@property
def material_name(self):
"""field `Material Name`
| Moisture Material Name that the moisture properties will be added to.
Args:
value (str): value for IDD Field `Material Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `material_name` or None if not set
"""
return self["Material Name"]
@material_name.setter
def material_name(self, value=None):
"""Corresponds to IDD field `Material Name`"""
self["Material Name"] = value
@property
def number_of_data_pairs(self):
"""field `Number of Data Pairs`
| Water Vapor Diffusion Resistance Factor
| value >= 1
| value <= 25
Args:
value (int): value for IDD Field `Number of Data Pairs`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `number_of_data_pairs` or None if not set
"""
return self["Number of Data Pairs"]
@number_of_data_pairs.setter
def number_of_data_pairs(self, value=None):
"""Corresponds to IDD field `Number of Data Pairs`"""
self["Number of Data Pairs"] = value
@property
def relative_humidity_fraction_1(self):
"""field `Relative Humidity Fraction 1`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_1` or None if not set
"""
return self["Relative Humidity Fraction 1"]
@relative_humidity_fraction_1.setter
def relative_humidity_fraction_1(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 1`"""
self["Relative Humidity Fraction 1"] = value
@property
def water_vapor_diffusion_resistance_factor_1(self):
"""field `Water Vapor Diffusion Resistance Factor 1`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 1`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_1` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 1"]
@water_vapor_diffusion_resistance_factor_1.setter
def water_vapor_diffusion_resistance_factor_1(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
1`"""
self["Water Vapor Diffusion Resistance Factor 1"] = value
@property
def relative_humidity_fraction_2(self):
"""field `Relative Humidity Fraction 2`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_2` or None if not set
"""
return self["Relative Humidity Fraction 2"]
@relative_humidity_fraction_2.setter
def relative_humidity_fraction_2(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 2`"""
self["Relative Humidity Fraction 2"] = value
@property
def water_vapor_diffusion_resistance_factor_2(self):
"""field `Water Vapor Diffusion Resistance Factor 2`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 2`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_2` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 2"]
@water_vapor_diffusion_resistance_factor_2.setter
def water_vapor_diffusion_resistance_factor_2(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
2`"""
self["Water Vapor Diffusion Resistance Factor 2"] = value
@property
def relative_humidity_fraction_3(self):
"""field `Relative Humidity Fraction 3`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_3` or None if not set
"""
return self["Relative Humidity Fraction 3"]
@relative_humidity_fraction_3.setter
def relative_humidity_fraction_3(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 3`"""
self["Relative Humidity Fraction 3"] = value
@property
def water_vapor_diffusion_resistance_factor_3(self):
"""field `Water Vapor Diffusion Resistance Factor 3`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 3`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_3` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 3"]
@water_vapor_diffusion_resistance_factor_3.setter
def water_vapor_diffusion_resistance_factor_3(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
3`"""
self["Water Vapor Diffusion Resistance Factor 3"] = value
@property
def relative_humidity_fraction_4(self):
"""field `Relative Humidity Fraction 4`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_4` or None if not set
"""
return self["Relative Humidity Fraction 4"]
@relative_humidity_fraction_4.setter
def relative_humidity_fraction_4(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 4`"""
self["Relative Humidity Fraction 4"] = value
@property
def water_vapor_diffusion_resistance_factor_4(self):
"""field `Water Vapor Diffusion Resistance Factor 4`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 4`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_4` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 4"]
@water_vapor_diffusion_resistance_factor_4.setter
def water_vapor_diffusion_resistance_factor_4(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
4`"""
self["Water Vapor Diffusion Resistance Factor 4"] = value
@property
def relative_humidity_fraction_5(self):
"""field `Relative Humidity Fraction 5`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value for IDD Field `Relative Humidity Fraction 5`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `relative_humidity_fraction_5` or None if not set
"""
return self["Relative Humidity Fraction 5"]
@relative_humidity_fraction_5.setter
def relative_humidity_fraction_5(self, value=None):
"""Corresponds to IDD field `Relative Humidity Fraction 5`"""
self["Relative Humidity Fraction 5"] = value
@property
def water_vapor_diffusion_resistance_factor_5(self):
"""field `Water Vapor Diffusion Resistance Factor 5`
| Units: dimensionless
Args:
value (float): value for IDD Field `Water Vapor Diffusion Resistance Factor 5`
Raises:
ValueError: if `value` is not a valid value
Returns:
float: the value of `water_vapor_diffusion_resistance_factor_5` or None if not set
"""
return self["Water Vapor Diffusion Resistance Factor 5"]
@water_vapor_diffusion_resistance_factor_5.setter
def water_vapor_diffusion_resistance_factor_5(self, value=None):
"""Corresponds to IDD field `Water Vapor Diffusion Resistance Factor
5`"""
self["Water Vapor Diffusion Resistance Factor 5"] = value
@property
def relative_humidity_fraction_6(self):
"""field `Relative Humidity Fraction 6`
| The relative humidity is entered as a fraction.
| Units: dimensionless
| value <= 1.0
Args:
value (float): value | |
# <gh_stars>0
"""Creating and manipulating measurement sets from T3 visibilities.
Author: <NAME>, <EMAIL>
"""
import yaml
import h5py
import numpy as np
from pkg_resources import resource_filename
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import Angle
from antpos.utils import get_itrf
from pyuvdata import UVData
import casatools as cc
from casacore.tables import table
from dsautils import cnf
from dsamfs.io import initialize_uvh5_file, update_uvh5_file
from dsacalib.ms_io import extract_vis_from_ms
from dsacalib.fringestopping import calc_uvw
import dsacalib.constants as ct
from dsacalib.preprocess import remove_outrigger_delays
# Path to the T3 correlator parameter file packaged with dsaT3.
PARAMFILE = resource_filename('dsaT3', 'data/T3_parameters.yaml')
# Load only the 'T3corr' section of the parameter file.
with open(PARAMFILE) as YAMLF:
    T3PARAMS = yaml.load(YAMLF, Loader=yaml.FullLoader)['T3corr']
# DSA configuration: correlator and fringestopping parameter sets.
MYCONF = cnf.Conf()
CORRPARAMS = MYCONF.get('corr')
MFSPARAMS = MYCONF.get('fringe')
# Correlator node numbers, 1 through 16.
CORR_ORDER = np.arange(1, 17)
# NOTE(review): presumably maps correlator input position to antenna
# number; confirm against the array wiring before relying on it.
ANTENNA_ORDER = [
    24,
    25,
    26,
    27,
    28,
    29,
    30,
    31,
    32,
    33,
    34,
    35,
    20,
    19,
    18,
    17,
    16,
    15,
    14,
    13,
    100,
    101,
    102,
    116,
    103,
    12,
    11,
    10,
    9,
    8,
    7,
    6,
    5,
    4,
    3,
    2,
    1,
    104,
    105,
    106,
    107,
    108,
    109,
    110,
    111,
    112,
    113,
    114,
    115,
    117,
    36,
    37,
    38,
    39,
    40,
    41,
    42,
    43,
    44,
    45,
    46,
    47,
    48,
    49,
]
def get_mjd(armed_mjd, utc_start, specnum):
    """Compute the start time (in MJD) of a voltage dump.

    Parameters
    ----------
    armed_mjd : float
        The time at which the snaps were armed, in mjd.
    utc_start : int
        The spectrum number at which the correlator was started.
    specnum : int
        The spectrum number of the first spectrum in the voltage dump,
        referenced to when the correlator was started.

    Returns
    -------
    tstart : float
        The start time of the voltage dump in mjd.
    """
    # Days elapsed between arming the snaps and starting the correlator.
    start_days = utc_start*4*8.192e-6/86400
    # Duration of a single spectrum, in seconds.
    spectrum_s = 1/(250e6/8192/2)
    return armed_mjd + start_days + spectrum_s*specnum/ct.SECONDS_PER_DAY
def get_blen(antennas):
    """Compute baseline vectors for a subset of antennas.

    Parameters
    ----------
    antennas : list
        The antennas used in the array.

    Returns
    -------
    blen : ndarray
        The ITRF coordinates of all of the baselines.
    bname : list
        The names of all of the baselines.
    """
    ant_itrf = get_itrf(
        latlon_center=(ct.OVRO_LAT*u.rad, ct.OVRO_LON*u.rad, ct.OVRO_ALT*u.m)
    ).loc[antennas]
    xx = np.array(ant_itrf['dx_m'])
    yy = np.array(ant_itrf['dy_m'])
    zz = np.array(ant_itrf['dz_m'])
    # One baseline per unordered antenna pair, autocorrelations included.
    nants = len(antennas)
    nbls = (nants*(nants+1))//2
    blen = np.zeros((nbls, 3))
    bname = []
    row = 0
    for i in range(nants):
        for j in range(i, nants):
            blen[row] = (xx[i]-xx[j], yy[i]-yy[j], zz[i]-zz[j])
            bname.append('{0}-{1}'.format(antennas[i], antennas[j]))
            row += 1
    return blen, bname
def generate_T3_uvh5(name, pt_dec, tstart, ntint, nfint, filelist, params=T3PARAMS, start_offset=None, end_offset=None):
    """Generates uvh5 files from the T3 correlations.

    One hdf5 file is written per correlator node in `filelist`.

    Parameters
    ----------
    name : str
        Prefix for the output files; node files are named <name>_<corr>.hdf5.
    pt_dec : quantity
        The pointing declination in degrees or equivalent.
    tstart : astropy.time.Time instance
        The start time of the correlated data.
    ntint : float
        The number of time bins that have been binned together (compared to the
        native correlator resolution).
    nfint : float
        The number of frequency bins to bin together before writing
        (compared to the native resolution).
    filelist : dictionary
        The correlator data files for each node.
    params : dictionary
        T3 parameters.
    start_offset : int
        The timesample to start at. Defaults to 0.
    end_offset : int
        The timesample to end at. Defaults to the end of the file.

    Returns
    -------
    str
        The name of the last hdf5 file created.
    """
    antenna_order = params['antennas']
    # Centre frequencies (GHz) of all correlator channels.
    fobs = params['f0_GHz']+params['deltaf_MHz']*1e-3*(
        np.arange(params['nchan'])+0.5)
    nant = len(antenna_order)
    nbls = (nant*(nant+1))//2
    tsamp = params['deltat_s']*ntint*u.s
    # Mid-points of the integrated time bins.
    tobs = tstart + (np.arange(params['nsubint']//ntint)+0.5)*tsamp
    if start_offset is None:
        start_offset = 0
    if end_offset is None:
        end_offset = len(tobs)
    tobs = tobs[start_offset:end_offset]
    blen, bname = get_blen(params['antennas'])
    # One frame = one time sample: baselines x channels x pols, with
    # 2 floats (re, im) per visibility.
    itemspframe = nbls*params['nchan_corr']*params['npol']*2
    framespblock = 16
    itemspblock = itemspframe*framespblock
    assert (end_offset - start_offset)%framespblock == 0
    nblocks = (end_offset-start_offset)//framespblock
    # Per-baseline outrigger delay differences.
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the supported, equivalent spelling.
    delays = np.zeros(len(bname), dtype=int)
    for i, bn in enumerate(bname):
        ant1, ant2 = bn.split('-')
        delays[i] = MFSPARAMS['outrigger_delays'].get(int(ant1), 0)-\
            MFSPARAMS['outrigger_delays'].get(int(ant2), 0)
    for corr, corrfile in filelist.items():
        ch0 = params['ch0'][corr]
        fobs_corr_full = fobs[ch0:(ch0+params['nchan_corr'])]
        # Channel centres after binning by nfint.
        fobs_corr = np.median(fobs_corr_full.reshape(-1, nfint), axis=-1)
        outname = '{1}_{0}.hdf5'.format(corr, name)
        # Phase model for the outrigger delays, divided out of the data.
        vis_model = np.exp(2j*np.pi*fobs_corr_full[:, np.newaxis]*
                           delays[np.newaxis, :, np.newaxis, np.newaxis])
        vis_model = vis_model.astype(np.complex64)
        with h5py.File(outname, 'w') as fhdf5:
            initialize_uvh5_file(
                fhdf5,
                len(fobs_corr),
                2,
                pt_dec.to_value(u.rad),
                antenna_order,
                fobs_corr,
                #outrigger_delays
            )
            with open(corrfile, 'rb') as cfhandler:
                # start_offset was defaulted to 0 above, so seek
                # unconditionally (seek(0) is a no-op).
                # NOTE(review): the factor of 32 implies 32 bytes per
                # item in the raw file -- confirm against the correlator
                # dump format.
                cfhandler.seek(start_offset*32*itemspframe)
                for i in range(nblocks):
                    data = np.fromfile(
                        cfhandler,
                        dtype=np.float32,
                        count=itemspblock
                    )
                    # Interleaved re/im floats -> complex visibilities.
                    data = data.reshape(-1, 2)
                    data = data[..., 0] + 1.j*data[..., 1]
                    # Keep only the first and last polarization products.
                    data = data.reshape(framespblock, nbls, len(fobs_corr_full), params['npol'])[..., [0, -1]]
                    data /= vis_model
                    if nfint > 1:
                        # Average down in frequency.
                        data = data.reshape(framespblock, nbls, len(fobs_corr), nfint, 2).mean(axis=3)
                    # Baseline uvw coordinates at each time in the block.
                    bu, bv, bw = calc_uvw(
                        blen,
                        tobs.mjd[i*framespblock:(i+1)*framespblock],
                        'HADEC',
                        np.zeros(framespblock)*u.rad,
                        np.ones(framespblock)*pt_dec
                    )
                    buvw = np.array([bu, bv, bw]).T
                    update_uvh5_file(
                        fhdf5,
                        data.astype(np.complex64),
                        tobs.jd[i*framespblock:(i+1)*framespblock],
                        tsamp,
                        bname,
                        buvw,
                        np.ones(data.shape, np.float32)
                    )
    return outname
def plot_image(imname, verbose=False, outname=None, show=True,
               expected_point=None):
    """Plots an image from the casa-generated image file.

    Parameters
    ----------
    imname : str
        The full path of the image file.
    verbose : bool
        If set to True, prints some information about the image.
    outname : str
        If provided, saves the image in <outname>_image.png.
    show : bool
        If False, the image is closed at the end of the function.
    expected_point : tuple
        If provided, the expected (ra, dec) of the peak; the offset of
        the measured peak from it is appended to the plot title.

    Returns
    -------
    tuple
        The (ra, dec) of the brightest pixel in the image, as Angles.
    """
    # Count of casa image-tool calls that reported failure.
    error = 0
    ia = cc.image()
    error += not ia.open(imname)
    dd = ia.summary()
    # dd has shape npixx, npixy, nch, npol
    npixx = dd['shape'][0]
    if verbose:
        print('Image shape: {0}'.format(dd['shape']))
    # First channel, first polarization only.
    imvals = ia.getchunk(0, int(npixx))[:, :, 0, 0]
    #imvals = fftshift(imvals)
    # NOTE(review): ia.done() presumably returns True on success, which
    # would increment the error count on success; `error += not
    # ia.done()` may have been intended, matching ia.open above -- confirm.
    error += ia.done()
    max_idxs = np.unravel_index(imvals.argmax(), imvals.shape)
    cellsizex = Angle(dd['incr'][0], dd['axisunits'][0])
    cellsizey = Angle(dd['incr'][1], dd['axisunits'][1])
    # Reference (ra, dec) at the image reference pixel.
    ra, dec = (
        Angle('{0}{1}'.format(dd['refval'][0], dd['axisunits'][0])),
        Angle('{0}{1}'.format(dd['refval'][1], dd['axisunits'][1]))
    )
    # Sky position of the brightest pixel; the ra offset is scaled by
    # 1/cos(dec) to undo the projection foreshortening.
    brightest_point = (
        ra +
        Angle('{0}{1}'.format(
            dd['incr'][0]*(max_idxs[0]-dd['refpix'][0]),
            dd['axisunits'][0]
        ))/np.cos(dec),
        dec +
        Angle('{0}{1}'.format(
            dd['incr'][1]*(max_idxs[1]-dd['refpix'][1]),
            dd['axisunits'][1]
        ))
    )
    if verbose:
        print('Peak SNR at pix ({0},{1}) = {2}'.format(max_idxs[0],
                                                       max_idxs[1],
                                                       imvals.max()/
                                                       imvals.std()))
        print('Value at peak: {0}'.format(imvals.max()))
        print('Value at origin: {0}'.format(imvals[imvals.shape[0]//2,
                                                   imvals.shape[1]//2]))
    _, ax = plt.subplots(1, 1, figsize=(15, 8))
    # Extent recentres the axes on the image centre, in arcmin.
    pim = ax.imshow(
        imvals.transpose(),
        interpolation='none',
        origin='lower',
        extent=[
            (-imvals.shape[0]/2*Angle(cellsizex)).to_value(u.arcmin),
            (imvals.shape[0]/2*Angle(cellsizex)).to_value(u.arcmin),
            (-imvals.shape[1]/2*Angle(cellsizey)).to_value(u.arcmin),
            (imvals.shape[1]/2*Angle(cellsizey)).to_value(u.arcmin)
        ]
    )
    plt.colorbar(pim)
    # Crosshairs through the image centre.
    ax.axvline(0, color='white', alpha=0.5)
    ax.axhline(0, color='white', alpha=0.5)
    ax.set_xlabel('l (arcmin)')
    ax.set_ylabel('m (arcmin)')
    plttitle = '{0} {1:.2f} {2:.2f}'.format(
        imname,
        brightest_point[0],
        brightest_point[1]
    )
    if expected_point is not None:
        plttitle += ', offset by {0:.2f} {1:.2f}'.format(
            (brightest_point[0]-expected_point[0]).to(u.arcmin),
            (brightest_point[1]-expected_point[1]).to(u.arcmin)
        )
    plt.title(plttitle)
    if outname is not None:
        plt.savefig('{0}_image.png'.format(outname))
    if not show:
        plt.close()
    if error > 0:
        print('{0} errors occured during imaging'.format(error))
    return brightest_point
def read_bfweights(bfweights, bfdir):
    """Reads the beamforming weights.

    Parameters
    ----------
    bfweights : str
        The label of the file containing the weights. Will open
        <bfdir>/beamformer_weights_<bfweights>.yaml
    bfdir : str
        The directory in which the beamformer weights are stored.

    Returns
    -------
    antenna_order : list
        The order of the antennas in the bfweights array.
    bfweights : ndarray
        The beamformer weights, (antenna, frequency, polarization).
        Frequency is in the same order as in the correlator.
    """
    with open('{0}/beamformer_weights_{1}.yaml'.format(
        bfdir,
        bfweights,
    )) as yamlf:
        bfparams = yaml.load(yamlf, Loader=yaml.FullLoader)
    if 'cal_solutions' in bfparams.keys():
        bfparams = bfparams['cal_solutions']
    # Fall back to the station defaults when the weights file does not
    # record its own orderings.
    antenna_order = bfparams.get('antenna_order', ANTENNA_ORDER)
    corr_order = bfparams.get('corr_order', CORR_ORDER)
    # BUG FIX: np.complex was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin complex (complex128) is the supported spelling.
    gains = np.zeros(
        (len(antenna_order), len(corr_order), 48, 2),
        dtype=complex
    )
    for corridx, corr in enumerate(corr_order):
        with open(
            '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(
                bfdir,
                corr,
                bfweights
            ),
            'rb'
        ) as f:
            data = np.fromfile(f, '<f4')
        # The first 64 floats are a header; the remainder is
        # (antenna, channel, polarization, re/im) float32 values.
        temp = data[64:].reshape(64, 48, 2, 2)
        gains[:, corridx, :, :] = temp[..., 0]+1.0j*temp[..., 1]
    # Flatten the per-node axis into a single frequency axis.
    gains = gains.reshape(
        (len(antenna_order), len(corr_order)*48, 2)
    )
    return antenna_order, gains
def calibrate_T3ms(msname, bfweights, bfdir, dedisp_mask=None):
"""Calibrates a measurement set using the beamformer weights.
Calibrated data is written into the CORRECTED_DATA column.
Parameters
----------
msname : str
The name of the measurement set.
bfweights : str
The label of the file containing the weights. Will open
<bfdir>/beamformer_weights_<bfweights>.yaml
bfdir : str
The directory in which the beamformer weights are stored.
dedisp_mask : str
The path to a dedispersion mask to be applied.
"""
antenna_order, gains = read_bfweights(bfweights, bfdir)
gains = gains[:, ::-1, :]
data, _, fobs, flags, ant1, ant2, _, _, orig_shape = extract_vis_from_ms(
msname,
data='data'
)
print(data.shape)
data = data.reshape(
data.shape[0],
data.shape[1],
data.shape[2],
gains.shape[1],
-1,
data.shape[-1]
)
assert np.all(np.diff(fobs) > 0)
assert orig_shape == ['time', 'baseline', 'spw']
for i in range(data.shape[0]):
a1 = ant1[i]+1
a2 = ant2[i]+1
try:
bl_gains = (
np.conjugate(
gains[antenna_order.index(a2), ...]
)*gains[antenna_order.index(a1), ...]
)
bl_gains = np.exp(1.j*np.angle(bl_gains))
data[i, ...] *= bl_gains[:, np.newaxis, :]
except ValueError:
flags[i, ...] = 1
print('no calibration solutions for baseline {0}-{1}'.format(a1, a2))
data = data.swapaxes(0, 1).reshape((-1, len(fobs), data.shape[-1]))
flags = flags.swapaxes(0, 1).reshape((-1, len(fobs), flags.shape[-1]))
# dedisp_flags = np.load(dedisp_mask)
# | |
<filename>Python/data_structures.py<gh_stars>0
import unittest
import bisect
from heapq import *
"""
https://www.freecodecamp.org/news/the-top-data-structures-you-should-know-for-your-next-coding-interview-36af0831f5e3/
https://docs.python.org/3/tutorial/datastructures.html?highlight=lists
http://thomas-cokelaer.info/tutorials/python/lists.html
Python’s built-in list type makes a decent stack data structure as it supports push and pop operations in amortized O(1) time.
https://docs.python.org/3/tutorial/datastructures.html#using-lists-as-stacks
https://docs.python.org/3/tutorial/datastructures.html#using-lists-as-queues
https://www.hackerrank.com/challenges/30-queues-stacks/problem
"""
class Lists():
    """Thin wrapper around a Python list exposing its common operations.

    The backing list is stored by reference and mutated in place.
    """

    def __init__(self, elem):
        self.elem = elem

    def append(self, x):
        """Add *x* to the end of the list.

        Same as self.elem[len(self.elem):] = [x] or
        self.elem.insert(len(self.elem), x).
        """
        return self.elem.append(x)

    def extend(self, elem):
        """Append every item of *elem* to the end of the list."""
        return self.elem.extend(elem)

    def insert(self, index, value):
        """Insert *value* before position *index*."""
        return self.elem.insert(index, value)

    def remove(self, x):
        """Removes the first item from the list whose value is equal to x.

        It raises a ValueError if there is no such item.
        """
        return self.elem.remove(x)

    def pop(self, index=None):
        """Remove the item at the given position in the list, and return it.

        If no index is specified, a.pop() removes and returns the last
        item in the list.
        """
        return self.elem.pop() if index is None else self.elem.pop(index)

    def clear(self):
        """Remove all items from the list.

        Equivalent to del self.elem[:]
        """
        return self.elem.clear()

    def len(self):
        """Return the number of items in the list."""
        return len(self.elem)

    def index(self, value, start=None, end=None):
        """Return zero-based index in the list of the first item whose value is equal to x.

        Raises a ValueError if there is no such item.
        The optional arguments start and end are interpreted as in the
        slice notation and are used to limit the search to a particular
        subsequence of the list.
        """
        # BUG FIX: previously a *start* given without *end* was silently
        # ignored; the search now honors *start* on its own as well.
        if start is not None:
            if end is not None:
                return self.elem.index(value, start, end)
            return self.elem.index(value, start)
        return self.elem.index(value)

    def count(self, value):
        """Return the number of times x appears in the list."""
        return self.elem.count(value)

    def reverse(self):
        """Reverse the elements of the list in place."""
        return self.elem.reverse()

    def copy(self):
        """Return a shallow copy of the list. Equivalent to a[:]"""
        return self.elem.copy()

    def sort(self, reverse=False):
        """Sort the list in place, descending when *reverse* is True."""
        return self.elem.sort(reverse=reverse)

    def insort(self, value):
        """Inserting items into a sorted list"""
        return bisect.insort(self.elem, value)

    def bisect(self, value):
        """Inserting items into a sorted list and get the index"""
        index = bisect.bisect(self.elem, value)
        return self.elem.insert(index, value)

    def get_list(self):
        """Return the backing list itself (not a copy)."""
        return self.elem
class Queue():
    """A FIFO queue of characters backed by a plain list.

    New items are inserted at index 0, so the oldest item sits at the
    end of the list and is removed with an O(1) pop.
    """

    def __init__(self):
        # Front of the queue is the END of this list.
        self.queue = []

    def enqueueCharacter(self, char):
        """Add *char* at the back of the queue."""
        self.queue[:0] = [char]

    def dequeueCharacter(self):
        """Remove and return the character at the front of the queue."""
        return self.queue.pop(-1)

    def __repr__(self):
        return '{0}'.format(self.queue)
class Stack():
    """A LIFO stack of characters backed by a plain list."""

    def __init__(self):
        # Top of the stack is the end of this list.
        self.stack = []

    def pushCharacter(self, char):
        """Place *char* on top of the stack."""
        self.stack.append(char)

    def popCharacter(self):
        """Remove and return the character on top of the stack."""
        return self.stack.pop(-1)

    def __repr__(self):
        return '{0}'.format(self.stack)
class Heap():
    """A min-heap wrapper around the heapq module functions."""

    def __init__(self):
        self.heap = []

    def heappush(self, item):
        """Push the value item onto the heap, maintaining the heap invariant."""
        heappush(self.heap, item)

    def heappop(self):
        """Pop and return the smallest item from the heap, maintaining the heap invariant."""
        return heappop(self.heap)

    def heappushpop(self, item):
        """Push item on the heap, then pop and return the smallest item from the heap."""
        return heappushpop(self.heap, item)

    def heapreplace(self, item):
        """Pop and return the smallest item, then push *item*.

        The heap size does not change. Raises IndexError when the heap
        is empty.
        """
        return heapreplace(self.heap, item)

    def size(self):
        """Number of items currently on the heap."""
        return len(self.heap)

    def heapsort(self):
        """Replace the backing list with its ascending-sorted contents."""
        scratch = []
        for item in self.heap:
            heappush(scratch, item)
        self.heap = [heappop(scratch) for _ in range(len(scratch))]

    def __repr__(self):
        return '{0}'.format(self.heap)
"""
A tuple is similar to a list. The difference between the two is that we cannot change the elements of a tuple once
it is assigned whereas in a list, elements can be changed.
- We generally use tuple for heterogeneous (different) datatypes and list for homogeneous (similar) datatypes.
- Since tuples are immutable, iterating through a tuple is faster than through a list, giving a slight performance boost.
- Tuples that contain immutable elements can be used as key for a dictionary. With list, this is not possible.
- If you have data that doesn't change, implementing it as tuple will guarantee that it remains write-protected.
https://www.programiz.com/python-programming/tuple
http://thomas-cokelaer.info/tutorials/python/tuples.html
"""
class Tuples():
    """Thin wrapper exposing common read-only operations on a tuple."""

    def __init__(self, elem):
        self.elem = elem

    def len(self):
        """Number of items in the tuple."""
        return len(self.elem)

    def sum(self):
        """Return the sum of all elements in the tuple."""
        return sum(self.elem)

    def enumerate(self):
        """Return an enumerate object of (index, value) pairs over the tuple."""
        return enumerate(self.elem)

    def max(self):
        """Return the largest item in the tuple."""
        return max(self.elem)

    def min(self):
        """Return the smallest item in the tuple."""
        return min(self.elem)

    def all(self):
        """True when every element is truthy (or the tuple is empty)."""
        return all(self.elem)

    def any(self):
        """True when at least one element is truthy; False for an empty tuple."""
        return any(self.elem)

    def sort(self, reverse=False):
        """Return a sorted *list* of the elements; the tuple itself is unchanged."""
        return sorted(self.elem, reverse=reverse)

    def covert(self, list):
        """Convert *list* to a tuple (method name kept for API compatibility)."""
        return tuple(list)

    def exists(self, item):
        """True when *item* occurs in the tuple."""
        return item in self.elem

    def count(self, item):
        """Return the number of items that is equal to item"""
        return self.elem.count(item)

    def index(self, item):
        """Return index of first item that is equal to item"""
        return self.elem.index(item)

    def get(self, index):
        """Element at position *index*."""
        return self.elem[index]

    def get_tuple(self):
        """Return the wrapped tuple itself."""
        return self.elem
class Dicts():
    """Thin wrapper around a Python dict exposing its common operations."""

    def __init__(self, elem):
        self.elem = elem

    def len(self):
        """Number of key/value pairs."""
        return len(self.elem)

    def keys(self):
        """View of the dict's keys."""
        return self.elem.keys()

    def values(self):
        """View of the dict's values."""
        return self.elem.values()

    def items(self):
        """View of the dict's (key, value) pairs."""
        return self.elem.items()

    def get(self, key):
        """Value for *key*, or None when absent."""
        return self.elem.get(key)

    def has(self, key):
        """True when *key* is present."""
        return key in self.elem.keys()

    def pop(self, key):
        """Remove *key* and return its value."""
        return self.elem.pop(key)

    def popitem(self):
        """Remove and return the most recently inserted (key, value) pair."""
        return self.elem.popitem()

    def clear(self):
        """Remove every entry."""
        return self.elem.clear()

    def update(self, dic):
        """Merge the entries of *dic* into the dict."""
        return self.elem.update(dic)

    def from_keys(self, list):
        """New dict with the given keys, all mapped to None."""
        return self.elem.fromkeys(list)

    def new_from_keys(self, list):
        """Same as from_keys but built from a fresh empty dict."""
        return {}.fromkeys(list)

    def get_dict(self):
        """Return the wrapped dict itself."""
        return self.elem
"""
Strings are immutable sequence of characters
http://thomas-cokelaer.info/tutorials/python/strings.html
https://docs.python.org/3/library/stdtypes.html#string-methods
"""
class Strings():
    """Thin wrapper exposing common operations on an (immutable) string."""

    def __init__(self, elem):
        self.elem = elem

    def get(self, index):
        """Character at position *index*."""
        return self.elem[index]

    def len(self):
        """Length of the string."""
        return len(self.elem)

    def count(self, char, start, end):
        """Occurrences of *char* in the slice [start:end]."""
        return self.elem.count(char, start, end)

    def isdigit(self):
        """True when all characters are digits and the string is non-empty."""
        return self.elem.isdigit()

    def isalpha(self):
        """True when all characters are alphabetic and the string is non-empty."""
        return self.elem.isalpha()

    def islower(self):
        """True when all cased characters are lowercase."""
        return self.elem.islower()

    def isupper(self):
        """True when all cased characters are uppercase."""
        return self.elem.isupper()

    def istitle(self):
        """True when the string is titlecased."""
        return self.elem.istitle()

    def isspace(self):
        """True when all characters are whitespace and the string is non-empty."""
        return self.elem.isspace()

    def isalnum(self):
        """True when all characters are alphanumeric and the string is non-empty."""
        return self.elem.isalnum()

    def title(self):
        """Titlecased copy of the string."""
        return self.elem.title()

    def capitalize(self):
        """Copy with the first character uppercased and the rest lowercased."""
        return self.elem.capitalize()

    def lower(self):
        """Lowercased copy."""
        return self.elem.lower()

    def upper(self):
        """Uppercased copy."""
        return self.elem.upper()

    def swapcase(self):
        """Copy with the case of every character swapped."""
        return self.elem.swapcase()

    def center(self, lenght, fillchar=" "):
        """Copy centred in a field of width *lenght*, padded with *fillchar*."""
        return self.elem.center(lenght, fillchar)

    def ljust(self, lenght, fillchar=" "):
        """Copy left-justified in a field of width *lenght*."""
        return self.elem.ljust(lenght, fillchar)

    def rjust(self, lenght, fillchar=" "):
        """Copy right-justified in a field of width *lenght*."""
        return self.elem.rjust(lenght, fillchar)

    def zfill(self, lenght):
        """Copy left-padded with '0' to width *lenght* (sign-aware)."""
        return self.elem.zfill(lenght)

    def strip(self, chars):
        """Copy with leading and trailing characters from *chars* removed."""
        return self.elem.strip(chars)

    def rstrip(self, chars):
        """Copy with trailing characters from *chars* removed."""
        return self.elem.rstrip(chars)

    def lstrip(self, chars):
        """Copy with leading characters from *chars* removed."""
        return self.elem.lstrip(chars)

    def endswith(self, suffix, start, end):
        """True when the slice [start:end] ends with *suffix*."""
        return self.elem.endswith(suffix, start, end)

    def subexist(self, sub):
        """True when *sub* occurs anywhere in the string."""
        return sub in self.elem

    def find(self, sub, start, end):
        """Lowest index of *sub* within [start:end], or -1 when absent."""
        return self.elem.find(sub, start, end)

    def get_string(self):
        """Return the wrapped string itself."""
        return self.elem
"""
Linked Lists
A linked list is an ordered collection of values.
Linked lists are similar to arrays in the sense that they contain objects in a linear order.
However they differ from arrays in their memory layout.
Arrays are contiguous data structures and they’re composed of fixed-size data records stored in adjoining blocks of memory.
Linked lists, however, are made up of data records linked together by pointers.
https://dbader.org/blog/python-linked-list
"""
class SListNode():
    """A node in a singly-linked list."""

    def __init__(self, data=None, next=None):
        # Payload plus link to the following node (None marks the tail).
        self.data, self.next = data, next

    def __repr__(self):
        return repr(self.data)
class DListNode():
    """
    A node in a doubly-linked list.
    """

    def __init__(self, data=None, prev=None, next=None):
        # Payload plus links to both neighbours (None marks either end).
        self.data, self.prev, self.next = data, prev, next

    def __repr__(self):
        return repr(self.data)
class SinglyLinkedList:
    def __init__(self):
        """
        Create a new singly-linked list.
        Takes O(1) time.
        """
        # Head pointer; None means the list is empty.
        self.head = None
def __repr__(self):
"""
Return a string representation of the list.
Takes O(n) time.
"""
nodes = []
curr = self.head
while curr:
nodes.append(repr(curr))
curr = curr.next
return '[' + ', '.join(nodes) + ']'
    def prepend(self, data):
        """
        Insert a new element at the beginning of the list.
        Takes O(1) time.
        """
        # The new node becomes the head and points at the old head.
        self.head = SListNode(data=data, next=self.head)
def append(self, data):
"""
Insert a new element at the end of the list.
Takes O(n) time.
https://www.hackerrank.com/challenges/30-linked-list/problem
"""
if not self.head:
self.head = SListNode(data=data)
return
curr = self.head
while curr.next:
curr = curr.next
curr.next = SListNode(data=data)
def insert(self, data):
"""
Insert a new element at the end of the list.
Takes O(n) time.
"""
if not self.head:
self.head = SListNode(data=data)
return
curr = self.head
while curr.next:
curr = curr.next
curr.next = SListNode(data=data)
def find(self, key):
"""
Search for the first element with `data` matching
`key`. Return the element or `None` if not found.
Takes O(n) time.
"""
curr = self.head
while curr and curr.data != key:
curr = curr.next
return curr # Will be None if not found
def remove(self, key):
"""
Remove the first occurrence of `key` in the list.
Takes O(n) time.
"""
# Find the element and keep a
# reference to the element preceding it
curr = self.head
prev = None
while curr | |
<filename>app/main/views.py<gh_stars>0
from flask import render_template , redirect , url_for , abort , flash , request , \
current_app , make_response
from flask_login import login_required , current_user
from flask_sqlalchemy import get_debug_queries
from . import main
from .forms import EditProfileForm , EditProfileAdminForm , PostForm , \
CommentForm
from .. import db
from ..models import Permission , Role , User , Post , Comment
from ..decorators import admin_required , permission_required
@main.after_app_request
def after_request(response):
    """Log every database query that exceeded the slow-query threshold."""
    threshold = current_app.config['FLASKY_SLOW_DB_QUERY_TIME']
    for query in get_debug_queries():
        if query.duration >= threshold:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (query.statement, query.parameters, query.duration,
                   query.context))
    return response
@main.route('/shutdown')
def server_shutdown():
    """Stop the werkzeug development server; only available while testing."""
    if not current_app.testing:
        abort(404)
    shutdown_func = request.environ.get('werkzeug.server.shutdown')
    if not shutdown_func:
        abort(500)
    shutdown_func()
    return 'Shutting down...'
@main.route('/', methods=['GET', 'POST'])
def index():
    """Render the home page with a paginated post list.

    Authenticated users may restrict the list to posts from followed
    users via the 'show_followed' cookie.
    """
    page = request.args.get('page', 1, type=int)
    show_followed = False
    if current_user.is_authenticated:
        show_followed = bool(request.cookies.get('show_followed', ''))
    query = current_user.followed_posts if show_followed else Post.query
    pagination = query.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    return render_template('index.html', posts=pagination.items,
                           show_followed=show_followed, pagination=pagination)
@main.route('/release_post', methods=['GET', 'POST'])
def release_post():
    """Create a new blog post for users with write permission."""
    form = PostForm()
    if current_user.can(Permission.WRITE_ARTICLES) and \
            form.validate_on_submit():
        new_post = Post(title=form.title.data, body=form.body.data,
                        author=current_user._get_current_object())
        db.session.add(new_post)
        db.session.commit()
        return redirect(url_for('.index'))
    return render_template('release_post.html', form=form)
@main.route('/user/<username>')
def user(username):
    """Show a user's profile page with their posts, newest first."""
    profile_user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    pagination = profile_user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    return render_template('user.html', user=profile_user,
                           posts=pagination.items, pagination=pagination)
@main.route ( '/edit-profile' , methods=[ 'GET' , 'POST' ] )
@login_required
def edit_profile () :
    """Let the logged-in user edit their own profile fields."""
    form = EditProfileForm ()
    if form.validate_on_submit () :
        current_user.name = form.name.data
        current_user.location = form.location.data
        current_user.about_me = form.about_me.data
        current_user.avatar1 = form.avatar1.data
        db.session.add ( current_user )
        # NOTE(review): no db.session.commit() here, unlike release_post;
        # confirm a teardown hook commits, otherwise the edit is lost.
        flash ( 'Your profile has been updated.' )
        # NOTE(review): '.edit_profile' takes no 'username' argument, so
        # this kwarg only becomes a query-string parameter; redirecting
        # to '.user' may have been intended -- confirm.
        return redirect ( url_for ( '.edit_profile' , username=current_user.username ) )
    # On GET (or invalid submit), pre-populate the form with the
    # current profile values.
    form.name.data = current_user.name
    form.location.data = current_user.location
    form.about_me.data = current_user.about_me
    form.avatar1.data = current_user.avatar1
    return render_template ( 'edit_profile.html' , form=form )
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Let an administrator edit any user's profile.

    Parameters
    ----------
    id : int
        Primary key of the user being edited; responds 404 when absent.
    """
    user = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=user)
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.confirmed = form.confirmed.data
        user.role = Role.query.get(form.role.data)
        user.name = form.name.data
        user.location = form.location.data
        user.about_me = form.about_me.data
        # BUG FIX: the avatar was written to current_user (the logged-in
        # administrator) instead of the user being edited, inconsistent
        # with every other field above.
        user.avatar1 = form.avatar1.data
        db.session.add(user)
        flash('The profile has been updated.')
        return redirect(url_for('.edit_profile', username=user.username))
    # Pre-populate the form from the edited user's current values.
    form.email.data = user.email
    form.username.data = user.username
    form.confirmed.data = user.confirmed
    form.role.data = user.role_id
    form.name.data = user.name
    form.location.data = user.location
    form.about_me.data = user.about_me
    # BUG FIX: previously read from current_user; show the edited
    # user's own avatar instead.
    form.avatar1.data = user.avatar1
    return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Display a single post and handle submission of new comments."""
    target = Post.query.get_or_404(id)
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(body=form.body.data,
                          post=target,
                          author=current_user._get_current_object())
        db.session.add(comment)
        flash('Your comment has been published.')
        # page=-1 means "last page", where the new comment will appear.
        return redirect(url_for('.post', id=target.id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # Translate the sentinel into the number of the last page.
        page = (target.comments.count() - 1) // \
            current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = target.comments.order_by(Comment.timestamp.asc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    return render_template('post.html', posts=[target], form=form,
                           comments=pagination.items, pagination=pagination)
@main.route('/edit_post/<int:id>', methods=['GET', 'POST'])
@login_required
def edit_post(id):
    """Edit a post; only its author or an administrator may do so."""
    form = PostForm()
    target = Post.query.get_or_404(id)
    if current_user != target.author and \
            not current_user.can(Permission.ADMINISTER):
        abort(403)
    if request.method == 'POST':
        target.title = request.form['title']
        target.body = request.form['body']
        db.session.add(target)
        flash('The post has been updated.')
        return redirect(url_for('.post', id=target.id))
    elif request.method == 'GET':
        # Pre-populate the form with the post's current contents.
        form.title.data = target.title
        form.body.data = target.body
    return render_template('edit_post.html', form=form)
@main.route('/delete_post/<int:id>', methods=['GET', 'POST'])
@login_required
def delete_post(id):
    """Delete a post; only its author or an administrator may do so."""
    target = Post.query.get_or_404(id)
    form = PostForm()
    if current_user != target.author and \
            not current_user.can(Permission.ADMINISTER):
        abort(403)
    if request.method == 'POST':
        db.session.delete(target)
        flash('The post has been deleted.')
        return redirect(url_for('.user', username=current_user.username))
    elif request.method == 'GET':
        # Show the post contents on the confirmation page.
        form.title.data = target.title
        form.body.data = target.body
    return render_template('delete_post.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Make the current user follow *username*."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if current_user.is_following(target):
        flash('You are already following this user.')
        return redirect(url_for('.user', username=username))
    current_user.follow(target)
    flash('You are now following %s.' % username)
    return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Stop the current user from following *username*."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if not current_user.is_following(target):
        flash('You are not following this user.')
        return redirect(url_for('.user', username=username))
    current_user.unfollow(target)
    flash('You are not following %s anymore.' % username)
    return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
    """List the followers of *username*, paginated."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = target.followers.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    follows = [{'user': item.follower, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=target, title="Followers of",
                           endpoint='.followers', pagination=pagination,
                           follows=follows)
@main.route('/followed-by/<username>')
def followed_by(username):
    """List the users that *username* follows, paginated."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = target.followed.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    follows = [{'user': item.followed, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=target, title="Followed by",
                           endpoint='.followed_by', pagination=pagination,
                           follows=follows)
@main.route('/all')
@login_required
def show_all():
    """Clear the show_followed cookie so the home page shows all posts."""
    resp = make_response(redirect(url_for('.index')))
    # The cookie persists for 30 days.
    resp.set_cookie('show_followed', '', max_age=30 * 24 * 60 * 60)
    return resp
@main.route('/followed')
@login_required
def show_followed():
    """Set the show_followed cookie so the home page shows followed posts."""
    resp = make_response(redirect(url_for('.index')))
    # The cookie persists for 30 days.
    resp.set_cookie('show_followed', '1', max_age=30 * 24 * 60 * 60)
    return resp
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate():
    """Show all comments, newest first, for moderation."""
    page = request.args.get('page', 1, type=int)
    pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    return render_template('moderate.html', comments=pagination.items,
                           pagination=pagination, page=page)
@main.route ( '/moderate/enable/<int:id>' )
@login_required
@permission_required ( Permission.MODERATE_COMMENTS )
def moderate_enable ( id | |
<gh_stars>1-10
"""
Quantile regression forest of trees-based ensemble methods.
The module structure is the following:
- The ``BaseForestQuantileRegressor`` base class implements a common ``fit``
method for all the estimators in the module. The ``fit`` method of the base
class calls the ``fit`` method of the ``ForestRegressor`` and creates a
quantile forest object that records the leaf node membership of all samples
of the training set.
- The ``RandomForestQuantileRegressor`` derived class provides the user with a
concrete implementation of the quantile regression forest ensemble method
that extends the classical ``RandomForestRegressor`` as the estimator
implementation.
- The ``ExtraTreesQuantileRegressor`` derived class provides the user with a
concrete implementation of the quantile regression forest ensemble method
that extends the extremely randomized trees ``ExtraTreesRegressor`` as the
estimator implementation.
Only single output problems are handled.
"""
import numbers
import random
import warnings
from math import ceil
from warnings import warn
import joblib
import numpy as np
from sklearn.ensemble._forest import ForestRegressor
from sklearn.ensemble._forest import _generate_sample_indices
from sklearn.ensemble._forest import _get_n_samples_bootstrap
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeRegressor
from sklearn.tree._tree import DTYPE
from sklearn.utils.validation import check_is_fitted
from ._quantile_forest_fast import QuantileForest
from ._quantile_forest_fast import generate_unsampled_indices
def _generate_unsampled_indices(sample_indices, duplicates=None):
    """Private function used by forest._get_unsampled_indices function."""
    # Avoid a mutable default argument; treat None as "no duplicates".
    dup = [] if duplicates is None else duplicates
    return generate_unsampled_indices(sample_indices, dup)
def _group_by_value(a):
"""Private function used by forest._leaf_train_indices function."""
sort_idx = np.argsort(a)
a_sorted = a[sort_idx]
unq_first = np.concatenate(([True], a_sorted[1:] != a_sorted[:-1]))
unq_items = a_sorted[unq_first]
unq_count = np.diff(np.concatenate(np.nonzero(unq_first) + ([a.size],)))
unq_idx = np.split(sort_idx, np.cumsum(unq_count[:-1]))
return unq_items, unq_idx
class BaseForestQuantileRegressor(ForestRegressor):
"""
Base class for quantile regression forests.
Warning: This class should not be used directly. Use derived classes
instead.
"""
    def fit(self, X, y, sample_weight=None, sparse_pickle=False):
        """Build a forest from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, its dtype will be converted
            to ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csc_matrix``.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (real numbers).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        sparse_pickle : bool, default=False
            Pickle the underlying data structure using a SciPy sparse matrix.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Grow the trees with the standard scikit-learn forest fit first.
        super(BaseForestQuantileRegressor, self).fit(
            X, y, sample_weight=sample_weight
        )
        # Re-validate locally: multi_output=False enforces a 1-D target,
        # matching the module's "only single output problems" restriction.
        X, y = self._validate_data(
            X, y, multi_output=False, accept_sparse="csc", dtype=DTYPE
        )
        # Sort the target values in ascending order.
        # Use sorter to maintain mapping to original order.
        sorter = np.argsort(y)
        y = y[sorter]
        if sample_weight is not None:
            # Keep weights aligned with the re-ordered targets.
            sample_weight = np.asarray(sample_weight)[sorter]
        # Get map of tree leaf nodes to training indices.
        y_train_leaves = self._get_y_train_leaves(
            X, sorter=sorter, sample_weight=sample_weight
        )
        # Create quantile forest object.
        self.forest_ = QuantileForest(
            y.astype(np.float64), y_train_leaves, sparse_pickle=sparse_pickle
        )
        # State consumed later by prediction and OOB handling.
        self.sorter_ = sorter
        self.n_train_samples_ = len(y)
        # Hash of the training inputs; _oob_samples compares against it to
        # warn when OOB scoring is run on data other than the training set.
        self.X_train_hash_ = joblib.hash(X)
        self.unsampled_indices_ = None
        return self
def _get_y_train_leaves(self, X, sorter=None, sample_weight=None):
"""Return a mapping of each leaf node to its list of training indices.
The ``apply`` function is used on the ``X`` values to obtain the leaf
indices for the appropriate training indices, as sorted by ``sorter``.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
sorter : array-like of shape (n_samples), default=None
The indices that would sort the target values in ascending order.
Used to associate ``est.apply`` outputs with sorted target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
y_train_leaves : array-like of shape \
(n_estimators, n_leaves, n_indices)
List of trees, each with a list of nodes, each with a list of
indices of the training samples residing at that node. Nodes with
no samples (e.g., internal nodes) are empty. Internal nodes are
included so that leaf node indices match their ``est.apply``
outputs. Each node list is padded to equal length with 0s.
"""
n_samples = X.shape[0]
if isinstance(self.max_samples_leaf, (numbers.Integral, np.integer)):
if self.max_samples_leaf < 1:
raise ValueError(
"If max_samples_leaf is an integer, "
"it must be be >= 1, got {0}."
"".format(self.max_samples_leaf)
)
max_samples_leaf = self.max_samples_leaf
leaf_subsample = True
elif isinstance(self.max_samples_leaf, numbers.Real):
if not 0. < self.max_samples_leaf <= 1.:
raise ValueError(
"If max_samples_leaf is a float, "
"it must be in range (0, 1], got {0}."
"".format(self.max_samples_leaf)
)
max_samples_leaf = int(ceil(self.max_samples_leaf * n_samples))
leaf_subsample = True
elif self.max_samples_leaf is None:
max_samples_leaf = self.max_samples_leaf
leaf_subsample = False
else:
raise ValueError(
"max_samples_leaf must be of integer, "
"float, or None type, got {0}."
"".format(type(self.max_samples_leaf))
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
X_leaves = self.apply(X)
shape = (n_samples, self.n_estimators)
bootstrap_indices = np.empty(shape, dtype=np.int64)
for i, estimator in enumerate(self.estimators_):
# Get bootstrap indices.
if self.bootstrap:
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples, self.max_samples
)
bootstrap_indices[:, i] = _generate_sample_indices(
estimator.random_state, n_samples, n_samples_bootstrap
)
else:
bootstrap_indices[:, i] = np.arange(n_samples)
# Get predictions on bootstrap indices.
X_leaves[:, i] = X_leaves[bootstrap_indices[:, i], i]
if sorter is not None:
# Reassign bootstrap indices to account for target sorting.
bootstrap_indices = np.argsort(sorter)[bootstrap_indices]
bootstrap_indices += 1 # for sparse matrix (0s as empty)
# Get the maximum number of nodes (internal + leaves) across trees.
# Get the maximum number of samples per leaf across trees (if needed).
max_node_count = 0
max_samples_leaf = 0 if not leaf_subsample else max_samples_leaf
for i, estimator in enumerate(self.estimators_):
node_count = estimator.tree_.node_count
if node_count > max_node_count:
max_node_count = node_count
if not leaf_subsample:
sample_count = np.max(np.bincount(X_leaves[:, i]))
if sample_count > max_samples_leaf:
max_samples_leaf = sample_count
# Initialize NumPy array (more efficient serialization than dict/list).
shape = (self.n_estimators, max_node_count, max_samples_leaf)
y_train_leaves = np.zeros(shape, dtype=np.int64)
for i, estimator in enumerate(self.estimators_):
# Group training indices by leaf node.
leaf_indices, leaf_values_list = _group_by_value(X_leaves[:, i])
if leaf_subsample:
random.seed(estimator.random_state)
# Map each leaf node to its list of training indices.
for leaf_idx, leaf_values in zip(leaf_indices, leaf_values_list):
y_indices = bootstrap_indices[:, i][leaf_values]
if sample_weight is not None:
y_indices = y_indices[sample_weight[y_indices - 1] > 0]
# Subsample leaf training indices (without replacement).
if leaf_subsample and max_samples_leaf < len(y_indices):
if not isinstance(y_indices, list):
y_indices = list(y_indices)
y_indices = random.sample(y_indices, max_samples_leaf)
y_train_leaves[i, leaf_idx, :len(y_indices)] = y_indices
return y_train_leaves
def _oob_samples(self, X, indices=None, duplicates=None):
"""Generate out-of-bag (OOB) samples for each base estimator.
Only generates leaf indices for samples that were excluded from the
bootstrapping process for each base estimator. If ``indices`` is None,
assumes that ``X`` is the same length and order as the training set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
indices : list, default=None
List of training indices that correspond to X indices. An index of
-1 can be used to specify rows omitted from the training set. By
default, assumes all X indices correspond to all training indices.
duplicates : list, default=None
List of sets of functionally identical indices.
Returns
-------
X_leaves : array-like of shape (n_samples, n_estimators)
Prediction leaves for OOB samples. Non-OOB samples may have
uninitialized (arbitrary) data.
X_indices : array-like of shape (n_samples, n_estimators)
Mask for OOB samples. 1 if OOB sample, 0 otherwise.
"""
n_samples = X.shape[0]
n_estimators = len(self.estimators_)
if indices is None:
if n_samples != self.n_train_samples_:
raise ValueError(
"If `indices` are None, OOB samples must be "
"same length as number of training samples."
)
elif joblib.hash(X) != self.X_train_hash_:
warn("OOB samples are not identical to training samples.")
if indices is not None and n_samples != len(indices):
raise ValueError(
"If `indices` are not None, OOB samples "
"and indices must be the same length."
| |
#!/usr/bin/env python
# An IRC bot doing random handy things.
from ircbot import SingleServerIRCBot
from irclib import nm_to_n, nm_to_h, irc_lower, ip_numstr_to_quad, ip_quad_to_numstr, Event
import random, time, datetime
DATADIR = "datadir/"  # Base directory for all bot data files; never created automatically (see XXX below).
# Can add lines to a file, and read either certain lines or lines at random.
# XXX: Make it add directories if necessary
# XXX: Make a subclass that makes sure entries are unique.
# Can add lines to a file, and read either certain lines or lines at random.
class ResourceFile:
    """One flat text file of entries (one per line) under DATADIR.

    Supports appending stripped lines and reading them back whole,
    by index, or at random.
    """
    def __init__( self, category ):
        # BUG FIX: remember the category; getCategory() used to raise
        # AttributeError because self.category was never assigned.
        self.category = category
        self.file = DATADIR + category + ".txt"
    def _slurpText( self ):
        # Read every entry, stripped of surrounding whitespace/newlines.
        # BUG FIX: the original did `f.close` (attribute access, never
        # called), leaking the file handle; `with` closes it reliably.
        with open( self.file, 'r' ) as f:
            return [ line.strip() for line in f ]
    def getCategory( self ):
        return self.category
    def getLines( self ):
        return self._slurpText()
    def clearFile( self ):
        # Opening for writing truncates the file.
        f = open( self.file, 'w' )
        f.close()
    def addLine( self, line ):
        f = open( self.file, 'a' )
        f.write( line.strip() + "\n" )
        f.close()
    def getLine( self, line ):
        # Returns None implicitly when the index is out of range.
        lines = self._slurpText()
        if line < len( lines ):
            return lines[line]
    def getLastLine( self ):
        return self._slurpText()[-1]
    def getRandomLine( self ):
        lines = self._slurpText()
        return lines[ random.randint( 0, len( lines ) - 1 ) ]
class MemoFile( ResourceFile ):
    """Per-recipient memo store; reading the memos also clears them."""
    def __init__( self, name ):
        ResourceFile.__init__( self, "memos/" + name )
        # Touch the file so a later read never fails on a missing path.
        open( self.file, 'a' ).close()
    def getMemos( self ):
        # Hand back every stored memo, then wipe the file.
        pending = self._slurpText()
        self.clearFile()
        return pending
class UniqueResourceFile( ResourceFile ):
    """A ResourceFile that refuses to store duplicate entries.

    BUG FIX: addLine() was an empty stub (`pass`), so every line handed
    to a UniqueResourceFile was silently discarded.  Per the XXX note at
    the top of the file, the subclass should instead enforce uniqueness.
    """
    def __init__( self, category ):
        ResourceFile.__init__( self, category )
    def addLine( self, line ):
        # Append only entries not already present in the file.
        try:
            existing = self._slurpText()
        except IOError:
            existing = []  # file does not exist yet; nothing to collide with
        if line.strip() not in existing:
            ResourceFile.addLine( self, line )
# XXX: Date and time and such
class Logger:
    """Append IRC events to a dated per-server/channel/category log file."""
    def __init__( self, server, channel, category ):
        today = datetime.date.today().isoformat()
        self.logfile = ResourceFile( "%s/%s-%s-%s" % (server, channel, today, category) )
    def log( self, event ):
        # Record public messages and /me actions; everything else is ignored.
        sender = nm_to_n( event.source() )
        stamp = datetime.datetime.now().ctime()
        kind = event.eventtype()
        if kind == "pubmsg":
            self.logfile.addLine( "%s <%s>\t%s" % (stamp, sender, event.arguments()[0]) )
        elif kind == "ctcp" and event.arguments()[0] == "ACTION":
            self.logfile.addLine( "%s %s %s" % (stamp, sender, event.arguments()[1]) )
def textAfterColon( text ):
    """Return the stripped text after the first colon, or '' when there is none."""
    idx = text.find( ':' )
    return text[idx + 1:].strip() if idx >= 0 else ''
def textBeforeColon( text ):
    """Return the stripped text before the first colon, or the whole text when there is none."""
    before, sep, _after = text.partition( ':' )
    return before.strip() if sep else text
# XXX: Possible bugs
# *New logs at midnight!!! Make sure it works, yaknow.
# *Adding new stuff to the data files! Especially policies, I think
# XXX: To do
# *Private message handling
# *Make him able to kick people, other admin-y tasks, etc.
# *Exalted EXP calculator!
# *Remember specific links and bits of information until told to forget them
# *More consistent data file handling and adding and such?
# *RP mode --silent, special log, and doesn't absorb actions
# *Doink out duplicate actions, names and policies
# *Memo multiple people at once
# *Backlog system, that sends you the last X lines of text
# *User manual (possibly via PM and auto-generating docs --docstrings?)
# *Set topic name?
# *Remember word of the day, or pick one if none given
# *More in-depth logging... maybe multiple topics at once, with status? Log
# retrieval, as well?
# *Make users able to edit and correct data files such as murders or weapons
# *URL Logger?
# *Google calculator?
# *Dictionary?
# *Make the quiet and non-quiet commands less... kludgy?
# *Bot status message! Uptime, tyop's, cookies eaten, dice rolled,
# number and size of logs, etc.
# *File locking? Multiple channels at once??? Ick...
# *Music?
# *AI... Markov Chains? Simple punish/reward system?
class GlennBot(SingleServerIRCBot):
    def __init__(self, nickname, channel, server, port=6667):
        # Single-server bot; the nick is passed for both nickname and realname.
        SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
        self.nick = nickname
        self.channel = channel
        self.server = server
        self.commands = {}  # keyword -> handler method, filled by initCommands()
        self.quiet = False  # when True, sendMessage/sendAction become no-ops
        self.wordOfTheDay = "Banana"  # default until someone sets one (doWOTD)
        self.generalLogger = Logger( server, channel, "all" )
        self.topicalLogger = False  # becomes a Logger while a topical log runs
        # Data files backing the various chat commands.
        self.namexFile = ResourceFile( "namex" )
        self.policyFile = ResourceFile( "policy" )
        self.actionFile = ResourceFile( "actions" )
        self.murderFile = ResourceFile( "murder" )
        self.weaponFile = ResourceFile( "weapons" )
        self.initCommands()
        self.initTimers()
def initCommands( self ):
self.commands['dice'] = self.doDice
self.commands['roll'] = self.doDice
self.commands['damage'] = self.doDamageDice
self.commands['dmg'] = self.doDamageDice
self.commands['start log'] = self.doStartTopicalLog
self.commands['logon'] = self.doStartTopicalLog
self.commands['stop log'] = self.doStopTopicalLog
self.commands['logoff'] = self.doStopTopicalLog
self.commands['namex'] = self.doName
self.commands['name'] = self.doName
self.commands['new name'] = self.doNewName
self.commands['policy'] = self.doPolicy
self.commands['new policy'] = self.doNewPolicy
self.commands['action'] = self.doAction
self.commands['word of the day'] = self.doWOTD
self.commands['poke'] = self.doPoke
self.commands['cookie'] = self.doCookie
self.commands['quiet'] =self.doToggleQuiet
#self.commands['hush'] = self.doToggleQuiet
self.commands['mimic'] = self.doMimic
self.commands['die'] = self.doDisconnect
#self.commands['bounce'] = self.doBounce
self.commands['kill'] = self.doMurder
self.commands['murder'] = self.doMurder
self.commands['execute'] = self.doMurder
self.commands['assassinate'] = self.doMurder
self.commands['new murder'] = self.doNewMurder
self.commands['new weapon'] = self.doNewWeapon
self.commands['memo'] = self.doMemo
self.commands['message'] = self.doMemo
self.commands['experience'] = self.doExalt
self.commands['exalt'] = self.doExalt
def initTimers( self ):
now = datetime.datetime.now()
endOfDay = now.replace( hour=23, minute=59, second=59 )
retardoBadKludge = time.mktime( endOfDay.timetuple() )
self.ircobj.execute_at( retardoBadKludge, self.newDayLog )
#self.ircobj.execute_delayed( 2, printFoo, [self] )
# Callbacks and basic functionality
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
c.join(self.channel)
print "Joined " + self.channel
    def on_privmsg(self, c, e):
        # Private messages are deliberately ignored for now; PM command
        # handling is stubbed out below (see the "Private message handling"
        # item in the to-do list above).
        pass
        #self.parseCommand( e, c, e.arguments()[0] )
    def on_dccmsg(self, c, e):
        # DCC messages are ignored.
        pass
    def on_dccchat(self, c, e):
        # DCC chat requests (relayed here by on_ctcp) are ignored.
        pass
    def on_ctcp(self, c, e):
        """Default handler for ctcp events.
        Replies to VERSION and PING requests and relays DCC requests
        to the on_dccchat method.
        """
        # The CTCP subtype is the first argument; branch order is load-bearing.
        if e.arguments()[0] == "VERSION":
            c.ctcp_reply(nm_to_n(e.source()),
                         "VERSION " + self.get_version())
        elif e.arguments()[0] == "PING":
            if len(e.arguments()) > 1:
                c.ctcp_reply(nm_to_n(e.source()),
                             "PING " + e.arguments()[1])
        elif e.arguments()[0] == "DCC" and e.arguments()[1].split(" ", 1)[0] == "CHAT":
            self.on_dccchat(c, e)
        elif e.arguments()[0] == "ACTION":
            # /me actions: analyze/remember them, log them, and treat the
            # action text as a potential command.
            whofrom = nm_to_n( e.source() )
            print "Action: %s %s" % (whofrom, e.arguments()[1] )
            self.analyzeEntireAction( whofrom, e.arguments()[1] )
            self.log( e )
            self.parseCommand( c, whofrom, e.arguments()[1] )
def on_pubmsg(self, c, e):
whofrom = nm_to_n( e.source() )
self.log( e )
self.analyzeEntireText( whofrom, e.arguments()[0] )
self.parseCommand( c, whofrom, e.arguments()[0] )
# A bit of abstraction, mainly for logging
def sendMessage( self, message ):
if not self.quiet:
e = Event( "pubmsg", self.nick, self.channel, [message] )
self.connection.privmsg( self.channel, message )
self.log( e )
time.sleep( 0.2 )
def sendAction( self, message ):
if not self.quiet:
e = Event( "ctcp", self.nick, self.channel, ["ACTION", message] )
self.connection.action( self.channel, message )
self.log( e )
time.sleep( 0.2 )
# Start a log for a new day
def newDayLog( self ):
self.sendMessage( "(( Starting new log, please wait... ))" )
time.sleep( 3 )
self.generalLogger = 0
self.generalLogger = Logger( self.server, self.channel, "all" )
self.sendMessage( "(( Done. ))" )
def log( self, event ):
self.generalLogger.log( event )
if self.topicalLogger:
self.topicalLogger.log( event )
# XXX: Does not work!
def changeNick( self, newnick ):
self.nick = newnick
self.server.nick( self.nick )
# A stub for doing things not invoked by a command directly,
# like text-analysis, spellchecking, etc.
# These are NOT triggered by things the bot itself does!
    def analyzeEntireText( self, whofrom, text ):
        # Hook for passive analysis of every public message (see the stub
        # note above); intentionally a no-op for now.
        pass
def analyzeEntireAction( self, whofrom, text ):
actionwords = text.split()
if (len( actionwords ) > 1) and not ('poke' in text) \
and not (self.nick in text):
self.actionFile.addLine( text )
# Thing is, there's two command formats, sorta... there's the
# bot-name-prefixed that makes it seem more conversational, and then
# there's the !-prefixed type for brevity.
# Aha. Both will use the same keyword-matching in the body, but
# you can replace the bot name with a !.
# Sauce.
def parseCommand( self, c, whofrom, cmd ):
cmdtext = cmd.lower().strip()
mynick = self.nick.lower()
if (cmd[0] == '!') or (cmdtext.find( mynick ) >= 0):
self.doCommand( whofrom, cmd )
def doCommand(self, whofrom, cmd):
print "Doing '" + cmd + "'"
cmdtext = cmd.lower().strip()
for command in self.commands:
if command in cmdtext:
self.commands[command]( whofrom, cmd )
return
# BOT MANAGEMENT
def doDisconnect( self, whofrom, cmd ):
if "Icefox" in whofrom:
self.die( "Aieeeee!" )
elif "GMfox" in whofrom:
self.die( "Aieeeee!" )
elif "GMFox" in whofrom:
self.die( "Aieeeee!" )
else:
self.sendAction( \
"taps one white mana to become immune to non-foxes." )
#def doBounce( self, whofrom, cmd ):
# self.disconnect()
def doToggleQuiet( self, whofrom, cmd ):
if self.quiet:
#self.changeNick( self.nick[1:] )
self.quiet = False
self.sendMessage( "Whew, I can talk again!" )
else:
#self.changeNick( "Q" + | |
TType.STRING:
self.db_name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when both the accelerated protocol
    # and the fastbinary extension are available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: field-by-field serialization; None fields are treated as
    # unset and skipped entirely.
    oprot.writeStructBegin('TResetMetadataRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.is_refresh is not None:
      oprot.writeFieldBegin('is_refresh', TType.BOOL, 2)
      oprot.writeBool(self.is_refresh)
      oprot.writeFieldEnd()
    if self.table_name is not None:
      oprot.writeFieldBegin('table_name', TType.STRUCT, 3)
      self.table_name.write(oprot)
      oprot.writeFieldEnd()
    if self.header is not None:
      oprot.writeFieldBegin('header', TType.STRUCT, 4)
      self.header.write(oprot)
      oprot.writeFieldEnd()
    if self.partition_spec is not None:
      oprot.writeFieldBegin('partition_spec', TType.LIST, 5)
      oprot.writeListBegin(TType.STRUCT, len(self.partition_spec))
      for iter27 in self.partition_spec:
        iter27.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.db_name is not None:
      oprot.writeFieldBegin('db_name', TType.STRING, 6)
      oprot.writeString(self.db_name)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # Raise if any field marked `required` in the IDL was never set.
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.is_refresh is None:
      raise TProtocol.TProtocolException(message='Required field is_refresh is unset!')
    return
  def __repr__(self):
    # Render every attribute as name=value (Python 2 `iteritems`).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    # Python 2 does not derive != from ==, so it is spelled out here.
    return not (self == other)
class TResetMetadataResponse:
  """
  Thrift-generated struct (Python 2 runtime); regenerate from the IDL
  rather than editing by hand.
  Attributes:
   - result
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'result', (TCatalogUpdateResult, TCatalogUpdateResult.thrift_spec), None, ), # 1
  )
  def __init__(self, result=None,):
    self.result = result
  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.result = TCatalogUpdateResult()
          self.result.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TResetMetadataResponse')
    if self.result is not None:
      oprot.writeFieldBegin('result', TType.STRUCT, 1)
      self.result.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # `result` is required in the IDL.
    if self.result is None:
      raise TProtocol.TProtocolException(message='Required field result is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TGetFunctionsRequest:
  """
  Thrift-generated struct (Python 2 runtime); regenerate from the IDL
  rather than editing by hand.
  Attributes:
   - protocol_version
   - header
   - db_name
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 0, ), # 1
    (2, TType.STRING, 'db_name', None, None, ), # 2
    (3, TType.STRUCT, 'header', (TCatalogServiceRequestHeader, TCatalogServiceRequestHeader.thrift_spec), None, ), # 3
  )
  def __init__(self, protocol_version=thrift_spec[1][4], header=None, db_name=None,):
    self.protocol_version = protocol_version
    self.header = header
    self.db_name = db_name
  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.header = TCatalogServiceRequestHeader()
          self.header.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.db_name = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TGetFunctionsRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.db_name is not None:
      oprot.writeFieldBegin('db_name', TType.STRING, 2)
      oprot.writeString(self.db_name)
      oprot.writeFieldEnd()
    if self.header is not None:
      oprot.writeFieldBegin('header', TType.STRUCT, 3)
      self.header.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TGetFunctionsResponse:
  """
  Thrift-generated struct (Python 2 runtime); regenerate from the IDL
  rather than editing by hand.
  Attributes:
   - status
   - functions
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'status', (Status.ttypes.TStatus, Status.ttypes.TStatus.thrift_spec), None, ), # 1
    (2, TType.LIST, 'functions', (TType.STRUCT,(Types.ttypes.TFunction, Types.ttypes.TFunction.thrift_spec)), None, ), # 2
  )
  def __init__(self, status=None, functions=None,):
    self.status = status
    self.functions = functions
  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.status = Status.ttypes.TStatus()
          self.status.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Element-by-element decode of the TFunction list.
          self.functions = []
          (_etype31, _size28) = iprot.readListBegin()
          for _i32 in xrange(_size28):
            _elem33 = Types.ttypes.TFunction()
            _elem33.read(iprot)
            self.functions.append(_elem33)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TGetFunctionsResponse')
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRUCT, 1)
      self.status.write(oprot)
      oprot.writeFieldEnd()
    if self.functions is not None:
      oprot.writeFieldBegin('functions', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.functions))
      for iter34 in self.functions:
        iter34.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields on this struct.
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TGetCatalogObjectRequest:
  """
  Thrift-generated struct (Python 2 runtime); regenerate from the IDL
  rather than editing by hand.
  Attributes:
   - protocol_version
   - header
   - object_desc
  """
  thrift_spec = (
    None, # 0
    (1, TType.I32, 'protocol_version', None, 0, ), # 1
    (2, TType.STRUCT, 'object_desc', (CatalogObjects.ttypes.TCatalogObject, CatalogObjects.ttypes.TCatalogObject.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'header', (TCatalogServiceRequestHeader, TCatalogServiceRequestHeader.thrift_spec), None, ), # 3
  )
  def __init__(self, protocol_version=thrift_spec[1][4], header=None, object_desc=None,):
    self.protocol_version = protocol_version
    self.header = header
    self.object_desc = object_desc
  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.protocol_version = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.header = TCatalogServiceRequestHeader()
          self.header.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.object_desc = CatalogObjects.ttypes.TCatalogObject()
          self.object_desc.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TGetCatalogObjectRequest')
    if self.protocol_version is not None:
      oprot.writeFieldBegin('protocol_version', TType.I32, 1)
      oprot.writeI32(self.protocol_version)
      oprot.writeFieldEnd()
    if self.object_desc is not None:
      oprot.writeFieldBegin('object_desc', TType.STRUCT, 2)
      self.object_desc.write(oprot)
      oprot.writeFieldEnd()
    if self.header is not None:
      oprot.writeFieldBegin('header', TType.STRUCT, 3)
      self.header.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    if self.protocol_version is None:
      raise TProtocol.TProtocolException(message='Required field protocol_version is unset!')
    if self.object_desc is None:
      raise TProtocol.TProtocolException(message='Required field object_desc is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TGetCatalogObjectResponse:
  """
  Thrift-generated struct (Python 2 runtime); regenerate from the IDL
  rather than editing by hand.
  Attributes:
   - catalog_object
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'catalog_object', (CatalogObjects.ttypes.TCatalogObject, CatalogObjects.ttypes.TCatalogObject.thrift_spec), None, ), # 1
  )
  def __init__(self, catalog_object=None,):
    self.catalog_object = catalog_object
  def read(self, iprot):
    # Fast path: C-accelerated decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: read fields until STOP, skipping unknown ids/types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.catalog_object = CatalogObjects.ttypes.TCatalogObject()
          self.catalog_object.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TGetCatalogObjectResponse')
    if self.catalog_object is not None:
      oprot.writeFieldBegin('catalog_object', TType.STRUCT, 1)
      self.catalog_object.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # `catalog_object` is required in the IDL.
    if self.catalog_object is None:
      raise TProtocol.TProtocolException(message='Required field catalog_object is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class TPrioritizeLoadRequest:
"""
Attributes:
- protocol_version
- header
- object_descs
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'protocol_version', None, 0, ), # 1
(2, TType.STRUCT, 'header', (TCatalogServiceRequestHeader, TCatalogServiceRequestHeader.thrift_spec), None, ), # 2
(3, TType.LIST, 'object_descs', (TType.STRUCT,(CatalogObjects.ttypes.TCatalogObject, CatalogObjects.ttypes.TCatalogObject.thrift_spec)), None, ), # 3
)
def __init__(self, protocol_version=thrift_spec[1][4], header=None, object_descs=None,):
self.protocol_version = protocol_version
self.header = header
self.object_descs = object_descs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is | |
match sets of tokens or blocks.
# Note 2: Elements in a set can be matched in any order
# Note 3: Elements of sets are separated by commas
# Note 4: Elements as a whole should be enclosed in parentheses for mandatory matching or braces for optional
# Note 5: The below grammar will match a sequence of 1, 2, 3, 4 and 5 in any order, with 3 and 5 being optional
    # Handler for the "set-tokens" command. The r""" string below is NOT prose:
    # it is a grammar spec parsed by the CLI framework (tokens "1".."5" matched
    # in any order, "3" and "5" optional), so it must stay verbatim.
    async def do_set_tokens(self, result_set, result_list, result_dict):
        r"""
        <<
            $result_set = set();
            $result_list = list();
            $result_dict = dict();
        >>
        "set-tokens": "Matches a set of tokens in any order"
        (
            "1" << append($result_set, $1); append($result_list, $1); update($result_dict, $1, $1); >>,
            "2" << append($result_set, $2); append($result_list, $2); update($result_dict, $2, $2); >>,
            {
                "3" << append($result_set, $1); append($result_list, $1); update($result_dict, $1, $1); >>
            },
            "4" << append($result_set, $4); append($result_list, $4); update($result_dict, $4, $4); >>,
            {
                "5" << append($result_set, $1); append($result_list, $1); update($result_dict, $1, $1); >>
            }
        )
        """
        # The grammar's embedded actions populate the three collectors; the
        # handler body only echoes what was matched.
        print("Result set:", type(result_set), result_set)
        print("Result list:", type(result_list), result_list)
        print("Result dict:", type(result_dict), result_dict)
    # Example 5: Set of blocks, one mandatory and one optional; the set as a whole is optional, i.e. enclosed in {}
    # Handler for the "set-blocks" command. The r""" string below is a grammar
    # spec parsed by the CLI framework (a set of two multi-token blocks, one
    # optional and one mandatory), so it must stay verbatim.
    def do_set_block(self, result):
        r"""
        << $result = list(); >>
        "set-blocks"
        { # This is the enclosing braces of the set. Hence the set is optional
          # If we start matching the set, the inner block 2 will be mandatory as that is enclosed in parentheses
            { # Optional set block
                "optional"
                {
                    "set"
                }
                "block"
            } << append($result, $1); >>, # Unlike above example, this $1 matches the sequence above
            (
                "mandatory"
                {
                    "set"
                }
                "block"
            ) << append($result, $2); >>
        }
        # Help yourself matching the above grammar. Bug reports are welcome.
        """
        # The grammar's append() actions build `result`; just echo it here.
        print("Result:", result)
# Example 6: Types
# Note 1: The following type tokens are available by default
# string, ranged string, string alternatives, ranged integer, ranged float and boolean
    # Handler for the "types" command. The r""" string below is a grammar spec
    # parsed by the CLI framework; STRING_TOKEN etc. are token classes that
    # must be defined in the class-level (global) grammar. Keep it verbatim.
    def do_numbers(self, str_value, ranged_str_value, alt_str_value, int_value, decimal_value, boolean_value):
        r"""
        "types": "Type matching example"
        (
            # STRING_TOKEN, RANGED_STRING_TOKEN etc are token definitions
            # So they have to be defined in global grammar, so put the definitions in class docstring
            {
                "string"
                STRING_TOKEN
                << $str_value = $2; >>
            },
            {
                "ranged-string"
                RANGED_STRING_TOKEN
                << $ranged_str_value = $2; >>
            },
            {
                "string-alternatives"
                ALT_STRING_TOKEN
                << $alt_str_value = $2; >>
            },
            {
                "int"
                INT_TOKEN
                << $int_value = $2; >>
            },
            {
                "decimal"
                DECIMAL_TOKEN
                << $decimal_value = $2; >>
            },
            {
                "boolean"
                BOOLEAN_TOKEN
                << $boolean_value = $2; >>
            }
        )
        """
        # Every branch is optional, so any parameter the user did not supply
        # arrives with its framework default; echo types and values.
        print(f"str_value: type: {type(str_value)}: value: {str_value}")
        print(f"ranged_str_value: type: {type(ranged_str_value)}: value: {ranged_str_value}")
        print(f"alt_str_value: type: {type(alt_str_value)}: value: {alt_str_value}")
        print(f"int_value: type: {type(int_value)}: value: {int_value}")
        print(f"decimal_value: type: {type(decimal_value)}: value: {decimal_value}")
        print(f"boolean_value: type: {type(boolean_value)}: value: {boolean_value}")
# Example 7: Chaining grammars and passing parameters
    # Handler for the "grammar-chain" command. The r""" string below is a
    # grammar spec parsed by the CLI framework: it dispatches to one of four
    # child grammars, optionally passing parameters in []. Keep it verbatim.
    async def do_grammar_chain(self, value1, value2, value3):
        r"""
        <<
            $value2 = 0;
            $value3 = None;
        >>
        # Note: The child grammars should be present globally, ie. in class docstring
        "grammar-chain"
        (
            child_grammar_1
            |
            child_grammar_2[]
            |
            child_grammar_3[$value1, $value2]
            |
            child_grammar_4[$value2, $value3]
        )
        """
        # Child grammars may have written into value1..value3; echo them.
        print("value1:", type(value1), value1)
        print("value2:", type(value2), value2)
        print("value3:", type(value3), value3)
    # Example 8: Matching a sub-grammar multiple times. Not of much practical use.
    # Handler for the "multiple" command. The r""" string below is a grammar
    # spec parsed by the CLI framework: "* 2" repeats exactly twice and
    # "* (1:3)" repeats one to three times. Keep it verbatim.
    def do_multiple(self, result):
        r"""
        << $result = list(); >>
        {"multiple"
        (
            "match-twice" << append($result, $1); >>
        ) * 2}
        (
            "match-one-to-three-times"
            << append($result, $1); >>
        )* (1:3)
        """
        # `result` holds one entry per repetition matched by the grammar.
        print("Result:", result)
# Example 9: Defining a custom token
"""
In this example we will define a custom token with auto completion and suggestion. See the CLI interaction below
The command we implement will have 3 tokens
token 1: initial command token - 'token-test'
token 2: StringToken - any string
token 3: Our custom token. It will match any substring from token 2 separated by comma, token 2 can be put inside quotes so that
spaces can be included in it
"""
    # Handler for the "token-test" command. The r""" string below is a grammar
    # spec parsed by the CLI framework; EXAMPLE_9_PARENT_STRING and
    # EXAMPLE_9_SUBSTRING are custom token classes defined in the global
    # grammar (class docstring). Keep it verbatim.
    def do_custom_token(self, parent_string, sub_string):
        r"""
        "token-test"
        EXAMPLE_9_PARENT_STRING
        << $parent_string = $2; >>
        EXAMPLE_9_SUBSTRING
        << $sub_string = $3; >>
        """
        # $2/$3 are the second and third matched tokens on the command line.
        print("parent_string:", parent_string)
        print("sub_string:", sub_string)
# Example 10: Gathering input
# Input can be done with async or non-async functions, get_input and input respectively
# Input can be gathered with masking echo, pass show_char argument as False
# Input can be triggered either from CLI grammar or python code.
# Below example shows all combinations
    # Handler for the "input" command. The r""" string below is a grammar spec
    # parsed by the CLI framework; its trailing action block gathers input from
    # grammar code before the Python body runs. Keep it verbatim.
    async def do_input(self, async_inp, show_char, input_str):
        r"""
        << $show_char = True; >>
        (
            "input"
            {
                {
                    "async"
                    << $async_inp = True; >>
                },
                {
                    "mask-input"
                    << $show_char = False; >>
                }
            }
        )
        <<
            $input_str = input("cli: ", $show_char);
            print("Cli Code Input (cli print):", $input_str);
        >>
        """
        if async_inp:
            inp = await self.input("python: ", show_char=show_char)
        else:
            # Non-async input is deprecated, so both branches intentionally
            # fall through to the awaited self.input() call.
            self.print("non async input is deprecated.")
            inp = await self.input("python: ", show_char=show_char)
        # input_str was filled in by the grammar's action block above.
        print("Cli Code Input (python print):", input_str)
        print("Python Code Input:", inp)
"""
Here's the CLI interaction for example 10
simple-cli # inp
cli: cli
Cli Code Input (cli print): cli
python: python
Cli Code Input (python print): cli
Python Code Input: python
simple-cli #
simple-cli # inp mas
cli: ***
Cli Code Input (cli print): cli
python: ******
Cli Code Input (python print): cli
Python Code Input: python
simple-cli #
simple-cli # inp as
cli: cli
Cli Code Input (cli print): cli
python: python
Cli Code Input (python print): cli
Python Code Input: python
simple-cli # inp as mask
cli: ***
Cli Code Input (cli print): cli
python: ******
Cli Code Input (python print): cli
Python Code Input: python
simple-cli #
"""
"""
Example 9 CLI interaction
=========================
# Some completions might vary slightly as there was fixes in quotes handling
simple-cli #
simple-cli # to<TAB>
token-test : token-test
simple-cli # to <TAB>
: Any string
simple-cli # to asdf <TAB>
asdf : A substring of the parent string
simple-cli # to asdf a<ENTER>
parent_string: asdf
sub_string: asdf
simple-cli #
simple-cli # token "asdf, 123, asdf 123, 1 2 3"
" 1 2 3" : A substring of the parent string
" 123" : A substring of the parent string
" asdf 123" : A substring of the parent string
asdf : A substring of the parent string
simple-cli # token "asdf, 123, asdf 123, 1 2 3" 1<ENTER>
parent_string: asdf, 123, asdf 123, 1 2 3
sub_string: 123
simple-cli # token "asdf, 123, asdf 123, 1 2 3" as<ENTER>
parent_string: asdf, 123, asdf 123, 1 2 3
sub_string: asdf
simple-cli # token "asdf, 123, asdf 123, 1 2 3" "<TAB>
" 1 2 3" : A substring of the parent string
" 123" : A substring of the parent string
" asdf 123" : A substring of the parent string
simple-cli # token "asdf, 123, asdf 123, 1 2 3" "1<ENTER>
parent_string: asdf, 123, asdf 123, 1 2 3
sub_string: 111 222
simple-cli #
simple-cli # token "asdf, 123, asdf 123, 1 2 3" "a<TAB>
"asdf 123" : A substring of the parent string
simple-cli # token "asdf, 123, asdf 123, 1 2 3" "asdf 123"<ENTER>
parent_string: asdf, 123, asdf 123, 1 2 3
sub_string: asdf 123
simple-cli #
simple-cli #
simple-cli # to<TAB>
token-test : token-test
simple-cli # to "asdf\n,123 123,a\tb"<TAB>
: Any string
simple-cli # to "asdf\n,123 123,a\tb" <TAB>
"123 123" : A substring of the parent string
"a\tb" : A substring of the parent string
asdf : A substring of the parent string
simple-cli # to "asdf\n,123 123,a\tb" a<ENTER>
parent_string: asdf
,123 123,a b
sub_string: asdf
simple-cli # to "asdf\n,123 123,a\tb" "<TAB>
"123 123" : A substring of the parent string
"a\tb" : A substring of the parent string
simple-cli # to "asdf\n,123 123,a\tb" "1<ENTER>
parent_string: asdf
,123 123,a b
sub_string: "123 123"
simple-cli # to "asdf\n,123 123,a\tb" "1<TAB><TAB>
"123 123" : A substring of the parent string
simple-cli # to "asdf\n,123 123,a\tb" "123 123"<ENTER>
parent_string: asdf
,123 123,a b
sub_string: "123 123"
simple-cli # to "asdf\n,123 123,a\tb" "<TAB>
"123 123" : A substring of the parent string
"a\tb" : A substring of the parent string
simple-cli # to "asdf\n,123 123,a\tb" "a\tb"<ENTER>
parent_string: asdf
,123 123,a b
sub_string: "a b"
simple-cli #
simple-cli # to "asdf, 123 123, \"asdf\"\""
" 123 123" : A substring of the parent string
" \"asdf\"\"" : A substring of the parent string
asdf : A substring of the parent string
simple-cli # to "asdf, 123 123, \"asdf\"\"" " \<TAB><TAB>
" \"asdf\"\"" : A substring of the parent string
simple-cli # to "asdf, 123 | |
__author__ = '<NAME>'
import ctypes
import os
import time, queue
import numpy as np
import threading
libc = ctypes.cdll.msvcrt
libc.fopen.restype = ctypes.c_void_p
class PixelFly(object):
"""
PixelFly class loads the pf_cam.dll in order to interface
the basic functions of the pco.pixelfly ccd detector.
"""
def __init__(self, dllpath='C:\\Users\\Admin\\Desktop\\pco_pixelfly'):
# Load dynamic link library
self.DLLpath = os.path.join(dllpath , 'SC2_Cam.dll')
self.PixFlyDLL = ctypes.windll.LoadLibrary(self.DLLpath)
# initialize board number, by default 0
self.board = 0
# initialize handles and structs
self.hCam = ctypes.c_int()
self.bin = 1
self.v_max = 1040
self.h_max = 1392
self.wXResAct = ctypes.c_uint16()
self.wYResAct = ctypes.c_uint16()
self.dwWarn = ctypes.c_ulong
self.dwErr = ctypes.c_ulong
self.dwStatus = ctypes.c_ulong
self.szCameraName = ctypes.c_char
self.wSZCameraNameLen = ctypes.c_ushort
# Set all buffer size parameters
self.time_modes = {1: "us", 2: "ms"}
self.set_params = {'ROI': [1, 1, self.h_max, self.v_max],
'binning': [1, 1],
'Exposure time': [0, '0'],
'Camera ROI dimensions': [0, 0]}
self.armed = False
self.buffer_numbers = []
self.buffer_pointers, self.buffer_events = (
[], [])
self.out = 0
# Queues that hold the data collected in the camera.
self.q = queue.Queue(maxsize=2)
self.q_m = queue.Queue(maxsize=2)
def open_camera(self):
"""
open_camera tries to open the camera. It passes the camera
handle hCam by reference in order to get the handle which will
be used afterwards.
:return:True if success and False if unaible to open camera or
some error occured.
"""
# opencamera is the instance of OpenCamera method in DLL
opencamera = self.PixFlyDLL.PCO_OpenCamera
# PCO_OpenCamera(HANDLE *hCam, int board_num), return int
opencamera.argtypes = (ctypes.POINTER(ctypes.c_int), ctypes.c_int)
opencamera.restype = ctypes.c_int
# return 0 if success, <0 if error
ret_code = opencamera(self.hCam, self.board)
print(self.hCam.value, ret_code)
# check if camera connected and get info
if ret_code < 0:
print('Error connecting camera')
# try to identify error
return False
elif ret_code == 0:
print('Camera Connected!')
return True
else:
return False
def close_camera(self):
"""
close_camera tries to close the connected camera with handle hCam.
:return: True if success and False if unaible to close the camera
"""
# closecamera is an instance of the CloseCamera function of the DLL
# call function and expect 0 if success, <0 if error
ret_code = self.PixFlyDLL.PCO_CloseCamera(self.hCam)
if ret_code == 0:
return True
else:
return False
def roi(self, region_of_interest, verbose=True):
"""
Set region of interest window. The ROI must be smaller or
equal to the absolute image area which is defined by the
format h_max, v_max and the binning bin.
:param region_of_interest: tuple of (x0,y0,x1,y1)
:param verbose: True if the process should be printed
:return: None
"""
x0, y0, x1, y1 = tuple(region_of_interest)
if verbose:
print("ROI requested :",x0, y0, x1, y1)
# max ROI depends on the format and the binning
x_max = self.h_max/self.bin
y_max = self.v_max/self.bin
# check that ROI is within allowed borders
restriction = ((x0 > 1) and (y0 > 1) and (x1 < x_max) and (y1 < y_max))
if not restriction:
if verbose:
print("Adjusting ROI..")
if x0 < 1:
x0 = 1
if x1 > x_max:
x1 = x_max
if y0 < 1:
y0 = 1
if y1 > y_max:
y1 = y_max
if x1 < x0 :
x0 , x1 = x1, x0
if y1 < y0:
y0, y1 = y1, y0
# pass values to ctypes variables
wRoiX0 = ctypes.c_uint16(int(x0))
wRoiY0 = ctypes.c_uint16(int(y0))
wRoiX1 = ctypes.c_uint16(int(x1))
wRoiY1 = ctypes.c_uint16(int(y1))
if verbose:
print("Setting ROI..")
self.PixFlyDLL.PCO_SetROI(self.hCam, wRoiX0, wRoiY0, wRoiX1, wRoiY1)
self.PixFlyDLL.PCO_GetROI(self.hCam,
ctypes.byref(wRoiX0), ctypes.byref(wRoiY0),
ctypes.byref(wRoiX1), ctypes.byref(wRoiY1))
if verbose:
print("ROI :")
print("From pixel ", wRoiX0.value)
print("to pixel ", wRoiX1.value, "(left/right")
print("From pixel ", wRoiY0.value)
print("to pixel ", wRoiY1.value, "(up/down")
self.set_params['ROI']=[wRoiX0.value, wRoiY0.value, wRoiX1.value, wRoiY1.value]
return None
def binning(self, h_bin, v_bin):
"""
binning allows for Binning pixels in h_bin x v_bin
Allowed values in {1,2,4,8,16,32}
:param h_bin: binning in horizontal direction
:param v_bin:
:return: None
"""
allowed = [1, 2, 4]
wBinHorz = ctypes.c_uint16(int(h_bin))
wBinVert = ctypes.c_uint16(int(v_bin))
if (h_bin in allowed) and (v_bin in allowed):
self.PixFlyDLL.PCO_SetBinning(self.hCam, wBinHorz, wBinVert)
self.PixFlyDLL.PCO_GetBinning(self.hCam, ctypes.byref(wBinHorz),
ctypes.byref(wBinVert))
self.set_params['binning']=[wBinHorz.value, wBinVert.value]
else:
raise UserWarning("Not allowed binning value pair " + str(h_bin)
+ "x" + str(v_bin))
return None
def exposure_time(self, exp_time, base_exposure, verbose=True):
"""
Sets delay and exposure time allowing to choose a base for each parameter
0x0000 timebase=[ns]=[10^-9 seconds]
0x0001 timebase=[us]=[10^-6 seconds]
0x0002 timebase=[ms]=[10^-3 seconds]
Note: Does not require armed camera to set exp time
:param exp_time: Exposure time (integer < 1000)
:param base_exposure: Base 10 order for exposure time in seconds-> ns/us/ms
:param verbose: True if process should be printed
:return: None
"""
# check for allowed values
if not(base_exposure in [1, 2]):
raise UserWarning("Not accepted time modes")
# pass values to ctypes variables
dwDelay = ctypes.c_uint32(0)
dwExposure = ctypes.c_uint32(int(exp_time))
wTimeBaseDelay = ctypes.c_uint16(0)
wTimeBaseExposure = ctypes.c_uint16(int(base_exposure))
if verbose:
print('Setting exposure time/delay..')
# set exposure time and delay time
self.PixFlyDLL.PCO_SetDelayExposureTime(self.hCam,
dwDelay, dwExposure,
wTimeBaseDelay, wTimeBaseExposure)
self.PixFlyDLL.PCO_GetDelayExposureTime(self.hCam, ctypes.byref(dwDelay),
ctypes.byref(dwExposure),
ctypes.byref(wTimeBaseDelay),
ctypes.byref(wTimeBaseExposure))
self.set_params['Exposure time'] = [dwExposure.value, self.time_modes[wTimeBaseExposure.value]]
return None
def get_exposure_time(self):
"""
Get exposure time of the camera.
:return: exposure time, units
"""
# pass values to ctypes variables
dwDelay = ctypes.c_uint32(0)
dwExposure = ctypes.c_uint32(0)
wTimeBaseDelay = ctypes.c_uint16(0)
wTimeBaseExposure = ctypes.c_uint16(0)
# get exposure time
self.PixFlyDLL.PCO_GetDelayExposureTime(self.hCam, ctypes.byref(dwDelay),
ctypes.byref(dwExposure),
ctypes.byref(wTimeBaseDelay),
ctypes.byref(wTimeBaseExposure))
return [dwExposure.value, self.time_modes[wTimeBaseExposure.value]]
def arm_camera(self):
"""
Arms camera and allocates buffers for image recording
:param num_buffers:
:param verbose:
:return:
"""
if self.armed:
raise UserWarning("Camera already armed.")
# Arm camera
self.PixFlyDLL.PCO_ArmCamera(self.hCam)
# Get the actual image resolution-needed for buffers
self.wXResAct, self.wYResAct, wXResMax, wYResMax = (
ctypes.c_uint16(), ctypes.c_uint16(), ctypes.c_uint16(),
ctypes.c_uint16())
self.PixFlyDLL.PCO_GetSizes(self.hCam, ctypes.byref(self.wXResAct),
ctypes.byref(self.wYResAct), ctypes.byref(wXResMax),
ctypes.byref(wYResMax))
self.set_params['Camera ROI dimensions'] = [self.wXResAct.value,
self.wYResAct.value]
self.armed = True
return None
def disarm_camera(self):
"""
Disarm camera, free allocated buffers and set
recording to 0
:return:
"""
# set recording state to 0
wRecState = ctypes.c_uint16(0)
self.PixFlyDLL.PCO_SetRecordingState(self.hCam, wRecState)
# free all allocated buffers
self.PixFlyDLL.PCO_RemoveBuffer(self.hCam)
for buf in self.buffer_numbers:
self.PixFlyDLL.PCO_FreeBuffer(self.hCam, buf)
self.buffer_numbers, self.buffer_pointers, self.buffer_events = (
[], [], [])
self.armed = False
return None
def allocate_buffer(self, num_buffers=2):
"""
Allocate buffers for image grabbing
:param num_buffers:
:return:
"""
dwSize = ctypes.c_uint32(self.wXResAct.value*self.wYResAct.value*2) # 2 bytes per pixel
# set buffer variable to []
self.buffer_numbers, self.buffer_pointers, self.buffer_events = (
[], [], [])
# now set buffer variables to correct value and pass them to the API
for i in range(num_buffers):
self.buffer_numbers.append(ctypes.c_int16(-1))
self.buffer_pointers.append(ctypes.c_void_p(0))
self.buffer_events.append(ctypes.c_void_p(0))
self.PixFlyDLL.PCO_AllocateBuffer(self.hCam, ctypes.byref(self.buffer_numbers[i]),
dwSize, ctypes.byref(self.buffer_pointers[i]),
ctypes.byref(self.buffer_events[i]))
# Tell camera link what actual resolution to expect
self.PixFlyDLL.PCO_CamLinkSetImageParameters(
self.hCam, self.wXResAct, self.wYResAct)
return None
def start_recording(self):
"""
Start recording
:return: message from recording status
"""
message = self.PixFlyDLL.PCO_SetRecordingState(self.hCam, ctypes.c_int16(1))
return message
def _prepare_to_record_to_memory(self):
"""
Prepares memory for recording
:return:
"""
dw1stImage, dwLastImage = ctypes.c_uint32(0), ctypes.c_uint32(0)
wBitsPerPixel = ctypes.c_uint16(16)
dwStatusDll, dwStatusDrv = ctypes.c_uint32(), ctypes.c_uint32()
bytes_per_pixel = ctypes.c_uint32(2)
pixels_per_image = ctypes.c_uint32(self.wXResAct.value * self.wYResAct.value)
added_buffers = []
for which_buf in range(len(self.buffer_numbers)):
self.PixFlyDLL.PCO_AddBufferEx(
self.hCam, dw1stImage, dwLastImage,
self.buffer_numbers[which_buf], self.wXResAct,
self.wYResAct, wBitsPerPixel)
added_buffers.append(which_buf)
# prepare Python data types for receiving data
# http://stackoverflow.com/questions/7543675/how-to-convert-pointer-to-c-array-to-python-array
ArrayType = ctypes.c_uint16*pixels_per_image.value
self._prepared_to_record = (dw1stImage, dwLastImage,
wBitsPerPixel,
dwStatusDll, dwStatusDrv,
bytes_per_pixel, pixels_per_image,
added_buffers, ArrayType)
return None
def record_live(self):
if not self.armed:
raise UserWarning('Cannot record to memory with disarmed camera')
if not hasattr(self, '_prepared_to_record'):
self._prepare_to_record_to_memory()
(dw1stImage, dwLastImage, wBitsPerPixel, dwStatusDll,
dwStatusDrv, bytes_per_pixel, pixels_per_image, added_buffers, ArrayType) = self._prepared_to_record
poll_timeout=5e5
message = 0
verbose=False
self.live = True
out_preview = self.record_to_memory(1)[0]
while self.live:
num_polls = 0
polling = True
which_buf = added_buffers.pop(0)
try:
while polling:
num_polls += 1
message = self.PixFlyDLL.PCO_GetBufferStatus(
self.hCam, self.buffer_numbers[added_buffers[0]],
ctypes.byref(dwStatusDll), ctypes.byref(dwStatusDrv))
if dwStatusDll.value == 0xc0008000:
# Buffer exits the queue
if verbose:
print("After", num_polls, "polls, buffer")
print(self.buffer_numbers[which_buf].value)
print("is ready.")
polling = False
break
else:
time.sleep(0.00005) # Wait 50 microseconds
if num_polls > poll_timeout:
print("After %i polls, no buffer."%(poll_timeout))
raise TimeoutError
except TimeoutError:
pass
try:
if dwStatusDrv.value == 0x00000000 and dwStatusDll.value == 0xc0008000:
pass
elif dwStatusDrv.value == 0x80332028:
raise DMAError('DMA error during record_to_memory')
else:
print("dwStatusDrv:", dwStatusDrv.value)
raise UserWarning("Buffer status error")
if verbose:
print("Record to memory result:")
print(hex(dwStatusDll.value), hex(dwStatusDrv.value))
print(message)
print('Retrieving image from buffer ', which_buf)
self.ts = time.clock()
if self.q.full():
self.q.queue.clear()
buffer_ptr = ctypes.cast(self.buffer_pointers[which_buf], ctypes.POINTER(ArrayType))
out = np.frombuffer(buffer_ptr.contents, dtype=np.uint16).reshape((self.wYResAct.value, self.wXResAct.value))
out /= 4 # make integer division to convert 16 bit to 14 bit
if self.q_m.full():
self.q_m.queue.clear()
self.q_m.put(np.ndarray.max(out))
self.q.put(out[::-1])
#print('Acquisition time:', time.clock()-ts)
| |
<filename>pyelements/underlordboot.py
from bs4 import BeautifulSoup
from collections import defaultdict
from operator import itemgetter
import numpy as np
import tensorflow
import itertools
from copy import deepcopy
# Scrape synergy tiers and hero level lists from a saved guide page.
# NOTE(review): hard-coded local Windows paths — this script only runs on the
# author's machine; confirm before reuse.
fr = open('C:/users/mnj/Downloads/under.html', 'r', encoding='utf8').read()
soup = BeautifulSoup(fr, "html.parser")
# NOTE(review): the next two results are unused — `tags` is immediately
# overwritten by the soup('li') call below.
tags = soup('div class')
tiers = soup('u')
tags = soup('li')
container = []
for tag in tags:
    if not tag:
        continue
    # Keep only short list items (plain names, no markup fragments).
    info = list(filter(lambda st: st != '' and '<' not in st, str(tag).split()))
    if len(info) <= 5:
        extract = str(tag.contents[0]).strip()
        container.append(extract)
        # 'Lich' is the last entry of interest on the page; stop there.
        if str(tag.contents[0]).strip() == 'Lich':
            break
# Fixed slice offsets into the scraped list: first the synergy tiers,
# then the heroes grouped by gold cost (level 1..5).
synergies = dict()
levels = dict()
synergies['S Tier'] = container[:3]
synergies['A Tier'] = container[3:10]
synergies['B Tier'] = container[10:15]
synergies['C Tier'] = container[15:23]
levels['1'] = container[23:37]
levels['2'] = container[37:51]
levels['3'] = container[51:66]
levels['4'] = container[66:78]
levels['5'] = container[78:83]
# Second page: per-hero stats (alliances, health, DPS, range, armor).
fr = open('C:/users/mnj/Downloads/stats.html', 'r', encoding='utf8').read()
soup = BeautifulSoup(fr, "html.parser")
'''
tags = soup('u')
for tag in tags:
    print(tag.contents)'''
soup = BeautifulSoup(fr, "html.parser")
tags = soup('li')
st = ""
collection = []
for tag in tags:
    if tag != None and str(tag.contents[0]).startswith('<strong>'):
        extract = ["Alliance 1", "Alliance 2", 'Alliance 3', 'Alliance 4', 'Health', 'DPS', 'Attack Range', 'Armor']
        for e in extract:
            if str(tag.contents[0]).find(e) != -1:
                collection.append(str(tag.contents[1]).strip())
info = {}
tags = soup('u')
i = 0
# Heroes with four alliances get 8 stat fields, everyone else 7.
for tag in tags:
    if str(tag)[3:-4] == 'Puck' or str(tag)[3:-4] == 'Dragon Knight' or str(tag)[3:-4] == 'Lycan':
        info[str(tag)[3:-4]] = collection[i:i+8]
        i += 8
    else:
        info[str(tag)[3:-4]] = collection[i:i+7]
        i += 7
# Flatten all level lists, then patch stats for 'Wind'/'Nature' heroes whose
# scraped names differ between the two pages.
l1 = []
for nm in levels:
    for si in levels[nm]:
        l1 += [si]
l2 = []
for key in l1:
    if 'Wind' in key:
        l2.append(key)
    if 'Nature' in key:
        l2.append(key)
st = ''
for ao in info:
    if 'Nature' in ao:
        st = ao
# NOTE(review): `ao` below is the leftover loop variable from the loop above,
# not necessarily the 'Nature' key stored in `st` — looks like a bug; verify.
for m in l2:
    if 'Wind' in m:
        info[m] = info['Wind Ranger']
    else:
        info[m] = info[ao]
# give each hero a number for q-table access later on
heroes_to_num = {}
for index, hero in enumerate(info.keys()):
    heroes_to_num[hero.strip()] = index
num_to_heroes = {}
for index, hero in enumerate(info.keys()):
    num_to_heroes[index] = hero
# Number the synergies as well; `i` ends as the total synergy count and is
# read later by UnderlordSimulator.__init__.
i = 0
synergies_to_num = {}
all_synergies = []
for tier in synergies:
    for synergy in synergies[tier]:
        synergies_to_num[synergy] = i
        # NOTE(review): appends the whole `synergies` dict each time instead
        # of the current `synergy` string — looks like a bug; verify.
        all_synergies.append(synergies)
        i += 1
'''
add_syns = []
for key in synergies_to_num:
    add_syns.append(key[:-1])
for syn in add_syns:
    synergies_to_num[syn] = synergies_to_num[key]'''
from difflib import SequenceMatcher
# given a synergy which may not be in the dictionary, this tries to find the closest matching one
def similarity_function(access):
    """Return the key in synergies_to_num most similar to *access*.

    Similarity is difflib.SequenceMatcher.ratio(); ties keep the first key in
    iteration order. Returns '' when synergies_to_num is empty.
    """
    max_sim_synergy = ''
    max_ratio = 0.0
    for key in synergies_to_num:
        # Compute the ratio once per key (it was previously computed twice,
        # doubling the cost of every lookup).
        ratio = SequenceMatcher(None, access, key).ratio()
        if ratio > max_ratio:
            max_ratio = ratio
            max_sim_synergy = key
    return max_sim_synergy
class UnderlordSimulator:
    def __init__(self):
        """Snapshot the module-level scraped tables and reset game state."""
        # References to the module-level lookup tables built by the scraper.
        self.pieces_to_num = heroes_to_num
        self.num_to_pieces = num_to_heroes
        self.synergies_to_num = synergies_to_num
        self.piece_stats = info
        self.gold = 1
        self.lev = 1
        self.xp = 1
        self.inventory = dict()
        self.iter = 0
        for value in heroes_to_num.values():
            self.inventory[value] = [0, 0, 0] # number of level 1, 2, and 3 troops
        self.exp_needed = [2, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40] # experience needed to level up
        self.synergy_inventory = dict()
        # NOTE(review): `i` is the leftover module-level loop counter, equal to
        # the total number of synergies when the module ran top-to-bottom —
        # fragile; verify it hasn't been reassigned before instantiation.
        for n in range(i):
            self.synergy_inventory[n] = 0
        self.existing = {}
        self.rnd = 1
# returns level of a piece
def get_lev(self, piece_name):
for lev in levels.keys():
if piece_name in levels[lev]:
return int(lev)
def generate_troops(self, prob):
troops = []
for i in range(5):
subset = np.random.choice([1, 2, 3, 4, 5], p=prob)
troop = np.random.choice(levels[str(subset)])
troops += [troop]
return troops
# simulate each round so we can have a recursive min-max
    def rnd_sim(self, pieces, state, depth, max_depth=3):
        """Recursively estimate a state value for buying each offered piece.

        Mutates self.gold / self.inventory / self.synergy_inventory while
        exploring and restores them at depth 1. Returns (max_val, index,
        combs) at depth 1, otherwise the mean of the child state values.
        """
        combs = []
        init_state = (self.gold, self.inventory, self.synergy_inventory) # we don't want to override these between each run
        state_vals = [0]
        # available_pieces = input('What pieces are in the store?')
        # available_pieces = available_pieces.split(',')
        # for each of the available pieces in the shop, calculate the game state value if they were to be bought
        for piece in pieces:
            '''if self.inventory[self.pieces_to_num[piece]] == [0, 0, 0] and self.rnd >= 11:
                state_vals += [0]
                continue'''
            self.gold = init_state[0]
            # these are mutable - account for this when altering self.inventory with a deepcoopy
            self.inventory = deepcopy(init_state[1])
            self.synergy_inventory = deepcopy(init_state[2])
            piece = piece.strip() # remove white space
            # self.gold -= self.get_lev(piece) # subtact the cost of the piece
            # Income: 5 base plus 10% interest while under 50 gold.
            if self.gold < 50:
                self.gold += (5 + int((self.gold * 0.1)))
            else:
                self.gold += 5
            # calculate the state
            # find the combination of pieces which give the maximum state value (accounting for synergies, player level, piece level, etc.)
            max_pieces = self.lev
            # merge level 1 pieces to level 2 if enough
            if self.inventory[self.pieces_to_num[piece]][0] < 2: # if the amt of level 1 pieces is less than 3, increment by 1
                self.inventory[self.pieces_to_num[piece]][0] += 1
            else:
                self.inventory[self.pieces_to_num[piece]][0] = 0
                self.inventory[self.pieces_to_num[piece]][1] = 1
            # do same but from level 2 to level 3
            if self.inventory[self.pieces_to_num[piece]][1] < 2:
                self.inventory[self.pieces_to_num[piece]][1] += 1
            else:
                self.inventory[self.pieces_to_num[piece]][1] = 0
                self.inventory[self.pieces_to_num[piece]][2] = 1
            if depth != max_depth:
                # Recurse on a simulated all-tier-1 shop for the next round.
                returned_vals = self.rnd_sim(self.generate_troops([1.0, 0, 0, 0, 0]), 0, depth + 1)
                state_vals += [returned_vals]
                # we have a list of 5 state values
                #combs.append()
            else:
                # go through all combinations of heroes
                simplified_inventory = []
                # get the hero number representations for each of the pieces we have
                for item in self.inventory:
                    if self.inventory[item] != [0, 0, 0]:
                        for n in range(self.inventory[item][0]):
                            simplified_inventory.append(item)
                        for n in range(self.inventory[item][1]):
                            simplified_inventory.append(item)
                # go through the set of each pieces in the inventory that may be placed on the board and calculate which will max the state
                for pieces_on_board in itertools.combinations(simplified_inventory, max_pieces):
                    # how can this be decreasing / so far from predicted
                    state_val = 0
                    if self.pieces_to_num[piece] not in pieces_on_board: # check unique combinations only
                        continue
                    if self.existing.get(pieces_on_board, -1) != -1:
                        continue
                    self.iter += 1
                    synergy_count = 0
                    in_board = []
                    for board_piece in pieces_on_board: # duplicate pieces on board
                        in_board.append(board_piece)
                        # NOTE(review): board_piece was just appended to
                        # in_board, so this condition can never be true and
                        # the synergy tally below is dead code — verify
                        # whether the append was meant to come after the check.
                        if board_piece not in in_board:
                            synergy1 = self.piece_stats[self.num_to_pieces[board_piece]][0]
                            synergy2 = self.piece_stats[self.num_to_pieces[board_piece]][1]
                            # add in case for heroes with 3 or more synergies ***
                            # 1 synergy per unique piece (so only consider it the first time and then forget about it)
                            self.synergy_inventory[self.synergy_inventory.get(self.synergies_to_num[similarity_function(synergy1)])] += 1
                            self.synergy_inventory[self.synergy_inventory.get(self.synergies_to_num[similarity_function(synergy2)])] += 1
                    # Reward synergy breakpoints at 3 / 6 / 9 copies.
                    for synergy in self.synergy_inventory:
                        #if self.synergy_inventory[synergy] // 3 == 1: # if you have 3 of the same kind
                        if self.synergy_inventory.get(synergy, similarity_function(self.num_to_pieces[synergy])) // 3 == 1:
                            state_val += 5
                        #if self.synergy_inventory[synergy] // 6 == 1:
                        if self.synergy_inventory.get(synergy, similarity_function(self.num_to_pieces[synergy])) // 6 == 1:
                            state_val += 15
                        if self.synergy_inventory.get(synergy, similarity_function(self.num_to_pieces[synergy])) // 9 == 1:
                            state_val += 40
                    # factor in level of the pieces on the board
                    for board_piece in pieces_on_board:
                        for i in range(3):
                            if i == 1:
                                state_val += (self.inventory[self.pieces_to_num[piece]][i]) * 5
                            if i == 2:
                                state_val += (self.inventory[self.pieces_to_num[piece]][i]) * 10
                    self.existing[pieces_on_board] = 0
                    # factor in amount of gold
                    state_val += (self.gold / 2)
                    state_vals += [state_val] # append the predicted state val for each hero
        # we have a list of 5 state values
        # return the best state val out of the pieces presented
        if depth == 1:
            max_val = 0
            max_index = 0
            for i, val in enumerate(state_vals):
                if val > max_val:
                    max_val = val
                    max_index = i
            print(state_vals)
            # Restore the caller-visible state mutated during exploration.
            self.gold = init_state[0]
            self.inventory = init_state[1]
            self.synergy_inventory = init_state[2]
            #print(combs)
            return max(state_vals), max_index, combs
        return sum(state_vals) / len(state_vals) # max(state_vals)
def game_sim(self):
for rnd in range(1, 35):
self.rnd = rnd
print('Round', rnd )
available_pieces = input('What pieces are in the store?')
available_pieces = available_pieces.split(',')
#available_pieces = self.generate_troops([0.2, 0.2, 0.2, 0.2, 0.2])
if rnd <= 7:
state_val, index, comb = self.rnd_sim(available_pieces, 0, 1, 5)
else:
state_val, index, comb = self.rnd_sim(available_pieces, 0, 1)
#print('Buy', available_pieces[index])
self.xp += 1
if self.xp == self.exp_needed[self.lev - 1]:
self.lev += 1
self.xp = 0
index -= 1
self.inventory[self.pieces_to_num[available_pieces[index]]][0] += 1
if self.inventory[self.pieces_to_num[available_pieces[index]]][0] == 3:
self.inventory[self.pieces_to_num[available_pieces[index]]][1] += 1
self.inventory[self.pieces_to_num[available_pieces[index]]][0] = 0
if self.inventory[self.pieces_to_num[available_pieces[index]]][1] == 3:
self.inventory[self.pieces_to_num[available_pieces[index]]][1] = 0
self.inventory[self.pieces_to_num[available_pieces[index]]][2] = 1
#combs = itertools.combinations(simplified_inventory, self.lev)
#pieces_on_board = set(comb)
# calculate the synergies each round
for syn in self.synergy_inventory:
self.synergy_inventory[syn] = 0
# why aren't combinations being added
#for pce in pieces_on_board:
'''synergy1 = self.piece_stats[self.num_to_pieces[self.pieces_to_num[pce]][0]]
#synergy2 = | |
higher than c[i,j]
cabove = scumsum( count )
# solid lines give probability contours at specified levels
# (defaults to 0.68 for "1-sigma contours")
ax1.contour( x[:-1], y[:-1], cabove, linelevels, colors=[plotargs['color'],plotargs['color']], ls='-' )
if plotstyle=='contourf' :
#flevels = [ 1e-30, 0 ]
# filled contours show full extent of the population
#ax1.contourf( x[:-1], y[:-1], count, flevels, cmap=cmap, alpha=0.5 )
ax1.contourf( x[:-1], y[:-1], cabove, levels=linelevels, colors=[plotargs['mec'],plotargs['mfc']], alpha=0.5, extend='neither' )
filt1 = sim.SURVEYDATA.band2filter(band1)
filt2 = sim.SURVEYDATA.band2filter(band2)
filt3 = sim.SURVEYDATA.band2filter(band3)
filt4 = sim.SURVEYDATA.band2filter(band4)
if band1==band2 : ax1.set_xlabel('%s'%filt1)
else : ax1.set_xlabel('%s - %s'%(filt1,filt2))
if band3==band4 : ax1.set_ylabel('%s'%(filt3))
else : ax1.set_ylabel('%s - %s'%(filt3,filt4))
if sidehist :
# construct the 1-d histograms along the edges
histbinsX = np.arange(binrange[0][0]-histbinwidth,binrange[0][1]+histbinwidth, histbinwidth)
histbinsY = np.arange(binrange[1][0]-histbinwidth,binrange[1][1]+histbinwidth, histbinwidth)
histbincentersY = histbinsY[:-1] + (histbinsY[1]-histbinsY[0])/2.
histbincentersX = histbinsX[:-1] + (histbinsX[1]-histbinsX[0])/2.
histY, edge = p.histogram( yarray, bins=histbinsY )
histX, edge = p.histogram( xarray, bins=histbinsX )
Nsample = len(mag1)
ax2.plot( histbincentersX, cfrac*histX/Nsample, color=histcolor, ls='-', drawstyle='steps-mid' )
ax2.xaxis.set_ticks_position('top')
ymin2,ymax2 = ax2.get_ylim()
ax2.set_yticks( np.round( np.linspace( ymin2, ymax2, 4), 2 )[1:] )
ax3.plot( cfrac*histY/Nsample, histbincentersY, color=histcolor,ls='-', drawstyle='steps-mid' )
ax3.yaxis.set_ticks_position('right')
xmin3,xmax3 = ax3.get_xlim()
ax3.set_xticks( np.round( np.linspace( xmin3, xmax3, 4), 2 )[1:] )
# If SN magnitudes were provided, then plot the observations with error bars
xmin,xmax = xarray.min()-1.0,xarray.max()+1.0
ymin,ymax = yarray.max()+3.0,yarray.min()-1.0
likelihood = 0.0
if ( band1 in snmags and band2 in snmags and
band3 in snmags and band4 in snmags ) :
if band1==band2 : snx = abs(snmags[band1])
else : snx = abs(snmags[band1])-abs(snmags[band2])
if band3==band4 : sny = abs(snmags[band3])
else : sny = abs(snmags[band3])-abs(snmags[band4])
# compute the likelihood value of the position where the observed
# SN magnitudes land: the likelihood that the SN belongs to the
# simulated class, based on the observed data alone
isnx = np.argmin( np.abs( x-snx ) )
isny = np.argmin( np.abs( y-sny ) )
try:
likelihood = 1 - cabove[ isnx ][ isny ]
except :
likelihood = 0.0
if ( 'd'+band1 in snmags.keys() and 'd'+band2 in snmags.keys() and
'd'+band3 in snmags.keys() and 'd'+band4 in snmags.keys() ) :
dsnx1,dsnx2 = snmags['d'+band1], snmags['d'+band2]
dsny3,dsny4 = snmags['d'+band3], snmags['d'+band4]
if band1==band2 : dsnx = dsnx1
else : dsnx = np.sqrt( dsnx1**2 + dsnx2**2 )
if band3==band4 : dsny = dsny3
else : dsny = np.sqrt( dsny3**2 + dsny4**2 )
# plot upper-limit arrow(s) as needed
if band1==band2 and dsnx < 0 :
arr = FancyArrowPatch( [snx,sny], [snx+1.5,sny], arrowstyle='-|>', mutation_scale=25, fc='k', ls='dashed' )
ax1.add_patch( arr )
if band1!=band2 and dsnx1 < 0 :
arr = FancyArrowPatch( [snx,sny], [snx+1.5,sny], arrowstyle='-|>', mutation_scale=25, fc='k', ls='dashed' )
ax1.add_patch( arr )
if band1!=band2 and dsnx2 < 0 :
arr = FancyArrowPatch( [snx,sny], [snx-1.5,sny], arrowstyle='-|>', mutation_scale=25, fc='k', ls='dashed' )
ax1.add_patch( arr )
if band3==band4 and dsny < 0 :
arr = FancyArrowPatch( [snx,sny], [snx,sny+1.5], arrowstyle='-|>', mutation_scale=25, fc='k', ls='dashed' )
ax1.add_patch( arr )
if band3!=band4 and dsny3 < 0 :
arr = FancyArrowPatch( [snx,sny], [snx,sny+1.5], arrowstyle='-|>', mutation_scale=25, fc='k', ls='dashed' )
ax1.add_patch( arr )
if band3!=band4 and dsny4 < 0 :
arr = FancyArrowPatch( [snx,sny], [snx,sny-1.5], arrowstyle='-|>', mutation_scale=25, fc='k', ls='dashed' )
ax1.add_patch( arr )
# plot the point and error
if dsnx1>0 and dsnx2>0 :
ax1.errorbar( snx, sny, 0, abs(dsnx), color='k', marker='o', mec='k',mfc='w', mew=1.5, elinewidth=1.5, alpha=1.0, capsize=0, zorder=10 )
if dsny3>0 and dsny4>0 :
ax1.errorbar( snx, sny, abs(dsny), 0, color='k', marker='o', mec='k',mfc='w', mew=1.5, elinewidth=1.5, alpha=1.0, capsize=0, zorder=10 )
else :
ax1.plot( snx, sny, color='k', marker='o', zorder=10 )
if sidehist:
ax3.axhline( sny, color='0.5', lw=1, ls='-', zorder=10)
ax2.axvline( snx, color='0.5', lw=1, ls='-', zorder=10)
# ensure that the axes ranges include our SN observation
if sny > ymin: ymin = sny + 1
if sny < ymax: ymax = sny - 1
if snx < xmin: xmin = snx - 1
if snx > xmax: xmax = snx + 1
ax1.set_xlim(binrange[0])
ax1.set_ylim(binrange[1])
if band1==band2 :
if not ax1.xaxis_inverted() : ax1.invert_xaxis()
if sidehist:
if not ax2.xaxis_inverted() : ax2.invert_xaxis()
if band3==band4 :
if not ax1.yaxis_inverted() : ax1.invert_yaxis()
if sidehist :
if not ax3.yaxis_inverted() : ax3.invert_yaxis()
return( ax1, likelihood )
def plot_mag_z( sim, band='H', mjd='peak', plotstyle='median',
                restbands=False, detlim=False, **kwargs ):
    """ plot the magnitudes against redshift for the given MJD

    mjd='peak' is a special case that samples all simulated SNe
    at their respective peaks. Otherwise we sample all at the
    same MJD, which probably means they are at different LC ages.

    plotstyle : 'points' plots one marker per simulated SN;
       'median' plots a rolling sigma-clipped mean band.
    restbands : if True, overlay the rest-frame band-pass
       contribution fractions at each redshift on a twin axis.
    detlim : plot a dashed line at the detection limit ~25.5

    Returns the primary axes object, or None if no usable mags.
    """
    # The magnitude system is inferred from the K-correction file name.
    if sim.SURVEYDATA.KCORFILE.endswith('AB.fits') :
        magsystem = 'AB'
    else :
        magsystem = 'Vega'
    z = sim.z
    if mjd in [ None, 0, 'pk','peak'] :
        # read in the peak mags
        mag = sim.__dict__['SIM_PEAKMAG_'+band]
    else :
        # sample the light curves at the given MJD date
        sim.samplephot( mjd )
        mag = sim.__dict__['%s%i'%(band, int(mjd))]
    # limit to observations with legit data (+/-99 flags bad mags)
    igood = np.where( (mag<99) & (mag>-99) )[0]
    if not len(igood) :
        print( "ERROR: no good mags for %s vs z"%(band))
        return( None )
    mag = mag[igood]
    z = z[igood]
    # Plot it
    if band in BANDCOLOR.keys(): color = BANDCOLOR[band]
    else : color = 'k'
    plotdefaults={'ls':' ','marker':'o',
                  'mew':0.2,'ms':5,'alpha':0.4, 'mfc':color,'mec':color,}
    # caller kwargs override the defaults
    # (bugfix: dict(a.items()+b.items()) is python2-only syntax)
    plotargs = dict( plotdefaults )
    plotargs.update( kwargs )
    ax = p.gca()
    if plotstyle == 'points' :
        # Plot a point for every simulated SN
        # (bugfix: this branch referenced undefined band1/band2 and
        #  plotted with raw kwargs, discarding plotdefaults)
        p.plot( z, mag, **plotargs )
    elif plotstyle == 'median' :
        # Plot a rolling median at each redshift.
        # We use the 3-sigma-clipped mean and associated robust sigma
        # using astrolib-ported python functions defined below.
        # sort the mag and z arrays by redshift
        zsortidx = z.argsort()
        zsorted = z[zsortidx]
        magbyz = mag[zsortidx]
        # compute the sigma-clipped mean and associated robust sigma
        # over bins containing 5% of the simulated SNe
        from numpy import array
        Nsim = len(sim.z)
        Nmed = int(0.05*Nsim)
        magmed,magmederr = [],[]
        magmax, magmin = [], []
        for imag in range( len(mag) ) :
            # centered rolling window, clipped at the array edges
            # (bugfix: Nmed//2 -- Nmed/2 is a float slice index in py3)
            istart = max(0, imag - Nmed//2)
            magsample = magbyz[ istart : min(len(magbyz), istart+Nmed) ]
            mean, sigma = meanclip( magsample, clipsig=3, maxiter=3, converge_num=0.1 )
            magmed.append( mean )
            magmederr.append( sigma )
            magmax.append( max(magsample) )
            magmin.append( min(magsample) )
        magmed, magmederr = array(magmed),array(magmederr)
        magmax, magmin = array(magmax),array(magmin)
        ax = p.gca()
        # light band spans the full extent; darker band is +-1 robust sigma
        plotargs1 = dict( {'alpha':0.3} )
        plotargs1.update( kwargs )
        fill_between( ax, zsorted, magmin, magmax, **plotargs1 )
        plotargs2 = dict( {'alpha':0.6} )
        plotargs2.update( kwargs )
        fill_between( ax, zsorted, magmed-magmederr, magmed+magmederr, **plotargs2 )
    ax.set_xlim(z.min()-0.2,z.max()+0.2)
    # inverted y axis: brighter (smaller mag) is up
    ax.set_ylim(mag.max()+0.2,mag.min()-0.2)
    ax.set_xlabel('Redshift')
    ax.set_ylabel(magsystem+' Magnitude')
    if detlim :
        ax.axhline( 25.5, ls='--', color='0.4')
        ax.text(0.25,25.3,r'3-$\sigma$ Detection Limit', ha='left',va='bottom',color='0.4')
    if restbands :
        ax.set_ylim(mag.max()+2,mag.min()+0.4)
        # plotting bandpass cross-correlations
        sim.readfilters()
        zrange = np.arange( z.min(), z.max(), 0.02 )
        ax2 = ax.twinx()
        w2 = sim.FILTERS[band][:,0]
        f2 = sim.FILTERS[band][:,1]
        restbanddat = getrestbands( )
        for otherband in 'KHJYIRVBU' :
            if otherband not in restbanddat.keys() : continue
            if otherband in BANDCOLOR.keys():
                otherbandcolor = BANDCOLOR[otherband]
            else : otherbandcolor = 'k'
            w1 = restbanddat[otherband][:,0]
            f1 = restbanddat[otherband][:,1]
            xcor = xcorz( w1, f1, w2, f2, zrange, normalize=True )
            if xcor.max() == 0 : continue
            ax2.plot( zrange, xcor, marker=' ', color=otherbandcolor, ls='-' )
            ax2.set_ylim( -0.02, 8 )
            # Label the filter xcor line, but
            # don't over-write labels on right side:
            # (bugfix: z.max was missing its call parentheses, so the
            #  comparison against the bound method was always False)
            if xcor.argmax()== len(xcor)-1:
                if zrange[-1] == z.max() : continue
            zmax = zrange[ xcor.argmax() ]
            ax2.text( zmax, xcor.max() - 0.1, otherband,
                      color=otherbandcolor, backgroundcolor='w')
        ax2.set_yticks( [] )
    p.draw()
    return( ax )
def multiplot_mag_z( sim, bands='GRXIZMH', mjd='peak', sndat={}, restbands=True, **kwargs ):
""" multi-panel plot showing peak mag vs z diagrams.
mjd='peak' is a special case that samples all simulated SNe
at their respective peaks. Otherwise we sample all at the
same MJD, which probably means they are at | |
parse_dates=[
'DEALDATETIME']) for x in page_files]
df = pd.concat(page_dfs)
df = df.reset_index()
df = df.sort_index()
# now some geocoding:
loc_df = parse_body_request_to_dataframe(body)
loc_df = loc_df.append([loc_df]*(len(df) - 1), ignore_index=True)
df = pd.concat([df, loc_df], axis=1)
# fill in district and city, street:
df.loc[:, 'District'] = 'None'
df.loc[:, 'City'] = city
df.loc[:, 'Street'] = 'None'
if savepath is not None:
yrmin = df['DEALDATETIME'].min().year
yrmax = df['DEALDATETIME'].max().year
filename = 'Nadlan_deals_city_{}_no_streets_{}-{}.csv'.format(
city_code, yrmin, yrmax)
df.to_csv(savepath/filename, na_rep='None')
print('{} was saved to {}.'.format(filename, savepath))
return df
def get_all_historical_nadlan_deals(city='רעננה', street='אחוזה',
                                    city_code=None, street_code=None,
                                    savepath=None,
                                    check_for_downloaded_files=True,
                                    sleep_between_streets=True):
    """Download and assemble all nadlan (real-estate) deals for one street.

    Pages through the nadlan.gov.il REST API for the given city/street,
    then geocodes every unique address found and replicates the geocoding
    columns onto each matching deal row.

    Parameters:
        city, street : Hebrew names used to build the API request.
        city_code, street_code : numeric codes stored in the output and
            used for the saved filename.
        savepath : if not None, save the result there as a CSV.
        check_for_downloaded_files : skip streets already saved under savepath.
        sleep_between_streets : small random pause between geocoding
            requests, to avoid hammering the API.

    Returns a DataFrame of deals; an empty DataFrame if the street was
    already downloaded or the API returned no results on the first page;
    None if the initial request could not be built.
    """
    import pandas as pd
    from json import JSONDecodeError
    from Migration_main import path_glob
    if check_for_downloaded_files and savepath is not None:
        try:
            file = path_glob(
                savepath, 'Nadlan_deals_city_{}_street_{}_*.csv'.format(city_code, street_code))
            print('{} already found, skipping...'.format(file))
            return pd.DataFrame()
        except FileNotFoundError:
            pass
    try:
        body = produce_nadlan_rest_request(city=city, street=street)
    except TypeError:
        return None
    page_dfs = []
    cnt = 1
    last_page = False
    while not last_page:
        print('Page : ', cnt)
        try:
            result = post_nadlan_rest(body)
        except (TypeError, ValueError):
            # No results for this page.  On the first page there is
            # nothing to return; on later pages keep what we already have.
            # (bugfix: the old flow fell through after a mid-run ValueError
            #  and re-appended the previous page, duplicating deals, and a
            #  mid-run TypeError discarded every page already collected)
            if cnt == 1:
                return pd.DataFrame()
            break
        page_dfs.append(parse_one_json_nadlan_page_to_pandas(
            result, city_code, street_code))
        cnt += 1
        if result['IsLastPage']:
            last_page = True
        else:
            body['PageNo'] += 1
    df = pd.concat(page_dfs)
    df = df.reset_index()
    df = df.sort_index()
    # now re-run and get all body requests for all street numbers:
    locs = []
    unique_addresses = df['FULLADRESS'].unique()
    print('processing {} unique addresses...'.format(len(unique_addresses)))
    for fa in unique_addresses:
        rows = len(df[df['FULLADRESS'] == fa])
        ind = df[df['FULLADRESS'] == fa].index
        try:
            body = produce_nadlan_rest_request(full_address=fa)
        except JSONDecodeError:
            # transient API hiccup -- retry once
            body = produce_nadlan_rest_request(full_address=fa)
        loc_df = parse_body_request_to_dataframe(body)
        # replicate the single geocoding row once per deal at this address
        # (pd.concat replaces the deprecated DataFrame.append)
        loc_df_street = pd.concat([loc_df] * rows, ignore_index=True)
        loc_df_street.index = ind
        locs.append(loc_df_street)
        if sleep_between_streets:
            sleep_between(0.1, 0.3)
    loc_df_street = pd.concat(locs, axis=0)
    df = pd.concat([df, loc_df_street.sort_index()], axis=1)
    # fill in district and city, street:
    try:
        good_district = df['District'].unique(
        )[df['District'].unique() != ''][0]
    except IndexError:
        good_district = ''
    df.loc[df['District'] == '', 'District'] = good_district
    df.loc[df['City'] == '', 'City'] = city
    df.loc[df['Street'] == '', 'Street'] = street
    if savepath is not None:
        yrmin = df['DEALDATETIME'].min().year
        yrmax = df['DEALDATETIME'].max().year
        filename = 'Nadlan_deals_city_{}_street_{}_{}-{}.csv'.format(
            city_code, street_code, yrmin, yrmax)
        df.to_csv(savepath/filename, na_rep='None')
        print('{} was saved to {}.'.format(filename, savepath))
    return df
def read_neighborhood_city_file(path=work_david, file='neighborhood_city_code_counts.csv',
                                add_auto_complete=True):
    """Load and clean the neighborhood/city-code lookup table.

    Reads the counts CSV, optionally merges in neighborhoods scraped from
    the nadlan autocomplete API, assigns a running neighborhood_code per
    city, applies a long list of manual Hebrew name fixes, and flags a
    hand-picked set of (city_code, neighborhood_code) pairs with
    switch=True.

    Returns a DataFrame with columns:
        city_code, City, neighborhood_code, Neighborhood, switch.
    """
    import pandas as pd
    import numpy as np
    from cbs_procedures import read_bycode_city_data
    df = pd.read_csv(path/file)
    df.columns = ['city_code', 'Neighborhood', 'to_drop']
    df = df[[x for x in df.columns if 'to_drop' not in x]]
    # filter שכונת:
    df['Neighborhood'] = df['Neighborhood'].str.replace('שכונת', '')
    df['Neighborhood'] = df['Neighborhood'].str.strip()
    if add_auto_complete:
        # add neighborhhod data from autocomplete:
        auto = pd.read_csv(path/'neighborhoods_auto_complete.csv')
        auto = auto.drop('Unnamed: 0', axis=1)
        dfs = []
        # per city: take the union of autocomplete values and file values
        for cc in auto['city_code'].unique():
            if cc in df['city_code'].unique():
                nvals = pd.Series(auto[auto['city_code']==cc]['Value'].values)
                df_vals = pd.Series(df[df['city_code']==cc]['Neighborhood'].values)
                merged = pd.concat([nvals, df_vals]).drop_duplicates()
                mdf = merged.to_frame('Neighborhood').reset_index(drop=True)
                mdf['city_code'] = cc
                dfs.append(mdf)
            else:
                # city appears only in the autocomplete data
                nvals = auto[auto['city_code']==cc]['Value'].values
                merged = pd.Series(nvals).drop_duplicates()
                mdf = merged.to_frame('Neighborhood').reset_index(drop=True)
                mdf['city_code'] = cc
                dfs.append(mdf)
        df = pd.concat(dfs, axis=0)
        df = df.reset_index(drop=True)
        df = df.drop_duplicates()
    df['neighborhood_code'] = ''
    df['City'] = ''
    bycode = read_bycode_city_data(path)
    # map city_code -> Hebrew city name
    df['City'] = df['city_code'].map(bycode['NameHe'].to_dict())
    # assign a running 1..n neighborhood_code within each city
    grps = df.groupby('city_code').groups
    for cc, inds in grps.items():
        n = len(inds)
        df.loc[inds, 'neighborhood_code'] = np.arange(1, n+1)
    df = df[['city_code', 'City', 'neighborhood_code', 'Neighborhood']]
    # fix *:
    # NOTE(review): these str.replace calls rely on pandas' default
    # regex=True; patterns like '*', '(', ')' and "\\" are regex
    # metacharacters and raise on recent pandas versions -- presumably
    # this ran on a version that tolerated them; confirm, or pass
    # regex=False for the literal replacements.
    df['City'] = df['City'].str.replace('*', '')
    df['City'] = df['City'].str.replace('נוף הגליל', 'נצרת עילית')
    df['Neighborhood'] = df['Neighborhood'].str.replace('א טור', 'א-טור')
    df['Neighborhood'] = df['Neighborhood'].str.replace('א רם', 'א-רם')
    df['Neighborhood'] = df['Neighborhood'].str.replace("\\", " ")
    df['Neighborhood'] = df['Neighborhood'].str.replace('מרכז העיר - מזרח', 'מרכז העיר מזרח')
    df['Neighborhood'] = df['Neighborhood'].str.replace('בית צפפא, שרפאת', 'בית צפאפא')
    df['Neighborhood'] = df['Neighborhood'].str.replace('הר-החוצבים', 'אזור תעשייה הר החוצבים')
    df['Neighborhood'] = df['Neighborhood'].str.replace('שועפאט', 'שועפאת')
    df['Neighborhood'] = df['Neighborhood'].str.replace('וייסבורג שקולניק', 'ויסבורג שקולניק')
    df['Neighborhood'] = df['Neighborhood'].str.replace('אזור התעשייה הישן', 'אזור תעשייה הישן')
    df['Neighborhood'] = df['Neighborhood'].str.replace('2004', 'שכונה 2004')
    df['Neighborhood'] = df['Neighborhood'].str.replace('קריית בן צבי-רסקו', 'קרית בן צבירסקו')
    df['Neighborhood'] = df['Neighborhood'].str.replace('/', '')
    df['Neighborhood'] = df['Neighborhood'].str.replace('נאות שקמה', 'נאות שיקמה')
    df['Neighborhood'] = df['Neighborhood'].str.replace("מב''ת צפון", "מבת צפון")
    df['Neighborhood'] = df['Neighborhood'].str.replace('(', '')
    df['Neighborhood'] = df['Neighborhood'].str.replace(')', '')
    # # fix beer-sheva:
    # ns=["יא","ט","ו","ה","ד","ג","ב","א"]
    # for n in ns:
    #     ind = df.query('city_code==9000 & Neighborhood=="{}"'.format(n)).index
    #     df.loc[ind, 'Neighborhood'] = "שכונה" + " {}".format(n) + "'"
    # manual per-city overrides by (city_code, neighborhood_code):
    ind = df.query('city_code==3000 & neighborhood_code==123').index
    df.loc[ind, 'Neighborhood'] = "עיר דוד"
    ind = df.query('city_code==7100 & neighborhood_code==8').index
    df.loc[ind, 'Neighborhood'] = "האגמים"
    ind = df.query('city_code==7100 & neighborhood_code==33').index
    df.loc[ind, 'Neighborhood'] = "נווה ים ד"
    ind = df.query('city_code==7900 & neighborhood_code==50').index
    df.loc[ind, 'Neighborhood'] = "נווה עוז הירוקה"
    ind = df.query('city_code==8500 & neighborhood_code==12').index
    df.loc[ind, 'Neighborhood'] = "נאות יצחק רבין"
    ind = df.query('city_code==8500 & neighborhood_code==11').index
    df.loc[ind, 'Neighborhood'] = "העיר העתיקה"
    ind = df.query('city_code==8500 & neighborhood_code==28').index
    df.loc[ind, 'Neighborhood'] = "העיר העתיקה מזרח"
    ind = df.query('city_code==7800 & neighborhood_code==37').index
    df.loc[ind, 'Neighborhood'] = "נווה אבות"
    ind = df.query('city_code==7000 & neighborhood_code==30').index
    df.loc[ind, 'Neighborhood'] = "ורדה הרכבת"
    ind = df.query('city_code==7000 & neighborhood_code==31').index
    df.loc[ind, 'Neighborhood'] = "שער העיר"
    ind = df.query('city_code==2640 & neighborhood_code==24').index
    df.loc[ind, 'Neighborhood'] = "אזור תעשיה ראש העין"
    ind = df.query('city_code==2640 & neighborhood_code==28').index
    df.loc[ind, 'Neighborhood'] = "פארק תעשיה אפק"
    ind = df.query('city_code==1200 & neighborhood_code==15').index
    df.loc[ind, 'Neighborhood'] = "מורשת תכנון בעתיד "
    # ind = df.query('city_code==1139 & neighborhood_code==20').index
    # df.loc[ind, 'Neighborhood'] = "רמיה"
    ind = df.query('city_code==2630 & neighborhood_code==2').index
    df.loc[ind, 'Neighborhood'] = "כרמי גת"
    ind = df.query('city_code==2600 & neighborhood_code==15').index
    df.loc[ind, 'Neighborhood'] = "יעלים"
    ind = df.query('city_code==2560 & neighborhood_code==6').index
    df.loc[ind, 'Neighborhood'] = "מרכז מסחרי ב"
    # now add switch col:
    # NOTE(review): the semantics of switch=True for these hand-picked
    # pairs are not visible here -- presumably consumed downstream;
    # confirm at the call sites.
    df['switch'] = False
    city_neigh_list = [
        (6600, 16), (8300, 38), (70, 21), (70, 25), (3000, 29),
        (3000, 18), (9000, 4), (9000, 7), (9000, 10), (9000, 12),
        (9000, 13), (9000, 14), (9000, 16), (9000, 17), (9000, 11),
        (7100, 9), (7100, 20),(7900, 12), (7900, 14), (7900, 30),
        (8500, 11), (7700, 11), (7200, 30), (2100,10),(229, 10)]
    for cc, nc in city_neigh_list:
        ind = df.query('city_code=={} & neighborhood_code=={}'.format(cc, nc)).index
        df.loc[ind, 'switch'] = True
    df = df.dropna()
    return df
def neighborhoods_auto_complete_for_all_cities(path=work_david):
    """Collect autocomplete neighborhood tables for every city and stack them.

    Iterates the full city list (largest to smallest), queries the
    autocomplete endpoint once per city with a short random pause in
    between, and returns the concatenated DataFrame.
    """
    import pandas as pd
    cities = get_all_city_codes_from_largest_to_smallest(path)
    per_city_frames = []
    for code in cities.index:
        print('getting {} neighborhoods from autocomplete.'.format(code))
        city_frame = auto_complete_neighborhoods_for_one_city(
            cities, path=path, city_code=code)
        per_city_frames.append(city_frame)
        # be polite to the API between requests
        sleep_between(0.2, 0.35)
    return pd.concat(per_city_frames, axis=0)
def auto_complete_neighborhoods_for_one_city(city_df, path=work_david, city_code=8700):
    """Query the nadlan.gov.il autocomplete API for one city's neighborhoods.

    Looks up the city's Hebrew name in city_df, asks the autocomplete
    endpoint for its neighborhoods, and returns a DataFrame with
    Key/Value columns plus City and city_code columns.
    Raises ValueError on a non-200 HTTP response.
    """
    import requests
    name_he = city_df.loc[city_code]['NameHe']
    name_he = name_he.replace('-', ' ')
    search_term = '{} ,{}'.format('שכונה', name_he)
    url = 'https://www.nadlan.gov.il/TldSearch//api/AutoComplete?query={}&ids=16399'.format(search_term)
    response = requests.get(url)
    if response.status_code != 200:
        raise ValueError('couldnt get a response ({}).'.format(response.status_code))
    df = parse_autocomplete_neighbors(response.json())
    # strip the city name so that only the neighborhood remains in Value
    df['Value'] = df['Value'].str.replace(name_he, '')
    df['Value'] = df['Value'].str.strip()
    df['City'] = name_he
    df['city_code'] = city_code
    return df
def parse_autocomplete_neighbors(json):
    """Convert an autocomplete JSON response into a Key/Value DataFrame.

    Expects json['res']['NEIGHBORHOOD'] to be a list of records; the
    'Data' and 'Rank' fields are discarded.  When the neighborhood
    section (or the expected fields) are missing, a single blank
    Key/Value row is returned instead.
    """
    import pandas as pd
    try:
        out = pd.DataFrame(json['res']['NEIGHBORHOOD']).drop(['Data', 'Rank'], axis=1)
    except KeyError:
        # no neighborhoods in the response: single blank placeholder row
        out = pd.DataFrame(['', '']).T
    out.columns = ['Key', 'Value']
    return out
def process_one_page_from_neighborhoods_or_settelment_search(result_page, desc='neighborhood'):
    """Enrich one nadlan search-result page with coordinates, addresses and history.

    Takes one result page from post_nadlan_rest (specifically neighborhood
    or settlement searches), geocodes every deal row -- via the full
    address when present, otherwise recovering X/Y and an address from the
    GUSH (parcel) lookups -- and appends each row's historic deals when
    TREND_FORMAT indicates any exist.  Returns the page DataFrame with the
    historic-deal rows concatenated on.

    NOTE(review): the `desc` parameter is not used anywhere in this body.
    """
    import pandas as pd
    df = parse_one_json_nadlan_page_to_pandas(result_page)
    hdfs = []  # collected historic-deal frames, concatenated at the end
    for i, row in df.iterrows():
        full_addr = row['FULLADRESS'].strip()
        disp_addr = row['DISPLAYADRESS'].strip()
        keyvalue = row['KEYVALUE']
        # a non-empty TREND_FORMAT marks rows that have historic deals
        has_historic = row['TREND_FORMAT'] != ''
        if full_addr != '' and disp_addr != '':
            # address available: geocode it directly
            body = produce_nadlan_rest_request(*full_addr.split(','),
                                               desc_filter='address')
            sleep_between(0.25, 0.35)
            df_extra = parse_body_request_to_dataframe(body)
            # label-slice assignment: assumes the geocoding columns
            # ObjectID..Street are contiguous in both frames
            df.loc[i, 'ObjectID':'Street'] = df_extra.T[0]
        else:
            # no address: try to recover coordinates from the parcel (GUSH)
            try:
                x, y, parcel_id = get_XY_coords_using_GUSH(row['GUSH'])
                df.at[i, 'X'] = x
                df.at[i, 'Y'] = y
                df.at[i, 'ObjectID'] = parcel_id
                df.at[i, 'DescLayerID'] = 'XY_recovered'
                sleep_between(0.05, 0.1)
            except ValueError:
                print('No X, Y found for {}.'.format(row['GUSH']))
                # fall back to a dummy parcel id so the address lookup
                # below can still run (and fail gracefully)
                parcel_id = '0'
                pass
            try:
                df_address = get_address_using_PARCEL_ID(parcel_id)
                df.at[i, 'FULLADRESS'] = df_address['FULLADRESS']
                df.at[i, 'Street'] = df_address['Street']
                sleep_between(0.25, 0.35)
                df.at[i, 'DescLayerID'] = 'ADDR_V1_recovered'
            except ValueError:
                print('No address found for {}.'.format(row['GUSH']))
                pass
        # now get historic deals:
        if has_historic:
            try:
                result = post_nadlan_historic_deals(keyvalue)
                sleep_between(0.25, 0.35)
            except TypeError:
                # historic lookup failed for this row; skip it
                continue
            df_historic = parse_one_json_nadlan_page_to_pandas(result, historic=True)
            # copy the parent row's geocoding columns onto every historic row
            for ii, roww in df_historic.iterrows():
                df_historic.loc[ii, 'ObjectID':'Street'] = df.loc[i, 'ObjectID':'Street']
            hdfs.append(df_historic)
    # if no historic deals at all:
    if hdfs:
        hdf = pd.concat(hdfs, axis=0)
        df = pd.concat([df, hdf], axis=0)
    return df
def process_all_city_nadlan_neighborhood_search(savepath=work_david/'Nadlan_deals_by_neighborhood',
city_code=8700, ncode=None, path=work_david):
import os
from Migration_main import path_glob
import numpy as np
city_path = savepath / '{}'.format(city_code)
if not (city_path).is_dir():
os.mkdir(city_path)
print('{} was created.'.format(city_path))
ndf = read_neighborhood_city_file(path)
city_ndf = ndf[ndf['city_code'] == city_code]
ns = city_ndf['City'].size
city = city_ndf['City'].unique().item()
if ncode is None:
print('processing city {} with {} neighborhoods.'.format(city, ns))
try:
n_files = path_glob(
city_path, 'Nadlan_deals_city_*_neighborhood_*.csv'.format(city_code))
ns = [x.as_posix().split('/')[-1].split('.')[0].split('_')[-2]
for x in n_files]
ns = sorted([int(x) for x in ns])
curr_n = int(ns[-1]) + 1
print('last neighborhood found is {}, strating at {}.'.format(
| |
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_six_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_three_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='companies_list_item_image_alt_two_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_de',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_en_gb',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_es',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_fr',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_ja',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_pt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='opportunity_list_image_alt_zh_hans',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.', null=True),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image description must be provided.'),
),
migrations.AddField(
model_name='investhighpotentialopportunitydetailpage',
name='proposition_one_image_alt_ar',
field=models.TextField(blank=True, help_text='If this image adds extra information to the page that is not already provided by the text (e.g. if the image is a diagram, chart, or has text on it) then an image | |
<gh_stars>0
from isodate.isodatetime import parse_datetime
from isodate.isoerror import ISO8601Error
import uuid
from django.conf import settings
from . import get_agent_ifp, convert_to_datatype
from authorization import auth
from StatementValidator import StatementValidator
from ..models import Statement, Agent, Activity, ActivityState, ActivityProfile, AgentProfile
from ..exceptions import ParamConflict, ParamError, Forbidden, BadRequest, IDNotFoundError
ignore_rogue_params = getattr(settings, 'XAPI_IGNORE_ROGUE_PARAMS', False)
def check_for_existing_statementId(stmtID):
    """Return True if a Statement with the given statement id is already stored."""
    matching = Statement.objects.filter(statement_id=stmtID)
    return matching.exists()
def check_for_no_other_params_supplied(query_dict):
    """Return True when query_dict contains more than one entry.

    Used by the PUT handler to verify that the statement body carries
    something besides the injected statement id.  The original built a
    `supplied` flag and flipped it; a single comparison says the same thing.
    """
    return len(query_dict) > 1
# Extra agent validation for state and profile
def validate_oauth_for_documents(req_dict, endpoint):
    """Ensure the OAuth token's scope covers the agent named in a state/profile request.

    When the token does not carry the 'all' scope, the agent in the request
    params must be a member of the token holder's agent group, otherwise the
    request is rejected with Forbidden.  `endpoint` is only used in the error
    message.
    """
    ag = req_dict['params']['agent']
    token = req_dict['auth']['oauth_token']
    scopes = token.scope_to_list()
    if 'all' not in scopes:
        try:
            agent = Agent.objects.get(**ag)
        except Agent.DoesNotExist:
            # if agent DNE, profile/state scope should still be able to create one
            pass
        else:
            if agent not in req_dict['auth']['agent'].member.all():
                err_msg = "Agent for %s is out of scope" % endpoint
                raise Forbidden(err_msg)
def validate_void_statement(void_id):
    """Reject voiding of a statement that is already voided or is itself a voiding statement.

    A statement can only be voided once; re-voiding (unvoiding) is not
    allowed per the xAPI spec — the statement must be reissued under a new id.
    """
    matches = Statement.objects.filter(statement_id=void_id)
    total = len(matches)
    if total > 1:
        raise IDNotFoundError(
            "Something went wrong. %s statements found with id %s" % (total, void_id))
    if total == 1:
        target = matches[0]
        if target.voided:
            raise BadRequest(
                "Statement with ID: %s is already voided, cannot unvoid. Please re-issue the statement under a new ID." % void_id)
        if target.verb.verb_id == "http://adlnet.gov/expapi/verbs/voided":
            raise BadRequest(
                "Statement with ID: %s is a voiding statement and cannot be voided." % void_id)
def validate_body(body, auth, content_type):
    """Run server-side validation over each statement in the request body.

    The original used a list comprehension purely for its side effects and
    discarded the resulting list; a plain loop states the intent.
    """
    for stmt in body:
        server_validate_statement(stmt, auth, content_type)
def server_validate_statement(stmt, auth, content_type):
    """Server-side checks for a single incoming statement.

    Rejects duplicate statement ids, validates the target of a voiding
    statement, and checks attachment metadata against the Content-Type.
    """
    try:
        statement_id = stmt['id']
    except KeyError:
        pass
    else:
        if check_for_existing_statementId(statement_id):
            raise ParamConflict("A statement with ID %s already exists" % statement_id)

    if stmt['verb']['id'] == 'http://adlnet.gov/expapi/verbs/voided':
        validate_void_statement(stmt['object']['id'])

    if 'attachments' in stmt:
        validate_attachments(stmt['attachments'], content_type)
@auth
def statements_post(req_dict):
    """Validate a POST /statements request and return the (possibly updated) req_dict.

    POST /statements takes no query parameters; the body is validated with
    StatementValidator and then by the server-side checks in validate_body.
    """
    if req_dict['params'].keys() and not ignore_rogue_params:
        raise ParamError("The post statements request contained unexpected parameters: %s" % ", ".join(
            req_dict['params'].keys()))

    # BUG FIX: ParamError must be caught before the generic Exception handler;
    # in the original ordering the `except ParamError` clause was unreachable
    # and every ParamError was reported as a BadRequest.
    try:
        validator = StatementValidator(req_dict['body'])
        validator.validate()
    except ParamError as e:
        raise ParamError(e.message)
    except Exception as e:
        raise BadRequest(e.message)

    # Normalize a single statement object to a one-element list.
    if isinstance(req_dict['body'], dict):
        body = [req_dict['body']]
    else:
        body = req_dict['body']
    validate_body(body, req_dict['auth'], req_dict['headers']['CONTENT_TYPE'])

    return req_dict
@auth
def statements_more_get(req_dict):
    """Validate a GET /statements/more request; it only needs a more_id."""
    if 'more_id' in req_dict:
        return req_dict
    raise ParamError("Missing more_id while trying to hit /more endpoint")
def validate_statementId(req_dict):
    """Validate a statementId/voidedStatementId GET and return the id string.

    Exactly one of statementId / voidedStatementId may be supplied, only
    'format' and 'attachments' may accompany it, the statement must exist,
    the caller must be allowed to see it, and its voided flag must match the
    parameter used.
    """
    if 'statementId' in req_dict['params'] and 'voidedStatementId' in req_dict['params']:
        err_msg = "Cannot have both statementId and voidedStatementId in a GET request"
        raise ParamError(err_msg)
    elif 'statementId' in req_dict['params']:
        statementId = req_dict['params']['statementId']
        voided = False
    else:
        statementId = req_dict['params']['voidedStatementId']
        voided = True

    not_allowed = ["agent", "verb", "activity", "registration",
                   "related_activities", "related_agents", "since",
                   "until", "limit", "ascending"]
    bad_keys = set(not_allowed) & set(req_dict['params'].keys())
    if bad_keys:
        err_msg = "Cannot have %s in a GET request only 'format' and/or 'attachments' are allowed with 'statementId' and 'voidedStatementId'" % ', '.join(
            bad_keys)
        raise ParamError(err_msg)

    # Try to retrieve stmt, if DNE then return empty else return stmt info
    try:
        uuidId = uuid.UUID(str(statementId))
        st = Statement.objects.get(statement_id=uuidId)
    except Statement.DoesNotExist:
        err_msg = 'There is no statement associated with the id: %s' % statementId
        raise IDNotFoundError(err_msg)
    except ValueError:
        err_msg = 'Not a valid id for query: %s' % statementId
        raise BadRequest(err_msg)

    auth = req_dict.get('auth', None)
    mine_only = auth and 'statements_mine_only' in auth
    # BUG FIX: the original dereferenced auth['agent'] unconditionally, which
    # raised TypeError when no auth was attached to the request.
    if auth and auth['agent']:
        if mine_only and st.authority.id != auth['agent'].id:
            err_msg = "Incorrect permissions to view statements"
            raise Forbidden(err_msg)

    if st.voided != voided:
        if st.voided:
            err_msg = 'The requested statement (%s) is voided. Use the "voidedStatementId" parameter to retrieve your statement.' % statementId
        else:
            err_msg = 'The requested statement (%s) is not voided. Use the "statementId" parameter to retrieve your statement.' % statementId
        raise IDNotFoundError(err_msg)

    return statementId
def _normalize_boolean_param(params, name):
    """Coerce params[name] from 'true'/'false' (case-insensitive) to bool.

    Missing parameters default to False; any other value raises ParamError.
    Factored out of statements_get, which repeated this stanza four times.
    """
    if name in params:
        value = params[name].lower()
        if value == 'true':
            params[name] = True
        elif value == 'false':
            params[name] = False
        else:
            raise ParamError(
                "%s parameter was not a boolean value" % name)
    else:
        params[name] = False


def _validate_timestamp_param(params, name):
    """Raise ParamError unless params[name] (if present) parses as ISO8601."""
    if name in params:
        try:
            parse_datetime(params[name])
        except Exception:
            # parse_datetime raises ISO8601Error (an Exception subclass), so
            # the original `except (Exception, ISO8601Error)` was redundant.
            raise ParamError(
                "%s parameter was not a valid ISO8601 timestamp" % name)


@auth
def statements_get(req_dict):
    """Validate and normalize the query parameters of a GET /statements request.

    Rejects unknown parameters, validates the agent filter, defaults the
    format, resolves statementId/voidedStatementId lookups, checks the
    timestamp filters, coerces the boolean filters, and normalizes limit.
    Mutates req_dict['params'] in place and returns req_dict.
    """
    rogueparams = set(req_dict['params']) - set(["statementId", "voidedStatementId", "agent", "verb", "activity", "registration",
                                                 "related_activities", "related_agents", "since",
                                                 "until", "limit", "format", "attachments", "ascending"])
    if rogueparams and not ignore_rogue_params:
        raise ParamError(
            "The get statements request contained unexpected parameters: %s" % ", ".join(rogueparams))

    validator = StatementValidator()
    params = req_dict['params']

    if 'agent' in params:
        try:
            agent = convert_to_datatype(params['agent'])
            params['agent'] = agent
        except Exception:
            raise ParamError("agent param %s is not valid" % \
                params['agent'])
        validator.validate_agent(agent, "Agent param")

    formats = ['exact', 'canonical', 'ids']
    if 'format' in params:
        if params['format'] not in formats:
            raise ParamError("The format filter value (%s) was not one of the known values: %s" % (
                params['format'], ','.join(formats)))
    else:
        params['format'] = 'exact'

    # StatementId could be for voided statement as well
    if 'statementId' in params or 'voidedStatementId' in params:
        req_dict['statementId'] = validate_statementId(req_dict)

    _validate_timestamp_param(params, 'since')
    _validate_timestamp_param(params, 'until')

    _normalize_boolean_param(params, 'ascending')
    _normalize_boolean_param(params, 'related_agents')
    _normalize_boolean_param(params, 'related_activities')
    _normalize_boolean_param(params, 'attachments')

    if 'limit' in params:
        try:
            params['limit'] = int(params['limit'])
        except Exception:
            raise ParamError(
                "limit parameter was not a non-negative integer")
        else:
            if params['limit'] < 0:
                raise ParamError(
                    "limit parameter was not a non-negative integer")
    else:
        # 0 means "no limit" downstream.
        params['limit'] = 0

    if 'registration' in params:
        validator.validate_uuid(params['registration'], "Registration param")

    if 'verb' in params:
        validator.validate_iri(
            params['verb'], "verb param")
    return req_dict
@auth
def statements_put(req_dict):
    """Validate a PUT /statements request and return the (possibly updated) req_dict.

    statementId must be given as a query parameter; if the body also carries
    an id the two must match, otherwise the param id is injected into the body.
    """
    # Find any unexpected parameters
    rogueparams = set(req_dict['params']) - set(["statementId"])
    if rogueparams and not ignore_rogue_params:
        raise ParamError(
            "The put statements request contained unexpected parameters: %s" % ", ".join(rogueparams))

    # Statement id must be supplied in query param. If in the body too, it
    # must be the same
    if 'statementId' not in req_dict['params']:
        raise ParamError(
            "Error -- statements - method = %s, but no statementId parameter or ID given in statement" % req_dict['method'])
    statement_id = req_dict['params']['statementId']

    # Try to get id if in body
    try:
        statement_body_id = req_dict['body']['id']
    except Exception:
        statement_body_id = None

    # If ids exist in both places, check if they are equal
    if statement_body_id and statement_id != statement_body_id:
        raise ParamError(
            "Error -- statements - method = %s, param and body ID both given, but do not match" % req_dict['method'])

    # Set id inside of statement with param id
    if not statement_body_id:
        req_dict['body']['id'] = statement_id

    # If there are no other params-raise param error since nothing else is
    # supplied
    if not check_for_no_other_params_supplied(req_dict['body']):
        raise ParamError("No other params are supplied with statementId.")

    # Validate statement in body.
    # BUG FIX: ParamError must be caught before the generic Exception handler;
    # in the original ordering the `except ParamError` clause was unreachable.
    try:
        validator = StatementValidator(req_dict['body'])
        validator.validate()
    except ParamError as e:
        raise ParamError(e.message)
    except Exception as e:
        raise BadRequest(e.message)

    validate_body([req_dict['body']], req_dict['auth'], req_dict['headers']['CONTENT_TYPE'])
    return req_dict
def validate_attachments(attachment_data, content_type):
    """Check statement attachment metadata against the request Content-Type.

    multipart/mixed requests are accepted as-is; application/json requests
    must reference every attachment by fileUrl; all other content types are
    rejected.
    """
    if "multipart/mixed" in content_type:
        return
    if content_type != "application/json":
        raise BadRequest(
            'Invalid Content-Type %s when sending statements with attachments' % content_type)
    for attachment in attachment_data:
        if 'fileUrl' not in attachment:
            raise BadRequest(
                "When sending statements with attachments as 'application/json', you must include fileUrl field")
@auth
def activity_state_post(req_dict):
rogueparams = set(req_dict['params']) - \
set(["activityId", "agent", "stateId", "registration"])
if rogueparams and not ignore_rogue_params:
raise ParamError(
"The post activity state request contained unexpected parameters: %s" % ", ".join(rogueparams))
validator = StatementValidator()
if 'activityId' in req_dict['params']:
validator.validate_iri(
req_dict['params']['activityId'], "activityId param for activity state")
else:
err_msg = "Error -- activity_state - method = %s, but activityId parameter is missing.." % req_dict[
'method']
raise ParamError(err_msg)
if 'stateId' not in req_dict['params']:
err_msg = "Error -- activity_state - method = %s, but stateId parameter is missing.." % req_dict[
'method']
raise ParamError(err_msg)
if 'registration' in req_dict['params']:
validator.validate_uuid(
req_dict['params']['registration'], "registration param for activity state")
if 'agent' in req_dict['params']:
try:
agent = convert_to_datatype(req_dict['params']['agent'])
req_dict['params']['agent'] = agent
except Exception:
raise ParamError("agent param %s is not | |
0 * 0 NIL <かな漢字><ひらがな><付属>
思い おもい 思う 動詞 2 * 0 子音動詞ワ行 12 基本連用形 8 "代表表記:思う/おもう 補文ト" <代表表記:思う/おもう><補文ト><正規化代表表記:思う/おもう><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
こま こま こむ 動詞 2 * 0 子音動詞マ行 9 未然形 3 "代表表記:込む/こむ 付属動詞候補(基本) 反義:動詞:空く/すく" <代表表記:込む/こむ><付属動詞候補(基本)><反義:動詞:空く/すく><正規化代表表記:込む/こむ><かな漢字><ひらがな><活用語><付属>
ないで ないで ぬ 助動詞 5 * 0 助動詞ぬ型 27 タ系連用テ形 9 NIL <かな漢字><ひらがな><活用語><否定><付属>
ください ください くださる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><表現文末><かな漢字><ひらがな><活用語><付属>
。 。 。 特殊 1 句点 1 * 0 * 0 NIL <文末><英記号><記号><付属>
"""[1:-1].split('\n') #初めの改行をカット
actual = replace_lib.replace_with_antonym_pairs(token_lines, antonym_pairs)
self.assertEquals(actual, expected)
def test_remove_negation_from_suruna0(self):
    """Prohibitive final-particle na: 'aruku na' becomes volitional 'aruki-mashou'."""
    token_lines = ['歩く あるく 歩く 動詞 2 * 0 子音動詞カ行 2 基本形 2 "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>', 'な な な 助詞 9 終助詞 4 * 0 * 0 NIL <文末><表現文末><かな漢字><ひらがな><付属>']
    actual = replace_lib.remove_negation_from_suruna(token_lines)
    expected = ['歩き あるき 歩く 動詞 * * * 子音動詞カ行 * 基本連用形 * "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>', 'ましょう ましょう ます 接尾辞 14 動詞性接尾辞 7 動詞性接尾辞ます型 31 意志形 4 "代表表記:ます/ます"']
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_ikemasen0(self):
    """Prohibition '-te wa ikemasen' becomes obligation '-nakereba ikemasen'."""
    token_lines = """
歩いて あるいて 歩く 動詞 2 * 0 子音動詞カ行 2 タ系連用テ形 14 "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
は は は 助詞 9 副助詞 2 * 0 * 0 NIL <かな漢字><ひらがな><付属>
いけ いけ いける 動詞 2 * 0 母音動詞 1 基本連用形 8 "代表表記:生ける/いける ドメイン:文化・芸術" <代表表記:生ける/いける><ドメイン:文化・芸術><正規化代表表記:生ける/いける?行ける/いける><品曖><ALT-いけ-いけ-いける-2-0-1-8-"代表表記:行ける/いける 可能動詞:行く/いく 付属動詞候補(タ系)"><付属動詞候補(タ系)><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><付属>
ませ ませ ます 接尾辞 14 動詞性接尾辞 7 動詞性接尾辞ます型 31 未然形 3 "代表表記:ます/ます" <代表表記:ます/ます><正規化代表表記:ます/ます><かな漢字><ひらがな><活用語><付属>
ん ん ぬ 助動詞 5 * 0 助動詞ぬ型 27 音便基本形 12 NIL <文末><表現文末><かな漢字><ひらがな><活用語><否定><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_ikemasen(token_lines)
    expected = """
歩か あるか 歩く 動詞 * * * 子音動詞カ行 * 未然形 * "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
なければ なければ ない 接尾辞 14 形容詞性述語接尾辞 5 イ形容詞アウオ段 18 基本条件形 6 "代表表記:ない/ない"
いけ いけ いける 動詞 2 * 0 母音動詞 1 基本連用形 8 "代表表記:生ける/いける ドメイン:文化・芸術" <代表表記:生ける/いける><ドメイン:文化・芸術><正規化代表表記:生ける/いける?行ける/いける><品曖><ALT-いけ-いけ-いける-2-0-1-8-"代表表記:行ける/いける 可能動詞:行く/いく 付属動詞候補(タ系)"><付属動詞候補(タ系)><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><付属>
ませ ませ ます 接尾辞 14 動詞性接尾辞 7 動詞性接尾辞ます型 31 未然形 3 "代表表記:ます/ます" <代表表記:ます/ます><正規化代表表記:ます/ます><かな漢字><ひらがな><活用語><付属>
ん ん ぬ 助動詞 5 * 0 助動詞ぬ型 27 音便基本形 12 NIL <文末><表現文末><かな漢字><ひらがな><活用語><否定><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_ikemasen1(self):
    """'-te wa naranai' variant also rewrites to the '-nakereba' obligation form."""
    token_lines = """
忘れて わすれて 忘れる 動詞 2 * 0 母音動詞 1 タ系連用テ形 14 "代表表記:忘れる/わすれる 付属動詞候補(基本) 反義:動詞:覚える/おぼえる" <代表表記:忘れる/わすれる><付属動詞候補(基本)><反義:動詞:覚える/おぼえる><正規化代表表記:忘れる/わすれる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
は は は 助詞 9 副助詞 2 * 0 * 0 NIL <かな漢字><ひらがな><付属>
なら なら なる 動詞 2 * 0 子音動詞ラ行 10 未然形 3 "代表表記:成る/なる 自他動詞:他:成す/なす;他:する/する" <代表表記:成る/なる><自他動詞:他:鳴らす/ならす><正規化代表表記:成る/なる?鳴る/なる><品曖><ALT-なら-なら-なる-2-0-10-3-"代表表記:鳴る/なる 自他動詞:他:鳴らす/ならす"><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><付属>
ない ない ない 接尾辞 14 形容詞性述語接尾辞 5 イ形容詞アウオ段 18 基本形 2 "代表表記:ない/ない" <代表表記:ない/ない><正規化代表表記:ない/ない><文末><表現文末><かな漢字><ひらがな><活用語><否定><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_ikemasen(token_lines)
    expected = """
忘れ わすれ 忘れる 動詞 * * * 母音動詞 * 未然形 * "代表表記:忘れる/わすれる 付属動詞候補(基本) 反義:動詞:覚える/おぼえる" <代表表記:忘れる/わすれる><付属動詞候補(基本)><反義:動詞:覚える/おぼえる><正規化代表表記:忘れる/わすれる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
なければ なければ ない 接尾辞 14 形容詞性述語接尾辞 5 イ形容詞アウオ段 18 基本条件形 6 "代表表記:ない/ない"
なら なら なる 動詞 2 * 0 子音動詞ラ行 10 未然形 3 "代表表記:成る/なる 自他動詞:他:成す/なす;他:する/する" <代表表記:成る/なる><自他動詞:他:鳴らす/ならす><正規化代表表記:成る/なる?鳴る/なる><品曖><ALT-なら-なら-なる-2-0-10-3-"代表表記:鳴る/なる 自他動詞:他:鳴らす/ならす"><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><付属>
ない ない ない 接尾辞 14 形容詞性述語接尾辞 5 イ形容詞アウオ段 18 基本形 2 "代表表記:ない/ない" <代表表記:ない/ない><正規化代表表記:ない/ない><文末><表現文末><かな漢字><ひらがな><活用語><否定><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_ikemasen2(self):
    """Adjective form 'ikenai' after '-te wa' also triggers the rewrite."""
    token_lines = """
走って はしって 走る 動詞 2 * 0 子音動詞ラ行 10 タ系連用テ形 14 "代表表記:走る/はしる" <代表表記:走る/はしる><正規化代表表記:走る/はしる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
は は は 助詞 9 副助詞 2 * 0 * 0 NIL <かな漢字><ひらがな><付属>
いけない いけない いけない 形容詞 3 * 0 イ形容詞アウオ段 18 基本形 2 "代表表記:いけない/いけない" <代表表記:いけない/いけない><正規化代表表記:いけない/いけない><文末><表現文末><かな漢字><ひらがな><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_ikemasen(token_lines)
    expected = """
走ら はしら 走る 動詞 * * * 子音動詞ラ行 * 未然形 * "代表表記:走る/はしる" <代表表記:走る/はしる><正規化代表表記:走る/はしる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
なければ なければ ない 接尾辞 14 形容詞性述語接尾辞 5 イ形容詞アウオ段 18 基本条件形 6 "代表表記:ない/ない"
いけない いけない いけない 形容詞 3 * 0 イ形容詞アウオ段 18 基本形 2 "代表表記:いけない/いけない" <代表表記:いけない/いけない><正規化代表表記:いけない/いけない><文末><表現文末><かな漢字><ひらがな><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_naide_kudasai0(self):
    """Negative request '-naide kudasai' becomes affirmative '-te kudasai'."""
    token_lines = """
歩か あるか 歩く 動詞 2 * 0 子音動詞カ行 2 未然形 3 "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
ないで ないで ぬ 助動詞 5 * 0 助動詞ぬ型 27 タ系連用テ形 9 NIL <かな漢字><ひらがな><活用語><否定><付属>
下さい ください 下さる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_naide_kudasai(token_lines)
    expected = """
歩いて あるいて 歩く 動詞 * * * 子音動詞カ行 * タ系連用テ形 * "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
下さい ください 下さる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_naide_kudasai1(self):
    """'-naide' followed by a final particle (no kudasai) still becomes '-te'."""
    token_lines = """
走ら はしら 走る 動詞 2 * 0 子音動詞ラ行 10 未然形 3 "代表表記:走る/はしる" <代表表記:走る/はしる><正規化代表表記:走る/はしる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
ないで ないで ぬ 助動詞 5 * 0 助動詞ぬ型 27 タ系連用テ形 9 NIL <かな漢字><ひらがな><活用語><否定><付属>
ね ね ね 助詞 9 終助詞 4 * 0 * 0 NIL <文末><表現文末><かな漢字><ひらがな><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_naide_kudasai(token_lines)
    expected = ['走って はしって 走る 動詞 * * * 子音動詞ラ行 * タ系連用テ形 * "代表表記:走る/はしる" <代表表記:走る/はしる><正規化代表表記:走る/はしる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>', 'ね ね ね 助詞 9 終助詞 4 * 0 * 0 NIL <文末><表現文末><かな漢字><ひらがな><付属>']
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_go_naranaide0(self):
    """Honorific 'o-...-ni naranaide kudasai' becomes 'o-...-ni natte kudasai'."""
    token_lines = """
お お お 接頭辞 13 名詞接頭辞 1 * 0 * 0 "代表表記:御/お" <代表表記:御/お><正規化代表表記:御/お><文頭><かな漢字><ひらがな><接頭><非独立接頭辞><タグ単位始><文節始>
使い つかい 使う 動詞 2 * 0 子音動詞ワ行 12 基本連用形 8 "代表表記:使う/つかう" <代表表記:使う/つかう><正規化代表表記:使う/つかう><かな漢字><活用語><自立><内容語><文節主辞>
に に に 助詞 9 格助詞 1 * 0 * 0 NIL <かな漢字><ひらがな><付属>
なら なら なる 動詞 2 * 0 子音動詞ラ行 10 未然形 3 "代表表記:成る/なる 自他動詞:他:成す/なす;他:する/する" <代表表記:成る/なる><自他動詞:他:鳴らす/ならす><正規化代表表記:成る/なる?鳴る/なる><品曖><ALT-なら-なら-なる-2-0-10-3-"代表表記:鳴る/なる 自他動詞:他:鳴らす/ならす"><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><付属>
ないで ないで ぬ 助動詞 5 * 0 助動詞ぬ型 27 タ系連用テ形 9 NIL <かな漢字><ひらがな><活用語><否定><付属>
下さい ください 下さる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><表現文末><かな漢字><活用語><付属>
。 。 。 特殊 1 句点 1 * 0 * 0 NIL <文末><英記号><記号><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_go_naranaide(token_lines)
    expected = """
お お お 接頭辞 13 名詞接頭辞 1 * 0 * 0 "代表表記:御/お" <代表表記:御/お><正規化代表表記:御/お><文頭><かな漢字><ひらがな><接頭><非独立接頭辞><タグ単位始><文節始>
使い つかい 使う 動詞 2 * 0 子音動詞ワ行 12 基本連用形 8 "代表表記:使う/つかう" <代表表記:使う/つかう><正規化代表表記:使う/つかう><かな漢字><活用語><自立><内容語><文節主辞>
に に に 助詞 9 格助詞 1 * 0 * 0 NIL <かな漢字><ひらがな><付属>
なって なって なる 動詞 * * * 子音動詞ラ行 * タ系連用テ形 * "代表表記:成る/なる 自他動詞:他:成す/なす;他:する/する" <代表表記:成る/なる><自他動詞:他:鳴らす/ならす><正規化代表表記:成る/なる?鳴る/なる><品曖><ALT-なら-なら-なる-2-0-10-3-"代表表記:鳴る/なる 自他動詞:他:鳴らす/ならす"><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><付属>
下さい ください 下さる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><表現文末><かな漢字><活用語><付属>
。 。 。 特殊 1 句点 1 * 0 * 0 NIL <文末><英記号><記号><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_go_naranaide1(self):
    """Variant with an intervening 'wa' and nominalized 'tsukai' is handled too."""
    token_lines = """
お お お 接頭辞 13 名詞接頭辞 1 * 0 * 0 "代表表記:御/お" <代表表記:御/お><正規化代表表記:御/お><文頭><かな漢字><ひらがな><接頭><非独立接頭辞><タグ単位始><文節始>
使い つかい 使い 名詞 6 普通名詞 1 * 0 * 0 "代表表記:使い/つかいv 代表表記変更:使う/つかう 品詞変更:使い-つかい-使う-2-0-12-8" <代表表記:使い/つかいv><正規化代表表記:使い/つかいv><かな漢字><品詞変更:使い-つかい-使う-2-0-12-8-"代表表記:使う/つかう"><代表表記変更:使う/つかう><名詞相当語><自立><内容語><文節主辞><係:ニ格>
に に に 助詞 9 格助詞 1 * 0 * 0 NIL <かな漢字><ひらがな><付属>
は は は 助詞 9 副助詞 2 * 0 * 0 NIL <かな漢字><ひらがな><付属>
なら なら なる 動詞 2 * 0 子音動詞ラ行 10 未然形 3 "代表表記:成る/なる 自他動詞:他:成す/なす;他:する/する" <代表表記:成る/なる><自他動詞:他:鳴らす/ならす><正規化代表表記:成る/なる?鳴る/なる><品曖><ALT-なら-なら-なる-2-0-10-3-"代表表記:鳴る/なる 自他動詞:他:鳴らす/ならす"><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><自立><内容語><タグ単位始><文節始><文節主辞><用言曖昧性解消>
ないで ないで ぬ 助動詞 5 * 0 助動詞ぬ型 27 タ系連用テ形 9 NIL <かな漢字><ひらがな><活用語><否定><付属>
ください ください くださる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><ひらがな><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_go_naranaide(token_lines)
    expected = """
お お お 接頭辞 13 名詞接頭辞 1 * 0 * 0 "代表表記:御/お" <代表表記:御/お><正規化代表表記:御/お><文頭><かな漢字><ひらがな><接頭><非独立接頭辞><タグ単位始><文節始>
使い つかい 使い 名詞 6 普通名詞 1 * 0 * 0 "代表表記:使い/つかいv 代表表記変更:使う/つかう 品詞変更:使い-つかい-使う-2-0-12-8" <代表表記:使い/つかいv><正規化代表表記:使い/つかいv><かな漢字><品詞変更:使い-つかい-使う-2-0-12-8-"代表表記:使う/つかう"><代表表記変更:使う/つかう><名詞相当語><自立><内容語><文節主辞><係:ニ格>
に に に 助詞 9 格助詞 1 * 0 * 0 NIL <かな漢字><ひらがな><付属>
なって なって なる 動詞 * * * 子音動詞ラ行 * タ系連用テ形 * "代表表記:成る/なる 自他動詞:他:成す/なす;他:する/する" <代表表記:成る/なる><自他動詞:他:鳴らす/ならす><正規化代表表記:成る/なる?鳴る/なる><品曖><ALT-なら-なら-なる-2-0-10-3-"代表表記:鳴る/なる 自他動詞:他:鳴らす/ならす"><品曖-動詞><原形曖昧><かな漢字><ひらがな><活用語><自立><内容語><タグ単位始><文節始><文節主辞><用言曖昧性解消>
ください ください くださる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><ひらがな><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_banning0(self):
    """Banning '-naide kudasai' becomes the plain request '-te kudasai'."""
    token_lines = """
覚え おぼえ 覚える 動詞 2 * 0 母音動詞 1 未然形 3 "代表表記:覚える/おぼえる 反義:動詞:忘れる/わすれる" <代表表記:覚える/おぼえる><反義:動詞:忘れる/わすれる><正規化代表表記:覚える/おぼえる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
ないで ないで ぬ 助動詞 5 * 0 助動詞ぬ型 27 タ系連用テ形 9 NIL <かな漢字><ひらがな><活用語><否定><付属>
下さい ください 下さる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_banning(token_lines)
    expected = """
覚えて おぼえて 覚える 動詞 * * * 母音動詞 * タ系連用テ形 * "代表表記:覚える/おぼえる 反義:動詞:忘れる/わすれる" <代表表記:覚える/おぼえる><反義:動詞:忘れる/わすれる><正規化代表表記:覚える/おぼえる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
下さい ください 下さる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_remove_negation_from_banning1(self):
    """'-nai you ni shite kudasai' drops the negation, keeping 'you ni shite kudasai'."""
    token_lines = """
忘れ わすれ 忘れる 動詞 2 * 0 母音動詞 1 未然形 3 "代表表記:忘れる/わすれる 付属動詞候補(基本) 反義:動詞:覚える/おぼえる" <代表表記:忘れる/わすれる><付属動詞候補(基本)><反義:動詞:覚える/おぼえる><正規化代表表記:忘れる/わすれる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
ない ない ない 接尾辞 14 形容詞性述語接尾辞 5 イ形容詞アウオ段 18 基本形 2 "代表表記:ない/ない" <代表表記:ない/ない><正規化代表表記:ない/ない><かな漢字><ひらがな><活用語><否定><付属>
ように ように ようだ 助動詞 5 * 0 ナ形容詞 21 ダ列基本連用形 7 NIL <かな漢字><ひらがな><活用語><付属>
して して する 接尾辞 14 動詞性接尾辞 7 サ変動詞 16 タ系連用テ形 14 "代表表記:する/する" <代表表記:する/する><正規化代表表記:する/する><かな漢字><ひらがな><活用語><付属>
ください ください くださる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><ひらがな><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    actual = replace_lib.remove_negation_from_banning(token_lines)
    expected = """
忘れる わすれる 忘れる 動詞 * * * 母音動詞 * 基本形 * "代表表記:忘れる/わすれる 付属動詞候補(基本) 反義:動詞:覚える/おぼえる" <代表表記:忘れる/わすれる><付属動詞候補(基本)><反義:動詞:覚える/おぼえる><正規化代表表記:忘れる/わすれる><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>
ように ように ようだ 助動詞 5 * 0 ナ形容詞 21 ダ列基本連用形 7 NIL <かな漢字><ひらがな><活用語><付属>
して して する 接尾辞 14 動詞性接尾辞 7 サ変動詞 16 タ系連用テ形 14 "代表表記:する/する" <代表表記:する/する><正規化代表表記:する/する><かな漢字><ひらがな><活用語><付属>
ください ください くださる 接尾辞 14 動詞性接尾辞 7 子音動詞ラ行イ形 11 命令形 6 "代表表記:下さる/くださる" <代表表記:下さる/くださる><正規化代表表記:下さる/くださる><文末><表現文末><かな漢字><ひらがな><活用語><付属>
"""[1:-1].split('\n')  # [1:-1] trims the newlines added by the triple-quoted literal
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_change_katuyou0(self):
    """Re-inflect plain form 'aruku' into the continuative 'aruki'."""
    token_line = '歩く あるく 歩く 動詞 2 * 0 子音動詞カ行 2 基本形 2 "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>'
    katuyou = '基本連用形'
    actual = replace_lib.change_katuyou(token_line, katuyou)
    expected = '歩き あるき 歩く 動詞 * * * 子音動詞カ行 * 基本連用形 * "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>'
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual(actual, expected)
def test_change_katuyou1(self):
    """Re-inflect te-form 'aruite' into the continuative 'aruki'."""
    token_line = '歩いて あるいて 歩く 動詞 2 * 0 子音動詞カ行 2 タ系連用テ形 14 "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><文末><表現文末><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>'
    katuyou = '基本連用形'
    actual = replace_lib.change_katuyou(token_line, katuyou)
    expected = '歩き あるき 歩く 動詞 * * * 子音動詞カ行 * 基本連用形 * "代表表記:歩く/あるく" <代表表記:歩く/あるく><正規化代表表記:歩く/あるく><文頭><文末><表現文末><かな漢字><活用語><自立><内容語><タグ単位始><文節始><文節主辞>'
    # assertEquals is a deprecated alias (removed in Python 3.12)
    self.assertEqual([actual], [expected])
def test_change_katuyou2(self):
token_line = '覚え おぼえ 覚える 動詞 * * * 母音動詞 * 未然形 * "代表表記:覚える/おぼえる 反義:動詞:忘れる/わすれる"'
katuyou = '基本連用形'
actual | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import functools
import numpy as np
import torch
import torch.nn as nn
import torch._utils
import torch.nn.functional as F
# Momentum shared by every BatchNorm layer in this backbone.
BN_MOMENTUM = 0.1
# Module-level logger; handlers/levels are configured by the application.
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution with padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with a residual shortcut (ResNet basic block).

    `downsample`, when given, projects the input so it can be added to the
    main path; output channels equal `planes` (expansion = 1).
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main branch: conv-bn-relu, conv-bn.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + shortcut
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck; output channels = planes * expansion (4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity unless a projection was supplied.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main branch: reduce (1x1), transform (3x3), expand (1x1).
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y = y + shortcut
        return self.relu(y)
class HighResolutionModule(nn.Module):
    """One HRNet stage: parallel per-resolution branches followed by a fuse step.

    Each branch processes one resolution; the fuse layers exchange
    information between resolutions (upsampling lower-resolution features,
    strided-downsampling higher-resolution ones) before summation.
    """

    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)

        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches

        # When False, only the highest-resolution output is produced.
        self.multi_scale_output = multi_scale_output

        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(False)

    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        # Fail fast when the per-branch config lists disagree in length.
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Build one branch as a sequence of residual blocks at a single resolution."""
        downsample = None
        # A projection shortcut is needed when stride or channel count changes.
        if stride != 1 or \
           self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.num_inchannels[branch_index],
                          num_channels[branch_index] * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(num_channels[branch_index] * block.expansion,
                               momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.num_inchannels[branch_index],
                            num_channels[branch_index], stride, downsample))
        # NOTE: intentionally mutates self.num_inchannels so the remaining
        # blocks and the fuse layers see the post-expansion channel count.
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(block(self.num_inchannels[branch_index],
                                num_channels[branch_index]))

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        branches = []
        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels))

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build fuse_layers[i][j]: maps branch j's output to branch i's resolution/channels."""
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # j is lower resolution than i: align channels with a 1x1
                    # conv, then nearest-neighbour upsample by 2**(j-i).
                    fuse_layer.append(nn.Sequential(
                        nn.Conv2d(num_inchannels[j],
                                  num_inchannels[i],
                                  1,
                                  1,
                                  0,
                                  bias=False),
                        nn.BatchNorm2d(num_inchannels[i],
                                       momentum=BN_MOMENTUM),
                        nn.Upsample(scale_factor=2**(j-i), mode='nearest')))
                elif j == i:
                    # Same resolution: added directly in forward(); no module.
                    fuse_layer.append(None)
                else:
                    # j is higher resolution than i: chain (i-j) stride-2 3x3
                    # convs; only the last one switches to branch i's channels
                    # and omits the ReLU.
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3,
                                               momentum=BN_MOMENTUM)))
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(nn.Sequential(
                                nn.Conv2d(num_inchannels[j],
                                          num_outchannels_conv3x3,
                                          3, 2, 1, bias=False),
                                nn.BatchNorm2d(num_outchannels_conv3x3,
                                               momentum=BN_MOMENTUM),
                                nn.ReLU(False)))
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        # Channel counts after branch construction (post block expansion).
        return self.num_inchannels

    def forward(self, x):
        """x is a list of per-branch tensors; it is mutated in place below."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []
        # Each output i is the ReLU of the sum of every branch j mapped to
        # resolution i through fuse_layers[i][j].
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))

        return x_fuse
# Maps the BLOCK name from the model config to its implementing class.
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class ConvBlock(nn.Module):
    """Conv2d followed by BatchNorm2d, no activation; conv gets Kaiming init."""

    def __init__(self, in_c, out_c, k, s=1, p=0):
        super(ConvBlock, self).__init__()
        conv = nn.Conv2d(in_c, out_c, k, stride=s, padding=p)
        conv.apply(weights_init_kaiming)
        self.conv = conv
        self.bn = nn.BatchNorm2d(out_c)

    def forward(self, x):
        y = self.conv(x)
        return self.bn(y)
class SpatialAttn(nn.Module):
    """Spatial attention head: stride-2 conv + ReLU, bilinear resize back,
    1x1 scaling conv, sigmoid -> single-channel attention map in [0, 1].
    """

    def __init__(self):
        super(SpatialAttn, self).__init__()
        self.conv1 = ConvBlock(256, 1, 3, s=2, p=1)
        self.conv2 = ConvBlock(1, 1, 1)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        # bilinear resizing back to the pre-stride spatial size.
        # FIX: F.upsample is deprecated; F.interpolate is the supported
        # equivalent with identical semantics for these arguments.
        x = F.interpolate(x, (x.size(2)*2, x.size(3)*2), mode='bilinear', align_corners=True)
        # scaling conv
        x = self.conv2(x)
        x = torch.sigmoid(x)
        return x
def weights_init_kaiming(m):
    """Kaiming-initialize a module in place; intended for Module.apply().

    Linear: kaiming fan_out weights, zero bias. Conv: kaiming fan_in weights,
    zero bias. BatchNorm (affine): weight 1, bias 0. Other modules untouched.

    FIX: the Linear branch now guards against `bias=False` layers — the Conv
    branch already checked `m.bias is not None`, but the Linear branch crashed
    on bias-free layers with `nn.init.constant_(None, ...)`.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
class HighResolutionNet(nn.Module):
    def __init__(self, cfg, **kwargs):
        """Build the HRNet backbone described by ``cfg``.

        ``cfg`` supplies the stage layouts under MODEL.EXTRA.STAGE2..STAGE4,
        plus MODEL.IF_BIGG and CLUSTERING.PART_NUM used by the heads.
        """
        super(HighResolutionNet, self).__init__()
        # Stem: two stride-2 3x3 convs bring the input to 1/4 resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Stage 1: 4 Bottleneck blocks; its 256 output channels feed transition1.
        self.layer1 = self._make_layer(Bottleneck, 64, 64, 4)
        # Stage 2: adapt channels per branch, then run the stage modules.
        self.stage2_cfg = cfg['MODEL']['EXTRA']['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition1 = self._make_transition_layer(
            [256], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)
        # Stage 3.
        self.stage3_cfg = cfg['MODEL']['EXTRA']['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)
        # Stage 4.
        self.stage4_cfg = cfg['MODEL']['EXTRA']['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=True)
        # Only the channel-increase heads are kept; the downsampling and
        # final layers returned by _make_head are discarded here.
        self.incre_modules, _, _= self._make_head(pre_stage_channels)
        # 1920 = 128 + 256 + 512 + 1024, the concatenated incre-module widths.
        self.cls_head = nn.Sequential(
            nn.Conv2d(
                in_channels=1920,
                out_channels=256,
                kernel_size=1,
                stride=1,
                padding=0),
            nn.BatchNorm2d(256, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        self.bigG = cfg.MODEL.IF_BIGG
        self.gap = nn.AdaptiveAvgPool2d(1)
        # Part classifier: one 1x1-conv logit map per clustering part.
        self.part_num = cfg.CLUSTERING.PART_NUM
        self.part_cls_layer = nn.Conv2d(in_channels=256,
                                        out_channels=self.part_num,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)
        self.spatial_attn = SpatialAttn()
def _make_incre_channel_nin(self):
head_channels = [128, 256, 512, 1024]
incre_modules = []
for i in range(3):
incre_module = nn.Sequential(
nn.Conv2d(
in_channels=head_channels[i],
out_channels=head_channels[i+1],
kernel_size=1,
stride=1,
padding=0),
nn.BatchNorm2d(head_channels[i+1], momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
incre_modules.append(incre_module)
incre_modules = nn.ModuleList(incre_modules)
return incre_modules
    def _make_head(self, pre_stage_channels):
        """Build the HRNet classification-style head for the stage-4 branches.

        :param pre_stage_channels: channel counts of the stage-4 outputs.
        :return: (incre_modules, downsamp_modules, final_layer). Note the
            caller in __init__ keeps only ``incre_modules``.
        """
        head_block = Bottleneck
        head_channels = [32, 64, 128, 256]
        # Increasing the #channels on each resolution
        # from C, 2C, 4C, 8C to 128, 256, 512, 1024
        incre_modules = []
        for i, channels in enumerate(pre_stage_channels):
            incre_module = self._make_layer(head_block,
                                            channels,
                                            head_channels[i],
                                            1,
                                            stride=1)
            incre_modules.append(incre_module)
        incre_modules = nn.ModuleList(incre_modules)
        # downsampling modules
        downsamp_modules = []
        for i in range(len(pre_stage_channels)-1):
            # head_block expands channels by its ``expansion`` factor.
            in_channels = head_channels[i] * head_block.expansion
            out_channels = head_channels[i+1] * head_block.expansion
            downsamp_module = nn.Sequential(
                nn.Conv2d(in_channels=in_channels,
                          out_channels=out_channels,
                          kernel_size=3,
                          stride=2,
                          padding=1),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)
            )
            downsamp_modules.append(downsamp_module)
        downsamp_modules = nn.ModuleList(downsamp_modules)
        # 1x1 projection of the lowest-resolution features to 2048 channels.
        final_layer = nn.Sequential(
            nn.Conv2d(
                in_channels=head_channels[3] * head_block.expansion,
                out_channels=2048,
                kernel_size=1,
                stride=1,
                padding=0
            ),
            nn.BatchNorm2d(2048, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        return incre_modules, downsamp_modules, final_layer
    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Adapt the previous stage's branch outputs to the next stage.

        For branches that already exist, a 3x3 conv is inserted only when the
        channel count changes (``None`` means identity). Each additional
        branch is produced by chaining stride-2 3x3 convs from the previous
        stage's lowest-resolution branch.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)
        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(nn.Sequential(
                        nn.Conv2d(num_channels_pre_layer[i],
                                  num_channels_cur_layer[i],
                                  3,
                                  1,
                                  1,
                                  bias=False),
                        nn.BatchNorm2d(
                            num_channels_cur_layer[i], momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                else:
                    # Same channel count: this branch passes through unchanged.
                    transition_layers.append(None)
            else:
                # New branch: each stride-2 conv halves the resolution; only
                # the last conv switches to the target channel count.
                conv3x3s = []
                for j in range(i+1-num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    outchannels = num_channels_cur_layer[i] \
                        if j == i-num_branches_pre else inchannels
                    conv3x3s.append(nn.Sequential(
                        nn.Conv2d(
                            inchannels, outchannels, 3, 2, 1, bias=False),
                        nn.BatchNorm2d(outchannels, momentum=BN_MOMENTUM),
                        nn.ReLU(inplace=True)))
                transition_layers.append(nn.Sequential(*conv3x3s))
        return nn.ModuleList(transition_layers)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Build one HRNet stage as a Sequential of HighResolutionModules.

        :param layer_config: dict with NUM_MODULES/NUM_BRANCHES/NUM_BLOCKS/
            NUM_CHANNELS/BLOCK/FUSE_METHOD keys.
        :return: (stage, num_inchannels) where num_inchannels reflects the
            channel counts after the last module.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']
        modules = []
        for i in range(num_modules):
            # multi_scale_output is only used last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True
            modules.append(
                HighResolutionModule(num_branches,
                                     block,
                                     num_blocks,
                                     num_inchannels,
                                     num_channels,
                                     fuse_method,
                                     reset_multi_scale_output)
            )
            # Channel counts may change after fusion; propagate to the next module.
            num_inchannels = modules[-1].get_num_inchannels()
        return nn.Sequential(*modules), num_inchannels
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.layer1(x)
x_list = []
for i in range(self.stage2_cfg['NUM_BRANCHES']):
if self.transition1[i] is not None:
x_list.append(self.transition1[i](x))
else:
x_list.append(x)
y_list = self.stage2(x_list)
x_list = []
for i in range(self.stage3_cfg['NUM_BRANCHES']):
if self.transition2[i] is not None:
x_list.append(self.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.stage3(x_list)
x_list = []
for i in range(self.stage4_cfg['NUM_BRANCHES']):
if self.transition3[i] is not None:
x_list.append(self.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
x = self.stage4(x_list)
for i in range(len(self.incre_modules)):
x[i]=self.incre_modules[i](x[i])
x0_h, x0_w = x[0].size(2), x[0].size(3)
x1=F.upsample(x[1], size=(x0_h, x0_w), mode='bilinear', align_corners=True)
x2=F.upsample(x[2], size=(x0_h, x0_w), mode='bilinear', align_corners=True)
x3=F.upsample(x[3], size=(x0_h, x0_w), mode='bilinear', align_corners=True)
x | |
import curses
import datetime
import json
import logging
import os
import re
import signal
import threading
import time
from collections import OrderedDict
from typing import Dict, List
import yaml
from .core import CephNode, CephNodeManager
from .exceptions import MinionDoesNotExistInConfiguration, ValidationException
from .logging_utils import LoggingUtil
from .salt_event import EventListener, SaltEventProcessor
from .salt_utils import SaltClient, GrainsManager, CephOrch, PillarManager
from .terminal_utils import PrettyPrinter as PP
from .validate.config import validate_config
from .validate.salt_master import check_salt_master_status
from .validate.salt_minion import sync_all
# pylint: disable=C0103
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
class ScreenKeyListener:
    """No-op key-event callbacks; subclasses override the ones they need.

    CursesScreen.wait_for_event dispatches key presses to every registered
    listener through these methods.
    """

    def up_key(self):
        """Called when the up-arrow key is pressed."""

    def down_key(self):
        """Called when the down-arrow key is pressed."""

    def action_key(self):
        """Called when the action key (space) is pressed."""

    def quit_key(self):
        """Called when the quit key ('q') is pressed."""

    def collapse_expand_all_key(self):
        """Called when the collapse/expand-all key ('c') is pressed."""

    def pause_key(self):
        """Called when the pause key ('p') is pressed."""
class CursesScreen:
    """Curses UI with a fixed header, a scrollable body pad, and a footer.

    The body is a curses pad of ``num_rows`` virtual rows; ``body_pos`` is
    the index of the first visible body row. Key presses are forwarded to
    registered ScreenKeyListener instances by :meth:`wait_for_event`.
    """
    HEADER_HEIGHT = 2
    FOOTER_HEIGHT = 3
    MIN_WIDTH = 80
    MIN_HEIGHT = 10
    # Color-pair identifiers registered in start().
    COLOR_MARKER = 1
    COLOR_MINION = 4
    COLOR_STAGE = 5
    COLOR_STEP = 6
    COLOR_MENU = 7
    COLOR_SUCCESS = 8
    COLOR_ERROR = 9
    COLOR_WARNING = 10
    def __init__(self, num_rows=1000):
        """:param num_rows: virtual row capacity of the scrollable body pad."""
        self.num_rows = num_rows
        self.height = None
        self.width = None
        self.stdscr = None
        self.header = None
        self.body = None
        self.body_height = None
        self.body_width = None
        # First visible row of the body pad (scroll offset).
        self.body_pos = 0
        self.footer = None
        self.scrollbar = None
        self.key_listeners = []
        self.previous_signal_handler = None
    def add_key_listener(self, listener):
        """Register a ScreenKeyListener to receive key events."""
        self.key_listeners.append(listener)
    @property
    def body_current_row(self):
        # Row of the body pad's cursor, i.e. the number of rows written so far.
        return self.body.getyx()[0]
    def refresh(self):
        """Redraw body (clamping the scroll offset), scrollbar, header, footer."""
        if self.body:
            if self.body_pos > self.body_current_row - self.body_height:
                self.body_pos = max(0, self.body_current_row - self.body_height)
            self.body.refresh(self.body_pos, 0, self.HEADER_HEIGHT, 0,
                              self.height - self.FOOTER_HEIGHT - 1, self.body_width)
            if self.scrollbar:
                self._render_body_scrollbar()
                self.scrollbar.refresh()
        if self.header:
            self.header.refresh()
        if self.footer:
            self.footer.refresh()
    def start(self):
        """Initialize curses, create the windows, and install the resize handler."""
        # Let curses re-detect the real terminal size instead of stale env vars.
        os.unsetenv('LINES')
        os.unsetenv('COLUMNS')
        logger.info("initializing curses screen")
        self.stdscr = curses.initscr()
        self.height, self.width = self.stdscr.getmaxyx()
        self.body_height = self.height - self.HEADER_HEIGHT - self.FOOTER_HEIGHT
        self.body_width = self.width - 1
        logger.info("current terminal size: rows=%s cols=%s", self.height, self.width)
        curses.start_color()
        curses.use_default_colors()
        curses.init_pair(self.COLOR_MARKER, -1, -1)
        curses.init_pair(self.COLOR_MINION, curses.COLOR_CYAN, -1)
        curses.init_pair(self.COLOR_STAGE, curses.COLOR_YELLOW, -1)
        curses.init_pair(self.COLOR_STEP, curses.COLOR_BLUE, -1)
        curses.init_pair(self.COLOR_MENU, curses.COLOR_BLACK, curses.COLOR_GREEN)
        curses.init_pair(self.COLOR_SUCCESS, curses.COLOR_GREEN, -1)
        curses.init_pair(self.COLOR_ERROR, curses.COLOR_RED, -1)
        curses.init_pair(self.COLOR_WARNING, curses.COLOR_YELLOW, -1)
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        self.stdscr.keypad(True)
        # Windows are only created when the terminal is tall enough for them.
        if self.height > 2:
            self.header = curses.newwin(self.HEADER_HEIGHT, self.width, 0, 0)
        if self.height > 4:
            self.footer = curses.newwin(self.FOOTER_HEIGHT, self.width,
                                        self.height - self.FOOTER_HEIGHT, 0)
        if self.height > 5:
            self.body = curses.newpad(self.num_rows, self.width - 1)
            self.body.scrollok(True)
            # One-column window at the right edge for the scrollbar.
            self.scrollbar = curses.newwin(self.body_height + 1, 1, self.HEADER_HEIGHT,
                                           self.width - 1)
        logger.info("initializing scrollable pad: rows=%s visible_rows=%s cols=%s", self.num_rows,
                    self.body_height, self.width)
        # getch() blocks for at most 200 ms so the event loop stays responsive.
        self.stdscr.timeout(200)
        self.stdscr.refresh()
        self.refresh()
        logger.info("curses screen completed initialization")
        self.previous_signal_handler = signal.signal(signal.SIGWINCH, self._resize)
    def shutdown(self):
        """Restore the terminal and release all curses windows."""
        logger.info("shutting down curses screen")
        signal.signal(signal.SIGWINCH, self.previous_signal_handler)
        curses.nocbreak()
        self.stdscr.keypad(False)
        curses.echo()
        curses.endwin()
        self.header = None
        self.footer = None
        self.body = None
        self.scrollbar = None
    def _resize(self, *args): # pylint: disable=unused-argument
        # SIGWINCH handler: rebuild the whole screen at the new size.
        logger.info("resizing windows")
        self.shutdown()
        self.start()
    def make_visible(self, row, lines):
        """Scroll so that ``lines`` rows starting at ``row`` are on screen."""
        if self.body is None:
            return
        if row < self.body_pos:
            self.body_pos = row
        elif row + lines > self.body_pos + self.body_height:
            self.body_pos += row - (self.body_pos + self.body_height) + lines
    def has_scroll(self):
        """Whether the body content exceeds the visible area."""
        if self.body is None:
            return False
        return self.body_current_row > self.body_height
    def _render_body_scrollbar(self):
        # Draw a proportional scrollbar thumb in the one-column window.
        self.scrollbar.clear()
        current_row = self.body_current_row
        if current_row <= self.body_height:
            # no scrollbar needed
            return
        scroll_size = round((self.body_height / current_row) * self.body_height)
        if scroll_size == 0:
            scroll_size = 1
        current_pos = round((self.body_pos * self.body_height) / current_row)
        if current_pos >= self.body_height:
            current_pos = self.body_height - 1
        for i in range(0, scroll_size):
            self._write(self.scrollbar, current_pos + i, 0, "▐", CursesScreen.COLOR_MARKER, False,
                        False, False, 1)
    def clear_header(self):
        if self.header:
            self.header.move(0, 0)
            self.header.clrtoeol()
    def clear_footer(self):
        if self.footer:
            self.footer.move(0, 0)
            self.footer.erase()
    def clear_body(self):
        if self.body:
            self.body.clear()
    def clear_row(self, row):
        if self.body:
            self.body.move(row, 0)
            self.body.clrtoeol()
    def _write(self, window, row, col, text, color, bold, reverse, line_padding, width):
        """Write ``text`` with the given attributes, optionally padding to ``width``."""
        if window is None:
            return
        if col >= self.width - 1:
            return
        attr = curses.color_pair(color)
        if bold:
            attr |= curses.A_BOLD
        if reverse:
            attr |= curses.A_REVERSE
        window.addstr(row, col, text, attr)
        if line_padding:
            # Extend the attributes (e.g. reverse video) to the end of the line.
            if width > len(text) + col:
                padding = " " * (width - len(text) - col)
                window.addstr(row, col + len(text), padding, attr)
    def write_header(self, col, text, color, bold=False, reverse=False, line_padding=False):
        self._write(self.header, 0, col, text, color, bold, reverse, line_padding, self.width)
    def write_footer(self, col, text, color, bold=False, reverse=False, line_padding=False, row=0):
        self._write(self.footer, row + 1, col, text, color, bold, reverse, line_padding, self.width)
    def write_body(self, row, col, text, color, bold=False, reverse=False, line_padding=False):
        self._write(self.body, row, col, text, color, bold, reverse, line_padding, self.body_width)
    def wait_for_event(self):
        """Poll for one key press and dispatch it.

        Scrolling keys (PgUp/PgDn/j/k) adjust ``body_pos`` directly; the
        remaining keys are forwarded to the registered listeners.
        :return: True when a key was handled, False on timeout/unknown key/^C.
        """
        try:
            ch = self.stdscr.getch()
            if ch == -1:
                # getch timed out (see timeout(200) in start()).
                return False
            if ch == curses.KEY_NPAGE:
                if self.body:
                    if self.body_pos < self.body_current_row - self.body_height:
                        self.body_pos += min(
                            self.body_height - 1,
                            self.body_current_row - self.body_pos - self.body_height)
            elif ch == curses.KEY_PPAGE and self.body_pos > 0:
                if self.body:
                    self.body_pos -= min(self.body_pos, self.body_height - 1)
            elif ch == ord('j'):
                if self.body:
                    if self.body_pos < self.body_current_row - self.body_height:
                        self.body_pos += 1
            elif ch == ord('k'):
                if self.body:
                    if self.body_pos > 0:
                        self.body_pos -= 1
            elif ch == ord(' '):
                for listener in self.key_listeners:
                    listener.action_key()
            elif ch == ord('q'):
                for listener in self.key_listeners:
                    listener.quit_key()
            elif ch == ord('c'):
                for listener in self.key_listeners:
                    listener.collapse_expand_all_key()
            elif ch == curses.KEY_DOWN:
                for listener in self.key_listeners:
                    listener.down_key()
            elif ch == curses.KEY_UP:
                for listener in self.key_listeners:
                    listener.up_key()
            elif ch == ord('p'):
                for listener in self.key_listeners:
                    listener.pause_key()
            else:
                return False
        except KeyboardInterrupt:
            return False
        return True
class Event:
    """A parsed orchestration event.

    ``ev_type`` is a string such as 'stage_begin' or 'step_end'; the is_*
    helpers test it by substring. ``stage_ev`` optionally links a step
    event to its enclosing stage event.
    """

    def __init__(self, ev_type: str, desc: str, stage_ev: "Event" = None):
        self.ev_type = ev_type
        self.desc = desc
        self.stage_ev = stage_ev

    def is_stage(self) -> bool:
        """True when the event type refers to a stage."""
        return 'stage' in self.ev_type

    def is_step(self) -> bool:
        """True when the event type refers to a step."""
        return 'step' in self.ev_type

    def is_begin(self) -> bool:
        """True for begin-type events."""
        return 'begin' in self.ev_type

    def is_end(self) -> bool:
        """True for end-type events."""
        return 'end' in self.ev_type

    def __str__(self) -> str:
        if self.stage_ev is not None:
            return "EV({}, {}, {})".format(self.ev_type, self.desc, self.stage_ev)
        return "EV({}, {})".format(self.ev_type, self.desc)
class Step:
    """A single step executed on one minion, with begin/end bookkeeping."""

    def __init__(self, minion, desc, timestamp):
        self.minion = minion
        self.desc = desc
        self.begin_time = timestamp
        self.end_time = None
        self.failure = None
        self.success = None
        logger.info("STEP [BEGIN] \"{}\" on minion {}".format(self.desc, self.minion))

    def end(self, timestamp, success=True):
        """Record the step's end time and outcome."""
        self.end_time = timestamp
        self.success = success
        logger.info("STEP [END] \"{}\" on minion {} (success={})".format(
            self.desc, self.minion, self.success))

    def finished(self):
        """Whether an end time has been recorded."""
        return self.end_time is not None

    def report_failure(self, state_data):
        """Attach the failed state's data and mark the step unsuccessful."""
        self.success = False
        self.failure = state_data
class Stage:
    """One stage of execution on a minion: an ordered collection of Steps."""
    def __init__(self, minion, desc, timestamp):
        self.minion = minion
        self.desc = desc
        # desc -> Step, in the order the steps began.
        self.steps = OrderedDict()
        self.begin_time = timestamp
        self.current_step = None
        self.end_time = None
        self.success = None
        self.warning = False
        log_msg = "STAGE [BEGIN] \"{}\" on minion {}".format(self.desc, self.minion)
        logger.info(log_msg)
    @property
    def last_step(self):
        """The running step, or the most recently added one when none is running."""
        if self.current_step is None and self.steps:
            return self.steps[next(reversed(self.steps))]
        return self.current_step
    def end(self, timestamp, success=True):
        """Finish the stage, force-closing any step that never reported an end."""
        self.end_time = timestamp
        self.current_step = None
        self.success = success
        for step in self.steps.values():
            if not step.finished():
                step.end(timestamp, True)
        log_msg = "STAGE [END] \"{}\" on minion {} (success={})".format(
            self.desc, self.minion, self.success)
        logger.info(log_msg)
    def step_begin(self, desc, timestamp):
        """
        :return: "False" if duplicated, otherwise "True"
        """
        if desc in self.steps:
            if self.steps[desc].begin_time:
                return False
            # The end event arrived before the begin; backfill the begin time.
            logger.warning("[%s] received begin_step event after end: %s", self.minion, desc)
            self.steps[desc].begin_time = timestamp
            return True
        self.steps[desc] = Step(self.minion, desc, timestamp)
        self.current_step = self.steps[desc]
        return True
    def step_end(self, desc, timestamp):
        """
        :return: "False" if duplicated, otherwise "True"
        """
        if desc not in self.steps:
            # The begin event was missed; synthesize a step with no begin time.
            logger.warning("[%s] received end_step event without a begin: %s", self.minion, desc)
            self.steps[desc] = Step(self.minion, desc, None)
        if self.steps[desc].finished():
            return False
        self.steps[desc].end(timestamp)
        self.current_step = None
        return True
    def finished(self):
        # A stage is finished once an end time has been recorded.
        return self.end_time is not None
    def report_failure(self, event: Event, state_data):
        """Record a failed state, rebuilding ``self.steps`` so the failure
        entry sits in the right position relative to the failing step."""
        _steps = self.steps
        self.steps = OrderedDict()
        self.success = False
        if event is None:
            # No event context: place the raw failure entry first.
            self.steps['|failure|{}'.format(state_data['__id__'])] = state_data
        for key, val in _steps.items():
            self.steps[key] = val
            if event is not None and event.desc == key:
                if event.is_begin():
                    # Failure happened inside this step; attach it to the step.
                    val.report_failure(state_data)
                if event.is_end():
                    # Failure happened after the step; insert it right after.
                    self.steps['|failure|{}'.format(state_data['__id__'])] = state_data
class MinionExecution:
    def __init__(self, name):
        """Track the stage/step execution history of one minion ``name``."""
        self.name = name
        # desc -> Stage, in the order the stages began.
        self.stages = OrderedDict()
        self.current_stage = None
        self.begin_time = datetime.datetime.utcnow()
        self.end_time = None
        # NOTE(review): not modified by the visible methods — presumably set
        # externally while the minion reboots; confirm against callers.
        self.rebooting = False
        self.warnings = []
        self.success = None
@property
def last_stage(self):
if self.current_stage is None and self.stages:
return self.stages[next(reversed(self.stages))]
return self.current_stage
    def stage_begin(self, desc, timestamp):
        """
        :return: "False" if duplicated, otherwise "True"
        """
        if desc in self.stages:
            if self.stages[desc].begin_time:
                return False
            # The end event arrived before the begin; backfill the begin time.
            logger.warning("[%s] received begin_stage event after end: %s", self.name, desc)
            self.stages[desc].begin_time = timestamp
            return True
        self.stages[desc] = Stage(self.name, desc, timestamp)
        self.current_stage = self.stages[desc]
        return True
    def stage_end(self, desc, timestamp):
        """
        :return: "False" if duplicated, otherwise "True"
        """
        if desc not in self.stages:
            # The begin event was missed; synthesize a stage with no begin time.
            logger.warning("[%s] received end_stage event without a begin: %s", self.name, desc)
            self.stages[desc] = Stage(self.name, desc, None)
        if self.stages[desc].finished():
            return False
        self.stages[desc].end(timestamp)
        self.current_stage = None
        return True
def stage_warn(self, desc):
self.stages[desc].warning = True
self.warnings.append(desc)
def step_begin(self, desc, timestamp):
"""
:return: "False" if duplicated, "None" if outside stage, otherwise "True"
"""
if self.current_stage:
return self.current_stage.step_begin(desc, timestamp)
| |
None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and (not sim_is_big_dog and (not is_line_obj and relative_object is not None)) and is_single_point:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Qualified Invalid', location, cost, validation, failure))
if not self._is_generated_goal_location_valid(location, goal_height_limit, target_height):
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Height Invalid', location, cost, validation, failure))
if invalid_goal and failure == GoalFailureType.OutOfWaterDepth:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Water Depth Invalid', location, cost, validation, failure))
if invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and failure == GoalFailureType.OutOfWaterDepth:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Water Depth Invalid', location, cost, validation, failure))
if invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif not self._is_generated_goal_location_valid(location, goal_height_limit, target_height):
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Height Invalid', location, cost, validation, failure))
if invalid_goal and failure == GoalFailureType.OutOfWaterDepth:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Water Depth Invalid', location, cost, validation, failure))
if invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and failure == GoalFailureType.OutOfWaterDepth:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Water Depth Invalid', location, cost, validation, failure))
if invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
elif invalid_goal and failure == GoalFailureType.TerrainTagViolations:
if out_result_info is not None:
out_result_info.append(GoalFailureInfo('Terrain Tags Invalid', location, cost, validation, failure))
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
else:
if minimum_router_cost is not None:
if cost > sims4.math.EPSILON:
cost = max(cost, minimum_router_cost)
full_cost = self._get_location_cost(location.position, location.orientation, location.routing_surface, cost)
full_cost += surface_costs[location.routing_surface.type]
if self.constraint.enables_height_scoring:
full_cost += max_goal_height - location.position.y
goal = self.create_goal(location, full_cost, tag, failure)
goal_list.append(goal)
if gsi_handlers.routing_handlers.archive_goals_enabled():
gsi_handlers.routing_handlers.archive_goals(self, goal_list, out_result_info, max_goals=max_goals, relative_object=relative_object, single_goal_only=single_goal_only, for_carryable=for_carryable, for_source=for_source, goal_height_limit=goal_height_limit, target_reference_override=target_reference_override, always_reject_invalid_goals=always_reject_invalid_goals, perform_los_check=perform_los_check)
return goal_list
def create_goal(self, location, full_cost, tag, failure):
return routing.Goal(location, cost=full_cost, tag=tag, requires_los_check=self.los_reference_point is not None, connectivity_handle=self, failure_reason=failure)
def _is_generated_goal_location_valid(self, location, goal_height_limit=None, target_height=None):
if goal_height_limit is None or target_height is None:
return True
else:
goal_y = location.position.y
y_delta = abs(goal_y - target_height)
if y_delta > goal_height_limit:
return False
return True
def _get_location_cost(self, position, orientation, routing_surface, router_cost):
return router_cost + sum(cost_fn.constraint_cost(position, orientation, routing_surface) for cost_fn in self.constraint._scoring_functions)
def _is_geometry_single_point(self):
if len(self.geometry.polygon) == 1 and len(self.geometry.polygon[0]) == 1:
return True
return False
def _get_minimum_router_cost(self):
pass
@constproperty
def for_slot_constraint():
    # Base routing handles are not generated from slot constraints;
    # SlotRoutingHandle overrides this to return True.
    return False
class SlotRoutingHandle(RoutingHandle):
    """Routing handle generated from a slot (containment) constraint.

    Goals created by this handle are SlotGoals that carry the locked
    animation (ASM) parameters needed to enter or exit the slot.
    """

    def __init__(self, *args, reference_transform=None, entry=True, **kwargs):
        super().__init__(*args, **kwargs)
        # True when this handle routes a Sim into the slot, False for exiting it.
        self._entry = entry
        # Optional override for the constraint's containment transform.
        self._reference_transform = reference_transform

    def _get_kwargs_for_clone(self, kwargs):
        # Preserve slot-specific state when the handle is cloned.
        super()._get_kwargs_for_clone(kwargs)
        kwargs.update(reference_transform=self._reference_transform, entry=self._entry)

    def create_goal(self, location, full_cost, tag, failure):
        """Create a SlotGoal with initial/target transforms baked into the locked ASM params."""
        reference_transform = self._reference_transform
        if reference_transform is None:
            # Entry uses the containment transform; exit uses the dedicated exit transform.
            reference_transform = self.constraint.containment_transform if self._entry else self.constraint.containment_transform_exit
        # Entering: move from the routed location into the slot. Exiting: the reverse.
        initial_transform = location.transform if self._entry else reference_transform
        target_transform = reference_transform if self._entry else location.transform
        if self._entry:
            target_orientation = target_transform.orientation
        else:
            # Face along the direction of travel (initial -> target).
            v = target_transform.translation - initial_transform.translation
            target_orientation = sims4.math.angle_to_yaw_quaternion(sims4.math.vector3_angle(v))
        locked_params = dict(self.locked_params)
        locked_params[('InitialTranslation', 'x')] = initial_transform.translation
        locked_params[('InitialOrientation', 'x')] = initial_transform.orientation
        locked_params[(animation_constants.ASM_TARGET_TRANSLATION, 'x')] = target_transform.translation
        locked_params[(animation_constants.ASM_TARGET_ORIENTATION, 'x')] = target_orientation
        locked_params = frozendict(locked_params)
        if location.orientation == sims4.math.Quaternion.ZERO():
            # The generated location has no orientation; substitute the slot's target orientation.
            goal_location = routing.Location(location.position, orientation=target_orientation, routing_surface=location.routing_surface)
        else:
            goal_location = location
        return SlotGoal(goal_location, containment_transform=self.constraint.containment_transform, cost=full_cost, tag=tag, requires_los_check=self.los_reference_point is not None, connectivity_handle=self, slot_params=locked_params, failure_reason=failure)

    def _get_location_cost(self, position, orientation, routing_surface, router_cost):
        # Score against the slot's containment transform instead of the routed position.
        transform = self.constraint.containment_transform
        return super()._get_location_cost(transform.translation, transform.orientation, routing_surface, router_cost)

    def _get_minimum_router_cost(self):
        # Single-point geometry: enforce a small non-zero router cost floor.
        if self._is_geometry_single_point():
            return 1

    @constproperty
    def for_slot_constraint():
        # This handle type is always generated from a slot constraint.
        return True
class UniversalSlotRoutingHandle(SlotRoutingHandle):
def __init__(self, *args, cost_functions_override=None, posture=None, **kwargs):
super().__init__(*args, **kwargs)
self._cost_functions_override = cost_functions_override
self._posture = posture
def _get_kwargs_for_clone(self, kwargs):
super()._get_kwargs_for_clone(kwargs)
kwargs.update(cost_functions_override=self._cost_functions_override, posture=self._posture)
def get_los_reference_point(self, routing_surface, force_multi_surface=False):
if routing_surface.type == routing.SurfaceType.SURFACETYPE_WORLD:
return self.los_reference_point
def _is_generated_goal_location_valid(self, location, goal_height_limit=None, target_height=None):
if not self._validate_y_delta(location):
return False
elif not self._validate_raycast(location):
return False
return True
def _validate_y_delta(self, location):
universal_data = self._get_universal_data()
if universal_data | |
<gh_stars>1-10
#FLM: Glyph: Layers
# ----------------------------------------
# (C) <NAME>, 2018 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#-----------------------------------------
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -----------------
import fontlab as fl6
import fontgate as fgt
from PythonQt import QtCore
from typerig import QtGui
from typerig.glyph import eGlyph
from typerig.gui import trSliderCtrl
from itertools import groupby
from math import radians
# - Init
global pLayers
global pMode
pLayers = None
pMode = 0
app_name, app_version = 'TypeRig | Layers', '0.36'
# - Sub widgets ------------------------
class QlayerSelect(QtGui.QVBoxLayout):
    '''Glyph/layer selection panel: shows the active glyph name and a
    multi-select list of its layers, color-coded by layer type flags.'''

    def __init__(self):
        super(QlayerSelect, self).__init__()

        # - Init
        # -- Head: glyph name display + refresh button
        self.lay_head = QtGui.QHBoxLayout()
        self.edt_glyphName = QtGui.QLineEdit()
        self.btn_refresh = QtGui.QPushButton('&Refresh')
        self.btn_refresh.clicked.connect(self.refresh)

        self.lay_head.addWidget(QtGui.QLabel('G:'))
        self.lay_head.addWidget(self.edt_glyphName)
        self.lay_head.addWidget(self.btn_refresh)
        self.addLayout(self.lay_head)

        # -- Layer List
        self.lst_layers = QtGui.QListWidget()
        self.lst_layers.setAlternatingRowColors(True)
        self.lst_layers.setMinimumHeight(100)
        #self.lst_layers.setMaximumHeight(100)
        self.lst_layers.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection) # Select multiple items call .selectedItems() to get a QList

        self.addWidget(self.lst_layers)
        self.refresh()

    def refresh(self):
        '''Rebuild the layer list from the current glyph, hiding banned layer names
        and color-coding each entry by its Service/Master/Mask/Wireframe flags.'''
        # - Init
        layerBanList = ['#', 'img']  # layers whose names contain these substrings are hidden
        self.glyph = eGlyph()

        # - Prepare
        self.edt_glyphName.setText(eGlyph().name)
        self.selection = self.glyph.layer().name  # remember the currently active layer name
        self.lst_layers.clear()

        # - Build List and style it
        self.lst_layers.addItems(sorted([layer.name for layer in self.glyph.layers() if all([item not in layer.name for item in layerBanList])]))

        # NOTE: PythonQt exposes .count as a property, not a method.
        for index in range(self.lst_layers.count):
            currItem = self.lst_layers.item(index)
            currLayer = self.glyph.layer(currItem.text())
            control = (currLayer.isService, currLayer.isMasterLayer, currLayer.isMaskLayer, currLayer.isWireframe)
            # RGB channels from the first three flags; alpha dimmed when Wireframe is set.
            controlColor = [int(item)*255 for item in control[:-1]] + [150-int(control[-1])*100]
            text = 'Service Master Mask Wireframe'.split(' ')
            controlText = ' | '.join([text[pos] for pos in range(len(text)) if control[pos]])

            currItem.setData(QtCore.Qt.DecorationRole, QtGui.QColor(*controlColor))
            currItem.setData(QtCore.Qt.ToolTipRole, controlText)

    def doCheck(self):
        '''Return 1 if the panel's cached glyph still matches FontLab's active glyph;
        otherwise warn, force a refresh, and return 0 so callers abort their action.'''
        if self.glyph.fg.id != fl6.CurrentGlyph().id and self.glyph.fl.name != fl6.CurrentGlyph().name:
            print '\nERRO:\tGlyph mismatch:\n\tCurrent active glyph: %s\n\tLayers panel glyph: %s' %(fl6.CurrentGlyph(), self.glyph.fg)
            print 'WARN:\tNo action taken! Forcing refresh!'
            self.refresh()
            #raise Exception('Glyph mismatch')
            return 0
        return 1
class QlayerBasic(QtGui.QVBoxLayout):
    '''Basic layer management panel: add, remove and duplicate layers,
    build Mask copies, and toggle the Service/Wireframe flags on the
    layers selected in the companion QlayerSelect panel.'''

    def __init__(self, aux):
        super(QlayerBasic, self).__init__()

        # - Init
        self.aux = aux  # QlayerSelect instance providing glyph + layer selection

        # -- Basic Tool buttons
        self.lay_buttons = QtGui.QGridLayout()
        self.btn_add = QtGui.QPushButton('Add')
        self.btn_del = QtGui.QPushButton('Remove')
        self.btn_dup = QtGui.QPushButton('Duplicate')
        self.btn_setServ = QtGui.QPushButton('Service')
        self.btn_setMask = QtGui.QPushButton('Mask')
        self.btn_setWire = QtGui.QPushButton('Wireframe')

        self.btn_add.setToolTip('Add new layer with name')
        self.btn_dup.setToolTip('Duplicate selected with suffix')
        self.btn_del.setToolTip('Delete selected layers')
        self.btn_setServ.setToolTip('Set selected layers as Service')
        self.btn_setWire.setToolTip('Set selected layers as Wireframe')

        self.edt_name = QtGui.QLineEdit('New')
        self.edt_name.setToolTip('Name or suffix')

        self.btn_add.clicked.connect(self.addLayer)
        self.btn_dup.clicked.connect(self.duplicateLayers)
        self.btn_del.clicked.connect(self.deleteLayers)
        self.btn_setMask.clicked.connect(self.addMaskLayers)
        self.btn_setServ.clicked.connect(lambda: self.setLayer('Service'))
        self.btn_setWire.clicked.connect(lambda: self.setLayer('Wireframe'))

        self.lay_buttons.addWidget(QtGui.QLabel('Suffix/Name:'), 0, 0, 1, 1)
        self.lay_buttons.addWidget(self.edt_name, 0, 1, 1, 2)
        self.lay_buttons.addWidget(self.btn_add, 1, 0, 1, 1)
        self.lay_buttons.addWidget(self.btn_del, 1, 1, 1, 1)
        self.lay_buttons.addWidget(self.btn_dup, 1, 2, 1, 1)
        self.lay_buttons.addWidget(self.btn_setServ, 2, 0, 1, 1)
        self.lay_buttons.addWidget(self.btn_setMask, 2, 1, 1, 1)
        self.lay_buttons.addWidget(self.btn_setWire, 2, 2, 1, 1)

        self.addLayout(self.lay_buttons)

    def addLayer(self):
        '''Add a new empty layer named after the Suffix/Name field.'''
        if self.aux.doCheck():
            newLayer = fl6.flLayer()
            newLayer.name = str(self.edt_name.text)
            self.aux.glyph.addLayer(newLayer)
            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Add Layer: %s.' %newLayer.name)
            self.aux.glyph.update()
            self.aux.refresh()

    def duplicateLayers(self):
        '''Duplicate every selected layer as "<layer name>.<suffix>".'''
        if self.aux.doCheck():
            # NOTE: constructing fl6.flLayer from an existing layer and renaming it
            # should work but does not, so duplication goes through eGlyph.duplicateLayer.
            for item in self.aux.lst_layers.selectedItems():
                self.aux.glyph.duplicateLayer(item.text() , '%s.%s' %(item.text(), self.edt_name.text), True)

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Duplicate Layer: %s.' %'; '.join([item.text() for item in self.aux.lst_layers.selectedItems()]))
            self.aux.glyph.update()
            self.aux.refresh()

    def addMaskLayers(self):
        '''Create a mask layer for every selected layer and copy its shapes into it.'''
        if self.aux.doCheck():
            for item in self.aux.lst_layers.selectedItems():
                # - Build mask layer
                srcShapes = self.aux.glyph.shapes(item.text())
                newMaskLayer = self.aux.glyph.layer(item.text()).getMaskLayer(True)

                # - Copy shapes to mask layer
                for shape in srcShapes:
                    newMaskLayer.addShape(shape.cloneTopLevel()) # Clone so that the shapes are NOT referenced, but actually copied!

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'New Mask Layer: %s.' %'; '.join([item.text() for item in self.aux.lst_layers.selectedItems()]))
            self.aux.glyph.update()
            self.aux.refresh()

    def deleteLayers(self):
        '''Remove every selected layer from the glyph.'''
        if self.aux.doCheck():
            for item in self.aux.lst_layers.selectedItems():
                self.aux.glyph.removeLayer(item.text())

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Delete Layer: %s.' %'; '.join([item.text() for item in self.aux.lst_layers.selectedItems()]))
            self.aux.glyph.update()
            self.aux.refresh()

    def setLayer(self, type):
        '''Toggle the given type flag ('Service' or 'Wireframe') on all selected layers.'''
        if self.aux.doCheck():
            for item in self.aux.lst_layers.selectedItems():
                wLayer = self.aux.glyph.layer(item.text())

                # FIX: compare strings with ==, not 'is'. Identity of string
                # literals depends on interpreter interning and is not guaranteed
                # (CPython 3.8+ emits a SyntaxWarning for this pattern).
                if type == 'Service': wLayer.isService = not wLayer.isService
                if type == 'Wireframe': wLayer.isWireframe = not wLayer.isWireframe

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Set Layer as <%s>: %s.' %(type, '; '.join([item.text() for item in self.aux.lst_layers.selectedItems()])))
            self.aux.glyph.update()
            self.aux.refresh()
class QlayerTools(QtGui.QVBoxLayout):
    '''Layer content tools: swap/copy/paste/clean outlines, metrics, guides and
    anchors between the active layer and the layers selected in QlayerSelect.
    The checkboxes select which kinds of data each operation transfers.'''

    def __init__(self, aux):
        super(QlayerTools, self).__init__()

        # - Init
        self.aux = aux  # QlayerSelect instance providing glyph + layer selection

        # -- Mode checks: which data the tools operate on
        self.lay_checks = QtGui.QGridLayout()
        self.chk_outline = QtGui.QCheckBox('Outline')
        self.chk_guides = QtGui.QCheckBox('Guides')
        self.chk_anchors = QtGui.QCheckBox('Anchors')
        self.chk_lsb = QtGui.QCheckBox('LSB')
        self.chk_adv = QtGui.QCheckBox('Advance')
        self.chk_rsb = QtGui.QCheckBox('RSB')

        # -- Set States: outline + advance transferred by default
        self.chk_outline.setCheckState(QtCore.Qt.Checked)
        self.chk_adv.setCheckState(QtCore.Qt.Checked)

        # -- Build
        self.lay_checks.addWidget(self.chk_outline, 0, 0)
        self.lay_checks.addWidget(self.chk_guides, 0, 1)
        self.lay_checks.addWidget(self.chk_anchors, 0, 2)
        self.lay_checks.addWidget(self.chk_lsb, 1, 0)
        self.lay_checks.addWidget(self.chk_adv, 1, 1)
        self.lay_checks.addWidget(self.chk_rsb, 1, 2)
        self.addLayout(self.lay_checks)

        # -- Quick Tool buttons
        self.lay_buttons = QtGui.QGridLayout()
        self.btn_swap = QtGui.QPushButton('Swap')
        self.btn_copy = QtGui.QPushButton('Copy')
        self.btn_paste = QtGui.QPushButton('Paste')
        self.btn_clean = QtGui.QPushButton('Remove')
        self.btn_unlock = QtGui.QPushButton('Unlock')
        self.btn_expand = QtGui.QPushButton('Expand')

        #self.btn_unlock.setEnabled(False)
        self.btn_expand.setEnabled(False)  # Expand transformations not implemented yet

        self.btn_swap.setToolTip('Swap Selected Layer with Active Layer')
        self.btn_copy.setToolTip('Copy Active Layer to Selected Layer')
        self.btn_paste.setToolTip('Paste Selected Layer to Active Layer')
        self.btn_clean.setToolTip('Remove contents from selected layers')
        self.btn_unlock.setToolTip('Unlock all locked references.\nSHIFT+Click will lock all references.')
        self.btn_expand.setToolTip('Expand transformations for selected layers')

        self.btn_swap.clicked.connect(self.swap)
        self.btn_copy.clicked.connect(self.copy)
        self.btn_paste.clicked.connect(self.paste)
        self.btn_clean.clicked.connect(self.clean)
        self.btn_unlock.clicked.connect(self.unlock)
        #self.btn_expand.clicked.connect(self.expand)

        self.lay_buttons.addWidget(self.btn_swap, 0, 0, 1, 1)
        self.lay_buttons.addWidget(self.btn_copy, 0, 1, 1, 1)
        self.lay_buttons.addWidget(self.btn_paste, 0, 2, 1, 1)
        self.lay_buttons.addWidget(self.btn_clean, 1, 0, 1, 1)
        self.lay_buttons.addWidget(self.btn_unlock, 1, 1, 1, 1)
        self.lay_buttons.addWidget(self.btn_expand, 1, 2, 1, 1)

        self.addLayout(self.lay_buttons)

    # - Helper Procedures ----------------------------------------------
    def Copy_Paste_Layer_Shapes(self, glyph, layerName, copy=True, cleanDST=False, impSRC=None):
        '''Transfer shapes between the active layer and layerName.

        copy=True copies layerName -> active layer; copy=False pastes active -> layerName.
        cleanDST removes the destination's shapes first (and returns them).
        impSRC optionally supplies the shapes to transfer instead of reading the source layer.
        Returns the removed destination shapes, or None.'''
        # FIX: default changed from a shared mutable list ([]) to a None sentinel —
        # mutable default arguments are shared across calls.
        impSRC = impSRC if impSRC is not None else []
        srcLayerName = layerName if copy else None # Note: None refers to activeLayer
        dstLayerName = None if copy else layerName
        exportDSTShapes = None

        # -- Get shapes
        srcShapes = glyph.shapes(srcLayerName) if len(impSRC) == 0 else impSRC

        # -- Cleanup destination layers
        if cleanDST:
            exportDSTShapes = glyph.shapes(dstLayerName)
            glyph.layer(dstLayerName).removeAllShapes()

        # -- Copy/Paste (clone so shapes are copied, not referenced)
        for shape in srcShapes:
            glyph.layer(dstLayerName).addShape(shape.cloneTopLevel())

        return exportDSTShapes

    def Copy_Paste_Layer_Metrics(self, glyph, layerName, copy=True, mode='ADV', impSRC=None):
        '''Transfer one metric (mode: 'LSB', 'ADV' or 'RSB') between the active
        layer and layerName; impSRC optionally supplies the value directly.
        Returns the destination's previous value for that metric.'''
        srcLayerName = layerName if copy else None # Note: None refers to activeLayer
        dstLayerName = None if copy else layerName

        if 'LSB' in mode.upper():
            exportMetric = glyph.getLSB(dstLayerName)
            glyph.setLSB(glyph.getLSB(srcLayerName) if impSRC is None else impSRC, dstLayerName)
            return exportMetric

        if 'ADV' in mode.upper():
            exportMetric = glyph.getAdvance(dstLayerName)
            glyph.setAdvance(glyph.getAdvance(srcLayerName) if impSRC is None else impSRC, dstLayerName)
            return exportMetric

        if 'RSB' in mode.upper():
            exportMetric = glyph.getRSB(dstLayerName)
            glyph.setRSB(glyph.getRSB(srcLayerName) if impSRC is None else impSRC, dstLayerName)
            return exportMetric

    def Copy_Paste_Layer_Guides(self, glyph, layerName, copy=True, cleanDST=False):
        '''Append the source layer's guidelines to the destination layer.
        Destination cleanup is not currently possible through the API.'''
        srcLayerName = layerName if copy else None # Note: None refers to activeLayer
        dstLayerName = None if copy else layerName

        # -- Cleanup !!! Not implementable for now?! Why
        if cleanDST:
            pass

        glyph.layer(dstLayerName).appendGuidelines(glyph.guidelines(srcLayerName))

    def Copy_Paste_Layer_Anchors(self, glyph, layerName, copy=True, cleanDST=False, impSRC=None):
        '''Transfer anchors between the active layer and layerName; impSRC
        optionally supplies the anchors to transfer. Returns the removed
        destination anchors when cleanDST is set, else None.'''
        # FIX: default changed from a shared mutable list ([]) to a None sentinel.
        impSRC = impSRC if impSRC is not None else []
        srcLayerName = layerName if copy else None # Note: None refers to activeLayer
        dstLayerName = None if copy else layerName
        exportDSTAnchors = None

        # -- Get anchors
        srcAnchors = glyph.anchors(srcLayerName) if len(impSRC) == 0 else impSRC

        # -- Cleanup !!! Not working
        if cleanDST:
            exportDSTAnchors = glyph.anchors(dstLayerName)

            for anchor in glyph.anchors(dstLayerName):
                glyph.layer(dstLayerName).removeAnchor(anchor)

        for anchor in srcAnchors:
            # NOTE(review): appending to the list returned by glyph.anchors() may not
            # attach the anchor to the layer — verify against the FontLab API.
            glyph.anchors(dstLayerName).append(anchor)

        return exportDSTAnchors

    # - Button procedures ---------------------------------------------------
    def unlock(self):
        '''Unlock (or, with SHIFT held, lock) the content of all shapes on the selected layers.'''
        if self.aux.doCheck():
            modifiers = QtGui.QApplication.keyboardModifiers()

            if self.chk_outline.isChecked():
                for item in self.aux.lst_layers.selectedItems():
                    for shape in self.aux.glyph.shapes(item.text()):
                        if modifiers == QtCore.Qt.ShiftModifier: # Shift + Click will lock
                            shape.contentLocked = True
                        else:
                            shape.contentLocked = False

            self.aux.glyph.updateObject(self.aux.glyph.fl, '%s shapes on Layer(s) | %s' %(['Unlock', 'Lock'][modifiers == QtCore.Qt.ShiftModifier],'; '.join([item.text() for item in self.aux.lst_layers.selectedItems()])))
            self.aux.glyph.update()

    def swap(self):
        '''Swap the checked data kinds between the active layer and the current list selection.'''
        if self.aux.doCheck():
            if self.chk_outline.isChecked():
                # Copy selection -> active (keeping the replaced shapes), then paste them back.
                exportSRC = self.Copy_Paste_Layer_Shapes(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True, True)
                self.Copy_Paste_Layer_Shapes(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False, True, exportSRC)

            if self.chk_guides.isChecked():
                pass

            if self.chk_anchors.isChecked():
                pass

            if self.chk_lsb.isChecked():
                exportMetric = self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True, 'LSB')
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False, 'LSB', exportMetric)

            if self.chk_adv.isChecked():
                exportMetric = self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True, 'ADV')
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False, 'ADV', exportMetric)

            if self.chk_rsb.isChecked():
                exportMetric = self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True, 'RSB')
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False, 'RSB', exportMetric)

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Swap Layers | %s <-> %s.' %(self.aux.glyph.activeLayer().name, self.aux.lst_layers.currentItem().text()))
            self.aux.glyph.update()

    def copy(self):
        '''Copy the checked data kinds from the current list selection to the active layer.'''
        if self.aux.doCheck():
            if self.chk_outline.isChecked():
                self.Copy_Paste_Layer_Shapes(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True)

            if self.chk_guides.isChecked():
                self.Copy_Paste_Layer_Guides(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True)

            if self.chk_anchors.isChecked():
                self.Copy_Paste_Layer_Anchors(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True)

            if self.chk_lsb.isChecked():
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True, 'LSB')

            if self.chk_adv.isChecked():
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True, 'ADV')

            if self.chk_rsb.isChecked():
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), True, 'RSB')

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Copy Layer | %s <- %s.' %(self.aux.glyph.activeLayer().name, self.aux.lst_layers.currentItem().text()))
            self.aux.glyph.update()

    def paste(self):
        '''Paste the checked data kinds from the active layer to the current list selection.'''
        if self.aux.doCheck():
            if self.chk_outline.isChecked():
                self.Copy_Paste_Layer_Shapes(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False)

            if self.chk_guides.isChecked():
                self.Copy_Paste_Layer_Guides(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False)

            if self.chk_anchors.isChecked():
                self.Copy_Paste_Layer_Anchors(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False)

            if self.chk_lsb.isChecked():
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False, 'LSB')

            if self.chk_adv.isChecked():
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False, 'ADV')

            if self.chk_rsb.isChecked():
                self.Copy_Paste_Layer_Metrics(self.aux.glyph, self.aux.lst_layers.currentItem().text(), False, 'RSB')

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Paste Layer | %s -> %s.' %(self.aux.glyph.activeLayer().name, self.aux.lst_layers.currentItem().text()))
            self.aux.glyph.update()

    def clean(self):
        '''Remove the checked content kinds from all selected layers.'''
        if self.aux.doCheck():
            if self.chk_outline.isChecked():
                for item in self.aux.lst_layers.selectedItems():
                    self.aux.glyph.layer(item.text()).removeAllShapes()

            if self.chk_guides.isChecked():
                pass # TODO!!!!!

            if self.chk_anchors.isChecked():
                pass # TODO!!!!!

            self.aux.glyph.updateObject(self.aux.glyph.fl, 'Clean Layer(s) | %s' %'; '.join([item.text() for item in self.aux.lst_layers.selectedItems()]))
            self.aux.glyph.update()
class QlayerMultiEdit(QtGui.QVBoxLayout):
def __init__(self, aux):
    """Build the multi-layer editing panel (unfold/fold, outline clipboard, affine transforms)."""
    super(QlayerMultiEdit, self).__init__()

    # - Init
    self.aux = aux  # QlayerSelect instance providing glyph + layer selection
    self.backup = {}  # layer name -> (LSB, advance) saved by unfold(), restored by restore()
    self.contourClipboard = {}  # layer name -> [flContour] captured by copy(), consumed by paste()

    # -- Edit fields
    self.edt_shift = QtGui.QLineEdit('0.0, 0.0')
    self.edt_scale = QtGui.QLineEdit('100, 100')
    self.edt_slant = QtGui.QLineEdit('0.0')
    self.edt_rotate = QtGui.QLineEdit('0.0')

    self.edt_shift.setToolTip('Translate Layer by X, Y (comma separated)')
    self.edt_scale.setToolTip('Scale Layer by X percent, Y percent(comma separated)')
    self.edt_slant.setToolTip('Slant/Shear degrees')
    self.edt_rotate.setToolTip('Rotate degrees')

    # -- Quick Tool buttons
    self.lay_buttons = QtGui.QGridLayout()
    self.btn_unfold = QtGui.QPushButton('Unfold Layers')
    self.btn_restore = QtGui.QPushButton('Fold Layers')
    self.btn_copy = QtGui.QPushButton('Copy Outline')
    self.btn_paste = QtGui.QPushButton('Paste Outline')
    self.btn_transform = QtGui.QPushButton('Transform Layer')
    self.btn_transform_shape = QtGui.QPushButton('Transform Elements')

    # Fold/Paste stay disabled until unfold()/copy() produce data to act on.
    self.btn_restore.setEnabled(False)
    self.btn_paste.setEnabled(False)

    self.btn_unfold.setToolTip('Reposition selected layers side by side. Selection order does matter!')
    self.btn_restore.setToolTip('Restore Layer Metrics.')
    self.btn_copy.setToolTip('Copy selected outline to cliboard for each of selected layers.')
    self.btn_paste.setToolTip('Paste outline from cliboard layer by layer (by name). Non existing layers are discarded! New Element is created upon Paste!')
    self.btn_transform.setToolTip('Affine transform selected layers')

    self.btn_unfold.clicked.connect(self.unfold)
    self.btn_restore.clicked.connect(self.restore)
    self.btn_copy.clicked.connect(self.copy)
    self.btn_paste.clicked.connect(self.paste)
    self.btn_transform.clicked.connect(lambda: self.transform(False))
    self.btn_transform_shape.clicked.connect(lambda: self.transform(True))

    self.lay_buttons.addWidget(self.btn_unfold, 0, 0, 1, 4)
    self.lay_buttons.addWidget(self.btn_restore, 0, 4, 1, 4)
    self.lay_buttons.addWidget(self.btn_copy, 1, 0, 1, 4)
    self.lay_buttons.addWidget(self.btn_paste, 1, 4, 1, 4)
    self.lay_buttons.addWidget(QtGui.QLabel('Translate:'), 2, 0, 1, 2)
    self.lay_buttons.addWidget(QtGui.QLabel('Scale:'), 2, 2, 1, 2)
    self.lay_buttons.addWidget(QtGui.QLabel('Shear:'), 2, 4, 1, 2)
    self.lay_buttons.addWidget(QtGui.QLabel('Rotate:'), 2, 6, 1, 2)
    self.lay_buttons.addWidget(self.edt_shift, 3, 0, 1, 2)
    self.lay_buttons.addWidget(self.edt_scale, 3, 2, 1, 2)
    self.lay_buttons.addWidget(self.edt_slant, 3, 4, 1, 2)
    self.lay_buttons.addWidget(self.edt_rotate, 3, 6, 1, 2)
    self.lay_buttons.addWidget(self.btn_transform, 4, 0, 1, 4)
    self.lay_buttons.addWidget(self.btn_transform_shape, 4, 4, 1, 4)

    self.addLayout(self.lay_buttons)
# - Button procedures ---------------------------------------------------
def unfold(self):
    """Lay the selected layers out side by side by rewriting their LSB/advance.

    Original metrics are saved to self.backup so restore() can undo the layout.
    Requires at least two selected layers.
    """
    if self.aux.doCheck() and len(self.aux.lst_layers.selectedItems()) > 1:
        # - Init
        wGlyph = self.aux.glyph

        # - Prepare Backup
        self.backup = {item.text():(wGlyph.getLSB(item.text()), wGlyph.getAdvance(item.text())) for item in self.aux.lst_layers.selectedItems()}
        self.btn_restore.setEnabled(True)

        # - Calculate metrics
        newLSB = 0
        nextLSB = 0
        # Shared advance wide enough to hold all selected layers end to end.
        newAdvance = sum([sum(item) for item in self.backup.values()])

        for item in self.aux.lst_layers.selectedItems():
            wLayer = item.text()

            # Shift each layer right by the accumulated widths of the previous ones.
            newLSB += nextLSB + self.backup[wLayer][0]
            nextLSB = self.backup[wLayer][1]

            wGlyph.setLSB(newLSB, wLayer)
            wGlyph.setAdvance(newAdvance, wLayer)
            wGlyph.layer(wLayer).isVisible = True

        self.aux.glyph.updateObject(self.aux.glyph.fl, 'Unfold Layers (Side By Side): %s.' %'; '.join([item.text() for item in self.aux.lst_layers.selectedItems()]))
        self.aux.glyph.update()
def restore(self):
    """Restore the layer metrics saved by unfold() and hide those layers again."""
    if self.aux.doCheck() and len(self.backup.keys()):
        # - Restore metrics
        wGlyph = self.aux.glyph

        for layer, metrics in self.backup.iteritems():  # NOTE: Python 2 dict API
            wGlyph.setLSB(metrics[0], layer)
            wGlyph.setAdvance(metrics[1], layer)
            wGlyph.layer(layer).isVisible = False

        # - Reset
        self.backup = {}
        self.btn_restore.setEnabled(False)

        self.aux.glyph.updateObject(self.aux.glyph.fl, 'Restore Layer metrics: %s.' %'; '.join([item.text() for item in self.aux.lst_layers.selectedItems()]))
        self.aux.glyph.update()
def copy(self):
    """Capture the selected contours from every selected layer into the clipboard.

    Fully selected contours are cloned whole; partially selected contours are
    rebuilt from just their selected nodes. Enables the Paste button on success.
    """
    # - Init
    wGlyph = self.aux.glyph
    wContours = wGlyph.contours()
    self.contourClipboard = {}

    # - Build initial contour information
    selectionTuples = wGlyph.selectedAtContours()
    # Map contour id -> selected node ids; an empty list marks a fully selected contour.
    selection = {key:[item[1] for item in value] if not wContours[key].isAllNodesSelected() else [] for key, value in groupby(selectionTuples, lambda x:x[0])}

    if len(selection.keys()):
        self.btn_paste.setEnabled(True)

        for item in self.aux.lst_layers.selectedItems():
            wLayer = item.text()
            self.contourClipboard[wLayer] = []

            for cid, nList in selection.iteritems():  # NOTE: Python 2 dict API
                if len(nList):
                    # Partial selection: clone only the selected nodes into a new contour.
                    self.contourClipboard[wLayer].append(fl6.flContour([wGlyph.nodes(wLayer)[nid].clone() for nid in nList]))
                else:
                    # Full selection: clone the whole contour.
                    self.contourClipboard[wLayer].append(wGlyph.contours(wLayer)[cid].clone())

        print 'DONE:\t Copy outline; Glyph: %s; Layers: %s.' %(self.aux.glyph.fl.name, '; '.join([item.text() for item in self.aux.lst_layers.selectedItems()]))
def paste(self):
    """Paste clipboard contours layer by layer (matched by name) as a new element.

    Layers missing from the current glyph are silently skipped.
    """
    wGlyph = self.aux.glyph

    if len(self.contourClipboard.keys()):
        for layerName, contours in self.contourClipboard.iteritems():  # NOTE: Python 2 dict API
            wLayer = wGlyph.layer(layerName)

            if wLayer is not None:
                # Wrap the pasted contours in a fresh shape (new element).
                newShape = fl6.flShape()
                newShape.addContours(contours, True)
                wLayer.addShape(newShape)

        self.aux.glyph.updateObject(self.aux.glyph.fl, 'Paste outline; Glyph: %s; Layers: %s' %(self.aux.glyph.fl.name, '; '.join([item.text() for item in self.aux.lst_layers.selectedItems()])))
        self.aux.glyph.update()
def transform(self, shapes=False):
if self.aux.doCheck() and len(self.aux.lst_layers.selectedItems()):
# - Init
wGlyph = self.aux.glyph
inpShift = self.edt_shift.text.split(',') if len(self.edt_shift.text) and ',' in self.edt_shift.text else '0.0, 0.0'
inpScale = self.edt_scale.text.split(',') if len(self.edt_scale.text) | |
rules: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorRuleArgs']]]] = None,
show_data_markers: Optional[pulumi.Input[bool]] = None,
show_event_lines: Optional[pulumi.Input[bool]] = None,
start_time: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
teams: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
time_range: Optional[pulumi.Input[int]] = None,
timezone: Optional[pulumi.Input[str]] = None,
url: Optional[pulumi.Input[str]] = None,
viz_options: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorVizOptionArgs']]]] = None):
"""
Input properties used for looking up and filtering Detector resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_writer_teams: Team IDs that have write access to this detector. Remember to use an admin's token if using this feature and to include that admin's team id (or user id in `authorized_writer_users`).
:param pulumi.Input[Sequence[pulumi.Input[str]]] authorized_writer_users: User IDs that have write access to this detector. Remember to use an admin's token if using this feature and to include that admin's user id (or team id in `authorized_writer_teams`).
:param pulumi.Input[str] description: Description for the rule. Displays as the alert condition in the Alert Rules tab of the detector editor in the web UI.
:param pulumi.Input[bool] disable_sampling: When `false`, the visualization may sample the output timeseries rather than displaying them all. `false` by default.
:param pulumi.Input[int] end_time: Seconds since epoch. Used for visualization. Conflicts with `time_range`.
:param pulumi.Input[int] max_delay: How long (in seconds) to wait for late datapoints. See [Delayed Datapoints](https://signalfx-product-docs.readthedocs-hosted.com/en/latest/charts/chart-builder.html#delayed-datapoints) for more info. Max value is `900` seconds (15 minutes). `Auto` (as little as possible) by default.
:param pulumi.Input[int] min_delay: How long (in seconds) to wait even if the datapoints are arriving in a timely fashion. Max value is 900 (15m).
:param pulumi.Input[str] name: Name of the detector.
:param pulumi.Input[str] program_text: Signalflow program text for the detector. More info [in the SignalFx docs](https://developers.signalfx.com/signalflow_analytics/signalflow_overview.html#_signalflow_programming_language).
:param pulumi.Input[Sequence[pulumi.Input['DetectorRuleArgs']]] rules: Set of rules used for alerting.
:param pulumi.Input[bool] show_data_markers: When `true`, markers will be drawn for each datapoint within the visualization. `true` by default.
:param pulumi.Input[bool] show_event_lines: When `true`, the visualization will display a vertical line for each event trigger. `false` by default.
:param pulumi.Input[int] start_time: Seconds since epoch. Used for visualization. Conflicts with `time_range`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Tags associated with the detector.
:param pulumi.Input[Sequence[pulumi.Input[str]]] teams: Team IDs to associate the detector to.
:param pulumi.Input[int] time_range: Seconds to display in the visualization. This is a rolling range from the current time. Example: `3600` corresponds to `-1h` in web UI. `3600` by default.
:param pulumi.Input[str] timezone: The property value is a string that denotes the geographic region associated with the time zone, (e.g. Australia/Sydney)
:param pulumi.Input[str] url: The URL of the detector.
:param pulumi.Input[Sequence[pulumi.Input['DetectorVizOptionArgs']]] viz_options: Plot-level customization options, associated with a publish statement.
"""
if authorized_writer_teams is not None:
pulumi.set(__self__, "authorized_writer_teams", authorized_writer_teams)
if authorized_writer_users is not None:
pulumi.set(__self__, "authorized_writer_users", authorized_writer_users)
if description is not None:
pulumi.set(__self__, "description", description)
if disable_sampling is not None:
pulumi.set(__self__, "disable_sampling", disable_sampling)
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if max_delay is not None:
pulumi.set(__self__, "max_delay", max_delay)
if min_delay is not None:
pulumi.set(__self__, "min_delay", min_delay)
if name is not None:
pulumi.set(__self__, "name", name)
if program_text is not None:
pulumi.set(__self__, "program_text", program_text)
if rules is not None:
pulumi.set(__self__, "rules", rules)
if show_data_markers is not None:
pulumi.set(__self__, "show_data_markers", show_data_markers)
if show_event_lines is not None:
pulumi.set(__self__, "show_event_lines", show_event_lines)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if teams is not None:
pulumi.set(__self__, "teams", teams)
if time_range is not None:
pulumi.set(__self__, "time_range", time_range)
if timezone is not None:
pulumi.set(__self__, "timezone", timezone)
if url is not None:
pulumi.set(__self__, "url", url)
if viz_options is not None:
pulumi.set(__self__, "viz_options", viz_options)
@property
@pulumi.getter(name="authorizedWriterTeams")
def authorized_writer_teams(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    Team IDs that have write access to this detector. Remember to use an admin's token if using this feature and to include that admin's team id (or user id in `authorized_writer_users`).
    """
    return pulumi.get(self, "authorized_writer_teams")

@authorized_writer_teams.setter
def authorized_writer_teams(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the team IDs that have write access to this detector."""
    pulumi.set(self, "authorized_writer_teams", value)
@property
@pulumi.getter(name="authorizedWriterUsers")
def authorized_writer_users(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    User IDs that have write access to this detector. Remember to use an admin's token if using this feature and to include that admin's user id (or team id in `authorized_writer_teams`).
    """
    return pulumi.get(self, "authorized_writer_users")

@authorized_writer_users.setter
def authorized_writer_users(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    """Set the user IDs that have write access to this detector."""
    pulumi.set(self, "authorized_writer_users", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
    """
    Description for the rule. Displays as the alert condition in the Alert Rules tab of the detector editor in the web UI.
    """
    return pulumi.get(self, "description")

@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
    """Set the description for the rule."""
    pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="disableSampling")
    def disable_sampling(self) -> Optional[pulumi.Input[bool]]:
        """
        When `false`, the visualization may sample the output timeseries rather than displaying them all. `false` by default.
        """
        return pulumi.get(self, "disable_sampling")

    @disable_sampling.setter
    def disable_sampling(self, value: Optional[pulumi.Input[bool]]):
        """Set `disable_sampling`."""
        pulumi.set(self, "disable_sampling", value)
    @property
    @pulumi.getter(name="endTime")
    def end_time(self) -> Optional[pulumi.Input[int]]:
        """
        Seconds since epoch. Used for visualization. Conflicts with `time_range`.
        """
        return pulumi.get(self, "end_time")

    @end_time.setter
    def end_time(self, value: Optional[pulumi.Input[int]]):
        """Set `end_time`."""
        pulumi.set(self, "end_time", value)
    @property
    @pulumi.getter(name="maxDelay")
    def max_delay(self) -> Optional[pulumi.Input[int]]:
        """
        How long (in seconds) to wait for late datapoints. See [Delayed Datapoints](https://signalfx-product-docs.readthedocs-hosted.com/en/latest/charts/chart-builder.html#delayed-datapoints) for more info. Max value is `900` seconds (15 minutes). `Auto` (as little as possible) by default.
        """
        return pulumi.get(self, "max_delay")

    @max_delay.setter
    def max_delay(self, value: Optional[pulumi.Input[int]]):
        """Set `max_delay`."""
        pulumi.set(self, "max_delay", value)
    @property
    @pulumi.getter(name="minDelay")
    def min_delay(self) -> Optional[pulumi.Input[int]]:
        """
        How long (in seconds) to wait even if the datapoints are arriving in a timely fashion. Max value is 900 (15m).
        """
        return pulumi.get(self, "min_delay")

    @min_delay.setter
    def min_delay(self, value: Optional[pulumi.Input[int]]):
        """Set `min_delay`."""
        pulumi.set(self, "min_delay", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the detector.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        """Set `name`."""
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="programText")
    def program_text(self) -> Optional[pulumi.Input[str]]:
        """
        Signalflow program text for the detector. More info [in the SignalFx docs](https://developers.signalfx.com/signalflow_analytics/signalflow_overview.html#_signalflow_programming_language).
        """
        return pulumi.get(self, "program_text")

    @program_text.setter
    def program_text(self, value: Optional[pulumi.Input[str]]):
        """Set `program_text`."""
        pulumi.set(self, "program_text", value)
    @property
    @pulumi.getter
    def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorRuleArgs']]]]:
        """
        Set of rules used for alerting.
        """
        return pulumi.get(self, "rules")

    @rules.setter
    def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorRuleArgs']]]]):
        """Set `rules`."""
        pulumi.set(self, "rules", value)
    @property
    @pulumi.getter(name="showDataMarkers")
    def show_data_markers(self) -> Optional[pulumi.Input[bool]]:
        """
        When `true`, markers will be drawn for each datapoint within the visualization. `true` by default.
        """
        return pulumi.get(self, "show_data_markers")

    @show_data_markers.setter
    def show_data_markers(self, value: Optional[pulumi.Input[bool]]):
        """Set `show_data_markers`."""
        pulumi.set(self, "show_data_markers", value)
    @property
    @pulumi.getter(name="showEventLines")
    def show_event_lines(self) -> Optional[pulumi.Input[bool]]:
        """
        When `true`, the visualization will display a vertical line for each event trigger. `false` by default.
        """
        return pulumi.get(self, "show_event_lines")

    @show_event_lines.setter
    def show_event_lines(self, value: Optional[pulumi.Input[bool]]):
        """Set `show_event_lines`."""
        pulumi.set(self, "show_event_lines", value)
    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[pulumi.Input[int]]:
        """
        Seconds since epoch. Used for visualization. Conflicts with `time_range`.
        """
        return pulumi.get(self, "start_time")

    @start_time.setter
    def start_time(self, value: Optional[pulumi.Input[int]]):
        """Set `start_time`."""
        pulumi.set(self, "start_time", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Tags associated with the detector.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        """Set `tags`."""
        pulumi.set(self, "tags", value)
    @property
    @pulumi.getter
    def teams(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Team IDs to associate the detector to.
        """
        return pulumi.get(self, "teams")

    @teams.setter
    def teams(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        """Set `teams`."""
        pulumi.set(self, "teams", value)
    @property
    @pulumi.getter(name="timeRange")
    def time_range(self) -> Optional[pulumi.Input[int]]:
        """
        Seconds to display in the visualization. This is a rolling range from the current time. Example: `3600` corresponds to `-1h` in web UI. `3600` by default.
        """
        return pulumi.get(self, "time_range")

    @time_range.setter
    def time_range(self, value: Optional[pulumi.Input[int]]):
        """Set `time_range`."""
        pulumi.set(self, "time_range", value)
    @property
    @pulumi.getter
    def timezone(self) -> Optional[pulumi.Input[str]]:
        """
        The property value is a string that denotes the geographic region associated with the time zone, (e.g. Australia/Sydney)
        """
        return pulumi.get(self, "timezone")

    @timezone.setter
    def timezone(self, value: Optional[pulumi.Input[str]]):
        """Set `timezone`."""
        pulumi.set(self, "timezone", value)
    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        """
        The URL of the detector.
        """
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        """Set `url`."""
        pulumi.set(self, "url", value)
    @property
    @pulumi.getter(name="vizOptions")
    def viz_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DetectorVizOptionArgs']]]]:
        """
        Plot-level customization options, associated with a publish statement.
        """
        return pulumi.get(self, "viz_options")

    @viz_options.setter
    def viz_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DetectorVizOptionArgs']]]]):
        """Set `viz_options`."""
        pulumi.set(self, "viz_options", value)
class Detector(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authorized_writer_teams: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
authorized_writer_users: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
disable_sampling: Optional[pulumi.Input[bool]] = None,
end_time: Optional[pulumi.Input[int]] = None,
max_delay: Optional[pulumi.Input[int]] = None,
min_delay: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
program_text: Optional[pulumi.Input[str]] = None,
rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['DetectorRuleArgs']]]]] = None,
show_data_markers: Optional[pulumi.Input[bool]] = None,
show_event_lines: Optional[pulumi.Input[bool]] = None,
start_time: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
teams: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
time_range: Optional[pulumi.Input[int]] = None,
timezone: Optional[pulumi.Input[str]] | |
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
prog="market",
description="""
Market data for loaded coin. There you find metrics like:
market_cap_rank, total_supply, max_supply, circulating_supply,
price_change_percentage_24h, price_change_percentage_7d, 'price_change_percentage_30d',
price_change_percentage_60d', 'price_change_percentage_1y', 'market_cap_change_24h',
market_cap_btc', 'market_cap_eth', 'market_cap_usd', 'total_volume_btc', 'total_volume_eth',
total_volume_usd', 'high_24h_btc', 'high_24h_eth', 'high_24h_usd', 'low_24h_btc', 'low_24h_eth',
low_24h_usd'
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.market_data
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
)
)
print("")
except SystemExit:
print("")
except Exception as e:
print(e)
print("")
def holdings_overview(other_args: List[str]):
    """
    Shows overview of public companies that holds ethereum or bitcoin from www.coingecko.com

    Parameters
    ----------
    other_args: List[str]
        Arguments to pass to argparse
    """
    parser = argparse.ArgumentParser(
        prog="hold",
        add_help=False,
        description="""
        Shows overview of public companies that holds ethereum or bitcoin.
        You can find there most important metrics like:
        Total Bitcoin Holdings, Total Value (USD), Public Companies Bitcoin Dominance, Companies
        """,
    )
    parser.add_argument(
        "-c",
        "--coin",
        dest="coin",
        type=str,
        help="companies with ethereum or bitcoin",
        default="bitcoin",
        choices=["ethereum", "bitcoin"],
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # Fetch the overview for the chosen coin and render it as a grid.
        holdings = gecko.get_holdings_overview(endpoint=ns_parser.coin)
        table = tabulate(
            holdings,
            headers=holdings.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(table)
        print("")
    except Exception as e:
        print(e)
        print("")
def holdings_companies_list(other_args: List[str]):
    """Shows Ethereum/Bitcoin Holdings by Public Companies from www.coingecko.com

    Track publicly traded companies around the world that are buying ethereum as part of corporate treasury

    Parameters
    ----------
    other_args: List[str]
        Arguments to pass to argparse
    """
    parser = argparse.ArgumentParser(
        prog="hold_comp",
        add_help=False,
        description="""Track publicly traded companies around the world that
        are buying ethereum or bitcoin as part of corporate treasury:
        rank, company, ticker, country, total_btc, entry_value, today_value, pct_of_supply
        You can use additional flag --links to see urls to announcement about buying btc or eth by given company.
        In this case you will see only columns like rank, company, url
        """,
    )
    parser.add_argument(
        "-c",
        "--coin",
        dest="coin",
        type=str,
        help="companies with ethereum or bitcoin",
        default="bitcoin",
        choices=["ethereum", "bitcoin"],
    )
    parser.add_argument(
        "-l",
        "--links",
        dest="links",
        action="store_true",
        help="Flag to show urls. If you will use that flag you will see only rank, company, url columns",
        default=False,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        companies = gecko.get_companies_assets(endpoint=ns_parser.coin)
        # --links narrows the view down to the announcement URLs only;
        # otherwise the url column is dropped entirely.
        if ns_parser.links:
            companies = companies[["rank", "company", "url"]]
        else:
            companies.drop("url", axis=1, inplace=True)
        table = tabulate(
            companies,
            headers=companies.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(table)
        print("")
    except Exception as e:
        print(e)
        print("")
def gainers(other_args: List[str]):
    """Shows Largest Gainers - coins which gain the most in given period from www.coingecko.com

    Parameters
    ----------
    other_args: List[str]
        Arguments to pass to argparse
    """
    parser = argparse.ArgumentParser(
        prog="gainers",
        add_help=False,
        description="""
        Shows Largest Gainers - coins which gain the most in given period.
        You can use parameter --period to set which timeframe are you interested in. eg. 1h, 24h, 7d, 14d, 30d, 60d, 1y
        You can look on only top N number of records with --top,
        You can sort by rank, symbol, name, volume, price, change with --sort and also with --descend flag to set it
        to sort descending.
        There is --links flag, which will display one additional column you all urls for coins.
        """,
    )
    parser.add_argument(
        "-p",
        "--period",
        dest="period",
        type=str,
        help="time period, one from [1h, 24h, 7d, 14d, 30d, 60d, 1y]",
        default="1h",
        choices=["1h", "24h", "7d", "14d", "30d", "60d", "1y"],
    )
    parser.add_argument(
        "-t",
        "--top",
        dest="top",
        type=int,
        help="top N number records",
        default=20,
    )
    parser.add_argument(
        "-s",
        "--sort",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: rank",
        default="rank",
        choices=["rank", "symbol", "name", "volume", "price", "change"],
    )
    parser.add_argument(
        "--descend",
        action="store_false",
        help="Flag to sort in descending order (lowest first)",
        dest="descend",
        default=True,
    )
    parser.add_argument(
        "-l",
        "--links",
        dest="links",
        action="store_true",
        help="Flag to show urls. If you will use that flag you will additional column with urls",
        default=False,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # "change" is a virtual column: it maps onto the period-specific
        # percentage-change column returned by the API.
        sort_key = (
            f"%change_{ns_parser.period}"
            if ns_parser.sortby == "change"
            else ns_parser.sortby
        )
        df = gecko.get_gainers_or_losers(period=ns_parser.period, typ="gainers")
        df = df.sort_values(by=sort_key, ascending=ns_parser.descend)
        if not ns_parser.links:
            df.drop("url", axis=1, inplace=True)
        table = tabulate(
            df.head(ns_parser.top),
            headers=df.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(table)
        print("")
    except Exception as e:
        print(e)
        print("")
def losers(other_args: List[str]):
    """Shows Largest Losers - coins which lost the most in given period of time from www.coingecko.com

    Parameters
    ----------
    other_args: List[str]
        Arguments to pass to argparse
    """
    parser = argparse.ArgumentParser(
        prog="losers",
        add_help=False,
        description="""
        Shows Largest Losers - coins which price dropped the most in given period
        You can use parameter --period to set which timeframe are you interested in. eg. 1h, 24h, 7d, 14d, 30d, 60d, 1y
        You can look on only top N number of records with --top,
        You can sort by rank, symbol, name, volume, price, change with --sort and also with --descend flag
        to sort descending.
        Flag --links will display one additional column with all coingecko urls for listed coins.
        """,
    )
    parser.add_argument(
        "-p",
        "--period",
        dest="period",
        type=str,
        help="time period, one from [1h, 24h, 7d, 14d, 30d, 60d, 1y]",
        default="1h",
        choices=["1h", "24h", "7d", "14d", "30d", "60d", "1y"],
    )
    parser.add_argument(
        "-t",
        "--top",
        dest="top",
        type=int,
        help="top N number records",
        default=20,
    )
    parser.add_argument(
        "-s",
        "--sort",
        dest="sortby",
        type=str,
        # Fix: the help text previously claimed "Default: change", but the
        # actual default below is "rank" (matching the gainers command).
        help="Sort by given column. Default: rank",
        default="rank",
        choices=["rank", "symbol", "name", "volume", "price", "change"],
    )
    parser.add_argument(
        "--descend",
        action="store_false",
        help="Flag to sort in descending order (lowest first)",
        dest="descend",
        default=True,
    )
    parser.add_argument(
        "-l",
        "--links",
        dest="links",
        action="store_true",
        help="Flag to show urls. If you will use that flag you will additional column with urls",
        default=False,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        # "change" maps onto the period-specific percentage-change column.
        if ns_parser.sortby == "change":
            sortby = f"%change_{ns_parser.period}"
        else:
            sortby = ns_parser.sortby
        df = gecko.get_gainers_or_losers(
            period=ns_parser.period, typ="losers"
        ).sort_values(by=sortby, ascending=ns_parser.descend)
        if not ns_parser.links:
            df.drop("url", axis=1, inplace=True)
        print(
            tabulate(
                df.head(ns_parser.top),
                headers=df.columns,
                floatfmt=".2f",
                showindex=False,
                tablefmt="fancy_grid",
            )
        )
        print("")
    except Exception as e:
        print(e)
        print("")
def discover(category: str, other_args: List[str]):
    """Discover coins by different categories
    - Most voted coins
    - Most popular coins
    - Recently added coins
    - Most positive sentiment coins

    Parameters
    ----------
    category: str
        one from list: [trending, most_voted, positive_sentiment, most_visited]
    other_args: List[str]
        Arguments to pass to argparse
    """
    parser = argparse.ArgumentParser(
        prog=f"{category}",
        add_help=False,
        description=f"""Discover {category} coins.
        Use --top parameter to display only top N number of records,
        You can sort by rank, name, price_btc, price_usd, using --sort parameter and also with --descend flag
        to sort descending.
        Flag --links will display one additional column with all coingecko urls for listed coins.
        {category} will display: rank, name, price_usd, price_btc
        """,
    )
    parser.add_argument(
        "-t",
        "--top",
        dest="top",
        type=int,
        help="top N number records",
        default=20,
    )
    parser.add_argument(
        "-s",
        "--sort",
        dest="sortby",
        type=str,
        help="Sort by given column. Default: rank",
        default="rank",
        choices=["rank", "name", "price_usd", "price_btc"],
    )
    parser.add_argument(
        "--descend",
        action="store_false",
        help="Flag to sort in descending order (lowest first)",
        dest="descend",
        default=True,
    )
    parser.add_argument(
        "-l",
        "--links",
        dest="links",
        action="store_true",
        help="Flag to show urls. If you will use that flag you will additional column with urls",
        default=False,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        coins = gecko.discover_coins(category=category)
        # Re-base the index at 1 and expose it as an explicit "rank" column.
        coins.index = coins.index + 1
        coins.reset_index(inplace=True)
        coins.rename(columns={"index": "rank"}, inplace=True)
        coins = coins.sort_values(by=ns_parser.sortby, ascending=ns_parser.descend)
        if not ns_parser.links:
            coins.drop("url", axis=1, inplace=True)
        table = tabulate(
            coins.head(ns_parser.top),
            headers=coins.columns,
            floatfmt=".5f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(table)
        print("")
    except Exception as e:
        print(e)
        print("")
def news(other_args: List[str]):
"""Shows latest crypto news from www.coingecko.com
Parameters
----------
other_args: List[str]
Arguments to pass to argparse
"""
parser = argparse.ArgumentParser(
prog="news",
add_help=False,
description="Shows latest crypto news from CoinGecko. "
"You will see index, title, author, posted columns. "
"You can sort by each of column above, using --sort parameter and also do it descending with --descend flag"
"To display urls to news use --links flag.",
)
parser.add_argument(
"-t",
"--top",
dest="top",
type=int,
help="top N number of news >=10",
default=50,
)
parser.add_argument(
"-s",
"--sort",
dest="sortby",
type=str,
help="Sort by given column. Default: index",
default="index",
choices=["index", "title", "author", "posted"],
)
parser.add_argument(
"--descend",
action="store_false",
help="Flag to sort in descending order (lowest first)",
dest="descend",
default=True,
)
parser.add_argument(
"-l",
"--links",
dest="links",
action="store_true",
help="Flag to show urls. If you will use that flag you will additional column with urls",
default=False,
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = gecko.get_news(n=ns_parser.top).sort_values(
by=ns_parser.sortby, ascending=ns_parser.descend
)
df["title"] = df["title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=50)) if isinstance(x, str) else x
)
if not ns_parser.links:
df.drop("url", axis=1, inplace=True)
else:
df = df[["index", "url"]]
print(
tabulate(
df,
headers=df.columns,
floatfmt=".0f",
showindex=False,
tablefmt="fancy_grid",
| |
words[0] == 'hypercalls':
for w in map(var_subst, words[1:]):
hypercalls.add(add_hypercall_dsl(d, w, local_env))
elif words[0] == 'events':
for w in map(var_subst, words[1:]):
event_sources.add(add_event_dsl(d, w, local_env))
have_events = True
elif words[0] == 'local_include':
add_include(d, 'include', local_env)
elif words[0] == 'source':
for w in map(var_subst, words[1:]):
objs.append(add_source(d, w, src_requires, local_env))
elif words[0] == 'external_object':
if not do_partial_link:
for w in map(var_subst, words[1:]):
external_objects.add(w)
elif words[0] == 'flags':
add_flags(map(var_subst, words[1:]), local_env)
elif words[0] == 'configs':
for c in map(var_subst, words[1:]):
add_global_define(c)
elif words[0] == 'arch_types':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
types.add(add_type_dsl(
os.path.join(d, words[1]), w, local_env))
elif words[0] == 'arch_hypercalls':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
f = os.path.join(words[1], w)
hypercalls.add(add_hypercall_dsl(d, f, local_env))
elif words[0] == 'arch_events':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
event_sources.add(add_event_dsl(
os.path.join(d, words[1]), w, local_env))
have_events = True
elif words[0] == 'arch_local_include':
if arch_match(words[1]):
add_include(d, os.path.join(words[1], 'include'), local_env)
elif words[0] == 'arch_source':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
objs.append(add_source(os.path.join(d, words[1]),
w, src_requires, local_env))
elif words[0] == 'arch_external_object':
if not do_partial_link:
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
external_objects.add(w)
elif words[0] == 'arch_flags':
if arch_match(words[1]):
add_flags(map(var_subst, words[2:]), local_env)
elif words[0] == 'arch_configs':
if arch_match(words[1]):
for w in map(var_subst, words[2:]):
add_global_define(w)
elif words[0] == 'first_class_object':
for w in map(var_subst, words[1:]):
first_class_objects.add(w)
elif words[0] == 'base_module':
for w in map(var_subst, words[1:]):
# Require the base module's generated headers
local_headers.append(module_local_headers_gen(w))
# FIXME: We can't properly determine whether there are
# local_includes or not unless we do two-pass parsing of the
# build configs, so we just add them all.
logger.disabled = True
add_include(w, 'include', local_env)
add_include(os.path.join(build_dir, w), 'include', local_env)
# FIXME: We assume module has all possible arch include dirs
for arch_name in target_arch_names:
arch_dir = os.path.join(arch_name, 'include')
add_include(w, arch_dir, local_env)
add_include(os.path.join(build_dir, w),
arch_dir, local_env)
logger.disabled = False
if w not in module_dirs:
module_dirs.append(w)
elif words[0] == 'template' and words[1] == 'simple':
for w in map(var_subst, words[2:]):
add_simple_template(d, w, src_requires, local_env,
local_headers=True, headers=local_headers,
objects=objs)
elif words[0] == 'template':
ts = template_match(words[1], d)
for w in map(var_subst, words[2:]):
if add_template(ts, d, '', w, src_requires, local_env,
module):
have_events = True
elif words[0] == 'arch_template' and words[1] == 'simple':
if arch_match(words[2]):
for w in map(var_subst, words[3:]):
add_simple_template(d, w, src_requires, local_env,
local_headers=True,
headers=local_headers,
objects=objs, arch=words[2])
elif words[0] == 'arch_template':
ts = template_match(words[1], d)
if arch_match(words[2]):
for w in map(var_subst, words[3:]):
if add_template(ts, d, words[2], w, src_requires,
local_env, module):
have_events = True
else:
# TODO: dependencies, configuration variables, etc
# Restructure this to use a proper parser first
logger.error('Unknown token "%s" in module conf for %s',
words[0], d)
sys.exit(1)
if have_events:
local_headers.append(get_event_local_inc_file(module))
modules_with_events.add(module)
add_event_handlers(module)
graph.add_alias(local_headers_gen, local_headers)
def parse_interface_conf(d, f):
    """Parse an interface's build.conf and register its build outputs.

    d is the interface directory; f is the already-open config file.
    Each non-comment line is a directive keyword followed by file names,
    run through variable substitution. Unknown directives abort the build.
    """
    local_env = {}
    interface = os.path.basename(d)
    have_events = False
    for s in f.readlines():
        words = s.split()
        if not words or words[0].startswith('#'):
            # Skip comments or blank lines
            pass
        elif words[0] == 'types':
            for w in map(var_subst, words[1:]):
                types.add(add_type_dsl(d, w, local_env))
        elif words[0] == 'hypercalls':
            for w in map(var_subst, words[1:]):
                hypercalls.add(add_hypercall_dsl(d, w, local_env))
        elif words[0] == 'events':
            for w in map(var_subst, words[1:]):
                event_sources.add(add_event_dsl(d, w, local_env))
            have_events = True
        elif words[0] == 'arch_types':
            if arch_match(words[1]):
                for w in map(var_subst, words[2:]):
                    types.add(add_type_dsl(
                        os.path.join(d, words[1]), w, local_env))
        elif words[0] == 'arch_hypercalls':
            if arch_match(words[1]):
                for w in map(var_subst, words[2:]):
                    # Fix: previously this rebound the open-file parameter
                    # 'f'. Harmless only because the file is fully read
                    # up-front; use a dedicated name to avoid the hazard.
                    arch_path = os.path.join(words[1], w)
                    hypercalls.add(add_hypercall_dsl(d, arch_path, local_env))
        elif words[0] == 'arch_events':
            if arch_match(words[1]):
                for w in map(var_subst, words[2:]):
                    event_sources.add(add_event_dsl(
                        os.path.join(d, words[1]), w, local_env))
                have_events = True
        elif words[0] == 'first_class_object':
            for w in map(var_subst, words[1:]):
                first_class_objects.add(w)
        elif words[0] == 'template' and words[1] == 'simple':
            for w in map(var_subst, words[2:]):
                add_simple_template(d, w, src_requires, local_env)
        elif words[0] == 'template':
            ts = template_match(words[1], d)
            for w in map(var_subst, words[2:]):
                if add_template(ts, d, '', w, None, local_env, None):
                    have_events = True
        else:
            # TODO: dependencies, configuration variables, etc
            # Restructure this to use a proper parser first
            logger.error('Unknown token "%s" in interface conf for %s',
                         words[0], d)
            sys.exit(1)
    if have_events:
        interfaces_with_events.add(interface)
        add_event_handlers(interface)
def add_include_dir(d, local_env):
    """Append directory d as a quoted include path to LOCAL_CPPFLAGS.

    Directories outside the build tree are checked for existence up-front
    so a typo in a config file is reported early (generated directories
    under build_dir may legitimately not exist yet).
    """
    if not d.startswith(build_dir):
        if not os.path.isdir(d):
            # Fix: typo "non-existant" -> "non-existent" in the warning.
            logger.warning("include path: '{:s}' non-existent!".format(d))
    if 'LOCAL_CPPFLAGS' in local_env:
        local_env['LOCAL_CPPFLAGS'] += ' '
    else:
        local_env['LOCAL_CPPFLAGS'] = ''
    local_env['LOCAL_CPPFLAGS'] += '-iquote ' + relpath(d)
def add_include(module_dir, include, local_env):
    """Register module_dir/include as a quoted include path."""
    include_path = os.path.join(module_dir, include)
    add_include_dir(include_path, local_env)
def add_flags(flags, local_env):
    """Append compiler flags (any iterable of strings) to LOCAL_CFLAGS."""
    joined = ' '.join(flags)
    if 'LOCAL_CFLAGS' in local_env:
        # Existing flags keep their order; new ones go on the end.
        local_env['LOCAL_CFLAGS'] = local_env['LOCAL_CFLAGS'] + ' ' + joined
    else:
        local_env['LOCAL_CFLAGS'] = joined
def add_global_define(d):
    """Add a -D preprocessor define globally, for both compilation and
    code generation."""
    check_global_define(d)
    define = "-D" + d
    for env_name in ('CPPFLAGS', 'CODEGEN_CONFIGS'):
        graph.append_env(env_name, define)
def add_source_file(src, obj, requires, local_env):
    """Register compilation of src into obj.

    A per-file __BUILD_FILE__ define is injected so sources can refer to
    their own path. When static-analysis HTML output is enabled, AST,
    definition-map and analysis targets are also added for C sources.
    """
    file_env = local_env.copy()
    file_define = '-D__BUILD_FILE__=\\"{:s}\\"'.format(src)
    if 'LOCAL_CPPFLAGS' in file_env:
        file_env['LOCAL_CPPFLAGS'] += ' ' + file_define
    else:
        file_env['LOCAL_CPPFLAGS'] = file_define
    graph.add_target([obj], 'cc', [src], requires=requires,
                     **file_env)
    objects.add(obj)

    if not (do_sa_html and src.endswith(".c")):
        return

    # Clang cross-translation-unit static analysis artefacts.
    ast_file = os.path.join(ctu_dir, src + ".ast")
    graph.add_target([ast_file], 'cc-ctu-ast', [src], requires=requires,
                     **file_env)
    asts.add(ast_file)
    map_frag = os.path.join(ctu_dir, src + ".map")
    graph.add_target([map_frag], 'cc-ctu-map', [src], requires=requires,
                     **file_env)
    defmap_frags.add(map_frag)
    html_dir = obj + ".html"
    graph.add_target([html_dir], 'cc-analyze', [src], requires=requires,
                     depends=(ast_gen, defmap), **file_env)
    sa_html.add(html_dir)
def add_source(module_dir, src, requires, local_env):
    """Register a module source file for compilation.

    Only C (.c) and assembly (.S) sources are supported; anything else
    aborts the build. Returns the path of the object file that will be
    produced.
    """
    # Idiom: str.endswith accepts a tuple of suffixes.
    if not src.endswith(('.c', '.S')):
        logger.error('unknown source file type for: %s', src)
        sys.exit(1)
    out_dir = os.path.join(build_dir, module_dir, 'obj')
    i = os.path.join(module_dir, 'src', src)
    o = os.path.join(out_dir, src + '.o')
    add_source_file(i, o, requires, local_env)
    return o
def add_preproc_dsl(module_dir, src, **local_env):
    """Add a preprocessing target for a DSL file; return the .pp output."""
    src_path = os.path.join(module_dir, src)
    out_path = os.path.join(build_dir, module_dir, src + '.pp')
    graph.add_target([out_path], 'cpp-dsl', [src_path], **local_env)
    return out_path
def add_type_dsl(module_dir, src, local_env):
    """Preprocess a type-definition DSL file; return the output path."""
    return add_preproc_dsl(module_dir, src,
                           DSL_DEFINES='-D__TYPED_DSL__', **local_env)
def add_hypercall_dsl(module_dir, src, local_env):
    """Preprocess a hypercall-definition DSL file; return the output path."""
    return add_preproc_dsl(module_dir, src,
                           DSL_DEFINES='-D__HYPERCALLS_DSL__', **local_env)
def add_event_dsl(module_dir, src, local_env):
    """Preprocess an event-definition DSL file; return the output path.

    Event DSL preprocessing additionally requires the generated
    hypconstants header.
    """
    return add_preproc_dsl(module_dir, src,
                           requires=(hypconstants_header,),
                           DSL_DEFINES='-D__EVENTS_DSL__', **local_env)
def add_template(ts, d, arch, tmpl_file, requires, local_env, module):
    """Queue a template source on template set ts.

    Returns True when the template produces an event DSL (.ev) file.
    C templates are only valid for modules, not interfaces.
    """
    ext = os.path.splitext(tmpl_file)[1]
    is_module = module is not None
    # Guard: a C template makes no sense for an interface.
    if ext == '.c' and not is_module:
        logger.error('C template specified for interface %s', d)
        sys.exit(1)
    # Generated module headers live under the module's objects build dir.
    if ext == '.h' and is_module:
        add_include(os.path.join(objects_build_dir, module), 'include',
                    local_env)
    ts.sources.append((d, tmpl_file, arch, requires, is_module, local_env))
    return ext == '.ev'
def add_simple_template(d, t, requires, local_env, local_headers=False,
                        headers=None, objects=None, arch=''):
    """Add a code-generation target for a standalone .tmpl template.

    The template's output kind is inferred from the inner extension of the
    template name (foo.h.tmpl -> header, foo.c.tmpl / foo.S.tmpl -> source).
    Headers are appended to *headers*; compiled objects to *objects*.
    NOTE(review): the *objects* parameter shadows a module-level set of the
    same name — presumably intentional, but worth confirming.
    """
    i = os.path.join(d, arch, 'templates', t)
    out_name, ext = os.path.splitext(t)
    if ext != '.tmpl':
        logger.warning("Template filename does not end in .tmpl: %s", t)
    # The output extension is the second-to-last extension of the template.
    out_ext = os.path.splitext(out_name)[1]
    if out_ext == '.h' and headers is not None:
        if local_headers:
            # Module-private header: generated into the module's build dir
            # and added to its quoted include path.
            out_dir = os.path.join(build_dir, d, arch, 'include')
            add_include_dir(out_dir, local_env)
        else:
            # Shared interface header; arch-specific variants not supported.
            assert not arch
            out_dir = interface_gen_dir
        o = os.path.join(out_dir, out_name)
        headers.append(o)
    elif out_ext in ('.c', '.S') and objects is not None:
        out_dir = os.path.join(build_dir, d, arch, 'src')
        o = os.path.join(out_dir, out_name)
        oo = o + '.o'
        add_source_file(o, oo, requires, local_env)
        objects.append(oo)
    else:
        logger.error("Unsupported template output: %s", out_name)
        sys.exit(1)
    graph.add_target([o], 'code_gen', [i])
# Modules/interfaces whose generated event-dispatch source is registered.
event_handler_modules = set()


def add_event_handlers(module):
    """Register the compile target for a module's generated event handlers.

    Idempotent: each module is registered at most once, however many
    config directives declare events for it.
    """
    if module in event_handler_modules:
        return
    event_handler_modules.add(module)
    obj = get_event_src_file(module) + '.o'
    # The generated dispatch code needs the global types header and the
    # module's generated events header before it can compile.
    event_src_requires = (
        hyptypes_header,
        get_event_inc_file(module),
    )
    add_source_file(get_event_src_file(module), obj, event_src_requires,
                    {})
# Header locations
# Output directories and future aliases for the header-generating tools;
# the aliases are filled in once all of their member targets are known.
interface_gen_dir = os.path.join(build_dir, 'interface', 'include')
graph.append_env('CPPFLAGS', '-I ' + relpath(interface_gen_dir))
objects_build_dir = os.path.join(build_dir, 'objects')
events_inc_dir = os.path.join(build_dir, 'events', 'include')
objects_headers_gen = graph.future_alias(
    os.path.join(build_dir, 'objects_headers_gen'))

# Support for the event generator
graph.append_env('CPPFLAGS', '-I ' + relpath(events_inc_dir))
event_headers_gen = graph.future_alias(
    os.path.join(build_dir, 'event_headers_gen'))

# Support for the hypercalls generator
hypercalls_headers_gen = graph.future_alias(
    os.path.join(build_dir, 'hypercalls_headers_gen'))
def get_event_local_inc_dir(module):
    """Per-module generated events include directory in the build tree."""
    return os.path.join(build_dir, 'events', module, 'include')
def get_event_local_inc_file(module):
    """Path of the module's generated event_handlers.h header."""
    return os.path.join(get_event_local_inc_dir(module), 'event_handlers.h')
def get_event_inc_file(module):
    """Path of the module's generated events/<module>.h header."""
    return os.path.join(events_inc_dir, 'events', module + '.h')
def get_event_src_file(module):
    """Path of the module's generated event-dispatch C source."""
    return os.path.join(build_dir, 'events', 'src', module + '.c')
#
# Global generated headers depends
#
# Paths of the globally generated headers; targets that consume them list
# them as requirements so generation happens before compilation.
build_includes = os.path.join(build_dir, 'include')
hyptypes_header = os.path.join(build_includes, 'hyptypes.h')
hypconstants_header = os.path.join(build_includes, 'hypconstants.h')
hypcontainers_header = os.path.join(build_includes, 'hypcontainers.h')
registers_header = os.path.join(build_includes, 'hypregisters.h')
version_header = os.path.join(build_includes, 'hypversion.h')
sym_version_header = os.path.join(build_includes, 'hypsymversion.h')
graph.append_env('CPPFLAGS', '-I ' + relpath(build_includes))
typed_headers_gen = graph.future_alias(
    os.path.join(build_dir, 'typed_headers_gen'))
#
# Hypercalls generated files
#
# FIXME: This is not hypervisor source, it should not be built.
# Generation temporarily hard coded here until better handling implemented
hypguest_interface_src = os.path.join(build_dir, 'hypercalls', 'src',
                                      'guest_interface.c')
hypguest_interface_header = os.path.join(build_dir, 'hypercalls', 'include',
                                         'guest_interface.h')
hypguest_interface_types = os.path.join(build_dir, 'hypercalls', 'include',
                                        'guest_types.h')

#
# Set up the simple code generator
#
codegen_script = os.path.join('tools', 'codegen', 'codegen.py')
graph.add_env('CODEGEN', relpath(codegen_script))
# The code generator emits a depfile alongside its output so incremental
# builds re-run it when any consumed input changes.
graph.add_rule('code_gen', '${CODEGEN} ${CODEGEN_ARCHS} ${CODEGEN_CONFIGS} '
               '-f ${FORMATTER} -o ${out} -d ${out}.d ${in}',
               depfile='${out}.d')

#
# Set up the Clang static analyser
#
# Cross-translation-unit analysis: the external definition map and the
# alias for all generated ASTs.
defmap = os.path.join(ctu_dir, "externalDefMap.txt")
ast_gen = graph.future_alias(os.path.join(build_dir, 'ast-gen'))
#
# Collect the lists of objects, modules and interfaces
#
# Parse every configured module and interface build.conf; sorting keeps
# the generated build rules deterministic.
module_dirs = sorted(os.path.join(module_base, m) for m in modules)
for d in module_dirs:
    process_dir(d, parse_module_conf)

for i in sorted(interfaces):
    d = os.path.join(interface_base, i)
    process_dir(d, parse_interface_conf)
#
# Generate types and events for first class objects
#
def add_object_c_template(module, template, requires, object_str, target,
                          local_env):
    """Generate a C source from an object template and compile it."""
    generated = os.path.join(objects_build_dir, module, target)
    graph.add_target([generated], 'object_gen_c', [template], OBJ=object_str,
                     depends=[objects_script])
    add_source_file(generated, generated + '.o', requires, local_env)
def add_object_h_template(module, template, requires, object_str, target,
is_module, local_env):
| |
macie pryszcze!',
'CrtmiO0vp3aqtYFOm2IY4pXZ2qKOfmKs': '*Skucha, skucha, zezulce',
'pEmnG9ivAZ1xjbBl0xnePNL2IqTv5lGf': 'Nic mi nie jest! Nic mi nie jest!',
'EtV2D1NfGt7ugV3UWJnksvZKGSbYzZOy':
'Trzeba powiadomić Kleopatrę! Tylko ona może sprawić, żeby Cezar... Żeby Cezar zaprzestał ostrzału.',
'lb7uN8PqZhhQRdhCNePnm96W2trk7yQR': 'Owszem, owszem',
'XUqqDalaHdV4uF4Rjh8KxQyjNttFAagc': 'Nie, nie, nie, nie.',
'efwNs87K5EN7u2FrULqPzaUyoktWQaQ3':
'zechciał przyjąć odpowiednie wyrazy itp itd, koniec',
'B19URXrAf6GehKU5IpSV2u5AtZDSgiWO': 'Huuu, proszę.',
'z14AwPCDDWx2nhbSgf1cJc09D8SO7J8I':
'Wiecie co?! To Idefix zaniesie Kleopatrze naszą wiadomość!',
'FDTklxOdCYtK1qohXsGhEiZNV7YpoeJg': 'Idefix zaniesie wiadomość ....',
'cytxRLOkcNzjtdRkXg1HSU2IU0VcG6bA':
'Magda Fijałkowska Jasne. Czyli w ogóle już wam na mnie nie zależy.',
'Al4N4nvob3pYUGlcy9rZiKMIMlXUw8cU':
'Wiesz co Obelix - to jest jednak pies..',
'1G2nxoL9Iv60KEzH3eumxk6vzc7K0qeE': 'A kto Cię wyprowadził z piramidy?',
'Dp8XiiBQsyV5WXjF34GX5nQMBtExwn2a':
'Idź Idefixie, zanieś Kleopatrze wiadomość.',
'JK5FU6Mg7ncQWW8Q9fiiX6t2QEEnzJOX':
'A nie mówiłem. Za mały jest, żeby zrozumieć.',
'bHdJ630PtalToB1YVkHgVnDIddmSGeTk': 'Chyba oberwałem... A nie, to nie ja.',
'WjDrOd7dBFsiCmmZH2UjXhQBzN6jPbWd':
'Tu tu du tu tu - Chłopcy! Po raz kolejny staneliśmy na nogi na pokładzie naszego wiernego barkasu!',
'QWPoVzIyz73vavOZPFfQvTOCBfaY2bai':
'Dobry to statek, choć słabe ma zło! Ciągnij go!',
'afEmNEwKpL1LluwIWpRFCCGe7WHMlbMA': 'owszem',
'mPaGnc5pUwkB4nWzVfsMlB0qO8pgqf4k': 'nie',
'y34kTbEHbQeauqLkRHBxFRbMkWAVSexi': 'Ssssssss',
'pogyeyhiipWBUmKu0p2GZhUH4XMTzgiP': 'haaaaaaa',
'8oNfkxLczJaslmc0eLB4gg3RtPFjPQCY': 'Sssss',
'oZja3DjCSWnp1oiAZ9JwJIu3zUcCv8R8': 'O nie',
'MRrVJn76p88ISjHp3YJWM9zsdrpwmfri': 'Chyba aut.',
'RtyiKh652ywQNjTHCVGsYgj5Nopk3KxS': 'Nieu',
'WZnTSUA6gK4ms2Yovc147hIdVqlSiss2': 'Tratwa Nieduza',
'lgCBhOTWfiYyXUYDcblnl17exK2Hfi0k': 'Powinno się trzymać',
'0JYkcETIFuKK2zXHgbbaPIz2oPSBL0IN': 'To co, golne sobie i szybka bramka',
'Plxd8B62bHeHNu65Cstz6mLzH0ZTdzmw':
'Pośpiesz się i uważaj na siebie. Dasz radę',
'q5G8c6BrXlxY4pDRSU5AMi1J3HXv3kfV': 'Trzymaj się Idefix!',
'G6Ir27sdUNofvLxJ1F3l64obzysl80Rn': 'SKURÓTEM ASTERIMXIE!!! SKRÓTEM!!!',
'jDJCmg5s4t59qu8wxKgaap1HINBzThT1': '...rixie!!!',
'1LFDmhtgycDv9fY3B6F53xaoWyFRmnWl': 'Alarm!',
'FBkaKmUFQHy2FtHgLj9QmjXTnTtE2hky':
'Czekaj, tego to ja chyba poznaję. To wariat!',
'0nyEyQ4AEM25w9cHbAqLYCZvkvIPf1o6': 'Pancerz staw!',
'SLYidf4MEoClYw4wzvFykqZVNPXTj7Y7': 'Pilum!',
'NwjhQX7RAbpe9vQoP9FeHhqklXV9tsXX': 'Pilum',
'Mp5NeHryO7GL8CQWUW0tnb2AGFs88Ct4': 'Czy jest cos o czym nie wiem',
'7hPEFPTUz9k3ZziuNAXQTXPnMpx53iDf': 'Magiczny napój.',
'tKq2Kl7hHMYgjbfc1WSzpaaFxRqjSD1D':
'Autobus, Frankolus łapcie go! Jazda, no!',
'LJQmOncKsWBGhPRZwZhksNLu3Q8rmSJs': 'Ale ja nie jestem Frankolus',
'spSztzixmlxhSI8mog5hQ4SHK8jxzWo8': 'WIOOO',
'hlTYX2MsKG3mPqjx8pG8fSkIJ6A5NmUU': 'Wio',
'8E93vEPlg1N5GmGUFTVu3uRGwOC9ZBso': 'Idź pomóż Obeliksowi',
'aHUbDGLo0YwY6NsKBHFUf2EJCA2bRVgf': 'Woooow! To co, już działa?',
'28ueJiFxkbFXU2Cq47KxZWW1R0ZfhZdE': 'Numernabisie, masz już sarkofag?',
'26pS8IH3LO3rDUPPYtT6C4PtflSci5wl':
'Marnypopisie, przemoc to nie najlepszy materiał na szatę dla mnicha.',
'18DKHKiv1xdIaADVloWfiRe8RcwAahZN':
'*to nie najlepszy materiał na szatę dla mnicha',
'mdoNYXZEYyUC3DjBQp200BLF1uKdijDg': 'Zrobię z Ciebie szyszkebab',
'03F0BLAzPeR0SMaI6Rkw9EMTUeMO45D8':
'Ale czekaj, ja nie niczego nie piłem, wiesz',
'6xkJxUD6kiRuWelnEVTwWw6SdYkrJQbR': 'Hijaaaaaoooooo',
'xF8xT2jzsH54oVFDICU3XOOnfwkNfWyA': 'Huaaaaaaa',
'7Z7Pa0IrN3j6H3iBhPwejnny2uoQBAUG': 'Papier zwycięża kamień.',
'8x7SJJwGnJheo326FkpafbPKoMTT6JBB': 'Na pewno?',
'KW8IjbD2qnkoOKFxQ6NToljXs16Cj0Oo': 'Yyy... tak',
'bH4O0azDmCl45qr2N1jYZURA9vR1iqJ8': 'Aha...',
'E93RQLlw4xtBkRakbOA97o2dCw293YVT': 'No dobra.',
'BnAIJGqQD49LTucuN9LhguaV1aQq53QC':
'No ale nie nie nie nie, no nie, tak nie wolno, aaaaa',
'ubjWl2F1NJ6crlhxWKRf3P04ObSYhyWS':
'Ha! I a uruciu a i a uruciu a. Ja techniki wszystkie znam',
'ELn5JOlEZBU7d1s3Z3uhxKmFWTGQY2N7': '<NAME> *różne',
'EchSdyKtJmfMKMo36Vfktym2LBG1n55q': 'I dlatego wygrywam!',
'89oA80QZC7Lrboqw4LlZvnVEZvgeoXpv':
'zium, zium, zium. Magiczny napój przestał działać. Nie dostarczę wiadomości na czas',
'0BOBUWlTL7yBi5CHddyLThlzaxth9U5m': 'Jest jest!!!',
'JLqiBViZYTjXULvAFU1vT51IEbJLOtk8': 'A rodzina zdrowa?',
'pPKmBWxcJmg2n41kiWVf2hIqF60feklM': 'A zdrowa zdrowa, dziękować',
'4axW4dMYjpwGBSV5ILCGrCK2Ca1tFAg7': 'A ojciec zdrowy?',
'QZrQYUIJhD7t6relrh9fKFYhu4nnLCr2': 'A zdrowy, dziękować',
'4QvF8otxAFycIYThDacNRmxFjGefRiRy': 'A twoja siostra zdrowa?',
'zLWQMCKaphomY08ZwJrhb5XjQQdkEk4a': 'zdrowa, zdrowa, dziękować',
'FhlBPs5eAnYULzBbe1gEk9yY0IGLBsOA': 'A twój kuzyn, jak zdrowie kuzyna?',
'chbwufrMO0ehIlVIZQ2Pk1OLyVBO00EB': 'A zdrowy, dziękować.',
'3XRilxRYRbnY9o7tZqMS8hpMEhg6DAdK': 'A interes, jak twój interes?',
'gXF5kXq1fDwVsVwXoProCtkBAEAWvmPy': 'Zdrowy',
'Gn1iFE3pglbY8CtX0yuT4EldDfrIivao': 'Przepraszam, ale to ważne!',
'EcaDshT9rteboOOQBtWvJLQpW4U6uLf7': 'Ruch, jak widzę, spory.',
'xp8nx9b8Jyl5sTMPVqt1xzGcm7ePzG9r': 'A spory, dziękować.',
'O0EbefBxqHmeWUpTOZ6qZrHeE4r63IQr': 'Wio wio!',
'c88uQdvDuq5ma3XB0nGUBjaKxr6eh586': '*Wysuwa lusterko* Praktyczna rzecz',
'DTnd8jGCb9ZZo3OyezipqMNRbtkBUFDi': 'Szybciej, szybciej, bo nam ucieknie!',
'2EQeHp4yQdhMHkpmzvEJJgkSUZZ12GtO':
'Stary, ta bryka ma dwa konie mocy, szybciej nie da rady',
'FY0r9ittblFJNg4T7VyDMVvzy0VQ1Ezk': 'Jazda!',
'1opohWi2DPi8t9sRmx17tcyIXNPeexKO': 'Wio! Wio!',
'kaofWDSJQI0OooNvoucOQUl8EjxygbYM': 'Wio wio wio!',
'vRpFM3Pt6w27wZqsDU7DbVtLlGoZXkcr': 'Co ty tak ciagle wio wio wio?',
'eKxdZ4GXlSvGX4bDWUC9BMOmpbM6lPbR': 'Co mi zrobisz, jak mnie złapiesz?',
'QlmU0r4t1gdmtnYrWaF51vlhU4MK1xL0': 'Ahahahaha!',
'KJIyeNHaEQ2LkM8qUshqSYg39romglzi': 'No i niby jak teraz wstaniesz, co?',
'7DZx13FpLgGgwxWNrMdJErif8NuVZESq': 'Hu! Tssss',
'J72Uk8hbq1drNsKJpKithjeaHbjs9sG3': 'Choćby tak.',
'mCaZqMUmqsEzyHcvRa2J1yq5sxMq0bW4':
'hm. Posłuchaj, nie ma sensu walczyć. Załóżmy spółkę i dokończmy budowę razem!',
'XHDFA0NWzo24Lu9UK2gh94FqxJWkRD5T': 'Lew nie sprzymierza się z kojotem!',
'cJN4m52Fm1Z9SHy4jMqGg2Rc23XSPD7v': 'Co?',
'ZIju7uzmdYq5pUYQeO5tQHyeMPdJzjo6':
'No, lew, tak? Nie sprzymierza się... z kojotem, no...',
'srFBY4JsTnafLoYRqazGVg41IEc8ga2G':
'Nie, bo ja usłyszałem: letnie kołnierze z polotem, ale nie leżało mi w kontekście',
'7IVQatDmMliusUcZ1VVVh6oJjIwRoQH8': 'A nie *letnie kołnierze z polotem?',
'cGQ2YZFqOG1Gs35ypgTQwLH9eiU01NbB':
'pewnie miałoby to więcej sensu, ale zawsze słyszałem swoją wersję :P',
'tI7JiYgmeiIT02o3eNABOCx6gytDBg6u': 'Nie, rzeczywiście.',
'UdlRwiPsiGEQAKDUasykxK6mhSyH8rR3': 'Transformacjooon!',
'WyHz3fhsxwqzA1Itx3BpSvjwQBjJlXWE': 'Łaaaaaa!',
'Fw332jQLSDI43JWZtOwt2juLIQOPL39i': 'Wracając do tematu to ten lew to ja.',
'C4i4lGqWjHzWqA7Lu8lTityAU8szjYDH':
'Otóż nie, najpierw Numernabis szepta "puść mnie" XD',
'EKeBgxpewRQ5QIqaQnfLyMi7UrLAKTzJ': 'Jakoś się domyśliłem, wiesz?',
't3YjRupYp0HGbfwPWxnGqVPBE0GPRiQ6': 'Co się dzieje?',
'PNkEHFP02x6D5glbpdaPa9jsTcDjdkfH': 'Hmm.',
'IE2nroDDnq1T4mis8GsPELPE1hx8uZgi': 'AAAAAAAAAAA! (Numernabis)',
'sWYOMPZTD9FkyD2tJLs5sVtCapIL9PHS': 'Lew nie sprzymierza się z kojotem!',
'hu1GCIa6mjatyA6qs7NImLmv3z7cFDNd': 'Minoko!',
'sznFWOUGuufvU4peITOnvSei04jjqf8L': '* trzy ściany później',
'ojVo8GlVb0sSK45ARaauT1n9Z8FO4UQy': 'Poczuj w sobie siłę lwa.',
'qsgxBvlW5cwFdqX8HELF1h81J1pSSWQ6':
'Jak jesteś taki twardy to powiedz mi to prosto w profil',
'D9tclNYk1HSlhc29YbAXapETikEBCh5z': 'Wio! wio! wio!',
'lzRDA0v1xVRb4Ch1sUdOTN1Zb055053m': 'Wio! Wio!',
'XndIovgbEnk0NoDdLRoaeKJZDyQjTbXO': 'Z drogi! Z drogi!',
'oqkVLnLqyWr00xiSk0Wk7t3kZkZVj26B': 'No co tam?',
'6vXq5EfDbDIrx52wHXouL3V2I4RqPAri':
'Co jest, w poprzedniej scenie był, a teraz nagle go nie ma?',
'QFDEXKh2IREzeY85HkBjn50ehuVBr8RB':
'No to co, wracamy, nie? Nie ma, to nie ma. Nazad.',
'79bTDVQPl21bmDxSwQqJICnT85aLLtvX':
'Nie będę już dłużej tolerował dyletanctwa Numernambisa',
'HOyF7KXQqKVwBmb8klHFwBAVXU3HgIjr': 'Będzie tego!',
'LzjrIjK9zWdUZ2FgsWAiA3KiQDqJZAaa': 'A dokąd idziemy?',
'stUeUE4xChvSiMepE0stqG40uCrvXtaB': '<NAME> Do Kleopatry!',
'EJOxGiqweh7dUz8tAEKGe62MQiRXqf0y': 'Uuuu, boję się.',
'f4heMkqPdXu5FCRLIZ8jgUD3ty24AFtP':
'Śmiej się pan, śmiej. Tak się składa, że ja dla Kleopatry pracuję w księgowości, więc zobaczymy, kto się będzie śmiać.',
'POh58Qub38WfYOB54Dw3fctq7Th96XUi':
'Ktoś rozprowadza fałszywy napój magiczny... Smak ten sam, zapach ten sam. Ale to nie napój magiczny! To zupa jest z brukselek!',
'aIIRQH0ZCUcgOO7fnchPFn3ZwVRCAlaL': 'Obawiam się, że rzeczywiście zobaczę.',
'FA75mi895SRaW8pTzoZKaNflZ81gf73j':
'Raz, dwa, trzy, cztery, pięć i ciach. Antylopa i lew.',
'sSE1aXhUeYfTCLcLAPcpDobGnuIj0HSJ': 'Co to oznacza?',
'KCvG1KvBDd9jq4Ou2yp39cgModxVz5Bg': 'Wiadomość...i adoratora 😊',
'PNEefOqlUrPE0klfCj9ZdjyVlsNwO0Tk': 'Pff, też mi wróżba.',
'fj0q8qwu00Q8HY4NK8nfG9b7VQxBGjdt': 'Moja pani!',
'j8ixgvBi1WwBlacZElQsIfDQBGpuhuVN': 'Oo, mamy gości. Proszę.',
'w684KK38fdZrC4P1U6laays4ZwbHm4Nl':
'No, idź Idefix, idź. Zanieś królowej wiadomość.',
'AKjIRiOEKHVsXSUyo252jUylTTgTHsBG': 'Hihi, hopla. Oto i wiadomość.',
'QQAQ0HuK310e2go3Wd2n1PBOeYs1K80q':
'Dawno, dawno temu w odległej galaktyce.',
'IVxnSsDwT715BKgInaYAk4tZfyg4zAbl':
'Królowo, od pewnego czasu buduję sobie dom.',
'0e5fKzwjG9YFHCzcINQME6k0qUwGu4uB':
'*Królowo, jak zapewne jest ci wiadomo, od pewnego czasu buduję sobie dom.',
'gQTT5szD3tKoOLWOqLnpjzgWs7G6GuR4':
'Ale nie dotrzymują terminów od co najmniej czterech miesięcy.',
'r4eOvEUTYd5FGjJfmDrG8b740CRnNSST': 'COO?!',
'SlMgilp7z2FkPsO4Xdldm1LulFgjLSfo': 'Co najmniej!',
'ucGaQdwa3Tuv7WlMZd2FTom6hktqANHU': 'No to im nie ujdzie płazem.',
'FitNSlnCcAJBZN1ET3kBS8lTGM7y2Sdw':
'Dziękuję, moja pani. Wiedziałem, że im na to nie pozwolisz!',
'XdeS7IOZnCYx8m0X6OOHmiIW0veiAt77':
'Ależ to marny gracz i oszust! Doprawdy, Cezarze! Ale tym razem będziesz musiał przyznać się do porażki, bo budowę zakończono na czas.',
'KiDQQcZSmbChIeSVXR2y2xzrKM56MIaz': '<NAME> marny*',
'HGERCC2PP0icdV8LdaBWfSLfhJtZW1GW': 'No właśnie nie, nie zakończono',
'ns0s9l8O7hQ3DawHtA215AJtKIrm8eZG':
'Ja to załatwię! Idź, Gallu. Gimmekiss, odprowadzisz pana. Ja jadę na teren budowy.',
'SNh1hInJtcUk8ZisLCnjCL4JzllCllXw': 'Przepraszam. Chodz, Idefixie',
'FYpHSWd4VASzr24gIhyr9Mk9T5AQCYFF': 'Chodź, mój Gallu.',
'Qwhnm0a0fKFNhncBzws6bZJX9LUgyRsA':
'- Odnoszę wrażenie, że nie zostałem zrozumiany.',
'XIi5jiuDEu6wYDBkbGEvaXFl5thCHQgF': 'No, chyba nawet nieźle nam idzie.',
'2dPm3HlgS1x5K43VHKO4Hv1EpV90LEzs': 'Noo, trochę to trwa, ale skuteczne.',
'MC5G9POr6tLuB0Fhh4xI3wMaY6m3Pkky': 'Spokojnie.',
'JVkDSztm404ejHidRHOI53UBkb1qfJm6':
'Niezły miałem pomysł żeby zaatakować, nie?',
'7FhK8b71QP1I74jaQbRm0fU4FfWlZFsY':
'Tak. Trzeba ci oddać, Cezarze, co twoje.',
'iFwWuaBTLq9FHzfoXShU7aTlUoG6gurX': 'No cóż...',
'be77MCYqK7aGcEKEA9EtDYSsiIRuOrz1': 'Asterix wraca.',
'Zgwqtp3ls7SPpaaeu5fe4nD2sgbSQB80': 'Na mój znak!',
'jgsG8xAaRindFoqj5Xvs6bP5nmyq43gf': 'Aaaaaa!',
'2RsnFj7uF0vf5VPkSLT2X6hiF9asBgEZ': 'Co jest?',
'nepB7cZNgh8BoRF2gcXnDKDMghzFptYx': 'No nie wiem... taki odruch...',
'e2RnpOJXlRFH1BiqPP6jiBqkNe1tXKmI': 'No i, jak poszło?',
'Q51nfUmODZ2hmGD8aeT3rHyt3nxMc6VK': 'Dobrze, dostarczyłem wiadomość.',
'JSdi95AEj0KWjTFnWgcaY96Va5xWWdqV':
'Świetnie. Ale jak napój magiczny mógł działać tak długo?',
'nRKMtYN0heZ26l3ukCtklHiNdrnqbkFq': 'Właściwie, to to było tak, że...',
'wBxJ9d8I4PRaoN2aV5BZ2t1XwNkHONXV':
'Hehe... to to było tak, że... posmakowałeś największej magii na świecie.',
'sp2KKxYt9lpVEK65WRm0Tsa4RYaxAjTi': 'Tak, to my idziemy.',
'Um4PlIRZWyVEmHagyB4ch5Ag7VhNtzKG': 'Buzi buzi!',
'37vWCG5i7OYnI1ATKv2KKzBtsUNmOfX9':
'Obelixie, Idefix właśnie wrócił i wiesz co? Dostarczył wiadomość!',
'Xld7XJVHKWxIrmYLHKKKcYqNXhOGI2sf':
'Łohoho... Widzisz, a nie mówiłem! A co ty tu masz?',
'93eg4QYvKN3GchFS6Mqltf0xKI8uXCY6':
'Ave Cezarze, khymkhym, ktoś pragnie z tobą mówić, Cezarze.',
'KHy2AlYAh9Sd35jJiA2qh8oiWsCoX28g': 'Tak, kto?',
'n3hkXzeAlIEEOAVhWKQrscOeyzGACnbY': 'Ona.',
'Ar0OYKKDhc73sbcBOC47OeXOvWeQEeib':
'To ja już ten tego... jakby co... no to pa.',
'CL5YKkqmczODm9VjLN0BEj0Ynic1sXWE':
'Ooooo, proszę jaka miła niespodzianka...',
'Pj3YwSl7Oef3HXvKFPAnRLWsO8u1xx9o':
'DOSYĆ! Jak strzała wypadłam z pałacu, nawet nie miałam czasu się przebrać, kiedy dowiedziałam się, co się stało!',
'kseCwgICMeqGOVSDrIOeszQJMjAaKTof': 'A co się stało?',
'3J4mqZ9xEqbfS0q8cWNGf7oEh0aN0IlS':
'Jak się z kimś zakłada, trzeba grać fair.',
'SLpABuUIMjLSLhUTtOeHxW3BumdS38eE': 'Yy?',
'NonyPC6DOVsuSB7AsDCbOvFTVP7wbcpr':
'Ciii. A ja miałam prawo prosić Gallów o pomoc.',
'7Eb1eTeHpYzPpwyDrWRr5DQ8zzI0C0Xr': 'A czy ja mówię, że...',
'o7AWtnF3rOj5zGD3tT3U9xRsHUBhTm7u': 'No!',
'fBSHHYYcJuRAR04CHCZlTzTiS5xvGuA3': 'Ale może jednak...',
'NYNopfs1H3bUfgscL18cINhjkdbAdd9E': 'No!',
'LxzoBPWcvtQQWNR9XABNEaBaL3MaUoKx': 'Chyba mogę...',
'NffQfnjqFmbd95P5hTMw4yd3uCwwYRjD':
'TO NIE JEST JEDNA Z TWOICH PROWINCJI! A ja rozkazuję, żeby Rzymianie zostawili budowniczych w spokoju! A zanim odejdą, niech naprawią szkody, które...',
'WQn1ZSqBQanBHjQlRuDwv7C5oqMtYCgs': 'Ale...',
'agnRTkliHF1iIDYPjHfdp0kN58WTvFUb': 'CICHO BĄDŹ! DO ROBOTY!',
'5WDWsdO551Y0v45BmW7KnLXWgCsCq3n8': 'Huuu...',
'uJ4OEW6QuK73BLZ1W2OZhlRmcd9auAJm': 'Tak, to co teraz robimy?',
'ae6UvgYLnxlVnSKlSXBLPD3V2OITlRRG':
'To co powiedziała, zbieramy manatki i naprawiamy szkody, gamoniu.',
'JkYv9Xddk7aRMG63NBequLE70OUkQyDR': 'Aaa... aha, tak, tak, jasne.',
'7utprA9i800PiRxStIEapw5MwCp7QXqA':
'Nie chciałbym Kleopatrze zajść za skórę. Nawet tak jedwabistą.',
'ILZaBGk2dOvcrL2BTwNYd4MM9NNPFqJ3': 'Cezar mówi, poszli wooon!',
'pKGuRSViOHBpFnzJwrjAU4h3elcVIZGo': 'Cezar mówi poszli won!',
'NEeQafuH8YgyLX24phoi2QFsMtNc9tKL': 'To idziemy sobie stąd!',
'4lNjqyrwdWAOigpp8l9zgsiPfHnTb2gj': 'To idziemy sobie stąd!',
'2kVqK7xicMxGmuwFYcAj1iBVw9C2ZY9F': 'Patrzcie, dali za wygraną!',
'h4HZlxCZzOjhzwMPjmZpj4vsuLhWpb0B': 'Heh, nie polecam się.',
'CovhNl7cXELM099E0xCDou0jLNcA30NW': 'No nie, dlaczego tak szybko, co?!',
'P828PTVaOVyaibpQBBK74Pa3tdP3tJXm':
'Niesamowite, kiedy pomyśleć, że każdy z tych Rzymian pozostawi tu jakąś cząstkę siebie...',
'M7qVeaYMNLRv6diiwBLqEAhXsB2tMiCK':
'(jeb) No wiesz, przepraszam, ale sam się prosiłeś!',
'4LhzStgkoMfK2NPZ9qVepp6NF1Uaa2yR': 'Nie, nie, nie, nie szkodzi.',
'DLhDu6HbzYC9ZP3SmTtxkoGhTyGTWDRv':
'I tak oto po raz pierwszy w historii, Rzymian i Egipcjan połączył wspólny trud odbudowy. Pracowali do ostatniej chwili, co do ziarenka.',
'5Vsj2W2zmjVpFEQyBVqba6Pm5mJro5Ub': 'Hau!',
'XY2qz1wPJpqrcHbtCqWx38lHLKbG7iIf': 'No!',
'kyVVakASpiESYM2j1jbg3Y59s8sKQImH': 'Jakie masz teraz plany, Numernabisie?',
| |
"""
Module for methods reading from and writing to the file system
"""
import os
import logging
from pathlib import PurePath
from ractl_cmds import (
get_server_info,
get_reserved_ids,
attach_image,
detach_all,
list_devices,
reserve_scsi_ids,
)
from pi_cmds import run_async
from socket_cmds import send_pb_command
from settings import CFG_DIR, CONFIG_FILE_SUFFIX, PROPERTIES_SUFFIX, RESERVATIONS
import rascsi_interface_pb2 as proto
def list_files(file_types, dir_path):
    """
    Takes (str), (list) or (tuple) of (str) file_types - e.g. ('hda', 'hds')
    and (str) dir_path to search in.
    Returns (list) of (tuple)s files_list:
    index 0 is (str) file name and index 1 is (int) size in bytes
    """
    # str.endswith() accepts a str or a tuple of str, but raises TypeError
    # for a list — normalize so callers may pass a list as documented.
    if isinstance(file_types, list):
        file_types = tuple(file_types)
    files_list = []
    for path, _dirs, files in os.walk(dir_path):
        # Only list selected file types (case-insensitive suffix match)
        matches = [f for f in files if f.lower().endswith(file_types)]
        files_list.extend(
            (file, os.path.getsize(os.path.join(path, file)))
            for file in matches
        )
    return files_list
def list_config_files():
    """
    Finds files with file ending CONFIG_FILE_SUFFIX in CFG_DIR.
    Returns a (list) of (str) file names
    """
    suffix = "." + CONFIG_FILE_SUFFIX
    config_files = []
    for _root, _dirs, files in os.walk(CFG_DIR):
        config_files.extend(f for f in files if f.endswith(suffix))
    return config_files
def list_images():
    """
    Sends a IMAGE_FILES_INFO command to the server
    Returns a (dict) with (bool) status, (str) msg, and (list) of (dict)s files
    """
    # Ask the RaSCSI server for its default image-dir contents via protobuf
    command = proto.PbCommand()
    command.operation = proto.PbOperation.DEFAULT_IMAGE_FILES_INFO
    data = send_pb_command(command.SerializeToString())
    result = proto.PbResult()
    result.ParseFromString(data)
    # Get a list of all *.properties files in CFG_DIR
    prop_data = list_files(PROPERTIES_SUFFIX, CFG_DIR)
    # Stems (file names without suffix) are matched against image names below
    prop_files = [PurePath(x[0]).stem for x in prop_data]
    from zipfile import ZipFile, is_zipfile
    server_info = get_server_info()
    files = []
    for file in result.image_files_info.image_files:
        # Add properties meta data for the image, if applicable
        if file.name in prop_files:
            process = read_drive_properties(f"{CFG_DIR}/{file.name}.{PROPERTIES_SUFFIX}")
            prop = process["conf"]
        else:
            # False (not None) is the sentinel for "no properties file"
            prop = False
        if file.name.lower().endswith(".zip"):
            zip_path = f"{server_info['image_dir']}/{file.name}"
            if is_zipfile(zip_path):
                zipfile = ZipFile(zip_path)
                # Get a list of (str) containing all zipfile members
                zip_members = zipfile.namelist()
                # Strip out directories from the list
                zip_members = [x for x in zip_members if not x.endswith("/")]
            else:
                logging.warning("%s is an invalid zip file", zip_path)
                zip_members = False
        else:
            zip_members = False
        # Human-readable size in MiB with thousands separators
        size_mb = "{:,.1f}".format(file.size / 1024 / 1024)
        # Device type detected by the server for this image (e.g. SCHD)
        dtype = proto.PbDeviceType.Name(file.type)
        files.append({
            "name": file.name,
            "size": file.size,
            "size_mb": size_mb,
            "detected_type": dtype,
            "prop": prop,
            "zip_members": zip_members,
        })
    return {"status": result.status, "msg": result.msg, "files": files}
def create_new_image(file_name, file_type, size):
    """
    Takes (str) file_name, (str) file_type, and (int) size
    Sends a CREATE_IMAGE command to the server
    Returns (dict) with (bool) status and (str) msg
    """
    cmd = proto.PbCommand()
    cmd.operation = proto.PbOperation.CREATE_IMAGE
    cmd.params["file"] = f"{file_name}.{file_type}"
    cmd.params["size"] = str(size)
    cmd.params["read_only"] = "false"
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(cmd.SerializeToString()))
    return {"status": reply.status, "msg": reply.msg}
def delete_image(file_name):
    """
    Takes (str) file_name
    Sends a DELETE_IMAGE command to the server
    Returns (dict) with (bool) status and (str) msg
    """
    cmd = proto.PbCommand()
    cmd.operation = proto.PbOperation.DELETE_IMAGE
    cmd.params["file"] = file_name
    reply = proto.PbResult()
    reply.ParseFromString(send_pb_command(cmd.SerializeToString()))
    return {"status": reply.status, "msg": reply.msg}
def delete_file(file_path):
    """
    Takes (str) file_path with the full path to the file to delete
    Returns (dict) with (bool) status and (str) msg
    """
    # Guard clause: nothing to do if the file is already gone
    if not os.path.exists(file_path):
        return {"status": False, "msg": f"File to delete not found: {file_path}"}
    os.remove(file_path)
    return {"status": True, "msg": f"File deleted: {file_path}"}
def rename_file(file_path, target_path):
    """
    Takes (str) file_path and (str) target_path
    Moves file_path to target_path, provided the target directory exists.
    Returns (dict) with (bool) status and (str) msg
    """
    target_dir = PurePath(target_path).parent
    # Guard clause: refuse to move into a directory that does not exist
    if not os.path.exists(target_dir):
        return {"status": False, "msg": f"Unable to move to: {target_path}"}
    os.rename(file_path, target_path)
    return {"status": True, "msg": f"File moved to: {target_path}"}
def unzip_file(file_name, member=False, members=False):
    """
    Takes (str) file_name, optional (str) member, optional (list) of (str) members
    file_name is the name of the zip file to unzip
    member is the full path to the particular file in the zip file to unzip
    members contains all of the full paths to each of the zip archive members
    Returns (dict) with (boolean) status and (list of str) msg
    """
    from asyncio import run
    server_info = get_server_info()
    # True once a .properties sidecar was extracted/installed into CFG_DIR
    prop_flag = False
    if not member:
        # No specific member requested: extract the whole archive into the
        # image dir (-n: never overwrite, -j: junk paths/flatten)
        unzip_proc = run(run_async(
            f"unzip -d {server_info['image_dir']} -n -j "
            f"{server_info['image_dir']}/{file_name}"
        ))
        if members:
            # Move any extracted .properties files from the image dir into
            # the config dir where read_drive_properties() expects them
            for path in members:
                if path.endswith(PROPERTIES_SUFFIX):
                    name = PurePath(path).name
                    rename_file(f"{server_info['image_dir']}/{name}", f"{CFG_DIR}/{name}")
                    prop_flag = True
    else:
        # Extract a single member; escape it since it is interpolated into a
        # regex/pattern position of the shell command below
        from re import escape
        member = escape(member)
        unzip_proc = run(run_async(
            f"unzip -d {server_info['image_dir']} -n -j "
            f"{server_info['image_dir']}/{file_name} {member}"
        ))
        # Attempt to unzip a properties file in the same archive dir
        unzip_prop = run(run_async(
            f"unzip -d {CFG_DIR} -n -j "
            f"{server_info['image_dir']}/{file_name} {member}.{PROPERTIES_SUFFIX}"
        ))
        if unzip_prop["returncode"] == 0:
            prop_flag = True
    if unzip_proc["returncode"] != 0:
        logging.warning("Unzipping failed: %s", unzip_proc["stderr"])
        return {"status": False, "msg": unzip_proc["stderr"]}
    # Parse the unzip stdout for the names of the files actually written
    from re import findall
    unzipped = findall(
        "(?:inflating|extracting):(.+)\n",
        unzip_proc["stdout"]
    )
    return {"status": True, "msg": unzipped, "prop_flag": prop_flag}
def download_file_to_iso(url):
    """
    Takes (str) url
    Downloads the file at url into a temp dir and wraps it in an HFS hybrid
    iso image stored in the server's image dir.
    Returns (dict) with (bool) status and (str) msg,
    plus (str) file_name on success
    """
    from time import time
    from subprocess import run

    server_info = get_server_info()
    file_name = PurePath(url).name
    # Unique temp working dir keyed on the current timestamp
    tmp_ts = int(time())
    tmp_dir = "/tmp/" + str(tmp_ts) + "/"
    os.mkdir(tmp_dir)
    tmp_full_path = tmp_dir + file_name
    iso_filename = f"{server_info['image_dir']}/{file_name}.iso"
    req_proc = download_to_dir(url, tmp_dir)
    if not req_proc["status"]:
        return {"status": False, "msg": req_proc["msg"]}
    # check must be False: with check=True a non-zero exit raises
    # CalledProcessError, so the returncode test below was dead code and the
    # error dict was never returned to the caller.
    iso_proc = run(
        ["genisoimage", "-hfs", "-o", iso_filename, tmp_full_path],
        capture_output=True,
        check=False,
    )
    if iso_proc.returncode != 0:
        return {"status": False, "msg": iso_proc.stderr.decode("utf-8")}
    return {"status": True, "msg": iso_proc.stdout.decode("utf-8"), "file_name": iso_filename}
def download_to_dir(url, save_dir):
    """
    Takes (str) url, (str) save_dir
    Streams the file at url into save_dir in 8 KiB chunks.
    Returns (dict) with (bool) status and (str) msg
    """
    import requests

    file_name = PurePath(url).name
    logging.info("Making a request to download %s", url)
    try:
        with requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}) as req:
            req.raise_for_status()
            with open(f"{save_dir}/{file_name}", "wb") as download:
                for chunk in req.iter_content(chunk_size=8192):
                    download.write(chunk)
    except requests.exceptions.RequestException as error:
        logging.warning("Request failed: %s", str(error))
        return {"status": False, "msg": str(error)}
    logging.info("Response encoding: %s", req.encoding)
    # .get() instead of [...]: a server that omits the Content-Type header
    # must not make an already-successful download raise KeyError here.
    logging.info("Response content-type: %s", req.headers.get("content-type"))
    logging.info("Response status code: %s", req.status_code)
    return {"status": True, "msg": f"File downloaded from {url} to {save_dir}"}
def write_config(file_name):
    """
    Takes (str) file_name
    Serializes the current device list and SCSI id reservations to a JSON
    config file in CFG_DIR. A partially written file is deleted on failure.
    Returns (dict) with (bool) status and (str) msg
    """
    from json import dump
    file_name = f"{CFG_DIR}/{file_name}"
    try:
        with open(file_name, "w") as json_file:
            version = get_server_info()["version"]
            devices = list_devices()["device_list"]
            for device in devices:
                # Remove keys that we don't want to store in the file
                del device["status"]
                del device["file"]
                # It's cleaner not to store an empty parameter for every device without media
                if device["image"] == "":
                    device["image"] = None
                # RaSCSI product names will be generated on the fly by RaSCSI
                if device["vendor"] == "RaSCSI":
                    device["vendor"] = device["product"] = device["revision"] = None
                # A block size of 0 is how RaSCSI indicates N/A for block size
                if device["block_size"] == 0:
                    device["block_size"] = None
                # Convert to a data type that can be serialized
                device["params"] = dict(device["params"])
            reserved_ids_and_memos = []
            reserved_ids = get_reserved_ids()["ids"]
            for scsi_id in reserved_ids:
                reserved_ids_and_memos.append({"id": scsi_id, "memo": RESERVATIONS[int(scsi_id)]})
            dump(
                {"version": version, "devices": devices, "reserved_ids": reserved_ids_and_memos},
                json_file,
                indent=4
            )
        return {"status": True, "msg": f"Saved config to {file_name}"}
    except (IOError, ValueError, EOFError, TypeError) as error:
        logging.error(str(error))
        delete_file(file_name)
        return {"status": False, "msg": str(error)}
    # except Exception (not bare except): a bare except also swallows
    # SystemExit/KeyboardInterrupt, which must propagate.
    except Exception:
        logging.error("Could not write to file: %s", file_name)
        delete_file(file_name)
        return {"status": False, "msg": f"Could not write to file: {file_name}"}
def read_config(file_name):
    """
    Takes (str) file_name
    Loads a JSON config file from CFG_DIR, detaches all current devices,
    restores SCSI id reservations and re-attaches the stored devices.
    Returns (dict) with (bool) status and (str) msg
    """
    from json import load
    file_name = f"{CFG_DIR}/{file_name}"
    try:
        with open(file_name) as json_file:
            config = load(json_file)
            # If the config file format changes again in the future,
            # introduce more sophisticated format detection logic here.
            if isinstance(config, dict):
                # Current format: top-level dict with version/devices/reserved_ids
                detach_all()
                ids_to_reserve = []
                for item in config["reserved_ids"]:
                    ids_to_reserve.append(item["id"])
                    # Keep the in-memory reservation memos in sync
                    RESERVATIONS[int(item["id"])] = item["memo"]
                reserve_scsi_ids(ids_to_reserve)
                for row in config["devices"]:
                    kwargs = {
                        "device_type": row["device_type"],
                        "image": row["image"],
                        "unit": int(row["unit"]),
                        "vendor": row["vendor"],
                        "product": row["product"],
                        "revision": row["revision"],
                        "block_size": row["block_size"],
                    }
                    # Flatten stored device params into attach_image kwargs
                    params = dict(row["params"])
                    for param in params.keys():
                        kwargs[param] = params[param]
                    attach_image(row["id"], **kwargs)
            # The config file format in RaSCSI 21.10 is using a list data type at the top level.
            # If future config file formats return to the list data type,
            # introduce more sophisticated format detection logic here.
            elif isinstance(config, list):
                detach_all()
                for row in config:
                    kwargs = {
                        "device_type": row["device_type"],
                        "image": row["image"],
                        # "un" for backwards compatibility
                        "unit": int(row["un"]),
                        "vendor": row["vendor"],
                        "product": row["product"],
                        "revision": row["revision"],
                        "block_size": row["block_size"],
                    }
                    params = dict(row["params"])
                    for param in params.keys():
                        kwargs[param] = params[param]
                    attach_image(row["id"], **kwargs)
            else:
                return {"status": False, "msg": "Invalid config file format."}
            return {"status": True, "msg": f"Loaded config from: {file_name}"}
    except (IOError, ValueError, EOFError, TypeError) as error:
        logging.error(str(error))
        return {"status": False, "msg": str(error)}
| |
import functools
import inspect
import logging
import typing
from typing import Any, Optional, Type
from dbnd._core.configuration.environ_config import get_dbnd_project_config
from dbnd._core.current import current_task_run, get_databand_run, try_get_current_task
from dbnd._core.errors import show_exc_info
from dbnd._core.errors.errors_utils import user_side_code
from dbnd._core.failures import dbnd_handle_errors
from dbnd._core.plugin.dbnd_airflow_operator_plugin import (
build_task_at_airflow_dag_context,
is_in_airflow_dag_build_context,
)
from dbnd._core.task.decorated_callable_task import _DecoratedCallableTask
from dbnd._core.task_build.task_context import TaskContextPhase, current_phase
from dbnd._core.task_build.task_metaclass import TaskMetaclass
from dbnd._core.task_build.task_passport import TaskPassport
from dbnd._core.tracking.managers.callable_tracking import CallableTrackingManager
from dbnd._core.utils.basics.nothing import NOTHING
from dbnd._core.utils.callable_spec import (
CallableSpec,
args_to_kwargs,
build_callable_spec,
)
from dbnd._core.utils.lazy_property_proxy import CallableLazyObjectProxy
from targets.inline_target import InlineTarget
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from dbnd import Task
from dbnd._core.task_run.task_run import TaskRun
class TaskDecorator(object):
"""
This object represent the state and logic of decorated callable (user class or user function)
All "expensive" objects are lazy: task_cls, callable_spec, callable_tracking_manager
all "user-side" calls are routed into self.handle_callable_call() that will decide should we
1. call user code directly (dbnd is disabled)
2. call and track user callable call (tracking is enabled)
3. create a Task that represents user code ( orchestration mode at @pipeline.band
4. create a Task and run it (orchestration mode at @task.run)
"""
    def __init__(self, class_or_func, decorator_kwargs):
        """
        Capture the decorated callable and split decorator kwargs into
        dbnd-internal parameters (popped) and user task parameters (kept).
        """
        # known parameters for @task
        self.class_or_func = class_or_func
        self.original_class_or_func = class_or_func
        self.task_type = decorator_kwargs.pop(
            "_task_type"
        )  # type: Type[_DecoratedCallableTask]
        self.task_default_result = decorator_kwargs.pop(
            "_task_default_result"
        )  # ParameterFactory
        self.task_defaults = decorator_kwargs.pop("defaults", None)
        # .get() (not pop): these stay in decorator_kwargs for the task class
        self.task_namespace = decorator_kwargs.get("task_namespace", NOTHING)
        self.task_family = decorator_kwargs.get("_conf__task_family")
        # rest of kwargs are "user params"
        self.decorator_kwargs = decorator_kwargs
        self.task_passport = TaskPassport.build_task_passport(
            cls_name=self.original_class_or_func.__name__,
            module_name=self.original_class_or_func.__module__,
            task_namespace=self.task_namespace,
            task_family=self.task_family,
        )
        self.is_class = inspect.isclass(class_or_func)
        # used by decorated UserClass only, stores "wrapped" user class
        # lazy task class definition for orchestration case
        self._task_cls = None  # type: Optional[Type[Task]]
        self._callable_spec = None  # type: Optional[CallableSpec]
        self._callable_tracking_manager = (
            None
        )  # type: Optional[CallableTrackingManager]
def get_callable_spec(self):
if not self._callable_spec:
try:
self._callable_spec = build_callable_spec(
class_or_func=self.original_class_or_func
)
except Exception as ex:
logger.error(
"Failed to create task %s: %s\n%s\n",
self.original_class_or_func.__name__,
str(ex),
user_side_code(context=5),
exc_info=show_exc_info(ex),
)
raise
return self._callable_spec
def get_task_cls(self):
"""
Returns Runnable Task for Orchestration
"""
if self._task_cls is None:
# Use the task_type we got from the decorator. check @task/@pipeline/@spark_task
bases = (self.task_type,)
self._task_cls = TaskMetaclass(
str(self.original_class_or_func.__name__),
bases,
dict(
__doc__=self.original_class_or_func.__doc__,
__module__=self.original_class_or_func.__module__,
defaults=self.task_defaults,
task_decorator=self,
),
)
return self._task_cls
def get_task_definition(self):
return self.get_task_cls().task_definition
@dbnd_handle_errors(exit_on_error=False)
def _build_task(self, *args, **kwargs):
task_cls = self.get_task_cls()
return task_cls(*args, **kwargs)
@dbnd_handle_errors(exit_on_error=False)
def dbnd_run(self, *args, **kwargs):
# type: (...)-> DatabandRun
"""
Run task via Databand execution system
"""
t = self._build_task(*args, **kwargs)
return t.dbnd_run()
def tracking_context(self, call_args, call_kwargs):
if not self._callable_tracking_manager:
self._callable_tracking_manager = CallableTrackingManager(
task_decorator=self
)
return self._callable_tracking_manager.tracking_context(
call_args=call_args, call_kwargs=call_kwargs
)
def handle_callable_call(self, *call_args, **call_kwargs):
dbnd_project_config = get_dbnd_project_config()
if dbnd_project_config.disabled:
return self.class_or_func(*call_args, **call_kwargs)
# we are at tracking mode
if dbnd_project_config.is_tracking_mode():
with self.tracking_context(call_args, call_kwargs) as track_result_callback:
fp_result = self.class_or_func(*call_args, **call_kwargs)
return track_result_callback(fp_result)
#### DBND ORCHESTRATION MODE
#
# -= Use "Step into My Code"" to get back from dbnd code! =-
#
# decorated object call/creation ( my_func(), MyDecoratedTask()
# we are at orchestration mode
task_cls = self.get_task_cls()
if is_in_airflow_dag_build_context():
# we are in Airflow DAG building mode - AIP-31
return build_task_at_airflow_dag_context(
task_cls=task_cls, call_args=call_args, call_kwargs=call_kwargs
)
current = try_get_current_task()
if not current:
# no tracking/no orchestration,
# falling back to "natural call" of the class_or_func
return self.class_or_func(*call_args, **call_kwargs)
######
# current is not None, and we are not in tracking/airflow/luigi
# this is DBND Orchestration mode
# we can be in the context of task.run() or in task.band()
# called from user code using user_decorated_func() or UserDecoratedTask()
if self.is_class:
call_kwargs.pop("__call_original_cls", False)
# we should not get here from _TaskFromTaskDecorator.invoke()
# at that function we should call user code directly
phase = current_phase()
if phase is TaskContextPhase.BUILD:
# we are in the @pipeline.band() context, we are building execution plan
t = task_cls(*call_args, **call_kwargs)
# we are in the band, and if user_code() is called we want to remove redundant
# `user_code().result` usage
if t.task_definition.single_result_output:
return t.result
# we have multiple outputs (more than one "output" parameter)
# just return task object, user will use it as `user_code().output_1`
return t
elif phase is TaskContextPhase.RUN:
# we are "running" inside some other task execution (orchestration!)
# (inside user_defined_function() or UserDefinedTask.run()
# if possible we will run it as "orchestration" task
# with parameters parsing
if (
current.settings.run.task_run_at_execution_time_enabled
and current.task_supports_dynamic_tasks
):
return self._run_task_from_another_task_execution(
parent_task=current, call_args=call_args, call_kwargs=call_kwargs,
)
# we can not call it in "dbnd" way, fallback to normal call
if self.is_class:
call_kwargs["__call_original_cls"] = False
return self.class_or_func(*call_args, **call_kwargs)
else:
raise Exception()
def _run_task_from_another_task_execution(
    self, parent_task, call_args, call_kwargs
):
    # type: (TaskDecorator, Task, *Any, **Any) -> TaskRun
    """Run this decorated task inline, from within another task's execution.

    Builds a concrete task object from the call arguments (wrapping values
    whose origin target is known), executes it as part of the current
    databand run, and returns the actual result value when it was captured
    (falling back to loading from the result target, or to the task object
    itself when there is no single result output).

    NOTE(review): the ``-> TaskRun`` type comment above does not match the
    observed return values (result value / task object) -- confirm.
    """
    # task is running from another task
    task_cls = self.get_task_cls()
    from dbnd import pipeline, PipelineTask
    from dbnd._core.task_build.dbnd_decorator import _default_output
    dbnd_run = get_databand_run()
    # orig_call_args, orig_call_kwargs = call_args, call_kwargs
    # Normalize positional args into kwargs based on the callable's signature
    call_args, call_kwargs = args_to_kwargs(
        self.get_callable_spec().args, call_args, call_kwargs
    )
    # Map all kwargs to the "original" target of those objects;
    # for example: for a DataFrame we'll try to find the relevant target
    # that was used to read it -- get all possible values' targets
    call_kwargs_as_targets = dbnd_run.target_origin.get_for_map(call_kwargs)
    for p_name, value_origin in call_kwargs_as_targets.items():
        root_target = value_origin.origin_target
        path = root_target.path if hasattr(root_target, "path") else None
        original_object = call_kwargs[p_name]
        # Wrap the in-memory value together with its origin target metadata
        call_kwargs[p_name] = InlineTarget(
            root_target=root_target,
            obj=original_object,
            value_type=value_origin.value_type,
            source=value_origin.origin_target.source,
            path=path,
        )
    call_kwargs.setdefault("task_is_dynamic", True)
    call_kwargs.setdefault(
        "task_in_memory_outputs",
        parent_task.settings.run.task_run_at_execution_time_in_memory_outputs,
    )
    if issubclass(task_cls, PipelineTask):
        # if it's a pipeline - create a new databand run;
        # override _task_default_result to be an object instead of a target
        task_cls = pipeline(
            self.class_or_func, _task_default_result=_default_output
        ).task_cls
        # instantiate inline pipeline
        task = task_cls(*call_args, **call_kwargs)
        run = dbnd_run.context.dbnd_run_task(task)
        task_run = run.get_task_run(task.task_id)
    else:
        # instantiate inline task (dbnd object)
        task = task_cls(*call_args, **call_kwargs)
        # update upstream/downstream relations - needed for correct tracking
        # we can have the task as upstream, as it was executed already
        if not parent_task.task_dag.has_upstream(task):
            parent_task.set_upstream(task)
        from dbnd._core.task_build.task_cls__call_state import TaskCallState
        task._dbnd_call_state = TaskCallState(should_store_result=True)
        try:
            task_run = dbnd_run.run_executor.run_task_at_execution_time(
                task, task_engine=current_task_run().task_engine
            )
            # this will work only for _DecoratedTask
            if task._dbnd_call_state.result_saved:
                return task._dbnd_call_state.result
        finally:
            # we'd better clean _invoke_result to avoid memory leaks
            task._dbnd_call_state = None
    # if we are inside run, we want to have real values, not deferred!
    if task.task_definition.single_result_output:
        return task.__class__.result.load_from_target(task.result)
    # we have a func without result, just fall back to the task object
    return task
# compatibility support
@property
def task_cls(self):
    # Backward-compatible alias for get_task_cls()
    return self.get_task_cls()
@property
def t(self):
    # Short backward-compatible alias for get_task_cls()
    return self.get_task_cls()
@property
def task(self):
    # Backward-compatible alias for get_task_cls()
    return self.get_task_cls()
@property
def task_definition(self):
    # Backward-compatible alias for get_task_definition()
    return self.get_task_definition()
@property
def func(self):
    # The original (undecorated) user class or function
    return self.class_or_func
@property
def callable(self):
    # The original (undecorated) user class or function
    return self.class_or_func
class _UserClassWithTaskDecoratorMetaclass(type):
    """
    Used by decorated user classes only.

    1. we change the metaclass of the original class to go through __call__
       on object call
    2. the object still behaves as the original object (until __call__ is called)
    3. we intercept the call and may call the original object, or create a
       dbnd task class (at @pipeline or inside a @task.run function)

    In order to prevent recursion (from DecoratedCallableTask.invoke for
    example) we use the `__call_original_cls` kwarg: if present we call the
    original code.

    This code should be serializable with pickle!

        @task
        class UserClass():
            pass
    """
    # Marker attribute used to recognize dbnd-decorated task classes
    __is_dbnd_task__ = True
    task_decorator = None  # type: TaskDecorator

    def __call__(cls, *args, **kwargs):
        """
        Wrap the user class, so on user_class() we run the dbnd call logic
        first and, if required, return a task object in place.
        """
        if kwargs.pop("__call_original_cls", False):
            # Explicit bypass: construct the original user object
            return super(_UserClassWithTaskDecoratorMetaclass, cls).__call__(
                *args, **kwargs
            )
        # prevent recursive call: next time we call cls() we will go into
        # the original ctor()
        kwargs["__call_original_cls"] = True
        return cls.task_decorator.handle_callable_call(*args, **kwargs)

    # exposing dbnd logic so OriginalUserClass.task can be used;
    # this list should be aligned with attributes at build_dbnd_decorated_func
    @property
    def task_cls(self):
        # dbnd task class built from the decorated user class
        return self.task_decorator.get_task_cls()

    @property
    def t(self):
        # Short alias for task_cls
        return self.task_cls

    @property
    def task(self):
        # Alias for task_cls
        return self.task_cls

    @property
    def func(self):
        # The original (undecorated) user callable
        return self.task_decorator.callable

    @property
    def callable(self):
        # The original (undecorated) user callable
        return self.task_decorator.callable

    def dbnd_run(self, *args, **kwargs):
        # Build and execute the task immediately via the decorator
        return self.task_decorator.dbnd_run(*args, **kwargs)
def build_dbnd_decorated_func(task_decorator):
def dbnd_decorated_func(*args, **kwargs):
"""
DBND Wrapper of User Function
Redirect call to dbnd logic that might track/orchestrate user code
"""
return task_decorator.handle_callable_call(*args, **kwargs)
# new wrapper should should look like original function and be serializable
# class decorator will not work, because of pickle errors
functools.update_wrapper(dbnd_decorated_func, task_decorator.original_class_or_func)
# this list should be alligned with attributes at _UserClassWithTaskDecoratorMetaclass
# we don't want to create .task_cls object immediately(that is used for orchestration only)
# however, we can't just return | |
<filename>modules/s3/s3sync.py
# -*- coding: utf-8 -*-
""" S3 Synchronization
@copyright: 2011-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import datetime
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from s3datetime import s3_parse_datetime, s3_utc
from s3rest import S3Method
from s3import import S3ImportItem
from s3query import S3URLQuery
from s3utils import S3ModuleDebug
# Module-level debug switch for sync tracing (Python 2 print statement below)
DEBUG = False
if DEBUG:
    print >> sys.stderr, "S3SYNC: DEBUG MODE"
    _debug = S3ModuleDebug.on
else:
    # No-op debug function when debugging is off
    _debug = S3ModuleDebug.off
# =============================================================================
class S3Sync(S3Method):
""" Synchronization Handler """
def __init__(self):
    """ Constructor """
    S3Method.__init__(self)
    # Synchronization log used by all REST/API methods of this handler
    self.log = S3SyncLog()
    # Cached sync configuration (None until loaded; read via self.config)
    self._config = None
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
    """
        RESTful method handler (repository/sync, repository/register)

        @param r: the S3Request instance
        @param attr: controller attributes for the request

        @return: the response dict of the dispatched handler
    """
    http_method = r.http
    if r.method == "sync":
        if http_method == "GET":
            # Incoming pull
            return self.__send(r, **attr)
        if http_method in ("PUT", "POST"):
            # Incoming push
            return self.__receive(r, **attr)
        r.error(405, current.ERROR.BAD_METHOD)
    elif r.name == "repository" and r.method == "register":
        if http_method == "GET":
            # Incoming registration request
            return self.__register(r, **attr)
        r.error(405, current.ERROR.BAD_METHOD)
    else:
        r.error(405, current.ERROR.BAD_METHOD)
    # r.error raises; this fallback mirrors the original empty output
    return dict()
# -------------------------------------------------------------------------
# REST Methods:
# -------------------------------------------------------------------------
def __register(self, r, **attr):
    """
        Respond to an incoming registration request

        A peer announces itself with ?repository=<uuid>. Known
        repositories may get push access enabled on re-registration;
        unknown ones are inserted (push access only when the requester
        has the ADMIN role). Responds with a JSON message and logs the
        operation.

        @param r: the S3Request
        @param attr: the controller attributes
    """
    log = self.log
    result = log.SUCCESS
    message = "registration successful"
    repository_id = None
    if "repository" in r.vars:
        ruid = r.vars["repository"]
        db = current.db
        rtable = current.s3db.sync_repository
        # Look up the repository by its UUID
        row = db(rtable.uuid == ruid).select(limitby=(0, 1)).first()
        if row:
            repository_id = row.id
            # Known repository: enable push for admin requesters
            if not row.accept_push and current.auth.s3_has_role("ADMIN"):
                row.update_record(accept_push=True)
        else:
            # New repository: accept pushes only from admins
            if current.auth.s3_has_role("ADMIN"):
                accept_push = True
            else:
                accept_push = False
            repository_id = rtable.insert(name=ruid,
                                          uuid=ruid,
                                          accept_push=accept_push)
            if not repository_id:
                result = log.ERROR
                message = "registration failed"
    else:
        result = log.ERROR
        message = "no repository identifier specified"
    # Build the JSON response (HTTP 400 on error)
    if result == log.SUCCESS:
        output = current.xml.json_message(message=message,
                                          sender="%s" % self.config.uuid)
    else:
        output = current.xml.json_message(False, 400,
                                          message=message,
                                          sender="%s" % self.config.uuid)
    # Set content type header
    headers = current.response.headers
    headers["Content-Type"] = "application/json"
    # Log the operation
    log.write(repository_id=repository_id,
              resource_name=log.NONE,
              transmission=log.IN,
              mode=log.REGISTER,
              result=result,
              message=message)
    return output
# -------------------------------------------------------------------------
def __send(self, r, **attr):
    """
        Respond to an incoming pull

        Exports the requested resource for the peer, honouring the
        peer's start/limit/msince parameters and per-table sync
        filters, then logs the transmission.

        @param r: the S3Request
        @param attr: the controller attributes
    """
    mixed = attr.get("mixed", False)
    get_vars = r.get_vars
    resource = r.resource
    # Identify the requesting repository
    repository_uuid = get_vars.get("repository")
    connector = None
    if repository_uuid:
        rtable = current.s3db.sync_repository
        query = rtable.uuid == repository_uuid
        row = current.db(query).select(limitby=(0, 1)).first()
        if row:
            connector = S3SyncRepository(row)
    if connector is None:
        # Unregistered peer: use a dummy repository with Eden API
        connector = S3SyncRepository(Storage(id = None,
                                             name = "unknown",
                                             apitype = "eden",
                                             ))
    current.log.debug("S3Sync PULL from %s (%s)" % (connector.name,
                                                    connector.apitype))
    # Additional export parameters (invalid numbers fall back to None)
    start = get_vars.get("start", None)
    if start is not None:
        try:
            start = int(start)
        except ValueError:
            start = None
    limit = get_vars.get("limit", None)
    if limit is not None:
        try:
            limit = int(limit)
        except ValueError:
            limit = None
    msince = get_vars.get("msince", None)
    if msince is not None:
        msince = s3_parse_datetime(msince)
    # Sync filters from peer: URL variables of the form
    # [tablename]var=value ("~" or empty tablename = target resource);
    # repeated variables are joined with "&"
    filters = {}
    for k, v in get_vars.items():
        if k[0] == "[" and "]" in k:
            tablename, urlvar = k[1:].split("]", 1)
            if urlvar:
                if not tablename or tablename == "~":
                    tablename = resource.tablename
                f = filters.get(tablename, {})
                u = f.get(urlvar, None)
                if u:
                    u = "%s&%s" % (u, v)
                else:
                    u = v
                f[urlvar] = u
                filters[tablename] = f
    if not filters:
        filters = None
    try:
        result = connector.send(resource,
                                start = start,
                                limit = limit,
                                msince = msince,
                                filters = filters,
                                mixed = mixed,
                                )
    except NotImplementedError:
        r.error(405, "Synchronization method not supported for repository")
    # Log the transmission
    log = self.log
    log.write(repository_id = connector.id,
              resource_name = "mixed" if mixed else resource.tablename,
              transmission = log.IN,
              mode = log.PULL,
              action = "send",
              remote = result.get("remote", False),
              result = result.get("status", log.NONE),
              message = result.get("message", ""),
              )
    return result.get("response")
# -------------------------------------------------------------------------
def __receive(self, r, **attr):
    """
        Respond to an incoming push

        Reads the pushed payload from the request body and imports it
        into the target resource, using strategy/policies from the
        matching sync task, or from URL variables when no task is
        configured. Only registered repositories with accept_push are
        permitted.

        @param r: the S3Request
        @param attr: the controller attributes
    """
    mixed = attr.get("mixed", False)
    get_vars = r.get_vars
    s3db = current.s3db
    db = current.db
    # Identify the sending repository
    repository_uuid = get_vars.get("repository")
    connector = None
    if repository_uuid:
        rtable = s3db.sync_repository
        query = rtable.uuid == repository_uuid
        row = current.db(query).select(limitby=(0, 1)).first()
        if row:
            connector = S3SyncRepository(row)
    # Check that the repository is registered and allowed to push
    if connector is None or not connector.accept_push:
        r.error(403, current.ERROR.NOT_PERMITTED)
    current.log.debug("S3Sync PUSH from %s (%s)" % (connector.name,
                                                    connector.apitype))
    # Get strategy and policy
    default_update_policy = S3ImportItem.POLICY.NEWER
    default_conflict_policy = S3ImportItem.POLICY.MASTER
    # Identify the synchronization task
    ttable = s3db.sync_task
    if not mixed:
        query = (ttable.repository_id == connector.id) & \
                (ttable.resource_name == r.tablename) & \
                (ttable.deleted != True)
        task = db(query).select(limitby=(0, 1)).first()
    else:
        task = None
    last_sync = None
    if task:
        # Use the configured task's strategy and policies
        strategy = task.strategy
        update_policy = task.update_policy or default_update_policy
        conflict_policy = task.conflict_policy or default_conflict_policy
        if update_policy not in ("THIS", "OTHER"):
            last_sync = task.last_pull
    else:
        # No task configured: derive policies from URL variables.
        # NOTE(review): THIS/OTHER appear to be swapped to translate the
        # sender's perspective into the receiver's -- confirm.
        policies = S3ImportItem.POLICY
        p = get_vars.get("update_policy", None)
        values = {"THIS": "OTHER", "OTHER": "THIS"}
        switch = lambda p: p in values and values[p] or p
        if p and p in policies:
            p = switch(p)
            update_policy = policies[p]
        else:
            update_policy = default_update_policy
        p = get_vars.get("conflict_policy", None)
        if p and p in policies:
            p = switch(p)
            conflict_policy = policies[p]
        else:
            conflict_policy = default_conflict_policy
        msince = get_vars.get("msince", None)
        if msince is not None:
            last_sync = s3_parse_datetime(msince)
        s = get_vars.get("strategy", None)
        if s:
            # Restrict to the valid import methods named in the URL
            s = str(s).split(",")
            methods = S3ImportItem.METHOD
            strategy = [method for method in methods.values()
                        if method in s]
        else:
            strategy = ttable.strategy.default
    # Get the source
    source = r.read_body()
    # Import resource
    resource = r.resource
    try:
        result = connector.receive(source,
                                   resource,
                                   strategy = strategy,
                                   update_policy = update_policy,
                                   conflict_policy = conflict_policy,
                                   last_sync = last_sync,
                                   onconflict = self.onconflict,
                                   mixed = mixed,
                                   )
    except IOError:
        # NOTE(review): IOError is treated as an authorization failure
        # at the peer interface -- confirm
        current.auth.permission.fail()
    except SyntaxError:
        e = sys.exc_info()[1]
        r.error(400, e)
    except NotImplementedError:
        r.error(405, "Synchronization method not supported for repository")
    # Log the transmission
    log = self.log
    log.write(repository_id = connector.id,
              resource_name = "mixed" if mixed else resource.tablename,
              transmission = log.IN,
              mode = log.PUSH,
              action = "receive",
              remote = result.get("remote", False),
              result = result.get("status", log.NONE),
              message = result.get("message", ""),
              )
    return result.get("response")
# -------------------------------------------------------------------------
# API Methods:
# -------------------------------------------------------------------------
def synchronize(self, repository):
"""
Synchronize with a repository, called from scheduler task
@param repository: the repository Row
@return: True if successful, False if there was an error
"""
current.log.debug("S3Sync: synchronize %s" % repository.url)
log = self.log
error = None
if repository.apitype == "filesync":
if not repository.path:
error = "No path set for repository"
else:
if not repository.url:
error = "No URL set for repository"
if error:
log.write(repository_id = repository.id,
resource_name = None,
transmission = None,
mode = log.NONE,
action = "connect",
remote = False,
result = self.log.FATAL,
message = error,
)
return False
ttable = current.s3db.sync_task
query = (ttable.repository_id == repository.id) & \
(ttable.deleted != True)
tasks = current.db(query).select()
connector = S3SyncRepository(repository)
error = connector.login()
if error:
log.write(repository_id = repository.id,
resource_name = None,
transmission = log.OUT,
mode = log.LOGIN,
action = "login",
remote = True,
result = log.FATAL,
message = error,
)
return False
# Activate UUID synchronisation if required
s3 = current.response.s3
| |
# 固定長度 (fixed length)
# 固定長度
if len(parityAmount) < len(parityJourney):
parityAmount.extend([None] * (len(parityJourney) - len(parityAmount)))
if len(parityBadge) < len(parityJourney):
parityBadge.extend([None] * (len(parityJourney) - len(parityBadge)))
if len(parityFlat) < len(parityJourney):
parityFlat.extend([None] * (len(parityJourney) - len(parityFlat)))
if len(parityBadge2) < len(parityJourney):
parityBadge2.extend([None] * (len(parityJourney) - len(parityBadge2)))
# show each seat's price
for pj, pa, pf, pb, pb2 in zip(parityJourney, parityAmount, parityFlat, parityBadge, parityBadge2):
print("AutoAA: {}".format(pj.text))
if pa is None or pb is None:
print("AutoAA: {}. normal seat price: not available")
else:
print("AutoAA: {}. normal seat price: {} {} {}".format(
self.pricecounter - tps, pa.text, self.ct, "" if pb is None else pb.text
))
self.pricecounter += 1
if pf is None or pb2 is None:
print("AutoAA: 0. luxury flat seat: not available")
else:
print("AutoAA: {}. luxury flat seat price: {} {} {}\n".format(
self.pricecounter - tps, pf.text, self.ct, "" if pb2 is None else pb2.text
))
self.pricecounter += 1
print("AutoAA: please enter the desired flight price: ")
while True:
chosen = input()
if not chosen.isdigit():
print("AutoAA: Invalid input, try again")
else:
chosen = int(chosen)
if chosen >= self.pricecounter or chosen <= 0:
print("AutoAA: Error index, try again")
else:
break
try:
# 點選選擇票價
tmp = self.browser.find_element_by_id(
tflightBtn[chosen - 1].get_attribute("id")
)
tmp2 = tmp.find_element_by_xpath(
'.//*[contains(@class, "{}")]'.format(
aaConfig.flightCheckIconField
)
)
except selenium.common.exceptions.NoSuchElementException:
selenium.webdriver.ActionChains(self.browser).move_to_element(tmp).click(tmp).perform()
print("AutoAA: Departure selected")
# 輸出總票價
rt = int(self.pr.flightReturn)
if rt != 1:
tp = totalPrice = self.browser.find_element_by_xpath(
'//div[@id="{}"]/span'.format(
aaConfig.flightTotalField
)
).text
print("AutoAA: Total ticket price: {} {}".format(tp, self.ct))
print("AutoAA: press any key to continue")
trash = input()
self.submit()
def selectReturnPrice(self):
    """Let the user pick a return-flight fare and submit the choice.

    Mirrors the departure-fare selection: lists every journey with its
    normal and luxury-flat seat prices (indices continue from the
    departure list), reads the user's selection from stdin, clicks the
    matching fare button (unless already selected) and prints the total.
    One-way bookings (flightReturn != 1) are skipped entirely.
    """
    # Wait until the fare page is fully loaded
    WebDriverWait(self.browser, 60).until(
        EC.visibility_of_element_located(
            (
                By.ID, aaConfig.flightDepartureRightBtn
            )
        )
    )
    WebDriverWait(self.browser, 60).until(
        EC.presence_of_element_located(
            (
                By.XPATH, '//div[contains(@class, "{} {}")]'.format(
                    "fare-date-item-inner",
                    "active"
                )
            )
        )
    )
    # Fare indices shown to the user continue after the departure list
    tps = self.pricecounter - 1
    rt = int(self.pr.flightReturn)
    if rt != 1:
        print("AutoAA: Querying return flight price...One way trip, cancel")
        return
    print("AutoAA: Querying return flight price...")
    try:
        # Find all fare buttons
        tflightBtn = self.browser.find_elements_by_xpath(
            '//*[contains(@id, "{}")]'.format(
                aaConfig.flightChoosePriceField
            )
        )
    except selenium.common.exceptions.NoSuchElementException:
        print("AutoAA: No flights in the desired time. exit")
        sys.exit(1)
    numDown = 1
    # Journey descriptions and per-seat-type price/badge elements
    parityJourney = self.browser.find_elements_by_xpath(
        '//*[contains(@id, "{}")]'.format(
            aaConfig.flightJourneyField2
        )
    )
    parityAmount = self.browser.find_elements_by_xpath(
        '//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
            aaConfig.flightSeatFieldH,
            aaConfig.flightAmountField,
            aaConfig.flightSeatFieldT,
            numDown
        )
    )
    parityBadge = self.browser.find_elements_by_xpath(
        '//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
            aaConfig.flightSeatFieldH,
            aaConfig.flightBageField,
            aaConfig.flightSeatFieldT,
            numDown
        )
    )
    parityFlat = self.browser.find_elements_by_xpath(
        '//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
            aaConfig.flightPrioritySeatFieldH,
            aaConfig.flightAmountField,
            aaConfig.flightSeatFieldT,
            numDown
        )
    )
    parityBadge2 = self.browser.find_elements_by_xpath(
        '//*[starts-with(@id, "{}") and contains(@id, "{}{}{}-")]'.format(
            aaConfig.flightPrioritySeatFieldH,
            aaConfig.flightBageField,
            aaConfig.flightSeatFieldT,
            numDown
        )
    )
    # 固定長度: pad the shorter lists with None so zip() below does not
    # silently drop journeys (a journey may lack a seat type).
    # BUGFIX: the previous code compared len(parityJourney) to itself
    # (always equal) and, for the other lists, replaced the whole list
    # with [None] * n on any mismatch, discarding the found elements.
    # The departure-fare method pads by extending; do the same here.
    njourneys = len(parityJourney)
    for seat_list in (parityAmount, parityBadge, parityFlat, parityBadge2):
        if len(seat_list) < njourneys:
            seat_list.extend([None] * (njourneys - len(seat_list)))
    # Show each seat's price
    for pj, pa, pf, pb, pb2 in zip(parityJourney, parityAmount, parityFlat, parityBadge, parityBadge2):
        print("AutoAA: {}".format(pj.text))
        if pa is None or pb is None:
            # BUGFIX: the original format string had a "{}" placeholder
            # with no argument and printed it literally.
            print("AutoAA: normal seat price: not available")
        else:
            print("AutoAA: {}. normal seat price: {} {} {}".format(
                self.pricecounter - tps, pa.text, self.ct, pb.text
            ))
            # NOTE(review): the counter advances only for selectable fares
            # so the shown index maps onto tflightBtn -- confirm markup.
            self.pricecounter += 1
        if pf is None or pb2 is None:
            print("AutoAA: luxury flat seat: not available")
        else:
            print("AutoAA: {}. luxury flat seat price: {} {} {}\n".format(
                self.pricecounter - tps, pf.text, self.ct, pb2.text
            ))
            self.pricecounter += 1
    print("AutoAA: please enter the desired flight price: ")
    while True:
        chosen = input()
        if not chosen.isdigit():
            print("AutoAA: Invalid input, try again")
            continue
        chosen = int(chosen)
        if chosen >= (self.pricecounter - tps) or chosen <= 0:
            print("AutoAA: Error index, try again")
        else:
            break
    # Click the chosen fare: the check icon is present only once a fare
    # has been picked, so click when the icon is absent.
    # (The id lookup is outside the try so a missing button no longer
    # triggers a NameError on the unbound element in the except branch.)
    tempId = tflightBtn[tps + chosen - 1].get_attribute("id")
    fare_btn = self.browser.find_element_by_id(tempId)
    try:
        fare_btn.find_element_by_xpath(
            './/*[contains(@class, "{}")]'.format(
                aaConfig.flightCheckIconField,
            )
        )
    except selenium.common.exceptions.NoSuchElementException:
        selenium.webdriver.ActionChains(self.browser).move_to_element(fare_btn).click(fare_btn).perform()
    print("AutoAA: Return selected")
    # Print the total ticket price
    tp = self.browser.find_element_by_xpath(
        '//div[@id="{}"]/span'.format(
            aaConfig.flightTotalField
        )
    ).text
    print("AutoAA: Total ticket price: {} {}".format(tp, self.ct))
    self.submit()
def getSpecialOffer(self):
    """Handle the "special offer" (VIP bundle) page.

    Waits for the page to render, optionally clicks the VIP tier the
    user configured (self.pr.vip: 0 = none, 1-3 = tier index), then
    reports every selected bundle and the total price, and proceeds
    via the confirmation button.
    """
    priceLabel2 = None
    desiredVip = int(self.pr.vip)
    print("AutoAA: Processing special offer...")
    try:
        # Wait until the offer button and at least one bundle are rendered
        WebDriverWait(self.browser, 60).until(
            EC.visibility_of_element_located(
                (
                    By.ID, aaConfig.specialOfferBtnField
                )
            )
        )
        WebDriverWait(self.browser, 60).until(
            EC.presence_of_element_located(
                (
                    By.XPATH, '//*[contains(@class, "{}")]'.format(
                        "bundle-item"
                    )
                )
            )
        )
    except selenium.common.exceptions.TimeoutException as e:
        print("AutoAA: special offer failed. exit")
        sys.exit(1)
    # Element-id prefixes of the three VIP tiers
    vipId = [
        aaConfig.specialOfferVip1HField,
        aaConfig.specialOfferVip2HField,
        aaConfig.specialOfferVip3HField
    ]
    if desiredVip == 0:
        print("AutoAA: No additional special offer required")
    else:
        # Click the requested VIP tier
        # (earlier id-based click kept for reference)
        # self.browser.find_element_by_id(
        #     "{}{}{}".format(
        #         aaConfig.specialOfferVipHField,
        #         vipId[desiredVip - 1],
        #         aaConfig.specialOfferVipTField
        #     )
        # ).click()
        tmp = self.browser.find_elements_by_xpath(
            '//*[contains(@class, "{} {}")]//div'.format(
                aaConfig.specialOfferVipOneField,
                aaConfig.specialOfferVipTwoField
            )
        )
        # Check whether the tier is already selected.
        # NOTE(review): the list click happens in the *else* branch, i.e.
        # when a check button IS found; elsewhere in this class the click
        # is performed when the icon is absent -- confirm against markup.
        try:
            tmp2 = tmp[desiredVip - 1].find_element_by_xpath(
                '//button[contains(@class, "{}")]'.format(
                    aaConfig.specialOfferCheckField
                )
            )
        except selenium.common.exceptions.NoSuchElementException:
            pass
        else:
            self.browser.find_elements_by_xpath(
                '//ul[@class="{}"]'.format(
                    aaConfig.specialOfferListField
                )
            )[desiredVip - 1].click()
        lala = self.browser.find_element_by_xpath(
            '//div[starts-with(@id, "{}")]'.format(
                vipId[desiredVip - 1]
            )
        )
        selenium.webdriver.ActionChains(self.browser).move_to_element(lala).click(lala).perform()
    # Confirm the selection: scan each tier and print the selected bundles
    tr = [
        aaConfig.specialOfferVip1HField,
        aaConfig.specialOfferVip2HField,
        aaConfig.specialOfferVip3HField
    ]
    for index in range(0, len(tr)):
        tmp = self.browser.find_elements_by_xpath(
            '//*[contains(@id, "{}")]'.format(
                tr[index]
            )
        )
        for element in tmp:
            # Extract this tier's bundle name, price and currency label
            name = element.find_element_by_class_name(
                aaConfig.specialOfferdrField
            ).text
            price = element.find_element_by_class_name(
                aaConfig.specialOfferPriceField
            ).text.replace(" /", "")
            priceLabel = element.find_element_by_class_name(
                aaConfig.specialOfferCurrency
            ).text.replace(" /", "")
            priceLabel2 = priceLabel
            try:
                # Is this bundle marked as selected?
                select = element.find_element_by_class_name(
                    "selected"
                )
            except selenium.common.exceptions.NoSuchElementException:
                pass
            else:
                price = price.replace(priceLabel, "")
                print("AutoAA: vip{} {} {}{}".format(index + 1, name, price, priceLabel))
    price = self.browser.find_element_by_xpath(
        '//div[@id="{}"]/span'.format(
            aaConfig.flightTotalField
        )
    ).text
    print("AutoAA: Total ticket price: {} {}".format(price, priceLabel2))
    self.browser.find_element_by_id(
        aaConfig.specialOfferBtnField
    ).click()
def fillInfo(self):
# 隱性等待直到頁面載入完成
self.browser.implicitly_wait(10)
# 確定已點選
# spinlock
while True:
tmp = self.browser.find_element_by_id(
aaConfig.infoPreinstalledField.replace("label-", "")
).is_selected()
if tmp:
break
WebDriverWait(self.browser, 60).until(
EC.presence_of_element_located(
(
By.XPATH, '//div[contains(@class, "{}")]'.format(
"icon-done"
)
)
)
)
# 等待 input 框框完成勾選
WebDriverWait(self.browser, 60).until(
EC.element_to_be_clickable(
(
By.ID, aaConfig.infoPreinstalledField
)
)
)
time.sleep(3)
# # 取消預填選項
tmp = self.browser.find_element_by_id(
aaConfig.infoPreinstalledField
)
selenium.webdriver.ActionChains(self.browser).move_to_element(tmp).click(tmp).perform()
time.sleep(0.5)
# 填入旅客資料
tarrName = [
"firstname", "lastname", "gender", "birthday"
]
tarrXpath = [
'//input[contains(@id, "{}")]'.format(
aaConfig.infoFirstNameField
),
'//input[contains(@id, "{}")]'.format(
aaConfig.infoLastNameField
),
'//div[@class="{}"]'.format(
aaConfig.infoGenderField
),
'//label[contains(@for, "{}")]'.format(
aaConfig.infoBirthdayField
),
'.//*[contains(@for, "{}")]'.format(
aaConfig.infoMaleField
),
'.//*[contains(@for, "{}")]'.format(
aaConfig.infoFemaleField
)
]
def _clicker(input, m, f):
if input == "F":
tmp = "document.getElementById('{}').click();".format(f.get_attribute("for"))
self.browser.execute_script(tmp)
else:
tmp = "document.getElementById('{}').click();".format(m.get_attribute("for"))
self.browser.execute_script(tmp)
def clicker(field, input):
tmp = "document.getElementById('{}').value = '{}';".format(field.get_attribute("for"), input)
self.browser.execute_script(tmp)
# 建立旅客連結
guest = []
guest.extend(["a"] * int(self.pr.flightAdult))
guest.extend(["b"] * int(self.pr.flightBaby))
guest.extend(["c"] * int(self.pr.flightChildren))
totalTraveler = int(self.pr.flightAdult) + int(self.pr.flightBaby) + int(self.pr.flightChildren)
# 建立輸入格
travelerFirstName = self.browser.find_elements_by_xpath(tarrXpath[0])
travelerLastName = self.browser.find_elements_by_xpath(tarrXpath[1])
travelerGender = self.browser.find_elements_by_xpath(tarrXpath[2])
travelerBirthday = self.browser.find_elements_by_xpath(tarrXpath[3])
travelerMale = self.browser.find_elements_by_xpath(tarrXpath[4])
travelerFemale = self.browser.find_elements_by_xpath(tarrXpath[5])
adultListCount = 0
childrenListCount = 0
infantListCount = 0
# 填寫每位旅客資訊
for counter in range(0, totalTraveler):
# 填入 first name
tag = "firstname"
tid = travelerFirstName[counter].get_attribute("id")
tmmp = None
if "child" in tid:
tmmp = self.pr.childrenInfo[childrenListCount].get(tag, None)
self.checker(tmmp)
travelerFirstName[counter].send_keys(tmmp)
elif "infant" in tid:
tmmp = self.pr.babyInfo[infantListCount].get(tag, None)
self.checker(tmmp)
travelerFirstName[counter].send_keys(tmmp)
elif "adult" in tid:
tmmp = self.pr.adultInfo[adultListCount].get(tag, None)
self.checker(tmmp)
travelerFirstName[counter].send_keys(tmmp)
print("AutoAA: passenger {} info: {} filled in".format(tag, tmmp))
# 填入 last name
tag = "lastname"
tid = travelerLastName[counter].get_attribute("id")
tmmp = None
if "child" in tid:
tmmp = self.pr.childrenInfo[childrenListCount].get(tag, None)
self.checker(tmmp)
travelerLastName[counter].send_keys(tmmp)
elif "infant" in tid:
tmmp = self.pr.babyInfo[infantListCount].get(tag, None)
self.checker(tmmp)
travelerLastName[counter].send_keys(tmmp)
elif "adult" in tid:
tmmp = self.pr.adultInfo[adultListCount].get(tag, None)
self.checker(tmmp)
travelerLastName[counter].send_keys(tmmp)
print("AutoAA: passenger {} info: {} filled in".format(tag, tmmp))
# 填入生日
tag = "birthday"
tid = travelerBirthday[counter].get_attribute("for")
tmmp = None
if "child" in tid:
tmmp = self.pr.childrenInfo[childrenListCount].get(tag, None)
self.validate(tmmp)
clicker(travelerBirthday[counter], tmmp)
elif "infant" in tid:
tmmp = self.pr.babyInfo[infantListCount].get(tag, None)
self.validate(tmmp)
clicker(travelerBirthday[counter], tmmp)
elif "adult" in tid:
tmmp = self.pr.adultInfo[adultListCount].get(tag, None)
self.validate(tmmp)
clicker(travelerBirthday[counter], tmmp)
print("AutoAA: passenger {} info: {} filled in".format(tag, tmmp))
# 填入性別
tag = "gender"
tid = travelerMale[counter].get_attribute("for")
tmmp = None
if "child" in tid:
tmmp = self.pr.childrenInfo[childrenListCount].get(tag, None)
self.checker(tmmp)
_clicker(tmmp, travelerMale[counter], travelerFemale[counter])
elif "infant" in tid:
tmmp = self.pr.babyInfo[infantListCount].get(tag, None)
self.checker(tmmp)
_clicker(tmmp, travelerMale[counter], travelerFemale[counter])
elif "adult" in tid:
tmmp = self.pr.adultInfo[adultListCount].get(tag, None)
self.checker(tmmp)
_clicker(tmmp, travelerMale[counter], travelerFemale[counter])
print("AutoAA: passenger {} info: {} filled in".format(tag, tmmp))
# 更新人數
if "child" in tid:
childrenListCount += 1
elif "infant" in tid:
infantListCount += 1
elif "adult" in tid:
adultListCount += 1
print()
# 填入 contact email
self.browser.find_element_by_id(
aaConfig.contactEmailField
).clear()
self.browser.find_element_by_id(
aaConfig.contactEmailField
).send_keys(self.pr.contactEmail)
# 填入 | |
open.
URL : str
URL to open.
title_in : str
Title to add manually.
tags_in : str
Comma-separated tags to add manually.
desc : str
Bookmark description.
Returns
-------
tuple
Parsed results from parse_temp_file_content().
"""
temp_file_content = to_temp_file_content(url, title_in, tags_in, desc)
fd, tmpfile = tempfile.mkstemp(prefix="buku-edit-")
os.close(fd)
try:
with open(tmpfile, "w+", encoding="utf-8") as fp:
fp.write(temp_file_content)
fp.flush()
LOGDBG("Edited content written to %s", tmpfile)
cmd = editor.split(" ")
cmd += (tmpfile,)
subprocess.call(cmd)
with open(tmpfile, "r", encoding="utf-8") as f:
content = f.read()
os.remove(tmpfile)
except FileNotFoundError:
if os.path.exists(tmpfile):
os.remove(tmpfile)
LOGERR("Cannot open editor")
else:
LOGERR("Cannot open tempfile")
return None
parsed_content = parse_temp_file_content(content)
return parsed_content
def setup_logger(LOGGER):
    """Attach a StreamHandler whose emit() colorizes records by level.

    Parameters
    ----------
    LOGGER : logging.Logger
        Logger to colorize.
    """
    level_colors = {
        logging.DEBUG: "\x1b[35m",
        logging.ERROR: "\x1b[31m",
        logging.WARNING: "\x1b[33m",
        logging.INFO: "\x1b[32m",
        logging.CRITICAL: "\x1b[31m",
    }

    def colorized(emit_fn):
        def wrapped(*args):
            record = args[0]
            # Unknown levels fall back to the reset sequence
            color = level_colors.get(record.levelno, "\x1b[0m")
            record.msg = "{}[{}]\x1b[0m {}".format(
                color, record.levelname, record.msg
            )
            return emit_fn(*args)
        return wrapped

    handler = logging.StreamHandler()
    handler.emit = colorized(handler.emit)
    LOGGER.addHandler(handler)
def piped_input(argv, pipeargs=None):
    """Collect arguments from the command line plus piped stdin.

    Parameters
    ----------
    argv : list
        Command-line arguments, appended to pipeargs first.
    pipeargs : list, optional
        Output list extended in place with argv and every
        whitespace-separated token read from stdin. Callers must pass a
        list to receive the result; if omitted, stdin is still consumed
        but the tokens are discarded.
    """
    if sys.stdin.isatty():
        # Interactive terminal: nothing is being piped in
        return
    if pipeargs is None:
        # BUGFIX: the original crashed with TypeError (None += list)
        # when the default was used with piped stdin
        pipeargs = []
    pipeargs += argv
    print("buku: waiting for input (unexpected? try --nostdin)")
    for s in sys.stdin:
        pipeargs += s.split()
def setcolors(args):
    """Map a user-supplied color-spec string onto the five output colors.

    Parameters
    ----------
    args : str
        Exactly five color characters (keys into COLORMAP), one per
        output field.

    Returns
    -------
    list
        Escape sequences in field order: ID, ID string, URL,
        description, tags.
    """
    Colors = collections.namedtuple(
        "Colors", " ID_srch, ID_STR, URL_STR, DESC_STR, TAG_STR"
    )
    # Building the namedtuple enforces that exactly five colors were given
    palette = Colors(*[COLORMAP[c] for c in args])
    # namedtuple iteration preserves the field declaration order
    return list(palette)
def unwrap(text):
    """Unwrap hard-wrapped text: join lines with spaces, keep paragraph breaks."""
    lines = text.split("\n")
    pieces = []
    for idx, line in enumerate(lines[:-1]):
        pieces.append(line)
        if not line:
            # Empty line marks a paragraph break.
            pieces.append("\n\n")
        elif lines[idx + 1]:
            # Next line continues the paragraph, so join with a space.
            pieces.append(" ")
    # The final line is kept verbatim; an empty tail becomes a newline.
    pieces.append(lines[-1] if lines[-1] else "\n")
    return "".join(pieces)
def check_stdout_encoding():
    """Make sure stdout encoding is utf-8.

    If not, print error message and instructions, then exit with
    status 1.

    This function is a no-op on win32 because encoding on win32 is
    messy, and let's just hope for the best. /s
    """
    if sys.platform == "win32":
        return

    # Use codecs.lookup to resolve text encoding alias
    encoding = codecs.lookup(sys.stdout.encoding).name
    if encoding != "utf-8":
        locale_lang, locale_encoding = locale.getlocale()
        if locale_lang is None:
            locale_lang = "<unknown>"
        if locale_encoding is None:
            locale_encoding = "<unknown>"
        ioencoding = os.getenv("PYTHONIOENCODING", "not set")
        # Fix: the message previously named "ddgr" (copy-paste origin of this
        # helper); this program is buku.
        sys.stderr.write(
            unwrap(
                textwrap.dedent(
                    """\
                stdout encoding '{encoding}' detected. buku requires utf-8 to
                work properly. The wrong encoding may be due to a non-UTF-8
                locale or an improper PYTHONIOENCODING. (For the record, your
                locale language is {locale_lang} and locale encoding is
                {locale_encoding}; your PYTHONIOENCODING is {ioencoding}.)

                Please set a UTF-8 locale (e.g., en_US.UTF-8) or set
                PYTHONIOENCODING to utf-8.
                """.format(
                        encoding=encoding,
                        locale_lang=locale_lang,
                        locale_encoding=locale_encoding,
                        ioencoding=ioencoding,
                    )
                )
            )
        )
        sys.exit(1)
def monkeypatch_textwrap_for_cjk():
    """Monkeypatch textwrap so wrapping accounts for CJK wide characters."""
    # Already patched? Then this is a no-op (keeps the patch idempotent).
    if getattr(textwrap.wrap, "patched", False):
        return
    original_wrap = textwrap.wrap

    def cjk_wrap(text, width=70, **kwargs):
        width = max(width, 2)
        # Insert a NUL after every East Asian Fullwidth or East Asian Wide
        # character so the plain wrapper counts it as two columns, wrap to
        # width - 1 (if a NUL lands on a new line there is still one spare
        # column for the preceding wide character), then strip the NULs.
        #
        # East Asian Width: https://www.unicode.org/reports/tr11/
        padded = "".join(
            ch + "\0" if unicodedata.east_asian_width(ch) in ("F", "W") else ch
            for ch in unicodedata.normalize("NFC", text)
        )
        return [
            line.replace("\0", "")
            for line in original_wrap(padded, width=width - 1, **kwargs)
        ]

    def cjk_fill(text, width=70, **kwargs):
        return "\n".join(cjk_wrap(text, width=width, **kwargs))

    cjk_wrap.patched = True
    cjk_fill.patched = True
    textwrap.wrap = cjk_wrap
    textwrap.fill = cjk_fill
# main starts here
def main():
"""Main."""
global ID_STR, ID_DB_STR, MUTE_STR, URL_STR, DESC_STR, DESC_WRAP, TAG_STR, TAG_WRAP, PROMPTMSG
title_in = None
tags_in = None
desc_in = None
pipeargs = []
colorstr_env = os.getenv("BUKU_COLORS")
if len(sys.argv) >= 2 and sys.argv[1] != "--nostdin":
try:
piped_input(sys.argv, pipeargs)
except KeyboardInterrupt:
pass
# If piped input, set argument vector
if pipeargs:
sys.argv = pipeargs
# Setup custom argument parser
argparser = ExtendedArgumentParser(
description="""Bookmark manager like a text-based mini-web.
POSITIONAL ARGUMENTS:
KEYWORD search keywords""",
formatter_class=argparse.RawTextHelpFormatter,
usage="""buku [OPTIONS] [KEYWORD [KEYWORD ...]]""",
add_help=False,
)
hide = argparse.SUPPRESS
argparser.add_argument("keywords", nargs="*", metavar="KEYWORD", help=hide)
# ---------------------
# GENERAL OPTIONS GROUP
# ---------------------
general_grp = argparser.add_argument_group(
title="GENERAL OPTIONS",
description=""" -a, --add URL [tag, ...]
bookmark URL with comma-separated tags
-u, --update [...] update fields of an existing bookmark
accepts indices and ranges
refresh title and desc if no edit options
if no arguments:
- update results when used with search
- otherwise refresh all titles and desc
-w, --write [editor|index]
edit and add a new bookmark in editor
else, edit bookmark at index in EDITOR
edit last bookmark, if index=-1
if no args, edit new bookmark in EDITOR
-d, --delete [...] remove bookmarks from DB
accepts indices or a single range
if no arguments:
- delete results when used with search
- otherwise delete all bookmarks
-h, --help show this information and exit
-v, --version show the program version and exit""",
)
addarg = general_grp.add_argument
addarg("-a", "--add", nargs="+", help=hide)
addarg("-u", "--update", nargs="*", help=hide)
addarg("-w", "--write", nargs="?", const=get_system_editor(), help=hide)
addarg("-d", "--delete", nargs="*", help=hide)
addarg("-h", "--help", action="store_true", help=hide)
addarg("-v", "--version", action="version", version=__version__, help=hide)
# ------------------
# EDIT OPTIONS GROUP
# ------------------
edit_grp = argparser.add_argument_group(
title="EDIT OPTIONS",
description=""" --url keyword bookmark link
--tag [+|-] [...] comma-separated tags
clear bookmark tagset, if no arguments
'+' appends to, '-' removes from tagset
--title [...] bookmark title; if no arguments:
-a: do not set title, -u: clear title
-c, --comment [...] notes or description of the bookmark
clears description, if no arguments
--immutable N disable web-fetch during auto-refresh
N=0: mutable (default), N=1: immutable""",
)
addarg = edit_grp.add_argument
addarg("--url", nargs=1, help=hide)
addarg("--tag", nargs="*", help=hide)
addarg("--title", nargs="*", help=hide)
addarg("-c", "--comment", nargs="*", help=hide)
addarg("--immutable", type=int, default=-1, choices={0, 1}, help=hide)
# --------------------
# SEARCH OPTIONS GROUP
# --------------------
search_grp = argparser.add_argument_group(
title="SEARCH OPTIONS",
description=""" -s, --sany [...] find records with ANY matching keyword
this is the default search option
-S, --sall [...] find records matching ALL the keywords
special keywords -
"blank": entries with empty title/tag
"immutable": entries with locked title
--deep match substrings ('pen' matches 'opens')
-r, --sreg expr run a regex search
-t, --stag [tag [,|+] ...] [- tag, ...]
search bookmarks by tags
use ',' to find entries matching ANY tag
use '+' to find entries matching ALL tags
excludes entries with tags after ' - '
list all tags, if no search keywords
-x, --exclude [...] omit records matching specified keywords""",
)
addarg = search_grp.add_argument
addarg("-s", "--sany", nargs="*", help=hide)
addarg("-S", "--sall", nargs="*", help=hide)
addarg("-r", "--sreg", nargs="*", help=hide)
addarg("--deep", action="store_true", help=hide)
addarg("-t", "--stag", nargs="*", help=hide)
addarg("-x", "--exclude", nargs="*", help=hide)
# ------------------------
# ENCRYPTION OPTIONS GROUP
# ------------------------
crypto_grp = argparser.add_argument_group(
title="ENCRYPTION OPTIONS",
description=""" -l, --lock [N] encrypt DB in N (default 8) # iterations
-k, --unlock [N] decrypt DB in N (default 8) # iterations""",
)
addarg = crypto_grp.add_argument
addarg("-k", "--unlock", nargs="?", type=int, const=8, help=hide)
addarg("-l", "--lock", nargs="?", type=int, const=8, help=hide)
# ----------------
# POWER TOYS GROUP
# ----------------
power_grp = argparser.add_argument_group(
title="POWER TOYS",
description=""" --ai auto-import from Firefox/Chrome/Chromium
-e, --export file export bookmarks to Firefox format HTML
export Markdown, if file ends with '.md'
format: [title](url) <!-- TAGS -->
export Orgfile, if file ends with '.org'
format: *[[url][title]] :tags:
export buku DB, if file ends with '.db'
combines with search results, if opted
-i, --import file import bookmarks based on file extension
supports 'html', 'json', 'md', 'org', 'db'
-p, --print [...] show record details by indices, ranges
print all bookmarks, if no arguments
-n shows the last n results (like tail)
-f, --format N limit fields in -p or JSON search | |
import sys
import os
import json
import time
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import grapher_admin.wsgi
from openpyxl import load_workbook
from grapher_admin.models import Entity, DatasetSubcategory, DatasetCategory, Dataset, Source, Variable, VariableType, DataValue
from importer.models import ImportHistory
from country_name_tool.models import CountryName
from django.conf import settings
from django.db import connection, transaction
from django.utils import timezone
from django.urls import reverse
from grapher_admin.views import write_dataset_csv
import unidecode
# Local directory where the previously downloaded JMP workbook is expected.
who_wash_downloads_save_location = settings.BASE_DIR + '/data/who_wash/'
# Read-only load keeps memory usage down for the large spreadsheet.
wb = load_workbook(who_wash_downloads_save_location + 'dataset.xlsx', read_only=True)
# Source metadata stored alongside the imported variables.
source_description = {
    'dataPublishedBy': "WHO/UNICEF Joint Monitoring Programme for Water Supply, Sanitation and Hygiene (JMP)",
    'dataPublisherSource': None,
    'link': "https://washdata.org/data",
    'retrievedDate': timezone.now().strftime("%d-%B-%y"),
    'additionalInfo': None
}
# Worksheet names processed by this importer.
sections = ['Water', 'Sanitation', 'Hygiene']
def _build_columns(per_type_names, detail_prefix, detail_suffixes):
    """Build one worksheet's column layout: column index -> field spec.

    Columns 1/3/4/5 are fixed metadata labels (Country, Year, Population,
    Percent urban).  From column 6 onwards every name in *per_type_names*
    appears once per residence type (National, Rural, Urban).  When
    *detail_prefix* is given, the detailed indicator names (prefix joined
    with each of *detail_suffixes*) follow, again once per residence type.
    Each variable column maps to ``{'name': ..., 'type': ...}``.
    """
    layout = {
        1: 'Country',
        3: 'Year',
        4: 'Population',
        5: 'Percent urban',
    }
    index = 6
    for residence_type in ('National', 'Rural', 'Urban'):
        for name in per_type_names:
            layout[index] = {'name': name, 'type': residence_type}
            index += 1
    if detail_prefix is not None:
        for residence_type in ('National', 'Rural', 'Urban'):
            for suffix in detail_suffixes:
                layout[index] = {'name': detail_prefix + suffix, 'type': residence_type}
                index += 1
    return layout


# Worksheet name -> column layout of the JMP workbook.
columns = {
    'Sanitation': _build_columns(
        [
            'At least basic',
            'Limited (shared)',
            'Unimproved',
            'Open defecation',
            'Annual rate of change in basic',
            'Annual rate of change in open defecation',
        ],
        'Proportion of population using improved sanitation facilities (excluding shared) - ',
        [
            'Safely managed',
            'Disposed in situ',
            'Emptied and treated',
            'Wastewater treated',
            'Latrines and other',
            'Septic tanks',
            'Sewer connections',
        ],
    ),
    'Hygiene': _build_columns(
        [
            'Basic',
            'Limited (without water or soap)',
            'No facility',
        ],
        None,
        None,
    ),
    'Water': _build_columns(
        [
            'At least basic',
            'Limited (more than 30 mins)',
            'Unimproved',
            'Surface water',
            'Annual rate of change in basic',
        ],
        'Proportion of population using improved water supplies - ',
        [
            'Safely managed',
            'Accessible on premises',
            'Available when needed',
            'Free from contamination',
            'Piped',
            'Non-piped',
        ],
    ),
}
who_wash_category_name_in_db = 'WHO WASH Datasets'  # set the name of the root category of all data that will be imported by this script
# Wall-clock start, used to report the total import duration at the end.
start_time = time.time()
with transaction.atomic():
existing_categories = DatasetCategory.objects.values('name')
existing_categories_list = {item['name'] for item in existing_categories}
if who_wash_category_name_in_db not in existing_categories_list:
the_category = DatasetCategory(name=who_wash_category_name_in_db, fetcher_autocreated=True)
the_category.save()
else:
the_category = DatasetCategory.objects.get(name=who_wash_category_name_in_db)
existing_subcategories = DatasetSubcategory.objects.filter(categoryId=the_category.pk).values('name')
existing_subcategories_list = {item['name'] for item in existing_subcategories}
existing_variables = Variable.objects.filter(datasetId__namespace='who_wash').values('name')
existing_variables_list = {item['name'].lower() for item in existing_variables}
dataset_name_to_object = {item.name: item for item in Dataset.objects.filter(namespace='who_wash')}
source_name_to_object = {item.name: item for item in Source.objects.filter(
datasetId__in=[x.pk for x in Dataset.objects.filter(namespace='who_wash')])}
variable_name_to_object = {}
existing_entities = | |
self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Gutachterfotos, self).delete(*args, **kwargs)
# Register model signal handlers for Gutachterfotos: photo post-processing
# and permission assignment after save; photo file cleanup and permission
# removal after delete.
signals.post_save.connect(photo_post_processing, sender=Gutachterfotos)
signals.post_save.connect(assign_permissions, sender=Gutachterfotos)
signals.post_delete.connect(delete_photo, sender=Gutachterfotos)
signals.post_delete.connect(remove_permissions, sender=Gutachterfotos)
# Hospize
class Hospize(models.Model):
    """Hospices in the Hanse- und Universitätsstadt Rostock.

    Unmanaged model (``managed = False``) mapped onto an existing database
    table; each record carries an optional address reference, a mandatory
    operating body (Träger) and a point geometry in EPSG:25833.
    """
    # Client-generated UUID primary key.
    uuid = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False)
    aktiv = models.BooleanField(' aktiv?', default=True)
    # Address reference is optional; the row survives address deletion.
    adresse = models.ForeignKey(
        Adressen,
        verbose_name='Adresse',
        on_delete=models.SET_NULL,
        db_column='adresse',
        to_field='uuid',
        related_name='adressen+',
        blank=True,
        null=True)
    # Name, guarded by the project-wide typography validators.
    bezeichnung = models.CharField(
        'Bezeichnung', max_length=255, validators=[
            RegexValidator(
                regex=akut_regex, message=akut_message), RegexValidator(
                regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
                regex=apostroph_regex, message=apostroph_message), RegexValidator(
                regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
                regex=gravis_regex, message=gravis_message)])
    # Operating body is mandatory and protected against deletion (RESTRICT).
    traeger = models.ForeignKey(
        Bewirtschafter_Betreiber_Traeger_Eigentuemer,
        verbose_name='Träger',
        on_delete=models.RESTRICT,
        db_column='traeger',
        to_field='uuid',
        related_name='traeger+')
    plaetze = PositiveSmallIntegerMinField(
        'Plätze', min_value=1, blank=True, null=True)
    # Optional contact details, each with a format validator.
    telefon_festnetz = models.CharField(
        'Telefon (Festnetz)',
        max_length=255,
        blank=True,
        null=True,
        validators=[
            RegexValidator(
                regex=rufnummer_regex,
                message=rufnummer_message)])
    telefon_mobil = models.CharField(
        'Telefon (mobil)',
        max_length=255,
        blank=True,
        null=True,
        validators=[
            RegexValidator(
                regex=rufnummer_regex,
                message=rufnummer_message)])
    email = models.CharField(
        'E-Mail-Adresse',
        max_length=255,
        blank=True,
        null=True,
        validators=[
            EmailValidator(
                message=email_message)])
    website = models.CharField(
        'Website',
        max_length=255,
        blank=True,
        null=True,
        validators=[
            URLValidator(
                message=url_message)])
    geometrie = models.PointField(
        'Geometrie', srid=25833, default='POINT(0 0)')

    class Meta:
        managed = False
        db_table = 'fachdaten_adressbezug\".\"hospize_hro'
        verbose_name = 'Hospiz'
        verbose_name_plural = 'Hospize'
        description = 'Hospize in der Hanse- und Universitätsstadt Rostock'
        list_fields = {
            'aktiv': 'aktiv?',
            'adresse': 'Adresse',
            'bezeichnung': 'Bezeichnung',
            'traeger': 'Träger'
        }
        list_fields_with_foreign_key = {
            'adresse': 'adresse',
            'traeger': 'bezeichnung'
        }
        map_feature_tooltip_field = 'bezeichnung'
        map_filter_fields = {
            'bezeichnung': 'Bezeichnung',
            'traeger': 'Träger'
        }
        map_filter_fields_as_list = ['traeger']
        address_type = 'Adresse'
        address_mandatory = True
        geometry_type = 'Point'

    def __str__(self):
        # "Bezeichnung [Adresse: ..., Träger: ...]" — address part only if set.
        return self.bezeichnung + ' [' + ('Adresse: ' + str(
            self.adresse) + ', ' if self.adresse else '') + 'Träger: ' + str(self.traeger) + ']'

    def save(self, *args, **kwargs):
        # Record the acting user on the instance — presumably consumed by the
        # connected post_save permission handlers; confirm against handlers.
        self.current_authenticated_user = get_current_authenticated_user()
        super(Hospize, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Same bookkeeping for the post_delete handlers.
        self.current_authenticated_user = get_current_authenticated_user()
        super(Hospize, self).delete(*args, **kwargs)
signals.post_save.connect(assign_permissions, sender=Hospize)
signals.post_delete.connect(remove_permissions, sender=Hospize)
# Haltestellen des Haltestellenkatasters
class Haltestellenkataster_Haltestellen(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
aktiv = models.BooleanField(' aktiv?', default=True)
deaktiviert = models.DateField(
'Außerbetriebstellung', blank=True, null=True)
id = models.PositiveIntegerField('ID', default=sequence_id(
'fachdaten.haltestellenkataster_haltestellen_hro_id_seq'))
hst_bezeichnung = models.CharField(
'Haltestellenbezeichnung', max_length=255, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
hst_hafas_id = models.CharField(
'HAFAS-ID',
max_length=8,
blank=True,
null=True,
validators=[
RegexValidator(
regex=haltestellenkataster_haltestellen_hst_hafas_id_regex,
message=haltestellenkataster_haltestellen_hst_hafas_id_message)])
hst_bus_bahnsteigbezeichnung = models.CharField(
'Bus-/Bahnsteigbezeichnung',
max_length=255,
blank=True,
null=True,
validators=[
RegexValidator(
regex=akut_regex,
message=akut_message),
RegexValidator(
regex=anfuehrungszeichen_regex,
message=anfuehrungszeichen_message),
RegexValidator(
regex=apostroph_regex,
message=apostroph_message),
RegexValidator(
regex=doppelleerzeichen_regex,
message=doppelleerzeichen_message),
RegexValidator(
regex=gravis_regex,
message=gravis_message)])
hst_richtung = models.CharField(
'Richtungsinformation', max_length=255, blank=True, null=True, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
hst_kategorie = models.CharField(
'Haltestellenkategorie', max_length=255, blank=True, null=True, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
hst_linien = ChoiceArrayField(
models.CharField(
' bedienende Linie(n)',
max_length=4,
choices=()),
verbose_name=' bedienende Linie(n)',
blank=True,
null=True)
hst_rsag = models.BooleanField(
' bedient durch Rostocker Straßenbahn AG?',
blank=True,
null=True)
hst_rebus = models.BooleanField(
' bedient durch rebus Regionalbus Rostock GmbH?',
blank=True,
null=True)
hst_nur_ausstieg = models.BooleanField(
' nur Ausstieg?', blank=True, null=True)
hst_nur_einstieg = models.BooleanField(
' nur Einstieg?', blank=True, null=True)
hst_verkehrsmittelklassen = ChoiceArrayField(
models.CharField(
'Verkehrsmittelklasse(n)',
max_length=255,
choices=()),
verbose_name='Verkehrsmittelklasse(n)')
hst_abfahrten = PositiveSmallIntegerMinField(
' durchschnittliche tägliche Zahl an Abfahrten',
min_value=1,
blank=True,
null=True)
hst_fahrgastzahl_einstieg = PositiveSmallIntegerMinField(
' durchschnittliche tägliche Fahrgastzahl (Einstieg)',
min_value=1,
blank=True,
null=True)
hst_fahrgastzahl_ausstieg = PositiveSmallIntegerMinField(
' durchschnittliche tägliche Fahrgastzahl (Ausstieg)',
min_value=1,
blank=True,
null=True)
bau_typ = models.ForeignKey(
Typen_Haltestellen,
verbose_name='Typ',
on_delete=models.SET_NULL,
db_column='bau_typ',
to_field='uuid',
related_name='bau_typen+',
blank=True,
null=True)
bau_wartebereich_laenge = models.DecimalField(
'Länge des Wartebereichs (in m)',
max_digits=5,
decimal_places=2,
validators=[
MinValueValidator(
Decimal('0.01'),
'Der <strong><em>Wartebereich</em></strong> muss mindestens 0,01 m lang sein.'),
MaxValueValidator(
Decimal('999.99'),
'Der <strong><em>Wartebereich</em></strong> darf höchstens 999,99 m lang sein.')],
blank=True,
null=True)
bau_wartebereich_breite = models.DecimalField(
'Breite des Wartebereichs (in m)',
max_digits=5,
decimal_places=2,
validators=[
MinValueValidator(
Decimal('0.01'),
'Der <strong><em>Wartebereich</em></strong> muss mindestens 0,01 m breit sein.'),
MaxValueValidator(
Decimal('999.99'),
'Der <strong><em>Wartebereich</em></strong> darf höchstens 999,99 m breit sein.')],
blank=True,
null=True)
bau_befestigungsart_aufstellflaeche_bus = models.ForeignKey(
Befestigungsarten_Aufstellflaeche_Bus_Haltestellenkataster,
verbose_name='Befestigungsart der Aufstellfläche Bus',
on_delete=models.SET_NULL,
db_column='bau_befestigungsart_aufstellflaeche_bus',
to_field='uuid',
related_name='bau_befestigungsarten_aufstellflaeche_bus+',
blank=True,
null=True)
bau_zustand_aufstellflaeche_bus = models.ForeignKey(
Schaeden_Haltestellenkataster,
verbose_name='Zustand der Aufstellfläche Bus',
on_delete=models.SET_NULL,
db_column='bau_zustand_aufstellflaeche_bus',
to_field='uuid',
related_name='bau_zustaende_aufstellflaeche_bus+',
blank=True,
null=True)
bau_befestigungsart_warteflaeche = models.ForeignKey(
Befestigungsarten_Warteflaeche_Haltestellenkataster,
verbose_name='Befestigungsart der Wartefläche',
on_delete=models.SET_NULL,
db_column='bau_befestigungsart_warteflaeche',
to_field='uuid',
related_name='bau_befestigungsarten_warteflaeche+',
blank=True,
null=True)
bau_zustand_warteflaeche = models.ForeignKey(
Schaeden_Haltestellenkataster,
verbose_name='Zustand der Wartefläche',
on_delete=models.SET_NULL,
db_column='bau_zustand_warteflaeche',
to_field='uuid',
related_name='bau_zustaende_warteflaeche+',
blank=True,
null=True)
bf_einstieg = models.BooleanField(
' barrierefreier Einstieg vorhanden?', blank=True, null=True)
bf_zu_abgaenge = models.BooleanField(
' barrierefreie Zu- und Abgänge vorhanden?', blank=True, null=True)
bf_bewegungsraum = models.BooleanField(
' barrierefreier Bewegungsraum vorhanden?', blank=True, null=True)
tl_auffindestreifen = models.BooleanField(
'Taktiles Leitsystem: Auffindestreifen vorhanden?', blank=True, null=True)
tl_auffindestreifen_ausfuehrung = models.ForeignKey(
Ausfuehrungen_Haltestellenkataster,
verbose_name='Taktiles Leitsystem: Ausführung Auffindestreifen',
on_delete=models.SET_NULL,
db_column='tl_auffindestreifen_ausfuehrung',
to_field='uuid',
related_name='tl_auffindestreifen_ausfuehrungen+',
blank=True,
null=True)
tl_auffindestreifen_breite = PositiveIntegerMinField(
'Taktiles Leitsystem: Breite des Auffindestreifens (in cm)',
min_value=1,
blank=True,
null=True)
tl_einstiegsfeld = models.BooleanField(
'Taktiles Leitsystem: Einstiegsfeld vorhanden?', blank=True, null=True)
tl_einstiegsfeld_ausfuehrung = models.ForeignKey(
Ausfuehrungen_Haltestellenkataster,
verbose_name='Taktiles Leitsystem: Ausführung Einstiegsfeld',
on_delete=models.SET_NULL,
db_column='tl_einstiegsfeld_ausfuehrung',
to_field='uuid',
related_name='tl_einstiegsfeld_ausfuehrungen+',
blank=True,
null=True)
tl_einstiegsfeld_breite = PositiveIntegerMinField(
'Taktiles Leitsystem: Breite des Einstiegsfelds (in cm)',
min_value=1,
blank=True,
null=True)
tl_leitstreifen = models.BooleanField(
'Taktiles Leitsystem: Leitstreifen vorhanden?', blank=True, null=True)
tl_leitstreifen_ausfuehrung = models.ForeignKey(
Ausfuehrungen_Haltestellenkataster,
verbose_name='Taktiles Leitsystem: Ausführung Leitstreifen',
on_delete=models.SET_NULL,
db_column='tl_leitstreifen_ausfuehrung',
to_field='uuid',
related_name='tl_leitstreifen_ausfuehrungen+',
blank=True,
null=True)
tl_leitstreifen_laenge = PositiveIntegerMinField(
'Taktiles Leitsystem: Länge des Leitstreifens (in cm)',
min_value=1,
blank=True,
null=True)
tl_aufmerksamkeitsfeld = models.BooleanField(
'Aufmerksamkeitsfeld (1. Tür) vorhanden?', blank=True, null=True)
tl_bahnsteigkante_visuell = models.BooleanField(
'Bahnsteigkante visuell erkennbar?', blank=True, null=True)
tl_bahnsteigkante_taktil = models.BooleanField(
'Bahnsteigkante taktil erkennbar?', blank=True, null=True)
as_zh_typ = models.ForeignKey(
ZH_Typen_Haltestellenkataster,
verbose_name='ZH-Typ',
on_delete=models.SET_NULL,
db_column='as_zh_typ',
to_field='uuid',
related_name='as_zh_typen+',
blank=True,
null=True)
as_h_mast = models.BooleanField('Mast vorhanden?', blank=True, null=True)
as_h_masttyp = models.ForeignKey(
Masttypen_Haltestellenkataster,
verbose_name='Masttyp',
on_delete=models.SET_NULL,
db_column='as_h_masttyp',
to_field='uuid',
related_name='as_h_masttypen+',
blank=True,
null=True)
as_papierkorb = models.BooleanField(
'Papierkorb vorhanden?', blank=True, null=True)
as_fahrgastunterstand = models.BooleanField(
'Fahrgastunterstand vorhanden?', blank=True, null=True)
as_fahrgastunterstandstyp = models.ForeignKey(
Fahrgastunterstandstypen_Haltestellenkataster,
verbose_name='Typ des Fahrgastunterstand',
on_delete=models.SET_NULL,
db_column='as_fahrgastunterstandstyp',
to_field='uuid',
related_name='as_fahrgastunterstandstypen+',
blank=True,
null=True)
as_sitzbank_mit_armlehne = models.BooleanField(
'Sitzbank mit Armlehne vorhanden?', blank=True, null=True)
as_sitzbank_ohne_armlehne = models.BooleanField(
'Sitzbank ohne Armlehne vorhanden?', blank=True, null=True)
as_sitzbanktyp = models.ForeignKey(
Sitzbanktypen_Haltestellenkataster,
verbose_name='Typ der Sitzbank',
on_delete=models.SET_NULL,
db_column='as_sitzbanktyp',
to_field='uuid',
related_name='as_sitzbanktypen+',
blank=True,
null=True)
as_gelaender = models.BooleanField(
'Geländer vorhanden?', blank=True, null=True)
as_fahrplanvitrine = models.BooleanField(
'Fahrplanvitrine vorhanden?', blank=True, null=True)
as_fahrplanvitrinentyp = models.ForeignKey(
Fahrplanvitrinentypen_Haltestellenkataster,
verbose_name='Typ der Fahrplanvitrine',
on_delete=models.SET_NULL,
db_column='as_fahrplanvitrinentyp',
to_field='uuid',
related_name='as_fahrplanvitrinentypen+',
blank=True,
null=True)
as_tarifinformation = models.BooleanField(
'Tarifinformation vorhanden?', blank=True, null=True)
as_liniennetzplan = models.BooleanField(
'Liniennetzplan vorhanden?', blank=True, null=True)
as_fahrplan = models.BooleanField(
'Fahrplan vorhanden?', blank=True, null=True)
as_fahrausweisautomat = models.BooleanField(
'Fahrausweisautomat vorhanden?', blank=True, null=True)
as_lautsprecher = models.BooleanField(
'Lautsprecher vorhanden?', blank=True, null=True)
as_dfi = models.BooleanField(
'Dynamisches Fahrgastinformationssystem vorhanden?',
blank=True,
null=True)
as_dfi_typ = models.ForeignKey(
DFI_Typen_Haltestellenkataster,
verbose_name='Typ des Dynamischen Fahrgastinformationssystems',
on_delete=models.SET_NULL,
db_column='as_dfi_typ',
to_field='uuid',
related_name='as_dfi_typen+',
blank=True,
null=True)
as_anfragetaster = models.BooleanField(
'Anfragetaster vorhanden?', blank=True, null=True)
as_blindenschrift = models.BooleanField(
'Haltestellen-/Linieninformationen in Blindenschrift vorhanden?',
blank=True,
null=True)
as_beleuchtung = models.BooleanField(
'Beleuchtung vorhanden?', blank=True, null=True)
as_hinweis_warnblinklicht_ein = models.BooleanField(
'Hinweis „Warnblinklicht ein“ vorhanden?', blank=True, null=True)
bfe_park_and_ride = models.BooleanField(
'P+R-Parkplatz in Umgebung vorhanden?', blank=True, null=True)
bfe_fahrradabstellmoeglichkeit = models.BooleanField(
'Fahrradabstellmöglichkeit in Umgebung vorhanden?', blank=True, null=True)
bfe_querungshilfe = models.BooleanField(
'Querungshilfe in Umgebung vorhanden?', blank=True, null=True)
bfe_fussgaengerueberweg = models.BooleanField(
'Fußgängerüberweg in Umgebung vorhanden?', blank=True, null=True)
bfe_seniorenheim = models.BooleanField(
'Seniorenheim in Umgebung vorhanden?', blank=True, null=True)
bfe_pflegeeinrichtung = models.BooleanField(
'Pflegeeinrichtung in Umgebung vorhanden?', blank=True, null=True)
bfe_medizinische_versorgungseinrichtung = models.BooleanField(
'Medizinische Versorgungseinrichtung in Umgebung vorhanden?', blank=True, null=True)
bearbeiter = models.CharField(
'Bearbeiter', max_length=255, blank=True, null=True, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
bemerkungen = NullTextField(
'Bemerkungen', max_length=500, blank=True, null=True, validators=[
RegexValidator(
regex=akut_regex, message=akut_message), RegexValidator(
regex=anfuehrungszeichen_regex, message=anfuehrungszeichen_message), RegexValidator(
regex=apostroph_regex, message=apostroph_message), RegexValidator(
regex=doppelleerzeichen_regex, message=doppelleerzeichen_message), RegexValidator(
regex=gravis_regex, message=gravis_message)])
geometrie = models.PointField(
'Geometrie', srid=25833, default='POINT(0 0)')
    class Meta:
        # Table is managed by the database, not by Django migrations.
        managed = False
        db_table = 'fachdaten\".\"haltestellenkataster_haltestellen_hro'
        verbose_name = 'Haltestelle des Haltestellenkatasters'
        verbose_name_plural = 'Haltestellen des Haltestellenkatasters'
        description = 'Haltestellen des Haltestellenkatasters der Hanse- und Universitätsstadt Rostock'
        choices_models_for_choices_fields = {
            'hst_linien': 'Linien',
            'hst_verkehrsmittelklassen': 'Verkehrsmittelklassen'
        }
        list_fields = {
            'aktiv': 'aktiv?',
            'deaktiviert': 'Außerbetriebstellung',
            'id': 'ID',
            'hst_bezeichnung': 'Haltestellenbezeichnung',
            'hst_hafas_id': 'HAFAS-ID',
            'hst_bus_bahnsteigbezeichnung': 'Bus-/Bahnsteigbezeichnung'
        }
        list_fields_with_number = ['id']
        associated_models = {
            'Haltestellenkataster_Fotos': 'haltestellenkataster_haltestelle'
        }
        readonly_fields = ['id']
        map_feature_tooltip_field = 'hst_bezeichnung'
        geometry_type = 'Point'
        # important: only this ordering makes drop-down entries in forms of
        # child tables appear sorted
        ordering = ['id']
        as_overlay = True
def __str__(self):
return self.hst_bezeichnung + ' [ID: ' + str(self.id) + (', HAFAS-ID: ' + self.hst_hafas_id if self.hst_hafas_id else '') + (
', Bus-/Bahnsteig: ' + self.hst_bus_bahnsteigbezeichnung if self.hst_bus_bahnsteigbezeichnung else '') + ']'
def save(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Haltestellenkataster_Haltestellen, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.current_authenticated_user = get_current_authenticated_user()
super(Haltestellenkataster_Haltestellen, self).delete(*args, **kwargs)
# Keep per-object permissions in sync with the row lifecycle:
# grant on save, revoke on delete.
signals.post_save.connect(assign_permissions,
                          sender=Haltestellenkataster_Haltestellen)
signals.post_delete.connect(
    remove_permissions,
    sender=Haltestellenkataster_Haltestellen)
# Fotos des Haltestellenkatasters
class Haltestellenkataster_Fotos(models.Model):
uuid = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False)
aktiv = models.BooleanField(' aktiv?', default=True)
haltestellenkataster_haltestelle = models.ForeignKey(
Haltestellenkataster_Haltestellen,
verbose_name='Haltestelle',
on_delete=models.CASCADE,
db_column='haltestellenkataster_haltestelle',
to_field='uuid',
related_name='haltestellenkataster_haltestellen+')
motiv = models.ForeignKey(
Fotomotive_Haltestellenkataster,
verbose_name='Motiv',
on_delete=models.RESTRICT,
db_column='motiv',
to_field='uuid',
related_name='motive+')
aufnahmedatum = models.DateField('Aufnahmedatum', default=date.today)
dateiname_original = models.CharField(
'Original-Dateiname', max_length=255, default='ohne')
foto = models.ImageField(
'Foto',
storage=OverwriteStorage(),
upload_to=path_and_rename(
settings.PHOTO_PATH_PREFIX_PRIVATE +
'haltestellenkataster'),
max_length=255)
class Meta:
managed = | |
".join(["{:.2f}".format(f) for f in shareddata["timepoints"]])
default="Array size"
if "temperaturepoints" in shareddata:
TMP=shareddata["temperaturepoints"]
average,MINT,MAXT=sum(TMP)/len(TMP),min(TMP),max(TMP)
temppoints=", ".join(["{:.1f}".format(t) for t in TMP])
Twarning=""
else:
temppoints=MINT=MAXT="INCUBATOR NOT TURNED ON"
Twarning="N/A"
SO=time.asctime(time.localtime(shareddata["exp_datetime"]))
FO=time.asctime(time.localtime(shareddata["finishedtime"]))
MINM=shareddata["minimummeasure"]
MAXM=shareddata["maximummeasure"]
    def check_prp():
        # Warn when the platereader program name is not a prefix of the
        # chosen filename (reads 'filename' and 'shareddata' from the
        # enclosing scope).
        # NOTE(review): not called anywhere in the visible LST below —
        # confirm whether the first row was meant to use it.
        if filename.startswith(shareddata["platereaderprogram"]):
            return ""
        return "NOT IN FILENAME!?"
    def check_mintemp(mintemp):
        # A string value means the incubator was off (placeholder text).
        if type(mintemp)==str:
            return "N/A"
        # Flags any reading more than 0.5 degrees away from 'average'
        # (closure variable). NOTE(review): the "TOO LOW!" label also fires
        # when the minimum is too HIGH — confirm if a one-sided check was
        # intended.
        if not average-0.5<mintemp<average+0.5:
            return "TOO LOW!"
        return ""
    def check_maxtemp(maxtemp):
        # A string value means the incubator was off (placeholder text).
        if type(maxtemp)==str:
            return "N/A"
        # Flags any reading more than 0.5 degrees away from 'average'
        # (closure variable). NOTE(review): the "TOO HIGH!" label also fires
        # when the maximum is too LOW — confirm if a one-sided check was
        # intended.
        if not average-0.5<maxtemp<average+0.5:
            return "TOO HIGH!"
        return ""
def check_minmeasure(minmeasure):
if minmeasure<0.001:
return "TOO SMALL!"
return ""
def check_maxmeasure(maxmeasure):
if maxmeasure>3.5:
return "TOO HIGH!"
return ""
LST=[("Platereader program","",shareddata["platereaderprogram"]),
("Array size","",shareddata["n_curves"]),
("Started on","",SO),
("Finished on","",FO),
("Total runtime (hrs)","","{:.2f}".format(shareddata["runtime_hours"])),
("Number of measurements","",shareddata["n_measures"]),
("Timepoints","",timepoints),
("","",""),
("Temperature readings",Twarning,temppoints),
("Minimum temperature",check_mintemp(MINT),MINT),
("Maximum temperature",check_maxtemp(MAXT),MAXT),
("","",""),
("Lowest reading",check_minmeasure(MINM),MINM),
("Highest reading",check_maxmeasure(MINM),MAXM)]
C1,C2,C3=zip(*LST)
nonemptywarningindexes=[i for i,n in enumerate(C2) if n]
if nonemptywarningindexes: defaultindex=nonemptywarningindexes[0]
else: defaultindex=2
default=C1[defaultindex]
root=tk.Tk()
LB1b=MultiColumnListbox(root,
title="AFVCF",
instruct=("Check file {}{}"
"Hit OK/<Enter> to proceed with this "
"file, or <Escape> to cancel and "
"choose another file."
.format(filename,os.linesep)),
buttontext="OK",
headers=["Check","WARNINGS","Value"],
lists=LST,
default=default)
root.focus_force()
root.geometry(windowposition)
root.mainloop() #waits for selection/cancel
if LB1b.values[0]:
MAINDICT["shareddata"]=shareddata
MAINDICT.update(shareddata)
MAINDICT["rowdata"]=rowdata
return MAINDICT
else:
return False
def count_files_in(folder,dig=False,include=[".csv",".DAT"]):
    """Count monitored files under *folder* matching the *include* extensions."""
    monitor=DirectoryMonitor(folder,dig=dig,include=include,report=False)
    return len(monitor)
def output_to_txt(MAINDICT,
                  extension="tab",
                  delimiter="\t",
                  spacer="\t",
                  ask=False,
                  replace=False,
                  **kwargs):
    """Export the platereader data in MAINDICT to a delimited text file.

    Writes one row per well (id, border flag, min, max, measurements) into
    <platereader_output>/ConvertedFiles and opens that folder. Returns the
    target file path, or None if the file exists and is not overwritten.
    """
    headers=["well","isborder","minimum","maximum","measurements:"]
    sourcefilename=MAINDICT["originalfilename"]
    shareddata=MAINDICT["shareddata"]
    rowdata=MAINDICT["rowdata"]
    filepath=os.path.join(platereader_output,sourcefilename)
    if shareddata is None or rowdata is None:
        # Data not already parsed upstream: re-read the source file.
        shareddata,rowdata=read_data_file(filepath)
    headers+=["{:.2f}".format(t) for t in shareddata["timepoints"]]
    targetfilename=os.path.splitext(sourcefilename)[0]+"."+extension
    targetfilefolder=os.path.join(platereader_output,"ConvertedFiles")
    prepare_path(targetfilefolder)
    #OPEN THIS TARGETFILEPATH FOLDER AT THE END
    targetfilepath=os.path.join(targetfilefolder,targetfilename)
    #get plate
    plt=Plates()[str(shareddata["n_curves"])]
    if os.path.exists(targetfilepath):
        if ask:
            answer=raw_input("{} already exists. Overwrite it?"
                             .format(targetfilepath))
            if not answer.lower().startswith("y"):
                return
        elif not replace:
            LOG.info("{} already exists".format(targetfilepath))
            open_on_Windows(targetfilefolder)
            return
    with open(targetfilepath,"wb") as fileob:
        writer=csv.writer(fileob,
                          delimiter=delimiter,
                          quoting=csv.QUOTE_MINIMAL)
        writer.writerow(headers)
        for i,(row,well) in enumerate(zip(rowdata,plt.yield_records())):
            measures=row["measurements"]
            if spacer==delimiter:
                # One column per measurement.
                measurestring=list(measures)
            else:
                # All measurements packed into a single spacer-joined column.
                measurestring=[spacer.join([str(v)
                                            for v in row["measurements"]])]
            rowout=[str(well["wellid"].value),
                    str(well["isborder"].value),
                    str(min(measures)),
                    str(max(measures)),
                    ""]+measurestring
            writer.writerow(rowout)
        # (removed redundant fileob.close(); the 'with' block closes the file)
    LOG.info("{} created".format(targetfilepath))
    open_on_Windows(targetfilefolder)
    #open_on_Windows(targetfilepath)
    return targetfilepath
def choose_user_initials(MAINDICT):
    """Let the user pick existing initials or enter new ones (1-5 letters).

    Stores the choice in MAINDICT["userinitials"] and returns MAINDICT,
    or returns None if no valid initials were chosen.
    """
    userfolder=MAINDICT["userfolder"]
    ALLINI=FALL.get_values_of_atom("user")
    F=Files(userfolder)
    if len(F)==0:
        USERINITIALS="*new*"
        INI=[]
        # BUG FIX: LST and DEF were left undefined on this branch, so the
        # listbox below crashed with a NameError for an empty user folder.
        LST=[("*new*","")]
        DEF="*new*"
    else:
        INI=F.get_values_of_atom("user")
        LST=list(INI.items())+[("*new*","")]
        DEF=F[-1]["user"].value
    root=tk.Tk()
    TIT="PHENOS"
    LB3=MultiColumnListbox(root,
                           title=TIT,
                           instruct=("Select initials.{}"
                                     "Or *new* to enter new initials."
                                     .format(os.linesep)),
                           headers=["User initials","Number of files"],
                           lists=LST,
                           default=DEF)
    root.focus_force()
    root.geometry(windowposition)
    root.mainloop()
    USERINITIALS=LB3.values[0]
    # Default prompt; also covers the cancel path, where 'instruction' could
    # previously be referenced before assignment inside the loop below.
    instruction="Enter new user initials (<=5 letters)"
    if USERINITIALS=="*new*":
        USERINITIALS=None
    while not USERINITIALS:
        root=tk.Tk()
        EB2=EntryBox(root,title="AFVCF",instruct=instruction)
        root.focus_force()
        root.geometry(windowposition)
        root.mainloop()
        USERINITIALS=EB2.value.strip()
        if not 1<=len(USERINITIALS)<=5:
            instruction=("{} not OK. Must be 1-5 letters long. Choose again."
                         .format(USERINITIALS))
            LOG.error(instruction)
            USERINITIALS=None
        elif USERINITIALS in INI:
            instruction=("{} already in {}. Choose again"
                         .format(USERINITIALS,LOCS.currentdbase))
            LOG.error(instruction)
            USERINITIALS=None
        elif USERINITIALS in ALLINI:
            instruction=("{} already in use in another folder ({} files). "
                         "Choose again.".format(USERINITIALS,ALLINI[USERINITIALS]))
            LOG.error(instruction)
            USERINITIALS=None
        else:
            # Only plain ASCII letters are allowed.
            chars=set(USERINITIALS.lower())
            ok=set("abcdefghijklmnopqrstuvwxyz")
            notok=chars-ok
            if notok:
                notokstring=", ".join(list(notok))
                instruction=("The following characters are not valid letters: "
                             "{} Choose again.".format(notokstring))
                LOG.error(instruction)
                USERINITIALS=None
    if USERINITIALS:
        MAINDICT["userinitials"]=USERINITIALS
        return MAINDICT
def choose_experiment_number(MAINDICT):
    """Let the user pick (or enter) an experiment number (0-255).

    Stores the choice in MAINDICT["experimentnumber"] and returns MAINDICT;
    returns None if the user cancels or no valid number is chosen.
    """
    userfolder=MAINDICT["userfolder"]
    userinitials=MAINDICT["userinitials"]
    FLST=Files(userfolder).get(user=userinitials)
    if not FLST:
        # No files yet for these initials: offer experiment 1 by default.
        EXPNUMBER=1
        LST=[("*new* (1)",""),
             ("*new* (other)","")]
        INI=[]
        DEF=LST[0][0]
    else:
        if type(FLST)!=list:
            FLST=[FLST]
        # Map existing experiment number -> list of its file letters.
        INI=defaultdict(list)
        for FL in FLST:
            previousexpnum=FL["experimentnumber"].value
            previousfilelet=FL["fileletter"].value
            INI[previousexpnum].append(previousfilelet)
        LST=sorted([(k,"".join(sorted(v))) for k,v in INI.items()],
                   reverse=True)
        # Default new number = highest existing + 1.
        EXPNUMBER=LST[0][0]+1
        DEF=LST[0][0]
        LST=[("*new* ({})".format(EXPNUMBER),""),
             ("*new* (other)",""),]+LST
    root=tk.Tk()
    TIT="PHENOS: '{}'".format(build_filetitle(**locals().copy()))
    LB4=MultiColumnListbox(root,
                           title=TIT,
                           instruct=("Select experiment number.{}"
                                     .format(os.linesep)),
                           headers=["Experiment number","Existing file letters"],
                           lists=LST,
                           default=DEF)
    root.focus_force()
    root.geometry(windowposition)
    root.mainloop()
    EXPNUMBER=LB4.values[0]
    if EXPNUMBER=="*new* (other)":
        EXPNUMBER=None
        instruction="Enter new experiment number (0-255)"
    # NOTE(review): if the listbox is cancelled without the "*new* (other)"
    # branch, 'instruction' may be unbound here — confirm cancel semantics.
    while not EXPNUMBER:
        root=tk.Tk()
        EB3=EntryBox(root,title="AFVCF",instruct=instruction)
        root.focus_force()
        root.geometry(windowposition)
        root.mainloop()
        EXPNUMBER=EB3.value.strip()
        if EXPNUMBER is None:
            return None
        try:
            EXPNUMBER=int(EXPNUMBER)
        except:
            instruction=("{} not a number. Choose again.")
            LOG.error(instruction)
            EXPNUMBER=None
        # NOTE: Python-2 comparison semantics make 0<=None<=255 False here
        # rather than raising.
        if not 0<=EXPNUMBER<=255:
            instruction=("{} not OK. Must be 0-255. Choose again."
                         .format(EXPNUMBER))
            LOG.error(instruction)
            EXPNUMBER=None
        elif EXPNUMBER in INI:
            instruction=("{} already in {}. Choose again"
                         .format(EXPNUMBER,LOCS.currentdbase))
            LOG.error(instruction)
            EXPNUMBER=None
    # Listbox selections arrive as unicode, e.g. "*new* (12)" -> 12.
    if type(EXPNUMBER)==unicode:
        if EXPNUMBER.startswith("*new* "):
            EXPNUMBER=int(EXPNUMBER[7:-1])
    if EXPNUMBER:
        MAINDICT["experimentnumber"]=EXPNUMBER
        return MAINDICT
def choose_file_letter(MAINDICT):
    """Let the user pick (or enter) a file letter (a-z) for the experiment.

    Stores the choice in MAINDICT["fileletter"] (and, when previous files
    exist, MAINDICT["previousfiles"]) and returns MAINDICT.
    """
    ok="abcdefghijklmnopqrstuvwxyz"
    userfolder=MAINDICT["userfolder"]
    userinitials=MAINDICT["userinitials"]
    experimentnumber=MAINDICT["experimentnumber"]
    FLST=Files(userfolder).get(user=userinitials,
                               experimentnumber=experimentnumber)
    if not FLST:
        # First file of this experiment defaults to letter 'a'.
        FILELETTER="a"
        LST=[("*new* (a)",""),
             ("*new* (other)","")]
        INI={}
    else:
        previousfiles={}
        INI={}
        if type(FLST)!=list:
            FLST=[FLST]
        # Map existing letter -> source file path, and letter -> file object.
        for FL in FLST:
            INI[FL["fileletter"].value]=FL["filepath"].value
            previousfiles[FL["fileletter"].value]=FL
        LST=sorted(INI.items(),reverse=True)
        MAINDICT["previousfiles"]=[previousfiles[l] for l,fn in LST]
        # Default new letter = successor of the highest existing letter.
        FILELETTER=chr(ord(LST[0][0])+1)
        if FILELETTER not in ok:
            # Ran past 'z': cannot allocate another letter.
            LOG.error("fileletter {}' not valid"
                      .format(FILELETTER))
            return
        LST=[("*new* ({})".format(FILELETTER),""),
             ("*new* (other)",""),]+LST
    DEF=LST[0][0]
    root=tk.Tk()
    TIT="PHENOS: '{}'".format(build_filetitle(**locals().copy()))
    LB4=MultiColumnListbox(root,
                           title=TIT,
                           instruct=("Select file letter.{}"
                                     .format(os.linesep)),
                           headers=["File letter","Used in"],
                           lists=LST,
                           default=DEF,
                           notselectable=INI.keys())
    root.focus_force()
    root.geometry(windowposition)
    root.mainloop()
    FILELETTER=LB4.values[0]
    if FILELETTER=="*new* (other)":
        FILELETTER=None
        instruction="Enter new file letter (a-z)"
    # NOTE(review): if the listbox is cancelled, 'instruction' may be unbound
    # in the loop below — confirm cancel semantics.
    while not FILELETTER:
        root=tk.Tk()
        EB3=EntryBox(root,title="AFVCF",instruct=instruction)
        root.focus_force()
        root.geometry(windowposition)
        root.mainloop()
        FILELETTER=EB3.value.strip()
        if FILELETTER is None:
            return None
        try:
            FILELETTER=FILELETTER.lower()
        except:
            instruction=("{} not a letter. Choose again.")
            LOG.error(instruction)
            FILELETTER=None
        if FILELETTER not in ok:
            instruction=("{} not OK. Must be a-z. Choose again."
                         .format(FILELETTER))
            LOG.error(instruction)
            FILELETTER=None
        elif FILELETTER in INI:
            instruction=("{} already in experiment {}{}. Choose again"
                         .format(FILELETTER,
                                 userinitials,
                                 experimentnumber))
            LOG.error(instruction)
            FILELETTER=None
    if FILELETTER:
        # Listbox selections look like "*new* (b)" -> "b".
        if FILELETTER.startswith("*new* "):
            FILELETTER=FILELETTER[7:-1]
        MAINDICT["fileletter"]=FILELETTER
        return MAINDICT
def choose_treatment(MAINDICT):
    """Let the user pick an existing treatment or enter a new one (<=40 chars).

    "YPD" is surfaced as "YPD (control)" at the top of the list and mapped
    back to "YPD" on return. Stores the choice in MAINDICT["treatment"].
    """
    userfolder=MAINDICT["userfolder"]
    userinitials=MAINDICT["userinitials"]
    experimentnumber=MAINDICT["experimentnumber"]
    fileletter=MAINDICT["fileletter"]
    # Merge treatment usage counts from the All folder and the current one.
    FAd=FALL.get_values_of_atom("treatment")
    for k,v in Files().get_values_of_atom("treatment").items():
        if k in FAd:
            FAd[k]+=v
        else:
            FAd[k]=v
    LST=sorted(FAd.items())
    #
    DEF="YPD"
    if fileletter!="a":
        # Later files in an experiment default to the first file's treatment.
        FLST=Files(userfolder).get(user=userinitials,
                                   experimentnumber=experimentnumber)
        if FLST:
            if type(FLST)!=list:
                FLST=[FLST]
            DEF=FLST[0]["treatment"].value
    if DEF=="YPD":
        DEF="YPD (control)"
    #
    #Shunt YPD to top
    LST2=[("YPD (control)",FAd.get("YPD",0))]
    LST2+=[(a,b) for a,b in LST if a!="YPD"]
    LST2+=[("*new*","")]
    root=tk.Tk()
    TIT="PHENOS: '{}'".format(build_filetitle(**locals().copy()))
    LB5=MultiColumnListbox(root,
                           title=TIT,
                           instruct=("Select treatment.{}"
                                     .format(os.linesep)),
                           headers=["Treatment","Number of files (including in All) with treatment"],
                           lists=LST2,
                           default=DEF)
    root.focus_force()
    root.geometry(windowposition)
    root.mainloop()
    TREATMENT=LB5.values[0]
    if TREATMENT=="*new*":
        TREATMENT=None
        instruction="Enter new treatment name"
    # NOTE(review): if the listbox is cancelled, 'instruction' may be unbound
    # in the loop below — confirm cancel semantics.
    while not TREATMENT:
        root=tk.Tk()
        EB4=EntryBox(root,title="AFVCF",instruct=instruction)
        root.focus_force()
        root.geometry(windowposition)
        root.mainloop()
        TREATMENT=EB4.value.strip()
        if len(TREATMENT)>40:
            instruction="{} is too long. Choose again (<=40 characters).".format(TREATMENT)
            LOG.error(instruction)
            TREATMENT=None
    if TREATMENT=="YPD (control)":
        TREATMENT="YPD"
    if TREATMENT:
        MAINDICT["treatment"]=TREATMENT
        return MAINDICT
#
def categorize_alleles(ALOCS):
    """Run the allele-categorization pipeline and write the three reports.

    Prompts for a .gff and a .vcf file, then saves allele details plus DNA
    and protein allele-per-strain tables, and opens the output folder.
    Returns the updated ALOCS, or None if a dialog was cancelled.
    """
    ALOCS=choose_gff_file(ALOCS)
    if not ALOCS:
        return
    ALOCS=choose_vcf_file(ALOCS)
    if not ALOCS:
        return
    vcf_base=os.path.splitext(os.path.basename(ALOCS["vcf_filepath"]))[0]
    rootdir=ALOCS["outputdir"] or scriptdir()
    reader=ALOCS["gffreader"]
    # (template, writer-method) pairs for the three report files.
    reports=[("AFVCF allele details {}.txt",reader.save_allele_details),
             ("AFVCF dna allele per strain {}.tab",reader.save_dna_allele_strains),
             ("AFVCF protein allele per strain {}.tab",reader.save_protein_allele_strains)]
    for template,saver in reports:
        saver(filepath=os.path.join(rootdir,template.format(vcf_base)))
    open_on_Windows(rootdir)
    return ALOCS
def choose_gff_file(ALOCS):
    """Ask the user to locate a reference-genome .gff file and parse it.

    Loops until a parseable file is chosen; stores the GFF_reader and its
    chromosome names in ALOCS and returns ALOCS, or returns None if the
    dialog is cancelled.
    """
    tit="Locate reference genome .gff file"
    while not ALOCS.get("gffreader",None):
        FILEPATH=ALOCS["gff_filepath"]
        FILEDIR=ALOCS["gff_filedir"]
        if not FILEDIR:
            FILEDIR=ALOCS["scriptdir"]
        root=tk.Tk()
        root.geometry(windowposition)
        root.withdraw()
        filepath=tkFileDialog.askopenfilename(title=tit,
                                              filetypes=[("GFF","*.gff")],
                                              initialdir=FILEDIR,
                                              initialfile=FILEPATH)
        root.destroy()
        if not filepath:
            return
        elif not os.path.exists(filepath):
            continue
        else:
            #open file and check it looks right
            try:
                GR=GFF_reader(filepath)
                GR.make_intergenic_features()
                ALOCS["gff_filepath"]=filepath
                ALOCS["gffreader"]=GR
                ALOCS["chrnames"]=GR.chrnames
                return ALOCS
            # Narrowed from a bare 'except:' so Ctrl-C/SystemExit still abort.
            except Exception:
                tit="Invalid .gff file. Choose again"
                continue
def check_vcf_file(vcffilepath):
    """Return (chromosome_name, sorted_strain_names) from the first VCF record.

    Raises ValueError if the file contains no records (previously this
    surfaced as a NameError on 'chrname').
    """
    strainnames=[]
    chrname=None
    with open(vcffilepath,'rb') as vcfFO:
        vcf_reader=vcf.Reader(vcfFO)
        for VR in vcf_reader:
            chrname=VR.CHROM
            for call in VR.samples:
                strainnames.append(call.sample)
            # Only the first record is needed for chromosome/strain names.
            break
    if chrname is None:
        raise ValueError("no records found in {}".format(vcffilepath))
    return chrname,sorted(strainnames)
def choose_vcf_file(ALOCS):
    """Ask the user to locate a .vcf file, validate it, and register it.

    May chain into chromosome-alias and strain-alias file selection.
    Returns the updated ALOCS, or None if a dialog is cancelled.
    """
    tit="Locate .vcf file"
    # NOTE(review): this tests key 'vcffilepath' but the loop stores
    # 'vcf_filepath'. Harmless today because every successful iteration
    # returns out of the loop, but confirm which key callers expect
    # before unifying.
    while not ALOCS.get("vcffilepath",None):
        FILEDIR=ALOCS["vcf_filedir"]
        if not FILEDIR:
            # BUG FIX: previously assigned the literal list ["gff_filedir"]
            # instead of looking the directory up in ALOCS, so the file
            # dialog received a bogus initialdir.
            FILEDIR=ALOCS["gff_filedir"]
        FILEPATH=ALOCS["vcf_filepath"]
        root=tk.Tk()
        root.geometry(windowposition)
        root.withdraw()
        filepath=tkFileDialog.askopenfilename(title=tit,
                                              filetypes=[("VCF","*.vcf")],
                                              initialdir=FILEDIR,
                                              initialfile=FILEPATH)
        root.destroy()
        if not filepath:
            return
        elif not os.path.exists(filepath):
            continue
        else:
            #open file and check it looks right
            #WIP
            try:
                chrname,strainnames=check_vcf_file(filepath)
            # Narrowed from a bare 'except:' so Ctrl-C still aborts.
            except Exception:
                tit="Invalid .vcf file. Choose again"
                continue
            ALOCS["vcf_filepath"]=filepath
            ALOCS["vcf_filedir"]=os.path.dirname(filepath)
            ALOCS["strainnames"]=strainnames
            if chrname not in ALOCS["chrnames"]:
                # VCF uses different chromosome names: ask for an alias map.
                # NOTE(review): a cancelled alias dialog returns None, which
                # is then passed to choose_strainalias_file — confirm.
                ALOCS=choose_chralias_file(ALOCS)
            ALOCS=choose_strainalias_file(ALOCS)
            if not ALOCS: return
            ALOCS["gffreader"].add_vcfs(filepath)
            return ALOCS
def choose_chralias_file(ALOCS):
    """Ask the user to locate a chromosome alias .tab file and load it.

    Loops until a valid file is loaded into the GFF reader; returns ALOCS,
    or None if the dialog is cancelled.
    """
    tit="Locate chromosome alias file"
    while True:
        FILEDIR=ALOCS["chralias_filedir"]
        if not FILEDIR:
            FILEDIR=ALOCS["vcf_filedir"]
        FILEPATH=ALOCS["chralias_filepath"]
        root=tk.Tk()
        root.geometry(windowposition)
        root.withdraw()
        filepath=tkFileDialog.askopenfilename(initialdir=FILEDIR,
                                              title=tit,
                                              filetypes=[("TAB DELIMITED","*.tab")],
                                              initialfile=FILEPATH)
        root.destroy()
        if not filepath:
            return
        try:
            GR=ALOCS["gffreader"]
            GR.make_chraliasdict(filepath)
            ALOCS["chralias_filepath"]=filepath
            ALOCS["chralias_filedir"]=os.path.dirname(filepath)
            return ALOCS
        # Narrowed from a bare 'except:' so Ctrl-C still aborts.
        except Exception:
            tit="Not a valid chromosome alias file. "+tit
            continue
def choose_strainalias_file(ALOCS):
    """Ask the user to locate a strain alias .tab file and load it.

    Loops until a valid file is loaded into the GFF reader; returns ALOCS,
    or None if the dialog is cancelled.
    """
    tit="Locate strain alias file"
    while True:
        FILEDIR=ALOCS["stralias_filedir"]
        if not FILEDIR:
            FILEDIR=ALOCS["chralias_filedir"]
        FILEPATH=ALOCS["stralias_filepath"]
        root=tk.Tk()
        root.geometry(windowposition)
        root.withdraw()
        filepath=tkFileDialog.askopenfilename(initialdir=FILEDIR,
                                              title=tit,
                                              filetypes=[("TAB DELIMITED","*.tab")],
                                              initialfile=FILEPATH)
        root.destroy()
        if not filepath:
            return
        # (removed stray Python-2 debug statement 'print filepath')
        try:
            GR=ALOCS["gffreader"]
            GR.make_strainaliasdict(filepath)
            ALOCS["stralias_filepath"]=filepath
            ALOCS["stralias_filedir"]=os.path.dirname(filepath)
            return ALOCS
        # Narrowed from a bare 'except:' so Ctrl-C still aborts.
        except Exception:
            tit="Not a valid strain alias file. "+tit
            continue
#
def visualize_alleles(ALOCS):
    """Plot growth curves grouped by DNA and protein alleles for chosen regions.

    Walks the user through folder/file/region selection, renders two plot
    sets via CurvesWithoutAgar_Alleles2, records their save paths in
    ALOCS["savepaths"], and opens the output folder. Returns ALOCS, or
    None if any selection step is cancelled.
    """
    if not ALOCS: return
    ALOCS=choose_userfolder(ALOCS)
    if not ALOCS: return
    ALOCS=choose_combifiles(ALOCS)
    if not ALOCS: return
    ALOCS=choose_allele_files(ALOCS)
    if not ALOCS: return
    ALOCS=choose_regions(ALOCS)
    if not ALOCS: return
    CFs=ALOCS["combifileobjects"]
    RNs=ALOCS["selectedregions"]
    # NOTE(review): DCD, PNs and DASSD are assigned but never used below —
    # confirm whether they are leftovers.
    DCD={}
    PNs=ALOCS["plotnames"]=[]
    DASSD=ALOCS["dnaallelestrainsshareddata"]
    DCDFR=ALOCS["dnaallelestrainsrowdictionary"]
    outputdir=ALOCS["outputdir"]
    if not outputdir:
        outputdir=ALOCS["locations"]["genotypes"]
        ALOCS["outputdir"]=outputdir
    # DNA-allele plots.
    CPD=CurvesWithoutAgar_Alleles2(CFs,
                                   RNs,
                                   DCDFR,
                                   savedir=outputdir)
    ALOCS["savepaths"]=CPD.savepaths
    # Protein-allele plots.
    PCDFR=ALOCS["proteinallelestrainsrowdictionary"]
    CPP=CurvesWithoutAgar_Alleles2(CFs,
                                   RNs,
                                   PCDFR,
                                   dnaorprotein="protein",
                                   savedir=outputdir)
    ALOCS["savepaths"]+=CPP.savepaths
    open_on_Windows(os.path.dirname(CPP.savepaths[0]))
    return ALOCS
def choose_userfolder(ALOCS,
                      IGNORE=["All","Controls"]):
    """Let the user pick a user folder (excluding IGNORE) and activate it.

    Stores the choice in ALOCS["userfolder"] and returns ALOCS.
    """
    LST,LSTED=[],[]
    LOCS=ALOCS["locations"]
    for p in LOCS.yield_userpaths():
        fp=os.path.basename(p)
        fpc=count_files_in(p)
        if fp not in IGNORE:
            LST.append((fp,fpc))
            LSTED.append(fp)
    LST.sort()
    DEF=LOCS.currentuserfolder
    if DEF in IGNORE or DEF not in LSTED:
        # BUG FIX: previously 'DEF=NF[0]' referenced an undefined name NF
        # (NameError); default to the first listed folder instead.
        DEF=LST[0][0] if LST else None
    root=tk.Tk()
    LB2=MultiColumnListbox(root,
                           title="AFVCF",
                           instruct=("Select user folder.{}"
                                     "Or ESCAPE/<close> to save as tab "
                                     "file without further analysis"
                                     .format(os.linesep)),
                           headers=["User folder",
                                    "Number of files in folder"],
                           lists=LST,
                           default=DEF)
    root.focus_force()
    root.geometry(windowposition)
    root.mainloop()
    USERFOLDER=LB2.values[0]
    LOG.info("user selected folder {}".format(USERFOLDER))
    if USERFOLDER:
        LOCS.change(USERFOLDER,create=True)
        LOG.info("active folder set to {}".format(USERFOLDER))
        ALOCS["userfolder"]=USERFOLDER
    return ALOCS
def choose_combifiles(ALOCS):
CF=CombiFiles(ALOCS["userfolder"])
LST=[cf for cf in CF]
LST.sort(key=lambda cf:getattr(cf,"timestamp",0),reverse=True)
def timeconvert(tv):
if tv:
try:
return time.asctime(time.localtime(tv))
except:
pass
return ""
LST2=[(cf.value,
cf["treatment"].value,
cf["platelayout"].value,
cf.is_control(),
timeconvert(getattr(cf,"timestamp","")))
for cf in LST]
if not LST2:
LOG.error("No new combifiles to create in {}"
.format(LOCS.get_userpath()))
headers=["Files","Treatment","Layout","Is control?",
"Timestamp of first"]
root=tk.Tk()
TIT="AFVCF: '{}'".format(ALOCS["userfolder"])
instruction=("Select combined file(s) in user folder {}\n"
"to visualize alleles for,\n"
"or <Insert> to open plots folder,\n"
"or <Escape> to quit.\n\n".format(ALOCS["userfolder"]))
try:
DEF=LST2[0][0]
except:
DEF=None
LB7=MultiColumnListbox(root,
title=TIT,
instruct=instruction,
headers=headers,
lists=LST2,
default=DEF,
selectmode="extended")
root.focus_force()
root.geometry(windowposition)
root.mainloop()
PICK=LB7.values
if type(PICK)!=list:
PICK=[PICK]
combifileobs=[]
for P in PICK:
if P is None or P==' ':
| |
str(self)
return float(self).__format__(s)
    def to_wei(self):
        """Return the raw integer amount in base (wei-style) units."""
        return self._wei
    def decimals(self):
        """Return the number of decimal places this balance carries."""
        return self._decimals
class TokenProxy:
    """
    A proxy for an ERC20 token. Monitors events, processes them when update()
    is called, and fulfils balance requests from memory.
    """
    def __init__(self, contract):
        """
        Set up a proxy around a Web3py contract object that implements ERC20.
        """
        self.__contract = contract
        self.__transfer_filter = self.__contract.events.Transfer.createFilter(fromBlock='latest')
        # This maps from string address to Balance balance
        self.__balances = {}
        # This records who we approved for who
        self.__approved_file = "{}-{}.json".format(str(contract.address), 'approvals')
        if not os.path.exists(self.__approved_file):
            # Create an empty JSON store; 'with' guarantees the handle is
            # closed (previous code leaked the file object).
            with open(self.__approved_file, 'w') as store:
                store.write('{}')
            tmp_file_data = {}
        else:
            # Read (and close) the existing approvals store.
            with open(self.__approved_file, 'r') as store:
                data = store.read()
            tmp_file_data = {} if len(data) == 0 else json.loads(data)
        self.__approved = tmp_file_data
        # Load initial parameters from the chain.
        # Assumes no events are happening to change the supply while we are doing this.
        self.__decimals = self.__contract.functions.decimals().call()
        self.__symbol = self.__contract.functions.symbol().call()
        self.__supply = Balance(self.__contract.functions.totalSupply().call(), self.__decimals)
    # Expose some properties to make us easy to use in place of the contract
    @property
    def decimals(self):
        """Token decimal places, as reported by the contract at startup."""
        return self.__decimals
    @property
    def symbol(self):
        """Token ticker symbol."""
        return self.__symbol
    @property
    def totalSupply(self):
        """Total supply as a Balance, tracked from mint/burn Transfer events."""
        return self.__supply
    @property
    def address(self):
        """Address of the wrapped contract."""
        return self.__contract.address
    @property
    def contract(self):
        """The underlying Web3py contract object."""
        return self.__contract
    def update(self, is_init_agents=()):
        """
        Process pending events and update state to match chain.
        Assumes no transactions are still in flight.

        is_init_agents: optional iterable of agents whose balances are
        force-refreshed from the chain. (Default changed from a mutable
        list to a tuple; it is never mutated, so callers are unaffected.)
        """
        # These addresses need to be polled because we have no balance from
        # before all these events.
        new_addresses = set()
        for transfer in self.__transfer_filter.get_new_entries():
            # For every transfer event since we last updated...
            # Each looks something like:
            # AttributeDict({'args': AttributeDict({'from': ..., 'to': ...,
            # 'value': ...}), 'event': 'Transfer', ...})
            args = transfer['args']
            moved = Balance(args['value'], self.__decimals)
            if args['from'] in self.__balances:
                self.__balances[args['from']] -= moved
            elif args['from'] == ZERO_ADDRESS:
                # This is a mint
                self.__supply += moved
            else:
                new_addresses.add(args['from'])
            if args['to'] in self.__balances:
                self.__balances[args['to']] += moved
            elif args['to'] == ZERO_ADDRESS:
                # This is a burn
                self.__supply -= moved
            else:
                new_addresses.add(args['to'])
        for address in new_addresses:
            # TODO: can we get a return value and a correct-as-of block in the same call?
            self.__balances[address] = Balance(self.__contract.caller({'from' : address, 'gas': 100000}).balanceOf(address), self.__decimals)
        for agent in is_init_agents:
            # TODO: can we get a return value and a correct-as-of block in the same call?
            self.__balances[agent.address] = Balance(self.__contract.caller({'from' : agent.address, 'gas': 100000}).balanceOf(agent.address), self.__decimals)
    def __getitem__(self, address):
        """
        Get the balance of the given address as a Balance, with the given number of decimals.
        Address can be a string or any object with an .address field.
        """
        address = getattr(address, 'address', address)
        if address not in self.__balances:
            # Don't actually cache here; wait for a transfer.
            # Transactions may still be in flight
            return Balance(self.__contract.caller({'from' : address, 'gas': 100000}).balanceOf(address), self.__decimals)
        else:
            # Clone the stored balance so it doesn't get modified and upset the user
            return self.__balances[address].clone()
    def ensure_approved(self, owner, spender):
        """
        Approve the given spender to spend all the owner's tokens on their behalf.
        Owner and spender may be addresses or things with addresses.
        """
        # Hoist the repeated getattr(owner, 'address', owner) lookups.
        owner_address = getattr(owner, 'address', owner)
        spender = getattr(spender, 'address', spender)
        if owner_address not in self.__approved or spender not in self.__approved[owner_address]:
            # Issue an approval for the maximum possible amount.
            tx_hash = transaction_helper(
                owner,
                self.__contract.functions.approve(spender, UINT256_MAX),
                500000
            )
            providerAvax.make_request("avax.issueBlock", {})
            receipt = w3.eth.waitForTransactionReceipt(tx_hash, poll_latency=tx_pool_latency)
            if owner_address not in self.__approved:
                self.__approved[owner_address] = {spender: 1}
            else:
                self.__approved[owner_address][spender] = 1
            # Persist the approval record; 'with' closes the handle
            # (previous code leaked the file object).
            with open(self.__approved_file, 'w') as store:
                store.write(json.dumps(self.__approved))
    def from_wei(self, wei):
        """
        Convert a number of wei (possibly a float) into a Balance with the
        right number of decimals.
        """
        return Balance(wei, self.__decimals)
    def from_tokens(self, tokens):
        """
        Convert a number of token units (possibly a float) into a Balance with
        the right number of decimals.
        """
        return Balance.from_tokens(tokens, self.__decimals)
class Agent:
    """
    Represents an agent. Tracks all the agent's balances.
    """
    def __init__(self, dao, pangolin_pair, xsd_token, usdt_token, **kwargs):
        """
        Set up an agent against the given DAO, Pangolin pair and token proxies.

        Recognized kwargs (all optional): starting_avax, starting_usdt,
        max_faith, min_faith, use_faith, wallet_address. Minting of the
        starting USDT happens here and waits for the transaction receipt.
        """
        # xSD TokenProxy
        self.xsd_token = xsd_token
        # USDT TokenProxy
        self.usdt_token = usdt_token
        # xSDS (Dao share) balance
        self.xsds = Balance(0, xSDS["decimals"])
        # avax balance
        self.avax = kwargs.get("starting_avax", Balance(0, 18))
        # Coupon underlying part by expiration epoch
        self.underlying_coupons = collections.defaultdict(float)
        # Coupon premium part by expiration epoch
        self.premium_coupons = collections.defaultdict(float)
        # What's our max faith in the system in USDT?
        self.max_faith = kwargs.get("max_faith", 0.0)
        # And our min faith
        self.min_faith = kwargs.get("min_faith", 0.0)
        # Should we even use faith?
        self.use_faith = kwargs.get("use_faith", True)
        # add wallet addr
        self.address = kwargs.get("wallet_address", '0x0000000000000000000000000000000000000000')
        #coupon expirys
        self.coupon_expirys = []
        # how many times coupons have been redeemmed
        self.redeem_count = 0
        self.dao = dao
        # current coupon assigned index of epoch
        self.max_coupon_epoch_index = 0
        # Pangolin Pair TokenProxy
        self.pangolin_pair_token = pangolin_pair
        # keeps track of latest block seen for nonce tracking/tx
        self.seen_block = {}
        self.next_tx_count = w3.eth.getTransactionCount(self.address, block_identifier=int(w3.eth.get_block('latest')["number"]))
        self.current_block = 0
        # NOTE(review): gate hard-coded to True; original condition on the
        # 'is_mint' kwarg kept in the trailing comment — confirm intent.
        if True:#kwargs.get("is_mint", False):
            # need to mint USDT to the wallets for each agent
            start_usdt_formatted = kwargs.get("starting_usdt", Balance(0, USDT["decimals"]))
            providerAvax.make_request("avax.issueBlock", {})
            tx_hash = transaction_helper(
                self,
                self.usdt_token.contract.functions.mint(
                    self.address, start_usdt_formatted.to_wei()
                ),
                500000
            )
            time.sleep(1.1)
            providerAvax.make_request("avax.issueBlock", {})
            w3.eth.waitForTransactionReceipt(tx_hash, poll_latency=tx_pool_latency)
    @property
    def xsd(self):
        """
        Get the current balance in USDT from the TokenProxy.
        """
        return self.xsd_token[self]
    @property
    def usdt(self):
        """
        Get the current balance in USDT from the TokenProxy.
        """
        return self.usdt_token[self]
    @property
    def lp(self):
        """
        Get the current balance in Pangolin LP Shares from the TokenProxy.
        """
        return self.pangolin_pair_token[self]
    @property
    def coupons(self):
        """
        Get the current balance in of coupons for agent
        """
        return self.dao.total_coupons_for_agent(self)
    def __str__(self):
        """
        Turn into a readable string summary.
        """
        return "Agent(xSD={:.2f}, usdt={:.2f}, avax={}, lp={}, coupons={:.2f})".format(
            self.xsd, self.usdt, self.avax, self.lp, self.coupons)
    def get_strategy(self, current_timestamp, price, total_supply, total_coupons, agent_coupons):
        """
        Get weights, as a dict from action to float, as a function of the price.
        """
        strategy = collections.defaultdict(lambda: 1.0)
        # TODO: real (learned? adversarial? GA?) model of the agents
        # TODO: agent preferences/utility function
        # People are fast to coupon bid to get in front of redemption queue
        strategy["coupon_bid"] = 2.0
        strategy["provide_liquidity"] = 0.1
        if price >= 1.0:
            # No rewards for expansion by itself
            strategy["bond"] = 0
            # And not unbond
            strategy["unbond"] = 0
            # Or redeem if possible
            # strategy["redeem"] = 10000000000000.0 if self.coupons > 0 else 0
            # incetive to buy above 1 is for more coupons
            strategy["buy"] = 1.0
            strategy["sell"] = 1.0
            # less incentive to remove liquidity above 1
            strategy["remove_liquidity"] = 0.1
        else:
            # We probably want to unbond due to no returns
            strategy["unbond"] = 0
            # And not bond
            strategy["bond"] = 0
            # likely to remove liquidity below peg to reduce IL?
            strategy["remove_liquidity"] = 4.0 if agent_coupons > 0 else 1.0
        if self.use_faith:
            # Vary our strategy based on how much xSD we think ought to exist
            if price * total_supply > self.get_faith(current_timestamp, price, total_supply):
                # There is too much xSD, so we want to sell
                strategy["sell"] = 2.0
            else:
                # no faith based buying, just selling
                pass
        return strategy
    def get_faith(self, current_timestamp, price, total_supply):
        """
        Get the total faith in xSD that this agent has, in USDT.
        If the market cap is over the faith, the agent thinks the system is
        over-valued. If the market cap is under the faith, the agent thinks the
        system is under-valued.
        """
        # TODO: model the real economy as bidding on utility in
        # mutually-beneficial exchanges conducted in xSD, for which a velocity
        # is needed, instead of an abstract faith?
        # TODO: different faith for different people
        # Sinusoidal faith between min_faith and max_faith over a 5e6-second period.
        center_faith = (self.max_faith + self.min_faith) / 2
        swing_faith = (self.max_faith - self.min_faith) / 2
        faith = center_faith + swing_faith * math.sin(current_timestamp * (2 * math.pi / 5000000))
        return faith
class | |
<filename>tests/functional/Hydro/Riemann/RiemannSolution.py
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# RiemannSolution
#
# Adapted from code I got from <NAME>, which in turn was based on code from
# Toro as described in the following comments.
#
# Exact Riemann solver for the Euler equations in one dimension
# Translated from the Fortran code er1pex.f and er1pex.ini
# by Dr. <NAME> downloaded from
# http://www.numeritek.com/numerica_software.html#freesample
#-------------------------------------------------------------------------------
from math import *
import numpy as np
import argparse
# Several standard tests listed as (x0, x1, xdiaph, gamma_gas, out_time, dl, vl, pl, dr, vr, pr)
# where (d*, v*, p*) are the (density, velocity, pressure) of the left/right
# initial states and xdiaph is the initial diaphragm position inside [x0, x1].
# The bounds are chosen so that a good comparison can be made in the range x \in [0,1]
Riemann_packaged_problems = {
    "sod"                : ( 0.0, 1.0, 0.5, 1.4, 0.20, 1.0, 0.0, 1.0, 0.125, 0.0, 0.1),                          # TEST 1 (Modified Sod)
    "123"                : ( 0.0, 1.0, 0.5, 1.4, 0.15, 1.0, -2.0, 0.4, 1.0, 2.0, 0.4),                           # TEST 2 (123 problem)
    "leftwc"             : ( 0.0, 1.0, 0.5, 1.4, 0.012, 1.0, 0.0, 1000.0, 1.0, 0.0, 0.01),                       # TEST 3 (Left Woodward & Colella)
    "2shock_collision"   : (-1.0, 2.0, 0.4, 1.4, 0.035, 5.99924, 19.5975, 460.894, 5.99242, -6.19633, 46.0950),  # TEST 4 (Collision of 2 shocks)
    "stationary_contact" : (-0.5, 1.5, 0.8, 1.4, 0.012, 1.0, -19.59745, 1000.0, 1.0, -19.59745, 0.01),           # TEST 5 (Stationary contact)
    "slow_shock"         : (-2.0, 8.0, 0.5, 1.4, 1.0, 3.857143, -0.810631, 10.33333, 1.0, -3.44, 1.0),           # TEST 6 (Slow shock)
    "shock_contact_shock" : (-1.0, 2.0, 0.5, 1.4, 0.3, 1.0, 0.5, 1.0, 1.25, -0.5, 1.0),                          # TEST 7 (Shock-Contact-Shock)
    "leblanc"            : ( 0.0, 1.0, 0.3, 1.4, 0.5, 1.0, 0.0, 2.0e-1/3.0, 0.01, 0.0, 2.0e-10/3.0),             # TEST 8 (LeBlanc)
}
#-------------------------------------------------------------------------------
# The main object.
#-------------------------------------------------------------------------------
class RiemannSolution:
def __init__(self,
problem = "Sod", # ("", "Sod", "123", "Stationary_contact", "Slow_shock", "Slow_contact_shock", "LeBlanc")
n = 1000, # number of points in evaluating exact solution
x0 = None, # box min coordinate
x1 = None, # box max coordinate
xdiaph = None, # position of diaphragm xdiaph \in [x0, x1]
gamma_gas = None, # ratio of specific heats
out_time = None, # default time of solution
dl = None, # density (left state)
vl = None, # velocity (left state)
pl = None, # pressure (left state)
hl = None, # smoothing scale (left state)
dr = None, # density (right state)
vr = None, # velocity (right state)
pr = None, # pressure (right state)
hr = None): # smoothing scale (right state)
assert problem or (x0 and x1 and out_time and xdiaph and gamma_gas and dl and vl and pl and dr and vr and pr)
# Get the ICs.
if problem:
assert problem.lower() in Riemann_packaged_problems
_x0, _x1, _xdiaph, _gamma_gas, _out_time, _dl, _vl, _pl, _dr, _vr, _pr = Riemann_packaged_problems[problem.lower()]
if x0 is None:
x0 = _x0
if x1 is None:
x1 = _x1
if xdiaph is None:
xdiaph = _xdiaph
if gamma_gas is None:
gamma_gas = _gamma_gas
if out_time is None:
out_time = _out_time
if dl is None:
dl = _dl
if vl is None:
vl = _vl
if pl is None:
pl = _pl
if dr is None:
dr = _dr
if vr is None:
vr = _vr
if pr is None:
pr = _pr
# Store the variables
self.n = n
self.x0 = x0
self.x1 = x1
self.xdiaph = xdiaph
self.gamma_gas = gamma_gas
self.out_time = out_time
self.dl = dl
self.vl = vl
self.pl = pl
self.hl = hl
self.dr = dr
self.vr = vr
self.pr = pr
self.hr = hr
return
#---------------------------------------------------------------------------
# Compute the solution.
#---------------------------------------------------------------------------
def solution(self,
time = None,
x = None):
n = self.n
x0 = self.x0
x1 = self.x1
xdiaph = self.xdiaph
gamma_gas = self.gamma_gas
out_time = self.out_time
dl = self.dl
vl = self.vl
pl = self.pl
hl = self.hl
dr = self.dr
vr = self.vr
pr = self.pr
hr = self.hr
# Solution time
if not time is None:
out_time = time
else:
out_time = self.out_time
# Sampling positions
if x is None:
assert n > 0
assert x1 > x0
x = np.linspace(x0, x1, n)
else:
n = len(x)
# Did we get the initial (left, right) h?
if hl is None:
hl = x[1] - x[0]
if hr is None:
hr = x[-1] - x[-2]
assert hl > 0 and hr > 0
# compute gamma related constants
g1 = (gamma_gas - 1.0)/(2.0*gamma_gas)
g2 = (gamma_gas + 1.0)/(2.0*gamma_gas)
g3 = 2.0*gamma_gas/(gamma_gas - 1.0)
g4 = 2.0/(gamma_gas - 1.0)
g5 = 2.0/(gamma_gas + 1.0)
g6 = (gamma_gas - 1.0)/(gamma_gas + 1.0)
g7 = (gamma_gas - 1.0)/2.0
g8 = gamma_gas - 1.0
# compute sound speeds
cl = sqrt(gamma_gas*pl/dl)
cr = sqrt(gamma_gas*pr/dr)
        #---------------------------------------------------------------------------
        # purpose: to provide a guessed value for pressure
        #          pm in the Star Region. The choice is made
        #          according to adaptive Riemann solver using
        #          the PVRS, TRRS and TSRS approximate
        #          Riemann solvers. See Sect. 9.5 of Chapt. 9 of Ref. 1
        #---------------------------------------------------------------------------
        def guessp():
            """Return an initial guess for the star-region pressure.

            Uses the closure variables (dl, vl, pl, cl), (dr, vr, pr, cr) and
            the gamma constants g1..g7 from the enclosing solution() scope.
            """
            # quser bounds the pressure ratio for which the cheap PVRS
            # estimate is considered trustworthy.
            quser = 2.0
            # compute guess pressure from PVRS Riemann solver
            cup = 0.25*(dl + dr)*(cl + cr)
            ppv = 0.5*(pl + pr) + 0.5*(vl - vr)*cup
            ppv = max(0.0, ppv)
            pmin = min(pl, pr)
            pmax = max(pl, pr)
            qmax = pmax/pmin
            if (qmax <= quser and (pmin <= ppv and ppv <= pmax)):
                pm = ppv     # select PVRS Riemann solver
            else:
                if (ppv < pmin):
                    # select Two-Rarefaction Riemann solver
                    pq = pow(pl/pr, g1)
                    vm = (pq*vl/cl + vr/cr + g4*(pq - 1.0))/(pq/cl + 1.0/cr)
                    ptl = 1.0 + g7*(vl - vm)/cl
                    ptr = 1.0 + g7*(vm - vr)/cr
                    pm = 0.5*(pow(pl*ptl, g3) + pow(pr*ptr, g3))
                else:
                    # select Two-Shock Riemann solver with PVRS as estimate
                    gel = sqrt((g5/dl)/(g6*pl + ppv))
                    ger = sqrt((g5/dr)/(g6*pr + ppv))
                    pm = (gel*pl + ger*pr - (vr - vl))/(gel + ger)
            return pm
        #---------------------------------------------------------------------------
        # purpose: to evaluate the pressure functions
        #          fl and fr in exact Riemann solver
        #          and their first derivatives
        #---------------------------------------------------------------------------
        def prefun(p, dk, pk, ck):
            """Return (f, fd): the pressure function and its derivative for
            one side of the Riemann problem.

            p           : trial star-region pressure
            dk, pk, ck  : density, pressure and sound speed of that side
            (g1, g2, g4, g5, g6 come from the enclosing solution() scope)
            """
            if (p <= pk):
                # rarefaction wave
                pratio = p/pk
                f = g4*ck*(pow(pratio, g1) - 1.0)
                fd = (1.0/(dk*ck))*pow(pratio, -g2)
            else:
                # shock wave
                ak = g5/dk
                bk = g6*pk
                qrt = sqrt(ak/(bk + p))
                f = (p - pk)*qrt
                fd = (1.0 - 0.5*(p - pk)/(bk + p))*qrt
            return f, fd
#---------------------------------------------------------------------------
# purpose: to compute the solution for pressure and
# velocity in the Star Region
#---------------------------------------------------------------------------
def starpu(pscale):
nriter = 20
tolpre = 1.0e-6
# guessed value pstart is computed
pstart = guessp()
pold = pstart
udiff = vr - vl
print ("----------------------------------------\n"
" Iteration number Change\n"
"----------------------------------------")
i = 1
change = 10.0*tolpre
while i <= nriter and change > tolpre:
fl, fld = prefun(pold, dl, pl, cl)
fr, frd = prefun(pold, dr, pr, cr)
p = pold - (fl + fr + udiff)/(fld + frd)
change = 2.0*abs((p - pold)/(p + pold))
print '\t', i, "\t\t", change
if (p < 0.0):
p = tolpre
pold = p
i += 1
if (i > nriter):
print "divergence in Newton-Raphson iteration"
# compute velocity in star region
u = 0.5*(vl + vr + fr - fl)
print "----------------------------------------\n" \
" Pressure Velocity\n" \
"----------------------------------------\n" \
" ", p/pscale, "\t\t", u, '\n' \
"----------------------------------------"
return p, u
#---------------------------------------------------------------------------
# purpose: to sample the solution throughout the wave
# pattern. Pressure pm and velocity vm in the
# star region are known. Sampling is performed
# in terms of the 'speed' s = x/t. Sampled
# values are d, v, p
#---------------------------------------------------------------------------
def sample(pm, vm, s):
if (s <= vm):
# sampling point lies to the left of the contact discontinuity
if (pm <= pl):
# left rarefaction
shl = vl - cl
if (s <= shl):
# sampled point is left data state
d = dl
v = vl
p = pl
h = hl
else:
cml = cl*pow(pm/pl, g1)
stl = vm - cml
if (s > stl):
| |
<gh_stars>1-10
import json
import copy
from pprint import pprint
import itertools
import numpy as np
from falx.symbolic import SymTable, SymVal
from falx.utils import table_utils
from falx.visualization import visual_trace
from falx.visualization.visual_trace import BarV, BarH, Point, Line, Area, Box
def remove_unused_fields(data):
    """Remove (in place) every field whose value is None in all rows.

    Args:
        data: a table, i.e. a list of dicts that all share the same keys.
    Returns:
        The list of removed field names (empty for an empty table).
    """
    if not data:
        # nothing to inspect; the old code crashed on data[0] here
        return []
    # a field is unused when it is None in every row
    unused_fields = [key for key in data[0] if all(r[key] is None for r in data)]
    for r in data:
        for k in unused_fields:
            r.pop(k)
    return unused_fields
def get_channel_value(encodings, channel, r):
    """Given encodings, return the value in tuple r for the field bound to
    `channel`, or None when the channel is not encoded.
    """
    enc = encodings.get(channel)
    if enc is None:
        return None
    return r[enc.field]
class VisDesign(object):
    """Top level visualization construct.

    Pairs a data source with a chart definition; renders to vega-lite or to
    a ggplot2 (R) script.
    """
    def __init__(self, data, chart):
        # data can either be a list of tables (for layered chart) or a single table
        # each table is a list of named tuples:
        #   e.g. [{"a": 1, "b": 2, "c": 100},
        #         {"a": 2, "b": 5, "c": 15}]
        # is a table with 3 columns "a", "b", "c" and two rows
        self.data = data
        self.chart = chart
    def to_vl_obj(self):
        """generate vl obj from the vis design"""
        chart_obj = self.chart.to_vl_obj()
        if isinstance(self.chart, LayeredChart):
            # in this case, the data section is a list of tables;
            # combine them into one table tagged with layer_id so each
            # layer can filter out its own rows.
            combined_data = []
            for i, layer_data in enumerate(self.data):
                for r in layer_data:
                    new_r = copy.copy(r)
                    new_r["layer_id"] = i
                    combined_data.append(new_r)
            chart_obj["data"] = {"values": combined_data}
        else:
            chart_obj["data"] = {"values": self.data}
        return chart_obj
    def to_ggplot2(self):
        """Generate an R script (list of lines) rendering this design."""
        script = [
            "library(jsonlite)",
            "library(ggplot2)"
        ]
        data_vars = ["data_{}".format(i) for i in range(len(self.data))]
        if isinstance(self.chart, LayeredChart):
            # one data frame per layer; row_id supports geom_rect positioning
            for i in range(len(self.data)):
                script.append("{} <- fromJSON('{}')".format(data_vars[i], json.dumps(self.data[i])))
                script.append("{}$row_id <- as.numeric(row.names({}))".format(data_vars[i], data_vars[i]))
            script.append("p <- ggplot() + {}".format(self.chart.to_ggplot2(data_vars)))
        else:
            data_var = "data"
            script.append("{} <- fromJSON('{}')".format(data_var, json.dumps(self.data)))
            script.append("{}$row_id <- as.numeric(row.names({}))".format(data_var, data_var))
            script.append("p <- ggplot() + {}".format(self.chart.to_ggplot2(data_var)))
        script.append("p")
        return script
    def to_vl_json(self, indent=4):
        """Render the vega-lite object as a JSON string."""
        return json.dumps(self.to_vl_obj(), indent=indent)
    def eval(self):
        """Evaluate the design into a visual trace (list of elements)."""
        return self.chart.eval(self.data)
    def update_field_names(self, mapping):
        """Given a mapping between current field names to new field names,
        update all of their occurence
        Args:
            mapping:
                a dict that maps old names to new names if the chart is single layered
                a list of dicts that maps each layer if multi-layered
        Return: None
            it updates names in place
        """
        if isinstance(self.chart, (LayeredChart,)):
            assert(isinstance(mapping, (list,)))
            for i, l in enumerate(self.chart.layers):
                for key, e in l.encodings.items():
                    # A field may be absent from its layer's mapping (or the
                    # mapping list may be shorter than the layer list): leave
                    # such fields untouched.  Only the lookup errors are
                    # caught here — a bare `except` would hide real bugs.
                    try:
                        e.field = mapping[i][e.field]
                    except (KeyError, IndexError):
                        pass
        else:
            for key, e in self.chart.encodings.items():
                e.field = mapping[e.field]
    @staticmethod
    def inv_eval(vtrace):
        """inverse evaluation of a visual trace
        Args: vtrace: a visual trace
        Returns: a list of pairs (table, vis) s.t. vis(table)=vtrace
            The output table is wrapped in a SymTable
        """
        res = []
        for data, chart in LayeredChart.inv_eval(vtrace):
            # canonicalize row order so candidate results are deterministic
            if isinstance(data, (list,)):
                for d in data:
                    d.values.sort(key=lambda x: json.dumps(x))
            else:
                data.values.sort(key=lambda x: json.dumps(x))
            res.append((data, chart))
        return res
    @staticmethod
    def load_from_vegalite(vl_spec, input_data):
        """given a vegalite spec, load the spec into Falx VisDesign object """
        def get_value(obj, key):
            # missing channel properties default to None
            return obj[key] if key in obj else None
        # multi-layered chart
        if "layer" in vl_spec:
            data = []
            layer_specs = vl_spec["layer"]
            for lspec in vl_spec["layer"]:
                # note that "True" here is a string expression
                pred = lspec["transform"][0]["filter"] if "transform" in lspec else "True"
                data.append(table_utils.filter_table(input_data, pred))
        else:
            data = input_data
            layer_specs = [vl_spec]
        layers = []
        for lspec in layer_specs:
            mark_ty = lspec["mark"] if not isinstance(lspec["mark"], (dict,)) else lspec["mark"]["type"]
            encodings = [Encoding(channel, get_value(enc, "field"), get_value(enc, "type"), get_value(enc, "sort"))
                         for channel, enc in lspec["encoding"].items()]
            if mark_ty == "bar":
                orientation = "horizontal" if lspec["encoding"]["y"]["type"] == "nominal" else "vertical"
                chart = BarChart(encodings, orientation)
            elif mark_ty == "boxplot":
                chart = BoxPlot(encodings)
            elif mark_ty == "area":
                chart = AreaChart(encodings)
            elif mark_ty == "line":
                chart = LineChart(encodings)
            elif mark_ty in ["point", "circle", "text", "rect"]:
                chart = ScatterPlot(mark_ty, encodings)
            else:
                # previously fell through and crashed later with an
                # UnboundLocalError on `chart`; fail with a clear message
                raise ValueError("unsupported mark type: {}".format(mark_ty))
            layers.append(chart)
        # layered chart will handle load responsibility
        if len(layers) == 1:
            return VisDesign(data, layers[0])
        return VisDesign(data, LayeredChart(layers, resolve=vl_spec["resolve"] if "resolve" in vl_spec else None))
class LayeredChart(object):
    def __init__(self, layers, resolve):
        """A layered chart composed of several single-mark charts.

        Args:
            layers: list of chart objects, one per layer
            resolve: vega-lite scale/axis resolution spec shared by layers
        """
        self.layers = layers
        self.resolve = resolve
    def to_vl_obj(self):
        """Generate the vega-lite object, one spec per layer."""
        layer_obj = [l.to_vl_obj() for l in self.layers]
        for i, l in enumerate(layer_obj):
            # Normalize every mark into dict form and make it translucent so
            # overlapping layers stay readable.  (Fixed: the old code only
            # handled dict marks that already carried an "opacity" key; a
            # dict mark without one was wrongly re-wrapped into
            # {"type": {...}, "opacity": 0.7}.)
            if isinstance(l["mark"], (dict,)):
                l["mark"]["opacity"] = 0.7
            else:
                l["mark"] = {"type": l["mark"], "opacity": 0.7}
            # lines / circles / points keep full opacity in multilayered
            # charts.  (Fixed: the vega-lite mark type is "circle", not
            # "circ", which never matched.)
            if l["mark"]["type"] in ["line", "circle", "point"]:
                l["mark"]["opacity"] = 1
            # each layer only draws its own rows of the combined table
            l["transform"] = [{"filter": "datum.layer_id == {}".format(i)}]
        vl_obj = {
            "layer": layer_obj,
            "resolve": self.resolve
        }
        return vl_obj
    def to_ggplot2(self, data_vars):
        """Render all layers as one '+'-joined ggplot2 expression."""
        return " + ".join([layer.to_ggplot2(data_var=data_vars[i], alpha=0.5) for i, layer in enumerate(self.layers)])
    def eval(self, data_list):
        """obtain elements in each layer and put them together. """
        result = []
        for data, layer in zip(data_list, self.layers):
            result += layer.eval(data)
        return result
    @staticmethod
    def inv_eval(vtrace):
        """returns a list of (abs_table, layer) pairs. """
        trace_layer = visual_trace.partition_trace(vtrace)
        layers = {}
        for vty in trace_layer:
            if vty == "BarV":
                layers[vty] = BarChart.inv_eval(trace_layer[vty], orientation="vertical")
            elif vty == "BarH":
                layers[vty] = BarChart.inv_eval(trace_layer[vty], orientation="horizontal")
            elif vty == "Point":
                layers[vty] = ScatterPlot.inv_eval(trace_layer[vty])
            elif vty == "Line":
                layers[vty] = LineChart.inv_eval(trace_layer[vty])
            elif vty == "Area":
                layers[vty] = AreaChart.inv_eval(trace_layer[vty])
            elif vty == "Box":
                layers[vty] = BoxPlot.inv_eval(trace_layer[vty])
        #TODO: handle stacked area chart later
        if len(layers) == 1:
            # directly return the layer if there is only one layer
            return layers[list(layers.keys())[0]]
        else:
            res = []
            layer_candidates = [layers[vty] for vty in layers]
            sizes = [list(range(len(l))) for l in layer_candidates]
            # iterating over combinations for different layers
            for id_list in itertools.product(*sizes):
                # id_list[i] is the candidate (data, layer) pair for layer i
                data_layer_pairs = [layer_candidates[i][id_list[i]] for i in range(len(id_list))]
                data_for_all_layers = [cl[0] for cl in data_layer_pairs]
                all_layers = [cl[1] for cl in data_layer_pairs]
                res.append((data_for_all_layers, LayeredChart(layers=all_layers, resolve={})))
            return res
class BarChart(object):
def __init__(self, encodings, orientation):
"""encodings of x,y,x2,y2
orientation is one of vertical / horizontal
"""
assert(orientation in ["horizontal", "vertical"])
self.encodings = {e.channel:e for e in encodings}
self.orientation = orientation
def to_vl_obj(self):
mark = "bar"
encodings = {e:self.encodings[e].to_vl_obj() for e in self.encodings}
if self.orientation == "horizontal":
encodings["y"]["sort"] = None
if self.orientation == "vertical":
encodings["x"]["sort"] = None
if "color" in self.encodings:
mark = {"type": "bar", "opacity": 0.8}
#TODO: stack or not???
# if self.orientation == "horizontal":
# encodings["x"]["stack"] = None
# if self.orientation == "vertical":
# encodings["y"]["stack"] = None
return {
"mark": mark,
"encoding": encodings
}
    def to_ggplot2(self, data_var, alpha=1):
        """Render this bar chart as a single ggplot2 geom expression.

        data_var: name of the R data-frame variable to bind to the geom.
        alpha:    bar transparency (layered charts pass 0.5).
        Returns the R expression string (geom + scale + optional
        coord_flip()/facet_grid()).
        """
        mark = "geom_bar"
        #default channel names
        channel_map = { "color": "fill",
                        "x": "x",
                        "y": "y",
                        "column": "column" }
        coord_flip = ""
        if ("x2" not in self.encodings) and ("y2" not in self.encodings):
            # normal bar chart
            if self.orientation == "horizontal":
                # ggplot2 draws bars vertically; swap the axes and flip
                channel_map["x"] = "y"
                channel_map["y"] = "x"
                coord_flip = " + coord_flip()"
            aes_pairs = {channel_map[channel]:"`{}`".format(enc.field) for channel, enc in self.encodings.items()}
        else:
            # bar chart with x1,x2: draw explicit rectangles instead of bars
            assert("column" not in self.encodings)
            mark = "geom_rect"
            if self.orientation == "horizontal":
                channel_map["x"] = "ymin"
                channel_map["x2"] = "ymax"
                channel_map["y"] = "x"
                coord_flip = " + coord_flip()"
            else:
                channel_map["y"] = "ymin"
                channel_map["y2"] = "ymax"
            aes_pairs = {channel_map[channel]:"`{}`".format(enc.field) for channel, enc in self.encodings.items()}
            # rectangles are centered on the row index with width 0.9
            # (row_id is added to the data frame by VisDesign.to_ggplot2)
            aes_pairs["xmin"] = "`row_id`-0.45"
            aes_pairs["xmax"] = "`row_id`+0.45"
        facet = ""
        if "column" in aes_pairs:
            # the column channel becomes a facet, not an aesthetic
            facet += " + facet_grid(cols = vars(`{}`))".format(aes_pairs["column"])
            aes_pairs.pop("column")
        aes_str = ",".join(["{}={}".format(p, aes_pairs[p]) for p in aes_pairs])
        return "{}(data={},aes({}),stat ='identity',alpha={}) + scale_x_discrete(){}{}".format(mark, data_var, aes_str, alpha, coord_flip, facet)
def eval(self, data):
res = []
for r in data:
if self.orientation == "horizontal":
x1 = get_channel_value(self.encodings, "x", r)
x2 = get_channel_value(self.encodings, "x2", r)
y = get_channel_value(self.encodings, "y", r)
color = get_channel_value(self.encodings, "color", r)
column = get_channel_value(self.encodings, "column", r)
res.append(BarH(x1=x1, x2=x2, y=y, color=color, column=column))
elif self.orientation == "vertical":
y1 = get_channel_value(self.encodings, "y", r)
y2 = get_channel_value(self.encodings, "y2", r)
x = get_channel_value(self.encodings, "x", r)
color = get_channel_value(self.encodings, "color", r)
column = get_channel_value(self.encodings, "column", r)
res.append(BarV(x=x, y1=y1, y2=y2, color=color, column=column))
return res
@staticmethod
def inv_eval(vtrace, orientation):
assert(orientation in ["horizontal", "vertical"])
data_values = []
if orientation == "vertical":
for vt in vtrace:
data_values.append({"c_x": vt.x, "c_y": vt.y1, "c_y2": vt.y2, "c_column": vt.column, "c_color": vt.color})
channel_types = [("x", "nominal"), ("y", "quantitative"), ("y2", "quantitative"), ("color", "nominal"), ("column", "nominal")]
if orientation | |
#! python3
# -*- coding: utf-8 -*-
"""mwxlib param controller and wx custom controls
Author: <NAME> <<EMAIL>>
"""
from __future__ import division, print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from itertools import chain
import sys
import wx
import numpy as np
from numpy import pi
from numpy import nan, inf
try:
import framework as mwx
import images
except ImportError:
from . import framework as mwx
from . import images
import wx.lib.platebtn as pb
import wx.lib.scrolledpanel as scrolled
LITERAL_TYPE = (str,) if sys.version_info >= (3,0) else (str,unicode)
## EPSILON = sys.float_info.epsilon
## EPSILON = 1e-15
class Param(object):
    """Standard Parameter
    Attributes:
        name : label
        range : range [min:max:step]
        min,max : lower and upper limits
        std_value : standard value (default None)
        value : current value := std_value + offset
        offset : ditto (if std_value is None, this is the same as `value)
        knobs : knob list
        index : knob index -> reset -> callback
        check : knob tick (undefined)
        tip : doc:str also shown as a tooltip
        callback : single state machine that handles following events:
            control -> when `index changed by knobs or reset (call handler)
            check -> when `check ticks on/off (call updater)
            overflow -> when `value overflows
            underflow -> when `value underflows
    Args:
        fmt : text formatter or format str (default is '%g')
              `hex` specifies hexadecimal format/eval
        handler : called when control changed
        updater : called when check changed
        tip : tooltip:str shown on the associated knobs
    """
    def __init__(self, name, range=None, value=None, fmt=None,
                 handler=None, updater=None, tip=None):
        self.__knobs = []
        self.__name = name
        self.range = range if range is not None else [0] # property -> set_range
        self.__value = value if value is not None else self.min
        self.__std_value = value
        if fmt is hex:
            # hexadecimal parse/format mode
            self.__eval = lambda v: int(v,16)
            self.__format = lambda v: '{:04X}'.format(int(v))
        else:
            self.__eval = lambda v: float(v)
            self.__format = fmt if callable(fmt) else (lambda v: (fmt or "%g") % v)
        self.__check = 0
        # single state machine dispatching the events documented above
        self.__callback = mwx.SSM({
            'control' : [ handler ] if handler else [],
             'update' : [ updater ] if updater else [],
              'check' : [ updater ] if updater else [],
           'overflow' : [],
          'underflow' : [],
        })
        ## self.tip = tip
        # the tooltip aggregates the explicit tip and the handler/updater docs
        self.tip = '\n'.join(filter(None, (tip,
                                           handler and handler.__doc__,
                                           updater and updater.__doc__))).strip()
    def __str__(self, v=None):
        # Format v (defaults to the current value); fall back to plain str()
        # when the formatter rejects the value (e.g. nan with an int format).
        v = self.__value if v is None else v
        try:
            return self.__format(v)
        except ValueError:
            return str(v)
    def __int__(self):
        return int(self.__value)
    def __long__(self):
        # NOTE: only invoked on Python 2 (where `long` is a builtin).
        return long(self.__value)
    def __float__(self):
        return float(self.__value)
    def __len__(self):
        return len(self.__range)
    name = property(
        lambda self: self.__name,
        lambda self,v: self.set_name(v))
    # Note: assigning `value` notifies the knobs only when set_value reports
    # a valid in-range change (it returns None/False otherwise).
    value = property(
        lambda self: self.__value,
        lambda self,v: self.set_value(v) and self._notify())
    std_value = property(
        lambda self: self.__std_value,
        lambda self,v: self.set_std_value(v))
    offset = property(
        lambda self: self.get_offset(),
        lambda self,v: self.set_offset(v))
    range = property(
        lambda self: self.get_range(),
        lambda self,v: self.set_range(v))
    min = property(lambda self: self.__range[0])
    max = property(lambda self: self.__range[-1])
    index = property(
        lambda self: self.get_index(),
        lambda self,j: self.set_index(j))
    ## rindex = property(
    ##     lambda self: len(self) - self.get_index() - 1,
    ##     lambda self,j: self.set_index(len(self) - j - 1))
    knobs = property(
        lambda self: self.__knobs)
    check = property(
        lambda self: self.__check,
        lambda self,v: self.set_check(v))
    callback = property(
        lambda self: self.__callback)
    def bind(self, f=None, target='control'):
        """Register f on the `target event; usable as a decorator when f is None."""
        la = self.__callback[target]
        if not f:
            return lambda f: self.bind(f, target)
        if f not in la:
            la.append(f)
        return f
    def unbind(self, f=None, target='control'):
        """Remove f from the `target event (all callables when f is None)."""
        la = self.__callback[target]
        if not f:
            la[:] = [a for a in la if not callable(a)]
            return
        if f in la:
            la.remove(f)
    def reset(self, v=None, backcall=True):
        """Reset value when indexed (by knobs) with callback"""
        if v is None or v == '':
            v = self.__std_value
            if v is None:
                return
        elif v == 'nan': v = nan
        elif v == 'inf': v = inf
        elif isinstance(v, LITERAL_TYPE):
            v = self.__eval(v.replace(',', '')) # eliminates commas(, to be deprecated)
            ## v = self.__eval(v)
        self.set_value(v)
        if backcall:
            self.__callback('control', self)
    def _notify(self):
        # tell every attached knob that the control value changed
        for knob in self.knobs:
            knob.notify_ctrl()
    def set_check(self, v):
        self.__check = v
        self.__callback('check', self)
        for knob in self.knobs:
            knob.update_label()
    def set_name(self, v):
        self.__name = v
        for knob in self.knobs:
            knob.update_label()
    def set_value(self, v):
        """Set value and check the limit.
        If the value is out of range, modify the value.
        Returns True for an applied in-range change, False when the value was
        clipped to a limit, and None for a no-op / nan / inf.
        """
        if v is None:
            v = nan
        if v in (nan, inf):
            # NOTE(review): `in` matches by identity-or-equality, so this
            # catches the module-level nan/inf objects; a distinct
            # float('nan') takes the comparison path below — confirm intended.
            self.__value = v
            for knob in self.knobs:
                knob.update_ctrl(None)
            return
        elif v == self.__value:
            return
        valid = (self.min <= v <= self.max)
        if valid:
            self.__value = v
        elif v < self.min:
            self.__value = self.min
            self.__callback('underflow', self)
        else:
            self.__value = self.max
            self.__callback('overflow', self)
        for knob in self.knobs:
            knob.update_ctrl(valid)
        return valid
    def set_std_value(self, v):
        self.__std_value = v
        for knob in self.knobs:
            knob.update_label()
    def get_offset(self):
        # offset is relative to std_value when one exists
        if self.__std_value is not None:
            return self.__value - self.__std_value
        return self.__value
    def set_offset(self, v):
        if self.__std_value is not None:
            if v is not nan: # Note: nan +x is not nan
                v += self.__std_value
        self.set_value(v)
    def get_range(self):
        return self.__range
    def set_range(self, v):
        self.__range = sorted(v) # keep ascending so min/max indexing works
        for knob in self.knobs:
            knob.update_range() # list range of related knobs
    def get_index(self, v=None):
        # index of v (defaults to the current value) within the sorted range
        if v is None:
            v = self.value
        return int(np.searchsorted(self.__range, v))
    def set_index(self, j):
        # clamp j into [0, n); negative j or overflow selects an end point
        n = len(self.__range)
        i = (0 if j<0 else j if j<n else -1)
        return self.set_value(self.__range[i])
class LParam(Param):
    """Linear Parameter
    """
    # share the detailed attribute documentation with Param
    __doc__ = Param.__doc__
    # These lambdas name-mangle to self._LParam__min etc., which set_range
    # below assigns; they intentionally shadow Param's list-based min/max.
    min = property(lambda self: self.__min)
    max = property(lambda self: self.__max)
    step = property(lambda self: self.__step)
    def __len__(self):
        return 1 + self.get_index(self.max) # includes [min,max]
    def get_range(self):
        # the range is generated lazily instead of stored as a list
        return np.arange(self.min, self.max + self.step, self.step)
    def set_range(self, v):
        # v = (min, max[, step]); step defaults to 1
        self.__min = v[0]
        self.__max = v[1]
        self.__step = v[2] if len(v)>2 else 1
        for knob in self.knobs:
            knob.update_range() # linear range of related knobs
    def get_index(self, v=None):
        # nearest linear index of v (defaults to the current value)
        if v is None:
            v = self.value
        return int(round((v - self.min) / self.step))
    def set_index(self, j):
        return self.set_value(self.min + j * self.step)
## --------------------------------
## Knob unit for Parameter Control
## --------------------------------
class Knob(wx.Panel):
"""Parameter controller unit
パラメータクラスのコントロールノブ
In addition to direct key input to the textctrl,
[up][down][wheelup][wheeldown] keys can be used,
with modifiers S- 2x, C- 16x, and M- 256x steps.
[Mbutton] resets to the std. value if it exists.
Attributes:
param : A param <Param> object referred from knobs
Args:
par : Param <object>
type : ctrl type (slider[*], [hv]spin, choice, and default None)
style : style of label
None -> static text (default)
chkbox -> label with check box
button -> label with flat button
editable : textctrl is editable or readonly
lw,tw,cw : width of label, textbox, and ctrl (default height `h=22 of widgets)
"""
    @property
    def param(self):
        """The Param object this knob controls."""
        return self.__par
    @param.setter
    def param(self, v):
        # re-register this knob with the new Param and refresh the widgets
        self.__par.knobs.remove(self)
        self.__par = v
        self.__par.knobs.append(self)
        self.update_range()
        self.update_ctrl()
def __init__(self, parent, par, type='slider',
style=None, editable=1, lw=-1, tw=-1, cw=-1, h=22):
wx.Panel.__init__(self, parent)
self.__bit = 1
self.__par = par
self.__par.knobs.append(self) # パラメータの関連付けを行う
if type is None:
type = 'slider'
cw = 0
elif type == 'choice':
if cw < 0:
cw = 20
cw += tw
tw = 0
label = self.__par.name + ' ' #(' ' if lw else '')
if style == 'chkbox':
if lw > 0:
lw += 16
self.label = wx.CheckBox(self, label=label, size=(lw,-1))
self.label.Bind(wx.EVT_CHECKBOX, self.OnCheck)
elif style == 'button':
if lw > 0:
lw += 16
self.label = pb.PlateButton(self, label=label, size=(lw,-1),
style=(pb.PB_STYLE_DEFAULT | pb.PB_STYLE_SQUARE))
self.label.Bind(wx.EVT_BUTTON, self.OnPress)
elif not style:
self.label = wx.StaticText(self, label=label, size=(lw,-1))
else:
raise Exception("unknown style: {!r}".format(style))
self.label.Enable(lw)
self.label.Bind(wx.EVT_MIDDLE_DOWN, lambda v: self.__par.reset())
self.label.SetToolTip(self.__par.tip)
if editable:
self.text = wx.TextCtrl(self, size=(tw,h), style=wx.TE_PROCESS_ENTER)
self.text.Bind(wx.EVT_TEXT, self.OnText)
self.text.Bind(wx.EVT_TEXT_ENTER, self.OnTextEnter)
self.text.Bind(wx.EVT_SET_FOCUS, self.OnTextFocus)
self.text.Bind(wx.EVT_KILL_FOCUS, self.OnTextFocusKill)
if type[-1] == '*':
self.text.Bind(wx.EVT_KEY_DOWN, self.OnTextKeyDown)
else:
self.text.Bind(wx.EVT_KEY_DOWN, self.OnTextKey)
self.text.Bind(wx.EVT_KEY_UP, self.OnTextKeyUp)
self.text.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
self.text.Bind(wx.EVT_MIDDLE_DOWN, lambda v: self.__par.reset())
else:
self.text = wx.TextCtrl(self, size=(tw,h), style=wx.TE_READONLY)
self.text.Enable(tw)
if type == 'slider':
self.ctrl = wx.Slider(self, size=(cw,h), style=wx.SL_HORIZONTAL)
self.ctrl.Bind(wx.EVT_SCROLL_CHANGED, self.OnScroll)
self.ctrl.Bind(wx.EVT_KEY_DOWN, self.OnCtrlKeyDown)
self.ctrl.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
elif type == 'slider*':
self.ctrl = wx.Slider(self, size=(cw,h), style=wx.SL_HORIZONTAL)
self.ctrl.Bind(wx.EVT_SCROLL, self.OnScroll) # called while dragging
self.ctrl.Bind(wx.EVT_SCROLL_CHANGED, lambda v: None) # pass no action
self.ctrl.Bind(wx.EVT_KEY_DOWN, | |
self and R is the base ring.
Note that these are not generally uniquely determined, and depending on
how Smith normal form is implemented for the base ring, they may not
even be deterministic.
This can safely be overridden in all derived classes.
EXAMPLES::
sage: V = span([[1/2,1,1],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1, 9*V.0+12*V.1, 4*V.2])
sage: Q = V/W
sage: Q.gens()
((1, 0), (0, 1))
sage: Q.0
(1, 0)
"""
return self.smith_form_gens()
    @cached_method
    def smith_form_gens(self):
        """
        Return a set of generators for self which are in Smith normal form.
        EXAMPLES::
            sage: V = span([[1/2,1,1],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1, 9*V.0+12*V.1, 4*V.2])
            sage: Q = V/W
            sage: Q.smith_form_gens()
            ((1, 0), (0, 1))
            sage: [x.lift() for x in Q.smith_form_gens()]
            [(0, 3, 1), (0, -1, 0)]
        """
        # Get the rightmost transformation in the Smith form
        _, _, X = self._smith_form()
        # Invert it to get a matrix whose rows (in terms of the basis for V)
        # are the gi (including 1 invariants).
        Y = X**(-1)
        # Get the basis matrix for V
        B = self._V.basis_matrix()
        # Multiply to express the gi in terms of the ambient vector space.
        Z = Y*B
        # Make gens out of the rows of Z that correspond to non-1 invariants
        # (invariant 1 means the trivial factor, which needs no generator).
        v = self.invariants(include_ones=True)
        non1 = [i for i in range(Z.nrows()) if v[i] != 1]
        Z = Z.matrix_from_rows(non1)
        self._gens_smith = tuple([self(z, check=DEBUG) for z in Z.rows()])
        return self._gens_smith
    def gens_to_smith(self):
        r"""
        Return the transformation matrix from the user to Smith form generators.
        To go in the other direction use :meth:`smith_to_gens`.
        OUTPUT:
        - a matrix over the base ring
        EXAMPLES::
            sage: L2 = IntegralLattice(3 * matrix([[-2,0,0],[0,1,0],[0,0,-4]]))
            sage: D = L2.discriminant_group().normal_form()
            sage: D
            Finite quadratic module over Integer Ring with invariants (3, 6, 12)
            Gram matrix of the quadratic form with values in Q/Z:
            [1/2   0   0   0   0]
            [  0 1/4   0   0   0]
            [  0   0 1/3   0   0]
            [  0   0   0 1/3   0]
            [  0   0   0   0 2/3]
            sage: D.gens_to_smith()
            [0 3 0]
            [0 0 3]
            [0 4 0]
            [1 2 0]
            [0 0 4]
            sage: T = D.gens_to_smith()*D.smith_to_gens()
            sage: T
            [ 3  0  3  0  0]
            [ 0 33  0  0  3]
            [ 4  0  4  0  0]
            [ 2  0  3  1  0]
            [ 0 44  0  0  4]
        The matrix `T` now satisfies a certain congruence::
            sage: for i in range(T.nrows()):
            ....:     T[:,i] = T[:,i] % D.gens()[i].order()
            sage: T
            [1 0 0 0 0]
            [0 1 0 0 0]
            [0 0 1 0 0]
            [0 0 0 1 0]
            [0 0 0 0 1]
        """
        # Each row is a user generator written in Smith-form coordinates
        # (FGP element .vector() gives Smith-form coordinates).
        gens_to_smith = matrix(self.base_ring(),
                               [t.vector() for t in self.gens()])
        gens_to_smith.set_immutable() # callers may share the result; protect it
        return gens_to_smith
    @cached_method
    def smith_to_gens(self):
        r"""
        Return the transformation matrix from Smith form to user generators.
        To go in the other direction use :meth:`gens_to_smith`.
        OUTPUT:
        - a matrix over the base ring
        EXAMPLES::
            sage: L2 = IntegralLattice(3 * matrix([[-2,0,0],[0,1,0],[0,0,-4]]))
            sage: D = L2.discriminant_group().normal_form()
            sage: D
            Finite quadratic module over Integer Ring with invariants (3, 6, 12)
            Gram matrix of the quadratic form with values in Q/Z:
            [1/2   0   0   0   0]
            [  0 1/4   0   0   0]
            [  0   0 1/3   0   0]
            [  0   0   0 1/3   0]
            [  0   0   0   0 2/3]
            sage: D.smith_to_gens()
            [ 0  0  1  1  0]
            [ 1  0  1  0  0]
            [ 0 11  0  0  1]
            sage: T = D.smith_to_gens()*D.gens_to_smith()
            sage: T
            [ 1  6  0]
            [ 0  7  0]
            [ 0  0 37]
        This matrix satisfies the congruence::
            sage: for i in range(T.ncols()):
            ....:     T[:, i] = T[:, i] % D.smith_form_gens()[i].order()
            sage: T
            [1 0 0]
            [0 1 0]
            [0 0 1]
        We create some element of our FGP_module::
            sage: x = D.linear_combination_of_smith_form_gens((1,2,3))
            sage: x
            (1, 2, 3)
        and want to know some (it is not unique) linear combination
        of the user defined generators that is x::
            sage: x.vector() * D.smith_to_gens()
            (2, 33, 3, 1, 3)
        """
        if self.base_ring() != ZZ:
            # the construction below uses quotient rings ZZ/invariant,
            # so only the integer base ring is supported
            raise NotImplementedError("the base ring must be ZZ")
        base = self.base_ring()
        invs = self.invariants()
        B = self.gens_to_smith()
        n = len(invs)
        smith_to_gens = []
        for k in range(n):
            # Solve v * B = e_k over ZZ/invs[k]: v expresses the k-th
            # Smith-form generator as a combination of the user generators.
            R = base.quotient_ring(invs[k])
            e = (R**n).gen(k) # k-th standard basis vector
            v = B.change_ring(R).solve_left(e)
            smith_to_gens.append(v)
        smith_to_gens = matrix(base, smith_to_gens)
        smith_to_gens.set_immutable() # cached value; must not be mutated
        return smith_to_gens
def gens_vector(self, x, reduce=False):
r"""
Return coordinates of x with respect to the generators.
INPUT:
- ``x`` -- element of ``self``
- ``reduce`` -- (default: ``False``); if ``True``,
reduce coefficients modulo invariants; this is
ignored if the base ring is not `\ZZ`
EXAMPLES:
We create a derived class and overwrite :meth:`gens`::
sage: from sage.modules.fg_pid.fgp_module import FGP_Module_class
sage: W = ZZ^3
sage: V = W.span(matrix.diagonal([1/6,1/3,1/12]))
sage: class FGP_with_gens(FGP_Module_class):
....: def __init__(self, V, W, gens):
....: FGP_Module_class.__init__(self, V, W)
....: self._gens = tuple([self(g) for g in gens])
....: def gens(self):
....: return self._gens
sage: gens = [(1/2, 0, 0), (0, 0, 1/4), (1/3, 0, 0), (0, 1/3, 0), (0, 0, 2/3)]
sage: gens = [V(g) for g in gens]
sage: D = FGP_with_gens(V, W, gens)
sage: D.gens()
((0, 3, 0), (0, 0, 3), (0, 4, 0), (1, 2, 0), (0, 0, 8))
We create some element of D::
sage: x = D.linear_combination_of_smith_form_gens((1,2,3))
sage: x
(1, 2, 3)
In our generators::
sage: v = D.gens_vector(x)
sage: v
(2, 9, 3, 1, 33)
The output can be further reduced::
sage: D.gens_vector(x, reduce=True)
(0, 1, 0, 1, 0)
Let us check::
sage: x == sum(v[i]*D.gen(i) for i in range(len(D.gens())))
True
"""
x = self(x)
v = x.vector() * self.smith_to_gens()
from sage.rings.all import infinity
if reduce and self.base_ring() == ZZ:
orders = [g.order() for g in self.gens()]
v = v.parent()([v[i] if orders[i] == infinity
else v[i] % orders[i]
for i in range(len(self.gens()))])
return v
def coordinate_vector(self, x, reduce=False):
"""
Return coordinates of x with respect to the optimized
representation of self.
INPUT:
- ``x`` -- element of self
- ``reduce`` -- (default: False); if True, reduce
coefficients modulo invariants; this is
ignored if the base ring is not ZZ.
OUTPUT:
The coordinates as a vector. That is, the same type as
``self.V()``, but in general with fewer entries.
EXAMPLES::
sage: V = span([[1/4,0,0],[3/4,4,2],[0,0,2]],ZZ); W = V.span([4*V.0+12*V.1])
sage: Q = V/W; Q
Finitely generated module V/W over Integer Ring with invariants (4, 0, 0)
sage: Q.coordinate_vector(-Q.0)
(-1, 0, 0)
sage: Q.coordinate_vector(-Q.0, reduce=True)
(3, 0, 0)
If x is not in self, it is coerced in::
sage: Q.coordinate_vector(V.0)
(1, -3, 0)
sage: Q.coordinate_vector(Q(V.0))
(1, -3, 0)
TESTS::
sage: V = span([[1/2,0,0],[3/2,2,1],[0,0,1]],ZZ); W = V.span([2*V.0+4*V.1, 9*V.0+12*V.1, 4*V.2])
sage: Q = V/W; Q
Finitely generated module V/W over Integer Ring with invariants (4, 12)
sage: Q.coordinate_vector(Q.0 - Q.1, reduce=True)
(1, 11)
sage: a, b = Q.coordinate_vector(Q.0 - Q.1)
sage: (a % 4, b % 12)
(1, 11)
sage: O, X = Q.optimized()
sage: O.V()
Free module of degree 3 and rank 2 over Integer Ring
User basis matrix:
[ 0 6 1]
[ 0 -2 0]
sage: phi = Q.hom([Q.0, 4*Q.1])
sage: x = Q(V.0); x
(0, 8)
sage: Q.coordinate_vector(x, reduce=True)
(0, 8)
sage: a, b = Q.coordinate_vector(-x, reduce=False)
sage: (a % 4, b % 12)
(0, 4)
sage: x == 8*Q.1
True
sage: x = Q(V.1); x
(0, 11)
sage: a, b = Q.coordinate_vector(x)
sage: (a % 4, b % 12)
(0, 11)
sage: x == -Q.1
True
sage: x = Q(V.2); x
(1, 3)
sage: Q.coordinate_vector(x)
(1, 3)
sage: x == Q.0 + 3*Q.1
True
"""
try:
T = self.__T
except AttributeError:
self.optimized() # computes T as side effect
# see the "optimized" method.
T = self.__T
x = self(x)
c = self._V.coordinate_vector(x.lift())
b = (c * T).change_ring(self.base_ring())
if reduce and self.base_ring() == ZZ:
I = self.invariants()
return b.parent()([b[i] if I[i] == 0 else b[i] % I[i]
for i in range(len(I))])
else:
# Don't know (or not requested) canonical way to reduce
# each entry yet, | |
'''
Does Covid 19 contact tracing analysis on data of the form:
<name, latitude, longitude, date, time, condition>
where name = name of the person, latitude & longitude are the geographical coordinates, date
& time are the time stamp when those coordinates were recorded and finally condition indicates
whether this person is sick or healthy.
Use generator.py to generate data in the above form with various configurations.
This program assumes that the input data is in the format prescribed above. It takes this data
and then builds various directed as well as undirected graphs. It uses the graphs to:
- detect potential high risk contacts
- detect risky locations
- detect vulnerable subset of the population
- predict potential future vulnerable population / locations
Dependencies:
- Python 2.7 only (latlon doesn't support Python 3 :(. For python 3+, use pyGeodesy)
- LatLon 1.0.2 - https://pypi.org/project/LatLon/
- pandas 0.24.2
- networkx 2.2 ( !pip install networkx=2.2. 2.2 due to python 2.7 - use latest if on Python 3+ )
- python-louvain 0.13 ( !pip install python-louvain )
- matplotlib 2.2.4
'''
import pandas as pd
import LatLon
from LatLon import *
import networkx as nx
import matplotlib.pyplot as plt
import time
from copy import deepcopy
import community
##### All configurations start here #####
#set for lat, lon otherwise implicit default loses precision
pd.set_option('display.precision',12)
#data file path. this is the data to be analyzed (produced by generator.py).
datapath = 'cov19_gen_dataset_05_doctored.csv' #'cov19_gen_dataset_10k.csv'
#stores the size of the virtual microcell around each location a person was recorded to have visited.
#this is used to calculate if two persons have breached the commonly accepted social distance limits.
#can be changed to anything, default is kept at x metres. This is for tagging high risk contacts.
#NOTE(review): the unit is whatever LatLon.distance() returns (kilometres by
#default), so 0.005 would be about 5 metres -- confirm against generator.py.
microcell_radius = 0.005 # e.g., say is set to 0.003. It is about 10 ft captured here in (3) metres
#controls whether graphs are visually displayed or not. If running on linux ensure X Windows is available.
#0 = graphs are displayed in ui. 1 = no graphs are displayed.
ui = 1
##### All configurations end here #####
##### Runtime variables #####
#raw input records as loaded from datapath
rawdataframe = pd.DataFrame()
sorteddf = pd.DataFrame() #same as raw data frame except all locations are sorted asc order by time of visit
#names of the unique people found in the dataset
persons = []
gxarry_pop_travel_hist = [] #array of nx graphs holding travel history of each member in pop
undir_gxarray_pop_travel_hist = []#same graph as gxarry_pop_travel_hist except it is undirected
#column names of one pairwise comparison record (person 1 vs person 2)
col_breach = ['name1','con1','latlon1','entrytm1','exittm1','name2','con2','latlon2',
              'entrytm2','exittm2','dist','breach', 'risk']
#holds info of all possible travels by the population and which two people were involved. This
#is used to generate a risk profile for the population.
travel_hist = pd.DataFrame(columns = col_breach)
#graph with various new edges and attributes on both nodes and edges. Used for
#overall analysis activities.
biggx = nx.Graph()
#list of known infected people
known_infected_list = []
##### Methods #####
#customized printer used for all tool log output
def printcov(str_to_print):
    """Print the given message with the tool's log prefix."""
    prefix = "[log]:--> "
    print(prefix + str_to_print)
#Cleans and prepares data to be suitable for running analysis: finds each unique
#person in the dataset and sorts that person's location records by time.
def dataprep():
    """Load the raw CSV data and prepare it for analysis.

    Reads ``datapath``, identifies each unique person (in order of first
    appearance), sorts each person's location records ascending by time and
    returns a single re-indexed DataFrame.  Also populates the global
    ``persons`` list and writes the prepared data to ``preppd_df.csv`` for
    debugging.
    """
    rawdataframe = pd.read_csv(datapath, sep=',', header=0)
    printcov("Sample of loaded raw data: ")
    print(rawdataframe.head(3))
    print(rawdataframe.tail(3))
    popcount = 0
    frames = []
    # Process each unique name exactly once, in order of first appearance.
    # (The previous implementation compared each row's name with the previous
    # row's name, which re-processed -- and duplicated the rows of -- any
    # person whose records were not contiguous in the input file.)
    for currname in rawdataframe['name'].drop_duplicates():
        printcov("Processing for: " + currname)
        persons.append(currname)
        df = rawdataframe.loc[rawdataframe['name'] == currname]
        printcov("# of rows found: " + str(len(df)))
        popcount = popcount + 1
        # Sort this person's rows by time only; the Date field is ignored
        # because the data is assumed to span a single day.
        df = df.sort_values(by=['time'])
        frames.append(df)
    printcov("Completed prep for data.")
    # Concatenate once instead of appending inside the loop: linear instead
    # of quadratic, and also compatible with pandas 2 (DataFrame.append was
    # removed there).
    dftmp = pd.concat(frames) if frames else pd.DataFrame()
    dftmp = dftmp.reset_index(drop=True)
    printcov("Prepp'd data: ")
    print(dftmp.head(27))
    print(dftmp.tail(27))
    printcov("Unique people found in pop of size: " + str(popcount))
    print(persons)
    printcov("Saving prepp'd data to a file: preppd_df.csv for debugging (in current folder).")
    dftmp.to_csv("preppd_df.csv")
    return dftmp
#prepares graph data per unique person in the provided dataset: builds a graph of
#their travel history with locations and time, plus attributes on nodes and edges
#that help in further analysis.
def graph_per_person(person):
    """Build and store the time-ordered travel graph for one person."""
    printcov("Generating graph for: " + person)
    # Records for this person only, already sorted ascending by time.
    records = sorteddf.loc[sorteddf['name'] == person]
    records = records.reset_index(drop=True)
    print(records)
    # One directed multigraph per person; the person's name and health
    # condition are carried as graph-level attributes.
    gx = nx.MultiDiGraph(name=person, con=records['condition'][0])
    # Every recorded location becomes a node labelled "<name><seq>",
    # holding its geographical coordinates.
    for seq, row in records.iterrows():
        gx.add_node(str(person) + str(seq),
                    latlon=LatLon(Latitude(row['lat']), Longitude(row['lon'])))
    total = nx.number_of_nodes(gx)
    print("Adding edges for: " + str(nx.number_of_nodes(gx)) + " nodes...")
    print(gx.nodes())
    # Chain consecutive visits with directed edges, stamped with the time
    # of arrival at the destination node.
    for idx in range(0, total):
        nxt = idx + 1
        if nxt == total:
            print("reached end node")
            break
        gx.add_edge(str(person) + str(idx), str(person) + str(nxt),
                    time=records['time'][nxt])
    print("Completed adding edges for: " + str(person) + ". Graph complete.")
    disp_graph(gx)
    gxarry_pop_travel_hist.append(gx)
    return
#finds overlapping locations with time for the population. There is one pairwise
#comparison per ordered (anchor, other) pair of people in the population.
def overlaps_for_pop(gxall):
    """Collect location overlaps between every ordered pair of people.

    For each person (the "anchor"), compare each of their recorded locations
    against every location of every *other* person via ``find_overlap`` and
    return all resulting rows as one DataFrame with ``col_breach`` columns.
    """
    printcov("Finding overlaps within population's location history")
    # Seed with an empty frame so an empty population still yields a frame
    # with the expected columns.
    pieces = [pd.DataFrame(columns=col_breach)]
    for x in range(0, len(gxall)):
        # Mixed graphs are not possible in nx, so work on an undirected copy
        # of the anchor's travel graph; the overlap edges added later are
        # undirected.
        undirectedgxcurr = gxall[x].to_undirected()
        # Compare the anchor with every *other* person's graph.  The graphs
        # are only read here, so there is no need to deep-copy the whole
        # population per anchor (the previous implementation deep-copied
        # every graph and then popped the anchor -- O(n^2) copies for
        # nothing).
        for y in range(0, len(gxall)):
            if y == x:
                continue
            undirectedgxnext = gxall[y].to_undirected()
            disp_graph(undirectedgxnext)
            pieces.append(find_overlap(undirectedgxcurr, undirectedgxnext))
    # One concat at the end instead of repeated DataFrame.append calls.
    b_all = pd.concat(pieces)
    printcov("Completed overlap extractions.")
    return b_all
#finds overlapping locations between two graphs
def find_overlap(undgx_curr, undgx_next):
#get 'latlon' attributes of both and figure out if present in microcell
anchorgraph_name = str(undgx_curr.graph['name'])
compargraph_name = str(undgx_next.graph['name'])
anchor_health_status = str(undgx_curr.graph['con'])
compar_health_status = str(undgx_next.graph['con'])
printcov("Processing overlaps. Anchor graph: " + anchorgraph_name + " | " +
anchor_health_status + " and Comparison graph: "
+ compargraph_name + " | " + compar_health_status)
gxcurr_nodeattrib = nx.get_node_attributes(undgx_curr,'latlon')
gxnext_nodeattrib = nx.get_node_attributes(undgx_next,'latlon')
printcov("Node attributes for overlap calc are:\n")
print("curr anchor graph: " + str(gxcurr_nodeattrib))
print("comparison graph: " + str(gxnext_nodeattrib))
print("\n")
b = pd.DataFrame(columns = col_breach)
for x in range(0, len(gxcurr_nodeattrib)):
for y in range(0, len(gxnext_nodeattrib)):
#here, we compare curr(latlon) with next(latlon) iteratively.
gxcurr_curr_nodelbl = str(anchorgraph_name) + str(x)
gxnext_curr_nodelbl = str(compargraph_name) + str(y)
print(str(gxcurr_nodeattrib[gxcurr_curr_nodelbl]) + " ----- " + str(gxnext_nodeattrib[gxnext_curr_nodelbl]))
distance = gxcurr_nodeattrib[gxcurr_curr_nodelbl].distance(gxnext_nodeattrib[gxnext_curr_nodelbl])
print("Person: " + anchorgraph_name + " & Person " + compargraph_name)
print(" - anchor node: " + str(gxcurr_curr_nodelbl) + " and comparison node: " + str(gxnext_curr_nodelbl))
print(" - distance between above two: " + str(distance))
entm1 = find_startime_gx(x, undgx_curr, anchorgraph_name)
extm1 = find_endtime_gx(x, undgx_curr, anchorgraph_name)
entm2 = find_startime_gx(y, undgx_next, compargraph_name)
extm2 = find_endtime_gx(y, undgx_next, compargraph_name)
risk = 'none'
breach = 'no'
if(distance <= microcell_radius):
#a new edge connecting these two nodes and save the graph. Also mark
#the relevant loc's as 'breached' with a new node attribute. risk is still
#classified as none because we have not yet calculated time overlap
print("Microcell radius breached.")
breach = 'yes'
#breachnodes attribute is useful to find edges that caused a breach
biggx.add_edge(gxcurr_curr_nodelbl,gxnext_curr_nodelbl,
breachnodes=(gxcurr_curr_nodelbl+':'+gxnext_curr_nodelbl))
biggx.nodes[gxcurr_curr_nodelbl]['breached'] = 'yes'
biggx.nodes[gxnext_curr_nodelbl]['breached'] = 'yes'
#time overlaps. use e*tm1 and e*tm2 to calculate overlap. If there is
#an overlap of time then we have two people in the same location at the same
#time => risk == high if one of them is sick. For the h person | |
image for central {}/{}.'.format(ii+1, len(galaxy_indx)))
# Build the model image (of every object except the central)
# on-the-fly. Need to be smarter about Tractor sources of resolved
# structure (i.e., sources that "belong" to the central).
nocentral = np.delete(np.arange(len(tractor)), central)
srcs = tractor.copy()
srcs.cut(nocentral)
model_nocentral = srcs2image(srcs, data['{}_wcs'.format(refband)], band=refband.lower(),
pixelized_psf=data['{}_psf'.format(refband)])
# Mask all previous (brighter) central galaxies, if any.
img, newmask = ma.getdata(data[refband]) - model_nocentral, ma.getmask(data[refband])
for jj in np.arange(ii):
geo = data['mge'][jj] # the previous galaxy
# Do this step iteratively to capture the possibility where the
# previous galaxy has masked the central pixels of the *current*
# galaxy, in each iteration reducing the size of the mask.
for shrink in np.arange(0.1, 1.05, 0.05)[::-1]:
maxis = shrink * geo['majoraxis']
_mask = ellipse_mask(geo['xmed'], geo['ymed'], maxis, maxis * (1-geo['eps']),
np.radians(geo['theta']-90), xobj, yobj)
notok = False
for xb in box:
for yb in box:
if _mask[int(yb+tractor.by[central]), int(xb+tractor.bx[central])]:
notok = True
break
if notok:
#if _mask[int(tractor.by[central]), int(tractor.bx[central])]:
print('The previous central has masked the current central with shrink factor {:.2f}'.format(shrink))
else:
break
newmask = ma.mask_or(_mask, newmask)
# Next, get the basic galaxy geometry and pack it into a dictionary. If
# the object of interest has been masked by, e.g., an adjacent star
# (see, e.g., IC4041), temporarily unmask those pixels using the Tractor
# geometry.
minsb = 10**(-0.4*(27.5-22.5)) / filt2pixscale[refband]**2
#import matplotlib.pyplot as plt ; plt.clf()
#mgegalaxy = find_galaxy(img / filt2pixscale[refband]**2, nblob=1, binning=3, quiet=not verbose, plot=True, level=minsb)
#mgegalaxy = find_galaxy(img / filt2pixscale[refband]**2, nblob=1, fraction=0.1, binning=3, quiet=not verbose, plot=True)
notok, val = False, []
for xb in box:
for yb in box:
#print(xb, yb, val)
val.append(newmask[int(yb+tractor.by[central]), int(xb+tractor.bx[central])])
# Use np.any() here to capture the case where a handful of the central
# pixels are masked due to, e.g., saturation, which if we don't do, will
# cause issues in the ellipse-fitting (specifically with
# CentralEllipseFitter(censamp).fit() if the very central pixel is
# masked). For a source masked by a star, np.all() would have worked
# fine.
if np.any(val):
notok = True
if notok:
print('Central position has been masked, possibly by a star (or saturated core).')
xmed, ymed = tractor.by[central], tractor.bx[central]
#if largegalaxy:
# ba = tractor.ba_leda[central]
# pa = tractor.pa_leda[central]
# maxis = tractor.d25_leda[central] * 60 / 2 / filt2pixscale[refband] # [pixels]
ee = np.hypot(tractor.shape_e1[central], tractor.shape_e2[central])
ba = (1 - ee) / (1 + ee)
pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[central], tractor.shape_e1[central]) / 2))
pa = pa % 180
maxis = 1.5 * tractor.shape_r[central] / filt2pixscale[refband] # [pixels]
theta = (270 - pa) % 180
fixmask = ellipse_mask(xmed, ymed, maxis, maxis*ba, np.radians(theta-90), xobj, yobj)
newmask[fixmask] = ma.nomask
#import matplotlib.pyplot as plt ; plt.clf()
mgegalaxy = find_galaxy(ma.masked_array(img/filt2pixscale[refband]**2, newmask),
nblob=1, binning=3, level=minsb)#, plot=True)#, quiet=not verbose
#plt.savefig('junk.png') ; pdb.set_trace()
# Above, we used the Tractor positions, so check one more time here with
# the light-weighted positions, which may have shifted into a masked
# region (e.g., check out the interacting pair PGC052639 & PGC3098317).
val = []
for xb in box:
for yb in box:
val.append(newmask[int(xb+mgegalaxy.xmed), int(yb+mgegalaxy.ymed)])
if np.any(val):
notok = True
# If we fit the geometry by unmasking pixels using the Tractor fit then
# we're probably sitting inside the mask of a bright star, so call
# find_galaxy a couple more times to try to grow the "unmasking".
if notok:
print('Iteratively unmasking pixels:')
maxis = 1.0 * mgegalaxy.majoraxis # [pixels]
print(' r={:.2f} pixels'.format(maxis))
prevmaxis, iiter, maxiter = 0.0, 0, 4
while (maxis > prevmaxis) and (iiter < maxiter):
#print(prevmaxis, maxis, iiter, maxiter)
print(' r={:.2f} pixels'.format(maxis))
fixmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed,
maxis, maxis * (1-mgegalaxy.eps),
np.radians(mgegalaxy.theta-90), xobj, yobj)
newmask[fixmask] = ma.nomask
mgegalaxy = find_galaxy(ma.masked_array(img/filt2pixscale[refband]**2, newmask),
nblob=1, binning=3, quiet=True, plot=False, level=minsb)
prevmaxis = maxis.copy()
maxis = 1.2 * mgegalaxy.majoraxis # [pixels]
iiter += 1
#plt.savefig('junk.png') ; pdb.set_trace()
print(mgegalaxy.xmed, tractor.by[central], mgegalaxy.ymed, tractor.bx[central])
maxshift = 10
if (np.abs(mgegalaxy.xmed-tractor.by[central]) > maxshift or # note [xpeak,ypeak]-->[by,bx]
np.abs(mgegalaxy.ymed-tractor.bx[central]) > maxshift):
print('Peak position has moved by more than {} pixels---falling back on Tractor geometry!'.format(maxshift))
#import matplotlib.pyplot as plt ; plt.clf()
#mgegalaxy = find_galaxy(ma.masked_array(img/filt2pixscale[refband]**2, newmask), nblob=1, binning=3, quiet=False, plot=True, level=minsb)
#plt.savefig('junk.png') ; pdb.set_trace()
#pdb.set_trace()
largeshift = True
ee = np.hypot(tractor.shape_e1[central], tractor.shape_e2[central])
ba = (1 - ee) / (1 + ee)
pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[central], tractor.shape_e1[central]) / 2))
mgegalaxy.xmed = tractor.by[central]
mgegalaxy.ymed = tractor.bx[central]
mgegalaxy.xpeak = tractor.by[central]
mgegalaxy.ypeak = tractor.bx[central]
mgegalaxy.eps = 1 - ba
mgegalaxy.pa = pa % 180
mgegalaxy.theta = (270 - pa) % 180
mgegalaxy.majoraxis = 2 * tractor.shape_r[central] / filt2pixscale[refband] # [pixels]
print(' r={:.2f} pixels'.format(mgegalaxy.majoraxis))
fixmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed,
mgegalaxy.majoraxis, mgegalaxy.majoraxis * (1-mgegalaxy.eps),
np.radians(mgegalaxy.theta-90), xobj, yobj)
newmask[fixmask] = ma.nomask
else:
largeshift = False
#if tractor.ref_id[central] == 474614:
# import matplotlib.pyplot as plt
# plt.imshow(mask, origin='lower')
# plt.savefig('junk.png')
# pdb.set_trace()
radec_med = data['{}_wcs'.format(refband)].pixelToPosition(mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals
radec_peak = data['{}_wcs'.format(refband)].pixelToPosition(mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals
mge = {'largeshift': largeshift,
'ra': tractor.ra[central], 'dec': tractor.dec[central],
'bx': tractor.bx[central], 'by': tractor.by[central],
'mw_transmission_g': tractor.mw_transmission_g[central],
'mw_transmission_r': tractor.mw_transmission_r[central],
'mw_transmission_z': tractor.mw_transmission_z[central],
'ra_x0': radec_med[0], 'dec_y0': radec_med[1],
#'ra_peak': radec_med[0], 'dec_peak': radec_med[1]
}
for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):
mge[key] = np.float32(getattr(mgegalaxy, key))
if key == 'pa': # put into range [0-180]
mge[key] = mge[key] % np.float32(180)
data['mge'].append(mge)
# Now, loop on each filter and build a custom image and mask for each
# central. Specifically, pack the model-subtracted images images
# corresponding to each (unique) central into a list. Note that there's
# a little bit of code to deal with different pixel scales but this case
# requires more work.
#for filt in [refband]:
for filt in bands:
thispixscale = filt2pixscale[filt]
imagekey, varkey = '{}_masked'.format(filt), '{}_var'.format(filt)
if imagekey not in data.keys():
data[imagekey], data[varkey] = [], []
factor = filt2pixscale[refband] / filt2pixscale[filt]
majoraxis = 1.5 * factor * mgegalaxy.majoraxis # [pixels]
# Grab the pixels belonging to this galaxy so we can unmask them below.
central_mask = ellipse_mask(mge['xmed'] * factor, mge['ymed'] * factor,
majoraxis, majoraxis * (1-mgegalaxy.eps),
np.radians(mgegalaxy.theta-90), xobj, yobj)
if np.sum(central_mask) == 0:
print('No pixels belong to the central galaxy---this is bad!')
data['failed'] = True
break
# Build the mask from the (cumulative) residual-image mask and the
# inverse variance mask for this galaxy, but then "unmask" the
# pixels belonging to the central.
_residual_mask = residual_mask.copy()
_residual_mask[central_mask] = ma.nomask
mask = ma.mask_or(_residual_mask, newmask, shrink=False)
#import matplotlib.pyplot as plt
#plt.clf() ; plt.imshow(central_mask, origin='lower') ; plt.savefig('junk2.png')
#pdb.set_trace()
# Need to be smarter about the srcs list...
srcs = tractor.copy()
srcs.cut(nocentral)
model_nocentral = srcs2image(srcs, data['{}_wcs'.format(refband)], band=filt.lower(),
pixelized_psf=data['{}_psf'.format(refband)])
# Convert to surface brightness and 32-bit precision.
img = (ma.getdata(data[filt]) - model_nocentral) / thispixscale**2 # [nanomaggies/arcsec**2]
img = ma.masked_array(img.astype('f4'), mask)
var = data['{}_var_'.format(filt)] / thispixscale**4 # [nanomaggies**2/arcsec**4]
# Fill with zeros, for fun--
ma.set_fill_value(img, fill_value)
#img.filled(fill_value)
data[imagekey].append(img)
data[varkey].append(var)
#if tractor.ref_id[central] == 474614:
# import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm ; plt.clf()
# thisimg = np.log10(data[imagekey][ii]) ; norm = simple_norm(thisimg, 'log') ; plt.imshow(thisimg, origin='lower', norm=norm) ; plt.savefig('junk{}.png'.format(ii+1))
# pdb.set_trace()
# Cleanup?
for filt in bands:
del data[filt]
del data['{}_var_'.format(filt)]
return data
def read_multiband(galaxy, galaxydir, filesuffix='largegalaxy', refband='r',
bands=['g', 'r', 'z'], pixscale=0.262, fill_value=0.0,
galaxy_id=None, verbose=False):
"""Read the multi-band images (converted to surface brightness) and create a
masked array suitable for ellipse-fitting.
"""
import fitsio
import astropy.units as u
from astropy.table import Table
from astrometry.util.fits import fits_table
from legacypipe.bits import MASKBITS
from legacyhalos.io import _get_psfsize_and_depth, _read_image_data
# Dictionary mapping between optical filter and filename coded up in
# coadds.py, galex.py, and unwise.py, which depends on the project.
data, filt2imfile, filt2pixscale = {}, {}, {}
for band in bands:
filt2imfile.update({band: {'image': '{}-image'.format(filesuffix),
'model': '{}-model'.format(filesuffix),
'invvar': '{}-invvar'.format(filesuffix),
'psf': '{}-psf'.format(filesuffix),
}})
filt2pixscale.update({band: pixscale})
filt2imfile.update({'tractor': '{}-tractor'.format(filesuffix),
'sample': '{}-sample'.format(filesuffix),
'maskbits': '{}-maskbits'.format(filesuffix),
})
# Do all the files exist? If not, bail!
missing_data = False
for filt in bands:
for ii, imtype in enumerate(filt2imfile[filt].keys()):
#if imtype == 'sky': # this is a dictionary entry
# continue
imfile = os.path.join(galaxydir, '{}-{}-{}.fits.fz'.format(galaxy, filt2imfile[filt][imtype], filt))
#print(imtype, imfile)
if os.path.isfile(imfile):
| |
<reponame>cristobaltapia/sajou
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Defines the different load types,thta can be applied to the elements, which are
transfered accordingly to the respective nodes.
"""
import numpy as np
import scipy.sparse as sparse
from sajou.utils import Local_Csys_two_points
class Load(object):
    """Base class for every load type that can act on an element."""

    def __init__(self):
        """Set up the attributes shared by all load types."""
        # Human-readable name of the load type; set by subclasses.
        self._type = ''
        # Equivalent nodal load vector in global coordinates; computed
        # by subclasses.
        self._load_vector_global = None
class DistributedLoad(Load):
    """Distributed load (uniform or linearly varying) on a frame element.

    The equivalent nodal load vector is computed on construction and stored
    in ``self._load_vector_global``; the polynomial coefficients needed to
    evaluate section forces along the element are stored in
    ``self._poly_sec_force``.
    """

    def __init__(self, elem, p1, p2=None, direction='y', coord_system='local'):
        """Apply a distributed load on the frame element.

        If 'p2' is given, then a linearly varying load is applied with value
        'p1' at the first node of the element and 'p2' at the second.
        Otherwise the load is uniformly distributed with value 'p1'.

        :elem: beam element the load acts on
        :p1: load intensity at the first node of the element
        :p2: load intensity at the second node (defaults to ``p1``, giving a
            uniform load)
        :direction: 'y' (default, transversal) or 'x' (axial)
        :coord_system: 'local' (default) or 'global' -- the coordinate
            system in which the load components are given
        """
        Load.__init__(self)
        self._elem = elem
        self._p1 = p1
        self._direction = direction
        self._coord_system = coord_system
        self._type = 'Distributed Load'
        # Assign a coordinate system to the load: either the element's own
        # local system, or a system aligned with the global axes.
        if coord_system == 'local':
            self._localCSys = elem._localCSys
        elif coord_system == 'global':
            self._localCSys = Local_Csys_two_points(point1=(0., 0., 0.),
                                                    point2=(1., 0., 0.))
        # Detect if distribution is a varying distributed load or not
        if p2 is None:
            self.is_uniform = True
            p2 = p1
            self._p2 = p1
        else:
            self.is_uniform = False
            self._p2 = p2
        # If the distributed load is given in local coordinates
        if coord_system == 'local':
            # Generate loading vector
            load_v, poly_sec_force = self._calc_loading_vector_local(
                p1, p2, elem._length, direction)
        # Else, if the distributed load is given in global coordinates
        elif coord_system == 'global':
            # Generate loading vector
            load_v, poly_sec_force = self._calc_loading_vector_global(
                p1, p2, elem._length, direction)
        self._load_vector_global = load_v
        # Polynomial coefficients to calculate the section forces along
        # the beam element.
        self._poly_sec_force = poly_sec_force

    def _calc_loading_vector_local(self, p1, p2, length, direction):
        """
        Generate the loading vector, when the distributed load is in local coords.

        Also returns the matrix with the polynomial coefficients used for the
        calculation of the sectional forces.

        :returns: tuple ``(load_vector_global, poly_sec_force)`` -- the
            equivalent nodal load vector rotated to global coordinates, and
            the (4 x 3) polynomial coefficient matrix for section forces
        """
        # Initialize loading vector
        # FIXME: make this dependant from the specific element.
        # (thinking in 3D case)
        n_dof = self._elem.n_active_dof
        load_v = np.zeros(n_dof)
        # Load vector for the axial load
        # (direction='x'): work-equivalent end forces for a linearly
        # varying axial load.
        if direction == 'x':
            load_v[0] = length * (2. * p1 + p2) / 6.
            load_v[3] = length * (p1 + 2. * p2) / 6.
            # Generate matrix used for the calculation of section forces
            poly_sec_force = self._generate_section_force_poly(p1, p2, length,
                                                               direction)
        # Load vector for the transversal load
        # (direction='y'): equivalent end shears and moments for a linearly
        # varying transversal load on a 2-node beam element.
        elif direction == 'y':
            load_v[1] = length * (7. * p1 + 3. * p2) / 20.
            load_v[2] = length**2 * (p1 / 20. + p2 / 30.)
            load_v[4] = length * (3. * p1 + 7. * p2) / 20.
            load_v[5] = -length**2 * (p1 / 30. + p2 / 20.)
            # Generate matrix used for the calculation of section forces
            poly_sec_force = self._generate_section_force_poly(p1, p2, length,
                                                               direction)
        self._loading_vector = load_v
        # Calculate the load vector in global coordinates, using the
        # transformation matrix
        Te = self._elem.transformation_matrix
        # Rotate
        load_vector_global = Te.T @ load_v
        return load_vector_global, poly_sec_force

    def _calc_loading_vector_global(self, p1, p2, length, direction):
        """
        Generate the loading vector, when the distributed load is in global coords.

        The global load is decomposed into its local 'x' and 'y' components
        and each component is handled by :meth:`_calc_loading_vector_local`.

        :returns: tuple ``(load_v, poly_sec_force)`` -- the accumulated
            nodal load vector and (4 x 3) section-force coefficient matrix
        """
        # Initialize loading vector
        # FIXME: make this dependant from the specific element.
        # (thinking in 3D case)
        n_dof = self._elem.n_active_dof
        load_v = np.zeros(n_dof)
        poly_sec_force = np.zeros((4, 3))
        # transformation matrix
        T = self._elem.transformation_matrix
        # the load has to be decomposed in their respective local
        # components:
        if direction == 'x':
            # x-component in local coordinates
            p1_x = p1 * T[0, 0]
            p2_x = p2 * T[0, 0]
            load_v_aux, poly_sec_force_aux = self._calc_loading_vector_local(
                p1_x, p2_x, length, 'x')
            load_v += load_v_aux
            poly_sec_force += poly_sec_force_aux
            # y-component in local coordinates
            # NOTE(review): p1 and p2 are swapped here (p1_y uses p2 and vice
            # versa) -- presumably intentional for the axis orientation, but
            # confirm, since the axial component above does not swap them.
            p1_y = p2 * T[0, 1]
            p2_y = p1 * T[0, 1]
            load_v_aux, poly_sec_force_aux = self._calc_loading_vector_local(
                p1_y, p2_y, length, 'y')
            load_v += load_v_aux
            poly_sec_force += poly_sec_force_aux
        elif direction == 'y':
            # x-component in local coordinates
            # FIXME: this transformation works but I don't like it
            p1_x = p1 * T[0, 1]
            p2_x = p2 * T[0, 1]
            load_v_aux, poly_sec_force_aux = self._calc_loading_vector_local(
                p1_x, p2_x, length, 'x')
            load_v += load_v_aux
            poly_sec_force += poly_sec_force_aux
            # y-component in local coordinates
            # NOTE(review): same p1/p2 swap as above -- confirm intentional.
            p1_y = p2 * T[0, 0]
            p2_y = p1 * T[0, 0]
            load_v_aux, poly_sec_force_aux = self._calc_loading_vector_local(
                p1_y, p2_y, length, 'y')
            load_v += load_v_aux
            poly_sec_force += poly_sec_force_aux
        return load_v, poly_sec_force

    def _generate_section_force_poly(self, p1, p2, length, direction):
        """
        Generate the matrix with polynomial coefficients used to calculate
        the section forces.

        This matrix has a shape (4 x 3) and will be used to calculate the
        sectional forces produced by this DistributedLoad instance.
        It will then be added in the Element object to contain every
        contribution made to the element.

            [ N ]
            [ V ] = [ 1, x, x**2, x**3 ] * S
            [ M ]

        where 'S' is the matrix created here, 'x' is the position along the
        element in local direction 'x'.

        :p1: load intensity at the first node
        :p2: load intensity at the second node
        :length: length of the element
        :direction: 'x' (axial) or 'y' (transversal)
        :returns: (4 x 3) numpy array of polynomial coefficients, columns
            corresponding to N, V and M respectively
        """
        # TODO: implement for the 3D case
        # Initialize matrix
        m_sec_force = np.zeros((4, 3))
        # Determine in which case we are
        if direction == 'x':
            # Axial load contributes to the normal force N only.
            m_sec_force[:, 0] = np.array(
                [0., -p1, (p1 - p2) / (2 * length), 0.])
        # For the case in which the loading direction is 'y'
        elif direction == 'y':
            # Transversal load contributes to shear V and bending moment M.
            m_sec_force[:, 1] = np.array(
                [0., p1, (p2 - p1) / (2 * length), 0.])
            m_sec_force[:, 2] = np.array(
                [0., 0., p1 * 0.5, (p2 - p1) / (6 * length)])
        return m_sec_force
class DistributedMoment(Load):
"""Docstring for DistributedMoment. """
def __init__(self, elem, m1, m2=None, direction='z', coord_system='local'):
"""Apply a distributed moment to a beam element
:elem: TODO
:m1: TODO
:m2: TODO
"""
Load.__init__(self)
self._elem = elem
self._m1 = m1
self._m2 = m2
self._direction = direction
self._coord_system = coord_system
self._type = 'Distributed Moment'
self.is_uniform = True
# Detect if distribution is a varying distributed load or not
if m2 == None:
self.is_uniform = True
m2 = m1
self._m2 = m1
else:
self.is_uniform = False
self._m2 = m2
# Initialize loading vector
# FIXME: make this dependant from the specific element.
# (thinking in 3D case)
load_v = np.zeros(6)
if coord_system == 'local':
# Generate loading vector
load_v, poly_sec_force = self._calc_loading_vector_local(
m1, m2, elem._length, direction)
# Else, if the distributed load is given in global coordinates
elif coord_system == 'global':
# Generate loading vector
load_v, poly_sec_force = self._calc_loading_vector_global(
m1, m2, elem._length, direction)
self._load_vector_global = load_v
self._poly_sec_force = poly_sec_force
    def _calc_loading_vector_local(self, m1, m2, length, direction):
        """ Generate the loading vector, when the distributed load is in local coords.

        Also returns the matrix used for the calculation of the sectional forces.

        :m1: moment intensity at the first node
        :m2: moment intensity at the second node
        :length: length of the element
        :direction: local axis about which the moment acts (only 'z' is handled)
        :returns: tuple (load vector rotated to global coordinates,
            section-force polynomial matrix)
        """
        # Initialize loading vector
        # FIXME: make this dependant from the specific element.
        # (thinking in 3D case)
        n_dof = self._elem._ndof
        load_v = np.zeros(n_dof)
        # Load vector for the moment load
        # (direction='z')
        # FIXME:
        if direction == 'z':
            # Equivalent nodal forces/moments for a linearly varying
            # distributed moment m1 -> m2 over the element length.
            load_v[1] = -(m1 + m2) * 0.5
            load_v[2] = -length * (m1 - m2) / 12.
            load_v[4] = (m1 + m2) * 0.5
            load_v[5] = length * (m1 - m2) / 12.
        # Generate matrix used for the calculation of section forces
        poly_sec_force = self._generate_section_force_poly(m1, m2, length,
                                                           direction)
        # Cache the local-coordinate load vector on the instance
        self._loading_vector = load_v
        # Calculate the load vector in global coordinates, using the
        # transformation matrix
        Te = self._elem.transformation_matrix
        # Rotate
        load_vector_global = Te.T @ load_v
        return load_vector_global, poly_sec_force
def _calc_loading_vector_global(self, m1, m2, length, direction):
""" Generate the loading vector, when the distributed load is in global coords.
:returns: TODO
"""
# Initialize loading vector
# FIXME: make this dependant from the specific element.
# (thinking in 3D case)
n_dof = self._elem._ndof
load_v = np.zeros(n_dof)
poly_sec_force = np.zeros((4, 3))
# transformation matrix
T = self._elem.transformation_matrix
# the load has to be decomposed in their | |
import __main__
import os
import importlib
import gzip
from json import dumps
from lxml import etree
# Directory holding the XML schemas used to validate form definitions.
schema_path = os.path.join(os.path.dirname(__main__.__file__), 'schemas')
# Parser that validates against form.xsd, applies attribute defaults,
# and strips comments and insignificant whitespace.
parser = etree.XMLParser(
    schema=etree.XMLSchema(file=os.path.join(schema_path, 'form.xsd')),
    attribute_defaults=True, remove_comments=True, remove_blank_text=True)
# Directory holding the initial form definition XML files.
form_path = os.path.join(os.path.dirname(__main__.__file__), 'init', 'forms')
import db.objects
import db.create_table
import db.cache
# SQL parameter placeholder style (e.g. '?' or '%s') for the active backend.
param_style = db.cache.param_style
def upgrade_datamodel(db_session, old_version, new_version, company='_sys'):
    """Run every datamodel migration newer than old_version, in order.

    :param db_session: active database session
    :param old_version: version tuple currently installed
    :param new_version: version tuple being upgraded to (reported only)
    :param company: company (schema) name passed to migrations that need it
    """
    print('update {} to {}'.format(old_version, new_version))
    # (version, migration function, whether it takes the company argument)
    migrations = (
        ((0, 1, 1), upgrade_0_1_1, False),
        ((0, 1, 2), upgrade_0_1_2, False),
        ((0, 1, 3), upgrade_0_1_3, True),
        ((0, 1, 4), upgrade_0_1_4, False),
        ((0, 1, 5), upgrade_0_1_5, True),
        ((0, 1, 6), upgrade_0_1_6, True),
        ((0, 1, 7), upgrade_0_1_7, True),
        ((0, 1, 8), upgrade_0_1_8, True),
        ((0, 1, 9), upgrade_0_1_9, True),
        ((0, 1, 10), upgrade_0_1_10, True),
        ((0, 1, 11), upgrade_0_1_11, True),
    )
    for version, migrate, needs_company in migrations:
        if old_version < version:
            if needs_company:
                migrate(db_session, company)
            else:
                migrate(db_session)
# replace amended form definitions
def upd_form_defn(conn, company, form_name):
    """Replace an amended form definition in sys_form_defns.

    Reads {form_name}.xml from the init/forms directory, undoes the
    escaping used in the source files (backticks for double quotes,
    '<<'/'>>' for angle brackets), validates it against form.xsd and
    stores the gzipped XML in the company's sys_form_defns table.

    :param conn: open database connection
    :param company: company (schema) name
    :param form_name: name of the form definition to replace
    """
    # Bug fix: close the file deterministically instead of leaking the
    # handle until garbage collection.
    with open('{}/{}.xml'.format(form_path, form_name)) as form_file:
        xml = form_file.read()
    xml = xml.replace('`', '"')
    xml = xml.replace('<<', '<')
    xml = xml.replace('>>', '>')
    xml = etree.fromstring(xml, parser=parser)
    xml = gzip.compress(etree.tostring(xml))
    sql = (
        'UPDATE {0}.sys_form_defns SET form_xml = {1} WHERE form_name = {1}'
        .format(company, param_style)
    )
    params = [xml, form_name]
    conn.exec_sql(sql, params)
def upgrade_0_1_1(db_session):
    """Upgrade the datamodel to version 0.1.1.

    Reworks sys_menu_defns: adds sequence-maintenance hooks, tightens the
    opt_type column, replaces opt_data with dedicated table_name /
    cursor_name / form_name columns plus a virtual 'children' count, then
    drops, re-creates and re-populates the table with the default menu.
    """
    print('upgrading to 0.1.1')
    with db_session.get_connection() as db_mem_conn:
        conn = db_mem_conn.db
        # update db_tables.sys_menu_defns with new hooks
        db_table = db.objects.get_db_object(__main__, '_sys', 'db_tables')
        db_table.setval('table_name', 'sys_menu_defns')
        db_table.setval('table_hooks', etree.fromstring(
            '<hooks><hook type="before_save"><increment_seq args="parent_id"/></hook>'
            '<hook type="after_delete"><decrement_seq args="parent_id"/></hook></hooks>'
            ))
        db_table.save()
        # update db_columns.sys_menu_defns.opt_type with allow_amend, choices
        db_column = db.objects.get_db_object(__main__, '_sys', 'db_columns')
        db_column.setval('table_name', 'sys_menu_defns')
        db_column.setval('col_name', 'opt_type')
        db_column.setval('allow_amend', False)
        # each choice: [code, description, list of [column, required?], ...]
        choices = [True, False, []]
        choices[2].append(['0', 'Root', [['descr', True]], []])
        choices[2].append(['1', 'Menu', [['descr', True]], []])
        choices[2].append(['2', 'Grid',
            [['descr', True], ['table_name', True], ['cursor_name', True]], []])
        choices[2].append(['3', 'Form', [['descr', True], ['form_name', True]], []])
        choices[2].append(['4', 'Report', [['descr', True]], []])
        choices[2].append(['5', 'Process', [['descr', True]], []])
        db_column.setval('choices', choices)
        db_column.save()
        # delete db_columns.sys_menu_defns.opt_data
        db_column.init()
        db_column.setval('table_name', 'sys_menu_defns')
        db_column.setval('col_name', 'opt_data')
        db_column.delete()
        # insert new columns definitions for sys_menu_defns
        db_column.init()
        db_column.setval('table_name', 'sys_menu_defns')
        db_column.setval('col_name', 'table_name')
        db_column.setval('col_type', 'sys')
        db_column.setval('seq', -1)
        db_column.setval('data_type', 'TEXT')
        db_column.setval('short_descr', 'Table name')
        db_column.setval('long_descr', 'Table name')
        db_column.setval('col_head', '')
        db_column.setval('key_field', 'N')
        db_column.setval('generated', False)
        db_column.setval('allow_null', True)
        db_column.setval('allow_amend', True)
        db_column.setval('max_len', 0)
        db_column.setval('db_scale', 0)
        db_column.save()
        db_column.init()
        db_column.setval('table_name', 'sys_menu_defns')
        db_column.setval('col_name', 'cursor_name')
        db_column.setval('col_type', 'sys')
        db_column.setval('seq', -1)
        db_column.setval('data_type', 'TEXT')
        db_column.setval('short_descr', 'Cursor name')
        db_column.setval('long_descr', 'Cursor name')
        db_column.setval('col_head', '')
        db_column.setval('key_field', 'N')
        db_column.setval('generated', False)
        db_column.setval('allow_null', True)
        db_column.setval('allow_amend', True)
        db_column.setval('max_len', 0)
        db_column.setval('db_scale', 0)
        db_column.save()
        db_column.init()
        db_column.setval('table_name', 'sys_menu_defns')
        db_column.setval('col_name', 'form_name')
        db_column.setval('col_type', 'sys')
        db_column.setval('seq', -1)
        db_column.setval('data_type', 'TEXT')
        db_column.setval('short_descr', 'Form name')
        db_column.setval('long_descr', 'Form name')
        db_column.setval('col_head', '')
        db_column.setval('key_field', 'N')
        db_column.setval('generated', False)
        db_column.setval('allow_null', True)
        db_column.setval('allow_amend', True)
        db_column.setval('max_len', 0)
        db_column.setval('db_scale', 0)
        db_column.save()
        # virtual column: count of child menu entries, computed via SQL
        db_column.init()
        db_column.setval('table_name', 'sys_menu_defns')
        db_column.setval('col_name', 'children')
        db_column.setval('col_type', 'virt')
        db_column.setval('seq', -1)
        db_column.setval('data_type', 'INT')
        db_column.setval('short_descr', 'Children')
        db_column.setval('long_descr', 'Number of children')
        db_column.setval('col_head', '')
        db_column.setval('key_field', 'N')
        db_column.setval('generated', False)
        db_column.setval('allow_null', True)
        db_column.setval('allow_amend', True)
        db_column.setval('max_len', 0)
        db_column.setval('db_scale', 0)
        db_column.setval('sql',
            'SELECT count(*) FROM _sys.sys_menu_defns b WHERE b.parent_id = a.row_id')
        db_column.save()
        # drop and re-create sys_menu_defns table, and populate with data
        sql = (
            "DROP TABLE _sys.sys_menu_defns"
        )
        conn.exec_sql(sql)
        db.create_table.create_table(conn, '_sys', 'sys_menu_defns')
        # default menu rows:
        # (descr, parent_id, seq, opt_type, table_name, cursor_name, form_name)
        params = []
        params.append(('System Administration' ,None, 0, '0', None, None, None))
        params.append(('System setup', 1, 0, '1', None, None, None))
        params.append(('Table definitions', 2, 0, '2', 'db_tables', 'db_tables', None))
        params.append(('Form definitions', 2, 1, '2', 'sys_form_defns', 'form_list', None))
        params.append(('Directories', 1, 1, '1', None, None, None))
        params.append(('Setup users', 5, 0, '2', 'dir_users', 'users', None))
        params.append(('Setup companies', 5, 1, '3', None, None, 'company_setup'))
        params.append(('Accounts receivable', 1, 2, '1', None, None, None))
        params.append(('AR setup', 8, 0, '1', None, None, None))
        params.append(('AR transactions', 8, 1, '1', None, None, None))
        params.append(('Accounts payable', 1, 3, '1', None, None, None))
        params.append(('AP setup', 11, 0, '1', None, None, None))
        params.append(('AP transactions', 11, 1, '1', None, None, None))
        conn.cur.executemany(
            "INSERT INTO _sys.sys_menu_defns "
            "(descr, parent_id, seq, opt_type, table_name, cursor_name, form_name) "
            "VALUES ({})".format(', '.join([param_style] * 7))
            , params)
def upgrade_0_1_2(db_session):
    """Upgrade the datamodel to version 0.1.2.

    Inserts the new 'menu_setup' form definition and adds a matching
    'Menu definitions' menu option under 'System setup' (or under the
    root if the user has customised the menu).
    """
    print('upgrading to 0.1.2')
    with db_session.get_connection() as db_mem_conn:
        conn = db_mem_conn.db
        # insert new form definition 'menu_setup'
        form_name = 'menu_setup'
        form_module = importlib.import_module('.forms.{}'.format(form_name), 'init')
        xml = getattr(form_module, form_name)
        xml = xml[1:]  # strip leading '\n'
        # undo the escaping used in the python form-definition sources
        xml = xml.replace('`', '"')
        xml = xml.replace('<<', '<')
        xml = xml.replace('>>', '>')
        form_defn = db.objects.get_db_object(__main__, '_sys', 'sys_form_defns')
        form_defn.setval('form_name', form_name)
        form_defn.setval('title', 'Menu Setup')
        form_defn.setval('form_xml', etree.fromstring(xml, parser=parser))
        form_defn.save()
        menu_defn = db.objects.get_db_object(__main__, '_sys', 'sys_menu_defns')
        menu_defn.select_row(keys={'descr': 'System setup'})
        if menu_defn.exists:
            # BUG FIX: 'await' is invalid inside a non-async function and
            # made this module fail to compile; getval() is called
            # synchronously everywhere else in this file.
            parent_id = menu_defn.getval('row_id')
        else:  # user has changed menu setup
            parent_id = 1  # append to root
        menu_defn.init()
        menu_defn.setval('descr', 'Menu definitions')
        menu_defn.setval('parent_id', parent_id)
        menu_defn.setval('seq', -1)
        menu_defn.setval('opt_type', '3')  # form definition
        menu_defn.setval('form_name', 'menu_setup')
        menu_defn.save()
def upgrade_0_1_3(db_session, company):
print('upgrading to 0.1.3')
with db_session.get_connection() as db_mem_conn:
conn = db_mem_conn.db
# upd db_columns.dir_users.display_name - allow_null -> True
sql = (
'SELECT row_id FROM {}.db_tables WHERE table_name = {}'
.format(company, param_style)
)
cur = conn.exec_sql(sql, ['dir_users'])
table_id = cur.fetchone()[0]
sql = (
'UPDATE {0}.db_columns SET allow_null = {1} '
'WHERE table_id = {1} AND col_name={1}'
.format(company, param_style)
)
params = ['1', table_id, 'display_name']
conn.exec_sql(sql, params)
# upd db_columns.sys_menu_defns.children - allow_amend -> True
# upd db_columns.sys_menu_defns.children - sql - '_sys' -> '{company}'
sql = (
'SELECT row_id FROM {}.db_tables WHERE table_name = {}'
.format(company, param_style)
)
cur = conn.exec_sql(sql, ['sys_menu_defns'])
table_id = cur.fetchone()[0]
sql = (
'UPDATE {0}.db_columns SET allow_amend = {1}, sql={1} '
'WHERE table_id = {1} AND col_name={1}'
.format(company, param_style)
)
params = [
'1',
'SELECT count(*) FROM {company}.sys_menu_defns b '
'WHERE b.parent_id = a.row_id',
table_id, 'children']
conn.exec_sql(sql, params)
# add db_columns.sys_menu_defns.expandable
# add db_columns.sys_menu_defns.parent_num
params = []
params.append(('expandable', 'BOOL', 'Expandable?', 'Is this node expandable?', '',
'N', False, False, True, 0, 0,
"SELECT CASE WHEN a.opt_type in ('0', '1') THEN 1 ELSE 0 END"))
params.append(('parent_num', 'INT', 'Parent numeric id', 'Parent id - change null to 0', '',
'N', False, False, True, 0, 0,
"SELECT COALESCE(a.parent_id, 0)"))
db_column = db.objects.get_db_object(__main__, company, 'db_columns')
for seq, param in enumerate(params):
db_column.init()
db_column.setval('table_name', 'sys_menu_defns')
db_column.setval('col_name', param[0])
db_column.setval('col_type', 'virt')
db_column.setval('seq', seq+1)
db_column.setval('data_type', param[1])
db_column.setval('short_descr', param[2])
db_column.setval('long_descr', param[3])
db_column.setval('col_head', param[4])
db_column.setval('key_field', param[5])
db_column.setval('generated', param[6])
db_column.setval('allow_null', param[7])
db_column.setval('allow_amend', param[8])
db_column.setval('max_len', param[9])
db_column.setval('db_scale', param[10])
db_column.setval('scale_ptr', None)
db_column.setval('dflt_val', None)
db_column.setval('col_checks', None)
db_column.setval('fkey', None)
db_column.setval('choices', None)
db_column.setval('sql', param[11])
db_column.save()
# add del_chk to dir_companies (company_id != '_sys')
sql = (
'UPDATE {0}.db_tables SET del_checks = {1} WHERE table_name = {1}'
.format(company, param_style)
)
del_checks = []
del_checks.append(('CHECK', '', 'company_id', '!=', '"_sys"', ''))
params = [dumps(del_checks), 'dir_companies']
conn.exec_sql(sql, params)
sql = (
'SELECT row_id FROM {}.db_tables WHERE table_name = {}'
.format(company, param_style)
)
cur = conn.exec_sql(sql, ['dir_companies'])
table_id = cur.fetchone()[0]
sql = (
'SELECT audit_row_id FROM {0}.db_tables_audit_xref '
'WHERE data_row_id = {1} AND type = {1}'
.format(company, param_style)
)
params = [table_id, 'chg']
cur = conn.exec_sql(sql, params)
audit_row_id = cur.fetchone()[0]
sql = (
'UPDATE {0}.db_tables_audit SET del_checks = {1} WHERE row_id = {1}'
.format(company, param_style)
)
params = [dumps(del_checks), audit_row_id]
conn.exec_sql(sql, params)
# replace amended form definition 'login_form'
form_name = 'login_form'
form_module = importlib.import_module('.forms.{}'.format(form_name), 'init')
xml = getattr(form_module, form_name)
xml = xml[1:] # strip leading '\n'
xml = xml.replace('`', '"')
xml = xml.replace('<<', '<')
xml = xml.replace('>>', '>')
xml = etree.fromstring(xml, parser=parser)
xml = gzip.compress(etree.tostring(xml))
sql = (
'UPDATE {0}.sys_form_defns SET form_xml = {1} WHERE form_name = {1}'
.format(company, param_style)
)
params = [xml, form_name]
conn.exec_sql(sql, params)
# upd db_columns.dir_users_companies.user_row_id.fkey - child -> True
# upd db_columns.dir_users_companies.company_id.fkey - child -> True
sql = (
'SELECT row_id FROM {}.db_tables WHERE table_name = {}'
.format(company, param_style)
)
cur = conn.exec_sql(sql, ['dir_users_companies'])
table_id = cur.fetchone()[0]
sql = (
'UPDATE {0}.db_columns SET fkey = {1} '
'WHERE table_id = {1} AND col_name={1}'
.format(company, param_style)
)
fkey = []
fkey.append('dir_users')
fkey.append('row_id')
fkey.append('user_id')
fkey.append('user_id')
fkey.append(True)
params = [dumps(fkey), table_id, 'user_row_id']
conn.exec_sql(sql, params)
sql = (
'UPDATE {0}.db_columns SET fkey = {1} '
'WHERE table_id = {1} AND col_name={1}'
.format(company, param_style)
)
fkey = []
fkey.append('dir_companies')
fkey.append('company_id')
fkey.append(None)
fkey.append(None)
fkey.append(True)
params = [dumps(fkey), table_id, 'company_id']
conn.exec_sql(sql, params)
# upd db_tables.dir_users.form_xml
form_name = 'user_formview'
form_module = importlib.import_module('.forms.{}'.format(form_name), 'init')
xml = getattr(form_module, form_name)
xml = xml[1:] # strip leading '\n'
xml = xml.replace('`', '"')
xml = xml.replace('<<', '<')
xml = xml.replace('>>', '>')
xml = etree.fromstring(xml, parser=parser)
xml = gzip.compress(etree.tostring(xml))
sql = (
'UPDATE {0}.db_tables SET form_xml = {1} WHERE table_name = {1}'
.format(company, param_style)
)
params = [xml, 'dir_users']
conn.exec_sql(sql, params)
# upd dir_users_companies schema - foreign key (user_row_id) add ON DELETE | |
# Fw/Python/src/fprime/util/build_helper.py (repo: saba-ja/fprime_owls)
"""
fprime.util.build_helper.py
This script is defined to help users run standard make processes using CMake. This will support migrants from the old
make system, as well as enable more efficient practices when developing in the CMake system. The following functions
are supported herein:
- build: build the current directory/module/deployment
- impl: make implementation templates
- testimpl: make testing templates
- build_ut: build the current UTs
- check: run modules unit tests
@author mstarch
"""
import argparse
import os
import re
import sys
from pathlib import Path
from typing import Dict, List, Tuple
from fprime.common.error import FprimeException
from fprime.fbuild.builder import (
Target,
Build,
BuildType,
GenerateException,
InvalidBuildCacheException,
UnableToDetectDeploymentException,
)
from fprime.fbuild.settings import IniSettings
CMAKE_REG = re.compile(r"-D([a-zA-Z0-9_]+)=(.*)")
def get_target(parsed: argparse.Namespace) -> Target:
    """Resolve the parsed CLI namespace into a concrete build target.

    Args:
        parsed: argparse namespace holding the command mnemonic and any
            target flags the user switched on

    Returns:
        target matching the mnemonic and the enabled flag set
    """
    enabled_flags = set()
    # Collect every known flag that is present and truthy on the namespace.
    for flag in Target.get_all_possible_flags():
        if getattr(parsed, flag, False):
            enabled_flags.add(flag)
    return Target.get_target(parsed.command, enabled_flags)
def validate(parsed, unknown):
    """
    Ensure the parsed arguments are consistent and build the pass-through sets.

    Collects validated -DKEY=VALUE pairs destined for CMake (generate command
    only) and the make arguments (job count) for build-style commands, so the
    values are created, defaulted and validated in a single place.

    :param parsed: args to validate
    :param unknown: unknown arguments (candidate -D flags)
    :return: (cmake_args, make_args) dictionaries
    """
    cmake_args = {}
    make_args = {}
    if parsed.command == "generate":
        # Harvest -DKEY=VALUE definitions for CMake; the caller has already
        # rejected unknown arguments that do not match CMAKE_REG.
        for arg in unknown:
            match = CMAKE_REG.match(arg)
            cmake_args[match.group(1)] = match.group(2)
    elif parsed.command not in ["info", "purge", "hash-to-file", "new"]:
        # Build-style command: force settings to load from the build cache,
        # and clamp the parallel job count to at least one.
        parsed.settings = None
        make_args["--jobs"] = 1 if parsed.jobs <= 0 else parsed.jobs
    return cmake_args, make_args
def add_target_parser(
    target: Target,
    subparsers,
    common: argparse.ArgumentParser,
    existing: Dict[str, Tuple[argparse.ArgumentParser, List[str]]],
):
    """Create or extend the subcommand parser for one build target.

    When the mnemonic has no parser yet, a new subparser inheriting the
    common flags is created, registered in ``existing`` with its known flag
    list, and given the shared -j/--jobs option. Any of the target's flags
    not yet present are then added as boolean options.

    Args:
        target: target needing a subparser
        subparsers: subparser collection to register into
        common: parent parser carrying the shared flags
        existing: mnemonic -> (parser, flags) registry

    Notes:
        Mutates both ``existing`` and the subparser collection.
    """
    if target.mnemonic not in existing:
        mnemonic_parser = subparsers.add_parser(
            target.mnemonic,
            parents=[common],
            add_help=False,
            help="{} in the specified directory".format(target.desc),
        )
        # "ut" already lives on the global parser; pre-mark it as handled.
        existing[target.mnemonic] = (mnemonic_parser, ["ut"])
        mnemonic_parser.add_argument(
            "-j",
            "--jobs",
            default=1,
            type=int,
            help="Parallel build job count. Default: %(default)s.",
        )
    parser, flags = existing[target.mnemonic]
    # Add only the flags this parser has not seen before.
    for flag in [candidate for candidate in target.flags if candidate not in flags]:
        parser.add_argument(
            "--{}".format(flag), action="store_true", default=False, help=target.desc
        )
        flags.append(flag)
def parse_args(args):
    """
    Parse the arguments to the CLI. This will then enable the user to run the above listed commands via the commands.

    :param args: CLI arguments to process
    :return: tuple (parsed namespace, cmake_args dict, make_args dict, parser)
    """
    # Common parser specifying common arguments input into the utility
    common_parser = argparse.ArgumentParser(
        description="Common Parser for Common Ingredients."
    )
    common_parser.add_argument(
        "platform",
        nargs="?",
        default="default",
        help="F prime build platform (e.g. Linux, Darwin). Default specified in settings.ini",
    )
    common_parser.add_argument(
        "-d",
        "--deploy",
        dest="deploy",
        default=None,
        help="F prime deployment directory to use. May contain multiple build directories.",
    )
    common_parser.add_argument(
        "-p",
        "--path",
        default=os.getcwd(),
        help="F prime directory to operate on. Default: cwd, %(default)s.",
    )
    common_parser.add_argument(
        "-v",
        "--verbose",
        default=False,
        action="store_true",
        help="Turn on verbose output.",
    )
    common_parser.add_argument(
        "--ut",
        action="store_true",
        help="Run command against unit testing build type",
    )
    # Main parser for the whole application
    parsers = {}
    parser = argparse.ArgumentParser(description="F prime helper application.")
    subparsers = parser.add_subparsers(
        description="F prime utility command line. Please run one of the commands. "
        + "For help, run a command with the --help flag.",
        dest="command",
    )
    # Add non-target parsers
    generate_parser = subparsers.add_parser(
        "generate",
        help="Generate a build cache directory. Defaults to generating a release build cache",
        parents=[common_parser],
        add_help=False,
    )
    # NOTE(review): "-Dxyz" is a placeholder flag for the help text only;
    # real -DKEY=VALUE flags arrive via parse_known_args' unknown list.
    generate_parser.add_argument(
        "-Dxyz",
        action="append",
        help="Pass -D flags through to CMakes",
        nargs=1,
        default=[],
    )
    purge_parser = subparsers.add_parser(
        "purge",
        help="Purge build cache directories",
        add_help=False,
        parents=[common_parser],
    )
    purge_parser.add_argument(
        "-f",
        "--force",
        default=False,
        action="store_true",
        help="Purges the build directory by force. No confirmation will be requested.",
    )
    # Add a search for hash function
    hash_parser = subparsers.add_parser(
        "hash-to-file",
        help="Converts F prime build hash to filename.",
        parents=[common_parser],
        add_help=False,
    )
    hash_parser.add_argument(
        "hash",
        type=lambda x: int(x, 0),
        help="F prime assert hash to associate with a file.",
    )
    # Add an informational command
    subparsers.add_parser(
        "info",
        help="Gets fprime-util contextual information.",
        parents=[common_parser],
        add_help=False,
    )
    # New functionality
    subparsers.add_parser(
        "new",
        help="Generate a new component",
        parents=[common_parser],
        add_help=False,
    )
    # One subparser per known build target (build, check, impl, ...)
    for target in Target.get_all_targets():
        add_target_parser(target, subparsers, common_parser, parsers)
    # Parse and prepare to run
    parsed, unknown = parser.parse_known_args(args)
    # Unknown arguments are only tolerated when they look like -DKEY=VALUE
    bad = [bad for bad in unknown if not CMAKE_REG.match(bad)]
    if not hasattr(parsed, "command") or parsed.command is None:
        parser.print_help()
        sys.exit(1)
    elif bad:
        print("[ERROR] Unknown arguments: {}".format(", ".join(bad)))
        parser.print_help()
        sys.exit(1)
    cmake_args, make_args = validate(parsed, unknown)
    return parsed, cmake_args, make_args, parser
def confirm(msg=None):
    """Ask a yes/no question on stdin until a valid answer is given.

    :param msg: prompt to display; defaults to the purge confirmation
    :return: True when the user answers yes, False when no
    """
    prompt = "Purge this directory (yes/no)?" if msg is None else msg
    # Deliberately loop until the user gives an unambiguous answer.
    while True:
        answer = input(prompt)
        normalized = answer.lower()
        if normalized in ("y", "yes"):
            return True
        if normalized in ("n", "no"):
            return False
        print("{} is invalid. Please use 'yes' or 'no'".format(answer))
def print_info(parsed, deployment):
    """Build and print the informational output block.

    Loads each build type's cache, harvests its local/global targets and
    artifact locations, and prints a consolidated summary. Build types
    whose cache is missing are reported as warnings and skipped.

    :param parsed: parsed argparse namespace (uses path, platform, verbose)
    :param deployment: deployment directory whose builds are inspected
    """
    cwd = Path(parsed.path)
    build_types = BuildType
    # Roll up targets for more concise display
    build_infos = {}
    local_generic_targets = set()
    global_generic_targets = set()
    # Loop through available builds and harvest targets
    for build_type in build_types:
        build = Build(build_type, deployment, verbose=parsed.verbose)
        try:
            build.load(cwd, parsed.platform)
        except InvalidBuildCacheException:
            # Missing cache is non-fatal: warn and move to the next type
            print(
                "[WARNING] Not displaying results for build type '{}', missing build cache.".format(
                    build_type.get_cmake_build_type()
                )
            )
            continue
        build_info = build.get_build_info(cwd)
        # Target list (quoted for display)
        local_targets = {
            "'{}'".format(target) for target in build_info.get("local_targets", [])
        }
        global_targets = {
            "'{}'".format(target) for target in build_info.get("global_targets", [])
        }
        build_artifacts = (
            build_info.get("auto_location")
            if build_info.get("auto_location") is not None
            else "N/A"
        )
        local_generic_targets = local_generic_targets.union(local_targets)
        global_generic_targets = global_generic_targets.union(global_targets)
        build_infos[build_type] = build_artifacts
    # Print out directory and deployment target sections
    print("[INFO] Fprime build information:")
    print(" Available directory targets: {}".format(" ".join(local_generic_targets)))
    print()
    print(
        " Available deployment targets: {}".format(" ".join(global_generic_targets))
    )
    # Artifact locations come afterwards
    print(" ----------------------------------------------------------")
    for build_type, build_artifact_location in build_infos.items():
        format_string = " {} build cache: {}"
        print(
            format_string.format(
                build_type.get_cmake_build_type(), build_artifact_location
            )
        )
    print()
def add_to_cmake(list_file: Path, comp_path: Path):
    """Register a new component in an existing CMakeLists.txt.

    Inserts an add_fprime_subdirectory(...) call either just before the
    first topology inclusion (lines containing '/Top/'), or at the end of
    the file when no topology inclusion exists. Asks the user for
    confirmation before modifying the file.

    :param list_file: path to the CMakeLists.txt to edit
    :param comp_path: component path relative to the CMakeLists.txt
    """
    print("[INFO] Found CMakeLists.txt at '{}'".format(list_file))
    with open(list_file, "r") as file_handle:
        lines = file_handle.readlines()
    topology_lines = [(line, text) for line, text in enumerate(lines) if "/Top/" in text]
    # BUG FIX: the default insertion point must be the end of the FILE
    # (len(lines)); it was len(topology_lines), which is 0 when there is no
    # topology inclusion and silently prepended the component to line 1.
    line = len(lines)
    if topology_lines:
        line, text = topology_lines[0]
        print("[INFO] Topology inclusion '{}' found on line {}.".format(text.strip(), line + 1))
    if not confirm("Add component {} to {} {}?".format(comp_path, list_file, "at end of file" if not topology_lines else " before topology inclusion")):
        return
    addition = 'add_fprime_subdirectory("${{CMAKE_CURRENT_LIST_DIR}}/{}/")\n'.format(comp_path)
    lines.insert(line, addition)
    with open(list_file, "w") as file_handle:
        file_handle.write("".join(lines))
def fprime_new(path: Path, settings: Dict[str, str]):
""" Uses cookiecutter for making new components """
try:
from cookiecutter.main import cookiecutter
from cookiecutter.exceptions import OutputDirExistsException
except ImportError:
print("[ERROR] 'cookiecutter' package not installed. Source venv and rum 'pip install cookiecutter'.",
file=sys.stderr)
return 1
try:
calculated_defaults = {}
proj_root = None
comp_parent_path = None
try:
proj_root = Path(settings.get("project_root", None))
comp_parent_path = path.relative_to(proj_root)
back_path = os.sep.join([".." for _ in str(comp_parent_path).split(os.sep)])
calculated_defaults["component_path"] = str(comp_parent_path).rstrip(os.sep)
calculated_defaults["component_path_to_fprime_root"] = str(back_path).rstrip(os.sep)
except (ValueError, TypeError):
print("[WARNING] No found project root. Set 'component_path' and 'component_path_to_fprime_root' carefully")
source = 'gh:SterlingPeet/cookiecutter-fprime-component'
print("[INFO] Cookiecutter source: {}".format(source))
print()
print("----------------")
print("[INFO] Help available here: https://github.com/SterlingPeet/cookiecutter-fprime-component/blob/master/README.rst#id3")
print("----------------")
print()
final_dir = cookiecutter(source, extra_context=calculated_defaults)
# Attempt to register to CMakeLists.txt
test_path = Path(final_dir).parent.resolve()
while proj_root is not None and test_path != proj_root.parent:
cmake_list_file = (test_path / "CMakeLists.txt")
if cmake_list_file.is_file():
add_to_cmake(cmake_list_file, Path(final_dir).relative_to(test_path))
break
test_path = test_path.parent
return 0
except OutputDirExistsException as oderr:
| |
self.checkScript(test_sizes, (torch.rand(777),))
self.checkScript(test_sizes, (torch.rand(0),))
    def test_for_in_tensors_rank0(self):
        # Iterating a 0-dim tensor is invalid; it must fail with a clear error.
        with self.assertRaisesRegex(RuntimeError, "of a 0-d tensor"):
            @torch.jit.script
            def test_sizes(x):
                sumz = 0
                for s in x:
                    sumz += 1
                return sumz

            test_sizes(torch.tensor(1))
    def test_for_in_tensors_fail_scalar(self):
        # A python float is not iterable; scripting must reject the loop.
        with self.assertRaisesRegex(RuntimeError, "'float' object is not iterable"):
            @torch.jit.script
            def test_sizes(x):
                # type: (float) -> int
                sumz = 0
                for s in x:
                    sumz += 1
                return sumz

            test_sizes(0.0)
    def test_for_in_tensors_nested(self):
        # Nested for-in over tensor slices counts all second-level slices.
        def test_sizes(x):
            sumz = 0
            for n in x:
                for t in n:
                    sumz += 1
            return sumz

        self.checkScript(test_sizes, (torch.rand(5, 4, 3, 2, 1),))
    # to avoid defining sum_list in multiple tests
    def get_sum_list_fn(self):
        """Return the shared sum-over-List[int] function used by the
        test_sum_list_* tests below."""
        def sum_list(a):
            # type: (List[int]) -> int
            sum = 0
            for i in a:
                sum += i
            return sum

        return sum_list
    def test_sum_list_diff_elms(self):
        # Sum over a list with several distinct elements.
        self.checkScript(self.get_sum_list_fn(), ([1, 2, 3, 4, 5],))
    def test_sum_list_empty(self):
        # Edge case: summing an empty list.
        self.checkScript(self.get_sum_list_fn(), ([],))
    def test_sum_list_one(self):
        # Edge case: summing a single-element list.
        self.checkScript(self.get_sum_list_fn(), ([1],))
    def test_sum_list_literal(self):
        # for-in over a list literal (no argument) must also script correctly.
        def sum_list():
            # type: () -> int
            sum = 0
            for i in [1, 2, 3, 4, 5]:
                sum += i
            return sum

        self.checkScript(sum_list, ())
    def test_sum_list_wrong_type(self):
        # Iterating an int must be rejected with the usual python error text.
        with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):
            @torch.jit.script
            def sum_list(a):
                # type: (int) -> int
                sum = 0
                for i in a:  # noqa: T484
                    sum += i
                return sum

            sum_list(1)
    def test_list_iterables(self):
        # A comma-separated list of iterables in a for statement is unsupported.
        with self.assertRaisesRegex(RuntimeError, 'List of iterables is not supported currently'):
            cu = torch.jit.CompilationUnit('''
            def list_iterables(x):
                for i, j in [2, 3, 4], [5, 6, 7]:
                    x += i
                    x += j
                return x
            ''')
    def test_for_in_string(self):
        # for-in over a str iterates characters; over List[str] it iterates items.
        def test_strings(x):
            # type: (str) -> str
            reverse = ""
            for c in x:
                reverse = c + reverse
            return reverse

        self.checkScript(test_strings, ("hello",))
        self.checkScript(test_strings, ("",))

        def test_list_strings(x):
            # type: (List[str]) -> str
            result = ""
            for sub_str in x:
                result += sub_str
            return result

        self.checkScript(test_list_strings, (["hello", "world"],))
        self.checkScript(test_list_strings, (["hello", " ", "world", ""],))
def test_for_in_dict(self):
def test_dicts(x):
# type: (Dict[str, int]) -> int
sum = 0
for key in x:
sum += x[key]
return sum
self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))
def test_dict_keys_values(x):
# type: (Dict[str, int]) -> Tuple[str, int]
key_str = ""
sum = 0
for key in x.keys():
key_str += key
for val in x.values():
sum += val
return key_str, sum
self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))
    def test_for_tuple_unpack(self):
        # Tuple unpacking in for-loop targets, incl. nested unpack over zip/enumerate.
        def for_tuple_unpack(x, y):
            for i, j in [[3, 4], [5, 6], [7, 8]]:
                x += i
                y += j
            return x, y

        self.checkScript(for_tuple_unpack, (torch.tensor(3), torch.tensor(5)))

        def nested_tuple_unpack(x, y):
            # type: (List[int], List[int]) -> int
            sum = 0
            for i, (j, k), v in zip(x, enumerate(x), y):
                sum += i + j + k + v
            return sum

        self.checkScript(nested_tuple_unpack, ([1, 3, 5], [2, 4, 6]))
    def test_for_tuple_assign(self):
        # for-in over a (possibly heterogeneous or nested) tuple value.
        def test_simple_assign(x):
            # type: (Tuple[int, float]) -> float
            sum = 0.0
            for a in x:
                sum += float(a)
            return sum

        self.checkScript(test_simple_assign, ((1, 2.5),))

        def test_tuple_assign(x):
            # type: (Tuple[Tuple[int, int], Tuple[int, int]]) -> int
            sum = 0
            for a in x:
                sum += a[0]
                sum += a[1]
            return sum

        self.checkScript(test_tuple_assign, (((1, 2), (4, 7)), ))
    def test_single_starred_lhs(self):
        # A lone starred target ('*b, = a') must be rejected at compile time.
        with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear on the lhs within the presence'
                                                  ' of another non-starred expression'):
            cu = torch.jit.CompilationUnit('''
            def single_starred_lhs(x):
                a = (x, x, x)
                *b, = a
                return b
            ''')
    def test_singleton_tuple_unpack(self):
        # 'b, = (a,)' must unpack a one-element tuple.
        def foo(a):
            b, = (a,)
            return b + 1
        self.checkScript(foo, (torch.rand(3),))
    def test_tuple_assignments(self):
        # Tuple / nested / subscript / starred assignment forms in TorchScript.
        def var_tuple_assign(x, y):
            # type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
            (a, b), c = x, y
            return a + b + c

        tuple_inputs = (torch.randn(1, 4), torch.randn(3, 4))
        self.checkScript(var_tuple_assign, (tuple_inputs, torch.randn(3, 4)))

        def nested_tuple_assign(x, y, z):
            # type: (int, Tuple[int, Tuple[int, int]], Tuple[int, int]) -> int
            a, (b, (c, d)), (e, f) = x, y, z
            return a + b + c + d + e + f

        self.checkScript(nested_tuple_assign, ((1, (2, (3, 4)), (5, 6))))

        def subscript_tuple_assign(a, x, i):
            # type: (List[int], Tensor, int) -> Tuple[int, Tensor, int]
            a[i], (x[i], b) = 1, (2, 3)
            return a[i] + 1, x + 5, b

        self.checkScript(subscript_tuple_assign, ([12, 7, 9, 11], torch.tensor((3, 13, 17)), 0))

        def star_tuple_assign():
            # type: () -> Tuple[int, int, Tuple[int, int], Tuple[int, int]]
            a, (b, *c), *d = 1, (2, 3, 4), 5, 6
            return a, b, c, d

        self.checkScript(star_tuple_assign, ())

        def subscript_tuple_augmented_assign(a):
            # type: (Tuple[int, int]) -> Tuple[int, int]
            a[0] += 1
            return a

        # Tuples are immutable: augmented assignment to an element must fail.
        with self.assertRaisesRegex(RuntimeError, 'does not support augmented assign'):
            scripted_aug_assign = torch.jit.script(subscript_tuple_augmented_assign)

        class AttrTupleAssignmentTestClass:
            def __init__(self, a: int, b: int):
                self.a = a
                self.b = b

            def set_ab(self, a: int, b: int):
                # tuple-assign to instance attributes
                self.a, self.b = (a, b)

            def get(self) -> Tuple[int, int]:
                return (self.a, self.b)

        make_global(AttrTupleAssignmentTestClass)

        @torch.jit.script
        def attr_tuple_assignment(o: AttrTupleAssignmentTestClass, a: int, b: int):
            o.set_ab(a, b)
            return o

        o = AttrTupleAssignmentTestClass(1, 2)
        self.assertEqual(attr_tuple_assignment(o, 3, 4).get(), (3, 4))
def test_multiple_assign(self):
    def test():
        # chained assignment: all targets bind to the same RHS value
        a = b, c = d, f = (1, 1)

        # side effect
        ten = torch.tensor(1)
        ten1 = ten2 = ten.add_(1)

        # ordering: RHS is fully evaluated before targets are assigned
        x = 1
        y = 3
        x, y = y, x + y
        return a, b, c, d, f, ten, ten1, ten2, x, y

    self.checkScript(test, ())
def test_multi_reduction(self):
    # Augmented assignment (`+=`) cannot have a tuple of targets on the LHS.
    with self.assertRaisesRegex(
            RuntimeError,
            'augmented assignment can only have one LHS expression'):
        cu = torch.jit.CompilationUnit('''
        def multi_reduction(x):
            a, b += x
            return a, b
        ''')
def test_invalid_call_arguments(self):
    # Passing arguments of the wrong type/arity to a builtin op must be rejected.
    with self.assertRaisesRegex(RuntimeError, 'but instead found type '):
        @torch.jit.script
        def invalid_call_arguments(x):
            return torch.unsqueeze(3, 4, 5, 6, 7, 8)
def test_invalid_lhs_assignment(self):
    # An arbitrary expression (x + 1) is not a valid assignment target.
    with self.assertRaisesRegex(RuntimeError, 'unexpected expression'):
        cu = torch.jit.CompilationUnit('''
        def invalid_lhs_assignment(x):
            x + 1 = x
            return x
        ''')
def test_multi_starred_expr_lhs(self):
    # At most one starred target is allowed per unpacking assignment.
    with self.assertRaisesRegex(RuntimeError, 'Only one starred expression is allowed on the lhs'):
        cu = torch.jit.CompilationUnit('''
        def multi_starred_expr_lhs():
            a, *b, *c = [1, 2, 3, 4, 5, 6]
            return a
        ''')
def test_pack_tuple_into_non_var(self):
    # A starred target must be a plain variable, not a literal.
    with self.assertRaisesRegex(RuntimeError, 'Cannot pack a tuple into a non-variable'):
        cu = torch.jit.CompilationUnit('''
        def pack_tuple_into_non_var(x):
            a, *1 = (3, 4, 5)
            return x
        ''')
def test_print_kwargs(self):
    # TorchScript's print() does not support keyword arguments such as flush.
    with self.assertRaisesRegex(RuntimeError, 'print doesn\'t accept any keyword arguments'):
        cu = torch.jit.CompilationUnit('''
        def print_kwargs(x):
            print(x, flush=True)
            return x
        ''')
def test_builtin_use_as_value(self):
    # A builtin method reference cannot be used as a first-class value.
    with self.assertRaisesRegex(RuntimeError, 'builtin cannot be used as a value'):
        @torch.jit.script
        def builtin_use_as_value(x):
            return x.unsqueeze
def test_wrong_use_as_tuple(self):
    # A Python function object cannot be unpacked as if it were a tuple.
    with self.assertRaisesRegex(RuntimeError, 'cannot be used as a tuple'):
        def test_fn():
            return 3

        @torch.jit.script
        def wrong_use_as_tuple(self):
            a, b = test_fn
            return a
def test_wrong_attr_lookup(self):
    # Attribute access on a builtin bound method is rejected.
    with self.assertRaisesRegex(RuntimeError, 'attribute lookup is not defined on builtin'):
        @torch.jit.script
        def wrong_attr_lookup(self, x):
            a = x.unsqueeze.myattr
            return a
def test_wrong_use_as_callable(self):
    # Tensors are not callable values in TorchScript.
    with self.assertRaisesRegex(RuntimeError, 'cannot call a value'):
        @torch.jit.script
        def wrong_use_as_callable(x):
            return x(3, 4, 5)
def test_python_val_doesnt_have_attr(self):
    # Looking up a nonexistent attribute on a Python module must fail cleanly.
    with self.assertRaisesRegex(RuntimeError, 'object has no attribute abcd'):
        @torch.jit.script
        def python_val_doesnt_have_attr():
            # this has to be a module otherwise attr lookup would not be
            # allowed in the first place
            return shutil.abcd
def test_wrong_module_attr_lookup(self):
    # A module attribute that resolves to a Python class is not a scriptable value.
    with self.assertRaisesRegex(RuntimeError, 'python value of type \'type\' cannot be used as a value'):
        import io

        @torch.jit.script
        def wrong_module_attr_lookup():
            return io.BytesIO
def test_wrong_method_call_inputs(self):
    # Calling a script method with a missing argument fails at compile time.
    with self.assertRaisesRegex(RuntimeError, 'Argument y not provided'):
        class SomeModule(torch.jit.ScriptModule):

            @torch.jit.script_method
            def foo(self, x, y):
                return x

            @torch.jit.script_method
            def forward(self, x, y):
                return self.foo(x)
        SomeModule()
def test_single_starred_expr_for_loop(self):
    # A lone starred target is invalid in a for-loop header.
    with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear'):
        cu = torch.jit.CompilationUnit('''
        def test():
            x = 0
            for *a in [1, 2, 3]:
                x = x + 1
            return x
        ''')
def test_call_ge(self):
    # Calling a traced single-argument function with three arguments must fail.
    with self.assertRaisesRegex(RuntimeError, 'Expected at most 1 arguments but found 3'):
        @_trace(torch.zeros(1, 2, 3))
        def foo(x):
            return x

        @torch.jit.script
        def test_fn():
            return foo(torch.full([1], 1), torch.full([1], 2), torch.full([1], 3))
def test_wrong_return_type(self):
    # The annotated return type of an ignored function must match what it
    # actually returns when called from script.
    with self.assertRaisesRegex(RuntimeError, 'but instead got value of type tuple'):
        @torch.jit.ignore
        def somefunc():
            # type: () -> Tuple[Tuple[Tensor, Tensor]]
            return torch.zeros(3, 4), torch.zeros(4, 5)  # noqa: T484

        @torch.jit.script
        def wrong_return_type():
            return somefunc()
        wrong_return_type()
# Tests for calling between different front-end modes
def test_call_python_fn_from_tracing_fn(self):
    # Tracing should transparently inline calls into plain Python functions.
    def python_fn(x):
        return torch.neg(x)

    @_trace(torch.rand(3, 4))
    def traced_fn(x):
        return python_fn(x) + 1

    # The neg op in the python function should be properly inlined to the
    # graph
    FileCheck().check("aten::neg").run(str(traced_fn.graph))
def test_call_python_mod_from_tracing_fn(self):
    # Tracing through an nn.Module call should inline its forward pass.
    class PythonMod(torch.nn.Module):
        def __init__(self):
            super(PythonMod, self).__init__()
            self.param = torch.nn.Parameter(torch.rand(4, 3), requires_grad=False)

        def forward(self, x):
            return torch.mm(x, self.param)

    pm = PythonMod()

    @_trace(torch.rand(3, 4))
    def traced_fn(x):
        return pm(x) + 1.0

    # Note: the parameter self.param from the Python module is inlined
    # into the graph
    self.assertTrue(len(list(traced_fn.graph.inputs())) == 1)
    FileCheck().check("aten::mm").check("aten::add").run(str(traced_fn.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_tracing_fn(self):
@_trace(torch.rand(3, 4))
def | |
# Copyright (c) 2021, <NAME>
# License: MIT License
from typing import Iterable, List
import pytest
from itertools import permutations
import ezdxf.tools.text_layout as tl
@pytest.mark.parametrize(
    "margins,expected",
    [
        # CSS-like shorthand expansion to (top, right, bottom, left):
        [None, (0, 0, 0, 0)],
        [(1,), (1, 1, 1, 1)],
        [(1, 2), (1, 2, 1, 2)],
        [(1, 2, 3), (1, 2, 3, 2)],
        [(1, 2, 3, 4), (1, 2, 3, 4)],
    ],
)
def test_resolve_margins(margins, expected):
    assert tl.resolve_margins(margins) == expected
@pytest.mark.parametrize(
    "align,expected",
    [
        # Expected insertion offset for a 4-wide, 6-high box per alignment.
        [tl.LayoutAlignment.TOP_LEFT, (0, 0)],
        [tl.LayoutAlignment.TOP_CENTER, (-2, 0)],
        [tl.LayoutAlignment.TOP_RIGHT, (-4, 0)],
        [tl.LayoutAlignment.MIDDLE_LEFT, (0, 3)],
        [tl.LayoutAlignment.MIDDLE_CENTER, (-2, 3)],
        [tl.LayoutAlignment.MIDDLE_RIGHT, (-4, 3)],
        [tl.LayoutAlignment.BOTTOM_LEFT, (0, 6)],
        [tl.LayoutAlignment.BOTTOM_CENTER, (-2, 6)],
        [tl.LayoutAlignment.BOTTOM_RIGHT, (-4, 6)],
    ],
)
def test_insert_location(align, expected):
    assert tl.insert_location(align, width=4, height=6) == expected
class Rect(tl.ContentRenderer):
    """Test double that records render() and line() calls as formatted strings."""

    def __init__(self, name: str, result: List = None):
        # A shared result list may be passed in so several renderers can
        # record into the same container.
        self.result = [] if result is None else result
        self.name = name

    def render(
        self, left: float, bottom: float, right: float, top: float, m=None
    ) -> None:
        entry = f"{self.name}({left:.1f}, {bottom:.1f}, {right:.1f}, {top:.1f})"
        self.result.append(entry)

    def line(self, x1: float, y1: float, x2: float, y2: float, m=None) -> None:
        entry = f"LINE({x1:.1f}, {y1:.1f})TO({x2:.1f}, {y2:.1f})"
        self.result.append(entry)
class TestTopLevelLayout:
    """Placement, rendering, bounding box and column management of tl.Layout."""

    @pytest.fixture
    def layout1(self):
        # Width 10 plus 1-unit margins on all sides; height grows with content.
        return tl.Layout(
            width=10, height=None, margins=(1, 1), renderer=Rect("Layout1")
        )

    def test_create_empty_layout_top_left(self, layout1):
        # layout1 has no height, only margins
        # 1. do layout placing
        layout1.place(align=tl.LayoutAlignment.TOP_LEFT)
        # 2. render content
        layout1.render()
        result = layout1.renderer.result
        assert len(result) == 1
        assert result[0] == "Layout1(0.0, -2.0, 12.0, 0.0)"

    def test_create_empty_layout_middle_center(self, layout1):
        # layout1 has no height, only margins
        # 1. do layout placing
        layout1.place(align=tl.LayoutAlignment.MIDDLE_CENTER)
        # 2. render content
        layout1.render()
        result = layout1.renderer.result
        assert len(result) == 1
        assert result[0] == "Layout1(-6.0, -1.0, 6.0, 1.0)"

    def test_add_one_column_by_reference_width(self, layout1):
        height = 17
        width = layout1.content_width  # reference column width
        result = layout1.renderer.result  # use same result container
        layout1.append_column(height=height, renderer=Rect("Col1", result))
        assert layout1.total_width == width + 2
        assert layout1.total_height == height + 2
        layout1.place(align=tl.LayoutAlignment.BOTTOM_LEFT)
        layout1.render()
        assert len(result) == 2
        assert result[0] == "Layout1(0.0, 0.0, 12.0, 19.0)"
        assert result[1] == "Col1(1.0, 1.0, 11.0, 18.0)"

    def test_add_two_equal_columns(self, layout1):
        margins = (1,)
        layout1.append_column(
            width=5, height=10, gutter=2, margins=margins, renderer=Rect("Col1")
        )
        layout1.append_column(
            width=7, height=20, margins=margins, renderer=Rect("Col2")
        )
        # width1 + margins + gutter + width2 + margins
        assert layout1.content_width == (5 + 2 + 2 + 7 + 2)
        # max(height) + margins
        assert layout1.content_height == (20 + 2)

    def test_bounding_box_for_not_placed_layout(self, layout1):
        # applies default alignment top/left, margins = (1, 1)
        layout1.append_column(10, 10)
        bbox = layout1.bbox()
        assert bbox.extmin == (0, -12)  # left/bottom
        assert bbox.extmax == (12, 0)  # right/top

    def test_bounding_box_for_placed_layout(self, layout1):
        # margins = (1, 1)
        layout1.append_column(10, 10)
        layout1.place(0, 0, tl.LayoutAlignment.MIDDLE_CENTER)
        bbox = layout1.bbox()
        assert bbox.extmin == (-6, -6)  # left/bottom
        assert bbox.extmax == (6, 6)  # right/top

    def test_next_existing_column(self, layout1):
        layout1.append_column(height=10)
        layout1.append_column(height=10)
        assert len(layout1) == 2
        assert layout1.current_column_index == 0
        layout1.next_column()
        assert layout1.current_column_index == 1

    def test_next_column_creates_a_new_column(self, layout1):
        layout1.append_column(height=10)
        assert len(layout1) == 1
        assert layout1.current_column_index == 0
        layout1.next_column()
        assert layout1.current_column_index == 1
        assert len(layout1) == 2, "a new column should be created"
class TestColumn:
    """Size calculation and rendering of a single tl.Column."""

    @pytest.fixture
    def c1(self):
        return tl.Column(
            # margins = top, right, bottom, left - same order as for CSS
            width=5,
            height=7,
            margins=(1, 2, 3, 4),
            renderer=Rect("C1"),
        )

    def test_size_calculation(self, c1):
        c1.place(0, 0)
        assert c1.content_width == 5
        assert c1.content_height == 7
        assert c1.total_width == 2 + 5 + 4  # right + width + left margin
        assert c1.total_height == 1 + 7 + 3  # top + height + bottom margin

    def test_render(self, c1):
        c1.place(0, 0)
        c1.render()
        result = c1.renderer.result
        assert result[0] == "C1(0.0, -11.0, 11.0, 0.0)"
def test_paragraph_available_line_content_space():
    # indent = (first, left, right): the first line loses the first+right
    # indents, all following lines lose the left+right indents.
    par = tl.Paragraph(width=12, indent=(0.7, 0.5, 0.9))
    assert par.line_width(first=True) == 12 - 0.7 - 0.9
    assert par.line_width(first=False) == 12 - 0.5 - 0.9
class TestParagraphWithUnrestrictedHeight:
    """Content distribution when the paragraph may grow without height limit."""
    # default values:
    # column width = 10
    # content width = 3
    # space width = 0.5

    @pytest.fixture
    def par(self):
        # Paragraph alignment is not important for content distribution,
        # because the required space is independent from alignment (left,
        # right, center or justified).
        # This may change by implementing regular tabulator support.
        return tl.Paragraph(width=10, renderer=Rect("PAR"))

    def test_empty_paragraph_dimensions(self, par):
        assert par.content_height == 0
        assert par.content_width == 10

    def test_render_empty_paragraph(self, par):
        par.place(0, 0)
        par.render()
        result = par.renderer.result
        assert len(result) == 1
        assert result[0] == "PAR(0.0, 0.0, 10.0, 0.0)"

    def test_distribute_invalid_content(self, par):
        par.append_content(str2cells("ttt"))
        with pytest.raises(ValueError):
            par.distribute_content(height=None)

    def test_distribute_common_case_without_nbsp(self, par):
        # column width = 10
        # content width = 3
        # space width = 0.5
        par.append_content(str2cells("t t t t t t t t t"))
        par.distribute_content(height=None)
        assert lines2str(par) == [
            "t t t",  # width = 3x3 + 2x0.5 = 10
            "t t t",  # remove line breaking spaces!
            "t t t",
        ]

    def test_distribute_with_nbsp(self, par):
        # column width = 10
        # content width = 3
        # space width = 0.5
        par.append_content(str2cells("t t t~t t t"))
        par.distribute_content(height=None)
        assert lines2str(par) == [
            "t t",  # t~t does not fit and goes to next line
            "t~t t",  # width = 3x3 + 2x0.5 = 10
            "t",
        ]

    def test_distribute_too_long_lines(self, par):
        # column width = 10
        par.append_content(str2cells("t t t", content=12))
        par.distribute_content(height=None)
        assert lines2str(par) == ["t", "t", "t"]

    def test_distribute_too_long_lines_including_nbsp(self, par):
        # column width = 10
        par.append_content(str2cells("t~t~t t~t t", content=5))
        par.distribute_content(height=None)
        assert lines2str(par) == [
            "t~t~t",  # width = 3x5 + 2x0.5 = 16 (overflows the column)
            "t~t",  # width = 2x5 + 0.5 = 10.5
            "t",
        ]
class TestParagraphWithRestrictedHeight:
    """Content distribution when the available paragraph height is limited."""
    # default values:
    # column width = 10
    # content width = 3
    # space width = 0.5
    # cap height = 1,
    # line spacing 3-on-5 by 100% = 1.667
    THREE_LINE_SPACE = tl.leading(1, 1) * 2 + 1

    @pytest.fixture
    def par(self):
        # Paragraph alignment is not important for content distribution.
        return tl.Paragraph(width=10, renderer=Rect("PAR"))

    def test_distribute_with_exact_height_match(self, par):
        par.append_content(str2cells("t t t t t t t t t"))
        par.distribute_content(height=self.THREE_LINE_SPACE)
        assert lines2str(par) == [
            "t t t",  # width = 3x3 + 2x0.5 = 10
            "t t t",
            "t t t",
        ]

    def test_distribute_with_one_line_left_over(self, par):
        par.append_content(str2cells("t t t t t t t t t"))
        # Paragraph has only space for 2 lines by reducing the available space
        # by a small amount:
        height = self.THREE_LINE_SPACE - 0.01
        leftover = par.distribute_content(height=height)
        assert lines2str(par) == [
            "t t t",
            "t t t",
        ]
        # The returned leftover paragraph carries the undistributed content.
        leftover.distribute_content(height=1)
        assert lines2str(leftover) == ["t t t"]

    def test_distribute_with_all_lines_left_over(self, par):
        par.append_content(str2cells("t t t~t t t t t t"))
        # Paragraph has no space at all:
        leftover = par.distribute_content(height=0)
        assert lines2str(par) == []
        # None = unrestricted height
        leftover.distribute_content(height=None)
        assert lines2str(leftover) == [
            "t t",
            "t~t t",
            "t t t",
            "t",
        ]
def set_paragraph_content(flow):
    # Fill the paragraph with nine default cells separated by spaces and
    # distribute them with unrestricted height (shared by alignment tests).
    flow.append_content(str2cells("t t t t t t t t t"))
    flow.distribute_content()
class TestParagraphLeftAlignment:
    """Line placement for left-aligned paragraphs, with and without indents."""
    # default values:
    # content width = 3
    # space width = 0.5

    def test_without_indentation(self):
        par = tl.Paragraph(width=12, align=tl.ParagraphAlignment.LEFT)
        set_paragraph_content(par)
        par.place(0, 0)
        for line in par:
            assert line.total_width == 10
            assert line.final_location()[0] == 0

    def test_left_indentation(self):
        par = tl.Paragraph(
            width=12, indent=(0.7, 0.5, 0), align=tl.ParagraphAlignment.LEFT
        )
        set_paragraph_content(par)
        par.place(0, 0)
        lines = list(par)
        # first line:
        assert par.line_width(True) == 12 - 0.7  # available content space
        assert lines[0].final_location()[0] == 0.7
        assert lines[0].total_width == 10
        # remaining lines:
        for line in lines[1:]:
            assert par.line_width(False) == 12 - 0.5  # available content space
            assert line.total_width == 10
            assert line.final_location()[0] == 0.5

    def test_move_tab_to_next_line_if_following_content_does_not_fit(self):
        result = []
        par = tl.Paragraph(width=10, tab_stops=[tl.TabStop(4)])
        par.append_content(str2cells("t#t", content=6, result=result))
        # The tab (#) should move the following text to the tab stop
        # in the next line at position 4.
        par.distribute_content()
        par.place(0, 0)
        par.render()
        assert result[0] == "Text(0.0, -1.0, 6.0, 0.0)"
        assert result[1] == "Text(4.0, -2.7, 10.0, -1.7)", "x1 has to be 4.0"
class TestParagraphAlignment:
    """Line placement for right-aligned paragraphs.

    NOTE(review): despite the generic name this class only covers RIGHT
    alignment (cf. TestParagraphCenterAlignment); consider renaming it to
    TestParagraphRightAlignment.
    """
    # default values:
    # content width = 3
    # space width = 0.5

    def test_without_indentation(self):
        par = tl.Paragraph(width=12, align=tl.ParagraphAlignment.RIGHT)
        set_paragraph_content(par)
        par.place(0, 0)
        for line in par:
            assert line.total_width == 10
            assert line.final_location()[0] == 2

    def test_right_indentation(self):
        par = tl.Paragraph(
            width=12, indent=(0.5, 0.5, 0.5), align=tl.ParagraphAlignment.RIGHT
        )
        set_paragraph_content(par)
        par.place(0, 0)
        for line in par:
            assert line.total_width == 10
            assert line.final_location()[0] == 1.5  # 12 - 0.5 - 10
class TestParagraphCenterAlignment:
# default values:
# content width = 3
# space width = 0.5
def test_without_indentation(self):
    # Line width 10 in a 12-wide paragraph leaves 2 units, split evenly
    # for centered alignment.
    par = tl.Paragraph(width=12, align=tl.ParagraphAlignment.CENTER)
    set_paragraph_content(par)
    par.place(0, 0)
    for line in par:
        assert line.total_width == 10
        assert line.final_location()[0] == 1
def test_left_indentation(self):
par = tl.Paragraph(
width=12, indent=(0.5, 0.5, 0), align=tl.ParagraphAlignment.CENTER
)
set_paragraph_content(par)
par.place(0, 0)
| |
self.numTimes = C.c_int(0)
else:
self.__ex_get_info()
self.numTimes = C.c_int(
self.__ex_inquire_int(ex_inquiry("EX_INQ_TIME")))
def copy(self, fileName):
    """
    copy to a new database

    e.copy(fileName) -> copies Exodus database to fileName and returns
    this copy as a new exodus object
    """
    # Create an empty database with the same global sizes, then copy the
    # bulk data across via the private helper.
    new = exodus(fileName,
                 mode="w",
                 title=self.title(),
                 numDims=self.num_dimensions(),
                 numNodes=self.num_nodes(),
                 numElems=self.num_elems(),
                 numBlocks=self.num_blks(),
                 numNodeSets=self.num_node_sets(),
                 numSideSets=self.num_side_sets())
    self.__copy_file(new)
    return new
#
# general info
#
# --------------------------------------------------------------------
def title(self):
    """Return the title string of the open exodus database."""
    title_buf = self.Title
    return title_buf.value
# --------------------------------------------------------------------
def version_num(self):
    """Return the exodus version number formatted with two decimals."""
    ver = self.version.value
    return "%1.2f" % ver
# --------------------------------------------------------------------
def put_info(self, Title, numDim, numNodes, numElem, numElemBlk,
             numNodeSets, numSideSets):
    """Write the initialization parameters (title, dimension and entity
    counts) into the exodus file; always returns True."""
    params = [Title, numDim, numNodes, numElem, numElemBlk,
              numNodeSets, numSideSets]
    self.__ex_put_info(params)
    return True
# --------------------------------------------------------------------
def get_qa_records(self):
    """Return the list of QA records stored in the database.

    Each record is a length-4 tuple of strings:
      1) the analysis code name
      2) the analysis code descriptor, e.g. version
      3) the analysis data
      4) the analysis time
    """
    return self.__ex_get_qa()
# --------------------------------------------------------------------
def put_qa_records(self, records):
    """Store a list of QA records in the exodus database.

    Each record must be a length-4 tuple of strings:
      1) the analysis code name
      2) the analysis code descriptor, e.g. version
      3) the analysis data
      4) the analysis time

    Returns True on success, False otherwise.
    """
    for rec in records:
        # NOTE: asserts are stripped under `python -O`; they only guard
        # against malformed records during development.
        assert len(rec) == 4
        for recEntry in rec:
            assert len(str(recEntry)) < MAX_STR_LENGTH
    # Normalize the low-level call's result to a plain bool instead of
    # an explicit if/else returning True/False.
    return bool(self.__ex_put_qa(records))
# --------------------------------------------------------------------
def num_info_records(self):
    """Return the number of info records stored in the exodus file."""
    count = self.__ex_inquire_int(ex_inquiry("EX_INQ_INFO"))
    return int(count)
# --------------------------------------------------------------------
def get_info_records(self):
    """Return the info records as a list of strings, one line each
    (e.g. a line of an input deck)."""
    return self.__ex_get_info_recs()
# --------------------------------------------------------------------
def put_info_records(self, info):
    """Store a list of info records; each entry is one line of info,
    e.g. a line of an input deck.

    Lines longer than MAX_LINE_LENGTH are stored incomplete (a warning
    is printed once). Returns True on success, False otherwise.
    """
    for rec in info:
        if len(str(rec)) > MAX_LINE_LENGTH:
            print("WARNING: max line length reached for one or more info "
                  "records;")
            print(
                "         info stored to exodus file is incomplete for "
                "these records")
            break
    # Normalize the low-level call's result to a plain bool instead of
    # an explicit if/else returning True/False.
    return bool(self.__ex_put_info_recs(info))
# --------------------------------------------------------------------
def get_sierra_input(self, inpFileName=None):
    """Extract the Sierra input deck embedded in the info records.

    If inpFileName is given, the deck lines are written to that file
    (e.g. myInputDeck.i) and an empty string is returned; otherwise the
    list of deck lines is returned.
    """
    info_recs = self.__ex_get_info_recs()
    sierra_inp = []
    begin = False
    for rec in info_recs:
        vals = rec.split()
        if not begin:  # have not reached Sierra block
            if len(vals) >= 2 and vals[0].lower(
            ) == 'begin' and vals[1].lower() == "sierra":
                begin = True
        if begin:  # inside Sierra block
            sierra_inp.append(rec)
            if len(rec) > MAX_LINE_LENGTH:
                print("WARNING: max line length reached for one or more "
                      "input lines;")
                print("         input data might be incomplete for these "
                      "lines")
                break
            if len(vals) >= 2 and vals[0].lower(
            ) == "end" and vals[1].lower() == "sierra":
                break  # end of Sierra block
    if inpFileName:
        # Use a context manager so the file handle is closed even if a
        # write fails (the original open()/close() pair could leak it).
        with open(inpFileName, "w") as fd:
            for fileLine in sierra_inp:
                fd.write(fileLine + "\n")
        return ""
    return sierra_inp
#
# time steps
#
# --------------------------------------------------------------------
def num_times(self):
    """Return the number of time steps stored in the exodus file."""
    steps = self.numTimes
    return steps.value
# --------------------------------------------------------------------
def get_times(self):
    """
    e.get_times() -> get list of times in exodus file
    """
    # With no stored time steps, reset the cache instead of calling the
    # C layer on an empty record.
    if self.numTimes.value == 0:
        self.times = []
    else:
        self.__ex_get_all_times()
    # Optionally convert the ctypes array to a numpy array.
    if self.use_numpy:
        self.times = ctype_to_numpy(self, self.times)
    return self.times
# --------------------------------------------------------------------
def put_time(self, step, value):
    """Write time `value` at time step `step`, refresh the cached step
    count, and return True."""
    self.__ex_put_time(step, value)
    count = self.__ex_inquire_int(ex_inquiry("EX_INQ_TIME"))
    self.numTimes = C.c_int(count)
    return True
#
# coordinate system
#
# --------------------------------------------------------------------
def num_dimensions(self):
    """Return the spatial dimension count of the exodus file."""
    dims = self.numDim
    return dims.value
# --------------------------------------------------------------------
def get_coord_names(self):
    """Return the list of coordinate axis names stored in the file."""
    return self.__ex_get_coord_names()
# --------------------------------------------------------------------
def put_coord_names(self, names):
    """Store the list of coordinate axis names in the exodus file."""
    self.__ex_put_coord_names(names)
#
# nodes
#
# --------------------------------------------------------------------
def num_nodes(self):
    """Return the number of nodes in the exodus file."""
    nodes = self.numNodes
    return nodes.value
# --------------------------------------------------------------------
def get_coords(self):
    """
    e.get_coords()
    -> get tuple of lists of coordinates (X,Y,Z) in exodus file
    """
    # Populates self.coordsX/Y/Z via the private C-layer call.
    self.__ex_get_coord()
    # Optionally convert each axis array to a numpy array.
    if self.use_numpy:
        self.coordsX = ctype_to_numpy(self, self.coordsX)
        self.coordsY = ctype_to_numpy(self, self.coordsY)
        self.coordsZ = ctype_to_numpy(self, self.coordsZ)
    return (self.coordsX, self.coordsY, self.coordsZ)
# --------------------------------------------------------------------
def get_coord(self, i):
    """Return the (x, y, z) coordinates of node `i`."""
    xs, ys, zs = self.__ex_get_n_coord(i, 1)
    return (xs[0], ys[0], zs[0])
# --------------------------------------------------------------------
def put_coords(self, xCoords, yCoords, zCoords):
    """Write the full (x, y, z) coordinate arrays and return True."""
    self.__ex_put_coord(xCoords, yCoords, zCoords)
    return True
# --------------------------------------------------------------------
def get_node_num_map(self):
    """**DEPRECATED** -- use get_node_id_map() instead.

    Return the list mapping local node ids to global ids.
    """
    return self.__ex_get_node_num_map()
# --------------------------------------------------------------------
def get_node_id_map(self):
    """Return the list mapping local node ids to global ids."""
    id_map = self.__ex_get_id_map(ex_entity_type("EX_NODE_MAP"),
                                  ex_inquiry("EX_INQ_NODES"))
    if self.use_numpy:
        id_map = self.np.array(id_map)
    return id_map
# --------------------------------------------------------------------
def put_node_id_map(self, map):
    """Write the local-to-global node id map into the exodus file.

    The parameter name `map` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    return self.__ex_put_id_map(ex_entity_type("EX_NODE_MAP"),
                                ex_inquiry("EX_INQ_NODES"), map)
# --------------------------------------------------------------------
def get_node_variable_names(self):
    """Return the list of nodal variable names ([] if none exist)."""
    count = self.__ex_get_var_param('n')
    if count.value == 0:
        return []
    return self.__ex_get_var_names("n")
# --------------------------------------------------------------------
def get_node_variable_number(self):
    """Return the number of nodal variables in the exodus file."""
    count = self.__ex_get_variable_param(ex_entity_type("EX_NODAL"))
    return count.value
# --------------------------------------------------------------------
def set_node_variable_number(self, number):
    """Declare the number of nodal variables and return True."""
    self.__ex_put_variable_param(ex_entity_type("EX_NODAL"), number)
    return True
# --------------------------------------------------------------------
def put_node_variable_name(self, name, index):
    """Register nodal variable `name` at position `index`; returns True.

    A duplicate name is allowed but flagged with a warning; an index
    beyond the declared variable count raises an Exception.
    """
    existing = self.get_node_variable_names()
    if name in existing:
        print("WARNING:node variable \"", name, "\" already exists.")
    if index > len(existing):
        raise Exception("ERROR: variable index out of range.")
    self.__ex_put_variable_name(ex_entity_type("EX_NODAL"), index, name)
    return True
# --------------------------------------------------------------------
def get_node_variable_values(self, name, step):
    """Return the values of nodal variable `name` at time step `step`."""
    # Variable ids are 1-based: position in the name list + 1.
    var_id = self.get_node_variable_names().index(name) + 1
    values = self.__ex_get_var(step, ex_entity_type("EX_NODAL"), var_id,
                               0, self.num_nodes())
    if self.use_numpy:
        values = ctype_to_numpy(self, values)
    return values
# --------------------------------------------------------------------
def put_node_variable_values(self, name, step, values):
    """Write `values` for nodal variable `name` at step; returns True."""
    # Variable ids are 1-based: position in the name list + 1.
    var_id = self.get_node_variable_names().index(name) + 1
    self.__ex_put_var(step, ex_entity_type("EX_NODAL"), var_id,
                      0, self.num_nodes(), values)
    return True
#
# elements
#
# --------------------------------------------------------------------
def num_elems(self):
    """Return the number of elements in the exodus file."""
    elems = self.numElem
    return elems.value
# --------------------------------------------------------------------
def get_elem_num_map(self):
    """**DEPRECATED** -- use get_elem_id_map() instead.

    Return the list mapping local element ids to global ids.
    """
    return self.__ex_get_elem_num_map()
# --------------------------------------------------------------------
def get_elem_id_map(self):
    """Return the list mapping local element ids to global ids."""
    id_map = self.__ex_get_id_map(ex_entity_type("EX_ELEM_MAP"),
                                  ex_inquiry("EX_INQ_ELEM"))
    if self.use_numpy:
        id_map = self.np.array(id_map)
    return id_map
# --------------------------------------------------------------------
def put_elem_id_map(self, map):
    """Write the local-to-global element id map into the exodus file.

    The parameter name `map` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    return self.__ex_put_id_map(ex_entity_type("EX_ELEM_MAP"),
                                ex_inquiry("EX_INQ_ELEM"), map)
# --------------------------------------------------------------------
def get_elem_order_map(self):
    """Return the optimized element ordering map."""
    order_map = self.__ex_get_elem_order_map()
    if self.use_numpy:
        order_map = ctype_to_numpy(self, order_map)
    return order_map
#
# element blocks
#
# --------------------------------------------------------------------
def num_blks(self):
    """Return the number of element blocks in the exodus file."""
    blocks = self.numElemBlk
    return blocks.value
# --------------------------------------------------------------------
def get_elem_blk_ids(self):
    """Return the list of element block ids in the exodus file."""
    self.__ex_get_elem_blk_ids()  # populates self.elemBlkIds
    blk_ids = self.elemBlkIds
    if self.use_numpy:
        blk_ids = ctype_to_numpy(self, blk_ids)
    return blk_ids
# --------------------------------------------------------------------
def get_elem_blk_name(self, id):
"""
e.get_elem_blk_name(id) -> get element block name for block "id" in
exodus file
| |
# Repository: wikimedia/operations-software-python-poolcounter
"""Poolcounter client implementation."""
import logging
import socket
from enum import Enum
from typing import Callable, Dict, Optional # pylint: disable=unused-import
from poolcounter.ring import HashRing
class PoolcounterError(Exception):
    """Base exception for poolcounter client errors."""
class PoolcounterQueueError(PoolcounterError):
    """Raised when poolcounter has too many workers waiting in queue."""
class PoolcounterTimeoutError(PoolcounterError):
    """Raised when poolcounter reports a timeout waiting for a lock."""
class RequestType(Enum):
    """Wire-protocol commands understood by the poolcounter server."""

    RELEASE = 'RELEASE %s\n'
    LOCK_EXC = 'ACQ4ME %s %d %d %d\n'
    LOCK_ANY = 'ACQ4ANY %s %d %d %d\n'

    @classmethod
    def command(cls, action: 'RequestType', key: str,
                concurrency: int, max_queue: int, timeout: int) -> str:
        """Format `action` into its wire string.

        For parameter definitions, see the Request object signature.
        """
        # RELEASE takes only the key; lock commands take all parameters.
        if action is cls.RELEASE:
            return action.value % key
        return action.value % (key, concurrency, max_queue, timeout)
class Request:
    """Encapsulates a single poolcounter request."""

    # Default lock parameters, used when the caller passes None.
    MAX_QUEUE = 1000
    CONCURRENCY = 1
    TIMEOUT = 1

    # Legacy integer action codes and format strings, superseded by
    # RequestType; kept only for backward compatibility with old callers.
    RELEASE = 0
    LOCK_EXC = 1
    LOCK_ANY = 2
    FMTS = [
        'RELEASE %s\n',
        'ACQ4ME %s %d %d %d\n',
        'ACQ4ANY %s %d %d %d\n'
    ]

    def __init__(
            self,
            action: RequestType,
            key: str,
            concurrency: Optional[int] = None,
            max_queue: Optional[int] = None,
            timeout: Optional[int] = None):
        """Initialize a request object.

        Arguments:
            action (poolcounter.client.RequestType): the command to send
            key (str): the key for the lock
            concurrency (int): the maximum number of workers allowed to run
                at the same time (default 1)
            max_queue (int): the maximum number of objects that can wait in
                queue (default 1000)
            timeout (int): the maximum time to wait to acquire the lock
                (default 1)

        Raises:
            ValueError: if the action is not recognized
        """
        self.key = key
        if concurrency is None:
            concurrency = self.CONCURRENCY
        if max_queue is None:
            max_queue = self.MAX_QUEUE
        if timeout is None:
            timeout = self.TIMEOUT
        try:
            self.command = RequestType.command(
                action, key, concurrency, max_queue, timeout)
        except AttributeError as exc:
            # Chain the original failure for easier debugging (PEP 3134);
            # the original raise lost the AttributeError context.
            raise ValueError(
                'Invalid action code requested: {}'.format(action)) from exc

    def wire(self) -> bytes:
        """Return the wire format of the request.

        Returns:
            bytes: the raw command to send over the socket (utf-8)
        """
        return bytes(self.command, 'utf-8')
class Response:
    """Describes a poolcounter server response."""

    # Response messages defined by the poolcounter protocol.
    LOCKED = 'LOCKED'
    NOT_LOCKED = 'NOT_LOCKED'
    DONE = 'DONE'
    QUEUE_FULL = 'QUEUE_FULL'
    TIMEOUT = 'TIMEOUT'
    LOCK_HELD = 'LOCK_HELD'
    RELEASED = 'RELEASED'

    def __init__(self, key: str, msg: str):
        """Initialize a response.

        Arguments:
            key (str): the key we've requested a lock for
            msg (str): the response we got from the server

        Raises:
            PoolcounterError: if an error is encountered.
        """
        self.key = key
        self.msg = msg
        # Fail fast on error responses so callers only ever hold
        # successful Response objects.
        if msg.startswith('ERROR '):
            raise PoolcounterError(
                'Error talking to poolcounter: {}'.format(msg[6:]))
        if msg == Response.TIMEOUT:
            raise PoolcounterTimeoutError(
                'Too much time waiting for the lock for {}'.format(key))
        if msg == Response.QUEUE_FULL:
            raise PoolcounterQueueError(
                'Too many workers trying to acquire a lock for {}'.format(key))

    def status_is(self, status: str) -> bool:
        """Check whether the response status matches the expected one.

        Arguments:
            status (str): The expected status

        Returns:
            bool: whether the status matches or not.
        """
        return self.msg == status
class Server:
    """Object encapsulating a poolcounter backend connection."""

    # Seconds allowed for the initial TCP connection to be established.
    connection_timeout = 1

    def __init__(self, fqdn: str, port: int = 7531,
                 weight: int = 1, label: Optional[str] = None) -> None:
        """Initialize the server.

        Arguments:
            fqdn (str): the fully qualified domain name or IP of the server
            port (int): The port to connect to. The default (7531) should be ok.
            weight (int): The weight of the server in the consistent hash ring
            label (str): The identifier of the node in the consistent hash ring.
                Defaults to the fqdn if none is provided.
        """
        if label is None:
            label = fqdn
        self.label = label
        self.fqdn = fqdn
        # This raises an exception if the fqdn can't be resolved
        self.ipaddr = socket.gethostbyname(fqdn)
        self.port = port
        self.weight = weight
        # Lazily-opened connection to the backend; see _connect().
        self._stream = None  # type: Optional[socket.socket]
        # Whether this connection currently holds a poolcounter lock.
        self.has_lock = False

    def __str__(self) -> str:
        """String representation of the server.

        Returns:
            str: the string "server.label (server.ip:server.port)"
        """
        return '{label} ({ip}:{port})'.format(label=self.label, ip=self.ipaddr, port=self.port)

    def get_lock(self, lock_type: RequestType, key: str, **kwargs) -> Response:
        """Get a lock, either exclusive or shared.

        Arguments:
            lock_type (poolcounter.client.RequestType): lock type, either RequestType.LOCK_EXC
                or RequestType.LOCK_ANY
            key (str): the poolcounter key
            **kwargs: additional arguments to build the Request object

        Returns:
            poolcounter.client.Response the response object

        Raises:
            PoolcounterError: if an error is encountered in the response, or if a lock is held
        """
        if self.has_lock:
            raise PoolcounterError('You cannot acquire a new lock while holding one.')
        req = Request(lock_type, key, **kwargs)
        resp = self._command(req)
        if resp.status_is(Response.LOCKED):
            # Got the lock
            self.has_lock = True
        elif resp.status_is(Response.DONE):
            # The lock was acquired and completed by another instance
            self.has_lock = False
        elif resp.status_is(Response.LOCK_HELD):
            # We own another lock, not this one.
            self.has_lock = True
            raise PoolcounterError('You cannot acquire a new lock while holding one.')
        return resp

    def lock_release(self, key: str) -> Response:
        """Releases a lock if previously acquired.

        Arguments:
            key (str): the lock key to release

        Returns:
            poolcounter.Response: the response object

        Raises:
            PoolcounterError: if no connection is found, or if the response contains an error
        """
        # We can't release a lock from a nonexistent connection
        if not self._stream:
            raise PoolcounterError('Trying to release a lock without a connection')
        req = Request(RequestType.RELEASE, key)
        resp = self._command(req)
        if (resp.status_is(Response.RELEASED)
                or resp.status_is(Response.NOT_LOCKED)):
            self.has_lock = False
        return resp

    def shutdown(self) -> None:
        """Shuts down the connection to the server."""
        if self._stream is not None:
            self._stream.close()
            self._stream = None
        self.has_lock = False

    def _command(self, req: Request) -> Response:
        """Send a request over the (lazily opened) connection, return the Response."""
        if self._stream is None:
            self._stream = self._connect()
        try:
            # sendall() guarantees the whole command is transmitted; plain
            # send() may write only part of the buffer, silently corrupting
            # the protocol exchange.
            self._stream.sendall(req.wire())
            return Response(req.key, self._stream.recv(4096).decode('utf-8').strip())
        except socket.error as e:
            self.shutdown()
            raise PoolcounterError('Error communicating with the server: {}'.format(e))

    def _connect(self) -> socket.socket:
        """Connect to the server, return the connection socket."""
        try:
            stream = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            stream.settimeout(self.connection_timeout)
            stream.connect((self.ipaddr, self.port))
            # Restore blocking mode for the request/response exchange.
            stream.settimeout(None)
            return stream
        except ConnectionRefusedError:
            stream.close()
            raise PoolcounterError("Cannot connect to server {fqdn}:{port}".format(
                fqdn=self.fqdn, port=self.port))
        except socket.timeout:
            # NOTE: socket.timeout is only an alias of TimeoutError from
            # Python 3.10; catching socket.timeout works on all versions,
            # while the previous `except TimeoutError` missed connect
            # timeouts on older interpreters.
            stream.close()
            raise PoolcounterTimeoutError("Connection to {fqdn}:{port} timed out".format(
                fqdn=self.fqdn, port=self.port))
class PoolcounterClient:
"""Class used to interact with the Poolcounter server."""
lock_release_retry = 4 # Number of times we'll try to release a lock before giving up.
def __init__(self, logger: logging.Logger) -> None:
    """Set up an empty client with no registered backends.

    Arguments:
        logger (logging.Logger): the logger to use for this client.
    """
    self.logger = logger
    # Registered backend servers, indexed by their ring label.
    self.backends = {}  # type: Dict[str, 'Server']
    # Consistent-hash ring used to pick the backend for a given key.
    self.ring = HashRing()
def add_backend(self, server: Server) -> None:
    """Register a backend server with this client.

    Arguments:
        server (poolcounter.client.Server): a Server instance to add to the pool.

    Raises:
        poolcounter.client.PoolcounterError: if the label is already present.
        ValueError: if a collision is found in the hash ring.
    """
    label = server.label
    # Guard against double registration under the same label.
    if label in self.backends:
        raise PoolcounterError(
            "A server with label '{label}' is already present."
            " Please call remove_backend() first".format(label=label)
        )
    self.backends[label] = server
    self.ring.add_node(label, server.weight)
def remove_backend(self, label: str) -> None:
    """Unregister a backend from the pool.

    Arguments:
        label (str): The label the node was added with.

    Raises:
        poolcounter.client.PoolcounterError: if the label can't be found.
    """
    try:
        server = self.backends.pop(label)
    except KeyError:
        raise PoolcounterError("No backend with label '{label}'".format(label=label))
    del server  # only needed the removal; the ring is keyed by label
    self.ring.del_node(label)
def default_errback(self, err) -> bool:
    """Fallback error handler: log the exception and report failure.

    Arguments:
        err: the exception raised while running the command.

    Returns:
        bool: False
    """
    # Lazy %-formatting keeps the logging call cheap when suppressed.
    self.logger.exception("Error running command with poolcounter: %s", err)
    return False
def run(self, lock_type: RequestType, key: str, callback: Callable,
*args, errback: Optional[Callable] = None, **kwargs) -> bool:
"""Run a callable when a lock is acquired.
Example:
# Make a post to a remote endpoint, limiting concurrency
def callback(msg, priority):
requests.post('https://example.com/some/endpoint',
params={'msg': msg, 'retcode': priority})
if client.run(RequestType.LOCK_EXC, 'example.com::sendMsg', callback, 'test', 1,
concurrency: 2, max_queue: 1000):
print('Message sent!')
Arguments:
lock_type (poolcounter.client.RequestType): lock type, either RequestType.LOCK_EXC
or RequestType.LOCK_ANY
key (str): poolcounter key to use
callback (Callable): callable to execute (with *args) if the lock is acquired
*args: arguments to pass to the callback
errback (Callable): callable to execute if the lock cannot be acquired (with the error
as argument)
concurrency (int): the maximum number of workers allowed to run at the same time
max_queue (int): the maximum number of objects that can wait in queue
timeout (int): the maximum time to wait to acquire the lock.
Returns:
bool: True upon successful exection
Raises:
PoolcounterError if there was an error releasing the lock
"""
if errback is None:
errback = self.default_errback
backend = self.backend_for(key)
| |
# -*- coding: UTF-8 -*-
# the comment above is for the Python interpreter, there are Unicode
# characters written straight into this source file
import json
import sys
import os
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
from portality.ordereddict import OrderedDict
else:
from collections import OrderedDict
# countries
# Load the ISO country data shipped next to this package: a JSON object
# mapping ISO 3166-1 alpha-2 codes to per-country metadata dicts (name,
# currency fields, ...).
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", 'country-codes.json'), 'rb') as f:
    countries = json.loads(f.read())

# Re-key into an OrderedDict sorted by display name, so select boxes built
# from these tables list countries alphabetically.
countries_dict = OrderedDict(sorted(countries.items(), key=lambda x: x[1]['name']))
countries = countries_dict.items()

# (code, name) pairs for form dropdowns; the leading ('', '') entry is the
# blank "no selection" option.  The *_index list holds just the codes.
country_options = [('','')]
country_options_two_char_code_index = []

CURRENCY_DEFAULT = ''
# Currency dropdown options, again with a blank default entry.
currency_options = [(CURRENCY_DEFAULT, CURRENCY_DEFAULT)]
currency_options_code_index = []
currency_name_opts = []

for code, country_info in countries: # FIXME: a bit of a mess - would be better to have an object that just gave us the answers on demand
    country_options.append((code, country_info['name']))
    country_options_two_char_code_index.append(code)
    # Not every country entry carries currency data; skip those that don't.
    if 'currency_alphabetic_code' in country_info and 'currency_name' in country_info:
        if country_info['currency_alphabetic_code'] not in currency_options_code_index:
            # prevent duplicates in the currency options by checking if
            # that currency has already been added - multiple countries
            # use the same currency after all (GBP, EUR..)
            currency_options.append(
                (
                    country_info['currency_alphabetic_code'],
                    country_info['currency_alphabetic_code'] + ' - ' + country_info['currency_name']
                )
            )
            currency_name_opts.append(
                (
                    country_info['currency_alphabetic_code'],
                    country_info['currency_name']
                )
            )
            currency_options_code_index.append(country_info['currency_alphabetic_code'])

# Lookup tables: code -> "CODE - Currency Name" and code -> currency name.
currencies_dict = dict(currency_options)
currency_name_map = dict(currency_name_opts)
# languages
languages_iso639_2 = [
[u"aar", u"", u"aa", u"Afar", u"afar"],
[u"abk", u"", u"ab", u"Abkhazian", u"abkhaze"],
[u"ace", u"", u"", u"Achinese", u"aceh"],
[u"ach", u"", u"", u"Acoli", u"acoli"],
[u"ada", u"", u"", u"Adangme", u"adangme"],
[u"ady", u"", u"", u"Adyghe; Adygei", u"adyghé"],
[u"afa", u"", u"", u"Afro-Asiatic languages", u"afro-asiatiques, langues"],
[u"afh", u"", u"", u"Afrihili", u"afrihili"],
[u"afr", u"", u"af", u"Afrikaans", u"afrikaans"],
[u"ain", u"", u"", u"Ainu", u"aïnou"],
[u"aka", u"", u"ak", u"Akan", u"akan"],
[u"akk", u"", u"", u"Akkadian", u"akkadien"],
[u"alb", u"sqi", u"sq", u"Albanian", u"albanais"],
[u"ale", u"", u"", u"Aleut", u"aléoute"],
[u"alg", u"", u"", u"Algonquian languages", u"algonquines, langues"],
[u"alt", u"", u"", u"Southern Altai", u"altai du Sud"],
[u"amh", u"", u"am", u"Amharic", u"amharique"],
[u"ang", u"", u"", u"English, Old (ca.450-1100)", u"anglo-saxon (ca.450-1100)"],
[u"anp", u"", u"", u"Angika", u"angika"],
[u"apa", u"", u"", u"Apache languages", u"apaches, langues"],
[u"ara", u"", u"ar", u"Arabic", u"arabe"],
[u"arc", u"", u"", u"Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)", u"araméen d'empire (700-300 BCE)"],
[u"arg", u"", u"an", u"Aragonese", u"aragonais"],
[u"arm", u"hye", u"hy", u"Armenian", u"arménien"],
[u"arn", u"", u"", u"Mapudungun; Mapuche", u"mapudungun; mapuche; mapuce"],
[u"arp", u"", u"", u"Arapaho", u"arapaho"],
[u"art", u"", u"", u"Artificial languages", u"artificielles, langues"],
[u"arw", u"", u"", u"Arawak", u"arawak"],
[u"asm", u"", u"as", u"Assamese", u"assamais"],
[u"ast", u"", u"", u"Asturian; Bable; Leonese; Asturleonese", u"asturien; bable; léonais; asturoléonais"],
[u"ath", u"", u"", u"Athapascan languages", u"athapascanes, langues"],
[u"aus", u"", u"", u"Australian languages", u"australiennes, langues"],
[u"ava", u"", u"av", u"Avaric", u"avar"],
[u"ave", u"", u"ae", u"Avestan", u"avestique"],
[u"awa", u"", u"", u"Awadhi", u"awadhi"],
[u"aym", u"", u"ay", u"Aymara", u"aymara"],
[u"aze", u"", u"az", u"Azerbaijani", u"azéri"],
[u"bad", u"", u"", u"Banda languages", u"banda, langues"],
[u"bai", u"", u"", u"Bamileke languages", u"bamiléké, langues"],
[u"bak", u"", u"ba", u"Bashkir", u"bachkir"],
[u"bal", u"", u"", u"Baluchi", u"baloutchi"],
[u"bam", u"", u"bm", u"Bambara", u"bambara"],
[u"ban", u"", u"", u"Balinese", u"balinais"],
[u"baq", u"eus", u"eu", u"Basque", u"basque"],
[u"bas", u"", u"", u"Basa", u"basa"],
[u"bat", u"", u"", u"Baltic languages", u"baltes, langues"],
[u"bej", u"", u"", u"Beja; Bedawiyet", u"bedja"],
[u"bel", u"", u"be", u"Belarusian", u"biélorusse"],
[u"bem", u"", u"", u"Bemba", u"bemba"],
[u"ben", u"", u"bn", u"Bengali", u"bengali"],
[u"ber", u"", u"", u"Berber languages", u"berbères, langues"],
[u"bho", u"", u"", u"Bhojpuri", u"bhojpuri"],
[u"bih", u"", u"bh", u"Bihari languages", u"langues biharis"],
[u"bik", u"", u"", u"Bikol", u"bikol"],
[u"bin", u"", u"", u"Bini; Edo", u"bini; edo"],
[u"bis", u"", u"bi", u"Bislama", u"bichlamar"],
[u"bla", u"", u"", u"Siksika", u"blackfoot"],
[u"bnt", u"", u"", u"Bantu (Other)", u"bantoues, autres langues"],
[u"bos", u"", u"bs", u"Bosnian", u"bosniaque"],
[u"bra", u"", u"", u"Braj", u"braj"],
[u"bre", u"", u"br", u"Breton", u"breton"],
[u"btk", u"", u"", u"Batak languages", u"batak, langues"],
[u"bua", u"", u"", u"Buriat", u"bouriate"],
[u"bug", u"", u"", u"Buginese", u"bugi"],
[u"bul", u"", u"bg", u"Bulgarian", u"bulgare"],
[u"bur", u"mya", u"my", u"Burmese", u"birman"],
[u"byn", u"", u"", u"Blin; Bilin", u"blin; bilen"],
[u"cad", u"", u"", u"Caddo", u"caddo"],
[u"cai", u"", u"", u"Central American Indian languages", u"amérindiennes de L'Amérique centrale, langues"],
[u"car", u"", u"", u"G<NAME>", u"karib; galibi; carib"],
[u"cat", u"", u"ca", u"Catalan; Valencian", u"catalan; valencien"],
[u"cau", u"", u"", u"Caucasian languages", u"caucasiennes, langues"],
[u"ceb", u"", u"", u"Cebuano", u"cebuano"],
[u"cel", u"", u"", u"Celtic languages", u"celtiques, langues; celtes, langues"],
[u"cha", u"", u"ch", u"Chamorro", u"chamorro"],
[u"chb", u"", u"", u"Chibcha", u"chibcha"],
[u"che", u"", u"ce", u"Chechen", u"tchétchène"],
[u"chg", u"", u"", u"Chagatai", u"djaghataï"],
[u"chi", u"zho", u"zh", u"Chinese", u"chinois"],
[u"chk", u"", u"", u"Chuukese", u"chuuk"],
[u"chm", u"", u"", u"Mari", u"mari"],
[u"chn", u"", u"", u"Chinook jargon", u"chinook, jargon"],
[u"cho", u"", u"", u"Choctaw", u"choctaw"],
[u"chp", u"", u"", u"Chipewyan; D<NAME>", u"chipewyan"],
[u"chr", u"", u"", u"Cherokee", u"cherokee"],
[u"chu", u"", u"cu", u"Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic", u"slavon d'église; vieux slave; slavon liturgique; vieux bulgare"],
[u"chv", u"", u"cv", u"Chuvash", u"tchouvache"],
[u"chy", u"", u"", u"Cheyenne", u"cheyenne"],
[u"cmc", u"", u"", u"Chamic languages", u"chames, langues"],
[u"cop", u"", u"", u"Coptic", u"copte"],
[u"cor", u"", u"kw", u"Cornish", u"cornique"],
[u"cos", u"", u"co", u"Corsican", u"corse"],
[u"cpe", u"", u"", u"Creoles and pidgins, English based", u"créoles et pidgins basés sur l'anglais"],
[u"cpf", u"", u"", u"Creoles and pidgins, French-based u", u"créoles et pidgins basés sur le français"],
[u"cpp", u"", u"", u"Creoles and pidgins, Portuguese-based u", u"créoles et pidgins basés sur le portugais"],
[u"cre", u"", u"cr", u"Cree", u"cree"],
[u"crh", u"", u"", u"Cri<NAME>; Crimean Turkish", u"tatar de Crimé"],
[u"crp", u"", u"", u"Creoles and pidgins u", u"créoles et pidgins"],
[u"csb", u"", u"", u"Kashubian", u"kachoube"],
[u"cus", u"", u"", u"Cushitic languages", u"couchitiques, langues"],
[u"cze", u"ces", u"cs", u"Czech", u"tchèque"],
[u"dak", u"", u"", u"Dakota", u"dakota"],
[u"dan", u"", u"da", u"Danish", u"danois"],
[u"dar", u"", u"", u"Dargwa", u"dargwa"],
[u"day", u"", u"", u"Land Dayak languages", u"dayak, langues"],
[u"del", u"", u"", u"Delaware", u"delaware"],
[u"den", u"", u"", u"Slave (Athapascan)", u"esclave (athapascan)"],
[u"dgr", u"", u"", u"Dogrib", u"dogrib"],
[u"din", u"", u"", u"Dinka", u"dinka"],
[u"div", u"", u"dv", u"Divehi; Dhivehi; Maldivian", u"maldivien"],
[u"doi", u"", u"", u"Dogri", u"dogri"],
[u"dra", u"", u"", u"Dravidian languages", u"dravidiennes, langues"],
[u"dsb", u"", u"", u"Lower Sorbian", u"bas-sorabe"],
[u"dua", u"", u"", u"Duala", u"douala"],
[u"dum", u"", u"", u"Dutch, Middle (ca.1050-1350)", u"néerlandais moyen (ca. 1050-1350)"],
[u"dut", u"nld", u"nl", u"Dutch; Flemish", u"néerlandais; flamand"],
[u"dyu", u"", u"", u"Dyula", u"dioula"],
[u"dzo", u"", u"dz", u"Dzongkha", u"dzongkha"],
[u"efi", u"", u"", u"Efik", u"efik"],
[u"egy", u"", u"", u"Egyptian (Ancient)", u"égyptien"],
[u"eka", u"", u"", u"Ekajuk", u"ekajuk"],
[u"elx", u"", u"", u"Elamite", u"élamite"],
[u"eng", u"", u"en", u"English", u"anglais"],
[u"enm", u"", u"", u"English, Middle (1100-1500)", u"anglais moyen (1100-1500)"],
[u"epo", u"", u"eo", u"Esperanto", u"espéranto"],
[u"est", u"", u"et", u"Estonian", u"estonien"],
[u"ewe", u"", u"ee", u"Ewe", u"éwé"],
[u"ewo", u"", u"", u"Ewondo", u"éwondo"],
[u"fan", u"", u"", u"Fang", u"fang"],
[u"fao", u"", u"fo", u"Faroese", u"féroïen"],
[u"fat", u"", u"", u"Fanti", u"fanti"],
[u"fij", u"", u"fj", u"Fijian", u"fidjien"],
[u"fil", u"", u"", u"Filipino; Pilipino", u"filipino; pilipino"],
[u"fin", u"", u"fi", u"Finnish", u"finnois"],
[u"fiu", u"", u"", u"Finno-Ugrian languages", u"finno-ougriennes, langues"],
[u"fon", u"", u"", u"Fon", u"fon"],
[u"fre", u"fra", u"fr", u"French", u"français"],
[u"frm", u"", u"", u"French, Middle (ca.1400-1600)", u"français moyen (1400-1600)"],
[u"fro", u"", u"", u"French, Old (842-ca.1400)", u"français ancien (842-ca.1400)"],
[u"frr", u"", u"", u"Northern Frisian", u"frison septentrional"],
[u"frs", u"", u"", u"Eastern Frisian", u"frison oriental"],
[u"fry", u"", u"fy", u"Western Frisian", u"frison occidental"],
[u"ful", u"", u"ff", u"Fulah", u"peul"],
[u"fur", u"", u"", u"Friulian", u"frioulan"],
[u"gaa", u"", u"", u"Ga", u"ga"],
[u"gay", u"", u"", u"Gayo", u"gayo"],
[u"gba", u"", u"", u"Gbaya", u"gbaya"],
[u"gem", u"", u"", u"Germanic languages", u"germaniques, langues"],
[u"geo", u"kat", u"ka", u"Georgian", u"géorgien"],
[u"ger", u"deu", u"de", u"German", u"allemand"],
[u"gez", u"", u"", u"Geez", u"guèze"],
[u"gil", u"", u"", u"Gilbertese", u"kiribati"],
[u"gla", u"", u"gd", u"Gaelic; Scottish Gaelic", u"gaélique; gaélique écossais"],
[u"gle", u"", u"ga", u"Irish", u"irlandais"],
[u"glg", u"", u"gl", u"Galician", u"galicien"],
[u"glv", u"", u"gv", u"Manx", u"manx; mannois"],
[u"gmh", u"", u"", u"German, Middle High (ca.1050-1500)", u"allemand, moyen haut (ca. 1050-1500)"],
[u"goh", u"", u"", u"German, Old High (ca.750-1050)", u"allemand, vieux haut (ca. 750-1050)"],
[u"gon", u"", u"", u"Gondi", u"gond"],
[u"gor", u"", u"", u"Gorontalo", u"gorontalo"],
[u"got", u"", u"", u"Gothic", u"gothique"],
[u"grb", u"", u"", u"Grebo", u"grebo"],
[u"grc", u"", u"", u"Greek, Ancient (to 1453)", u"grec ancien (jusqu'à 1453)"],
[u"gre", u"ell", u"el", u"Greek, Modern (1453-)", u"grec moderne (après 1453)"],
[u"grn", u"", u"gn", u"Guarani", u"guarani"],
[u"gsw", u"", u"", u"Swiss German; Alemannic; Alsatian", u"suisse alémanique; alémanique; alsacien"],
[u"guj", u"", u"gu", u"Gujarati", u"goudjrati"],
[u"gwi", u"", u"", u"Gwich'in", u"gwich'in"],
[u"hai", u"", u"", u"Haida", u"haida"],
[u"hat", u"", u"ht", u"Haitian; Haitian Creole", u"haïtien; créole haïtien"],
[u"hau", u"", u"ha", u"Hausa", u"haoussa"],
[u"haw", u"", u"", u"Hawaiian", u"hawaïen"],
[u"heb", u"", u"he", u"Hebrew", u"hébreu"],
[u"her", u"", u"hz", u"Herero", u"herero"],
[u"hil", u"", u"", u"Hiligaynon", u"hiligaynon"],
[u"him", u"", u"", u"Himachali languages; Western Pahari languages", u"langues himachalis; langues paharis occidentales"],
[u"hin", u"", u"hi", u"Hindi", | |
# synapse/handlers/cas_handler.py
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Dict, Optional
from xml.etree import ElementTree as ET
import attr
from twisted.web.client import PartialDownloadError
from synapse.api.errors import HttpResponseException
from synapse.handlers.sso import MappingException, UserAttributes
from synapse.http.site import SynapseRequest
from synapse.types import UserID, map_username_to_mxid_localpart
if TYPE_CHECKING:
from synapse.app.homeserver import HomeServer
logger = logging.getLogger(__name__)
class CasError(Exception):
    """Raised when validation of a CAS ticket fails."""

    def __init__(self, error, error_description=None):
        # Short machine-readable code, plus an optional human-readable detail.
        self.error = error
        self.error_description = error_description

    def __str__(self):
        """Render as "error" or "error: description"."""
        return (
            "{}: {}".format(self.error, self.error_description)
            if self.error_description
            else self.error
        )
@attr.s(slots=True, frozen=True)
class CasResponse:
    """The parsed payload of a successful CAS ticket-validation response."""

    # The user identifier pulled from the CAS response's "user" element.
    username = attr.ib(type=str)
    # Extra attributes from the response, mapping local tag name -> text
    # content (which may be None for empty elements).
    attributes = attr.ib(type=Dict[str, Optional[str]])
class CasHandler:
"""
Utility class for to handle the response from a CAS SSO service.
Args:
hs
"""
def __init__(self, hs: "HomeServer"):
    """Pull everything this handler needs off the homeserver.

    Args:
        hs: the running homeserver
    """
    self.hs = hs
    self._hostname = hs.hostname
    self._store = hs.get_datastore()
    self._auth_handler = hs.get_auth_handler()
    self._registration_handler = hs.get_registration_handler()

    # CAS-specific configuration values.
    self._cas_server_url = hs.config.cas_server_url
    self._cas_service_url = hs.config.cas_service_url
    self._cas_displayname_attribute = hs.config.cas_displayname_attribute
    self._cas_required_attributes = hs.config.cas_required_attributes

    self._http_client = hs.get_proxied_http_client()

    # identifier for the external_ids table
    self._auth_provider_id = "cas"

    self._sso_handler = hs.get_sso_handler()
def _build_service_param(self, args: Dict[str, str]) -> str:
    """
    Generates a value to use as the "service" parameter when redirecting or
    querying the CAS service.

    Args:
        args: Additional arguments to include in the final redirect URL.

    Returns:
        The URL to use as a "service" parameter.
    """
    # The ticket endpoint on this homeserver, plus any extra query args.
    ticket_path = "/_matrix/client/r0/login/cas/ticket"
    query = urllib.parse.urlencode(args)
    return "{base}{path}?{query}".format(
        base=self._cas_service_url, path=ticket_path, query=query
    )
async def _validate_ticket(
    self, ticket: str, service_args: Dict[str, str]
) -> CasResponse:
    """
    Validate a CAS ticket with the server, and return the parsed response.

    Args:
        ticket: The CAS ticket from the client.
        service_args: Additional arguments to include in the service URL.
            Should be the same as those passed to `get_redirect_url`.

    Raises:
        CasError: If there's an error parsing the CAS response.

    Returns:
        The parsed CAS response.
    """
    uri = self._cas_server_url + "/proxyValidate"
    args = {
        "ticket": ticket,
        "service": self._build_service_param(service_args),
    }
    try:
        body = await self._http_client.get_raw(uri, args)
    except PartialDownloadError as pde:
        # Twisted raises this error if the connection is closed,
        # even if that's being used old-http style to signal end-of-data
        body = pde.response
    except HttpResponseException as e:
        # BUGFIX: the previous code wrapped this message in a 1-tuple (a
        # stray trailing comma), so the rendered error showed a tuple repr
        # instead of the message itself.
        description = (
            'Authorization server responded with a "{status}" error '
            "while exchanging the authorization code."
        ).format(status=e.code)
        raise CasError("server_error", description) from e

    return self._parse_cas_response(body)
def _parse_cas_response(self, cas_response_body: bytes) -> CasResponse:
    """
    Retrieve the user and other parameters from the CAS response.

    Args:
        cas_response_body: The response from the CAS query.

    Raises:
        CasError: If there's an error parsing the CAS response.

    Returns:
        The parsed CAS response.
    """
    root = ET.fromstring(cas_response_body)

    # The document root must be a (possibly namespaced) serviceResponse.
    if not root.tag.endswith("serviceResponse"):
        raise CasError(
            "missing_service_response",
            "root of CAS response is not serviceResponse",
        )

    if not root[0].tag.endswith("authenticationSuccess"):
        raise CasError("unsucessful_response", "Unsuccessful CAS response")

    # Walk the children of the success element, picking out the username
    # and any extra attributes that were asserted.
    user = None
    attributes = {}
    for child in root[0]:
        if child.tag.endswith("user"):
            user = child.text
        if child.tag.endswith("attributes"):
            for attribute in child:
                # ElementTree library expands the namespace in
                # attribute tags to the full URL of the namespace.
                # We don't care about namespace here and it will always
                # be encased in curly braces, so we remove them.
                tag = attribute.tag
                if "}" in tag:
                    tag = tag.split("}")[1]
                attributes[tag] = attribute.text

    # A response without a username is unusable.
    if user is None:
        raise CasError("no_user", "CAS response does not contain user")

    return CasResponse(user, attributes)
def get_redirect_url(self, service_args: Dict[str, str]) -> str:
    """
    Generates a URL for the CAS server where the client should be redirected.

    Args:
        service_args: Additional arguments to include in the final redirect URL.

    Returns:
        The URL to redirect the client to.
    """
    service = self._build_service_param(service_args)
    query = urllib.parse.urlencode({"service": service})
    return "{}/login?{}".format(self._cas_server_url, query)
async def handle_ticket(
    self,
    request: SynapseRequest,
    ticket: str,
    client_redirect_url: Optional[str],
    session: Optional[str],
) -> None:
    """
    Called once the user has successfully authenticated with the SSO.

    Validates the CAS ticket sent by the client and completes the auth
    process.  If a UI Auth session is provided, marks that session as
    complete and returns an HTML page telling the user they are done;
    otherwise registers the user if necessary and redirects back to the
    client with a login token.

    Args:
        request: the incoming request from the browser. We'll
            respond to it with a redirect or an HTML page.
        ticket: The CAS ticket provided by the client.
        client_redirect_url: the redirectUrl parameter from the `/cas/ticket`
            HTTP request, if given. This should be the same as the redirectUrl
            from the original `/login/sso/redirect` request.
        session: The session parameter from the `/cas/ticket` HTTP request,
            if given. This should be the UI Auth session id.
    """
    # Only forward the parameters that were actually supplied.
    args = {
        name: value
        for name, value in (
            ("redirectUrl", client_redirect_url),
            ("session", session),
        )
        if value
    }

    try:
        cas_response = await self._validate_ticket(ticket, args)
    except CasError as e:
        logger.exception("Could not validate ticket")
        self._sso_handler.render_error(request, e.error, e.error_description, 401)
        return

    await self._handle_cas_response(
        request, cas_response, client_redirect_url, session
    )
async def _handle_cas_response(
    self,
    request: SynapseRequest,
    cas_response: CasResponse,
    client_redirect_url: Optional[str],
    session: Optional[str],
) -> None:
    """Handle a CAS response to a ticket request.

    Assumes that the response has been validated. Maps the user onto an MXID,
    registering them if necessary, and returns a response to the browser.

    Args:
        request: the incoming request from the browser. We'll respond to it with an
            HTML page or a redirect

        cas_response: The parsed CAS response.

        client_redirect_url: the redirectUrl parameter from the `/cas/ticket` HTTP request, if given.
            This should be the same as the redirectUrl from the original `/login/sso/redirect` request.

        session: The session parameter from the `/cas/ticket` HTTP request, if given.
            This should be the UI Auth session id.
    """
    # first check if we're doing a UIA
    if session:
        return await self._sso_handler.complete_sso_ui_auth_request(
            self._auth_provider_id, cas_response.username, session, request,
        )

    # otherwise, we're handling a login request.

    # Ensure that the attributes of the logged in user meet the required
    # attributes.  (The two failure cases — attribute missing, or present
    # with the wrong value — previously had duplicated error-rendering
    # blocks; they are folded into a single check here.)
    for required_attribute, required_value in self._cas_required_attributes.items():
        missing = required_attribute not in cas_response.attributes
        mismatched = (
            not missing
            and required_value is not None
            and cas_response.attributes[required_attribute] != required_value
        )
        if missing or mismatched:
            self._sso_handler.render_error(
                request,
                "unauthorised",
                "You are not authorised to log in here.",
                401,
            )
            return

    # Call the mapper to register/login the user

    # If this not a UI auth request than there must be a redirect URL.
    assert client_redirect_url is not None

    try:
        await self._complete_cas_login(cas_response, request, client_redirect_url)
    except MappingException as e:
        logger.exception("Could not map user")
        self._sso_handler.render_error(request, "mapping_error", str(e))
async def _complete_cas_login(
self,
cas_response: CasResponse,
request: SynapseRequest,
client_redirect_url: str,
) -> None:
"""
Given a CAS response, complete the login flow
Retrieves the remote user ID, registers the user if necessary, and serves
a redirect back to the client with a login-token.
Args:
cas_response: The parsed CAS response.
request: The request to respond to
client_redirect_url: The redirect URL passed in by the client.
Raises:
MappingException if there was a problem mapping the response to a user.
RedirectException: some mapping providers may raise this if they need
to redirect to an interstitial page.
"""
# Note that CAS does not support a mapping provider, so the logic is hard-coded.
localpart = map_username_to_mxid_localpart(cas_response.username)
async def cas_response_to_user_attributes(failures: int) -> UserAttributes:
"""
Map from CAS attributes to user attributes.
"""
# Due to the grandfathering logic matching any previously registered
# mxids it isn't expected for there to be any failures.
| |
# gh_stars: 10-100
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NFS driver module."""
import ddt
import errno
import os
import mock
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers import nfs
from cinder.volume.drivers import remotefs
class RemoteFsDriverTestCase(test.TestCase):
TEST_FILE_NAME = 'test.txt'
TEST_EXPORT = 'nas-host1:/export'
TEST_MNT_POINT = '/mnt/nas'
def setUp(self):
    """Build a RemoteFSDriver wired to a mocked configuration and executor."""
    super(RemoteFsDriverTestCase, self).setUp()
    # Mocked driver configuration shared by all the tests below.
    self.configuration = mock.Mock(conf.Configuration)
    self.configuration.append_config_values(mock.ANY)
    self.configuration.nas_secure_file_permissions = 'false'
    self.configuration.nas_secure_file_operations = 'false'
    self.configuration.max_over_subscription_ratio = 1.0
    self.configuration.reserved_percentage = 5
    # NOTE: the previous version first created a throwaway, unconfigured
    # RemoteFSDriver() and immediately overwrote it; only this configured
    # instance is ever used.
    self._driver = remotefs.RemoteFSDriver(
        configuration=self.configuration)
    # Stub out command execution so no shell commands actually run.
    mock_exc = mock.patch.object(self._driver, '_execute')
    self._execute = mock_exc.start()
    self.addCleanup(mock_exc.stop)
def test_create_sparsed_file(self):
    """Sparse files are created via `truncate` at the requested size."""
    path, size_gb = '/path', 1
    self._driver._create_sparsed_file(path, size_gb)
    self._execute.assert_called_once_with(
        'truncate', '-s', '%sG' % size_gb, path, run_as_root=True)
def test_create_regular_file(self):
    """Regular files are preallocated by dd-ing zeroes into them."""
    path = '/path'
    self._driver._create_regular_file(path, 1)
    # 1 GiB is written as 1024 blocks of 1 MiB.
    self._execute.assert_called_once_with(
        'dd', 'if=/dev/zero', 'of=%s' % path, 'bs=1M', 'count=1024',
        run_as_root=True)
def test_create_qcow2_file(self):
    """qcow2 files are created with metadata preallocation at the right size."""
    file_size = 1
    expected_size_arg = str(file_size * units.Gi)
    self._driver._create_qcow2_file('/path', file_size)
    self._execute.assert_called_once_with(
        'qemu-img', 'create', '-f', 'qcow2', '-o',
        'preallocation=metadata', '/path', expected_size_arg,
        run_as_root=True)
def test_set_rw_permissions_for_all(self):
    """World-writable permissions are applied with `chmod ugo+rw`."""
    target = '/path'
    self._driver._set_rw_permissions_for_all(target)
    self._execute.assert_called_once_with(
        'chmod', 'ugo+rw', target, run_as_root=True)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_with_secure_file_permissions(self, LOG):
    """No warning is logged when secure file permissions are enabled."""
    drv = self._driver
    drv._mounted_shares = [self.TEST_EXPORT]
    self.configuration.nas_secure_file_permissions = 'true'
    drv._set_rw_permissions(self.TEST_FILE_NAME)
    self.assertFalse(LOG.warning.called)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
    """A warning about open permissions is logged when security is off."""
    self.configuration.nas_secure_file_permissions = 'false'
    self._driver._set_rw_permissions(self.TEST_FILE_NAME)
    self.assertTrue(LOG.warning.called)
    expected_msg = "%(path)s is being set with open permissions: %(perm)s"
    LOG.warning.assert_called_once_with(
        expected_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'})
@mock.patch('os.path.join')
@mock.patch('os.path.isfile', return_value=False)
def test_determine_nas_security_options_when_auto_and_new_install(
        self,
        mock_isfile,
        mock_join):
    """Test the setting of the NAS Security Option

    In this test case, we will create the marker file. No pre-existing
    Cinder volumes found during bootup.
    """
    drv = self._driver
    drv._mounted_shares = [self.TEST_EXPORT]
    marker_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
    is_new_install = True

    drv._ensure_shares_mounted = mock.Mock()
    nas_mount = drv._get_mount_point_for_share = mock.Mock(
        return_value=self.TEST_MNT_POINT)
    mock_join.return_value = marker_path

    # 'auto' should resolve to 'true' for both security settings.
    result = drv._determine_nas_security_option_setting(
        'auto', nas_mount, is_new_install)
    self.assertEqual('true', result)

    result = drv._determine_nas_security_option_setting(
        'auto', nas_mount, is_new_install)
    self.assertEqual('true', result)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_new_install_exists(
        self,
        isfile,
        join):
    """Test the setting of the NAS Security Option.

    In this test case, the marker file already exists. Cinder volumes
    found during bootup.
    """
    drv = self._driver
    drv._mounted_shares = [self.TEST_EXPORT]
    file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
    is_new_install = False
    drv._ensure_shares_mounted = mock.Mock()
    nas_mount = drv._get_mount_point_for_share = mock.Mock(
        return_value=self.TEST_MNT_POINT)
    join.return_value = file_path
    isfile.return_value = True
    # The marker file is present, so 'auto' resolves to 'true' even on a
    # non-new install.
    secure_file_permissions = 'auto'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_permissions,
        nas_mount, is_new_install)
    self.assertEqual('true', nas_option)
    secure_file_operations = 'auto'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_operations,
        nas_mount, is_new_install)
    self.assertEqual('true', nas_option)
@mock.patch('os.path.join')
@mock.patch('os.path.isfile')
def test_determine_nas_security_options_when_auto_and_old_install(
        self,
        isfile,
        join):
    """Test the setting of the NAS Security Option.

    In this test case, the marker file does not exist. There are also
    pre-existing Cinder volumes.
    """
    drv = self._driver
    drv._mounted_shares = [self.TEST_EXPORT]
    file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
    is_new_install = False
    drv._ensure_shares_mounted = mock.Mock()
    nas_mount = drv._get_mount_point_for_share = mock.Mock(
        return_value=self.TEST_MNT_POINT)
    join.return_value = file_path
    isfile.return_value = False
    # No marker file and existing volumes: 'auto' must stay insecure
    # ('false') to avoid breaking the pre-existing deployment.
    secure_file_permissions = 'auto'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_permissions,
        nas_mount, is_new_install)
    self.assertEqual('false', nas_option)
    secure_file_operations = 'auto'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_operations,
        nas_mount, is_new_install)
    self.assertEqual('false', nas_option)
def test_determine_nas_security_options_when_admin_set_true(self):
    """Test the setting of the NAS Security Option.

    In this test case, the Admin set the flag to 'true'.
    """
    drv = self._driver
    drv._mounted_shares = [self.TEST_EXPORT]
    is_new_install = False
    drv._ensure_shares_mounted = mock.Mock()
    nas_mount = drv._get_mount_point_for_share = mock.Mock(
        return_value=self.TEST_MNT_POINT)
    # An explicit admin value is honored verbatim for both options.
    secure_file_permissions = 'true'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_permissions,
        nas_mount, is_new_install)
    self.assertEqual('true', nas_option)
    secure_file_operations = 'true'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_operations,
        nas_mount, is_new_install)
    self.assertEqual('true', nas_option)
def test_determine_nas_security_options_when_admin_set_false(self):
    """Test the setting of the NAS Security Option.

    In this test case, the Admin set the flag to 'false'.
    """
    drv = self._driver
    drv._mounted_shares = [self.TEST_EXPORT]
    is_new_install = False
    drv._ensure_shares_mounted = mock.Mock()
    nas_mount = drv._get_mount_point_for_share = mock.Mock(
        return_value=self.TEST_MNT_POINT)
    # An explicit admin value is honored verbatim for both options.
    secure_file_permissions = 'false'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_permissions,
        nas_mount, is_new_install)
    self.assertEqual('false', nas_option)
    secure_file_operations = 'false'
    nas_option = drv._determine_nas_security_option_setting(
        secure_file_operations,
        nas_mount, is_new_install)
    self.assertEqual('false', nas_option)
@mock.patch.object(remotefs, 'LOG')
def test_set_nas_security_options(self, LOG):
    """Test setting of NAS Security options.

    The RemoteFS driver will force set options to false. The derived
    objects will provide an inherited interface to properly set options.
    """
    drv = self._driver
    drv.set_nas_security_options(False)
    conf = drv.configuration
    self.assertEqual('false', conf.nas_secure_file_operations)
    self.assertEqual('false', conf.nas_secure_file_permissions)
    self.assertTrue(LOG.warning.called)
def test_secure_file_operations_enabled_true(self):
    """Test nas_secure_file_operations = 'true'.

    Networked file system based drivers may support secure file
    operations. This test verifies the settings when secure.
    """
    self.configuration.nas_secure_file_operations = 'true'
    self.assertTrue(self._driver.secure_file_operations_enabled())
def test_secure_file_operations_enabled_false(self):
    """Test nas_secure_file_operations = 'false'.

    Networked file system based drivers may support secure file
    operations. This test verifies the settings when not secure.
    """
    self.configuration.nas_secure_file_operations = 'false'
    self.assertFalse(self._driver.secure_file_operations_enabled())
@ddt.ddt
class NfsDriverTestCase(test.TestCase):
"""Test case for NFS driver."""
TEST_NFS_HOST = 'nfs-host1'
TEST_NFS_SHARE_PATH = '/export'
TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
TEST_NFS_EXPORT2 = 'nfs-host2:/export'
TEST_NFS_EXPORT2_OPTIONS = '-o intr'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/nfs'
TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt'
TEST_MNT_POINT_BASE = '/mnt/test'
TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this'
TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo'
def setUp(self):
    """Create an NfsDriver wired to a mocked configuration and _execute."""
    super(NfsDriverTestCase, self).setUp()
    self.configuration = mock.Mock(conf.Configuration)
    self.configuration.append_config_values(mock.ANY)
    # Capacity / oversubscription knobs.
    self.configuration.max_over_subscription_ratio = 1.0
    self.configuration.reserved_percentage = 5
    # NFS-specific settings.
    self.configuration.nfs_shares_config = None
    self.configuration.nfs_sparsed_volumes = True
    self.configuration.nfs_reserved_percentage = 5.0
    self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
    self.configuration.nfs_mount_options = None
    self.configuration.nfs_mount_attempts = 3
    self.configuration.nfs_qcow2_volumes = False
    # NAS security options default to insecure for these tests.
    self.configuration.nas_secure_file_permissions = 'false'
    self.configuration.nas_secure_file_operations = 'false'
    self.configuration.nas_host = None
    self.configuration.nas_share_path = None
    self.configuration.nas_mount_options = None
    self.configuration.volume_dd_blocksize = '1M'
    self._driver = nfs.NfsDriver(configuration=self.configuration)
    self._driver.shares = {}
    # Intercept every shell command the driver would run.
    mock_exc = mock.patch.object(self._driver, '_execute')
    self._execute = mock_exc.start()
    self.addCleanup(mock_exc.stop)
    self.context = context.get_admin_context()
def test_local_path(self):
    """local_path common use case."""
    self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
    volume = fake_volume.fake_volume_obj(
        self.context,
        provider_location=self.TEST_NFS_EXPORT1)
    expected = '/mnt/test/2f4f60214cf43c595666dd815f0360a4/%s' % volume.name
    self.assertEqual(expected, self._driver.local_path(volume))
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch.object(image_utils, 'resize_image')
@mock.patch.object(image_utils, 'fetch_to_raw')
def test_copy_image_to_volume(self, mock_fetch, mock_resize, mock_qemu):
    """resize_image common case usage."""
    drv = self._driver
    volume = fake_volume.fake_volume_obj(self.context,
                                         size=self.TEST_SIZE_IN_GB)
    img_source = 'volume-%s' % volume.id
    with mock.patch.object(drv, 'local_path', return_value=img_source):
        # qemu_img_info must report the volume already at 1 GiB.
        qemu_info = mock.Mock()
        qemu_info.virtual_size = 1 * units.Gi
        mock_qemu.return_value = qemu_info
        drv.copy_image_to_volume(None, volume, None, None)
        mock_fetch.assert_called_once_with(
            None, None, None, img_source, mock.ANY,
            run_as_root=True, size=self.TEST_SIZE_IN_GB)
        mock_resize.assert_called_once_with(
            img_source, self.TEST_SIZE_IN_GB, run_as_root=True)
def test_get_mount_point_for_share(self):
    """_get_mount_point_for_share should calculate correct value."""
    self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
    mount_point = self._driver._get_mount_point_for_share(
        self.TEST_NFS_EXPORT1)
    self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4',
                     mount_point)
def test_get_mount_point_for_share_given_extra_slash_in_state_path(self):
    """_get_mount_point_for_share should calculate correct value."""
    # Configure a mount point base containing a doubled slash; the
    # driver is expected to normalize it away.
    self.configuration.nfs_mount_point_base = (
        self.TEST_MNT_POINT_BASE_EXTRA_SLASH)
    drv = nfs.NfsDriver(configuration=self.configuration)
    self.assertEqual('/opt/stack/data/cinder/mnt', drv.base)
    self.assertEqual(
        '/opt/stack/data/cinder/mnt/2f4f60214cf43c595666dd815f0360a4',
        drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
def test_get_capacity_info(self):
    """_get_capacity_info should calculate correct value."""
    drv = self._driver
    stat_total_size = 2620544
    stat_avail = 2129984
    # Fake `stat -f -c '%S %b %a'` output: block size, total, available.
    stat_output = '1 %d %d' % (stat_total_size, stat_avail)
    du_used = 490560
    du_output = '%d /mnt' % du_used
    with mock.patch.object(
            drv, '_get_mount_point_for_share') as mock_get_mount:
        mock_get_mount.return_value = self.TEST_MNT_POINT
        # First _execute call answers `stat`, the second answers `du`.
        self._execute.side_effect = [(stat_output, None),
                                     (du_output, None)]
        self.assertEqual((stat_total_size, stat_avail, du_used),
                         drv._get_capacity_info(self.TEST_NFS_EXPORT1))
        mock_get_mount.assert_called_once_with(self.TEST_NFS_EXPORT1)
        calls = [mock.call('stat', '-f', '-c', '%S %b %a',
                           self.TEST_MNT_POINT, run_as_root=True),
                 mock.call('du', '-sb', '--apparent-size',
                           '--exclude', '*snapshot*',
                           self.TEST_MNT_POINT, run_as_root=True)]
        self._execute.assert_has_calls(calls)
def test_get_capacity_info_for_share_and_mount_point_with_spaces(self):
    """_get_capacity_info should calculate correct value."""
    # Same as test_get_capacity_info, but the export and mount point
    # contain spaces, which must survive argument passing unmangled.
    drv = self._driver
    stat_total_size = 2620544
    stat_avail = 2129984
    stat_output = '1 %d %d' % (stat_total_size, stat_avail)
    du_used = 490560
    du_output = '%d /mnt' % du_used
    with mock.patch.object(
            drv, '_get_mount_point_for_share') as mock_get_mount:
        mock_get_mount.return_value = self.TEST_MNT_POINT_SPACES
        # First _execute call answers `stat`, the second answers `du`.
        self._execute.side_effect = [(stat_output, None),
                                     (du_output, None)]
        self.assertEqual((stat_total_size, stat_avail, du_used),
                         drv._get_capacity_info(
                             self.TEST_NFS_EXPORT_SPACES))
        mock_get_mount.assert_called_once_with(
            self.TEST_NFS_EXPORT_SPACES)
        calls = [mock.call('stat', '-f', '-c', '%S %b %a',
                           self.TEST_MNT_POINT_SPACES, run_as_root=True),
                 mock.call('du', '-sb', '--apparent-size',
                           '--exclude', '*snapshot*',
                           self.TEST_MNT_POINT_SPACES, run_as_root=True)]
        self._execute.assert_has_calls(calls)
def test_load_shares_config(self):
    """Valid, commented, empty and malformed share lines are handled."""
    drv = self._driver
    drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
    config_data = [
        self.TEST_NFS_EXPORT1,
        '#' + self.TEST_NFS_EXPORT2,
        '',
        self.TEST_NFS_EXPORT2 + ' ' + self.TEST_NFS_EXPORT2_OPTIONS,
        'broken:share_format',
    ]
    with mock.patch.object(
            drv, '_read_config_file') as mock_read_config:
        mock_read_config.return_value = config_data
        drv._load_shares_config(drv.configuration.nfs_shares_config)
        mock_read_config.assert_called_once_with(
            self.TEST_SHARES_CONFIG_FILE)
    # Only the two well-formed, uncommented exports survive.
    self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
    self.assertIn(self.TEST_NFS_EXPORT2, drv.shares)
    self.assertEqual(2, len(drv.shares))
    self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS,
                     drv.shares[self.TEST_NFS_EXPORT2])
def test_load_shares_config_nas_opts(self):
    """NAS host/share options yield exactly one configured share."""
    drv = self._driver
    drv.configuration.nas_host = self.TEST_NFS_HOST
    drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH
    drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
    drv._load_shares_config(drv.configuration.nfs_shares_config)
    self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
    self.assertEqual(1, len(drv.shares))
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
    """_ensure_shares_mounted should save share if mounted with success."""
    drv = self._driver
    drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
    config_data = [self.TEST_NFS_EXPORT1]
    with mock.patch.object(drv, '_read_config_file') as mock_read_config, \
            mock.patch.object(drv, '_ensure_share_mounted') as mock_ensure:
        mock_read_config.return_value = config_data
        drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
        mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1)
@mock.patch.object(remotefs, 'LOG')
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self,
LOG):
"""_ensure_shares_mounted should not save share if failed to mount."""
drv = self._driver
config_data = []
config_data.append(self.TEST_NFS_EXPORT1)
drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
with mock.patch.object(
drv, '_read_config_file') as mock_read_config:
with mock.patch.object(
drv, '_ensure_share_mounted') as mock_ensure:
mock_read_config.return_value | |
# schicexplorer/scHicCluster.py
import argparse
import os
from multiprocessing import Process, Queue
import time
import logging
log = logging.getLogger(__name__)
from scipy import linalg
import cooler
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
from hicmatrix import HiCMatrix as hm
import numpy as np
from scipy.sparse import csr_matrix
from holoviews.plotting.util import process_cmap
from schicexplorer._version import __version__
from schicexplorer.utilities import cell_name_list, create_csr_matrix_all_cells
def parse_arguments(args=None):
    """Build the command line parser for scHicCluster.

    Returns the configured ``argparse.ArgumentParser``; callers invoke
    ``parse_args`` on the returned parser. Fixes relative to the previous
    version: the ``--figuresize`` help text no longer repeats the
    ``--fontsize`` text, two "colum" typos are corrected, and the dead
    ``default`` on the required ``--outFileName`` option is removed.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        add_help=False,
        description='scHicCluster uses kmeans or spectral clustering to associate each cell to a cluster and therefore to its cell cycle. '
                    'The clustering can be run on the raw data, on a kNN computed via the exact euclidean distance or via PCA. '
                    'Please consider also the other clustering and dimension reduction approaches of the scHicExplorer suite. They can give you better results, '
                    'can be faster or less memory demanding.'
    )
    parserRequired = parser.add_argument_group('Required arguments')
    parserRequired.add_argument('--matrix', '-m',
                                help='The single cell Hi-C interaction matrices to cluster. Needs to be in scool format',
                                metavar='scool scHi-C matrix',
                                required=True)
    parserRequired.add_argument('--numberOfClusters', '-c',
                                help='Number of to be computed clusters',
                                required=False,
                                default=12,
                                type=int)
    parserRequired.add_argument('--clusterMethod', '-cm',
                                help='Algorithm to cluster the Hi-C matrices',
                                choices=['spectral', 'kmeans'],
                                default='spectral')
    parserOpt = parser.add_argument_group('Optional arguments')
    parserOpt.add_argument('--chromosomes',
                           help='List of to be plotted chromosomes',
                           nargs='+')
    parserOpt.add_argument('--intraChromosomalContactsOnly', '-ic',
                           help='This option loads only the intra-chromosomal contacts. Can improve the cluster result if data is very noisy.',
                           action='store_true')
    parserOpt.add_argument('--additionalPCA', '-pca',
                           help='Computes PCA on top of a k-nn. Can improve the cluster result.',
                           action='store_true')
    parserOpt.add_argument('--dimensionsPCA', '-dim_pca',
                           help='The number of dimensions from the PCA matrix that should be considered for clustering. Can improve the cluster result.',
                           default=20,
                           type=int)
    parserOpt.add_argument('--dimensionReductionMethod', '-drm',
                           help='Dimension reduction methods, knn with euclidean distance, pca',
                           choices=['none', 'knn', 'pca'],
                           default='none')
    parserOpt.add_argument('--createScatterPlot', '-csp',
                           help='Create a scatter plot for the clustering, the x and y are the first and second principal component of the computed k-nn graph.',
                           required=False,
                           default=None)
    parserOpt.add_argument('--numberOfNearestNeighbors', '-k',
                           help='Number of to be used computed nearest neighbors for the knn graph. Default is either the default value or the number of the provided cells, whatever is smaller.',
                           required=False,
                           default=100,
                           type=int)
    parserOpt.add_argument('--dpi', '-d',
                           help='The dpi of the scatter plot.',
                           required=False,
                           default=300,
                           type=int)
    # required=True makes a default pointless; the old default was dead code.
    parserOpt.add_argument('--outFileName', '-o',
                           help='File name to save the resulting clusters',
                           required=True)
    parserOpt.add_argument('--cell_coloring_type', '-cct',
                           help='A two column list, first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
                           required=False)
    parserOpt.add_argument('--cell_coloring_batch', '-ccb',
                           help='A two column list, first column the cell names as stored in the scool file, second column the associated coloring for the scatter plot',
                           required=False)
    parserOpt.add_argument('--latexTable', '-lt',
                           help='Return the overlap statistics if --cell_coloring_type is given as a latex table.')
    parserOpt.add_argument('--figuresize',
                           help='Figure size of the scatter plot.',
                           type=float,
                           nargs=2,
                           default=(15, 6),
                           metavar=('x-size', 'y-size'))
    parserOpt.add_argument('--colorMap',
                           help='Color map to use for the heatmap, supported are the categorical colormaps from holoviews: '
                                'http://holoviews.org/user_guide/Colormaps.html',
                           default='glasbey_dark')
    parserOpt.add_argument('--fontsize',
                           help='Fontsize in the plot for x and y axis.',
                           type=float,
                           default=15)
    parserOpt.add_argument('--threads', '-t',
                           help='Number of threads. Using the python multiprocessing module.',
                           required=False,
                           default=8,
                           type=int)
    parserOpt.add_argument('--help', '-h', action='help', help='show this help message and exit')
    parserOpt.add_argument('--version', action='version',
                           version='%(prog)s {}'.format(__version__))
    return parser
def main(args=None):
args = parse_arguments().parse_args(args)
outputFolder = os.path.dirname(os.path.abspath(args.outFileName)) + '/'
log.debug('outputFolder {}'.format(outputFolder))
if args.cell_coloring_type:
cell_name_cell_type_dict = {}
cell_type_color_dict = {}
color_cell_type_dict = {}
cell_type_counter = 0
with open(args.cell_coloring_type, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict[cell_name] = cell_type
if cell_type not in cell_type_color_dict:
cell_type_color_dict[cell_type] = cell_type_counter
color_cell_type_dict[cell_type_counter] = cell_type
cell_type_counter += 1
if args.cell_coloring_batch:
cell_name_cell_type_dict_batch = {}
cell_type_color_dict_batch = {}
color_cell_type_dict_batch = {}
cell_type_counter_batch = 0
with open(args.cell_coloring_batch, 'r') as file:
for i, line in enumerate(file.readlines()):
line = line.strip()
try:
cell_name, cell_type = line.split('\t')
except Exception:
cell_name, cell_type = line.split(' ')
cell_name_cell_type_dict_batch[cell_name] = cell_type
if cell_type not in cell_type_color_dict_batch:
cell_type_color_dict_batch[cell_type] = cell_type_counter_batch
color_cell_type_dict_batch[cell_type_counter_batch] = cell_type
cell_type_counter_batch += 1
raw_file_name = os.path.splitext(os.path.basename(args.outFileName))[0]
neighborhood_matrix, matrices_list = create_csr_matrix_all_cells(args.matrix, args.threads, args.chromosomes, outputFolder, raw_file_name, args.intraChromosomalContactsOnly)
reduce_to_dimension = neighborhood_matrix.shape[0] - 1
if args.dimensionReductionMethod == 'knn':
if args.numberOfNearestNeighbors > reduce_to_dimension:
args.numberOfNearestNeighbors = reduce_to_dimension
nbrs = NearestNeighbors(n_neighbors=args.numberOfNearestNeighbors, algorithm='ball_tree', n_jobs=args.threads).fit(neighborhood_matrix)
neighborhood_matrix = nbrs.kneighbors_graph(mode='distance')
if args.additionalPCA:
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix = pca.fit_transform(neighborhood_matrix.todense())
if args.dimensionsPCA:
args.dimensionsPCA = min(args.dimensionsPCA, neighborhood_matrix.shape[0])
neighborhood_matrix = neighborhood_matrix[:, :args.dimensionsPCA]
elif args.dimensionReductionMethod == 'pca':
corrmatrix = np.cov(neighborhood_matrix.todense())
evals, eigs = linalg.eig(corrmatrix)
neighborhood_matrix = eigs[:, :reduce_to_dimension].transpose()
if args.clusterMethod == 'spectral':
spectralClustering_object = SpectralClustering(n_clusters=args.numberOfClusters, n_jobs=args.threads,
n_neighbors=reduce_to_dimension, affinity='nearest_neighbors', random_state=0, eigen_solver="arpack")
labels_clustering = spectralClustering_object.fit_predict(neighborhood_matrix)
elif args.clusterMethod == 'kmeans':
kmeans_object = KMeans(n_clusters=args.numberOfClusters, random_state=0, n_jobs=args.threads, precompute_distances=True)
labels_clustering = kmeans_object.fit_predict(neighborhood_matrix)
if args.colorMap:
colors = process_cmap(args.colorMap)
if args.cell_coloring_type:
if len(colors) < len(cell_type_color_dict):
log.error('The chosen colormap offers too less values for the number of clusters.')
exit(1)
labels_clustering_cell_type = []
for cell_name in matrices_list:
labels_clustering_cell_type.append(cell_type_color_dict[cell_name_cell_type_dict[cell_name]])
labels_clustering_cell_type = np.array(labels_clustering_cell_type)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
label_x = 'PC1'
label_y = 'PC2'
if args.createScatterPlot:
if args.dimensionReductionMethod == 'none':
log.warning('Raw matrix clustering scatter plot needs to compute a PCA and can request large amount (> 100 GB) of memory.')
log.debug('args.additionalPCA {}'.format(args.additionalPCA))
log.debug('args.dimensionReductionMethod {}'.format(args.dimensionReductionMethod))
if args.dimensionReductionMethod == 'none' or (args.dimensionReductionMethod == 'knn' and not args.additionalPCA):
log.debug('compute pca')
pca = PCA(n_components=min(neighborhood_matrix.shape) - 1)
neighborhood_matrix_knn = pca.fit_transform(neighborhood_matrix.todense())
log.debug('compute pca')
else:
log.debug('already computed pca')
neighborhood_matrix_knn = neighborhood_matrix
if args.cell_coloring_type:
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict)]):
mask = labels_clustering_cell_type == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.cell_coloring_batch:
if len(colors) < len(cell_type_color_dict_batch):
log.error('The chosen colormap offers too less values for the number of clusters.')
exit(1)
labels_clustering_cell_type_batch = []
for cell_name in matrices_list:
labels_clustering_cell_type_batch.append(cell_type_color_dict_batch[cell_name_cell_type_dict_batch[cell_name]])
labels_clustering_cell_type_batch = np.array(labels_clustering_cell_type_batch)
log.debug('labels_clustering_cell_type: {}'.format(len(labels_clustering_cell_type_batch)))
log.debug('matrices_list: {}'.format(len(matrices_list)))
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:len(cell_type_color_dict_batch)]):
mask = labels_clustering_cell_type_batch == i
log.debug('plot cluster: {} {}'.format(color_cell_type_dict_batch[i], np.sum(mask)))
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(color_cell_type_dict_batch[i]), s=20, alpha=0.7)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '_cell_color_batch.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
plt.figure(figsize=(args.figuresize[0], args.figuresize[1]))
for i, color in enumerate(colors[:args.numberOfClusters]):
mask = labels_clustering == i
plt.scatter(neighborhood_matrix_knn[:, 0].T[mask], neighborhood_matrix_knn[:, 1].T[mask], color=color, label=str(i), s=20, alpha=0.7)
plt.legend(fontsize=args.fontsize)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=args.fontsize)
plt.xticks([])
plt.yticks([])
plt.xlabel(label_x, fontsize=args.fontsize)
plt.ylabel(label_y, fontsize=args.fontsize)
if '.' not in args.createScatterPlot:
args.createScatterPlot += '.png'
scatter_plot_name = '.'.join(args.createScatterPlot.split('.')[:-1]) + '.' + args.createScatterPlot.split('.')[-1]
plt.tight_layout()
plt.savefig(scatter_plot_name, dpi=args.dpi)
plt.close()
if args.latexTable and args.cell_coloring_type:
# compute overlap of cell_type find found clusters
computed_clusters = set(labels_clustering)
cell_type_amounts_dict = {}
# percentage_threshold = 0.8
for threshold in [0.7, 0.8, 0.9]:
cell_type_amounts_dict[threshold] = {}
with open(args.latexTable, 'w') as matches_file:
header = '\\begin{table}[!htb]\n\\footnotesize\n\\begin{tabular}{|l'
body = '\\hline Cluster '
for i in range(len(color_cell_type_dict)):
mask_cell_type = labels_clustering_cell_type == i
header += '|c'
body += '& ' + str(color_cell_type_dict[i]) + ' (' + str(np.sum(mask_cell_type)) + ' cells)'
header += '|}\n'
body += '\\\\\n'
# body = ''
for i in computed_clusters:
body += '\\hline Cluster ' + str(i)
mask_computed_clusters = labels_clustering == i
body += ' (' + str(np.sum(mask_computed_clusters)) + ' cells)'
for j in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == j
mask = mask_computed_clusters & mask_cell_type
number_of_matches = np.sum(mask)
body += '& ' + str(number_of_matches)
if number_of_matches != 1:
body += ' cells / '
else:
body += ' cell / '
body += '{:.2f}'.format((number_of_matches / np.sum(mask_computed_clusters)) * 100) + ' \\% '
for threshold in [0.7, 0.8, 0.9]:
if number_of_matches / np.sum(mask_computed_clusters) >= threshold:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] += number_of_matches
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = number_of_matches
else:
if color_cell_type_dict[j] in cell_type_amounts_dict[threshold]:
continue
else:
cell_type_amounts_dict[threshold][color_cell_type_dict[j]] = 0
body += '\\\\\n'
body += '\\hline ' + '&' * len(cell_type_color_dict) + '\\\\\n'
for threshold in [0.7, 0.8, 0.9]:
body += '\\hline Correct identified $>{}\\%$'.format(int(threshold * 100))
for i in range(len(cell_type_color_dict)):
mask_cell_type = labels_clustering_cell_type == i
if color_cell_type_dict[i] in cell_type_amounts_dict[threshold]:
body += '& ' + str(cell_type_amounts_dict[threshold][color_cell_type_dict[i]]) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
body += '{:.2f}'.format((cell_type_amounts_dict[threshold][color_cell_type_dict[i]] / np.sum(mask_cell_type)) * 100)
else:
body += '& ' + str(0) + ' / ' + str(np.sum(mask_cell_type)) + ' ('
body | |
Note: no trailing whitespace.
#@+node:ekr.20041005105605.182: *6* at.putStartDocLine
def putStartDocLine(self, s, i, kind):
    """Write the start of a doc part.

    s is the text being scanned; i indexes the @doc/@ directive in s.
    kind selects the sentinel form: at.docDirective emits @+doc,
    anything else emits @+at.
    """
    at = self
    sentinel = "@+doc" if kind == at.docDirective else "@+at"
    directive = "@doc" if kind == at.docDirective else "@"
    # Put whatever follows the directive in the sentinel.
    # Skip past the directive.
    i += len(directive)
    j = g.skip_to_end_of_line(s, i)
    follow = s[i: j]
    # Put the opening @+doc or @+at sentinel, including whatever follows the directive.
    at.putSentinel(sentinel + follow)
    # Put the opening comment if we are using block comments.
    if at.endSentinelComment:
        at.putIndent(at.indent)
        at.os(at.startSentinelComment); at.onl()
#@+node:ekr.20041005105605.187: *4* Writing 4,x sentinels...
#@+node:ekr.20041005105605.188: *5* at.nodeSentinelText & helper
def nodeSentinelText(self, p):
    """Return the text of a @+node or @-node sentinel for p."""
    at = self
    h = at.removeCommentDelims(p)
    if getattr(at, 'at_shadow_test_hack', False):
        # @shadow unit-testing hack; see AtShadowTestCase.makePrivateLines.
        return h
    gnx = p.v.fileIndex
    level = 1 + p.level() - self.root.level()
    # Levels above 2 are written numerically ("*3*"); 1 and 2 as stars.
    stars = "*%s*" % level if level > 2 else '*' * level
    return "%s: %s %s" % (gnx, stars, h)
#@+node:ekr.20041005105605.189: *6* at.removeCommentDelims
def removeCommentDelims(self, p):
    """
    If the present @language/@comment settings do not specify a single-line
    comment we remove all block comment delims from h. This prevents headline
    text from interfering with the parsing of node sentinels.
    """
    at = self
    h = p.h
    # A non-empty end delim means block comments are in use.
    if at.endSentinelComment:
        for delim in (at.startSentinelComment, at.endSentinelComment):
            h = h.replace(delim, "")
    return h
#@+node:ekr.20041005105605.190: *5* at.putLeadInSentinel
def putLeadInSentinel(self, s, i, j, delta):
    """
    Set at.leadingWs as needed for @+others and @+<< sentinels.

    i points at the start of a line.
    j points at @others or a section reference.
    delta is the change in at.indent that is about to happen and hasn't happened yet.
    NOTE(review): delta is not referenced in this body — confirm it is
    still needed by callers.
    """
    at = self
    at.leadingWs = ""  # Set the default.
    if i == j:
        return  # The @others or ref starts a line.
    k = g.skip_ws(s, i)
    if j == k:
        # Only whitespace before the @others or ref.
        at.leadingWs = s[i: j]  # Remember the leading whitespace, including its spelling.
    else:
        # Real text precedes the @others/ref: write it now.
        self.putIndent(at.indent)  # 1/29/04: fix bug reported by <NAME>.
        at.os(s[i: j])
        at.onl_sent()
#@+node:ekr.20041005105605.191: *5* at.putCloseNodeSentinel
def putCloseNodeSentinel(self, p):
    """End a node."""
    # Bug fix: 2010/07/04.
    self.raw = False
#@+node:ekr.20041005105605.192: *5* at.putOpenLeoSentinel 4.x
def putOpenLeoSentinel(self, s):
    """Write @+leo sentinel."""
    at = self
    # Guard clause: write nothing unless sentinels are enabled (or forced).
    if not (at.sentinels or hasattr(at, 'force_sentinels')):
        return
    s += "-thin"
    encoding = at.encoding.lower()
    if encoding != "utf-8":
        # New in 4.2: encoding fields end in ",."
        s += "-encoding=%s,." % (encoding)
    at.putSentinel(s)
#@+node:ekr.20041005105605.193: *5* at.putOpenNodeSentinel
def putOpenNodeSentinel(self, p, inAtAll=False):
    """Write @+node sentinel for p."""
    at = self
    if not inAtAll and p.isAtFileNode() and p != at.root:
        at.writeError("@file not valid in: " + p.h)
        return
    # Leo 4.7 b2: we never write tnodeLists.
    at.putSentinel("@+node:" + at.nodeSentinelText(p))
#@+node:ekr.20041005105605.194: *5* at.putSentinel (applies cweb hack) 4.x
def putSentinel(self, s):
'''
Write a sentinel whose text is s, applying the CWEB hack if needed.
This method outputs all sentinels.
'''
at = self
if at.sentinels or hasattr(at, 'force_sentinels'):
at.putIndent(at.indent)
at.os(at.startSentinelComment)
# Apply the cweb hack to s:
# If the opening comment delim ends in '@',
# double all '@' signs except the first.
start = at.startSentinelComment
if start and start[-1] == '@':
s = s.replace('@', '@@')[1:]
at.os(s)
if at.endSentinelComment:
at.os(at.endSentinelComment)
at.onl()
#@+node:ekr.20041005105605.196: *4* Writing 4.x utils...
#@+node:ekr.20181024134823.1: *5* at.addToOrphanList
def addToOrphanList(self, root):
'''Mark the root as erroneous for c.raise_error_dialogs().'''
c = self.c
# Fix #1050:
root.setOrphan()
c.orphan_at_file_nodes.append(root.h)
#@+node:ekr.20190111111608.1: *5* at.checkPath & helpers
def checkPath(self, fileName):
'''Return True if we can write to the file's directory.'''
at = self
assert g.os_path_isabs(fileName), (repr(fileName), g.callers())
directory = g.os_path_dirname(fileName)
if not at.checkDirectory(directory):
return False
if g.os_path_exists(fileName):
return at.isWritable(fileName)
return True
#@+node:ekr.20190111112432.1: *6* at.checkDir
def checkDirectory(self, directory):
'''Return True if directory exists or could be created.'''
at, c = self, self.c
assert directory, g.callers()
if g.os_path_exists(directory):
return at.isWritable(directory)
try:
g.makeAllNonExistentDirectories(directory, c=c)
return True
except Exception:
g.es("exception creating path: %r" % (directory), color='red')
g.es_exception()
return False
#@+node:ekr.20190111112442.1: *6* at.isWritable
def isWritable(self, path):
'''Return True if the path is writable.'''
try:
# os.access() may not exist on all platforms.
ok = os.access(path, os.W_OK)
except AttributeError:
return True
if not ok:
g.es('read only:', repr(path), color='red')
return ok
#@+node:ekr.20090514111518.5661: *5* at.checkPythonCode & helpers
def checkPythonCode(self, contents, fileName, root, pyflakes_errors_only=False):
'''Perform python-related checks on root.'''
at = self
if contents and fileName and fileName.endswith('.py') and at.checkPythonCodeOnWrite:
# It's too slow to check each node separately.
if pyflakes_errors_only:
ok = True
else:
ok = at.checkPythonSyntax(root, contents)
# Syntax checking catches most indentation problems.
# if ok: at.tabNannyNode(root,s)
if ok and at.runPyFlakesOnWrite and not g.unitTesting:
ok2 = self.runPyflakes(root, pyflakes_errors_only=pyflakes_errors_only)
else:
ok2 = True
if not ok or not ok2:
g.app.syntax_error_files.append(g.shortFileName(fileName))
#@+node:ekr.20090514111518.5663: *6* at.checkPythonSyntax
def checkPythonSyntax(self, p, body, supress=False):
at = self
try:
body = body.replace('\r', '')
fn = '<node: %s>' % p.h
compile(body + '\n', fn, 'exec')
return True
except SyntaxError:
if not supress:
at.syntaxError(p, body)
except Exception:
g.trace("unexpected exception")
g.es_exception()
return False
#@+node:ekr.20090514111518.5666: *7* at.syntaxError (leoAtFile)
def syntaxError(self, p, body):
'''Report a syntax error.'''
g.error("Syntax error in: %s" % (p.h))
typ, val, tb = sys.exc_info()
message = hasattr(val, 'message') and val.message
if message: g.es_print(message)
if val is None: return
lines = g.splitLines(body)
n = val.lineno
offset = val.offset or 0
if n is None: return
i = val.lineno - 1
for j in range(max(0, i - 2), min(i + 2, len(lines) - 1)):
if j == i:
mark = '*'
node_link = "%s,-%d" % (
p.get_UNL(with_proto=True, with_count=True), j+1)
else:
mark = ' '
node_link = None
text = '%5s:%s %s' % (j+1, mark, lines[j].rstrip())
g.es_print(text, nodeLink=node_link)
if j == i:
g.es_print(' ' * (7 + offset) + '^')
#@+node:ekr.20161021084954.1: *6* at.runPyflakes
def runPyflakes(self, root, pyflakes_errors_only):
'''Run pyflakes on the selected node.'''
try:
import leo.commands.checkerCommands as checkerCommands
if checkerCommands.pyflakes:
x = checkerCommands.PyflakesCommand(self.c)
ok = x.run(p=root,pyflakes_errors_only=pyflakes_errors_only)
return ok
return True # Suppress error if pyflakes can not be imported.
except Exception:
g.es_exception()
#@+node:ekr.20090514111518.5665: *6* at.tabNannyNode
    def tabNannyNode(self, p, body, suppress=False):
        """Run tabnanny over body, reporting inconsistent tabs/spaces in node p.

        When suppress is True the caught exception is re-raised for the
        caller; otherwise the problem is reported via g.error/g.es.

        NOTE(review): the `parser` module was deprecated and removed in
        Python 3.10, so this import itself fails on modern interpreters —
        confirm the supported interpreter range.
        """
        import parser
        import tabnanny
        import tokenize
        try:
            # tabnanny consumes a readline-style callable over the body text.
            readline = g.ReadLinesClass(body).next
            tabnanny.process_tokens(tokenize.generate_tokens(readline))
        except parser.ParserError:
            junk, msg, junk = sys.exc_info()
            if suppress:
                raise
            g.error("ParserError in", p.h)
            g.es('', str(msg))
        except IndentationError:
            junk, msg, junk = sys.exc_info()
            if suppress:
                raise
            g.error("IndentationError in", p.h)
            g.es('', str(msg))
        except tokenize.TokenError:
            junk, msg, junk = sys.exc_info()
            if suppress:
                raise
            g.error("TokenError in", p.h)
            g.es('', str(msg))
        except tabnanny.NannyNag:
            # NannyNag carries the offending line number, text and message.
            junk, nag, junk = sys.exc_info()
            if suppress:
                raise
            badline = nag.get_lineno()
            line = nag.get_line()
            message = nag.get_msg()
            g.error("indentation error in", p.h, "line", badline)
            g.es(message)
            line2 = repr(str(line))[1: -1]
            g.es("offending line:\n", line2)
        except Exception:
            g.trace("unexpected exception")
            g.es_exception()
            if suppress: raise
#@+node:ekr.20041005105605.198: *5* at.directiveKind4 (write logic)
# These patterns exclude constructs such as @encoding.setter or @encoding(whatever)
# However, they must allow @language python, @nocolor-node, etc.
at_directive_kind_pattern = re.compile(r'\s*@([\w-]+)\s*')
def directiveKind4(self, s, i):
"""
Return the kind of at-directive or noDirective.
Potential simplifications:
- Using strings instead of constants.
- Using additional regex's to recognize directives.
"""
at = self
n = len(s)
if i >= n or s[i] != '@':
j = g.skip_ws(s, i)
if g.match_word(s, j, "@others"):
return at.othersDirective
if g.match_word(s, j, "@all"):
return at.allDirective
return at.noDirective
table = (
("@all", at.allDirective),
("@c", at.cDirective),
("@code", at.codeDirective),
("@doc", at.docDirective),
("@end_raw", at.endRawDirective),
("@others", at.othersDirective),
("@raw", at.rawDirective),
("@verbatim", at.startVerbatim))
# Rewritten 6/8/2005.
if i + 1 >= n or s[i + 1] in (' ', '\t', '\n'):
# Bare '@' not recognized in cweb mode.
return at.noDirective if at.language == "cweb" else at.atDirective
if not s[i + 1].isalpha():
return at.noDirective # Bug fix: do NOT return miscDirective here!
if at.language == "cweb" and g.match_word(s, i, '@c'):
return at.noDirective
for name, directive in table:
if g.match_word(s, i, name):
return directive
# Support for add_directives plugin.
# Use regex to properly distinguish between Leo directives
# and python decorators.
s2 = s[i:]
m | |
import time, math, copy
import numpy as np
import pandas as pd
import machineLearning
import pickle
INFINITY = float("inf")
class GameAI(object):
    def __init__(self, game):
        """Create an AI bound to `game` and load the pickled decision tree."""
        super().__init__()
        self.game = game
        # Last move chosen as (row, col); (-1, -1) means none yet.
        self.move = (-1,-1)
        self.timeLimit = 3 # 3 seconds is the time limit for search
        self.debug = False # True for debugging
        # NOTE(review): this handle stays open for the object's lifetime and
        # is never closed — consider a `with` block; confirm nothing else
        # reads self.fileObject later.
        self.fileObject = open("decisionTree", 'rb')
        self.tree = pickle.load(self.fileObject)
# AI perform move (there must be an available move due to the pre-move check)
def performMove(self, index):
# Iterative Deepening MiniMax Search with Alpha-Beta Pruning
tmpBoard = [row[:] for row in self.game.board] # we don't want to make changes to the game board
if index == 0:
self.move = self.miniMax(tmpBoard)
print("minimax")
print(self.move)
else:
self.move = self.negaScout(tmpBoard)
print("negascout")
#testing decision tree
#self.move = self.oriminiMax(tmpBoard)
#print("oriMinimax")
print(self.move)
if self.move is None:
#print("here")
return
else:
# perform move (there must be an available move)
self.game.performMove(self.move[0], self.move[1])
def getSortedNode(self, board, player):
sortedNodes = []
successorBoards = self.findSuccessorBoards(board, player)
for successorBoard in successorBoards:
sortedNodes.append((successorBoard, self.utilityOf(successorBoard, player)))
sortedNodes = sorted(sortedNodes, key=lambda node: node[1], reverse=True)
sortedNodes = [node[0] for node in sortedNodes]
return sortedNodes
""" Iterative Deepening MiniMax Search Algorithm within Time Limit
From depth = 3, if still within the time limit, continue search to get more insight.
Return the optimal move within limited resources.
"""
def miniMax(self, board):
print("here")
startTime = time.time()
timeElapsed = 0
depth = 3
optimalMove = (-1, -1)
optimalBoard = board
stopDigging = False
while not stopDigging and timeElapsed < self.timeLimit:
stopDigging, optimalBoard = self.IDMiniMax(board, 0, depth, 2, -INFINITY, INFINITY)
endTime = time.time()
timeElapsed += endTime - startTime
startTime = endTime
depth += 1
print("[Console MSG] Time used by AI: " + str(timeElapsed))
if optimalBoard == board:
return None
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] != optimalBoard[row][col]:
optimalMove = (row, col)
print(np.asarray(optimalBoard).reshape(8, 8))
return optimalMove
""" Iterative Deepening MiniMax Search with Alpha-Beta Pruning
board - state at current node
player - player at current node (AI - white - maximizer; Player - black - minimizer)
currentLevel - level at current node
maxLevel - used to judge whether go deeper or not
Return the optimal board (state) found in the current level for the current node.
"""
    def IDMiniMax(self, board, currentLevel, maxLevel, player, alpha, beta):
        """Depth-limited minimax with alpha-beta pruning.

        board         -- 8x8 list-of-lists state at this node
        currentLevel  -- depth of this node
        maxLevel      -- depth cutoff
        player        -- 2 = AI/white (maximizer); otherwise minimizer
        alpha, beta   -- current pruning bounds

        Returns (stopDigging, board): stopDigging is True when no successors
        existed (deepening can stop); board is the chosen successor, or the
        input board at a leaf.
        """
        if self.debug:
            print("Level: " + str(currentLevel) + " maxLevel: " + str(maxLevel))
        stopDigging = False
        # Leaf: no legal move for player, or the depth cutoff was reached.
        if (not self.game.moveCanBeMade(board, player) or currentLevel == maxLevel):
            return (stopDigging, board)
        successorBoards = self.findSuccessorBoards(board, player)
        if len(successorBoards) == 0:
            stopDigging = True
            return stopDigging, board
        bestBoard = None
        if player == 2:
            # Maximizing side (AI / white).
            maxValue = -INFINITY
            for successor in successorBoards:
                stopDigging, lookaheadBoard = self.IDMiniMax(successor, currentLevel+1, maxLevel, 1, alpha, beta)
                utility = self.utilityOf(lookaheadBoard, player)
                if utility > maxValue:
                    maxValue = utility
                    bestBoard = successor
                alpha = max(alpha, utility)
                if utility >= beta:
                    #print("alphaBeta is pruning", successor)
                    return stopDigging, successor # prune
        else:
            # Minimizing side (opponent / black).
            minValue = INFINITY
            for successor in successorBoards:
                stopDigging, lookaheadBoard = self.IDMiniMax(successor, currentLevel+1, maxLevel, 2, alpha, beta)
                utility = self.utilityOf(lookaheadBoard, player)
                if utility < minValue:
                    minValue = utility
                    bestBoard = successor
                beta = min(beta, utility)
                if utility <= alpha:
                    #print("alphaBeta is pruning", successor)
                    return stopDigging, successor # prune
        return stopDigging, bestBoard
def negaScout(self, board):
startTime = time.time()
timeElapsed = 0
depth = 3
optimalMove = (-1, -1)
optimalBoard = board
stopDigging = False
while not stopDigging and timeElapsed < self.timeLimit:
# (stopDigging, optimalBoard, alpha) = self.negaScoutHelper(board, 2, depth, -INFINITY, INFINITY, 1)
maxScore = -INFINITY
for successor in self.getSortedNode(board, 1):
point = self.negaScoutHelper2(successor, 1, depth, -INFINITY, INFINITY, 1)
if point > maxScore:
maxScore = point
optimalBoard = successor
endTime = time.time()
timeElapsed += endTime - startTime
startTime = endTime
depth += 1
print("[Console MSG] Time used by AI: " + str(timeElapsed))
if optimalBoard == board:
print("here")
return None
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] != optimalBoard[row][col]:
optimalMove = (row, col)
print(np.asarray(optimalBoard).reshape(8, 8))
print(optimalMove)
return optimalMove
    def negaScoutHelper2(self, board, player, depth, alpha, beta, color):
        """NegaScout (principal variation search): return a score for board.

        color (+1/-1) flips the heuristic's sign at leaves, negamax-style.

        NOTE(review): `player` is passed unchanged into the recursive calls
        while `color` alternates — confirm whether the side to move should
        also alternate here.
        """
        # Leaf: no move available or depth exhausted.
        if not self.game.moveCanBeMade(board, player) or depth == 0:
            return self.utilityOf(board, player) * color
        successorBoards = self.getSortedNode(board, player)
        first = True
        for successor in successorBoards:
            if not first:
                # Null-window probe around alpha.
                score = -self.negaScoutHelper2(successor, player, depth - 1, -alpha - 1, -alpha, -color)
                if alpha < score < beta:
                    # Probe failed high: re-search with a full window.
                    score = -self.negaScoutHelper2(successor, player, depth - 1, -beta, -score, -color)
            else:
                first = False
                # First child searched with the full (alpha, beta) window.
                score = -self.negaScoutHelper2(successor, player, depth - 1, -beta, -alpha, -color)
            alpha = max(alpha, score)
            if alpha >= beta:
                #print("negascout is pruning", successor)
                break
        return alpha
# return a list of successor boards
def findSuccessorBoards(self, board, player):
successorBoards = []
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] == 0:
numAvailableMoves = self.game.placePiece(board, row, col, player, PLAYMODE=False)
if numAvailableMoves > 0:
successorBoard = copy.deepcopy([row[:] for row in board])
successorBoard[row][col] = player
successorBoards.append(successorBoard)
return successorBoards
# evaluation function (heuristics for non-final node) in this state (board)
def utilityOf(self, board, player):
board_mobility = self.mobility(board, player)
board_frontier = self.frontierSquares(board, player)
board_corners = self.corners(board, player)
xsquares, csquares = self.x_c_squares(board, player)
board_parity = self.parity(board)
board_state = self.gameState(board)
df = pd.Series([board_mobility, board_frontier, board_corners, xsquares, csquares, board_parity, board_state],
index=["numMoves", "frontier", "corners", "Xsquares", "CSquares", "parity", "state"])
return machineLearning.predict(df, self.tree)
# mobility, number of moves a player can make minus number of moves its opponent can make
def mobility(self, board, player):
blackMovesFound = self.findSuccessorBoards(board, 1)
whiteMovesFound = self.findSuccessorBoards(board, 2)
if player == 1:
return len(blackMovesFound) - len(whiteMovesFound)
elif player == 2:
return len(whiteMovesFound) - len(blackMovesFound)
else:
return 0
# number of frontier that player occupies
def frontierSquares(self, board, player):
if player == 1:
opp = 2
if player == 2:
opp = 1
coords_x, coords_y = np.where(np.array(board) == player) # coordinates that surround opponents' pieces
opp_coords_x, opp_coords_y = np.where(np.array(board) == opp)
frontier = []
frontier_opp = []
sur_player = []
for i in range(len(coords_x)):
for row in [-1, 0, 1]:
for col in [-1, 0, 1]:
x = coords_x[i] + row
y = coords_y[i] + col
if 0 <= x < 8 and 0 <= y < 8:
np.append(sur_player, np.array([x, y]))
if len(sur_player) > 0:
sur_player = np.unique(np.asarray(sur_player), axis=0)
for i in range(len(sur_player)):
if board[sur_player[i][0]][sur_player[i][1]] == 0:
np.append(frontier, sur_player[i])
sur_opp = []
for i in range(len(opp_coords_x)):
for row in [-1, 0, 1]:
for col in [-1, 0, 1]:
x = opp_coords_x[i] + row
y = opp_coords_y[i] + col
if 0 <= x < 8 and 0 <= y < 8:
#sur_opp.append(np.array([x, y]))
np.append(sur_opp, np.array([x, y]))
if len(sur_opp) > 0:
sur_opp = np.unique(np.asarray(sur_opp), axis=0)
for i in range(len(sur_opp)):
if board[sur_opp[i][0]][sur_opp[i][1]] == 0:
np.append(frontier_opp, sur_opp[i])
return len(frontier) - len(frontier_opp)
#number of corners the player occupies
def corners(self, board, player):
corners = np.array([[0, 0], [0, 7], [7, 0], [7, 7]])
if player == 1:
opp = 2
if player == 2:
opp = 1
black_corner = 0
white_corner = 0
for corner in corners:
if board[corner[0]][corner[1]] == 0:
continue
elif board[corner[0]][corner[1]] == 1:
black_corner += 1
else:
white_corner += 1
if player == 1:
return black_corner - white_corner
elif player == 2:
return white_corner - black_corner
else:
return 0 # bit different from how the data is created, does not matter, because player 0 gets subsetted
#number of x_c squares player occupies
def x_c_squares(self, board, player):
corners = np.array([[0, 0], [0, 7], [7, 0], [7, 7]])
x_squares = np.array([[1, 1], [1, 6], [6, 1], [6, 6]])
c_squares1 = np.array([[0, 1], [1, 7], [6, 0], [7, 6]])
c_squares2 = np.array([[1, 0], [0, 6], [7, 1], [6, 7]])
if player == 1:
opp = 2
if player == 2:
opp = 1
player_x_squares = 0
opp_x_squares = 0
player_c_squares = 0
opp_c_squares = 0
for i in range(len(x_squares)):
if board[corners[i][0]][corners[i][1]] == 0:
if board[x_squares[i][0]][x_squares[i][1]] == player:
player_x_squares += 1
if board[c_squares1[i][0]][c_squares1[i][1]] == player:
player_c_squares += 1
if board[c_squares2[i][0]][c_squares2[i][1]] == player:
player_c_squares += 1
if board[x_squares[i][0]][x_squares[i][1]] == opp:
opp_x_squares += 1
if board[c_squares1[i][0]][c_squares1[i][1]] == opp:
opp_c_squares += 1
if board[c_squares2[i][0]][c_squares2[i][1]] == opp:
opp_c_squares += 1
else:
continue
XSquares = player_x_squares - opp_x_squares
CSquares = player_c_squares - opp_c_squares
return XSquares, CSquares
def parity(self, board):
progress = 0
for row in range(8):
for col in range(8):
if board[row][col] != 0:
progress += 1
if progress % 2 == 0:
parity = 0
else:
parity = 1
return parity
#which game state the player is on
def gameState(self, board):
progress = 0
for row in range(8):
for col in range(8):
if board[row][col] != 0:
progress += 1
if progress % 61 <= 20:
return "beginning"
elif progress % 61 <= 40:
return "middle"
else:
return "end"
    #The code below is used to test how well the decision tree performs
    #Original code from ai.py
def oriminiMax(self, board):
startTime = time.time()
timeElapsed = 0
depth = 2
optimalMove = (-1, -1)
optimalBoard = board
stopDigging = False
while not stopDigging and timeElapsed < self.timeLimit:
(stopDigging, optimalBoard) = self.IDMiniMax(board, 0, depth, 1, -INFINITY, INFINITY)
endTime = time.time()
timeElapsed += endTime - startTime
startTime = endTime
depth += 1
print("[Console MSG] Time used by AI: " + str(timeElapsed))
for row in range(0, 8):
for col in range(0, 8):
if board[row][col] != optimalBoard[row][col]:
optimalMove = (row, col)
return optimalMove
""" Iterative Deepening MiniMax Search with Alpha-Beta Pruning
board - state at current node
player - player at current node (AI - white - maximizer; Player - black - minimizer)
currentLevel - level at current node
maxLevel - used to judge whether go deeper or not
Return the optimal board (state) found in the current level for the current node.
"""
def oriIDMiniMax(self, board, currentLevel, maxLevel, player, alpha, beta):
if self.debug:
print("Level: " + str(currentLevel) + " maxLevel: " + str(maxLevel))
stopDigging = False
if (not self.game.moveCanBeMade(board, player) or currentLevel == maxLevel):
return (stopDigging, board)
successorBoards = self.findSuccessorBoards(board, player)
if len(successorBoards) == 0:
stopDigging = True
return (stopDigging, board)
bestBoard = None
if player == 2:
maxValue = -INFINITY
for idx in range(0, len(successorBoards)):
stopDigging, lookaheadBoard = self.oriIDMiniMax(successorBoards[idx], currentLevel + 1, maxLevel, 1, alpha,
beta)
utility = self.oriUtilityOf(lookaheadBoard)
if utility > maxValue:
maxValue = utility
bestBoard = successorBoards[idx]
alpha = max(alpha, utility)
if utility >= | |
# @Author: JC
import requests
import json
from lxml import etree
from tool import read_json_file_to_object
from tool import get_html
from tool import save_object_to_json_file
from tool import save_html_response_to_html_file
from tool import read_txt_file_to_list
from tool import Logger
from login import get_cookie_from_github
from copy import deepcopy
import re
import os
import sys
import time
import hashlib
import shutil
def get_html_with_keyword(keyword, cookie, pageNum=1):
    """
    Fetch one results page of a GitHub code search for `keyword`.

    Returns (isEnd, userProjects): isEnd is True only when a page was
    fetched and has no "Next" link; on a failed fetch isEnd is False and
    userProjects is empty (the caller retries with the next page number).
    """
    url = ('https://github.com/search?o=desc&q="' + keyword
           + '"&s=indexed&type=Code&p=' + str(pageNum))
    html = get_html(url, cookie=cookie)
    if html == 'Fail':
        return False, []
    dom_tree = etree.HTML(html)
    # XPath for the "user/project" link text of each search hit.
    userProjects = dom_tree.xpath('//*[@id="code_search_results"]/div[1]/div/div[1]/div/a[1]/text()')
    for userProject in userProjects:
        print(userProject)
    # The pager's link labels tell us whether a "Next" page exists.
    pager_labels = dom_tree.xpath('//*[@id="code_search_results"]/div[2]/div/a/text()')
    isEnd = "Next" not in pager_labels
    if not isEnd:
        print("第%d页结束,继续下一页"%pageNum)
    else:
        print("整体结束")
    return isEnd, userProjects
def get_all_user_project_with_keyword(keyword='chroblert', cookie={}):
    """
    Collect user/project hits from every search-results page for `keyword`.
    Keeps requesting successive pages until a page reports the end.
    """
    print("抓取所有包含%s关键词的user和project"%keyword)
    allUserProjectList = []
    pageNum = 0
    isEnd = False
    while not isEnd:
        pageNum += 1
        isEnd, pageHits = get_html_with_keyword(keyword=keyword, cookie=cookie, pageNum=pageNum)
        if pageHits:
            allUserProjectList.extend(pageHits)
            print(pageHits)
        else:
            print('没有获取到%d页的内容,继续抓取第%d页'%(pageNum-1,pageNum))
    return allUserProjectList
def save_List_to_file(dataList, fileName='allUserProjectList.txt'):
    """Write each element of dataList to fileName, one per line."""
    with open(fileName, 'w') as f:
        f.writelines("%s\n" % item for item in dataList)
    # print("保存成功")
def file_data_process(uri):
    """
    Parse a file of "user/project" lines into {user: [project, ...]},
    removing duplicate projects while keeping insertion order.

    :uri  path of the file containing the raw strings
    """
    userProjectDict = {}
    with open(uri, 'r') as f:
        lines = f.readlines()
    print(len(lines))
    for line in lines:
        user, project = line.split('/')
        project = project.replace('\n', '')
        print(user)
        userProjectDict.setdefault(user, [])
        if project not in userProjectDict[user]:
            userProjectDict[user].append(project)
    return userProjectDict
def get_all_fileLink_one_user_one_project(userName, projectName, cookie={}):
    """Return every (readable) file link found in the given user's repository."""
    repo_url = 'https://github.com/' + userName + '/' + projectName
    return deepcopy(get_fileLink_use_recursive(url=repo_url, cookie=cookie))
def get_fileLink_use_recursive(url,cookie = {}):
    """
    Recursively collect file links under a GitHub repository directory page.

    Recursion ends when a directory page lists only files, and a page
    that cannot be fetched yields an empty list.  Links whose extension
    marks a binary/media (non-readable) file are skipped.

    Bug fixed: the recursive call for sub-directories now forwards
    `cookie`; previously it was dropped, so the authenticated session
    was lost below the top-level directory.
    """
    html = get_html(url = url ,cookie = cookie)
    fileLinkList = []
    print("递归查找 %s 目录下的文件"%url)
    if html == 'Fail' :
        # Unreachable page: treat as empty.
        return []
    dom_tree = etree.HTML(html)
    fileAndDirLinkList = dom_tree.xpath('//tr[@class="js-navigation-item" or @class="js-navigation-item navigation-focus"]//td[@class="content"]//a/@href')
    for fileOrDirLink in fileAndDirLinkList:
        # Make the relative href absolute.
        fileOrDirLink = 'https://github.com' + fileOrDirLink
        if fileOrDirLink.split('/')[5] == 'blob':
            # A file: record it unless its extension marks a non-readable format.
            if fileOrDirLink.split('.')[-1] in ['jpg','png','gif','ico','svg','zip','rar','exe','bin','jar','mp3','mp4','class','pdf']:
                print('不记录不可读文件链接')
            else:
                fileLinkList.append(fileOrDirLink)
        else:
            # A directory: recurse, forwarding the session cookie.
            fileLinkList.extend(get_fileLink_use_recursive(fileOrDirLink, cookie=cookie))
    return fileLinkList
class userItem:
    """
    Holder for one GitHub user's scan data: name, weight, the projects
    that contain keywords, and per-project lists of suspicious file links.
    """
    def __init__(self):
        self.userName = ''     # GitHub login name
        self.weight = 0        # how much sensitive content was found
        self.projectList = []  # projects containing the keyword
        # {"project1": ["fileLink1", "fileLink2", ...], "project2": [...]}
        self.fileLinkDict = {}
def get_all_fileLink(dataDict,cookie = {}):
    """
    Build one userItem per user in dataDict ({user: [project, ...]}),
    collecting every candidate file link of every project.

    Bug fixed: the progress print used two '%s' placeholders but never
    applied the % operator, so it printed the literal placeholders
    instead of the user and project names.
    """
    userItemList = []
    for user in dataDict.keys():
        print("user:" + user)
        uItem = userItem()
        uItem.userName = user
        uItem.projectList.extend(dataDict[user])
        fileLinkDict = {}
        for project in dataDict[user]:
            allFileLinkList = get_all_fileLink_one_user_one_project(userName = user,projectName = project,cookie = cookie)
            fileLinkDict[project] = []
            fileLinkDict[project].extend(allFileLinkList)
            print("%s 用户 %s 项目内包含敏感信息的文件链接:=======================" % (user, project))
            print(allFileLinkList)
        uItem.fileLinkDict.update(fileLinkDict)
        userItemList.append(uItem)
    return userItemList
def read_json_file_to_userItemList(fileName):
    """
    Read the JSON file `fileName` and rebuild its records as a list of
    userItem objects.
    """
    rawItems = read_json_file_to_object(fileName)
    itemList = []
    for raw in rawItems:
        item = userItem()
        item.userName = raw['userName']
        item.weight = raw['weight']
        item.projectList.extend(deepcopy(raw['projectList']))
        item.fileLinkDict = deepcopy(raw['fileLinkDict'])
        itemList.append(item)
    return itemList
def save_userItemList_to_json_file(userItemList,fileName = 'userItemList.json'):
    """
    Serialize a list of userItem objects to a JSON file.

    Fixes: the loop variable no longer shadows the `userItem` class, and
    a fresh dict is built per item — the old code reused a single dict
    and deep-copied it on every append to paper over the aliasing.
    """
    serializable = []
    for item in userItemList:
        serializable.append({
            'userName': item.userName,
            'weight': item.weight,
            'projectList': item.projectList,
            'fileLinkDict': item.fileLinkDict,
        })
    save_object_to_json_file(serializable, fileName)
def search_all_sensitive_data_in_one_file(fileLink,cookie = {}):
    '''
    Scan the file at fileLink for key information and return the matches.

    :fileLink  URL of the file page on GitHub
    :cookie    cookie keeping the logged-in session
    :return fileWeight,fileHtml,list(set(retIPList)),list(set(retDomainList)),list(set(havedSensitiveKeywordList))

    NOTE(review): companyIPListUri, companyDomainListUri,
    sensitiveKeywordListUri and company are module-level globals defined
    elsewhere in this file — confirm they are set before calling.
    '''
    companyIPList = read_txt_file_to_list(uri = companyIPListUri)
    companyDomainList = read_txt_file_to_list(companyDomainListUri)
    sensitiveKeywordList = read_txt_file_to_list(sensitiveKeywordListUri)
    fileWeight = 0  # how strongly the file looks sensitive; 0 means a normal file
    # 1. Fetch the file's rendered page.
    html = get_html(url = fileLink,cookie=cookie)
    # NOTE(review): htmlFileName is computed but never used.
    htmlFileName = str(fileLink.split('/')[len(fileLink.split('/'))-1]) + ".html"
    if html != "Fail":
        dom_tree = etree.HTML(html)
        # NOTE(review): 'or@ id' looks like a typo for 'or @id' — confirm this XPath.
        dom_tree_xpath_result_list = dom_tree.xpath('/html/body/div[4]/div/main//div[@itemprop="text" or@ id="readme"]')
        fileHtml = ''
        if len(dom_tree_xpath_result_list) != 0:
            fileHtml = (etree.tostring(dom_tree_xpath_result_list[0])).decode('utf-8')
        else:
            save_html_response_to_html_file(html,'ttttt.html')
            print("没有匹配到文本中的内容")
        # 2. Extract every IPv4 address from the content.
        allIPList = list(set(re.findall(r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b',fileHtml)))
        save_List_to_file(allIPList,'allIPList.txt')
        # Intersection of companyIPList and allIPList.
        # retIPList = list(set(allIPList).intersection(set(companyIPList)))
        retIPList = [i for i in allIPList if i in list(set(companyIPList)) ]
        # The intersection's length tells whether company server IPs appear in the file.
        isHaveCompanyIP = True
        if len(retIPList) == 0:
            isHaveCompanyIP = False
            print("该文件内不包含公司服务器的IP地址")
        else:
            fileWeight = fileWeight + len(retIPList)
            print("包含的公司IP如下:")
            print(list(set(retIPList)))
        # 3. Extract every domain URL from the content.
        # allDomainUrlList = list(set(re.findall(r'\b(https?:\/\/.*\.?com|cn|org|edu)\b',fileHtml)))
        # allDomainUrlList = list(set(re.findall(r'(http|https)://(\w+\.){1,3}\w+',fileHtml)))
        allDomainUrlList = re.findall(r'\b((?:http|https)://(?:[\w|-]+\.){1,}\w+)\b',fileHtml)
        save_List_to_file(list(set(allDomainUrlList)),'allDomainUrlList.txt')
        # Keep only URLs containing the company keyword.
        retDomainList = [i for i in allDomainUrlList if company in i]
        # The filtered list's length tells whether company domains appear in the file.
        isHaveCompanyDomain = True
        if len(retDomainList) == 0:
            isHaveCompanyDomain = False
            print("该文件内不包含公司服务器的域名")
        else:
            fileWeight = fileWeight + len(retDomainList)
            print("包含的公司域名如下:")
            print(list(set(retDomainList)))
        # 4. Search the content for sensitive keywords.
        isHaveSensitiveKeyword = True
        havedSensitiveKeywordList = []
        for sensitiveKeyword in sensitiveKeywordList:
            regPattern = re.compile(r'' + sensitiveKeyword + '')
            result = regPattern.findall(fileHtml)
            havedSensitiveKeywordList.extend(result)
        if len(havedSensitiveKeywordList) == 0:
            isHaveSensitiveKeyword = False
            print("不包含一些敏感信息词汇")
        else:
            fileWeight = fileWeight + len(havedSensitiveKeywordList)
            print("包含的敏感词汇如下:")
            print(list(set(havedSensitiveKeywordList)))
    else:
        # Fetch failed: report an empty, zero-weight result.
        print("在GitHub上读取 %s 文件失败"%fileLink)
        fileWeight = 0
        fileHtml = ''
        retIPList = []
        retDomainList = []
        havedSensitiveKeywordList = []
    return fileWeight,fileHtml,list(set(retIPList)),list(set(retDomainList)),list(set(havedSensitiveKeywordList))
    # 4. Does the response content contain ordinary sensitive keywords?
def get_sensitive_info_for_one_file(fullDir,fileLink,cookie = {}):
    """
    Scan one file link via search_all_sensitive_data_in_one_file().

    When the file contains sensitive data (weight != 0), its rendered HTML
    is saved under fullDir and a '|#|'-separated record (stored name,
    weight, keyword/domain/IP hits) is returned; otherwise returnInfo
    is ''.

    :fullDir   directory created for this user's project
    :fileLink  one file link inside that project
    :cookie    cookie used when fetching the link
    :return fileWeight,returnInfo
    """
    fileWeight, fileHtml, ipHits, domainHits, keywordHits = search_all_sensitive_data_in_one_file(fileLink = fileLink,cookie = cookie)
    if fileWeight == 0:
        return fileWeight, ''
    baseName = str(fileLink.split('/')[len(fileLink.split('/'))-1])
    # Stored name = <original name>-MD5(<url path below user/project>).html,
    # keeping names unique while short enough for the filesystem.
    hashSource = '-'.join(fileLink.split('/')[5:]) + ".html"
    digest = str(hashlib.md5(hashSource.encode('utf-8')).hexdigest())
    storedName = baseName + '-' + digest + '.html'
    print("将包含敏感信息的文件的具体内容下载到本地")
    save_html_response_to_html_file(responseData = fileHtml,htmlFileName=fullDir + '/' + storedName)
    returnInfo = storedName + '|#|' + str(fileWeight) + '|#|' + ' , '.join(keywordHits) + '|#|' + ' , '.join(domainHits) + '|#|' + ' , '.join(ipHits)
    return fileWeight, returnInfo
def get_sensitive_info_for_one_userProject(scanResultDir,userName,projectName,userItemDict,cookie = {}):
    """
    Scan every recorded file link of one user's project; append per-file
    records to <project dir>/result.txt and return the project's total weight.
    """
    # Directory dedicated to this user's project.
    projectDir = scanResultDir + '/' + userName + '/' + projectName
    if not os.path.exists(projectDir):
        os.makedirs(projectDir)
    resultPath = projectDir + '/result.txt'
    with open(resultPath, 'w', encoding='utf-8') as f:
        f.write('userName|#|projectName|#|fileLinkUrl|#|toStoreFileName|#|fileWeight|#|havedSensitiveKeywordList|#|retDomainList|#|retIPList')
        f.write('\n')
    userProjectWeight = 0
    for fileLinkUrl in userItemDict.fileLinkDict[projectName]:
        print("|||||======%s--->%s--->%s ======|||||"%(userName,projectName,fileLinkUrl))
        fileWeight, fileInfo = get_sensitive_info_for_one_file(fullDir=projectDir, fileLink=fileLinkUrl, cookie=cookie)
        if fileWeight == 0:
            continue
        record = userName + '|#|' + projectName + '|#|' + fileLinkUrl + '|#|' + fileInfo
        with open(resultPath, 'a', encoding='utf-8') as f:
            f.write(record)
            f.write('\n')
        userProjectWeight += fileWeight
    return userProjectWeight
def get_sensitive_info_for_one_user(scanResultDir,userName,userItemDict,cookie = {}):
    """
    Scan all of one user's projects; append per-project records to
    <scanResultDir>/<user>/result.txt and return the user's total weight.

    Fix: os.makedirs is now guarded by an existence check (as the
    per-project scanner already does) so re-running a scan no longer
    crashes with FileExistsError.
    """
    userDir = scanResultDir + '/' + userName
    if not os.path.exists(userDir):
        os.makedirs(userDir)
    with open(userDir + '/result.txt','w',encoding = 'utf-8') as f:
        f.write('userName|#|projectName|#|userProjectWeight')
        f.write('\n')
    userWeight = 0
    for projectName in userItemDict.projectList:
        print("|||||======%s--->%s ======|||||"%(userName,projectName))
        userProjectWeight = get_sensitive_info_for_one_userProject(scanResultDir = scanResultDir,userName = userName,projectName = projectName,userItemDict = userItemDict,cookie = cookie)
        if userProjectWeight != 0:
            record = userName + '|#|' + projectName + '|#|' + str(userProjectWeight)
            with open(userDir + '/result.txt','a',encoding = 'utf-8') as f:
                f.write(record)
                f.write('\n')
            userWeight = userWeight + userProjectWeight
    return userWeight
def get_sensitive_info_for_github(scanResultDir,userItemList,cookie = {}):
    '''
    Scan all collected GitHub users for sensitive information.

    Writes a summary line per user with findings into
    scanResultDir/result.txt; users without findings get their
    per-user scan directory removed again.
    '''
    summaryPath = scanResultDir + '/result.txt'
    with open(summaryPath, 'w', encoding='utf-8') as f:
        f.write('userName|#|userWeight')
        f.write('\n')
    for userItemDict in userItemList:
        userName = userItemDict.userName
        print("|||||======%s======|||||"%userName)
        userWeight = get_sensitive_info_for_one_user(
            scanResultDir=scanResultDir, userName=userName,
            userItemDict=userItemDict, cookie=cookie)
        if userWeight == 0:
            # Nothing sensitive found: drop the directory created for this user.
            try:
                shutil.rmtree(scanResultDir + '/' + userName)
            except OSError as e:
                print(e)
            else:
                print("The directory is deleted successfully")
        else:
            with open(summaryPath, 'a', encoding='utf-8') as f:
                f.write(userName + '|#|' + str(userWeight))
                f.write('\n')
    print('在Github上的源码扫描结束')
def get_all_user_project_with_all_keyword(uri = 'companyKeywords.txt',cookie = {}):
    '''
    Collect every (user, project) hit for every company keyword in *uri*.

    Reads one keyword per line from the keyword file, queries GitHub for
    each keyword, and returns a de-duplicated deep copy of all hits.
    '''
    allUserProjectList = []
    companyKeywordList = read_txt_file_to_list(uri = uri)
    for companyKeyword in companyKeywordList:
        # No per-keyword dedup needed: the final set() below removes
        # duplicates across all keywords in one pass.
        allUserProjectList.extend(get_all_user_project_with_keyword(keyword = companyKeyword,cookie = cookie))
    return deepcopy(list(set(allUserProjectList)))
def show_search_result(scanResultDirUri):
    '''
    Collect every file-level finding from the per-user / per-project
    result.txt files under *scanResultDirUri* and write them all into a
    single show-result.txt summary file.
    '''
    allFindings = []
    userLines = read_txt_file_to_list(scanResultDirUri + '/result.txt')
    for userLine in deepcopy(userLines[1:]):          # skip the header line
        userName = userLine.split('|#|')[0]
        userDir = scanResultDirUri + '/' + userName
        projectLines = read_txt_file_to_list(userDir + '/result.txt')
        for projectLine in deepcopy(projectLines[1:]):  # skip the header line
            projectName = projectLine.split('|#|')[1]
            fileLines = read_txt_file_to_list(userDir + '/' + projectName + '/result.txt')
            allFindings.extend(deepcopy(fileLines[1:]))
    save_List_to_file(allFindings, scanResultDirUri + '/show-result.txt')
if __name__ == '__main__':
scanTimeAsDir = time.strftime('%Y%m%d%H%M',time.localtime(time.time()))
scanResultDir = 'scanResult/' + scanTimeAsDir
if not os.path.exists(scanResultDir):
os.makedirs(scanResultDir)
overallScanResultUri = scanResultDir + '/' + 'scanResult.txt'
logPath = scanResultDir + '/Logs'
if not os.path.exists(logPath):
os.makedirs(logPath)
sys.stdout = Logger(logPath + '/info.log',sys.stdout)
sys.stderr = Logger(logPath + '/error.log',sys.stderr)
sys.stderr | |
from __future__ import absolute_import, division
import json
import logging
import random
import re
import requests
import sys
import time
import uuid
from cStringIO import StringIO
from contextlib import closing
from datetime import datetime
from flask import current_app
from lxml import etree, objectify
from typing import Any # NOQA
from changes.artifacts.analytics_json import AnalyticsJsonHandler
from changes.artifacts.coverage import CoverageHandler
from changes.artifacts.dummylogfile import DummyLogFileHandler
from changes.artifacts.manager import Manager
from changes.artifacts.manifest_json import ManifestJsonHandler
from changes.artifacts.xunit import XunitHandler
from changes.backends.base import BaseBackend, UnrecoverableException
from changes.buildsteps.base import BuildStep
from changes.config import db, redis, statsreporter
from changes.constants import Result, Status
from changes.db.utils import get_or_create
from changes.jobs.sync_job_step import sync_job_step
from changes.lib.artifact_store_lib import ArtifactStoreClient
from changes.models.artifact import Artifact
from changes.models.failurereason import FailureReason
from changes.models.jobphase import JobPhase
from changes.models.jobstep import JobStep
from changes.models.log import LogSource, LOG_CHUNK_SIZE
from changes.models.node import Cluster, ClusterNode, Node
from changes.storage.artifactstore import ArtifactStoreFileStorage
from changes.utils.http import build_patch_uri
from changes.utils.text import chunked
# Maps Jenkins build result strings onto internal Result values.
RESULT_MAP = {
    'SUCCESS': Result.passed,
    'ABORTED': Result.aborted,
    'FAILURE': Result.failed,
    'REGRESSION': Result.failed,
    'UNSTABLE': Result.failed,
}

# XPath locating a queued Jenkins item by its CHANGES_BID build parameter.
QUEUE_ID_XPATH = '/queue/item[action/parameter/name="CHANGES_BID" and action/parameter/value="{job_id}"]/id'
# XPath locating a started build's number by its CHANGES_BID build parameter.
BUILD_ID_XPATH = ('/freeStyleProject/build[action/parameter/name="CHANGES_BID" and '
                  'action/parameter/value="{job_id}"]/number')

# Quick regex for extracting numeric <id> elements out of raw XML.
ID_XML_RE = re.compile(r'<id>(\d+)</id>')

# Maximum wall-clock seconds spent downloading a console log in one sync pass.
LOG_SYNC_TIMEOUT_SECS = 30

# Redis key for storing the master blacklist set
# The blacklist is used to temporarily remove jenkins masters from the pool of available masters.
MASTER_BLACKLIST_KEY = 'jenkins_master_blacklist'

# Default name for the Jenkins console log.
# Note that artifactstore may alter the name for deduplication, so this cannot directly be used.
JENKINS_LOG_NAME = 'jenkins-console'
class NotFound(Exception):
    """Indicates a 404 response from the Jenkins API.

    Raised by the HTTP helpers so callers can distinguish "resource does
    not exist" from other HTTP failures.
    """
    pass
class JenkinsBuilder(BaseBackend):
def __init__(self, master_urls=None, diff_urls=None, job_name=None,
auth_keyname=None, verify=True,
cluster=None, debug_config=None,
*args, **kwargs):
super(JenkinsBuilder, self).__init__(*args, **kwargs)
self.master_urls = master_urls
self.diff_urls = diff_urls
assert self.master_urls, 'No Jenkins masters specified'
self.logger = logging.getLogger('jenkins')
self.job_name = job_name
self.http_session = requests.Session()
self.auth = self.app.config[auth_keyname] if auth_keyname else None
self.verify = verify
self.cluster = cluster
self.debug_config = debug_config or {}
self.artifact_store_client = ArtifactStoreClient(current_app.config['ARTIFACTS_SERVER'])
def report_response_status(r, *args, **kwargs):
statsreporter.stats().incr('jenkins_api_response_{}'.format(r.status_code))
self.http_session.hooks['response'].append(report_response_status)
def _get_text_response(self, master_base_url, path, method='GET',
params=None, data=None):
"""Make an HTTP request and return a text response.
Params:
master_base_url (str): Jenkins master URL, in scheme://host form.
path (str): URL path on the master to access.
method (str): HTTP verb to use; Either 'GET' or 'POST'; 'GET' is the default.
params (dict): Optional dictionary of URL parameters to append to the URL.
data (dict): Optional body to attach to the request. If a dict is provided, it will be form-encoded.
Returns:
Content of the response, in unicode.
Raises:
NotFound if the server responded with a 404 status.
Exception for other error status codes.
"""
url = '{}/{}'.format(master_base_url, path.lstrip('/'))
if params is None:
params = {}
self.logger.info('Fetching %r', url)
resp = getattr(self.http_session, method.lower())(url, params=params,
data=data,
allow_redirects=False,
timeout=30,
auth=self.auth,
verify=self.verify)
if resp.status_code == 404:
raise NotFound
elif not (200 <= resp.status_code < 400):
exception_msg = 'Invalid response. Status code for %s was %s'
attrs = url, resp.status_code
self.logger.exception(exception_msg, *attrs)
raise Exception(exception_msg % attrs)
return resp.text
def _get_json_response(self, master_base_url, path):
"""Makes a Jenkins API request and returns the JSON response
Args:
master_base_url (str): Jenkins master URL, in scheme://host form.
path (str): URL path on the master to access.
Returns:
Parsed JSON from the request.
Raises:
NotFound if the server responded with a 404 status.
Exception for other error status codes.
ValueError if the response wasn't valid JSON.
"""
path = '{}/api/json/'.format(path.strip('/'))
text = self._get_text_response(master_base_url, path, method='GET')
return json.loads(text)
def _parse_parameters(self, json):
params = {}
for action in json['actions']:
params.update(
(p['name'], p.get('value'))
for p in action.get('parameters', [])
)
return params
    def _get_artifactstore_bucket(self, step):
        # Create the artifactstore bucket, if it doesn't already exist.
        # The name is cached on step.data so repeated syncs of the same
        # JobStep reuse one bucket instead of creating duplicates.
        bucket_name = step.data.get('jenkins_bucket_name')
        if not bucket_name:
            # '-jenkins' suffix keeps the bucket name unique per step.
            bucket_name = self.artifact_store_client.create_bucket(step.id.hex + '-jenkins').name
            step.data['jenkins_bucket_name'] = bucket_name
            db.session.add(step)
            db.session.commit()
        return bucket_name
    def _create_job_step(self, phase, data, force_create=False, cluster=None, **defaults):
        """
        Gets or creates the primary JobStep for a Jenkins Job.

        Args:
            phase (JobPhase): JobPhase the JobStep should be part of.
            data (dict): JSON-serializable data associated with the Jenkins build.
            force_create (bool): Force this JobStep to be created (rather than
                retrieved). This is used when replacing a JobStep to make sure
                we don't just get the old one.
            cluster (Optional[str]): Cluster in which the JobStep will be run.
        Returns:
            JobStep: The JobStep that was retrieved or created.
        """
        defaults['data'] = data
        if cluster:
            defaults['cluster'] = cluster

        # TODO(kylec): Get rid of the kwargs.
        if not defaults.get('label'):
            # we update this once we have the build_no for this jobstep
            defaults['label'] = '<Creating Jenkins build>'

        where = {
            'job': phase.job,
            'project': phase.project,
            'phase': phase,
        }
        if force_create:
            # uuid is unique which forces jobstep to be created
            where['id'] = uuid.uuid4()

        step, created = get_or_create(JobStep, where=where, defaults=defaults)
        # With force_create the random id guarantees a fresh row, so
        # get_or_create must have created rather than fetched.
        assert created or not force_create

        # Apply any configured debug infra-failure behaviour to the step.
        BuildStep.handle_debug_infra_failures(step, self.debug_config, 'primary')

        return step
def fetch_artifact(self, jobstep, artifact_data):
"""
Fetch an artifact from a Jenkins job.
Args:
jobstep (JobStep): The JobStep associated with the artifact.
artifact_data (dict): Jenkins job artifact metadata dictionary.
Returns:
A streamed requests Response object.
Raises:
HTTPError: if the response code didn't indicate success.
Timeout: if the server took too long to respond.
"""
url = '{base}/job/{job}/{build}/artifact/{artifact}'.format(
base=jobstep.data['master'],
job=jobstep.data['job_name'],
build=jobstep.data['build_no'],
artifact=artifact_data['relativePath'],
)
return self._streaming_get(url)
    def sync_artifact(self, artifact):
        """Download one Jenkins artifact, persist it in the artifact store,
        then run the registered artifact handlers over its content."""
        jobstep = artifact.step
        resp = self.fetch_artifact(jobstep, artifact.data)

        # NB: Accessing Response.content results in the entire artifact
        # being loaded into memory.
        if len(resp.content) == 0:
            # Artifact store does not support empty artifacts, and they're not very useful, so just discard them.
            self.logger.info('Artifact %s from jobstep %s is empty, discarding' % (artifact.name, jobstep.id.hex))
            return

        bucket_name = self._get_artifactstore_bucket(jobstep)

        artifact.file.storage = 'changes.storage.artifactstore.ArtifactStoreFileStorage'
        filename = ArtifactStoreFileStorage.get_filename_from_artifact_name(bucket_name, artifact.id.hex)
        artifact.file.save(StringIO(resp.content), filename, path=artifact.name)

        # commit file save regardless of whether handler is successful
        db.session.commit()

        # TODO(dcramer): requests doesnt seem to provide a non-binary file-like
        # API, so we're stuffing it into StringIO
        try:
            self.get_artifact_manager(jobstep).process(artifact, StringIO(resp.content))
        except Exception:
            # Handler failures are logged but do not fail the sync; the raw
            # artifact bytes were already persisted and committed above.
            self.logger.exception(
                'Failed to sync test results for job step %s', jobstep.id)
    def _sync_log(self, jobstep):
        """Incrementally sync the Jenkins console log for *jobstep* into the
        artifact store.

        Uses Jenkins' progressiveText endpoint with a persisted byte offset
        (jobstep.data['log_offset']) so each call only uploads new output.

        Returns:
            True if Jenkins reports more log data is coming (caller should
            poll again), otherwise None.
        """
        bucket_name = self._get_artifactstore_bucket(jobstep)

        # Note: artifactstore may alter the log name to deduplicate it, so always use data.get('log_artifact_name')
        artifact_name = jobstep.data.get('log_artifact_name')
        if not artifact_name:
            artifact_name = self.artifact_store_client\
                .create_chunked_artifact(bucket_name, artifact_name=JENKINS_LOG_NAME).name
            jobstep.data['log_artifact_name'] = artifact_name
            db.session.add(jobstep)
            db.session.commit()

        logsource, created = get_or_create(LogSource, where={
            'name': artifact_name,
            'step': jobstep,
        }, defaults={
            'job': jobstep.job,
            'project': jobstep.project,
            'date_created': jobstep.date_started,
            'in_artifact_store': True,
        })
        if created:
            # Brand-new log source: start from the beginning.
            offset = 0
        else:
            offset = jobstep.data.get('log_offset', 0)

        url = '{base}/job/{job}/{build}/logText/progressiveText/'.format(
            base=jobstep.data['master'],
            job=jobstep.data['job_name'],
            build=jobstep.data['build_no'],
        )

        start_time = time.time()

        with closing(self._streaming_get(url, params={'start': offset})) as resp:
            log_length = int(resp.headers['X-Text-Size'])

            # When you request an offset that doesnt exist in the build log, Jenkins
            # will instead return the entire log. Jenkins also seems to provide us
            # with X-Text-Size which indicates the total size of the log
            if offset > log_length:
                return

            # Jenkins will suggest to us that there is more data when the job has
            # yet to complete
            has_more = resp.headers.get('X-More-Data') == 'true'

            # XXX: requests doesnt seem to guarantee chunk_size, so we force it
            # with our own helper
            iterator = resp.iter_content()
            for chunk in chunked(iterator, LOG_CHUNK_SIZE):
                chunk_size = len(chunk)
                try:
                    self.artifact_store_client.post_artifact_chunk(bucket_name, artifact_name, offset, chunk)
                    offset += chunk_size

                    if time.time() > start_time + LOG_SYNC_TIMEOUT_SECS:
                        # Bail out of this pass; the truncation handler below runs.
                        raise RuntimeError('TOO LONG TO DOWNLOAD LOG: %s' % logsource.get_url())
                except Exception as e:
                    # On an exception or a timeout, attempt to truncate the log
                    # Catch all exceptions, including timeouts and HTTP errors
                    self.logger.warning('Exception when uploading logchunks: %s', e.message)
                    has_more = False
                    warning = ("\nLOG TRUNCATED. SEE FULL LOG AT "
                               "{base}/job/{job}/{build}/consoleText\n").format(
                        base=jobstep.data['master'],
                        job=jobstep.data['job_name'],
                        build=jobstep.data['build_no'])
                    self.artifact_store_client.post_artifact_chunk(bucket_name, artifact_name, offset, warning)
                    break

        # We **must** track the log offset externally as Jenkins embeds encoded
        # links and we cant accurately predict the next `start` param.
        jobstep.data['log_offset'] = log_length
        db.session.add(jobstep)

        if not has_more:
            self.artifact_store_client.close_chunked_artifact(bucket_name, artifact_name)

        return True if has_more else None
def _pick_master(self, job_name, is_diff=False):
"""
Identify a master to run the given job on.
The master with the lowest queue for the given job is chosen. By random
sorting the first empty queue will be prioritized.
"""
candidate_urls = self.master_urls
if is_diff and self.diff_urls:
candidate_urls = self.diff_urls
blacklist = redis.smembers(MASTER_BLACKLIST_KEY)
master_urls = [c for c in candidate_urls if c not in blacklist]
if len(master_urls) == 0:
raise ValueError("No masters to pick from.")
if len(master_urls) == 1:
return master_urls[0]
random.shuffle(master_urls)
best_match = (sys.maxint, None)
for url in master_urls:
try:
queued_jobs = self._count_queued_jobs(url, job_name)
except:
self.logger.exception("Couldn't count queued jobs on master %s", url)
continue
if queued_jobs == 0:
return url
| |
import math
import pyglet
import rabbyt
from pyglet.gl import *
from glart import *
from sounds import *
class MapDialogue:
    """Star-map dialogue: renders systems and the links between them, lets
    the player select a destination system, and exposes ZOOM/OUT/CONFIRM/
    ABORT buttons along the bottom edge.  (Python 2: layout maths relies on
    integer division.)"""
    def __init__(self,window):
        self.window = window
        self.width = 500
        self.height = 500
        # Gap between the dialogue and each window edge, so it is centred.
        self.margin = [(window[0] - self.width) / 2 ,(window[1] - self.height) / 2]
        # Corner order: (0) bottom-left, (1) top-left, (2) top-right, (3) bottom-right.
        self.corners = [(self.margin[0], self.margin[1]), (self.margin[0], window[1] - self.margin[1]),
                        (window[0] - self.margin[0], window[1] -self.margin[1]), (window[0] - self.margin[0], self.margin[1])]
        self.center = (window[0]/2, window[1]/2)
        self.open = 0                # non-zero while the dialogue is shown
        self.maplocation = [0,0]     # pan offset applied to the map view
        self.zoom = 3                # scale factor: system coords -> pixels
        self.selected = None         # system key the player clicked, if any
        self.active = None
        self.activelinks = []
        self.batch = pyglet.graphics.Batch()
        # Buttons placed along the bottom edge of the dialogue.
        self.buttons = {"ZOOM": OGLButton(label="ZOOM", height=20, width=62,
                            center=(self.corners[0][0]+50,self.corners[0][1]+20),batch = self.batch, font_size=10),
                        "OUT": OGLButton(label="OUT", height=20, width=62,
                            center=(self.corners[0][0]+125,self.corners[0][1]+20),batch = self.batch, font_size=10),
                        "CONFIRM": OGLButton(label="CONFIRM", height=20, width=70,
                            center=(self.corners[0][0]+300,self.corners[0][1]+20),batch = self.batch, font_size=10),
                        "ABORT": OGLButton(label="ABORT", height=20, width=70,
                            center=(self.corners[0][0]+375,self.corners[0][1]+20),batch = self.batch, font_size=10)}

    def draw(self,systemdict,linkdict):
        """Immediate-mode render: border frame, system points, link lines,
        then the buttons and label batch on top."""
        thick=2
        glBegin(GL_QUADS)
        #glColor3f(0.0, 0.0, 0.0)
        #glVertex2f(self.corners[0][0], self.corners[0][1])
        #glVertex2f(self.corners[1][0], self.corners[1][1])
        #glVertex2f(self.corners[2][0], self.corners[2][1])
        #glVertex2f(self.corners[3][0], self.corners[3][1])
        glColor3f(0.1, 0.1, 0.1) #start border
        # Four thin quads, one per edge of the dialogue frame.
        glVertex2f(self.corners[0][0], self.corners[0][1])
        glVertex2f(self.corners[1][0], self.corners[1][1])
        glVertex2f(self.corners[1][0] + thick, self.corners[1][1])
        glVertex2f(self.corners[0][0] + thick, self.corners[0][1])
        glVertex2f(self.corners[1][0], self.corners[1][1])
        glVertex2f(self.corners[2][0], self.corners[2][1])
        glVertex2f(self.corners[2][0], self.corners[2][1] - thick)
        glVertex2f(self.corners[1][0], self.corners[1][1] - thick)
        glVertex2f(self.corners[2][0], self.corners[2][1])
        glVertex2f(self.corners[3][0], self.corners[3][1])
        glVertex2f(self.corners[3][0] - thick, self.corners[3][1])
        glVertex2f(self.corners[2][0] - thick, self.corners[2][1])
        glVertex2f(self.corners[3][0], self.corners[3][1])
        glVertex2f(self.corners[0][0], self.corners[0][1])
        glVertex2f(self.corners[0][0], self.corners[0][1] + thick)
        glVertex2f(self.corners[3][0], self.corners[3][1] + thick)
        glEnd()
        # Systems are drawn as 7px points; highlighted if active/selected.
        glPointSize(7.0)
        glBegin(GL_POINTS)
        glColor3f(0.247, 0.463, 0.682)
        for item in systemdict:
            system = systemdict.get(item)
            # Map system coordinates into screen space (pan + zoom).
            x = self.maplocation[0] + self.center[0] + system.location[0]*self.zoom
            y = self.maplocation[1] + self.center[1] + system.location[1]*self.zoom
            # Remember the on-screen icon position for hit-testing in handleEvent.
            system.icon = [x,y]
            #print "SYSTEM" + system
            if system.active == 1:
                glColor3f(1.0, 0.678, 1.0)
            elif item == self.selected:
                glColor3f(0.463, 0.678, 1.0)
            glVertex2f(x, y)
            glColor3f(0.247, 0.463, 0.682)  # restore default colour
        glEnd()
        # Links between systems as thin lines; active/plotted get highlight colours.
        glLineWidth(1.0)
        glBegin(GL_LINES)
        glColor3f(0.25, 0.468, 0.69)
        for item in linkdict:
            link = linkdict.get(item)
            ax = self.maplocation[0] + self.center[0] + link.start[0] * self.zoom
            ay = self.maplocation[1] + self.center[1] + link.start[1] * self.zoom
            bx = self.maplocation[0] + self.center[0] + link.end[0] * self.zoom
            by = self.maplocation[1] + self.center[1] + link.end[1] * self.zoom
            if link.active == 1:
                glColor3f(0.4196, 0.1373, 0.5569)
            elif link.plotted == 1:
                glColor3f(0.0, 1.0, 0.498)
            glVertex2f(ax, ay)
            glVertex2f(bx, by)
            glColor3f(0.25, 0.468, 0.69)  # restore default colour
        glEnd()
        for button in self.buttons:
            self.buttons.get(button).draw()
        self.batch.draw()

    def buttonEvent(self, event):
        """React to a named button press.  CONFIRM closes and returns 1;
        ABORT closes and returns None; ZOOM/OUT are not implemented yet."""
        if event == "ZOOM":
            pass
        if event == "OUT":
            pass
        if event == "CONFIRM":
            self.open = 0
            return 1
        if event == "ABORT":
            self.open = 0
            return None

    def highlightLinks(self, linkdict, activesystem):
        """Mark as active exactly the link(s) joining the currently selected
        system and *activesystem* (link names are the two system names
        concatenated in either order)."""
        for link in linkdict:
            if (linkdict.get(link).name == str(self.selected)+activesystem or
                linkdict.get(link).name == activesystem+str(self.selected)):
                linkdict.get(link).active = 1
            else: linkdict.get(link).active = 0

    def handleEvent(self, event, systemdict, selectedsystem, activesystem, linkdict, x, y, sounds):
        """Handle a left-click at (x, y): first test the buttons, otherwise
        hit-test system icons (within a 7px box) and toggle selection.
        Returns the newly confirmed system on CONFIRM, else the previous
        *selectedsystem*."""
        if event == "mouse_left":
            value = None
            wasbutton = 0
            for button in self.buttons:
                # NOTE(review): membership in range() assumes integer mouse
                # coordinates — confirm pyglet always delivers ints here.
                if (x - self.buttons.get(button).center[0] in
                    range(self.buttons.get(button).width * -1, self.buttons.get(button).width) and
                    y - self.buttons.get(button).center[1] in
                    range(self.buttons.get(button).height * -1, self.buttons.get(button).height)):
                    value = self.buttonEvent(button)
                    wasbutton = 1
                    sounds.menu1.play()
            if wasbutton == 0:
                for system in systemdict:
                    if (x - systemdict.get(system).icon[0] in range(-7,7) and
                        y - systemdict.get(system).icon[1] in range(-7,7)):
                        if system == self.selected:
                            # Clicking the selected system de-selects it.
                            self.selected = None
                            value = None
                        elif systemdict.get(system).selected != self.selected:
                            self.selected = system
                            value = None
                        break
                self.highlightLinks(linkdict, activesystem)
            if value == None:
                return selectedsystem
            if value == 1:
                # CONFIRM pressed: the selection becomes the travel target.
                return self.selected
class DockDialogue:
    """Station/dock dialogue: opaque framed panel offering ANTIMATTER,
    JOBS, OUTFIT and EMBARK actions while the player is docked."""
    def __init__(self,window):
        self.window = window
        self.width = 400
        self.height = 400
        # Gap between the dialogue and each window edge, so it is centred.
        self.margin = [(window[0] - self.width) / 2 ,(window[1] - self.height) / 2]
        # Corner order: (0) bottom-left, (1) top-left, (2) top-right, (3) bottom-right.
        self.corners = [(self.margin[0], self.margin[1]), (self.margin[0], window[1] - self.margin[1]),
                        (window[0] - self.margin[0], window[1] -self.margin[1]), (window[0] - self.margin[0], self.margin[1])]
        self.center = (window[0]/2, window[1]/2)
        self.open = 0  # non-zero while the dialogue is shown
        self.batch = pyglet.graphics.Batch()
        # Action buttons stacked down the right-hand side.
        self.buttons = {"ANTIMATTER": OGLButton(label="ANTIMATTER", height=20, width=50,
                            center=(self.corners[2][0]-50,self.corners[2][1]-150),batch = self.batch),
                        "JOBS": OGLButton(label="JOBS", height=20, width=50,
                            center=(self.corners[2][0]-50,self.corners[2][1]-180),batch = self.batch),
                        "OUTFIT": OGLButton(label="OUTFIT", height=20, width=50,
                            center=(self.corners[2][0]-50,self.corners[2][1]-210),batch = self.batch),
                        "EMBARK": OGLButton(label="EMBARK", height=20, width=50,
                            center=(self.corners[2][0]-50,self.corners[2][1]-240),batch = self.batch)}

    def draw(self):
        """Immediate-mode render: black background quad, border frame, then
        buttons and the label batch."""
        thick=2
        glBegin(GL_QUADS) #start background
        glColor3f(0.0, 0.0, 0.0)
        glVertex2f(self.corners[0][0], self.corners[0][1])
        glVertex2f(self.corners[1][0], self.corners[1][1])
        glVertex2f(self.corners[2][0], self.corners[2][1])
        glVertex2f(self.corners[3][0], self.corners[3][1])
        glColor3f(0.4, 0.5, 0.2) #start border
        # Four thin quads, one per edge of the dialogue frame.
        glVertex2f(self.corners[0][0], self.corners[0][1])
        glVertex2f(self.corners[1][0], self.corners[1][1])
        glVertex2f(self.corners[1][0] + thick, self.corners[1][1])
        glVertex2f(self.corners[0][0] + thick, self.corners[0][1])
        glVertex2f(self.corners[1][0], self.corners[1][1])
        glVertex2f(self.corners[2][0], self.corners[2][1])
        glVertex2f(self.corners[2][0], self.corners[2][1] - thick)
        glVertex2f(self.corners[1][0], self.corners[1][1] - thick)
        glVertex2f(self.corners[2][0], self.corners[2][1])
        glVertex2f(self.corners[3][0], self.corners[3][1])
        glVertex2f(self.corners[3][0] - thick, self.corners[3][1])
        glVertex2f(self.corners[2][0] - thick, self.corners[2][1])
        glVertex2f(self.corners[3][0], self.corners[3][1])
        glVertex2f(self.corners[0][0], self.corners[0][1])
        glVertex2f(self.corners[0][0], self.corners[0][1] + thick)
        glVertex2f(self.corners[3][0], self.corners[3][1] + thick)
        glEnd()
        for button in self.buttons:
            self.buttons.get(button).draw()
        self.batch.draw()

    def handleEvent(self, event, x, y, sounds):
        """On a left-click, hit-test the buttons and return the name of the
        one clicked (None otherwise)."""
        if event == "mouse_left":
            for button in self.buttons:
                # NOTE(review): membership in range() assumes integer mouse
                # coordinates — confirm pyglet always delivers ints here.
                if (x - self.buttons.get(button).center[0] in
                    range(self.buttons.get(button).width * -1, self.buttons.get(button).width) and
                    y - self.buttons.get(button).center[1] in
                    range(self.buttons.get(button).height * -1, self.buttons.get(button).height)):
                    sounds.menu1.play()
                    return button
class MenuDialogue:
    """In-game menu dialogue with BACK / OPTIONS / QUIT buttons, drawn
    centred over the play area."""
    def __init__(self,window):
        self.docked = 0  # when 1, the menu draws an opaque background
        self.window = window
        self.width = 250
        self.height = 300
        # Gap between the dialogue and each window edge, so it is centred.
        self.margin = [(window[0] - self.width) / 2 ,(window[1] - self.height) / 2]
        # Corner order: (0) bottom-left, (1) top-left, (2) top-right, (3) bottom-right.
        self.corners = [(self.margin[0], self.margin[1]), (self.margin[0], window[1] - self.margin[1]),
                        (window[0] - self.margin[0], window[1] -self.margin[1]), (window[0] - self.margin[0], self.margin[1])]
        self.center = (window[0]/2, window[1]/2)
        self.open = 0  # non-zero while the dialogue is shown
        self.batch = pyglet.graphics.Batch()
        self.buttons = {"BACK": OGLButton(label="BACK", height=30, width=70,
                            center=(self.center[0],self.center[1]+50),batch = self.batch),
                        "OPTIONS": OGLButton(label="OPTIONS", height=30, width=70,
                            center=(self.center[0],self.center[1]),batch = self.batch),
                        "QUIT": OGLButton(label="QUIT", height=30, width=70,
                            center=(self.center[0],self.center[1]-50),batch = self.batch)}

    def draw(self):
        """Immediate-mode render: optional black background (only when
        docked), border frame, then buttons and the label batch."""
        thick=2
        glBegin(GL_QUADS) #start background
        # NOTE(review): reconstructed indentation — the docked check is taken
        # to guard only the opaque background quad, mirroring
        # DockDialogue.draw; confirm against the original file.
        if self.docked == 1:
            glColor3f(0.0, 0.0, 0.0)
            glVertex2f(self.corners[0][0], self.corners[0][1])
            glVertex2f(self.corners[1][0], self.corners[1][1])
            glVertex2f(self.corners[2][0], self.corners[2][1])
            glVertex2f(self.corners[3][0], self.corners[3][1])
        glColor3f(0.4, 0.5, 0.2) #start border
        # Four thin quads, one per edge of the dialogue frame.
        glVertex2f(self.corners[0][0], self.corners[0][1])
        glVertex2f(self.corners[1][0], self.corners[1][1])
        glVertex2f(self.corners[1][0] + thick, self.corners[1][1])
        glVertex2f(self.corners[0][0] + thick, self.corners[0][1])
        glVertex2f(self.corners[1][0], self.corners[1][1])
        glVertex2f(self.corners[2][0], self.corners[2][1])
        glVertex2f(self.corners[2][0], self.corners[2][1] - thick)
        glVertex2f(self.corners[1][0], self.corners[1][1] - thick)
        glVertex2f(self.corners[2][0], self.corners[2][1])
        glVertex2f(self.corners[3][0], self.corners[3][1])
        glVertex2f(self.corners[3][0] - thick, self.corners[3][1])
        glVertex2f(self.corners[2][0] - thick, self.corners[2][1])
        glVertex2f(self.corners[3][0], self.corners[3][1])
        glVertex2f(self.corners[0][0], self.corners[0][1])
        glVertex2f(self.corners[0][0], self.corners[0][1] + thick)
        glVertex2f(self.corners[3][0], self.corners[3][1] + thick)
        glEnd()
        for button in self.buttons:
            self.buttons.get(button).draw()
        self.batch.draw()

    def handleEvent(self, event, x, y, sounds):
        """On a left-click, hit-test the buttons and return the name of the
        one clicked (None otherwise)."""
        if event == "mouse_left":
            for button in self.buttons:
                # NOTE(review): membership in range() assumes integer mouse
                # coordinates — confirm pyglet always delivers ints here.
                if (x - self.buttons.get(button).center[0] in
                    range(self.buttons.get(button).width * -1, self.buttons.get(button).width) and
                    y - self.buttons.get(button).center[1] in
                    range(self.buttons.get(button).height * -1, self.buttons.get(button).height)):
                    sounds.menu1.play()
                    return button
class OptionsDialogue:
def __init__(self,window):
self.docked = 0
self.window = window
self.width = window[0]-40
self.height = window[1]-40
self.margin = [(window[0] - self.width) / 2 ,(window[1] - self.height) / 2]
self.corners = [(self.margin[0], self.margin[1]), (self.margin[0], window[1] - self.margin[1]),
(window[0] - self.margin[0], window[1] -self.margin[1]), (window[0] - self.margin[0], self.margin[1])]
self.center = (window[0]/2, window[1]/2)
self.open = 0
self.batch = pyglet.graphics.Batch()
self.labels = {"OPTIONS": pyglet.text.Label('OPTIONS',
font_name = 'verdana', font_size = 26,
bold = 0, color = (200, 100, 100, 255),
anchor_x = 'center', anchor_y = 'center',
x = self.width/2,
y = self.height - 100, batch = self.batch),
"MODE": pyglet.text.Label('Screen Mode',
font_name = 'verdana', font_size = 12,
bold = 0, color = (200, 100, 100, 255),
anchor_x = 'right', anchor_y = 'center',
x = self.width/2-100,
y = self.height - 200, batch = self.batch),
"RESOLUTION": pyglet.text.Label('Resolution',
font_name = 'verdana', font_size = 12,
bold = 0, color = (200, 100, 100, 255),
anchor_x = 'right', anchor_y = 'center',
x = self.width/2-100,
y = self.height - 260, batch = self.batch),
"FX VOLUME": pyglet.text.Label('FX Volume',
font_name = 'verdana', font_size = 12,
bold = 0, color = (200, 100, 100, 255),
anchor_x = 'right', anchor_y = 'center',
x = self.width/2-100,
y = self.height - 320, batch = self.batch),
"FX VALUE": pyglet.text.Label('80',
font_name = 'verdana', font_size = 12,
bold = 0, color = (200, 100, 100, 255),
anchor_x = 'center', anchor_y = 'center',
x = self.width/2,
y = self.height - 320, batch = self.batch),
"MUSIC VOLUME": pyglet.text.Label('Music Volume',
font_name = 'verdana', font_size = 12,
bold = 0, color = (200, 100, 100, 255),
anchor_x = 'right', anchor_y = 'center',
x = self.width/2-100,
y = self.height - 380, batch = self.batch),
"MUSIC VALUE": pyglet.text.Label('80',
font_name = 'verdana', font_size = 12,
bold = 0, color = (200, 100, 100, 255),
anchor_x = 'center', anchor_y = 'center',
x = self.width/2,
y = self.height - 380, batch = self.batch)}
self.buttons = {"FULLSCREEN": OGLButton(label="WINDOWED", height=34, width=100,
center=(self.width/2 , self.height - 200), batch=self.batch),
"RESOLUTION": OGLButton(label="800x600", height=34, width=100,
center=(self.width/2 , self.height - 260), batch=self.batch),
"FXUP": OGLButton(label="UP", height=30, width=40,
center=(self.width/2+50 , self.height - 320), batch=self.batch),
"FXDOWN": OGLButton(label="DOWN", height=30, width=40,
center=(self.width/2-50 , self.height - 320), batch=self.batch),
"FXMUTE": OGLButton(label="MUTE", height=24, width=40,
center=(self.width/2+120 , self.height | |
    def plotHDeadZoneLine(self):
        # Draw the horizontal dead-zone line (grid-line-4 colour) and tag it
        # so it can be cleared with the rest of the dead-zone items.
        self.tagDeadZone(self.plotXLine(comp.getYAxis(), conf.get('colour', 'grid-line-4')))
    def plotWDeadZoneLine(self):
        # Draw the vertical dead-zone line (grid-line-4 colour) and tag it
        # so it can be cleared with the rest of the dead-zone items.
        self.tagDeadZone(self.plotYLine(comp.getXAxis(), conf.get('colour', 'grid-line-4')))
    def plotZeroLabel(self):
        # Place the '0' label at the axes origin and tag it as a dead-zone item.
        self.tagDeadZone(self.plotGridLabel(x=comp.getXAxis(), y=comp.getYAxis(), text='0'))
# Core getters
    def getCanvasWidth(self):
        # Canvas width in pixels, read from the tk widget configuration.
        return int(self.cget('width'))
    def getCanvasHeight(self):
        # Canvas height in pixels, read from the tk widget configuration.
        return int(self.cget('height'))
class Panel(OutputClass, tk.Frame):
    """Base class for GUI panels.

    Runs the setVars/build/bindEvents template steps during construction;
    subclasses override whichever steps they need.
    """
    def __init__(self, master):
        self.master = master
        tk.Frame.__init__(self, self.master)
        self.setVars()      # subclass hook: create tk variables
        self.build()        # subclass hook: create child widgets
        self.bindEvents()   # subclass hook: attach event handlers

    def build(self):
        pass

    def bindEvents(self):
        pass

    def setVars(self):
        pass

    def display(self):
        # Default layout: plain pack; subclasses override for finer control.
        self.pack()
class ControlPanel(Panel):
    """Control column of the grapher UI: function list (top), zoom and
    motion panels (middle), function editing panel (bottom).

    Mostly a facade that forwards calls to the appropriate sub-panel.
    """
    def build(self):
        self.topPanel = tk.Frame(self)
        self.middlePanel = tk.Frame(self)
        self.bottomPanel = tk.Frame(self)
        self.funcList = FunctionList(self.topPanel)
        self.zoomPanel = ZoomPanel(self.middlePanel)
        self.motionPanel = MotionPanel(self.middlePanel)
        self.funcPanel = FunctionPanel(self.bottomPanel)

    # --- zoom / motion forwarding -----------------------------------
    def zoomNormalize(self):
        self.zoomPanel.zoomNormalize()

    def motion(self, *args, **kwargs):
        self.motionPanel.motion(*args, **kwargs)

    def motionEnd(self):
        self.motionPanel.motionEnd()

    def scrollStart(self, *args, **kwargs):
        self.motionPanel.scrollStart(*args, **kwargs)

    def scrollEnd(self, *args, **kwargs):
        self.motionPanel.scrollEnd(*args, **kwargs)

    def setXScale(self, value):
        self.zoomPanel.setXScale(value)

    def setXPiScale(self, quot):
        self.zoomPanel.setXPiScale(quot)

    def setYScale(self, value):
        self.zoomPanel.setYScale(value)

    # --- function selection / lifecycle -----------------------------
    def selectFunction(self, fid):
        # Clear any previous selection (without hiding the edit panels),
        # then highlight the new function and show its editors.
        self.unselectFunction(hide=False)
        self.funcList.selectFunction(fid)
        self.funcPanel.showEditPanels(fid)

    def unselectFunction(self, hide=True):
        selected = self.funcList.getSelectedIndex()
        if selected is not None:
            self.funcList.unselectFunction()
        if hide:
            self.funcPanel.hideEditPanels()

    def addFunction(self, fid, *args):
        self.funcList.addFunction(fid, *args)
        # Parametric functions additionally get a parameter editor.
        if fman.getFunction(fid).isParametric():
            self.funcPanel.registerFunction(fid)

    def updateFunction(self, fid, *args):
        self.funcList.updateFunction(fid, *args)
        if fman.getFunction(fid).isParametric():
            # Re-register so the parameter editor reflects the new definition.
            self.funcPanel.unregisterFunction(fid)
            self.funcPanel.registerFunction(fid)

    def createFunction(self, text):
        mgr.createFunction(text)

    def editFunction(self, text):
        mgr.editFunction(self.funcList.getSelectedFID(), text)

    def getExprEdit(self):
        return fman.getExprEdit(self.funcList.getSelectedFID())

    def deleteFunction(self):
        selected = self.funcList.getSelectedIndex()
        if selected is not None:
            mgr.deleteFunction(self.funcList.getSelectedFID())

    def removeFunction(self, fid):
        self.funcList.deleteFunction(fid)
        self.funcPanel.unregisterFunction(fid)
        self.funcPanel.hideEditPanels()

    def deleteAllFunctions(self):
        mgr.deleteAllFunctions()

    def display(self):
        self.pack(fill='y', expand=1)
        self.topPanel.pack(side='top', fill='x')
        self.middlePanel.pack(side='top', fill='x')
        self.bottomPanel.pack(side='top', fill='both', expand=1)
        self.funcList.display()
        self.zoomPanel.display()
        self.motionPanel.display()
        self.funcPanel.display()
class FunctionList(Panel):
    """Listbox of defined functions.

    Keeps ``fidList`` (function ids) in the same order as the tk Listbox
    rows so index <-> fid translation is a plain list lookup.
    """
    def __init__(self, master):
        Panel.__init__(self, master)
        self.fidList = []  # fid at position i corresponds to listbox row i

    def itemSelected(self, event):
        # must wait a moment until the selected item actually changes
        self.after(20, self.functionSelected)

    def itemUnselected(self, event):
        self.functionUnselected()

    def functionSelected(self, *args):
        selected = self.getSelectedFID()
        if selected is not None:
            mgr.selectFunction(selected)

    def functionUnselected(self):
        mgr.unselectFunction()

    def selectFunction(self, fid):
        self.funcList.selection_set(self.fidList.index(fid))

    def unselectFunction(self):
        self.funcList.selection_clear(self.getSelectedIndex())

    def addFunction(self, fid):
        # NOTE(review): ControlPanel forwards extra *args here; this works
        # only when those args are empty — confirm against callers.
        self.insertFunction(fid, 'end')

    def updateFunction(self, fid):
        # Replace the row in place: remove it, then re-insert at the same index.
        index = self.fidList.index(fid)
        self.removeFunction(index)
        self.insertFunction(fid, index)

    def insertFunction(self, fid, index):
        func = fman.getFunction(fid)
        if index == 'end':
            self.fidList.append(func.getFID())
        else:
            self.fidList.insert(index, func.getFID())
        # Row text: padded function number followed by its expression.
        self.funcList.insert(index, (' ' + str(func.getNumber())).ljust(10) + func.getExpr())

    # Deletes the function completely (external layer: it disappears everywhere)
    def deleteFunction(self, fid):
        ford = self.fidList.index(fid)
        self.removeFunction(ford)

    # Removes the function from the listbox only (internal layer)
    def removeFunction(self, index):
        self.funcList.delete(index)
        del self.fidList[index]

    def getSelectedIndex(self):
        selection = self.funcList.curselection()
        if not selection:
            return None
        else:
            return int(selection[0])

    def getSelectedFID(self):
        # Prefer the listbox selection; fall back to the function manager's
        # notion of the selected function.
        if self.getSelectedIndex() is not None:
            return self.fidList[self.getSelectedIndex()]
        else:
            fmanFID = fman.getSelectedFID()
            if fmanFID is not None:
                return fmanFID
            else:
                return None

    def build(self):
        self.frame = tk.LabelFrame(self, text=conf.get('out', 'funclist-header'))
        self.funcList = tk.Listbox(self.frame, height=conf.get('graphpar', 'funclist-lines'),
                                   width=conf.get('graphpar', 'funclist-width'), selectmode='single')

    def bindEvents(self):
        # Left click selects, right click unselects.
        self.funcList.bind('<ButtonRelease-1>', self.itemSelected)
        self.funcList.bind('<ButtonRelease-3>', self.itemUnselected)

    def display(self):
        self.pack(side='top', fill='x')
        self.frame.pack(side='top', pady=2)
        self.funcList.pack(side='top', fill='x', padx=2, pady=2)
class ZoomPanel(Panel):
    """Zoom controls: a combined both-axes entry plus separate X/Y entries,
    +/- step buttons, and pi-multiple helpers for the X axis.

    The entries mirror the current plot scale.  When X and Y diverge, the
    'both' entry is blanked ("suppressed") until they agree again; the
    supBoth/supX flags track that suppressed state.
    """
    def __init__(self, master):
        Panel.__init__(self, master)
        # Suppression flags: True means the corresponding entry is blanked.
        self.supBoth = False
        self.supX = False
    def setVars(self):
        """Create the Tk variables backing the entries, seeded with the default scale."""
        self.bothVal = tk.StringVar()
        self.xVal = tk.StringVar()
        self.yVal = tk.StringVar()
        self.setAllVals(String.floatStr(conf.get('gridpar', 'default-scale')))
    def setAllVals(self, value):
        """Write the same display string into all three scale entries."""
        self.bothVal.set(value)
        self.xVal.set(value)
        self.yVal.set(value)
    # ZOOM PANEL METHODS
    def zoomBoth(self, event=None):
        """Apply the value typed into the 'both' entry to both axes.

        On a parse error the entry is restored (blank while suppressed,
        otherwise the current X scale) and nothing changes.
        """
        bothText = self.bothVal.get()
        try:
            zoomBoth = base.parseFloat(bothText)
        except ValueError:
            if self.supBoth:
                self.bothVal.set('')
            else:
                self.bothVal.set(plot.getXScale())
            return
        mgr.setBothScales(zoomBoth)
        self.supBoth = False
        disp = String.floatStr(zoomBoth)
        self.bothVal.set(disp)
        self.xVal.set(disp)
        self.yVal.set(disp)  # was a duplicated xVal.set(disp): keep all three entries in sync
    def zoomX(self, event=None):
        """Apply the value typed into the X entry; restore it on a parse error."""
        xText = self.xVal.get()
        try:
            zoomX = base.parseFloat(xText)
        except ValueError:
            self.xVal.set(plot.getXScale())
            return
        self.setXScale(zoomX)
    def zoomY(self, event=None):
        """Apply the value typed into the Y entry; restore it on a parse error."""
        yText = self.yVal.get()
        try:
            zoomY = base.parseFloat(yText)
        except ValueError:
            self.yVal.set(plot.getYScale())
            return
        self.setYScale(zoomY)
    def zoomBothOut(self, *args):
        """Zoom both axes out one step and update the displayed values."""
        mgr.zoomBothOut()
        # (debug print of self.supBoth removed)
        if not self.supBoth:
            self.zoomBothMult()
        self.zoomXMult()
        self.zoomYMult()
    def zoomBothIn(self, *args):
        """Zoom both axes in one step and update the displayed values."""
        mgr.zoomBothIn()
        if not self.supBoth:
            self.zoomBothDiv()
        self.zoomXDiv()
        self.zoomYDiv()
    def zoomXOut(self):
        mgr.zoomXOut()
        self.zoomXMult()
        self.suppressBoth()
    def zoomXIn(self):
        mgr.zoomXIn()
        self.zoomXDiv()
        self.suppressBoth()
    def zoomYOut(self):
        mgr.zoomYOut()
        self.zoomYMult()
        self.suppressBoth()
    def zoomYIn(self):
        mgr.zoomYIn()
        self.zoomYDiv()
        self.suppressBoth()
    def zoomNormalized(self):
        # Button callback: ask the manager to reset the scale; the manager is
        # presumably expected to call zoomNormalize() back on this panel — confirm.
        mgr.zoomNormalize()
    def zoomNormalize(self):
        """Reset all three entries to the configured default scale."""
        disp = String.floatStr(conf.get('gridpar', 'default-scale'))
        self.setAllVals(disp)
    def zoomXPi(self, *args):
        """Set the X scale to the entered multiple of pi."""
        self.setXPiScale(self.xPiMultVar.get())
    def setXScale(self, value):
        mgr.setXScale(base.parseFloat(value))
        self.suppressBoth()
        self.xVal.set(String.floatStr(value))
    def setXPiScale(self, mult):
        mgr.setXPiScale(mult)
        self.suppressX()
    def setYScale(self, value):
        mgr.setYScale(base.parseFloat(value))
        self.suppressBoth()
        self.yVal.set(String.floatStr(value))
    def suppressBoth(self):
        """Blank the 'both' entry once the axes no longer share one scale."""
        if not self.supBoth:
            self.bothVal.set('')
        self.supBoth = True
    def suppressX(self):
        # NOTE(review): this blanks bothVal rather than xVal before setting supX.
        # That looks like a copy/paste slip (xVal.set('') seems intended, and the
        # empty-xVal fallback in zoomXMult/zoomXDiv suggests so), but confirm
        # against the manager's behavior before changing it.
        if not self.supX:
            self.bothVal.set('')
        self.supX = True
    def zoomBothMult(self):
        self.bothVal.set(String.floatStr(base.parseFloat(self.bothVal.get()) * conf.get('gridpar', 'scale-multiplier')))
    def zoomBothDiv(self):
        self.bothVal.set(String.floatStr(base.parseFloat(self.bothVal.get()) / conf.get('gridpar', 'scale-multiplier')))
    def zoomXMult(self):
        # When the X entry is suppressed (empty), fall back to the current scale.
        baseVal = self.xVal.get()
        if not baseVal:
            baseVal = comp.getXScale()
        self.xVal.set(String.floatStr(base.parseFloat(baseVal) * conf.get('gridpar', 'scale-multiplier')))
        self.supX = False
    def zoomXDiv(self):
        baseVal = self.xVal.get()
        if not baseVal:
            baseVal = comp.getXScale()
        self.xVal.set(String.floatStr(base.parseFloat(baseVal) / conf.get('gridpar', 'scale-multiplier')))
        self.supX = False
    def zoomYMult(self):
        self.yVal.set(String.floatStr(base.parseFloat(self.yVal.get()) * conf.get('gridpar', 'scale-multiplier')))
    def zoomYDiv(self):
        self.yVal.set(String.floatStr(base.parseFloat(self.yVal.get()) / conf.get('gridpar', 'scale-multiplier')))
    def build(self):
        # Panel and header
        self.mainPanel = tk.LabelFrame(self, text=conf.get('out', 'zoompanel-header'))
        self.bodyPanel = tk.Frame(self.mainPanel)
        self.labelPanel = tk.Frame(self.bodyPanel)
        self.widgetPanel = tk.Frame(self.bodyPanel)
        self.xPiPanel = tk.Frame(self.mainPanel)
        # Category labels
        self.bothLabel = tk.Label(self.labelPanel, text=conf.get('out', 'zoompanel-both'))
        self.xLabel = tk.Label(self.labelPanel, text=conf.get('out', 'zoompanel-x'))
        self.yLabel = tk.Label(self.labelPanel, text=conf.get('out', 'zoompanel-y'))
        # Combined zoom
        self.bothPanel = tk.Frame(self.widgetPanel)
        self.bothPlus = tk.Button(self.bothPanel, image=win.plusImg, command=self.zoomBothIn)
        self.bothMinus = tk.Button(self.bothPanel, image=win.minusImg, command=self.zoomBothOut)
        self.bothEntry = tk.Entry(self.bothPanel, justify='right', width=6, exportselection=0, textvariable=self.bothVal)
        # X-zoom
        self.xPanel = tk.Frame(self.widgetPanel)
        self.xPlus = tk.Button(self.xPanel, image=win.plusImg, command=self.zoomXIn)
        self.xMinus = tk.Button(self.xPanel, image=win.minusImg, command=self.zoomXOut)
        self.xEntry = tk.Entry(self.xPanel, justify='right', width=6, exportselection=0, textvariable=self.xVal)
        # Y-zoom
        self.yPanel = tk.Frame(self.widgetPanel)
        self.yPlus = tk.Button(self.yPanel, image=win.plusImg, command=self.zoomYIn)
        self.yMinus = tk.Button(self.yPanel, image=win.minusImg, command=self.zoomYOut)
        self.yEntry = tk.Entry(self.yPanel, justify='right', width=6, exportselection=0, textvariable=self.yVal)
        # Helper buttons
        self.normalButton = tk.Button(self.xPiPanel, image=win.oneImg, command=self.zoomNormalized)
        self.xPiButton = tk.Button(self.xPiPanel, image=win.piImg, command=self.zoomXPi)
        self.xPiMultVar = tk.IntVar()
        self.xPiMultVar.set(1)
        self.xPiMultEntry = tk.Entry(self.xPiPanel, justify='right', width=4, exportselection=0, textvariable=self.xPiMultVar)
    def bindEvents(self):
        self.bothEntry.bind('<KeyRelease-Return>', self.zoomBoth)
        self.xEntry.bind('<KeyRelease-Return>', self.zoomX)
        self.yEntry.bind('<KeyRelease-Return>', self.zoomY)
        self.xPiMultEntry.bind('<KeyRelease-Return>', self.zoomXPi)
    def display(self):
        self.pack(side='left', padx=2)
        self.mainPanel.pack(side='top')
        self.bodyPanel.pack(side='top')
        self.labelPanel.pack(side='left', fill='y', expand=1)
        self.widgetPanel.pack(side='left', fill='y', expand=1)
        self.xPiPanel.pack(side='top', fill='x', expand=1, padx=2, pady=2)
        # Labels
        self.bothLabel.pack(side='top', fill='x', expand=1)
        self.xLabel.pack(side='top', fill='x', expand=1)
        self.yLabel.pack(side='top', fill='x', expand=1)
        # Combined zoom
        self.bothPanel.pack(side='top', fill='x', expand=1)
        self.bothPlus.pack(side='left')
        self.bothMinus.pack(side='left')
        self.bothEntry.pack(side='left', padx=2)
        # X-zoom
        self.xPanel.pack(side='top', fill='x', expand=1)
        self.xPlus.pack(side='left')
        self.xMinus.pack(side='left')
        self.xEntry.pack(side='left', padx=2)
        # Y-zoom
        self.yPanel.pack(side='top', fill='x', expand=1)
        self.yPlus.pack(side='left')
        self.yMinus.pack(side='left')
        self.yEntry.pack(side='left', padx=2)
        # Helper buttons
        self.normalButton.pack(side='left')
        self.xPiButton.pack(side='left')
        self.xPiMultEntry.pack(side='left')
class MotionPanel(Panel):
    """Panel showing cursor coordinates / drag deltas plus scroll shortcut buttons."""
    def motion(self, x, y, scroll):
        """Show the cursor position; while scrolling, show the drag delta instead."""
        fmt = conf.get('out', 'position-format')
        self.xLabel.config(text='x: ' + fmt.format(float(x)))
        self.yLabel.config(text='y: ' + fmt.format(float(y)))
        if scroll:
            self.scrollModify(x, y)
    def motionEnd(self):
        """Blank both readouts when the cursor leaves the plot."""
        self._clearLabels()
    def scrollStart(self, x, y):
        """Remember the drag origin and display an initial (zero) delta."""
        self.scrollBegin = (x, y)
        self.scrollModify(x, y)
    def scrollModify(self, x, y):
        """Update the displayed delta relative to the drag origin."""
        self.scrollActual = (x, y)
        dx = x - self.scrollBegin[0]
        dy = y - self.scrollBegin[1]
        self.scrollChange = (dx, dy)
        fmt = conf.get('out', 'position-format')
        self.xLabel.config(text='dx: ' + fmt.format(dx))
        self.yLabel.config(text='dy: ' + fmt.format(dy))
    def scrollEnd(self):
        """Blank both readouts when the drag ends."""
        self._clearLabels()
    def _clearLabels(self):
        # Shared by motionEnd/scrollEnd: wipe both coordinate readouts.
        self.xLabel.config(text='')
        self.yLabel.config(text='')
    def scrollCenter(self):
        mgr.scrollCenter()
    def scrollEdge(self):
        mgr.scrollEdge()
    def scrollBottom(self):
        mgr.scrollBottom()
    def scrollCorner(self):
        mgr.scrollCorner()
    def switchLines(self):
        mgr.switchMotionLines()
    def build(self):
        """Create the labelled frame, the motion-lines toggle, readouts and buttons."""
        self.frame = tk.LabelFrame(self, text=conf.get('out', 'motion-header'))
        self.linesCheck = tk.Checkbutton(self.frame, text=conf.get('out', 'motion-lines-checker'), command=self.switchLines)
        self.posPanel = tk.Frame(self.frame)
        self.xLabel = tk.Label(self.posPanel, text='')
        self.yLabel = tk.Label(self.posPanel, text='')
        self.footerPanel = tk.Frame(self.frame)
        # One button per scroll shortcut.
        self.centerButton = tk.Button(self.footerPanel, image=win.centerImg, command=self.scrollCenter)
        self.edgeButton = tk.Button(self.footerPanel, image=win.edgeImg, command=self.scrollEdge)
        self.bottomButton = tk.Button(self.footerPanel, image=win.bottomImg, command=self.scrollBottom)
        self.cornerButton = tk.Button(self.footerPanel, image=win.cornerImg, command=self.scrollCorner)
    def display(self):
        """Lay out the panel; pack order determines the on-screen arrangement."""
        self.pack(side='left', fill='both', expand=1, padx=4)
        self.frame.pack(side='top', pady=2)
        self.linesCheck.pack(side='top')
        self.posPanel.pack(side='top', fill='both', expand=1)
        self.xLabel.pack(side='top', pady=2)
        self.yLabel.pack(side='top', pady=2)
        self.footerPanel.pack(side='bottom', fill='x', expand=1, padx=5)
        self.centerButton.pack(side='right')
        self.bottomButton.pack(side='right')
        self.edgeButton.pack(side='right')
        self.cornerButton.pack(side='right')
class FunctionPanel(Panel):
    """Composite panel holding the expression editor and the per-function manage panel."""
    def build(self):
        self.frame = tk.LabelFrame(self, text=conf.get('out', 'funcpanel-header'))
        self.mainPanel = tk.Frame(self.frame)
        self.exprPanel = FunctionExprPanel(self.mainPanel)
        self.managePanel = FunctionManagePanel(self.mainPanel)
    def display(self):
        # Pack order determines layout; children lay themselves out afterwards.
        self.pack(side='top', fill='both', expand=1, padx=2, pady=5)
        self.frame.pack(side='top', fill='both', expand=1)
        self.mainPanel.pack(side='top', fill='both', expand=1, padx=2, pady=2)
        self.exprPanel.display()
        self.managePanel.display()
    def registerFunction(self, fid):
        # Delegate per-function UI registration to the manage panel.
        self.managePanel.registerFunction(fid)
    def unregisterFunction(self, fid):
        self.managePanel.unregisterFunction(fid)
    def showEditPanels(self, fid=None):
        """Reveal the edit button and the manage controls for *fid*."""
        self.exprPanel.showEditButton()
        self.managePanel.showMain(fid)
    def hideEditPanels(self):
        """Hide the edit button and the manage controls."""
        self.exprPanel.hideEditButton()
        self.managePanel.hideMain()
class FunctionExprPanel(Panel):
    """Expression entry panel with two modes, 'add' and 'edit', kept in self.state."""
    def __init__(self, *args, **kwargs):
        Panel.__init__(self, *args, **kwargs)
        # Current mode: None (idle), 'add' or 'edit'; selects header/confirm texts.
        self.state = None
        self.headers = {'add' : conf.get('out', 'funcpanel-add-header'), 'edit' : conf.get('out', 'funcpanel-edit-header')}
        self.confirms = {'add' : conf.get('out', 'funcpanel-add-confirm'), 'edit' : conf.get('out', 'funcpanel-edit-confirm')}
    def build(self):
        """Create the idle panel (add/edit buttons) and the hidden entry panel."""
        self.initPanel = tk.Frame(self)
        self.addButton = tk.Button(self.initPanel, text=conf.get('out', 'funcpanel-add-button'), command=self.addMain, width=12)
        self.editButton = tk.Button(self.initPanel, text=conf.get('out', 'funcpanel-edit-button'), command=self.editMain, width=12)
        self.mainPanel = tk.Frame(self)
        self.row1Panel = tk.Frame(self.mainPanel)
        self.row2Panel = tk.Frame(self.mainPanel)
        self.header = tk.Label(self.row1Panel, text='')
        self.label = tk.Label(self.row2Panel, text='y = ')
        # self.entryVar is created in setVars() — presumably Panel calls setVars()
        # before build(); confirm against the Panel base class.
        self.entry = tk.Entry(self.row2Panel, width=35, textvariable=self.entryVar)
        self.cancel = tk.Button(self.row1Panel, text=conf.get('out', 'cancel'), command=self.hideMain)
        self.confirm = tk.Button(self.row1Panel, text='', command=self.submit)
    def bindEvents(self):
        self.entry.bind('<KeyRelease>', self.checkExpr)
        self.entry.bind('<KeyRelease-Return>', self.submit)
    def setVars(self):
        self.entryVar = tk.StringVar()
    def display(self):
        self.pack(side='top', fill='x')
        self.showInitPanel()
    def addMain(self, event=None):
        """Switch to 'add' mode and show the entry rows."""
        self.state = 'add'
        self.showMain()
    def editMain(self, event=None):
        """Switch to 'edit' mode, preloading the selected function's expression."""
        self.state = 'edit'
        self.entryVar.set(panel.getExprEdit())
        self.showMain()
    def showMain(self):
        """Replace the idle panel with the header/entry rows for the current mode."""
        self.hideInitPanel()
        self.mainPanel.pack(side='top', pady=2)
        self.row1Panel.pack(side='top', fill='x', expand=1)
        # Header and confirm texts depend on the current mode.
        self.header.config(text=self.headers[self.state])
        self.header.pack(side='left')
        self.confirm.config(text=self.confirms[self.state])
        self.confirm.pack(side='right')
        self.cancel.pack(side='right')
        self.row2Panel.pack(side='top', fill='x', expand=1)
        self.label.pack(side='left')
        self.entry.pack(side='right', fill='x', expand=1)
        self.entry.focus_set()
    def showInitPanel(self):
        self.initPanel.pack(side='top', fill='x')
        self.addButton.pack(side='left')
    def hideInitPanel(self):
        self.initPanel.pack_forget()
    def showEditButton(self):
        self.editButton.pack(side='right')
    def hideEditButton(self):
        self.editButton.pack_forget()
    def hideMain(self, event=None):
        """Cancel editing: clear the entry and return to the idle panel."""
        self.entryVar.set('')
        self.mainPanel.pack_forget()
        self.showInitPanel()
    def checkExpr(self, event=None):
        # Live-validation hook bound to every keystroke; currently a no-op.
        pass
| |
course.",
"Peter, what did she say to you? I couldn't make her out.",
"Do you know Keats? The poet, I mean, Keats, the poet.",
"I can't speak.",
"I'm too weak.",
"No, you're not.",
"You are not here.",
"And yet Who are you? You are not Sir Malcolm.",
"You have to name a thing to make it live, don't you? And shall I name you? Only if you want me to live.",
"Serpent.",
"Is that what you mean? Enchanter, Deceiver, Prince of Darkness, Devil of the Pit? Have you such contempt for your old friend? So, the Keats Four lines from Ode to a Nightingale have always struck a chord with me.",
"Keats was dying when he wrote them, and he knew he was dying, which adds to the piquancy, don't you think? Would you like to hear them? Do I have a choice? Vanessa, please.",
"You've always had a choice.",
"You allowed all this to happen.",
"Hell, you sought it out and fucked it.",
"You could have shut the door at any time.",
"You still can.",
"Right now.",
"Will you? Yes.",
"And give up everything we could have together? The true knowledge of man's virtue, as well as his sin.",
"The power.",
"The sight beyond this world.",
"I want nothing beyond this world.",
"Don't lie to me.",
"You've always been drawn to the deep ocean, to the dark whisper, a mirror behind the glass eyes.",
"To life at its fullest.",
"Will you close that door now? So, the Keats.",
"The lines from Keats.",
"Yes.",
"Darkling, I listen and, for many a time I have been half in love with easeful Death, Call'd him soft names in many a mused rhyme To take into the air my quiet breath.",
" So the darkness spoke.",
"Yes.",
"But you listened.",
"May the soul of <NAME> and the souls of all the faithful departed through the mercy of God rest in peace.",
"- Amen.",
"- Amen.",
"Sister.",
"I do not blame you.",
"Whatever sin, it has been forgiven in your suffering.",
"So much suffering.",
"It is more than I deserve, but I can't forgive myself.",
"There's so much to say.",
"Or so little.",
"I'm married now.",
"Can you imagine that? He's a lawyer, in the employ of Mr.",
"Hawkins of Essex.",
"He doesn't have a mustache like my gallant Captain Branson, but Mr.",
"Harker's a good man and he loves me.",
"I'm happy for you.",
"And our poor Peter.",
"I'm so sorry.",
"If only you had run after him that day in the maze, and held him tight, and told him not to go and that you loved him for his weakness.",
"How do you know about that? I know about many things now, Vanessa.",
"The master has taught me much.",
"Things no one should ever know.",
"Help me.",
"Save me from him.",
"Please.",
"Mina needs our help.",
"Will you let me in? And there's nothing more? No.",
"You believe you can reach out to her again? Or she to me.",
"I know you do not credit it.",
"You know nothing of me.",
"Do you know how many men I've killed? In Africa we walked in blood every step.",
"There was a time I would gladly have killed you.",
"There may come a time when I gladly shall.",
"But for now, I can make use of you.",
"- And no more? - What else? Forgiveness? Go to your Roman church for that, you'll find none here.",
"Have you imagined for one moment what this has been for me? An unforgivable transgression that has marked me for life.",
"You think you've suffered.",
"You think you know blood.",
"You think you've walked on corpses.",
"Spread them from here to the horizon and I have walked further! You weak, foul, lustful, vainglorious man.",
"How dare you presume to speak to me of death? Then we shall speak of it together.",
"Yes.",
"We will follow it to Mina.",
"Yes.",
"And then? You will be done.",
"I will walk on.",
"I'll have Sembene prepare a room.",
"This is your home now.",
"There is no exculpation I expect of you.",
"My guilt is my own and that I carry with me.",
"It is my present and my future.",
"It has damned me well beyond this short life of mine.",
"But I will not rest until you are safe.",
"There is no other purpose in my life.",
"I shall love you always.",
"Your dearest friend, Vanessa.",
"Postscript.",
"Your father loves you very much and would do anything to save you.",
"But I love you in a different way.",
"I love you enough to kill you.",
"There are tremors around us.",
"Hidden music.",
"Some might be more attuned to them than others.",
"What do those people do, those who have been chosen? Amunet and Amun-Ra.",
"If they ever came together, Amunet would become the mother of evil.",
"You are not <NAME>.",
"Serpent.",
"Deceiver, Prince of Darkness, Devil of the Pit? You've always been drawn to the dark whisper.",
"There are things within us all that can never be unleashed.",
"Where've you been all night? I went out with Mr.",
"Gray.",
"He's a devil, that one.",
"The medal around my neck.",
"Would you wear it for me? 'No more let Life divide what Death can join together.'",
" You're pretty goddamn sure you know what's going on all the time.",
"There are things you can't control.",
"At the end of the day, the only thing we have is the people we trust.",
"And you trust her? You better start doing the same, or get ready to lose a lot of battles.",
"Vanessa, I have a lot to tell you.",
"I haven't been honest with you about To be beautiful is to be almost dead, isn't it? Mmm The lassitude of the perfect woman, the languid ease, the obeisance, spirit-drained, anemic, pale as ivory and weak as a kitten.",
"There's a brisk trade for photographs of dead women, did you know that? In certain quarters.",
"The corpses are improved with cosmetics, then posed in postures of abject surrender and photographed.",
"The men circulate the pictures and pleasure themselves.",
"Mmm, such exquisiteness.",
"Vanessa? Do you hear me? Of course.",
"Last evening, you went into a a spell, or a fit of a kind, unlike previously.",
"Did I? How arresting.",
"It was unnatural.",
"Do you not remember? You've been asleep since.",
"Who dressed me? Oh.",
"Sembene and myself.",
"Did you make the tea then? What? No.",
"Sembene did.",
"And you dressed me? Yes.",
"Like when I was a girl? Oh, no.",
"You didn't dress me as a girl.",
"How silly of me.",
"You weren't there to dress me, were you? You were away on some trek or other.",
"Who could keep them straight? We tried to follow your progress on that map in the solarium with little red pins but you were in terra incognita.",
"That's where you said you were going.",
"What adventure in those words.",
"Do you feel you are there again Father? Mina? Somewhat.",
"Fat Mother wept, of course.",
"Is there anything more comical than a fat woman weeping? It never quite comes off.",
"You loathed her fatness.",
"Unlike those other women.",
"But fat Mother wept because she missed you, and feared for you.",
"Wept and wept and then she bit into her pillow so we wouldn't hear her, and then she turned to laudanum to sleep at all.",
"Poor fat thing.",
"Tell me about the other women.",
"Not Mrs.",
"Ives, I know all about her.",
"You might have attended the funeral at least.",
"For decency's sake! You stop this right now.",
"Ooh, it's that face, is it? The hard face for the niggers.",
"Scare them into obeying.",
"The porters and slaves.",
"But we were speaking of the women.",
"They were as follows The whores in Zanzibar when you landed, mostly North-African so almost white.",
"Then the native women along the way.",
"They enjoyed you pawing at them.",
"Or you convinced yourself they did.",
"You made Peter fuck them to prove he was a man.",
"He didn't enjoy it, but he would do anything for you.",
"Except make a proper off spin bowler like you wanted.",
"So on you went, tribe to tribe, father and son, fucking the Maasai, the Mamohela, the Bangweulu the Bantu, the Burundi! Honestly, I'm sorry they've troubled you.",
"I'm quite well again.",
"And where did you receive your medical degree, Dr.",
"Ives? Would you excuse us? Go on, <NAME>.",
"He fears for my modesty I think.",
"May I make a discreet examination? Ah, discretion always.",
"What happened to your lip? Sembene thought it necessary to 'sedate' me.",
"You've a steady pulse.",
"Yes, I believe I am still alive.",
"Would you come into the light for | |
item_extent=None, search_existing=True, owner=None):
super().__init__(target, clone_mapping, info, data, sharing, thumbnail, portal_item, folder, item_extent, search_existing, owner)
self._related_items = related_items
    @property
    def related_items(self):
        """The survey's related items (used in clone() to re-create the
        Survey2Service relationship against the cloned feature service)."""
        return self._related_items
def clone(self):
"""Clone the form in the target organization.
"""
try:
new_item = None
original_item = self.info
if self._search_existing:
new_item = _search_org_for_existing_item(self.target, self.portal_item)
if not new_item:
# Get the item properties from the original item to be applied when the new item is created
item_properties = self._get_item_properties(self.item_extent)
# Add the new item
new_item = self._add_new_item(item_properties)
# Update Survey123 form data
original_item = self.info
self.update_form(self.target, new_item, self._clone_mapping)
_share_item_with_groups(new_item, self.sharing, self._clone_mapping["Group IDs"])
self.resolved = True
self._clone_mapping['Item IDs'][original_item['id']] = new_item['id']
return new_item
except Exception as ex:
raise _ItemCreateException("Failed to create {0} {1}: {2}".format(original_item['type'], original_item['title'], str(ex)), new_item)
    def update_form(self, target, new_item, clone_mapping):
        """Update the form with form zip data in the target organization.

        Downloads the original survey's form zip, rewrites every reference to
        the source feature service and item ids so they point at the cloned
        target items, then uploads the rewritten zip to *new_item*.

        Keyword arguments:
        target - The instance of arcgis.gis.GIS (the portal) to update the item
        new_item - The form item to update
        clone_mapping - Dictionary containing mapping between new and old items.
        """
        original_item = self.info
        temp_dir = os.path.join(self._temp_dir.name, original_item['id'])
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        form_zip = self.portal_item.download(temp_dir)
        zip_file = zipfile.ZipFile(form_zip)
        org_url = _get_org_url(self.target)
        try:
            # Extract the zip archive to a sub folder
            new_dir = os.path.join(temp_dir, 'extract')
            zip_dir = os.path.join(new_dir, 'esriinfo')
            zip_file.extractall(new_dir)
            zip_file.close()
            feature_service_url = None
            form_json = None
            # Loop through the files and update references to the feature service and item id
            for path in os.listdir(zip_dir):
                if os.path.splitext(path)[1].lower() == '.info':
                    # .info: JSON holding the survey's service binding (itemId/url)
                    with open(os.path.join(zip_dir, path), 'r', encoding="utf8") as file:
                        data = json.loads(file.read())
                        original_url = _deep_get(data, 'serviceInfo', 'url')
                        if original_url is not None:
                            for key, value in clone_mapping['Services'].items():
                                if _compare_url(original_url, key):
                                    data['serviceInfo']['itemId'] = value['id']
                                    data['serviceInfo']['url'] = value['url']
                                    feature_service_url = value['url']
                                    break
                    with open(os.path.join(zip_dir, path), 'w', encoding="utf8") as file:
                        file.write(json.dumps(data))
                elif os.path.splitext(path)[1].lower() == '.xml' or path.lower() == 'webform.json':
                    # XForm XML / web form JSON: plain-text substitution of ids and urls
                    with open(os.path.join(zip_dir, path), 'r', encoding="utf8") as file:
                        data = file.read()
                        data = data.replace(original_item['id'], new_item['id'])
                        for key, value in clone_mapping['Services'].items():
                            data = re.sub(key, value['url'], data, 0, re.IGNORECASE)
                        for key, value in clone_mapping['Item IDs'].items():
                            url = '{0}sharing/rest/content/items/{1}'.format(org_url, value)
                            data = re.sub('(?<=")([^<]+?{0})(?=")'.format(key), url, data, 0, re.IGNORECASE)
                    with open(os.path.join(zip_dir, path), 'w', encoding="utf8") as file:
                        file.write(data)
                elif os.path.splitext(path)[1].lower() == '.iteminfo':
                    # Replace the stored item metadata with the new item's metadata.
                    with open(os.path.join(zip_dir, path), 'w') as file:
                        file.write(json.dumps(dict(new_item)))
                elif path.lower() == 'form.json':
                    # Present only for surveys authored on the web; pushed to the
                    # service's metadata table below.
                    with open(os.path.join(zip_dir, path), 'r') as file:
                        form_json = file.read()
                elif os.path.splitext(path)[1].lower() == '.xlsx':
                    # The XLSForm workbook is itself a zip; rewrite sharedStrings.xml
                    # inside it, then repack.  Failures here are best-effort.
                    xlsx = zipfile.ZipFile(os.path.join(zip_dir, path))
                    xlsx_dir = os.path.join(zip_dir, 'xlsx')
                    try:
                        xlsx.extractall(xlsx_dir)
                        xlsx.close()
                        with open(os.path.join(xlsx_dir, 'xl/sharedStrings.xml'), 'r', encoding="utf8") as file:
                            data = file.read()
                            for key, value in clone_mapping['Services'].items():
                                data = re.sub(key, value['url'], data, 0, re.IGNORECASE)
                            for key, value in clone_mapping['Item IDs'].items():
                                url = '{0}sharing/rest/content/items/{1}'.format(org_url, value)
                                data = re.sub('(?<=>)([^<]+?{0})(?=<)'.format(key), url, data, 0, re.IGNORECASE)
                        with open(os.path.join(xlsx_dir, 'xl/sharedStrings.xml'), 'w', encoding="utf8") as file:
                            file.write(data)
                        xlsx = zipfile.ZipFile(os.path.join(zip_dir, path), 'w', zipfile.ZIP_DEFLATED)
                        _zip_dir(xlsx_dir, xlsx, False)
                    except Exception:
                        continue
                    finally:
                        xlsx.close()
                        if os.path.exists(xlsx_dir):
                            shutil.rmtree(xlsx_dir)
            # Add a relationship between the new survey and the service
            for related_item in self.related_items:
                for key, value in clone_mapping['Services'].items():
                    if _compare_url(related_item['url'], key):
                        feature_service = target.content.get(value['id'])
                        new_item.add_relationship(feature_service, 'Survey2Service')
                        break
            # If the survey was authored on the web add the web_json to the metadata table in the service
            if form_json is not None and feature_service_url is not None:
                svc = FeatureLayerCollection(feature_service_url, target)
                table = next((t for t in svc.tables if t.properties.name == 'metadata'), None)
                if table is not None:
                    # Replace any existing 'form' row with the new form definition.
                    deletes = table.query(where="name = 'form'")
                    table.edit_features(adds=[{'attributes' : {'name' : 'form', 'value' : form_json}}], deletes=deletes)
            # Zip the directory
            zip_file = zipfile.ZipFile(form_zip, 'w', zipfile.ZIP_DEFLATED)
            _zip_dir(zip_dir, zip_file)
            zip_file.close()
            # Upload the zip to the item
            new_item.update(data=form_zip)
        except Exception as ex:
            raise Exception("Failed to update {0} {1}: {2}".format(new_item['type'], new_item['title'], str(ex)))
        finally:
            zip_file.close()
class _WorkforceProjectDefinition(_TextItemDefinition):
    """
    Represents the definition of a workforce project within ArcGIS Online or Portal.
    """
    def clone(self):
        """Clone the workforce project in the target organization.

        Rewrites the project JSON (webmaps, services, group, folder and
        assignment-integration url templates) using the ids recorded in
        self._clone_mapping, then creates the item.  Returns the new item;
        raises _ItemCreateException on failure.
        """
        try:
            new_item = None
            original_item = self.info
            if self._search_existing:
                new_item = _search_org_for_existing_item(self.target, self.portal_item)
            if not new_item:
                # Get the item properties from the original application which will be applied when the new item is created
                item_properties = self._get_item_properties(self.item_extent)
                workforce_json = self.data
                user = self.target.users.get(self.owner)
                # Update the webmap references
                webmaps = ['workerWebMapId', 'dispatcherWebMapId']
                for webmap in webmaps:
                    original_id = _deep_get(workforce_json, webmap)
                    if original_id is not None and original_id in self._clone_mapping['Item IDs']:
                        workforce_json[webmap] = self._clone_mapping['Item IDs'][original_id]
                # Update the service references
                services = ['dispatchers', 'assignments', 'workers', 'tracks']
                for service in services:
                    service_definiton = _deep_get(workforce_json, service)
                    if service_definiton is not None:
                        layer_url = _deep_get(service_definiton, 'url')
                        # Layer urls end in /<layer_id>; map both the service url and the layer id.
                        feature_service_url = os.path.dirname(layer_url)
                        for key, value in self._clone_mapping['Services'].items():
                            if _compare_url(feature_service_url, key):
                                layer_id = int(os.path.basename(layer_url))
                                new_id = value['layer_id_mapping'][layer_id]
                                service_definiton['url'] = "{0}/{1}".format(value['url'], new_id)
                                service_definiton['serviceItemId'] = value['id']
                                if service == 'dispatchers':
                                    # Ensure the cloning user exists as a dispatcher.
                                    feature_layer = FeatureLayer(service_definiton['url'], self.target)
                                    features = feature_layer.query("userId = '{0}'".format(user.username)).features
                                    if len(features) == 0:
                                        features = [{"attributes" : {"name" : user.fullName, "userId" : user.username}}]
                                    # NOTE(review): edit_features runs even when the dispatcher
                                    # already exists, re-adding the queried features — confirm intended.
                                    feature_layer.edit_features(adds=features)
                                break
                # Update the group reference
                group_id = _deep_get(workforce_json, 'groupId')
                workforce_json['groupId'] = self._clone_mapping['Group IDs'][group_id]
                # Update the folder reference
                if 'folderId' in workforce_json:
                    if self.folder is not None:
                        folders = user.folders
                        target_folder = next((f for f in folders if f['title'].lower() == self.folder.lower()), None)
                        if target_folder:
                            workforce_json['folderId'] = _deep_get(target_folder, 'id')
                    else:
                        workforce_json['folderId'] = None
                # Update the application integration references
                integrations = _deep_get(workforce_json, 'assignmentIntegrations')
                if integrations is not None:
                    for integration in integrations:
                        url_template = _deep_get(integration, 'urlTemplate')
                        if url_template is not None:
                            # Substitute item ids first, then service/layer urls.
                            for item_id in self._clone_mapping['Item IDs']:
                                integration['urlTemplate'] = re.sub(item_id, self._clone_mapping['Item IDs'][item_id], integration['urlTemplate'], 0, re.IGNORECASE)
                            for original_url in self._clone_mapping['Services']:
                                service = self._clone_mapping['Services'][original_url]
                                for key, value in service['layer_id_mapping'].items():
                                    integration['urlTemplate'] = re.sub("{0}/{1}".format(original_url, key),
                                                                        "{0}/{1}".format(service['url'], value),
                                                                        integration['urlTemplate'], 0, re.IGNORECASE)
                        assignment_types = _deep_get(integration, 'assignmentTypes')
                        if assignment_types is not None:
                            # Same substitutions for each per-assignment-type template.
                            for key, value in assignment_types.items():
                                url_template = _deep_get(value, 'urlTemplate')
                                if url_template is not None:
                                    for item_id in self._clone_mapping['Item IDs']:
                                        value['urlTemplate'] = re.sub(item_id, self._clone_mapping['Item IDs'][item_id], value['urlTemplate'], 0, re.IGNORECASE)
                                    for original_url in self._clone_mapping['Services']:
                                        service = self._clone_mapping['Services'][original_url]
                                        for old_id, new_id in service['layer_id_mapping'].items():
                                            value['urlTemplate'] = re.sub("{0}/{1}".format(original_url, old_id),
                                                                          "{0}/{1}".format(service['url'], new_id),
                                                                          value['urlTemplate'], 0, re.IGNORECASE)
                item_properties['text'] = json.dumps(workforce_json)
                # Add the new item
                new_item = self._add_new_item(item_properties)
                _share_item_with_groups(new_item, self.sharing, self._clone_mapping["Group IDs"])
            self.resolved=True
            self._clone_mapping['Item IDs'][original_item['id']] = new_item['id']
            return new_item
        except Exception as ex:
            raise _ItemCreateException("Failed to create {0} {1}: {2}".format(original_item['type'], original_item['title'], str(ex)), new_item)
class _ProMapDefinition(_ItemDefinition):
    """
    Represents the definition of a Pro map (.mapx) within ArcGIS Online or Portal.
    """

    def clone(self):
        """Clone the pro map in the target organization.

        Reads the .mapx file as JSON, remaps every FeatureService data
        connection found in the layer and table definitions to the cloned
        service URL and layer id (via ``self._clone_mapping``), stages the
        rewritten file in a temporary ``new_mapx`` directory, and clones the
        item from that staged copy.

        Returns:
            The newly created item, or the pre-existing item when
            ``_search_existing`` finds one in the target organization.

        Raises:
            _ItemCreateException: If the item could not be created.
        """
        # Assigned up front so the ``finally`` block below never references an
        # unbound name: previously, when an existing item was found and the
        # ``if not new_item`` branch was skipped, the cleanup raised NameError
        # and masked the successful return.
        mapx = None
        try:
            new_item = None
            original_item = self.info
            if self._search_existing:
                new_item = _search_org_for_existing_item(self.target, self.portal_item)
            if not new_item:
                mapx = self.data
                map_json = None
                with open(mapx, 'r', encoding="utf8") as file:
                    map_json = json.loads(file.read())

                # Gather every dataConnection from both layers and tables so
                # they can all be remapped in one pass below.
                data_connections = []
                layer_definitions = _deep_get(map_json, 'layerDefinitions')
                if layer_definitions is not None:
                    for layer_definition in layer_definitions:
                        data_connection = _deep_get(layer_definition, 'featureTable', 'dataConnection')
                        if data_connection is not None:
                            data_connections.append(data_connection)
                table_definitions = _deep_get(map_json, 'tableDefinitions')
                if table_definitions is not None:
                    for table_definition in table_definitions:
                        data_connection = _deep_get(table_definition, 'dataConnection')
                        if data_connection is not None:
                            data_connections.append(data_connection)

                for data_connection in data_connections:
                    if 'workspaceFactory' in data_connection and data_connection['workspaceFactory'] == 'FeatureService':
                        if 'workspaceConnectionString' in data_connection and data_connection['workspaceConnectionString'] is not None:
                            # Connection strings have the form "URL=<service url>";
                            # strip the 4-character "URL=" prefix.
                            feature_service_url = data_connection['workspaceConnectionString'][4:]
                            for original_url in self._clone_mapping['Services']:
                                if _compare_url(feature_service_url, original_url):
                                    new_service = self._clone_mapping['Services'][original_url]
                                    layer_id = int(data_connection['dataset'])
                                    new_id = new_service['layer_id_mapping'][layer_id]
                                    data_connection['workspaceConnectionString'] = "URL={0}".format(new_service['url'])
                                    data_connection['dataset'] = new_id

                # Stage the rewritten .mapx beside the original and clone from it.
                new_mapx_dir = os.path.join(os.path.dirname(mapx), 'new_mapx')
                os.makedirs(new_mapx_dir)
                new_mapx = os.path.join(new_mapx_dir, os.path.basename(mapx))
                with open(new_mapx, 'w', encoding="utf8") as file:
                    file.write(json.dumps(map_json))
                self._data = new_mapx
                new_item = super().clone()
                _share_item_with_groups(new_item, self.sharing, self._clone_mapping["Group IDs"])
            self.resolved = True
            self._clone_mapping['Item IDs'][original_item['id']] = new_item['id']
            return new_item
        except Exception as ex:
            if isinstance(ex, _ItemCreateException):
                raise
            raise _ItemCreateException("Failed to create {0} {1}: {2}".format(original_item['type'], original_item['title'], str(ex)), new_item)
        finally:
            # Restore the original data path and remove the staging directory,
            # but only if we actually got as far as reading the .mapx.
            if mapx is not None:
                self._data = mapx
                new_mapx_dir = os.path.join(os.path.dirname(mapx), 'new_mapx')
                if os.path.exists(new_mapx_dir):
                    shutil.rmtree(new_mapx_dir)
class _ProProjectPackageDefinition(_ItemDefinition):
"""
Represents the definition of an pro map within ArcGIS Online or Portal.
"""
def clone(self):
"""Clone the pro map in the target organization.
"""
try:
new_item = None
original_item = self.info
aprx = None
map = None
maps = None
layers = None
lyr = None
ppkx = self.data
if self._search_existing:
new_item = _search_org_for_existing_item(self.target, self.portal_item)
if not new_item:
try:
import arcpy
extract_dir = os.path.join(os.path.dirname(ppkx), 'extract')
if not os.path.exists(extract_dir):
os.makedirs(extract_dir)
arcpy.ExtractPackage_management(ppkx, extract_dir)
# 1.x versions of Pro use a different folder name
project_folder = 'p20'
version = arcpy.GetInstallInfo()['Version']
if version.startswith('1'):
project_folder = 'p12'
project_dir = os.path.join(extract_dir, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.