text stringlengths 957 885k |
|---|
#!/usr/bin/env python3
"""Very simple file based database resembling CSV but using a key."""
import pathlib
from typing import Tuple
from boxhead import config as boxhead_config
from boxhead.boxheadlogging import boxheadlogging
logger: boxheadlogging.BoxHeadLogger = boxheadlogging.get_logger(__name__)
class KeyMapItem():
    """A single parsed keymap line.

    Attributes:
        values: Positional values parsed from the line.
        params: Keyword parameters (KEY=VALUE pairs) parsed from the line.
    """

    # Fixed attribute set keeps per-item memory small when many lines load.
    __slots__ = ('values', 'params')

    def __init__(self) -> None:
        """Start out with no values and no parameters."""
        self.values: list[str] = []
        self.params: dict[str, str] = {}
class KeyMap():
    """Representation of data where a database would be too much.

    Information is stored in a plain utf-8 textfile like this:
    KEY|DATUM1|KEYWORD2=DATUM2|KEYWORD3=DATUM3|...
    The default delimiter is "|" but can be changed in the config.
    Information is looked up by a key (the first item in a line).
    Lines may be commented out by prefixing them with "#"

    Attributes:
        _delimiter: The delimiter between datapoints.
        _map: Mapping of keys to the items parsed from map files.
    """

    def __init__(self, config: 'boxhead_config.Config') -> None:
        """Retrieve config values and initialise an empty map.

        Args:
            config: The application configuration.
        """
        self._delimiter: str = config.get_str('core',
                                              'mapping',
                                              'delimiter',
                                              default='|')
        # Bug fix: `_map` used to be created only by `reset()`, so calling
        # `get_map()`, `get()` or `_load()` on a fresh instance raised
        # AttributeError. Initialise it here.
        self.reset()

    def reset(self) -> None:
        """Discard all loaded entries."""
        self._map: 'dict[str, KeyMapItem]' = {}

    def get_delimiter(self) -> str:
        """Return the delimiter between datapoints."""
        return self._delimiter

    def get_map(self) -> 'dict[str, KeyMapItem]':
        """Return the mapping of keys to KeyMapItems."""
        return self._map

    def load(self) -> None:
        """Load all maps into `_map`.

        As there is no default map this function does nothing and may
        be re-implemented by its children.
        """

    def _load(self, path: pathlib.Path) -> None:
        """Load map from file, merging its entries into `_map`.

        Args:
            path: Path to load the map from.
        """
        path = pathlib.Path(path)
        mapping: 'dict[str, KeyMapItem]' = {}
        try:
            with path.open('r', encoding='utf-8') as raw_map:
                for line in raw_map:
                    # Bug fix: strip before testing. Lines read from a file
                    # keep their trailing newline, so blank lines used to be
                    # logged as "malformed" instead of skipped, and indented
                    # comments were not recognised as comments.
                    line = line.strip()
                    if line == '' or line.startswith('#'):
                        # blank line or a comment
                        continue
                    try:
                        key, data = self._process_line(line)
                        mapping[key] = data
                    except ValueError:
                        logger.error('malformed line: "%s"', line)
        except FileNotFoundError:
            logger.debug('could not open file at "%s"', str(path))
        else:
            logger.debug('loaded map: "%s"', str(path))
            self._map.update(mapping)

    def _process_line(self, raw_line: str) -> Tuple[str, 'KeyMapItem']:
        """Process a mapping line.

        Mapping lines are formatted like this:
        KEY|DATUM1|KEYWORD2=DATUM2|KEYWORD3=DATUM3|...

        Args:
            raw_line: The raw line to parse.

        Returns:
            A tuple containing the key and the KeyMapItem.

        Raises:
            ValueError: If an empty line is given.
        """
        if raw_line == '':
            raise ValueError('empty mapping given')
        key, *rest = raw_line.split(self.get_delimiter())
        item: 'KeyMapItem' = KeyMapItem()
        for raw in rest:
            raw = raw.strip()
            # Split on the first "=" only so values may contain "=".
            pieces: list[str] = raw.split('=', 1)
            if len(pieces) == 1:
                item.values.append(pieces[0])
            else:
                item.params[pieces[0]] = pieces[1]
        return (key, item)

    def get(self, key: str) -> 'KeyMapItem':
        """Return the entry mapped to the key or an empty item.

        Args:
            key: The key.

        Returns:
            The KeyMapItem bound to the key or an empty KeyMapItem.
        """
        try:
            return self._map[key]
        except KeyError:
            logger.error('no entry for key: "%s"', key)
            return KeyMapItem()

    def _to_map_line(self, key: str, *values: str, **params: str) -> str:
        """Convert values and params to a map line (without newline).

        Args:
            key: The key.
            values: Positional values.
            params: Keyword parameters.
        """
        flattened_params = [f'{name}={value}' for name, value in params.items()]
        return self.get_delimiter().join([key, *values, *flattened_params])

    def update(self, path: pathlib.Path, mapkey: str, *values: str,
               **params: str) -> None:
        """Update, add or delete an entry.

        Args:
            path: The path of the file.
            mapkey: The key.
            *values: Values to store. Leave empty to remove entry.
            **params: Parameters to store. Leave empty to remove entry.
        """
        path = pathlib.Path(path)
        logger.debug(
            'updating keymap at "%s" for key "%s" with: %s,%s', path, mapkey,
            ','.join(values),
            ','.join([f'{kw}={v}' for kw, v in params.items()]))
        try:
            with path.open('r', encoding='utf-8') as keymap:
                lines: list[str] = keymap.readlines()
        except FileNotFoundError:
            logger.debug('could not open file at %s', str(path))
            lines = []
        key_length: int = len(mapkey)
        done: bool = False
        for i, line in enumerate(lines):
            # Compare against key plus delimiter so a longer key sharing the
            # same prefix is not accidentally matched.
            if line[:key_length + 1] == mapkey + self.get_delimiter():
                if len(values) > 0 or len(params) > 0:
                    # update the mapping
                    lines[i] = self._to_map_line(mapkey, *values,
                                                 **params) + '\n'
                    logger.debug('updated entry for key "%s"', mapkey)
                else:
                    # no data given: remove the mapping
                    lines.pop(i)
                    logger.debug('removed entry for key "%s"', mapkey)
                done = True
                break
        if not done and (len(values) > 0 or len(params) > 0):
            # there was no mapping so append it
            lines.append(self._to_map_line(mapkey, *values, **params) + '\n')
            logger.info('added entry for key "%s"', mapkey)
        if not path.parent.exists():
            path.parent.mkdir(parents=True, exist_ok=True)
        with path.open('w', encoding='utf-8') as keymap:
            keymap.write(''.join(lines))
        logger.debug('updated map: "%s"', path)
        # Invalidate the cached map so callers re-load the changed file.
        self.reset()

    def remove(self, path: pathlib.Path, key: str) -> None:
        """Remove entry with key.

        Args:
            path: The path of the file.
            key: The key of the data to remove.
        """
        self.update(path, key)
|
from algotrader.model.market_data_pb2 import *
from algotrader.model.model_factory import ModelFactory
from algotrader.model.ref_data_pb2 import *
from algotrader.model.trade_data_pb2 import *
from algotrader.model.time_series_pb2 import *
from collections import OrderedDict
class SampleFactory(object):
    """Builds fully-populated sample model objects via ModelFactory.

    Every method returns (or fills in) a model object with fixed example
    data, intended for tests and demos.
    """

    def sample_instrument(self):
        """Return a sample stock instrument (2800.HK on SEHK)."""
        inst = ModelFactory.build_instrument(
            symbol='2800.HK', type=Instrument.STK, primary_exch_id='SEHK',
            ccy_id='HKD', name='2800', exch_ids=['NYSE', 'TSE'],
            sector="Finance", industry="Banking",
            margin=100, tick_size=0.05,
            alt_symbols={'IB': '2800', 'RIC': '2800.HK'},
            alt_ids={'VALOREN': '123123', 'ISIN': '123123'},
            underlying_type=Underlying.FixedWeightBasket,
            underlying_ids=['0005.HK@SEHK', '0001.HK@SEHK'],
            underlying_weights=[0.1, 0.9],
            option_type=Instrument.Call,
            option_style=Instrument.European,
            strike=100.5,
            exp_date=20160101)
        return inst

    def sample_exchange(self):
        """Return a sample exchange (SEHK)."""
        exchange = ModelFactory.build_exchange(
            'SEHK', 'The Stock Exchange of Hong Kong Limited', 'HK', 'HKEX',
            'HK_Holiday')
        return exchange

    def sample_currency(self):
        """Return a sample currency (HKD)."""
        currency = ModelFactory.build_currency('HKD', 'Hong Kong Doller')
        return currency

    def sample_country(self):
        """Return a sample country (US)."""
        country = ModelFactory.build_country(
            'US', 'United States of America', 'US_holiday')
        return country

    def sample_trading_holidays(self):
        """Return a sample holiday series with two full-day holidays."""
        trading_holiday = ModelFactory.build_holiday_series("HK holiday")
        ModelFactory.add_holiday(trading_holiday, 20161102,
                                 HolidaySeries.Holiday.FullDay,
                                 20161102, 20161103)
        ModelFactory.add_holiday(trading_holiday, 20161223,
                                 HolidaySeries.Holiday.FullDay,
                                 20161223, 20161226)
        return trading_holiday

    def sample_trading_hours(self):
        """Return sample trading hours with one session."""
        trading_hours = ModelFactory.build_trading_hours(
            trading_hours_id='SEHK_trdinghr', timezone_id='HK timezone')
        # NOTE(review): start time 9000 looks like it may be a typo for
        # 0900 -- confirm against the TradingHours schema.
        ModelFactory.add_trading_session(trading_hours,
                                         TradingHours.Session.Sunday,
                                         9000,
                                         TradingHours.Session.Monday,
                                         1600, True)
        return trading_hours

    def sample_timezone(self):
        """Return a sample timezone."""
        timezone = ModelFactory.build_timezone("Venezuela Standard Time")
        return timezone

    def sample_time_series(self):
        """Return a sample daily-bar time series with two items."""
        ds = ModelFactory.build_time_series(series_id="HSI.BAR.86400",
                                            desc="HSI",
                                            keys=["high", "low", "close"],
                                            inputs='HSI.BAR.1',
                                            input_keys=['close', 'open'],
                                            default_output_key="close",
                                            missing_value_replace=0)
        ModelFactory.add_time_series_item(ds, timestamp=0,
                                          data={"high": 350.00, "low": 200.45,
                                                "close": 250.1})
        ModelFactory.add_time_series_item(ds, timestamp=1,
                                          data={"high": 1350.00, "low": 1200.45,
                                                "close": 1250.1})
        return ds

    def sample_bar(self):
        """Return a sample daily time bar for HSI."""
        bar = ModelFactory.build_bar(
            "HSI@SEHK", Bar.Time, 86400, provider_id='IB', timestamp=12312,
            utc_time=12312, begin_time=12300, open=123, high=300, low=30,
            close=156, vol=500, adj_close=324, open_interest=123)
        return bar

    def sample_quote(self):
        """Return a sample bid/ask quote for HSI."""
        quote = ModelFactory.build_quote(
            "HSI@SEHK", provider_id="IB", timestamp=12312,
            utc_time=12312, bid=50, bid_size=34, ask=102.2, ask_size=1)
        return quote

    def sample_trade(self):
        """Return a sample trade tick for HSI."""
        # Bug fix: the instrument id read "HSI<EMAIL>" (a scrubbing
        # artifact); every other sample in this class uses "HSI@SEHK".
        trade = ModelFactory.build_trade(
            "HSI@SEHK", provider_id="IB", timestamp=12312,
            utc_time=12312, price=123, size=123)
        return trade

    def sample_market_depth(self):
        """Return a sample market-depth (order book) update for HSI."""
        md = ModelFactory.build_market_depth(
            "HSI@SEHK", provider_id="IB", timestamp=12312, md_provider="H",
            position=0, operation=MarketDepth.Insert, side=MarketDepth.Bid,
            price=123, size=123, utc_time=12313)
        return md

    def sample_new_order_request(self):
        """Return a sample new-order request."""
        req = ModelFactory.build_new_order_request(
            0, "BuyLowSellHigh", "1", portf_id="TestPortf",
            broker_id="Simulator", inst_id="HSI@SEHK",
            action=Buy, type=Limit, qty=4954.1,
            limit_price=123.2, stop_price=123.2, tif=DAY, oca_tag="23",
            params={"testparam1": "1", "testparam2": "2"})
        return req

    def sample_order_replace_request(self):
        """Return a sample order-replace request."""
        req = ModelFactory.build_order_replace_request(
            0, "BuyLowSellHigh", "1", "2", type=Limit, qty=4954.1,
            limit_price=123.2, stop_price=123.2, tif=DAY, oca_tag="23",
            params={"testparam1": "1", "testparam2": "2"})
        return req

    def sample_order_cancel_request(self):
        """Return a sample order-cancel request."""
        req = ModelFactory.build_order_cancel_request(
            0, "BuyLowSellHigh", "1", "2",
            params={"testparam1": "1", "testparam2": "2"})
        return req

    def sample_order_status_update(self):
        """Return a sample order-status update event."""
        event = ModelFactory.build_order_status_update(
            0, "IB", "event_123", broker_ord_id="broker_1234",
            cl_id="BuyLowSellHigh", cl_ord_id="clOrdId_1",
            inst_id="HSI@SEHK", filled_qty=1231.0, avg_price=123.1,
            status=New)
        return event

    def sample_execution_report(self):
        """Return a sample execution report event."""
        event = ModelFactory.build_execution_report(
            1, "IB", "event_123", broker_ord_id="broker_1234",
            cl_id="BuyLowSellHigh", cl_ord_id="clOrdId_1",
            inst_id="HSI@SEHK", last_qty=100.1, last_price=21.1,
            commission=0.8, filled_qty=1231.0, avg_price=123.1,
            status=New)
        return event

    def sample_account_update(self):
        """Return a sample account update with equity and pnl values."""
        event = ModelFactory.build_account_update(
            0, "IB", broker_event_id="e_123", account_name="account1")
        ModelFactory.update_account_value(
            event.values["equity"], key="equity",
            ccy_values={"HKD": 1231, "USD": 28.8})
        ModelFactory.update_account_value(
            event.values["pnl"], key="pnl",
            ccy_values={"HKD": 1231, "USD": 28.8})
        return event

    def sample_portfolio_update(self):
        """Return a sample portfolio update for HSI."""
        event = ModelFactory.build_portfolio_update(
            0, "IB", broker_event_id="e_456", portf_id="BLSH",
            inst_id="HSI@SEHK",
            position=10, mkt_price=123.1, mkt_value=1231, avg_cost=12.8,
            unrealized_pnl=1230, realized_pnl=0.8)
        return event

    def add_sample_position(self, attribute):
        """Attach a sample HSI position (with order positions) to `attribute`."""
        position = ModelFactory.add_position(
            attribute=attribute, inst_id="HSI@SEHK", ordered_qty=1000,
            filled_qty=20, last_price=120.0)
        self.add_sample_order_position(position=position)
        return position

    def add_sample_order_position(self, position):
        """Attach two sample order positions to `position`."""
        ModelFactory.add_order_position(position, "BLSH", "O1", 100, 20)
        ModelFactory.add_order_position(position, "BLSH", "O2", 100, 50)

    def sample_account_state(self):
        """Return a sample account state with values and a position."""
        account = ModelFactory.build_account_state("test_acct")
        ModelFactory.update_account_value(
            account.values["equity"], key="equity",
            ccy_values={"HKD": 1231, "USD": 28.8})
        ModelFactory.update_account_value(
            account.values["pnl"], key="pnl",
            ccy_values={"HKD": 1231, "USD": 28.8})
        self.add_sample_position(account)
        return account

    def sample_portfolio_state(self):
        """Return a sample portfolio state with a position."""
        portfolio = ModelFactory.build_portfolio_state("test_portf", cash=1000)
        self.add_sample_position(portfolio)
        return portfolio

    def sample_strategy_state(self):
        """Return a sample strategy state with a position."""
        strategy = ModelFactory.build_strategy_state("BLSH", "test")
        self.add_sample_position(strategy)
        return strategy

    def sample_order_state(self):
        """Return a sample order state with fills and PID fields populated."""
        order = ModelFactory.build_order_state(
            "BuyLowSellHigh", "1", portf_id="TestPortf",
            broker_id="Simulator", inst_id="HSI@SEHK", creation_timestamp=1,
            action=Buy, type=Limit, qty=4954.1,
            limit_price=123.2, stop_price=123.2, tif=DAY, oca_tag="23",
            params={"testparam1": "1", "testparam2": "2"}, broker_ord_id="B01",
            update_timestamp=12, status=New,
            filled_qty=12312, avg_price=12, last_qty=12, last_price=2.8,
            stop_limit_ready=True, trailing_stop_exec_price=12)
        return order

    def sample_sequence(self):
        """Return a sample sequence object."""
        seq = ModelFactory.build_sequence("test_seq", 999)
        return seq
|
<gh_stars>0
#!/usr/bin/env python
import pickle
import rospkg
rospack = rospkg.RosPack()
RACECAR_PKG_PATH = rospack.get_path('racecar')
PLANNER_PKG_PATH = rospack.get_path('planning_utils')
CURRENT_PKG_PATH = rospack.get_path('final')
BLUE_FILTER_TOPIC = '/cv_node/blue_data'
RED_FILTER_TOPIC = '/cv_node/red_data'
import collections
import math
import time
import rospy
import numpy as np
import matplotlib.pyplot as plt
from geometry_msgs.msg import PoseArray, PoseStamped, PoseWithCovarianceStamped, PointStamped
from ackermann_msgs.msg import AckermannDriveStamped
from std_msgs.msg import Float64
import utils
# The topic to publish control commands to
PUB_TOPIC = '/vesc/high_level/ackermann_cmd_mux/input/nav_0'
PUB_TOPIC_2 = '/plan_lookahead_follower/pose' # to publish plan lookahead follower to assist with troubleshooting
WINDOW_WIDTH = 5
INIT_POSE_TOPIC = "/initialpose"
'''
Follows a given plan using constant velocity and PID control of the steering angle
'''
class LineFollower:
"""
Initializes the line follower
plan: A list of length T that represents the path that the robot should follow
Each element of the list is a 3-element numpy array of the form [x,y,theta]
pose_topic: The topic that provides the current pose of the robot as a PoseStamped msg
plan_lookahead: If the robot is currently closest to the i-th pose in the plan,
then it should navigate towards the (i+plan_lookahead)-th pose in the plan
translation_weight: How much the error in translation should be weighted in relation
to the error in rotation
rotation_weight: How much the error in rotation should be weighted in relation
to the error in translation
kp: The proportional PID parameter
ki: The integral PID parameter
kd: The derivative PID parameter
error_buff_length: The length of the buffer that is storing past error values
speed: The speed at which the robot should travel
"""
def __init__(self, plan, pose_topic, plan_lookahead, translation_weight,
rotation_weight, kp, ki, kd, error_buff_length, speed):
# print "inside line_follower, constructor"
# Store the passed parameters
self.plan = plan
self.plan_lookahead = plan_lookahead
# Normalize translation and rotation weights
self.translation_weight = translation_weight / (translation_weight + rotation_weight)
self.rotation_weight = rotation_weight / (translation_weight + rotation_weight)
self.kp = kp
self.ki = ki
self.kd = kd
# The error buff stores the error_buff_length most recent errors and the
# times at which they were received. That is, each element is of the form
# [time_stamp (seconds), error]. For more info about the data struct itself, visit
# https://docs.python.org/2/library/collections.html#collections.deque
self.error_buff = collections.deque(maxlen=error_buff_length)
self.speed = speed
self.found_closest_point = False
self.total_error_list = []
self.angle_from_computer_vision = None
# # print "line_follower Initialized!"
# # print "plan[0]", self.plan[0]
# # print "plan[plan_lookahead]", self.plan[plan_lookahead]
# # print "error_buff length: ", len(self.error_buff)
# # print "error_buff: ", self.error_buff
self.min_delta = 99.99
self.max_delta = -99.99
# YOUR CODE HERE
self.cmd_pub = rospy.Publisher(PUB_TOPIC, AckermannDriveStamped,
queue_size=10) # Create a publisher to PUB_TOPIC
self.goal_pub = rospy.Publisher(PUB_TOPIC_2, PoseStamped,
queue_size=10) # create a publisher for plan lookahead follower
self.delete_pose_pub = rospy.Publisher("MaybeDelete", PoseStamped,
queue_size=10) # create a publisher for plan lookahead follower
self.robot = rospy.Publisher("Robot", PoseStamped,
queue_size=10) # create a publisher for plan lookahead follower
self.deleted = rospy.Publisher("Deleted", PoseStamped,
queue_size=10) # create a publisher for plan lookahead follower
self.float_pub = rospy.Publisher("angle_from_line_follower", Float64, queue_size=1)
self.selected_pub = rospy.Publisher("Selected", PoseStamped,
queue_size=1) # create a publisher to visualize some pose from selected rollout
self.line_follower_angle_pub = rospy.Publisher("LineFollowerAngle", PoseStamped,
queue_size=1) # create a publisher to visualize some pose from selected rollout
self.float_blue_sub = rospy.Subscriber(BLUE_FILTER_TOPIC, Float64, self.float_cb_blue)
self.float_red_sub = rospy.Subscriber(RED_FILTER_TOPIC, Float64, self.float_cb_red)
self.error = 0.0
try:
self.f = open('/home/nvidia/line_follower.log', 'w')
except IOError:
pass
# Create a publisher to publish the initial pose
init_pose_pub = rospy.Publisher(INIT_POSE_TOPIC, PoseWithCovarianceStamped,
queue_size=1) # to publish init position x=2500, y=640
PWCS = PoseWithCovarianceStamped() # create a PoseWithCovarianceStamped() msg
PWCS.header.stamp = rospy.Time.now() # set header timestamp value
PWCS.header.frame_id = "map" # set header frame id value
PWCS.pose.pose.position.x = plan[0][0]
PWCS.pose.pose.position.y = plan[0][1]
PWCS.pose.pose.position.z = 0
PWCS.pose.pose.orientation = utils.angle_to_quaternion(plan[0][2])
for i in range(0, 1):
rospy.sleep(0.5)
init_pose_pub.publish(
PWCS) # publish initial pose, now you can add a PoseWithCovariance with topic of "/initialpose" in rviz
# Create a subscriber to pose_topic, with callback 'self.pose_cb'
self.pose_sub = rospy.Subscriber(pose_topic, PoseStamped, self.pose_cb)
# print "inside line_follower, constructor end"
# This part is not used anymore. Old code.
self.new_init_pos = rospy.Subscriber(INIT_POSE_TOPIC, PoseWithCovarianceStamped, self.new_init_pose_cb)
def float_cb_red(self, msg):
pass
def float_cb_blue(self, msg):
# print "BLUE cb", msg.data
self.angle_from_computer_vision = msg.data
def new_init_pose_cb(self, msg):
if len(self.plan) > 0:
rot_mat = utils.rotation_matrix(-1 * self.curr_pose[2])
while len(self.plan) > 0:
distance = np.sqrt(np.square(self.curr_pose[0] - self.plan[0][0]) + np.square(self.curr_pose[1] - self.plan[0][1]))
# Figure out if self.plan[0] is in front or behind car
offset = rot_mat * ((self.plan[0][0:2] - self.curr_pose[0:2]).reshape(2, 1))
offset.flatten()
if offset[0] > 0.0 or distance > 1.0:
break
self.plan.pop(0)
'''
Computes the error based on the current pose of the car
cur_pose: The current pose of the car, represented as a numpy array [x,y,theta]
Returns: (False, 0.0) if the end of the plan has been reached. Otherwise, returns
(True, E) - where E is the computed error
'''
def compute_error(self, cur_pose):
"""
Find the first element of the plan that is in front of the robot, and remove
any elements that are behind the robot. To do this:
Loop over the plan (starting at the beginning) For each configuration in the plan
If the configuration is behind the robot, remove it from the plan
Will want to perform a coordinate transformation to determine if
the configuration is in front or behind the robot
If the configuration is in front of the robot, break out of the loop
"""
# # print "Computing error..."
# check the leftmost pose in the plan pose-array and if it is behind the car then delete it
if len(self.plan) > 0:
# This is the ta_lab1 solution code to delete poses behind the robot.
# Our solution is commented our below.
# Both produce identitical results.
rot_mat = utils.rotation_matrix(-1 * cur_pose[2])
while len(self.plan) > 0:
distance = np.sqrt(np.square(cur_pose[0] - self.plan[0][0]) + np.square(cur_pose[1] - self.plan[0][1]))
# Figure out if self.plan[0] is in front or behind car
offset = rot_mat * ((self.plan[0][0:2] - cur_pose[0:2]).reshape(2, 1))
offset.flatten()
if offset[0] > 0.0 or distance > 1.0:
break
self.plan.pop(0)
# left_edge = (cur_pose[2] + np.pi / 2) * 180 / 3.14 # deg
# right_edge = (cur_pose[2] - np.pi / 2) * 180 / 3.14 # deg
# # if cur_pose[2] < 0:
# # left_edge += 360.0
# # right_edge += 360.0
# angle_robot_path_point = math.atan2(cur_pose[1] - self.plan[0][1],
# cur_pose[0] - self.plan[0][0]) * 180 / 3.14 # deg
#
#
# # for troubleshooting if path points are not deleted correctly
# # converted angles from rad to deg for easier troubleshooting
# # # # print("robot position: ", cur_pose)
# # # # print("path point position: ", self.plan[0])
# # # # print("left_edge: ", left_edge)
# # # # print("right_edge: ", right_edge)
# # # # print("path point to robot vector: ",cur_pose[1] - self.plan[0][1], cur_pose[0] - self.plan[0][0])
# # # # print("angle of path point to robot vector",angle_robot_path_point)
# # # # print("path_point yaw",self.plan[0][2] * 180 / 3.14)
#
#
# # if left_edge <= 0:
# # left_edge += 2.0 * np.pi
# # if angle_robot_path_point <= 0:
# # angle_robot_path_point += 2.0 * np.pi
#
# behind = (angle_robot_path_point > right_edge and angle_robot_path_point < left_edge) # is path point behind robot?
#
# # print cur_pose, behind , left_edge, angle_robot_path_point, right_edge
#
# PS = PoseStamped() # create a PoseStamped() msg
# PS.header.stamp = rospy.Time.now() # set header timestamp value
# PS.header.frame_id = "map" # set header frame id value
# PS.pose.position.x = self.plan[0][0] # set msg x position to value of the x position in the look ahead pose from the path
# PS.pose.position.y = self.plan[0][1] # set msg y position to value of the y position in the look ahead pose from the path
# PS.pose.position.z = 0 # set msg z position to 0 since robot is on the ground
# PS.pose.orientation = utils.angle_to_quaternion(self.plan[0][2]) # set msg orientation to [converted to queternion] value of the yaw angle in the look ahead pose from the path
#
# self.delete_pose_pub.publish(
# PS) # publish look ahead follower, now you can add a Pose with topic of PUB_TOPIC_2 value in rviz
#
#
#
# path_pose_similar_direction = (self.plan[0][2] > right_edge and self.plan[0][
# 2] < left_edge) # is path point in similar direction as robot?
# path_pose_similar_direction = True
# if behind and path_pose_similar_direction and len(
# self.plan) > 0: # delete point if behind robot, similar direction, and not last point in path
# # # print "delete element: ", len(self.plan) # for troubleshooting, show path points before deleting
#
# PS = PoseStamped() # create a PoseStamped() msg
# PS.header.stamp = rospy.Time.now() # set header timestamp value
# PS.header.frame_id = "map" # set header frame id value
# PS.pose.position.x = self.plan[0][
# 0] # set msg x position to value of the x position in the look ahead pose from the path
# PS.pose.position.y = self.plan[0][
# 1] # set msg y position to value of the y position in the look ahead pose from the path
# PS.pose.position.z = 0 # set msg z position to 0 since robot is on the ground
# PS.pose.orientation = utils.angle_to_quaternion(self.plan[0][
# 2]) # set msg orientation to [converted to queternion] value of the yaw angle in the look ahead pose from the path
#
# self.deleted.publish(
# PS)
#
# PS = PoseStamped() # create a PoseStamped() msg
# PS.header.stamp = rospy.Time.now() # set header timestamp value
# PS.header.frame_id = "map" # set header frame id value
# PS.pose.position.x = cur_pose[0]
# PS.pose.position.y = cur_pose[1]
# PS.pose.position.z = 0 # set msg z position to 0 since robot is on the ground
# PS.pose.orientation = utils.angle_to_quaternion(cur_pose[2]) # set msg orientation to [converted to queternion] value of the yaw angle in the look ahead pose from the path
# # print "CURR POSE", cur_pose, "DELETED", self.plan[0][:]
# self.robot.publish(
# PS)
#
#
#
#
#
# self.plan.pop(
# 0) # delete the first element in the path, since that point is behind robot and it's direction is similar to robot
# # # print "element deleted? : ", len(self.plan) # for troubleshooting, show path points after deleting
if len(self.plan) > 0:
PS = PoseStamped() # create a PoseStamped() msg
PS.header.stamp = rospy.Time.now() # set header timestamp value
PS.header.frame_id = "map" # set header frame id value
goal_idx = min(0 + self.plan_lookahead,
len(self.plan) - 1) # get goal index for looking ahead this many indices in the path
PS.pose.position.x = self.plan[goal_idx][
0] # set msg x position to value of the x position in the look ahead pose from the path
PS.pose.position.y = self.plan[goal_idx][
1] # set msg y position to value of the y position in the look ahead pose from the path
PS.pose.position.z = 0 # set msg z position to 0 since robot is on the ground
PS.pose.orientation = utils.angle_to_quaternion(self.plan[goal_idx][
2]) # set msg orientation to [converted to queternion] value of the yaw angle in the look ahead pose from the path
self.goal_pub.publish(
PS) # publish look ahead follower, now you can add a Pose with topic of PUB_TOPIC_2 value in rviz
# Check if the plan is empty. If so, return (False, 0.0)
# YOUR CODE HERE
if len(self.plan) == 0:
return False, 0.0
# At this point, we have removed configurations from the plan that are behind
# the robot. Therefore, element 0 is the first configuration in the plan that is in
# front of the robot. To allow the robot to have some amount of 'look ahead',
# we choose to have the robot head towards the configuration at index 0 + self.plan_lookahead
# We call this index the goal_index
goal_idx = min(0 + self.plan_lookahead, len(self.plan) - 1)
# Compute the translation error between the robot and the configuration at goal_idx in the plan
# YOUR CODE HERE
# # print "cur_pose: ", cur_pose
# # print "lookahead pose: ", self.plan[goal_idx]
look_ahead_position = np.array([self.plan[goal_idx][0], self.plan[goal_idx][1]]).reshape([2, 1])
translation_robot_to_origin = np.array([-cur_pose[0], -cur_pose[1]]).reshape([2, 1])
look_ahead_position_translated = look_ahead_position + translation_robot_to_origin
rotation_matrix_robot_to_x_axis = utils.rotation_matrix(-cur_pose[2])
look_ahead_position_translated_and_rotated = rotation_matrix_robot_to_x_axis * look_ahead_position_translated
# # print "look_ahead_position_translated_and_rotated: ", look_ahead_position_translated_and_rotated
x_error = float(look_ahead_position_translated_and_rotated[0][
0]) # This is the distance that the robot is behind the lookahead point parallel to the path
y_error = float(look_ahead_position_translated_and_rotated[1][
0]) # This is the distance away from the path, perpendicular from the path to the robot
translation_error = -y_error # math.tan(y_error / x_error) * math.pi / 180 # angle in rad to drive along hypotenuse toward the look ahead point
# translation_error *= 10 #float(y_error/x_error) # make the robot turn more sharply if far away from path
# translation_error = np.sqrt(np.square(cur_pose[0] - self.plan[goal_idx][0]) + np.square(cur_pose[1] - self.plan[goal_idx][1]))
# # print "Translation error: ", translation_error
# Compute the total error
# Translation error was computed above
# Rotation error is the difference in yaw between the robot and goal configuration
# Be careful about the sign of the rotation error
# YOUR CODE HERE
rotation_error = cur_pose[2] - self.plan[goal_idx][2]
if rotation_error > np.pi:
rotation_error -= np.pi*2.0
elif rotation_error < -np.pi:
rotation_error += np.pi*2.0
#ToDo: Fix rotation error when moving right to left, it only calculates correctly left to right
# # print "Rotation error: ", rotation_error
error = self.translation_weight * translation_error + self.rotation_weight * rotation_error
# # print "Overall error: ", error
self.total_error_list.append(error)
return True, error
'''
Uses a PID control policy to generate a steering angle from the passed error
error: The current error
Returns: The steering angle that should be executed
'''
def compute_steering_angle(self, error):
# # print "Computing steering angle..."
now = rospy.Time.now().to_sec() # Get the current time
# Compute the derivative error using the passed error, the current time,
# the most recent error stored in self.error_buff, and the most recent time
# stored in self.error_buff
# YOUR CODE HERE
deriv_error = 0 # for the first iteration, this is true
integ_error = 0
# # print "setting deriv and integ error to 0"
# # print "error_buff len", len(self.error_buff)
if len(self.error_buff) > 0:
time_delta = now - self.error_buff[-1][1] # -1 means peeking the rightmost element (most recent)
error_delta = error - self.error_buff[-1][0]
deriv_error = error_delta / time_delta
# # print "computed deriv error: ", deriv_error
# Add the current error to the buffer
self.error_buff.append((error, now))
# Compute the integral error by applying rectangular integration to the elements
# of self.error_buff:
# ://chemicalstatistician.wordpress.com/2014/01/20/rectangular-integration-a-k-a-the-midpoint-rule/
# YOUR CODE HERE
error_array = []
if len(self.error_buff) > 0:
for err in self.error_buff:
error_array.append(err[0])
integ_error = np.trapz(error_array)
# # print "computed integ error: ", integ_error
# Compute the steering angle as the sum of the pid errors
# YOUR CODE HERE
return -(self.kp * error + self.ki * integ_error + self.kd * deriv_error)
'''
Callback for the current pose of the car
msg: A PoseStamped representing the current pose of the car
This is the exact callback that we used in our solution, but feel free to change it
'''
def pose_cb(self, msg):
# print "inside line_follower ,pose_cb"
time.sleep(0)
# # print "Callback received current pose. "
cur_pose = np.array([msg.pose.position.x,
msg.pose.position.y,
utils.quaternion_to_angle(msg.pose.orientation)])
# print "Current pose: ", cur_pose
# # # # print "plan[:,[0,1]]", type(np.array(self.plan)), np.array(self.plan)[:,[0,1]]
# # find closest point and delete all points before it in the plan
# # only done once at the start of following the plan
# if self.found_closest_point == False:
# min_path_distance = np.Infinity # to find closest path point and delete all points before it
# for count, position in enumerate(np.array(self.plan)[:, [0, 1]]):
# distance = np.sqrt(np.square(cur_pose[0] - position[0]) + np.square(cur_pose[1] - position[1]))
# if distance < min_path_distance:
# self.found_closest_point = True
# min_path_distance = distance
# if count > 0:
# self.plan.pop(0)
success, error = self.compute_error(cur_pose)
self.error = error
# print "Success, Error: ", success, error
if not success:
# We have reached our goal
self.pose_sub = None # Kill the subscriber
self.speed = 0.0 # Set speed to zero so car stops
if False: # show error plot?
# plot the error here
title_string = "Error plot with kp=%.2f, kd=%.2f, ki=%.2f t_w=%.2f r_w=%.2f" % \
(self.kp, self.kd, self.ki, self.translation_weight, self.rotation_weight)
fig = plt.figure()
ax = fig.add_subplot(111) #
ax.plot(self.total_error_list)
plt.title(title_string)
plt.text(0.5, 0.85, 'Total error = %.2f' % np.trapz(abs(np.array(self.total_error_list))),
horizontalalignment='center',
verticalalignment='center', transform=ax.transAxes)
plt.xlabel('Iterations')
plt.ylabel('Error')
plt.show()
np.savetxt("/home/joe/Desktop/Error_1.csv", np.array(self.total_error_list), delimiter=",")
return 0
f = None
# if computer vision angle is published then use that angle
pid_angle = self.compute_steering_angle(error)
# if self.angle_from_computer_vision is not None and self.angle_from_computer_vision > -98.0 and self.error < 2:
if False:
delta = self.angle_from_computer_vision
print "CV ANGLE chosen: ", delta
else: # if computer vision angle is not published then use pid controller angle
delta = pid_angle
print "PID ANGLE chosen: ", delta
try:
self.f.write("CV ANGLE: " + str(delta) + "\tPID ANGLE" + str(pid_angle))
except (IOError, AttributeError):
pass
if delta < self.min_delta:
self.min_delta = delta
if delta > self.max_delta:
self.max_delta = delta
print 'min=%f and max=%f' % (self.min_delta, self.max_delta)
#
if True: # not using laser_wanderer_robot.launch
# Setup the control message
ads = AckermannDriveStamped()
ads.header.frame_id = '/map'
ads.header.stamp = rospy.Time.now()
ads.drive.steering_angle = delta
ads.drive.speed = 2.0
self.cmd_pub.publish(ads)
# Send the control message to laser_wanderer_robot.launch
else:
float_msg = Float32()
float_msg.data = delta
self.float_pub.publish(float_msg)
def main():
    # ROS entry point (Python 2): read controller parameters, obtain a plan
    # (either freshly from a planner topic or from a pickled file), then hand
    # control to a LineFollower until the node shuts down.
    rospy.init_node('line_follower', anonymous=True) # Initialize the node
    """
    Load these parameters from launch file
    We provide suggested starting values of params, but you should
    tune them to get the best performance for your system
    Look at constructor of LineFollower class for description of each var
    'Default' values are ones that probably don't need to be changed (but you could for fun)
    'Starting' values are ones you should consider tuning for your system
    """
    # YOUR CODE HERE
    plan_topic = rospy.get_param('~plan_topic') # Default val: '/planner_node/car_plan'
    pose_topic = rospy.get_param('~pose_topic') # Default val: '/sim_car_pose/pose'
    # NOTE(review): hard-coded robot override below ignores the ~pose_topic
    # param; flip the constant (or parameterize) to run against the simulator.
    if True: # if on robot? else in rviz
        pose_topic = "/pf/viz/inferred_pose"
    plan_lookahead = rospy.get_param('~plan_lookahead') # Starting val: 5
    translation_weight = rospy.get_param('~translation_weight') # Starting val: 1.0
    rotation_weight = rospy.get_param('~rotation_weight') # Starting val: 0.0
    kp = rospy.get_param('~kp') # Starting val: 1.0
    ki = rospy.get_param('~ki') # Starting val: 0.0
    kd = rospy.get_param('~kd') # Starting val: 0.0
    error_buff_length = rospy.get_param('~error_buff_length') # Starting val: 10
    speed = 2.0 # rospy.get_param('~speed') # Default val: 1.0
    loadFinalPlan = True # set this to True to use preexisting plan instead of creating a new one in RVIZ
    if not loadFinalPlan: # make new plan in RVIZ by setting initial and goal poses
        raw_input("Press Enter to when plan available...") # Waits for ENTER key press
        # Use rospy.wait_for_message to get the plan msg
        # Convert the plan msg to a list of 3-element numpy arrays
        # Each array is of the form [x,y,theta]
        # Create a LineFollower object
        raw_plan = rospy.wait_for_message(plan_topic, PoseArray)
        # raw_plan is a PoseArray which has an array of geometry_msgs/Pose called poses
        plan_array = []
        for pose in raw_plan.poses:
            plan_array.append(np.array([pose.position.x, pose.position.y, utils.quaternion_to_angle(pose.orientation)]))
    else: # use preexisting plan from plan_creator.launch and plan_cleanup.launch
        # # # print "Len of plan array: %d" % len(plan_array)
        # # # # print plan_array
        plan_relative_path = "/saved_plans/plan_12_9_2018"
        # load plan_array
        # load raw_plan msg (PoseArray)
        # NOTE(review): pickle.load on a file opened in text mode ("r") --
        # works on Python 2 only; the handle is also never closed.
        loaded_vars = pickle.load(open(CURRENT_PKG_PATH + plan_relative_path, "r"))
        plan_array = loaded_vars[0]
        raw_plan = loaded_vars[1]
        # visualize loaded plan
        PA_pub = rospy.Publisher("/LoadedPlan", PoseArray, queue_size=1)
        # Republish a few times so late-joining RViz subscribers see the plan.
        for i in range(0, 5):
            rospy.sleep(0.5)
            PA_pub.publish(raw_plan)
            # # print "LoadedPlan Published"
    try:
        if raw_plan:
            pass
    except rospy.ROSException:
        exit(1)
    lf = LineFollower(plan_array, pose_topic, plan_lookahead, translation_weight,
                      rotation_weight, kp, ki, kd, error_buff_length, speed) # Create a Line follower
    rospy.spin() # Prevents node from shutting down

if __name__ == '__main__':
    main()
|
from datetime import datetime
import logging
from discord import User
from main import AIKyaru
from aiohttp import ClientSession, ClientTimeout
from expiringdict import ExpiringDict
from utils import errors
from copy import deepcopy
import re
class Api:
    """Async HTTP client for the clan-battle guild API.

    Wraps a shared :class:`aiohttp.ClientSession` (carrying the API token)
    and layers short-lived in-memory caches over user, form and record
    lookups to keep API traffic down.
    """

    def __init__(self, bot: AIKyaru):
        self.bot = bot
        self.logger = logging.getLogger("AIKyaru.ClanApi")
        self.apiUrl = self.bot.config.get(["GUILD_API_URL"])
        # One shared session for every request; authenticated via x-token.
        self.session = ClientSession(
            headers={
                "User-Agent": "AIKyaru v3",
                "x-token": self.bot.config.get(["GUILD_API_TOKEN"]),
            },
            timeout=ClientTimeout(total=10),
        )
        self.user_cache = ExpiringDict(max_len=1000, max_age_seconds=86400)  # cache user data for 1 day
        self.form_cache = ExpiringDict(max_len=1000, max_age_seconds=600)  # cache form data for 10 mins
        self.record_cache = ExpiringDict(
            max_len=100, max_age_seconds=10
        )  # cache records for 10 secs to prevent high api use

    def form_id_check(self, form_id: str):
        """Validate that *form_id* is a 32-digit hex string.

        Raises:
            errors.IncorrectFormId: if the format is wrong.
        """
        if not re.match(r"^[0-9a-fA-F]{32}$", form_id):
            raise errors.IncorrectFormId

    async def get_user(self, user: User):
        """Return API-side user data, auto-registering unknown users.

        Results are cached for a day per Discord user id.

        Raises:
            ValueError: if the API reports a duplicate registration (400).
        """
        cached = self.user_cache.get(user.id)
        if cached:
            return cached
        async with self.session.get(
            f"{self.apiUrl}/bot/isRegister",
            params={"platform": 1, "user_id": user.id},
        ) as resp:
            data = await resp.json()
            self.logger.debug(f"isRegister {user.id}: {resp.status}")
            if resp.status == 404:
                # Unknown user: create the account transparently.
                async with self.session.post(
                    f"{self.apiUrl}/bot/register",
                    json={"platform": 1, "user_id": user.id, "avatar": str(user.avatar_url), "name": user.name},
                ) as resp:
                    data = await resp.json()
                    if resp.status == 400:
                        self.logger.error(f"register {user.id}: {data}")
                        raise ValueError("重複註冊")
                    self.logger.debug(f"register {user.id}: {resp.status}")
            self.user_cache[user.id] = data
            return data

    async def get_form(self, form_id: str):
        """Return form data for *form_id* (cached for 10 minutes).

        Raises:
            errors.FormNotFound: if the API reports 404.
        """
        cached = self.form_cache.get(form_id)
        if cached:
            return cached
        async with self.session.get(f"{self.apiUrl}/forms/{form_id}") as resp:
            data = await resp.json()
            self.logger.debug(f"get_form {form_id}: {resp.status}")
            if resp.status == 404:
                raise errors.FormNotFound(form_id)
            self.form_cache[form_id] = data
            return data

    async def create_form(self, user: User, title: str, month: int = None):
        """Create a new clan-battle form owned by *user*.

        *month* defaults to the current "YYYYMM" stamp when omitted
        (note: the default is a string, while callers may pass an int --
        the API accepts both as JSON).
        """
        if not month:
            month = datetime.now().strftime("%Y%m")
        user_data = await self.get_user(user)
        async with self.session.post(
            f"{self.apiUrl}/bot/forms/create",
            params={"user_id": user_data["id"]},
            json={"month": month, "title": title},
        ) as resp:
            data = await resp.json()
            self.logger.debug(f"create_form {title}: {resp.status}")
            return data

    async def get_record(self, form_id: str, week: int = None, boss: int = None):
        """Return records of a form, optionally narrowed to week/boss.

        Cached for ~10 seconds.  The cache key includes the form id:
        previously only the week/boss path was used as key, so two
        different forms queried for the same week/boss inside the cache
        window shared (wrong) results.
        """
        path = "/all"
        if week:
            path = f"/week/{week}"
        if boss:
            path += f"/boss/{boss}"
        cache_key = f"{form_id}{path}"  # scope the cache per form
        cached = self.record_cache.get(cache_key)
        if cached:
            return cached
        async with self.session.get(f"{self.apiUrl}/forms/{form_id}{path}") as resp:
            data = await resp.json()
            self.logger.debug(f"get_record /{form_id}/{week}/{boss}: {len(data)} records")
            self.record_cache[cache_key] = data
            return data

    async def get_boss(self, form_id: str, week: int, boss: int):
        """Return boss data (1-based *boss* index) with ``hp`` collapsed to
        the single value for the difficulty stage implied by *week*.

        NOTE(review): the stage thresholds (45/35/11/4) presumably mirror
        the game's phase schedule -- confirm against current game data.
        """
        form = await self.get_form(form_id)
        boss_data = deepcopy(form["boss"][boss - 1])
        if week >= 45:
            stage = 5
        elif week >= 35:
            stage = 4
        elif week >= 11:
            stage = 3
        elif week >= 4:
            stage = 2
        else:
            stage = 1
        boss_data["hp"] = boss_data["hp"][stage - 1]
        return boss_data

    async def post_record(
        self,
        form_id: str,
        week: int,
        boss: int,
        status: int,
        user_id: str,
        damage: int = None,
        comment: str = None,
        record_id: int = None,
        month: int = None,
    ):
        """Create or update a battle record on a form.

        Raises:
            errors.RecordDeleted: if the API reports 404 (record gone).
        """
        async with self.session.post(
            f"{self.apiUrl}/bot/forms/{form_id}/week/{week}/boss/{boss}",
            params={"user_id": user_id},
            json={"id": record_id, "status": status, "damage": damage, "comment": comment, "month": month},
        ) as resp:
            data = await resp.json()
            if resp.status == 404:
                raise errors.RecordDeleted
            self.logger.debug(f"post_record /{form_id}/{week}/{boss}: {resp.status}")
            return data
<gh_stars>10-100
from __future__ import with_statement
import imp
import inspect
import os
import sys
from attest import ast, statistics
from attest.codegen import to_source, SourceGenerator
# Public API of this module.
__all__ = ['COMPILES_AST',
           'ExpressionEvaluator',
           'TestFailure',
           'assert_hook',
           'AssertTransformer',
           'AssertImportHook',
          ]

# Feature-detect whether compile() accepts AST objects directly.
# CPython 2.5 raises TypeError here; in that case generated source code
# is compiled instead (see AssertTransformer.code).
try:
    compile(ast.parse('pass'), '<string>', 'exec')
except TypeError:
    COMPILES_AST = False
else:
    COMPILES_AST = True
class ExpressionEvaluator(SourceGenerator):
    """Evaluates ``expr`` in the context of ``globals`` and ``locals``,
    expanding the values of variables and the results of binary operations, but
    keeping comparison and boolean operators.

    .. testsetup::

        from attest import ExpressionEvaluator

    >>> var = 1 + 2
    >>> value = ExpressionEvaluator('var == 5 - 3', globals(), locals())
    >>> value.late_visit()
    >>> repr(value)
    '(3 == 2)'
    >>> bool(value)
    False

    .. versionadded:: 0.5
    """

    def __init__(self, expr, globals, locals):
        self.expr = expr
        # Putting locals in globals for closures
        self.globals = dict(globals)
        self.locals = locals
        self.globals.update(self.locals)
        # Pieces of the expanded source, joined by __repr__.
        self.result = []
        # The expression node: value of the first statement of the parsed module.
        self.node = ast.parse(self.expr).body[0].value

    # Trigger visit after init because we don't want to
    # evaluate twice in case of a successful assert
    def late_visit(self):
        # Walk the AST, writing expanded sub-expressions into self.result.
        self.visit(self.node)

    def __repr__(self):
        # The expression with variables/operations replaced by their values.
        return ''.join(self.result)

    def __str__(self):
        # Original expression and its expanded form on separate lines.
        return '\n'.join((self.expr, repr(self)))

    def __nonzero__(self):
        # Python 2 truth hook: the actual result of evaluating the
        # original expression (not the expanded repr).
        return bool(eval(self.expr, self.globals, self.locals))

    def eval(self, node):
        # Evaluate a single AST node by rendering it back to source.
        return eval(to_source(node), self.globals, self.locals)

    def write(self, s):
        # SourceGenerator output hook: collect text instead of emitting it.
        self.result.append(str(s))

    def visit_Name(self, node):
        # Names expand to the value's __name__ when it has one
        # (functions, classes), otherwise to the value's repr.
        value = self.eval(node)
        if getattr(value, '__name__', None):
            self.write(value.__name__)
        else:
            self.write(repr(value))

    def generic_visit(self, node):
        # Default: replace the whole sub-expression with its repr.
        self.write(repr(self.eval(node)))

    # These node types are evaluated wholesale rather than descended into.
    visit_BinOp = visit_Subscript = generic_visit
    visit_ListComp = visit_GeneratorExp = generic_visit
    visit_SetComp = visit_DictComp = generic_visit
    visit_Call = visit_Attribute = generic_visit
class TestFailure(AssertionError):
    """Extended :exc:`AssertionError` used by the assert hook.

    :param value: The asserted expression evaluated with
        :class:`ExpressionEvaluator`.
    :param msg: Optional message passed to the assertion.

    .. versionadded:: 0.5
    """

    def __init__(self, value, msg=''):
        # Initialise the underlying AssertionError with the message and
        # keep the evaluated expression around for error reporting.
        AssertionError.__init__(self, msg)
        self.value = value
def assert_hook(expr, msg='', globals=None, locals=None):
    """Like ``assert``, but using :class:`ExpressionEvaluator`.  If
    you import this in test modules and the :class:`AssertImportHook` is
    installed (which it is automatically the first time you import from
    :mod:`attest`), ``assert`` statements are rewritten as a call to
    this.

    The import must be a top-level *from* import, example::

        from attest import Tests, assert_hook

    .. versionadded:: 0.5
    """
    statistics.assertions += 1
    # Default to the caller's namespaces so the expression sees the same
    # names the original assert statement would have seen.
    if globals is None:
        globals = inspect.stack()[1][0].f_globals
    if locals is None:
        locals = inspect.stack()[1][0].f_locals
    value = ExpressionEvaluator(expr, globals, locals)
    if not value:
        # Visit only if assertion fails
        value.late_visit()
        raise TestFailure(value, msg)
# Build AST nodes on 2.5 more easily
def _build(node, **kwargs):
node = node()
for key, value in kwargs.iteritems():
setattr(node, key, value)
return node
class AssertTransformer(ast.NodeTransformer):
    """Parses `source` with :mod:`_ast` and transforms `assert`
    statements into calls to :func:`assert_hook`.

    .. warning::

        CPython 2.5 doesn't compile AST nodes and when that fails this
        transformer will generate source code from the AST instead. While
        Attest's own tests passes on CPython 2.5, there might be code that
        it currently would render back incorrectly, most likely resulting
        in a failure. Because Python's syntax is simple, this isn't very
        likely, but you might want to :meth:`~AssertImportHook.disable` the
        import hook if you test regularly on CPython 2.5.

        It also messes up the line numbers so they don't match the original
        source code, meaning tracebacks will point to the line numbers in
        the *generated* source and preview the code on that line in the
        *original* source. The improved error message with the import hook
        is often worth it however, and failures will still point to the
        right file and function.

    .. versionadded:: 0.5
    """

    def __init__(self, source, filename=''):
        self.source = source
        self.filename = filename

    @property
    def should_rewrite(self):
        """:const:`True` if the source imports :func:`assert_hook`."""
        # Cheap substring test first; only then confirm with a real parse
        # that there is a top-level `from attest import ... assert_hook`.
        return ('assert_hook' in self.source and
                any(s.module == 'attest' and
                    any(n.name == 'assert_hook' for n in s.names)
                    for s in ast.parse(self.source).body
                    if isinstance(s, ast.ImportFrom)))

    def make_module(self, name, newpath=None):
        """Compiles the transformed code into a module object which it also
        inserts in :data:`sys.modules`.

        :returns: The module object.
        """
        module = imp.new_module(name)
        module.__file__ = self.filename
        if newpath:
            module.__path__ = newpath
        # Register before exec so circular imports can see the module.
        sys.modules[name] = module
        # Python 2 exec statement: run the transformed code object in the
        # module's namespace.
        exec self.code in vars(module)
        return module

    @property
    def node(self):
        """The transformed AST node."""
        node = ast.parse(self.source, self.filename)
        node = self.visit(node)
        # Newly built nodes need line/column info to be compilable.
        ast.fix_missing_locations(node)
        return node

    @property
    def code(self):
        """The :attr:`node` compiled into a code object."""
        if COMPILES_AST:
            return compile(self.node, self.filename, 'exec')
        # CPython 2.5 can't compile AST objects: render to source first.
        return compile(to_source(self.node), self.filename, 'exec')

    def visit_Assert(self, node):
        # Rewrite `assert test, msg` into:
        #   assert_hook('<source of test>', msg, globals(), locals())
        args = [_build(ast.Str, s=to_source(node.test)),
                node.msg if node.msg is not None else _build(ast.Str, s=''),
                _build(ast.Call,
                       func=_build(ast.Name, id='globals', ctx=ast.Load()),
                       args=[], keywords=[], starargs=None, kwargs=None),
                _build(ast.Call,
                       func=_build(ast.Name, id='locals', ctx=ast.Load()),
                       args=[], keywords=[], starargs=None, kwargs=None)
               ]
        return ast.copy_location(
            _build(ast.Expr, value=_build(ast.Call,
                func=_build(ast.Name, id='assert_hook', ctx=ast.Load()),
                args=args, keywords=[], starargs=None, kwargs=None)), node)
class AssertImportHookEnabledDescriptor(object):
    """Descriptor reporting whether an instance of the owning hook class
    is currently installed on :data:`sys.meta_path`."""

    def __get__(self, instance, owner):
        for hook in sys.meta_path:
            if isinstance(hook, owner):
                return True
        return False
class AssertImportHook(object):
    """An :term:`importer` that transforms imported modules with
    :class:`AssertTransformer`.

    .. versionadded:: 0.5
    """

    #: Class property, :const:`True` if the hook is enabled.
    enabled = AssertImportHookEnabledDescriptor()

    @classmethod
    def enable(cls):
        """Enable the import hook."""
        # Drop any previous instance first so the hook is never installed twice.
        cls.disable()
        sys.meta_path.insert(0, cls())

    @classmethod
    def disable(cls):
        """Disable the import hook."""
        sys.meta_path[:] = [ih for ih in sys.meta_path
                            if not isinstance(ih, cls)]

    def __init__(self):
        # name -> ((fd, fn, info), path) as returned by imp.find_module.
        self._cache = {}

    def __enter__(self):
        # Context-manager form: install the hook for the `with` block only.
        sys.meta_path.insert(0, self)

    def __exit__(self, exc_type, exc_value, traceback):
        sys.meta_path.remove(self)

    def find_module(self, name, path=None):
        # PEP 302 finder: claim the module if imp can locate it, caching
        # the find result for load_module/get_source.
        lastname = name.rsplit('.', 1)[-1]
        try:
            self._cache[name] = imp.find_module(lastname, path), path
        except ImportError:
            return
        return self

    def load_module(self, name):
        # PEP 302 loader.
        if name in sys.modules:
            return sys.modules[name]
        source, filename, newpath = self.get_source(name)
        (fd, fn, info), path = self._cache[name]
        if source is None:
            # No Python source (extension/builtin): load normally.
            return imp.load_module(name, fd, fn, info)
        transformer = AssertTransformer(source, filename)
        if not transformer.should_rewrite:
            # Module doesn't use assert_hook; re-find because get_source
            # consumed the original file descriptor.
            fd, fn, info = imp.find_module(name.rsplit('.', 1)[-1], path)
            return imp.load_module(name, fd, fn, info)
        try:
            return transformer.make_module(name, newpath)
        except Exception, err:
            # Surface any transform/exec failure to the import machinery
            # as an ImportError (Python 2 except syntax).
            raise ImportError('cannot import %s: %s' % (name, err))

    def get_source(self, name):
        # Return (source, filename, newpath) for a previously found module,
        # or (None, None, None) when there is no Python source to rewrite.
        try:
            (fd, fn, info), path = self._cache[name]
        except KeyError:
            raise ImportError(name)
        code = filename = newpath = None
        if info[2] == imp.PY_SOURCE:
            filename = fn
            with fd:
                code = fd.read()
        elif info[2] == imp.PY_COMPILED:
            # For a .pyc, read the matching .py file next to it.
            filename = fn[:-1]
            with open(filename, 'U') as f:
                code = f.read()
        elif info[2] == imp.PKG_DIRECTORY:
            filename = os.path.join(fn, '__init__.py')
            newpath = [fn]
            with open(filename, 'U') as f:
                code = f.read()
        return code, filename, newpath
|
<reponame>vtarasv/cbh21-protein-solubility-challenge
"""
The entry point for your prediction algorithm.
"""
from __future__ import annotations
import argparse
import csv
import itertools
from pathlib import Path
import pprint
from typing import Any
import zipfile
from Bio.PDB.PDBParser import PDBParser
from Bio.PDB.vectors import calc_dihedral
from Bio.PDB.Structure import Structure
import temppathlib
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import pickle
class Model(nn.Module):
    """Simple MLP: a stack of [Linear -> ReLU -> BatchNorm1d -> Dropout]
    blocks (one per entry of *layers*), ending in a Linear layer that
    produces ``out_sz`` outputs."""

    def __init__(self, n_in, layers, p=0.5, out_sz=1):
        super().__init__()
        self.drop_first_layer = nn.Dropout(p)
        blocks = []
        width = n_in
        for hidden in layers:
            blocks.extend([
                nn.Linear(width, hidden),
                nn.ReLU(inplace=True),
                nn.BatchNorm1d(hidden),
                nn.Dropout(p),
            ])
            width = hidden
        blocks.append(nn.Linear(layers[-1], out_sz))
        self.layers = nn.Sequential(*blocks)

    def forward(self, X):
        # Input dropout is defined but intentionally not applied.
        # X = self.drop_first_layer(X)
        return self.layers(X)
def predict(pdb_file: Path) -> float:
    """
    The function that puts it all together: parsing the PDB file, generating
    features from it and performing inference with the ML model.
    """
    # Parse the PDB file into a Bio.PDB structure, then featurize and score.
    structure = PDBParser().get_structure(pdb_file.stem, pdb_file)
    return ml_inference(featurize(structure))
def featurize(structure: Structure) -> list[Any]:
    """
    Calculates 3D ML features from the `structure`.
    """
    residues = list(structure.get_residues())
    # Distance between the second and second-to-last alpha carbons
    # (placeholder feature; assumes the chain has at least 4 residues).
    protein_length = residues[1]["CA"] - residues[-2]["CA"]
    # Dihedral angle across four CA positions near the chain ends.
    ca_vectors = [residues[idx]["CA"].get_vector() for idx in (1, 2, -3, -2)]
    angle = calc_dihedral(*ca_vectors)
    return [protein_length, angle]
def ml_inference(features: list[Any]) -> float:
    """
    This would be a function where you normalize/standardize your features and
    then feed them to your trained ML model (which you would load from a file).

    This hand-written stand-in buckets (length, angle) into three scores.
    The strictest threshold is tested first: previously the looser
    (>15, >0.5) test came first, which made the (>30, >1.5) -> 80 branch
    unreachable (any input matching it also matched the looser test).
    """
    if features[0] > 30.0 and features[1] > 1.5:
        return 80
    if features[0] > 15.0 and features[1] > 0.5:
        return 60
    return 20
if __name__ == "__main__":
    # Batch-inference script: joins several precomputed feature tables,
    # scores them with a pickled random-forest model, and writes
    # predictions.csv.  The commented-out code is the earlier
    # PyTorch-MLP / per-PDB pipeline kept for reference.
    # use_cuda = torch.cuda.is_available()
    # device = torch.device("cpu")
    # # torch.backends.cudnn.benchmark = True
    #
    # model1 = Model(1225, [1024, 1024, 512], p=0.6, out_sz=1).to(device)
    # model1.load_state_dict(torch.load("MLP_r_0.493.pt", map_location=torch.device("cpu")))
    # model1.eval()
    # Features PFAM
    df_pfam = pd.read_csv('data/features_pfam.csv')
    # PhysProp features
    df_pp = pd.read_csv('data/features_phys_prop.csv')
    # Protdes features
    df_protdes = pd.read_csv('data/features_protdes_combined.csv')
    df_ck = pd.read_csv('data/features_CKSAAP.csv')
    df_test = pd.read_csv('data/test.csv')
    # Merging the data
    # Inner-join every feature table on the shared 'id' column.
    df_test = pd.merge(df_test, df_pfam, on='id')
    df_test = pd.merge(df_test, df_pp, on='id')
    df_test = pd.merge(df_test, df_protdes, on='id')
    df_test = pd.merge(df_test, df_ck, on='id')
    # Missing feature values are treated as zero.
    df_test = df_test.fillna(0)
    X_test = df_test.drop(['seq', 'id'], axis=1)
    # NOTE(review): unpickling executes arbitrary code -- only load trusted
    # model files; the file handle is also never closed explicitly.
    rf = pickle.load(open('RF.pkl', 'rb'))
    # X_test = sc_X.transform(X_test)
    # X_test = torch.tensor(np.array(X_test), dtype=torch.float)
    # with torch.no_grad():
    #     y_test_pred = model1(X=X_test.to(device))
    y_test_pred = rf.predict(X_test)
    df_test["solubility"] = y_test_pred
    predictions_df = df_test[["id", "solubility"]]
    predictions_df.columns = ["protein", "solubility"]
    # set up argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument("--infile", type=str, default="data/test.zip")
    args = parser.parse_args()
    predictions = []
    # use a temporary directory so we don't pollute our repo
    # with temppathlib.TemporaryDirectory() as tmpdir:
    #     # unzip the file with all the test PDBs
    #     with zipfile.ZipFile(args.infile, "r") as zip_:
    #         zip_.extractall(tmpdir.path)
    # Collect per-protein predictions as dicts (printed to screen below).
    for id, sol in zip(list(predictions_df["protein"]), list(predictions_df["solubility"])):
        predictions.append({"protein": id, "solubility": sol})
    # iterate over all test PDBs and generate predictions
    # for test_pdb in tmpdir.path.glob("*.pdb"):
    #     predictions.append({"protein": test_pdb.stem, "solubility": predict(test_pdb)})
    # save to csv file, this will be used for benchmarking
    predictions_df.to_csv("predictions.csv", index=False)
    # outpath = "predictions.csv"
    # with open(outpath, "w") as fh:
    #     writer = csv.DictWriter(fh, fieldnames=["protein", "solubility"])
    #     writer.writeheader()
    #     writer.writerows(predictions)
    # print predictions to screen
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(predictions)
|
<gh_stars>1-10
#!/usr/bin/env python
# Author: <NAME>
# Author: <NAME>
# MIT License.
#
# Copyright 2019 <NAME> and SWCCDC. Permission is hereby granted, free
# of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to
# the following conditions: The above copyright notice and this permission
# notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
import logging
import socket
import argparse
import json
import os
from prompt_toolkit import PromptSession
from prompt_toolkit.eventloop import From, get_event_loop
from prompt_toolkit.patch_stdout import patch_stdout
import data_model
from triassic_prompts import BasePrompt
def run_local():
    """Run an interactive shell session on the local terminal (blocks
    until the user exits the prompt loop)."""
    # The stray `pass` that preceded this body was dead code and has
    # been removed.
    session = PromptSession()
    shell_task = From(BasePrompt(session).loop_until_exit())
    get_event_loop().run_until_complete(shell_task)
def launch_telnet_session(connection):
    """Coroutine run for each incoming telnet connection.

    Binds a prompt session to the connection's VT100 input/output and
    runs the shell loop until the user exits or the connection drops.
    """
    print('Launching new session')
    try:
        session = PromptSession(output=connection.vt100_output, input=connection.vt100_input)
        yield From(BasePrompt(session, connection=connection).loop_until_exit())
    except KeyboardInterrupt:
        pass
    except socket.error as e:
        print('Socket error %s. Shutting down session.' % e.errno)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/GeneratorExit still
        # propagate; any other error simply ends this session.
        print('Other error. Shutting down session.')
def exception_handler(context):
    """Event-loop exception handler of last resort: announce the failure
    and re-exec the whole server process."""
    # If we've gotten here, it's likely that something horrible has happened.
    banner = ("<<< Unhandled exception in an event loop.",
              "<!! This usually means that something horrible has happened.",
              "<!! Therefore, we will completely restart the server.",
              "<!! Goodbye.")
    for line in banner:
        print(line)
    # Replace the current process image with a fresh copy of ourselves.
    os.execl(sys.executable, sys.executable, *sys.argv)
def run_telnet(host, port):
    """Serve the shell over telnet on *host*:*port* (runs forever)."""
    # Import it here, because the import causes an error in Windows;
    # and I want to be able to run the local version in Windows.
    from telnet.server import TelnetServer

    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)

    TelnetServer(interact=launch_telnet_session, host=host, port=port).start()

    loop = get_event_loop()
    loop.set_exception_handler(exception_handler)
    loop.run_forever()
def main():
    """Parse command-line arguments, initialize the database, and start
    either the telnet frontend or a local interactive session."""
    parser = argparse.ArgumentParser(prog='triassic_shell.py')
    parser.add_argument('-f', '--file', help="Path to the ZODB persistence file to use.")
    subparsers = parser.add_subparsers(dest='command')
    telnet_parser = subparsers.add_parser('telnet')
    telnet_parser.add_argument('-a', '--address', default='127.0.0.1', dest='host')
    telnet_parser.add_argument('-p', '--port', type=int, default=21321)
    args = parser.parse_args()

    # Initialize the database, if needed.
    data_model.init_db(args.file or None)

    if args.command == 'telnet':
        run_telnet(args.host, args.port)
    else:
        run_local()


if __name__ == "__main__":
    main()
|
"""
<EMAIL>
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from itertools import product
def tile_iterator(im,
                  blocksize=(64, 64),
                  padsize=(64, 64),
                  mode="constant",
                  verbose=False):
    """
    Iterates over padded tiles of an ND image while keeping track of the
    slice positions.

    Example:
    --------
    im = np.zeros((200,200))
    res = np.empty_like(im)

    for padded_tile, s_src, s_dest in tile_iterator(im,
                              blocksize=(128, 128),
                              padsize = (64,64),
                              mode = "wrap"):

        # do something with the tile, e.g. a convolution
        res_padded = np.mean(padded_tile)*np.ones_like(padded_tile)

        # reassemble the result at the correct position
        res[s_src] = res_padded[s_dest]

    Parameters
    ----------
    im: ndarray
        the input data (arbitrary dimension)
    blocksize:
        the dimension of the blocks to split into
        e.g. (nz, ny, nx) for a 3d image
    padsize:
        the size of left and right pad for each dimension
    mode:
        padding mode, like numpy.pad
        e.g. "wrap", "constant"...
    verbose:
        if True, print progress information

    Returns
    -------
    tile, slice_src, slice_dest
        tile[slice_dest] is the tile in im[slice_src]
    """
    if not (im.ndim == len(blocksize) == len(padsize)):
        raise ValueError("im.ndim (%s) != len(blocksize) (%s) != len(padsize) (%s)"
                         % (im.ndim, len(blocksize), len(padsize)))

    # Number of tiles along each axis.
    subgrids = tuple(int(np.ceil(1. * n / b)) for n, b in zip(im.shape, blocksize))

    # If the image dimensions are not divisible by the blocksize, the
    # right/bottom pad absorbs the mismatch.
    pad_mismatch = tuple(s * b - n for n, s, b in zip(im.shape, subgrids, blocksize))

    if verbose:
        print("tile padding... ")
    im_pad = np.pad(im, [(p, p + pm) for pm, p in zip(pad_mismatch, padsize)], mode=mode)

    # Iterate over the cartesian product of subgrid indices.
    # (The previous version computed a first, immediately-overwritten
    # s_output and an unused enumerate counter; both removed.)
    for index in product(*[range(sg) for sg in subgrids]):
        # Where this tile's payload lives in the original image.
        s_input = tuple(slice(i * b, (i + 1) * b)
                        for i, b in zip(index, blocksize))
        # Which part of the padded tile holds that payload; the last tile
        # along an axis is shortened by the padding mismatch.
        s_output = tuple(slice(p, b + p - pm * (i == s - 1))
                         for b, pm, p, i, s in zip(blocksize, pad_mismatch,
                                                   padsize, index, subgrids))
        # The tile plus its left/right padding, cut from the padded image.
        s_padinput = tuple(slice(i * b, (i + 1) * b + 2 * p)
                           for i, b, p in zip(index, blocksize, padsize))

        yield im_pad[s_padinput], s_input, s_output
if __name__ == '__main__':
    # simple test
    # Round-trip check: tiling followed by reassembly must reproduce the
    # input exactly for 1D, 2D and 3D arrays.
    for n in [1, 2, 3]:
        print("n = %s" % n)
        shape = [103 + 13 * _n for _n in range(n)]
        im = np.random.uniform(-1, 1, shape)
        res = np.empty_like(im)
        tiles = tile_iterator(im,
                              blocksize=(50,) * im.ndim,
                              padsize=(64,) * im.ndim,
                              mode="wrap")
        for padded_tile, s_src, s_dest in tiles:
            # reassemble the result at the correct position
            res[s_src] = padded_tile[s_dest]
        print("OK" if np.allclose(res, im) else "ERRRRRRRRRRROOOOOOOORRRRRR")
|
"""Working with event data, events, and event sequences"""
# Copyright (c) 2019 <NAME>.
#
# This is free, open software licensed under the [MIT License](
# https://choosealicense.com/licenses/mit/).
import csv
import itertools as itools
import json as _json
import operator
import esal
from . import records
# Data format
def header(time_type=float):
    """
    Return a header that describes the fields of an event record in a
    table of events.

    The header is (id:int, lo:time_type, hi:time_type, cat:str, typ:str,
    val:str, jsn:str).

    time_type:
        Constructor for type of time / date found in event records:
        time_type<T>(str) -> T.
    """
    fields = [('id', int)]
    # Interval bounds use the caller-supplied time constructor.
    fields += [(name, time_type) for name in ('lo', 'hi')]
    fields += [(name, str) for name in ('cat', 'typ', 'val')]
    fields.append(('jsn', str))  # Do not automatically parse JSON
    return tuple(fields)
# Map each event-record field name to its positional index in a record.
header_nm2idx = {
    fld[0]: i for (i, fld) in enumerate(header())}

"""Format of CSV tables of events."""
# Keyword arguments for csv.reader / csv.writer.  (The string above is a
# dangling module-level note, not an attached docstring.)
csv_format = dict(
    delimiter='|',
    doublequote=False,
    escapechar='\\',
    lineterminator='\n',
    quotechar='"',
    quoting=csv.QUOTE_MINIMAL,
)
# Facts and events
def value(event):
    """
    Return the value of an event that was constructed by `sequence`.
    """
    # `sequence` stores (val, jsn) pairs; the value proper is slot 0.
    return operator.itemgetter(0)(event.value)
def json(event):
    """
    Parse the JSON of an event that was constructed by `sequence`.
    """
    # Slot 1 holds the raw JSON; it may already be parsed (non-str),
    # in which case it is returned unchanged.
    raw = event.value[1]
    if isinstance(raw, str):
        return _json.loads(raw)
    return raw
# Event sequences
def sequence(
        event_records,
        event_sequence_id=None,
        header_nm2idx=header_nm2idx,
):
    """
    Construct an event sequence from the given records and return it.

    Any record in which the fields `lo` and `hi` are both `None` is
    treated as a fact.  All other records are treated as events.

    event_records:
        Iterable of event records where each record is an indexable
        collection of values.
    event_sequence_id:
        ID for constructed event sequence.
    header_nm2idx:
        Mapping of event record field names to their indices in the
        record.  Must include at least the following names: id, lo, hi,
        cat, typ, val, jsn.
    """
    # Resolve field indices once up front.
    idx = {nm: header_nm2idx[nm]
           for nm in ('id', 'lo', 'hi', 'cat', 'typ', 'val', 'jsn')}
    facts = []
    events = []
    for rec in event_records:
        # The sequence inherits the ID of the first record unless given.
        if event_sequence_id is None:
            event_sequence_id = rec[idx['id']]
        lo = rec[idx['lo']]
        hi = rec[idx['hi']]
        key = (rec[idx['cat']], rec[idx['typ']])
        if lo is None and hi is None:
            # Missing times indicate a fact.
            facts.append((key, rec[idx['val']]))
        else:
            # Otherwise this record is an event.
            events.append(esal.Event(
                esal.Interval(lo, hi),
                key,
                (rec[idx['val']], rec[idx['jsn']])))
    return esal.EventSequence(events, facts, event_sequence_id)
def read_sequences(
        csv_event_records,
        header=header(),
        parse_id=None,
        include_ids=None,
        parse_record=None,
        include_record=None,
        transform_record=None,
        sequence_constructor=sequence,
):
    """
    Read event records and yield event sequences.

    csv_event_records:
        Iterable of list<str>, as from `csv.reader`.
    header:
        Indexable collection of (name, type) pairs indicating the names
        and data types of the fields of each record.  Must include at
        least the following names: id, lo, hi, cat, typ, val, jsn.
    parse_id:
        Function to parse the record ID: parse_id(str) -> object.
    include_ids:
        Set of IDs to include; all other IDs are excluded.  Use to
        short-circuit event record processing and event sequence
        construction.  Each ID is parsed before it is looked up.
    parse_record:
        Passed to `records.process`.
    include_record:
        Passed to `records.process`.
    transform_record:
        Passed to `records.process`.
    sequence_constructor:
        Function to construct an event sequence given an iterable of
        event records and a sequence ID:
        sequence_constructor(iter<list<object>>, object) ->
        esal.EventSequence.
    """
    # Index of the ID field in each record.
    id_idx = {field[0]: i for (i, field) in enumerate(header)}['id']
    # Records are grouped by (optionally parsed) ID.
    if parse_id is None:
        key_of = operator.itemgetter(id_idx)
    else:
        key_of = lambda rec: parse_id(rec[id_idx])
    # Consecutive records sharing an ID form one event sequence.
    for rec_id, group in itools.groupby(csv_event_records, key_of):
        # Skip sequences that were not requested.
        if include_ids is not None and rec_id not in include_ids:
            continue
        yield sequence_constructor(records.process(
            group, parse_record, include_record, transform_record,
        ), rec_id)
def periods(
    events,
    span_lo=None,
    span_hi=None,
    value=None,
    zero_values=(0, None),
    min_len=0,
    backoff=0,
    output_zero=0,
):
    """
    Yield disjoint intervals corresponding to different values of the
    given events.

    Converts a sequence of events that approximately represent a signal
    into a guess at the underlying piecewise constant signal (a sequence
    of intervals that partitions a span of time, where each interval has
    a value).  Assumes the given events are sorted by their start times.
    The conversion gives each interval a minimum length, unions
    intervals with the same value and then puts them in sequence by
    truncating an interval at the start of the next interval with a
    different, nonzero value (with optional back-off).  Finally, fills
    in gaps with zero values.  This is intended to be useful for
    constructing event "eras" where the values of an event are mutually
    exclusive (e.g. different dosages of a medication).

    For example, in the following, the top collection of intervals would
    be converted into the bottom sequence of intervals given min_len=6
    and backoff=2.

    --------------------------------------------------
      222                          22
     11111    111       11111     11     11111
    00000            00000             000000
    --------------------------------------------------
    00 111111 1111111 22222222222 000000 111111111 000
    --------------------------------------------------

    events:
        Iterable of events.
    span_lo:
        Start (if any) of span to which events are clipped.
    span_hi:
        End (if any) of span to which events are clipped.
    value:
        Function to extract values from events: value(event) -> object.
        Default uses `esal.Event.value`.
    zero_values:
        Set of values to treat as zero (non-signal) and ignore.
    min_len:
        Minimum length of each interval (prior to any truncation).
    backoff:
        Size of gap between intervals.  A larger gap increases the
        chances that an underlying transition from one value to the next
        happened in the gap.
        [TODO technically also need a starting lag / offset]
    output_zero:
        Value to use when filling in between nonzero values.
    """
    prds = []
    # Lengthen and clip nonzero periods
    for ev in events:
        # Ensure a minimum length before clipping
        lo = ev.when.lo
        hi = max(ev.when.hi, lo + min_len)
        val = value(ev) if value is not None else ev.value
        # Discard any events that are "non-events" (have zero value) or
        # that are outside the allowed span
        if (val in zero_values or
                (span_lo is not None and hi < span_lo) or
                (span_hi is not None and lo > span_hi)):
            continue
        # Clip to allowed span
        if span_hi is not None:
            hi = min(hi, span_hi)
        if span_lo is not None:
            lo = max(lo, span_lo)
        prds.append((lo, hi, val))
    # Merge and sequentialize periods
    mrg_idx = 0
    prd_idx = 1
    while prd_idx < len(prds):
        lo1, hi1, val1 = prds[mrg_idx]
        lo2, hi2, val2 = prds[prd_idx]
        # Merge periods with the same value
        if hi1 >= lo2 and val1 == val2:
            prds[mrg_idx] = (lo1, hi2, val1)
            del prds[prd_idx]
        else:
            # Put periods in sequence by removing overlaps
            if hi1 > lo2:
                prds[mrg_idx] = (lo1, lo2, val1)
            mrg_idx += 1
            prd_idx += 1
    # Yield periods with intervening zero periods as needed.  Separate
    # periods by backing off from the following nonzero event (if there
    # is one).
    zero_lo = span_lo
    for (idx, (lo, hi, val)) in enumerate(prds):
        # Yield a preceding zero period if it would be non-empty after
        # backing off from the current event.
        # BUGFIX: `zero_lo` is None when `span_lo` is None (the span is
        # unbounded below); comparing None with a number raises TypeError
        # on Python 3, so only emit a leading zero period when the lower
        # bound is known.
        zero_hi = lo - backoff
        if zero_lo is not None and zero_lo < zero_hi:
            yield (esal.Interval(zero_lo, zero_hi), output_zero)
        # Back off from the following nonzero event if there is one
        hi_bk = (max(min(hi, prds[idx + 1][0] - backoff), lo)
                 if idx + 1 < len(prds)
                 else hi)
        yield (esal.Interval(lo, hi_bk), val)
        # Increment.  Delay the zero period by the backoff amount.
        zero_lo = hi_bk + backoff
    # BUGFIX: likewise, a trailing zero period only makes sense when both
    # bounds are known (`span_hi` given and `zero_lo` defined).
    if span_hi is not None and zero_lo is not None and zero_lo < span_hi:
        yield (esal.Interval(zero_lo, span_hi), output_zero)
|
<reponame>chua-n/particle
import random
from typing import List, Tuple, Union
import numpy as np
import pandas as pd
from skimage.measure import marching_cubes
import torch
def fig2array(fig):
    """Render a Matplotlib figure and return its RGB pixels.

    @param fig a matplotlib figure
    @return a numpy array of shape (height, width, 3) and dtype uint8

    Note: use fig.canvas.tostring_argb() instead if you also need the
    alpha channel of the image.
    """
    # Force a render so the canvas buffer is up to date
    fig.canvas.draw()
    width, height = fig.canvas.get_width_height()
    # frombuffer gives a read-only flat view of the RGB bytes
    pixels = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    return pixels.reshape(height, width, 3)
def makeGrid(images: Union[np.ndarray, List[np.ndarray]],
             filename: str,
             nrow: int = 8,
             normalize: bool = True):
    """Make a grid of images from input `images` and save it to `filename`.

    Parameters:
    -----------
    images: a batch of images whose shape is (H, W, C)
    filename: the name of the image-grid file to be saved
    nrow (int, optional): Number of images displayed in each row of the grid
    normalize (bool, optional): If True, shift the image to the range (0, 1),
        by the min and max values specified by :attr:`range`. Default: ``True``.
    """
    from torchvision.utils import save_image
    # get the batch, height, width, channel
    b = len(images)
    h, w, c = images[0].shape
    tensors = torch.empty((b, c, h, w), dtype=torch.float32)
    for i, image in enumerate(images):
        for j in range(c):
            # torch.from_numpy rejects arrays with negative strides (e.g.
            # produced by np.flip).  np.ascontiguousarray copies only when
            # needed and — unlike probing images[0] alone, as the previous
            # try/except did — handles each image of the batch individually.
            tensors[i, j] = torch.from_numpy(np.ascontiguousarray(image[:, :, j]))
    save_image(tensors, filename, nrow=nrow, normalize=normalize)
    return
def singleSphere(center, radius, nPoints=100, opacity=1.0, color=None):
    """Draw a single sphere with the given center and radius.

    Parameters:
    -----------
    center(tuple): (x, y, z) coordinate
    radius(float): radius of the sphere
    nPoints(int): sampling resolution of the parametric surface
    opacity(float): opacity of the rendered mesh
    color(tuple): RGB color; a fixed pseudo-random color when omitted
    """
    from mayavi import mlab
    if color is None:
        # Seeded so the default color is reproducible across calls
        random.seed(3.14)
        color = (random.random(), random.random(), random.random())
    # Standard spherical parametrization: theta around the axis, phi from pole to pole
    theta = np.linspace(0, 2 * np.pi, nPoints)
    phi = np.linspace(0, np.pi, nPoints)
    x = radius * np.outer(np.cos(theta), np.sin(phi)) + center[0]
    y = radius * np.outer(np.sin(theta), np.sin(phi)) + center[1]
    z = radius * np.outer(np.ones(theta.size), np.cos(phi)) + center[2]
    return mlab.mesh(x, y, z, color=color, opacity=opacity)
def sphere(center, radius, resolution=30, **kwargs):
    """Draw n spheres given their centers and radii.

    Parameters:
    -----------
    center(np.array, n*3): x, y, z coordinates of n spheres
    radius(np.array, n): radii of the n spheres
    resolution(int): resolution of each sphere in returned scene
    """
    from mayavi import mlab
    xs, ys, zs = center[:, 0], center[:, 1], center[:, 2]
    # points3d sizes glyphs by diameter, hence radius * 2 with scale_factor=1
    diameters = radius * 2
    return mlab.points3d(xs, ys, zs, diameters, scale_factor=1,
                         resolution=resolution, **kwargs)
def tetrahedron(tetrahedron, opacity=1.0, color=None):
    """Draw a tetrahedron given its 4 vertices: tri.points[tri.simplices[i]].

    Note: Delaunay tetrahedra appear to be drawn from the 3-D surface
    rather than from the solid volume.
    """
    from mayavi import mlab
    if color is None:
        # Seeded so the default color is reproducible across calls
        random.seed(3.14)
        color = (random.random(), random.random(), random.random())
    # The four triangular faces of a tetrahedron over vertex indices 0..3
    faces = [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]
    return mlab.triangular_mesh(tetrahedron[:, 0], tetrahedron[:, 1],
                                tetrahedron[:, 2], faces,
                                color=color, opacity=opacity)
def cuboid(cuboidVerticesX, cuboidVerticesY, cuboidVerticesZ, color=(1, 1, 0.5), opacity=1.0):
    """Draw a cuboid from the coordinates of its eight vertices.

    Parameters:
    -----------
    cuboidVerticesX/Y/Z (np.ndarray, shape (2, 2, 2)): coordinates of the 8 vertices
        of a cuboid along X/Y/Z axis.
    """
    from mayavi import mlab
    scene = mlab.gcf()
    # Each rectangular face is split into two triangles over its 4 corners
    faceTriangles = [(0, 1, 2), (1, 2, 3)]
    def plotPlane(sliceX, sliceY, sliceZ):
        """Draw one of the six faces of the cuboid."""
        return mlab.triangular_mesh(cuboidVerticesX[sliceX, sliceY, sliceZ],
                                    cuboidVerticesY[sliceX, sliceY, sliceZ],
                                    cuboidVerticesZ[sliceX, sliceY, sliceZ],
                                    faceTriangles, color=color, opacity=opacity)
    whole = slice(None)
    # Two faces per axis: fix one index at 0 or 1, take all values of the others
    plotPlane(whole, whole, 0)
    plotPlane(whole, whole, 1)
    plotPlane(whole, 0, whole)
    plotPlane(whole, 1, whole)
    plotPlane(0, whole, whole)
    plotPlane(1, whole, whole)
    return scene
class DisplayCube:
    """Render a voxel cube's isosurface with one of several 3-D backends.

    Each backend runs marching cubes at level 0 and displays the
    resulting triangle mesh.
    """
    @staticmethod
    def mpl(cube):
        """Show the isosurface with Matplotlib."""
        import matplotlib.pyplot as plt
        vertices, triangles, *_ = marching_cubes(cube, 0)
        plt.figure(figsize=(3.2, 3.2))
        ax = plt.axes(projection='3d')
        ax.plot_trisurf(vertices[:, 0], vertices[:, 1], vertices[:, 2],
                        triangles=triangles)
        plt.axis('off')
        plt.show()
    @staticmethod
    def vv(cube):
        """Show the isosurface with visvis."""
        import visvis as vv
        vertices, triangles, normals, values = marching_cubes(cube, 0)
        # visvis expects the vertex columns in reversed (z, y, x) order
        vv.mesh(np.fliplr(vertices), triangles, normals, values)
        vv.use().Run()
    @staticmethod
    def mayavi(cube):
        """Show the isosurface with Mayavi."""
        from mayavi import mlab
        vertices, triangles, *_ = marching_cubes(cube, 0)
        mlab.options.offscreen = True
        mlab.triangular_mesh(vertices[:, 0], vertices[:, 1], vertices[:, 2],
                             triangles)
        mlab.show()
class Violin:
    """Bad code...

    Combine one or more .npz feature sets into a single multi-indexed
    DataFrame and draw violin plots comparing feature distributions,
    annotated with their Jensen-Shannon divergence.
    """
    # Alias for the type of the object returned by np.load on a .npz file
    NpzFile = np.lib.npyio.NpzFile
    def __init__(self, dataSet: Union[NpzFile, Tuple[NpzFile]], name: Union[str, Tuple[str]]) -> None:
        """Build one concatenated DataFrame indexed by (set name, row index).

        dataSet: one or more loaded .npz archives of per-sample features.
        name: label(s) for each archive, used as the outer index level.
        """
        dataSet = list(dataSet)
        name = list(name)
        for i in range(len(dataSet)):
            # Materialize the lazy NpzFile into a dict; drop the non-feature
            # 'mask' array so the remaining keys all become DataFrame columns.
            dataSet[i] = dict(dataSet[i].items())
            dataSet[i].pop('mask')
            dataSet[i] = pd.DataFrame(dataSet[i])
            # Prefix every row with its data-set name to build a MultiIndex
            dataSet[i].index = pd.MultiIndex.from_tuples(
                [(name[i], idx) for idx in dataSet[i].index])
        self.dataSet = pd.concat(dataSet, axis=0)
        self.name = name
    def plot(self, *, feature, setName, figsize=None, zhfontProperty=None, **kwargs):
        """Draw violin plot(s) of `feature` for 1, 2 or 3 data sets.

        feature: column name to plot.
        setName: one, two or three data-set names (outer index labels).
        zhfontProperty: font properties for the Chinese axis titles.
        Returns the created matplotlib figure; raises ValueError for any
        other number of set names.
        """
        import seaborn as sns
        import matplotlib.pyplot as plt
        zhfont = {'font': zhfontProperty}
        axNum = len(setName)
        if axNum == 1:
            # Single distribution: plain violin plot
            data = self.dataSet.loc[setName, feature]
            fig, ax = plt.subplots(figsize=figsize)
            sns.violinplot(data=data, ax=ax, **kwargs)
        elif axNum == 2:
            # Two distributions: one split violin plus their JS divergence
            setName = list(setName)
            fig, ax = plt.subplots(figsize=figsize)
            data = self.dataSet.loc[setName, feature]
            js = self.jsHelper(data[setName[0]], data[setName[1]])
            data = pd.DataFrame(data)
            # Extra column so seaborn can split the violin by data set
            data['Data Set'] = None
            for name in setName:
                data.loc[name, 'Data Set'] = name
            sns.violinplot(data=data, x=[0]*len(data), y=feature, hue='Data Set',
                           split=True, ax=ax, **kwargs)
            ax.set_xticks([])
            ax.set_title(f"JS散度: {js}", fontdict=zhfont)
        elif axNum == 3:
            # NOTE(review): this branch hard-codes the labels 'Real', 'VAE'
            # and 'GAN' rather than using `setName` — confirm callers always
            # pass exactly those three names.
            setName = list(setName)
            fig, ax = plt.subplots(1, 2, sharey=True,
                                   figsize=figsize, tight_layout=True)
            data = self.dataSet.loc[setName, feature]
            data = pd.DataFrame(data)
            data['Data Set'] = None
            for name in setName:
                data.loc[name, 'Data Set'] = name
            # Compare Real vs VAE in the left axis and Real vs GAN in the right
            vaeData = data.loc[['Real', 'VAE']]
            ganData = data.loc[['Real', 'GAN']]
            sns.violinplot(data=vaeData, x=[0]*len(vaeData), y=feature, hue='Data Set',
                           split=True, ax=ax[0], **kwargs)
            sns.violinplot(data=ganData, x=[0]*len(ganData), y=feature, hue='Data Set',
                           split=True, ax=ax[1], **kwargs)
            jsVae = self.jsHelper(
                vaeData.loc['Real', feature].values, vaeData.loc['VAE', feature].values)
            jsGan = self.jsHelper(
                ganData.loc['Real', feature].values, ganData.loc['GAN', feature].values)
            ax[0].set_xticks([])
            ax[1].set_xticks([])
            ax[1].set_ylabel(None)
            ax[0].set_title(f"JS散度: {jsVae}", fontdict=zhfont)
            ax[1].set_title(f"JS散度: {jsGan}", fontdict=zhfont)
        else:
            raise ValueError("Check the parameter `setName`!")
        return fig
    def jsHelper(self, vec1, vec2, nPoint=1001):
        """Compute the JS divergence between two distributions of the same feature.

        Fits a Gaussian KDE to each sample, discretizes both densities on a
        shared grid of `nPoint` points, and returns the divergence of the
        resulting probability vectors rounded to 2 decimals.
        """
        from scipy.stats import gaussian_kde
        from particle.utils.dirty import Entropy
        # Extrema of the two vectors
        extrema = np.array([np.sort(vec1)[[0, -1]],
                            np.sort(vec2)[[0, -1]]])
        # Set the range over which the feature (x) varies
        xRange = np.linspace(extrema.min(), extrema.max(), nPoint)
        unitIntervalLength = (xRange[-1] - xRange[0]) / (nPoint - 1)
        # Estimate the probability densities
        dsty1 = gaussian_kde(vec1).pdf(xRange)
        dsty2 = gaussian_kde(vec2).pdf(xRange)
        # Convert densities to probabilities per grid cell
        p1 = dsty1 * unitIntervalLength
        p2 = dsty2 * unitIntervalLength
        # Compute the JS divergence
        js = Entropy.JSDivergence(p1, p2)
        return round(js, 2)
|
'''
This script is intended to generate a dgemm model from a BLAS calibration archive.
'''
import sys
import datetime
import time
import yaml
import cashew
import numpy
from cashew import linear_regression as lr
from cashew import archive_extraction as ae
def my_dgemm_reg(df):
    """Fit duration ~ mnk + mn + mk + nk on one group and attach mean GFLOPS."""
    df = df.copy()
    lr.compute_variable_products(df, 'mnk')
    reg = lr.compute_full_reg(df, 'duration', ['mnk', 'mn', 'mk', 'nk'])
    # A dgemm of sizes (m, n, k) performs 2*m*n*k floating point operations
    flop = (2 * df['m'] * df['n'] * df['k']).sum()
    elapsed = df['duration'].sum()
    reg['mean_gflops'] = flop / elapsed * 1e-9
    return reg
def compute_reg(df):
    """Fit per-CPU dgemm regressions and aggregate them into a model dict.

    Returns a dict with an 'info' section (experiment metadata and summary
    statistics) and a 'model' section (one regression dict per CPU).
    """
    reg = lr.regression(df, my_dgemm_reg)
    # Cast numpy scalars to native Python types so yaml.dump can serialize
    # them.  BUGFIX: numpy.int / numpy.float were deprecated aliases of the
    # builtins and were removed in NumPy 1.24; test against the abstract
    # scalar bases numpy.integer / numpy.floating instead.
    for tmp in reg:
        for key, val in tmp.items():
            if isinstance(val, (int, numpy.integer)):
                tmp[key] = int(val)
            elif isinstance(val, (float, numpy.floating)):
                tmp[key] = float(val)
    result = {'info': {}}
    # These fields must be identical for every CPU; hoist them into 'info'.
    for key in ['cluster', 'jobid', 'expfile_hash', 'start_time']:
        values = {tmp[key] for tmp in reg}
        assert len(values) == 1
        result['info'][key] = values.pop()
        for tmp in reg:
            del tmp[key]
    result['info']['experiment_date'] = str(datetime.datetime.fromtimestamp(result['info']['start_time']))
    del result['info']['start_time']
    # Summary statistics over the per-CPU regressions
    avg_alpha = numpy.mean([tmp['mnk'] for tmp in reg])
    avg_beta = numpy.mean([tmp['intercept'] for tmp in reg])
    var_coeff = numpy.mean([tmp['mnk_residual']/tmp['mnk'] for tmp in reg])
    het_coeff = numpy.std([tmp['mnk'] for tmp in reg]) / avg_alpha
    result['info'].update({
        'avg_gflops': float(2e-9/avg_alpha),
        'avg_latency': float(avg_beta),
        'heterogeneity_coefficient': float(het_coeff),
        'variability_coefficient': float(var_coeff),
        'nb_nodes': len(df['node'].unique()),
    })
    for tmp in reg:
        tmp['cpu_id'] = 2*tmp['node'] + tmp['cpu']  # see the function get_cpuid() in HPL_dgemm.c
    result['model'] = reg
    return result
def main(archive_file, model_file):
    """Extract the calibration archive, fit the dgemm model, dump it as YAML."""
    start = time.time()
    df = ae.read_performance(archive_file)
    extracted = time.time()
    print('Extracted archive in %.2f seconds' % (extracted - start))
    reg = compute_reg(df)
    modeled = time.time()
    print('Computed model in %.2f seconds' % (modeled - extracted))
    # Record provenance alongside the model itself
    reg['metadata'] = {
        'file_creation_date': str(datetime.datetime.now()),
        'archive_file': archive_file,
        'cashew_git': cashew.__git_version__,
        'granularity': 'cpu',
    }
    with open(model_file, 'w') as f:
        yaml.dump(reg, f)
if __name__ == '__main__':
    # Command line: <archive_file> is the input .zip calibration archive,
    # <model_file> the output .yaml model.
    if len(sys.argv) != 3:
        sys.exit('Syntax: %s <archive_file> <model_file>' % sys.argv[0])
    archive_file = sys.argv[1]
    model_file = sys.argv[2]
    # Validate file extensions before doing any expensive extraction work
    if not archive_file.endswith('.zip'):
        sys.exit('File %s must be a .zip file' % archive_file)
    if not model_file.endswith('.yaml'):
        sys.exit('File %s must be a .yaml file' % model_file)
    main(archive_file, model_file)
|
<reponame>TheYuanLiao/individual_mobility_model
import os
import sys
import subprocess
import yaml
import time
import pandas as pd
import geopandas as gpd
import multiprocessing as mp
def get_repo_root():
    """Return the absolute path of this repository's top-level directory."""
    cwd = os.path.dirname(os.path.abspath('__file__'))
    # Ask git itself, so the result is correct wherever this file lives
    toplevel = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'],
                                       cwd=cwd,
                                       universal_newlines=True)
    return toplevel.rstrip()
ROOT_dir = get_repo_root()
# Make repo-local modules importable; `lib` takes priority on the search path.
sys.path.append(ROOT_dir)
sys.path.insert(0, ROOT_dir + '/lib')
import lib.helpers as helpers
# Region metadata (zone file paths, CRS codes, ...) keyed by region name.
with open(ROOT_dir + '/lib/regions.yaml') as f:
    region_manager = yaml.load(f, Loader=yaml.FullLoader)
def get_homelocations(ts):
    """
    Get the home location from a dataframe of geotagged tweets.
    :param ts: geotagged tweets
    :type ts: a dataframe
    :return: home location (single-row per user and region)
    :rtype: GeoDataFrame
    """
    # One representative "home" row per (userid, region) pair
    home_rows = (ts.query('label == "home"')
                 .groupby(['userid', 'region'])
                 .head(1))
    geometry = gpd.points_from_xy(home_rows['longitude'], home_rows['latitude'])
    return gpd.GeoDataFrame(home_rows, crs='EPSG:4326', geometry=geometry)
class TweetsFilter:
    """Load, filter, and persist the geotagged tweets of a single region."""
    def __init__(self, region=None):
        """
        :param region: region for processing its tweets
        :type region: string
        :raises Exception: when the region's .sqlite3 database is missing
        """
        # Define the focus region
        self.region = region
        self.boundary = None
        self.zones = None
        # Load region data
        self.region_info = region_manager[self.region]
        # Which sqlite3 file to get geotweets from
        if os.path.exists(ROOT_dir + f"/dbs/{region}/{region}.sqlite3"):
            self.sqlite_geotweets = ROOT_dir + f"/dbs/{region}/{region}.sqlite3"
        else:
            raise Exception(f"The folder and .sqlite3 file do not exist for region, {self.region}")
        # Where to save CSVs for geotweets and homelocations
        self.csv_geotweets = ROOT_dir + f"/dbs/{region}/geotweets.csv"
        self.csv_homelocations = ROOT_dir + f"/dbs/{region}/homelocations.csv"
        # Place holder for the processed geotagged tweets
        self.geotweets = None
        self.homelocations = None
    def zones_boundary_load(self):
        """
        Get the boundary to use when removing users based on location.
        :return: self.zones, self.boundary
        :rtype: GeoDataFrame
        """
        zones_loader = self.region_info['zones_loader']
        metric_epsg = self.region_info['metric_epsg']
        zone_id = self.region_info['zone_id']
        zones_path = self.region_info['zones_path']
        # NOTE(review): only zones_loader == 1 populates self.zones; any other
        # value leaves it None and the dissolve below will fail — confirm all
        # configured regions use loader 1.
        if zones_loader == 1:
            zones = gpd.read_file(ROOT_dir + zones_path)
            zones = zones[zones[zone_id].notnull()]
            zones = zones.rename(columns={zone_id: "zone"})
            zones.zone = zones.zone.astype(int)
            self.zones = zones[zones.geometry.notnull()].to_crs(metric_epsg)
        # Dissolve all zones into one polygon, simplify it, and go back to WGS84
        self.boundary = self.zones.assign(a=1).dissolve(by='a').simplify(tolerance=0.2).to_crs("EPSG:4326")
    def tweets_filter_1(self):
        """
        Filter out non-precise geotagged tweets and those individuals with less than 50 geotagged tweets.
        :return: self.geotweets
        :rtype: DataFrame
        """
        # Load geotweets from .sqlite
        geotweets = helpers.tweets_from_sqlite(self.sqlite_geotweets)
        # 1 Filter out geotweets without precise geolocation information or more than 50
        # Coordinates carrying more than 0.1% of all tweets are treated as
        # imprecise "center-of-region" defaults and removed.
        coord_counts = geotweets.groupby(['latitude', 'longitude']).size().sort_values(ascending=False)
        coord_percentages = (coord_counts / geotweets.shape[0]).to_frame("perc").reset_index()
        percentages_to_remove = coord_percentages[coord_percentages.perc > 0.001]
        perc_filter = None
        for (_, row) in percentages_to_remove.iterrows():
            # NOTE(review): this keeps a tweet only when BOTH coordinates
            # differ from the hotspot, so tweets sharing just one coordinate
            # (a different location) are dropped too — confirm `&` rather
            # than `|` is intended here.
            f = (geotweets.latitude != row.latitude) & (geotweets.longitude != row.longitude)
            if perc_filter is None:
                perc_filter = f
            else:
                perc_filter = perc_filter & f
        print("Removing ", perc_filter[~perc_filter].size, "center-of-region geotweets")
        geotweets = geotweets[perc_filter]
        geotweets['createdat'] = pd.to_datetime(geotweets['createdat'], infer_datetime_format=True)
        geotweets = geotweets.set_index(['userid', 'createdat']).sort_index()
        # Drop users with 50 or fewer remaining tweets
        tweet_count_before = geotweets.groupby('userid').size()
        self.geotweets = geotweets.drop(
            labels=tweet_count_before[tweet_count_before <= 50].index,
            level=0,
        )
    def tweets_filter_2(self):
        """
        Keep the data of the latest living area and filter out the users who live outside the boundary.
        :return: self.homelocations, self.geotweets
        :rtype: DataFrame
        """
        # 2 Remove home...
        geotweets = self.geotweets.reset_index('createdat')
        # Year-month period used by the clustering/home-labelling helpers
        geotweets = geotweets.assign(ym=geotweets['createdat'].dt.to_period('M'))
        # Get home locations
        geotweetsx = helpers.cluster(geotweets)
        geotweetsx = helpers.label_home(geotweetsx)
        # Only keep the latest home location and the relevant records
        geotweetsx = helpers.remove_tweets_outside_home_period(geotweetsx)
        homelocations = get_homelocations(geotweetsx)
        self.homelocations = gpd.clip(homelocations, self.boundary.convex_hull)
        # Only keep those users who live in the study area
        geotweetsy = geotweetsx[geotweetsx.index.isin(self.homelocations.index)]
        self.geotweets = geotweetsy
    def tweets_save(self):
        """Save the processed tweets and home locations (skip existing files)."""
        if not os.path.exists(self.csv_geotweets):
            self.geotweets.to_csv(self.csv_geotweets)
        if not os.path.exists(self.csv_homelocations):
            self.homelocations[['latitude', 'longitude']].to_csv(self.csv_homelocations)
def region_proc(region=None):
    """Run the full tweet-filtering pipeline for one region and save the CSVs."""
    # Loading zones
    start_time = time.time()
    tweets_filter = TweetsFilter(region=region)
    tweets_filter.zones_boundary_load()
    print(f"{region}: zone loading done in %g seconds" % (time.time() - start_time))
    # Filtering geotweets - 1
    start_time = time.time()
    tweets_filter.tweets_filter_1()
    print(f"{region}: geotweets filtering 1 done in %g seconds" % (time.time() - start_time))
    # Filtering geotweets - 2
    start_time = time.time()
    tweets_filter.tweets_filter_2()
    print(f"{region}: geotweets filtering 2 done in %g seconds" % (time.time() - start_time))
    tweets_filter.tweets_save()
if __name__ == '__main__':
    # Regions whose geotagged tweets will be processed
    region_list = ['sweden', 'netherlands', 'saopaulo', 'australia', 'austria', 'barcelona',
                   'capetown', 'cebu', 'egypt', 'guadalajara', 'jakarta',
                   'johannesburg', 'kualalumpur', 'lagos', 'madrid', 'manila', 'mexicocity', 'moscow', 'nairobi',
                   'rio', 'saudiarabia', 'stpertersburg', 'surabaya']
    # parallelize the processing of geotagged tweets of multiple regions
    # (one worker per CPU core, keeping one core free; starmap blocks
    # until every region has finished)
    pool = mp.Pool(mp.cpu_count() - 1)
    pool.starmap(region_proc, [(r, ) for r in region_list])
    pool.close()
|
<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>
Date: 6/20/2020
"""
import asyncio
import http
import json
import shutil
from pathlib import Path
from typing import List
from fastapi import APIRouter, File, UploadFile, Depends
from fastapi.exceptions import RequestValidationError, HTTPException
from pydantic.error_wrappers import ErrorWrapper
from starlette.responses import JSONResponse
from modelci.hub.registrar import register_model
from modelci.persistence.service_ import get_by_id, get_models, update_model, delete_model, exists_by_id
from modelci.types.models.mlmodel import MLModel, BaseMLModel, ModelUpdateSchema, Framework, Engine, Task
router = APIRouter()
@router.get('/')
def get_all_models(architecture: str = None, framework: Framework = None, engine: Engine = None, task: Task = None,
                   version: int = None):
    """List registered models, optionally filtered by the query parameters."""
    matched = get_models(architecture=architecture, framework=framework, engine=engine, task=task, version=version)
    # Serialize through Pydantic's .json() so its custom encoders apply,
    # then parse back to plain dicts for JSONResponse
    content = [json.loads(item.json(by_alias=False)) for item in matched]
    return JSONResponse(content=content)
@router.get('/{id}')
def get_model(*, id: str):  # noqa
    """Return the model with the given ID as JSON."""
    # Due to FastAPI use default json encoder before customer encoder, we have to rely on
    # Pydantic BaseModel.json and convert it back
    # Check https://github.com/tiangolo/fastapi/blob/master/fastapi/encoders.py#L118 to see if this
    # issue is fixed.
    serialized = get_by_id(id).json(by_alias=False)
    return JSONResponse(content=json.loads(serialized))
@router.patch('/{id}', response_model=MLModel)
def update(id: str, schema: ModelUpdateSchema):
    """Apply a partial update to the model with the given ID; 404 if absent."""
    if exists_by_id(id):
        return update_model(id, schema)
    raise HTTPException(
        status_code=404,
        detail=f'Model ID {id} does not exist. You may change the ID',
    )
@router.delete('/{id}', status_code=http.HTTPStatus.NO_CONTENT)
def delete(id: str):
    """Delete the model with the given ID; 404 if it does not exist."""
    if exists_by_id(id):
        delete_model(id)
        return
    raise HTTPException(
        status_code=404,
        detail=f'Model ID {id} does not exist. You may change the ID',
    )
@router.post('/', status_code=201)
async def publish_model(
        model: BaseMLModel = Depends(BaseMLModel.as_form),
        files: List[UploadFile] = File(
            [],
            description='This field can be set with empty value. In such settings, the publish is a dry run to'
                        'validate the `ml_model_in_form` field. You are recommend to try a dry run to find input'
                        'errors before you send the wight file(s) to the server.'
        ),
        convert: bool = True,
        profile: bool = False
):
    """Publish model to the model hub. The model weight file(s) as well as its meta information (e.g.
    architecture, framework, and serving engine) will be stored into the model hub.

    The publish API will also automatically convert the published model into other deployable format such as
    TorchScript and ONNX. After successfully converted, original model and its generated models will be profiled
    on the underlying devices in the clusters, and collects, aggregates, and processes running model performance.

    Args:
        model (MLModel): Model meta information.
        files (List[UploadFile]): A list of model weight files. The files are organized accordingly. Their file name
            contains relative path to their common parent directory.
            If the files is empty value, a dry-run to this API is conducted for parameter checks. No information
            will be saved into model hub in this case.
        convert (bool): Flag for auto configuration.
        profile (bool): Flag for auto profiling.

    Raises:
        RequestValidationError: when the single uploaded file has no suffix.
        NotImplementedError: when more than one file is uploaded.

    Returns:
        A message response, with IDs of all published model. The format of the return is:
        ```
        {
          "data": {"id": ["603e6a1f5a62b08bc0a2a7f2", "<KEY>"]},
          "status": true
        }
        ```
        Specially, if the dry-run test passed, it will return a status True:
        ```
        {
          "status": true
        }
        ```
    """
    # Save the posted files as a local cache.
    # BUGFIX: asyncio.get_event_loop() is deprecated inside a coroutine
    # (Python 3.10+); get_running_loop() is the supported call here.
    loop = asyncio.get_running_loop()
    saved_path = model.saved_path
    if len(files) == 0:
        # Conduct dry run for parameter check only.
        return {'status': True}
    if len(files) == 1:
        file = files[0]
        suffix = Path(file.filename).suffix
        try:
            # A suffix is required to name the cached weight file correctly
            if len(suffix) == 0:
                error = ErrorWrapper(
                    ValueError(f'Expect a suffix for file {file.filename}, got None.'), loc='files[0]'
                )
                raise RequestValidationError([error])
            saved_path = saved_path.with_suffix(suffix)
            saved_path.parent.mkdir(exist_ok=True, parents=True)
            # Rewind the upload stream, then copy it to disk in a worker
            # thread so the event loop is not blocked by file I/O
            await file.seek(0)
            with open(saved_path, 'wb') as buffer:
                await loop.run_in_executor(None, shutil.copyfileobj, file.file, buffer)
        finally:
            await file.close()
    else:
        raise NotImplementedError('`publish_model` not implemented for multiple files upload.')
    # Rebuild the full model record with the cached weight path attached
    model = MLModel(**model.dict(), weight=saved_path)
    models = register_model(model=model, convert=convert, profile=profile)
    return {
        'data': {'id': [str(model.id) for model in models], },
        'status': True,
        'model_path': saved_path
    }
<filename>optimization/lightgbm.py
# Copyright 2020 The MuLT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from skopt import gp_minimize
from skopt.space import Real, Integer
from skopt.utils import use_named_args
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import log_loss, roc_auc_score
from lightgbm import LGBMModel
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
class LightGBMOptimizer(object):
    """Bayesian hyper-parameter optimizer for binary LightGBM models.

    Searches the hyper-parameter space with scikit-optimize's
    ``gp_minimize``, scoring each candidate by its mean log-loss over a
    stratified k-fold cross-validation.
    """
    def __init__(self,
                 n_folds=2,
                 n_calls=10,
                 shuffle=True,
                 early_stopping_rounds=None,
                 fixed_parameters=None,
                 random_state=None,
                 verbose=-1,
                 n_jobs=-1,
                 use_gpu=False):
        """
        :param n_folds: number of stratified CV folds per evaluation
        :param n_calls: number of gp_minimize evaluations
        :param shuffle: whether to shuffle samples before splitting folds
        :param early_stopping_rounds: LightGBM early stopping, if any
        :param fixed_parameters: dict of parameters that override searched ones
        :param random_state: seed for both the search and the CV splits
        :param verbose: <0 silent, 0 search progress, >0 also LightGBM output
        :param n_jobs: parallelism passed to LightGBM
        :param use_gpu: train on GPU platform 1 / device 0 when True
        """
        self.n_calls = n_calls
        self.n_folds = n_folds
        self.random_state = random_state
        self.shuffle = shuffle
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.optimization_details = {}
        self.early_stopping_rounds = early_stopping_rounds
        self.fixed_parameters = fixed_parameters or dict()
        self.use_gpu = use_gpu
        # (params, mean CV log-loss) recorded for each objective evaluation
        self.iterations = []
    def execute_optimization(self, objective, space):
        """Run gp_minimize and return the best point as a name -> value dict."""
        params = gp_minimize(objective, space, n_calls=self.n_calls, random_state=self.random_state,
                             verbose=(self.verbose >= 0), n_jobs=-1).x
        return {space[i].name: params[i] for i in range(len(space))}
    def optimize(self, x, y):
        """Search hyper-parameters minimizing CV log-loss on (x, y).

        :param x: feature matrix (pd.DataFrame or np.ndarray)
        :param y: binary labels (pd.DataFrame, pd.Series or np.ndarray)
        :return: dict of the best hyper-parameters found
        """
        assert isinstance(x, pd.DataFrame) or isinstance(x, np.ndarray), \
            'x should be a pd.DataFrame or np.ndarray'
        assert isinstance(y, pd.DataFrame) or isinstance(y, pd.Series) or isinstance(y, np.ndarray), \
            'y should be a pd.DataFrame or pd.Series or np.ndarray'
        if isinstance(x, pd.DataFrame):
            x = x.values
        if isinstance(y, pd.DataFrame) or isinstance(y, pd.Series):
            y = y.values
        self.iterations = []
        # Search space; gp_minimize may hand integers back as floats, so the
        # objective rounds them before use.
        space = [
            Integer(2, 20, name='num_leaves'),
            Real(1e-1, 1.000, name='scale_pos_weight'),
            Integer(2, 50, name='min_child_samples'),
            Integer(2000, 8000, name='bin_construct_sample_cnt'),
            Integer(2, 2048, name='max_bin'),
            Real(1e-3, 1, name='min_sum_hessian_in_leaf'),
            Integer(4, 20, name='max_depth'),
            Real(1e-4, 1e-1, name='min_split_gain'),
            Real(1e-4, 1e-1, name='min_child_weight'),
        ]
        @use_named_args(space)
        def objective(
                num_leaves,
                scale_pos_weight,
                min_child_samples,
                bin_construct_sample_cnt,
                max_bin,
                min_sum_hessian_in_leaf,
                max_depth,
                min_split_gain,
                min_child_weight,
        ):
            """Return the mean CV log-loss for one hyper-parameter candidate."""
            try:
                scores = []
                params = {
                    'num_leaves': int(round(num_leaves, ndigits=0)),
                    'scale_pos_weight': scale_pos_weight,
                    'min_child_samples': int(round(min_child_samples, ndigits=0)),
                    'bin_construct_sample_cnt': int(round(bin_construct_sample_cnt, ndigits=0)),
                    'max_bin': int(round(max_bin, ndigits=0)),
                    'min_sum_hessian_in_leaf': min_sum_hessian_in_leaf,
                    'max_depth': int(round(max_depth, ndigits=0)),
                    'min_split_gain': min_split_gain,
                    'min_child_weight': min_child_weight,
                    'n_jobs': self.n_jobs,
                    'silent': self.verbose < 1,
                    'random_state': self.random_state}
                if isinstance(self.fixed_parameters, dict):
                    params.update(self.fixed_parameters)
                if self.use_gpu:
                    params.update({'device': 'gpu',
                                   'gpu_platform_id': 1,
                                   'gpu_device_id': 0})
                skf = StratifiedKFold(
                    self.n_folds, shuffle=self.shuffle, random_state=self.random_state)
                for train_index, valid_index in skf.split(x, y):
                    x_train, y_train = x[train_index, :], y[train_index]
                    x_valid, y_valid = x[valid_index, :], y[valid_index]
                    params['objective'] = 'binary'
                    gbm = LGBMModel(**params)
                    gbm.fit(x_train, y_train,
                            eval_set=[(x_valid, y_valid)],
                            early_stopping_rounds=self.early_stopping_rounds,
                            verbose=int(self.verbose > 0))
                    y_valid_hat = gbm.predict(x_valid, num_iteration=gbm.best_iteration_)
                    loss_valid = log_loss(y_valid, y_valid_hat)
                    scores.append(loss_valid)
                result = np.mean(scores)
                self.iterations.append((params, result))
                return result
            # BUGFIX: the original bare `except:` also swallowed
            # KeyboardInterrupt and SystemExit, making the search impossible
            # to interrupt.  Keep the penalty score for failed fits, but only
            # for ordinary exceptions.
            except Exception:
                # exc_type, exc_obj, exc_tb = sys.exc_info()
                # fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                # print(exc_type, fname, exc_tb.tb_lineno)
                return 999.99
        return self.execute_optimization(objective, space)
|
<gh_stars>1-10
from string import ascii_lowercase
from defaultlist import defaultlist as ref_defaultlist
from hypothesis import assume
from hypothesis import strategies as st
from hypothesis.stateful import RuleBasedStateMachine
from hypothesis.stateful import initialize
from hypothesis.stateful import rule
from pytest import fixture
from pytest import raises
from coinflip.collections import defaultlist
ints = st.lists(st.integers())
chars = st.from_regex(r"[a-z]*", fullmatch=True)
# we disagree about slices so we use no rules for slice accessors
class DefaultListStateMachine(RuleBasedStateMachine):
    """Stateful property test: drive `coinflip.collections.defaultlist` and
    the reference `defaultlist` package through identical operations and
    require the two lists to stay equal after every step."""
    @initialize(ints=ints)
    def init_lists(self, ints):
        # NOTE(review): the drawn `ints` are unused — both lists start empty.
        self.ref_dlist = ref_defaultlist()
        self.dlist = defaultlist()
        assert self.dlist == self.ref_dlist
    @property
    def n(self):
        # Current length of the (shared) list state
        return len(self.ref_dlist)
    @rule(chars=chars)
    def append(self, chars):
        self.ref_dlist.append(chars)
        self.dlist.append(chars)
        assert self.dlist == self.ref_dlist
    @rule(ints=ints)
    def concat(self, ints):
        self.ref_dlist += ints
        self.dlist += ints
        assert self.dlist == self.ref_dlist
    @rule(data=st.data())
    def get(self, data):
        # Reading any in-range index must agree between the two lists
        assume(self.n > 0)
        i = data.draw(st.integers(min_value=0, max_value=self.n - 1))
        assert self.dlist[i] == self.ref_dlist[i]
    @rule(data=st.data(), chars=chars)
    def set(self, data, chars):
        assume(self.n > 0)
        i = data.draw(st.integers(min_value=0, max_value=self.n - 1))
        self.ref_dlist[i] = chars
        self.dlist[i] = chars
        assert self.dlist == self.ref_dlist
    @rule(data=st.data())
    def del_(self, data):
        assume(self.n > 0)
        i = data.draw(st.integers(min_value=0, max_value=self.n - 1))
        del self.ref_dlist[i]
        del self.dlist[i]
        assert self.dlist == self.ref_dlist
# Expose the state machine as a unittest TestCase so pytest collects it.
TestDefaultListStateMachine = DefaultListStateMachine.TestCase
@fixture
def dlist():
    """Provide a fresh, empty defaultlist for each test."""
    return defaultlist()
def test_repr(dlist):
    """A one-element defaultlist should repr like a plain list."""
    dlist.append(42)
    expected = "[42]"
    assert repr(dlist) == expected
def test_slice_del(dlist):
    """Deleting an extended slice removes exactly the selected indices."""
    dlist[:] = range(15)
    dlist[20] = 20
    del dlist[0:10:2]
    # Odd values survive 0..9, 10..14 untouched, default-filled gap, then 20
    expected = [1, 3, 5, 7, 9] + list(range(10, 15)) + [None] * 5 + [20]
    assert dlist == expected
def test_iter(dlist):
    """Iteration yields stored values and covers default-filled gaps."""
    dlist[:] = range(10)
    for got, want in zip(dlist, range(10)):
        assert got == want
    dlist[20] = 20
    assert len(list(dlist)) == 21
def test_reversed(dlist):
    """reversed() walks the elements back to front."""
    dlist[:] = range(10)
    assert list(reversed(dlist)) == list(range(9, -1, -1))
def test_index(dlist):
    """index() finds present values and raises ValueError for absent ones."""
    letters = ascii_lowercase[:10]
    dlist[:] = letters
    assert dlist.index("b") == 1
    with raises(ValueError):
        dlist.index(42)
def test_reverse(dlist):
    """In-place reverse mirrors the default-filled gap as well."""
    dlist[:] = range(10)
    dlist[15] = 15
    dlist.reverse()
    # 15 first, then the gap of five defaults, then 9..0
    expected = [15] + [None] * 5 + list(range(9, -1, -1))
    assert dlist == expected
def test_extend(dlist):
    """extend() appends every element of the given iterable in order."""
    dlist[:] = range(3)
    dlist.extend(ascii_lowercase[:3])
    assert dlist == [0, 1, 2] + list("abc")
def test_pop(dlist):
    """pop() removes the last element."""
    dlist[:] = range(10)
    dlist.pop(-1)
    assert dlist == range(9)
def test_remove(dlist):
    """remove() deletes the first match and raises ValueError on a miss."""
    dlist[:] = ascii_lowercase[:3]
    dlist.remove("b")
    assert dlist == ["a", "c"]
    with raises(ValueError):
        dlist.remove(42)
def test_slice_get(dlist):
    """Reading past the end fills the gap using default_factory."""
    dlist.default_factory = int
    dlist[:] = ascii_lowercase[:3]
    expected = list("abc") + [0, 0]
    assert dlist[:5] == expected
|
# cogs/voice.py
"""
Music functions by https://github.com/EvieePy
Original Music Module - https://gist.github.com/EvieePy/ab667b74e9758433b3eb806c53a19f34
Added Google Text-to-speech
"""
import discord
from discord.ext import commands
import asyncio
import itertools
import sys
import os
import traceback
from async_timeout import timeout
from functools import partial
from youtube_dl import YoutubeDL
from gtts import gTTS
# Options for youtube_dl: best audio only, deterministic filenames, no
# playlist expansion, quiet operation, and plain search-term fallback.
ytdlopts = {
    'format': 'bestaudio/best',
    'outtmpl': 'downloads/%(extractor)s-%(id)s-%(title)s.%(ext)s',
    'restrictfilenames': True,
    'noplaylist': True,
    'nocheckcertificate': True,
    'ignoreerrors': False,
    'logtostderr': False,
    'quiet': True,
    'no_warnings': True,
    'default_search': 'auto',
    'source_address': '0.0.0.0'  # ipv6 addresses cause issues sometimes
}
# FFmpeg flags: don't read stdin; strip the video stream (audio only).
ffmpegopts = {
    'before_options': '-nostdin',
    'options': '-vn'
}
# Single module-level downloader instance shared by all guilds.
ytdl = YoutubeDL(ytdlopts)
class VoiceConnectionError(commands.CommandError):
    """Base exception for voice-connection failures in this cog."""
class InvalidVoiceChannel(VoiceConnectionError):
    """Raised when no valid voice channel can be determined for a connect."""
class YTDLSource(discord.PCMVolumeTransformer):
    """Volume-adjustable audio source built from a youtube_dl info dict."""

    def __init__(self, source, *, data, requester):
        super().__init__(source)
        self.requester = requester          # member who requested the track
        self.title = data.get('title')
        self.web_url = data.get('webpage_url')
        # YTDL info dicts (data) have other useful information you might want
        # https://github.com/rg3/youtube-dl/blob/master/README.md

    def __getitem__(self, item: str):
        """Allows us to access attributes similar to a dict.
        This is only useful when you are NOT downloading.
        """
        # getattr is the idiomatic form of self.__getattribute__(item)
        return getattr(self, item)

    @classmethod
    async def create_source(cls, ctx, search: str, *, loop, download=False):
        """Resolve `search` via youtube_dl without blocking the event loop.

        Returns a lightweight dict when not downloading (re-resolved later by
        `regather_stream`), or a ready YTDLSource when download=True.
        """
        loop = loop or asyncio.get_event_loop()
        # extract_info blocks, so run it in the default executor.
        to_run = partial(ytdl.extract_info, url=search, download=download)
        data = await loop.run_in_executor(None, to_run)
        if 'entries' in data:
            # take first item from a playlist
            data = data['entries'][0]
        await ctx.send(f'Добавил **{data["title"]}** в очередь воспроизведения.')
        if download:
            source = ytdl.prepare_filename(data)
        else:
            # Streaming: store only what regather_stream needs later.
            return {'webpage_url': data['webpage_url'], 'requester': ctx.author, 'title': data['title']}
        return cls(discord.FFmpegPCMAudio(source), data=data, requester=ctx.author)

    @classmethod
    async def regather_stream(cls, data, *, loop):
        """Used for preparing a stream, instead of downloading.
        Since Youtube Streaming links expire."""
        loop = loop or asyncio.get_event_loop()
        requester = data['requester']
        to_run = partial(ytdl.extract_info, url=data['webpage_url'], download=False)
        data = await loop.run_in_executor(None, to_run)
        return cls(discord.FFmpegPCMAudio(data['url']), data=data, requester=requester)
class MusicPlayer:
    """A class which is assigned to each guild using the bot for Music.
    This class implements a queue and loop, which allows for different guilds to listen to different playlists
    simultaneously.
    When the bot disconnects from the Voice it's instance will be destroyed.
    """
    # __slots__ keeps the (one-per-guild) player instances lightweight.
    __slots__ = ('bot', '_guild', '_channel', '_cog', 'queue', 'next', 'current', 'np', 'volume')
    def __init__(self, ctx):
        self.bot = ctx.bot
        self._guild = ctx.guild
        self._channel = ctx.channel      # text channel used for status messages
        self._cog = ctx.cog
        self.queue = asyncio.Queue()     # pending tracks (dicts or YTDLSource)
        self.next = asyncio.Event()      # set when the current track finishes
        self.np = None  # Now playing message
        self.volume = .5                 # default volume: 50%
        self.current = None              # currently playing source, if any
        # Start the per-guild player loop as a background task.
        ctx.bot.loop.create_task(self.player_loop())
    async def player_loop(self):
        """Our main player loop."""
        await self.bot.wait_until_ready()
        while not self.bot.is_closed():
            self.next.clear()
            try:
                # Wait for the next song. If we timeout cancel the player and disconnect...
                async with timeout(300):  # 5 minutes...
                    source = await self.queue.get()
            except asyncio.TimeoutError:
                return self.destroy(self._guild)
            if not isinstance(source, YTDLSource):
                # Source was probably a stream (not downloaded)
                # So we should regather to prevent stream expiration
                try:
                    source = await YTDLSource.regather_stream(source, loop=self.bot.loop)
                except Exception as e:
                    await self._channel.send(f'Возникла проблема с воспроизведением песни:{e}')
                    continue
            source.volume = self.volume
            self.current = source
            # The `after` callback fires in the audio thread, so hand the
            # Event.set back to the event loop thread-safely.
            self._guild.voice_client.play(source, after=lambda _: self.bot.loop.call_soon_threadsafe(self.next.set))
            self.np = await self._channel.send(f'Сейчас играет:**{source.title}** по заказу **{source.requester}**')
            await self.next.wait()
            # Make sure the FFmpeg process is cleaned up.
            source.cleanup()
            self.current = None
    def destroy(self, guild):
        """Disconnect and cleanup the player."""
        return self.bot.loop.create_task(self._cog.cleanup(guild))
class Voice(commands.Cog):
    """Music related commands."""
    __slots__ = ('bot', 'players')

    def __init__(self, bot):
        self.bot = bot
        # One MusicPlayer per guild, keyed by guild id.
        self.players = {}

    async def cleanup(self, guild):
        """Disconnect from voice and drop the guild's player, ignoring absent state."""
        try:
            await guild.voice_client.disconnect()
        except AttributeError:
            pass  # not connected in this guild
        try:
            del self.players[guild.id]
        except KeyError:
            pass  # no player was ever created for this guild

    async def cog_check(self, ctx):
        """A local check which applies to all commands in this cog.

        NOTE: renamed from ``__local_check`` — with ``commands.Cog``
        (discord.py 1.x) the special method is ``cog_check``; the mangled
        name was silently ignored, so the check never ran.
        """
        if not ctx.guild:
            raise commands.NoPrivateMessage
        return True

    async def cog_command_error(self, ctx, error):
        """A local error handler for all errors arising from commands in this cog.

        NOTE: renamed from ``__error`` — with ``commands.Cog`` the special
        method is ``cog_command_error``; the mangled name was silently ignored.
        """
        if isinstance(error, commands.NoPrivateMessage):
            try:
                return await ctx.send('Эта команда не может быть вызвана из личных сообщений.')
            except discord.HTTPException:
                pass
        elif isinstance(error, InvalidVoiceChannel):
            await ctx.send('Ошибка подключения к голосовому каналу.')
        print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)
        traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)

    def get_player(self, ctx):
        """Retrieve the guild player, or generate one."""
        try:
            player = self.players[ctx.guild.id]
        except KeyError:
            player = MusicPlayer(ctx)
            self.players[ctx.guild.id] = player
        return player

    @commands.command(name='connect', aliases=['join'])
    async def connect_(self, ctx, *, channel: discord.VoiceChannel=None):
        """Connect to voice.
        Parameters
        ------------
        channel: discord.VoiceChannel [Optional]
            The channel to connect to. If a channel is not specified, an attempt to join the voice channel you are in
            will be made.
        This command also handles moving the bot to different channels.
        """
        if not channel:
            try:
                channel = ctx.author.voice.channel
            except AttributeError:
                # Author is not in a voice channel either.
                raise InvalidVoiceChannel('Нет голосового канала. Дайте мне ID канала или зайдите в него и позовите меня.')
        vc = ctx.voice_client
        if vc:
            if vc.channel.id == channel.id:
                return  # already there
            try:
                await vc.move_to(channel)
            except asyncio.TimeoutError:
                raise VoiceConnectionError(f'Переезд в: **{channel}** время вышло.')
        else:
            try:
                await channel.connect()
            except asyncio.TimeoutError:
                raise VoiceConnectionError(f'Подключаюсь к: **{channel}** время вышло.')
        await ctx.send(f'Подключился к: **{channel}**')

    @commands.command(name='play', aliases=['sing'])
    async def play_(self, ctx, *, search: str):
        """Request a song and add it to the queue.
        This command attempts to join a valid voice channel if the bot is not already in one.
        Uses YTDL to automatically search and retrieve a song.
        Parameters
        ------------
        search: str [Required]
            The song to search and retrieve using YTDL. This could be a simple search, an ID or URL.
        """
        vc = ctx.voice_client
        if not vc:
            await ctx.invoke(self.connect_)
        player = self.get_player(ctx)
        # If download is False, source will be a dict which will be used later to regather the stream.
        # If download is True, source will be a discord.FFmpegPCMAudio with a VolumeTransformer.
        source = await YTDLSource.create_source(ctx, search, loop=self.bot.loop, download=False)
        await player.queue.put(source)

    @commands.command(name='pause')
    async def pause_(self, ctx):
        """Pause the currently playing song."""
        vc = ctx.voice_client
        if not vc or not vc.is_playing():
            return await ctx.send('Да я особо ничего и не проигрываю!')
        elif vc.is_paused():
            return
        vc.pause()
        await ctx.send(f'**{ctx.author}** поставил песню на паузу!')

    @commands.command(name='resume')
    async def resume_(self, ctx):
        """Resume the currently paused song."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Да я особо ничего и не проигрываю!')
        elif not vc.is_paused():
            return
        vc.resume()
        await ctx.send(f'**{ctx.author}** возобновил песню!')

    @commands.command(name='skip')
    async def skip_(self, ctx):
        """Skip the song."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Да я особо ничего и не проигрываю!')
        if vc.is_paused():
            pass  # stopping a paused track still advances the queue
        elif not vc.is_playing():
            return
        vc.stop()
        await ctx.send(f'**{ctx.author}** пропустил песню!')

    @commands.command(name='queue', aliases=['q', 'playlist'])
    async def queue_info(self, ctx):
        """Retrieve a basic queue of upcoming songs."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Но я же не подключен к голосовому каналу!')
        player = self.get_player(ctx)
        if player.queue.empty():
            return await ctx.send('В очереди воспроизведения нет песен.')
        # Grab up to 5 entries from the queue...
        upcoming = list(itertools.islice(player.queue._queue, 0, 5))
        fmt = '\n'.join(f'**{_["title"]}**' for _ in upcoming)
        embed = discord.Embed(title=f'Очередь воспроизведения - {len(upcoming)}', description=fmt, color=0xa500ff)
        embed.set_author(name='<NAME>', icon_url='https://i.imgur.com/A7tQuJ1.png')
        embed.set_thumbnail(url="https://i.imgur.com/A7tQuJ1.png")
        await ctx.send(embed=embed)

    @commands.command(name='now_playing', aliases=['np', 'current', 'currentsong', 'playing'])
    async def now_playing_(self, ctx):
        """Display information about the currently playing song."""
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Но я же не подключен к голосовому каналу!')
        player = self.get_player(ctx)
        if not player.current:
            return await ctx.send('Да я особо ничего и не проигрываю!')
        player.np = await ctx.send(f'Сейчас играет: **{vc.source.title}** по заказу **{vc.source.requester}**')

    @commands.command(name='volume', aliases=['vol'])
    async def change_volume(self, ctx, *, vol: float):
        """Change the player volume.
        Parameters
        ------------
        volume: float or int [Required]
            The volume to set the player to in percentage. This must be between 1 and 100.
        """
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Но я же не подключен к голосовому каналу!')
        if not 0 < vol < 101:
            return await ctx.send('Пожалуйста, введите значение от 0 до 100.')
        player = self.get_player(ctx)
        if vc.source:
            vc.source.volume = vol / 100
        player.volume = vol / 100
        await ctx.send(f'**{ctx.author}** установил громкость - **{vol}%**')

    @commands.command(name='stop')
    async def stop_(self, ctx):
        """Stop the currently playing song and destroy the player.
        !Warning!
        This will destroy the player assigned to your guild, also deleting any queued songs and settings.
        """
        vc = ctx.voice_client
        if not vc or not vc.is_connected():
            return await ctx.send('Да я особо ничего и не проигрываю!')
        await self.cleanup(ctx.guild)

    @commands.command(aliases=['сэй'])
    async def say(self, ctx):
        """Speak the message text in the voice channel via Google TTS."""
        message = ctx.message
        if len(message.clean_content) <= 4:
            await ctx.send('Но тут же не о чем говорить.')
        else:
            try:
                path = 'voice/' + str(message.id) + '.mp3'
                # [5:] strips the command prefix (';say ' / ';сэй ' — both 5 chars).
                tts = gTTS(text=message.clean_content[5:], lang='ru', slow=False)
                tts.save(path)
                vc = ctx.voice_client
                if not vc:
                    await ctx.invoke(self.connect_)
                source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(path))
                ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else os.remove(path))
            except Exception:
                # Was a bare `except:`; narrowed so Ctrl-C/SystemExit pass through.
                await ctx.send('Жу-жу-жу... что-то пошло не так...')
            try:
                await ctx.message.delete()
            except Exception:
                await ctx.send('Не могу тут удалять сообщения! Удалите сами, пожалуйста!')

    @commands.command(aliases=['music'])
    async def voice(self, ctx):
        """Show an embed describing the voice/music commands."""
        embed = discord.Embed(title="", color=0xa500ff)
        embed.set_author(name='<NAME>', icon_url='https://i.imgur.com/A7tQuJ1.png')
        embed.set_thumbnail(url="https://i.imgur.com/A7tQuJ1.png")
        embed.add_field(name='**;join ;connect**',
                        value='Вызвать бота в голосовй канал. Если после команды указать ID голосового канала, бот подключится к нему.',
                        inline=False)
        embed.add_field(name="**;channels**", value="Получить список каналов и их ID для этого сервера", inline=False)
        embed.add_field(name="**;say ;сэй <сообщение>**", value="Произнести <сообщение>", inline=False)
        embed.add_field(name='**;play ;sing <song_name>**', value='Найти <song_name> или ссылку на ютубе и включить', inline=False)
        embed.add_field(name='**;pause ;resume ;stop**', value='Управление воспроизведением', inline=False)
        embed.add_field(name='**;queue ;np ;skip**', value='Управлеие очередью', inline=False)
        embed.add_field(name='**;volume 0 - 100**', value='Управлеие громкостью', inline=False)
        await ctx.send(embed=embed)
def setup(bot):
    """discord.py extension entry point: register the Voice cog."""
    # (Removed a stray trailing '|' artifact left over from text extraction.)
    bot.add_cog(Voice(bot))
# Repository: LiBa001/CoRe
# In this version, the board edges are filled first
# whenever no other sensible move is available.
import random
# CoRe
def turn(board, symbol):
    """Choose a move for `symbol` on an 8x8 board where '#' marks empty cells.

    Strategy, in order: secure the cell diagonally inside an occupied corner,
    take a free corner, extend/block runs of enemy stones found by row and
    column scans, fill the board edges, then fall back to a random empty cell.
    Returns an (x, y) tuple.  `getboard` is assumed to be a pure board lookup.
    """
    def randAction():
        # Fallback: sample random cells until an empty one turns up.
        # NOTE(review): loops forever if the board has no empty cell — confirm
        # the caller never invokes turn() on a full board.
        while True:
            x = random.choice(range(8))
            y = random.choice(range(8))
            if getboard(board, x, y) == '#':
                return (x, y)

    # If a corner is already taken, secure the cell diagonally inside it.
    if getboard(board, 0, 0) != '#' and getboard(board, 1, 1) == '#':
        return (1, 1)
    if getboard(board, 7, 0) != '#' and getboard(board, 6, 1) == '#':
        return (6, 1)
    if getboard(board, 0, 7) != '#' and getboard(board, 1, 6) == '#':
        return (1, 6)
    if getboard(board, 7, 7) != '#' and getboard(board, 6, 6) == '#':
        return (6, 6)
    # Take a free corner whose inner diagonal neighbour is already occupied.
    if getboard(board, 0, 0) == '#' and getboard(board, 1, 1) != '#':
        return (0, 0)
    if getboard(board, 7, 0) == '#' and getboard(board, 6, 1) != '#':
        return (7, 0)
    if getboard(board, 0, 7) == '#' and getboard(board, 1, 6) != '#':
        return (0, 7)
    if getboard(board, 7, 7) == '#' and getboard(board, 6, 6) != '#':
        return (7, 7)

    poslist1 = []
    for yp in range(8):  # scan each row for horizontal combinations
        enemy = 0    # length of the current run of enemy stones
        own = False  # whether one of our stones was seen earlier in this row
        count = 0    # empty cells seen since the enemy run started
        for xp in range(8):
            cell = getboard(board, xp, yp)
            if cell != symbol and cell != '#':
                if own == True:
                    x = xp + 1
                    y = yp
                    if x >= 0 and x <= 7 and getboard(board, x, y) == '#':
                        poslist1.append([enemy, x, y])  # candidate move
                enemy += 1
            elif cell == '#':
                if enemy >= 1:
                    count += 1
            elif cell == symbol:
                if enemy >= 1:
                    # Gap cells + enemy stones = offset back to the run's start.
                    substractor = 1 + count + enemy
                    x = xp - substractor
                    y = yp
                    if x >= 0 and x <= 7 and getboard(board, x, y) == '#':
                        poslist1.append([enemy, x, y])  # candidate move
                own = True
                enemy = 0
    token1 = 0
    if len(poslist1) > 0:  # pick the most promising candidate
        print(poslist1)
        for i in range(len(poslist1)):
            # NOTE(review): compares with the *previous* entry, not the running
            # best, so this does not necessarily select the maximum; kept as-is.
            if poslist1[i][0] > poslist1[i - 1][0] and i > 0:
                token1 = poslist1[i]
                print("new token", token1)
            elif i == 0:
                token1 = poslist1[i]

    poslist2 = []
    for xp in range(8):  # scan each column for vertical combinations
        enemy = 0
        own = False
        count = 0
        for yp in range(8):
            cell = getboard(board, xp, yp)
            if cell != symbol and cell != '#':
                if own == True:
                    x = xp
                    y = yp + 1
                    if y >= 0 and y <= 7 and getboard(board, x, y) == '#':
                        poslist2.append([enemy, x, y])  # candidate move
                enemy += 1
            elif cell == '#':
                if enemy >= 1:  # count the gap between enemy run and our stone
                    count += 1
            elif cell == symbol:
                if enemy >= 1:
                    x = xp
                    substractor = 1 + count + enemy
                    y = yp - substractor
                    if y >= 0 and y <= 7 and getboard(board, x, y) == '#':
                        poslist2.append([enemy, x, y])  # candidate move
                own = True
                enemy = 0
    token2 = 0
    if len(poslist2) > 0:  # pick the most promising candidate
        print(poslist2)
        for i in range(len(poslist2)):
            if poslist2[i][0] > poslist2[i - 1][0] and i > 0:
                token2 = poslist2[i]
                print("new token", token2)
            elif i == 0:
                token2 = poslist2[i]

    print("tokens are", token1, token2)
    # BUGFIX: only compare when BOTH scans produced a candidate.  The original
    # subscripted token2 even when it was still the int 0, raising TypeError.
    if token1 != 0 and token2 != 0:
        if token1[0] > token2[0]:
            result = (token1[1], token1[2])
            return result
        elif token1[0] < token2[0]:
            result = (token2[1], token2[2])
            return result
    if token1 != 0:
        result = (token1[1], token1[2])
        return result
    if token2 != 0:
        print("taking token 2")
        result = (token2[1], token2[2])
        return result

    sidelist = [0, 7]
    for xp in sidelist:  # fill the left edge, then the right edge (corners excluded)
        for yp in range(1, 7):
            if getboard(board, xp, yp) == '#':
                return (xp, yp)
    for yp in sidelist:  # fill the top edge, then the bottom edge (corners excluded)
        for xp in range(1, 7):
            if getboard(board, xp, yp) == '#':
                return (xp, yp)
    return randAction()
|
from __future__ import division, print_function
import os
from mmtbx.validation.ramalyze import ramalyze
from libtbx.program_template import ProgramTemplate
try:
from phenix.program_template import ProgramTemplate
except ImportError:
pass
from libtbx.utils import Sorry
class Program(ProgramTemplate):
  """Ramachandran analysis program: runs ramalyze over input models and
  optionally renders static or interactive Ramachandran plots."""
  prog = os.getenv('LIBTBX_DISPATCHER_NAME')
  description="""
%(prog)s file.pdb [params.eff] [options ...]
Options:
  model=input_file input PDB file
  outliers_only=False only print outliers
  verbose=False verbose text output
  plot=False Create graphics of plots (if Matplotlib is installed)
Example:
  %(prog)s model=1ubq.pdb outliers_only=True
""" % locals()
  # Pavel's style:
  # plot=True show_labels=False markerfacecolor=yellow markeredgecolor=red
  master_phil_str = """
plot = False
  .type = bool
  .help = Create graphics of plots (if Matplotlib is installed)
show_labels = True
  .type = bool
  .help = Show labels on outlier residues
point_style = 'bo'
  .type = str
  .help = choose style of points, use matplotlib format from e.g. here: \
    https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.plot.html \
    very small is ',', little bigger is '.'
markersize=3
  .type=int
markerfacecolor = white
  .type = str
markeredgecolor="black"
  .type = str
show_filling = True
  .type = bool
show_contours = True
  .type = bool
dpi=100
  .type=int
wxplot = False
  .type = bool
  .help = Display interactive plots (requires wxPython and Matplotlib)
outliers_only = False
  .type = bool
  .help = "Only display outliers"
verbose = True
  .type = bool
  .help = '''Verbose'''
output_prefix = None
  .type = str
  .help = prefix for outputted plots (if plot=True)
"""
  datatypes = ['model','phil']
  known_article_ids = ['molprobity']

  def validate(self):
    """Require at least one input model."""
    self.data_manager.has_models(raise_sorry=True)

  def run(self):
    """Run ramalyze on every input model, combine the results, and emit
    text output and (optionally) plots."""
    results = []
    for model_name in self.data_manager.get_model_names():
      hierarchy = self.data_manager.get_model(model_name).get_hierarchy()
      hierarchy.atoms().reset_i_seq()
      result = ramalyze(
        pdb_hierarchy=hierarchy,
        show_errors=None,
        outliers_only=self.params.outliers_only,
        out=self.logger,
        quiet=False)
      results.append(result)
      if len(self.data_manager.get_model_names()) > 1:
        # Per-model verbose output would be overwhelming for multi-model runs.
        self.params.verbose = False
        print('\nmodel : %s' % model_name, file=self.logger)
    # combine models
    result = results[0]
    for i in range(1, len(results)):
      result += results[i]
    if self.params.verbose:
      result.show_old_output(out=self.logger, verbose=True)
    if self.params.plot:
      plot_file_base = self.params.output_prefix
      if plot_file_base is None:
        plot_file_base = os.path.splitext(os.path.basename(self.data_manager.get_model_names()[0]))[0]
      result.write_plots(
        plot_file_base=plot_file_base,
        out=self.logger,
        show_labels=self.params.show_labels,
        point_style=self.params.point_style,
        markerfacecolor=self.params.markerfacecolor,
        show_filling=self.params.show_filling,
        show_contours=self.params.show_contours,
        dpi=self.params.dpi,
        markeredgecolor=self.params.markeredgecolor,
        markersize=self.params.markersize)
    if self.params.wxplot:
      # BUGFIX: `except ImportError, e :` is Python-2-only syntax and a
      # SyntaxError on Python 3; the bound exception was unused anyway.
      try:
        import wxtbx.app
      except ImportError:
        raise Sorry("wxPython not available.")
      else:
        app = wxtbx.app.CCTBXApp(0)
        result.display_wx_plots()
        app.MainLoop()
|
# Repository: tapis-project/tapipy
"""
Script to download/pickle/store configs under specified name.
This allows us to update the Tapipy configs with a script.
Note, this allows you to map any spec URL to any other URL filename as that's how they're saved.
MEANING! You can give an actor spec a 'files' filename and there will be no error, be careful.
"""
import os
import copy
import yaml
import requests
import pickle
from openapi_core import create_spec
from atomicwrites import atomic_write
###~~~#### FILL THESE IN ####~~~###
spec_dir = 'tapipy/specs'
resource_dir = 'tapipy/resources'
###################################
def get_file_info_from_url(url: str, spec_dir: str):
    """Derive the pickle file name and storage path for a spec URL.

    Strips the raw.githubusercontent.com prefix and any '.yml'/'.yaml'
    occurrences, maps '/' to '-', and lower-cases the result.
    Returns (spec_name, full_spec_name, spec_path).
    """
    spec_name = url.replace('https://raw.githubusercontent.com/', '')
    for fragment in ('.yml', '.yaml'):
        spec_name = spec_name.replace(fragment, '')
    spec_name = spec_name.replace('/', '-').lower()
    full_spec_name = f'{spec_name}.pickle'
    spec_path = f'{spec_dir}/{spec_name}.pickle'
    return spec_name, full_spec_name, spec_path
def _validate_and_pickle(spec_dict, dest_path, source_url):
    """Validate a spec dict with openapi_core and atomically pickle it to dest_path.

    Prints a diagnostic and returns False on failure; True on success.
    (Extracted: this logic was duplicated verbatim in the local and remote
    branches of save_url_as_other_url.)
    """
    # Attempts to create spec from dict to ensure the spec is valid
    # We do a deepcopy as create_spec for some reason adds fields
    # to the dictionary that's given
    try:
        test_spec_dict = copy.deepcopy(spec_dict)
        create_spec(test_spec_dict)
    except Exception as e:
        print(f'Got exception when test creating spec for "{source_url}" '
              f'resource; Spec probably not verifying; exception: {e}')
        return False
    # Pickles and saves the spec dict to the dest_path atomically
    try:
        with atomic_write(f'{dest_path}', overwrite=True, mode='wb') as spec_file:
            pickle.dump(spec_dict, spec_file, protocol=4)
    except Exception as e:
        print(f'Got exception when attempting to pickle spec_dict and '
              f'write to "{dest_path}"; exception: {e}')
        return False
    return True

def save_url_as_other_url(spec_and_alias, spec_dir):
    """
    Remember, filenames are derived from a URL, so you're essentially getting the data
    from one URL and giving it the URL of another. In most cases you can map URL1
    to URL1 and it'll act proper. But if a spec is broken, either use the existing or
    change the source URL.
    FOR EXAMPLE!
    The following line would cast the actors spec dict to the authenticators filename. This mean
    that Tapipy will ready in the actors spec when trying to get the authenticator url.
    spec_and_alias = {actor_master_url: authenticator_master_url}
    """
    for source_url, dest_url in spec_and_alias.items():
        # Get path from "dest_url". Where we'll save spec.
        _, _, dest_path = get_file_info_from_url(dest_url, spec_dir)
        if "local:" in source_url:
            # Loads yaml into Python dictionary
            try:
                source_path = source_url.replace('local:', '').strip()
                with open(source_path, 'rb') as spec_file:
                    spec_dict = yaml.load(spec_file, Loader=yaml.FullLoader)
            except Exception as e:
                print(f'Error reading local "{source_path}" resource. '
                      f'Ensure path is absolute. e:{e}')
                continue
        else:
            response = requests.get(source_url)
            if response.status_code != 200:
                # Original behavior: silently skip non-200 responses.
                continue
            # Loads yaml into Python dictionary
            try:
                spec_dict = yaml.load(response.content, Loader=yaml.FullLoader)
            except Exception as e:
                print(f'Got exception when attempting to load yaml from '
                      f'"{source_url}" resource; exception: {e}')
                continue
        _validate_and_pickle(spec_dict, dest_path, source_url)
# Known spec locations, grouped by origin:
#   'local'  - YAML files shipped in this repo's resources folder
#   'tapipy' - pickled specs published on tapipy's prod branch (the targets)
#   'prod'/'dev' - each service's own upstream spec on its prod/dev branch
RESOURCES = {
    'local':{
        'actors': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-actors.yml",
        'authenticator': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-authenticator.yml",
        'meta': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-meta.yml",
        'files': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-files.yml",
        'sk': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-sk.yml",
        'streams': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-streams.yml",
        'systems': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-systems.yml",
        'tenants': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-tenants.yml",
        'tokens': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-tokens.yml",
        'pgrest': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-pgrest.yml",
        'jobs': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-jobs.yml",
        'apps': f"local: {os.path.join(os.path.dirname(__file__), resource_dir)}/openapi_v3-apps.yml"
    },
    'tapipy':{
        'actors': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-actors.yml',
        'authenticator': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-authenticator.yml',
        'meta': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-meta.yml',
        'files': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-files.yml',
        'sk': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-sk.yml',
        'streams': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-streams.yml',
        'systems': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-systems.yml',
        'tenants': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-tenants.yml',
        'tokens': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-tokens.yml',
        'pgrest': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-pgrest.yml',
        'jobs': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-jobs.yml',
        'apps': 'https://raw.githubusercontent.com/tapis-project/tapipy/prod/tapipy/resources/openapi_v3-apps.yml'
    },
    'prod': {
        'actors': 'https://raw.githubusercontent.com/TACC/abaco/dev-v3/docs/specs/openapi_v3.yml',
        'authenticator': 'https://raw.githubusercontent.com/tapis-project/authenticator/prod/service/resources/openapi_v3.yml',
        'meta': 'https://raw.githubusercontent.com/tapis-project/tapis-client-java/prod/meta-client/src/main/resources/metav3-openapi.yaml',
        'files': 'https://raw.githubusercontent.com/tapis-project/tapis-files/prod/api/src/main/resources/openapi.yaml',
        'sk': 'https://raw.githubusercontent.com/tapis-project/tapis-client-java/prod/security-client/src/main/resources/SKAuthorizationAPI.yaml',
        'streams': 'https://raw.githubusercontent.com/tapis-project/streams-api/prod/service/resources/openapi_v3.yml',
        'systems': 'https://raw.githubusercontent.com/tapis-project/openapi-systems/prod/SystemsAPI.yaml',
        'tenants': 'https://raw.githubusercontent.com/tapis-project/tenants-api/prod/service/resources/openapi_v3.yml',
        'tokens': 'https://raw.githubusercontent.com/tapis-project/tokens-api/prod/service/resources/openapi_v3.yml',
        'pgrest': 'https://raw.githubusercontent.com/tapis-project/paas/prod/pgrest/resources/openapi_v3.yml',
        'jobs': 'https://raw.githubusercontent.com/tapis-project/tapis-client-java/prod/jobs-client/src/main/resources/JobsAPI.yaml',
        'apps': 'https://raw.githubusercontent.com/tapis-project/openapi-apps/prod/AppsAPI.yaml'
    },
    'dev': {
        'actors': 'https://raw.githubusercontent.com/TACC/abaco/dev-v3/docs/specs/openapi_v3.yml',
        'authenticator': 'https://raw.githubusercontent.com/tapis-project/authenticator/dev/service/resources/openapi_v3.yml',
        'meta': 'https://raw.githubusercontent.com/tapis-project/tapis-client-java/dev/meta-client/src/main/resources/metav3-openapi.yaml',
        'files': 'https://raw.githubusercontent.com/tapis-project/tapis-files/dev/api/src/main/resources/openapi.yaml',
        'sk': 'https://raw.githubusercontent.com/tapis-project/tapis-client-java/dev/security-client/src/main/resources/SKAuthorizationAPI.yaml',
        'streams': 'https://raw.githubusercontent.com/tapis-project/streams-api/dev/service/resources/openapi_v3.yml',
        'systems': 'https://raw.githubusercontent.com/tapis-project/openapi-systems/dev/SystemsAPI.yaml',
        'tenants': 'https://raw.githubusercontent.com/tapis-project/tenants-api/dev/service/resources/openapi_v3.yml',
        'tokens': 'https://raw.githubusercontent.com/tapis-project/tokens-api/dev/service/resources/openapi_v3.yml',
        # NOTE(review): 'pgrest' points at the *prod* branch inside the dev map — confirm intentional.
        'pgrest': 'https://raw.githubusercontent.com/tapis-project/paas/prod/pgrest/resources/openapi_v3.yml',
        'jobs': 'https://raw.githubusercontent.com/tapis-project/tapis-client-java/dev/jobs-client/src/main/resources/JobsAPI.yaml',
        'apps': 'https://raw.githubusercontent.com/tapis-project/openapi-apps/dev/AppsAPI.yaml'
    }
}
if __name__ == "__main__":
    # Spec/Key is the url to download and copy the spec dict from.
    # Alias/Val is the file to save the spec dict to.
    # (Removed a dead placeholder dict and a trailing '|' extraction artifact;
    # replaced the two hand-written literal maps with comprehensions.)

    # Set 1 updates all tapipy pickle files with the local specs held in the
    # resources folder.
    spec_and_alias_set_1 = {RESOURCES['local'][name]: RESOURCES['tapipy'][name]
                            for name in RESOURCES['local']}

    # Set 2 updates tapipy pickle files with the specs contained in each spec
    # source's prod branch. NOTE(review): it covers only these nine services —
    # 'pgrest', 'jobs' and 'apps' are omitted; confirm that is intentional.
    spec_and_alias_set_2 = {RESOURCES['prod'][name]: RESOURCES['tapipy'][name]
                            for name in ('actors', 'authenticator', 'meta',
                                         'files', 'sk', 'streams', 'systems',
                                         'tenants', 'tokens')}

    # Specify where you want the specs to be saved, to get ready for a release
    # specify the github/tapipy/tapipy/specs folder to overwrite old specs.
    # Don't forget to delete any specs that are no longer needed.
    # Run the saver
    save_url_as_other_url(spec_and_alias_set_1, spec_dir)
#!/home/bin/python
import argparse
import time
import sys
# Startup banner printed at import time.
print('\nChecking required modules \n')
''' Purpose of the program:
1) Used to merge the haplotype file generated by phase-Extender and phase-Stitcher.
2) Merge the table file back to VCF. '''
def main():
    """Parse command-line arguments and dispatch to the requested converter.

    The parsed namespace is published as the module-level ``args`` so the
    converter functions (fnc_haplotype_to_vcf / fnc_table_to_vcf) can read
    their inputs from it.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-fromType", required=True,
                        help="Type of the file the VCF is being prepared from. "
                             "Options: haplotype, table ")
    parser.add_argument("-inFile", required=True,
                        help="Sorted table or haplotype file."
                             "This haplotype file should be obtained from phase-Stitcher, "
                             "phase-Extender. The table file should be in the format "
                             "output by 'VCF-Simplify'; only long format table is supported for now. ")
    parser.add_argument("-outVCF", help="Name of the output VCF file.", required=True)
    parser.add_argument("-vcfHeader", required=True,
                        help="A custom VCF header to add to the VCF file. "
                             "The VCF header should not contain the line with #CHROM .... ")
    # Extra options that only matter when "-fromType" is "table".
    table_group = parser.add_argument_group('Extra flags when converting table to VCF.')
    table_group.add_argument("-GTbase", help="Representation of the GT base is : numeric, IUPAC ", required=False)
    table_group.add_argument("-samples",
                             help="Name of the samples -> "
                                  "comma separated name of the samples that needs to be converted "
                                  "to VCF format",
                             default='all', required=False)
    table_group.add_argument("-formats",
                             help="Name of the FORMAT tags to write -> "
                                  "comma separated FORMAT tags name.",
                             default='all', required=False)
    table_group.add_argument("-infos",
                             help="Name of the INFO tags to write -> "
                                  "comma separated INFO tags name. ",
                             default='all', required=False)
    # Share the parsed namespace with the converter functions.
    global args
    args = parser.parse_args()
    from_type = args.fromType
    if from_type == 'haplotype':
        print('Converting haplotype file to VCF')
        fnc_haplotype_to_vcf()
    elif from_type == 'table':
        print('Converting Table to VCF')
        fnc_table_to_vcf()
    else:
        print('fromType not indicated.')
'''Part C: Function to convert Haplotype To VCF.'''
def fnc_table_to_vcf():
    """Convert a long-format table file (from 'VCF-Simplify') back to VCF.

    Input/output paths and conversion options are read from the module
    level ``args`` namespace.  The table's header line is parsed once to
    locate the fixed VCF columns (CHROM, POS, ID, REF, ALT, QUAL, FILTER)
    plus the INFO/FORMAT/SAMPLE columns; the discovered indexes and tag
    lists are published as module globals that ``table_to_vcf`` and
    ``update_sample_format`` consume while converting each record.
    """
    print('converting Table file to VCF')
    ## declare globals (consumed by table_to_vcf / update_sample_format)
    global genotype_is
    global begin_time
    global contig_idx
    global pos_idx
    global id_idx
    global ref_idx
    global alt_idx
    global qual_idx
    global filter_idx
    # INFO, FORMAT and SAMPLE don't have index but tags
    global info_tags
    global infos_idx
    global format_tags
    global sample_names
    global header_line
    begin_time = time.time()
    # Assign some input variables.
    genotype_is = args.GTbase
    infile = args.inFile
    meta_header = args.vcfHeader
    outfile = args.outVCF
    samples = args.samples
    formats = args.formats
    infos = args.infos
    with open(infile) as hapfile, \
            open(meta_header) as meta_header, \
            open(outfile, 'w+') as vcf_out:
        # Read the table line by line (generator style) to save memory.
        for line in hapfile:
            # Step 01: the header line is parsed to find each column's
            # index.  SAMPLE columns are recognised by the ":" in their
            # names ("SAMPLE:TAG"), FORMAT tags come from the part after
            # the ":", and any column not consumed below is treated as
            # an INFO tag.
            used_header = []  # header labels already taken
            if line.startswith('CHROM') \
                    or line.startswith('#CHROM'):
                header_line = line.rstrip('\n').split('\t')
                # CHROM is mandatory
                if 'CHROM' in header_line:
                    contig_idx = header_line.index('CHROM')
                elif '#CHROM' in header_line:
                    contig_idx = header_line.index('#CHROM')
                else:
                    print('CHROM field does not exist. Update your file')
                    break
                # update the taken header "labels"
                used_header += ['CHROM', '#CHROM']
                # POS is mandatory
                if 'POS' in header_line:
                    pos_idx = header_line.index('POS')
                else:
                    print('POS field does not exist. Update your file')
                    break
                used_header += ['POS']
                # ID, REF, ALT, QUAL, FILTER are optional; a missing
                # column is rendered as "." by table_to_vcf().
                if 'ID' in header_line:
                    id_idx = header_line.index('ID')
                else:
                    id_idx = None
                used_header += ['ID']
                if 'REF' in header_line:
                    ref_idx = header_line.index('REF')
                else:
                    # BUG FIX: was `ref_idx == None` (a no-op comparison
                    # that left ref_idx undefined and raised a NameError
                    # downstream); it must be an assignment like the
                    # other optional columns.
                    ref_idx = None
                used_header += ['REF']
                if 'ALT' in header_line:
                    alt_idx = header_line.index('ALT')
                else:
                    alt_idx = None
                used_header += ['ALT']
                if 'QUAL' in header_line:
                    qual_idx = header_line.index('QUAL')
                else:
                    qual_idx = None
                used_header += ['QUAL']
                if 'FILTER' in header_line:
                    filter_idx = header_line.index('FILTER')
                else:
                    filter_idx = None
                used_header += ['FILTER']
                # SAMPLE names and FORMAT tags are identified using the
                # ":" delimiter in the column names.
                if samples != 'all':
                    sample_names = samples.split(',')
                else:
                    sample_names = []
                    for itemy in header_line:
                        if ':' in itemy:
                            sample_names.append(itemy.split(':')[0])
                    sample_names = list(set(sample_names))
                used_header += [x for x in header_line if ':' in x]
                # find the available FORMAT tags
                if formats != 'all':
                    format_tags = formats.split(',')
                else:
                    format_tags = []
                    for names in sample_names:
                        for itemx in header_line:
                            if itemx.startswith(names):
                                format_tags.append(itemx.split(':')[1])
                    format_tags = list(set(format_tags))
                # Move "GT" to the front of the FORMAT tags (the VCF spec
                # requires GT first when present).
                if 'GT' in format_tags:
                    format_tags.remove('GT')
                    format_tags.insert(0, 'GT')
                # Any column name in the header that was not taken so far
                # is considered an INFO field.
                remaining_cols = [itx for itx in header_line if itx not in set(used_header)]
                if infos != 'all':
                    info_tags = infos.split(',')
                elif len(remaining_cols) > 0:
                    info_tags = remaining_cols
                else:
                    info_tags = None
                    print('INFO tags are not available.')
                # also find the position of the INFO tags on the header line
                if info_tags is not None:
                    infos_idx = []
                    for inftag in info_tags:
                        infos_idx.append(header_line.index(inftag))
                else:
                    infos_idx = None
                # Read the meta header and write it to the output VCF.
                print('\nReading meta header from file "%s" ' % (meta_header.name))
                meta_info = meta_header.read()
                # add the column header line (fixed fields + sample names)
                meta_info += '\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT',
                                        'QUAL', 'FILTER', 'INFO', 'FORMAT']) + '\t'
                meta_info += '\t'.join(sample_names)
                vcf_out.write(meta_info + '\n')
                continue
            # Data line: convert and append to the output VCF.
            updated_line = table_to_vcf(line)
            vcf_out.write(updated_line)
            vcf_out.write('\n')
    print('Elapsed time : "%s".' % (time.time() - begin_time))
'''Function part of Table to VCF '''
def table_to_vcf(line_in):
    """Build one VCF record from one table line.

    Relies on the module-level indexes/tags discovered by
    fnc_table_to_vcf (contig_idx, pos_idx, id_idx, ref_idx, alt_idx,
    qual_idx, filter_idx, info_tags, infos_idx, format_tags).
    Returns the assembled tab-separated VCF record (no trailing newline).
    """
    fields = line_in.rstrip('\n').split('\t')
    chrom = fields[contig_idx]
    pos = fields[pos_idx]
    ids = fields[id_idx] if id_idx is not None else '.'
    ref = fields[ref_idx]
    alt = fields[alt_idx]
    qual = fields[qual_idx] if qual_idx is not None else '.'
    filter_val = fields[filter_idx] if filter_idx is not None else '.'
    # INFO column: "TAG=value" pairs joined by ";" ("." when absent).
    if info_tags is not None:
        pairs = ['='.join([tag, fields[infos_idx[k]]])
                 for k, tag in enumerate(info_tags)]
        info_col = ';'.join(pairs)
    else:
        info_col = '.'
    # FORMAT column lists the tag names themselves.
    format_col = ':'.join(format_tags) if format_tags is not None else '.'
    record = '\t'.join([chrom, pos, ids, ref, alt, qual,
                        filter_val, info_col, format_col]) + '\t'
    # Append the per-sample FORMAT values.
    return record + update_sample_format(fields, ref, alt)
''' Function part of Table to VCF '''
def update_sample_format(line, ref, alt):
    """Assemble the per-sample FORMAT value columns for one record.

    Uses the module-level ``header_line``, ``sample_names``,
    ``format_tags`` and ``genotype_is`` globals.  When the table stores
    genotypes as IUPAC bases, GT values are re-encoded as allele indexes
    relative to [ref] + alt alleles.
    """
    allele_order = [ref] + alt.split(',')
    per_sample_cols = []
    for sample in sample_names:
        tag_values = []
        for tag in format_tags:
            col_idx = header_line.index(sample + ':' + tag)
            value = line[col_idx]
            if tag == 'GT' and genotype_is == 'IUPAC':
                # NOTE(review): missing genotypes are skipped entirely,
                # which drops the GT slot from this sample's column
                # instead of writing "." — preserved as-is.
                if value in ('.', './.', '.|.'):
                    continue
                for sep in ('/', '|'):
                    if sep in value:
                        bases = value.split(sep)
                        # only the first two alleles are encoded,
                        # matching the original behavior
                        value = sep.join(str(allele_order.index(b))
                                         for b in bases[:2])
                        break
            tag_values.append(value)
        per_sample_cols.append(':'.join(tag_values))
    return '\t'.join(per_sample_cols)
'''Part B: Function to convert Haplotype To VCF.'''
def fnc_haplotype_to_vcf():
    """Convert a haplotype file (phase-Extender / phase-Stitcher output) to VCF.

    Paths are read from the module-level ``args`` namespace.  The header
    line provides the CHROM/POS/'all-alleles' column indexes and the
    sample names; each data line is then converted by ``haplotype_to_vcf``
    and written to the output file.
    """
    print('converting Haplotype file to VCF')
    begin_time = time.time()
    '''Assign some input variables. '''
    infile = args.inFile
    meta_header = args.vcfHeader
    outfile = args.outVCF
    with open(infile) as hapfile, \
        open(meta_header) as meta_header,\
        open(outfile, 'w+') as vcf_out:
        '''Start reading the haplotype file as generator. This saves memory. '''
        for line in hapfile:
            # Header line: locate the required columns and write the VCF
            # header; any missing mandatory column aborts the loop.
            if line.startswith('CHROM') \
                or line.startswith('#CHROM'):
                header_line = line.rstrip('\n').split('\t')
                if 'CHROM' in header_line:
                    contig_idx = header_line.index('CHROM')
                elif '#CHROM' in header_line:
                    contig_idx = header_line.index('#CHROM')
                else:
                    print('CHROM field does not exit. Update your file')
                    break
                if 'POS' in header_line :
                    pos_idx = header_line .index('POS')
                else:
                    print('POS field does not exit. Update your file')
                    break
                # 'all-alleles' holds the comma-separated REF+ALT alleles
                if 'all-alleles' in header_line :
                    all_alleles_idx = header_line .index('all-alleles')
                else:
                    print('"all-alleles" field not available in input file. Update your file')
                    break
                '''Finally find available SAMPLE names and it's FORMAT tags'''
                # Sample columns are named "SAMPLE:TAG"; collect the
                # unique parts before the ":".
                sample_namex = []
                for itemx in header_line:
                    if ':' in itemx:
                        sample_namex.append(itemx.split(':')[0])
                sample_namex = list(set(sample_namex))
                # assign FORMAT tags - keeping it fixed
                format_tagx = ['GT', 'PI', 'PG', 'PG_al']
                ''' Now, Read the meta header and add it to the output VCF file. '''
                print('\nReading meta header from file "%s" ' % (meta_header.name))
                if meta_header != None:
                    meta_info = meta_header.read().rstrip('\n')
                    meta_info += '\n'
                else:
                    print('Header with meta information is not provided')
                    break
                # add meta header to the output VCF file
                meta_info += '\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT',
                    'QUAL', 'FILTER', 'INFO', 'FORMAT']) + '\t'
                # add SAMPLE fields to output VCF file
                meta_info += '\t'.join(sample_namex)
                # Finally, write the header part of the output VCF
                vcf_out.write(meta_info + '\n')
                continue
            '''' Now, extract the required data from each of the remaining lines add to output VCF. '''
            updated_line = haplotype_to_vcf(
                line, header_line, all_alleles_idx, sample_namex, format_tagx)
            vcf_out.write(updated_line)
            vcf_out.write('\n')
    print('Elapsed time : "%s".' % (time.time() - begin_time))
''' Part B: Function part of Haplotype To VCF '''
def haplotype_to_vcf(line, header_line, all_alleles_idx, sample_namex, format_tagx):
    """Convert one haplotype-file record into a VCF record line.

    Args:
        line: raw tab-separated record (trailing newline allowed).
        header_line: split header row, used to locate sample columns.
        all_alleles_idx: column index of the 'all-alleles' field.
        sample_namex: sample names present in the file.
        format_tagx: FORMAT tags to emit (GT, PI, PG, PG_al).

    Returns:
        The assembled VCF record as a single tab-separated string.
    """
    fields = line.rstrip('\n').split('\t')
    alleles = fields[all_alleles_idx].split(',')
    ref_allele = alleles[0]
    alt_alleles = ','.join(alleles[1:])
    fixed_cols = [fields[0], fields[1], '.', ref_allele, alt_alleles,
                  '.', '.', '.', ':'.join(format_tagx)]
    # Per-sample columns: GT and PG are derived from the phased PG_al
    # bases by mapping each base to its index in the allele list.
    sample_cols = []
    for sample in sample_namex:
        pg_al = fields[header_line.index(sample + ':PG_al')]
        pi_val = fields[header_line.index(sample + ':PI')]
        gt_val = '.'
        pg_val = '.'
        if pg_al not in ('.', './.', '.|.'):
            for sep in ('/', '|'):
                if sep in pg_al:
                    bases = pg_al.split(sep)
                    # only the first two alleles are encoded, matching
                    # the original behavior
                    gt_val = sep.join(str(alleles.index(b)) for b in bases[:2])
                    pg_val = gt_val
                    break
        sample_cols.append(':'.join([gt_val, pi_val, pg_val, pg_al]))
    return '\t'.join(fixed_cols) + '\t' + '\t'.join(sample_cols)
# Run the converter when executed as a script.
# (Removed the stray trailing '|' that made this line invalid Python.)
if __name__ == '__main__':
    main()
# Source: PaddlePaddle/PaddleSpatial — paddlespatial/networks/vmrgae/agcn.py
# -*-Encoding: utf-8 -*-
################################################################################
#
# Copyright (c) 2021 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Description: Adaptive GCN module for latent relation study in graph structure
Authors: zhouqiang(<EMAIL>)
Date: 2021/10/26
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import pgl
class AGCNConv(nn.Layer):
    """
    Desc:
        Adaptive GCN convolution layer in the paper "Graph WaveNet for Deep Spatial-Temporal Graph Modeling"
    """
    def __init__(self, input_size, output_size, num_nodes, activation=None, norm=True, addaptadj=True):
        # type: (int, int, int, str, bool, bool) -> None
        """
        Desc:
            __init__
        Args:
            input_size: The dimension size of the input tensor
            output_size: The dimension size of the output tensor
            num_nodes: The node number of the input graph
            activation: The activation for the output ('relu', 'softplus', or None)
            norm: If norm is True, then the feature will be normalized
            addaptadj: If addaptadj is False, then the standard GCN will be used
        """
        super(AGCNConv, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.addaptadj = addaptadj
        self.num_nodes = num_nodes
        self.activation = activation
        # GCN over the fixed (distance-based) input graph.
        self.distance_conv = pgl.nn.GCNConv(input_size, output_size, activation=None, norm=norm)
        if addaptadj:
            # Learnable node embeddings; their product (nodevec_lin @
            # nodevec_col) forms the adaptive adjacency matrix, as in
            # Graph WaveNet.
            self.nodevec_col = self.create_parameter(shape=[output_size, num_nodes])
            self.nodevec_lin = self.create_parameter(shape=[num_nodes, output_size])
            # Separate GCN applied over the adaptive graph.
            self.adaptive_conv = pgl.nn.GCNConv(input_size, output_size, activation=None, norm=norm)
            # Fuses the concatenated adaptive + distance outputs back to
            # output_size.
            self.mlp = nn.Sequential(nn.Linear(2 * output_size, output_size))
        # Output head; nonlinearity selected by `activation`.
        # NOTE(review): an unrecognised activation string leaves end_conv
        # undefined and forward() would fail — confirm callers only pass
        # None/'relu'/'softplus'.
        if self.activation is None:
            self.end_conv = nn.Sequential(nn.Linear(output_size, output_size))
        elif self.activation == 'relu':
            self.end_conv = nn.Sequential(nn.Linear(output_size, output_size), nn.ReLU())
        elif self.activation == 'softplus':
            self.end_conv = nn.Sequential(nn.Linear(output_size, output_size), nn.Softplus())
    def formulate_adp_graph(self, adpmat, feature):
        # type: (paddle.tensor, paddle.tensor) -> pgl.graph
        """
        Desc:
            Formulate the adaptive graph given the adjacency matrix and node feature
        Args:
            adpmat: The adaptive adjacency matrix
            feature: The node feature matrix
        Returns:
            graph_adp: The adaptive graph in pgl.graph format
        """
        # Collect every positive entry of adpmat as a weighted edge.
        # NOTE(review): this is an O(num_nodes^2) Python loop, and since
        # adpmat comes from a softmax all entries are > 0, so the graph is
        # effectively dense — confirm whether thresholding was intended.
        edge_index = []
        edge_feat = []
        for i in range(self.num_nodes):
            for j in range(self.num_nodes):
                if adpmat[i][j] > 0:
                    edge_index.append([i, j])
                    edge_feat.append(adpmat[i][j])
        edge_feat = paddle.to_tensor(edge_feat)
        graph_adp = pgl.Graph(edges=edge_index,
                              num_nodes=self.num_nodes,
                              node_feat={'nfeat':feature},
                              edge_feat={'efeat': edge_feat})
        return graph_adp
    def forward(self, graph, feature, norm=None):
        # type: (pgl.graph, paddle.tensor, object) -> paddle.tensor
        """
        Desc:
            A step of forward the layer.
        Args:
            graph: pgl.Graph instance
            feature: The node feature matrix with shape (num_nodes, input_size)
            norm: If norm is not None, then the feature will be normalized by given norm.
                  If norm is None and self.norm is true, then we use lapacian degree norm.
        Returns:
            outputs: A tensor with shape (num_nodes, output_size)
        """
        outputs = []
        if self.addaptadj:
            # Adaptive adjacency: row-softmax of the relu'd embedding
            # product, then convolve over both graphs and fuse.
            adpmat = F.softmax(F.relu(paddle.mm(self.nodevec_lin, self.nodevec_col)), axis=1)
            graph_adp = self.formulate_adp_graph(adpmat, feature)
            outputs.append(self.adaptive_conv(graph_adp, feature, norm=norm))
            outputs.append(self.distance_conv(graph, feature, norm=norm))
            outputs = paddle.concat(outputs, axis=1)
            outputs = self.mlp(outputs)
        else:
            # Standard GCN path over the fixed graph only.
            outputs = self.distance_conv(graph, feature, norm=norm)
        outputs = self.end_conv(outputs)
        return outputs
|
import streamlit as st
from datetime import date
from os import path
import time
from utils.tokenizer_funcs import spacy_fastai, Numericalize, open_vocab
from utils.processing import fastai_process_trans
from utils.logging import log_usage
from utils.model_utils import load_quantized_model
from utils.translate_utils import translate
from utils.v01_en_ga_transformer import pt_Transformer as ModelClass
# App configuration: model version, quantized-weights path, transformer
# dimensions, and vocabulary CSV paths.
config = {'model_v': '0.2',
          #'model_path':'models/paracrawl_en_ga_5e_5e-4_5e_1e-5_v0.2_exp4.pth',
          'model_path': 'models/paracrawl_en_ga_5e_5e-4_5e_1e-5_v0.2_exp4_no_opt_quantized',
          'd_model': 512,
          'd_inner': 2048,
          'en_vocab_path': 'data/paracrawl_vocab_en_v0.2_exp4.csv',
          'ga_vocab_path': 'data/paracrawl_vocab_ga_v0.2_exp4.csv'
          }
model_v = config['model_v']
model_path = config['model_path']
d_model = config['d_model']
d_inner = config['d_inner']
en_vocab_path = config['en_vocab_path']
ga_vocab_path = config['ga_vocab_path']
# STYLING
html_title = """
<div style="background-color:{};padding:10px;border-radius:10px">
<h1 style="color:{};text-align:center;">AnTrá </h1>
</div>
"""
# BUG FIX: the style attribute was missing its opening quote
# (`style=text-align:center;"`), producing invalid HTML.
inp_html = """<h2 style="text-align:center;">🇬🇧👇 </h2>"""
out_html = """<h2 style="text-align:center;">🇮🇪👇 </h2>"""
# RUN APP
def main():
    """Run the AnTrá Streamlit app.

    Loads the vocabularies, tokenizer and quantized translation model,
    renders the page, translates the entered English text to Irish on
    demand, and logs each translation with its inference time.
    """
    # TOKENIZER SETUP
    en_vocab = open_vocab(en_vocab_path)
    ga_vocab = open_vocab(ga_vocab_path)
    tokenizer = spacy_fastai()
    numericalizer = Numericalize(en_vocab, ga_vocab)
    # LOAD MODEL (removed unused `start = time.time()` timer)
    model = load_quantized_model(model_path=model_path, ModelClass=ModelClass, src_vcbsz=len(en_vocab),
                                 trg_vcbsz=len(ga_vocab), d_model=d_model, d_inner=d_inner)
    # STREAMLIT PAGE SETUP
    st.markdown(html_title.format('royalblue', 'white'), unsafe_allow_html=True)
    st.text('')
    st.markdown(inp_html, unsafe_allow_html=True)
    src_txt = st.text_area('', height=50, max_chars=280)
    # TRANSLATE CODE
    if st.button('Translate'):
        trans_start = time.time()
        st.text('')
        st.text('')
        trg_txt = translate(src_txt=src_txt, model=model, tokenizer=tokenizer, numericalizer=numericalizer)
        trg_txt = fastai_process_trans(trans=trg_txt)[0]
        st.markdown(out_html, unsafe_allow_html=True)
        st.markdown(f"## \n \
            > {trg_txt}")
        trans_end = time.time()
        inf_time = trans_end - trans_start
        log_usage(src_txt, trg_txt, inf_time=inf_time, feedback=None, model_v=model_v)
    # see html from here for layout ideas: https://discuss.streamlit.io/t/st-button-in-a-custom-layout/2187/2
    # SIDEBAR CODE
    # BUG FIX: corrected user-facing typo "Enlglish" -> "English".
    st.sidebar.markdown('## About \n AnTrá is an Irish Language Toolset \n \n Translate from English to Irish \
    and copy your translation to wherever you need to paste it')
    st.sidebar.markdown("---")
    st.sidebar.text('')
    if st.sidebar.checkbox('Show Release Notes'):
        st.sidebar.markdown(f'This is v{model_v}, \n [see here](https://github.com/morganmcg1/antra/blob/master/RELEASES.md)\
        for full release notes')
    # FORMATTING: hide the default Streamlit footer.
    hide_streamlit_style = """
            <style>
            footer {visibility: hidden;}
            </style>
            """
    st.markdown(hide_streamlit_style, unsafe_allow_html=True)
# Launch the Streamlit app when executed directly.
# (Removed the stray trailing '|' that made this line invalid Python.)
if __name__ == '__main__':
    main()
# finufft module, ie python-user-facing access to (no-data-copy) interfaces
#
# Some default opts are stated here (in arg list, but not docstring).
# Barnett 10/31/17: changed all type-2 not to have ms,etc as an input but infer
# from size of f.
# Barnett 2018?: google-style docstrings for napoleon.
# Lu 03/10/20: added guru interface calls
# Anden 8/18/20: auto-made docstrings for the 9 simple/many routines
import numpy as np
import warnings
import numbers
from ctypes import byref
from ctypes import c_longlong
from ctypes import c_void_p
import finufft._finufft as _finufft
### Plan class definition
class Plan:
    r"""
    A non-uniform fast Fourier transform (NUFFT) plan

    The ``Plan`` class lets the user exercise more fine-grained control over
    the execution of an NUFFT. First, the plan is created with a certain set
    of parameters (type, mode configuration, tolerance, sign, number of
    simultaneous transforms, and so on). Then the nonuniform points are set
    (source or target depending on the type). Finally, the plan is executed on
    some data, yielding the desired output.

    In the simple interface, all these steps are executed in a single call to
    the ``nufft*`` functions. The benefit of separating plan creation from
    execution is that it allows for plan reuse when certain parameters (like
    mode configuration) or nonuniform points remain the same between different
    NUFFT calls. This becomes especially important for small inputs, where
    execution time may be dominated by initialization steps such as allocating
    and FFTW plan and sorting the nonuniform points.

    Example:
    ::

        import numpy as np
        import finufft

        # set up parameters
        n_modes = (1000, 2000)
        n_pts = 100000
        nufft_type = 1
        n_trans = 4

        # generate nonuniform points
        x = 2 * np.pi * np.random.uniform(size=n_pts)
        y = 2 * np.pi * np.random.uniform(size=n_pts)

        # generate source strengths
        c = (np.random.standard_normal(size=(n_trans, n_pts)),
            + 1J * np.random.standard_normal(size=(n_trans, n_pts)))

        # initialize the plan
        plan = finufft.Plan(nufft_type, n_modes, n_trans)

        # set the nonuniform points
        plan.setpts(x, y)

        # execute the plan
        f = plan.execute(c)

    Also see ``python/examples/guru1d1.py`` and ``python/examples/guru2d1.py``.

    Args:
        nufft_type (int): type of NUFFT (1, 2, or 3).
        n_modes_or_dim (int or tuple of ints): if ``nufft_type`` is 1 or 2,
            this should be a tuple specifying the number of modes
            in each dimension (for example, ``(50, 100)``),
            otherwise, if `nufft_type`` is 3, this should be the
            number of dimensions (between 1 and 3).
        n_trans (int, optional): number of transforms to compute
            simultaneously.
        eps (float, optional): precision requested (>1e-16).
        isign (int, optional): if non-negative, uses positive sign
            exponential, otherwise negative sign.
        **kwargs (optional): for more options, see :ref:`opts`.
    """
    def __init__(self,nufft_type,n_modes_or_dim,n_trans=1,eps=1e-6,isign=None,**kwargs):
        # set default isign based on if isign is None
        # (type 2 conventionally defaults to the negative-sign exponential)
        if isign==None:
            if nufft_type==2:
                isign = -1
            else:
                isign = 1
        # set opts and check precision type
        opts = _finufft.NufftOpts()
        _finufft._default_opts(opts)
        # setkwopts (module-level helper defined elsewhere in this file)
        # applies **kwargs to opts and reports whether single precision
        # was requested
        is_single = setkwopts(opts,**kwargs)
        # construct plan based on precision type and eps default value
        plan = c_void_p(None)
        # setting n_modes and dim for makeplan
        n_modes = np.ones([3], dtype=np.int64)
        if nufft_type==3:
            # for type 3 the second argument is just the dimension count
            npdim = np.asarray(n_modes_or_dim, dtype=np.int64)
            if npdim.size != 1:
                raise RuntimeError('FINUFFT type 3 plan n_modes_or_dim must be one number, the dimension')
            dim = int(npdim)
        else:
            npmodes = np.asarray(n_modes_or_dim, dtype=np.int64)
            if npmodes.size>3 or npmodes.size<1:
                raise RuntimeError("FINUFFT n_modes dimension must be 1, 2, or 3")
            dim = int(npmodes.size)
            # NOTE(review): mode counts are reversed here, presumably to
            # match the C library's dimension ordering (setpts reverses
            # coordinates the same way) — confirm against the FINUFFT
            # guru interface docs.
            n_modes[0:dim] = npmodes[::-1]
        n_modes = (c_longlong * 3)(*n_modes)
        # bind the single- or double-precision C entry points once so the
        # other methods can call self._* without re-checking precision
        if is_single:
            self._makeplan = _finufft._makeplanf
            self._setpts = _finufft._setptsf
            self._execute = _finufft._executef
            self._destroy = _finufft._destroyf
        else:
            self._makeplan = _finufft._makeplan
            self._setpts = _finufft._setpts
            self._execute = _finufft._execute
            self._destroy = _finufft._destroy
        ier = self._makeplan(nufft_type, dim, n_modes, isign, n_trans, eps,
                             byref(plan), opts)
        # check error
        if ier != 0:
            err_handler(ier)
        # set C++ side plan as inner_plan
        self.inner_plan = plan
        # set properties
        self.type = nufft_type
        self.dim = dim
        self.n_modes = n_modes
        self.n_trans = n_trans
        self.is_single = is_single
    ### setpts
    def setpts(self,x=None,y=None,z=None,s=None,t=None,u=None):
        r"""
        Set the nonuniform points

        For type 1, this sets the coordinates of the ``M`` nonuniform source
        points, for type 2, it sets the coordinates of the ``M`` target
        points, and for type 3 it sets both the ``M`` source points and the
        ``N`` target points.

        The dimension of the plan determines the number of arguments supplied.
        For example, if ``dim == 2``, we provide ``x`` and ``y`` (as well as
        ``s`` and ``t`` for a type-3 transform).

        Args:
            x (float[M]): first coordinate of the nonuniform points
                (source for type 1 and 3, target for type 2).
            y (float[M], optional): second coordinate of the nonuniform
                points (source for type 1 and 3, target for type 2).
            z (float[M], optional): third coordinate of the nonuniform
                points (source for type 1 and 3, target for type 2).
            s (float[N], optional): first coordinate of the nonuniform
                points (target for type 3).
            t (float[N], optional): second coordinate of the nonuniform
                points (target for type 3).
            u (float[N], optional): third coordinate of the nonuniform
                points (target for type 3).
        """
        # arrays are retained on self so the C library can keep reading
        # them after this call returns
        if self.is_single:
            # array sanity check
            self._xj = _rchkf(x)
            self._yj = _rchkf(y)
            self._zj = _rchkf(z)
            self._s = _rchkf(s)
            self._t = _rchkf(t)
            self._u = _rchkf(u)
        else:
            # array sanity check
            self._xj = _rchk(x)
            self._yj = _rchk(y)
            self._zj = _rchk(z)
            self._s = _rchk(s)
            self._t = _rchk(t)
            self._u = _rchk(u)
        # valid sizes
        dim = self.dim
        tp = self.type
        (self.nj, self.nk) = valid_setpts(tp, dim, self._xj, self._yj, self._zj, self._s, self._t, self._u)
        # Pass coordinates to the C layer.
        # NOTE(review): for dim 2 and 3 the coordinate arguments are
        # passed in reversed order (y,x / z,y,x), mirroring the reversed
        # n_modes in __init__ — confirm against the FINUFFT guru docs.
        if self.dim == 1:
            ier = self._setpts(self.inner_plan, self.nj, self._xj, self._yj, self._zj, self.nk, self._s, self._t, self._u)
        elif self.dim == 2:
            ier = self._setpts(self.inner_plan, self.nj, self._yj, self._xj, self._zj, self.nk, self._t, self._s, self._u)
        elif self.dim == 3:
            ier = self._setpts(self.inner_plan, self.nj, self._zj, self._yj, self._xj, self.nk, self._u, self._t, self._s)
        else:
            raise RuntimeError("FINUFFT dimension must be 1, 2, or 3")
        if ier != 0:
            err_handler(ier)
    ### execute
    def execute(self,data,out=None):
        r"""
        Execute the plan

        Performs the NUFFT specified at plan instantiation with the points set
        by ``setpts``. For type-1 and type-3 transforms, the input is a set of
        source strengths, while for a type-2 transform, it consists of an
        array of size ``n_modes``. If ``n_transf`` is greater than one,
        ``n_transf`` inputs are expected, stacked along the first axis.

        Args:
            data (complex[M], complex[n_transf, M], complex[n_modes], or complex[n_transf, n_modes]): The input source strengths
                (type 1 and 3) or source modes (type 2).
            out (complex[n_modes], complex[n_transf, n_modes], complex[M], or complex[n_transf, M], optional): The array where the
                output is stored. Must be of the right size.

        Returns:
            complex[n_modes], complex[n_transf, n_modes], complex[M], or complex[n_transf, M]: The output array of the transform(s).
        """
        # coerce input/output to the plan's precision (raises on mismatch)
        if self.is_single:
            _data = _cchkf(data)
            _out = _cchkf(out)
        else:
            _data = _cchk(data)
            _out = _cchk(out)
        tp = self.type
        n_trans = self.n_trans
        nj = self.nj
        nk = self.nk
        dim = self.dim
        if tp==1 or tp==2:
            ms = self.n_modes[0]
            mt = self.n_modes[1]
            mu = self.n_modes[2]
        # input shape and size check
        if tp==2:
            valid_fshape(data.shape,n_trans,dim,ms,mt,mu,None,2)
        else:
            valid_cshape(data.shape,nj,n_trans)
        # out shape and size check
        if out is not None:
            if tp==1:
                valid_fshape(out.shape,n_trans,dim,ms,mt,mu,None,1)
            if tp==2:
                valid_cshape(out.shape,nj,n_trans)
            if tp==3:
                valid_fshape(out.shape,n_trans,dim,None,None,None,nk,3)
        # allocate out if None
        if out is None:
            if self.is_single:
                pdtype=np.complex64
            else:
                pdtype=np.complex128
            # squeeze drops the leading axis when n_trans == 1 so the
            # caller gets an unstacked array
            if tp==1:
                _out = np.squeeze(np.zeros([n_trans, mu, mt, ms], dtype=pdtype, order='C'))
            if tp==2:
                _out = np.squeeze(np.zeros([n_trans, nj], dtype=pdtype, order='C'))
            if tp==3:
                _out = np.squeeze(np.zeros([n_trans, nk], dtype=pdtype, order='C'))
        # call execute based on type and precision type
        # (for type 2 the mode array is the input, hence the swapped
        # argument order in the C call)
        if tp==1 or tp==3:
            ier = self._execute(self.inner_plan,
                                _data.ctypes.data_as(c_void_p),
                                _out.ctypes.data_as(c_void_p))
        elif tp==2:
            ier = self._execute(self.inner_plan,
                                _out.ctypes.data_as(c_void_p),
                                _data.ctypes.data_as(c_void_p))
        else:
            ier = 10
        # check error
        if ier != 0:
            err_handler(ier)
        # return out
        if out is None:
            return _out
        else:
            # copy results back into the caller-supplied array (covers the
            # case where _cchk/_cchkf had to make a converted copy)
            _copy(_out,out)
            return out
    def __del__(self):
        # destroy() is a module-level helper (defined elsewhere in this
        # file) that frees the C-side plan resources.
        destroy(self)
        self.inner_plan = None
### End of Plan class definition
### Helper functions for checking input and output variables
def _rchk(x):
"""
Check if array x is of the appropriate type
(float64, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and x.dtype is not np.dtype('float64'):
raise RuntimeError('FINUFFT data type must be float64 for double precision, data may have mixed precision types')
return np.array(x, dtype=np.float64, order='C', copy=False)
def _cchk(x):
"""
Check if array x is of the appropriate type
(complex128, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and (x.dtype is not np.dtype('complex128') and x.dtype is not np.dtype('float64')):
raise RuntimeError('FINUFFT data type must be complex128 for double precision, data may have mixed precision types')
return np.array(x, dtype=np.complex128, order='C', copy=False)
def _rchkf(x):
"""
Check if array x is of the appropriate type
(float64, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and x.dtype is not np.dtype('float32'):
raise RuntimeError('FINUFFT data type must be float32 for single precision, data may have mixed precision types')
return np.array(x, dtype=np.float32, order='C', copy=False)
def _cchkf(x):
"""
Check if array x is of the appropriate type
(complex128, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and (x.dtype is not np.dtype('complex64') and x.dtype is not np.dtype('float32')):
raise RuntimeError('FINUFFT data type must be complex64 for single precision, data may have mixed precision types')
return np.array(x, dtype=np.complex64, order='C', copy=False)
def _copy(_x, x):
"""
Copy _x to x, only if the underlying data of _x differs from that of x
"""
if _x.data != x.data:
x[:] = _x
### error handler (keep up to date with FINUFFT/include/defs.h)
def err_handler(ier):
    """Translate a FINUFFT C-library status code into a warning or error.

    Code 1 (requested tolerance unachievable) only warns; every other
    nonzero code raises RuntimeError.  Keep the messages in sync with
    FINUFFT/include/defs.h.
    """
    messages = {
        1: 'FINUFFT eps tolerance too small to achieve',
        2: 'FINUFFT malloc size requested greater than MAX_NF',
        3: 'FINUFFT spreader fine grid too small compared to kernel width',
        4: 'FINUFFT spreader nonuniform point out of range [-3pi,3pi]^d in type 1 or 2',
        5: 'FINUFFT spreader malloc error',
        6: 'FINUFFT spreader illegal direction (must be 1 or 2)',
        7: 'FINUFFT opts.upsampfac not > 1.0',
        8: 'FINUFFT opts.upsampfac not a value with known Horner polynomial rule',
        9: 'FINUFFT number of transforms ntrans invalid',
        10: 'FINUFFT transform type invalid',
        11: 'FINUFFT general malloc failure',
        12: 'FINUFFT number of dimensions dim invalid',
        13: 'FINUFFT spread_thread option invalid',
    }
    message = messages.get(ier, 'Unknown error')
    if ier != 1:
        raise RuntimeError(message)
    warnings.warn(message, Warning)
### valid sizes when setpts
def valid_setpts(tp,dim,x,y,z,s,t,u):
    """Validate nonuniform point arrays and return ``(nj, nk)``.

    ``nj`` is the number of points (length of ``x``); ``nk`` is the
    number of type-3 target frequencies (length of ``s``), or 0 for
    types 1 and 2.  Raises RuntimeError on any shape or length mismatch.
    """
    if x.ndim != 1:
        raise RuntimeError('FINUFFT x must be a vector')
    nj = x.size
    if tp != 3:
        nk = 0
    else:
        nk = s.size
        if s.ndim != 1:
            raise RuntimeError('FINUFFT s must be a vector')
    # Per-extra-dimension checks: (min dim, point array + messages,
    # type-3 frequency array + messages).
    axis_checks = (
        (2, y, 'FINUFFT y must be a vector', 'FINUFFT y must have same length as x',
            t, 'FINUFFT t must be a vector', 'FINUFFT t must have same length as s'),
        (3, z, 'FINUFFT z must be a vector', 'FINUFFT z must have same length as x',
            u, 'FINUFFT u must be a vector', 'FINUFFT u must have same length as s'),
    )
    for min_dim, pt, pt_vec_msg, pt_len_msg, fq, fq_vec_msg, fq_len_msg in axis_checks:
        if dim < min_dim:
            continue
        if pt.ndim != 1:
            raise RuntimeError(pt_vec_msg)
        if pt.size != nj:
            raise RuntimeError(pt_len_msg)
        if tp == 3:
            if fq.ndim != 1:
                raise RuntimeError(fq_vec_msg)
            if fq.size != nk:
                raise RuntimeError(fq_len_msg)
    return (nj, nk)
### ntransf for type 1 and type 2
def valid_ntr_tp12(dim,shape,n_transin,n_modesin):
    """Infer (n_trans, n_modes) from a type-1 output / type-2 input shape.

    A rank dim shape means a single transform; rank dim+1 means the leading
    axis counts stacked transforms. Cross-checks against caller-supplied
    n_transin / n_modesin when those are not None.
    """
    rank = len(shape)
    if rank == dim + 1:
        n_trans = shape[0]
        n_modes = shape[1:dim + 1]
    elif rank == dim:
        n_trans = 1
        n_modes = shape
    else:
        raise RuntimeError('FINUFFT type 1 output dimension or type 2 input dimension must be either dim or dim+1(n_trans>1)')
    if n_transin is not None and n_trans != n_transin:
        raise RuntimeError('FINUFFT input n_trans and output n_trans do not match')
    if n_modesin is not None and n_modes != n_modesin:
        raise RuntimeError('FINUFFT input n_modes and output n_modes do not match')
    return (n_trans, n_modes)
### valid number of transforms
def valid_ntr(x,c):
    """Infer the number of stacked transforms from strengths c vs points x.

    c.size must be an exact multiple of x.size; the quotient is n_trans.
    Also validates c.shape via valid_cshape.
    """
    n_trans, remainder = divmod(c.size, x.size)
    if remainder != 0:
        raise RuntimeError('FINUFFT c.size must be divisible by x.size')
    valid_cshape(c.shape, x.size, n_trans)
    return n_trans
### valid shape of c
def valid_cshape(cshape,xsize,n_trans):
    """Check the strengths array shape: (xsize,) for a single transform,
    (n_trans, xsize) for stacked transforms. Raises RuntimeError otherwise.
    """
    if n_trans == 1:
        if len(cshape) != 1:
            raise RuntimeError('FINUFFT c.ndim must be 1 if n_trans = 1')
        if cshape[0] != xsize:
            raise RuntimeError('FINUFFT c.size must be same as x.size if n_trans = 1')
    elif n_trans > 1:
        if len(cshape) != 2:
            raise RuntimeError('FINUFFT c.ndim must be 2 if n_trans > 1')
        if cshape[1] != xsize or cshape[0] != n_trans:
            raise RuntimeError('FINUFFT c.shape must be (n_trans, x.size) if n_trans > 1')
### valid shape of f
def valid_fshape(fshape,n_trans,dim,ms,mt,mu,nk,tp):
    """Check the mode/value array shape for a given transform type.

    Type 3: (nk,) or (n_trans, nk). Types 1/2: rank dim (or dim+1 with a
    leading n_trans axis) whose trailing axes match (..., mu, mt, ms).
    Raises RuntimeError on any mismatch.
    """
    rank = len(fshape)
    if tp == 3:
        if n_trans == 1:
            if rank != 1:
                raise RuntimeError('FINUFFT f.ndim must be 1 for type 3 if n_trans = 1')
            if fshape[0] != nk:
                raise RuntimeError('FINUFFT f.size of must be nk if n_trans = 1')
        elif n_trans > 1:
            if rank != 2:
                raise RuntimeError('FINUFFT f.ndim must be 2 for type 3 if n_trans > 1')
            if fshape[1] != nk or fshape[0] != n_trans:
                raise RuntimeError('FINUFFT f.shape must be (n_trans, nk) if n_trans > 1')
        return
    # types 1 and 2: rank plus an optional leading n_trans axis
    if n_trans == 1 and rank != dim:
        raise RuntimeError('FINUFFT f.ndim must be same as the problem dimension for type 1 or 2 if n_trans = 1')
    if n_trans > 1:
        if rank != dim + 1:
            raise RuntimeError('FINUFFT f.ndim must be same as the problem dimension + 1 for type 1 or 2 if n_trans > 1')
        if fshape[0] != n_trans:
            raise RuntimeError('FINUFFT f.shape[0] must be n_trans for type 1 or 2 if n_trans > 1')
    # trailing axes carry the mode counts: (-1)=ms, (-2)=mt, (-3)=mu
    expected_modes = (ms, mt, mu)
    for axis in range(dim):
        if fshape[-(axis + 1)] != expected_modes[axis]:
            raise RuntimeError('FINUFFT f.shape is not consistent with n_modes')
### check if dtype is single or double
def is_single_dtype(dtype):
    """Return True for single-precision dtypes (float32/complex64),
    False for double (float64/complex128), else raise RuntimeError.
    """
    dt = np.dtype(dtype)
    double_types = (np.dtype('float64'), np.dtype('complex128'))
    single_types = (np.dtype('float32'), np.dtype('complex64'))
    if dt in double_types:
        return False
    if dt in single_types:
        return True
    raise RuntimeError('FINUFFT dtype(precision type) must be single or double')
### kwargs opt set
def setkwopts(opt,**kwargs):
    """Apply keyword options to a nufft_opts struct.

    Known attribute names are set on *opt*; 'dtype' is intercepted (unless
    opt itself exposes a 'dtype' attribute) and controls the returned
    precision flag; anything else produces a warning. Returns True when the
    requested dtype is single precision.
    """
    warnings.simplefilter('always')
    dtype = 'double'
    for name in kwargs:
        val = kwargs[name]
        if hasattr(opt, name):
            setattr(opt, name, val)
        elif name == 'dtype':
            dtype = val
        else:
            warnings.warn('Warning: nufft_opts does not have attribute "' + name + '"', Warning)
    warnings.simplefilter('default')
    return is_single_dtype(dtype)
### destroy
def destroy(plan):
    """Release the native FINUFFT plan held by *plan*; no-op for None.
    A nonzero status from the C layer is routed through err_handler.
    """
    if plan is None:
        return
    status = plan._destroy(plan.inner_plan)
    if status != 0:
        err_handler(status)
### invoke guru interface, this function is used for simple interfaces
def invoke_guru(dim,tp,x,y,z,c,s,t,u,f,isign,eps,n_modes,**kwargs):
    """Run a type-*tp*, *dim*-dimensional NUFFT through the guru (Plan)
    interface; shared backend for all the simple nufft*d* wrappers.

    Args:
        dim (int): problem dimension (1, 2 or 3).
        tp (int): transform type (1, 2 or 3).
        x, y, z: nonuniform point coordinates (y/z are None when unused).
        c: source strengths (types 1 and 3) or output array (type 2).
        s, t, u: nonuniform target frequencies, type 3 only (else None).
        f: output array (types 1 and 3) or input coefficients (type 2).
        isign (int): sign of the imaginary unit in the exponential.
        eps (float): requested precision.
        n_modes (int, tuple or None): number of uniform modes (type 1 only).
        **kwargs: forwarded to Plan (see opts documentation).

    Returns:
        The transform output array.

    Raises:
        RuntimeError: on invalid dtype, shape or n_modes specification.
    """
    # infer precision from x. BUG FIX: compare dtypes with ==, not `is` --
    # np.dtype equality is the documented comparison; identity of dtype
    # objects is an implementation detail.
    if x.dtype == np.dtype('float64'):
        pdtype = 'double'
    elif x.dtype == np.dtype('float32'):
        pdtype = 'single'
    else:
        raise RuntimeError('FINUFFT x dtype should be float64 for double precision or float32 for single precision')
    # check n_modes type, n_modes must be a tuple or an integer
    if n_modes is not None:
        if (not isinstance(n_modes, tuple)) and (not isinstance(n_modes, numbers.Integral)):
            raise RuntimeError('FINUFFT input n_modes must be a tuple or an integer')
        # sanity check for n_modes given as a tuple
        if isinstance(n_modes, tuple):
            if len(n_modes) != dim:
                raise RuntimeError('FINUFFT input n_modes dimension does not match problem dimension')
            if not all(isinstance(elmi, numbers.Integral) for elmi in n_modes):
                raise RuntimeError('FINUFFT all elements of input n_modes must be integer')
        # if n_modes is an integer, replicate it for every dimension
        if isinstance(n_modes, numbers.Integral):
            n_modes = (n_modes,)*dim
    # infer n_modes/n_trans from input/output arrays
    if tp == 1:
        n_trans = valid_ntr(x, c)
        if n_modes is None and f is None:
            raise RuntimeError('FINUFFT type 1 input must supply n_modes or output vector, or both')
        if f is not None:
            (n_trans, n_modes) = valid_ntr_tp12(dim, f.shape, n_trans, n_modes)
    elif tp == 2:
        (n_trans, n_modes) = valid_ntr_tp12(dim, f.shape, None, None)
    else:
        n_trans = valid_ntr(x, c)
    # plan: type 3 is sized by dim, types 1/2 by the mode counts
    if tp == 3:
        plan = Plan(tp, dim, n_trans, eps, isign, **dict(kwargs, dtype=pdtype))
    else:
        plan = Plan(tp, n_modes, n_trans, eps, isign, **dict(kwargs, dtype=pdtype))
    # set the nonuniform points (and type-3 target frequencies)
    plan.setpts(x, y, z, s, t, u)
    # execute: types 1/3 consume c and fill f; type 2 consumes f and fills c
    if tp == 1 or tp == 3:
        out = plan.execute(c, f)
    else:
        out = plan.execute(f, c)
    return out
def _wrap_docstring(docstring, tw=80, min_spacing=2):
lines = docstring.expandtabs().splitlines()
for k, line in enumerate(lines):
if len(line) > tw:
last_space = line[:tw].rfind(' ')
indent_level = line.rfind(' ' * min_spacing) + min_spacing
lines[k] = line[:last_space]
new_line = (' ' * indent_level) + line[last_space + 1:]
# Check if the indentation level continues on next line. If so,
# concatenate, otherwise insert new line.
if len(lines[k + 1]) - len(lines[k + 1].lstrip()) >= indent_level:
lines[k + 1] = new_line + ' ' + lines[k + 1].lstrip()
else:
lines.insert(k + 1, new_line)
docstring = '\n'.join(lines)
return docstring
def _set_nufft_doc(f, dim, tp, example='python/test/accuracy_speed_tests.py'):
    """Attach a generated docstring to the simple-interface function *f*.

    Selects the template for transform type *tp* (1, 2 or 3), fills in the
    dimension-dependent placeholders for *dim* (1, 2 or 3), wraps long
    lines, and assigns the result to ``f.__doc__``. *example* names the
    example script(s) referenced at the end of the docstring.
    """
    doc_nufft1 = \
    """{dim}D type-1 (nonuniform to uniform) complex NUFFT
    ::
      {pt_spacing}    M-1
      f[{pt_idx}] =  SUM c[j] exp(+/-i {pt_inner})
      {pt_spacing}    j=0
        for {pt_constraint}
    Args:
{pts_doc}
        c (complex[M] or complex[ntransf, M]): source strengths.
        n_modes (integer or integer tuple of length {dim}, optional): number of
            uniform Fourier modes requested {modes_tuple}. May be even or odd; in
            either case, modes {pt_idx} are integers satisfying {pt_constraint}.
            Must be specified if ``out`` is not given.
        out (complex[{modes}] or complex[ntransf, {modes}], optional): output array
            for Fourier mode values. If ``n_modes`` is specifed, the shape
            must match, otherwise ``n_modes`` is inferred from ``out``.
        eps (float, optional): precision requested (>1e-16).
        isign (int, optional): if non-negative, uses positive sign in
            exponential, otherwise negative sign.
        **kwargs (optional): for more options, see :ref:`opts`.
    .. note::
        The output is written into the ``out`` array if supplied.
    Returns:
        complex[{modes}] or complex[ntransf, {modes}]: The resulting array.
    Example:
    ::
        # number of nonuniform points
        M = 100
        # the nonuniform points
{pts_generate}
        # their complex strengths
        c = (np.random.standard_normal(size=M)
             + 1J * np.random.standard_normal(size=M))
        # desired number of Fourier modes
        {modes} = {sample_modes}
        # calculate the type-1 NUFFT
        f = finufft.nufft{dim}d1({pts}, c, {modes_tuple})
    See also ``{example}``.
    """
    doc_nufft2 = \
    """{dim}D type-2 (uniform to nonuniform) complex NUFFT
    ::
        c[j] = SUM f[{pt_idx}] exp(+/-i {pt_inner})
            {pt_idx}
        for j = 0, ..., M-1, where the sum is over {pt_constraint}
    Args:
{pts_doc}
        f (complex[{modes}] or complex[ntransf, {modes}]): Fourier mode
            coefficients, where {modes} may be even or odd. In either case
            the mode indices {pt_idx} satisfy {pt_constraint}.
        out (complex[M] or complex[ntransf, M], optional): output array
            at targets.
        eps (float, optional): precision requested (>1e-16).
        isign (int, optional): if non-negative, uses positive sign in
            exponential, otherwise negative sign.
        **kwargs (optional): for more options, see :ref:`opts`.
    .. note::
        The output is written into the ``out`` array if supplied.
    Returns:
        complex[M] or complex[ntransf, M]: The resulting array.
    Example:
    ::
        # number of nonuniform points
        M = 100
        # the nonuniform points
{pts_generate}
        # number of Fourier modes
        {modes} = {sample_modes}
        # the Fourier mode coefficients
        f = (np.random.standard_normal(size={modes_tuple})
             + 1J * np.random.standard_normal(size={modes_tuple}))
        # calculate the type-2 NUFFT
        c = finufft.nufft{dim}d2({pts}, f)
    See also ``{example}``.
    """
    doc_nufft3 = \
    """{dim}D type-3 (nonuniform to nonuniform) complex NUFFT
    ::
         M-1
    f[k] = SUM c[j] exp(+/-i {pt_inner_type3}),
         j=0
        for k = 0, ..., N-1
    Args:
{src_pts_doc}
        c (complex[M] or complex[ntransf, M]): source strengths.
{target_pts_doc}
        out (complex[N] or complex[ntransf, N]): output values at target frequencies.
        eps (float, optional): precision requested (>1e-16).
        isign (int, optional): if non-negative, uses positive sign in
            exponential, otherwise negative sign.
        **kwargs (optional): for more options, see :ref:`opts`.
    .. note::
        The output is written into the ``out`` array if supplied.
    Returns:
        complex[M] or complex[ntransf, M]: The resulting array.
    Example:
    ::
        # number of source points
        M = 100
        # number of target points
        N = 200
        # the source points
{pts_generate}
        # the target points
{target_pts_generate}
        # their complex strengths
        c = (np.random.standard_normal(size=M)
             + 1J * np.random.standard_normal(size=M))
        # calcuate the type-3 NUFFT
        f = finufft.nufft{dim}d3({pts}, c, {target_pts})
    See also ``{example}``.
    """
    doc_nufft = {1: doc_nufft1, 2: doc_nufft2, 3: doc_nufft3}
    pts = ('x', 'y', 'z')
    target_pts = ('s', 't', 'u')
    sample_modes = (50, 75, 100)
    dims = range(1, dim + 1)
    # build the dimension-dependent substitution table for the templates
    v = {}
    v['dim'] = dim
    v['modes'] = ', '.join('N{}'.format(i) for i in dims)
    v['modes_tuple'] = '(' + v['modes'] + (', ' if dim == 1 else '') + ')'
    v['pt_idx'] = ', '.join('k{}'.format(i) for i in dims)
    v['pt_spacing'] = ' ' * (len(v['pt_idx']) - 2)
    v['pt_inner'] = ' + '.join('k{0} {1}(j)'.format(i, x) for i, x in zip(dims, pts[:dim]))
    v['pt_constraint'] = ', '.join('-N{0}/2 <= k{0} <= (N{0}-1)/2'.format(i) for i in dims)
    v['pts_doc'] = '\n'.join('    {} (float[M]): nonuniform points, valid only in [-3pi, 3pi].'.format(x) for x in pts[:dim])
    # for example
    v['pts'] = ', '.join(str(x) for x in pts[:dim])
    v['pts_generate'] = '\n'.join('    {} = 2 * np.pi * np.random.uniform(size=M)'.format(x) for x in pts[:dim])
    v['sample_modes'] = ', '.join(str(n) for n in sample_modes[:dim])
    v['example'] = example
    # for type 3 only
    v['src_pts_doc'] = '\n'.join('    {} (float[M]): nonuniform source points.'.format(x) for x in pts[:dim])
    v['target_pts_doc'] = '\n'.join('    {} (float[N]): nonuniform target points.'.format(x) for x in target_pts[:dim])
    v['pt_inner_type3'] = ' + '.join('{0}[k] {1}[j]'.format(s, x) for s, x in zip(target_pts[:dim], pts[:dim]))
    # for type 3 example only
    v['target_pts'] = ', '.join(str(x) for x in target_pts[:dim])
    v['target_pts_generate'] = '\n'.join('    {} = 2 * np.pi * np.random.uniform(size=N)'.format(x) for x in target_pts[:dim])
    # parenthesize multi-term exponent arguments for readability
    if dim > 1:
        v['pt_inner'] = '(' + v['pt_inner'] + ')'
        v['pt_inner_type3'] = '(' + v['pt_inner_type3'] + ')'
    f.__doc__ = _wrap_docstring(doc_nufft[tp].format(**v))
### easy interfaces
### 1d1
def nufft1d1(x,c,n_modes=None,out=None,eps=1e-6,isign=1,**kwargs):
    # 1D type-1 (nonuniform -> uniform); full docstring attached at import
    # time by _set_nufft_doc.
    return invoke_guru(1,1,x,None,None,c,None,None,None,out,isign,eps,n_modes,**kwargs)
### 1d2
def nufft1d2(x,f,out=None,eps=1e-6,isign=-1,**kwargs):
    # 1D type-2 (uniform -> nonuniform); full docstring attached at import
    # time by _set_nufft_doc.
    return invoke_guru(1,2,x,None,None,out,None,None,None,f,isign,eps,None,**kwargs)
### 1d3
def nufft1d3(x,c,s,out=None,eps=1e-6,isign=1,**kwargs):
    # 1D type-3 (nonuniform -> nonuniform); full docstring attached at
    # import time by _set_nufft_doc.
    return invoke_guru(1,3,x,None,None,c,s,None,None,out,isign,eps,None,**kwargs)
### 2d1
def nufft2d1(x,y,c,n_modes=None,out=None,eps=1e-6,isign=1,**kwargs):
    # 2D type-1 (nonuniform -> uniform); full docstring attached at import
    # time by _set_nufft_doc.
    return invoke_guru(2,1,x,y,None,c,None,None,None,out,isign,eps,n_modes,**kwargs)
### 2d2
def nufft2d2(x,y,f,out=None,eps=1e-6,isign=-1,**kwargs):
    # 2D type-2 (uniform -> nonuniform); full docstring attached at import
    # time by _set_nufft_doc.
    return invoke_guru(2,2,x,y,None,out,None,None,None,f,isign,eps,None,**kwargs)
### 2d3
def nufft2d3(x,y,c,s,t,out=None,eps=1e-6,isign=1,**kwargs):
    # 2D type-3 (nonuniform -> nonuniform); full docstring attached at
    # import time by _set_nufft_doc.
    return invoke_guru(2,3,x,y,None,c,s,t,None,out,isign,eps,None,**kwargs)
### 3d1
def nufft3d1(x,y,z,c,n_modes=None,out=None,eps=1e-6,isign=1,**kwargs):
    # 3D type-1 (nonuniform -> uniform); full docstring attached at import
    # time by _set_nufft_doc.
    return invoke_guru(3,1,x,y,z,c,None,None,None,out,isign,eps,n_modes,**kwargs)
### 3d2
def nufft3d2(x,y,z,f,out=None,eps=1e-6,isign=-1,**kwargs):
    # 3D type-2 (uniform -> nonuniform); full docstring attached at import
    # time by _set_nufft_doc.
    return invoke_guru(3,2,x,y,z,out,None,None,None,f,isign,eps,None,**kwargs)
### 3d3
def nufft3d3(x,y,z,c,s,t,u,out=None,eps=1e-6,isign=1,**kwargs):
    # 3D type-3 (nonuniform -> nonuniform); full docstring attached at
    # import time by _set_nufft_doc.
    return invoke_guru(3,3,x,y,z,c,s,t,u,out,isign,eps,None,**kwargs)
# Attach the generated docstrings to every simple-interface wrapper; the
# optional fourth argument points at additional example scripts.
_set_nufft_doc(nufft1d1, 1, 1, 'python/examples/simple1d1.py, python/examples/simpleopts1d1.py')
_set_nufft_doc(nufft1d2, 1, 2)
_set_nufft_doc(nufft1d3, 1, 3)
_set_nufft_doc(nufft2d1, 2, 1, 'python/examples/simple2d1.py, python/examples/many2d1.py')
_set_nufft_doc(nufft2d2, 2, 2)
_set_nufft_doc(nufft2d3, 2, 3)
_set_nufft_doc(nufft3d1, 3, 1)
_set_nufft_doc(nufft3d2, 3, 2)
_set_nufft_doc(nufft3d3, 3, 3)
|
<reponame>gabrielepessoa/programino
'''
Players are Python objects with a ``__call__`` method
defined to accept a Game instance as the sole argument.
Players return None, and leave the input Game unmodified,
except for its valid_moves attribute. This value may be
replaced with another tuple containing the same moves,
but sorted in decreasing order of preference. Players
may be applied one after another for easy composability.
.. code-block:: python
>>> import dominoes
>>> g = dominoes.Game.new()
>>> g.valid_moves
(([0|0], True), ([3|4], True), ([1|3], True), ([2|2], True), ([3|3], True), ([2|3], True), ([5|6], True))
>>> dominoes.players.random(g)
>>> g.valid_moves
(([5|6], True), ([1|3], True), ([3|3], True), ([2|2], True), ([0|0], True), ([2|3], True), ([3|4], True))
.. code-block:: python
def double(game):
\'\'\'
Prefers to play doubles.
:param Game game: game to play
:return: None
\'\'\'
game.valid_moves = tuple(sorted(game.valid_moves, key=lambda m: m[0].first != m[0].second))
'''
import collections
import copy
import programino
import random as rand
def identity(game):
    '''
    Leaves move preferences unchanged.
    :param Game game: game to play
    :return: None
    '''
    # intentionally a no-op: the current ordering is the preference
    return None
class counter:
    '''
    Wraps another player, preferring moves in the same order as the
    wrapped player while counting how many times it gets called. An
    instance of this class must first be initialized before it can
    be called in the usual way.
    :param callable player: player that determines the move preferences of
                            this player. The identity player is the default.
    :param str name: the name of this player. The default is the name
                     of this class.
    :var int count: the amount of times that this player has been called.
    :var str __name__: the name of this player.
    '''
    def __init__(self, player=identity, name=None):
        self.count = 0
        self._player = player
        self.__name__ = type(self).__name__ if name is None else name

    def __call__(self, game):
        self.count = self.count + 1
        return self._player(game)
def random(game):
    '''
    Prefers moves randomly.
    :param Game game: game to play
    :return: None
    '''
    # sorting by an independent uniform key yields a uniform shuffle
    shuffle_key = lambda _move: rand.random()
    game.valid_moves = tuple(sorted(game.valid_moves, key=shuffle_key))
def reverse(game):
    '''
    Reverses move preferences.
    :param Game game: game to play
    :return: None
    '''
    game.valid_moves = tuple(game.valid_moves[::-1])
def bota_gorda(game):
    '''
    Prefers to play dominoes with higher point values.
    :param Game game: game to play
    :return: None
    '''
    def pip_total(move):
        domino = move[0]
        return domino.first + domino.second
    # sorted() is stable with reverse=True, so ties keep their order,
    # exactly as sorting ascending by the negated total did
    game.valid_moves = tuple(sorted(game.valid_moves, key=pip_total, reverse=True))
def double(game):
    '''
    Prefers to play doubles.
    :param Game game: game to play
    :return: None
    '''
    def not_a_double(move):
        domino = move[0]
        return domino.first != domino.second
    # False (doubles) sorts before True, so doubles come first; stability
    # preserves the original order within each group
    game.valid_moves = tuple(sorted(game.valid_moves, key=not_a_double))
class omniscient:
    '''
    Prefers to play the move that maximizes this player's final score,
    assuming that all other players play with the same strategy. This
    player "cheats" by looking at all hands to make its decision. An
    instance of this class must first be initialized before it can be
    called in the usual way.
    :param int start_move: move number at which to start applying this
                           player. If this player is called before the
                           specified move number, it will have no effect.
                           Moves are 0-indexed. The default is 0.
    :param callable player: player used to sort moves to be explored
                            in the underlying call to alphabeta search.
                            Ordering better moves first may significantly
                            reduce the amount of moves that need to be
                            explored. The identity player is the default.
    :param str name: the name of this player. The default is the name
                     of this class.
    :var str __name__: the name of this player
    '''
    def __init__(self, start_move=0, player=identity, name=None):
        self._start_move = start_move
        self._player = player
        if name is None:
            self.__name__ = type(self).__name__
        else:
            self.__name__ = name
    def __call__(self, game):
        # do not perform a potentially slow operation if it is
        # too early in the game or if there is only one valid move
        if len(game.moves) < self._start_move or len(game.valid_moves) == 1:
            return
        # so that we don't modify the original game
        game_copy = copy.deepcopy(game)
        # for performance
        game_copy.skinny_board()
        # perform an alphabeta search to find the optimal move sequence.
        # BUG FIX: this module imports `programino` (see the imports at the
        # top of the file); the previous reference to `dominoes.search`
        # raised NameError at runtime.
        moves, _ = programino.search.alphabeta(game_copy, player=self._player)
        # place the optimal move at the beginning of game.valid_moves,
        # while leaving the rest of the ordering unchanged
        game.valid_moves = (moves[0],) + tuple(m for m in game.valid_moves if m != moves[0])
class probabilistic_alphabeta:
    '''
    This player repeatedly assumes the other players' hands, runs alphabeta search,
    and prefers moves that are most frequently optimal. It takes into account all
    known information to determine what hands the other players could possibly have,
    including its hand, the sizes of the other players' hands, and the moves played
    by every player, including the passes. An instance of this class must first be
    initialized before it can be called in the usual way.
    :param int start_move: move number at which to start applying this
                           player. If this player is called before the
                           specified move number, it will have no effect.
                           Moves are 0-indexed. The default is 0.
    :param int sample_size: the number of times to assign random possible
                            hands to other players and run alphabeta search
                            before deciding move preferences. By default
                            considers all hands that other players could
                            possibly have.
    :param callable player: player used to sort moves to be explored
                            in the underlying call to alphabeta search.
                            Ordering better moves first may significantly
                            reduce the amount of moves that need to be
                            explored. The identity player is the default.
    :param str name: the name of this player. The default is the name
                     of this class.
    :var str __name__: the name of this player
    '''
    def __init__(self, start_move=0, sample_size=float('inf'), player=identity, name=None):
        self._start_move = start_move
        self._sample_size = sample_size
        self._player = player
        if name is None:
            self.__name__ = type(self).__name__
        else:
            self.__name__ = name
    def __call__(self, game):
        # do not perform a potentially slow operation if it is
        # too early in the game or if there is only one valid move
        if len(game.moves) < self._start_move or len(game.valid_moves) == 1:
            return
        if self._sample_size == float('inf'):
            # by default consider all hands the other players could possibly have
            hands = game.all_possible_hands()
        else:
            # otherwise obtain a random sample
            hands = (game.random_possible_hands() for _ in range(self._sample_size))
        # tally how often each move is alphabeta-optimal over the sampled
        # hands (renamed from `counter`, which shadowed the module-level
        # `counter` player class)
        optimal_tally = collections.Counter()
        for h in hands:
            # do not modify the original game
            game_copy = copy.deepcopy(game)
            # set the possible hands
            game_copy.hands = h
            # for performance
            game_copy.skinny_board()
            # run alphabeta and record the optimal move.
            # BUG FIX: this module imports `programino`; the previous
            # reference to `dominoes.search` raised NameError at runtime.
            optimal_tally.update([
                programino.search.alphabeta(game_copy, player=self._player)[0][0]
            ])
        # prefer moves that are more frequently optimal
        game.valid_moves = tuple(sorted(game.valid_moves, key=lambda m: -optimal_tally[m]))
|
<filename>run_lda.py
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 22:43:33 2019
@author: dell
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 31 11:27:24 2019
@author: dell
"""
import sys
import argparse
import json
import numpy as np
from LDA import lda_model, corp_dict
import random as rd
from gensim.models import CoherenceModel
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
if __name__=='__main__':
    # Command-line driver: either sweep topic counts and tf-idf settings to
    # select an LDA model by held-out perplexity (--train, the default), or
    # fit and visualize a single model with the given parameters (--no-train).
    parser = argparse.ArgumentParser()
    parser.add_argument("-k","--k",type = int,default = 5)  # topic number
    # paired flags --tfidf / --no-tfidf share dest 'tf_idf' (default: on)
    parser.add_argument('--tfidf', dest='tf_idf', action='store_true')
    parser.add_argument('--no-tfidf', dest='tf_idf', action='store_false')
    parser.set_defaults(tf_idf=True)
    # paired flags --train / --no-train share dest 'train' (default: on).
    parser.add_argument('--train', dest='train', action='store_true')
    parser.add_argument('--no-train', dest='train', action='store_false')
    # BUG FIX: the default was previously set under the unused key
    # 'train_model', so args.train silently defaulted to False; the
    # dest is 'train' and its intended default is True.
    parser.set_defaults(train=True)
    # relatively loose parameters for the lda model (gensim)
    parser.add_argument("-cksz","--chunksize",type = int,default = 32)
    parser.add_argument("-ps","--passes",type = int,default = 10)
    parser.add_argument("-ite","--iteration",type = int,default = 5)
    parser.add_argument("-db","--dictionary_below",type = int,default = 10)
    parser.add_argument("-da","--dictionary_above",type = float,default = 0.9)
    parser.add_argument("-al","--alpha",type = str,default = 'asymmetric')
    parser.add_argument("-dc","--decay",type = float,default = 0.5)
    args = parser.parse_args()
    print('Get dic and corpus!')
    # build the dictionary/corpus wrapper once; it is reused for every model
    cp_dic = corp_dict(tf_idf = args.tf_idf,dic_below = args.dictionary_below,dic_above = args.dictionary_above)
    corpus = cp_dic.corpus
    dictionary = cp_dic.dictionary
    processed_docs = cp_dic.processed_docs
    # load the raw documents (used by the visualization helpers)
    with open('test.json','rb') as inp:
        data = pd.DataFrame(json.load(inp))
    def train_model():
        """Sweep topic counts with and without tf-idf, score each model by
        log-perplexity on a random 10% hold-out split, report the best
        setting, and plot both perplexity curves to train.jpg."""
        print('choose topics!')
        top_lst = list(range(2,11)) + list(range(12,20,2)) + list(range(20,51,3)) + list(range(55,101,5))
        tfidf_v = [True,False]
        min_prep = 10000000  # init: lowest perplexity seen so far
        min_k=-1
        min_tfidf = None
        # record per-setting perplexities so both curves can be plotted
        tfidf_prep_v = []
        prep_v = []
        for tf_idf in tfidf_v:
            for k in top_lst:
                print(k)
                # random 90/10 train/test split of the corpus
                train_idx = rd.sample(range(len(corpus)),int(0.9*len(corpus)))
                test_idx = list(set(range(len(corpus))).difference(set(train_idx)))
                train_corp = cp_dic.get_extra(np.array(processed_docs)[train_idx],tf_idf)
                test_corp = cp_dic.get_extra(np.array(processed_docs)[test_idx],tf_idf)
                _lda_model = lda_model(topic_num=k,corpus=train_corp,dictionary=dictionary,ite=args.iteration,ps=args.passes,
                                   ck_size=args.chunksize,alpha=args.alpha,tf_idf=tf_idf,decay = args.decay)
                cur_prep = _lda_model.get_prep(test_corp)
                if cur_prep < min_prep:
                    min_k,min_tfidf = k,tf_idf
                    min_prep = cur_prep
                    print(min_k,min_tfidf)
                print('topic:{0}--tf_idf{1}->prep:{2}'.format(k,tf_idf,cur_prep))
                if tf_idf:
                    tfidf_prep_v.append(cur_prep)
                else:
                    prep_v.append(cur_prep)
        print('min_k:{0},min_tfidf:{1},min_prep:{2}'.format(min_k,min_tfidf,min_prep))
        # save the raw perplexity curves to disk for safety
        with open("perplexity_tfidf.json", 'w', encoding="utf-8") as outp1:
            outp1.write(json.dumps(tfidf_prep_v, indent=4, ensure_ascii=False))
        with open("perplexity.json", 'w', encoding="utf-8") as outp2:
            outp2.write(json.dumps(prep_v, indent=4, ensure_ascii=False))
        # use a non-interactive backend so plotting works on headless servers.
        # NOTE(review): matplotlib.use() after pyplot has been imported may
        # not take effect -- normally it must run before importing pyplot.
        matplotlib.use('pdf')
        plt.figure(figsize=(20,8),dpi=80)  # set the figure size
        plt.plot(top_lst,tfidf_prep_v,label="tf_idf",color="#F08080")
        plt.plot(top_lst,prep_v,label="no-tf_idf",color="#DB7093",linestyle="--")
        plt.xlabel('number of topics')
        plt.ylabel('log_perplexity')
        plt.grid(alpha=0.4,linestyle=':')
        # add the legend; loc chooses its placement
        plt.legend(loc="upper left")
        plt.savefig('train.jpg')
    if args.train:
        # only plots the sweep; the best model is read off by eye rather
        # than returned
        train_model()
    else:
        _lda_model = lda_model(topic_num=args.k,corpus=corpus,dictionary=dictionary,ite=args.iteration,ps=args.passes,
                           ck_size=args.chunksize,alpha=args.alpha,tf_idf=args.tf_idf,decay = args.decay)
        _lda_model.lda_vis()
|
<filename>vframe/vframe/settings/paths.py
import os
from os.path import join
import logging
from vframe.settings import vframe_cfg as vcfg
from vframe.settings import types
class Paths:
    """Central registry of filesystem paths for VFRAME / Syrian Archive data.

    Every lookup is rooted at a data-store mount point (see DataStorePath),
    so the same relative layout resolves against any configured store.
    """

    # class properties
    MAPPINGS_DATE = vcfg.SUGARCUBE_DATES[0]
    DIR_APP_VFRAME = 'apps/vframe/'
    DIR_APP_SA = 'apps/syrianarchive'
    DIR_MODELS_VFRAME = join(DIR_APP_VFRAME, 'models')
    DIR_DARKNET = join(DIR_MODELS_VFRAME, 'darknet/pjreddie')
    DIR_DARKNET_VFRAME = join(DIR_MODELS_VFRAME, 'darknet/vframe')
    DIR_MEDIA = join(DIR_APP_SA, 'media')
    DIR_METADATA = join(DIR_APP_SA, 'metadata')
    DIR_RECORDS = join(DIR_APP_SA, 'records')
    DIR_REPORTS = join(DIR_APP_SA, 'reports')

    def __init__(self):
        pass

    @classmethod
    def DataStorePath(cls, data_store=types.DataStore.HDD):
        """Return the mount point for the given data store enum member."""
        return '/data_store_{}'.format(data_store.name.lower())

    # -------------------------------------------------------------------------------
    # Darknet Paths

    @classmethod
    def _darknet_path(cls, table, data_store, opt_net, as_bytes=False):
        """Resolve a DetectorNet-keyed relative-path table against a store.

        BUG FIX: the previous per-method if/elif chains left `fp` unbound
        for an unrecognized net and died with UnboundLocalError; an unknown
        net now raises a descriptive ValueError instead.
        """
        try:
            rel = table[opt_net]
        except KeyError:
            raise ValueError('no darknet path configured for {}'.format(opt_net))
        fp = join(cls.DataStorePath(data_store), rel)
        if as_bytes:
            return bytes(fp, encoding="utf-8")
        return fp

    @classmethod
    def darknet_classes(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO):
        """Return the class-names file path for the given detector net."""
        table = {
            types.DetectorNet.COCO: join(cls.DIR_DARKNET, 'data', 'coco.names'),
            types.DetectorNet.COCO_SPP: join(cls.DIR_DARKNET, 'data', 'coco.names'),
            types.DetectorNet.VOC: join(cls.DIR_DARKNET, 'data', 'voc.names'),
            types.DetectorNet.OPENIMAGES: join(cls.DIR_DARKNET, 'data', 'openimages.names'),
            types.DetectorNet.SUBMUNITION: join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'classes.txt'),
        }
        return cls._darknet_path(table, data_store, opt_net)

    @classmethod
    def darknet_data(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
        """Return the .data config path for the net (bytes for the C API by default)."""
        table = {
            types.DetectorNet.COCO: join(cls.DIR_DARKNET, 'cfg', 'coco.data'),
            types.DetectorNet.COCO_SPP: join(cls.DIR_DARKNET, 'cfg', 'coco.data'),
            types.DetectorNet.VOC: join(cls.DIR_DARKNET, 'cfg', 'voc.data'),
            types.DetectorNet.OPENIMAGES: join(cls.DIR_DARKNET, 'cfg', 'openimages.data'),
            types.DetectorNet.SUBMUNITION: join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'meta.data'),
        }
        return cls._darknet_path(table, data_store, opt_net, as_bytes)

    @classmethod
    def darknet_cfg(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
        """Return the network .cfg path for the net (bytes for the C API by default)."""
        table = {
            types.DetectorNet.COCO: join(cls.DIR_DARKNET, 'cfg', 'yolov3.cfg'),
            types.DetectorNet.COCO_SPP: join(cls.DIR_DARKNET, 'cfg', 'yolov3-spp.cfg'),
            types.DetectorNet.VOC: join(cls.DIR_DARKNET, 'cfg', 'yolov3-voc.cfg'),
            types.DetectorNet.OPENIMAGES: join(cls.DIR_DARKNET, 'cfg', 'yolov3-openimages.cfg'),
            types.DetectorNet.SUBMUNITION: join(cls.DIR_DARKNET_VFRAME, 'munitions_09b', 'yolov3.cfg'),
        }
        return cls._darknet_path(table, data_store, opt_net, as_bytes)

    @classmethod
    def darknet_weights(cls, data_store=types.DataStore.HDD, opt_net=types.DetectorNet.COCO, as_bytes=True):
        """Return the weights file path for the net (bytes for the C API by default)."""
        table = {
            types.DetectorNet.COCO: join(cls.DIR_DARKNET, 'weights', 'yolov3.weights'),
            types.DetectorNet.COCO_SPP: join(cls.DIR_DARKNET, 'weights', 'yolov3-spp.weights'),
            types.DetectorNet.VOC: join(cls.DIR_DARKNET, 'weights', 'yolov3-voc.weights'),
            types.DetectorNet.OPENIMAGES: join(cls.DIR_DARKNET, 'weights', 'yolov3-openimages.weights'),
            types.DetectorNet.SUBMUNITION: join(cls.DIR_DARKNET_VFRAME, 'munitions_09b/weights', 'yolov3_40000.weights'),
        }
        return cls._darknet_path(table, data_store, opt_net, as_bytes)

    # -------------------------------------------------------------------------------
    # Metadata Paths

    @classmethod
    def mapping_index(cls, opt_date, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED,
                      file_format=types.FileExt.PKL):
        """Returns filepath to a mapping file. Mapping files are the original Suguarcube mapping data"""
        fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
        date_str = opt_date.name.lower()
        fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, 'mapping', date_str, verified.name.lower(), fname)
        return fp

    @classmethod
    def media_record_index(cls, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED,
                           file_format=types.FileExt.PKL):
        """Returns filepath to the media-record index for a verification tier."""
        fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
        metadata_type = types.Metadata.MEDIA_RECORD.name.lower()
        fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type, verified.name.lower(), fname)
        return fp

    @classmethod
    def metadata_index(cls, metadata_type, data_store=types.DataStore.HDD,
                       verified=types.Verified.VERIFIED, file_format=types.FileExt.PKL):
        """Uses key from enum to get folder name and construct filepath"""
        fname = 'index.pkl' if file_format == types.FileExt.PKL else 'index.json'
        fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower(),
                  verified.name.lower(), fname)
        return fp

    @classmethod
    def metadata_dir(cls, metadata_type, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED):
        """Uses key from enum to get folder name and construct filepath"""
        fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower(),
                  verified.name.lower())
        return fp

    @classmethod
    def metadata_tree_dir(cls, metadata_type, data_store=types.DataStore.HDD):
        """Uses key from enum to get folder name and construct filepath"""
        fp = join(cls.DataStorePath(data_store), cls.DIR_METADATA, metadata_type.name.lower())
        return fp

    @classmethod
    def media_dir(cls, media_type, data_store=types.DataStore.HDD, verified=types.Verified.VERIFIED):
        """Returns the directory path to a media directory"""
        fp = join(cls.DataStorePath(data_store), cls.DIR_MEDIA, media_type.name.lower(), verified.name.lower())
        return fp

    @classmethod
    def dnn(cls):
        """Returns configurations for available DNNs"""
        pass
# Copyright (C) 2019 Intel Corporation.
# SPDX-License-Identifier: BSD-3-Clause
"""Controller for config app.
"""
import os
import xml.etree.ElementTree as ElementTree
class XmlConfig:
"""The core class to analyze and modify acrn config xml files"""
    def __init__(self, path=None, default=True):
        """Create a config accessor.

        :param path: directory containing the acrn config xml files.
        :param default: True to look up xmls directly under *path*;
            False to look under its 'user_defined' subdirectory.
        """
        self._xml_path = path        # base directory of config xml files
        self._default = default      # default vs user-defined lookup mode
        self._curr_xml = None        # name (without extension) of current xml
        self._curr_xml_tree = None   # parsed ElementTree of current xml
@staticmethod
def _get_xml_type(xml_file):
"""
get the config type by file.
:param xml_file: the file path of xml file.
:return: the xml type.
:raises: ValueError, OSError, SyntaxError.
"""
xml_type = ''
if os.path.splitext(xml_file)[1] != '.xml':
return xml_type
try:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
if 'uos_launcher' in root.attrib:
xml_type = 'uos_launcher'
elif 'scenario' in root.attrib:
xml_type = 'scenario'
elif 'board' in root.attrib:
xml_type = 'board'
elif 'board_setting' in root.attrib:
xml_type = 'board_setting'
except ValueError:
print('xml parse error: {}'.format(xml_file))
xml_type = ''
except OSError:
print('xml open error: {}'.format(xml_file))
xml_type = ''
except SyntaxError:
print('xml syntax error: {}'.format(xml_file))
xml_type = ''
return xml_type
def list_all(self, xml_type=None):
"""
list all xml config files by type.
:param xml_type: the xml type.
:return: he list of xml config files.
"""
xmls = []
user_xmls = []
if self._xml_path is None or not os.path.exists(self._xml_path):
return xmls, user_xmls
for test_file in os.listdir(self._xml_path):
test_file_path = os.path.join(self._xml_path, test_file)
if os.path.isfile(test_file_path):
if XmlConfig._get_xml_type(test_file_path) == xml_type:
xmls.append(os.path.splitext(test_file)[0])
user_path = os.path.join(self._xml_path, 'user_defined')
if os.path.isdir(user_path):
for test_file in os.listdir(user_path):
test_file_path = os.path.join(user_path, test_file)
if os.path.isfile(test_file_path):
if XmlConfig._get_xml_type(test_file_path) == xml_type:
user_xmls.append(os.path.splitext(test_file)[0])
return xmls, user_xmls
def set_curr(self, xml):
"""
set current xml file to analyze.
:param xml: the xml file.
:return: None.
:raises: ValueError, OSError, SyntaxError.
"""
if self._xml_path is None or xml is None:
return
try:
self._curr_xml = xml
xml_path = os.path.join(self._xml_path, self._curr_xml + '.xml') \
if self._default \
else os.path.join(self._xml_path, 'user_defined', self._curr_xml + '.xml')
tree = ElementTree.parse(xml_path)
self._curr_xml_tree = tree
except ValueError:
print('xml parse error: {}'.format(xml))
self._curr_xml = None
self._curr_xml_tree = None
except OSError:
print('xml open error: {}'.format(xml))
self._curr_xml = None
self._curr_xml_tree = None
except SyntaxError:
print('xml syntax error: {}'.format(xml))
self._curr_xml = None
self._curr_xml_tree = None
def get_curr(self):
"""
get current xml config file.
:return: current xml config file name.
"""
return self._curr_xml
def get_curr_root(self):
"""
get the xml root of current xml config file.
:return: the xml root of current xml config file.
"""
if self._curr_xml_tree is None:
return None
return self._curr_xml_tree.getroot()
def get_curr_value(self, *args):
"""
get the value of the element by its path.
:param args: the path of the element.
:return: the value of the element.
"""
if self._curr_xml_tree is None:
return None
dest_node = self._get_dest_node(*args)
if dest_node is None:
return None
if dest_node.text is None or dest_node.text.strip() == '':
return ''
return dest_node.text
def set_curr_value(self, value, *args):
"""
set the value of the element by its path.
:param value: the value of the element.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
dest_node = self._get_dest_node(*args)
dest_node.text = value
def set_curr_list(self, values, *args):
"""
set a list of sub element for the element by its path.
:param values: the list of values of the element.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
tag = args[-1]
args = args[:-1]
dest_node = self._get_dest_node(*args)
new_node_desc = None
for node in list(dest_node):
if node.tag == tag:
if 'desc' in node.attrib:
new_node_desc = node.attrib['desc']
dest_node.remove(node)
for value in values:
new_node = ElementTree.SubElement(dest_node, tag)
new_node.text = value
if new_node_desc is not None:
new_node.attrib['desc'] = new_node_desc
def set_curr_attr(self, attr_name, attr_value, *args):
"""
set the attribute of the element by its path.
:param attr_name: the attribute name of the element.
:param attr_value: the attribute value of the element.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
dest_node = self._get_dest_node(*args)
dest_node.attrib[attr_name] = attr_value
def add_curr_value(self, key, desc, value, *args):
"""
add a sub element for the element by its path.
:param key: the tag of the sub element.
:param desc: the attribute desc of the sub element.
:param value: the value of the sub element.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
dest_node = self._get_dest_node(*args)
if key in ['vm']:
ElementTree.SubElement(dest_node, key, attrib={'id': value, 'desc': desc})
else:
new_node = ElementTree.SubElement(dest_node, key, attrib={'desc': desc})
new_node.text = value
def get_curr_elem(self, *args):
"""
get elements for current path.
:param args: the path of the element.
:return: current element.
"""
if self._curr_xml_tree is None:
return
dest_node = self._get_dest_node(*args)
return dest_node
def clone_curr_elem(self, elem, *args):
"""
clone elements for current path.
:param elem: the element to clone.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
dest_node = self._get_dest_node(*args)
dest_node.append(elem)
def insert_curr_elem(self, index, elem, *args):
"""
insert elements for current path.
:param index: the location for the element to insert.
:param elem: the element to insert.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
dest_node = self._get_dest_node(*args)
dest_node.insert(index, elem)
def delete_curr_elem(self, *args):
"""
delete the element by its path.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
father_node = self._get_dest_node(*args[:-1])
dest_node = self._get_dest_node(*args)
father_node.remove(dest_node)
def delete_curr_key(self, *args):
"""
delete the element by its path.
:param args: the path of the element.
:return: None.
"""
if self._curr_xml_tree is None:
return
dest_node = self._get_dest_node(*args)
self._curr_xml_tree.getroot().remove(dest_node)
def _get_dest_node(self, *args):
"""
get the destination element by its path.
:param args: the path of the element.
:return: the destination element.
"""
if self._curr_xml_tree is None:
return None
dest_node = self._curr_xml_tree.getroot()
path = '.'
for arg in args:
# tag:attr=xxx
# tag:attr
# tag
tag = None
attr_name = None
attr_value = None
if ':' not in arg:
tag = arg
elif '=' not in arg:
# tag = arg.split(':')[0]
# attr_name = arg.split(':')[1]
raise Exception('unsupported xml path: tag:attr')
else:
tag = arg.split(':')[0]
attr = arg.split(':')[1]
attr_name = attr.split('=')[0]
attr_value = attr.split('=')[1]
if attr_value is None:
path += ("/" + tag)
else:
path += ("/" + tag + "[@" + attr_name + "='" + attr_value + "']")
dest_node = dest_node.findall(path)
if dest_node is not None and dest_node != []:
return dest_node[0]
raise Exception('can not find node by {} from xml'.format(args))
def save(self, xml=None, user_defined=True):
"""
save current xml to file.
:param xml: the file name to save; if not specified, save current xml to default names.
:param user_defined: save to user defined folder or default folder.
:return: None.
"""
if self._curr_xml_tree is None:
return
if xml is None:
xml = self._curr_xml
xml_path = self._xml_path
if user_defined:
xml_path = os.path.join(self._xml_path, 'user_defined')
if not os.path.isdir(xml_path):
os.makedirs(xml_path)
self._format_xml(self._curr_xml_tree.getroot())
self._curr_xml_tree.write(os.path.join(xml_path, xml+'.xml'), encoding='utf-8',
xml_declaration=True, method='xml')
def _format_xml(self, element, depth=0):
i = "\n" + depth * " "
if element:
if not element.text or not element.text.strip():
element.text = i + " "
if not element.tail or not element.tail.strip():
element.tail = i
for element in element:
self._format_xml(element, depth + 1)
if not element.tail or not element.tail.strip():
element.tail = i
else:
if depth and (not element.tail or not element.tail.strip()):
element.tail = i
|
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
import json as _json
from anvil.js import window as _window
__version__ = "1.5.1"
__all__ = ["local_storage", "session_storage"]
_prefix = "anvil_storage_"
_prefix_len = len(_prefix)
class Storage:
    """A dict-like wrapper over a browser storage object.

    Values are json-encoded and stored under namespaced (prefixed) keys so
    this app's entries cannot clash with other users of the same store.
    """

    def __init__(self, store):
        self._store = store

    def _check_store(self):
        # in some browsers localStorage might not be available;
        # the error is deferred until the store is actually touched
        if self._store is None:
            raise RuntimeError("browser storage is not available")

    def _mangle_key(self, key):
        # namespace every key so we avoid conflicts
        self._check_store()
        if not isinstance(key, str):
            raise TypeError("storage keys must be strings")
        if key.startswith(_prefix):
            return key
        return _prefix + key

    def _filter_store(self):
        # only yield keys that belong to this app's namespace
        return (k for k in self._store.keys() if k.startswith(_prefix))

    def _map_store(self, predicate):
        self._check_store()
        return (predicate(k) for k in self._filter_store())

    def __getitem__(self, key):
        raw = self._store.getItem(self._mangle_key(key))
        if raw is None:
            raise KeyError(key)
        return _json.loads(raw)

    def __setitem__(self, key, val):
        mangled = self._mangle_key(key)
        try:
            encoded = _json.dumps(val)
        except Exception as e:
            raise type(e)(f"There was a problem converting the value into json: {e}")
        self._store.setItem(mangled, encoded)

    def __delitem__(self, key):
        self._store.removeItem(self._mangle_key(key))

    def __contains__(self, key):
        return self._mangle_key(key) in self._store

    def __repr__(self):
        pairs = ", ".join(f"{key!r}: {val!r}" for key, val in self.items())
        return f"Storage({{{pairs}}})"

    def __iter__(self):
        return self.keys()

    def keys(self):
        """returns the keys for local storage as an iterator"""
        return self._map_store(lambda mangled: mangled[_prefix_len:])

    def items(self):
        """returns the items for local storage as an iterator"""
        return self._map_store(
            lambda mangled: (mangled[_prefix_len:], self.__getitem__(mangled)))

    def values(self):
        """returns the values for local storage as an iterator"""
        return self._map_store(self.__getitem__)

    def put(self, key, value):
        """put a key value pair into local storage"""
        self[key] = value

    def get(self, key, default=None):
        """get a value from local storage, returns the default value if the key is not in local storage"""
        try:
            return self.__getitem__(key)
        except KeyError:
            return default

    def pop(self, key, default=None):
        """remove specified key and return the corresponding value.\n\nIf key is not found, default is returned"""
        try:
            return self.get(key, default)
        finally:
            del self[key]
# Module-level singletons backed by the browser Window object.  When a storage
# area is unavailable, _window.get(...) yields None and the Storage wrapper
# raises lazily on first access.
local_storage = Storage(_window.get("localStorage"))
session_storage = Storage(_window.get("sessionStorage"))

if __name__ == "__main__":
    # Smoke test / usage demo — only meaningful inside an Anvil browser session.
    print(local_storage)
    for k, v in local_storage.items():
        print(k, v)
    # basic set / get / delete round trip
    local_storage["foo"] = "bar"
    print(local_storage["foo"])
    del local_storage["foo"]
    print(local_storage.get("foo"))  # deleted -> prints None
    try:
        local_storage["foo"]
    except KeyError as e:
        print(repr(e))
    local_storage.put("foo", 1)
    print(local_storage.pop("foo"))
    # dicts survive the json round trip
    x = {"abc": 123}
    local_storage["x"] = x
    print("x" in local_storage)
    print(local_storage["x"] == x)
    print(local_storage.get("x") == x)
    print(local_storage.pop("x") == x)
|
from flask import render_template, request, redirect, url_for, flash
from datetime import datetime as dt
import json
import locale
import pandas as pd
from sqlalchemy import func
from app.models import Rooms, Hotels, User, Reservation, Status, Guest, Account
from app import db
locale.setlocale(locale.LC_ALL, 'pt_BR.UTF-8')
def dashboard(hotel_id):
    """Build the dashboard view for one hotel.

    Aggregates current room occupancy, guest counts, the current month's
    payable/receivable totals and a per-month series for the dashboard chart.

    :param hotel_id: primary key of the hotel; aborts with 404 when unknown.
    :return: the rendered ``dashboard.html`` template.
    """
    # 404 early when the hotel does not exist (the object itself is unused).
    Hotels.query.get_or_404(hotel_id)
    # NOTE(review): reservations are NOT filtered by hotel_id, so every hotel
    # sees all reservations — confirm whether this is intended.
    reservas = Reservation.query.order_by(Reservation.id)
    contas = Account.query.filter_by(hotel_id=hotel_id).order_by(Account.id)
    # Today's date at midnight so check-in/check-out comparisons work by day.
    hoje = dt.combine(dt.today().date(), dt.min.time())
    # (room_id, occupied-today?) for every active reservation.
    status_reservas = [(r.room_id, r.check_in <= hoje <= r.check_out)
                       for r in reservas if r.status == Status.ATIVO]
    contador_quartos = sum(1 for _, ocupado in status_reservas if ocupado)
    contador_hospedes = sum(r.total_guests for r in reservas
                            if r.status == Status.ATIVO
                            and r.check_in <= hoje <= r.check_out)
    # Paid accounts in the current month.  The comparison is month-only (any
    # year), mirroring the original behaviour — TODO confirm the year should
    # also be checked.
    mes_atual = dt.today().month
    contas_receber_mes = sum(c.valor for c in contas
                             if c.tipo == 'Contas a receber'
                             and c.data_pgto is not None
                             and c.data_pgto.month == mes_atual)
    contas_pagar_mes = sum(c.valor for c in contas
                           if c.tipo == 'Contas a pagar'
                           and c.data_pgto is not None
                           and c.data_pgto.month == mes_atual)
    # One entry per room; when a room has several active reservations the
    # surviving flag is arbitrary (preserved from the original implementation).
    status_reservas = dict(set(status_reservas))
    # Chart data: monthly sums per account type with missing (month, type)
    # pairs filled with zero so both series have the same x axis.
    df = pd.read_sql(contas.statement, contas.session.bind)
    df = df[df['data_pgto'].notna()]
    df['ano_mes_pgto'] = pd.to_datetime(df['data_pgto']).dt.strftime('%Y-%m')
    df = df.groupby(['ano_mes_pgto', 'tipo']).sum().reset_index()
    subset = df[['ano_mes_pgto', 'tipo', 'valor']]
    ano_mes_pgto = list(subset['ano_mes_pgto'].unique())
    base = pd.DataFrame({
        'ano_mes_pgto': sorted(ano_mes_pgto * 2),
        'tipo': ['Contas a pagar', 'Contas a receber'] * len(ano_mes_pgto),
    })
    subset = base.merge(subset, on=['ano_mes_pgto', 'tipo'], how='left').fillna(0)
    contas_grafico = [tuple(x) for x in subset.to_numpy()]
    return render_template('dashboard.html',
                           status_reservas=status_reservas,
                           contador_quartos=contador_quartos,
                           contador_hospedes=contador_hospedes,
                           contas_receber_mes=contas_receber_mes,
                           contas_pagar_mes=contas_pagar_mes,
                           contas_grafico=contas_grafico,
                           len=len
                           )
|
# Repository: vinthedark/snet-marketplace-service
import json
import uuid
from enum import Enum
import web3
from eth_account.messages import defunct_hash_message
from web3 import Web3
from common.logger import get_logger
logger = get_logger(__name__)
class ContractType(Enum):
    """Smart-contract families the blockchain utilities know how to resolve."""

    REGISTRY = "REGISTRY"
    MPE = "MPE"
    RFAI = "RFAI"
class BlockChainUtil(object):
    """Thin wrapper around web3 for loading contracts, signing and sending
    transactions."""

    def __init__(self, provider_type, provider):
        """
        :param provider_type: "HTTP_PROVIDER" or "WS_PROVIDER".
        :param provider: endpoint url of the provider.
        :raises Exception: for any other provider type.
        """
        if provider_type == "HTTP_PROVIDER":
            self.provider = Web3.HTTPProvider(provider)
        elif provider_type == "WS_PROVIDER":
            self.provider = web3.providers.WebsocketProvider(provider)
        else:
            raise Exception("Only HTTP_PROVIDER and WS_PROVIDER provider type are supported.")
        self.web3_object = Web3(self.provider)

    def load_contract(self, path):
        """Load a contract artifact (abi or networks map) from a json file."""
        with open(path) as f:
            contract = json.load(f)
        return contract

    def read_contract_address(self, net_id, path, key):
        """Return the checksummed address stored under *key* for *net_id* in a
        networks json file."""
        contract = self.load_contract(path)
        return Web3.toChecksumAddress(contract[str(net_id)][key])

    def contract_instance(self, contract_abi, address):
        """Return a web3 contract instance bound to *address*."""
        return self.web3_object.eth.contract(abi=contract_abi, address=address)

    def get_contract_instance(self, base_path, contract_name, net_id):
        """Resolve abi/networks files for *contract_name* and return a bound
        contract instance."""
        contract_network_path, contract_abi_path = self.get_contract_file_paths(base_path, contract_name)
        contract_address = self.read_contract_address(net_id=net_id, path=contract_network_path,
                                                      key='address')
        contract_abi = self.load_contract(contract_abi_path)
        logger.debug(f"contract address is {contract_address}")
        contract_instance = self.contract_instance(contract_abi=contract_abi, address=contract_address)
        return contract_instance

    def generate_signature(self, data_types, values, signer_key):
        """Sign soliditySha3(data_types, values) with *signer_key* and return
        the signature as a hex string."""
        signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
        message = web3.Web3.soliditySha3(data_types, values)
        signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
        return signature.signature.hex()

    def generate_signature_bytes(self, data_types, values, signer_key):
        """Sign soliditySha3(data_types, values) with *signer_key* and return
        the signature as raw bytes."""
        signer_key = "0x" + signer_key if not signer_key.startswith("0x") else signer_key
        message = web3.Web3.soliditySha3(data_types, values)
        signature = self.web3_object.eth.account.signHash(defunct_hash_message(message), signer_key)
        return bytes(signature.signature)

    def get_nonce(self, address):
        """ transaction count includes pending transaction also. """
        nonce = self.web3_object.eth.getTransactionCount(address)
        return nonce

    def sign_transaction_with_private_key(self, private_key, transaction_object):
        """Sign *transaction_object* and return the raw transaction bytes."""
        return self.web3_object.eth.account.signTransaction(transaction_object, private_key).rawTransaction

    def create_transaction_object(self, *positional_inputs, method_name, address, contract_path, contract_address_path,
                                  net_id):
        """Build an unsigned transaction dict calling *method_name* with
        *positional_inputs* on the contract described by the given paths.

        :param positional_inputs: arguments for the contract method.
        :param method_name: name of the contract function to call.
        :param address: sender address (used for nonce and "from").
        :param contract_path: path of the abi json file.
        :param contract_address_path: path of the networks json file.
        :param net_id: network id, also used as chainId.
        :return: the transaction dict produced by buildTransaction.
        """
        nonce = self.get_nonce(address=address)
        self.contract = self.load_contract(path=contract_path)
        self.contract_address = self.read_contract_address(net_id=net_id, path=contract_address_path, key='address')
        # Fix: keep the instance in a local variable.  The previous code
        # assigned it to ``self.contract_instance``, shadowing the method of
        # the same name and breaking every subsequent call that used it.
        contract_instance = self.contract_instance(contract_abi=self.contract, address=self.contract_address)
        # Pay 3x the current gas price so the transaction is mined quickly.
        gas_price = 3 * (self.web3_object.eth.gasPrice)
        logger.debug("gas_price == %s, nonce == %s", gas_price, nonce)
        transaction_object = getattr(contract_instance.functions, method_name)(
            *positional_inputs).buildTransaction({
                "from": address,
                "nonce": nonce,
                "gasPrice": gas_price,
                "chainId": net_id
            })
        return transaction_object

    def process_raw_transaction(self, raw_transaction):
        """Broadcast a signed raw transaction and return its hash as hex."""
        return self.web3_object.eth.sendRawTransaction(raw_transaction).hex()

    def create_account(self):
        """Create a new account and return (address, private_key_hex)."""
        account = self.web3_object.eth.account.create(uuid.uuid4().hex)
        return account.address, account.privateKey.hex()

    def get_current_block_no(self):
        """Return the latest block number."""
        return self.web3_object.eth.blockNumber

    def get_transaction_receipt_from_blockchain(self, transaction_hash):
        """Return the receipt for *transaction_hash* (None while pending)."""
        return self.web3_object.eth.getTransactionReceipt(transaction_hash)

    def get_contract_file_paths(self, base_path, contract_name):
        """Map a ContractType value to its networks/abi json file paths.

        :raises Exception: for an unknown contract name.
        """
        if contract_name == ContractType.REGISTRY.value:
            json_file = "Registry.json"
        elif contract_name == ContractType.MPE.value:
            json_file = "MultiPartyEscrow.json"
        elif contract_name == ContractType.RFAI.value:
            json_file = "ServiceRequest.json"
        else:
            raise Exception("Invalid contract Type {}".format(contract_name))
        contract_network_path = base_path + "/{}/{}".format("networks", json_file)
        contract_abi_path = base_path + "/{}/{}".format("abi", json_file)
        return contract_network_path, contract_abi_path
|
# File: sliding_window/sliding_window.py
import numpy as np
import pandas as pd
from pathlib import Path
class SlidingWindow:
    """Sliding-window frequency analysis of target characters in aligned FASTA files.

    For each FASTA subset found in ``path_to_data`` the pipeline computes, per
    sequence, the frequency of ``target`` characters inside every window of
    ``window_size`` positions (sampled every ``stride`` windows), aggregates
    per-window statistics across subsets, computes pairwise deltas and Hedge's
    g effect sizes between subsets, keeps the ``n_largest`` most divergent
    windows and writes all results to csv files.
    """

    def __init__(self, path_to_data, target, window_size, n_largest, stride):
        # parameters
        self.path_to_data = Path(path_to_data)
        self.target = np.array(list(target))
        self.window_size = window_size
        self.n_largest = n_largest
        self.stride = stride
        # data
        self.frequencies = None
        self.window_data = None
        self.window_data_filtered = None
        self.summary_stats = []
        # helper variables
        self.groupby_window = None
        self.processed_data_path = 'results/processed_data/'
        self.fasta_extensions = (
            '.fasta', '.fa', '.fna', '.ffn', '.faa', '.frn')
        # generate subset labels from filenames
        self.subset_names = self.apply_if_fasta(self.make_subset_name)
        self.subset_names.sort()
        self.n_subset = len(self.subset_names)

    def make_subset_name(self, file):
        # subset label = filename up to the first dot
        return file.name.split('.')[0]

    # returns a boolean indicating if a file's extension matches a FASTA format
    def is_fasta(self, file):
        return file.name.lower().endswith(self.fasta_extensions)

    # applies a function to each file in the data directory if it is a FASTA file
    def apply_if_fasta(self, f):
        # fix: Path is not a context manager ("with path:" was deprecated in
        # Python 3.11 and removed in 3.13) — iterate the directory directly
        return [f(file) for file in self.path_to_data.iterdir() if self.is_fasta(file)]

    # converts multiple alignment strings to a 2d numpy array
    def seq_to_np_array(self, sequences):
        return np.asarray(list(map(list, sequences)))

    # convert alignment arrays to boolean values indicating target presence
    def to_boolean(self, sequence_arrays):
        return np.where(np.isin(sequence_arrays, self.target), 1, 0)

    # returns an array of target counts for each window in a sequence
    def window_counts(self, sequence):
        # convolution weights; vector of ones with length of window size
        weights = np.ones(self.window_size, dtype=int)
        # calculate sum of boolean values for each window in a sequence
        return np.convolve(sequence, weights, mode='valid')

    # generates summary statistics aggregated across all alignments for a single subset
    def subset_summary_stats(self, is_target):
        alignment_length = is_target.shape[1]
        seq_count = is_target.shape[0]
        target_count = np.sum(is_target)
        proportion = np.mean(is_target)
        return pd.DataFrame([[alignment_length, seq_count, target_count, proportion]],
                            columns=['alignment_length', 'sequence_count', 'target_count',
                                     'target_proportion'])

    # returns a 2d array of the target counts in each window (column) for each sequence (row)
    def sequence_window_counts(self, is_target):
        return np.array([self.window_counts(seq) for seq in is_target])

    # returns window columns at each stride step
    def filter_stride(self, sequences):
        return sequences[:, 0::self.stride]

    # returns a dataframe of sequence headers appended to window frequencies dataframe
    def add_header(self, header, window_frequencies):
        header = pd.Series(header, name='header')
        # columns are 1-based window indices
        columns = range(1, window_frequencies.shape[1] + 1)
        window_frequencies = pd.DataFrame(
            window_frequencies, columns=columns)
        return pd.concat([header, window_frequencies], axis=1)

    # returns a data frame of window frequencies for each sequence in a subset
    def window_frequencies(self, sequences):
        header, sequences = zip(*sequences)
        sequences_arrays = self.seq_to_np_array(sequences)
        is_target = self.to_boolean(sequences_arrays)
        self.summary_stats.append(self.subset_summary_stats(is_target))
        window_counts = self.sequence_window_counts(is_target)
        window_counts = self.filter_stride(window_counts)
        window_frequencies = window_counts / self.window_size
        return self.add_header(header, window_frequencies)

    # read and return raw FASTA file text
    def read_fasta(self, infile):
        with open(infile, "r") as f:
            return f.read()

    # returns a tuple containing the sequence header and the sequence itself
    def separate_header(self, sequence):
        return sequence[0], ''.join(sequence[1:])

    # returns a list of (header, sequence) tuples for each sequence in a FASTA file
    def process_fasta(self, file_text):
        sequences = file_text.split('>')[1:]
        sequences = [sequence.split('\n') for sequence in sequences]
        return [self.separate_header(sequence) for sequence in sequences]

    # process each FASTA file
    def subset_sequences(self):
        fastas = self.apply_if_fasta(self.read_fasta)
        return [self.process_fasta(fasta) for fasta in fastas]

    # calculate window frequencies for each subset
    def subset_frequencies(self):
        return [self.window_frequencies(subset) for subset in self.subset_sequences()]

    # concatenate subset frequencies and add subset column to dataframe
    def make_frequencies(self):
        self.frequencies = (
            pd.concat(self.subset_frequencies(), keys=self.subset_names)
            .reset_index()
            .drop('level_1', axis=1)
            .rename(columns={'level_0': 'subset'})
        )

    # generate aggregate summary statistics for each subset
    def make_summary_stats(self):
        self.summary_stats = pd.concat(self.summary_stats)
        self.summary_stats.index = self.subset_names
        self.summary_stats.index.name = 'subset'
        subsets = [self.frequencies.loc[self.frequencies['subset'] == subset]
                   for subset in self.subset_names]
        # frequency columns start at position 2 (after 'subset' and 'header')
        self.summary_stats['frequency_mean'] = [
            np.mean(subset.iloc[:, 2:].values) for subset in subsets]
        self.summary_stats['frequency_variance'] = [
            np.var(subset.iloc[:, 2:].values) for subset in subsets]
        self.summary_stats['frequency_std'] = np.sqrt(
            self.summary_stats['frequency_variance'])
        self.summary_stats.reset_index(inplace=True)

    def make_column_names(self, postfix):
        return [subset.lower() + postfix for subset in self.subset_names]

    # returns a symmetric matrix of the differences between group means for a window
    def delta_matrix(self, group):
        return group['window_mean'].values - group['window_mean'].values[:, None]

    def calculate_deltas(self):
        return [self.delta_matrix(group) for _, group in self.groupby_window]

    def concat_dataframes(self, deltas, effect_sizes):
        self.window_data = pd.concat(
            [self.window_data, deltas, effect_sizes], axis=1)

    # scale variance of each subset relative to their sample size
    def scale_variance(self, sample_size, variance):
        return (sample_size - 1) * variance

    # returns a 2d array of all pairwise sums of a vector's elements
    def pairwise_sum(self, vector):
        return vector.values + vector.values[:, None]

    # check for division by zero and replace with 0 if encountered
    def safe_zero_divide(self, dividend, divisor):
        dividend = dividend.astype(float)
        divisor = divisor.astype(float)
        out = np.zeros_like(divisor)
        return np.divide(dividend, divisor, out=out, where=divisor != 0)

    # calculates pooled standard deviation for each pairwise combination of subset frequency variances
    def pool_std(self, variance, sample_size_sums):
        variance_sums = self.pairwise_sum(variance)
        return np.sqrt(self.safe_zero_divide(variance_sums, sample_size_sums - 2))

    def calculate_cohens_d(self, deltas, pooled_std):
        return self.safe_zero_divide(deltas, pooled_std)

    # returns the bias corrected Cohen's d effect size
    def correct_bias(self, effect_size, n_sums):
        divisor = (4 * n_sums) - 9
        correction_factor = 1 - self.safe_zero_divide(np.array([3.0]), divisor)
        return correction_factor * effect_size

    # calculates Hedge's g effect size for groups with unequal sample sizes
    def calculate_hedges_g(self, delta, group, sample_size, sample_size_sums):
        variance = group['window_variance'].values
        scaled_variance = self.scale_variance(sample_size, variance)
        pooled_std = self.pool_std(scaled_variance, sample_size_sums)
        cohens_d = self.calculate_cohens_d(delta, pooled_std)
        return self.correct_bias(cohens_d, sample_size_sums)

    # calculate Hedge's g effect size for each pairwise combination of subset frequencies
    def calculate_effect_sizes(self, deltas):
        sample_size = self.summary_stats['alignment_length']
        sample_size_sums = self.pairwise_sum(sample_size)
        return [self.calculate_hedges_g(delta, group[1], sample_size, sample_size_sums)
                for delta, group in zip(deltas, self.groupby_window)]

    # calculate and append deltas and effect sizes to window dataframe
    def make_effect_sizes(self):
        deltas = self.calculate_deltas()
        # keep the raw matrices; the named return is used for window filtering
        deltas_temp = deltas.copy()
        effect_sizes = self.calculate_effect_sizes(
            deltas)
        deltas = pd.DataFrame(np.concatenate(
            deltas), columns=self.make_column_names('_delta'))
        effect_sizes = pd.DataFrame(np.concatenate(
            effect_sizes), columns=self.make_column_names('_hedges_g'))
        self.concat_dataframes(deltas, effect_sizes)
        return deltas_temp

    # returns the strict upper triangle of a symmetric matrix (the other half
    # is redundant) as a flattened array
    def lower_tri_values(self, diff_matrix):
        return diff_matrix[np.triu_indices(diff_matrix.shape[0], k=1)]

    # returns the largest absolute difference between the group means for a window
    def pairwise_abs_max(self, diff_matrix):
        diff_array = self.lower_tri_values(diff_matrix)
        return np.max(abs(diff_array))

    def filter_dataframe(self, array):
        # keep the n_largest windows by the given score (window ids are 1-based)
        indices = np.argpartition(
            array, -self.n_largest)[-self.n_largest:] + 1
        mask = self.window_data['window'].isin(indices)
        self.window_data_filtered = (
            self.window_data[mask]
            .copy()
            .reset_index(drop=True)
        )

    # convert a scalar from one range to another
    def interpolate_range(self, value, old_min, old_max, new_min, new_max):
        return int(np.interp(value, [old_min, old_max], [new_min, new_max]))

    def make_window_start(self):
        # map window ids back onto alignment coordinates
        alignment_length = self.summary_stats['alignment_length'][0]
        old_min = self.window_data["window"].values[0]
        old_max = self.window_data["window"].values[-1]
        return [self.interpolate_range(value, old_min, old_max, 1, alignment_length)
                for value in self.window_data["window"]]

    def make_window_data(self):
        groupby_subset = self.frequencies.groupby('subset')
        self.window_data = groupby_subset.mean().T.stack().reset_index()
        variance = groupby_subset.var().T.stack().reset_index(drop=True)
        self.window_data['variance'] = variance
        std = np.sqrt(variance)
        self.window_data['std'] = std
        self.window_data.columns = [
            'window', 'subset', 'window_mean', 'window_variance', 'window_std']
        self.window_data.insert(1, "window_start", self.make_window_start())
        self.groupby_window = self.window_data.groupby('window')
        if self.n_subset > 1:
            deltas = self.make_effect_sizes()
            # filter windows with the n largest differences between group means
            max_deltas = [self.pairwise_abs_max(delta) for delta in deltas]
            self.filter_dataframe(max_deltas)
        else:
            # filter windows with the n largest window means
            self.filter_dataframe(self.window_data['window_mean'])

    def save_csv(self, df, filename):
        # fix: the original interpolated a literal "(unknown)" instead of the
        # ``filename`` argument, so every frame overwrote the same file
        df.to_csv(f'{self.processed_data_path}{filename}.csv', index=False)

    def save_data(self):
        dataframes = [self.frequencies, self.window_data,
                      self.window_data_filtered, self.summary_stats]
        names = ['window_frequencies', 'window_data',
                 'window_data_filtered', 'summary_stats']
        for frame, name in zip(dataframes, names):
            self.save_csv(frame, name)

    def run_pipeline(self):
        # full analysis: frequencies -> stats -> window scores -> csv output
        self.make_frequencies()
        self.make_summary_stats()
        self.make_window_data()
        self.save_data()
|
#!/usr/bin/env python3
# encoding: utf-8
import os
import sys
import time
import numpy as np
from copy import deepcopy
from typing import (Dict,
NoReturn,
Optional)
from rls.utils.display import show_dict
from rls.utils.sundry_utils import (check_or_create,
set_global_seeds)
from rls.parse.parse_buffer import get_buffer
from rls.utils.time import get_time_hhmmss
from rls.algos import get_model_info
from rls.common.train.unity import (unity_train,
unity_no_op,
unity_inference)
from rls.common.train.gym import (gym_train,
gym_no_op,
gym_inference)
from rls.common.yaml_ops import (save_config,
load_config)
from rls.common.make_env import make_env
from rls.common.config import Config
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
def UpdateConfig(config: Dict, file_path: str, key_name: str = 'algo') -> Dict:
    '''
    update configurations from a readable file.
    params:
        config: current configurations
        file_path: path of configuration file that needs to be loaded
        key_name: a specified key in configuration file that needs to update current configurations
    return:
        config: updated configurations
    '''
    loaded = load_config(file_path)
    overrides = loaded[key_name]
    try:
        for name in overrides:
            config[name] = overrides[name]
    except Exception as e:
        # a malformed override section aborts the run
        logger.info(e)
        sys.exit()
    return config
class Trainer:
def __init__(self, env_args: Config, buffer_args: Config, train_args: Config):
    '''
    Initialize an agent that consists of training environments, algorithm model, replay buffer.
    params:
        env_args: configurations of training environments
        buffer_args: configurations of replay buffer
        train_args: configurations of training
    '''
    self.env_args = env_args
    self.buffer_args = buffer_args
    self.train_args = train_args
    set_global_seeds(int(self.train_args.seed))
    self._name = self.train_args['name']
    # train_args['base_dir'] DIR/ENV_NAME/ALGORITHM_NAME
    self.train_args['base_dir'] = os.path.join(self.train_args['base_dir'], self.train_args['name'])
    self.start_time = time.time()
    self._allow_print = bool(self.train_args.get('allow_print', False))
    # ENV
    self.env = make_env(self.env_args.to_dict)
    # ALGORITHM CONFIG
    self.MODEL, self.algo_args, self.train_args['policy_mode'], _policy_type = get_model_info(self.train_args['algo'])
    self.multi_agents_training = _policy_type == 'multi'
    # an external algo config file (when given) overrides the defaults
    if self.train_args['algo_config'] is not None:
        self.algo_args = UpdateConfig(self.algo_args, self.train_args['algo_config'], 'algo')
    self.algo_args['memory_net_kwargs']['use_rnn'] = self.train_args['use_rnn']
    self.algo_args['no_save'] = self.train_args['no_save']
    show_dict(self.algo_args)
    # BUFFER
    if self.train_args['policy_mode'] == 'off-policy':
        # RNN policies replay whole episodes; feed-forward policies replay
        # individual transitions
        if self.algo_args['memory_net_kwargs']['use_rnn'] == True:
            self.buffer_args['type'] = 'EpisodeER'
            self.buffer_args['batch_size'] = self.algo_args.get('episode_batch_size', 0)
            self.buffer_args['buffer_size'] = self.algo_args.get('episode_buffer_size', 0)
            self.buffer_args['EpisodeER']['burn_in_time_step'] = self.algo_args.get('burn_in_time_step', 0)
            self.buffer_args['EpisodeER']['train_time_step'] = self.algo_args.get('train_time_step', 0)
        else:
            self.buffer_args['type'] = 'ER'
            self.buffer_args['batch_size'] = self.algo_args.get('batch_size', 0)
            self.buffer_args['buffer_size'] = self.algo_args.get('buffer_size', 0)
        # optional buffer decorators: 'P' = prioritized, 'Nstep' = n-step returns
        _apex_buffer_args = {}
        if self.algo_args.get('use_priority', False):
            self.buffer_args['type'] = 'P' + self.buffer_args['type']
            _apex_buffer_args.update({'max_train_step': self.train_args['max_train_step']})
        if self.algo_args.get('n_step', False):
            self.buffer_args['type'] = 'Nstep' + self.buffer_args['type']
            self.algo_args['gamma'] = pow(self.algo_args['gamma'], self.buffer_args['NstepPER']['n'])  # update gamma for n-step training.
            _apex_buffer_args.update({'gamma': self.algo_args['gamma']})
        self.buffer_args[self.buffer_args['type']].update(_apex_buffer_args)
    else:
        self.buffer_args['type'] = 'None'
        self.train_args['pre_fill_steps'] = 0  # if on-policy, prefill experience replay is no longer needed.
    # dispatch on platform (gym vs unity) and single- vs multi-agent training
    if self.env_args['type'] == 'gym':
        self.initialize_gym()
    else:
        # unity
        if self.multi_agents_training:
            self.initialize_multi_unity()
        else:
            self.initialize_unity()
    pass
    def initialize_gym(self):
        """Set up the gym training branch.

        Creates the replay buffer, builds/restores the model, resumes the
        training counters from the restored checkpoint, and (unless running
        inference or with saving disabled) records the full run config.
        """
        # gym
        if self.train_args['use_wandb']:
            import wandb
            check_or_create(os.path.join(self.train_args.base_dir, 'wandb'))
            wandb.init(sync_tensorboard=True, name=self._name, dir=self.train_args.base_dir, project=self.train_args['wandb_project'])
        # buffer ------------------------------
        if 'Nstep' in self.buffer_args['type'] or 'Episode' in self.buffer_args['type']:
            # these buffer types keep per-environment state, so they need the env count
            self.buffer_args[self.buffer_args['type']]['agents_num'] = self.env_args['env_num']
        buffer = get_buffer(self.buffer_args)
        # buffer ------------------------------
        # model -------------------------------
        self.algo_args.update({
            'envspec': self.env.EnvSpec,
            'max_train_step': self.train_args.max_train_step,
            'base_dir': self.train_args.base_dir
        })
        self.model = self.MODEL(**self.algo_args)
        self.model.set_buffer(buffer)
        self.model.init_or_restore(self.train_args['load_model_path'])
        # model -------------------------------
        # resume training counters from the (possibly restored) model
        _train_info = self.model.get_init_training_info()
        self.train_args['begin_train_step'] = _train_info['train_step']
        self.train_args['begin_frame_step'] = _train_info['frame_step']
        self.train_args['begin_episode'] = _train_info['episode']
        if not self.train_args['inference'] and not self.train_args['no_save']:
            # envspec is not directly serializable; record its string form
            self.algo_args['envspec'] = str(self.algo_args['envspec'])
            records_dict = {
                'env': self.env_args.to_dict,
                'buffer': self.buffer_args.to_dict,
                'train': self.train_args.to_dict,
                'algo': self.algo_args
            }
            save_config(os.path.join(self.train_args.base_dir, 'config'), records_dict)
            if self.train_args['use_wandb']:
                wandb.config.update(records_dict)
    def initialize_multi_unity(self):
        """Set up multi-agent Unity training: one shared model for all groups.

        Mirrors initialize_gym (buffer -> model -> restore -> record config)
        but requires more than one behaviour group in the environment.
        """
        # multi agents with unity
        assert self.env.group_num > 1, 'if using ma* algorithms, number of brains must larger than 1'
        if 'Nstep' in self.buffer_args['type'] or 'Episode' in self.buffer_args['type']:
            # these buffer types keep per-environment state, so they need the env count
            self.buffer_args[self.buffer_args['type']]['agents_num'] = self.env_args['env_num']
        buffer = get_buffer(self.buffer_args)
        self.algo_args.update({
            'envspec': self.env.EnvSpec,
            'max_train_step': self.train_args.max_train_step,
            'base_dir': self.train_args.base_dir,
        })
        self.model = self.MODEL(**self.algo_args)
        self.model.set_buffer(buffer)
        self.model.init_or_restore(self.train_args['load_model_path'])
        # resume training counters from the (possibly restored) model
        _train_info = self.model.get_init_training_info()
        self.train_args['begin_train_step'] = _train_info['train_step']
        self.train_args['begin_frame_step'] = _train_info['frame_step']
        self.train_args['begin_episode'] = _train_info['episode']
        if not self.train_args['inference'] and not self.train_args['no_save']:
            # envspec is not directly serializable; record its string form
            self.algo_args['envspec'] = str(self.algo_args['envspec'])
            records_dict = {
                'env': self.env_args.to_dict,
                'buffer': self.buffer_args.to_dict,
                'train': self.train_args.to_dict,
                'algo': self.algo_args
            }
            save_config(os.path.join(self.train_args.base_dir, 'config'), records_dict)
    def initialize_unity(self):
        """Set up single-agent Unity training: one independent model per group.

        Each behaviour group gets deep-copied buffer/train/algo args, its own
        sub-directory under base_dir, its own buffer and model instance.
        """
        # single agent with unity
        self.models = []
        for i, fgn in enumerate(self.env.fixed_group_names):
            # deep copies so per-group mutations don't leak between groups
            _bargs, _targs, _aargs = map(deepcopy, [self.buffer_args, self.train_args, self.algo_args])
            _targs.base_dir = os.path.join(_targs.base_dir, fgn)
            if _targs.load_model_path is not None:
                _targs.load_model_path = os.path.join(_targs.load_model_path, fgn)
            if 'Nstep' in _bargs['type'] or 'Episode' in _bargs['type']:
                # per-group agent count, unlike the gym/multi branches
                _bargs[_bargs['type']]['agents_num'] = self.env.group_agents[i]
            buffer = get_buffer(_bargs)
            _aargs.update({
                'envspec': self.env.EnvSpec[i],
                'max_train_step': _targs.max_train_step,
                'base_dir': _targs.base_dir,
            })
            model = self.MODEL(**_aargs)
            model.set_buffer(buffer)
            model.init_or_restore(_targs.load_model_path)
            self.models.append(model)
            if not _targs['inference'] and not _targs['no_save']:
                # envspec is not directly serializable; record its string form
                _aargs['envspec'] = str(_aargs['envspec'])
                records_dict = {
                    'env': self.env_args.to_dict,
                    'buffer': _bargs.to_dict,
                    'train': _targs.to_dict,
                    'algo': _aargs
                }
                save_config(os.path.join(_targs.base_dir, 'config'), records_dict)
        # NOTE(review): resumes counters from the first group's model only —
        # assumes all groups share the same training progress; confirm.
        _train_info = self.models[0].get_init_training_info()
        self.train_args['begin_train_step'] = _train_info['train_step']
        self.train_args['begin_frame_step'] = _train_info['frame_step']
        self.train_args['begin_episode'] = _train_info['episode']
def pwi(self, *args, out_time: bool = False) -> NoReturn:
if self._allow_print:
model_info = f'{self._name} '
if out_time:
model_info += f'T: {get_time_hhmmss(self.start_time)} '
logger.info(''.join([model_info, *args]))
else:
pass
    def __call__(self) -> NoReturn:
        '''
        Run training: dispatch on environment type (gym vs. unity) and, for
        unity, on single- vs. multi-agent mode. Each branch first does a
        "no-op" pre-fill pass to seed the replay buffer, then trains; the
        model(s) and environment are always closed afterwards.
        '''
        if self.env_args['type'] == 'gym':
            try:
                # pre-fill the replay buffer before training starts
                gym_no_op(
                    env=self.env,
                    model=self.model,
                    pre_fill_steps=int(self.train_args['pre_fill_steps']),
                    prefill_choose=bool(self.train_args['prefill_choose'])
                )
                gym_train(
                    env=self.env,
                    model=self.model,
                    print_func=self.pwi,
                    begin_train_step=int(self.train_args['begin_train_step']),
                    begin_frame_step=int(self.train_args['begin_frame_step']),
                    begin_episode=int(self.train_args['begin_episode']),
                    render=bool(self.train_args['render']),
                    render_episode=int(self.train_args.get('render_episode', sys.maxsize)),
                    save_frequency=int(self.train_args['save_frequency']),
                    max_step_per_episode=int(self.train_args['max_step_per_episode']),
                    max_train_episode=int(self.train_args['max_train_episode']),
                    eval_while_train=bool(self.train_args['eval_while_train']),
                    max_eval_episode=int(self.train_args['max_eval_episode']),
                    off_policy_step_eval_episodes=int(self.train_args['off_policy_step_eval_episodes']),
                    off_policy_train_interval=int(self.train_args['off_policy_train_interval']),
                    policy_mode=str(self.train_args['policy_mode']),
                    moving_average_episode=int(self.train_args['moving_average_episode']),
                    add_noise2buffer=bool(self.train_args['add_noise2buffer']),
                    add_noise2buffer_episode_interval=int(self.train_args['add_noise2buffer_episode_interval']),
                    add_noise2buffer_steps=int(self.train_args['add_noise2buffer_steps']),
                    off_policy_eval_interval=int(self.train_args['off_policy_eval_interval']),
                    max_train_step=int(self.train_args['max_train_step']),
                    max_frame_step=int(self.train_args['max_frame_step'])
                )
            finally:
                # release model/env resources even if training raised
                self.model.close()
                self.env.close()
        else:
            if self.multi_agents_training:
                try:
                    ma_unity_no_op(
                        env=self.env,
                        model=self.model,
                        pre_fill_steps=int(self.train_args['pre_fill_steps']),
                        prefill_choose=bool(self.train_args['prefill_choose']),
                        real_done=bool(self.train_args['real_done'])
                    )
                    ma_unity_train(
                        env=self.env,
                        model=self.model,
                        print_func=self.pwi,
                        begin_train_step=int(self.train_args['begin_train_step']),
                        begin_frame_step=int(self.train_args['begin_frame_step']),
                        begin_episode=int(self.train_args['begin_episode']),
                        save_frequency=int(self.train_args['save_frequency']),
                        max_step_per_episode=int(self.train_args['max_step_per_episode']),
                        max_train_step=int(self.train_args['max_train_step']),
                        max_frame_step=int(self.train_args['max_frame_step']),
                        max_train_episode=int(self.train_args['max_train_episode']),
                        policy_mode=str(self.train_args['policy_mode']),
                        moving_average_episode=int(self.train_args['moving_average_episode']),
                        real_done=bool(self.train_args['real_done']),
                        off_policy_train_interval=int(self.train_args['off_policy_train_interval'])
                    )
                finally:
                    self.model.close()
                    self.env.close()
            else:
                # single-agent unity: one model per behaviour group
                try:
                    unity_no_op(
                        env=self.env,
                        models=self.models,
                        pre_fill_steps=int(self.train_args['pre_fill_steps']),
                        prefill_choose=bool(self.train_args['prefill_choose']),
                        real_done=bool(self.train_args['real_done'])
                    )
                    unity_train(
                        env=self.env,
                        models=self.models,
                        print_func=self.pwi,
                        begin_train_step=int(self.train_args['begin_train_step']),
                        begin_frame_step=int(self.train_args['begin_frame_step']),
                        begin_episode=int(self.train_args['begin_episode']),
                        save_frequency=int(self.train_args['save_frequency']),
                        max_step_per_episode=int(self.train_args['max_step_per_episode']),
                        max_train_episode=int(self.train_args['max_train_episode']),
                        policy_mode=str(self.train_args['policy_mode']),
                        moving_average_episode=int(self.train_args['moving_average_episode']),
                        add_noise2buffer=bool(self.train_args['add_noise2buffer']),
                        add_noise2buffer_episode_interval=int(self.train_args['add_noise2buffer_episode_interval']),
                        add_noise2buffer_steps=int(self.train_args['add_noise2buffer_steps']),
                        max_train_step=int(self.train_args['max_train_step']),
                        max_frame_step=int(self.train_args['max_frame_step']),
                        real_done=bool(self.train_args['real_done']),
                        off_policy_train_interval=int(self.train_args['off_policy_train_interval'])
                    )
                finally:
                    [model.close() for model in self.models]
                    self.env.close()
    def evaluate(self) -> NoReturn:
        """Run inference-only episodes in the configured environment.

        Dispatches exactly like __call__ (gym / multi-agent unity /
        single-agent unity) and always closes the model(s) and environment.
        """
        if self.env_args['type'] == 'gym':
            try:
                gym_inference(
                    env=self.env,
                    model=self.model,
                    episodes=self.train_args['inference_episode']
                )
            finally:
                self.model.close()
                self.env.close()
        else:
            if self.multi_agents_training:
                try:
                    ma_unity_inference(
                        env=self.env,
                        model=self.model,
                        episodes=self.train_args['inference_episode']
                    )
                finally:
                    self.model.close()
                    self.env.close()
            else:
                try:
                    unity_inference(
                        env=self.env,
                        models=self.models,
                        episodes=self.train_args['inference_episode']
                    )
                finally:
                    [model.close() for model in self.models]
                    self.env.close()
    def apex(self) -> NoReturn:
        """Launch this process's role in Ape-X distributed training.

        The role ('learner', 'worker', 'buffer' or 'evaluator') comes from
        train_args['apex']; the matching entry point is imported lazily so
        unused roles pull in no extra dependencies. Only valid for
        off-policy algorithms.

        Raises:
            Exception: if the configured algorithm is not off-policy.
        """
        if self.train_args['policy_mode'] != 'off-policy':
            raise Exception('Ape-X only suitable for off-policy algorithms.')
        if self.train_args['apex'] == 'learner':
            from rls.distribute.apex.learner import learner
            learner(
                env=self.env,
                model=self.model,
                ip=self.train_args['apex_learner_ip'],
                port=self.train_args['apex_learner_port']
            )
        elif self.train_args['apex'] == 'worker':
            from rls.distribute.apex.worker import worker
            worker(
                env=self.env,
                model=self.model,
                learner_ip=self.train_args['apex_learner_ip'],
                learner_port=self.train_args['apex_learner_port'],
                buffer_ip=self.train_args['apex_buffer_ip'],
                buffer_port=self.train_args['apex_buffer_port'],
                worker_args=self.train_args['apex_worker_args']
            )
        elif self.train_args['apex'] == 'buffer':
            from rls.distribute.apex.buffer import buffer
            buffer(
                ip=self.train_args['apex_buffer_ip'],
                port=self.train_args['apex_buffer_port'],
                learner_ip=self.train_args['apex_learner_ip'],
                learner_port=self.train_args['apex_learner_port'],
                buffer_args=self.train_args['apex_buffer_args']
            )
        elif self.train_args['apex'] == 'evaluator':
            from rls.distribute.apex.evaluator import evaluator
            evaluator(
                env=self.env,
                model=self.model,
                learner_ip=self.train_args['apex_learner_ip'],
                learner_port=self.train_args['apex_learner_port'],
                evaluator_args=self.train_args['apex_evaluator_args']
            )
        return
|
import os
import glob
import tensorflow as tf
from timeit import default_timer
from itertools import product
from graph_nets.graphs import GraphsTuple
from graph_nets.utils_np import graphs_tuple_to_networkxs, networkxs_to_graphs_tuple, get_graph
import numpy as np
import networkx as nx
from networkx.drawing import draw
from tqdm import tqdm
from scipy.optimize import bisect
from scipy.spatial.ckdtree import cKDTree
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from multiprocessing import Pool, Lock
mp_lock = Lock()
def std(tensor, axis):
    """Root-mean-square of *tensor* along *axis*.

    NOTE(review): despite the name this is the RMS, not the standard
    deviation — the mean is never subtracted. It equals the std only for
    zero-mean input along *axis*; confirm that assumption at call sites.
    """
    return tf.math.sqrt(tf.reduce_mean(tensor ** 2, axis=axis))
def find_screen_length(distance_matrix, k_mean):
    """
    Get optimal screening length.

    The screening length is the distance threshold below which two points
    are considered connected; we search for the threshold whose induced
    graph has mean degree ``k_mean``.

    Args:
        distance_matrix: [num_points, num_points]
        k_mean: float

    Returns: float the optimal screen length
    """
    dist_max = distance_matrix.max()
    # Self-distances are zero; mask them so a node never counts as its own neighbour.
    masked = np.where(distance_matrix == 0., np.inf, distance_matrix)

    def degree_error(length):
        degrees = np.sum(masked < length, axis=-1)
        return np.mean(degrees) - k_mean

    # bisect requires a sign change over [0, dist_max]. Without one (e.g.
    # fewer than k_mean+1 points, so the target mean degree is unreachable)
    # fall back to the maximum screening length.
    if degree_error(0.) * degree_error(dist_max) >= 0.:
        return dist_max
    return bisect(degree_error, 0., dist_max, xtol=0.001)
def generate_example_random_choice(positions, properties, k=26, plot=False):
print('choice nn')
idx_list = np.arange(len(positions))
virtual_node_positions = positions[np.random.choice(idx_list, 1000, replace=False)]
kdtree = cKDTree(virtual_node_positions)
dist, indices = kdtree.query(positions)
virtual_properties = np.zeros((len(np.bincount(indices)), len(properties[0])))
mean_sum = [lambda x: np.bincount(indices, weights=x) / np.maximum(1., np.bincount(indices)), # mean
lambda x: np.bincount(indices, weights=x)] # sum
mean_sum_enc = [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]
for p, enc in zip(np.arange(len(properties[0])), mean_sum_enc):
virtual_properties[:, p] = mean_sum[enc](properties[:, p])
virtual_positions = virtual_properties[:, :3]
graph = nx.DiGraph()
kdtree = cKDTree(virtual_positions)
dist, idx = kdtree.query(virtual_positions, k=k + 1)
receivers = idx[:, 1:] # N,k
senders = np.arange(virtual_positions.shape[0]) # N
senders = np.tile(senders[:, None], [1, k]) # N,k
receivers = receivers.flatten()
senders = senders.flatten()
n_nodes = virtual_positions.shape[0]
pos = dict() # for plotting node positions.
edgelist = []
for node, feature, position in zip(np.arange(n_nodes), virtual_properties, virtual_positions):
graph.add_node(node, features=feature)
pos[node] = position[:2]
# edges = np.stack([senders, receivers], axis=-1) + sibling_node_offset
for u, v in zip(senders, receivers):
graph.add_edge(u, v, features=np.array([1., 0.]))
graph.add_edge(v, u, features=np.array([1., 0.]))
edgelist.append((u, v))
edgelist.append((v, u))
graph.graph["features"] = np.array([0.])
# plotting
print('len(pos) = {}\nlen(edgelist) = {}'.format(len(pos), len(edgelist)))
if plot:
fig, ax = plt.subplots(1, 1, figsize=(20, 20))
draw(graph, ax=ax, pos=pos, node_color='blue', edge_color='red', node_size=10, width=0.1)
image_dir = '/data2/hendrix/images/'
graph_image_idx = len(glob.glob(os.path.join(image_dir, 'graph_image_*')))
plt.savefig(os.path.join(image_dir, 'graph_image_{}'.format(graph_image_idx)))
return networkxs_to_graphs_tuple([graph],
node_shape_hint=[virtual_positions.shape[1] + virtual_properties.shape[1]],
edge_shape_hint=[2])
def generate_example_nn(positions, properties, k=26, resolution=2, plot=False):
print('example nn')
resolution = 3.086e18 * resolution # pc to cm
node_features = []
node_positions = []
box_size = (np.max(positions), np.min(positions)) # box that encompasses all of the nodes
axis = np.arange(box_size[1] + resolution, box_size[0], resolution)
lists = [axis] * 3
virtual_node_pos = [p for p in product(*lists)]
virtual_kdtree = cKDTree(virtual_node_pos)
particle_kdtree = cKDTree(positions)
indices = virtual_kdtree.query_ball_tree(particle_kdtree, np.sqrt(3) / 2. * resolution)
for i, p in enumerate(indices):
if len(p) == 0:
continue
virt_pos, virt_prop = make_virtual_node(properties[p])
node_positions.append(virt_pos)
node_features.append(virt_prop)
node_features = np.array(node_features)
node_positions = np.array(node_positions)
graph = nx.DiGraph()
kdtree = cKDTree(node_positions)
dist, idx = kdtree.query(node_positions, k=k + 1)
receivers = idx[:, 1:] # N,k
senders = np.arange(node_positions.shape[0]) # N
senders = np.tile(senders[:, None], [1, k]) # N,k
receivers = receivers.flatten()
senders = senders.flatten()
n_nodes = node_positions.shape[0]
pos = dict() # for plotting node positions.
edgelist = []
for node, feature, position in zip(np.arange(n_nodes), node_features, node_positions):
graph.add_node(node, features=feature)
pos[node] = (position[:2] - box_size[1]) / (box_size[0] - box_size[1])
# edges = np.stack([senders, receivers], axis=-1) + sibling_node_offset
for u, v in zip(senders, receivers):
graph.add_edge(u, v, features=np.array([1., 0.]))
graph.add_edge(v, u, features=np.array([1., 0.]))
edgelist.append((u, v))
edgelist.append((v, u))
graph.graph["features"] = np.array([0.])
# plotting
print('len(pos) = {}\nlen(edgelist) = {}'.format(len(pos), len(edgelist)))
if plot:
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
draw(graph, ax=ax, pos=pos, node_color='green', edge_color='red')
image_dir = '/data2/hendrix/images/'
graph_image_idx = len(glob.glob(os.path.join(image_dir, 'graph_image_*')))
plt.savefig(os.path.join(image_dir, 'graph_image_{}'.format(graph_image_idx)))
return networkxs_to_graphs_tuple([graph],
node_shape_hint=[node_positions.shape[1] + node_features.shape[1]],
edge_shape_hint=[2])
def generate_example(positions, properties, k_mean=26, plot=False):
"""
Generate a geometric graph from positions.
Args:
positions: [num_points, 3] positions used for graph construction.
properties: [num_points, F0,...,Fd] each node will have these properties of shape [F0,...,Fd]
k_mean: float
plot: whether to plot graph.
Returns: GraphTuple
"""
graph = nx.DiGraph()
sibling_edgelist = []
parent_edgelist = []
pos = dict() # for plotting node positions.
real_nodes = list(np.arange(positions.shape[0]))
while positions.shape[0] > 1:
# n_nodes, n_nodes
dist = np.linalg.norm(positions[:, None, :] - positions[None, :, :], axis=-1)
opt_screen_length = find_screen_length(dist, k_mean)
print("Found optimal screening length {}".format(opt_screen_length))
distance_matrix_no_loops = np.where(dist == 0., np.inf, dist)
A = distance_matrix_no_loops < opt_screen_length
senders, receivers = np.where(A)
n_edge = senders.size
# num_points, F0,...Fd
# if positions is to be part of features then this should already be set in properties.
# We don't concatentate here. Mainly because properties could be an image, etc.
sibling_nodes = properties
n_nodes = sibling_nodes.shape[0]
sibling_node_offset = len(graph.nodes)
for node, feature, position in zip(np.arange(sibling_node_offset, sibling_node_offset + n_nodes), sibling_nodes,
positions):
graph.add_node(node, features=feature)
pos[node] = position[:2]
# edges = np.stack([senders, receivers], axis=-1) + sibling_node_offset
for u, v in zip(senders + sibling_node_offset, receivers + sibling_node_offset):
graph.add_edge(u, v, features=np.array([1., 0.]))
graph.add_edge(v, u, features=np.array([1., 0.]))
sibling_edgelist.append((u, v))
sibling_edgelist.append((v, u))
# for virtual nodes
sibling_graph = GraphsTuple(nodes=None, # sibling_nodes,
edges=None,
senders=senders,
receivers=receivers,
globals=None,
n_node=np.array([n_nodes]),
n_edge=np.array([n_edge]))
sibling_graph = graphs_tuple_to_networkxs(sibling_graph)[0]
# completely connect
connected_components = sorted(nx.connected_components(nx.Graph(sibling_graph)), key=len)
_positions = []
_properties = []
for connected_component in connected_components:
print("Found connected component {}".format(connected_component))
indices = list(sorted(list(connected_component)))
virtual_position, virtual_property = make_virtual_node(positions[indices, :], properties[indices, ...])
_positions.append(virtual_position)
_properties.append(virtual_property)
virtual_positions = np.stack(_positions, axis=0)
virtual_properties = np.stack(_properties, axis=0)
###
# add virutal nodes
# num_parents, 3+F
parent_nodes = virtual_properties
n_nodes = parent_nodes.shape[0]
parent_node_offset = len(graph.nodes)
parent_indices = np.arange(parent_node_offset, parent_node_offset + n_nodes)
# adding the nodes to global graph
for node, feature, virtual_position in zip(parent_indices, parent_nodes, virtual_positions):
graph.add_node(node, features=feature)
print("new virtual {}".format(node))
pos[node] = virtual_position[:2]
for parent_idx, connected_component in zip(parent_indices, connected_components):
child_node_indices = [idx + sibling_node_offset for idx in list(sorted(list(connected_component)))]
for child_node_idx in child_node_indices:
graph.add_edge(parent_idx, child_node_idx, features=np.array([0., 1.]))
graph.add_edge(child_node_idx, parent_idx, features=np.array([0., 1.]))
parent_edgelist.append((parent_idx, child_node_idx))
parent_edgelist.append((child_node_idx, parent_idx))
print("connecting {}<->{}".format(parent_idx, child_node_idx))
positions = virtual_positions
properties = virtual_properties
# plotting
virutal_nodes = list(set(graph.nodes) - set(real_nodes))
if plot:
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
draw(graph, ax=ax, pos=pos, node_color='green', edgelist=[], nodelist=real_nodes)
draw(graph, ax=ax, pos=pos, node_color='purple', edgelist=[], nodelist=virutal_nodes)
draw(graph, ax=ax, pos=pos, edge_color='blue', edgelist=sibling_edgelist, nodelist=[])
draw(graph, ax=ax, pos=pos, edge_color='red', edgelist=parent_edgelist, nodelist=[])
plt.show()
return networkxs_to_graphs_tuple([graph],
node_shape_hint=[positions.shape[1] + properties.shape[1]],
edge_shape_hint=[2])
def graph_tuple_to_feature(graph: GraphsTuple, name=''):
    """Serialize a GraphsTuple into a dict of tf.train.Features.

    Nodes and edges are stored as float32 tensors, senders/receivers as
    int64, each serialized to bytes and keyed '<name>_nodes' etc.
    """
    return {
        f'{name}_nodes': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.nodes, tf.float32)).numpy()])),
        f'{name}_edges': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.edges, tf.float32)).numpy()])),
        f'{name}_senders': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.senders, tf.int64)).numpy()])),
        f'{name}_receivers': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[tf.io.serialize_tensor(tf.cast(graph.receivers, tf.int64)).numpy()]))}
def save_examples(generator, save_dir=None,
                  examples_per_file=26, num_examples=1, prefix='train'):
    """
    Saves a list of GraphTuples to tfrecords.

    Args:
        generator: generator (or list) of (GraphTuples, image, example_idx).
            Generator is more efficient.
        save_dir: dir to save tfrecords in; defaults to the current directory.
        examples_per_file: int, max number examples per file
        num_examples: int, total number of examples (used for the progress bar).
        prefix: filename prefix of the tfrecord files.

    Returns: list of tfrecord files.
    """
    print("Saving data in tfrecords.")
    if save_dir is None:
        save_dir = os.getcwd()
    os.makedirs(save_dir, exist_ok=True)
    files = []
    data_iterable = iter(generator)
    data_left = True
    pbar = tqdm(total=num_examples)
    while data_left:
        mp_lock.acquire()  # make sure no duplicate files are made / replaced
        # BUG FIX: the file count and file name previously hard-coded 'train_',
        # silently ignoring the `prefix` argument.
        tf_files = glob.glob(os.path.join(save_dir, '{}_*'.format(prefix)))
        file_idx = len(tf_files)
        mp_lock.release()
        file = os.path.join(save_dir, '{}_{:04d}.tfrecords'.format(prefix, file_idx))
        files.append(file)
        with tf.io.TFRecordWriter(file) as writer:
            for i in range(examples_per_file):
                try:
                    (graph, image, example_idx) = next(data_iterable)
                except StopIteration:
                    data_left = False
                    break
                graph = get_graph(graph, 0)
                features = dict(
                    image=tf.train.Feature(
                        bytes_list=tf.train.BytesList(
                            value=[tf.io.serialize_tensor(tf.cast(image, tf.float32)).numpy()])),
                    example_idx=tf.train.Feature(
                        bytes_list=tf.train.BytesList(
                            value=[tf.io.serialize_tensor(tf.cast(example_idx, tf.int32)).numpy()])),
                    **graph_tuple_to_feature(graph, name='graph')
                )
                features = tf.train.Features(feature=features)
                example = tf.train.Example(features=features)
                writer.write(example.SerializeToString())
                pbar.update(1)
    pbar.close()  # release the progress bar's handle on stderr
    print("Saved in tfrecords: {}".format(files))
    return files
def feature_to_graph_tuple(name=''):
    """Parsing schema (tf.io feature spec) for a serialized graph *name*.

    Counterpart of graph_tuple_to_feature. Only nodes, senders and
    receivers are declared; edge features are not read back.
    """
    return {
        f'{name}_nodes': tf.io.FixedLenFeature([], dtype=tf.string),
        f'{name}_senders': tf.io.FixedLenFeature([], dtype=tf.string),
        f'{name}_receivers': tf.io.FixedLenFeature([], dtype=tf.string),
    }
def decode_examples_old(record_bytes, node_shape=None, image_shape=None):
    """
    Decodes raw bytes as returned from tf.data.TFRecordDataset([example_path]) into a GraphTuple and image

    Args:
        record_bytes: raw bytes
        node_shape: shape of nodes if known.
        image_shape: shape of image if known.

    Returns: (graph_data_dict, image, snapshot, projection) where
        graph_data_dict holds the GraphsTuple constructor kwargs (edges are
        filled with zeros; edge features are not stored in this format).
    """
    parsed_example = tf.io.parse_single_example(
        # Data
        record_bytes,
        # Schema
        dict(
            image=tf.io.FixedLenFeature([], dtype=tf.string),
            snapshot=tf.io.FixedLenFeature([], dtype=tf.string),
            projection=tf.io.FixedLenFeature([], dtype=tf.string),
            **feature_to_graph_tuple('graph')
        )
    )
    image = tf.io.parse_tensor(parsed_example['image'], tf.float32)
    image.set_shape(image_shape)
    snapshot = tf.io.parse_tensor(parsed_example['snapshot'], tf.int32)
    snapshot.set_shape(())
    projection = tf.io.parse_tensor(parsed_example['projection'], tf.int32)
    projection.set_shape(())
    graph_nodes = tf.io.parse_tensor(parsed_example['graph_nodes'], tf.float32)
    graph_nodes.set_shape([None] + list(node_shape))
    receivers = tf.io.parse_tensor(parsed_example['graph_receivers'], tf.int64)
    # GraphsTuple indices are int32
    receivers = tf.cast(receivers, tf.int32)
    receivers.set_shape([None])
    senders = tf.io.parse_tensor(parsed_example['graph_senders'], tf.int64)
    senders = tf.cast(senders, tf.int32)
    senders.set_shape([None])
    # node/edge counts are derived from the parsed tensors, not stored
    n_node = tf.shape(graph_nodes)[0:1]
    n_edge = tf.shape(senders)[0:1]
    # graph = GraphsTuple(nodes=graph_nodes,
    #                     edges=graph_edges,
    #                     globals=tf.zeros([1]),
    #                     receivers=receivers,
    #                     senders=senders,
    #                     n_node=tf.shape(graph_nodes)[0:1],
    #                     n_edge=tf.shape(graph_edges)[0:1])
    graph_data_dict = dict(nodes=graph_nodes,
                           edges=tf.zeros((n_edge[0], 1)),
                           globals=tf.zeros([1]),
                           receivers=receivers,
                           senders=senders,
                           n_node=n_node,
                           n_edge=n_edge)
    return (graph_data_dict, image, snapshot, projection)
def decode_examples(record_bytes, node_shape=None, image_shape=None, k=None):
    """
    Decodes raw bytes as returned from tf.data.TFRecordDataset([example_path]) into a GraphTuple and image

    Args:
        k: number of nearest neighbours
        record_bytes: raw bytes
        node_shape: shape of nodes if known.
        image_shape: shape of image if known.

    Returns: (graph_data_dict, image, snapshot, projection, extra_info)
    """
    parsed_example = tf.io.parse_single_example(
        # Data
        record_bytes,
        # Schema
        dict(
            idx=tf.io.FixedLenFeature([], dtype=tf.string),
            image=tf.io.FixedLenFeature([], dtype=tf.string),
            virtual_properties=tf.io.FixedLenFeature([], dtype=tf.string),
            snapshot=tf.io.FixedLenFeature([], dtype=tf.string),
            projection=tf.io.FixedLenFeature([], dtype=tf.string),
            extra_info=tf.io.FixedLenFeature([], dtype=tf.string)
            # **feature_to_graph_tuple('graph')
        )
    )
    # idx holds each node's k+1 nearest-neighbour indices (self included)
    idx = tf.io.parse_tensor(parsed_example['idx'], tf.int32)
    idx.set_shape([None] + [k + 1])
    graph_nodes = tf.io.parse_tensor(parsed_example['virtual_properties'], tf.float32)
    graph_nodes.set_shape([None] + list(node_shape))
    image = tf.io.parse_tensor(parsed_example['image'], tf.float32)
    image.set_shape(image_shape)
    snapshot = tf.io.parse_tensor(parsed_example['snapshot'], tf.int32)
    snapshot.set_shape(())
    projection = tf.io.parse_tensor(parsed_example['projection'], tf.int32)
    projection.set_shape(())
    extra_info = tf.io.parse_tensor(parsed_example['extra_info'], tf.float32)
    extra_info.set_shape([None])
    # rebuild the symmetric k-NN edge lists from the stored neighbour indices
    receivers = idx[:, 1:]  # N,k (drop self at column 0)
    senders = tf.cast(tf.range(tf.shape(graph_nodes)[0:1][0]), idx.dtype)  # N
    senders = tf.tile(senders[:, None], tf.constant([1, k], tf.int32))  # N, k
    receivers = tf.reshape(receivers, shape=[-1])
    senders = tf.reshape(senders, shape=[-1])
    receivers_both_directions = tf.concat([receivers, senders], axis=0)
    senders_both_directions = tf.concat([senders, receivers], axis=0)
    n_node = tf.shape(graph_nodes)[0:1]
    n_edge = tf.shape(senders_both_directions)[0:1]
    # property_names = ['x', 'y', 'z', 'velocity_x', 'velocity_y', 'velocity_z', 'gravitational_potential',
    #                   'density', 'temperature', 'cell_mass', 'cell_volume']
    print('before', graph_nodes.shape)
    # keep only x, y, z and density columns (True entries of the mask)
    mask = tf.constant([True, True, True,  # mask for density
                        False, False, False,
                        False,
                        True,
                        False, False, False], dtype=tf.bool)
    graph_nodes = tf.boolean_mask(graph_nodes, mask, axis=1)
    graph_nodes.set_shape([None, 4])
    print('after', graph_nodes.shape)
    # graph_data_dict = dict(nodes=graph_nodes,
    #                        edges=tf.zeros((n_edge[0], 1)),
    #                        globals=tf.zeros([1, 1]),
    #                        receivers=receivers_both_directions,
    #                        senders=senders_both_directions,
    #                        n_node=n_node,
    #                        n_edge=n_edge)
    # NOTE(review): the edge/sender/receiver entries are commented out here,
    # so the computed edge lists above are currently unused — confirm whether
    # downstream expects an edgeless graph.
    graph_data_dict = dict(nodes=graph_nodes,
                           # edges=tf.zeros((n_edge[0], 1)),
                           # globals=tf.zeros([1, 1]),
                           # receivers=receivers_both_directions,
                           # senders=senders_both_directions,
                           n_node=n_node,
                           n_edge=tf.zeros_like(n_node))
    return (graph_data_dict, image, snapshot, projection, extra_info)
def get_data_info(data_dirs):
    """
    Get information of saved data.

    Writes per-example min/max of the image and properties arrays to
    'data_info.txt' (truncated first).

    Args:
        data_dirs: data directories
    Returns:

    """
    # truncate any previous report
    open('data_info.txt', 'w').close()
    for dir in tqdm(data_dirs):
        print("Generating data from {}".format(dir))
        positions, properties, image = _get_data(dir)
        with open("data_info.txt", "a") as text_file:
            print(f"dir: {dir}\n"
                  f"    image_min: {np.min(image)}\n"
                  f"    image_max: {np.max(image)}\n"
                  f"    properties_min: {np.around(np.min(properties, axis=0), 2)}\n"
                  f"    properties_max: {np.around(np.max(properties, axis=0), 2)}\n", file=text_file)
def get_data_image(data_dirs):
    """
    Get information of saved data.

    Saves each example's projection image (first channel) as a PNG under
    image_dir, numbering files consecutively after the existing ones.

    Args:
        data_dirs: data directories
    Returns:

    """
    image_dir = '/data2/hendrix/projection_images/'
    for dir in tqdm(data_dirs):
        print("Generating data from {}".format(dir))
        positions, properties, image = _get_data(dir)
        print('save image...')
        proj_image_idx = len(glob.glob(os.path.join(image_dir, 'proj_image_*')))
        plt.imsave(os.path.join(image_dir, 'proj_image_{}.png'.format(proj_image_idx)),
                   image[:, :, 0])
        print('saved.')
def generate_data(data_dir, save_dir='/data2/hendrix/train_data_2/'):
    """
    Routine for generating train data in tfrecords

    Args:
        data_dir: where simulation data is.
        save_dir: where tfrecords will go.

    Returns: list of tfrecords.
    """
    npz_files = glob.glob(os.path.join(data_dir, '*'))

    def data_generator():
        # lazily build one graph per npz file
        print("Making graphs.")
        for idx, dir in tqdm(enumerate(npz_files)):
            print("Generating data from {}/{}".format(data_dir, dir))
            positions, properties, image = _get_data(dir)
            graph = generate_example_random_choice(positions, properties)
            yield (graph, image, idx)

    # BUG FIX: num_examples previously referenced the global `example_dirs`,
    # which only exists when run as a script (NameError on import); the
    # example count for this call is len(npz_files).
    train_tfrecords = save_examples(data_generator(),
                                    save_dir=save_dir,
                                    examples_per_file=len(npz_files),
                                    num_examples=len(npz_files),
                                    prefix='train')
    return train_tfrecords
###
# specific to project
def make_virtual_node(properties):
    """
    Aggregate positions and properties of nodes into one virtual node.

    Columns 0:6 (positions + velocities, presumably) are averaged, column 6
    is summed, 7:9 averaged and 9:11 summed — TODO confirm the column
    meanings against _get_data's property layout.

    NOTE(review): generate_example() in this file calls this function with
    two arguments (positions, properties); that call would raise TypeError —
    confirm the intended signature.

    Args:
        properties: [N, F0,...Fd] per-node properties (at least 11 columns).

    Returns: [3] virtual position, [11] virtual properties
    """
    virtual_properties = np.zeros(11)
    # BUG FIX: the original sliced single columns (properties[:, 6] and
    # properties[:, 3]) where column ranges were intended, broadcasting one
    # scalar over six slots and returning a scalar instead of a [3] position.
    virtual_properties[:6] = np.mean(properties[:, :6], axis=0)
    virtual_properties[6] = np.sum(properties[:, 6])
    virtual_properties[7:9] = np.mean(properties[:, 7:9], axis=0)
    virtual_properties[9:11] = np.sum(properties[:, 9:11], axis=0)
    # the virtual position is the mean position, i.e. the first 3 aggregates
    return virtual_properties[:3], virtual_properties
def aggregate_lowest_level_cells(positions, properties):
    '''
    aggregate the lowest level particles.

    Particles whose refinement level (column 11) equals the maximum are
    grouped by their cell index (column 12) and averaged; higher-level
    particles are passed through unchanged.

    Args:
        positions: node positions [n, 3]
        properties: node properties [n, f]

    Returns:
        agg_positions: aggregated node positions [m, 3]
        agg_properties: aggregated node properties [m, f]
    '''
    lowest_level = np.max(properties[:, 11])
    lowest_level_positions = positions[properties[:, 11] == lowest_level]  # [j, 3]
    lowest_level_properties = properties[properties[:, 11] == lowest_level]  # [j, f]
    cell_inds = list(set(lowest_level_properties[:, 12]))  # [m-(n-j)]
    grouped_ll_positions = [lowest_level_positions[lowest_level_properties[:, 12] == ind] for ind in
                            cell_inds]  # [m-(n-j), 4096, 3]
    grouped_ll_properties = [lowest_level_properties[lowest_level_properties[:, 12] == ind] for ind in
                             cell_inds]  # [m-(n-j), 4096, f]
    agg_positions = positions[properties[:, 11] < lowest_level]  # [n-j, 3]
    agg_properties = properties[properties[:, 11] < lowest_level]  # [n-j, f]
    # NOTE(review): np.mean(..., axis=0) averages ACROSS cells, yielding shape
    # [4096, 3] rather than the documented one-row-per-cell [m-(n-j), 3];
    # axis=1 looks intended (and axis=0 requires every cell to have exactly
    # the same particle count). Confirm before relying on this function.
    agg_positions = np.concatenate((agg_positions, np.mean(grouped_ll_positions, axis=0)))  # [m, 3]
    agg_properties = np.concatenate((agg_properties, np.mean(grouped_ll_properties, axis=0)))  # [m, f]
    return agg_positions, agg_properties
def _get_data(dir):
"""
Should return the information for a single simulation.
Args:
dir: directory with sim data.
Returns:
positions for building graph
properties for putting in nodes and aggregating upwards
image corresponding to the graph
extra info corresponding to the example
"""
f = np.load(dir)
positions = f['positions']
properties = f['properties']
image = f['proj_image']
image = image.reshape((256, 256, 1))
# properties = properties / np.std(properties, axis=0) # normalize values
# extra_info = f['extra_info']
return positions, properties, image # , extra_info
def make_tutorial_data(examples_dir):
    """Write ten synthetic examples under `examples_dir`.

    Each example goes in its own `example_NNNN` folder (numbered after the
    folders already present) as a data.npz with random positions [50, 3],
    properties [50, 5] and image [24, 24, 1].
    """
    for _ in range(10):
        # Re-count existing folders each pass so numbering continues from
        # whatever is already on disk.
        next_idx = len(glob.glob(os.path.join(examples_dir, 'example_*')))
        data_dir = os.path.join(examples_dir, f'example_{next_idx:04d}')
        os.makedirs(data_dir, exist_ok=True)
        np.savez(os.path.join(data_dir, 'data.npz'),
                 positions=np.random.uniform(0., 1., size=(50, 3)),
                 properties=np.random.uniform(0., 1., size=(50, 5)),
                 image=np.random.uniform(size=(24, 24, 1)))
if __name__ == '__main__':
    # Paths are hard-coded for the original author's machine; adjust before running.
    examples_dir = '/data2/hendrix/examples/'
    train_data_dir = '/data2/hendrix/train_data_2/'
    example_dirs = glob.glob(os.path.join(examples_dir, 'example_*'))
    print(example_dirs)
    # get_data_info(example_dirs)
    # get_data_image(example_dirs)
    # list_of_example_dirs = []
    # temp_lst = []
    # for example_dir in example_dirs:
    #     if len(temp_lst) == 32:
    #         list_of_example_dirs.append(temp_lst)
    #         temp_lst = []
    #     else:
    #         temp_lst.append(example_dir)
    # list_of_example_dirs.append(temp_lst)
    # print(f'number of tfrecfiles: {len(list_of_example_dirs)}')
    # NOTE(review): Pool(1) runs generate_data serially; presumably the worker
    # count was lowered for debugging — confirm before scaling up.
    pool = Pool(1)
    pool.map(generate_data, example_dirs)
|
<filename>cpdb/twitterbot/tests/test_response_builders.py
from django.test import TestCase
from django.test.utils import override_settings
from mock.mock import mock_open
from robber import expect
from mock import patch, Mock
from twitterbot.response_builders import (
SingleOfficerResponseBuilder, CoaccusedPairResponseBuilder, BaseResponseBuilder, NotFoundResponseBuilder)
from twitterbot.factories import ResponseTemplateFactory
from twitterbot.models import ResponseTemplate
from data.factories import OfficerFactory, OfficerAllegationFactory, AllegationFactory
class BaseResponseBuilderTestCase(TestCase):
    """Tests for BaseResponseBuilder via a minimal concrete subclass."""
    def setUp(self):
        ResponseTemplate.objects.all().delete()
        class DummyResponseBuilder(BaseResponseBuilder):
            response_type = 'single_officer'
            def get_variables_sets(self, entities, context):
                yield dict()
        self.builder_class = DummyResponseBuilder
    def _expected_response(self, tweet_content):
        # Shape of a single built response; only `tweet_content` varies here.
        return [{
            'source': (),
            'tweet_content': tweet_content,
            'url': '',
            'type': 'single_officer',
            'entity': None,
            'coaccused': 0,
            'officer1': None,
            'officer2': None
        }]
    def test_build_with_round_robined_syntax(self):
        builder = self.builder_class()
        ResponseTemplateFactory(id=20, response_type='single_officer', syntax='temp1')
        ResponseTemplateFactory(id=21, response_type='single_officer', syntax='temp2')
        # Each user gets an independent round-robin cursor over the templates:
        # 'abc' sees temp1, then temp2, then wraps to temp1; 'def' starts fresh.
        expect(list(builder.build(extra_variables={'user_name': 'abc'}))).to.eq(
            self._expected_response('temp1'))
        expect(list(builder.build(extra_variables={'user_name': 'def'}))).to.eq(
            self._expected_response('temp1'))
        expect(list(builder.build(extra_variables={'user_name': 'abc'}))).to.eq(
            self._expected_response('temp2'))
        expect(list(builder.build(extra_variables={'user_name': 'abc'}))).to.eq(
            self._expected_response('temp1'))
    def test_build_with_syntax_depend_on_right_response_type(self):
        builder = self.builder_class()
        ResponseTemplateFactory(response_type='single_officer', syntax='b')
        ResponseTemplateFactory(response_type='test', syntax='c')
        context = dict()
        # Only the template matching the builder's response_type is used, and
        # the shared context records how many responses were produced.
        expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq(
            self._expected_response('b'))
        expect(context['responses_count']).to.eq(1)
    def test_build_with_truncating_user_name_if_tweet_content_longer_than_140_characters(self):
        builder = self.builder_class()
        ResponseTemplateFactory(response_type='single_officer', syntax='@{{user_name}} anything else')
        # Force the length check to see an over-limit tweet so the mention is dropped.
        with patch('twitterbot.response_builders.len', return_value=150):
            built = list(builder.build(extra_variables={'user_name': 'abc'}))
            expect(built[0]['tweet_content']).to.eq('anything else')
class SingleOfficerResponseBuilderTestCase(TestCase):
    """Tests for SingleOfficerResponseBuilder: one response per officer entity."""
    def setUp(self):
        # Templates are global fixtures; clear them so each test controls its own.
        ResponseTemplate.objects.all().delete()
    @override_settings(DOMAIN='http://foo.co')
    def test_build(self):
        # NOTE(review): patching twitterbot.handlers.open looks unrelated to this
        # builder — presumably it guards against file I/O triggered on import of
        # the handlers module; confirm whether it is still needed.
        _mock_open = mock_open()
        with patch('twitterbot.handlers.open', _mock_open, create=True):
            officer1 = OfficerFactory(id=1, first_name='Jerome', last_name='Finnigan', allegation_count=3)
            officer1_doc = {
                'id': officer1.id, 'full_name': officer1.full_name
            }
            # officer2 has no allegations, so the rendered count falls back to 0.
            officer2 = OfficerFactory(id=2, first_name='Raymond', last_name='Piwnicki')
            officer2_doc = {
                'id': officer2.id, 'full_name': officer2.full_name
            }
            ResponseTemplateFactory(
                response_type='single_officer',
                syntax='@{{user_name}} {{officer.full_name}} has {{officer.allegation_count}} complaints')
            builder = SingleOfficerResponseBuilder()
            # Each (source, doc) pair yields one response with the officer's URL.
            officers = [('source1', officer1_doc), ('source2', officer2_doc)]
            expect(list(builder.build(officers, {'user_name': 'abc'}))).to.eq([{
                'source': ('source1',),
                'tweet_content': '@abc <NAME> has 3 complaints',
                'url': 'http://foo.co/officer/1/',
                'type': 'single_officer',
                'entity': officer1_doc,
                'officer1': None,
                'officer2': None,
                'coaccused': 0,
            }, {
                'source': ('source2',),
                'tweet_content': '@abc <NAME> has 0 complaints',
                'url': 'http://foo.co/officer/2/',
                'type': 'single_officer',
                'entity': officer2_doc,
                'officer1': None,
                'officer2': None,
                'coaccused': 0,
            }])
class CoaccusedPairResponseBuilderTestCase(TestCase):
    """Tests for CoaccusedPairResponseBuilder: pairs officers sharing an allegation."""
    def setUp(self):
        ResponseTemplate.objects.all().delete()
    def test_build(self):
        def make_doc(officer):
            # The builder only reads id/full_name/complaints from the doc.
            return {'id': officer.id, 'full_name': officer.full_name, 'complaints': 3}
        # officer1 and officer2 share one allegation; officer3 has its own, so
        # only the first pair should be emitted.
        officer1 = OfficerFactory(first_name='Jerome', last_name='Finnigan')
        allegation = AllegationFactory()
        OfficerAllegationFactory(officer=officer1, allegation=allegation)
        officer1_doc = make_doc(officer1)
        officer2 = OfficerFactory(first_name='Raymond', last_name='Piwnicki')
        OfficerAllegationFactory(officer=officer2, allegation=allegation)
        officer2_doc = make_doc(officer2)
        officer3 = OfficerFactory(first_name='Jesse', last_name='Acosta')
        OfficerAllegationFactory(officer=officer3)
        officer3_doc = make_doc(officer3)
        ResponseTemplateFactory(
            response_type='coaccused_pair',
            syntax=(
                '@{{user_name}} {{officer1.full_name}} and {{officer2.full_name}} '
                'were co-accused in {{coaccused}} case'
            )
        )
        builder = CoaccusedPairResponseBuilder()
        entities = [
            ('source1', officer1_doc),
            ('source2', officer2_doc),
            ('source3', officer3_doc),
        ]
        expect(list(builder.build(entities, {'user_name': 'abc'}))).to.eq([{
            'source': ('source1', 'source2'),
            'tweet_content': '@abc <NAME> and <NAME> were co-accused in 1 case',
            'url': '',
            'type': 'coaccused_pair',
            'entity': None,
            'officer1': officer1,
            'officer2': officer2,
            'coaccused': 1,
        }])
class NotFoundResponseBuilderTestCase(TestCase):
    """Tests for NotFoundResponseBuilder, the fallback when nothing matched."""
    def setUp(self):
        ResponseTemplate.objects.all().delete()
        ResponseTemplateFactory(
            response_type='not_found',
            syntax='Sorry, @{{user_name}}, the bot find nothing')
    def test_build_with_0_response(self):
        builder = NotFoundResponseBuilder()
        tweet = Mock(
            is_tweet_from_followed_accounts=False,
            is_retweet_of_twitterbot=False,
            is_quoted_tweet_of_twitterbot=False)
        # BUG FIX: the key was 'response_count', but every other test in this
        # class (and the positive test below) uses 'responses_count', so the
        # misspelled key silently exercised the builder's missing-key default.
        context = {
            'responses_count': 0,
            'incoming_tweet': tweet
        }
        with self.settings(DOMAIN='http://foo.co'):
            expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([{
                'source': (),
                'tweet_content': 'Sorry, @abc, the bot find nothing',
                'url': 'http://foo.co',
                'type': 'not_found',
                'entity': None,
                'officer1': None,
                'officer2': None,
                'coaccused': 0
            }])
    def test_build_with_response(self):
        # If any response was already produced, the fallback stays silent.
        builder = NotFoundResponseBuilder()
        expect(list(builder.build(extra_variables={'user_name': 'abc'}, context={'responses_count': 1}))).to.eq([])
    def test_do_nothing_if_retweet_of_twitterbot(self):
        builder = NotFoundResponseBuilder()
        tweet = Mock(
            is_tweet_from_followed_accounts=False,
            is_retweet_of_twitterbot=True,
            is_quoted_tweet_of_twitterbot=False)
        context = {
            'responses_count': 0,
            'incoming_tweet': tweet
        }
        expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([])
    def test_do_nothing_if_quoted_tweet_of_twitterbot(self):
        builder = NotFoundResponseBuilder()
        tweet = Mock(
            is_tweet_from_followed_accounts=False,
            is_retweet_of_twitterbot=False,
            is_quoted_tweet_of_twitterbot=True)
        context = {
            'responses_count': 0,
            'incoming_tweet': tweet
        }
        expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([])
    def test_do_nothing_if_there_is_no_incoming_tweet(self):
        builder = NotFoundResponseBuilder()
        context = {
            'responses_count': 0,
            'incoming_tweet': None
        }
        expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=context))).to.eq([])
    def test_do_nothing_if_there_is_no_context(self):
        builder = NotFoundResponseBuilder()
        expect(list(builder.build(extra_variables={'user_name': 'abc'}, context=None))).to.eq([])
|
<gh_stars>1-10
#######################################################################
# Copyright [2019] [<NAME>]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################
# library imports
import time
import math
import numpy
import random
from tkinter import *
# variable declaration
# --- simulation parameters and mutable state (re-assigned identically in the
# restart branch at the bottom of the main loop) ---
size = 10  # the number of squares wide & tall the simulation will be (n x n square where size = n)
learningRate = 0.80  # the learning rate, included in the Q-Learning algorithm (between 0 and 1)
discountRate = 0.70  # the discount rate, included in the Q-Learning algorithm (between 0 and 1)
score = 0  # counts how many consecutive times the mouse has reached the cheese
minimumMoves = 0  # to be calculated, the optimum moves from the mouse to the cheese (does not account for obstacles)
cheeseCount = 0  # number of times the cheese has been reached
previousMoves = 0  # the number of moves the mouse previously took to reach the cheese
sameMoves = 0  # counts how many consecutive times the mouse has reached the cheese in the same number of moves
moves = 0  # the number of moves the mouse has taken at any point in time since it has last died
count = 0  # the number of iterations the mouse has gone through
speed = 0  # the user input between 1 and 5 which will dictate the value of refresh (below)
refresh = 0  # the number of milliseconds the program waits to refresh the screen
looping = True  # to control the main while loop
learning = True  # to control the secondary while loop (while the program is learning)
caught = False  # to indicate if the user has entered an invalid input
speed_check = True  # used for the while loop which asks the user for their desired speed
last_run = True  # to indicate if the last iteration is happening, this is so that it can play the path in slow-motion
QTable = [[float(0), float(0), float(0), float(0)]]  # declaration of the Q table
# NOTE(review): numpy.append copies the whole table on every iteration (O(n^2));
# numpy.zeros((size**4 + 1, 4)) would build the same table in one step.
for i in range(0, (size*size*size*size)):  # using a for loop to initialize the Q table with values of 0
    QTable = numpy.append(QTable, [[float(0), float(0), float(0), float(0)]], axis=0)
# entire program loop
while looping:
    # prompt the user for their desired speed of the program and loop until a valid value is given
    while speed_check:
        caught = False  # set caught to false every time the loop loops to ensure correct error message
        print("Enter a number between 1 and 5 to determine the speed of the simulation where 1 is equivalent to real "
              "time & 5 is 20x that")
        try:
            speed = int(input(""))
        # NOTE(review): bare except also swallows KeyboardInterrupt; except ValueError would be narrower.
        except:  # if the user enters anything other than a number, set caught to true and display an error message
            caught = True
            print("You have provided an invalid input. Please enter a number")
        # set the refresh speed according to the users input
        if 1 <= speed <= 5:
            if speed == 1:
                refresh = 100
            elif speed == 2:
                refresh = 75
            elif speed == 3:
                refresh = 50
            elif speed == 4:
                refresh = 25
            elif speed == 5:
                refresh = 5
            speed_check = False  # exit the while loop
        else:  # if the entered value was not between 1 and 5 print an error message accordingly
            if not caught:  # if there was an invalid number, print accordingly
                print("You have given an invalid input. Please enter a number between 1 and 5")
    tk = Tk()  # this is so that i can reference Tk() as tk instead
    tk.title("Machine Learning Mouse")  # give the window a title
    tk.resizable(0, 0)  # make the window unable to be resized
    canvas = Canvas(tk, width=(size * 20), height=(size * 20))  # set the height of the window in proportion to 'size'
    canvas.create_rectangle(-10, -10, (size * 21), (size * 21), fill="black")  # set the background to back
    print("The simulation will automatically terminate once the mouse has conclusively determined the optimal number "
          "of moves to the cheese")
    # create the image variables to be called later
    cheese_image = PhotoImage(file='res/cheese.png')
    bomb_image = PhotoImage(file='res/bomb.png')
    mouse_image = PhotoImage(file='res/mouse.png')
    start = time.time()  # start the timer to be referenced at the end of the program
    # loop while the program is learning (searching for the cheese)
    while learning:
        count = count + 1  # add 1 to the iteration number
        dead = False  # used for a while loop to update various events while the mouse is still moving & not dead
        canvas.pack()  # this and the command below update the entire canvas and allow it to be displayed
        tk.update()
        for x in range(0, size):  # draw the white lines which create the grid
            canvas.create_line((x * 20), 0, (x * 20), (size * 20), fill="white")
            canvas.create_line(0, (x * 20), (size * 20), (x * 20), fill="white")
        # the mouse class
        # NOTE(review): Mouse/Cheese/Obstacles are re-defined on every learning
        # iteration; hoisting the class definitions out of the loop would behave
        # the same.
        class Mouse:
            """Mouse sprite: holds grid/pixel coordinates and the current direction."""
            # initializer method to set the first values of the mouse & create the variables needed
            def __init__(self, mouse_canvas):
                self.canvas = mouse_canvas
                self.x_coord = 8  # the x grid unit which the mouse is first located in
                self.y_coord = 8  # the y grid unit which the mouse is first located in
                self.mouseX = self.x_coord * 20  # calculating the actual pixel value of the mouse's x
                self.mouseY = self.y_coord * 20  # calculating the actual pixel value of the mouse's y
                canvas.create_image(self.mouseX, self.mouseY, image=mouse_image, anchor=NW)  # drawing the first mouse
                self.dir = 0  # setting the direction variable to nothing to start
            # method to update the location of the mouse and draw the cheese & the mouse at the updated location
            # NOTE(review): tkinter's canvas y axis grows downward, so dir 1
            # ("up" in the author's labels) moves the sprite down the screen;
            # the labels are consistent with choose_dir() below.
            def update(self):
                if self.dir == 1:  # moving up
                    self.mouseY = self.mouseY + 20
                elif self.dir == 2:  # moving right
                    self.mouseX = self.mouseX + 20
                elif self.dir == 3:  # moving down
                    self.mouseY = self.mouseY - 20
                elif self.dir == 4:  # moving left
                    self.mouseX = self.mouseX - 20
                canvas.delete(ALL)  # wipes the canvas of all objects
                canvas.create_rectangle(-10, -10, (size * 21), (size * 21), fill="black")  # creates a black background
                canvas.create_image(cheese.cheeseX, cheese.cheeseY, image=cheese_image, anchor=NW)  # draws the cheese
                canvas.create_image(self.mouseX, self.mouseY, image=mouse_image, anchor=NW)  # draws the mouse
                for v in range(0, size):  # draw the white lines which create the grid
                    canvas.create_line((v * 20), 0, (v * 20), (size * 20), fill="white")
                    canvas.create_line(0, (v * 20), (size * 20), (v * 20), fill="white")
        # the cheese class
        class Cheese:
            """Cheese sprite: fixed goal position drawn once at construction."""
            # initializer method to set the first values of the cheese & create the variables needed
            def __init__(self, cheese_canvas):
                self.canvas = cheese_canvas
                self.x_coord = 1  # the x grid unit which the cheese stays in
                self.y_coord = 1  # the y grid unit which the cheese stays in
                self.cheeseX = self.x_coord * 20  # calculating the actual pixel value for the cheese's x
                self.cheeseY = self.y_coord * 20  # calculating the actual pixel value for the cheese's y
                canvas.create_image(self.cheeseX, self.cheeseY, image=cheese_image, anchor=NW)  # draws the cheese
        # the class for the various obstacles
        class Obstacles:
            """Four fixed bomb obstacles placed around the cheese."""
            # initializer method to set the various obstacles coordinates and draw them
            def __init__(self, obstacle_canvas):
                self.canvas = obstacle_canvas
                self.x1_coord = 2  # obstacle 1's x grid coordinate
                self.y1_coord = 3  # obstacle 1's y grid coordinate
                self.obstacle1X = self.x1_coord * 20  # obstacle 1's actual pixel location for the x coordinate
                self.obstacle1Y = self.y1_coord * 20  # obstacle 1's actual pixel location for the y coordinate
                canvas.create_image(self.obstacle1X, self.obstacle1Y, image=bomb_image, anchor=NW)
                self.x2_coord = 3  # obstacle 2's x grid coordinate
                self.y2_coord = 2  # obstacle 2's y grid coordinate
                self.obstacle2X = self.x2_coord * 20  # obstacle 2's actual pixel location for the x coordinate
                self.obstacle2Y = self.y2_coord * 20  # obstacle 2's actual pixel location for the y coordinate
                canvas.create_image(self.obstacle2X, self.obstacle2Y, image=bomb_image, anchor=NW)
                self.x3_coord = 4  # obstacle 3's x grid coordinate
                self.y3_coord = 1  # obstacle 3's y grid coordinate
                self.obstacle3X = self.x3_coord * 20  # obstacle 3's actual pixel location for the x coordinate
                self.obstacle3Y = self.y3_coord * 20  # obstacle 3's actual pixel location for the y coordinate
                canvas.create_image(self.obstacle3X, self.obstacle3Y, image=bomb_image, anchor=NW)
                self.x4_coord = 1  # obstacle 4's x grid coordinate
                self.y4_coord = 4  # obstacle 4's y grid coordinate
                self.obstacle4X = self.x4_coord * 20  # obstacle 4's actual pixel location for the x coordinate
                self.obstacle4Y = self.y4_coord * 20  # obstacle 4's actual pixel location for the y coordinate
                canvas.create_image(self.obstacle4X, self.obstacle4Y, image=bomb_image, anchor=NW)
            # the method to re-draw the obstacles when necessary
            def draw(self):
                canvas.create_image(self.obstacle1X, self.obstacle1Y, image=bomb_image, anchor=NW)  # draw obstacle 1
                canvas.create_image(self.obstacle2X, self.obstacle2Y, image=bomb_image, anchor=NW)  # draw obstacle 2
                canvas.create_image(self.obstacle3X, self.obstacle3Y, image=bomb_image, anchor=NW)  # draw obstacle 3
                canvas.create_image(self.obstacle4X, self.obstacle4Y, image=bomb_image, anchor=NW)  # draw obstacle 4
                for v in range(0, size):  # draw the white lines which create the grid
                    canvas.create_line((v * 20), 0, (v * 20), (size * 20), fill="white")
                    canvas.create_line(0, (v * 20), (size * 20), (v * 20), fill="white")
        # the method which chooses the next direction for the mouse and implements the Q-Learning algorithm
        # Greedy policy over the Q table (ties broken uniformly at random),
        # followed by the standard tabular update:
        #   Q(s, a) += lr * (reward + discount * max_a' Q(s', a') - Q(s, a))
        def choose_dir():
            max_values = []  # array to contain all of the max Q table values
            new_position = 0  # create a variable to hold the value of the next state
            # create a 4 digit variable to hold the value of the current state ([cheeseX][cheeseY][mouseX][mouseY])
            position = (mouse.mouseX / 20) + ((mouse.mouseY / 20) * size) + (((cheese.cheeseX / 20) * size) * size) + \
                       ((((cheese.cheeseY / 20) * size) * size) * size)
            # set a variable to the max of the 4 Q table values at the current state
            direction = max(QTable[int(position)][0], QTable[int(position)][1], QTable[int(position)][2],
                            QTable[int(position)][3])
            if QTable[int(position)][0] == direction:  # if the max val is equal to the up value add it to the array
                max_values = numpy.append(max_values, ['up'], axis=0)
            if QTable[int(position)][1] == direction:  # if the max val is equal to the right value add it to the array
                max_values = numpy.append(max_values, ['right'], axis=0)
            if QTable[int(position)][2] == direction:  # if the max val is equal to the down value add it to the array
                max_values = numpy.append(max_values, ['down'], axis=0)
            if QTable[int(position)][3] == direction:  # if the max val is equal to the left value add it to the array
                max_values = numpy.append(max_values, ['left'], axis=0)
            # chose a random value from all of the maximum direction in the array we have just appended to
            going = random.choice(max_values)
            if going == 'up':  # if the chosen direction is up change the mouses dir. and change the new position
                mouse.dir = 1
                new_position = position + size
            if going == 'right':  # if the chosen direction is right change the mouses dir. and change the new position
                mouse.dir = 2
                new_position = position + 1
            if going == 'down':  # if the chosen direction is down change the mouses dir. and change the new position
                mouse.dir = 3
                new_position = position - size
            if going == 'left':  # if the chosen direction is left change the mouses dir. and change the new position
                mouse.dir = 4
                new_position = position - 1
            # if the mouse if currently at the cheese, set the reward value to 500
            if mouse.mouseX == cheese.cheeseX and mouse.mouseY == cheese.cheeseY:
                reward = 500
            # if the mouse if currently at any of the obstacles, or has gone off screen, set the reward value to -1000
            elif mouse.mouseX > (size * 19 + 6) or mouse.mouseX < -1 or mouse.mouseY > (size * 19 + 6) or mouse.mouseY \
                    < -1 or (mouse.mouseX == obstacles.obstacle1X and mouse.mouseY == obstacles.obstacle1Y) or \
                    (mouse.mouseX == obstacles.obstacle2X and mouse.mouseY == obstacles.obstacle2Y) or \
                    (mouse.mouseX == obstacles.obstacle3X and mouse.mouseY == obstacles.obstacle3Y) or \
                    (mouse.mouseX == obstacles.obstacle4X and mouse.mouseY == obstacles.obstacle4Y):
                reward = -1000
            # if neither of the above have occurred, set the reward value to -2 just for moving
            else:
                reward = -2
            # use the Q learning algorithm to set a new value for the chosen state based on the current reward, the
            # learning rate, the discount rate, and the next position
            QTable[int(position)][mouse.dir - 1] = (QTable[int(position)][mouse.dir - 1]) + (learningRate *
                                                   (reward + (discountRate * max(QTable[int(new_position)][0],
                                                                                 QTable[int(new_position)][1],
                                                                                 QTable[int(new_position)][2],
                                                                                 QTable[int(new_position)][3]))
                                                    - (QTable[int(position)]
                                                       [mouse.dir - 1])))
        cheese = Cheese(canvas)  # this is so that i can reference Cheese() as 'cheese' when used later
        mouse = Mouse(canvas)  # this is so that i can reference Mouse() as 'mouse' when used later
        obstacles = Obstacles(canvas)  # this is so that i can reference Obstacles() as 'obstacles' when used later
        if count == 1:  # if it is the very first iteration, calculate the minimum number of moves to the cheese
            minimumMoves = abs(mouse.x_coord - cheese.x_coord) + abs(mouse.y_coord - cheese.y_coord)
        while not dead:  # while the mouse is not dead (off screen or hit an obstacle)
            mouse.update()  # call the update method in the mouse class
            obstacles.draw()  # call the draw method in the obstacles class
            choose_dir()  # call the choose direction method
            # if the mouse has made over 100 moves (ie. is stuck)
            if moves > 100:
                score = 0  # reset the consecutive-cheeses-found counter
                dead = True  # exit this while loop
                previousMoves = moves  # set the previous moves to the current moves
                moves = 0  # reset the moves for the next iteration
            # if the mouse has reached the cheese
            elif mouse.mouseX == cheese.cheeseX and mouse.mouseY == cheese.cheeseY:
                score = score + 1  # add 1 to the consecutive-cheeses-found counter
                if moves == previousMoves:  # if the previous moves and are same as the correct moves adjust accordingly
                    sameMoves = sameMoves + 1
                else:  # if not reset the consecutive same moves counter
                    sameMoves = 0
                cheeseCount = cheeseCount + 1  # add 1 to the total # of cheese's found
                if moves == minimumMoves:  # if the mouse reached the cheese in the minimum moves, print accordingly
                    print("Iteration #" + str(count) + " is mouse #" + str(cheeseCount) + " to have found the cheese "
                                                                                          "and it did so in the least "
                                                                                          "possible moves")
                else:  # if the mouse simply reached the cheese, print the iteration number and the total cheese count
                    print("Iteration #" + str(count) + " is mouse #" + str(cheeseCount) + " to have found the cheese")
                if not last_run:  # once the last run has been completed and it is set to false, quit the learning loop
                    learning = False
                # if the mouse has found the cheese 10 times in a row, all in the same number of moves, set the refresh
                # speed to 250 milliseconds so the program looks like its moving in slow-motion & set last_run to false
                if score > 9 and sameMoves > 9:
                    refresh = 250
                    last_run = False
                dead = True  # dead is set to true to exit the loop exit the loop
                previousMoves = moves  # set the previous moves to the current moves
                moves = 0  # reset the moves for the next iteration
                for w in range(0, size):  # draw the green lines which create the grid showing success
                    canvas.create_line((w * 20), 0, (w * 20), (size * 20), fill="green")
                    canvas.create_line(0, (w * 20), (size * 20), (w * 20), fill="green")
            # if the mouse either goes off the screen or runs into 1 of the 4 obstacles
            # NOTE(review): the off-screen threshold (size * 19 + 6) does not equal
            # the canvas edge (size * 20) exactly — presumably tuned empirically;
            # confirm it stays correct if `size` ever changes.
            elif mouse.mouseX > (size * 19 + 6) or mouse.mouseX < -1 or mouse.mouseY > (size * 19 + 6) or mouse.mouseY \
                    < -1 or (mouse.mouseX == obstacles.obstacle1X and mouse.mouseY == obstacles.obstacle1Y) or \
                    (mouse.mouseX == obstacles.obstacle2X and mouse.mouseY == obstacles.obstacle2Y) or \
                    (mouse.mouseX == obstacles.obstacle3X and mouse.mouseY == obstacles.obstacle3Y) or \
                    (mouse.mouseX == obstacles.obstacle4X and mouse.mouseY == obstacles.obstacle4Y):
                score = 0  # reset the consecutive-cheeses-found counter
                dead = True  # dead is set to true to exit the loop exit the loop
                previousMoves = moves  # set the previous moves to the current moves
                moves = 0  # reset the moves for the next iteration
                for w in range(0, size):  # draw the red lines which create the grid showing failure
                    canvas.create_line((w * 20), 0, (w * 20), (size * 20), fill="red")
                    canvas.create_line(0, (w * 20), (size * 20), (w * 20), fill="red")
            # if the mouse neither hit an obstacle, went off screen or reached the cheese add 1 to its moves taken
            else:
                moves = moves + 1
            tk.after(refresh)  # pause the simulation for 'refresh' milliseconds
            tk.update_idletasks()  # this and the command below update the entire canvas and allow it to be displayed
            tk.update()
    # once the mouse is done learning:
    finish = time.time()  # stop the timer
    finalTime = finish - start  # calculate the total # of seconds elapsed
    finalTime = finalTime/60  # calculate the minutes elapsed
    finalSeconds = finalTime - math.floor(finalTime)  # calculate the remaining seconds elapsed (in minutes)
    finalSeconds = math.floor(finalSeconds * 60)  # convert this decimal to a rounded amount of seconds
    # print the total number of iterations the mouse made and the # of times it reached the cheese
    # (the "- 10" discounts the 10 confirmation runs required before termination)
    print('After ' + str(count - 10) + " iterations, the mouse determined the optimal number of moves to the cheese, "
                                       "which it reached " + str(cheeseCount - 10) + " times")
    # print the time elapsed in minutes and seconds
    print("The simulation terminated after " + str(math.floor(finalTime)) + " minutes and " + str(finalSeconds) +
          " seconds")
    tk.destroy()  # destroy the canvas (force close the window)
    print("Press enter to restart the program or type 'N' to quit")  # prompt the user to either quit or restart
    restartQ = input("")
    if restartQ == "N":  # if they chose to quit, exit the main loop and the program will end
        looping = False
    else:  # if they chose to restart, re-initialize all variables to their starting values
        size = 10
        learningRate = 0.80
        discountRate = 0.70
        score = 0
        minimumMoves = 0
        cheeseCount = 0
        previousMoves = 0
        sameMoves = 0
        moves = 0
        count = 0
        speed = 0
        refresh = 0
        looping = True
        learning = True
        caught = False
        speed_check = True
        last_run = True
        # re-declare the Q table array & fill it completely with 0s using the for loop
        QTable = [[float(0), float(0), float(0), float(0)]]
        for i in range(0, (size * size * size * size)):
            QTable = numpy.append(QTable, [[float(0), float(0), float(0), float(0)]], axis=0)
|
# -*- coding: utf-8 -*-
# @Time : 2020/7/4
# @Author : <NAME>
# @FileName: MyLightModule.py
# @GitHub : https://github.com/lartpang/MINet/tree/master/code/utils/imgs
import os
import random
from functools import partial
import torch
from PIL import Image
from torch.nn.functional import interpolate
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision import transforms
from utils.joint_transforms import Compose, JointResize, RandomHorizontallyFlip, RandomRotate
from utils.misc import construct_print
def _get_ext(path_list):
ext_list = list(set([os.path.splitext(p)[1] for p in path_list]))
if len(ext_list) != 1:
if ".png" in ext_list:
ext = ".png"
elif ".jpg" in ext_list:
ext = ".jpg"
elif ".bmp" in ext_list:
ext = ".bmp"
else:
raise NotImplementedError
construct_print(f"数据文件夹中包含多种扩展名,这里仅使用{ext}")
else:
ext = ext_list[0]
return ext
def _make_dataset(root):
    """Pair image and mask paths found under `root`'s Image/ and Mask/ folders.

    The sample stems are taken from the Mask folder, so only samples that
    have a mask are returned; extensions are inferred per folder.
    """
    img_dir = os.path.join(root, "Image")
    mask_dir = os.path.join(root, "Mask")
    img_ext = _get_ext(os.listdir(img_dir))
    mask_names = os.listdir(mask_dir)
    mask_ext = _get_ext(mask_names)
    stems = [os.path.splitext(name)[0] for name in mask_names if name.endswith(mask_ext)]
    return [(os.path.join(img_dir, stem + img_ext), os.path.join(mask_dir, stem + mask_ext))
            for stem in stems]
def _make_dataset_from_list(list_filepath, prefix=(".jpg", ".png")):
img_list = []
with open(list_filepath, mode="r", encoding="utf-8") as openedfile:
line = openedfile.readline()
while line:
img_list.append(line.split()[0])
line = openedfile.readline()
return [
(
os.path.join(os.path.join(os.path.dirname(img_path), "Image"), os.path.basename(img_path) + prefix[0],),
os.path.join(os.path.join(os.path.dirname(img_path), "Mask"), os.path.basename(img_path) + prefix[1],),
)
for img_path in img_list
]
class ImageFolder(Dataset):
    """Saliency dataset: yields (image, mask, name) in training mode and
    (image, name, mask_path) in test mode.

    `root` may be either a directory containing Image/ and Mask/ subfolders
    or a text file listing sample paths (see _make_dataset_from_list).
    """
    def __init__(self, root, in_size, prefix=(".jpg", ".png"), training=True):
        self.training = training
        if os.path.isdir(root):
            construct_print(f"{root} is an image folder")
            self.imgs = _make_dataset(root)
        elif os.path.isfile(root):
            construct_print(f"{root} is a list of images, we will read the corresponding image")
            self.imgs = _make_dataset_from_list(root, prefix=prefix)
        else:
            print(f"{root} is invalid")
            raise NotImplementedError
        if self.training:
            # Joint transforms keep image and mask geometrically aligned.
            self.train_joint_transform = Compose([JointResize(in_size), RandomHorizontallyFlip(), RandomRotate(10)])
            self.train_img_transform = transforms.Compose(
                [
                    transforms.ColorJitter(0.1, 0.1, 0.1),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # operates on the Tensor
                ]
            )
            self.train_mask_transform = transforms.ToTensor()
        else:
            # NOTE(review): attribute name keeps the original "trainsform" typo;
            # renaming would need a coordinated change at every external use site.
            self.test_img_trainsform = transforms.Compose(
                [
                    # Resize with a (h, w) tuple scales to that exact size; a
                    # single int would scale the short side to that value instead.
                    transforms.Resize((in_size, in_size)),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                ]
            )
    def __getitem__(self, index):
        img_path, mask_path = self.imgs[index]
        img_name = (img_path.split(os.sep)[-1]).split(".")[0]
        img = Image.open(img_path).convert("RGB")
        if self.training:
            mask = Image.open(mask_path).convert("L")  # masks are single-channel
            img, mask = self.train_joint_transform(img, mask)
            img = self.train_img_transform(img)
            mask = self.train_mask_transform(mask)
            return img, mask, img_name
        else:
            # Test mode never loads the mask; only its path is returned.
            img = self.test_img_trainsform(img)
            return img, img_name, mask_path
    def __len__(self):
        return len(self.imgs)
def _collate_fn(batch, size_list):
size = random.choice(size_list)
img, mask = [list(item) for item in zip(*batch)]
img = torch.stack(img, dim=0)
img = interpolate(img, size=(size, size), mode="bilinear", align_corners=False)
mask = torch.stack(mask, dim=0)
mask = interpolate(mask, size=(size, size), mode="nearest")
return img, mask
def create_loader(
    data_set,
    size_list=None,
    batch_size=1,
    num_workers=0,
    shuffle=True,
    sampler=None,
    drop_last=True,
    pin_memory=True,
):
    """Wrap `data_set` in a DataLoader.

    When `size_list` is given, batches are collated by `_collate_fn`, which
    rescales every batch to a size randomly drawn from the list; otherwise
    PyTorch's default collation is used.
    """
    if size_list:
        collate = partial(_collate_fn, size_list=size_list)
    else:
        collate = None
    return DataLoader(
        dataset=data_set,
        collate_fn=collate,
        batch_size=batch_size,
        num_workers=num_workers,
        shuffle=shuffle,
        drop_last=drop_last,
        pin_memory=pin_memory,
        sampler=sampler,
    )
|
<reponame>rsyamil/applied-nlp
import sys
import re
import os
import collections
import json
import numpy as np
# English stopword list copied from NLTK's stopwords corpus, inlined so the
# script does not need NLTK installed at run time.
stopwords = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you',
             "you're", "you've", "you'll", "you'd", 'your', 'yours', 'yourself',
             'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers',
             'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs',
             'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll",
             'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
             'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
             'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
             'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
             'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
             'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once',
             'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each',
             'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only',
             'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just',
             'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', 've',
             'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn',
             "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn',
             "isn't", 'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't",
             'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't",
             'won', "won't", 'wouldn', "wouldn't"]
#parse test files, ignore folder (3-levels deep) hierarchy
model_file = sys.argv[1]
main_input_path = sys.argv[2]

# Load model parameters: the JSON file holds a single-element list whose only
# entry is the dict of features / weights / biases written by the trainer.
with open(model_file, 'r', encoding='utf-8') as outf_p:
    [model_w_b] = json.load(outf_p)
features = model_w_b["features"]
print("Features loaded : ", len(features))

# Parse the test reviews (one review file per .txt, README files skipped).
p_posterior = {}
for root, dirs, files in os.walk(main_input_path):
    reviews = (f for f in files if f.endswith('.txt') and not f.startswith('README'))
    for r in reviews:
        p_f_r = root + '/' + r
        # BUG FIX: the review file handle was never closed; a context
        # manager guarantees it is.
        with open(p_f_r, 'r') as p_f_r_file:
            for texts in p_f_r_file:
                texts = re.sub(r'[^\w\s]', ' ', texts)  # remove punctuation
                tokens = texts.lower().split()  # convert to lowercase
                tokens = [w for w in tokens if w not in stopwords]  # remove stopwords
                tokens = [w for w in tokens if not (w.isdigit()  # remove numbers
                          or w[0] == '-' and w[1:].isdigit())]
                p_posterior[p_f_r] = {"bag_of_words": tokens,
                                      "path": p_f_r,
                                      "features": 1,
                                      "labela": 1,
                                      "labelb": 1}
print("Reviews (test) parsed : ", len(p_posterior))

# Create the binary feature vector (word present = 1, absent = 0) for each
# test data point.
for item in p_posterior:
    b_o_w = p_posterior[item]['bag_of_words']
    p_posterior[item]['features'] = [1 if f in b_o_w else 0 for f in features]

# Load weights and bias for both binary perceptrons.
weights_a = np.asarray(model_w_b["weights_a"])
bias_a = model_w_b["bias_a"]
weights_b = np.asarray(model_w_b["weights_b"])
bias_b = model_w_b["bias_b"]
print("Weights and bias loaded for (deceptive[-1], truthful[+1]) : ", weights_a.shape, bias_a)
print("Weights and bias loaded for (negative [-1], positive [+1]) : ", weights_b.shape, bias_b)

# Assign a label pair for each test data point from the sign of the
# activation (sign == 0 falls into the positive/truthful branch, as before).
for item in p_posterior:
    f = np.reshape(np.asarray(p_posterior[item]['features']), (1, len(features)))
    act_a = np.sign(np.squeeze(f @ weights_a + bias_a))
    act_b = np.sign(np.squeeze(f @ weights_b + bias_b))
    p_posterior[item]["labelb"] = "negative" if act_b < 0 else "positive"
    p_posterior[item]["labela"] = "deceptive" if act_a < 0 else "truthful"

# Write the predictions. BUG FIX: the original called `output.close` without
# parentheses, so the file was never explicitly closed/flushed; `with`
# guarantees both.
with open('percepoutput.txt', 'w') as output:
    for item in p_posterior:
        out_line = p_posterior[item]["labela"] + "\t" + p_posterior[item]["labelb"] + "\t" + p_posterior[item]["path"]
        output.write(out_line + '\n')
|
<gh_stars>0
from urllib.request import urlopen
import pandas as pd
from lib.basics import *
# Retrieving data from github repository
def download_data():
    """Download the current JHU feed for every category into local feed files."""
    print_log("Downloading data from JHU repository ...")
    today = set_date()
    # "base" plus every regular category except the derived last one.
    for category in ["base"] + get_categories()[:-1]:
        # Fetch the raw feed from the web ...
        with urlopen(get_feed_url(category)) as response:
            payload = response.read().decode("utf-8")
        # ... and mirror it verbatim into the local feed file.
        with get_feed_file_path(today, category).open("w", newline="") as feed_file:
            feed_file.write(payload)
    print_log("Download finished")
# Preparing data for further usage
def prepare_base_data(date_):
    """Prepare the basic country data (ISO3 code, full name, population
    size) for *date_* and write it as JSON to the base data file.
    """
    # Reading the feed csv-file into a DataFrame, taking only the necessary
    # columns (2 = ISO3-codes, 6 = province/state name, 7 = country name,
    # 11 = population size).
    df = pd.read_csv(
        str(get_feed_file_path(date_, "base")), usecols=[2, 6, 7, 11]
    )
    # Dropping of:
    # - rows with 1. column (no ISO3-code) empty or 2. column
    #   ('Province_State') not empty (additional information on a
    #   sub-country level)
    # - column 2 (only used for filtering out unnecessary rows)
    drop_rows = [
        r
        for r in df.index
        if pd.isna(df.iat[r, 0]) or not pd.isna(df.iat[r, 1])
    ]
    df = df.drop(index=drop_rows, columns=[df.columns[1]])
    # Dumping frame in a list of per-country dictionaries
    df.columns = ["iso3", "name", "pop"]
    countries = df.to_dict(orient="records")
    # Adding some special cases that are not provided:
    # - 2 ships
    # - the Summer Olympics
    # - 1 entry for the total
    countries += [
        {"iso3": "DPR", "name": "<NAME>", "pop": 3700},
        {"iso3": "ZDM", "name": "<NAME>", "pop": 1829},
        {"iso3": "SO2", "name": "Summer Olympics 2020", "pop": 10000},
        {
            "iso3": "TTL",
            "name": "Total",
            "pop": df["pop"].sum(axis="index"),
        },
    ]
    # Sorting alphabetically along iso3 code
    countries.sort(key=lambda item: item["iso3"])
    # BUG FIX: the file handle was passed straight to json.dump and never
    # closed; the context manager guarantees flush + close.
    with get_data_file_path(date_, name="base").open("w") as base_file:
        json.dump(countries, base_file, indent=4)
def get_base_data(date_, columns=("iso3", "name", "pop")):
    """Extract a (nested) dictionary from the base data for *date_*.

    The values of the first requested column act as keys of the outer
    dictionary and the remaining columns as the keys of the inner
    dictionaries. If only 2 columns are requested there is no inner
    dictionary: the values are simply the values of the second column.
    """
    object_hook = lambda obj: {column: obj[column] for column in columns}
    # BUG FIX: the file handle was never closed; read inside a context
    # manager instead.
    with get_data_file_path(date_, name="base").open("r") as base_file:
        countries = json.load(base_file, object_hook=object_hook)
    if len(columns) == 2:
        # Flat mapping: first column -> second column.
        return {
            country[columns[0]]: country[columns[1]] for country in countries
        }
    # Nested mapping: first column -> {remaining columns -> values}.
    key, *non_keys = columns
    return {
        country[key]: {column: country[column] for column in non_keys}
        for country in countries
    }
def prepare_data(date_, excel_output=False):
    """Actual data preparation (see the comments for details).

    Reads the downloaded feed files for *date_*, derives cumulative,
    per-capita, difference and moving-average time series per category, and
    writes everything to a gzipped JSON file (and optionally an Excel file).
    """
    print_log("Preparing data ...")
    # Preparing the base data (name, keys, pop-numbers)
    prepare_base_data(date_)
    # Getting a dictionary that translates country names in iso3-code
    name_to_iso3 = get_base_data(date_, columns=("name", "iso3"))
    categories = get_categories()
    prepped_data = {}
    for category in categories[:-1]:
        # Reading the csv-feed-file into a DataFrame
        df = pd.read_csv(get_feed_file_path(date_, category))
        # Aggregate (sum) over rows which belong to the same country (names in
        # column 2), which also makes the country names the new index
        df = df.groupby(df.columns[1]).sum()
        # Dropping the unnecessary columns (longitudes and latitudes -> 1, 2)
        df = df.drop(columns=[df.columns[i] for i in {0, 1}])
        # Setting a new index: ISO3-codes of the countries
        df.index = pd.Index([name_to_iso3[name] for name in df.index])
        # Transposing the DataFrame and thereby producing real time series
        df = df.T
        # Fixing index: Setting a new index with proper date-times
        df.index = pd.Index(pd.to_datetime(df.index))
        # Fixing columns: Adding a column for the total sum of all countries
        df = (pd.concat([df, df.sum(axis="columns")], axis="columns")
              .rename({0: "TTL"}, axis="columns"))
        # Packing the frame in the dictionary for the prepped data
        prepped_data[category] = {"cum": df.copy()}
    # Adding the table of cumulated data of active cases to the dictionary
    prepped_data["active"] = {}
    prepped_data["active"]["cum"] = (
        prepped_data["confirmed"]["cum"]
        - prepped_data["recovered"]["cum"]
        - prepped_data["deaths"]["cum"]
    )
    # Creating the rest of the dependent data (rel, diffs, ma, ...)
    popn = get_base_data(date_, columns=("iso3", "pop"))
    for category in categories:
        # Per-country scale factors: population in millions / in 100k units.
        pmio = [
            popn[country] / 1e6
            for country in prepped_data[category]["cum"].columns
        ]
        p100k = [
            popn[country] / 1e5
            for country in prepped_data[category]["cum"].columns
        ]
        prepped_data[category]["cum_rel_popmio"] = \
            prepped_data[category]["cum"].div(pmio)
        prepped_data[category]["cum_rel_pop100k"] = \
            prepped_data[category]["cum"].div(p100k)
        prepped_data[category]["diff"] = prepped_data[category]["cum"].diff()
        prepped_data[category]["diff_rel_popmio"] = \
            prepped_data[category]["cum_rel_popmio"].diff()
        prepped_data[category]["diff_rel_pop100k"] = \
            prepped_data[category]["cum_rel_pop100k"].diff()
        # One-week (7 day) moving averages of the daily differences.
        prepped_data[category]["diff_ma1w"] = \
            prepped_data[category]["diff"].rolling(7).mean()
        prepped_data[category]["diff_rel_popmio_ma1w"] = \
            prepped_data[category]["diff_rel_popmio"].rolling(7).mean()
        prepped_data[category]["diff_rel_pop100k_ma1w"] = \
            prepped_data[category]["diff_rel_pop100k"].rolling(7).mean()
        if category == "active":
            # Relative daily change of the active cases (w.r.t. yesterday).
            prepped_data[category]["diff_rel_active"] = (
                prepped_data[category]["diff"]
                .div(prepped_data[category]["cum"].shift(periods=1))
            )
    # If asked for (keyword argument excel_output=True): Writing the data
    # organised by tables which respectively contain all countries sheet-wise
    # into one large Excel-file
    if excel_output:
        print_log("Writing Excel-file ...")
        xlsx_file_path = str(get_data_file_path(date_, file_format="xlsx"))
        with pd.ExcelWriter(xlsx_file_path) as xlsx_file:
            for category, variant in [
                (category, variant)
                for category in categories
                for variant in prepped_data[category]
            ]:
                df = prepped_data[category][variant]
                df.to_excel(xlsx_file, sheet_name=f"{category}_{variant}")
        print_log("Excel-file finished")
    # Writing the data in one JSON-file: Organized with a multi-index
    # (category, variant, day) and the countries as columns
    # Stacking the individual frames in order category -> variant after
    # adjusting the index. PERF: collect the frames and concatenate once
    # instead of re-concatenating df_all in every iteration.
    frames = []
    for category, variant in [
        (category, variant)
        for category in categories
        for variant in prepped_data[category]
    ]:
        df = prepped_data[category][variant]
        df.index = pd.MultiIndex.from_tuples(
            zip(
                [category] * len(df.index),
                [variant] * len(df.index),
                list(df.index),
            )
        )
        frames.append(df)
    df_all = pd.concat(frames)
    # Giving the new index names, and the columns as well
    df_all.index.names = ["category", "variant", "date"]
    df_all.columns.name = "country"
    # Sorting for better query performance.
    # BUG FIX: sort_index() returns a new frame (it is not in-place), so the
    # result has to be assigned back -- the original call was a no-op.
    df_all = df_all.sort_index()
    # Writing the new frame in a JSON-file
    print_log("Writing JSON-file ...")
    json_file_path = get_data_file_path(date_, file_format="json.gz")
    df_all.to_json(
        json_file_path, orient="table", indent=4, compression="gzip"
    )
    print_log("JSON-file finished")
    print_log("Data preparation finished")
|
import os
import math
import torch
import random
import numpy as np
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
import torch.nn.init as init
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torch.utils.data import SubsetRandomSampler
class Discriminator(nn.Module):
    """MLP discriminator over latent codes: z -> two sigmoid-squashed outputs."""

    def __init__(self, z_dim=2):
        super(Discriminator, self).__init__()
        self.z_dim = z_dim
        # Same architecture as before, assembled programmatically:
        # input layer, four 1000-wide hidden layers, 2-way output + sigmoid.
        layers = [nn.Linear(z_dim, 1000), nn.LeakyReLU(0.2, True)]
        for _ in range(4):
            layers.append(nn.Linear(1000, 1000))
            layers.append(nn.LeakyReLU(0.2, True))
        layers.append(nn.Linear(1000, 2))
        layers.append(nn.Sigmoid())
        self.net = nn.Sequential(*layers)
        self.weight_init()

    def weight_init(self, mode='normal'):
        """Initialise every layer of every sub-network with normal_init."""
        initializer = normal_init
        for block in self._modules:
            for m in self._modules[block]:
                initializer(m)

    def forward(self, z):
        return self.net(z).squeeze()
class VAE1(nn.Module):
    """Simple MLP VAE for flattened 28x28 images with a `z_dim`-dim latent."""

    def __init__(self, z_dim=2):
        super(VAE1, self).__init__()
        self.z_dim = z_dim
        self.encode = nn.Sequential(
            nn.Linear(784, 400),
            nn.LeakyReLU(0.2, True),
            nn.Linear(400, 400),
            nn.LeakyReLU(0.2, True),
            nn.Linear(400, 2 * z_dim),  # first half: mu, second half: logvar
        )
        self.decode = nn.Sequential(
            nn.Linear(z_dim, 400),
            nn.LeakyReLU(0.2, True),
            nn.Linear(400, 400),
            nn.LeakyReLU(0.2, True),
            nn.Linear(400, 784),
            nn.Sigmoid(),
        )
        self.weight_init()

    def weight_init(self, mode='normal'):
        """Apply normal_init to every layer of every sub-network."""
        initializer = normal_init
        for block in self._modules:
            for m in self._modules[block]:
                initializer(m)

    def reparametrize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I)."""
        std = logvar.mul(0.5).exp_()
        # Modernised from the deprecated `std.data.new(...).normal_()`.
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)

    def forward(self, x, no_enc=False, no_dec=False):
        """Full pass by default; `no_enc` decodes 100 prior samples,
        `no_dec` returns the sampled latent codes only.

        Returns:
            no_enc:  decoded prior samples, shaped like x
            no_dec:  z (squeezed)
            default: (x_recon, mu, logvar, z)
        """
        stats = self.encode(x.view(-1, 784))
        mu = stats[:, :self.z_dim]
        logvar = stats[:, self.z_dim:]
        z = self.reparametrize(mu, logvar)
        if no_enc:
            # BUG FIX: previously used the module-level globals `z_dim` and
            # `device`; use the instance's latent size and the input's device.
            z = torch.randn(100, self.z_dim, device=x.device)
            return self.decode(z).view(x.size())
        elif no_dec:
            return z.squeeze()
        else:
            x_recon = self.decode(z).view(x.size())
            return x_recon, mu, logvar, z.squeeze()
def normal_init(m):
    """Initialise a module in place.

    Linear/Conv2d layers get N(0, 0.02) weights and zero bias; batch-norm
    layers get unit weight and zero bias. Other module types are untouched.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        # BUG FIX: `init.normal` is the deprecated/removed spelling; the
        # supported in-place initialiser is `init.normal_`.
        init.normal_(m.weight, 0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)
def recon_loss(x_recon, x):
    """Binary cross-entropy reconstruction loss, summed over all elements
    and averaged over the batch dimension."""
    n = x.size(0)
    # BUG FIX: `size_average=False` is deprecated; `reduction='sum'` is the
    # supported equivalent.
    loss = F.binary_cross_entropy(x_recon, x, reduction='sum').div(n)
    return loss
def kl_divergence(mu, logvar):
    """KL(q(z|x) || N(0, I)) for a diagonal Gaussian, averaged over the batch."""
    per_sample = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1).sum(1)
    return per_sample.mean()
def convert_to_display(samples):
    """Tile a 4-D batch of images into one 2-D grid.

    Input shape (n, h, w, c); output shape (h * cnt, w * cnt) where
    cnt = floor(sqrt(n)). Only the first cnt*cnt samples fit the grid.
    """
    cnt = int(math.floor(math.sqrt(samples.shape[0])))
    height = samples.shape[1]
    width = samples.shape[2]
    grid = np.transpose(samples, axes=[1, 0, 2, 3])
    grid = np.reshape(grid, [height, cnt, cnt, width])
    grid = np.transpose(grid, axes=[1, 0, 2, 3])
    return np.reshape(grid, [height * cnt, width * cnt])
# Device selection: train on GPU when available.
use_cuda = torch.cuda.is_available()
device = 'cuda' if use_cuda else 'cpu'
# Training hyper-parameters.
max_iter = int(3000)  # number of epochs
batch_size = 100
z_dim = 2  # latent dimensionality of the VAE
lr_D = 0.001  # learning rate shared by the VAE and the discriminator
beta1_D = 0.9  # Adam betas, shared by both optimisers
beta2_D = 0.999
gamma = 6.4  # weight of the discriminator term in the VAE loss
# MNIST data (downloaded to ../data on first run).
training_set = datasets.MNIST('../data', train=True, download=True, transform=transforms.ToTensor())
test_set = datasets.MNIST('../data', train=False, download=True, transform=transforms.ToTensor())
data_loader = DataLoader(training_set, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_set, batch_size=500, shuffle=True, num_workers=3)
# Models and their optimisers.
VAE = VAE1().to(device)
D = Discriminator().to(device)
optim_VAE = optim.Adam(VAE.parameters(), lr=lr_D, betas=(beta1_D, beta2_D))
optim_D = optim.Adam(D.parameters(), lr=lr_D, betas=(beta1_D, beta2_D))
# Constant class-label tensors for the discriminator's cross-entropy loss.
ones = torch.ones(batch_size, dtype=torch.long, device=device)
zeros = torch.zeros(batch_size, dtype=torch.long, device=device)
# Adversarial VAE training: per batch, first update the VAE (reconstruction
# + gamma * discriminator term), then update the discriminator on prior
# samples vs. encoder samples.
for epoch in range(max_iter):
    train_loss = 0
    for batch_idx, (x_true, _) in enumerate(data_loader):
        x_true = x_true.to(device)
        x_recon, mu, logvar, z = VAE(x_true)
        vae_recon_loss = recon_loss(x_recon, x_true)
        vae_kld = kl_divergence(mu, logvar)
        # Discriminator term from the difference of the two outputs.
        D_z = D(z)
        vae_tc_loss = -(D_z[:, :1] - D_z[:, 1:]).mean()
        vae_loss = vae_recon_loss + gamma * vae_tc_loss
        train_loss += vae_loss.item()
        optim_VAE.zero_grad()
        # retain_graph: D_z is reused below for the discriminator loss.
        vae_loss.backward(retain_graph=True)
        optim_VAE.step()
        # Discriminator update: prior samples (label 0) vs. encoder samples
        # (label 1).
        z_prime = Variable(torch.randn(batch_size, z_dim), requires_grad=False).to(device)
        D_z_pperm = D(z_prime)
        D_loss = 0.5 * (F.cross_entropy(D_z_pperm, zeros) + F.cross_entropy(D_z, ones))
        optim_D.zero_grad()
        D_loss.backward()
        optim_D.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)] \t Loss: {:.6f} \t Discriminator Loss: {:.6f} \t Generator Loss: {:.6f}'.format(epoch, batch_idx * len(x_true),
                                                                                                                                     len(data_loader.dataset),
                                                                                                                                     100. * batch_idx / len(data_loader),
                                                                                                                                     vae_loss.item(),
                                                                                                                                     D_loss.item(),
                                                                                                                                     vae_tc_loss.item()
                                                                                                                                     ))
        if batch_idx % 1000 == 0:
            # Visualise decoded prior samples as an image grid.
            samples = VAE(x_true, no_enc=True)
            samples = samples.permute(0, 2, 3, 1).contiguous().cpu().data.numpy()
            plt.imshow(convert_to_display(samples), cmap='Greys_r')
            plt.show()
    print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(data_loader.dataset)))
    if z_dim == 2:
        # Scatter-plot the 2-D latent codes of test batches, coloured by digit.
        batch_size_test = 500
        z_list, label_list = [], []
        for i in range(20):
            # BUG FIX: `iterator.next()` is Python 2 syntax and raises
            # AttributeError in Python 3; use the builtin next().
            x_test, y_test = next(iter(test_loader))
            x_test = Variable(x_test, requires_grad=False).to(device)
            z = VAE(x_test, no_dec=True)
            z_list.append(z.cpu().data.numpy())
            label_list.append(y_test.numpy())
        z = np.concatenate(z_list, axis=0)
        label = np.concatenate(label_list)
        plt.scatter(z[:, 0], z[:, 1], c=label)
        plt.show()
        print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, train_loss / len(data_loader.dataset)))
|
<gh_stars>1-10
#!/usr/bin/env python3
"""
helper tool to run yosys, generating appropriate yosys script
python, rather than bash, since commandline arguments etc
so much more convenient in python
In addition, we can give a task by providing --task-file [task filepath].
The task should be the only declaration in the file.
ports should be provided one per line, with nothing else on the line except the trailing comma (',').
run_yosys.py will wrap the task in a module, then synthesize that.
Any comments sections using // /* or */ should always have the comment symbol (//, /* or */) at the start of a line
there should be a space before each port name in the task declaration.
Internal parameters should each be declared on a line on their own; and the internal parameter lines will be copied
verbatim inside the wrapper module, so they are available for port declarations.
No comments allowed in the task declaration itself.
PRs to reduce these constraints welcome :)
"""
import argparse
import os
from collections import deque
"""
For reference, example of task and module wrapper for it:
task prot_task(
input [1:0] a,
input [1:0] b,
output reg [1:0] out
);
out = a + b;
endtask
module prot_task_module(
input [1:0] a,
input [1:0] b,
output reg [1:0] out
);
always @(*) begin
prot_task(a, b, out);
end
endmodule
"""
def run(args):
    """Drive yosys over args.in_verilog, optionally wrapping a Verilog task
    in a synthesizable module first.

    Side effects: writes build/task_wrapper.sv (when --task-file is given),
    build/yosys.tcl and build/netlist/*.v, mutates args.in_verilog /
    args.top_module, and raises if the yosys subprocess exits non-zero.
    """
    if args.task_file is not None:
        # --task-file and --top-module are mutually exclusive: the generated
        # wrapper module becomes the top module.
        assert args.top_module is None
        # task_verilog_files = [n for n in args.in_verilog if n.endswith(f'/{args.top_task}.sv')]
        # assert len(task_verilog_files) == 1
        # task_verilog_file = task_verilog_files[0]
        print('task verilog file', args.task_file)
        with open(args.task_file) as f:
            task_contents = f.read()
        # Line-oriented scan of the task source; relies on the formatting
        # constraints listed in the module docstring (one port per line,
        # comment markers at line start, etc.).
        port_declarations = [] # full declaration, e.g. "input [1:0] a"
        port_names = [] # just the name, e.g. "a"
        in_declaration = False
        in_block_comment = False
        internal_parameters = []
        for line in task_contents.split('\n'):
            line = line.strip()
            if line.startswith('//'):
                continue
            if in_block_comment:
                if line.startswith('*/'):
                    in_block_comment = False
                continue
            if line.startswith('/*'):
                if line.endswith('*/'):
                    continue
                else:
                    # NOTE(review): no `continue` after entering a block
                    # comment, so this opening line still falls through to
                    # the checks below; harmless unless a '/*' line also
                    # begins with 'task' or 'parameter' -- confirm.
                    in_block_comment = True
            if line.startswith('task'):
                in_declaration = True
                # Task name = text between 'task ' and the opening paren.
                task_name = line.replace('task ', '').split('(')[0].strip()
                print('task_name', task_name)
                continue
            if in_declaration:
                if line == ');':
                    in_declaration = False
                    continue
                port_declaration = line
                if port_declaration.endswith(','):
                    port_declaration = port_declaration[:-1]
                port_declarations.append(port_declaration)
                # Port name = last whitespace-separated token of the line.
                name = port_declaration.split()[-1]
                port_names.append(name)
            else:
                if line.startswith("parameter "):
                    # parameter_name = line.split('=')[0].strip().split()[1].strip()
                    # print('parameter_name', parameteliner_name)
                    internal_parameters.append(line)
        print('declarations', port_declarations)
        print('names', port_names)
        # new_port_declarations = []
        # for decl in port_declarations:
        #    for internal_param in internal_parameters:
        #        decl = decl.replace(internal_param, f'{task_name}.{internal_param}')
        #    new_port_declarations.append(decl)
        # port_declarations = new_port_declarations
        wrapper_filepath = 'build/task_wrapper.sv'
        # Both the task file and the generated wrapper are synthesized.
        args.in_verilog.append(args.task_file)
        args.in_verilog.append(wrapper_filepath)
        # NOTE(review): task_name is unbound (NameError) if the file
        # contained no 'task' line -- confirm inputs always declare one.
        args.top_module = f'{task_name}_module'
        port_declarations_str = ',\n '.join(port_declarations)
        port_names_str = ', '.join(port_names)
        wrapper_file_contents = f"""module {task_name}_module(
{port_declarations_str}
);"""
        if len(internal_parameters) > 0:
            # Copy internal parameter lines verbatim into the wrapper so they
            # are available for the port declarations.
            wrapper_file_contents += ' ' + '\n '.join(internal_parameters)
        wrapper_file_contents += f"""
always @(*) begin
{task_name}({port_names_str});
end
endmodule
"""
        with open(wrapper_filepath, 'w') as f:
            f.write(wrapper_file_contents)
        print(wrapper_file_contents)
        # os.system(f"cat {wrapper_filepath}")
    # Emit the yosys synthesis script; each stage dumps an intermediate
    # netlist into build/netlist/ for debugging.
    with open('build/yosys.tcl', 'w') as f:
        for file in args.in_verilog:
            f.write(f"read_verilog -sv {file}\n")
        if not os.path.exists('build/netlist'):
            os.makedirs('build/netlist')
        if args.top_module:
            f.write(f'hierarchy -top {args.top_module}')
        f.write(f"""
delete t:$assert
write_verilog build/netlist/0.v
flatten
write_verilog build/netlist/1.v
synth
write_verilog build/netlist/2.v
techmap;
write_verilog build/netlist/3.v
# ltp
dfflibmap -liberty {args.cell_lib}
write_verilog build/netlist/4.v
abc -liberty {args.cell_lib}
""")
        if not args.no_cells:
            f.write("""
write_verilog build/netlist/5.v
clean
write_verilog build/netlist/6.v
""")
        f.write("""
write_rtlil build/rtlil.rtl
write_verilog build/netlist.v
ltp
sta
stat
""")
        if args.show:
            f.write('show\n')
    # Run yosys on the generated script and fail loudly on a non-zero exit.
    if os.system('yosys -s build/yosys.tcl') != 0:
        raise Exception("Failed")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--task-file', type=str,
        help='give this instead of top module if top is a task; should be a filepath, not given to --in-verilog')
    parser.add_argument('--in-verilog', type=str, nargs='+', help='path to verilog file')
    parser.add_argument(
        '--top-module', type=str,
        help='top module name, only needed if more than one module, and not using --task-file.')
    parser.add_argument('--show', action='store_true', help='show xdot on the result')
    parser.add_argument('--no-cells', action='store_true', help='stop after dfflibmap')
    parser.add_argument(
        '--cell-lib', type=str, default='tech/osu018/osu018_stdcells.lib',
        help='e.g. path to osu018_stdcells.lib')
    args = parser.parse_args()
    # --in-verilog may be omitted when a task file is given.
    if args.task_file is not None and args.in_verilog is None:
        args.in_verilog = []
    # Prepend the shared support files unless they were already passed in
    # (deque.appendleft ends up reversing their relative order).
    args.in_verilog = deque(args.in_verilog)
    for additional in ['src/assert_ignore.sv', 'src/op_const.sv', 'src/const.sv']:
        if additional not in args.in_verilog:
            args.in_verilog.appendleft(additional)
    print(args.in_verilog)
    run(args)
|
<reponame>myhugong/probing-TTS-models<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 1 18:51:41 2019
@author: lukeum
"""
import matplotlib.pylab as plt
import os
import sys
import numpy as np
import torch
import transformers
import soundfile
import argparse
from praatio import tgio
from librosa import resample
from tqdm import tqdm
sys.path.append('./waveglow/')
sys.path.append('./text')
from hparams import create_hparams
from model import Tacotron2
from train import load_model
from text import text_to_sequence,sequence_to_pinyin
from denoiser import Denoiser
from text.cleaners import text_normalize
from text.txt2pinyin import txt2pinyin
def plot_data(data, path, figsize=(16, 4)):
    """Render each 2-D array in *data* side by side and save the figure to *path*."""
    fig, axes = plt.subplots(1, len(data), figsize=figsize)
    for i in range(len(data)):
        # BUG FIX: imshow's `origin` only accepts 'upper'/'lower';
        # the old value 'bottom' raises ValueError on current matplotlib.
        axes[i].imshow(data[i], aspect='auto', origin='lower',
                       interpolation='none')
    plt.savefig(path)
    plt.close()
def load_bert(path):
    '''
    Load the Chinese BERT (whole-word-masking) model found under *path*.

    Returns a (model, tokenizer) pair; the model is set to eval mode and
    configured to output all hidden states.
    '''
    base = os.path.join(path, 'chinese_wwm_ext_pytorch')
    config = transformers.BertConfig.from_pretrained(
        os.path.join(base, 'bert_config.json'))
    config.output_hidden_states = True
    model = transformers.BertModel.from_pretrained(
        os.path.join(base, 'pytorch_model.bin'), config=config)
    model.eval()
    tokenizer = transformers.BertTokenizer(os.path.join(base, 'vocab.txt'))
    return model, tokenizer
def extract_embeddings(model, tokenizer, text, upsampling=True):
    '''
    Extract embeddings from the pre-trained BERT model.

    Each syllable embedding is repeated once per phone of that syllable so
    the output length matches the phoneme sequence length.
    Returns a (1, n_phones, 768) half-precision CUDA tensor.
    '''
    normalized = text_normalize(text)
    syllables = txt2pinyin(normalized)
    phones = [ph for syl in syllables for ph in syl]
    token_ids = torch.tensor(tokenizer.encode(normalized)).unsqueeze(0)
    # Sanity check: the sequence must be wrapped in [CLS] (101) / [SEP] (102).
    assert token_ids[0, 0] == 101 and token_ids[0, -1] == 102
    hidden = model(token_ids)[0].cpu().detach().numpy()
    hidden = hidden[:, 1:-1, :]  # strip the [CLS]/[SEP] positions
    assert hidden.shape[1] == len(syllables)
    repeated = []
    for idx, syl in enumerate(syllables):
        repeated.append(np.tile(hidden[:, idx, :], [1, len(syl), 1]))
    features = np.concatenate(repeated, axis=1)
    assert features.shape[1] == len(phones)
    assert features.shape[2] == 768
    assert features.shape[0] == 1
    return torch.tensor(features).cuda().half()
def Create_textgrid(phones, out_path, raw=False):
    '''
    Create a textgrid based on the alignment

    phones: a pd DataFrame of an alignment file (one row per phone; start
            time and duration in columns 1/2, or 3/4 when raw=True, label
            in the last column)
    out_path: the output directory (the file name comes from column 0 of
              the first row)
    '''
    # BUG FIX: the body referred to an undefined name `sample`, raising
    # NameError on every call; it is the DataFrame passed in as `phones`.
    sample = phones
    tg = tgio.Textgrid()
    # Tier length = start + duration of the last phone.
    syl_tier = tgio.IntervalTier('phones', [], 0, sample.iloc[-1, 1] + sample.iloc[-1, 2])
    entries = []
    if raw:
        for i in range(len(sample)):
            ph = (sample.iloc[i, 3], sample.iloc[i, 3] + sample.iloc[i, 4], sample.iloc[i, -1])
            entries.append(ph)
    else:
        for i in range(len(sample)):
            ph = (sample.iloc[i, 1], sample.iloc[i, 1] + sample.iloc[i, 2], sample.iloc[i, -1])
            entries.append(ph)
    syl_tier = syl_tier.new(entryList=entries)
    tg.addTier(syl_tier)
    out_path = os.path.join(out_path, sample.iloc[0, 0] + '.TextGrid')
    tg.save(out_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--text',default=None,required=True,type=str)
parser.add_argument('--use_bert',action='store_true')
parser.add_argument('--bert_folder',default="./",type=str)
parser.add_argument('--tacotron_path', type=str)
parser.add_argument('--waveglow_path',type=str)
parser.add_argument('--resample',default=None, type=str)
parser.add_argument('--out_dir',required=True,type=str)
parser.add_argument('--text_grid',action='store_true',default=None)
parser.add_argument('--alignment', action='store_true',default=None)
args = parser.parse_args()
hparams = create_hparams()
hparams.bert = args.use_bert
hparams.sampling_rate = 22050
# load Tacotron 2
checkpoint_path = args.tacotron_path
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path))
model.cuda().eval().half()
# load Waveglow
waveglow_path = args.waveglow_path
waveglow = torch.load(waveglow_path)
waveglow.cuda().eval().half()
for k in waveglow.convinv:
k.float()
for m in waveglow.modules():
if 'Conv' in str(type(m)):
setattr(m, 'padding_mode', 'zeros')
denoiser = Denoiser(waveglow)
# load Chinese BERT
if hparams.bert:
bert, tokenizer= load_bert(args.bert_folder)
# Extract phonemic features
with open(args.text,'r') as f:
texts = []
for line in f.readlines():
name, sen = line.strip().split(' ')
if sen[-1] not in ['。','?','!']:
texts.append((name, sen+'。'))
else:
texts.append((name, sen))
for i, (name, text) in tqdm(enumerate(texts)):
phone_seq = np.array(text_to_sequence(text, ['chinese_cleaners']))[None, :]
phones = torch.autograd.Variable(
torch.from_numpy(phone_seq)).cuda().long()
if hparams.bert == False:
sequence = phones
# Extract BERT embeddings
else:
features = extract_embeddings(bert,tokenizer,text)
sequence = (phones, features)
mel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)
if args.alignment:
plot_data((mel_outputs_postnet.float().data.cpu().numpy()[0],
alignments.float().data.cpu().numpy()[0].T), os.path.join(args.out_dir,'fig_%s.png'%(i)))
with torch.no_grad():
audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)
audio_denoised = denoiser(audio, strength=0.01)[:, 0]
audio = np.concatenate((np.zeros(2048),audio[0].data.cpu().numpy(),np.zeros(2048)))
# name = 'sample_'+str(i)
# audio = audio[0].data.cpu().numpy()
soundfile.write(os.path.join(args.out_dir,'%s.wav'%(name)),audio,hparams.sampling_rate)
# Downsampled to 16000 for forced alignment with Kaldi
if args.resample:
new_y = resample(audio,hparams.sampling_rate,16000)
soundfile.write(os.path.join(args.out_dir,'%s_resample.wav'%(name)),new_y,16000)
# Generate textgrid annotations based on attention alignment
if args.text_grid:
alignment = alignments.float().data.cpu().numpy()[0].T
frames = np.argmax(alignment,axis=0)
frames = [phone_seq[0,i] for i in frames]
pinyin_seq = sequence_to_pinyin(frames)
duration = 256/hparams.sampling_rate*(len(frames)+16)
times = []
for i, p in enumerate(pinyin_seq):
if i != len(pinyin_seq)-1:
if p != pinyin_seq[i+1]:
times.append((p,i))
else:
times.append((p,i))
tg = tgio.Textgrid()
syl_tier = tgio.IntervalTier('phones',[],0,duration)
entries = []
entries.append((0,2048/hparams.sampling_rate,'sil'))
for i, p in enumerate(times):
if i == 0:
ph = (2048/hparams.sampling_rate,(p[1]+8)*256/hparams.sampling_rate,p[0])
else:
ph = ((p_last[1]+8)*256/hparams.sampling_rate,(p[1]+8)*256/hparams.sampling_rate,p[0])
p_last = p
entries.append(ph)
syl_tier = syl_tier.new(entryList=entries)
tg.addTier(syl_tier)
out_path = os.path.join(args.out_dir, 'textgrid','%s.TextGrid'%(name))
tg.save(out_path)
|
<reponame>vs666/BrickBreaker
from motion import Motion
from Board import Board
from variables import game_matrix as ar
from variables import props
from variables import BrickOb as brk
from math import fabs
'''
check death
check collision
reflect vertical
reflect horizontal
reflect board ( pass board object)
dead ball (board object ??)
'''
class Ball(Motion):
    def __init__(self, x_in, y_in, x_lim, y_lim):
        """Create a resting ball at (x_in, y_in) within the (x_lim, y_lim) bounds."""
        super().__init__(x_in, y_in, x_lim, y_lim, 0)
        self.vel_x = 0  # velocity components; zero while at rest
        self.vel_y = 0
        self.state = 'rest'  # 'rest' until the ball is launched
def check_death(self): # Redundant now, updated to isDead()
if self.x == self.lim_x-1:
return True
else:
return False
    def __check_collision(self, fut_x, fut_y, plank):
        """Classify what the ball would hit while moving towards (fut_x, fut_y).

        Returns one of the strings 'wall', 'brick', 'plank', or 'None'
        (note: the literal string 'None', not the None object).
        """
        global ar
        fut_x = int(fut_x)
        fut_y = int(fut_y)
        # Leaving the play field in any direction counts as a wall hit.
        if fut_x <= 0 or fut_x >= self.lim_x - 1 or fut_y >= self.lim_y - 1 or fut_y <= 0:
            return 'wall'
        # Scan every cell the ball would sweep through this tick; cell
        # values >= 3 in the game matrix are treated as bricks.
        for i in range(int(fabs(self.vel_y)+1)):
            for j in range(int(fabs(self.vel_x))+1):
                av = j
                if self.vel_x < 0:
                    av = -1*av
                if self.vel_y > 0 and ar[int(self.x + av)][int(self.y + i)] >= 3:
                    return 'brick'
                if self.vel_y < 0 and ar[int(self.x + av)][int(self.y - i)] >= 3:
                    return 'brick'
        # Plank hit: the ball crosses the plank's row and is horizontally
        # within the plank's half-width.
        if (fut_x >= plank.x and self.x < plank.x) and ((plank.y + int(plank.wi) >= self.y and self.y >= plank.y - int(plank.wi))):
            return 'plank'
        # NOTE(review): '<= X and >= X' only holds for exact equality with
        # the plank's right edge -- looks like an edge-graze case; confirm
        # the intended bounds.
        elif (fut_x >= plank.x and self.x < plank.x) and self.y + self.vel_y <= plank.y + int(plank.wi) and self.y+self.vel_y >= plank.y + int(plank.wi):
            return 'plank'
        else:
            return 'None'
def handle_collision(self): # Handles normal collision and not with plank
global ar
if (self.x+self.vel_x) <= 0 or (self.x+self.vel_x) >= self.lim_x-1 or ar[int(self.x+self.vel_x)][int(self.y)] != 0:
self.vel_x = -1*self.vel_x
if (self.y + self.vel_y) <= 0 or (self.y + self.vel_y) >= self.lim_y - 1 or ar[int(self.x)][int(self.y+self.vel_y)] != 0:
self.vel_y = -1*self.vel_y
elif ar[int(self.vel_x+self.x)][int(self.vel_y+self.y)] != 0:
self.vel_x = -1*self.vel_x
self.vel_y = -1*self.vel_y
# else no collision has happened
def __handle_wall_collision(self):
if self.x + self.vel_x >= self.lim_x-1 or self.x + self.vel_x <= 0:
self.vel_x = -1*self.vel_x
if self.y + self.vel_y >= self.lim_y-1 or self.y + self.vel_y <= 0:
self.vel_y = -1*self.vel_y
def __handle_brick_collision(self):
global brk
xt = False
yt = False
pts = 0
pp = 0
if props["ThroughBall"]:
ar[int(self.x)][int(self.y)]=0
tvel_x = fabs(self.vel_x)
tvel_y = fabs(self.vel_y)
while self.x < self.lim_x - 1 and self.y < self.lim_y - 1 and (tvel_x > 0 or tvel_y > 0):
for i in range(3):
if self.vel_x > 0:
if ar[int(self.x+i)][int(self.y)] == 4:
pts+=brk[int(self.x+i)][int(self.y)].destroy(ar,brk)
else:
if ar[int(self.x-i)][int(self.y)] == 4:
pts+=brk[int(self.x-i)][int(self.y)].destroy(ar,brk)
if self.vel_y > 0:
if ar[int(self.x)][int(self.y+i)] == 4:
pts+=brk[int(self.x)][int(self.y+i)].destroy(ar,brk)
else:
if ar[int(self.x)][int(self.y-i)]==4:
pts+=brk[int(self.x)][int(self.y-i)].destroy(ar,brk)
adsx = 1
if self.vel_x < 0:
adsx = -1
adsy = 1
if self.vel_y < 0:
adsy = -1
for j in range(3):
if ar[int(self.x+(adsx*i))][int(self.y+(adsy*j))] == 4:
pts+=brk[int(self.x+(adsx*i))][int(self.y+(adsy*j))].destroy(ar,brk)
if tvel_y <= tvel_x:
avx = 1
if self.vel_x < 0:
avx = -1
self.x += avx
tvel_x-=1
if tvel_y >= tvel_x:
avy = 1
if self.vel_y < 0:
avy = -1
self.y += avy
tvel_y-=1
# pp = ar[int(self.x)][int(self.y)]
ar[int(self.x)][int(self.y)]=1
return pts
tempVX = self.vel_x
tempVY = self.vel_y
xsi = 1
if self.vel_x < 0:
xsi = -1
ysi = 1
if self.vel_y < 0:
ysi = -1
xdis = 0
ydis = 0
self.vel_x = xsi
self.vel_y = ysi
pts = 0
while fabs(xdis) < fabs(tempVX) or fabs(ydis) < fabs(tempVY):
xt = False
yt = False
for i in range(int(fabs(self.vel_y)+2)):
if self.vel_y > 0 and ar[int(self.x + self.vel_x)][int(self.y+i)] == 4:
a,b = brk[int(self.x + self.vel_x)][int(self.y+i)].collide(ar,brk)
pts+=b
xt = True
if self.vel_y < 0 and ar[int(self.x + self.vel_x)][int(self.y-i)] == 4:
a,b = brk[int(self.x + self.vel_x)][int(self.y-i)].collide(ar,brk)
pts+=b
xt = True
if self.vel_y > 0 and ar[int(self.x+self.vel_x)][int(self.y-1)] == 4:
xt = True
a,b = brk[int(self.x+self.vel_x)][int(self.y-1)].collide(ar,brk)
pts+=b
if self.vel_y < 0 and ar[int(self.x + self.vel_x)][int(self.y+1)] == 4:
xt = True
a,b = brk[int(self.x+self.vel_x)][int(self.y+1)].collide(ar,brk)
pts+=b
if self.vel_y > 0 and ar[int(self.x)][int(self.y+1)] == 3:
yt = True
a,b = brk[int(self.x)][int(self.y+2)].collide(ar,brk)
pts+=b
if self.vel_y < 0 and ar[int(self.x)][int(self.y-1)] == 3:
yt = True
a,b = brk[int(self.x)][int(self.y-2)].collide(ar,brk)
pts+=b
if xt:
self.vel_x = -1*xsi
if yt:
self.vel_y = -1*ysi
if xt or yt:
self.vel_y *= fabs(tempVY)
self.vel_x *= fabs(tempVX)
return pts
if xdis != tempVX:
ar[int(self.x)][int(self.y)]=0
xdis += xsi
self.x += xsi
ar[int(self.x)][int(self.y)]=1
if ydis != tempVY:
ar[int(self.x)][int(self.y)]=0
ydis += ysi
self.y += ysi
ar[int(self.x)][int(self.y)]=1
if pts == 0:
self.vel_x = tempVX
self.vel_y = tempVY
return pts
def __handle_plank_collision(self, plank):
'''
All the plank collision logic here
'''
if props["PaddleGrab"]:
ar[int(self.x)][int(self.y)]=0
self.reset(plank.x,plank.y)
else:
import math
if self.state != 'rest' and self.y > plank.y + int(plank.wi) and self.y < plank.y - int(plank.wi):
self.vel_y = -1*int(self.vel_y/math.fabs(self.vel_y))*fabs(self.y-plank.wi)
elif self.state != 'rest':
self.vel_x = -1*self.vel_x
# self.vel_y = (plank.x-self.x)
def reset(self, x, y):
# self.vel_x = 0
# self.vel_y = 0
self.x = x
self.y = y
self.state = 'rest'
def isDead(self):
if self.x + self.vel_x >= self.lim_x-5:
return True
return False
def move_object(self, plank): # overriding
global ar
pts = 0
if self.state == 'rest':
ar[int(self.x)][int(self.y)] = 0
self.x = plank.x - 1
self.y = plank.y
ar[int(self.x)][int(self.y)] = 1
return False, 0
if self.isDead():
return True, 0
if self.__check_collision(self.x+self.vel_x, self.y+self.vel_y, plank) == 'brick':
pts = self.__handle_brick_collision()
if self.__check_collision(self.x+self.vel_x, self.y+self.vel_y, plank) == 'plank':
self.__handle_plank_collision(plank)
if self.__check_collision(self.x+self.vel_x, self.y+self.vel_y, plank) == 'wall':
self.__handle_wall_collision()
if self.__check_collision(self.x+self.vel_x, self.y+self.vel_y, plank) == 'None':
ar[int(self.x)][int(self.y)] = 0
self.x += self.vel_x
self.y += self.vel_y
ar[int(self.x)][int(self.y)] = 1
return False, pts
def launch_object(self):
if self.vel_x == 0 and self.vel_y == 0:
self.vel_x = -1
self.vel_y = 1
if self.vel_x > 0:
self.vel_x = -1*self.vel_x
self.state = 'moving'
|
#!/usr/bin/python2.7
#TODO UPDATE CAO: 4/1/18
# Requires python-requests. Install with pip:
#
# pip install requests
#
# or, with easy-install:
#
# easy_install requests
#Blog post from which much of this code was copied:
#https://cryptostag.com/basic-gdax-api-trading-with-python/
#Note the GDAX crypto ids are as follows:
'''
BTC-USD
BTC-GBP
BTC-EUR
ETH-BTC
ETH-USD
LTC-BTC
LTC-USD
ETH-EUR
LTC-EUR
BCH-USD
BCH-BTC
BCH-EUR
'''
import json, hmac, hashlib, time, requests, base64, sys, os, signal, math #numpy
from datetime import datetime, timedelta
from requests.auth import AuthBase
from collections import deque
#==============================================================================
#Global variable(s)
gStopFlag = False  # set True by signal_handler to request a graceful exit
# Trading pairs this script tracks (subset of the GDAX product ids above).
gProductIDs = [
    'BTC-USD',
    'ETH-USD',
    'LTC-USD',
    'BCH-USD'
]
# Candle granularities (seconds) accepted by GDAX, ascending and descending.
gGranularity = [60, 300, 900, 3600, 21600, 86400]
gRevGranularity = [86400, 21600, 3600, 900, 300, 60]
#Set the granularity in seconds based on the timeframe
#btc_1hr_trend = deque() #60 records, 1 per minute
#btc_1d_trend = deque() #288 recores, 1 per 5 minutes
#btc_7d_trend = deque() #168 records, 1 per hour
#btc_30d_trend = deque() #723 records, 1 per hour
gTimeframeToGranularity = {'1hr':60,'1d':300,'7d':3600,'30d':3600}
# Hours of history fetched per timeframe label.
gTimeframeToHourDelta = {'1hr':1,'1d':24,'7d':168,'30d':210}
# Column indices into a GDAX candle [time, low, high, open, close, volume].
gFieldToIndex = { 'low' : 1, 'high' : 2, 'open': 3, 'close' : 4 }
#==============================================================================
#This function flips a global flag indicating an exit condition - graceful quit
def signal_handler(signal, frame):
    """SIGINT handler: announce the interrupt and raise the global stop
    flag so the main loop can finish its pass and exit gracefully."""
    global gStopFlag
    print('Caught Interrupt signal. Exiting!')
    gStopFlag = True
#==============================================================================
# Create custom authentication for Exchange
class CoinbaseExchangeAuth(AuthBase):
    """Requests auth hook that signs GDAX API requests.

    Adds the CB-ACCESS-* headers: an HMAC-SHA256 signature over
    timestamp + method + path + body, keyed with the base64-decoded
    API secret.
    """
    def __init__(self, api_key, secret_key, passphrase):
        self.api_key = api_key
        self.secret_key = secret_key  # base64-encoded; decoded per request
        self.passphrase = passphrase
    def __call__(self, request):
        timestamp = str(time.time())
        message = timestamp + request.method + request.path_url + (request.body or '')
        hmac_key = base64.b64decode(self.secret_key)
        signature = hmac.new(hmac_key, message, hashlib.sha256)
        # NOTE(review): str.encode('base64') is Python-2-only; under
        # Python 3 this would be base64.b64encode(signature.digest()).
        signature_b64 = signature.digest().encode('base64').rstrip('\n')
        request.headers.update({
            'CB-ACCESS-SIGN': signature_b64,
            'CB-ACCESS-TIMESTAMP': timestamp,
            'CB-ACCESS-KEY': self.api_key,
            'CB-ACCESS-PASSPHRASE': self.passphrase,
            'Content-Type': 'application/json'
        })
        return request
#==============================================================================
#Returns a list of products from the exchange
def products():
    """Return the exchange's product list (parsed JSON).

    NOTE(review): `api_base` is not defined anywhere in this module --
    main() only creates a local `api_url`.  Confirm where it is meant
    to come from.

    Raises:
        Exception: if the HTTP status code is not 200.
    """
    response = requests.get(api_base + '/products')
    # check for invalid api response
    # (was `is not 200`: identity comparison against an int literal is
    # implementation-dependent; use a value comparison)
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()
#==============================================================================
#Function for executing market buy order - possibly not needed
def market_buy(product_id, size):
    """Execute a market buy order for `size` units of `product_id`.

    Relies on module-level `api_key`, `api_secret`, `passphrase` and
    `api_base` -- NOTE(review): none of these are defined at module
    scope in this file; confirm before use.

    Returns the parsed JSON order response.
    Raises:
        Exception: if the HTTP status code is not 200.
    """
    # Was `GDAXRequestAuth`, a name defined nowhere in this module; the
    # auth class defined above is CoinbaseExchangeAuth.
    auth = CoinbaseExchangeAuth(api_key, api_secret, passphrase)
    order_data = {
        'type': 'market',
        'side': 'buy',
        'product_id': product_id,
        'size': size
    }
    response = requests.post(api_base + '/orders', data=json.dumps(order_data), auth=auth)
    # Value comparison instead of the original identity test `is not 200`.
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()
#==============================================================================
#Return the status of the order with the given ID #
def order_status(order_id):
    """Return the parsed JSON status of the order with the given id.

    NOTE(review): uses module-level `auth` and `api_base`, neither of
    which is defined at module scope in this file -- confirm.

    Raises:
        Exception: if the HTTP status code is not 200.
    """
    order_url = api_base + '/orders/' + order_id
    response = requests.get(order_url, auth=auth)
    # Value comparison instead of the original identity test `is not 200`.
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()
#==============================================================================
#Execute limit buy order
def limit_buy(product_id, price, size, time_in_force='GTC', \
              cancel_after=None, post_only=None):
    """Execute a limit buy order.

    Args:
        product_id: trading pair, e.g. 'BTC-USD'.
        price / size: order price and quantity.
        time_in_force: 'GTC', 'GTT', 'IOC' or 'FOK'.
        cancel_after: expiry, only sent for 'GTT' orders.
        post_only: maker-only flag, not sent for 'IOC'/'FOK'.

    Returns the parsed JSON order response.
    Raises:
        Exception: if the HTTP status code is not 200.
    """
    # Was `GDAXRequestAuth` (undefined in this module); the auth class
    # defined above is CoinbaseExchangeAuth.
    auth = CoinbaseExchangeAuth(api_key, api_secret, passphrase)
    order_data = {
        'type': 'limit',
        'side': 'buy',
        'product_id': product_id,
        'price': price,
        'size': size,
        'time_in_force': time_in_force
    }
    # Was `'time_in_force' is 'GTT'`: that compares two string *literals*
    # (always False), so cancel_after was never sent.  Test the parameter.
    if time_in_force == 'GTT':
        order_data['cancel_after'] = cancel_after
    # Was `'time_in_force' not in [...]` (always True): test the parameter.
    if time_in_force not in ['IOC', 'FOK']:
        order_data['post_only'] = post_only
    response = requests.post(api_base + '/orders', data=json.dumps(order_data), auth=auth)
    # Value comparison instead of the original identity test `is not 200`.
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()
#==============================================================================
#https://docs.gdax.com/#orders
#Generic order function, can handle buy/sell and market/limit orders
#https://docs.gdax.com/#orders
#Generic order function, can handle buy/sell and market/limit orders
def submit_order(order_type, side, product_id, price, size, \
                 time_in_force='GTC', cancel_after=None, post_only=None):
    """Submit a generic order.

    Args:
        order_type: 'market' or 'limit'.
        side: 'buy' or 'sell'.
        product_id / price / size: order parameters.
        time_in_force: 'GTC', 'GTT', 'IOC' or 'FOK'.
        cancel_after: expiry, only sent for 'GTT' orders.
        post_only: maker-only flag, not sent for 'IOC'/'FOK'.

    Returns the parsed JSON order response.
    Raises:
        Exception: if the HTTP status code is not 200.
    """
    # Was `GDAXRequestAuth` (undefined in this module); the auth class
    # defined above is CoinbaseExchangeAuth.
    auth = CoinbaseExchangeAuth(api_key, api_secret, passphrase)
    order_data = {
        'type': order_type, #market or limit
        'side': side, #buy or sell
        'product_id': product_id,
        'price': price,
        'size': size,
        'time_in_force': time_in_force
    }
    # Was `'time_in_force' is 'GTT'`: compares two string *literals*
    # (always False), so cancel_after was never sent.  Test the parameter.
    if time_in_force == 'GTT':
        order_data['cancel_after'] = cancel_after
    # Was `'time_in_force' not in [...]` (always True): test the parameter.
    if time_in_force not in ['IOC', 'FOK']:
        order_data['post_only'] = post_only
    response = requests.post(api_base + '/orders', data=json.dumps(order_data), auth=auth)
    # Value comparison instead of the original identity test `is not 200`.
    if response.status_code != 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    return response.json()
#==============================================================================
def getHistoricRate(api_url, product_id, start, end, granularity):
    #Returns a list of lists:
    '''
    [
    [ time, low, high, open, close, volume ],
    [ 1415398768, 0.32, 4.2, 0.35, 4.2, 12.3 ],
    ...
    ]
    '''
    # Fetch historic candles for `product_id` between the datetimes
    # `start` and `end`.  Returns the raw `requests` Response on success,
    # or a negative int error code for bad arguments.  (Python 2 module:
    # note the print statements.)
    #Enable access to globals
    global gProductIDs, gRevGranularity
    #Error checking
    if product_id not in gProductIDs:
        print "Error in getHistoricRate: 'product_id' is", product_id, "which is not valid."
        return -1
    if not isinstance(start, datetime):
        print "Error in getHistoricRate: 'start' is not a datetime object."
        return -2
    if not isinstance(end, datetime):
        print "Error in getHistoricRate: 'end' is not a datetime object."
        return -3
    #Calculate requested time frame and # of data points
    timeRange = end - start
    timeRange = int(timeRange.total_seconds())
    #print timeRange
    # NOTE(review): Python-2 integer division -- floors the candle count,
    # which the 300-candle cap check below relies on.
    candles = timeRange / granularity
    #print 'Time range (seconds) of request:', timeRange
    #print "Candles:",candles
    #If granularity is not valid, or if # of candles > 300,
    #find the smallest valid granularity value for the time frame and use that
    #granularity instead
    if granularity not in gRevGranularity or candles > 300:
        print granularity, ' is invalid, it would result in ', candles, ' candles'
        #Calulate best granularity based on time frame
        for g in gRevGranularity:
            #print "Considering ",g, " as a possible granularity"
            candles = int(timeRange / g)
            #print "This would result in ", candles, " candles"
            if candles < 300:
                granularity = g
                #print "updated granularity", granularity
            elif candles > 300: break
        print "Found new optimal granularity: ", granularity, " resulting in ", str(int(timeRange / granularity)), " candles"
    #Convert datetime objects to strings
    end = end.isoformat()
    start = start.isoformat()
    #Defind json data payload
    trend = {
        'start' : start,
        'end' : end,
        'granularity' : granularity
    }
    #Define url, execute GET request
    trend_url = api_url + '/products/' + product_id + '/candles'
    response = requests.get(trend_url, trend)
    # NOTE(review): `is not 200` is an identity test that only works
    # because CPython caches small ints; should be `!= 200`.
    if response.status_code is not 200:
        raise Exception('Invalid GDAX Status Code: %d' % response.status_code)
    elif len(response.json()) == 0:
        raise Exception('GDAX return status code 200, but the response was empty!')
    return response
#==============================================================================
def updateAll(product_id, timeframe, dataStruct, api_url):
    #NOTE: 'dataStruct' is a dictionary as shown below:
    '''
    {
    "data" : deque(), \
    "lowMean" : 0, \
    "lowestLow" : 999999, \
    "lowSumOfSquares" : 0, \
    "lowStdDev" : 0, \
    "highMean" : 0, \
    "highestHigh" :-999999, \
    "highSumOfSquares" : 0, \
    "highStdDev" : 0, \
    "openMean" : 0, \
    "openSumOfSquares" : 0, \
    "openStdDev" : 0, \
    "closeMean" : 0, \
    "closeSumOfSquares": 0, \
    "closeStdDev" : 0 }
    '''
    # Populate (first call) or roll forward (subsequent calls) the candle
    # deque and its statistics (means, variances, std devs, extremes) for
    # one product/timeframe.  Returns the updated dataStruct, or a
    # negative int error code.
    # NOTE(review): the template above lists "...SumOfSquares" keys, but
    # the code below (and main()) actually uses "...Variance" keys.
    # NOTE(review): `gTimeframes` is declared global but never defined at
    # module scope; harmless in Python as long as it is never read.
    global gProductIDs, gTimeframes, gTimeframeToHourDelta, gFieldToIndex
    records = []
    #Error checking
    if product_id not in gProductIDs:
        print "Error in updateAll(): product_id argument", product_id + "is not a valid product_id!"
        return -1
    if timeframe not in gTimeframeToGranularity:
        print "Error in updateAll(): timeframe argument", timeframe + "is not a valid timeframe!"
        return -2
    granularity = gTimeframeToGranularity[timeframe]
    currentTime = requests.get(api_url + '/time')
    end = datetime.utcfromtimestamp(currentTime.json()['epoch'])
    #print "The current time is:", end
    #print "The current time in epoch is:",currentTime.json()['epoch']
    r = ''
    #First, check if the "data" deque is empty, if so, we need to initalize it for the first time
    if not dataStruct["data"]:
        start = end - timedelta(hours=gTimeframeToHourDelta[timeframe])
        #r = getHistoricRate(api_url, product_id, start, end, granularity)
        #30d trend require 3 http requests
        if timeframe == '30d':
            now = end
            # Fetch three 10-day windows, newest first.
            for i in range(0,30,10):
                #end = now + timedelta(days=i)
                #start = now + timedelta(days=(i-10))
                end = now - timedelta(days=i)
                start = now - timedelta(days=(i+10))
                #print "i is: ", i
                #print "30d request:"
                #print "START: ",start
                #print "END: ",end
                r = getHistoricRate(api_url, product_id, start, end, granularity)
                if r.status_code < 200 or r.status_code >= 300:
                    print "Error getting 30 day trend, HTTP response code is: ", str(r.status_code)
                    print "Response text is:", r.text
                    return -3
                else:
                    records = r.json()[:240]
                    for record in records:
                        #Note that in this block any comment regarding "update the ...mean" simply indicates
                        #that I will create a rolling sum. The division will happen later once all values are summed.
                        #Update the lowestLow
                        if record[gFieldToIndex["low"]] < dataStruct["lowestLow"]:
                            dataStruct["lowestLow"] = record[gFieldToIndex["low"]]
                        #Update the lowMean (lM)
                        dataStruct["lowMean"] += record[gFieldToIndex["low"]]
                        #Update the highestHigh
                        if record[gFieldToIndex["high"]] > dataStruct["highestHigh"]:
                            dataStruct["highestHigh"] = record[gFieldToIndex["high"]]
                        #Update the highMean (hM)
                        dataStruct["highMean"] += record[gFieldToIndex["high"]]
                        #Update the openMean (oM)
                        dataStruct["openMean"] += record[gFieldToIndex["open"]]
                        #Update the closeMean (cM)
                        dataStruct["closeMean"] += record[gFieldToIndex["close"]]
                        dataStruct["data"].appendleft(record)
                #btc_30d_trend.appendleft("======================================")
                # Throttle to stay under the exchange's request rate limit.
                time.sleep(2)
        #1hr, 1d, and 7d trends can all be handled with this code
        else:
            start = end - timedelta(hours=gTimeframeToHourDelta[timeframe])
            #print "start:", start
            #print "end  :", end
            #print timeframe
            #Sprint "granularity:", granularity
            r = getHistoricRate(api_url, product_id, start, end, granularity)
            if r.status_code < 200 or r.status_code >= 300:
                print "Error getting ", timeframe, " trend, HTTP response code is: ", str(r.status_code)
                print "Response text is:", r.text
                return -4
            else:
                #Note that GDAX returns 300 results by defaults, so grab the first N results
                if timeframe == "1hr":
                    records = r.json()[:60]
                elif timeframe == "1d":
                    records = r.json()[:288]
                elif timeframe == "7d":
                    records = r.json()[:168]
                elif timeframe == "30d":
                    records = r.json()[:240]
                for record in records:
                    #Update the lowestLow
                    if record[gFieldToIndex["low"]] < dataStruct["lowestLow"]:
                        dataStruct["lowestLow"] = record[gFieldToIndex["low"]]
                    #Update the highestHigh
                    if record[gFieldToIndex["high"]] > dataStruct["highestHigh"]:
                        dataStruct["highestHigh"] = record[gFieldToIndex["high"]]
                    dataStruct["lowMean"] += record[gFieldToIndex["low"]]
                    dataStruct["highMean"] += record[gFieldToIndex["high"]]
                    dataStruct["openMean"] += record[gFieldToIndex["open"]]
                    dataStruct["closeMean"] += record[gFieldToIndex["close"]]
                    #print "Datapoint:",record
                    #btc_1hr_trend[datapoint[0]] = datapoint[1:]
                    dataStruct["data"].appendleft(record)
        #for item in dataStruct["data"]:
        #    print item
        #print "type(r.json): " , type(r.json())
        #Finish calcuating the mean - divide by N
        dequeLength = len(dataStruct["data"])
        #print dequeLength
        dataStruct["lowMean"] /= dequeLength
        dataStruct["highMean"] /= dequeLength
        dataStruct["openMean"] /= dequeLength
        dataStruct["closeMean"] /= dequeLength
        #Now find the StdDev
        # Two-pass variance: sum squared deviations, divide by N, sqrt.
        for record in dataStruct["data"]:
            #Update the Variances
            dataStruct["lowVariance"] += (record[gFieldToIndex["low"]] - dataStruct["lowMean"]) ** 2
            dataStruct["highVariance"] += (record[gFieldToIndex["high"]] - dataStruct["highMean"]) ** 2
            dataStruct["openVariance"] += (record[gFieldToIndex["open"]] - dataStruct["openMean"]) ** 2
            dataStruct["closeVariance"] += (record[gFieldToIndex["close"]] - dataStruct["closeMean"]) ** 2
        dataStruct["lowVariance"] /= dequeLength
        dataStruct["highVariance"] /= dequeLength
        dataStruct["openVariance"] /= dequeLength
        dataStruct["closeVariance"] /= dequeLength
        dataStruct["lowStdDev"] = math.sqrt( dataStruct["lowVariance"] )
        dataStruct["highStdDev"] = math.sqrt( dataStruct["highVariance"] )
        dataStruct["openStdDev"] = math.sqrt( dataStruct["openVariance"] )
        dataStruct["closeStdDev"] = math.sqrt( dataStruct["closeVariance"] )
        return dataStruct
    #The deque exists, so we're simply updating it - Note that GDAX returns 300 records by default,
    #So we need to slice the desired records out of the 300 returned results.
    else:
        #Peek at the last item in the deque; note that the first item will be the UTC epoch
        start = dataStruct["data"][-1][0]
        #currentTime = requests.get(api_url + '/time')
        #now = currentTime.json()['epoch']
        currentTime = requests.get(api_url + '/time')
        end = datetime.utcfromtimestamp(currentTime.json()['epoch'])
        # NOTE(review): the datetime above is immediately overwritten --
        # `end` and `start` are both epoch seconds from here on.
        end = currentTime.json()['epoch']
        #print "Current Epoch time:", end
        #print "Epoch time of last:", start
        diff = end - start
        #print "Difference:", diff
        update = int(diff / granularity)
        #print "Difference / " ,granularity, ":", update
        #print "End is  :", end
        #print "Start is :", start
        if update == 0:
            print "Nothing to update."
            return dataStruct
        print "Updating ", update, "records"
        #'update' now holds the number of <granularity units> since the last update, so we need to pop
        #that # of elements from the left of the deque and push the same # of new elements
        #to the right side - updating the mean and std. dev. as we do so
        if update > 300:
            #TODO Rather than throw an error here, reset 'dataStruct' to the default values, call
            #this functions recursively with the default data structure, and immediately return
            #the result. In essence, simply create the data struct as if for the first time, b/c
            #a good chunk of the data needed to be discarded and repopulated anyway.
            print "Error: Attempting to update deque, but 'update' is > 300!"
            return
        start = datetime.utcfromtimestamp(start)
        end = datetime.utcfromtimestamp(end)
        #print "type(r):", type(r)
        #print "r:", r
        #print "api:", api_url, "\npid: ", product_id, "\nstart:" ,start, "\nend :",end, "\ngranularity:",granularity
        r = getHistoricRate(api_url, product_id, start, end, granularity)
        #print "type(r):", type(r)
        #print "r:", r
        if r.status_code < 200 or r.status_code >= 300:
            print "Error getting ", timeframe, " trend, HTTP response code is: ", str(r.status_code)
            print "Response text is:", r.text
            return -5
        #Note the slicing to reverse the list: e.g.
        #Current time: 10
        #Current data: 1,2,3,4,5,6,7
        #r: 10,9,8
        #print "Updating a dataStruct: r.json() is of type:", type(r.json())
        #print "r.json() is:", r.json()
        records = r.json()[:update]
        #print "records len() is", len(records)
        #print "About to update from the following list:"
        #for r in records:
        #    print r
        dequeLength = len(dataStruct["data"])
        # Roll the window: drop the oldest record, append the newest, and
        # update mean/variance incrementally (Welford-style rolling form).
        for newRecord in records[::-1]:
            #Update the deque of data
            oldRecord = dataStruct["data"].popleft()
            dataStruct["data"].append(newRecord)
            #print "pop :", oldRecord
            #print "push:", newRecord
            # Keep the pre-update means for the variance recurrence below.
            lowMean = dataStruct["lowMean"]
            highMean = dataStruct["highMean"]
            openMean = dataStruct["openMean"]
            closeMean = dataStruct["closeMean"]
            #Now re-calculate all the statistics
            #Recalculate the means (averages)
            dataStruct["lowMean"] += ((newRecord[gFieldToIndex["low"]] - oldRecord[gFieldToIndex["low"]]) / dequeLength)
            dataStruct["highMean"] += ((newRecord[gFieldToIndex["high"]] - oldRecord[gFieldToIndex["high"]]) / dequeLength)
            dataStruct["openMean"] += ((newRecord[gFieldToIndex["open"]] - oldRecord[gFieldToIndex["open"]]) / dequeLength)
            dataStruct["closeMean"] += ((newRecord[gFieldToIndex["close"]] - oldRecord[gFieldToIndex["close"]]) / dequeLength)
            #Recalculate the variance based on this formula: http://jonisalonen.com/2014/efficient-and-accurate-rolling-standard-deviation/
            dataStruct["lowVariance"] += (newRecord[gFieldToIndex["low"]] - oldRecord[gFieldToIndex["low"]])* \
                (newRecord[gFieldToIndex["low"]]-dataStruct["lowMean"]+oldRecord[gFieldToIndex["low"]]-lowMean)/dequeLength
            dataStruct["highVariance"] += (newRecord[gFieldToIndex["high"]] - oldRecord[gFieldToIndex["high"]])* \
                (newRecord[gFieldToIndex["high"]]-dataStruct["highMean"]+oldRecord[gFieldToIndex["high"]]-highMean)/dequeLength
            dataStruct["openVariance"] += (newRecord[gFieldToIndex["open"]] - oldRecord[gFieldToIndex["open"]])* \
                (newRecord[gFieldToIndex["open"]]-dataStruct["openMean"]+oldRecord[gFieldToIndex["open"]]-openMean)/dequeLength
            dataStruct["closeVariance"] += (newRecord[gFieldToIndex["close"]] - oldRecord[gFieldToIndex["close"]])* \
                (newRecord[gFieldToIndex["close"]]-dataStruct["closeMean"]+oldRecord[gFieldToIndex["close"]]-closeMean)/dequeLength
            #Now Recalculate the stddev by simply taking the sqrt of the variance
            dataStruct["lowStdDev"] = math.sqrt( dataStruct["lowVariance"])
            dataStruct["highStdDev"] = math.sqrt( dataStruct["highVariance"])
            dataStruct["openStdDev"] = math.sqrt( dataStruct["openVariance"])
            dataStruct["closeStdDev"] = math.sqrt( dataStruct["closeVariance"])
        return dataStruct
#==============================================================================
def main(argv):
    #TODO
    '''Spawn 2 threads: one to handle user interaction and another to
    interact with the exchange, store the data, calculate stats, etc
    The user thread should be able to give info to the user e.g.:
    1. "Thread 2 is sleeping for N seconds"
    2. "Current price is N standard deviations -/+ the <timeframe> mean
    3. "Thread 2 is in a 'soft-buy' state, etc.
    4. "Press 'q' to kill the process.
    5. "type 'bg' to background this process"
    etc.
    '''
    # Entry point: poll GDAX, maintain rolling candle statistics per
    # coin/timeframe, compute Z-scores for the current price, and log
    # buy/sell/hold decisions to two CSV files until interrupted.
    api_url = 'https://api.gdax.com/'
    currentTime = requests.get(api_url + '/time')
    now = datetime.utcfromtimestamp(currentTime.json()['epoch'])
    #print currentTime.json()
    #print now
    #sys.exit(1)
    #Enable access to the global stop flag
    global gStopFlag
    #Register signal handler to catch interrupts
    signal.signal(signal.SIGINT, signal_handler)
    #Declare and initalize GDAX authentication variables
    #api_url = 'https://api.gdax.com/'
    #Use the sandbox for testing
    #TODO Enter your own creds here
    # NOTE(review): this sandbox URL is overwritten with the production
    # URL further below, before the main loop.
    api_url = 'https://api-public.sandbox.gdax.com'
    api_key = 'MYAPIKEYHERE'
    api_secret = 'MYAPISECERETHERE'
    passphrase = '<PASSWORD>'
    auth = CoinbaseExchangeAuth(api_key, api_secret, passphrase)
    #Wake once per hour by default, though this value will change dynamically based on the state (see below)
    state_intervals= {\
        #Update every 05 minutes when poised to execute sell order
        "strong_sell": 300, \
        #Update every 15 minutes when prices are rising
        "soft_sell": 900, \
        #Update once an hour when prices are stable
        "hold": 60, \
        #"hold": 3600, \
        #Update every 15 minutes when prices are falling
        "soft_buy": 900, \
        #Update every 05 minutes when poised to execute buy order
        "strong_buy": 300 \
    }
    #Create field headers in the csv log files if they don't yet exist
    if not os.path.isfile("transaction1.csv"):
        with open("transaction1.csv", "a") as file:
            file.write("Time,Coin,30d Z score,7d Z-score,1d Z-score,Mkt Decision\n")
    if not os.path.isfile("transaction2.csv"):
        with open("transaction2.csv", "a") as file:
            file.write("Time,Coin,30d Z score,7d Z-score,1d Z-score,Mkt Decision\n")
    cryptos = ["BTC-USD"]#,"LTC-USD"]
    timeFrames = [ "1d", "7d", "30d"] #"1hr",
    #Define dictionaries to hold trend data for each crypto - for now, let's just do BTC
    #Note that we can only retrieve 300 records per request
    #Historical data is returned as [ time, low, high, open, clocse, volume ]
    #btc_1hr_trend = deque() #60 records, 1 per minute    # 300 * 60 s = 5 hr
    #btc_1d_trend = deque() #288 recores, 1 per 5 minutes # 300 * 300s = 25 hrs
    #btc_7d_trend = deque() #168 records, 1 per hour      # 300 * 3600 = 12.5 D
    #btc_30d_trend = deque() #720 records, 1 per hour
    # Total: 1,236 records | max ~ 68 bytes/record
    # Total space: 1,236*68 = 84,048 = 82kB
    #Initalize the data structs holding the stats for each coin
    cryptoData = {}
    cryptoState = {}
    cryptoZScores = {}
    for crypto in cryptos:
        cryptoData[crypto] = {}
        cryptoState[crypto] = "hold"
        cryptoZScores[crypto] = {}
        for timeFrame in timeFrames:
            cryptoZScores[crypto][timeFrame] = 0
            cryptoData[crypto][timeFrame] = {
                "data" : deque(), \
                "lowMean" : 0, \
                "lowestLow" : 999999, \
                "lowVariance" : 0, \
                "lowStdDev" : 0, \
                "highMean" : 0, \
                "highestHigh" :-999999, \
                "highVariance" : 0, \
                "highStdDev" : 0, \
                "openMean" : 0, \
                "openVariance" : 0, \
                "openStdDev" : 0, \
                "closeMean" : 0, \
                "closeVariance" : 0, \
                "closeStdDev" : 0 }
    #Time variables for trend data
    start = ''
    end = ''
    api_url = 'https://api.gdax.com/'
    #TODO Spawn one thread per crypto - use a mutex to limit request rate
    #Core logic loop
    while not gStopFlag:
        #For each crypto/coin I'm tracking:
        for crypto, cryptoDict in cryptoData.iteritems():
            if gStopFlag: break
            #print "crypto:", crypto
            for timeframe in cryptoDict:
                if gStopFlag: break
                #print timeframe,cryptoDict[timeframe]
                #Iterate over each time frame for which I'm tracking it:
                #print "-----------------------------------------------------------------------------"
                print "Updating:", crypto, timeframe
                #print
                cryptoDict[timeframe] = updateAll(crypto, timeframe, cryptoDict[timeframe], api_url)
                time.sleep(1)
                currentPrice = requests.get(api_url + 'products/'+crypto+'/ticker')
                if currentPrice.status_code >= 200 and currentPrice.status_code < 300:
                    print "Current Price:", currentPrice.json()["price"]
                else:
                    print "Error getting current price"
                '''
                print "Time Frame:", timeframe, "lowestLow:  ", cryptoDict[timeframe]["lowestLow"]
                print "Time Frame:", timeframe, "lowMean:    ", cryptoDict[timeframe]["lowMean"]
                print "Time Frame:", timeframe, "lowStdDev:  ", cryptoDict[timeframe]["lowStdDev"]
                print "Time Frame:", timeframe, "highestHigh:", cryptoDict[timeframe]["highestHigh"]
                print "Time Frame:", timeframe, "highMean:   ", cryptoDict[timeframe]["highMean"]
                print "Time Frame:", timeframe, "highStdDev: ", cryptoDict[timeframe]["highStdDev"]
                print "Time Frame:", timeframe, "openMean:   ", cryptoDict[timeframe]["openMean"]
                print "Time Frame:", timeframe, "openStdDev: ", cryptoDict[timeframe]["openStdDev"]
                print "Time Frame:", timeframe, "closeMean:  ", cryptoDict[timeframe]["closeMean"]
                print "Time Frame:", timeframe, "closeStdDev:", cryptoDict[timeframe]["closeStdDev"]
                print ""
                '''
                #print "Now the current price in terms of Z-scores:"
                # Z-score of the live price against each statistic; the
                # stored value is the mean of the four.
                lowZScore = (float(currentPrice.json()["price"]) - cryptoDict[timeframe]["lowMean"] ) / cryptoDict[timeframe]["lowStdDev"]
                highZScore = (float(currentPrice.json()["price"]) - cryptoDict[timeframe]["highMean"] ) / cryptoDict[timeframe]["highStdDev"]
                openZScore = (float(currentPrice.json()["price"]) - cryptoDict[timeframe]["openMean"] ) / cryptoDict[timeframe]["openStdDev"]
                closeZScore= (float(currentPrice.json()["price"]) - cryptoDict[timeframe]["closeMean"] ) / cryptoDict[timeframe]["closeStdDev"]
                cryptoZScores[crypto][timeframe] = float(lowZScore + highZScore + openZScore + closeZScore ) / 4.0
                print "Average Z score:", cryptoZScores[crypto][timeframe]
                print "-----------------------------------------------------------------------------"
                '''
                print "lowestZScore: ", str(lowZScore)
                print "highestZScore:", str(highZScore)
                print "openZScore:   ", str(openZScore)
                print "closeZScore:  ", str(closeZScore)
                if timeframe == "30d":
                    if lowZScore < -2.9: state = 'strong_buy'
                    elif lowZScore < -1.9: state = 'soft_buy'
                    elif abs(lowZScore) < 1: state = 'hold'
                    elif lowZScore > -1.9: state = 'soft_sell'
                    else: state = 'strong_sell'
                '''
            time.sleep(5)
        currentTime = requests.get(api_url + '/time')
        now = datetime.utcfromtimestamp(currentTime.json()['epoch'])
        #For each crypto/coin I'm tracking, check the Z-scores for various timeframes to make a market decision
        #based on the MarketDecision spreadsheet on the desktop - LogicMatrix 1
        for crypto, cryptoDict in cryptoData.iteritems():
            if gStopFlag: break
            #print "crypto:", crypto
            # NOTE(review): these comparisons read cryptoData[crypto][...]
            # (a stats *dict*), not cryptoZScores[crypto][...].  Python 2
            # silently orders dicts against numbers, so this "works" but
            # almost certainly compares the wrong thing -- confirm.
            if (cryptoData[crypto]["30d"] <= -1) and (cryptoData[crypto]["7d"] <= -2) and (cryptoData[crypto]["1d"] > 1):
                cryptoState[crypto] = "buy"
            elif (cryptoData[crypto]["30d"] >= 1) and (cryptoData[crypto]["7d"] >= 2) and (cryptoData[crypto]["1d"] < 1):
                cryptoState[crypto] = "sell"
            else: cryptoState[crypto] = "hold"
            print "Logic Matrix #1: Status for ", crypto, " is ", cryptoState[crypto]
            #if cryptoState[crypto] != "hold":
            # NOTE(review): the price is concatenated without a trailing
            # "," so it fuses with the 30d Z-score column, and the header
            # row has no price column -- the CSV row is malformed.
            with open("transaction1.csv", "a") as file:
                file.write(str(now) + "," + crypto + "," + \
                    currentPrice.json()["price"] + \
                    str(cryptoZScores[crypto]["30d"]) + "," + \
                    str(cryptoZScores[crypto]["7d"]) + "," + \
                    str(cryptoZScores[crypto]["1d"]) + "," + \
                    cryptoState[crypto] + "\n")
        #For each crypto/coin I'm tracking, check the Z-scores for various timeframes to make a market decision
        #based on the MarketDecision spreadsheet on the desktop - Logic Matrix 2
        for crypto, cryptoDict in cryptoData.iteritems():
            if gStopFlag: break
            #print "crypto:", crypto
            # NOTE(review): same dict-vs-number comparison issue as above.
            if (cryptoData[crypto]["7d"] <= -2) and (cryptoData[crypto]["1d"] > 1):
                cryptoState[crypto] = "buy"
            elif (cryptoData[crypto]["7d"] >= 2) and (cryptoData[crypto]["1d"] < 1):
                cryptoState[crypto] = "sell"
            else: cryptoState[crypto] = "hold"
            print "Logic Matrix #2: Status for ", crypto, " is ", cryptoState[crypto]
            #if cryptoState[crypto] != "hold":
            with open("transaction2.csv", "a") as file:
                file.write(str(now) + "," + crypto + "," + \
                    str(cryptoZScores[crypto]["30d"]) + "," + \
                    str(cryptoZScores[crypto]["7d"]) + "," + \
                    str(cryptoZScores[crypto]["1d"]) + "," + \
                    cryptoState[crypto] + "\n")
        if gStopFlag:
            #TODO Clean up here
            print "in main(), exiting due to interrupt"
            sys.exit(1)
        sleeptime = 300
        print "Sleeping for", sleeptime, " seconds."
        time.sleep(sleeptime)
        print "-----------------------------------------------------------------------------"
        #Sleep for some period of time as determined by the current state
        #print "Sleeping for", state_intervals[cryptoState[crypto]]," seconds."
        #time.sleep(state_intervals[cryptoState[crypto]])
        # Get accounts
        #r = requests.get(api_url + '/accounts', auth=auth)
        #print r.json()
        # [{"id": "a1b2c3d4", "balance":...
        # Place an order - limit is the default type
        # NOTE(review): this sample order is built every pass but never
        # submitted (the POST below is commented out).
        order = {
            'size': .001,
            'price': .001,
            'side': 'buy',
            'product_id': 'BTC-USD',
        }
        #r = requests.post(api_url + '/orders', json=order, auth=auth)
        #print r.json()
        #r = requests.get(order_url, auth=auth)
        #print r.json()["status"]
    if gStopFlag:
        #TODO Clean up here, e.g. write out trend data to a file
        print "in main(), exiting due to interrupt"
        sys.exit(1)
# Script entry point: forward the CLI arguments to main().
if __name__ == "__main__":
    main(sys.argv)
|
import unittest
from ishell.console import Console
from ishell.command import Command
class TestConsole(unittest.TestCase):
    """Tests for ishell.console.Console construction and defaults."""

    # Bare `assert` statements are stripped under `python -O` and give no
    # diagnostic on failure; use unittest's assertion methods instead.

    def test_console_creation(self):
        """Console must be created."""
        c = Console()
        self.assertIsInstance(c, Console)

    def test_console_has_prompt(self):
        """Console should have a default prompt string."""
        c = Console()
        self.assertEqual(c.prompt, "Prompt")
        self.assertEqual(c.prompt_delim, ">")

    def test_console_has_empty_welcome_message(self):
        """Console should have an empty welcome message by default."""
        c = Console()
        self.assertIsNone(c.welcome_message)

    def test_console_has_welcome_message(self):
        """Console should have a welcome message."""
        c = Console(welcome_message='welcome message')
        self.assertEqual(c.welcome_message, "welcome message")
class TestCommand(unittest.TestCase):
    """Tests for ishell.command.Command creation and tab-completion.

    Converted bare ``assert`` statements to ``self.assert*`` methods:
    bare asserts are stripped under ``python -O`` and give poor failure
    messages; this also makes the file consistent with the dynamic-arg
    test below, which already used ``assertEqual``.
    """
    def test_command_creation(self):
        """Command must be created with name and default help message."""
        cmd = Command('configure')
        self.assertEqual(cmd.name, 'configure')
        self.assertEqual(cmd.help, 'No help provided')
        self.assertFalse(cmd.dynamic_args)
    def test_simple_completion(self):
        """Command must complete with only one option."""
        cmd1 = Command('configure')
        cmd2 = Command('terminal')
        cmd1.addChild(cmd2)
        candidates = cmd1.complete('', '', 0, run=False, full_line='configure ')
        self.assertEqual('terminal ', candidates)
        candidates = cmd1.complete('', '', 1, run=False, full_line='configure ')
        self.assertIsNone(candidates)
    def test_double_completion(self):
        """Command must complete with two options."""
        cmd1 = Command('configure')
        cmd2 = Command('terminal')
        cmd3 = Command('interface')
        cmd1.addChild(cmd2)
        cmd1.addChild(cmd3)
        # State 0 must print all commands followed by help message
        # and return None as candidates
        candidates = cmd1.complete('', '', 0, run=False, full_line='configure ')
        self.assertIsNone(candidates)
        candidates = cmd1.complete('', 'in', 0, run=False, full_line='configure in')
        self.assertEqual('interface ', candidates)
        candidates = cmd1.complete('', 't', 0, run=False, full_line='configure t')
        self.assertEqual('terminal ', candidates)
    def test_completion_with_buffer(self):
        """Command must complete correctly with buffer provided."""
        cmd1 = Command('configure')
        cmd2 = Command('terminal')
        cmd1.addChild(cmd2)
        candidates = cmd1.complete(['t'], 't', 0, run=False, full_line='configure ')
        self.assertEqual('terminal ', candidates)
        candidates = cmd1.complete(['t'], 't', 1, run=False, full_line='configure ')
        self.assertIsNone(candidates)
    def test_completion_with_dynamic_arg(self):
        """Dynamic-args commands must complete from their args() callable."""
        cmd1 = Command('show')
        cmd2 = Command('call', dynamic_args=True)
        cmd3 = Command('calls', dynamic_args=True)
        cmd2.args = lambda: ['100', '101']
        cmd3.args = lambda: ['continuous', 'raw']
        cmd1.addChild(cmd2)
        cmd1.addChild(cmd3)
        candidates = cmd1.complete(['c'], '', 0, run=False, full_line='show calls')
        self.assertIsNone(candidates)
        candidates = cmd1.complete(['c'], 'c', 0, run=False, full_line='show calls')
        self.assertEqual('call ', candidates)
        candidates = cmd1.complete(['c'], 'c', 1, run=False, full_line='show calls')
        self.assertEqual('calls ', candidates)
        candidates = cmd2.complete([''], '', 0, run=False, full_line='show calls')
        self.assertIsNone(candidates)
        candidates = cmd2.complete([''], '1', 0, run=False, full_line='show calls')
        self.assertEqual('100', candidates)
        candidates = cmd2.complete([''], '1', 1, run=False, full_line='show calls')
        self.assertEqual('101', candidates)
        candidates = cmd3.complete([''], '', 0, run=False, full_line='show calls c')
        self.assertIsNone(candidates)
        candidates = cmd3.complete([''], 'c', 0, run=False, full_line='show calls c')
        self.assertEqual('continuous', candidates)
        candidates = cmd3.complete([''], 'r', 0, run=False, full_line='show calls c')
        self.assertEqual('raw', candidates)
        candidates = cmd1.complete(['calls', 'c'], 'c', 0, run=False, full_line='show calls c')
        self.assertEqual('continuous', candidates)
        candidates = cmd2.complete(['1'], '1', 0, run=False, full_line='show calls c')
        self.assertEqual('100', candidates)
        candidates = cmd2.complete(['1'], '1', 1, run=False, full_line='show calls c')
        self.assertEqual('101', candidates)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
#
# Module to support the pickling of different types of connection
# objects and file objects so that they can be transferred between
# different processes.
#
# processing/reduction.py
#
# Copyright (c) 2006-2008, <NAME> --- see COPYING.txt
#
__all__ = []
import os
import sys
import socket
import threading
import copy_reg
import processing
from processing import _processing
from processing.logger import debug, subDebug, subWarning
from processing.forking import thisThreadIsSpawning
from processing.process import _registerAfterFork
#
#
#
# True when connection objects can be pickled between processes: always
# on win32 (handles are duplicated), elsewhere only if the `_processing`
# extension supports receiving file descriptors.
connections_are_picklable = (
    sys.platform == 'win32' or hasattr(_processing, 'recvFd')
    )
# `socket.fromfd` is unavailable on some platforms; fall back to
# creating a raw socket object and attaching the descriptor to it via
# the `_processing` extension module.
try:
    fromfd = socket.fromfd
except AttributeError:
    def fromfd(fd, family, type, proto=0):
        # Build a bare socket and rebind its underlying fd in place.
        s = socket._socket.socket()
        _processing.changeFd(s, fd, family, type, proto)
        return s
#
# Platform specific definitions
#
if sys.platform == 'win32':
    import _subprocess
    from processing._processing import win32
    closeHandle = win32.CloseHandle
    def duplicateHandle(handle):
        # Duplicate `handle` within the current process.
        return _subprocess.DuplicateHandle(
            _subprocess.GetCurrentProcess(), handle,
            _subprocess.GetCurrentProcess(),
            0, False, _subprocess.DUPLICATE_SAME_ACCESS
            ).Detach()
    def sendHandle(conn, handle, destination_pid):
        # Duplicate `handle` directly into the destination process and
        # send the duplicated handle's value over `conn`.
        process_handle = win32.OpenProcess(
            win32.PROCESS_ALL_ACCESS, False, destination_pid
            )
        try:
            new_handle = _subprocess.DuplicateHandle(
                _subprocess.GetCurrentProcess(), handle,
                process_handle, 0, False, _subprocess.DUPLICATE_SAME_ACCESS
                )
            conn.send(new_handle.Detach())
        finally:
            win32.CloseHandle(process_handle)
    def recvHandle(conn):
        # The sender already duplicated the handle into this process, so
        # receiving its integer value is sufficient.
        return conn.recv()
    def isInheritableHandle(handle):
        return (win32.GetHandleInformation(handle) & win32.HANDLE_FLAG_INHERIT)
else:
    # On Unix a handle is a file descriptor; fds are transferred between
    # processes by the `_processing` extension (presumably fd passing
    # over a Unix-domain socket -- see sendFd/recvFd).
    closeHandle = os.close
    duplicateHandle = os.dup
    def sendHandle(conn, handle, destination_pid):
        _processing.sendFd(conn.fileno(), handle)
    def recvHandle(conn):
        return _processing.recvFd(conn.fileno())
    def isInheritableHandle(handle):
        return True
#
# Support for a per-process server thread which caches pickled handles
#
# Duplicated handles owned by this process's handle-server thread; each
# entry is closed once it has been transferred to a requesting process.
_cache = set()
def _reset(obj):
    # Close any still-cached handles and recreate lock/listener state.
    # Called at import time and registered to run again after a fork so
    # the child does not reuse the parent's listener or cached handles.
    global _lock, _listener, _cache
    for h in _cache:
        closeHandle(h)
    _cache.clear()
    _lock = threading.Lock()
    _listener = None
_reset(None)
_registerAfterFork(_reset, _reset)
def _getListener():
    # Lazily start the per-process listener (and its daemon server
    # thread) used to hand cached handles to other processes.  Classic
    # double-checked locking: the unlocked test keeps the common path
    # cheap, the re-test under the lock prevents a double start.
    global _listener
    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                from processing.connection import Listener
                debug('starting listener and thread for sending handles')
                _listener = Listener(authenticate=True)
                t = threading.Thread(target=_serve)
                t.setDaemon(True)
                t.start()
        finally:
            _lock.release()
    return _listener
def _serve():
    # Server loop run in a daemon thread: each incoming connection asks
    # for one cached handle, which is sent to the requesting process and
    # then removed from the cache and closed locally.
    while 1:
        try:
            conn = _listener.accept()
            handle_wanted, destination_pid = conn.recv()
            _cache.remove(handle_wanted)
            sendHandle(conn, handle_wanted, destination_pid)
            closeHandle(handle_wanted)
            conn.close()
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            # Deliberately broad: the server thread must survive bad or
            # interrupted requests; failures are logged unless the
            # process is already shutting down.
            if not processing.currentProcess()._exiting:
                import traceback
                subWarning(
                    'thread for sharing handles raised exception :\n' +
                    '-'*79 + '\n' + traceback.format_exc() + '-'*79
                    )
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduceHandle(handle):
    # Reduce `handle` to picklable data (address, handle, inherited).
    # If the receiver is currently being spawned and the handle is
    # inheritable, it can be used directly; otherwise a duplicate is
    # cached here and the listener address is included so the receiver
    # can fetch the duplicate via rebuildHandle().
    if thisThreadIsSpawning() and isInheritableHandle(handle):
        return (None, handle, True)
    dup_handle = duplicateHandle(handle)
    _cache.add(dup_handle)
    subDebug('reducing handle %d', handle)
    return (_getListener().address, dup_handle, False)
def rebuildHandle(pickled_data):
    # Counterpart of reduceHandle(): either reuse an inherited handle or
    # connect back to the sender's listener and request the cached
    # duplicate to be transferred to this process.
    from processing.connection import Client
    address, handle, inherited = pickled_data
    if inherited:
        return handle
    subDebug('rebuilding handle %d', handle)
    conn = Client(address, authenticate=True)
    conn.send((handle, os.getpid()))
    new_handle = recvHandle(conn)
    conn.close()
    return new_handle
#
# Register `_processing.Connection` with `copy_reg`
#
def reduceConnection(conn):
    # Pickle support for `_processing.Connection`: reduce its fd.
    return rebuildConnection, (reduceHandle(conn.fileno()),)
def rebuildConnection(reduced_handle):
    # Rebuild the fd in the receiving process and wrap it again.
    fd = rebuildHandle(reduced_handle)
    return _processing.Connection(fd, duplicate=False)
copy_reg.pickle(_processing.Connection, reduceConnection)
#
# Register `socket.socket` with `copy_reg`
#
def reduceSocket(s):
    """Reduce socket `s` to picklable data.

    The socket's fd is reduced with reduceHandle(); family/type/proto
    are recorded so the socket can be rebuilt in the receiving process.
    """
    try:
        Family, Type, Proto = s.family, s.type, s.proto
    except AttributeError:
        # Older socket objects do not expose family/type/proto, so guess
        # the family from the bound address and query the type.
        address = s.getsockname()
        # Explicit if/else instead of the fragile `cond and X or Y`
        # idiom, which silently picks Y whenever X is falsy.
        if type(address) is str:
            Family = socket.AF_UNIX
        else:
            Family = socket.AF_INET
        Type = s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
        Proto = 0
    reduced_handle = reduceHandle(s.fileno())
    return rebuildSocket, (reduced_handle, Family, Type, Proto)
def rebuildSocket(reduced_handle, family, type, proto):
    # Recreate a socket from a transferred handle.  The transferred fd
    # is closed after fromfd() has produced the new socket object.
    fd = rebuildHandle(reduced_handle)
    _sock = fromfd(fd, family, type, proto)
    closeHandle(fd)
    return socket.socket(_sock=_sock)
copy_reg.pickle(socket.socket, reduceSocket)
#
# Register `_processing.PipeConnection` with `copy_reg`
#
if sys.platform == 'win32':
    def reducePipeConnection(conn):
        # Pickle support for `_processing.PipeConnection` (win32 only).
        return rebuildPipeConnection, (reduceHandle(conn.fileno()),)
    def rebuildPipeConnection(reduced_handle):
        handle = rebuildHandle(reduced_handle)
        return _processing.PipeConnection(handle, duplicate=False)
    copy_reg.pickle(_processing.PipeConnection, reducePipeConnection)
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import numpy as np
from .extern.validator import (
validate_scalar,
validate_array,
validate_physical_type,
)
from .utils import trapz_loglog
from .model_utils import memoize
from astropy.extern import six
from collections import OrderedDict
import os
from astropy.utils.data import get_pkg_data_filename
import warnings
import logging
# Constants and units
from astropy import units as u
# import constant values from astropy.constants
from astropy.constants import c, m_e, hbar, sigma_sb, e, m_p, alpha
__all__ = [
"Synchrotron",
"InverseCompton",
"PionDecay",
"Bremsstrahlung",
"PionDecayKelner06",
]
# Get a new logger to avoid changing the level of the astropy logger
log = logging.getLogger("naima.radiative")
log.setLevel(logging.INFO)
# NB: rebinding `e` shadows the imported elementary-charge constant with
# its Gaussian-units value, used throughout the cgs formulae below.
e = e.gauss
# Electron rest-mass energy and its unit (used to express Lorentz factors).
mec2 = (m_e * c ** 2).cgs
mec2_unit = u.Unit(mec2)
# Radiation constant a_r = 4*sigma_sb/c and classical electron radius.
ar = (4 * sigma_sb / c).to("erg/(cm3 K4)")
r0 = (e ** 2 / mec2).to("cm")
def _validate_ene(ene):
    """Validate `ene` as an energy quantity.

    Accepts a `~astropy.units.Quantity` (or anything ``u.Quantity`` can
    coerce), or a dict/`~astropy.table.Table` with an ``energy`` column.

    Returns the validated `~astropy.units.Quantity`.
    Raises TypeError when a dict/Table has no ``energy`` column.
    """
    from astropy.table import Table
    # Single isinstance call with a tuple instead of two chained checks.
    if isinstance(ene, (dict, Table)):
        try:
            ene = validate_array(
                "energy", u.Quantity(ene["energy"]), physical_type="energy"
            )
        except KeyError:
            raise TypeError("Table or dict does not have 'energy' column")
    else:
        if not isinstance(ene, u.Quantity):
            ene = u.Quantity(ene)
        validate_physical_type("energy", ene, physical_type="energy")
    return ene
class BaseRadiative(object):
    """Base class for radiative models
    This class implements the flux, sed methods and subclasses must implement
    the spectrum method which returns the intrinsic differential spectrum.
    """
    def __init__(self, particle_distribution):
        # Validate up-front that the particle distribution is (or
        # returns) a differential-energy quantity.
        self.particle_distribution = particle_distribution
        try:
            # Check first for the amplitude attribute, which will be present if
            # the particle distribution is a function from naima.models
            pd = self.particle_distribution.amplitude
            validate_physical_type(
                "Particle distribution",
                pd,
                physical_type="differential energy",
            )
        except (AttributeError, TypeError):
            # otherwise check the output
            pd = self.particle_distribution([0.1, 1, 10] * u.TeV)
            validate_physical_type(
                "Particle distribution",
                pd,
                physical_type="differential energy",
            )
    @memoize
    def flux(self, photon_energy, distance=1 * u.kpc):
        """Differential flux at a given distance from the source.
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` float or array
            Photon energy array.
        distance : :class:`~astropy.units.Quantity` float, optional
            Distance to the source. If set to 0, the intrinsic differential
            luminosity will be returned. Default is 1 kpc.
        """
        spec = self._spectrum(photon_energy)
        if distance != 0:
            distance = validate_scalar(
                "distance", distance, physical_type="length"
            )
            spec /= 4 * np.pi * distance.to("cm") ** 2
            out_unit = "1/(s cm2 eV)"
        else:
            out_unit = "1/(s eV)"
        return spec.to(out_unit)
    def sed(self, photon_energy, distance=1 * u.kpc):
        """Spectral energy distribution at a given distance from the source.
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` float or array
            Photon energy array.
        distance : :class:`~astropy.units.Quantity` float, optional
            Distance to the source. If set to 0, the intrinsic luminosity will
            be returned. Default is 1 kpc.
        """
        if distance != 0:
            out_unit = "erg/(cm2 s)"
        else:
            out_unit = "erg/s"
        photon_energy = _validate_ene(photon_energy)
        sed = (self.flux(photon_energy, distance) * photon_energy ** 2.0).to(
            out_unit
        )
        return sed
class BaseElectron(BaseRadiative):
    """Implements gam and nelec properties in addition to the BaseRadiative methods
    """
    def __init__(self, particle_distribution):
        super(BaseElectron, self).__init__(particle_distribution)
        # Parameter names whose values key the memoized flux cache.
        self.param_names = ["Eemin", "Eemax", "nEed"]
        # State used by the `memoize` decorator on BaseRadiative.flux.
        self._memoize = True
        self._cache = {}
        self._queue = []
    @property
    def _gam(self):
        """ Lorentz factor array
        """
        # Log-spaced between Eemin and Eemax with nEed points per decade.
        log10gmin = np.log10(self.Eemin / mec2).value
        log10gmax = np.log10(self.Eemax / mec2).value
        return np.logspace(
            log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin))
        )
    @property
    def _nelec(self):
        """ Particles per unit lorentz factor
        """
        pd = self.particle_distribution(self._gam * mec2)
        return pd.to(1 / mec2_unit).value
    @property
    def We(self):
        """ Total energy in electrons used for the radiative calculation
        """
        We = trapz_loglog(self._gam * self._nelec, self._gam * mec2)
        return We
    def compute_We(self, Eemin=None, Eemax=None):
        """ Total energy in electrons between energies Eemin and Eemax
        Parameters
        ----------
        Eemin : :class:`~astropy.units.Quantity` float, optional
            Minimum electron energy for energy content calculation.
        Eemax : :class:`~astropy.units.Quantity` float, optional
            Maximum electron energy for energy content calculation.
        """
        if Eemin is None and Eemax is None:
            We = self.We
        else:
            # Rebuild the Lorentz-factor grid over the requested range.
            if Eemax is None:
                Eemax = self.Eemax
            if Eemin is None:
                Eemin = self.Eemin
            log10gmin = np.log10(Eemin / mec2).value
            log10gmax = np.log10(Eemax / mec2).value
            gam = np.logspace(
                log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin))
            )
            nelec = (
                self.particle_distribution(gam * mec2).to(1 / mec2_unit).value
            )
            We = trapz_loglog(gam * nelec, gam * mec2)
        return We
    def set_We(self, We, Eemin=None, Eemax=None, amplitude_name=None):
        """ Normalize particle distribution so that the total energy in electrons
        between Eemin and Eemax is We
        Parameters
        ----------
        We : :class:`~astropy.units.Quantity` float
            Desired energy in electrons.
        Eemin : :class:`~astropy.units.Quantity` float, optional
            Minimum electron energy for energy content calculation.
        Eemax : :class:`~astropy.units.Quantity` float, optional
            Maximum electron energy for energy content calculation.
        amplitude_name : str, optional
            Name of the amplitude parameter of the particle distribution. It
            must be accessible as an attribute of the distribution function.
            Defaults to ``amplitude``.
        """
        We = validate_scalar("We", We, physical_type="energy")
        oldWe = self.compute_We(Eemin=Eemin, Eemax=Eemax)
        if amplitude_name is None:
            try:
                self.particle_distribution.amplitude *= (
                    We / oldWe
                ).decompose()
            except AttributeError:
                log.error(
                    "The particle distribution does not have an attribute"
                    " called amplitude to modify its normalization: you can"
                    " set the name with the amplitude_name parameter of set_We"
                )
        else:
            oldampl = getattr(self.particle_distribution, amplitude_name)
            setattr(
                self.particle_distribution,
                amplitude_name,
                oldampl * (We / oldWe).decompose(),
            )
class Synchrotron(BaseElectron):
    """Synchrotron emission from an electron population.
    This class uses the approximation of the synchrotron emissivity in a
    random magnetic field of Aharonian, Kelner, and Prosekin 2010, PhysRev D
    82, 3002 (`arXiv:1006.1045 <http://arxiv.org/abs/1006.1045>`_).
    Parameters
    ----------
    particle_distribution : function
        Particle distribution function, taking electron energies as a
        `~astropy.units.Quantity` array or float, and returning the particle
        energy density in units of number of electrons per unit energy as a
        `~astropy.units.Quantity` array or float.
    B : :class:`~astropy.units.Quantity` float instance, optional
        Isotropic magnetic field strength. Default: equipartition
        with CMB (3.24e-6 G)
    Other parameters
    ----------------
    Eemin : :class:`~astropy.units.Quantity` float instance, optional
        Minimum electron energy for the electron distribution. Default is 1
        GeV.
    Eemax : :class:`~astropy.units.Quantity` float instance, optional
        Maximum electron energy for the electron distribution. Default is 510
        TeV.
    nEed : scalar
        Number of points per decade in energy for the electron energy and
        distribution arrays. Default is 100.
    """
    def __init__(self, particle_distribution, B=3.24e-6 * u.G, **kwargs):
        super(Synchrotron, self).__init__(particle_distribution)
        self.B = validate_scalar("B", B, physical_type="magnetic flux density")
        self.Eemin = 1 * u.GeV
        self.Eemax = 1e9 * mec2
        self.nEed = 100
        self.param_names += ["B"]
        # kwargs may override any of the attributes set above.
        self.__dict__.update(**kwargs)
    def _spectrum(self, photon_energy):
        """Compute intrinsic synchrotron differential spectrum for energies in
        ``photon_energy``
        Compute synchrotron for random magnetic field according to
        approximation of Aharonian, Kelner, and Prosekin 2010, PhysRev D 82,
        3002 (`arXiv:1006.1045 <http://arxiv.org/abs/1006.1045>`_).
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` instance
            Photon energy array.
        """
        outspecene = _validate_ene(photon_energy)
        from scipy.special import cbrt
        def Gtilde(x):
            """
            AKP10 Eq. D7
            Factor ~2 performance gain in using cbrt(x)**n vs x**(n/3.)
            Invoking cbrt only once reduced time by ~40%
            """
            cb = cbrt(x)
            gt1 = 1.808 * cb / np.sqrt(1 + 3.4 * cb ** 2.0)
            gt2 = 1 + 2.210 * cb ** 2.0 + 0.347 * cb ** 4.0
            gt3 = 1 + 1.353 * cb ** 2.0 + 0.217 * cb ** 4.0
            return gt1 * (gt2 / gt3) * np.exp(-x)
        log.debug("calc_sy: Starting synchrotron computation with AKB2010...")
        # strip units, ensuring correct conversion
        # astropy units do not convert correctly for gyroradius calculation
        # when using cgs (SI is fine, see
        # https://github.com/astropy/astropy/issues/1687)
        CS1_0 = np.sqrt(3) * e.value ** 3 * self.B.to("G").value
        CS1_1 = (
            2
            * np.pi
            * m_e.cgs.value
            * c.cgs.value ** 2
            * hbar.cgs.value
            * outspecene.to("erg").value
        )
        CS1 = CS1_0 / CS1_1
        # Critical energy, erg
        Ec = (
            3
            * e.value
            * hbar.cgs.value
            * self.B.to("G").value
            * self._gam ** 2
        )
        Ec /= 2 * (m_e * c).cgs.value
        # Photon energy over critical energy; np.vstack turns Ec into a
        # column so EgEc has one row per electron Lorentz factor.
        EgEc = outspecene.to("erg").value / np.vstack(Ec)
        dNdE = CS1 * Gtilde(EgEc)
        # return units
        spec = (
            trapz_loglog(np.vstack(self._nelec) * dNdE, self._gam, axis=0)
            / u.s
            / u.erg
        )
        spec = spec.to("1/(s eV)")
        return spec
def G12(x, a):
    """Anisotropic IC shape function.

    Eqs 20, 24, 25 of Khangulyan et al (2014): product of the
    exponential-cutoff factor G(x) and a rational suppression factor
    parametrised by ``a = (alpha, a, beta, b)``.
    """
    p_alpha, p_a, p_beta, p_b = a
    pi2_over_6 = np.pi ** 2 / 6.0
    big_g = np.exp(-x) * (pi2_over_6 + x)
    denom = p_a * x ** p_alpha / (1 + p_b * x ** p_beta) + 1.0
    return big_g / denom
def G34(x, a):
    """Isotropic IC shape function.

    Eqs 20, 24, 25 of Khangulyan et al (2014): product of the
    exponential-cutoff factor G(x) and a rational suppression factor
    parametrised by ``a = (alpha, a, beta, b, c)``.
    """
    p_alpha, p_a, p_beta, p_b, p_c = a
    pi2_over_6 = np.pi ** 2 / 6.0
    big_g = (
        pi2_over_6
        * np.exp(-x)
        * (1 + p_c * x)
        / (1 + pi2_over_6 * p_c * x)
    )
    denom = p_a * x ** p_alpha / (1 + p_b * x ** p_beta) + 1.0
    return big_g / denom
class InverseCompton(BaseElectron):
"""Inverse Compton emission from an electron population.
If you use this class in your research, please consult and cite
`<NAME>., <NAME>., & <NAME>. 2014, Astrophysical
Journal, 783, 100 <http://adsabs.harvard.edu/abs/2014ApJ...783..100K>`_
Parameters
----------
particle_distribution : function
Particle distribution function, taking electron energies as a
`~astropy.units.Quantity` array or float, and returning the particle
energy density in units of number of electrons per unit energy as a
`~astropy.units.Quantity` array or float.
seed_photon_fields : string or iterable of strings (optional)
A list of gray-body or non-thermal seed photon fields to use for IC
calculation. Each of the items of the iterable can be either:
* A string equal to ``CMB`` (default), ``NIR``, or ``FIR``, for which
radiation fields with temperatures of 2.72 K, 30 K, and 3000 K, and
energy densities of 0.261, 0.5, and 1 eV/cm³ will be used (these are
the GALPROP values for a location at a distance of 6.5 kpc from the
galactic center).
* A list of length three (isotropic source) or four (anisotropic
source) composed of:
1. A name for the seed photon field.
2. Its temperature (thermal source) or energy (monochromatic or
non-thermal source) as a :class:`~astropy.units.Quantity`
instance.
3. Its photon field energy density as a
:class:`~astropy.units.Quantity` instance.
4. Optional: The angle between the seed photon direction and the
scattered photon direction as a :class:`~astropy.units.Quantity`
float instance.
Other parameters
----------------
Eemin : :class:`~astropy.units.Quantity` float instance, optional
Minimum electron energy for the electron distribution. Default is 1
GeV.
Eemax : :class:`~astropy.units.Quantity` float instance, optional
Maximum electron energy for the electron distribution. Default is 510
TeV.
nEed : scalar
Number of points per decade in energy for the electron energy and
distribution arrays. Default is 300.
"""
def __init__(
self, particle_distribution, seed_photon_fields=["CMB"], **kwargs
):
super(InverseCompton, self).__init__(particle_distribution)
self.seed_photon_fields = self._process_input_seed(seed_photon_fields)
self.Eemin = 1 * u.GeV
self.Eemax = 1e9 * mec2
self.nEed = 100
self.param_names += ["seed_photon_fields"]
self.__dict__.update(**kwargs)
    @staticmethod
    def _process_input_seed(seed_photon_fields):
        """
        take input list of seed_photon_fields and fix them into usable format
        (an OrderedDict mapping seed name -> dict of seed properties)
        """
        # Built-in thermal fields: CMB, far-IR and near-IR (GALPROP-like
        # values, see class docstring).
        Tcmb = 2.72548 * u.K  # 0.00057 K
        Tfir = 30 * u.K
        ufir = 0.5 * u.eV / u.cm ** 3
        Tnir = 3000 * u.K
        unir = 1.0 * u.eV / u.cm ** 3
        # Allow for seed_photon_fields definitions of the type 'CMB-NIR-FIR' or
        # 'CMB'
        if type(seed_photon_fields) != list:
            seed_photon_fields = seed_photon_fields.split("-")
        result = OrderedDict()
        # NOTE(review): `idx` is unused in the loop body.
        for idx, inseed in enumerate(seed_photon_fields):
            seed = {}
            if isinstance(inseed, six.string_types):
                # Named built-in thermal field.
                name = inseed
                seed["type"] = "thermal"
                if inseed == "CMB":
                    seed["T"] = Tcmb
                    seed["u"] = ar * Tcmb ** 4
                    seed["isotropic"] = True
                elif inseed == "FIR":
                    seed["T"] = Tfir
                    seed["u"] = ufir
                    seed["isotropic"] = True
                elif inseed == "NIR":
                    seed["T"] = Tnir
                    seed["u"] = unir
                    seed["isotropic"] = True
                else:
                    log.warning(
                        "Will not use seed {0} because it is not "
                        "CMB, FIR or NIR".format(inseed)
                    )
                    raise TypeError
            elif type(inseed) == list and (
                len(inseed) == 3 or len(inseed) == 4
            ):
                # Custom field: [name, T-or-energy, density(, theta)].
                # A fourth element marks an anisotropic field.
                isotropic = len(inseed) == 3
                if isotropic:
                    name, T, uu = inseed
                    seed["isotropic"] = True
                else:
                    name, T, uu, theta = inseed
                    seed["isotropic"] = False
                    seed["theta"] = validate_scalar(
                        "{0}-theta".format(name), theta, physical_type="angle"
                    )
                thermal = T.unit.physical_type == "temperature"
                if thermal:
                    seed["type"] = "thermal"
                    validate_scalar(
                        "{0}-T".format(name),
                        T,
                        domain="positive",
                        physical_type="temperature",
                    )
                    seed["T"] = T
                    if uu == 0:
                        # Zero density means "use the blackbody value".
                        seed["u"] = ar * T ** 4
                    else:
                        # pressure has same physical type as energy density
                        validate_scalar(
                            "{0}-u".format(name),
                            uu,
                            domain="positive",
                            physical_type="pressure",
                        )
                        seed["u"] = uu
                else:
                    # Monochromatic or tabulated non-thermal field.
                    seed["type"] = "array"
                    # Ensure everything is in arrays
                    T = u.Quantity((T,)).flatten()
                    uu = u.Quantity((uu,)).flatten()
                    seed["energy"] = validate_array(
                        "{0}-energy".format(name),
                        T,
                        domain="positive",
                        physical_type="energy",
                    )
                    if np.isscalar(seed["energy"]) or seed["energy"].size == 1:
                        seed["photon_density"] = validate_scalar(
                            "{0}-density".format(name),
                            uu,
                            domain="positive",
                            physical_type="pressure",
                        )
                    else:
                        # Convert an energy-density spectrum to a
                        # differential number density if needed.
                        if uu.unit.physical_type == "pressure":
                            uu /= seed["energy"] ** 2
                        seed["photon_density"] = validate_array(
                            "{0}-density".format(name),
                            uu,
                            domain="positive",
                            physical_type="differential number density",
                        )
            else:
                raise TypeError(
                    "Unable to process seed photon"
                    " field: {0}".format(inseed)
                )
            result[name] = seed
        return result
    @staticmethod
    def _iso_ic_on_planck(
        electron_energy, soft_photon_temperature, gamma_energy
    ):
        """
        IC cross-section for isotropic interaction with a blackbody photon
        spectrum following Eq. 14 of Khangulyan, Aharonian, and Kelner 2014,
        ApJ 783, 100 (`arXiv:1310.7971 <http://www.arxiv.org/abs/1310.7971>`_).
        `electron_energy` and `gamma_energy` are in units of m_ec^2
        `soft_photon_temperature` is in units of K
        """
        # K -> m_e c^2 conversion factor (k_B * 1 K / m_e c^2).
        Ktomec2 = 1.6863699549e-10
        soft_photon_temperature *= Ktomec2
        # Column vector: one row per output photon energy.
        gamma_energy = np.vstack(gamma_energy)
        # Parameters from Eqs 26, 27
        a3 = [0.606, 0.443, 1.481, 0.540, 0.319]
        a4 = [0.461, 0.726, 1.457, 0.382, 6.620]
        z = gamma_energy / electron_energy
        x = z / (1 - z) / (4.0 * electron_energy * soft_photon_temperature)
        # Eq. 14
        cross_section = z ** 2 / (2 * (1 - z)) * G34(x, a3) + G34(x, a4)
        tmp = (soft_photon_temperature / electron_energy) ** 2
        # r0 = (e**2 / m_e / c**2).to('cm')
        # (2 * r0 ** 2 * m_e ** 3 * c ** 4 / (pi * hbar ** 3)).cgs
        tmp *= 2.6318735743809104e16
        cross_section = tmp * cross_section
        # Zero the result outside the kinematically allowed region.
        cc = (gamma_energy < electron_energy) * (electron_energy > 1)
        return np.where(cc, cross_section, np.zeros_like(cross_section))
    @staticmethod
    def _ani_ic_on_planck(
        electron_energy, soft_photon_temperature, gamma_energy, theta
    ):
        """
        IC cross-section for anisotropic interaction with a blackbody photon
        spectrum following Eq. 11 of Khangulyan, Aharonian, and Kelner 2014,
        ApJ 783, 100 (`arXiv:1310.7971 <http://www.arxiv.org/abs/1310.7971>`_).
        `electron_energy` and `gamma_energy` are in units of m_ec^2
        `soft_photon_temperature` is in units of K
        `theta` is in radians
        """
        # K -> m_e c^2 conversion factor (k_B * 1 K / m_e c^2).
        Ktomec2 = 1.6863699549e-10
        soft_photon_temperature *= Ktomec2
        # Column vector: one row per output photon energy.
        gamma_energy = gamma_energy[:, None]
        # Parameters from Eqs 21, 22
        a1 = [0.857, 0.153, 1.840, 0.254]
        a2 = [0.691, 1.330, 1.668, 0.534]
        z = gamma_energy / electron_energy
        ttheta = (
            2.0
            * electron_energy
            * soft_photon_temperature
            * (1.0 - np.cos(theta))
        )
        x = z / (1 - z) / ttheta
        # Eq. 11
        cross_section = z ** 2 / (2 * (1 - z)) * G12(x, a1) + G12(x, a2)
        tmp = (soft_photon_temperature / electron_energy) ** 2
        # r0 = (e**2 / m_e / c**2).to('cm')
        # (2 * r0 ** 2 * m_e ** 3 * c ** 4 / (pi * hbar ** 3)).cgs
        tmp *= 2.6318735743809104e16
        cross_section = tmp * cross_section
        # Zero the result outside the kinematically allowed region.
        cc = (gamma_energy < electron_energy) * (electron_energy > 1)
        return np.where(cc, cross_section, np.zeros_like(cross_section))
    @staticmethod
    def _iso_ic_on_monochromatic(
        electron_energy, seed_energy, seed_edensity, gamma_energy
    ):
        """
        IC cross-section for an isotropic interaction with a monochromatic
        photon spectrum following Eq. 22 of Aharonian & Atoyan 1981, Ap&SS 79,
        321 (`http://adsabs.harvard.edu/abs/1981Ap%26SS..79..321A`_)
        """
        photE0 = (seed_energy / mec2).decompose().value
        phn = seed_edensity
        # electron_energy = electron_energy[:, None]
        gamma_energy = gamma_energy[:, None]
        photE0 = photE0[:, None, None]
        phn = phn[:, None, None]
        b = 4 * photE0 * electron_energy
        w = gamma_energy / electron_energy
        q = w / (b * (1 - w))
        fic = (
            2 * q * np.log(q)
            + (1 + 2 * q) * (1 - q)
            + (1.0 / 2.0) * (b * q) ** 2 * (1 - q) / (1 + b * q)
        )
        # `heaviside` is a module-level helper defined elsewhere in this
        # file (not visible in this chunk); it restricts q to the
        # kinematically allowed range.
        gamint = (
            fic
            * heaviside(1 - q)
            * heaviside(q - 1.0 / (4 * electron_energy ** 2))
        )
        gamint[np.isnan(gamint)] = 0.0
        # Tabulated spectrum: integrate over seed energies; for a single
        # (monochromatic) seed, phn is an energy density instead of a
        # differential number density, hence the different unit handling.
        if phn.size > 1:
            phn = phn.to(1 / (mec2_unit * u.cm ** 3)).value
            gamint = trapz_loglog(gamint * phn / photE0, photE0, axis=0)  # 1/s
        else:
            phn = phn.to(mec2_unit / u.cm ** 3).value
            gamint *= phn / photE0 ** 2
        gamint = gamint.squeeze()
        # gamint /= mec2.to('erg').value
        # r0 = (e**2 / m_e / c**2).to('cm')
        # sigt = ((8 * np.pi) / 3 * r0**2).cgs
        sigt = 6.652458734983284e-25
        c = 29979245800.0
        gamint *= (3.0 / 4.0) * sigt * c / electron_energy ** 2
        return gamint
    def _calc_specic(self, seed, outspecene):
        """Compute the IC differential spectrum (1/s/eV) on one seed field."""
        log.debug(
            "_calc_specic: Computing IC on {0} seed photons...".format(seed)
        )
        Eph = (outspecene / mec2).decompose().value
        # Catch numpy RuntimeWarnings of overflowing exp (which are then
        # discarded anyway)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if self.seed_photon_fields[seed]["type"] == "thermal":
                T = self.seed_photon_fields[seed]["T"]
                # Dilution factor of the field relative to a blackbody at T.
                uf = (
                    self.seed_photon_fields[seed]["u"] / (ar * T ** 4)
                ).decompose()
                if self.seed_photon_fields[seed]["isotropic"]:
                    gamint = self._iso_ic_on_planck(
                        self._gam, T.to("K").value, Eph
                    )
                else:
                    theta = (
                        self.seed_photon_fields[seed]["theta"].to("rad").value
                    )
                    gamint = self._ani_ic_on_planck(
                        self._gam, T.to("K").value, Eph, theta
                    )
            else:
                uf = 1
                gamint = self._iso_ic_on_monochromatic(
                    self._gam,
                    self.seed_photon_fields[seed]["energy"],
                    self.seed_photon_fields[seed]["photon_density"],
                    Eph,
                )
            lum = uf * Eph * trapz_loglog(self._nelec * gamint, self._gam)
            lum = lum * u.Unit("1/s")
        return lum / outspecene  # return differential spectrum in 1/s/eV
    def _spectrum(self, photon_energy):
        """Compute differential IC spectrum for energies in ``photon_energy``.
        Compute IC spectrum using IC cross-section for isotropic interaction
        with a blackbody photon spectrum following Khangulyan, Aharonian, and
        Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971
        <http://www.arxiv.org/abs/1310.7971>`_).
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` instance
            Photon energy array.
        """
        outspecene = _validate_ene(photon_energy)
        # Keep per-seed spectra so flux()/sed() can return individual
        # seed contributions.
        self.specic = []
        for seed in self.seed_photon_fields:
            # Call actual computation, detached to allow changes in subclasses
            self.specic.append(
                self._calc_specic(seed, outspecene).to("1/(s eV)")
            )
        return np.sum(u.Quantity(self.specic), axis=0)
def flux(self, photon_energy, distance=1 * u.kpc, seed=None):
"""Differential flux at a given distance from the source from a single
seed photon field
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
"""
model = super(InverseCompton, self).flux(
photon_energy, distance=distance
)
if seed is not None:
# Test seed argument
if not isinstance(seed, int):
if seed not in self.seed_photon_fields:
raise ValueError(
"Provided seed photon field name is not in"
" the definition of the InverseCompton instance"
)
else:
seed = list(self.seed_photon_fields.keys()).index(seed)
elif seed > len(self.seed_photon_fields):
raise ValueError(
"Provided seed photon field number is larger"
" than the number of seed photon fields defined in the"
" InverseCompton instance"
)
if distance != 0:
distance = validate_scalar(
"distance", distance, physical_type="length"
)
dfac = 4 * np.pi * distance.to("cm") ** 2
out_unit = "1/(s cm2 eV)"
else:
dfac = 1
out_unit = "1/(s eV)"
model = (self.specic[seed] / dfac).to(out_unit)
return model
    def sed(self, photon_energy, distance=1 * u.kpc, seed=None):
        """Spectral energy distribution at a given distance from the source
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` float or array
            Photon energy array.
        distance : :class:`~astropy.units.Quantity` float, optional
            Distance to the source. If set to 0, the intrinsic luminosity will
            be returned. Default is 1 kpc.
        seed : int, str or None
            Number or name of seed photon field for which the IC contribution
            is required. If set to None it will return the sum of all
            contributions (default).
        """
        sed = super(InverseCompton, self).sed(photon_energy, distance=distance)
        if seed is not None:
            # Recompute the SED from the requested seed's flux only.
            if distance != 0:
                out_unit = "erg/(cm2 s)"
            else:
                out_unit = "erg/s"
            sed = (
                self.flux(photon_energy, distance=distance, seed=seed)
                * photon_energy ** 2.0
            ).to(out_unit)
        return sed
class Bremsstrahlung(BaseElectron):
    r"""
    Bremsstrahlung radiation on a completely ionised gas.
    This class uses the cross-section approximation of `Baring, M.G., <NAME>., <NAME>., <NAME>., & <NAME>. 1999, Astrophysical
    Journal, 513, 311 <http://adsabs.harvard.edu/abs/1999ApJ...513..311B>`_.
    The default weights are assuming a completely ionised target gas with ISM
    abundances. If pure electron-electron bremsstrahlung is desired, ``n0`` can
    be set to the electron density, ``weight_ep`` to 0 and ``weight_ee`` to 1.
    Parameters
    ----------
    n0 : :class:`~astropy.units.Quantity` float
        Total ion number density.
    Other parameters
    ----------------
    weight_ee : float
        Weight of electron-electron bremsstrahlung. Defined as :math:`\sum_i
        Z_i X_i`, default is 1.088.
    weight_ep : float
        Weight of electron-proton bremsstrahlung. Defined as :math:`\sum_i
        Z_i^2 X_i`, default is 1.263.
    """
    def __init__(self, particle_distribution, n0=1 / u.cm ** 3, **kwargs):
        """Set default electron grid, target density and ee/ep weights."""
        super(Bremsstrahlung, self).__init__(particle_distribution)
        self.n0 = n0
        # Default electron energy grid: 300 points per decade between
        # 100 MeV and 1e9 * m_e c^2.
        self.Eemin = 100 * u.MeV
        self.Eemax = 1e9 * mec2
        self.nEed = 300
        # compute ee and ep weights from H and He abundances in ISM assuming
        # ionized medium
        Y = np.array([1.0, 9.59e-2])
        Z = np.array([1, 2])
        N = np.sum(Y)
        X = Y / N
        self.weight_ee = np.sum(Z * X)
        self.weight_ep = np.sum(Z ** 2 * X)
        self.param_names += ["n0", "weight_ee", "weight_ep"]
        # Allow keyword arguments to override any attribute set above.
        self.__dict__.update(**kwargs)
    @staticmethod
    def _sigma_1(gam, eps):
        """
        gam and eps in units of m_e c^2
        Eq. A2 of Baring et al. (1999)
        Return in units of cm2 / mec2
        """
        s1 = 4 * r0 ** 2 * alpha / eps / mec2_unit
        s2 = 1 + (1.0 / 3.0 - eps / gam) * (1 - eps / gam)
        s3 = np.log(2 * gam * (gam - eps) / eps) - 1.0 / 2.0
        # Photon energy above electron energy is unphysical; zero it out.
        s3[np.where(gam < eps)] = 0.0
        return s1 * s2 * s3
    @staticmethod
    def _sigma_2(gam, eps):
        """
        gam and eps in units of m_e c^2
        Eq. A3 of Baring et al. (1999)
        Return in units of cm2 / mec2
        """
        s0 = r0 ** 2 * alpha / (3 * eps) / mec2_unit
        s1_1 = 16 * (1 - eps + eps ** 2) * np.log(gam / eps)
        s1_2 = -1 / eps ** 2 + 3 / eps - 4 - 4 * eps - 8 * eps ** 2
        s1_3 = -2 * (1 - 2 * eps) * np.log(1 - 2 * eps)
        s1_4 = 1 / (4 * eps ** 3) - 1 / (2 * eps ** 2) + 3 / eps - 2 + 4 * eps
        s1 = s1_1 + s1_2 + s1_3 * s1_4
        s2_1 = 2 / eps
        s2_2 = (4 - 1 / eps + 1 / (4 * eps ** 2)) * np.log(2 * gam)
        s2_3 = -2 + 2 / eps - 5 / (8 * eps ** 2)
        s2 = s2_1 * (s2_2 + s2_3)
        # Branch on eps <= 0.5 (s1) vs eps > 0.5 (s2); heaviside enforces
        # the gam > eps kinematic limit.
        return s0 * np.where(eps <= 0.5, s1, s2) * heaviside(gam - eps)
    def _sigma_ee_rel(self, gam, eps):
        """
        Eq. A1, A4 of Baring et al. (1999)
        Use for Ee > 2 MeV
        """
        # A is the low-energy correction factor of Eq. A4.
        A = 1 - 8 / 3 * (gam - 1) ** 0.2 / (gam + 1) * (eps / gam) ** (
            1.0 / 3.0
        )
        return (self._sigma_1(gam, eps) + self._sigma_2(gam, eps)) * A
    @staticmethod
    def _F(x, gam):
        """
        Eqs. A6, A7 of Baring et al. (1999)
        """
        beta = np.sqrt(1 - gam ** -2)
        B = 1 + 0.5 * (gam ** 2 - 1)
        C = 10 * x * gam * beta * (2 + gam * beta)
        C /= 1 + x ** 2 * (gam ** 2 - 1)
        F_1 = (17 - 3 * x ** 2 / (2 - x) ** 2 - C) * np.sqrt(1 - x)
        F_2 = 12 * (2 - x) - 7 * x ** 2 / (2 - x) - 3 * x ** 4 / (2 - x) ** 3
        F_3 = np.log((1 + np.sqrt(1 - x)) / np.sqrt(x))
        return B * F_1 + F_2 * F_3
    def _sigma_ee_nonrel(self, gam, eps):
        """
        Eq. A5 of Baring et al. (1999)
        Use for Ee < 2 MeV
        """
        s0 = 4 * r0 ** 2 * alpha / (15 * eps)
        x = 4 * eps / (gam ** 2 - 1)
        sigma_nonrel = s0 * self._F(x, gam)
        # Zero out photon energies beyond the kinematic limit and
        # sub-relativistic (gam < 1) entries.
        sigma_nonrel[np.where(eps >= 0.25 * (gam ** 2 - 1.0))] = 0.0
        sigma_nonrel[np.where(gam * np.ones_like(eps) < 1.0)] = 0.0
        return sigma_nonrel / mec2_unit
    def _sigma_ee(self, gam, Eph):
        # Electron-electron cross section, combining the non-relativistic
        # and relativistic approximations across the 2 MeV transition.
        eps = (Eph / mec2).decompose().value
        # initialize shape and units of cross section
        sigma = np.zeros_like(gam * eps) * u.Unit(u.cm ** 2 / Eph.unit)
        gam_trans = (2 * u.MeV / mec2).decompose().value
        # Non relativistic below 2 MeV
        if np.any(gam <= gam_trans):
            nr_matrix = np.where(gam * np.ones_like(gam * eps) <= gam_trans)
            with warnings.catch_warnings():
                # invalid-value warnings are expected outside each
                # approximation's validity range; results are masked anyway
                warnings.simplefilter("ignore")
                sigma[nr_matrix] = self._sigma_ee_nonrel(gam, eps)[nr_matrix]
        # Relativistic above 2 MeV
        if np.any(gam > gam_trans):
            rel_matrix = np.where(gam * np.ones_like(gam * eps) > gam_trans)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                sigma[rel_matrix] = self._sigma_ee_rel(gam, eps)[rel_matrix]
        return sigma.to(u.cm ** 2 / Eph.unit)
    def _sigma_ep(self, gam, eps):
        """
        Using sigma_1 only applies to the ultrarelativistic regime.
        Eph > 10 MeV
        ToDo: add complete e-p cross-section
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return self._sigma_1(gam, eps)
    def _emiss_ee(self, Eph):
        """
        Electron-electron bremsstrahlung emissivity per unit photon energy
        """
        if self.weight_ee == 0.0:
            return np.zeros_like(Eph)
        gam = np.vstack(self._gam)
        # compute integral with electron distribution
        emiss = c.cgs * trapz_loglog(
            np.vstack(self._nelec) * self._sigma_ee(gam, Eph),
            self._gam,
            axis=0,
        )
        return emiss
    def _emiss_ep(self, Eph):
        """
        Electron-proton bremsstrahlung emissivity per unit photon energy
        """
        if self.weight_ep == 0.0:
            return np.zeros_like(Eph)
        gam = np.vstack(self._gam)
        eps = (Eph / mec2).decompose().value
        # compute integral with electron distribution
        emiss = c.cgs * trapz_loglog(
            np.vstack(self._nelec) * self._sigma_ep(gam, eps),
            self._gam,
            axis=0,
        ).to(u.cm ** 2 / Eph.unit)
        return emiss
    def _spectrum(self, photon_energy):
        """Compute differential bremsstrahlung spectrum for energies in
        ``photon_energy``.
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` instance
            Photon energy array.
        """
        Eph = _validate_ene(photon_energy)
        # Weighted sum of the ee and ep channels scaled by target density.
        spec = self.n0 * (
            self.weight_ee * self._emiss_ee(Eph)
            + self.weight_ep * self._emiss_ep(Eph)
        )
        return spec
class BaseProton(BaseRadiative):
    """Implements compute_Wp at arbitrary energies

    Base class for proton-driven radiative models; provides the proton
    energy grid, the evaluated particle distribution, and total-energy
    bookkeeping (``Wp``, ``compute_Wp``, ``set_Wp``).
    """
    def __init__(self, particle_distribution):
        super(BaseProton, self).__init__(particle_distribution)
        # Grid parameters; concrete values are set by subclasses.
        self.param_names = ["Epmin", "Epmax", "nEpd"]
        # Per-instance memoization state (used by the BaseRadiative
        # machinery — presumably; not visible here).
        self._memoize = True
        self._cache = {}
        self._queue = []
    @property
    def _Ep(self):
        """ Proton energy array in GeV
        """
        # Logarithmic grid with nEpd points per decade of energy.
        return np.logspace(
            np.log10(self.Epmin.to("GeV").value),
            np.log10(self.Epmax.to("GeV").value),
            int(self.nEpd * (np.log10(self.Epmax / self.Epmin))),
        )
    @property
    def _J(self):
        """ Particles per unit proton energy in particles per GeV
        """
        pd = self.particle_distribution(self._Ep * u.GeV)
        return pd.to("1/GeV").value
    @property
    def Wp(self):
        """Total energy in protons
        """
        # Integral of Ep * J(Ep) dEp over the full grid.
        Wp = trapz_loglog(self._Ep * self._J, self._Ep) * u.GeV
        return Wp.to("erg")
    def compute_Wp(self, Epmin=None, Epmax=None):
        """ Total energy in protons between energies Epmin and Epmax
        Parameters
        ----------
        Epmin : :class:`~astropy.units.Quantity` float, optional
            Minimum proton energy for energy content calculation.
        Epmax : :class:`~astropy.units.Quantity` float, optional
            Maximum proton energy for energy content calculation.
        """
        if Epmin is None and Epmax is None:
            # No limits given: reuse the cached full-range property.
            Wp = self.Wp
        else:
            # Fill in the missing bound from the instance defaults and
            # integrate over a fresh logarithmic grid.
            if Epmax is None:
                Epmax = self.Epmax
            if Epmin is None:
                Epmin = self.Epmin
            log10Epmin = np.log10(Epmin.to("GeV").value)
            log10Epmax = np.log10(Epmax.to("GeV").value)
            Ep = (
                np.logspace(
                    log10Epmin,
                    log10Epmax,
                    int(self.nEpd * (log10Epmax - log10Epmin)),
                )
                * u.GeV
            )
            pdist = self.particle_distribution(Ep)
            Wp = trapz_loglog(Ep * pdist, Ep).to("erg")
        return Wp
    def set_Wp(self, Wp, Epmin=None, Epmax=None, amplitude_name=None):
        """ Normalize particle distribution so that the total energy in protons
        between Epmin and Epmax is Wp
        Parameters
        ----------
        Wp : :class:`~astropy.units.Quantity` float
            Desired energy in protons.
        Epmin : :class:`~astropy.units.Quantity` float, optional
            Minimum proton energy for energy content calculation.
        Epmax : :class:`~astropy.units.Quantity` float, optional
            Maximum proton energy for energy content calculation.
        amplitude_name : str, optional
            Name of the amplitude parameter of the particle distribution. It
            must be accesible as an attribute of the distribution function.
            Defaults to ``amplitude``.
        """
        Wp = validate_scalar("Wp", Wp, physical_type="energy")
        oldWp = self.compute_Wp(Epmin=Epmin, Epmax=Epmax)
        if amplitude_name is None:
            # Rescale the distribution's amplitude by the ratio of desired
            # to current energy content.
            try:
                self.particle_distribution.amplitude *= (
                    Wp / oldWp
                ).decompose()
            except AttributeError:
                log.error(
                    "The particle distribution does not have an attribute"
                    " called amplitude to modify its normalization: you can"
                    " set the name with the amplitude_name parameter of set_Wp"
                )
        else:
            oldampl = getattr(self.particle_distribution, amplitude_name)
            setattr(
                self.particle_distribution,
                amplitude_name,
                oldampl * (Wp / oldWp).decompose(),
            )
class PionDecay(BaseProton):
    r"""Pion decay gamma-ray emission from a proton population.
    Compute gamma-ray spectrum arising from the interaction of a relativistic
    proton distribution with stationary target protons using the
    parametrization of Kafexhiu et al. (2014).
    If you use this class in your research, please consult and cite `Kafexhiu,
    E., <NAME>., <NAME>., & <NAME>. 2014, Physical Review D, 90,
    123014 <http://adsabs.harvard.edu/abs/2014PhRvD..90l3014K>`_.
    Parameters
    ----------
    particle_distribution : function
        Particle distribution function, taking proton energies as a
        `~astropy.units.Quantity` array or float, and returning the particle
        energy density in units of number of protons per unit energy as a
        `~astropy.units.Quantity` array or float.
    nh : `~astropy.units.Quantity`
        Number density of the target protons. Default is :math:`1
        \mathrm{cm}^{-3}`.
    nuclear_enhancement : bool
        Whether to apply the energy-dependent nuclear enhancement factor
        considering a target gas with local ISM abundances. See Section IV of
        Kafexhiu et al. (2014) for details. Here the proton-nucleus inelastic
        cross section of Sihver et al. (1993, PhysRevC 47, 1225) is used.
    Other parameters
    ----------------
    Epmin : `~astropy.units.Quantity` float
        Minimum proton energy for the proton distribution. Default is 1.22 GeV,
        the dynamical threshold for pion production in pp interactions.
    Epmax : `~astropy.units.Quantity` float
        Minimum proton energy for the proton
        distribution. Default is 10 PeV.
    nEpd : scalar
        Number of points per decade in energy for the proton energy and
        distribution arrays. Default is 100.
    hiEmodel : str
        Monte Carlo model to use for computation of high-energy differential
        cross section. Can be one of ``Geant4``, ``Pythia8``, ``SIBYLL``, or
        ``QGSJET``. See Kafexhiu et al. (2014) for details. Default is
        ``Pythia8``.
    useLUT : bool
        Whether to use a lookup table for the differential cross section. The
        only lookup table packaged with naima is for the Pythia 8 model and
        ISM nuclear enhancement factor.
    """
    def __init__(
        self,
        particle_distribution,
        nh=1.0 / u.cm ** 3,
        nuclear_enhancement=True,
        **kwargs
    ):
        super(PionDecay, self).__init__(particle_distribution)
        self.nh = validate_scalar("nh", nh, physical_type="number density")
        self.nuclear_enhancement = nuclear_enhancement
        self.useLUT = True
        self.hiEmodel = "Pythia8"
        self.Epmin = (
            self._m_p + self._Tth + 1e-4
        ) * u.GeV  # Threshold energy ~1.22 GeV
        self.Epmax = 10 * u.PeV  # 10 PeV
        self.nEpd = 100
        self.param_names += ["nh", "nuclear_enhancement", "useLUT", "hiEmodel"]
        # Allow keyword arguments to override any attribute set above.
        self.__dict__.update(**kwargs)
    # define model parameters from tables
    # yapf: disable
    #
    # Table IV
    _a = {}
    _a['Geant4'] = [0.728, 0.596, 0.491, 0.2503, 0.117]  # Tp > 5
    _a['Pythia8'] = [0.652, 0.0016, 0.488, 0.1928, 0.483]  # Tp > 50
    _a['SIBYLL'] = [5.436, 0.254, 0.072, 0.075, 0.166]  # Tp > 100
    _a['QGSJET'] = [0.908, 0.0009, 6.089, 0.176, 0.448]  # Tp > 100
    #
    # table V data
    # note that np.nan indicate that functions of Tp are needed and are defined
    # as need in function F
    # parameter order is lambda, alpha, beta, gamma
    _F_mp = {}
    _F_mp['ExpData'] = [1.0, 1.0, np.nan, 0.0]  # Tth <= Tp <= 1.0
    _F_mp['Geant4_0'] = [3.0, 1.0, np.nan, np.nan]  # 1.0 < Tp <= 4.0
    _F_mp['Geant4_1'] = [3.0, 1.0, np.nan, np.nan]  # 4.0 < Tp <= 20.0
    _F_mp['Geant4_2'] = [3.0, 0.5, 4.2, 1.0]  # 20.0 < Tp <= 100
    _F_mp['Geant4'] = [3.0, 0.5, 4.9, 1.0]  # Tp > 100
    _F_mp['Pythia8'] = [3.5, 0.5, 4.0, 1.0]  # Tp > 50
    _F_mp['SIBYLL'] = [3.55, 0.5, 3.6, 1.0]  # Tp > 100
    _F_mp['QGSJET'] = [3.55, 0.5, 4.5, 1.0]  # Tp > 100
    #
    # Table VII
    _b = {}
    _b['Geant4_0'] = [9.53, 0.52, 0.054]  # 1 <= Tp < 5
    _b['Geant4'] = [9.13, 0.35, 9.7e-3]  # Tp >= 5
    _b['Pythia8'] = [9.06, 0.3795, 0.01105]  # Tp > 50
    _b['SIBYLL'] = [10.77, 0.412, 0.01264]  # Tp > 100
    _b['QGSJET'] = [13.16, 0.4419, 0.01439]  # Tp > 100
    # yapf: enable
    # energy at which each of the hiE models start being valid
    _Etrans = {"Pythia8": 50, "SIBYLL": 100, "QGSJET": 100, "Geant4": 100}
    #
    # Proton rest mass energy in GeV and pion parameters (GeV units).
    _m_p = (m_p * c ** 2).to("GeV").value
    _m_pi = 0.1349766  # GeV/c2
    _Tth = 0.27966184
    def _sigma_inel(self, Tp):
        """
        Inelastic cross-section for p-p interaction. KATV14 Eq. 1
        Parameters
        ----------
        Tp : float
            Kinetic energy of proton (i.e. Ep - m_p*c**2) [GeV]
        Returns
        -------
        sigma_inel : float
            Inelastic cross-section for p-p interaction [cm2].
        """
        L = np.log(Tp / self._Tth)
        sigma = 30.7 - 0.96 * L + 0.18 * L ** 2
        sigma *= (1 - (self._Tth / Tp) ** 1.9) ** 3
        return sigma * 1e-27  # convert from mbarn to cm2
    def _sigma_pi_loE(self, Tp):
        """
        inclusive cross section for Tth < Tp < 2 GeV
        Fit from experimental data
        """
        m_p = self._m_p
        m_pi = self._m_pi
        Mres = 1.1883  # GeV
        Gres = 0.2264  # GeV
        s = 2 * m_p * (Tp + 2 * m_p)  # center of mass energy
        gamma = np.sqrt(Mres ** 2 * (Mres ** 2 + Gres ** 2))
        K = np.sqrt(8) * Mres * Gres * gamma
        K /= np.pi * np.sqrt(Mres ** 2 + gamma)
        # Relativistic Breit-Wigner resonance profile
        fBW = m_p * K
        fBW /= (
            (np.sqrt(s) - m_p) ** 2 - Mres ** 2
        ) ** 2 + Mres ** 2 * Gres ** 2
        mu = np.sqrt(
            (s - m_pi ** 2 - 4 * m_p ** 2) ** 2 - 16 * m_pi ** 2 * m_p ** 2
        )
        mu /= 2 * m_pi * np.sqrt(s)
        sigma0 = 7.66e-3  # mb
        sigma1pi = sigma0 * mu ** 1.95 * (1 + mu + mu ** 5) * fBW ** 1.86
        # two pion production
        sigma2pi = 5.7  # mb
        sigma2pi /= 1 + np.exp(-9.3 * (Tp - 1.4))
        E2pith = 0.56  # GeV
        sigma2pi[np.where(Tp < E2pith)] = 0.0
        return (sigma1pi + sigma2pi) * 1e-27  # return in cm2
    def _sigma_pi_midE(self, Tp):
        """
        Geant 4.10.0 model for 2 GeV < Tp < 5 GeV
        """
        m_p = self._m_p
        Qp = (Tp - self._Tth) / m_p
        multip = -6e-3 + 0.237 * Qp - 0.023 * Qp ** 2
        return self._sigma_inel(Tp) * multip
    def _sigma_pi_hiE(self, Tp, a):
        """
        General expression for Tp > 5 GeV (Eq 7)
        """
        m_p = self._m_p
        csip = (Tp - 3.0) / m_p
        m1 = a[0] * csip ** a[3] * (1 + np.exp(-a[1] * csip ** a[4]))
        m2 = 1 - np.exp(-a[2] * csip ** 0.25)
        multip = m1 * m2
        return self._sigma_inel(Tp) * multip
    def _sigma_pi(self, Tp):
        """Pion production cross section over all energy regimes [cm2]."""
        sigma = np.zeros_like(Tp)
        # for E<2GeV
        idx1 = np.where(Tp < 2.0)
        sigma[idx1] = self._sigma_pi_loE(Tp[idx1])
        # for 2GeV<=E<5GeV
        idx2 = np.where((Tp >= 2.0) * (Tp < 5.0))
        sigma[idx2] = self._sigma_pi_midE(Tp[idx2])
        # for 5GeV<=E<Etrans
        idx3 = np.where((Tp >= 5.0) * (Tp < self._Etrans[self.hiEmodel]))
        sigma[idx3] = self._sigma_pi_hiE(Tp[idx3], self._a["Geant4"])
        # for E>=Etrans
        idx4 = np.where((Tp >= self._Etrans[self.hiEmodel]))
        sigma[idx4] = self._sigma_pi_hiE(Tp[idx4], self._a[self.hiEmodel])
        return sigma
    def _b_params(self, Tp):
        """Amax parameters b0..b3 of Table VII, piecewise in Tp."""
        b0 = 5.9
        hiE = np.where(Tp >= 1.0)
        TphiE = Tp[hiE]
        b1 = np.zeros(TphiE.size)
        b2 = np.zeros(TphiE.size)
        b3 = np.zeros(TphiE.size)
        idx = np.where(TphiE < 5.0)
        b1[idx], b2[idx], b3[idx] = self._b["Geant4_0"]
        idx = np.where(TphiE >= 5.0)
        b1[idx], b2[idx], b3[idx] = self._b["Geant4"]
        idx = np.where(TphiE >= self._Etrans[self.hiEmodel])
        b1[idx], b2[idx], b3[idx] = self._b[self.hiEmodel]
        return b0, b1, b2, b3
    def _calc_EpimaxLAB(self, Tp):
        """Maximum lab-frame pion energy for proton kinetic energy Tp."""
        m_p = self._m_p
        m_pi = self._m_pi
        # Eq 10
        s = 2 * m_p * (Tp + 2 * m_p)  # center of mass energy
        EpiCM = (s - 4 * m_p ** 2 + m_pi ** 2) / (2 * np.sqrt(s))
        PpiCM = np.sqrt(EpiCM ** 2 - m_pi ** 2)
        gCM = (Tp + 2 * m_p) / np.sqrt(s)
        betaCM = np.sqrt(1 - gCM ** -2)
        EpimaxLAB = gCM * (EpiCM + PpiCM * betaCM)
        return EpimaxLAB
    def _calc_Egmax(self, Tp):
        """Maximum gamma-ray energy from decay of the fastest pion."""
        m_pi = self._m_pi
        EpimaxLAB = self._calc_EpimaxLAB(Tp)
        gpiLAB = EpimaxLAB / m_pi
        betapiLAB = np.sqrt(1 - gpiLAB ** -2)
        Egmax = (m_pi / 2) * gpiLAB * (1 + betapiLAB)
        return Egmax
    def _Amax(self, Tp):
        """Peak value of the differential cross section (KATV14 Eq. 12)."""
        m_p = self._m_p
        loE = np.where(Tp < 1.0)
        hiE = np.where(Tp >= 1.0)
        Amax = np.zeros(Tp.size)
        b = self._b_params(Tp)
        EpimaxLAB = self._calc_EpimaxLAB(Tp)
        Amax[loE] = b[0] * self._sigma_pi(Tp[loE]) / EpimaxLAB[loE]
        thetap = Tp / m_p
        Amax[hiE] = (
            b[1]
            * thetap[hiE] ** -b[2]
            * np.exp(b[3] * np.log(thetap[hiE]) ** 2)
            * self._sigma_pi(Tp[hiE])
            / m_p
        )
        return Amax
    def _F_func(self, Tp, Egamma, modelparams):
        """Spectral shape function F (KATV14 Eqs. 9 and 11)."""
        lamb, alpha, beta, gamma = modelparams
        m_pi = self._m_pi
        # Eq 9
        Egmax = self._calc_Egmax(Tp)
        Yg = Egamma + m_pi ** 2 / (4 * Egamma)
        Ygmax = Egmax + m_pi ** 2 / (4 * Egmax)
        Xg = (Yg - m_pi) / (Ygmax - m_pi)
        # zero out invalid fields (Egamma > Egmax -> Xg > 1)
        Xg[np.where(Xg > 1)] = 1.0
        # Eq 11
        C = lamb * m_pi / Ygmax
        F = (1 - Xg ** alpha) ** beta
        F /= (1 + Xg / C) ** gamma
        #
        return F
    def _kappa(self, Tp):
        """Tp-dependent beta parameter for the experimental-data regime."""
        thetap = Tp / self._m_p
        return 3.29 - thetap ** -1.5 / 5.0
    def _mu(self, Tp):
        """Tp-dependent parameter feeding beta/gamma in the Geant4 regimes."""
        q = (Tp - 1.0) / self._m_p
        x = 5.0 / 4.0
        return x * q ** x * np.exp(-x * q)
    def _F(self, Tp, Egamma):
        """Piecewise evaluation of F(Tp, Egamma) over the Tp regimes.

        The Tp-dependent entries of the Table V parameter sets are filled
        in here. The class-level lists in ``_F_mp`` are copied before being
        modified: assigning into them directly would mutate the shared
        tables for all instances and later calls (leaving stale,
        Tp-shaped array entries behind).
        """
        F = np.zeros_like(Tp)
        # below Tth
        F[np.where(Tp < self._Tth)] = 0.0
        # Tth <= E <= 1GeV: Experimental data
        idx = np.where((Tp >= self._Tth) * (Tp <= 1.0))
        if idx[0].size > 0:
            kappa = self._kappa(Tp[idx])
            mp = list(self._F_mp["ExpData"])  # copy: do not mutate table
            mp[2] = kappa
            F[idx] = self._F_func(Tp[idx], Egamma, mp)
        # 1GeV < Tp < 4 GeV: Geant4 model 0
        idx = np.where((Tp > 1.0) * (Tp <= 4.0))
        if idx[0].size > 0:
            mp = list(self._F_mp["Geant4_0"])  # copy: do not mutate table
            mu = self._mu(Tp[idx])
            mp[2] = mu + 2.45
            mp[3] = mu + 1.45
            F[idx] = self._F_func(Tp[idx], Egamma, mp)
        # 4 GeV < Tp < 20 GeV
        idx = np.where((Tp > 4.0) * (Tp <= 20.0))
        if idx[0].size > 0:
            mp = list(self._F_mp["Geant4_1"])  # copy: do not mutate table
            mu = self._mu(Tp[idx])
            mp[2] = 1.5 * mu + 4.95
            mp[3] = mu + 1.50
            F[idx] = self._F_func(Tp[idx], Egamma, mp)
        # 20 GeV < Tp < 100 GeV
        idx = np.where((Tp > 20.0) * (Tp <= 100.0))
        if idx[0].size > 0:
            mp = self._F_mp["Geant4_2"]
            F[idx] = self._F_func(Tp[idx], Egamma, mp)
        # Tp > Etrans
        idx = np.where(Tp > self._Etrans[self.hiEmodel])
        if idx[0].size > 0:
            mp = self._F_mp[self.hiEmodel]
            F[idx] = self._F_func(Tp[idx], Egamma, mp)
        return F
    def _diffsigma(self, Ep, Egamma):
        """
        Differential cross section
        dsigma/dEg = Amax(Tp) * F(Tp,Egamma)
        """
        Tp = Ep - self._m_p
        diffsigma = self._Amax(Tp) * self._F(Tp, Egamma)
        if self.nuclear_enhancement:
            diffsigma *= self._nuclear_factor(Tp)
        return diffsigma
    def _nuclear_factor(self, Tp):
        """
        Compute nuclear enhancement factor
        """
        sigmaRpp = 10 * np.pi * 1e-27
        sigmainel = self._sigma_inel(Tp)
        sigmainel0 = self._sigma_inel(1e3)  # at 1e3 GeV
        f = sigmainel / sigmainel0
        # G grows logarithmically once sigma exceeds its 1 TeV value
        f2 = np.where(f > 1, f, 1.0)
        G = 1.0 + np.log(f2)
        # epsilon factors computed from Eqs 21 to 23 with local ISM abundances
        epsC = 1.37
        eps1 = 0.29
        eps2 = 0.1
        epstotal = np.where(
            Tp > self._Tth,
            epsC + (eps1 + eps2) * sigmaRpp * G / sigmainel,
            0.0,
        )
        if np.any(Tp < 1.0):
            # nuclear enhancement factor diverges towards Tp = Tth, fix Tp<1 to
            # eps(1.0) = 1.91
            loE = np.where((Tp > self._Tth) * (Tp < 1.0))
            epstotal[loE] = 1.9141
        return epstotal
    def _loadLUT(self, LUT_fname):
        """Load a packaged LUT; fall back to direct computation if absent."""
        try:
            filename = get_pkg_data_filename(os.path.join("data", LUT_fname))
            self.diffsigma = LookupTable(filename)
        except IOError:
            warnings.warn(
                "LUT {0} not found, reverting to useLUT = False".format(
                    LUT_fname
                )
            )
            self.diffsigma = self._diffsigma
            self.useLUT = False
    def _spectrum(self, photon_energy):
        """
        Compute differential spectrum from pp interactions using the
        parametrization of <NAME>., <NAME>., <NAME>., and
        <NAME>.\\ 2014, `arXiv:1406.7369
        <http://www.arxiv.org/abs/1406.7369>`_.
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` instance
            Photon energy array.
        """
        # Load LUT if available, otherwise use self._diffsigma
        if self.useLUT:
            LUT_base = "PionDecayKafexhiu14_LUT_"
            if self.nuclear_enhancement:
                LUT_base += "NucEnh_"
            LUT_fname = LUT_base + "{0}.npz".format(self.hiEmodel)
            # only reload LUT if it has changed or hasn't been loaded yet
            try:
                if os.path.basename(self.diffsigma.fname) != LUT_fname:
                    self._loadLUT(LUT_fname)
            except AttributeError:
                self._loadLUT(LUT_fname)
        else:
            self.diffsigma = self._diffsigma
        Egamma = _validate_ene(photon_energy).to("GeV")
        Ep = self._Ep * u.GeV
        J = self._J * u.Unit("1/GeV")
        # Integrate dsigma/dEg * J(Ep) over the proton distribution for
        # each requested photon energy.
        specpp = []
        for Eg in Egamma:
            diffsigma = self.diffsigma(Ep.value, Eg.value) * u.Unit("cm2/GeV")
            specpp.append(trapz_loglog(diffsigma * J, Ep))
        self.specpp = u.Quantity(specpp)
        self.specpp *= self.nh * c.cgs
        return self.specpp.to("1/(s eV)")
def heaviside(x):
    """Heaviside step function with the half-maximum convention H(0) = 0.5.

    Works on scalars and numpy arrays alike via ``np.sign``.
    """
    return 0.5 * (np.sign(x) + 1)
class PionDecayKelner06(BaseRadiative):
    r"""Pion decay gamma-ray emission from a proton population.
    Compute gamma-ray spectrum arising from the interaction of a relativistic
    proton distribution with stationary target protons.
    Parameters
    ----------
    particle_distribution : function
        Particle distribution function, taking proton energies as a
        `~astropy.units.Quantity` array or float, and returning the particle
        energy density in units of number of protons per unit energy as a
        `~astropy.units.Quantity` array or float.
    nh : `~astropy.units.Quantity`
        Number density of the target protons. Default is :math:`1 cm^{-3}`.
    Other parameters
    ----------------
    Etrans : `~astropy.units.Quantity`
        For photon energies below ``Etrans``, the delta-functional
        approximation is used for the spectral calculation, and the full
        calculation is used at higher energies. Default is 0.1 TeV.
    References
    ----------
    <NAME>., <NAME>., and <NAME>., 2006 PhysRevD 74, 034018
    (`arXiv:astro-ph/0606058 <http://www.arxiv.org/abs/astro-ph/0606058>`_).
    """
    # This class doesn't inherit from BaseProton
    param_names = ["nh", "Etrans"]
    # NOTE(review): these memoization attributes are class-level and thus
    # shared by all instances (unlike BaseProton, which sets per-instance
    # copies in __init__) — confirm this sharing is intended.
    _memoize = True
    _cache = {}
    _queue = []
    def __init__(
        self,
        particle_distribution,
        nh=1.0 / u.cm ** 3,
        Etrans=0.1 * u.TeV,
        **kwargs
    ):
        self.particle_distribution = particle_distribution
        self.nh = validate_scalar("nh", nh, physical_type="number density")
        self.Etrans = validate_scalar(
            "Etrans", Etrans, domain="positive", physical_type="energy"
        )
        # Allow keyword arguments to override any attribute set above.
        self.__dict__.update(**kwargs)
    def _particle_distribution(self, E):
        """Particle distribution as a plain float in 1/TeV, for E in TeV."""
        return self.particle_distribution(E * u.TeV).to("1/TeV").value
    def _Fgamma(self, x, Ep):
        """
        KAB06 Eq.58
        Note: Quantities are not used in this function
        Parameters
        ----------
        x : float
            Egamma/Eprot
        Ep : float
            Eprot [TeV]
        """
        L = np.log(Ep)
        B = 1.30 + 0.14 * L + 0.011 * L ** 2  # Eq59
        beta = (1.79 + 0.11 * L + 0.008 * L ** 2) ** -1  # Eq60
        k = (0.801 + 0.049 * L + 0.014 * L ** 2) ** -1  # Eq61
        xb = x ** beta
        F1 = B * (np.log(x) / x) * ((1 - xb) / (1 + k * xb * (1 - xb))) ** 4
        F2 = (
            1.0 / np.log(x)
            - (4 * beta * xb) / (1 - xb)
            - (4 * k * beta * xb * (1 - 2 * xb)) / (1 + k * xb * (1 - xb))
        )
        return F1 * F2
    def _sigma_inel(self, Ep):
        """
        Inelastic cross-section for p-p interaction. KAB06 Eq. 73, 79
        Note: Quantities are not used in this function
        Parameters
        ----------
        Ep : float
            Eprot [TeV]
        Returns
        -------
        sigma_inel : float
            Inelastic cross-section for p-p interaction [cm2].
        """
        L = np.log(Ep)
        sigma = 34.3 + 1.88 * L + 0.25 * L ** 2
        if Ep <= 0.1:
            # Near-threshold suppression (Eq. 79); heaviside zeroes the
            # cross section below the kinematic threshold Eth.
            Eth = 1.22e-3
            sigma *= (1 - (Eth / Ep) ** 4) ** 2 * heaviside(Ep - Eth)
        return sigma * 1e-27  # convert from mbarn to cm2
    def _photon_integrand(self, x, Egamma):
        """
        Integrand of Eq. 72
        """
        try:
            return (
                self._sigma_inel(Egamma / x)
                * self._particle_distribution((Egamma / x))
                * self._Fgamma(x, Egamma / x)
                / x
            )
        except ZeroDivisionError:
            return np.nan
    def _calc_specpp_hiE(self, Egamma):
        """
        Spectrum computed as in Eq. 42 for Egamma >= 0.1 TeV
        """
        # Fixed quad with n=40 is about 15 times faster and is always within
        # 0.5% of the result of adaptive quad for Egamma>0.1
        # WARNING: It also produces artifacts for steep distributions (e.g.
        # Maxwellian) at ~500 GeV. Reverting to adaptative quadrature
        # from scipy.integrate import fixed_quad
        # result=c*fixed_quad(self._photon_integrand, 0., 1., args = [Egamma,
        # ], n = 40)[0]
        from scipy.integrate import quad
        Egamma = Egamma.to("TeV").value
        specpp = (
            c.cgs.value
            * quad(
                self._photon_integrand,
                0.0,
                1.0,
                args=Egamma,
                epsrel=1e-3,
                epsabs=0,
            )[0]
        )
        return specpp * u.Unit("1/(s TeV)")
    # variables for delta integrand
    _c = c.cgs.value
    _Kpi = 0.17  # fraction of proton kinetic energy going to the pion
    _mp = (m_p * c ** 2).to("TeV").value
    _m_pi = 1.349766e-4  # TeV/c2
    def _delta_integrand(self, Epi):
        """Integrand for the delta-functional approximation (pion energy)."""
        Ep0 = self._mp + Epi / self._Kpi
        qpi = (
            self._c
            * (self.nhat / self._Kpi)
            * self._sigma_inel(Ep0)
            * self._particle_distribution(Ep0)
        )
        return qpi / np.sqrt(Epi ** 2 - self._m_pi ** 2)
    def _calc_specpp_loE(self, Egamma):
        """
        Delta-functional approximation for low energies Egamma < 0.1 TeV
        """
        from scipy.integrate import quad
        Egamma = Egamma.to("TeV").value
        # Minimum pion energy able to produce a photon of energy Egamma.
        Epimin = Egamma + self._m_pi ** 2 / (4 * Egamma)
        result = (
            2
            * quad(
                self._delta_integrand, Epimin, np.inf, epsrel=1e-3, epsabs=0
            )[0]
        )
        return result * u.Unit("1/(s TeV)")
    @property
    def Wp(self):
        """Total energy in protons above 1.22 GeV threshold (erg).
        """
        from scipy.integrate import quad
        Eth = 1.22e-3
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # np.inf (not the removed np.Inf alias) for NumPy >= 2.0 compat
            Wp = quad(
                lambda x: x * self._particle_distribution(x), Eth, np.inf
            )[0]
        return (Wp * u.TeV).to("erg")
    def _spectrum(self, photon_energy):
        """
        Compute differential spectrum from pp interactions using Eq.71 and
        Eq.58 of <NAME>., <NAME>., and <NAME>., 2006
        PhysRevD 74, 034018 (`arXiv:astro-ph/0606058
        <http://www.arxiv.org/abs/astro-ph/0606058>`_).
        Parameters
        ----------
        photon_energy : :class:`~astropy.units.Quantity` instance
            Photon energy array.
        """
        outspecene = _validate_ene(photon_energy)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.nhat = 1.0  # initial value, works for index~2.1
            if np.any(outspecene < self.Etrans) and np.any(
                outspecene >= self.Etrans
            ):
                # compute value of nhat so that delta functional matches
                # accurate calculation at 0.1TeV
                full = self._calc_specpp_hiE(self.Etrans)
                delta = self._calc_specpp_loE(self.Etrans)
                self.nhat *= (full / delta).decompose().value
            self.specpp = np.zeros(len(outspecene)) * u.Unit("1/(s TeV)")
            for i, Egamma in enumerate(outspecene):
                if Egamma >= self.Etrans:
                    self.specpp[i] = self._calc_specpp_hiE(Egamma)
                else:
                    self.specpp[i] = self._calc_specpp_loE(Egamma)
        density_factor = (self.nh / (1 * u.Unit("1/cm3"))).decompose().value
        return density_factor * self.specpp.to("1/(s eV)")
class LookupTable(object):
    """
    Helper class for two-dimensional look up table
    Lookup table should be saved as an npz file with numpy.savez or
    numpy.savez_compressed. The file should have three arrays:
    * X: log10(x)
    * Y: log10(y)
    * lut: log10(z)
    The instantiated object can be called with arguments (x,y), and the
    interpolated value of z will be returned. The interpolation is done through
    a cubic spline in semi-logarithmic space.
    """
    def __init__(self, filename):
        from scipy.interpolate import RectBivariateSpline
        # Table axes are stored as log10; the table values are exponentiated
        # so the spline interpolates z itself on logarithmic axes.
        data = np.load(filename)
        self.int_lut = RectBivariateSpline(
            data.f.X, data.f.Y, 10 ** data.f.lut, kx=3, ky=3, s=0
        )
        self.fname = filename
    def __call__(self, X, Y):
        # Inputs are linear-space; map onto the log10 axes of the spline.
        return self.int_lut(np.log10(X), np.log10(Y)).flatten()
def _calc_lut_pp(args):  # pragma: no cover
    """Worker computing one LUT column: dsigma/dEg over a proton grid."""
    epr, eph, hiEmodel, nuc = args
    from .models import PowerLaw
    # Flat (index 0) power law: the cross section does not depend on the
    # distribution, only a valid PionDecay instance is needed.
    dist = PowerLaw(1 / u.eV, 1 * u.TeV, 0.0)
    emitter = PionDecay(dist, hiEmodel=hiEmodel, nuclear_enhancement=nuc)
    return emitter._diffsigma(epr.to("GeV").value, eph.to("GeV").value)
def generate_lut_pp(
    Ep=None,
    Eg=None,
    out_base="PionDecayKafexhiu14_LUT_",
    hiEmodel=None,
    nuclear_enhancement=True,
):  # pragma: no cover
    """Generate and save differential cross section lookup tables.

    One compressed ``.npz`` file per high-energy model is written, holding
    log10 of the proton energy axis, photon energy axis and dsigma/dEg table.

    Parameters
    ----------
    Ep : `~astropy.units.Quantity` array, optional
        Proton energy axis. Defaults to 800 log-spaced points from the pion
        production threshold to 1e7 GeV.
    Eg : `~astropy.units.Quantity` array, optional
        Photon energy axis. Defaults to 1024 log-spaced points over
        1e-5 - 1e3 TeV.
    out_base : str, optional
        Prefix for the output filenames.
    hiEmodel : str or list of str, optional
        High-energy model(s) to compute. Default is all four models.
    nuclear_enhancement : bool, optional
        Whether to include the ISM nuclear enhancement factor.
    """
    from emcee.interruptible_pool import InterruptiblePool as Pool
    # Build default grids here rather than in the signature so the Quantity
    # arrays are not evaluated at import time nor shared between calls.
    if Ep is None:
        Ep = np.logspace(0.085623713910610105, 7, 800) * u.GeV
    if Eg is None:
        Eg = np.logspace(-5, 3, 1024) * u.TeV
    pool = Pool()
    if hiEmodel is None:
        hiEmodel = ["Geant4", "Pythia8", "SIBYLL", "QGSJET"]
    elif isinstance(hiEmodel, str):
        hiEmodel = [hiEmodel]
    if nuclear_enhancement:
        out_base += "NucEnh_"
    for model in hiEmodel:
        out_file = out_base + model + ".npz"
        print("Saving LUT for model {0} in {1}...".format(model, out_file))
        # Parallelize over photon energies; each worker fills one column.
        args = [(Ep, eg, model, nuclear_enhancement) for eg in Eg]
        diffsigma_list = pool.map(_calc_lut_pp, args)
        diffsigma = np.array(diffsigma_list).T
        np.savez_compressed(
            out_file,
            X=np.log10(Ep.to("GeV").value),
            Y=np.log10(Eg.to("GeV").value),
            lut=np.log10(diffsigma),
        )
|
import json
import hashlib
import random
from datetime import date
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import datetime
from aiohttp import web
from pymysql import MySQLError
from db import mysql_connect
from auth import requires_auth
from app import get_environ_sfe
# SMTP credentials; placeholders here, populated from the environment by init().
EMAIL_PASSWORD = ''
EMAIL_USER = ''
EMAIL_HOST = ''
# Event loop reference; never assigned in this module — presumably set by the
# application entry point (TODO confirm).
loop = None
# Month/day marking the end of the spring semester (June 15).
SEMESTER_SLUTT = {"month": 6, "day": 15}
def init():
    """Populate the module-level SMTP credentials from the environment."""
    global EMAIL_PASSWORD, EMAIL_USER, EMAIL_HOST
    EMAIL_PASSWORD, EMAIL_USER, EMAIL_HOST = (
        get_environ_sfe("EMAIL_PASSWORD"),
        get_environ_sfe("EMAIL_USER"),
        get_environ_sfe("EMAIL_HOST"),
    )
async def check_vipps_id(request):
    """Check whether a Vipps transaction id is unused or belongs to a user.

    The id is taken from the URL path; an optional ``user_id`` query
    parameter identifies the user the id may legitimately belong to.
    Returns 200 if the id belongs to that user or is unused, 401 if it is
    taken by someone else, 500 on database errors.
    """
    # Bind before the try block so the finally clause cannot raise
    # NameError when mysql_connect() itself fails.
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        vipps_id = str(request.match_info['vipps_id'])
        user_id = request.query.get('user_id')
        if user_id is not None:
            # The id may already be registered on this very user, in which
            # case it is accepted. (Skipped entirely when no user_id was
            # given — comparing against SQL NULL never matches anyway.)
            await cur.execute(
                "Select * from user where vipps_transaction_id = %s AND user_id = %s",
                [vipps_id, user_id])
            if cur.rowcount == 1:
                return web.Response(status=200,
                                    text='{"msg": "Transaction id belongs to userId."}',
                                    content_type='application/json')
        await cur.execute("Select * from user where vipps_transaction_id = %s",
                          [vipps_id])
        if cur.rowcount == 0:
            return web.Response(status=200,
                                text='{"msg": "Transaction id is unique."}',
                                content_type='application/json')
        return web.Response(status=401,
                            text='{"msg": "Vipps transaction id not unique."}',
                            content_type='application/json')
    except MySQLError as e:
        print(e)
        return web.Response(status=500,
                            text=json.dumps(e, default=str),
                            content_type='application/json')
    finally:
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
async def check_student_email(request):
    """Check whether a student email is already registered.

    Expects a JSON body with ``studentEmail``. Returns 200 if the address
    is unused, 409 if taken, 401 on missing field, 500 on database errors.
    """
    # Bind before the try block so the finally clause cannot raise
    # NameError when mysql_connect() itself fails.
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.json()
        if 'studentEmail' not in bod:
            return web.Response(status=401,
                                text='{"msg": "studentEmail not in body"}',
                                content_type='application/json')
        student_email = bod['studentEmail']
        await cur.execute("select * from user where student_email = %s ",
                          [student_email])
        if cur.rowcount != 0:
            return web.Response(status=409,
                                text='{"msg": "Student email already in use."}',
                                content_type='application/json')
        return web.Response(status=200,
                            text='{"msg": "Student email is unique."}',
                            content_type='application/json')
    except MySQLError as e:
        print(e)
        return web.Response(status=500,
                            text=json.dumps(e, default=str),
                            content_type='application/json')
    finally:
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def update_member(request):
    """Update an existing member's record from a JSON request body.

    The body must pass input_ok(); empty-string optional fields are stored
    as NULL. Returns 200 on success, 401 on invalid input, 500 on database
    errors.
    """
    # Bind before the try block so the finally clause cannot raise
    # NameError when mysql_connect() itself fails.
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.json()
        if not input_ok(bod):
            return web.Response(status=401,
                                text='{"msg": "Invalid input."}',
                                content_type='application/json')
        userId = bod['userId']
        first_name = bod['firstName']
        last_name = bod['lastName']
        student_email = bod['studentEmail']
        private_email = bod['privateEmail']
        year_of_admission = int(bod['yearOfAdmission'])
        newsletter = bod['newsletter']
        trans_id = bod['vippsTransactionId']
        # Empty strings are normalized to NULL in the database.
        vipps_transaction_id = trans_id if trans_id != '' else None
        study_programme_id = bod['studyProgrammeId']
        private_email = private_email if private_email != '' else None
        await cur.execute("update user set first_name = %s, last_name = %s, student_email = %s, "
                          "private_email = %s, year_of_admission = %s, "
                          "newsletter = %s, vipps_transaction_id = %s, "
                          "study_programme_id = %s where user_id = %s",
                          [first_name, last_name, student_email, private_email, year_of_admission,
                           newsletter, vipps_transaction_id, study_programme_id, userId])
        await conn.commit()
        return web.Response(status=200,
                            text='{"msg": "Member updated."}',
                            content_type='application/json')
    except MySQLError as e:
        print(e)
        return web.Response(status=500,
                            text=json.dumps(e, default=str),
                            content_type='application/json')
    finally:
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
async def register_member(request):
    """Register a new (inactive) member and send a verification email.

    The JSON body must pass input_ok(). The member is stored with
    active=0 and an email verification code; a confirmation link is then
    mailed to the student address. Returns 200 on success, 401 on invalid
    input, 500 on email or database errors.
    """
    # Bind before the try block so the finally clause cannot raise
    # NameError when mysql_connect() itself fails.
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.json()
        if not input_ok(bod):
            return web.Response(status=401,
                                text='{"msg": "Invalid input."}',
                                content_type='application/json')
        first_name = bod['firstName']
        last_name = bod['lastName']
        student_email = bod['studentEmail']
        private_email = bod['privateEmail']
        year_of_admission = int(bod['yearOfAdmission'])
        # New members start inactive with an unverified email.
        active = 0
        verified_email = 0
        email_verification_code = generate_verification_code()
        newsletter = bod['newsletter']
        trans_id = bod['vippsTransactionId']
        # Empty strings are normalized to NULL in the database.
        vipps_transaction_id = trans_id if trans_id != '' else None
        study_programme_id = bod['studyProgrammeId']
        await cur.execute("INSERT INTO user(first_name, last_name, student_email, private_email, year_of_admission,"
                          " active, email_verification_code, verified_student_email,"
                          " newsletter, vipps_transaction_id, study_programme_id) "
                          "VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
                          [first_name, last_name, student_email, private_email, year_of_admission, active,
                           email_verification_code, verified_email, newsletter,
                           vipps_transaction_id, study_programme_id]
                          )
        await conn.commit()
        print("Member: '{}' has been added to the database.".format(first_name + ' ' + last_name))
        student_username = student_email.split('@')[0]
        link = 'http://medlem.studentalt.no/#/confirm/{0}_{1}'.format(email_verification_code, student_username)
        email_content = 'Hei!\nDu har mottatt denne meldingen fordi det blir forsøkt å registrere seg som SALT medlem med denne epostadressen.\n' \
                        'Om dette ikke er tilfelle, vennligst se bort ifra denne eposten.\n\n' \
                        'For å bekrefte brukeren din, klikk på følgende lenke:\n' \
                        '{0}\n\n' \
                        'Mvh.\nSALT'.format(link)
        success, msg = send_email(student_email, "Epostbekreftelse for SALT-medlem", email_content)
        if success:
            return web.Response(status=200,
                                text='{"msg": "%s"}' % msg,
                                content_type='application/json')
        return web.Response(status=500,
                            text=json.dumps(msg, default=str),
                            content_type='application/json')
    except MySQLError as e:
        print(e)
        # Return an explicit 500 (the original fell through and returned
        # None, which makes aiohttp raise a server error of its own).
        return web.Response(status=500,
                            text=json.dumps(e, default=str),
                            content_type='application/json')
    finally:
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def send_new_email_verification_code(request):
    """
    Updates 'email_verification_code' for the member specified, and send confirmation email with generated
    activation link.
    :param request: http-request with 'studentEmail': '<student email>'
    :return: status 200 and msg if successful, status 500 and error information if not.
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.json()
        if 'studentEmail' not in bod:
            return web.Response(status=401,
                                text='{"msg": "studentEmail not in body"}',
                                content_type='application/json')
        student_email = bod['studentEmail']
        email_verification_code = generate_verification_code()
        await cur.execute("update user set email_verification_code = %s where student_email = %s",
                          [email_verification_code, student_email])
        await conn.commit()
        print("Success: email_verification code updated for user with student_email {}.".format(student_email))
        # The confirmation link encodes both the code and the local part of the email.
        student_username = student_email.split('@')[0]
        link = 'http://medlem.studentalt.no/#/confirm/{0}_{1}'.format(email_verification_code, student_username)
        email_content = 'Hei!\nDu har mottatt denne meldingen fordi det blir forsøkt å registrere seg som SALT medlem med denne epostadressen.\n' \
                        'Om dette ikke er tilfelle, vennligst se bort ifra denne eposten.\n\n' \
                        'For å bekrefte brukeren din, klikk på følgende lenke:\n' \
                        '{0}\n\n' \
                        'Mvh.\nSALT'.format(link)
        success, msg = send_email(student_email, "Epostbekreftelse for SALT-medlem", email_content)
        if success:
            return web.Response(status=200,
                                text='{"msg": "%s"}' % msg,
                                content_type='application/json')
        else:
            return web.Response(status=500,
                                text=json.dumps(msg, default=str),
                                content_type='application/json')
    except MySQLError as e:
        print(e)
        # Bug fix: the original only printed and returned None, which aiohttp
        # turns into a bare 500 with no JSON body; match the sibling handlers.
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() itself may raise, leaving cur/conn unbound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def toggle_email_verified(request):
    """
    Toggles the 'verified_student_email' attribute of a user with the given userId.
    :param request: aiohttp request whose URL carries 'user_id' in match_info.
    :return: status 200 if success, 404 if user not found, 500 if MySQL error.
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        user_id = str(request.match_info['user_id'])
        await cur.execute("update user set verified_student_email = not verified_student_email where user_id = %s", [user_id, ])
        await conn.commit()
        r = cur.rowcount
        if r == 1:
            return web.Response(status=200,
                                text='{"msg": "email_verified attribute flipped."}',
                                content_type='application/json')
        else:
            # Bug fix: the original used '%' without a conversion character
            # ('... user_id %"'), so the %-formatting raised ValueError at
            # runtime instead of producing the 404 body.
            return web.Response(status=404,
                                text='{"error": "No user found with user_id %s"}' % user_id,
                                content_type='application/json')
    except MySQLError as e:
        print(e)
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        # Bug fix: this handler never closed its cursor/connection (leak);
        # guards cover the case where mysql_connect() itself raised.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def get_expired_members(request):
    """
    Returns a list of all members that should have finished their degree, calculated by current semester and normed time
    for each members associated study programme.
    :param request:
    :return: List of all expired members.
    """
    try:
        (conn, cur) = await mysql_connect()
        today = datetime.date.today()
        this_year = today.year
        # NOTE(review): SEMESTER_SLUTT is presumably a module-level dict with
        # 'month' and 'day' keys marking the end of the semester -- confirm at
        # its definition site.
        semester_slutt = datetime.date(year=this_year, month=SEMESTER_SLUTT['month'], day=SEMESTER_SLUTT['day'])
        if (semester_slutt - today).days > 0:
            # The current semester has not ended yet, so expiry is judged
            # against last year instead of this year.
            this_year -= 1
        # A member is "expired" once (reference year - year_of_admission) has
        # reached the programme's normed length.
        await cur.execute("SELECT u.user_id, u.first_name, u.last_name, u.student_email, s.name, "
                          "u.year_of_admission, s.length as 'normed_years' FROM user u join study_programme s on "
                          "u.study_programme_id = s.study_programme_id where (%s - u.year_of_admission) >= s.length "
                          "order by u.user_id ASC",
                          this_year)
        r = await cur.fetchall()
        return web.Response(status=200,
                            text=json.dumps(r, default=str),
                            content_type='application/json')
    except MySQLError as e:
        print(e)
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        await cur.close()
        conn.close()
async def get_tos(request):
    """
    Returns the current Terms of Service text.
    :param request:
    :return: 200 and the current Terms of Service text.
    """
    try:
        conn, cur = await mysql_connect()
        await cur.execute("Select text from terms_of_service where id = 1")
        rows = await cur.fetchall()
        payload = json.dumps(rows, default=str)
        return web.Response(status=200, text=payload, content_type='application/json')
    except MySQLError as err:
        print(err)
        return web.Response(status=500,
                            text='{"error": "%s"}' % err,
                            content_type='application/json')
    finally:
        await cur.close()
        conn.close()
@requires_auth
async def update_tos(request):
    """
    Updates the Terms of Service text.
    :param request: Contains a body with the new Terms of Service text
    :return: 200 if updated successfully, 401 if the payload is bad, and 500 if an Error occured.
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.json()
        # Idiom fix: membership test directly on the dict.
        if "termsOfService" not in bod:
            return web.Response(status=401,
                                text='{"msg": "termsOfService key not in body."}',
                                content_type='application/json')
        await cur.execute("update terms_of_service set text = %s where id = 1", bod["termsOfService"])
        r = cur.rowcount
        await conn.commit()
        if r == 1:
            return web.Response(status=200,
                                text='{"msg": "Terms of service updated."}',
                                content_type='application/json')
        # Bug fix: the original fell through and returned None (bare aiohttp
        # 500) when no row changed, e.g. row id 1 missing or text unchanged.
        return web.Response(status=500,
                            text='{"error": "Terms of service row was not updated."}',
                            content_type='application/json')
    except MySQLError as e:
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def get_member(request):
    """
    Returns all members with 'first_name' or 'last_name' equal to search_string from end of url
    :param request: Information about first name or last name of person(s) to return, given in URL
    :return: All members qualifying for the search_string
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        name = str(request.match_info['name'])
        await cur.execute("SELECT user.user_id, user.first_name, user.last_name, user.student_email, user.private_email, "
                          "user.year_of_admission, user.newsletter, user.vipps_transaction_id, "
                          "study_programme.programme_code "
                          "FROM user INNER JOIN study_programme "
                          "ON user.study_programme_id=study_programme.study_programme_id "
                          "WHERE user.first_name = %s OR user.last_name = %s", (name, name))
        r = await cur.fetchall()
        return web.Response(status=200,
                            text=json.dumps(r, default=str),
                            content_type='application/json')
    except MySQLError as e:
        print("error")
        print(e)
        # Bug fix: previously fell through and returned None, which aiohttp
        # turns into a bare 500; return an explicit JSON error instead.
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def get_all_members(request):
    """
    Returns all members from database
    :param request:
    :return: All members from database
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        await cur.execute("SELECT * from user")
        r = await cur.fetchall()
        return web.Response(status=200,
                            text=json.dumps(r, default=str),
                            content_type='application/json')
    except MySQLError as e:
        print(e)
        # Bug fix: previously returned None on DB errors (bare aiohttp 500);
        # return an explicit JSON error like the sibling handlers.
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def get_newsletter_email(request):
    """
    Returns all member student emails wanting newsletter-email
    :param request:
    :return: A json list of all email addresses wanting newsletter mail
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        # Only active members with a verified student email are included.
        await cur.execute("SELECT DISTINCT private_email FROM user "
                          "WHERE newsletter = 1 AND active = 1 AND verified_student_email = 1")
        r = await cur.fetchall()
        return web.Response(status=200,
                            text=json.dumps(r, default=str,),
                            content_type='application/json')
    except MySQLError as e:
        print("error")
        print(e)
        # Bug fix: previously returned None on DB errors (bare aiohttp 500).
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def get_email(request):
    """
    Returns all member student-emails
    :param request: aiohttp.web.Request object
    :return: A json list of all emailaddresses wanting newsletter mail
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        await cur.execute("SELECT DISTINCT student_email FROM user "
                          "WHERE active = 1 AND verified_student_email = 1")
        r = await cur.fetchall()
        return web.Response(status=200,
                            text=json.dumps(r, default=str,),
                            content_type='application/json')
    except MySQLError as e:
        print("error")
        print(e)
        # Bug fix: the error body below was missing its closing brace, so the
        # client received invalid JSON.
        return web.Response(status=500,
                            text='{"error": "Something went wrong when trying to retrieve emails"}',
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
async def verify_email(request):
    """
    Verifies member's student Email through unique URI containing verification code and student-email address.
    :param request: Contains information about verification code and student email
    :return Response: status 200 if okay, 401 if the code is malformed or wrong, 500 on DB errors
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        # The path segment is '<code>_<username>'.
        bod = str(request.match_info['verification_code'])
        body_split = bod.split('_')
        if not len(body_split) == 2:
            return web.Response(status=401,
                                text='{"error": "The verifictaion code was invalid"}',
                                content_type='application/json')
        verification_code = body_split[0]
        student_epost = body_split[1] + "@stud.<EMAIL>.no"
        await cur.execute("UPDATE user SET verified_student_email = 1 "
                          "WHERE student_email = %s and email_verification_code = %s", (student_epost, verification_code))
        await conn.commit()
        r = cur.rowcount
        if r == 1:
            return web.Response(status=200,
                                text='{"msg": "Email verified"}',
                                content_type='application/json')
        else:
            return web.Response(status=401,
                                text='{"error": "The verifictaion code was invalid"}',
                                content_type='application/json')
    except MySQLError as e:
        print("error")
        print(e)
        # Bug fix: the error body below was missing its closing brace.
        return web.Response(status=500,
                            text='{"error": "Something went wrong when trying to verify email"}',
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def toggle_active(request):
    """
    Activates or deactivates a member
    :param request: Specifies 'userId' and whether to activate or deactivate.
    :return aiohttp.web.Response: status 200 if update ok, 400 if incorrect parameters, 500 if internal error.
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.json()
        if not all(keys in bod for keys in ("userId", "active")):
            # Bug fix: the JSON body was missing its closing brace.
            return web.Response(status=400,
                                text='{"error": "Something went wrong when trying to activate member. '
                                     'Post arguments are missing."}',
                                content_type='application/json')
        await cur.execute("UPDATE user SET active = %s WHERE user_id = %s", (bod['active'], bod['userId']))
        await conn.commit()
        status = "activated" if not bod['active'] == "0" else "deactivated"
        # Bug fix: the original dumped the unterminated fragment
        # '"msg": "Member activated' as a bare JSON string; emit a proper
        # JSON object instead.
        return web.Response(status=200,
                            text=json.dumps({"msg": "Member {}".format(status)}),
                            content_type='application/json')
    except MySQLError as e:
        print("error")
        print(e)
        # Bug fix: this error body was also missing its closing brace.
        return web.Response(status=500,
                            text='{"error": "Something went wrong when trying to activate member"}',
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def check_vipps_activate_rows(request):
    """
    Checks and returns the amount of members that would be activated by a given csv-upload.
    :param request: Contains the csv data used for checking.
    :return: status 200 and number of activations that would result from the csv-file; status 500 if an Error occured.
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.text()
        lines = bod.splitlines()
        vipps_ids = []
        for l in lines:
            split = l.split(',')
            # Keep only rows that look like SALT membership payments of 350.00.
            # The transaction id is assumed to sit in column 4 -- TODO confirm
            # against the Vipps CSV export format.
            if all(keys in split for keys in ("TransactionInfo", "Studentforeningen SALT", "350.00")):
                vipps_ids.append(split[4])
        if not vipps_ids:
            # Bug fix: with no matching rows the original rendered "IN ()",
            # which is invalid SQL and surfaced as a 500; report zero instead.
            return web.Response(status=200,
                                text=json.dumps({"updatableRows": 0}),
                                content_type='application/json')
        # One "%s" placeholder per id for the IN (...) clause.
        format_strings = ','.join(['%s'] * len(vipps_ids))
        await cur.execute("SELECT count(distinct vipps_transaction_id) as updatableRows "
                          "from user WHERE active = 0 and vipps_transaction_id IN (%s)"
                          % format_strings, tuple(vipps_ids))
        num_updatable = await cur.fetchone()
        return web.Response(status=200,
                            text=json.dumps(num_updatable),
                            content_type='application/json')
    except MySQLError as e:
        return web.Response(status=500,
                            text=json.dumps(e, default=str),
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
@requires_auth
async def vipps_csv_activate(request):
    """
    Sets attribute 'active' to 1 in database for all non-active members with matching vipps_transaction_id and correct
    amount paid, found in CSV file received.
    :param request: Contains CSV file in multipart/form-data
    :return Response: status 200 and amount of members activated if ok, 500 if not
    """
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.text()
        lines = bod.splitlines()
        vipps_ids = []
        for l in lines:
            split = l.split(',')
            # Keep only rows that look like SALT membership payments of 350.00;
            # the transaction id is assumed to be column 4 -- TODO confirm
            # against the Vipps CSV export format.
            if all(keys in split for keys in ("TransactionInfo", "Studentforeningen SALT", "350.00")):
                vipps_ids.append(split[4])
        # One "%s" placeholder per id for the IN (...) clause.
        # NOTE(review): with zero matching rows this renders "IN ()", which is
        # invalid SQL and surfaces as a 500 via MySQLError -- confirm intent.
        format_strings = ','.join(['%s'] * len(vipps_ids))
        # Collect the emails to notify BEFORE flipping the rows to active.
        await cur.execute("SELECT student_email from user where active = 0 and vipps_transaction_id"
                          " IN (%s)" % format_strings, tuple(vipps_ids))
        email_list = await cur.fetchall()
        await cur.execute("UPDATE user SET active = 1 WHERE active = 0 and vipps_transaction_id IN (%s)"
                          % format_strings, tuple(vipps_ids))
        num_updated = cur.rowcount
        await conn.commit()
        email_content = 'Hei!\nDin betaling av medlemskontigent via vipps transaksjonsID har nå blitt bekreftet' \
                        ', og ditt medlemskap har blitt aktivert.\n' \
                        '\n\n' \
                        'Mvh.\nSALT'
        emails_sent = 0
        # NOTE(review): emails_sent is tallied but never reported in the response.
        for e in email_list:
            email = e['student_email']
            success, msg = send_email(email, "Ditt medlemskap hos SALT er aktivert", email_content)
            if success:
                emails_sent += 1
        return web.Response(status=200,
                            text='[{"msg": "Members with valid transaction ID activated."},'
                                 ' {"updatedRows": "%s" }]' % num_updated,
                            content_type='application/json')
    except MySQLError as e:
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        await cur.close()
        conn.close()
@requires_auth
async def delete_member(request):
    """
    Deletes a member from database specified by 'userId'
    :param request: Contains 'userId' value of the member to delete from db
    :return: status 200 if ok, 400 if 'userId' missing, status 500 if not
    """
    conn = cur = None
    try:
        (conn, cur) = await mysql_connect()
        bod = await request.json()
        if "userId" not in bod:
            # Bug fix: the original silently returned None (bare aiohttp 500)
            # when the key was missing; report the bad payload explicitly.
            return web.Response(status=400,
                                text='{"error": "userId key not in body."}',
                                content_type='application/json')
        userId = bod["userId"]
        await cur.execute("DELETE FROM user WHERE user_id = %s", userId)
        await conn.commit()
        return web.Response(status=200,
                            text='{"msg": "Member with userId: %s has been deleted."}' % userId,
                            content_type='application/json')
    except MySQLError as e:
        return web.Response(status=500,
                            text='{"error": "%s"}' % e,
                            content_type='application/json')
    finally:
        # Guard: mysql_connect() may raise before cur/conn are bound.
        if cur is not None:
            await cur.close()
        if conn is not None:
            conn.close()
async def get_all_studyprograms(request):
    """Return every active study programme as a JSON list (200), or a JSON
    error body with status 500 on database failure."""
    try:
        conn, cur = await mysql_connect()
        await cur.execute("SELECT * FROM study_programme WHERE active = 1")
        rows = await cur.fetchall()
        payload = json.dumps(rows, default=str, )
        return web.Response(status=200, text=payload, content_type='application/json')
    except MySQLError as err:
        print(err)
        return web.Response(status=500,
                            text='{"error": "%s"}' % err,
                            content_type='application/json')
    finally:
        await cur.close()
        conn.close()
# - Web util funcs
def generate_verification_code():
    """Return a random 32-character lowercase-hex verification code.

    Security fix: the original hashed random.randint(1, 10000) with MD5,
    which yields only 10000 possible codes from a non-cryptographic RNG --
    trivially guessable.  secrets.token_hex(16) keeps the same 32-char
    lowercase-hex format but with 128 bits of entropy.
    """
    import secrets  # local import keeps this fix self-contained
    return secrets.token_hex(16)
def input_ok(bod):
    """Validate a member-registration payload.

    :param bod: dict parsed from the request body.
    :return: True when all required keys are present, the Vipps transaction
             id is empty or at least 9 characters, the student email ends in
             '@stud.ntnu.no' with a non-empty local part, and the admission
             year is within the last 6 years; False otherwise.
    """
    required = ('firstName', 'lastName', 'studentEmail', 'privateEmail',
                'yearOfAdmission', 'newsletter', 'vippsTransactionId',
                'studyProgrammeId')
    for key in required:
        if key not in bod:
            print('{!r} is not in body.'.format(key))
            return False
    trans_id = bod['vippsTransactionId']
    # A transaction id is either absent ('') or at least 9 characters long.
    if trans_id != '' and len(trans_id) < 9:
        return False
    s_email = bod['studentEmail'].lower()
    this_year = date.today().year
    try:
        year_of_admission = int(bod['yearOfAdmission'])
    except (TypeError, ValueError):
        # Bug fix: a non-numeric year previously raised out of this function
        # instead of returning False.
        print("Failure 2")
        return False
    # len > 13 ensures a non-empty local part before the 13-char domain suffix.
    if not (len(s_email) > 13 and s_email.endswith('@stud.ntnu.no') and
            this_year - 6 < year_of_admission <= this_year):
        print("Failure 2")
        return False
    return True
def send_email(recipient, subject, body, sender='<EMAIL>'):
    """
    Sends an email with the given data to the given recipient.
    :param recipient: Recipient email address
    :param subject: Subject of the email
    :param body: Body of the email
    :param sender: Email address of the sender
    :return: Tuple (ok, message): (True, "Email sent") on success, otherwise
             (False, <error description>).
    """
    msg = MIMEMultipart()
    msg['From'] = sender
    msg['To'] = recipient
    msg['Subject'] = subject
    msg.attach(MIMEText(body, 'plain', 'utf-8'))
    text = msg.as_string()
    smtp_obj = None
    try:
        smtp_obj = smtplib.SMTP(EMAIL_HOST, port=587)
        smtp_obj.ehlo()
        smtp_obj.starttls()
        smtp_obj.login(user=EMAIL_USER, password=EMAIL_PASSWORD)
        smtp_obj.sendmail(sender, recipient, text)
        return True, "Email sent"
    except smtplib.SMTPException as error:
        return False, 'Unable to send verification email to "{0}". Error-msg:\n{1}'.format(recipient, error)
    finally:
        # Bug fix: if smtplib.SMTP(...) itself raised (e.g. connection
        # refused), smtp_obj was unbound and this clause raised NameError.
        if smtp_obj is not None:
            smtp_obj.quit()
|
import json
from typing import Dict, List, Tuple, Union
from uuid import uuid4
from abc import ABC, abstractmethod
from enum import Enum, auto
import datetime as dt
class PlanObjectType(Enum):
    """Kinds of objects that can appear in a project plan."""
    TASK = auto()
    MILESTONE = auto()
    CATEGORY = auto()
class PlanObject(ABC):
    """Abstract base for anything that can appear in a plan.

    Every instance gets a random UUID string id and a display name.
    """

    def __init__(self, name: str):
        # Unique identifier; loaders (e.g. Project.from_csv) may overwrite it.
        self.id = str(uuid4())
        self.name = name

    def to_dict(self):
        """Serialise the common fields; subclasses extend the returned dict."""
        return {'id': self.id, 'name': self.name, 'type': self.type.name}

    @property
    @abstractmethod
    def type(self) -> PlanObjectType:
        """The PlanObjectType of the concrete subclass."""
        pass
class Task(PlanObject):
    """A leaf plan item with a start date, a duration and optional slack."""

    def __init__(self, name: str, start: dt.date, duration: dt.timedelta, margin: dt.timedelta = None, critical: bool = False):
        super().__init__(name)
        self.start = start
        self.duration = duration
        # Store zero slack instead of None so arithmetic on margin is safe.
        self.margin = margin if margin is not None else dt.timedelta(0)
        self.critical = critical

    @property
    def type(self) -> PlanObjectType:
        return PlanObjectType.TASK

    @property
    def end(self):
        """Finish date, derived as start + duration."""
        return self.start + self.duration

    def to_dict(self):
        """Serialise, stringifying dates/timedeltas for JSON output."""
        payload = super().to_dict()
        payload.update(
            start=str(self.start),
            duration=str(self.duration),
            margin=str(self.margin),
            critical=str(self.critical),
        )
        return payload
class Milestone(PlanObject):
    """A zero-duration plan item: it ends on the same date it starts."""

    def __init__(self, name: str, start: dt.date):
        super().__init__(name)
        self.start = start

    @property
    def end(self):
        # No duration, so the end coincides with the start.
        return self.start

    @property
    def type(self) -> PlanObjectType:
        return PlanObjectType.MILESTONE

    def to_dict(self):
        """Serialise, stringifying the start date for JSON output."""
        payload = super().to_dict()
        payload['start'] = str(self.start)
        return payload
class Category(PlanObject):
    """A plan node grouping child tasks, milestones and sub-categories.

    Unless explicit start/duration were given at construction, its time
    bounds are derived recursively from its children.
    """

    def __init__(self, name, start: dt.date = None, duration: dt.timedelta = None, children: List[PlanObject] = None) -> None:
        super().__init__(name)
        # Optional explicit bounds; when BOTH are set they override the
        # bounds computed from the children (see get_time_bounds).
        self.__start = start
        self.__duration = duration
        self.children = [] if children is None else children

    def add_child(self, child: PlanObject) -> None:
        """Append a child plan object to this category."""
        self.children.append(child)

    @property
    def type(self):
        return PlanObjectType.CATEGORY

    @property
    def start(self):
        # Derived: earliest start among children (or the explicit start).
        s,_ = self.get_time_bounds()
        return s

    @property
    def end(self):
        # Derived: latest end among children (or explicit start + duration).
        _,e = self.get_time_bounds()
        return e

    def to_dict(self) -> Dict:
        """Serialise this category and, recursively, all of its children."""
        data = super().to_dict()
        data['children'] = [c.to_dict() for c in self.children]
        return data

    def get_list(self) -> List[PlanObject]:
        """Return a depth-first, pre-order flat list of all descendants."""
        data = []
        for c in self.children:
            data.append(c)
            if isinstance(c,Category):
                data.extend(c.get_list())
        return data

    def get_dims(self) -> Tuple[int,int]:
        """Return layout dimensions (width, height) for rendering.

        Each sub-category contributes its own width; all direct
        tasks/milestones are stacked into one extra vertical block.
        Height is the tallest child stack plus one row for the category
        header itself; an empty category is 1x1.
        """
        w = 0
        hs = []
        n_tasks = 0
        for c in self.children:
            if isinstance(c, (Task,Milestone)):
                # w += 1
                n_tasks += 1
            elif isinstance(c, Category):
                c_w,c_h = c.get_dims()
                w += c_w
                hs.append(c_h)
            else:
                raise RuntimeError("Unknown child data type")
        if n_tasks:
            # Add vertical stack of tasks as one vertical block
            w += 1
            hs.append(n_tasks)
        if hs:
            # Add 1 to account for the category itself
            height = max(hs)+1
        else:
            height = 1
        if not w:
            w = 1
        return (w,height)

    def get_time_bounds(self) -> Tuple[dt.date,dt.date]:
        """Get start and end boundaries based on all children
        Returns:
            Tuple[date,date]: [start_date,end_date]
        """
        # Explicit bounds win when both were supplied at construction time.
        if self.__start is not None and self.__duration is not None:
            return (self.__start,self.__start + self.__duration)
        ss = []
        es = []
        for c in self.children:
            if isinstance(c, Task):
                ss.append(c.start)
                es.append((c.start + c.duration))
            elif isinstance(c,Milestone):
                ss.append(c.start)
                es.append(c.start)
            elif isinstance(c, Category):
                c_ss,c_es = c.get_time_bounds()
                ss.append(c_ss)
                es.append(c_es)
            else:
                raise RuntimeError("Unknown child data type")
        # NOTE(review): min()/max() raise ValueError for an empty category
        # without explicit bounds -- confirm callers never hit that case.
        s = min(ss)
        e = max(es)
        return (s,e)
class Project():
    """Top-level container: a named root Category holding the whole plan."""

    def __init__(self, name: str, data: List[PlanObject] = None) -> None:
        self.name = name
        self.root = Category(name,children=data)
        # Pin the root id to 0 so CSV rows with Parent ID 0 attach to the root
        # (see from_csv).
        self.root.id = 0

    def to_dict(self):
        """Serialise the whole project as {name: root-category-dict}."""
        return {self.name: self.root.to_dict()}

    def from_dict(self):
        # Deserialisation counterpart of to_dict; not implemented yet.
        raise NotImplementedError()

    def get_dims(self) -> Tuple[int, int]:
        """Return the root category's (width, height) layout dimensions."""
        return self.root.get_dims()

    @classmethod
    def from_csv(cls, csv_data, project_name):
        """Build a Project from ProjectLibre CSV rows (pre-split fields).

        Assumes rows are ordered parent-before-child so each Parent ID has
        already been seen when a child references it.
        """
        # Necessary ProjectLibre Columns: ID, Name, Start, Duration, Free Slack, Parent ID, Is WBS Parent, Display task as milestone, Critical, Predecessors
        data_list = []
        data_dict = {}
        for line in csv_data:
            id = int(line[0])
            name = line[1]
            # NOTE(review): '%x' is locale-dependent date parsing -- assumes
            # the CSV was exported under the same locale; confirm.
            start = dt.datetime.strptime(line[2], '%x %I:%M %p').date()
            # Strips a trailing '?' and the characters of 'days' before the
            # float conversion, e.g. '5 days?' -> '5 ' -> 5.0 (days).
            duration = dt.timedelta(float(line[3].rstrip('?').rstrip('days')))
            # NOTE(review): margin stays a raw string here although Task
            # annotates it as a timedelta -- confirm downstream handling.
            margin = line[4]
            parent_id = int(line[5])
            is_parent = line[6] == 'true'
            is_milestone = line[7] == 'true'
            is_critical = line[8] == 'true'
            predecessors = list(map(int,line[9].split(";"))) if line[9] else []
            if is_parent:
                entry = Category(name,start,duration)
            elif is_milestone:
                entry = Milestone(name, start)
            else:
                entry = Task(name, start, duration, margin, is_critical)
            entry.predecessors = predecessors
            entry.id = id
            data_dict[id] = entry
            if parent_id == 0:
                data_list.append(entry)
            else:
                data_dict[parent_id].add_child(entry)
        return cls(project_name, data_list)
|
<gh_stars>0
from flask import g
from flask_login import current_user
import re
from .models import RandomTable, Macros
from .randomise_utils import split_id, get_random_table_record, get_macro_record
def check_table_definition_validity(table):
    """Validate a random-table definition and derive its roll range.

    A definition is one row per line, either all rows numbered
    ('<n>::text' or '<a>-<b>::text') or none of them (plain text rows);
    mixing the two styles is rejected.

    :param table: object exposing a 'definition' string and an 'id'
        (the id is used to detect self-references in row text).
    :return: tuple (max_rng, min_rng, valid, table_line_type, error_message,
        number_of_rows), where table_line_type is 1 for numbered rows and
        0 for plain rows.
    """
    error_message = ''
    table_list = table.definition.splitlines()
    table_iter = iter(table_list)
    validate_table_definition = True
    min_rng = 99999999
    max_rng = 0
    with_line_numbers = 0
    without_line_numbers = 0
    number_of_rows = 0
    table_line_type = 1 # Set table type to 1, meaning with line numbers on rows
    # First pass: count numbered vs un-numbered rows to detect mixing.
    for i in table_iter:
        if i.count('::') == 1:
            with_line_numbers += 1
        elif i.count('::') == 0:
            without_line_numbers += 1
        number_of_rows += 1
    if with_line_numbers and without_line_numbers:
        error_message = 'Table definition invalid, mixed line numbers on rows'
        return 0, 0, False, 0, error_message, 0
    if without_line_numbers:
        # Un-numbered tables roll 1..number_of_rows implicitly.
        table_line_type = 0
        min_rng = 1
        max_rng = number_of_rows
    table_iter = iter(table_list)
    # validate table definition
    for line_number, i in enumerate(table_iter):
        # check number of ::
        if (i.count('::') != 1 and table_line_type == 1) or (i.count('::') != 0 and table_line_type == 0):
            validate_table_definition = False
            error_message = 'Invalid number of separators on line ' + i + ", line number: " + str(line_number)
            break
        row_text = i
        if table_line_type == 1:
            # split line
            line_def = i.split('::')
            # The prefix must be either a single number or an a-b range.
            if not bool(re.search(r'^\d+-\d+$', line_def[0])):
                if not bool(re.search(r'^\d+$', line_def[0])):
                    validate_table_definition = False
                    error_message = 'Invalid number/number range on line ' + i + ", line number: " + str(line_number)
                    break
            if bool(re.search(r'^\d+-\d+$', line_def[0])):
                match_values = re.search(r'^(\d+)-(\d+)$', line_def[0])
                if int(match_values.group(1)) > int(match_values.group(2)):
                    validate_table_definition = False
                    error_message = 'Invalid range, first number greater than second on line ' + i + ", line number: " + str(
                        line_number)
                    break
                else:
                    # Widen the observed roll range with this row's bounds.
                    if int(match_values.group(1)) < min_rng:
                        min_rng = int(match_values.group(1))
                    if int(match_values.group(2)) > max_rng:
                        max_rng = int(match_values.group(2))
            if bool(re.search(r'^\d+$', line_def[0])):
                match_values = re.search(r'^(\d+)$', line_def[0])
                if int(match_values.group(1)) < min_rng:
                    min_rng = int(match_values.group(1))
                if int(match_values.group(1)) > max_rng:
                    max_rng = int(match_values.group(1))
            row_text = line_def[1]
        if validate_table_definition:
            # Validate the row body (external refs, dice expressions, ...).
            # NOTE(review): the loop continues after a failed validate_text,
            # and on success error_message still ends up holding a stray
            # ", line number: N" suffix -- confirm whether callers rely on it.
            validate_table_definition, error_message = validate_text(row_text, table.id)
            error_message += ", line number: " + str(line_number + 1)
    return max_rng, min_rng, validate_table_definition, table_line_type, error_message, number_of_rows
def validate_text(definition, id):
    """Validate a table/macro row body.

    Checks, in order: that the text does not reference its own macro id,
    that '<<...>>' external references are balanced and resolvable, and
    that '((...))' random-number generators match one of the supported
    forms (dice, ranges, multipliers, table-reference multipliers) or at
    least parse as an alternating value/operator expression.

    :param definition: the row text to validate.
    :param id: id of the owning table/macro (for self-reference detection).
    :return: tuple (valid, error_message).
    """
    error_message = ''
    validate_definition = True
    if definition.find("macro." + id) >= 0:
        validate_definition = False
        error_message = "Macro referencing self"
    elif definition.count('<<') != definition.count('>>'):
        validate_definition = False
        error_message += 'External reference is malformed in macro'
    elif definition.count('<<'):
        # Resolve every <<user.type.id>> reference; fail fast on the first
        # one that does not exist.
        open_angle_brackets = definition.find("<<")
        while open_angle_brackets >= 0:
            close_angle_brackets = definition.find(">>", open_angle_brackets)
            external_id = definition[open_angle_brackets + 2:close_angle_brackets]
            external_data = None
            username, id_type, reference_id = split_id(external_id)
            if id_type == 'table':
                external_data = get_random_table_record(username, reference_id)
            elif id_type == 'macro':
                external_data = get_macro_record(username, reference_id)
            if external_data is None:
                error_message += '\nExternal reference <<' + external_id + '>> not found'
                return False, error_message
            open_angle_brackets = definition.find("<<", close_angle_brackets)
    if definition.count('((') != definition.count('))'):
        validate_definition = False
        error_message += '\nRandom Number range is malformed'
    if definition.count('((') > 0:
        open_brackets = definition.find("((")
        while open_brackets >= 0:
            close_brackets = definition.find("))", open_brackets)
            generator = definition[open_brackets + 2:close_brackets]
            # check type of generator
            # Each nested level tries one supported pattern; only when ALL of
            # them fail does the generic component-based check below run.
            if not bool(re.search(r'^\d+d\d+$', generator, re.IGNORECASE)):  # e.g 1d6
                if not bool(re.search(r'^\d+d\d+x\d+$', generator, re.IGNORECASE)):  # e.g. 1d6x10
                    if not bool(re.search(r'^\d+d\d+\+\d+$', generator, re.IGNORECASE)):  # e.g. 2d4+2
                        if not bool(re.search(r'^\d+d\d+\-\d+$', generator, re.IGNORECASE)):  # e.g. 4d4-1
                            if not bool(re.search(r'^\d+-\d+$', generator, re.IGNORECASE)):  # e.g. 1-100
                                if not bool(re.search(r'^\d+-\d+x\d+$', generator, re.IGNORECASE)):  # e.g. 1-100x10
                                    if not bool(re.search(r'^\d+d\d+x<<table\..*?>>$', generator,
                                                          re.IGNORECASE)):  # e.g. 1d6x<<table.magic-item-table-a>>
                                        if not bool(re.search(r'^\d+-\d+x<<table\..*?>>$', generator,
                                                              re.IGNORECASE)):  # e.g. 1-10x<<table.magic-item-table-a>>
                                            if not bool(re.search(r'^\d+x<<table\..*?>>$', generator,
                                                                  re.IGNORECASE)):  # e.g. 3x<<table.magic-item-table-a>>
                                                # Fallback: tokenise into dice/number/operator components and
                                                # require them to alternate value, operator, value, ...
                                                # NOTE(review): a generator matching NONE of the patterns and
                                                # containing no tokens passes silently -- confirm intent.
                                                if bool(re.search(r'(\d+d\d+|\d+|[\+|\-|x])', generator, re.IGNORECASE)):
                                                    components = re.finditer(r'(\d+d\d+|\d+|[\+|\-|x])', generator,
                                                                             re.IGNORECASE)
                                                    valid_generator = True
                                                    expect_value = True
                                                    operand = 1
                                                    for component in components:
                                                        if expect_value:
                                                            expect_value = False
                                                            if re.search(r'd', component.group(1), re.IGNORECASE):
                                                                # dice notation
                                                                if not bool(
                                                                        re.search(r'^(\d+)d(\d+)', component.group(1),
                                                                                  re.IGNORECASE)) and not bool(
                                                                        re.search(r'\d+', component.group(1),
                                                                                  re.IGNORECASE)):
                                                                    valid_generator = False
                                                                    break
                                                        else:
                                                            expect_value = True
                                                            if component.group(1) != '+' and component.group(1) != '-' and component.group(1) != 'x':
                                                                valid_generator = False
                                                                break
                                                    if not valid_generator:
                                                        error_message += '\nRandom number in ((' + generator + ')) not recognised'
                                                        validate_definition = False
            open_brackets = definition.find("((", close_brackets)
    return validate_definition, error_message
def validate_collection(items):
    """Check that every line in *items* names an existing table or macro.

    Lines prefixed 'table.' are looked up in RandomTable, lines prefixed
    'macro.' in Macros; anything else is ignored.  Returns a tuple
    (valid, error_message) where error_message lists every unresolved
    reference.
    """
    error_message = ''
    validate = True
    for item in items.splitlines():
        # Resolve the acting user: prefer the request-scoped user on flask.g,
        # fall back to flask_login's current_user.
        if hasattr(g, 'current_user'):
            user_id = g.current_user.id
        else:
            user_id = current_user.id
        record = None
        checked = False
        if item.startswith('table.'):
            checked = True
            record = RandomTable.query.get([item[6:], user_id])
        elif item.startswith('macro.'):
            checked = True
            record = Macros.query.get([item[6:], user_id])
        if checked and record is None:
            error_message += '\nExternal reference ' + item + ' not recognised.'
            validate = False
    return validate, error_message
|
<filename>pigeon/annotate.py
import functools
import json
import random
from IPython.display import clear_output, display
from ipywidgets import HTML, Button, Dropdown, FloatSlider, HBox, IntSlider, Output, Textarea
def annotate(examples, options=None, shuffle=False, include_skip=True, write_to_file=None, display_fn=display):
    """
    Build an interactive widget for annotating a list of input examples.
    Parameters
    ----------
    examples: list(any), list of items to annotate
    options: list(any) or tuple(start, end, [step]) or None
        if list: list of labels for binary classification task (Dropdown or Buttons)
        if tuple: range for regression task (IntSlider or FloatSlider)
        if None: arbitrary text input (TextArea)
    shuffle: bool, shuffle the examples before annotating
    include_skip: bool, include option to skip example while annotating
    write_to_file: str, write to file after each annotation
    display_fn: func, function for displaying an example to the user
    Returns
    -------
    annotations : list of tuples, list of annotated examples (example, label)
    """
    examples = list(examples)
    if shuffle:
        random.shuffle(examples)
    annotations = []
    # Index of the example currently on screen; show_next() advances it.
    current_index = -1
    def set_label_text():
        # Refresh the progress counter shown above the widget.
        nonlocal count_label
        count_label.value = "{} examples annotated, {} examples left".format(
            len(annotations), len(examples) - current_index
        )
    def show_next():
        # Advance to the next example; disable all buttons once exhausted.
        nonlocal current_index
        current_index += 1
        set_label_text()
        if current_index >= len(examples):
            for btn in buttons:
                btn.disabled = True
            print("Annotation done.")
            return
        with out:
            clear_output(wait=True)
            display_fn(examples[current_index])
    def add_annotation(annotation):
        # Record (example, label) and optionally persist all annotations.
        annotations.append((examples[current_index], annotation))
        if write_to_file is not None:
            # NOTE(review): keys the JSON on example[0], so persisting assumes
            # each example is an indexable pair like (key, payload) -- confirm.
            summerized_annotations = {}
            for annon in annotations:
                summerized_annotations[annon[0][0]] = annon[-1]
            with open(write_to_file, "w") as f:
                json.dump(summerized_annotations, f)
        show_next()
    def skip(btn):
        # Skip button callback: move on without recording an annotation.
        show_next()
    count_label = HTML()
    set_label_text()
    display(count_label)
    # Choose the widget style from the type of `options`.
    if type(options) == list:
        task_type = "classification"
    elif type(options) == tuple and len(options) in [2, 3]:
        task_type = "regression"
    elif options is None:
        task_type = "captioning"
    else:
        raise Exception("Invalid options")
    buttons = []
    if task_type == "classification":
        # Many labels -> dropdown + submit; few labels -> one button per label.
        use_dropdown = len(options) > 5
        if use_dropdown:
            dd = Dropdown(options=options)
            display(dd)
            btn = Button(description="submit")
            def on_click(btn):
                add_annotation(dd.value)
            btn.on_click(on_click)
            buttons.append(btn)
        else:
            for label in options:
                btn = Button(description=label)
                # partial binds the label now, avoiding the late-binding
                # closure pitfall in this loop.
                def on_click(label, btn):
                    add_annotation(label)
                btn.on_click(functools.partial(on_click, label))
                buttons.append(btn)
    elif task_type == "regression":
        # Slider type follows the type of the range's first element.
        target_type = type(options[0])
        if target_type == int:
            cls = IntSlider
        else:
            cls = FloatSlider
        if len(options) == 2:
            min_val, max_val = options
            slider = cls(min=min_val, max=max_val)
        else:
            min_val, max_val, step_val = options
            slider = cls(min=min_val, max=max_val, step=step_val)
        display(slider)
        btn = Button(description="submit")
        def on_click(btn):
            add_annotation(slider.value)
        btn.on_click(on_click)
        buttons.append(btn)
    else:
        # Captioning: free-form text area.
        ta = Textarea()
        display(ta)
        btn = Button(description="submit")
        def on_click(btn):
            add_annotation(ta.value)
        btn.on_click(on_click)
        buttons.append(btn)
    if include_skip:
        btn = Button(description="skip")
        btn.on_click(skip)
        buttons.append(btn)
    box = HBox(buttons)
    display(box)
    out = Output()
    display(out)
    show_next()
    return annotations
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for analyzing GDM code at runtime (reflection)."""
import importlib
import inspect
import os
import types
from typing import Any, Callable, Collection, Dict, List, Optional, Set, Type
from gazoo_device import config
from gazoo_device import gdm_logger
logger = gdm_logger.get_logger()
_PACKAGE_DIR = "gazoo_device/"
def get_all_subclasses_in_module(
    parent_class: Type[Any],
    module: types.ModuleType,
    exclude_private: bool = True,
    exclude_abstract: bool = False) -> List[Type[Any]]:
  """Returns all classes derived from parent_class in the module.

  Args:
    parent_class: Class object from which the subclasses must be derived.
    module: Module to scan for subclasses.
    exclude_private: If True, do not include private classes.
    exclude_abstract: If True, do not include abstract classes.

  Returns:
    Subclasses of parent_class defined in the module.
  """
  def _is_wanted(name: str, member: Any) -> bool:
    """True when the member is a class passing every configured filter."""
    if not inspect.isclass(member):
      return False
    if exclude_abstract and inspect.isabstract(member):
      return False
    if exclude_private and name.startswith("_"):
      return False
    return issubclass(member, parent_class)

  return [member for name, member in inspect.getmembers(module)
          if _is_wanted(name, member)]
def _get_module_path(module: types.ModuleType) -> str:
"""Returns the module path."""
return module.__file__
def get_all_subclasses_in_package(
    parent_class: Type[Any],
    package: types.ModuleType,
    excluded_modules: Optional[Collection[types.ModuleType]] = None,
    exclude_private: bool = True,
    exclude_abstract: bool = False,
    module_path_getter: Callable[[types.ModuleType], str] = _get_module_path
) -> Dict[types.ModuleType, Set[Type[Any]]]:
  """Returns classes derived from parent_class in modules within the package.

  Does not recurse into subpackages (subdirectories are not scanned).

  Args:
    parent_class: Class object from which the subclasses must be derived.
    package: __init__.py module of the package to scan.
    excluded_modules: Modules to exclude from the scan.
    exclude_private: If True, do not include private classes.
    exclude_abstract: If True, do not include abstract classes.
    module_path_getter: Function to retrieve the file path of a module.

  Returns:
    Mapping from module to set of classes derived from parent_class defined in
    that module.
  """
  excluded = excluded_modules if excluded_modules is not None else set()
  module_to_classes: Dict[types.ModuleType, Set[Type[Any]]] = {}
  for candidate in _list_package_modules(package, module_path_getter):
    if candidate in excluded:
      continue
    matches = get_all_subclasses_in_module(
        parent_class=parent_class,
        module=candidate,
        exclude_private=exclude_private,
        exclude_abstract=exclude_abstract)
    # Only record modules that actually define at least one match.
    if matches:
      module_to_classes[candidate] = set(matches)
  return module_to_classes
def _list_package_modules(
    package: types.ModuleType,
    module_path_getter: Callable[[types.ModuleType], str] = _get_module_path
) -> List[types.ModuleType]:
  """Returns a list of all modules defined in the package.

  Subpackages (subdirectories) are not scanned.

  Args:
    package: __init__.py module of the package to scan. For example,
      gazoo_device.capabilities.
    module_path_getter: Function to retrieve the file path of a module.
  """
  suffix = ".py"
  package_dir = os.path.dirname(module_path_getter(package))
  modules: List[types.ModuleType] = []
  for file_name in os.listdir(package_dir):
    # Skip non-Python files and the package's own __init__.py.
    if not file_name.endswith(suffix) or file_name == "__init__.py":
      continue
    module_name = file_name[:-len(suffix)]
    modules.append(
        importlib.import_module(
            ".{}".format(module_name), package=package.__name__))
  return modules
|
# -*- coding: utf-8 -*-
import numpy as np
import multiprocessing
import torch
from torch import nn, Tensor
from ctp.kernels import GaussianKernel
from ctp.clutrr.models import BatchNeuralKB, BatchHoppy, BatchUnary, BatchMulti
from ctp.reformulators import SymbolicReformulator
from typing import List, Dict, Tuple, Optional
import pytest
torch.set_num_threads(multiprocessing.cpu_count())
def encode_relation(facts: List[Tuple[str, str, str]],
                    relation_embeddings: nn.Embedding,
                    relation_to_idx: Dict[str, int],
                    device: Optional[torch.device] = None) -> Tensor:
    """Returns the embedding of each fact's relation, shape [len(facts), dim]."""
    rel_indices = torch.tensor([relation_to_idx[rel] for _, rel, _ in facts],
                               dtype=torch.long)
    if device is not None:
        rel_indices = rel_indices.to(device)
    return relation_embeddings(rel_indices)
def encode_arguments(facts: List[Tuple[str, str, str]],
                     entity_embeddings: nn.Embedding,
                     entity_to_idx: Dict[str, int],
                     device: Optional[torch.device] = None) -> Tuple[Tensor, Tensor]:
    """Returns (subject, object) embeddings for each fact, each [len(facts), dim]."""
    pair_indices = torch.tensor(
        [[entity_to_idx[subj], entity_to_idx[obj]] for subj, _, obj in facts],
        dtype=torch.long)
    if device is not None:
        pair_indices = pair_indices.to(device)
    pair_emb = entity_embeddings(pair_indices)
    return pair_emb[:, 0, :], pair_emb[:, 1, :]
@pytest.mark.light
def test_adv_v1():
    """Scores r(a, <obj>) queries with BatchHoppy at depths 0-3 over a p/q chain."""
    embedding_size = 20

    torch.manual_seed(0)
    rs = np.random.RandomState(0)

    triples = [
        ('a', 'p', 'b'),
        ('b', 'q', 'c'),
        ('c', 'p', 'd'),
        ('d', 'q', 'e'),
        ('e', 'p', 'f'),
        ('f', 'q', 'g'),
        ('g', 'p', 'h'),
        ('h', 'q', 'i'),
        ('i', 'p', 'l'),
        ('l', 'q', 'm'),
        ('m', 'p', 'n'),
        ('n', 'q', 'o'),
        ('o', 'p', 'p'),
        ('p', 'q', 'q'),
        ('q', 'p', 'r'),
        ('r', 'q', 's'),
        ('s', 'p', 't'),
        ('t', 'q', 'u'),
        ('u', 'p', 'v'),
        ('v', 'q', 'w'),
        ('x', 'r', 'y'),
        ('x', 's', 'y')
    ]

    entity_lst = sorted({subj for (subj, _, _) in triples} | {obj for (_, _, obj) in triples})
    predicate_lst = sorted({pred for (_, pred, _) in triples})

    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    with torch.no_grad():
        kernel = GaussianKernel()

        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = 12
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        # Tile the fact embeddings and the entity bank across the batch dimension.
        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.full((batch_size,), fact_size, dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.full((batch_size,), entity_size, dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        model = BatchNeuralKB(kernel=kernel)

        indices = torch.tensor([predicate_to_index['p'], predicate_to_index['q']],
                               dtype=torch.long)
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        hoppy_by_depth = [BatchHoppy(model, hops_lst=[(reformulator, False)], depth=d)
                          for d in range(4)]

        xs_np = rs.randint(nb_entities, size=batch_size)
        xp_np = rs.randint(nb_predicates, size=batch_size)
        xo_np = rs.randint(nb_entities, size=batch_size)

        # Overwrite the first queries with r(a, <obj>), one hop target per slot.
        for i, obj in enumerate(['c', 'e', 'g', 'i', 'm', 'o', 'q', 's', 'u']):
            xs_np[i] = entity_to_index['a']
            xp_np[i] = predicate_to_index['r']
            xo_np[i] = entity_to_index[obj]

        xs_emb = entity_embeddings(torch.from_numpy(xs_np))
        xp_emb = predicate_embeddings(torch.from_numpy(xp_np))
        xo_emb = entity_embeddings(torch.from_numpy(xo_np))

        scores = [hoppy.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
                              entity_embeddings=emb, nb_entities=_nb_entities).cpu().numpy()
                  for hoppy in hoppy_by_depth]

        # Deeper reformulation reaches further down the chain, so more queries
        # score close to 1 as the depth grows.
        expected_by_depth = [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
        for inf_np, expected in zip(scores, expected_by_depth):
            np.testing.assert_allclose(inf_np, expected, rtol=1e-1, atol=1e-1)

        print(scores[-1])
@pytest.mark.light
def test_adv_v2():
    """Scores r(a, <obj>) queries with BatchUnary (hop flag False): all ~1."""
    embedding_size = 20

    torch.manual_seed(0)
    rs = np.random.RandomState(0)

    triples = [
        ('a', 'p', 'b'),
        ('a', 'p', 'd'),
        ('c', 'p', 'd'),
        ('e', 'q', 'f'),
        ('f', 'p', 'c'),
        ('x', 'r', 'y')
    ]

    entity_lst = sorted({subj for (subj, _, _) in triples} | {obj for (_, _, obj) in triples})
    predicate_lst = sorted({pred for (_, pred, _) in triples})

    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    with torch.no_grad():
        kernel = GaussianKernel()

        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = 6
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        # Tile the fact embeddings and the entity bank across the batch dimension.
        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.full((batch_size,), fact_size, dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.full((batch_size,), entity_size, dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        model = BatchNeuralKB(kernel=kernel)

        indices = torch.tensor([predicate_to_index['p']], dtype=torch.long)
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        unary = BatchUnary(model, hops_lst=[(reformulator, False)])

        xs_np = rs.randint(nb_entities, size=batch_size)
        xp_np = rs.randint(nb_predicates, size=batch_size)
        xo_np = rs.randint(nb_entities, size=batch_size)

        # Overwrite the random queries with r(a, <obj>) for each entity below.
        for i, obj in enumerate(['a', 'b', 'c', 'd', 'e', 'f']):
            xs_np[i] = entity_to_index['a']
            xp_np[i] = predicate_to_index['r']
            xo_np[i] = entity_to_index[obj]

        xs_emb = entity_embeddings(torch.from_numpy(xs_np))
        xp_emb = predicate_embeddings(torch.from_numpy(xp_np))
        xo_emb = entity_embeddings(torch.from_numpy(xo_np))

        inf = unary.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
                          entity_embeddings=emb, nb_entities=_nb_entities)
        inf_np = inf.cpu().numpy()

        print(inf_np)
        np.testing.assert_allclose(inf_np, [1] * batch_size, rtol=1e-2, atol=1e-2)
@pytest.mark.light
def test_adv_v3():
    """Same data as test_adv_v2 but with the hop flag True: all scores ~0."""
    embedding_size = 20

    torch.manual_seed(0)
    rs = np.random.RandomState(0)

    triples = [
        ('a', 'p', 'b'),
        ('a', 'p', 'd'),
        ('c', 'p', 'd'),
        ('e', 'q', 'f'),
        ('f', 'p', 'c'),
        ('x', 'r', 'y')
    ]

    entity_lst = sorted({subj for (subj, _, _) in triples} | {obj for (_, _, obj) in triples})
    predicate_lst = sorted({pred for (_, pred, _) in triples})

    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    with torch.no_grad():
        kernel = GaussianKernel()

        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = 6
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        # Tile the fact embeddings and the entity bank across the batch dimension.
        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.full((batch_size,), fact_size, dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.full((batch_size,), entity_size, dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        model = BatchNeuralKB(kernel=kernel)

        indices = torch.tensor([predicate_to_index['p']], dtype=torch.long)
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        unary = BatchUnary(model, hops_lst=[(reformulator, True)])

        xs_np = rs.randint(nb_entities, size=batch_size)
        xp_np = rs.randint(nb_predicates, size=batch_size)
        xo_np = rs.randint(nb_entities, size=batch_size)

        # Overwrite the random queries with r(a, <obj>) for each entity below.
        for i, obj in enumerate(['a', 'b', 'c', 'd', 'e', 'f']):
            xs_np[i] = entity_to_index['a']
            xp_np[i] = predicate_to_index['r']
            xo_np[i] = entity_to_index[obj]

        xs_emb = entity_embeddings(torch.from_numpy(xs_np))
        xp_emb = predicate_embeddings(torch.from_numpy(xp_np))
        xo_emb = entity_embeddings(torch.from_numpy(xo_np))

        inf = unary.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
                          entity_embeddings=emb, nb_entities=_nb_entities)
        inf_np = inf.cpu().numpy()

        print(inf_np)
        np.testing.assert_allclose(inf_np, [0] * batch_size, rtol=1e-2, atol=1e-2)
@pytest.mark.light
def test_adv_v4():
    """Queries from subject 'b' with the hop flag False: all scores ~0."""
    embedding_size = 20

    torch.manual_seed(0)
    rs = np.random.RandomState(0)

    triples = [
        ('a', 'p', 'b'),
        ('a', 'p', 'd'),
        ('c', 'p', 'd'),
        ('e', 'q', 'f'),
        ('f', 'p', 'c'),
        ('x', 'r', 'y')
    ]

    entity_lst = sorted({subj for (subj, _, _) in triples} | {obj for (_, _, obj) in triples})
    predicate_lst = sorted({pred for (_, pred, _) in triples})

    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    with torch.no_grad():
        kernel = GaussianKernel()

        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = 6
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        # Tile the fact embeddings and the entity bank across the batch dimension.
        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.full((batch_size,), fact_size, dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.full((batch_size,), entity_size, dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        model = BatchNeuralKB(kernel=kernel)

        indices = torch.tensor([predicate_to_index['p']], dtype=torch.long)
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        unary = BatchUnary(model, hops_lst=[(reformulator, False)])

        xs_np = rs.randint(nb_entities, size=batch_size)
        xp_np = rs.randint(nb_predicates, size=batch_size)
        xo_np = rs.randint(nb_entities, size=batch_size)

        # Overwrite the random queries with r(b, <obj>) for each entity below.
        for i, obj in enumerate(['a', 'b', 'c', 'd', 'e', 'f']):
            xs_np[i] = entity_to_index['b']
            xp_np[i] = predicate_to_index['r']
            xo_np[i] = entity_to_index[obj]

        xs_emb = entity_embeddings(torch.from_numpy(xs_np))
        xp_emb = predicate_embeddings(torch.from_numpy(xp_np))
        xo_emb = entity_embeddings(torch.from_numpy(xo_np))

        inf = unary.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
                          entity_embeddings=emb, nb_entities=_nb_entities)
        inf_np = inf.cpu().numpy()

        print(inf_np)
        np.testing.assert_allclose(inf_np, [0] * batch_size, rtol=1e-2, atol=1e-2)
@pytest.mark.light
def test_adv_v5():
    """Queries from subject 'b' with the hop flag True: all scores ~1."""
    embedding_size = 20

    torch.manual_seed(0)
    rs = np.random.RandomState(0)

    triples = [
        ('a', 'p', 'b'),
        ('a', 'p', 'd'),
        ('c', 'p', 'd'),
        ('e', 'q', 'f'),
        ('f', 'p', 'c'),
        ('x', 'r', 'y')
    ]

    entity_lst = sorted({subj for (subj, _, _) in triples} | {obj for (_, _, obj) in triples})
    predicate_lst = sorted({pred for (_, pred, _) in triples})

    nb_entities = len(entity_lst)
    nb_predicates = len(predicate_lst)

    entity_to_index = {e: i for i, e in enumerate(entity_lst)}
    predicate_to_index = {p: i for i, p in enumerate(predicate_lst)}

    with torch.no_grad():
        kernel = GaussianKernel()

        entity_embeddings = nn.Embedding(nb_entities, embedding_size * 2, sparse=True)
        predicate_embeddings = nn.Embedding(nb_predicates, embedding_size * 2, sparse=True)

        rel_emb = encode_relation(facts=triples,
                                  relation_embeddings=predicate_embeddings,
                                  relation_to_idx=predicate_to_index)
        arg1_emb, arg2_emb = encode_arguments(facts=triples,
                                              entity_embeddings=entity_embeddings,
                                              entity_to_idx=entity_to_index)

        batch_size = 6
        fact_size = rel_emb.shape[0]
        entity_size = entity_embeddings.weight.shape[0]

        # Tile the fact embeddings and the entity bank across the batch dimension.
        rel_emb = rel_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg1_emb = arg1_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        arg2_emb = arg2_emb.view(1, fact_size, -1).repeat(batch_size, 1, 1)
        nb_facts = torch.full((batch_size,), fact_size, dtype=torch.long)

        emb = entity_embeddings.weight.view(1, entity_size, -1).repeat(batch_size, 1, 1)
        _nb_entities = torch.full((batch_size,), entity_size, dtype=torch.long)

        facts = [rel_emb, arg1_emb, arg2_emb]

        model = BatchNeuralKB(kernel=kernel)

        indices = torch.tensor([predicate_to_index['p']], dtype=torch.long)
        reformulator = SymbolicReformulator(predicate_embeddings, indices)
        unary = BatchUnary(model, hops_lst=[(reformulator, True)])

        xs_np = rs.randint(nb_entities, size=batch_size)
        xp_np = rs.randint(nb_predicates, size=batch_size)
        xo_np = rs.randint(nb_entities, size=batch_size)

        # Overwrite the random queries with r(b, <obj>) for each entity below.
        for i, obj in enumerate(['a', 'b', 'c', 'd', 'e', 'f']):
            xs_np[i] = entity_to_index['b']
            xp_np[i] = predicate_to_index['r']
            xo_np[i] = entity_to_index[obj]

        xs_emb = entity_embeddings(torch.from_numpy(xs_np))
        xp_emb = predicate_embeddings(torch.from_numpy(xp_np))
        xo_emb = entity_embeddings(torch.from_numpy(xo_np))

        inf = unary.score(xp_emb, xs_emb, xo_emb, facts=facts, nb_facts=nb_facts,
                          entity_embeddings=emb, nb_entities=_nb_entities)
        inf_np = inf.cpu().numpy()

        print(inf_np)
        np.testing.assert_allclose(inf_np, [1] * batch_size, rtol=1e-2, atol=1e-2)
# Allow running this test module directly, outside of a pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
    # Individual tests kept here for quick manual runs:
    # test_adv_v2()
    # test_adv_v3()
    # test_adv_v4()
    # test_adv_v5()
|
"""
Copyright 2013 <NAME>
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms import *
from cvxpy.expressions.variables import Variable
from cvxpy.expressions.constants import Parameter
import cvxpy.utilities as u
import cvxpy.interface.matrix_utilities as intf
import numpy as np
import unittest
class TestAtoms(unittest.TestCase):
""" Unit tests for the atoms module. """
def setUp(self):
self.a = Variable(name='a')
self.x = Variable(2, name='x')
self.y = Variable(2, name='y')
self.A = Variable(2,2,name='A')
self.B = Variable(2,2,name='B')
self.C = Variable(3,2,name='C')
# Test the norm wrapper.
def test_norm(self):
with self.assertRaises(Exception) as cm:
norm(self.C, 3)
self.assertEqual(str(cm.exception),
"Invalid value 3 for p.")
# Test the normInf class.
def test_normInf(self):
exp = self.x+self.y
atom = normInf(exp)
# self.assertEquals(atom.name(), "normInf(x + y)")
self.assertEquals(atom.size, (1,1))
self.assertEquals(atom.curvature, u.Curvature.CONVEX_KEY)
assert atom.is_convex()
assert (-atom).is_concave()
self.assertEquals(normInf(atom).curvature, u.Curvature.CONVEX_KEY)
self.assertEquals(normInf(-atom).curvature, u.Curvature.CONVEX_KEY)
# Test the norm1 class.
def test_norm1(self):
exp = self.x+self.y
atom = norm1(exp)
# self.assertEquals(atom.name(), "norm1(x + y)")
self.assertEquals(atom.size, (1,1))
self.assertEquals(atom.curvature, u.Curvature.CONVEX_KEY)
self.assertEquals(norm1(atom).curvature, u.Curvature.CONVEX_KEY)
self.assertEquals(norm1(-atom).curvature, u.Curvature.CONVEX_KEY)
# Test the norm2 class.
def test_norm2(self):
exp = self.x+self.y
atom = norm2(exp)
# self.assertEquals(atom.name(), "norm2(x + y)")
self.assertEquals(atom.size, (1,1))
self.assertEquals(atom.curvature, u.Curvature.CONVEX_KEY)
self.assertEquals(norm2(atom).curvature, u.Curvature.CONVEX_KEY)
self.assertEquals(norm2(-atom).curvature, u.Curvature.CONVEX_KEY)
# Test the geo_mean class.
def test_geo_mean(self):
exp = self.x+self.y
atom = geo_mean(exp, self.x)
# self.assertEquals(atom.name(), "norm2(x + y)")
self.assertEquals(atom.size, (2,1))
self.assertEquals(atom.curvature, u.Curvature.CONCAVE_KEY)
self.assertEquals(atom.sign, u.Sign.POSITIVE_KEY)
def test_quad_over_lin(self):
# Test quad_over_lin DCP.
atom = quad_over_lin(square(self.x), self.a)
self.assertEquals(atom.curvature, u.Curvature.CONVEX_KEY)
atom = quad_over_lin(-square(self.x), self.a)
self.assertEquals(atom.curvature, u.Curvature.CONVEX_KEY)
atom = quad_over_lin(sqrt(self.x), self.a)
self.assertEquals(atom.curvature, u.Curvature.UNKNOWN_KEY)
assert not atom.is_dcp()
# Test quad_over_lin size validation.
with self.assertRaises(Exception) as cm:
quad_over_lin(self.x, self.x)
self.assertEqual(str(cm.exception),
"The second argument to quad_over_lin must be a scalar.")
def test_elemwise_arg_count(self):
"""Test arg count for max and min variants.
"""
with self.assertRaises(Exception) as cm:
max_elemwise(1)
self.assertTrue(str(cm.exception) in (
"__init__() takes at least 3 arguments (2 given)",
"__init__() missing 1 required positional argument: 'arg2'"))
with self.assertRaises(Exception) as cm:
min_elemwise(1)
self.assertTrue(str(cm.exception) in (
"__init__() takes at least 3 arguments (2 given)",
"__init__() missing 1 required positional argument: 'arg2'"))
def test_matrix_frac(self):
"""Test for the matrix_frac atom.
"""
atom = matrix_frac(self.x, self.A)
self.assertEquals(atom.size, (1,1))
self.assertEquals(atom.curvature, u.Curvature.CONVEX_KEY)
# Test matrix_frac size validation.
with self.assertRaises(Exception) as cm:
matrix_frac(self.x, self.C)
self.assertEqual(str(cm.exception),
"The second argument to matrix_frac must be a square matrix.")
with self.assertRaises(Exception) as cm:
matrix_frac(self.A, self.A)
self.assertEqual(str(cm.exception),
"The first argument to matrix_frac must be a column vector.")
with self.assertRaises(Exception) as cm:
matrix_frac(Variable(3), self.A)
self.assertEqual(str(cm.exception),
"The arguments to matrix_frac have incompatible dimensions.")
def test_max_entries_sign(self):
"""Test sign for max_entries.
"""
# One arg.
self.assertEquals(max_entries(1).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(max_entries(-2).sign, u.Sign.NEGATIVE_KEY)
self.assertEquals(max_entries(Variable()).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(max_entries(0).sign, u.Sign.ZERO_KEY)
def test_min_entries_sign(self):
"""Test sign for min_entries.
"""
# One arg.
self.assertEquals(min_entries(1).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(min_entries(-2).sign, u.Sign.NEGATIVE_KEY)
self.assertEquals(min_entries(Variable()).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(min_entries(0).sign, u.Sign.ZERO_KEY)
# Test sign logic for max_elemwise.
def test_max_elemwise_sign(self):
# Two args.
self.assertEquals(max_elemwise(1, 2).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(max_elemwise(1, Variable()).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(max_elemwise(1, -2).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(max_elemwise(1, 0).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(max_elemwise(Variable(), 0).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(max_elemwise(Variable(), Variable()).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(max_elemwise(Variable(), -2).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(max_elemwise(0, 0).sign, u.Sign.ZERO_KEY)
self.assertEquals(max_elemwise(0, -2).sign, u.Sign.ZERO_KEY)
self.assertEquals(max_elemwise(-3, -2).sign, u.Sign.NEGATIVE_KEY)
# Many args.
self.assertEquals(max_elemwise(-2, Variable(), 0, -1, Variable(), 1).sign,
u.Sign.POSITIVE_KEY)
# Promotion.
self.assertEquals(max_elemwise(1, Variable(2)).sign,
u.Sign.POSITIVE_KEY)
self.assertEquals(max_elemwise(1, Variable(2)).size,
(2, 1))
# Test sign logic for min_elemwise.
def test_min_elemwise_sign(self):
# Two args.
self.assertEquals(min_elemwise(1, 2).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(min_elemwise(1, Variable()).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(min_elemwise(1, -2).sign, u.Sign.NEGATIVE_KEY)
self.assertEquals(min_elemwise(1, 0).sign, u.Sign.ZERO_KEY)
self.assertEquals(min_elemwise(Variable(), 0).sign, u.Sign.NEGATIVE_KEY)
self.assertEquals(min_elemwise(Variable(), Variable()).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(min_elemwise(Variable(), -2).sign, u.Sign.NEGATIVE_KEY)
self.assertEquals(min_elemwise(0, 0).sign, u.Sign.ZERO_KEY)
self.assertEquals(min_elemwise(0, -2).sign, u.Sign.NEGATIVE_KEY)
self.assertEquals(min_elemwise(-3, -2).sign, u.Sign.NEGATIVE_KEY)
# Many args.
self.assertEquals(min_elemwise(-2, Variable(), 0, -1, Variable(), 1).sign,
u.Sign.NEGATIVE_KEY)
# Promotion.
self.assertEquals(min_elemwise(-1, Variable(2)).sign,
u.Sign.NEGATIVE_KEY)
self.assertEquals(min_elemwise(-1, Variable(2)).size,
(2, 1))
def test_sum_entries(self):
"""Test the sum_entries atom.
"""
self.assertEquals(sum_entries(1).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(sum_entries([1, -1]).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(sum_entries([1, -1]).curvature, u.Curvature.CONSTANT_KEY)
self.assertEquals(sum_entries(Variable(2)).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(sum_entries(Variable(2)).size, (1, 1))
self.assertEquals(sum_entries(Variable(2)).curvature, u.Curvature.AFFINE_KEY)
# Mixed curvature.
mat = np.mat("1 -1")
self.assertEquals(sum_entries(mat*square(Variable(2))).curvature, u.Curvature.UNKNOWN_KEY)
def test_mul_elemwise(self):
"""Test the mul_elemwise atom.
"""
self.assertEquals(mul_elemwise([1, -1], self.x).sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(mul_elemwise([1, -1], self.x).curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(mul_elemwise([1, -1], self.x).size, (2, 1))
pos_param = Parameter(2, sign="positive")
neg_param = Parameter(2, sign="negative")
self.assertEquals(mul_elemwise(pos_param, pos_param).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(mul_elemwise(pos_param, neg_param).sign, u.Sign.NEGATIVE_KEY)
self.assertEquals(mul_elemwise(neg_param, neg_param).sign, u.Sign.POSITIVE_KEY)
self.assertEquals(mul_elemwise(neg_param, square(self.x)).curvature, u.Curvature.CONCAVE_KEY)
# Test promotion.
self.assertEquals(mul_elemwise([1, -1], 1).size, (2, 1))
self.assertEquals(mul_elemwise(1, self.C).size, self.C.size)
with self.assertRaises(Exception) as cm:
mul_elemwise(self.x, [1, -1])
self.assertEqual(str(cm.exception),
"The first argument to mul_elemwise must be constant.")
# Test the vstack class.
def test_vstack(self):
atom = vstack(self.x, self.y, self.x)
self.assertEquals(atom.name(), "vstack(x, y, x)")
self.assertEquals(atom.size, (6,1))
atom = vstack(self.A, self.C, self.B)
self.assertEquals(atom.name(), "vstack(A, C, B)")
self.assertEquals(atom.size, (7,2))
entries = []
for i in range(self.x.size[0]):
for j in range(self.x.size[1]):
entries.append(self.x[i, j])
atom = vstack(*entries)
# self.assertEqual(atom[1,0].name(), "vstack(x[0,0], x[1,0])[1,0]")
with self.assertRaises(Exception) as cm:
vstack(self.C, 1)
self.assertEqual(str(cm.exception),
"All arguments to vstack must have the same number of columns.")
with self.assertRaises(Exception) as cm:
vstack()
self.assertEqual(str(cm.exception),
"No arguments given to vstack.")
def test_reshape(self):
"""Test the reshape class.
"""
expr = reshape(self.A, 4, 1)
self.assertEquals(expr.sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(expr.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(expr.size, (4, 1))
expr = reshape(expr, 2, 2)
self.assertEquals(expr.size, (2, 2))
expr = reshape(square(self.x), 1, 2)
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertEquals(expr.curvature, u.Curvature.CONVEX_KEY)
self.assertEquals(expr.size, (1, 2))
with self.assertRaises(Exception) as cm:
reshape(self.C, 5, 4)
self.assertEqual(str(cm.exception),
"Invalid reshape dimensions (5, 4).")
def test_vec(self):
"""Test the vec atom.
"""
expr = vec(self.C)
self.assertEquals(expr.sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(expr.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(expr.size, (6, 1))
expr = vec(self.x)
self.assertEquals(expr.size, (2, 1))
expr = vec(square(self.a))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertEquals(expr.curvature, u.Curvature.CONVEX_KEY)
self.assertEquals(expr.size, (1, 1))
def test_diag(self):
"""Test the diag atom.
"""
expr = diag(self.x)
self.assertEquals(expr.sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(expr.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(expr.size, (2, 2))
expr = diag(self.A)
self.assertEquals(expr.sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(expr.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(expr.size, (2, 1))
with self.assertRaises(Exception) as cm:
diag(self.C)
self.assertEqual(str(cm.exception),
"Argument to diag must be a vector or square matrix.")
def test_trace(self):
"""Test the trace atom.
"""
expr = trace(self.A)
self.assertEquals(expr.sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(expr.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(expr.size, (1, 1))
with self.assertRaises(Exception) as cm:
trace(self.C)
self.assertEqual(str(cm.exception),
"Argument to trace must be a square matrix.")
def test_log1p(self):
"""Test the log1p atom.
"""
expr = log1p(1)
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertEquals(expr.curvature, u.Curvature.CONSTANT_KEY)
self.assertEquals(expr.size, (1, 1))
expr = log1p(-0.5)
self.assertEquals(expr.sign, u.Sign.NEGATIVE_KEY)
def test_upper_tri(self):
with self.assertRaises(Exception) as cm:
upper_tri(self.C)
self.assertEqual(str(cm.exception),
"Argument to upper_tri must be a square matrix.")
def test_huber(self):
# Valid.
huber(self.x, 1)
with self.assertRaises(Exception) as cm:
huber(self.x, -1)
self.assertEqual(str(cm.exception),
"M must be a non-negative scalar constant.")
with self.assertRaises(Exception) as cm:
huber(self.x, [1,1])
self.assertEqual(str(cm.exception),
"M must be a non-negative scalar constant.")
# M parameter.
M = Parameter(sign="positive")
# Valid.
huber(self.x, M)
M.value = 1
self.assertAlmostEquals(huber(2, M).value, 3)
# Invalid.
M = Parameter(sign="negative")
with self.assertRaises(Exception) as cm:
huber(self.x, M)
self.assertEqual(str(cm.exception),
"M must be a non-negative scalar constant.")
    def test_sum_largest(self):
        """Test the sum_largest atom and related atoms.
        """
        # k (the second argument) must be a positive integer.
        with self.assertRaises(Exception) as cm:
            sum_largest(self.x, -1)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
        # lambda_sum_largest additionally requires a square first argument.
        with self.assertRaises(Exception) as cm:
            lambda_sum_largest(self.x, 2.4)
        self.assertEqual(str(cm.exception),
                         "First argument must be a square matrix.")
        # Even with a square argument, non-integer k is rejected.
        with self.assertRaises(Exception) as cm:
            lambda_sum_largest(Variable(2, 2), 2.4)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
    def test_sum_smallest(self):
        """Test the sum_smallest atom and related atoms.
        """
        # k (the second argument) must be a positive integer.
        with self.assertRaises(Exception) as cm:
            sum_smallest(self.x, -1)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
        # Non-integer k is rejected for lambda_sum_smallest as well.
        with self.assertRaises(Exception) as cm:
            lambda_sum_smallest(Variable(2,2), 2.4)
        self.assertEqual(str(cm.exception),
                         "Second argument must be a positive integer.")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.routing
import torn.plugins.app
import torn.api
from torn.exception import TornErrorHandler, TornNotFoundError, TornUrlNameNotFound
import tornado.web
import torn.plugins
import re
import os
class Route:
    """A single URI route: pattern, per-variable regex constraints, defaults
    and the controller that handles matching requests."""

    def __init__(self, uri: str, controller: torn.api.Controller):
        self.uri = uri
        self.controller = controller
        # Names of the {placeholders} embedded in the URI.
        self.uri_vars = torn.plugins.app.get_uri_variables(uri)
        self.params = {}
        self.default_vals = {}
        self.__name = None

    def __make_params(self, params):
        """Fill in a default regex for URI variables the caller left unconstrained."""
        DEFAULT_REGEX = '[a-zA-Z0-9]+'
        for var in self.uri_vars:
            if var not in params:
                params[var] = DEFAULT_REGEX
        return params

    def args(self, params):
        """Set per-variable regex constraints (fluent)."""
        self.params = self.__make_params(params)
        return self

    def defaults(self, value):
        """Set default values for URI variables (fluent)."""
        self.default_vals = value
        return self

    def name(self, value):
        """Name this route for reverse lookup (fluent)."""
        self.__name = value
        return self

    def get_name(self):
        return self.__name

    def get_controller(self):
        return self.controller

    def __get_regex(self, path: str):
        """Match *path* against this route's pattern.

        Returns a non-empty list on a match; [True] is used as a sentinel
        for the root route (empty path against '^$').
        """
        uri = torn.plugins.app.uri_creator(self.uri, self.params, self.default_vals)
        path = path.strip('/')
        if uri == '^$' and path == '':
            match = [True]
        else:
            match = re.findall(uri, path)
        return match

    def matches(self, request):
        """True when the request path matches this route."""
        return bool(self.__get_regex(request.path))

    def get_args(self, request):
        """Extract URI-variable values from the request path.

        Falls back to the configured default when a variable's own regex
        does not match its captured value (e.g. the value is blank).
        """
        values = self.__get_regex(request.path)[0]
        args = {}
        # The [True] root sentinel carries no variables.
        if not isinstance(values, bool):
            for i, uri_var in enumerate(self.uri_vars):
                # A single capture group yields a plain string; several
                # groups yield a tuple indexed alongside uri_vars.
                value = values if isinstance(values, str) else values[i]
                exact_matches = re.findall(self.params[uri_var], value)
                if exact_matches:
                    args[uri_var] = exact_matches[0]
                else:
                    args[uri_var] = self.__get_default_value(uri_var)
        return args

    def __get_default_value(self, uri_var: str):
        # Raises KeyError when no default was configured for the variable.
        return self.default_vals[uri_var]
class RouteCollection:
    """Ordered collection of routes plus an index of the named ones."""

    def __init__(self):
        self.routes = []
        self.named_routes = {}

    def add_route(self, route: Route):
        """Append *route*; matching order is insertion order."""
        self.routes.append(route)

    def match(self, request):
        """Return the first route matching *request* or raise TornNotFoundError."""
        for route in self.routes:
            if route.matches(request):
                return route
        raise TornNotFoundError

    def map_names(self):
        """Build the name -> route index for routes that were given a name."""
        for route in self.routes:
            # Call get_name() once and reuse the result (the original
            # computed it and then called get_name() twice more).
            name = route.get_name()
            if name:
                self.named_routes[name] = route

    def get_route_by_name(self, name: str):
        """Look up a named route or raise TornUrlNameNotFound."""
        if name in self.named_routes:
            return self.named_routes[name]
        raise TornUrlNameNotFound
class Routing:
    """Entry point applications use to register URI routes."""

    def __init__(self):
        self.routes = RouteCollection()

    def add(self, uri: str, contoller: torn.api.Controller):
        """Create a route for *uri* and return it for fluent configuration."""
        new_route = Route(uri, contoller)
        self.routes.add_route(new_route)
        return new_route

    def getRouteCollection(self):
        """Finalise the name index and hand back the collection."""
        self.routes.map_names()
        return self.routes
class Router(tornado.routing.Router):
    """Dispatches requests to torn controllers, static files or the error handler."""

    def __init__(self, app: tornado.web.Application, route=None):
        # The original default `route=Routing()` was a mutable default
        # argument: evaluated once at definition time and shared between
        # every Router built without an explicit routing table. Use None
        # as the sentinel and build a fresh Routing per instance.
        if route is None:
            route = Routing()
        self.app = app
        self.routes = route.getRouteCollection()

    def url_for(self, name, kwargs=None):
        """Reverse a named route to a URI, substituting *kwargs* for URI variables.

        Returns "" when the name is unknown or a variable value is missing.
        """
        # None sentinel instead of the mutable `kwargs=dict()` default.
        kwargs = kwargs if kwargs is not None else {}
        try:
            route = self.routes.get_route_by_name(name)
            uri = route.uri
            for variable in route.uri_vars:
                uri = uri.replace("{" + variable + "}", kwargs[variable])
            return uri
        except Exception:
            # Unknown name / missing variable: fall back to an empty URL.
            return ""

    def find_handler(self, request, **kwargs):
        """Resolve *request* to a handler delegate (static asset, route, or error page)."""
        # logging to be done here
        try:
            if torn.plugins.app.is_static(request.path):
                # Serve static assets straight from the Assets directory.
                return self.app.get_handler_delegate(
                    request, tornado.web.StaticFileHandler,
                    target_kwargs=dict(path=os.getcwd() + "/Assets"),
                    path_kwargs=dict(path=request.path.strip('/')))
            else:
                route = self.routes.match(request)
                torn.plugins.log.info(request.method + "\t" + request.path, code=str(200))
                return self.app.get_handler_delegate(
                    request, route.get_controller(),
                    path_kwargs=route.get_args(request))
        except tornado.web.HTTPError as e:
            torn.plugins.log.warning(request.method + "\t" + request.path, code=str(e.status_code))
            return self.app.get_handler_delegate(
                request, TornErrorHandler,
                target_kwargs=dict(status_code=e.status_code))
|
<gh_stars>10-100
import asyncio
import json
from unittest import TestCase
from unittest.mock import Mock
from test import AsyncMock
from pyhap.accessory import (
Accessories,
Accessory,
)
from pyhap.characteristic import Characteristic
from pyhap.characteristics import (
Brightness,
On,
Hue,
)
from pyhap.service import LightbulbService
from pyhap.util import CustomJSONEncoder
class TestAccessories(TestCase):
    """Tests for pyhap's Accessories container and Accessory callbacks."""

    def test_add(self):
        accessories = Accessories()
        accessory = Mock()
        # A fresh container already reports one accessory (aid 1) —
        # presumably the bridge itself; verify against pyhap.accessory.
        self.assertEqual(accessories.accessory_count, 1)
        accessories.add(accessory)
        # Newly added accessories receive the next sequential id.
        self.assertEqual(accessory.accessory_id, 2)
        self.assertEqual(accessories.accessories[2], accessory)
        self.assertEqual(accessories.accessory_count, 2)
    def test_iter(self):
        accessories = Accessories()
        # Iteration yields the stored accessories (only aid 1 exists here).
        for a in accessories:
            self.assertEqual(a, accessories.accessories[1])
    @staticmethod
    def test_identify():
        # identify() must await the configured identify_callback exactly once.
        callback = AsyncMock()
        accessory = Accessory(name='test_name', model='test_model', manufacturer='test_manufacturer',
                              identify_callback=callback)
        asyncio.get_event_loop().run_until_complete(accessory.identify())
        callback.assert_called_once()
    def test_get_characteristic(self):
        accessories = Accessories()
        characteristic = accessories.get_characteristic(1, 2)
        self.assertIsInstance(characteristic, Characteristic)
    def test_read_characteristic(self):
        accessories = Accessories()
        # 'id' selects "aid.iid" pairs; the remaining flags mirror the
        # HAP query-string options.
        error, characteristics = accessories.read_characteristic({
            'id': '1.2,1.3',
            'meta': '1',
            'perms': '1',
            'type': '1',
            'include_ev': '1',
        })
        self.assertFalse(error)
        self.assertEqual(characteristics['characteristics'][0], {
            'aid': 1,
            'iid': 2,
            'value': 'PyHAP',
            'perms': ['pr'],
            'type': '23',
        })
        self.assertEqual(characteristics['characteristics'][1], {
            'aid': 1,
            'iid': 3,
            'value': 'PyHAP1,1',
            'perms': ['pr'],
            'type': '21',
        })
    def test_read_characteristic_write_only(self):
        accessories = Accessories()
        # Reading a write-only characteristic reports status -70405.
        error, characteristics = accessories.read_characteristic({'id': '1.7'})
        self.assertTrue(error)
        self.assertEqual(characteristics['characteristics'][0], {
            'aid': 1,
            'iid': 7,
            'status': -70405,
        })
    def test_write_characteristic(self):
        # Build a lightbulb with bool/int/float characteristics and exercise
        # each write path, including the error branches.
        accessory = Accessory(name='PyHAP', model='PyHAP1,1', manufacturer='PyHAP', hardware_revision='0')
        service = LightbulbService()
        bool_characteristic = On(False)
        int_characteristic = Brightness(8)
        float_characteristic = Hue(5.0)
        service.add_characteristic(bool_characteristic)
        service.add_characteristic(int_characteristic)
        service.add_characteristic(float_characteristic)
        accessories = Accessories()
        accessory.add_service(service)
        accessories.add(accessory)
        # bool characteristic
        callback = AsyncMock()
        bool_characteristic.callback = callback
        self.assertEqual(bool_characteristic.value, False)
        result = asyncio.get_event_loop().run_until_complete(
            accessories.write_characteristic([{'aid': 2, 'iid': 10, 'value': True}])
        )
        callback.assert_called_once_with(True)
        self.assertEqual(result, [])
        self.assertEqual(bool_characteristic.value, True)
        # int characteristic write
        callback = AsyncMock()
        int_characteristic.callback = callback
        self.assertEqual(int_characteristic.value, 8)
        result = asyncio.get_event_loop().run_until_complete(
            accessories.write_characteristic([{'aid': 2, 'iid': 11, 'value': 12}])
        )
        callback.assert_called_once_with(12)
        self.assertEqual(result, [])
        self.assertEqual(int_characteristic.value, 12)
        # float characteristic write
        callback = AsyncMock()
        float_characteristic.callback = callback
        self.assertEqual(float_characteristic.value, 5.0)
        result = asyncio.get_event_loop().run_until_complete(
            accessories.write_characteristic([{'aid': 2, 'iid': 12, 'value': 7.0}])
        )
        callback.assert_called_once_with(7.0)
        self.assertEqual(result, [])
        self.assertEqual(float_characteristic.value, 7.0)
        # None value during write, leave previous value
        previous_value = bool_characteristic.value
        result = asyncio.get_event_loop().run_until_complete(
            accessories.write_characteristic([{'aid': 2, 'iid': 10}])
        )
        self.assertEqual(result, [])
        self.assertEqual(bool_characteristic.value, previous_value)
        bool_characteristic.value = previous_value
        # None callback
        bool_characteristic.callback = None
        result = asyncio.get_event_loop().run_until_complete(
            accessories.write_characteristic([{'aid': 2, 'iid': 10, 'value': True}])
        )
        self.assertEqual(result, [])
        # Exception in callback: surfaced as status -70402 for that entry.
        bool_characteristic.callback = exception_callback
        result = asyncio.get_event_loop().run_until_complete(
            accessories.write_characteristic([{'aid': 2, 'iid': 10, 'value': True}])
        )
        self.assertEqual(result, [{
            'aid': 2,
            'iid': 10,
            'status': -70402,
        }])
    def test_write_characteristic_read_only(self):
        accessories = Accessories()
        # Writing a read-only characteristic reports status -70404.
        result = asyncio.get_event_loop().run_until_complete(
            accessories.write_characteristic([{'aid': 1, 'iid': 2, 'value': 'test_value'}])
        )
        self.assertEqual(result, [{
            'aid': 1,
            'iid': 2,
            'status': -70404,
        }])
    # pylint: disable=line-too-long
    def test_json(self):
        # The serialized form must round-trip through CustomJSONEncoder into
        # the exact HAP accessory-database structure below.
        accessories = Accessories()
        result = json.loads(json.dumps(accessories.__json__(), cls=CustomJSONEncoder))
        self.assertEqual(result, {
            'accessories': [{
                'aid': 1,
                'services': [{
                    'type': '3E',
                    'iid': 1,
                    'characteristics': [
                        {'type': '23', 'perms': ['pr'], 'format': 'string', 'aid': 1, 'iid': 2, 'value': 'PyHAP', 'maxLen': 64},
                        {'type': '21', 'perms': ['pr'], 'format': 'string', 'aid': 1, 'iid': 3, 'value': 'PyHAP1,1', 'maxLen': 64},
                        {'type': '20', 'perms': ['pr'], 'format': 'string', 'aid': 1, 'iid': 4, 'value': 'PyHAP', 'maxLen': 64},
                        {'type': '30', 'perms': ['pr'], 'format': 'string', 'aid': 1, 'iid': 5, 'value': '3331779EC7A8', 'maxLen': 64},
                        {'type': '52', 'perms': ['pr'], 'format': 'string', 'aid': 1, 'iid': 6, 'value': '0.0.1', 'maxLen': 64},
                        {'type': '14', 'perms': ['pw'], 'format': 'bool', 'aid': 1, 'iid': 7}
                    ]
                }]
            }]
        })
async def exception_callback():
    """Test helper: a characteristic callback that always raises."""
    raise Exception()
|
<filename>viz3d/opengl/camera_shader.py
from viz3d.opengl.gl_algebra import gl_transpose
from viz3d.opengl.model import PointCloudModel, EllipsesModel, CamerasModel, LinesModel, VoxelsModel, PosesModel
from viz3d.opengl.shader import *
import numpy as np
class CameraAlbedoShader(Shader):
    """
    A CameraAlbedoShader is a shader for simple Camera.

    Vertices carry a position and an RGB color; instancing provides a
    per-instance model-to-world matrix at attribute location 3.
    """
    # ------------------------------------------------------------------------------------------------------------------
    # VERTEX SHADER SOURCE
    __vertex_shader = """
    #version 330 core
    layout (location = 0) in vec3 in_position;
    layout (location = 1) in vec3 in_color;
    layout (location = 3) in mat4 in_model_to_world;
    uniform mat4 world_to_cam;
    uniform mat4 projection;
    out vec3 _color;
    void main() {
        vec4 homogeneous = vec4(in_position, 1.0);
        gl_Position = projection * world_to_cam * in_model_to_world * homogeneous;
        _color = in_color;
    }
    """
    # ------------------------------------------------------------------------------------------------------------------
    # FRAGMENT SHADER SOURCE
    __fragment_shader = """
    #version 330 core
    out vec3 fragment_color;
    in vec3 _color;
    void main() {
        fragment_color = _color;
    }
    """
    # ------------------------------------------------------------------------------------------------------------------
    # OVERRIDDEN METHODS
    def initialize_uniform_variables(self, **kwargs):
        """
        Initializes the uniform variables of the program
        """
        pid = self.shader_program.gl_shader_program_id
        glUseProgram(pid)
    def draw_model(self,
                   model: Model,
                   world_to_cam: Optional[np.ndarray] = None,
                   projection: Optional[np.ndarray] = None,
                   **kwargs):
        """Draw *model* with the given view and projection matrices.

        BUGFIX: the original declared `world_to_cam=Optional[np.ndarray]`,
        which makes the *default value* the typing object rather than an
        annotation — so `assert_debug(world_to_cam is not None)` passed
        even when the caller omitted the matrix. It is now a proper
        annotated keyword argument defaulting to None, like `projection`.
        """
        assert_debug(world_to_cam is not None)
        assert_debug(projection is not None)
        pid = self.shader_program.gl_shader_program_id
        glUseProgram(pid)
        # Upload view/projection matrices. gl_transpose() presumably yields
        # the row/column-major transpose flag — confirm in viz3d.opengl.
        glUniformMatrix4fv(self.get_ulocation("world_to_cam"), 1, gl_transpose(), world_to_cam)
        glUniformMatrix4fv(self.get_ulocation("projection"), 1, gl_transpose(), projection)
        glBindVertexArray(model.vao)
        # Dispatch on the model type: points are drawn as arrays, everything
        # else as instanced indexed lines/triangles.
        if isinstance(model, PointCloudModel):
            # Set Point size
            glPointSize(model.model_data.point_size)
            glDrawArraysInstanced(GL_POINTS, 0, model.num_points(), model.num_instances())
        elif isinstance(model, CamerasModel):
            glLineWidth(model.model_data.width)
            glDrawElementsInstanced(GL_LINES, model.num_elements(),
                                    GL_UNSIGNED_INT, ctypes.c_void_p(0), model.num_instances())
        elif isinstance(model, PosesModel):
            glLineWidth(model.model_data.width)
            glDrawElementsInstanced(GL_LINES, model.num_elements(), GL_UNSIGNED_INT,
                                    ctypes.c_void_p(0), model.num_instances())
        elif isinstance(model, EllipsesModel):
            glEnable(GL_LINE_SMOOTH)
            glDrawElementsInstanced(GL_TRIANGLES, model.num_elements(),
                                    GL_UNSIGNED_INT, ctypes.c_void_p(0), model.num_instances())
        elif isinstance(model, (LinesModel, VoxelsModel)):
            glLineWidth(model.model_data.line_width)
            glDrawElementsInstanced(GL_LINES, model.num_elements(),
                                    GL_UNSIGNED_INT, ctypes.c_void_p(0), model.num_instances())
        else:
            raise NotImplementedError("Unrecognized model type")
        # Release buffers
        glBindVertexArray(0)
        glUseProgram(0)
    def init_shader_program(self, **kwargs):
        """Compile and link the program (must not already be initialized)."""
        assert_debug(not self.initialized)
        super().init_shader_program(**kwargs)
    def vertex_shader(self):
        return self.__vertex_shader
    def fragment_shader(self) -> str:
        return self.__fragment_shader
|
<reponame>dalbonip/hmp_hunter<gh_stars>0
import os
import re
import pandas as pd
from search_db_for_lib import look_for_lib
from datetime import date, datetime
from pytz import timezone
directory = "clientes"
def make_report():
    """Regenerate templates/report.html: per-client CVE findings from CSVs.

    Reads every ``<client>.csv`` in the ``clientes`` directory (columns:
    library, version), looks each pair up via look_for_lib(), and writes a
    Jinja-style HTML report stamped with the current São Paulo time.
    """
    # Timestamp in America/Sao_Paulo (UTC-3) for the report header.
    data_e_hora_atuais = datetime.now()
    fuso_horario = timezone('America/Sao_Paulo')
    data_e_hora_sao_paulo = data_e_hora_atuais.astimezone(fuso_horario)
    data_e_hora_sao_paulo_em_texto = data_e_hora_sao_paulo.strftime('%d/%m/%Y %H:%M')
    # Truncate any previous report, then reopen for writing the new one.
    f = open("templates/report.html", "w")
    f.write("")
    f.close()
    f = open("templates/report.html", "r+")
    f.write("<div>")
    f.write("{% extends 'base.html' %}")
    f.write("{% block head %}")
    f.write("{% endblock %}")
    f.write("{% block body %}")
    f.write('<div class="content">')
    f.write('<br><h1>CVEs Report</h1><br>')
    f.write('<div><button onClick="window.location.href=`/atualizar`">Atualizar report</button>&emsp;<button onClick="window.print()">Print to file</button>&emsp;<button onClick="window.location.href=`/upload`">Add cliente</button></a></h3><br><br></div>')
    f.write(f'<teste class="normal">atualizado pela última vez em: {data_e_hora_sao_paulo_em_texto} (UTC -3/Brasília)</teste>')
    for filename in os.listdir(directory):
        if filename[-4:] == ".csv":
            f.write("<div>")
            csvtoread = directory +"/"+ filename
            dataframe = pd.read_csv(csvtoread)
            f.write(f"<div><br><br><br><br><br><h2><p>&emsp;>_ Cliente: {filename[:-4].capitalize()} </p></h2></div>")
            # print(dataframe)
            # One lookup per (library, version) row of the client's CSV.
            for r in range(len(dataframe)):
                lib = dataframe.iat[r,0]
                ver = dataframe.iat[r,1]
                retorno = look_for_lib(lib,ver)
                # look_for_lib presumably returns 404 / None when nothing is
                # found — TODO confirm against search_db_for_lib.
                if retorno != 404 and retorno != None :
                    # De-duplicate while preserving order.
                    retorno_final = list(dict.fromkeys(retorno))
                    #f.write(f"<br><br><div><h4> {filename[:-4]}:</h4></div>")
                    f.write(f'<br><div><h4>[+] {filename[:-4].capitalize()} <teste class="normal">-</teste> <a>{lib} {ver} </a><teste class="normal">is vulnerable to<teste> <a>{len(retorno_final)} CVE(s)<teste class="normal">!</teste></a><br></h4></div>')
                    for vuln in retorno_final:
                        f.write('<cvee class="greeny">')
                        final = vuln.replace(' : ','</cvee><a> : </a><teste class="normal">')
                        # NOTE(review): `lib` is compiled as a raw regex — names
                        # containing metacharacters (e.g. "c++") will misbehave;
                        # consider re.escape(lib). Also the version pattern is a
                        # non-raw string; \d still works but r"..." is safer.
                        src_str1 = re.compile(lib, re.IGNORECASE)
                        src_str2 = re.compile("(( |v)(\d+)\.(\d+)\.?(\d+)\.?(\d+)?\.?(\d+)?( |\.)?)")
                        final = src_str1.sub(f'<cvee class="greeny"> {lib} </cvee>',final)
                        final = src_str2.sub(f'<cvee class="greeny"> {ver} </cvee>',final)
                        f.write(f'<div><p>{final}</teste></p></div>')
                    f.write("</div>")
                else:
                    f.write("</div>")
                    f.write("<br>")
            f.write("<br><br><br><br><hr>")
        else:
            continue
    f.write("</div>")
    f.write("{% endblock %}")
    f.close()
import csv
import os
import logging
from dataactcore.interfaces.db import GlobalDB
from dataactcore.logging import configure_logging
from dataactcore.models.jobModels import FileType
from dataactcore.models.validationModels import FileColumn, FieldType
from dataactvalidator.health_check import create_app
from dataactvalidator.filestreaming.fieldCleaner import FieldCleaner
logger = logging.getLogger(__name__)
class SchemaLoader(object):
    """Load schema from corresponding .csv and insert related validation rules to the db."""

    # TODO: add schema to .csv mapping to the db instead of hard-coding here.
    fieldFiles = {
        "appropriations": "appropFields.csv",
        "award": "awardFields.csv",
        "award_financial": "awardFinancialFields.csv",
        "program_activity": "programActivityFields.csv",
        "award_procurement": "awardProcurementFields.csv",
        "fabs": "fabsFields.csv",
        "executive_compensation": "executiveCompensationFields.csv"}

    @staticmethod
    def load_fields(file_type_name, schema_file_name):
        """Load specified schema from a .csv, replacing any existing columns.

        Raises ValueError when a CSV row lacks the required columns.
        """
        with create_app().app_context():
            sess = GlobalDB.db().session
            # get file type object for specified fileTypeName
            file_type = sess.query(FileType).filter(FileType.name == file_type_name).one()
            # delete existing schema from database
            SchemaLoader.remove_columns_by_file_type(sess, file_type)
            # get allowable datatypes
            type_query = sess.query(FieldType.name, FieldType.field_type_id).all()
            types = {data_type.name: data_type.field_type_id for data_type in type_query}
            # add schema to database
            # BUGFIX: mode 'rU' was deprecated and removed in Python 3.11;
            # plain 'r' with newline='' is the csv-module-recommended form.
            with open(schema_file_name, 'r', newline='') as csvfile:
                reader = csv.DictReader(csvfile)
                file_column_count = 0
                for record in reader:
                    record = FieldCleaner.clean_record(record)
                    fields = ["fieldname", "required", "data_type"]
                    if all(field in record for field in fields):
                        SchemaLoader.add_column_by_file_type(
                            sess,
                            types,
                            file_type,
                            FieldCleaner.clean_string(record["fieldname"]),
                            FieldCleaner.clean_string(record["fieldname_short"]),
                            record["required"],
                            record["data_type"],
                            record["padded_flag"],
                            record["field_length"])
                        file_column_count += 1
                    else:
                        raise ValueError('CSV File does not follow schema')
                sess.commit()
            logger.info({
                'message': '{} {} schema records added to {}'.format(file_column_count, file_type_name,
                                                                     FileColumn.__tablename__),
                'message_type': 'ValidatorInfo',
                'file_type': file_type.letter_name
            })

    @staticmethod
    def remove_columns_by_file_type(sess, file_type):
        """Remove the schema for a specified file type."""
        deleted_records = sess.query(FileColumn).filter(FileColumn.file == file_type).delete(
            synchronize_session='fetch')
        logger.info({
            'message': '{} {} schema records deleted from {}'.format(deleted_records, file_type.name,
                                                                     FileColumn.__tablename__),
            'message_type': 'ValidatorInfo',
            'file_type': file_type.letter_name
        })

    @staticmethod
    def add_column_by_file_type(sess, types, file_type, field_name, field_name_short, required, field_type,
                                padded_flag="False", field_length=None):
        """
        Adds a new column to the schema
        Args:
            file_type -- FileType object this column belongs to
            field_name -- The name of the schema column
            types -- List of field types
            field_name_short -- The machine-friendly, short column name
            required -- marks the column if data is allways required
            field_type -- sets the type of data allowed in the column
            padded_flag -- True if this column should be padded
            field_length -- Maximum allowed length for this field
        """
        new_column = FileColumn()
        new_column.file = file_type
        new_column.required = False
        new_column.name = field_name.lower().strip().replace(' ', '_')
        new_column.name_short = field_name_short.lower().strip().replace(' ', '_')
        field_type = field_type.upper()
        # Allow for other names
        if field_type == "STR":
            field_type = "STRING"
        elif field_type == "FLOAT":
            field_type = "DECIMAL"
        elif field_type == "BOOL":
            field_type = "BOOLEAN"
        # Translate padded flag to true or false (anything but "true" is False)
        if not padded_flag:
            new_column.padded_flag = False
        elif padded_flag.lower() == "true":
            new_column.padded_flag = True
        else:
            new_column.padded_flag = False
        # Check types
        # NOTE(review): attribute is named field_types_id (plural) — confirm
        # against the FileColumn model before "fixing".
        if field_type in types:
            new_column.field_types_id = types[field_type]
        else:
            # NOTE(review): message has a typo ("not value" -> "not valid");
            # kept as-is in case callers/tests match on it.
            raise ValueError('Type {} not value for {}'.format(field_type, field_name))
        # Check Required
        if required.lower() in ['true', 'false']:
            if required.lower() == 'true':
                new_column.required = True
        else:
            raise ValueError('Required field is not boolean for {}'.format(field_name))
        # Add length if present
        if field_length is not None and str(field_length).strip() != "":
            length_int = int(str(field_length).strip())
            new_column.length = length_int
        sess.add(new_column)

    @classmethod
    def load_all_from_path(cls, path):
        """Load field definitions for every known file type from *path*."""
        for key in cls.fieldFiles:
            filepath = os.path.join(path, cls.fieldFiles[key])
            cls.load_fields(key, filepath)
if __name__ == '__main__':
    # Script entry point: configure logging, then load every schema CSV
    # from the relative config directory.
    configure_logging()
    SchemaLoader.load_all_from_path("../config/")
|
<reponame>lxdzz/item
import hashlib
from django.core.paginator import Paginator
from django.shortcuts import render, HttpResponseRedirect,HttpResponse
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt #免除csrf保护
from Seller.models import *
def loginValid(fun):
    """Decorator: run the view only when cookie and session usernames match.

    Otherwise redirect the client to the login page.
    """
    import functools

    # functools.wraps preserves the wrapped view's __name__/__doc__ (the
    # original wrapper hid every view behind the name "inner").
    @functools.wraps(fun)
    def inner(request, *args, **kwargs):
        cookie_username = request.COOKIES.get('username')
        session_username = request.session.get('username')
        if cookie_username and session_username and cookie_username == session_username:
            return fun(request, *args, **kwargs)
        return HttpResponseRedirect('/login/')
    return inner
def setPassword(password):
    """Return the hex MD5 digest of *password*.

    NOTE(review): MD5 is not a safe password hash; consider Django's
    password hashers. Kept as-is for compatibility with stored rows.
    """
    return hashlib.md5(password.encode()).hexdigest()
def register(request):
    """Render the seller registration page; create a new user on POST."""
    error_message = ""
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        if email:
            # Check whether the email address is already registered.
            user = LoginUser.objects.filter(email=email).first()
            if not user:
                new_user = LoginUser()
                new_user.email = email
                new_user.username = email
                # Store the hashed password, never the plain text.
                # (Restored from a `<PASSWORD>` redaction artifact — login()
                # compares against setPassword(password), so the same hash
                # must be stored here.)
                new_user.password = setPassword(password)
                new_user.save()
            else:
                error_message = '邮箱已经被注册,请登录'
        else:
            error_message = '邮箱不可以为空'
    return render(request, 'seller/register.html', locals())
import time
import datetime
from django.views.decorators.cache import cache_page
@cache_page(60*15)  # NOTE(review): caching a login view for 15 min is dubious — confirm intent
def login(request):
    """Render the login page; on POST verify password and email code."""
    error_message = ""
    if request.method == "POST":
        email = request.POST.get('email')
        password = request.POST.get('password')
        code = request.POST.get("valid_code")
        if email:
            # Look the user up by email first.
            user = LoginUser.objects.filter(email=email).first()
            if user:
                db_password = user.password
                # Restored from a `<PASSWORD>` redaction artifact: hash the
                # submitted password the same way register() stores it.
                password = setPassword(password)
                if db_password == password:
                    # Most recent verification code issued for this email.
                    codes = Valid_Code.objects.filter(code_user=email).order_by("-code_time").first()
                    # BUGFIX: only touch `codes` fields once we know one
                    # exists (the original dereferenced codes.code_time
                    # before the None check and crashed when no code was
                    # ever sent).
                    if codes:
                        now = time.mktime(datetime.datetime.now().timetuple())
                        db_time = time.mktime(codes.code_time.timetuple())
                        t = (now - db_time) / 60  # code age in minutes
                    # Valid when unused (state 0), at most 5 minutes old and
                    # matching case-insensitively.
                    if codes and codes.code_state == 0 and t <= 5 and codes.code_content.upper() == code.upper():
                        response = HttpResponseRedirect('/Seller/index/')
                        response.set_cookie('username', user.username)
                        response.set_cookie('user_id', user.id)
                        request.session['username'] = user.username
                        return response
                    else:
                        error_message = '验证码错误'
                else:
                    error_message = '密码错误'
            else:
                error_message = '用户不存在'
        else:
            error_message = '邮箱不可以为空'
    return render(request, 'seller/login.html', locals())
def logout(request):
    """Clear every auth cookie and the session username, then go to login."""
    response = HttpResponseRedirect('/login/')
    for key in list(request.COOKIES.keys()):
        response.delete_cookie(key)
    del request.session['username']
    return response
@loginValid
def index(request):
    """Render the seller dashboard (login required)."""
    return render(request, 'seller/index.html', locals())
@loginValid
def goods_list(request, status, page=1):
    """List goods filtered by status: '1' = on sale, '0' = off sale, else all.

    NOTE(review): the template receives locals(), so these local variable
    names are part of the template contract — do not rename them.
    """
    page = int(page)
    if status == '1':
        goodses = Goods.objects.filter(goods_status=1)
    elif status == '0':
        goodses = Goods.objects.filter(goods_status=0)
    else:
        goodses = Goods.objects.all()
    # 10 items per page.
    all_goods = Paginator(goodses, 10)
    goods_list = all_goods.page(page)
    return render(request, 'seller/goods_list.html', locals())
# def goods_list(request):
# goods_list=Goods.objects.all()
# return render(request,'goods_list.html',locals())
@loginValid
def goods_status(request, state, id):
    """Put a goods item on sale ('up') or off sale ('down'), then bounce back."""
    goods = Goods.objects.get(id=int(id))
    if state == 'up':
        goods.goods_status = 1
    elif state == 'down':
        goods.goods_status = 0
    goods.save()
    # Return to the referring page, or the default listing when absent.
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/goods_list/1/1'))
@loginValid
def personal_info(request):
    """Show and (on POST) update the logged-in seller's profile.

    NOTE(review): the template receives locals(); keep local names stable.
    """
    user_id = request.COOKIES.get('user_id')
    user = LoginUser.objects.get(id=int(user_id))
    if request.method == 'POST':
        user.username = request.POST.get('username')
        user.gender = request.POST.get('gender')
        user.age = request.POST.get('age')
        user.phone_number = request.POST.get('phone_number')
        user.address = request.POST.get('address')
        user.photo = request.FILES.get('photo')
        user.save()
    return render(request, 'seller/personal_info.html', locals())
@loginValid
def goods_add(request):
    """Render the goods form and create a new goods record on POST."""
    goods_type_list = GoodsType.objects.all()
    if request.method == 'POST':
        data = request.POST
        files = request.FILES
        goods = Goods()
        # Plain field assignments.
        goods.goods_number = data.get('goods_number')
        goods.goods_name = data.get('goods_name')
        goods.goods_price = data.get('goods_price')
        goods.goods_count = data.get('goods_count')
        goods.goods_location = data.get('goods_location')
        goods.goods_safe_date = data.get('goods_safe_date')
        goods.goods_pro_time = data.get('goods_pro_time')  # production date must be yyyy-mm-dd
        goods.goods_status = 1
        # Foreign key: goods type.
        goods_type_id = int(data.get('goods_type'))
        goods.goods_type = GoodsType.objects.get(id=goods_type_id)
        # Uploaded picture.
        picture = files.get('picture')
        goods.picture = picture
        # Attach the goods to the logged-in seller.
        user_id = request.COOKIES.get('user_id')
        goods.good_store = LoginUser.objects.get(id=int(user_id))
        goods.save()
    return render(request, 'seller/goods_add.html', locals())
#生成随机数
import random
def random_code(len=6):
    """Generate a random alphanumeric verification code of the given length.

    NOTE(review): the parameter shadows the builtin `len`; kept for
    backward compatibility with keyword callers.
    """
    alphabet = '1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return "".join(random.choice(alphabet) for _ in range(len))
#发送验证码
import requests
from Qshop.settings import DING_URL
import json
def sendDing(content, to=None):
    """Post *content* to the DingTalk webhook (DING_URL) and return the JSON reply.

    NOTE(review): this local definition is shadowed later in the module by
    `from CeleryTask.tasks import sendDing`, so it is effectively dead code
    here — confirm before removing.

    Args:
        content: Text message to send.
        to: Optional phone number to @-mention; when None, everyone is
            mentioned (isAtAll=True).
    """
    headers = {
        "Content-Type": "application/json",
        "Charset": "utf-8"
    }
    requests_data = {
        "msgtype": "text",
        "text": {
            "content": content
        },
        "at": {
            "atMobiles": [
            ],
            "isAtAll": True
        }
    }
    # Mention a specific number when given, otherwise mention everyone.
    if to:
        requests_data["at"]["atMobiles"].append(to)
        requests_data["at"]["isAtAll"] = False
    else:
        requests_data["at"]["atMobiles"].clear()
        requests_data["at"]["isAtAll"] = True
    sendData=json.dumps(requests_data)
    response=requests.post(url=DING_URL,headers=headers,data=sendData)
    content=response.json()
    return content
#保存验证码
from CeleryTask.tasks import sendDing
@csrf_exempt
def send_login_code(request):
    """POST endpoint: store a fresh verification code and send it via DingTalk.

    Returns JSON {"code": 200|400, "data": message}.
    """
    result={
        "code":200,
        "data":""
    }
    if request.method=="POST":
        email=request.POST.get("email")
        code=random_code()
        # Persist the code so login() can validate it later.
        c=Valid_Code()
        c.code_user=email
        c.code_content=code
        c.save()
        send_data="%s的验证码是%s,不要告诉人"%(email,code)
        # sendDing(send_data) #发送验证
        # Delivered asynchronously through the Celery task.
        sendDing.delay(send_data)
        result["data"]="发送成功"
    else:
        result["data"]="请求错误"
        result["code"]=400
    return JsonResponse(result)
def order_list(request,status):
    """List the logged-in shop's orders in a given status.

    status: order state — 0 unpaid, 1 paid, 2 awaiting shipment,
    3/4 completed / rejected.

    NOTE(review): the template receives locals(); keep local names stable.
    """
    status=int(status)
    user_id=request.COOKIES.get("user_id") # shop (seller) id from the cookie
    store=LoginUser.objects.get(id=user_id) # the shop's user record
    store_order=store.orderinfo_set.filter(order_status=status).order_by("-id") # the shop's orders, newest first
    return render(request,'seller/order_list.html',locals())
from Buyer.models import OrderInfo
def change_order(request):
#通过订单详情id来锁定订单详情
order_id=request.GET.get("order_id")
#获取要修改的状态
order_status=request.GET.get("order_status")
order=OrderInfo.objects.get(id=order_id)
order.order_status=int(order_status)
order.save()
return JsonResponse({"data":"修改成功"})
# Create your views here.
|
<filename>GUI Applications/calc.py
from tkinter import Tk
from tkinter import Entry
from tkinter import Button
from tkinter import StringVar

t = Tk()
t.title("<NAME>")
t.geometry("425x300")
t.resizable(0, 0)
t.configure(background="black")  # back ground color
a = StringVar()  # the expression currently shown in the entry field


def show(c):
    """Append one character to the current expression."""
    a.set(a.get() + c)


def equal():
    """Evaluate the current expression and display the result.

    NOTE(review): eval() on arbitrary input is dangerous in general;
    tolerated here only because input comes from the calculator buttons.
    """
    x = a.get()
    a.set(eval(x))


def clear():
    """Reset the display."""
    a.set("")


e1 = Entry(font=("", 30), justify="right", textvariable=a)
e1.place(x=0, y=0, width=425, height=50)

# Button grid: 4 rows x 4 columns, 100x50 px cells on a 105/55 px pitch.
# The original hand-wrote 16 nearly identical Button blocks, and briefly
# wired b1 to `command=show` — a callback that requires an argument and
# would raise TypeError if it ever fired — before reconfiguring it. The
# loop below produces the same buttons at the same coordinates with the
# correct bindings from the start.
LAYOUT = [
    ["7", "8", "9", "+"],
    ["4", "5", "6", "-"],
    ["1", "2", "3", "*"],
    ["C", "0", "=", "/"],
]
for row, keys in enumerate(LAYOUT):
    for col, key in enumerate(keys):
        if key == "C":
            cmd = clear
        elif key == "=":
            cmd = equal
        else:
            # Bind the key at definition time (avoids the late-binding trap).
            cmd = lambda ch=key: show(ch)
        btn = Button(text=key, font=("", 25), bg="gray", fg="white",
                     activebackground="yellow", command=cmd)
        btn.place(x=5 + 105 * col, y=55 + 55 * row, width=100, height=50)

t.mainloop()
# ntripbrowser code is placed under the 3-Clause BSD License.
# Written by <NAME> (<EMAIL>)
#
# If you are interested in using ntripbrowser code as a part of a
# closed source project, please contact Emlid Limited (<EMAIL>).
#
# Copyright (c) 2017, Emlid Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Emlid Limited nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Emlid Limited BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from geopy.distance import geodesic
import pycurl
import cchardet
try:
from io import BytesIO # Python 3
except ImportError:
from StringIO import StringIO as BytesIO # Python 2
from .constants import (CAS_HEADERS, STR_HEADERS, NET_HEADERS, PYCURL_TIMEOUT_ERRNO,
MULTICURL_SELECT_TIMEOUT)
from .exceptions import (ExceededTimeoutError, UnableToConnect, NoDataReceivedFromCaster)
# Module-level logger; handlers/levels are configured by the importing application.
logger = logging.getLogger(__name__)
class DataFetcher(object):
    """Fetch data from specified urls, execute custom callback on results.

    Parameters
    ----------
    urls : [str, str, ...]
        URL's to fetch data from.
    timeout : int
        Per-transfer timeout in seconds (applied to connect and total time).
    parser_method : callable
        Custom callback to be executed on fetched from url's results.

    Attributes
    ----------
    urls_processed : [str, str, ...]
        URL's which are processed and on which no valid data was found.
    results :
        Return value of `parser_method` function or None.
    """
    def __init__(self, urls, timeout, parser_method):
        self.timeout = timeout
        self.urls = urls
        self._parser_method = parser_method
        self.urls_processed = []
        self.results = None
        self._multicurl = None
        # Maps each pycurl.Curl handle to the BytesIO buffer it writes into.
        self._buffers = {}
        # (curl, errno, errmsg) tuples collected from multicurl.info_read().
        self._curls_failed = []
    @property
    def curls(self):
        """All curl handles currently registered (the keys of `_buffers`)."""
        return list(self._buffers.keys())
    @property
    def _result_found(self):
        """True once `parser_method` has produced a truthy result."""
        return bool(self.results)
    def setup(self):
        """Reset state and register one fresh curl handle per url."""
        self.urls_processed = []
        self.results = None
        self._multicurl = pycurl.CurlMulti()
        self._buffers = {}
        self._curls_failed = []
        self._initialize()
        logger.info('DataFetcher: curls setup in process')
        for curl in self.curls:
            self._multicurl.add_handle(curl)
    def _initialize(self):
        """Create a curl handle with its own write buffer for every url."""
        for url in self.urls:
            logger.debug('DataFetcher: Buffered curl creation for url "%s" in process', url)
            buffer = BytesIO()
            curl = pycurl.Curl()
            curl.setopt(pycurl.URL, url)
            curl.setopt(pycurl.TIMEOUT, self.timeout)
            curl.setopt(pycurl.CONNECTTIMEOUT, self.timeout)
            curl.setopt(pycurl.WRITEFUNCTION, buffer.write)
            curl.setopt(pycurl.WRITEDATA, buffer)
            self._buffers.update({curl: buffer})
    def read_data(self):
        """Drive all transfers until a result is parsed or every url failed.

        Raises (via `_process_fetch_failure`): NoDataReceivedFromCaster,
        ExceededTimeoutError or UnableToConnect when no valid result arrives.
        """
        while not self._result_found:
            ret, num_handles = self._multicurl.perform()
            if ret != pycurl.E_CALL_MULTI_PERFORM:
                break
        while num_handles:
            self._multicurl.select(MULTICURL_SELECT_TIMEOUT)
            while not self._result_found:
                ret, num_handles = self._multicurl.perform()
                self._read_multicurl_info()
                if self._result_found:
                    return
                if ret != pycurl.E_CALL_MULTI_PERFORM:
                    break
        self._process_fetch_failure()
    def _read_multicurl_info(self):
        """Collect finished transfers: parse successes, remember failures.

        Stops as soon as a result is found so that a later transfer whose
        parse raises NoDataReceivedFromCaster cannot clobber an
        already-parsed result (previously the found-check sat after the
        loop where it was dead code).
        """
        _, successful_curls, failed_curls = self._multicurl.info_read()
        self._curls_failed.extend(failed_curls)
        for curl in successful_curls:
            self._process_successful_curl(curl)
            if self._result_found:
                return
    def _process_successful_curl(self, curl):
        """Run the parser callback on a completed transfer's buffer."""
        curl_results = self._buffers[curl].getvalue()
        url_processed = curl.getinfo(pycurl.EFFECTIVE_URL)
        self.urls_processed.append(url_processed)
        logger.info('DataFetcher: Trying to parse curl response from "%s"', url_processed)
        try:
            self.results = self._parser_method(curl_results)
            logger.info('DataFetcher: Results from "%s" is processed successfully', url_processed)
        except NoDataReceivedFromCaster:
            self.results = None
            logger.info('DataFetcher: No valid data found in curl response from "%s"', url_processed)
    def _process_fetch_failure(self):
        """- If the number of processed URL's is equal to the number of URL's
        which are requested to poll, this means that no data received from casters.
        - If in failed curls list timeout error exist, use it as a fail reason.
        - If no curls with exceeded timeout are found, throw UnableToConnect
        with first failed curl reason.
        - Otherwise, there are no failed curls and all curls which are succeeds
        received no data from the caster, so throw a NoDataReceivedFromCaster.
        """
        logger.info('DataFetcher: No valid result is received')
        if len(self.urls_processed) == len(self.urls):
            raise NoDataReceivedFromCaster()
        for _, error_code, error_text in self._curls_failed:
            if error_code == PYCURL_TIMEOUT_ERRNO:
                raise ExceededTimeoutError(error_text)
        if self._curls_failed:
            _, _, error_text = self._curls_failed[0]
            raise UnableToConnect(error_text)
        raise NoDataReceivedFromCaster()
    def teardown(self):
        """Unregister and close all curl handles, then drop the buffers."""
        for curl in self.curls:
            self._multicurl.remove_handle(curl)
        self._multicurl.close()
        for curl in self.curls:
            curl.close()
        logger.info('DataFetcher: Curls are closed successfully')
        self._buffers = {}
class NtripBrowser(object):
    """Download and parse the sourcetable of an NTRIP caster.

    Parameters
    ----------
    host : str
        Caster host; an 'http://' or 'https://' prefix is stripped.
    port : int
        Caster port, defaults to the common NTRIP port 2101.
    timeout : int
        Per-transfer timeout in seconds, forwarded to the DataFetcher.
    coordinates : tuple or None
        Observer (latitude, longitude) used for distance annotation.
    maxdist : float or None
        Maximum distance in kilometers; farther entries are trimmed.
    """
    def __init__(self, host, port=2101, timeout=4,  # pylint: disable-msg=too-many-arguments
                 coordinates=None, maxdist=None):
        self._host = None
        self.host = host
        self.port = port
        self.timeout = timeout
        self.coordinates = coordinates
        self.maxdist = maxdist
        self._fetcher = DataFetcher(self.urls, self.timeout, self._process_raw_data)
    @property
    def host(self):
        return self._host
    @host.setter
    def host(self, host):
        # Keep only the bare host name; schemes are re-added by `urls`.
        host = host.replace('http://', '')
        host = host.replace('https://', '')
        self._host = host
    @property
    def urls(self):
        """Candidate urls: root and '/sourcetable.txt', over http and https."""
        http_url = '{}{}:{}'.format('http://', self.host, self.port)
        https_url = '{}{}:{}'.format('https://', self.host, self.port)
        http_sourcetable_url = '{}{}'.format(http_url, '/sourcetable.txt')
        https_sourcetable_url = '{}{}'.format(https_url, '/sourcetable.txt')
        return [http_url, http_sourcetable_url, https_url, https_sourcetable_url]
    def get_mountpoints(self):
        """Fetch and return the parsed mountpoint dictionary of the caster."""
        self._fetcher.setup()
        self._fetcher.read_data()
        self._fetcher.teardown()
        return self._fetcher.results
    def _process_raw_data(self, raw_data):
        """Parser callback: decode, split into tables, annotate distances, trim."""
        decoded_raw_ntrip = self._decode_data(raw_data)
        ntrip_tables = self._get_ntrip_tables(decoded_raw_ntrip)
        ntrip_dictionary = self._form_ntrip_entries(ntrip_tables)
        ntrip_dictionary = self._add_distance(ntrip_dictionary)
        return self._trim_outlying(ntrip_dictionary)
    @staticmethod
    def _decode_data(data):
        # Fall back to utf8 when encoding detection yields nothing.
        data_encoding = cchardet.detect(data)['encoding']
        return data.decode('utf8' if not data_encoding else data_encoding)
    def _get_ntrip_tables(self, data):
        """Split raw text into (STR, CAS, NET) rows; raise if all are empty."""
        ntrip_tables = self._extract_ntrip_entry_strings(data)
        if not any(ntrip_tables):
            raise NoDataReceivedFromCaster()
        return ntrip_tables
    @staticmethod
    def _extract_ntrip_entry_strings(raw_table):
        """Group sourcetable lines by their STR/CAS/NET prefix."""
        str_list, cas_list, net_list = [], [], []
        for row in raw_table.splitlines():
            if row.startswith('STR'):
                str_list.append(row)
            elif row.startswith('CAS'):
                cas_list.append(row)
            elif row.startswith('NET'):
                net_list.append(row)
        return str_list, cas_list, net_list
    def _form_ntrip_entries(self, ntrip_tables):
        return {
            'str': self._form_dictionaries(STR_HEADERS, ntrip_tables[0]),
            'cas': self._form_dictionaries(CAS_HEADERS, ntrip_tables[1]),
            'net': self._form_dictionaries(NET_HEADERS, ntrip_tables[2])
        }
    @staticmethod
    def _form_dictionaries(headers, line_list):
        """Zip each semicolon-separated row (minus its type prefix) with headers."""
        def form_line(index):
            line = index.split(';', len(headers))[1:]
            return dict(list(zip(headers, line)))
        return [form_line(i) for i in line_list]
    def _add_distance(self, ntrip_dictionary):
        return {
            'cas': self._add_distance_column(ntrip_dictionary.get('cas')),
            'net': self._add_distance_column(ntrip_dictionary.get('net')),
            'str': self._add_distance_column(ntrip_dictionary.get('str'))
        }
    def _add_distance_column(self, ntrip_type_dictionary):
        """Annotate every entry with its distance to `coordinates` (or None)."""
        for station in ntrip_type_dictionary:
            latlon = self._get_float_coordinates((station.get('Latitude'), station.get('Longitude')))
            station['Distance'] = self._get_distance(latlon)
        return ntrip_type_dictionary
    @staticmethod
    def _get_float_coordinates(obs_point):
        """Parse coordinate strings (accepting ',' decimals); None on failure."""
        def to_float(arg):
            try:
                return float(arg.replace(',', '.'))
            except (ValueError, AttributeError):
                return None
        return [to_float(coordinate) for coordinate in obs_point]
    def _get_distance(self, obs_point):
        """Distance in km from `obs_point` to `coordinates`, or None."""
        if self.coordinates:
            try:
                return geodesic(obs_point, self.coordinates).kilometers
            except ValueError:
                logger.warning("Unable calculate the geodesic distance between points, %s, %s",
                               obs_point, self.coordinates)
        return None
    def _trim_outlying(self, ntrip_dictionary):
        if (self.maxdist is not None) and (self.coordinates is not None):
            return {
                'cas': self._trim_outlying_casters(ntrip_dictionary.get('cas')),
                'net': self._trim_outlying_casters(ntrip_dictionary.get('net')),
                'str': self._trim_outlying_casters(ntrip_dictionary.get('str'))
            }
        return ntrip_dictionary
    def _trim_outlying_casters(self, ntrip_type_dictionary):
        """Return entries closer than `maxdist`, sorted by distance.

        Entries whose distance could not be computed (Distance is None)
        are treated as outlying and dropped; previously they raised a
        TypeError when compared against `maxdist` under Python 3.
        """
        def by_distance(row):
            return row['Distance'] is not None and row['Distance'] < self.maxdist
        inlying_casters = list(filter(by_distance, ntrip_type_dictionary))
        inlying_casters.sort(key=lambda row: row['Distance'])
        return inlying_casters
|
<gh_stars>1-10
'Tests for roller-balance server.'
import collections
import decimal
import os.path
import uuid
# pylint: disable=unused-import
import pytest
# pylint: enable=unused-import
import accounting
import db
import etherscan
import logs
import web
LOGGER = logs.logging.getLogger('roller.test')
# Ten synthetic 40-character addresses ('000...0' through '999...9').
ADDRESSES = [40*str(digit) for digit in range(10)]
# Details of deposits already made on ropsten.
SAFE = '5afe51A3E2f4bfDb689DDf89681fe09116b6894A'
DEPOSITS = [dict(
    source='7f6041155c0db03eb2b49abf2d61b370b4253ef7', amount=500000000000000000, block_number=11527328,
    transaction='ca3f6c423a4f66dd53e59498af42562d66c0a32d83faf6eb44d41102433c28d1'
), dict(
    source='44569aa35ff6d97e6531880712a41d2af72a007c', amount=500000000000000000, block_number=11527333,
    transaction='e055965dc4e848cfa188bf623dc62ad46142669809f132a591155543b15b035b')]
DEPOSIT_BLOCK_RANGE = (11527328, 11527333)
# Rebind ADDRESSES so the two real deposit sources come first (indexes 0 and 1),
# followed by the ten synthetic addresses above.
ADDRESSES = [DEPOSITS[idx]['source'] for idx in range(2)] + ADDRESSES
# Details of payments already made on ropsten.
PAYMENTS_ADDRESS = '5afe51A3E2f4bfDb689DDf89681fe09116b6894A'
PAYMENT_TRANSACTION = 'b90bab2330e838922a78ddad28f8c0fe0749b9e6a4434c2dc367a88e71197c7b'
PAYMENTS = [
    dict(address='7f6041155c0db03eb2b49abf2d61b370b4253ef7', amount=350000000000000000),
    dict(address='44569aa35ff6d97e6531880712a41d2af72a007c', amount=350000000000000000)]
# Details of a non multisend ether transaction.
PAYMENTS_ADDRESS_INVALID = 'd59af98e9b8885829aa5924e482549e2c24a50b9'
PAYMENT_TRANSACTION_INVALID = '17e9cdbec1030c129d8bf8d64b9a5fc54fce60d2b84ddf6f14a4b68384d197f2'
def initialize_test_database():
    'Wipe and recreate the database for testing.'
    # Safety guard: refuse to nuke anything that is not a *_test database.
    assert db.DB_NAME.endswith('_test'), f"will not run accounting tests on non test database {db.DB_NAME}"
    db.nuke_database_and_create_new_please_think_twice()
def get_last_transaction_idx():
    'Return the idx of the most recent row in the transactions table.'
    with db.sql_connection() as cursor:
        cursor.execute('SELECT idx FROM transactions ORDER BY idx DESC LIMIT 1')
        newest_row = cursor.fetchone()
    return newest_row['idx']
def fake_transaction_hash():
    'Create a fake transaction hash.'
    # Two hex UUIDs give 64 random characters; prefixing with 'fake-' and
    # truncating keeps the result exactly 64 characters long.
    random_part = uuid.uuid4().hex + uuid.uuid4().hex
    return ('fake-' + random_part)[:64]
def test_accounting_basic():
    'Test deposits, transfers and withdrawals against a fresh database.'
    initialize_test_database()
    # Test deposits.
    assert accounting.get_balance(ADDRESSES[0]) == 0
    accounting.debug_deposit(ADDRESSES[0], 1, fake_transaction_hash())
    assert accounting.get_balance(ADDRESSES[0]) == 1
    accounting.debug_deposit(ADDRESSES[0], 10, fake_transaction_hash())
    assert accounting.get_balance(ADDRESSES[0]) == 11
    # Test transfers.
    assert accounting.get_balance(ADDRESSES[1]) == 0
    accounting.transfer(ADDRESSES[0], ADDRESSES[1], 2)
    assert accounting.get_balance(ADDRESSES[0]) == 9
    assert accounting.get_balance(ADDRESSES[1]) == 2
    # Test withdraw. `withdrawals` mirrors the expected shape of
    # accounting.get_unsettled_withdrawals(): address -> list of dicts.
    withdrawals = collections.defaultdict(list)
    transaction_idx = get_last_transaction_idx()
    assert accounting.get_unsettled_withdrawals() == withdrawals
    accounting.withdraw(ADDRESSES[0], 3)
    assert accounting.get_balance(ADDRESSES[0]) == 6
    # Each withdraw creates exactly one new transactions row, so the
    # expected idx is tracked by incrementing the last seen idx.
    transaction_idx += 1
    withdrawals[ADDRESSES[0]].append(dict(idx=transaction_idx, amount=decimal.Decimal(3)))
    assert accounting.get_unsettled_withdrawals() == withdrawals
    accounting.withdraw(ADDRESSES[0], 4)
    assert accounting.get_balance(ADDRESSES[0]) == 2
    transaction_idx += 1
    withdrawals[ADDRESSES[0]].append(dict(idx=transaction_idx, amount=decimal.Decimal(4)))
    assert accounting.get_unsettled_withdrawals() == withdrawals
    accounting.withdraw(ADDRESSES[1], 2)
    assert accounting.get_balance(ADDRESSES[1]) == 0
    transaction_idx += 1
    withdrawals[ADDRESSES[1]].append(dict(idx=transaction_idx, amount=decimal.Decimal(2)))
    assert accounting.get_unsettled_withdrawals() == withdrawals
def test_accounting_with_etherscan():
    'Test integration of accounting with etherscan module.'
    initialize_test_database()
    assert accounting.get_balance(accounting.SAFE) == 0
    accounting.scan_for_deposits(*DEPOSIT_BLOCK_RANGE)
    # The safe's balance goes negative: it owes one roller per
    # WEI_DEPOSIT_FOR_ONE_ROLLER deposited.
    assert accounting.get_balance(accounting.SAFE) == (
        -1 * sum([deposit['amount'] for deposit in DEPOSITS]) // accounting.WEI_DEPOSIT_FOR_ONE_ROLLER)
    assert not accounting.get_unsettled_withdrawals()
    withdrawals = collections.defaultdict(list)
    transaction_idx = get_last_transaction_idx()
    # Withdraw each deposit's full roller balance and mirror the expected
    # unsettled-withdrawals structure.
    for deposit in DEPOSITS:
        balance = accounting.get_balance(deposit['source'])
        assert balance * accounting.WEI_DEPOSIT_FOR_ONE_ROLLER == deposit['amount']
        accounting.withdraw(deposit['source'], balance)
        transaction_idx += 1
        withdrawals[deposit['source']].append(dict(idx=transaction_idx, amount=decimal.Decimal(balance)))
    assert accounting.get_unsettled_withdrawals() == withdrawals
    assert accounting.settle(PAYMENT_TRANSACTION) == dict(settled_transactions_count=2, unsettled_transaction_count=0)
    assert not accounting.get_unsettled_withdrawals()
    # Test full scan and make sure we hit the same block twice.
    accounting.scan_for_deposits()
    accounting.scan_for_deposits()
    # Make sure we hit old data.
    with db.sql_connection() as sql:
        sql.execute('UPDATE deposit_scans SET end_block = %s', (DEPOSIT_BLOCK_RANGE[0],))
    with pytest.raises(accounting.ScanError):
        accounting.scan_for_deposits()
def test_accounting_errors(monkeypatch):
    'Test accounting errors.'
    initialize_test_database()
    accounting.debug_deposit(ADDRESSES[0], 1, fake_transaction_hash())
    # Spending more than the balance must fail for transfers and withdrawals.
    with pytest.raises(accounting.InsufficientFunds):
        accounting.transfer(ADDRESSES[0], ADDRESSES[1], 2)
    with pytest.raises(accounting.InsufficientFunds):
        accounting.withdraw(ADDRESSES[0], 2)
    # Corrupt one deposit amount and feed it via a patched etherscan.
    bad_deposits = [deposit.copy() for deposit in DEPOSITS]
    bad_deposits[0]['amount'] -= 1
    monkeypatch.setattr(etherscan, 'get_deposits', lambda *args, **kwargs: bad_deposits)
    accounting.scan_for_deposits(*DEPOSIT_BLOCK_RANGE)
    # A payment amount mismatch must make settlement fail.
    bad_payments = [payment.copy() for payment in PAYMENTS]
    bad_payments[0]['amount'] -= 1
    monkeypatch.setattr(etherscan, 'get_payments', lambda *args, **kwargs: bad_payments)
    with pytest.raises(accounting.SettleError):
        accounting.settle(PAYMENT_TRANSACTION)
    for deposit in DEPOSITS:
        balance = accounting.get_balance(deposit['source'])
        accounting.withdraw(deposit['source'], balance)
    # A duplicated unsettled withdrawal must also make settlement fail.
    bad_withdrawals = accounting.get_unsettled_withdrawals()
    bad_withdrawals[ADDRESSES[0]].append(bad_withdrawals[ADDRESSES[0]][0])
    monkeypatch.setattr(accounting, 'get_unsettled_withdrawals', lambda *args, **kwargs: bad_withdrawals)
    with pytest.raises(accounting.SettleError):
        accounting.settle(PAYMENT_TRANSACTION)
def test_webserver_errors():
    'General webserver errors.'
    initialize_test_database()
    web.DEBUG = False
    with web.APP.test_client() as client:
        # Debug-only endpoints are forbidden in production mode.
        error_response = client.post('/deposit', data=dict(address=ADDRESSES[0], amount=10))
        assert error_response.status == '403 FORBIDDEN'
        # Missing required argument.
        error_response = client.post('/get_balance')
        assert error_response.status == '400 BAD REQUEST'
        assert error_response.json == dict(
            status=400, error_name='ArgumentMismatch',
            error_message='request does not contain arguments(s): address')
        # Unexpected extra argument.
        error_response = client.post('/get_balance', data=dict(bad_argument='stam', address=ADDRESSES[0]))
        assert error_response.status == '400 BAD REQUEST'
        assert error_response.json == dict(
            status=400, error_name='ArgumentMismatch',
            error_message='request contain unexpected arguments(s): bad_argument')
        # Non-integer amounts.
        for bad_amount in ['string', 1.1]:
            error_response = client.post('/deposit', data=dict(address=ADDRESSES[0], amount=bad_amount))
            assert error_response.status == '400 BAD REQUEST'
            assert error_response.json == dict(
                status=400, error_name='ArgumentMismatch',
                error_message='argument amount has to be an integer')
        # Non-positive amounts.
        for bad_amount in [0, -1]:
            error_response = client.post('/deposit', data=dict(address=ADDRESSES[0], amount=bad_amount))
            assert error_response.status == '400 BAD REQUEST'
            assert error_response.json == dict(
                status=400, error_name='ArgumentMismatch',
                error_message='argument amount must be larger than zero')
        # Malformed addresses: wrong length, then a non-hex character.
        for bad_address, error_message in [
                (f"{ADDRESSES[0][:-1]}", 'argument address must be 40 characters long'),
                (f"{ADDRESSES[0][:-1]}g", 'argument address is not a hex string')
        ]:
            error_response = client.post('/get_balance', data=dict(address=bad_address))
            assert error_response.status == '400 BAD REQUEST'
            assert error_response.json == dict(
                status=400, error_name='ArgumentMismatch',
                error_message=error_message)
        # Malformed transaction hashes: wrong length, then a non-hex string.
        for bad_tx_hash, error_message in [
                (63*'0', 'argument transaction_hash must be 64 characters long'),
                (64*'g', 'argument transaction_hash is not a hex string')
        ]:
            error_response = client.post('/settle', data=dict(transaction_hash=bad_tx_hash))
            assert error_response.status == '400 BAD REQUEST'
            assert error_response.json == dict(
                status=400, error_name='ArgumentMismatch',
                error_message=error_message)
        # Internal errors triggered on purpose by the debug endpoint.
        for reason in ['response', 'exception']:
            error_response = client.post('/five_hundred', data=dict(reason=reason))
            assert error_response.status == '500 INTERNAL SERVER ERROR'
        assert client.get('/no_such_endpoint').status == '403 FORBIDDEN'
def test_webserver_debug():
    'Test an almost full flow in debug mode.'
    initialize_test_database()
    web.DEBUG = True
    with web.APP.test_client() as client:
        # NOTE(review): 'prices_repsonse' is a typo of 'prices_response';
        # kept as-is since it is only a local name.
        prices_repsonse = client.get('/get_prices')
        assert prices_repsonse.status == '200 OK'
        assert prices_repsonse.json == dict(
            status=200, safe=accounting.SAFE,
            wei_deposit_for_one_roller=accounting.WEI_DEPOSIT_FOR_ONE_ROLLER,
            wei_withdraw_for_one_roller=accounting.WEI_WITHDRAW_FOR_ONE_ROLLER)
        balance_response = client.post('/get_balance', data=dict(address=ADDRESSES[0]))
        assert balance_response.status == '200 OK'
        assert balance_response.json == dict(status=200, balance=0)
        # Deposit 100 rollers (allowed only in debug mode).
        deposit_response = client.post('/deposit', data=dict(address=ADDRESSES[0], amount=100))
        assert deposit_response.status == '201 CREATED'
        assert client.post('/get_balance', data=dict(address=ADDRESSES[0])).json['balance'] == 100
        # Over-budget transfer is rejected, then a valid one succeeds.
        transfer_response = client.post('/transfer', data=dict(
            source=ADDRESSES[0], target=ADDRESSES[1], amount=101))
        assert transfer_response.status == '400 BAD REQUEST'
        assert transfer_response.json == dict(
            status=400, error_name='InsufficientFunds',
            error_message=f"address {ADDRESSES[0]} has less than 101 rollers")
        transfer_response = client.post('/transfer', data=dict(
            source=ADDRESSES[0], target=ADDRESSES[1], amount=10))
        assert transfer_response.status == '201 CREATED'
        assert client.post('/get_balance', data=dict(address=ADDRESSES[0])).json['balance'] == 90
        assert client.post('/get_balance', data=dict(address=ADDRESSES[1])).json['balance'] == 10
        assert client.get('/get_unsettled_withdrawals').status == '200 OK'
        assert client.get('/get_unsettled_withdrawals').json['unsettled_withdrawals'] == ''
        # Over-budget withdrawal is rejected, then valid ones succeed.
        withdraw_response = client.post('/withdraw', data=dict(address=ADDRESSES[0], amount=91))
        assert withdraw_response.status == '400 BAD REQUEST'
        assert withdraw_response.json == dict(
            status=400, error_name='InsufficientFunds',
            error_message=f"address {ADDRESSES[0]} has less than 91 rollers")
        withdraw_response = client.post('/withdraw', data=dict(address=ADDRESSES[0], amount=5))
        assert withdraw_response.status == '201 CREATED'
        assert client.post('/get_balance', data=dict(address=ADDRESSES[0])).json['balance'] == 85
        assert client.get('/get_unsettled_withdrawals').json['unsettled_withdrawals'] != ''
        withdraw_response = client.post('/withdraw', data=dict(address=ADDRESSES[1], amount=5))
        assert withdraw_response.status == '201 CREATED'
        assert client.post('/get_balance', data=dict(address=ADDRESSES[1])).json['balance'] == 5
        assert client.get('/get_unsettled_withdrawals').json['unsettled_withdrawals'] != ''
def test_webserver_payment_flow():
    'To test the full flow we run a production webserver.'
    initialize_test_database()
    accounting.scan_for_deposits(*DEPOSIT_BLOCK_RANGE)
    web.DEBUG = False
    with web.APP.test_client() as client:
        # Withdraw the full roller balance of every on-chain deposit.
        for deposit in DEPOSITS:
            roller_balance = deposit['amount'] // accounting.WEI_DEPOSIT_FOR_ONE_ROLLER
            balance_response = client.post('/get_balance', data=dict(address=deposit['source']))
            assert balance_response.json == dict(status=200, balance=roller_balance)
            client.post('/withdraw', data=dict(address=deposit['source'], amount=roller_balance))
        # Settling against the known payment transaction clears the queue.
        assert client.get('/get_unsettled_withdrawals').json['unsettled_withdrawals'] != ''
        assert client.post('/settle', data=dict(transaction_hash=PAYMENT_TRANSACTION)).status == '201 CREATED'
        assert client.get('/get_unsettled_withdrawals').json['unsettled_withdrawals'] == ''
def test_etherscan(monkeypatch):
    'Test etherscan module (hits the live ropsten API).'
    assert etherscan.get_latest_block_number() > 0
    assert etherscan.get_deposits(SAFE, *DEPOSIT_BLOCK_RANGE) == DEPOSITS
    assert etherscan.get_payments(PAYMENTS_ADDRESS, PAYMENT_TRANSACTION) == PAYMENTS
    # Test a non matching address.
    assert etherscan.get_payments(PAYMENTS_ADDRESS_INVALID, PAYMENT_TRANSACTION) == []
    # Test a non multisend transaction.
    assert etherscan.get_payments(PAYMENTS_ADDRESS_INVALID, PAYMENT_TRANSACTION_INVALID) == []
    # Empty headers should make the API calls fail; restore them afterwards.
    original_headers = etherscan.ETHERSCAN_HEADERS
    etherscan.ETHERSCAN_HEADERS = {}
    with pytest.raises(etherscan.EtherscanError):
        etherscan.get_latest_block_number()
    with pytest.raises(etherscan.EtherscanError):
        etherscan.get_deposits(SAFE, *DEPOSIT_BLOCK_RANGE)
    etherscan.ETHERSCAN_HEADERS = original_headers
    # A non-hex API answer must be rejected.
    monkeypatch.setattr(etherscan, 'call', lambda *args, **kwargs: 'not a hex string')
    with pytest.raises(etherscan.EtherscanError):
        etherscan.get_latest_block_number()
def test_database(monkeypatch, tmp_path):
    'Test database access.'
    initialize_test_database()
    with db.sql_connection() as sql:
        sql.execute('SELECT 1 FROM transactions')
    # Invalid SQL must surface the underlying driver error.
    with pytest.raises(db.pymysql.MySQLError):
        with db.sql_connection() as sql:
            sql.execute('bad sql')
    # Try bad migrations: broken SQL, a python file without apply(), and
    # a python file that does not even parse.
    monkeypatch.setattr(db, 'MIGRATIONS_DIRECTORY', tmp_path)
    for migration, migration_file_name in (
            ('Bad SQL;', '0.bad.sql'),
            ('# No apply function.', '0.bad.py'),
            ('Bad python', '0.bad.py')
    ):
        with open(os.path.join(tmp_path, migration_file_name), 'w', encoding='utf-8') as migration_file:
            migration_file.write(migration)
        # It's okay, really.
        # pylint: disable=cell-var-from-loop
        monkeypatch.setattr(db.os, 'listdir', lambda *args, **kwargs: [migration_file_name])
        # pylint: enable=cell-var-from-loop
        with pytest.raises(db.FailedMigration):
            initialize_test_database()
        # monkeypatch.undo()
    # Invalid migration file names.
    monkeypatch.setattr(db.os.path, 'isfile', lambda *args, **kwargs: True)
    monkeypatch.setattr(db.os, 'listdir', lambda *args, **kwargs: [
        '0.schema.sqnot', 'schema.sql', '/tmp', '0.schema.sql', '0.duplicate.sql'])
    with pytest.raises(db.DuplicateMigrationNumber):
        initialize_test_database()
    monkeypatch.undo()
def test_logs():
    'Exercise the log setup path purely for coverage.'
    suppressed = ['foo']
    web.logs.setup(suppress_loggers=suppressed)
|
<gh_stars>0
import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from pySDC.projects.FastWaveSlowWave.HookClass_acoustic import dump_energy
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.acoustic_helpers.standard_integrators import bdf2, dirk, trapezoidal, rk_imex
from pySDC.projects.FastWaveSlowWave.AcousticAdvection_1D_FD_imex_multiscale import acoustic_1d_imex_multiscale
def compute_and_plot_solutions():
    """
    Routine to compute and plot the solutions of SDC(2), IMEX, BDF-2 and RK for a multiscale problem

    Runs the SDC controller and three standard integrators over the same
    acoustic advection problem, then saves a pressure comparison plot to
    data/multiscale-K<maxiter>-M<num_nodes>.png.
    """
    num_procs = 1
    t0 = 0.0
    Tend = 3.0
    nsteps = 154  # 154 is value in Vater et al.
    dt = Tend / float(nsteps)
    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-10
    level_params['dt'] = dt
    # This comes as read-in for the step class
    step_params = dict()
    step_params['maxiter'] = 2
    # This comes as read-in for the problem class
    problem_params = dict()
    problem_params['cadv'] = 0.05
    problem_params['cs'] = 1.0
    problem_params['nvars'] = [(2, 512)]
    problem_params['order_adv'] = 5
    problem_params['waveno'] = 5
    # This comes as read-in for the sweeper class
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = 2
    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 30
    controller_params['hook_class'] = dump_energy
    # Fill description dictionary for easy hierarchy creation
    description = dict()
    description['problem_class'] = acoustic_1d_imex_multiscale
    description['problem_params'] = problem_params
    description['sweeper_class'] = imex_1st_order
    description['sweeper_params'] = sweeper_params
    description['step_params'] = step_params
    description['level_params'] = level_params
    # instantiate the controller
    controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params,
                                   description=description)
    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)
    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
    # instantiate standard integrators to be run for comparison
    trap = trapezoidal((P.A + P.Dx).astype('complex'), 0.5)
    bdf2_m = bdf2(P.A + P.Dx)
    dirk_m = dirk((P.A + P.Dx).astype('complex'), step_params['maxiter'])
    rkimex = rk_imex(P.A.astype('complex'), P.Dx.astype('complex'), step_params['maxiter'])
    # Flatten the (velocity, pressure) initial state into one vector.
    y0_tp = np.concatenate((uinit.values[0, :], uinit.values[1, :]))
    y0_bdf = y0_tp
    y0_dirk = y0_tp.astype('complex')
    y0_imex = y0_tp.astype('complex')
    # Perform time steps with standard integrators
    for i in range(0, nsteps):
        # trapezoidal rule step
        ynew_tp = trap.timestep(y0_tp, dt)
        # BDF-2 scheme (two-step: needs a special first step and keeps the
        # previous solution in ym1_bdf)
        if i == 0:
            ynew_bdf = bdf2_m.firsttimestep(y0_bdf, dt)
            ym1_bdf = y0_bdf
        else:
            ynew_bdf = bdf2_m.timestep(y0_bdf, ym1_bdf, dt)
        # DIRK scheme
        ynew_dirk = dirk_m.timestep(y0_dirk, dt)
        # IMEX scheme
        ynew_imex = rkimex.timestep(y0_imex, dt)
        # Shift states for the next step.
        y0_tp = ynew_tp
        ym1_bdf = y0_bdf
        y0_bdf = ynew_bdf
        y0_dirk = ynew_dirk
        y0_imex = ynew_imex
    # Finished running standard integrators
    # Split each flat state vector back into velocity and pressure halves.
    unew_tp, pnew_tp = np.split(ynew_tp, 2)
    unew_bdf, pnew_bdf = np.split(ynew_bdf, 2)
    unew_dirk, pnew_dirk = np.split(ynew_dirk, 2)
    unew_imex, pnew_imex = np.split(ynew_imex, 2)
    fs = 8
    rcParams['figure.figsize'] = 2.5, 2.5
    # rcParams['pgf.rcfonts'] = False
    fig = plt.figure()
    sigma_0 = 0.1
    k = 7.0 * 2.0 * np.pi
    x_0 = 0.75
    x_1 = 0.25
    print('Maximum pressure in SDC: %5.3e' % np.linalg.norm(uend.values[1, :], np.inf))
    print('Maximum pressure in DIRK: %5.3e' % np.linalg.norm(pnew_dirk, np.inf))
    print('Maximum pressure in RK-IMEX: %5.3e' % np.linalg.norm(pnew_imex, np.inf))
    # Only plot BDF-2 for the 2nd-order comparison run.
    if dirk_m.order == 2:
        plt.plot(P.mesh, pnew_bdf, 'd-', color='c', label='BDF-2', markevery=(50, 75))
    # Analytic slow-mode Gaussian advected by cadv over [0, Tend].
    p_slow = np.exp(-np.square(np.mod(P.mesh - problem_params['cadv'] * Tend, 1.0) - x_0) / (sigma_0 * sigma_0))
    plt.plot(P.mesh, p_slow, '--', color='k', markersize=fs - 2, label='Slow mode', dashes=(10, 2))
    # Skip the IMEX curve when it blew up.
    if np.linalg.norm(pnew_imex, np.inf) <= 2:
        plt.plot(P.mesh, pnew_imex, '+-', color='r', label='IMEX(' + str(rkimex.order) + ')', markevery=(1, 75),
                 mew=1.0)
    plt.plot(P.mesh, uend.values[1, :], 'o-', color='b', label='SDC(' + str(step_params['maxiter']) + ')',
             markevery=(25, 75))
    plt.plot(P.mesh, pnew_dirk, '-', color='g', label='DIRK(' + str(dirk_m.order) + ')')
    plt.xlabel('x', fontsize=fs, labelpad=0)
    plt.ylabel('Pressure', fontsize=fs, labelpad=0)
    fig.gca().set_xlim([0, 1.0])
    fig.gca().set_ylim([-0.5, 1.1])
    fig.gca().tick_params(axis='both', labelsize=fs)
    plt.legend(loc='upper left', fontsize=fs, prop={'size': fs}, handlelength=3)
    fig.gca().grid()
    filename = 'data/multiscale-K' + str(step_params['maxiter']) + '-M' + str(sweeper_params['num_nodes']) + '.png'
    plt.gcf().savefig(filename, bbox_inches='tight')
# Entry point: compute the solutions and save the comparison plot.
if __name__ == "__main__":
    compute_and_plot_solutions()
|
<gh_stars>1-10
"""Reward Calculator for DRL"""
import numpy as np
import scipy.spatial
from geometry_msgs.msg import Pose2D
from typing import Dict, Tuple, Union
class RewardCalculator:
    def __init__(
        self,
        robot_radius: float,
        safe_dist: float,
        goal_radius: float,
        rule: str = "rule_00",
        extended_eval: bool = False,
    ):
        """A factory class for reward calculation. Holds various reward functions.

        An overview of the reward functions can be found under:
        https://github.com/ignc-research/arena-rosnav/blob/local_planner_subgoalmode/docs/DRL-Training.md#reward-functions

        Possible reward functions: "_rule_00_", "_rule_01_", "_rule_02_", "_rule_03_", "_rule_04_"

        Args:
            robot_radius (float): Robots' radius in meters.
            safe_dist (float): Robots' safe distance in meters.
            goal_radius (float): Radius of the goal.
            rule (str, optional): The desired reward function name. Defaults to "rule_00".
            extended_eval (bool, optional): Extended evaluation mode. Defaults to False.

        Raises:
            KeyError: If `rule` is not a key of the internal rule table.
        """
        # Reward accumulated for the current step.
        self.curr_reward = 0
        # additional info will be stored here and be returned along with reward.
        self.info = {}
        self.robot_radius = robot_radius
        self.goal_radius = goal_radius
        # Episode-scoped memories, populated on the fly by the rule helpers.
        self.last_goal_dist = None
        self.last_dist_to_path = None
        self.last_action = None
        # Effective safety margin: robot body plus the configured clearance.
        self.safe_dist = robot_radius + safe_dist
        self._extended_eval = extended_eval
        self.kdtree = None
        # Rule table holds plain (unbound) functions; `get_reward` passes
        # `self` explicitly when dispatching.
        self._cal_funcs = {
            "rule_00": RewardCalculator._cal_reward_rule_00,
            "rule_01": RewardCalculator._cal_reward_rule_01,
            "rule_02": RewardCalculator._cal_reward_rule_02,
            "rule_03": RewardCalculator._cal_reward_rule_03,
            "rule_04": RewardCalculator._cal_reward_rule_04,
        }
        self.cal_func = self._cal_funcs[rule]
def reset(self) -> None:
"""Resets variables related to the episode."""
self.last_goal_dist = None
self.last_dist_to_path = None
self.last_action = None
self.kdtree = None
def _reset(self) -> None:
"""Resets variables related to current step."""
self.curr_reward = 0
self.info = {}
    def get_reward(
        self,
        laser_scan: np.ndarray,
        goal_in_robot_frame: Tuple[float, float],
        *args,
        **kwargs
    ) -> Tuple[float, Dict[str, Union[str, int, bool]]]:
        """Returns reward and info to the gym environment.

        Args:
            laser_scan (np.ndarray): 2D laser scan data.
            goal_in_robot_frame (Tuple[float, float]): Position (rho, theta) of the goal in the robot frame (polar coordinate).

        Returns:
            Tuple[float, Dict[str, Union[str, int, bool]]]: Tuple of calculated rewards for the current step, \
            and the reward information dictionary.
        """
        self._reset()
        # `cal_func` stores an unbound function from the rule table, so
        # `self` is passed explicitly as the first argument.
        self.cal_func(self, laser_scan, goal_in_robot_frame, *args, **kwargs)
        return self.curr_reward, self.info
    def _cal_reward_rule_00(
        self,
        laser_scan: np.ndarray,
        goal_in_robot_frame: Tuple[float, float],
        *args,
        **kwargs
    ):
        """Reward function: 'rule_00'.

        Description:
            "rule_00" incorporates the most instinctive characteristics for
            learning navigation into its reward calculation. The reward is
            made up of only 4 summands: the success reward, the collision
            reward, the danger (safe-distance) reward and the progress
            reward. Similar reward functions were utilized in numerous
            research projects and produced promising results, thus this
            rule is the basis for the extended rules below.

        Args:
            laser_scan (np.ndarray): 2D laser scan data.
            goal_in_robot_frame (Tuple[float, float]): Position (rho, theta) of the goal in the robot frame (polar coordinate).
        """
        # The _reward_* helpers are defined further down in this class
        # (outside this view); presumably they accumulate into
        # self.curr_reward / self.info — confirm in their definitions.
        self._reward_goal_reached(goal_in_robot_frame)
        self._reward_safe_dist(laser_scan, punishment=0.25)
        self._reward_collision(laser_scan)
        self._reward_goal_approached(
            goal_in_robot_frame, reward_factor=0.3, penalty_factor=0.4
        )
    def _cal_reward_rule_01(
        self,
        laser_scan: np.ndarray,
        goal_in_robot_frame: Tuple[float, float],
        *args,
        **kwargs
    ):
        """Reward function: 'rule_01'.

        Description:
            Extends "rule_00" by a penalty factor that acts like an abstract
            fuel consumption: each action is penalized depending on the
            velocity, imposing a harsher punishment for dissipated driving.

        Args:
            laser_scan (np.ndarray): 2D laser scan data.
            goal_in_robot_frame (Tuple[float, float]): Position (rho, theta) of the goal in the robot frame (polar coordinate).
        """
        # Requires kwargs["action"]; raises KeyError when the caller does
        # not supply it.
        self._reward_distance_traveled(
            kwargs["action"], consumption_factor=0.0075
        )
        self._reward_goal_reached(goal_in_robot_frame, reward=15)
        self._reward_safe_dist(laser_scan, punishment=0.25)
        self._reward_collision(laser_scan, punishment=10)
        self._reward_goal_approached(
            goal_in_robot_frame, reward_factor=0.3, penalty_factor=0.4
        )
    def _cal_reward_rule_02(
        self,
        laser_scan: np.ndarray,
        goal_in_robot_frame: Tuple[float, float],
        *args,
        **kwargs
    ):
        """Reward function 'rule_02'.

        Builds on 'rule_01' and additionally assesses progress with respect
        to the global plan: an extra summand rewards the agent for following
        the global plan. Since it shares almost the same composition as
        'rule_01', similar performance is expected; the intent is a faster
        learning, more goal-oriented agent thanks to the plan information.

        Args:
            laser_scan (np.ndarray): 2D laser scan data.
            goal_in_robot_frame (Tuple[float, float]): Position (rho, theta)
                of the goal in the robot frame (polar coordinates).

        Note:
            Requires ``kwargs["action"]``, ``kwargs["global_plan"]`` and
            ``kwargs["robot_pose"]``.
        """
        self._reward_distance_traveled(
            kwargs["action"], consumption_factor=0.0075
        )
        self._reward_following_global_plan(
            kwargs["global_plan"], kwargs["robot_pose"]
        )
        self._reward_goal_reached(goal_in_robot_frame, reward=15)
        self._reward_safe_dist(laser_scan, punishment=0.25)
        self._reward_collision(laser_scan, punishment=10)
        self._reward_goal_approached(
            goal_in_robot_frame, reward_factor=0.3, penalty_factor=0.4
        )
    def _cal_reward_rule_03(
        self,
        laser_scan: np.ndarray,
        goal_in_robot_frame: Tuple[float, float],
        *args,
        **kwargs
    ):
        """Reward function 'rule_03'.

        Builds on the summands of 'rule_00' and adds two global-plan based
        factors: one rewarding travel along the global path and one valuing
        the change of distance to it -- positive when approaching the path,
        negative when veering away from it.

        Args:
            laser_scan (np.ndarray): 2D laser scan data.
            goal_in_robot_frame (Tuple[float, float]): Position (rho, theta)
                of the goal in the robot frame (polar coordinates).

        Note:
            Requires ``kwargs["action"]``, ``kwargs["global_plan"]`` and
            ``kwargs["robot_pose"]``.
        """
        self._reward_following_global_plan(
            kwargs["global_plan"], kwargs["robot_pose"], kwargs["action"]
        )
        if laser_scan.min() > self.safe_dist:
            self._reward_distance_global_plan(
                kwargs["global_plan"],
                kwargs["robot_pose"],
                reward_factor=0.2,
                penalty_factor=0.3,
            )
        else:
            # While obstacles are within the safe distance the path-distance
            # reward is suspended and its memory reset.
            self.last_dist_to_path = None
        self._reward_goal_reached(goal_in_robot_frame, reward=15)
        self._reward_safe_dist(laser_scan, punishment=0.25)
        self._reward_collision(laser_scan, punishment=10)
        self._reward_goal_approached(
            goal_in_robot_frame, reward_factor=0.3, penalty_factor=0.4
        )
    def _cal_reward_rule_04(
        self,
        laser_scan: np.ndarray,
        goal_in_robot_frame: Tuple[float, float],
        *args,
        **kwargs
    ):
        """Reward function 'rule_04'.

        Extends 'rule_03' with an additional term punishing abrupt changes
        of the driving direction. Earlier test runs showed agents tended to
        drive in a tail motion; this penalty was introduced to adjust that
        behavior.

        Args:
            laser_scan (np.ndarray): 2D laser scan data.
            goal_in_robot_frame (Tuple[float, float]): Position (rho, theta)
                of the goal in the robot frame (polar coordinates).

        Note:
            Requires ``kwargs["action"]``, ``kwargs["global_plan"]`` and
            ``kwargs["robot_pose"]``.
        """
        self._reward_abrupt_direction_change(kwargs["action"])
        self._reward_following_global_plan(
            kwargs["global_plan"], kwargs["robot_pose"], kwargs["action"]
        )
        if laser_scan.min() > self.safe_dist:
            self._reward_distance_global_plan(
                kwargs["global_plan"],
                kwargs["robot_pose"],
                reward_factor=0.2,
                penalty_factor=0.3,
            )
        else:
            # While obstacles are within the safe distance the path-distance
            # reward is suspended and its memory reset.
            self.last_dist_to_path = None
        self._reward_goal_reached(goal_in_robot_frame, reward=15)
        self._reward_safe_dist(laser_scan, punishment=0.25)
        self._reward_collision(laser_scan, punishment=10)
        self._reward_goal_approached(
            goal_in_robot_frame, reward_factor=0.3, penalty_factor=0.4
        )
def _reward_goal_reached(
self, goal_in_robot_frame: Tuple[float, float], reward: float = 15
):
"""Reward for reaching the goal.
Args:
goal_in_robot_frame (Tuple[float, float], optional): Position (rho, theta) of the goal in the robot frame (polar coordinate).
reward (float, optional): Reward amount for reaching the goal. Defaults to 15.
"""
if goal_in_robot_frame[0] < self.goal_radius:
self.curr_reward = reward
self.info["is_done"] = True
self.info["done_reason"] = 2
self.info["is_success"] = 1
else:
self.info["is_done"] = False
def _reward_goal_approached(
self,
goal_in_robot_frame=Tuple[float, float],
reward_factor: float = 0.3,
penalty_factor: float = 0.5,
):
"""Reward for approaching the goal.
Args:
goal_in_robot_frame ([type], optional): Position (rho, theta) of the goal in the robot frame (polar coordinate). Defaults to Tuple[float, float].
reward_factor (float, optional): Factor to be multiplied when the difference between current distance to goal and the previous one is positive. \
Defaults to 0.3.
penalty_factor (float, optional): Factor to be multiplied when the difference between current distance to goal and the previous one is negative. Defaults to 0.5.
"""
if self.last_goal_dist is not None:
# goal_in_robot_frame : [rho, theta]
# higher negative weight when moving away from goal
# (to avoid driving unnecessary circles when train in contin. action space)
if (self.last_goal_dist - goal_in_robot_frame[0]) > 0:
w = reward_factor
else:
w = penalty_factor
reward = w * (self.last_goal_dist - goal_in_robot_frame[0])
# print("reward_goal_approached: {}".format(reward))
self.curr_reward += reward
self.last_goal_dist = goal_in_robot_frame[0]
def _reward_collision(self, laser_scan: np.ndarray, punishment: float = 10):
"""Reward for colliding with an obstacle.
Args:
laser_scan (np.ndarray): 2D laser scan data.
punishment (float, optional): Punishment amount for collisions. Defaults to 10.
"""
if laser_scan.min() <= self.robot_radius:
self.curr_reward -= punishment
if not self._extended_eval:
self.info["is_done"] = True
self.info["done_reason"] = 1
self.info["is_success"] = 0
else:
self.info["crash"] = True
def _reward_safe_dist(
self, laser_scan: np.ndarray, punishment: float = 0.15
):
"""Reward for undercutting safe distance.
Args:
laser_scan (np.ndarray): 2D laser scan data.
punishment (float, optional): Punishment amount. Could be applied in consecutive timesteps. \
Defaults to 0.15.
"""
if laser_scan.min() < self.safe_dist:
self.curr_reward -= punishment
if self._extended_eval:
self.info["safe_dist"] = True
def _reward_not_moving(
self, action: np.ndarray = None, punishment: float = 0.01
):
"""Reward for not moving.
Args:
action (np.ndarray, optional): Array of shape (2,). First entry, linear velocity. \
Second entry, angular velocity. Defaults to None.
punishment (float, optional): Punishment for not moving. Defaults to 0.01.
Note:
Only applies half of the punishment amount when angular velocity is larger than zero.
"""
if action is not None and action[0] == 0.0:
self.curr_reward -= (
punishment if action[1] == 0.0 else punishment / 2
)
def _reward_distance_traveled(
self,
action: np.array = None,
punishment: float = 0.01,
consumption_factor: float = 0.005,
):
"""Reward for driving a certain distance. Supposed to represent "fuel consumption".
Args:
action (np.array, optional): Array of shape (2,). First entry, linear velocity. \
Second entry, angular velocity. Defaults to None.
punishment (float, optional): Punishment when action can't be retrieved. Defaults to 0.01.
consumption_factor (float, optional): Factor for the weighted velocity punishment. Defaults to 0.005.
"""
if action is None:
self.curr_reward -= punishment
else:
lin_vel = action[0]
ang_vel = action[1]
reward = (lin_vel + (ang_vel * 0.001)) * consumption_factor
self.curr_reward -= reward
    def _reward_distance_global_plan(
        self,
        global_plan: np.array,
        robot_pose: Pose2D,
        reward_factor: float = 0.1,
        penalty_factor: float = 0.15,
    ):
        """Reward approaching (punish veering away from) the global plan.

        The reward is the weighted difference between the previous distance
        to the global plan and the current one.

        Args:
            global_plan (np.array): Array containing 2D poses.
            robot_pose (Pose2D): Robot position.
            reward_factor (float, optional): Weight when the distance to the
                global plan decreased. Defaults to 0.1.
            penalty_factor (float, optional): Weight when the distance to the
                global plan increased. Defaults to 0.15.
        """
        if global_plan is not None and len(global_plan) != 0:
            curr_dist_to_path, idx = self.get_min_dist2global_kdtree(
                global_plan, robot_pose
            )
            # First call after a reset only records the distance; the reward
            # needs a previous distance to compare against.
            if self.last_dist_to_path is not None:
                if curr_dist_to_path < self.last_dist_to_path:
                    w = reward_factor
                else:
                    w = penalty_factor
                self.curr_reward += w * (
                    self.last_dist_to_path - curr_dist_to_path
                )
            self.last_dist_to_path = curr_dist_to_path
    def _reward_following_global_plan(
        self,
        global_plan: np.array,
        robot_pose: Pose2D,
        action: np.array = None,
        dist_to_path: float = 0.5,
    ):
        """Reward travelling along the global plan.

        Grants a small reward proportional to the linear velocity whenever
        the robot is within ``dist_to_path`` of the global plan.

        Args:
            global_plan (np.array): Array containing 2D poses.
            robot_pose (Pose2D): Robot position.
            action (np.ndarray, optional): Array of shape (2,). First entry,
                linear velocity. Second entry, angular velocity. Defaults to
                None, in which case no reward is granted.
            dist_to_path (float, optional): Maximum distance to the global
                path for the reward to apply. Defaults to 0.5.
        """
        if (
            global_plan is not None
            and len(global_plan) != 0
            and action is not None
        ):
            curr_dist_to_path, idx = self.get_min_dist2global_kdtree(
                global_plan, robot_pose
            )
            if curr_dist_to_path <= dist_to_path:
                # 0.1 * linear velocity while on/near the path.
                self.curr_reward += 0.1 * action[0]
def get_min_dist2global_kdtree(
self, global_plan: np.array, robot_pose: Pose2D
) -> Tuple[float, int]:
"""Calculates minimal distance to global plan using kd-tree-search.
Args:
global_plan (np.array): Array containing 2D poses.
robot_pose (Pose2D): Robot position.
Returns:
Tuple[float, int]: Distance to the closes pose and index of the closes pose.
"""
if self.kdtree is None:
self.kdtree = scipy.spatial.cKDTree(global_plan)
dist, index = self.kdtree.query([robot_pose.x, robot_pose.y])
return dist, index
def _reward_abrupt_direction_change(self, action: np.array = None):
"""Applies a penalty when an abrupt change of direction occured.
Args:
action (np.array, optional): Array of shape (2,). First entry, linear velocity. \
Second entry, angular velocity. Defaults to None.
"""
if self.last_action is not None:
curr_ang_vel = action[1]
last_ang_vel = self.last_action[1]
vel_diff = abs(curr_ang_vel - last_ang_vel)
self.curr_reward -= (vel_diff ** 4) / 2500
self.last_action = action
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009 <NAME> <<EMAIL>>
from dbus import PROPERTIES_IFACE
from telepathy.interfaces import CHANNEL_TYPE_DBUS_TUBE, CONN_INTERFACE, \
CHANNEL_INTERFACE, CHANNEL_INTERFACE_TUBE, CONNECTION
from coherence.extern.telepathy import client, tube
from coherence.dbus_constants import BUS_NAME, OBJECT_PATH, DEVICE_IFACE, SERVICE_IFACE
from coherence import dbus_service
class MirabeauTubeConsumerMixin(tube.TubeConsumerMixin):
    """Accepts D-Bus tubes offered by remote Coherence peers and exposes the
    devices published over them through ``got_devices_callback``."""
    def __init__(self, found_peer_callback=None, disapeared_peer_callback=None,
                 got_devices_callback=None):
        # Peer callbacks are handled by the base mixin; the devices callback
        # is specific to this class.
        tube.TubeConsumerMixin.__init__(self,
                                        found_peer_callback=found_peer_callback,
                                        disapeared_peer_callback=disapeared_peer_callback)
        self.got_devices_callback = got_devices_callback
        self.info("MirabeauTubeConsumer created")
        # Maps initiator handle -> {service name -> tube channel}.
        self._coherence_tubes = {}
    def pre_accept_tube(self, tube):
        """Accept a tube only when its initiator is a contact in the local
        'publish' or 'subscribe' roster group."""
        params = tube[PROPERTIES_IFACE].Get(CHANNEL_INTERFACE_TUBE, 'Parameters')
        initiator = params.get("initiator")
        for group in ("publish", "subscribe"):
            try:
                contacts = self.roster[group]
            except KeyError:
                self.debug("Group %r not in roster...", group)
                continue
            for contact_handle, contact in contacts.iteritems():
                if contact[CONNECTION + "/contact-id"] == initiator:
                    return True
        return False
    def post_tube_accept(self, tube, tube_conn, initiator_handle):
        """Bind the matching remote proxy object to an accepted tube and,
        once all three Coherence tubes of a peer arrived, announce the peer."""
        service = tube.props[CHANNEL_TYPE_DBUS_TUBE + ".ServiceName"]
        if service == BUS_NAME:
            tube.remote_object = dbus_service.DBusPontoon(None, tube_conn)
        elif service == DEVICE_IFACE:
            tube.remote_object = dbus_service.DBusDevice(None, tube_conn)
        elif service == SERVICE_IFACE:
            tube.remote_object = dbus_service.DBusService(None, None, tube_conn)
        else:
            self.info("tube %r is not coming from Coherence", service)
            return tube_conn
        if initiator_handle not in self._coherence_tubes:
            self._coherence_tubes[initiator_handle] = {}
        self._coherence_tubes[initiator_handle][service] = tube
        # A Coherence peer exposes exactly three services (pontoon, device,
        # service); announce it once the set is complete.
        if len(self._coherence_tubes[initiator_handle]) == 3:
            self.announce(initiator_handle)
    def tube_closed(self, tube):
        """Notify the disappeared-peer callback, then run the base cleanup."""
        self.disapeared_peer_callback(tube)
        super(MirabeauTubeConsumerMixin, self).tube_closed(tube)
    def announce(self, initiator_handle):
        """Watch the pontoon tube's participants to resolve the initiator's
        bus name and fetch its devices; close tubes of removed participants."""
        service_name = BUS_NAME
        pontoon_tube = self._coherence_tubes[initiator_handle][service_name]
        def cb(participants, removed):
            # Invoked by telepathy with current and removed participants.
            if participants and initiator_handle in participants:
                initiator_bus_name = participants[initiator_handle]
                self.info("bus name %r for service %r", initiator_bus_name,
                          service_name)
                if initiator_bus_name is not None:
                    self.found_devices(initiator_handle, initiator_bus_name)
            for handle in removed:
                try:
                    tube_channels = self._coherence_tubes[handle]
                except KeyError:
                    self.debug("tube with handle %d not registered", handle)
                else:
                    for service_iface_name, channel in tube_channels.iteritems():
                        channel[CHANNEL_INTERFACE].Close()
                    del self._coherence_tubes[handle]
        pontoon_tube.remote_object.tube.watch_participants(cb)
    def found_devices(self, initiator_handle, initiator_bus_name):
        """Asynchronously fetch the remote pontoon's device list and build
        local proxies for every device and its services."""
        devices = []
        tubes = self._coherence_tubes[initiator_handle]
        pontoon_tube = tubes[BUS_NAME].remote_object.tube
        device_tube = tubes[DEVICE_IFACE].remote_object.tube
        service_tube = tubes[SERVICE_IFACE].remote_object.tube
        self.info("using pontoon tube at %r", tubes[BUS_NAME].object_path)
        def got_devices(pontoon_devices):
            # Success handler: build a proxy per device plus its services,
            # then hand the complete list to the callback.
            self.info("%r devices registered in remote pontoon", len(pontoon_devices))
            for device_dict in pontoon_devices:
                device_path = device_dict["path"]
                self.info("getting object at %r from %r", device_path,
                          initiator_bus_name)
                proxy = device_tube.get_object(initiator_bus_name, device_path)
                infos = proxy.get_info(dbus_interface=DEVICE_IFACE)
                service_proxies = []
                for service_path in device_dict["services"]:
                    service_proxy = service_tube.get_object(initiator_bus_name,
                                                            service_path)
                    service_proxies.append(service_proxy)
                proxy.services = service_proxies
                devices.append(proxy)
            self.got_devices_callback(devices)
        def got_error(exception):
            print ">>>", exception
        pontoon = pontoon_tube.get_object(initiator_bus_name, OBJECT_PATH)
        pontoon.get_devices_async(1, reply_handler=got_devices,
                                  error_handler=got_error)
class MirabeauTubeConsumer(MirabeauTubeConsumerMixin, client.Client):
    """Concrete consumer client: joins the MUC and accepts every offered tube."""
    logCategory = "mirabeau_tube_consumer"
    def __init__(self, manager, protocol, account, muc_id, conference_server,
                 found_peer_callback=None, disapeared_peer_callback=None,
                 got_devices_callback=None):
        MirabeauTubeConsumerMixin.__init__(self,
                found_peer_callback=found_peer_callback,
                disapeared_peer_callback=disapeared_peer_callback,
                got_devices_callback=got_devices_callback)
        client.Client.__init__(self, manager, protocol, account, muc_id, conference_server)
    def got_tube(self, tube):
        # Accept every tube offered; filtering happens in pre_accept_tube().
        client.Client.got_tube(self, tube)
        self.accept_tube(tube)
    def tube_opened(self, tube):
        # Bugfix: the super() call previously referenced the undefined name
        # 'MirabeauTubePublisherConsumer', raising NameError at runtime.
        tube_conn = super(MirabeauTubeConsumer, self).tube_opened(tube)
        # NOTE(review): post_tube_accept() is declared with an
        # initiator_handle parameter that is not passed here -- confirm the
        # intended call signature against the mixin.
        self.post_tube_accept(tube, tube_conn)
        return tube_conn
|
import time, os, sys
import threading
import queue
from .tools import win32, img
import shutil
import os
import uuid
import random
from datetime import date
from pprint import pprint
import pyperclip
Task_Queue = queue.Queue()    # pending Task objects consumed by the worker thread
Result_Queue = queue.Queue()  # finished Task objects awaiting pickup by their caller
task_timeout = 30  # seconds a waiting caller polls for its result before giving up
class TaskWork(threading.Thread):
    """Background worker thread executing queued Task objects serially."""

    def __init__(self):
        threading.Thread.__init__(self, name='api_task')
        self._task = None                # task currently being processed
        self.task_list = Task_Queue      # incoming tasks
        self.result_list = Result_Queue  # finished tasks awaiting their caller

    def run(self):
        """Process tasks forever: run each task's callable and, if the
        caller is waiting, publish the outcome on the result queue."""
        while True:
            self._task = self.task_list.get()
            print('{0}处理{1}级任务: {2}'.format(time.strftime("%Y-%m-%d %X"), self._task.priority, self._task.name))
            try:
                self._task.result = self._task.func()
                if self._task.wait:
                    self.result_list.put(self._task)
            except Exception as e:
                if self._task.wait:
                    # Robustness fix: e.args can be empty (e.g. Exception()),
                    # which previously raised IndexError here.
                    self._task.error = e.args[0] if e.args else str(e)
                    self.result_list.put(self._task)
            # Brief pause between tasks to throttle the GUI automation.
            time.sleep(0.5)
class Task(object):
    """A queued unit of work executed by the background worker thread."""

    def __init__(self, id, priority, func, wait=True, name=''):
        """Store task metadata; ``result``/``error`` are filled in later by
        the worker after it executed ``func``."""
        self.id = id              # unique identifier matching results to callers
        self.priority = priority  # scheduling priority (informational)
        self.name = name          # human-readable label used for logging
        self.func = func          # zero-argument callable run by the worker
        self.wait = wait          # whether the caller blocks for the result
        # Outcome slots, written by the worker thread after execution.
        self.result = self.error = None
# Start the single background worker as a daemon so it dies with the
# main process.
task_work = TaskWork()
# `Thread.setDaemon` is deprecated; assign the attribute directly.
task_work.daemon = True
task_work.start()
def task(priority=10, wait=True, name=''):
    """Decorator that routes calls of the wrapped function through the
    worker thread's task queue.

    Args:
        priority: Informational priority attached to the queued task.
        wait: When True the caller blocks, polling the result queue, until
            its own task finishes or ``task_timeout`` seconds elapsed.
        name: Human-readable task label used for logging.

    Returns:
        With ``wait=True`` the wrapper returns the task's result, or False
        on timeout; with ``wait=False`` it returns True right after
        enqueueing.
    """
    def decorator(func):
        def wrapper(*args, **kw):
            start = time.time()
            id = '{}-{}'.format(uuid.uuid1(), random.random()*1000)
            Task_Queue.put(Task(id, priority, lambda: func(*args, **kw), wait=wait, name=name))
            while wait:
                _task = Result_Queue.get()
                if _task.id == id:
                    return _task.result
                # Not ours: put it back for the owning caller to find.
                Result_Queue.put(_task)
                if time.time() - start > task_timeout:
                    return False
                time.sleep(0.5)
            return True
        return wrapper
    return decorator
class WechatClient(object):
    """Automates the WeChat desktop client through win32 window lookups,
    simulated mouse/keyboard input, screenshots and the clipboard.

    All pixel coordinates below are hard-coded against specific WeChat
    window layouts (login window 280x400, main window 851x500).
    """
    def __init__(self):
        self.title = '微信'  # window title of the WeChat client
        self.login_class = 'WeChatLoginWndForPC' # login window class, 280x400
        self.tip_class = 'AlertDialog'
        self.body_class = 'WeChatMainWndForPC' # main window class, 851x500
        self.search_icon = os.path.join(os.path.dirname(__file__), 'static', 'image', 'search_icon.jpg')
        self.temp_dir = os.path.join(os.path.dirname(__file__), 'static', 'image', 'temp')
        self.account_dir = os.path.join(os.path.dirname(__file__), 'static', 'image', 'account')
        self.pdf_dir = os.path.join(os.path.dirname(__file__), 'static', 'pdf')
        self.all_account = {}  # maps account id -> main window handle
        self.send_count = 0  # number of messages sent since the last cleanup
        self.clear_logout_tips()
        # self.get_account_list()
    def clear_logout_tips(self, sleep=4.0):
        """Dismiss 'logged out' alert dialogs by clicking them away."""
        time.sleep(sleep)
        for h in win32.get_all_hwnds(self.tip_class, self.title):
            win32.move2(h, 180, 188)
            win32.click()
    def clear_hot_key_tips(self, sleep=2.0):
        """Dismiss hot-key conflict confirmation dialogs."""
        time.sleep(sleep)
        for h in win32.get_all_hwnds('ConfirmDialog', '热键冲突'):
            win32.move2(h, 244, 188)
            win32.click()
    def clear_open_file_window(self, sleep=1.0):
        """Close stray file-open dialogs with ESC."""
        time.sleep(sleep)
        for h in win32.get_all_hwnds('#', '打开', fuzzy=True):
            win32.key('ESC')
            time.sleep(0.2)
    def login(self):
        """Trigger the QR-code login flow.

        Returns:
            dict with the login window handle ('hwnd') and the screenshot
            path of the QR code ('url'), or False when no login window is
            present.
        """
        login_hwnds = win32.get_all_hwnds(self.login_class, None)
        if login_hwnds:
            hwnd = login_hwnds[0]
            self.clear_logout_tips(0.3)
            win32.move2(hwnd, 140, 352)
            win32.click()
            # Screenshot the login QR code area.
            time.sleep(0.2)
            img_dir = win32.screen_shot(hwnd, x1=2, y1=30, x2=278, y2=370,temp_dir=self.temp_dir)
            return {'hwnd': hwnd, 'url': img_dir}
        return False
    def check_login(self, hwnd, wait=False):
        """Return True once the login window disappeared (login succeeded).

        With wait=True, poll once per second for up to 20 seconds.
        """
        if not wait:
            return not win32.is_window(hwnd)
        for i in range(20):
            if win32.is_window(hwnd):
                time.sleep(1)
            else:
                return True
        return False
    def loginout(self, hwnd):
        """Log out the account behind the given main window via the
        settings dialog; always returns True."""
        if win32.is_window(hwnd):
            r = win32.get_window_rect(hwnd)  # window rectangle
            win32.move2(hwnd, 30, r[3] - r[1] - 24)
            win32.click()
            time.sleep(0.2)
            win32.move2(hwnd, 120, r[3] - r[1] - 35, False)
            win32.click()
            time.sleep(0.2)
            setting_hwnd_list = win32.get_all_hwnds('SettingWnd', '设置')
            for s_hwnd in setting_hwnd_list:
                s_rect = win32.get_window_rect(s_hwnd)  # window rectangle
                # Only act on the expected ~550x470 settings window.
                if 545 < s_rect[2] - s_rect[0] < 555 and 465 < s_rect[3] - s_rect[1] < 475:
                    win32.move2(s_hwnd, 322, 284, False)
                    win32.click()
                    time.sleep(0.3)
                    confirm_hwnd_list = win32.get_all_hwnds('ConfirmDialog', '微信')
                    for c_hwnd in confirm_hwnd_list:
                        win32.move2(c_hwnd, 225, 190, False)
                        win32.click()
        return True
    def get_account_list(self):
        """Refresh and return the mapping of logged-in accounts to windows.

        Windows that died are dropped; for every newly found main window
        the account id is read from the settings dialog via the clipboard.
        """
        copy = self.all_account.copy()
        for k, v in copy.items():
            if not win32.is_window(v):
                self.all_account.pop(k)
        exists = list(self.all_account.values())
        all_hwnds = win32.get_all_hwnds(self.body_class, self.title)
        all_hwnds = [h for h in all_hwnds if h not in exists]
        if all_hwnds:
            self.clear_screen_and_file(False)
        for hwnd in all_hwnds:
            r = win32.get_window_rect(hwnd)  # window rectangle
            win32.move2(hwnd, 30, r[3] - r[1] - 24)
            win32.click()
            time.sleep(0.2)
            win32.move2(hwnd, 120, r[3] - r[1] - 35, False)
            win32.click()
            time.sleep(0.2)
            setting_hwnd_list = win32.get_all_hwnds('SettingWnd', '设置')
            for s_hwnd in setting_hwnd_list:
                pyperclip.copy('')
                s_rect = win32.get_window_rect(s_hwnd)  # window rectangle
                if 545 < s_rect[2] - s_rect[0] < 555 and 465 < s_rect[3] - s_rect[1] < 475:
                    win32.move2(s_hwnd, 320, 235, False)
                    win32.click()
                    time.sleep(0.1)
                    win32.click()
                    time.sleep(0.2)
                    win32.ctrl_c()
                    time.sleep(0.2)
                    win32.key('ESC')
                    # The account id now sits in the clipboard.
                    account = pyperclip.paste()
                    account = account.replace('微信号:', '')
                    account = account.replace(' ','').replace(' ', '')
                    if account:
                        pprint(account)
                        self.all_account.update({account: hwnd})
                    # (A long block of dead, commented-out OCR-based account
                    # recognition code was removed here.)
        return self.all_account
    def check_exist(self, file_dir):
        """Match a screenshot against cached account images.

        Returns a matching account id (similarity > 0.9) or False.
        """
        jpg_list = [x for x in os.listdir(self.account_dir)]
        result = []
        for jpg_name in jpg_list:
            account = os.path.splitext(jpg_name)[0]
            jpg_path = os.path.join(self.account_dir, jpg_name)
            r = img.compare(file_dir, jpg_path)
            if r > 0.9:
                result.append((account, r))
        if result:
            # NOTE(review): sorts ascending and picks result[0], i.e. the
            # LOWEST similarity above the threshold -- confirm this is
            # intended (reverse=True would pick the best match).
            result.sort(key=lambda x: x[1])
            result = result[0][0]
            return result
        return False
    def send_msg(self, account, friend, content, msg_type='text'):
        """Send a message or file to a friend from the given account.

        :param account: WeChat account id (key into all_account)
        :param friend: friend search keyword pasted into the search box
        :param content: message text, or a file path when msg_type == 'file'
        :param msg_type: 'text' or 'file'
        :return: True on success, False when the account window or the
            friend cannot be found
        """
        hwnd = self.all_account.get(account)
        if hwnd and win32.is_window(hwnd):
            win32.move2(hwnd, 150, 10)
            win32.click()
            time.sleep(0.2)
            win32.move2(hwnd, 155, 40)
            win32.click()
            time.sleep(0.2)
            win32.paste_text(friend)
            time.sleep(1)
            if not self.check_friend(hwnd):
                return False
            win32.move2(hwnd, 180, 100)
            win32.click()
            time.sleep(0.5)
            rect = win32.get_window_rect(hwnd)  # window rectangle
            if msg_type == 'text':
                win32.paste_text(content)
            elif msg_type == 'file':
                # Open the attach-file dialog and paste the file path.
                win32.move(rect[0] + 377, rect[3] - 119)
                time.sleep(0.2)
                win32.click()
                time.sleep(0.3)
                win32.paste_text(content)
                time.sleep(0.4)
                win32.key('o', ['ALT'])
            # send
            win32.move(rect[2] - 60, rect[3] - 21)
            win32.click()
            self.clear_screen_and_file()
            return True
        self.clear_screen_and_file(False)
        return False
    def clear_screen_and_file(self, clear_file=True):
        """Dismiss leftover popup windows and periodically purge temp files."""
        # Hot-key conflict dialogs
        for h in win32.get_all_hwnds('ConfirmDialog', '热键冲突'):
            time.sleep(0.5)
            win32.move2(h, 244, 188)
            win32.click()
        # Logout alerts
        for h in win32.get_all_hwnds(self.tip_class, self.title):
            time.sleep(0.5)
            win32.move2(h, 180, 188)
            win32.click()
        # File-open dialogs
        for h in win32.get_all_hwnds('#', '打开', fuzzy=True):
            time.sleep(0.5)
            win32.key('ESC')
        # Chat-history search windows
        for h in win32.get_all_hwnds('FTSMsgSearchWnd', '微信'):
            time.sleep(0.5)
            win32.active_window(h)
            win32.key('ESC')
        if not clear_file:
            return True
        # Purge temp files once more than 50 messages were sent.
        if self.send_count > 50:
            shutil.rmtree(self.temp_dir)
            # shutil.rmtree(self.pdf_dir)
            os.makedirs(self.temp_dir, 0o777)
            # os.makedirs(self.pdf_dir, 0o777)
            for path in [x for x in os.listdir(self.pdf_dir)]:
                # Date-named (YYYY-MM-DD) pdf folders older than 2 days are removed.
                if len(path) == 10 and path.count('-') == 2:
                    year, month, day = path.split('-')
                    today = date.today()
                    path_date = today.replace(year=int(year), month=int(month), day=int(day))
                    if (today - path_date).days >= 2:
                        shutil.rmtree(os.path.join(self.pdf_dir, path))
            self.send_count = 0
        else:
            self.send_count += 1
    def check_friend(self, hwnd):
        """Return False (and reset the search box) when the friend search
        found nothing, i.e. the search icon is still visible."""
        friend_img = win32.screen_shot(hwnd, name='friend-{}'.format(hwnd), x1=72, y1=105, x2=105, y2=137, temp_dir=self.temp_dir)
        r = img.compare(friend_img, self.search_icon)
        if r > 0.91:
            win32.move2(hwnd, 250, 37)
            win32.click()
            return False
        return True
# Single shared client instance; constructing it already dismisses pending
# logout dialogs (see WechatClient.__init__).
wechat = WechatClient()
@task(name='微信登入')
def login():
    # Queue the QR-code login flow on the worker thread and wait for it.
    return wechat.login()
@task(wait=False, name='消息发送')
def send_msg(*args, **kwargs):
    # Fire-and-forget message sending (wait=False returns immediately).
    return wechat.send_msg(*args, **kwargs)
@task(name='账户列表')
def get_account_list(*args, **kwargs):
    # Refresh the account list on the worker thread and block for it.
    return wechat.get_account_list(*args, **kwargs)
def check_account(account):
    """Return True when the given account is currently logged in."""
    wechat.get_account_list()
    hwnd = wechat.all_account.get(account)
    return True if hwnd else False
def check_login(hwnd, wait=False):
    """Check whether the login window disappeared (see WechatClient.check_login)."""
    return wechat.check_login(hwnd, wait=wait)
if __name__ == "__main__":
    pass
|
<gh_stars>1-10
#!/usr/bin/python
# (c) 2018 <NAME>. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Path Store unit test module. Tests in this module can be run like:
python3 path_store/test.py TestInsert
"""
# Exit if run other than as a module: this file only defines test cases;
# running it directly prints the usage note from the module docstring and
# exits with an error status.
if __name__ == '__main__':
    print(__doc__)
    raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Unit test module.
# https://docs.python.org/3.5/library/unittest.html
import unittest
#
# Local imports.
#
# Utilities.
from path_store.test.principal import Principal, SetCounterDict, SetCounterList
#
# Modules under test.
import pathstore
class TestInsert(unittest.TestCase):
    """Unit tests for pathstore.merge(): inserting values into points
    addressed by numeric, string and mixed paths, including the setter
    optimisation (no redundant set when the value is already in place)."""
    def test_empty(self):
        """Merging with no path (or an empty path) replaces the point."""
        point0 = None
        value = "mt"
        point1 = pathstore.merge(point0, value)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, value)
        point0 = "full"
        point1 = pathstore.merge(point0, value)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, value)
        # An empty path list behaves like no path at all.
        point1 = pathstore.merge(point0, value, [])
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, value)
    def test_zero(self):
        """Path [0] writes index 0, creating or reusing the outer list."""
        path = [0]
        value = "blob"
        point0 = None
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, ["blob"])
        point0 = []
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, ["blob"])
        point0 = [None]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, ["blob"])
        point0 = ["ma"]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, ["blob"])
    def test_one(self):
        """Path [1] pads with None, overwrites in place, and replaces
        incompatible point types (tuple, dict, str)."""
        path = [1]
        value = "blob"
        point0 = None
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, [None, "blob"])
        point0 = []
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, [None, "blob"])
        point0 = [None, None]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, [None, "blob"])
        point0 = ["ma"]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, ["ma", "blob"])
        point0 = ["ma", "mo"]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, ["ma", "blob"])
        point0 = ["ma", "mo", "mi"]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, ["ma", "blob", "mi"])
        point0 = ["ma", None, "mi"]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, ["ma", "blob", "mi"])
        # Immutable tuples get a new tuple back.
        point0 = ("ba",)
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, ("ba", "blob"))
        # Dicts and strings are incompatible with a numeric path: replaced.
        point0 = {'car': "veh"}
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, [None, "blob"])
        point0 = {'tooky':0, 'wonkey': "babb"}
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, [None, "blob"])
        point0 = "Stringiness"
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, [None, "blob"])
    def test_insert_None(self):
        """Merging None leaves the existing value untouched."""
        path = [1]
        point0 = [None, "goner"]
        point1 = pathstore.merge(point0, None, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, [None, "goner"])
    def test_zero_one(self):
        """Two-level numeric path [0, 1] descends and pads as needed."""
        path = [0, 1]
        value = "Inner"
        point0 = ["Outer String"]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, [[None, "Inner"]])
        point0 = [{'hand':"yy"}]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, [[None, "Inner"]])
        point0 = [[]]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, [[None, "Inner"]])
        # Compatible inner lists are reused, not replaced.
        point0_0 = []
        point0 = [point0_0]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertIs(point1[0], point0_0)
        self.assertEqual(point1, [[None, "Inner"]])
        point0_0 = ["Another"]
        point0 = [point0_0]
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertIs(point1[0], point0_0)
        self.assertIs(pathstore.get(point0, 0), point0_0)
        self.assertEqual(point1, [["Another", "Inner"]])
    def test_string(self):
        """A string path addresses a dict key, replacing incompatible
        points (None, int, list) with a fresh dict."""
        path = "blib"
        value = "blob"
        point0 = None
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, {'blib': "blob"})
        point0 = 5
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, {'blib': "blob"})
        # A one-element path list is equivalent to the bare string.
        point0 = None
        point1 = pathstore.merge(point0, value, [path])
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, {'blib': "blob"})
        point0 = {}
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, {'blib': "blob"})
        point0 = []
        point1 = pathstore.merge(point0, value, path)
        self.assertIsNot(point0, point1)
        self.assertEqual(point1, {'blib': "blob"})
        point0 = {'blib': "bleb"}
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, {'blib': "blob"})
        # Unrelated keys are preserved.
        point0 = {'blyb': "bleb"}
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, {'blib': "blob", 'blyb': "bleb"})
        point0 = {'blib': "bleb", 'blil': ["bib", "bab"]}
        point1 = pathstore.merge(point0, value, path)
        self.assertIs(point0, point1)
        self.assertEqual(point1, {'blib': "blob", 'blil': ["bib", "bab"]})
    def test_principal_root(self):
        """Merging into an object with a matching attribute keeps the
        object; a path that isn't an attribute replaces the root."""
        point0 = Principal()
        path = ('testAttr', "flim", 3, "flam")
        point1 = pathstore.merge(point0, 4, path)
        self.assertIs(point1, point0)
        self.assertIsInstance(point1, Principal)
        self.assertEqual(point1.testAttr
            , {'flim':[None, None, None, {'flam':4}]})
        point1 = pathstore.merge(point0, 5)
        self.assertIsNot(point1, point0)
        self.assertEqual(point1, 5)
        #
        # Default point maker replaces Principal at root.
        point1 = pathstore.merge(point0, 1, ('notAttr', "flim"))
        self.assertIsNot(point1, point0)
        self.assertEqual(point1, {'notAttr':{'flim':1}})
    def test_principal_leaf(self):
        """An arbitrary object can be stored as a leaf value."""
        point0 = None
        path = ('de', 'fgh', 'ij', 'kl')
        value = Principal()
        point1 = pathstore.merge(point0, value, path)
        self.assertEqual(point1, {'de':{'fgh':{'ij':{'kl': value}}}})
    def test_setter_optimisation_attr(self):
        """Setting an attribute to its current identical value is skipped."""
        principal = Principal()
        self.assertEqual(principal.setterCount, 0)
        principal1 = pathstore.merge(principal, "valley", 'countedStr')
        self.assertIs(principal, principal1)
        self.assertEqual(principal.countedStr, "valley")
        self.assertEqual(principal.setterCount, 1)
        value = "rift"
        principal1 = pathstore.merge(principal, value, 'countedStr')
        self.assertIs(principal, principal1)
        self.assertIs(principal.countedStr, value)
        self.assertEqual(principal.setterCount, 2)
        # Same object again: the setter must not fire a third time.
        principal1 = pathstore.merge(principal, value, 'countedStr')
        self.assertIs(principal, principal1)
        self.assertIs(principal.countedStr, value)
        self.assertEqual(principal.setterCount, 2)
    def test_setter_optimisation_dict(self):
        """Setting a dict key to its current identical value is skipped."""
        principal = SetCounterDict()
        self.assertEqual(principal.setterCount, 0)
        key = 'keen'
        principal1 = pathstore.merge(principal, "valley", key)
        self.assertIs(principal, principal1)
        self.assertEqual(principal[key], "valley")
        self.assertEqual(principal.setterCount, 1)
        value = "rift"
        principal1 = pathstore.merge(principal, value, key)
        self.assertIs(principal, principal1)
        self.assertIs(principal[key], value)
        self.assertEqual(principal.setterCount, 2)
        # NOTE(review): this assertIs runs before the re-merge below, unlike
        # the attr/list variants -- presumably a harmless ordering quirk.
        self.assertIs(principal, principal1)
        principal1 = pathstore.merge(principal, value, key)
        self.assertIs(principal[key], value)
        self.assertEqual(principal.setterCount, 2)
    def test_setter_optimisation_list(self):
        """Setting a list item to its current identical value is skipped."""
        principal = SetCounterList()
        self.assertEqual(principal.setterCount, 0)
        principal1 = pathstore.merge(principal, "valley", 0)
        self.assertIs(principal, principal1)
        self.assertEqual(principal, ["valley"])
        self.assertEqual(principal.setterCount, 1)
        value = "rift"
        principal1 = pathstore.merge(principal, value, 1)
        self.assertIs(principal, principal1)
        self.assertIs(principal[1], value)
        self.assertEqual(principal.setterCount, 2)
        self.assertEqual(principal, ["valley", value])
        # Same object again: the setter must not fire a third time.
        principal1 = pathstore.merge(principal, value, 1)
        self.assertIs(principal, principal1)
        self.assertIs(principal[1], value)
        self.assertEqual(principal.setterCount, 2)
|
from manga_py.providers import providers_list
from manga_py.fs import root_path
from manga_py.meta import repo_name
from json import dumps
from datetime import datetime
# Hand-maintained provider status table rendered on the gh-pages site.
start_items = [
    # [ address, (0 - not worked, 1 - worked, 2 - alias), 'Comment']
    ['http://com-x.life', 1, ' - One thread only!!! --no-multi-threads. <i class="v0"></i>'],
    ['http://comic-walker.com', 0, ' - Maybe...'],
    ['http://comico.jp', 1, ' - only public downloading now'],
    ['http://e-hentai.org', 1, '<i class="td"></i>'],
    ['http://dm5.com', 0, '<i class="d"></i> <b>Site down now</b>'],
    ['http://heavenmanga.biz', 2, '- See heavenmanga.site'],
    ['http://hentai-chan.me', 1, '- Need fill access file'],
    ['http://comic.k-manga.jp', 0, ' - Maybe...'],
    ['http://luscious.net', 1, '<i class="td"></i>'],
    ['http://lezhin.com', 0, ' - Maybe...'],
    ['http://mangaz.com', 0, ' - Maybe...'],
    ['http://s-manga.net', 0, ' - Maybe'],
    ['http://sunday-webry.com', 0, ' - Not worked decryption images now. In develop.'],
    ['http://tapas.io', 1, '<i class="v0"></i>, only public downloading now'],
    ['http://tsumino.com', 1, '<i class="d"></i>'],
    ['http://8muses.com', 0, '- Need decode page.'],
    ['http://mangago.me', 0, '- Need decode page.'],
    ['http://mangachan.me', 1, '- Site down now.'],
    ['http://digitalteamreader.netsons.org', 0, ' - Moved to http://dgtread.com Maybe later'],
    # NOTE(review): duplicate of the hentai-chan.me entry above with a
    # conflicting status (1 vs 0) — verify which entry is current.
    ['http://hentai-chan.me', 0, ' - Malicious site. Not recommended for visiting'],
    ['http://kobato.hologfx.com/reader', 1, ' - Reader offline'],
    ['http://mangaid.co', 1, ' - See https://bacamanga.co/'],
    ['http://reader.championscans.comco', 0, ' - See read.ptscans.com'],
    # NOTE(review): doubled scheme 'http://http://' looks like a typo — confirm address.
    ['http://http://reader.jokerfansub.com', 0, ' - Site down now'],
]
# Plain address list used by aggregate() to detect already-listed providers.
_start_items = [i[0] for i in start_items]
def merge(*providers):
    """Yield every entry registered in providers_list under each given key."""
    for key in providers:
        yield from providers_list[key]
def clean(providers):
    """Normalise provider URL patterns to unique ``http://host`` strings.

    Strips any path component and surrounding parentheses, unescapes
    regex-escaped dots, and de-duplicates while preserving first-seen order.
    """
    seen = {}  # dict used as an insertion-ordered set
    for pattern in providers:
        slash = pattern.find('/')
        if slash == -1:
            host = pattern.strip('()')
        else:
            host = pattern[:slash].strip('()')
        seen['http://' + host.replace(r'\.', '.')] = ''
    return list(seen.keys())
def aggregate(providers):
    """Return ``[address, 1, '']`` rows for providers not already hand-listed."""
    return [[address, 1, '']
            for address in providers
            if address not in _start_items]
def prepare_html(html):
    """Fill the __repo_name__ / __last_update__ placeholders in *html* in place."""
    with open(html, 'r') as source:
        content = source.read()
    content = content.replace('__repo_name__', repo_name)
    now = datetime.today()
    content = content.replace('__last_update__', '{}/{:0>2}/{:0>2} {:0>2}-{:0>2}-{:0>2}'.format(
        now.year, now.month, now.day, now.hour, now.minute, now.second
    ))
    with open(html, 'w') as target:
        target.write(content)
def build_providers():
    """Serialise the merged, de-duplicated, sorted provider table to JSON."""
    generated = aggregate(clean(merge(*providers_list)))
    return dumps(sorted(generated + start_items, key=lambda row: row[0]))
def main():
    """Write providers.json and refresh index.html under gh_pages_content."""
    base = root_path() + '/helpers/gh_pages_content/'
    with open(base + 'providers.json', 'w') as out:
        out.write(build_providers())
    prepare_html(base + 'index.html')
# print(len(build_providers()))
|
<filename>match_synsets_to_categories.py
import warnings
import argparse
import json
from pandas.io.json import json_normalize
from categories import Categories
import sys
from nltk.corpus import wordnet as wn
import pandas as pd
from tqdm import tqdm
import os
import re
# Silence a harmless ABI warning raised when compiled extensions (e.g. pandas)
# were built against a different numpy version.
warnings.filterwarnings(
    "ignore",
    message="numpy.dtype size changed, may indicate binary incompatibility. "
    "Expected 96, got 88")
# Register the tqdm integration for pandas (enables Series.progress_apply).
tqdm.pandas()
def get_hypernyms(synset):
    """Return the set of names of *synset* and all of its transitive hypernyms.

    The previous version appended each direct hypernym's name and then also
    unioned the recursive result — which already contains that name — and its
    docstring claimed it returned only the "top level category". This builds
    the same set without the redundant work.

    Args:
        synset: an object exposing ``name()`` and ``hypernyms()``
            (e.g. an NLTK WordNet synset).

    Returns:
        set[str]: the synset's own name plus every transitive hypernym name.
    """
    names = {synset.name()}
    for hypernym in synset.hypernyms():
        names |= get_hypernyms(hypernym)
    return names
def synset_in_category(synset, category):
    """Return the (possibly empty) intersection of the hypernym closure of
    the named synset with *category* (a set of synset names)."""
    return get_hypernyms(wn.synset(synset)) & category
def get_category_synsets_for_row(phrase, category):
    """Take a phrase as a list of (word, synset) tuples and return a list of
    (word, synset, category_synset) tuples, or None when nothing matches."""
    matches = []
    for word, synset in phrase:
        overlap = synset_in_category(synset, category)
        if overlap:
            matches.append((word, synset, overlap))
    return matches if matches else None
def dataset_to_pandas(data_file, dataset):
    """Accept raw JSON/TXT/other data and format it into a uniform Pandas
    data frame according to the dataset-specific rules.

    Exits with status 1 (after printing an error) for unknown datasets.
    """
    if dataset == 'vg-regions':
        with open(data_file) as f:
            raw = json.load(f)
        # Flatten the 'regions' objects into one row each:
        return json_normalize(raw, 'regions')
    if dataset == 'coco':
        with open(data_file) as f:
            raw = json.load(f)
        # Flatten the 'annotations' objects into one row each:
        return json_normalize(raw, 'annotations')
    if dataset == 'picsom':
        with open(data_file) as f:
            lines = f.readlines()
        rows = []
        for line in lines:
            line = line.rstrip()
            # First whitespace-free token is the label; the remainder holds
            # '#'-separated captions.
            m = re.match('([^ ]+) (.*)', line)
            assert m, 'ERROR: Reading gt input file failed'
            for caption in m.group(2).split(' # '):
                rows.append({'label': m.group(1), 'text': caption})
        return pd.DataFrame(rows)
    print("ERROR: Unknown dataset: {}.".format(dataset))
    sys.exit(1)
def main(args):
    """Tag each caption with the synsets that fall inside the requested
    category, then save the augmented data as JSON and a pickled DataFrame.
    """
    print("Loading {} caption data from {}".format(args.dataset, args.data_file))
    df = dataset_to_pandas(args.data_file, args.dataset)
    print("Loading sentence synsets from {}".format(args.synset_file))
    # Series of per-sentence (word, synset) tuple lists.
    # NOTE(review): assumed to align row-for-row with df — confirm upstream.
    sentence_syns = pd.read_json(args.synset_file, typ='series')
    print("Loading category '{}'".format(args.category))
    category = set(Categories[args.category])
    print("Getting data rows matching category '{}'".format(args.category))
    # progress_apply = pandas apply with a tqdm progress bar (tqdm.pandas()).
    category_synsets = sentence_syns.progress_apply(
        lambda x: get_category_synsets_for_row(x, category))
    print("Adding results to master data")
    df[args.category] = category_synsets
    # Output file names derive from the synset file name plus the category.
    file_name = os.path.basename(args.synset_file)
    file_name_no_ext = os.path.splitext(file_name)[0]
    output_file_name_json = '{}_{}.json'.format(file_name_no_ext, args.category)
    output_file_name_pandas = '{}_{}.pandas.pkl'.format(file_name_no_ext, args.category)
    output_file_json = os.path.join(args.output_path, output_file_name_json)
    output_file_pandas = os.path.join(args.output_path, output_file_name_pandas)
    print("Saving results to {} for user analysis".format(output_file_json))
    df.to_json(output_file_json, orient='records')
    print("Saving results to {} for further steps in the pipeline.".format(output_file_pandas))
    df.to_pickle(output_file_pandas)
    print("DONE!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='vg-regions',
help='Dataset to use')
parser.add_argument('--data_file', type=str,
help='original dataset in JSON/TXT/other format')
parser.add_argument('--synset_file', type=str,
help='JSON file containing synsets for the dataset. '
'Should have the same number of entries as the dataset file.')
parser.add_argument('--category', type=str, default='location',
help='Which category to do the stats for')
parser.add_argument('--output_path', type=str, default='output/',
help='path where to save output data (both JSON and serialized Pandas')
args = parser.parse_args()
main(args=args)
|
import sys
import os
this_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.abspath(os.path.join(this_path, os.pardir))
sys.path.append(root_path)
import torch
from utilities.vqa.dataset import *
from transformers import BertTokenizer
from datasets.creator import DatasetCreator, MultiPurposeDataset
from torch.utils.data import Dataset
from utilities.evaluation.beam_search import BeamSearchInput
from collections import Counter
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def create_datasets(base_path):
    """Build the BERT question-answering training/testing datasets under *base_path*.

    Tokenizes each question (plus the answer, for training) with BERT special
    tokens, tracks sequence-length frequencies, filters out rare training
    lengths, and pads everything to a common width.
    """
    # Frequency of total sequence lengths per split; used in post-processing
    # to drop rare training lengths and to size the padding.
    seq_counter = {
        'training': Counter(),
        'testing': Counter()
    }
    def elem_processing_fn(question_id, question, image_path, answer, split):
        # Tokenize the question and wrap it as [CLS] question [SEP].
        question_tkn = bert_tokenizer.encode(question)
        question_tkn = [bert_tokenizer.cls_token_id] + question_tkn + [bert_tokenizer.sep_token_id]
        question_tkn_len = len(question_tkn)
        if split == 'training':
            # Training sample: append the tokenized answer terminated by [SEP].
            answer_tkn = bert_tokenizer.encode(answer)
            answer_tkn = answer_tkn + [bert_tokenizer.sep_token_id]
            answer_tkn_len = len(answer_tkn)
            seq_counter[split].update([answer_tkn_len + question_tkn_len])
            sequence = question_tkn + answer_tkn
            input_mask = [1] * (question_tkn_len + answer_tkn_len)
            # Segment ids: 0 for question tokens, 1 for answer tokens.
            token_types = [0] * question_tkn_len + [1] * answer_tkn_len
            return [question_id, sequence, token_types, input_mask]
        else:
            # Testing sample: question only (answer is generated later).
            seq_counter[split].update([question_tkn_len])
            token_types = [0] * question_tkn_len
            return [question_id, question_tkn, token_types]
    def post_processing_fn(tr_data, ts_data, tr_size, ts_size):
        # Keep only training samples whose total length occurs > 1000 times,
        # so each surviving length bucket is well populated.
        tr_removed = len(tr_data)
        print('Removing short samples checking frequencies')
        tr_data = list(filter(lambda item: seq_counter['training'][len(item[1])] > 1000, tr_data))
        print(seq_counter)
        tr_removed -= len(tr_data)
        print('Removed {} from training data and {} from testing data'.format(tr_removed, 0))
        tr_data = tr_data[:tr_size]
        print('Len tr = {}, len ts = {}'.format(len(tr_data), len(ts_data)))
        # Padded width per split = longest surviving (training) / observed
        # (testing) sequence length.
        max_len_tr = 0
        max_len_ts = 0
        for length, freq in seq_counter['training'].items():
            if freq > 1000:
                if max_len_tr < length:
                    max_len_tr = length
        for length, freq in seq_counter['testing'].items():
            if max_len_ts < length:
                max_len_ts = length
        # Pad sequences
        # axis 1 = token ids (pad token), axis 2 = token types (pad with 1),
        # axis 3 = attention mask (pad with 0).
        print('Padding training sequences..')
        tr_data = DatasetCreator.pad_sequences(tr_data, axis=1, value=int(bert_tokenizer.pad_token_id),
                                               maxlen=max_len_tr)
        tr_data = DatasetCreator.pad_sequences(tr_data, axis=2, value=int(1),
                                               maxlen=max_len_tr)
        tr_data = DatasetCreator.pad_sequences(tr_data, axis=3, value=int(0),
                                               maxlen=max_len_tr)
        # NOTE(review): testing data is returned unpadded — confirm intended.
        return tr_data, ts_data
    DatasetCreator().create_together(tr_size=1000000, ts_size=100000,
                                     tr_destination=os.path.join(base_path, 'training'),
                                     ts_destination=os.path.join(base_path, 'testing'),
                                     elem_processing_fn=elem_processing_fn, post_processing_fn=post_processing_fn)
class BertBeamSearchInput(BeamSearchInput):
    """Beam-search input that also tracks BERT segment-id (token type) tensors.

    *seg_id* is the index of the segment-id tensor inside the argument lists;
    that tensor must grow by one element (value 1 = answer segment) every time
    a new token is generated.
    """
    def __init__(self, seq_idx, seg_id, logits_idx, *args):
        super().__init__(seq_idx, logits_idx, *args)
        self.seg_id = seg_id  # position of the segment-id tensor in the args
    def update_args(self, running_args, initial_args):
        """
        We have to update the segment id tensors every time a word is generated in BERT
        """
        # Append a column of 1s (answer segment) to every running beam, and a
        # single 1 to the initial args.
        # NOTE(review): device is hard-coded to 'cuda' — this fails on
        # CPU-only runs; confirm whether that is acceptable here.
        running_args[self.seg_id] = torch.cat(
            [running_args[self.seg_id], torch.ones(running_args[self.seg_id].shape[0], 1).long().to('cuda')], dim=1)
        initial_args[self.seg_id] = torch.cat([initial_args[self.seg_id], torch.ones(1).long().to('cuda')])
        return running_args, initial_args
class BertDataset(MultiPurposeDataset):
    """Dataset wrapper returning BERT-ready tensors.

    Training mode yields (sequence, token_types, input_mask) tensors;
    evaluation mode yields (id, beam-search input, ground truths).
    """

    def __getitem__(self, item):
        """Return one sample; layout depends on ``self.evaluating``.

        Fixed: the original tested ``self.evaluating`` twice, splitting the
        unpacking and the return of the same branch across two duplicated
        conditionals; each mode is now handled in a single branch.
        """
        sample = self.data[item]
        if not self.evaluating:
            # Training sample: token ids, segment ids and attention mask.
            _, sequence, token_types, input_mask = sample
            return torch.tensor(sequence).long(), torch.tensor(token_types).long(), torch.tensor(input_mask).long()
        # Evaluation sample: wrap the question for beam search and look up
        # the ground-truth answers by question id.
        __id, question, token_types = sample
        question = torch.tensor(question).long()
        token_types = torch.tensor(token_types).long()
        beam_input = BertBeamSearchInput(0, 1, 0, question, token_types)
        ground_truths = self.evaluation_data[str(__id)]
        return __id, beam_input, ground_truths
if __name__ == '__main__':
    # Build the BERT answering datasets under the project's resources tree.
    path = resources_path('models', 'baseline', 'answering', 'bert', 'data')
    create_datasets(path)
|
import os
import torch
import numpy as np
from . import base
from . import tools
class DQN(base.ValueNet):
    """Deep Q-Network agent backed by an experience-replay buffer."""

    def __init__(self, handle, env, sub_len, eps=1.0, memory_size=2**10, batch_size=64):
        super().__init__(env, handle)
        self.replay_buffer = tools.MemoryGroup(self.view_space, self.feature_space, self.num_actions, memory_size, batch_size, sub_len)

    def flush_buffer(self, **kwargs):
        """Push one batch of transition data into the replay buffer."""
        self.replay_buffer.push(**kwargs)

    def train(self):
        """Run one sweep of TD updates over every batch the buffer can supply."""
        self.replay_buffer.tight()
        for _ in range(self.replay_buffer.get_batch_num()):
            obs, feats, obs_next, feat_next, dones, rewards, actions, masks = self.replay_buffer.sample()
            target_q = self.calc_target_q(obs=obs_next, feature=feat_next, rewards=rewards, dones=dones)
            loss, q = super().train(state=[obs, feats], target_q=target_q, acts=actions, masks=masks)
            self.update()

    def save(self, dir_path):
        """Persist the Q-network weights to <dir_path>/dqn."""
        file_path = os.path.join(dir_path, "dqn")
        torch.save(self.Q.state_dict(), file_path)
        print("[*] Model saved at: {}".format(file_path))

    def load(self, dir_path):
        """Restore the Q-network weights from <dir_path>/dqn."""
        file_path = os.path.join(dir_path, "dqn")
        self.Q.load_state_dict(torch.load(file_path))
        print("[*] Loaded model from {}".format(file_path))
class MFQ(base.ValueNet):
    """Mean-Field Q-learning agent: like DQN, but the target additionally
    conditions on the mean action probability of neighbouring agents."""

    def __init__(self, handle, env, sub_len, eps=1.0, memory_size=2**10, batch_size=64):
        super().__init__(env, handle, use_mf=True)
        self.replay_buffer = tools.MemoryGroup(
            max_len=memory_size,
            batch_size=batch_size,
            obs_shape=self.view_space,
            feat_shape=self.feature_space,
            act_n=self.num_actions,
            use_mean=True,
            sub_len=sub_len,
        )

    def flush_buffer(self, **kwargs):
        """Push one batch of transition data into the replay buffer."""
        self.replay_buffer.push(**kwargs)

    def train(self):
        """Run one sweep of mean-field TD updates over all available batches."""
        self.replay_buffer.tight()
        for _ in range(self.replay_buffer.get_batch_num()):
            obs, feat, acts, act_prob, obs_next, feat_next, act_prob_next, rewards, dones, masks = self.replay_buffer.sample()
            target_q = self.calc_target_q(obs=obs_next, feature=feat_next, rewards=rewards, dones=dones, prob=act_prob_next)
            loss, q = super().train(state=[obs, feat], target_q=target_q, prob=act_prob, acts=acts, masks=masks)
            self.update()

    def save(self, dir_path):
        """Persist the Q-network weights to <dir_path>/mfq."""
        file_path = os.path.join(dir_path, "mfq")
        torch.save(self.Q.state_dict(), file_path)
        print("[*] Model saved at: {}".format(file_path))

    def load(self, dir_path):
        """Restore the Q-network weights from <dir_path>/mfq."""
        file_path = os.path.join(dir_path, "mfq")
        self.Q.load_state_dict(torch.load(file_path))
        print("[*] Loaded model from {}".format(file_path))
|
import unittest
with_alazar = True
def get_pulse():
    """Build the test pulse: a min->max, max->max, max->min ramp sequence
    repeated 'n' times.

    The 'out' channel carries the ramp; 'trigger' is held at 1 for the whole
    duration. Each third of the sequence declares its own measurement window
    (A, B, C), each lasting tau/3.
    """
    from qupulse.pulses import TablePulseTemplate as TPT, SequencePulseTemplate as SPT, RepetitionPulseTemplate as RPT
    ramp = TPT(identifier='ramp', channels={'out', 'trigger'})
    ramp.add_entry(0, 'start', channel='out')
    ramp.add_entry('duration', 'stop', 'linear', channel='out')
    # Trigger channel stays high over the full pulse.
    ramp.add_entry(0, 1, channel='trigger')
    ramp.add_entry('duration', 1, 'hold', channel='trigger')
    ramp.add_measurement_declaration('meas', 0, 'duration')
    # Three ramp segments (rise, hold, fall), each parameterised separately.
    base = SPT([(ramp, dict(start='min', stop='max', duration='tau/3'), dict(meas='A')),
                (ramp, dict(start='max', stop='max', duration='tau/3'), dict(meas='B')),
                (ramp, dict(start='max', stop='min', duration='tau/3'), dict(meas='C'))], {'min', 'max', 'tau'})
    repeated = RPT(base, 'n')
    root = SPT([repeated, repeated, repeated], {'min', 'max', 'tau', 'n'})
    return root
def get_alazar_config():
    """Build the Alazar scanline configuration used by the hardware test:
    external trigger (5 V range), 100 MS/s internal clock, 1 V input range
    on all four channels.
    """
    from atsaverage import alazar
    from atsaverage.config import ScanlineConfiguration, CaptureClockConfiguration, EngineTriggerConfiguration,\
        TRIGInputConfiguration, InputConfiguration
    # Trigger level as an 8-bit code: (5 V + 0.4 V) over a 10 V full scale.
    trig_level = int((5 + 0.4) / 10. * 255)
    assert 0 <= trig_level < 256
    config = ScanlineConfiguration()
    config.triggerInputConfiguration = TRIGInputConfiguration(triggerRange=alazar.TriggerRangeID.etr_5V)
    # Engine J fires on the rising edge of the external input; engine K is disabled.
    config.triggerConfiguration = EngineTriggerConfiguration(triggerOperation=alazar.TriggerOperation.J,
                                                             triggerEngine1=alazar.TriggerEngine.J,
                                                             triggerSource1=alazar.TriggerSource.external,
                                                             triggerSlope1=alazar.TriggerSlope.positive,
                                                             triggerLevel1=trig_level,
                                                             triggerEngine2=alazar.TriggerEngine.K,
                                                             triggerSource2=alazar.TriggerSource.disable,
                                                             triggerSlope2=alazar.TriggerSlope.positive,
                                                             triggerLevel2=trig_level)
    config.captureClockConfiguration = CaptureClockConfiguration(source=alazar.CaptureClockType.internal_clock,
                                                                 samplerate=alazar.SampleRateID.rate_100MSPS)
    config.inputConfiguration = 4*[InputConfiguration(input_range=alazar.InputRangeID.range_1_V)]
    # totalRecordSize = 0 lets the driver infer it; assert the property sticks.
    config.totalRecordSize = 0
    assert config.totalRecordSize == 0
    return config
def get_operations():
    """Return one Downsample operation (identifier 'DS_<mask>') per mask A-D."""
    from atsaverage.operations import Downsample
    return [Downsample(identifier='DS_' + mask, maskID=mask)
            for mask in ('A', 'B', 'C', 'D')]
def get_window(card):
    """Launch and return a threaded status window for *card*."""
    from atsaverage.gui import ThreadedStatusWindow
    status = ThreadedStatusWindow(card)
    status.start()
    return status
class TaborTests(unittest.TestCase):
    """Hardware-in-the-loop smoke test for a Tabor AWG, optionally with an
    Alazar digitizer. Requires the physical instruments, hence skipped."""
    @unittest.skip
    def test_all(self):
        """End-to-end: connect the AWG, map channels, configure the Alazar,
        build and upload the pulse program, then arm it."""
        from qupulse.hardware.feature_awg.tabor import TaborChannelTuple, TaborDevice
        #import warnings
        tawg = TaborDevice(r'USB0::0x168C::0x2184::0000216488::INSTR')
        tchannelpair = TaborChannelTuple(tawg, (1, 2), 'TABOR_AB')
        tawg.paranoia_level = 2
        #warnings.simplefilter('error', Warning)
        from qupulse.hardware.setup import HardwareSetup, PlaybackChannel, MarkerChannel
        hardware_setup = HardwareSetup()
        # Map logical channel names onto the Tabor channel pair.
        hardware_setup.set_channel('TABOR_A', PlaybackChannel(tchannelpair, 0))
        hardware_setup.set_channel('TABOR_B', PlaybackChannel(tchannelpair, 1))
        hardware_setup.set_channel('TABOR_A_MARKER', MarkerChannel(tchannelpair, 0))
        hardware_setup.set_channel('TABOR_B_MARKER', MarkerChannel(tchannelpair, 1))
        if with_alazar:
            # Start the atsaverage server if needed and register the digitizer.
            from qupulse.hardware.dacs.alazar import AlazarCard
            import atsaverage.server
            if not atsaverage.server.Server.default_instance.running:
                atsaverage.server.Server.default_instance.start(key=b'guest')
            import atsaverage.core
            alazar = AlazarCard(atsaverage.core.getLocalCard(1, 1))
            # All three measurement windows map to hardware channel 0.
            alazar.register_mask_for_channel('A', 0)
            alazar.register_mask_for_channel('B', 0)
            alazar.register_mask_for_channel('C', 0)
            alazar.config = get_alazar_config()
            alazar.register_operations('test', get_operations())
            window = get_window(atsaverage.core.getLocalCard(1, 1))
            hardware_setup.register_dac(alazar)
        repeated = get_pulse()
        from qupulse.pulses.sequencing import Sequencer
        sequencer = Sequencer()
        sequencer.push(repeated,
                       parameters=dict(n=1000, min=-0.5, max=0.5, tau=192*3),
                       channel_mapping={'out': 'TABOR_A', 'trigger': 'TABOR_A_MARKER'},
                       window_mapping=dict(A='A', B='B', C='C'))
        instruction_block = sequencer.build()
        hardware_setup.register_program('test', instruction_block)
        if with_alazar:
            # Append an extra periodic mask 'D' directly to the registered program.
            from atsaverage.masks import PeriodicMask
            m = PeriodicMask()
            m.identifier = 'D'
            m.begin = 0
            m.end = 1
            m.period = 1
            m.channel = 0
            alazar._registered_programs['test'].masks.append(m)
        hardware_setup.arm_program('test')
        d = 1  # NOTE(review): leftover debugger anchor; serves no purpose
|
"""
Removes the duplicate sets of COPE responses to the same questions in the same survey version.
In PPI(COPE) surveys, the purpose of questionnaire_response_id is to group all responses from the same survey together.
Some COPE questions allowed participants to provide multiple answers, which will be connected via the same
questionnaire_response_id. However, a participant may submit the responses multiple times for the same questions,
therefore creating duplicates. For the COPE surveys there are multiple versions of the survey where the questions can be
reused in multiple versions. We need to keep the same question-answer pairs from different versions.
We need to use the combination of person_id, observation_source_concept_id,
observation_source_value, and questionnaire_response_id and cope_month to identify multiple sets of responses.
We only want to keep the most recent set of responses and remove previous sets of responses per each cope_month version.
cope_survey_semantic_version_map in the rdr dataset can be used to get the cope_month version.
In short the query should achieve
Step 1:
Identify most recent questionnaire_response_id for same person, question, cope_month combination.
Step 2:
Prioritize responses with same person, question, cope_month combination with the most recent questionnaire_response_id.
Step 3:
Keep only the records associated with the most recent questionnaire_response_id per person, question, and answer for each cope_month version.
"""
import logging
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
# Project imports
from common import JINJA_ENV
from constants.cdr_cleaner import clean_cdr as cdr_consts
from utils import pipeline_logging
LOGGER = logging.getLogger(__name__)
COPE_SURVEY_VERSION_MAP_TABLE = 'cope_survey_semantic_version_map'
ISSUE_NUMBERS = ['DC1146', 'DC1135']
OBSERVATION = 'observation'
SANDBOX_DUPLICATE_COPE_RESPONSES = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE
`{{project}}.{{sandbox_dataset}}.{{intermediary_table}}` AS
SELECT
o.* EXCEPT (cope_month,
rank_order,
is_pmi_skip,
max_observation_datetime)
FROM (
SELECT
*,
DENSE_RANK() OVER(
PARTITION BY person_id,
observation_source_concept_id,
observation_source_value,
value_source_value,
cope_month
ORDER BY
is_pmi_skip ASC,
max_observation_datetime DESC,
questionnaire_response_id DESC,
observation_id DESC) AS rank_order
FROM (
SELECT
obs.*,
IF
(value_source_value = 'PMI_Skip',
1,
0) AS is_pmi_skip,
MAX(observation_datetime) OVER(
PARTITION BY person_id,
observation_source_concept_id,
observation_source_value,
cope.cope_month,
obs.questionnaire_response_id) AS max_observation_datetime,
cope.cope_month /* will handle case if obs table has valid cope_month or not */
FROM
`{{project}}.{{dataset}}.observation` obs
JOIN
`{{project}}.{{dataset}}.{{cope_survey_version_table}}` cope
ON
obs.questionnaire_response_id = cope.questionnaire_response_id )
) o
WHERE
o.rank_order != 1
""")
REMOVE_DUPLICATE_COPE_RESPONSES = JINJA_ENV.from_string("""
DELETE
FROM
`{{project}}.{{dataset}}.observation`
WHERE
observation_id IN (
SELECT
observation_id
FROM
`{{project}}.{{sandbox_dataset}}.{{intermediary_table}}` )
""")
class DropCopeDuplicateResponses(BaseCleaningRule):
    """
    Removes the duplicate sets of COPE responses to the same questions within
    the same survey version, keeping only the preferred (most recent,
    non-skip) set per person/question/answer/cope_month.
    """
    def __init__(self, project_id, dataset_id, sandbox_dataset_id):
        """
        Initialize the class with proper information.
        Set the issue numbers, description and affected datasets. As other tickets may affect
        this SQL, append them to the list of Jira Issues.
        DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
        """
        desc = 'Removes the duplicate sets of COPE responses to the same questions from the same survey version.'
        super().__init__(issue_numbers=ISSUE_NUMBERS,
                         description=desc,
                         affected_datasets=[cdr_consts.RDR],
                         affected_tables=[OBSERVATION],
                         project_id=project_id,
                         dataset_id=dataset_id,
                         sandbox_dataset_id=sandbox_dataset_id)
    def get_query_specs(self):
        """
        Return a list of dictionary query specifications.
        :return:  A list of dictionaries. Each dictionary contains a single query
            and a specification for how to execute that query. The specifications
            are optional but the query is required.
        """
        # First sandbox the duplicate rows, then delete them from observation;
        # order matters — the delete reads the sandbox table.
        sandbox_duplicate_rows = {
            cdr_consts.QUERY:
                SANDBOX_DUPLICATE_COPE_RESPONSES.render(
                    project=self.project_id,
                    dataset=self.dataset_id,
                    sandbox_dataset=self.sandbox_dataset_id,
                    cope_survey_version_table=COPE_SURVEY_VERSION_MAP_TABLE,
                    intermediary_table=self.get_sandbox_tablenames()[0])
        }
        delete_duplicate_rows = {
            cdr_consts.QUERY:
                REMOVE_DUPLICATE_COPE_RESPONSES.render(
                    project=self.project_id,
                    dataset=self.dataset_id,
                    sandbox_dataset=self.sandbox_dataset_id,
                    intermediary_table=self.get_sandbox_tablenames()[0])
        }
        return [sandbox_duplicate_rows, delete_duplicate_rows]
    def setup_rule(self, client):
        """
        Function to run any data upload options before executing a query.
        """
        pass
    def setup_validation(self, client):
        """
        Run required steps for validation setup
        """
        raise NotImplementedError("Please fix me.")
    def validate_rule(self, client):
        """
        Validates the cleaning rule which deletes or updates the data from the tables
        """
        raise NotImplementedError("Please fix me.")
    def get_sandbox_tablenames(self):
        # Sandbox table name: '<first issue number>_observation'.
        sandbox_table = f'{self._issue_numbers[0].lower()}_{self.affected_tables[0]}'
        return [sandbox_table]
if __name__ == '__main__':
    import cdr_cleaner.args_parser as parser
    import cdr_cleaner.clean_cdr_engine as clean_engine
    pipeline_logging.configure()
    ARGS = parser.parse_args()
    if ARGS.list_queries:
        # Dry-run mode: log the rendered queries without executing them.
        clean_engine.add_console_logging()
        query_list = clean_engine.get_query_list(
            ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
            [(DropCopeDuplicateResponses,)])
        for query in query_list:
            LOGGER.info(query)
    else:
        # Execute the cleaning rule against the dataset.
        clean_engine.add_console_logging(ARGS.console_log)
        clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
                                   ARGS.sandbox_dataset_id,
                                   [(DropCopeDuplicateResponses,)])
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from UTILS.colorful import *
import numpy as np
from UTILS.tensor_ops import _2tensor
class PPO():
    """Proximal Policy Optimization trainer for a two-headed (R/L)
    policy-and-critic network; the two heads are trained alternately."""
    def __init__(self, policy_and_critic, mcv=None):
        from .reinforce_foundation import CoopAlgConfig
        self.policy_and_critic = policy_and_critic
        self.clip_param = CoopAlgConfig.clip_param
        self.ppo_epoch = CoopAlgConfig.ppo_epoch
        self.n_pieces_batch_division = CoopAlgConfig.n_pieces_batch_division
        self.value_loss_coef = CoopAlgConfig.value_loss_coef
        self.entropy_coef = CoopAlgConfig.entropy_coef
        self.max_grad_norm = CoopAlgConfig.max_grad_norm
        self.lr = CoopAlgConfig.lr
        self.g_optimizer = optim.Adam(policy_and_critic.parameters(), lr=self.lr)
        self.g_update_delayer = 0
        self.g_initial_value_loss = 0
        self.invalid_penalty = CoopAlgConfig.invalid_penalty
        # alternate-training switch (flipped on every train_on_traj call)
        self.train_switch = True
        self.mcv = mcv
        self.ppo_update_cnt = 0
        self.loss_bias = CoopAlgConfig.balance
    def train_on_traj(self, traj_pool, task):
        """Run ppo_epoch passes of clipped-PPO updates for the head selected
        by *task* ('train_R' or 'train_L') over minibatches sampled from
        *traj_pool*. Returns the cumulative update count."""
        # sample minibatches from the trajectory pool
        g_value_loss_epoch = 0
        g_action_loss_epoch = 0
        g_dist_entropy_epoch = 0
        error_act_loss_epoch = 0
        self.train_switch = not self.train_switch
        num_updates = self.ppo_epoch * self.n_pieces_batch_division
        if task == 'train_R':
            flag='train_R'
            print('train_R')
        elif task == 'train_L':
            flag='train_L'
            print('train_L')
        for e in range(self.ppo_epoch):
            # 轨迹采样 = "trajectory sampling": first yields the number of
            # minibatches, then the minibatches themselves.
            data_generator = self.轨迹采样(traj_pool, flag=flag)
            n_batch = next(data_generator)
            for small_batch in range(n_batch):
                sample = next(data_generator)
                self.g_optimizer.zero_grad()
                loss_final = 0
                policy_loss, entropy_loss, gx_value_loss, error_act_loss, loss_final_t = self.get_loss(flag, sample)
                # Averages over all (epoch x minibatch) updates.
                g_value_loss_epoch += gx_value_loss.item() / num_updates
                g_action_loss_epoch += policy_loss.item() / num_updates
                g_dist_entropy_epoch += entropy_loss.item() / num_updates
                error_act_loss_epoch += error_act_loss.item() / num_updates
                # Weight the two heads against each other with loss_bias.
                if flag == 'train_R':
                    loss_final += loss_final_t * self.loss_bias
                if flag == 'train_L':
                    loss_final += loss_final_t * (1 - self.loss_bias)
                loss_final.backward()
                nn.utils.clip_grad_norm_(self.policy_and_critic.parameters(), self.max_grad_norm)
                self.g_optimizer.step()
            pass # finish small batch update
        pass # finish all epoch update
        self.ppo_update_cnt += 1
        print亮靛('value loss', g_value_loss_epoch, 'policy loss',g_action_loss_epoch, 'entropy loss',g_dist_entropy_epoch,'invalid-action loss', error_act_loss_epoch)
        return self.ppo_update_cnt
    def 轨迹采样(self, traj_pool, flag):
        """Trajectory-sampling generator: collects the per-head fields from
        *traj_pool*, computes normalised advantages, then first yields the
        minibatch count and afterwards each shuffled minibatch dict."""
        container = {}
        # Per-head field names, renamed to head-agnostic keys.
        if flag=='train_R':
            req_dict = ['g_obs', 'g_actions', 'g_actionLogProbs_R', 'return_R', 'value_R', 'ctr_mask_R' ]
            req_dict_rename = ['g_obs', 'g_actions', 'g_actionLogProbs' , 'return', 'state_value', 'ctr_mask']
        elif flag=='train_L':
            req_dict = ['g_obs', 'g_actions', 'g_actionLogProbs_L', 'return_L', 'value_L', 'ctr_mask_L']
            req_dict_rename = ['g_obs', 'g_actions', 'g_actionLogProbs' , 'return', 'state_value', 'ctr_mask']
        return_rename = "return"
        value_rename = "state_value"
        advantage_rename = "advantage"
        # expand compound keys: replace e.g. g_obs with its g_obs>xxx sub-keys
        for key_index, key in enumerate(req_dict):
            key_name = req_dict[key_index]
            key_rename = req_dict_rename[key_index]
            if not hasattr(traj_pool[0], key_name):
                real_key_list = [real_key for real_key in traj_pool[0].__dict__ if (key_name+'>' in real_key)]
                assert len(real_key_list) > 0, ('检查提供的变量', key,key_index)
                for real_key in real_key_list:
                    mainkey, subkey = real_key.split('>')
                    req_dict.append(real_key)
                    req_dict_rename.append(key_rename+'>'+subkey)
        big_batch_size = -1  # used to check that all fields share one length
        # load the trajectories into the container arrays
        for key_index, key in enumerate(req_dict):
            key_name = req_dict[key_index]
            key_rename = req_dict_rename[key_index]
            if not hasattr(traj_pool[0], key_name): continue
            set_item = np.concatenate([getattr(traj, key_name) for traj in traj_pool], axis=0)
            if not (big_batch_size==set_item.shape[0] or (big_batch_size<0)):
                print('error')
            assert big_batch_size==set_item.shape[0] or (big_batch_size<0), (key,key_index)
            big_batch_size = set_item.shape[0]
            container[key_rename] = set_item    # reference assignment (no copy)
        # Advantage = return - value, normalised to zero mean / unit std.
        container[advantage_rename] = container[return_rename] - container[value_rename]
        container[advantage_rename] = ( container[advantage_rename] - container[advantage_rename].mean() ) / (container[advantage_rename].std() + 1e-5)
        mini_batch_size = math.ceil(big_batch_size / self.n_pieces_batch_division)  # size of minibatch for each agent
        sampler = BatchSampler(SubsetRandomSampler(range(big_batch_size)), mini_batch_size, drop_last=False)
        yield len(sampler)
        for indices in sampler:
            selected = {}
            for key in container:
                selected[key] = container[key][indices]
            for key in [key for key in selected if '>' in key]:
                # regroup parent>child keys into nested dicts
                mainkey, subkey = key.split('>')
                if not mainkey in selected: selected[mainkey] = {}
                selected[mainkey][subkey] = selected[key]
                del selected[key]
            yield selected
    def get_loss(self, flag, sample):
        """Compute the clipped-PPO loss terms for one minibatch of the head
        selected by *flag*; returns (policy, entropy, value, invalid-action,
        combined) losses."""
        obs = _2tensor(sample['g_obs'])
        # obs: $batch.$n_agent.$core_dim used in [action eval],
        advantage = _2tensor(sample['advantage'])
        # advantage A(s_t): $batch.$1.(advantage) used in [policy reinforce],
        action = _2tensor(sample['g_actions'])
        # action: $batch.$2.(two actions) not used yet
        oldPi_actionLogProb = _2tensor(sample['g_actionLogProbs'])
        # oldPi_actionLogProb: $batch.$1.(the output from act) used in [clipped version of value loss],
        real_value = _2tensor(sample['return'])
        # real_value: $batch.$1.(r_t0+r_t1+r_t2+...)
        ctr_mask = _2tensor(sample['ctr_mask'])
        # The network returns both heads' outputs; pick the slot for *flag*.
        if flag == 'train_R':
            newPi_value, newPi_actionLogProb, entropy_loss, probs_, _,_,_,_ = self.policy_and_critic.evaluate_actions(obs, action)
        elif flag == 'train_L':
            _,_,_,_, newPi_value, newPi_actionLogProb, entropy_loss, probs_ = self.policy_and_critic.evaluate_actions(obs, action)
        else:
            assert False
        # Penalise probability mass placed on masked (invalid) actions.
        error_act_loss = (ctr_mask*probs_).mean()
        # Standard clipped PPO surrogate objective.
        ratio = torch.exp(newPi_actionLogProb - oldPi_actionLogProb)
        surr1 = ratio * advantage
        surr2 = torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param) * advantage
        policy_loss = -torch.min(surr1, surr2).mean()
        value_loss = 0.5 * F.mse_loss(real_value, newPi_value)
        loss_final =  policy_loss   +value_loss*self.value_loss_coef   -entropy_loss*self.entropy_coef    +error_act_loss * self.invalid_penalty
        return policy_loss, entropy_loss, value_loss, error_act_loss, loss_final
|
<filename>multi-label/resnet/train.py
# -*- coding: utf-8 -*-
'''
Author: <NAME>
Email: <EMAIL>
Python Version: 3.7.10
Description: train.py includes the training process for the
weakly supervised labeling classification (incomplete label assignments).
'''
import os
import ast
import sys
import json
import random
import logging
import argparse
import numpy as np
from tqdm import tqdm
from os.path import dirname as up
import torch
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
sys.path.append(up(os.path.abspath(__file__)))
from resnet import ResNet
from dataloader import GenDEBRIS_ML, bands_mean, bands_std, RandomRotationTransform , pos_weight, gen_weights
sys.path.append(os.path.join(up(up(up(os.path.abspath(__file__)))), 'utils'))
from metrics import Evaluation_ML
root_path = up(up(up(os.path.abspath(__file__))))
logging.basicConfig(filename=os.path.join(root_path, 'logs','log_resnet.log'), filemode='a',level=logging.INFO, format='%(name)s - %(levelname)s - %(message)s')
logging.info('*'*10)
def seed_all(seed):
    """Seed every RNG used during training (stdlib, NumPy, PyTorch/cuDNN)
    so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels (disables auto-tuning heuristics).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def seed_worker(worker_id):
    """Re-seed NumPy and stdlib RNGs inside each DataLoader worker process
    from the worker's torch seed, for reproducible data loading."""
    seed = torch.initial_seed() % 2**32
    random.seed(seed)
    np.random.seed(seed)
###############################################################
# Training
###############################################################
def main(options):
    """Train and/or evaluate the multi-label ResNet classifier.

    :param dict options: parsed CLI parameters (see __main__). With
        options['mode'] == 'train' the model is trained with periodic
        evaluation on the 'val' split; with 'test' it is only evaluated on
        the 'test' split. Checkpoints go to
        options['checkpoint_path']/<epoch>/model.pth; metrics are written to
        TensorBoard and the module log file.
    :raises ValueError: if options['mode'] is neither 'train' nor 'test'.
    """
    # Reproducibility
    seed_all(0)

    # Tensorboard
    writer = SummaryWriter(os.path.join(root_path, 'logs', options['tensorboard']))

    # Transformations (augmentation only for training)
    transform_train = transforms.Compose([transforms.ToTensor(),
                                          RandomRotationTransform([-90, 0, 90, 180]),
                                          transforms.RandomHorizontalFlip()])
    transform_test = transforms.Compose([transforms.ToTensor()])
    standardization = transforms.Normalize(bands_mean, bands_std)

    # Construct data loaders
    if options['mode'] == 'train':
        dataset_train = GenDEBRIS_ML('train', transform=transform_train, standardization=standardization, agg_to_water=options['agg_to_water'])
        dataset_test = GenDEBRIS_ML('val', transform=transform_test, standardization=standardization, agg_to_water=options['agg_to_water'])
        train_loader = DataLoader(dataset_train,
                                  batch_size=options['batch'],
                                  shuffle=True,
                                  num_workers=options['num_workers'],
                                  pin_memory=options['pin_memory'],
                                  prefetch_factor=options['prefetch_factor'],
                                  persistent_workers=options['persistent_workers'],
                                  worker_init_fn=seed_worker)
        test_loader = DataLoader(dataset_test,
                                 batch_size=options['batch'],
                                 shuffle=False,
                                 num_workers=options['num_workers'],
                                 pin_memory=options['pin_memory'],
                                 prefetch_factor=options['prefetch_factor'],
                                 persistent_workers=options['persistent_workers'],
                                 worker_init_fn=seed_worker)
    elif options['mode'] == 'test':
        dataset_test = GenDEBRIS_ML('test', transform=transform_test, standardization=standardization, agg_to_water=options['agg_to_water'])
        test_loader = DataLoader(dataset_test,
                                 batch_size=options['batch'],
                                 shuffle=False,
                                 num_workers=options['num_workers'],
                                 pin_memory=options['pin_memory'],
                                 prefetch_factor=options['prefetch_factor'],
                                 persistent_workers=options['persistent_workers'],
                                 worker_init_fn=seed_worker)
    else:
        # Bug fix: a bare `raise` outside an except block is a RuntimeError
        # ("No active exception to re-raise"); raise something informative.
        raise ValueError("mode must be 'train' or 'test', got: %r" % options['mode'])

    # Use gpu or cpu
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = ResNet(input_bands=options['input_channels'],
                   output_classes=options['output_channels'])
    model.to(device)

    # Load model from a specific epoch to continue training or start evaluation
    if options['resume_from_epoch'] > 1:
        resume_model_dir = os.path.join(options['checkpoint_path'], str(options['resume_from_epoch']))
        model_file = os.path.join(resume_model_dir, 'model.pth')
        logging.info('Loading model files from folder: %s' % model_file)
        checkpoint = torch.load(model_file, map_location=device)
        model.load_state_dict(checkpoint)
        # dereference so the checkpoint tensors can be freed
        del checkpoint
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    global pos_weight
    # Aggregate Distribution Mixed Water, Wakes, Cloud Shadows, Waves with Marine Water
    if options['agg_to_water']:
        pos_weight = pos_weight[:-4]  # Drop Mixed Water, Wakes, Cloud Shadows, Waves

    # Weighted binary cross-entropy loss & Adam optimizer
    weight = gen_weights(1 / pos_weight, c=options['weight_param'])
    criterion = torch.nn.BCEWithLogitsLoss(pos_weight=weight.to(device))
    optimizer = torch.optim.Adam(model.parameters(), lr=options['lr'], weight_decay=options['decay'])

    # Learning rate scheduler
    if options['reduce_lr_on_plateau'] == 1:
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, patience=10, verbose=True)
    else:
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, options['lr_steps'], gamma=0.2, verbose=True)

    # Start training
    start = options['resume_from_epoch'] + 1
    epochs = options['epochs']
    eval_every = options['eval_every']

    if options['mode'] == 'train':
        # Write model graph to Tensorboard using one sample batch.
        dataiter = iter(train_loader)
        # Bug fix: iterator `.next()` does not exist in Python 3 / recent
        # PyTorch DataLoader iterators; use the builtin next().
        image_temp, _ = next(dataiter)
        writer.add_graph(model, image_temp.to(device))

        model.train()

        ###############################################################
        # Start Training
        ###############################################################
        for epoch in range(start, epochs + 1):
            training_loss = []
            training_batches = 0
            i_board = 0
            for (image, target) in tqdm(train_loader, desc="training"):
                image = image.to(device)
                target = target.to(device)

                optimizer.zero_grad()
                logits = model(image)
                loss = criterion(logits, target)
                loss.backward()

                training_batches += target.shape[0]
                # loss.item() replaces deprecated `loss.data`; accumulate the
                # batch-size-weighted loss for the epoch average.
                training_loss.append(loss.item() * target.shape[0])
                optimizer.step()

                # Write running loss
                writer.add_scalar('training loss', loss, (epoch - 1) * len(train_loader) + i_board)
                i_board += 1

            logging.info("Training loss was: " + str(sum(training_loss) / training_batches))

            ###############################################################
            # Start Evaluation
            ###############################################################
            if epoch % eval_every == 0 or epoch == 1:
                model.eval()

                test_loss = []
                test_batches = 0
                y_true = []
                predicted_probs = []

                with torch.no_grad():
                    for (image, target) in tqdm(test_loader, desc="testing"):
                        image = image.to(device)
                        target = target.to(device)

                        logits = model(image)
                        loss = criterion(logits, target)

                        probs = torch.sigmoid(logits).cpu().numpy()
                        target = target.cpu().numpy()

                        test_batches += target.shape[0]
                        test_loss.append(loss.item() * target.shape[0])
                        predicted_probs += list(probs)
                        y_true += list(target)

                predicted_probs = np.asarray(predicted_probs)
                y_predicted = (predicted_probs >= options['threshold']).astype(np.float32)
                y_true = np.asarray(y_true)

                ###############################################################
                # Store Scores
                ###############################################################
                acc = Evaluation_ML(y_predicted, predicted_probs, y_true)
                logging.info("\n")
                logging.info("Test loss was: " + str(sum(test_loss) / test_batches))
                logging.info("STATISTICS AFTER EPOCH " + str(epoch) + ": \n")
                logging.info("Evaluation: " + str(acc))

                logging.info("Saving models")
                model_dir = os.path.join(options['checkpoint_path'], str(epoch))
                os.makedirs(model_dir, exist_ok=True)
                torch.save(model.state_dict(), os.path.join(model_dir, 'model.pth'))

                writer.add_scalars('Loss per epoch', {'Test loss': sum(test_loss) / test_batches,
                                                      'Train loss': sum(training_loss) / training_batches},
                                   epoch)
                writer.add_scalar('Precision/test macroPrec', acc["macroPrec"], epoch)
                writer.add_scalar('Precision/test microPrec', acc["microPrec"], epoch)
                writer.add_scalar('Precision/test samplePrec', acc["samplePrec"], epoch)
                writer.add_scalar('Recall/test macroRec', acc["macroRec"], epoch)
                writer.add_scalar('Recall/test microRec', acc["microRec"], epoch)
                writer.add_scalar('Recall/test sampleRec', acc["sampleRec"], epoch)
                writer.add_scalar('F1/test macroF1', acc["macroF1"], epoch)
                writer.add_scalar('F1/test microF1', acc["microF1"], epoch)
                writer.add_scalar('F1/test sampleF1', acc["sampleF1"], epoch)
                writer.add_scalar('Multilabeling Loss/test HammingLoss', acc["HammingLoss"], epoch)
                writer.add_scalar('Multilabeling Loss/test coverageError', acc["coverageError"], epoch)
                writer.add_scalar('Multilabeling Loss/test rankLoss', acc["rankLoss"], epoch)

                if options['reduce_lr_on_plateau'] == 1:
                    scheduler.step(sum(test_loss) / test_batches)
                else:
                    scheduler.step()

                model.train()

    # CODE ONLY FOR EVALUATION - TESTING MODE !
    elif options['mode'] == 'test':
        model.eval()

        test_loss = []
        test_batches = 0
        y_true = []
        predicted_probs = []

        with torch.no_grad():
            for (image, target) in tqdm(test_loader, desc="testing"):
                image = image.to(device)
                target = target.to(device)

                logits = model(image)
                loss = criterion(logits, target)

                probs = torch.sigmoid(logits).cpu().numpy()
                target = target.cpu().numpy()

                test_batches += target.shape[0]
                test_loss.append(loss.item() * target.shape[0])
                predicted_probs += list(probs)
                y_true += list(target)

        predicted_probs = np.asarray(predicted_probs)
        y_predicted = (predicted_probs >= options['threshold']).astype(np.float32)
        y_true = np.asarray(y_true)

        ###############################################################
        # Store Scores
        ###############################################################
        acc = Evaluation_ML(y_predicted, predicted_probs, y_true)
        logging.info("\n")
        logging.info("Test loss was: " + str(sum(test_loss) / test_batches))
        logging.info("STATISTICS : \n")
        logging.info("Evaluation: " + str(acc))
if __name__ == "__main__":

    def str2bool(v):
        """Parse a CLI boolean.

        argparse `type=bool` is a known trap: bool('False') is True because
        any non-empty string is truthy. Accept the usual spellings instead.
        """
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got: %r' % v)

    parser = argparse.ArgumentParser()

    # Options
    # Bug fix: the three boolean flags used type=bool, which parses any
    # non-empty string (including "False") as True; use str2bool instead.
    parser.add_argument('--agg_to_water', default=True, type=str2bool, help='Aggregate Mixed Water, Wakes, Cloud Shadows, Waves with Marine Water')
    parser.add_argument('--mode', default='train', help='select between train or test ')
    parser.add_argument('--epochs', default=20, type=int, help='Number of epochs to run')
    parser.add_argument('--batch', default=32, type=int, help='Batch size')
    parser.add_argument('--resume_from_epoch', default=0, type=int, help='load model from previous epoch')
    parser.add_argument('--input_channels', default=11, type=int, help='Number of input bands')
    parser.add_argument('--output_channels', default=11, type=int, help='Number of output classes')
    parser.add_argument('--weight_param', default=1.6, type=float, help='Weighting parameter for Loss Function')
    # Bug fix: threshold is fractional; type=int would crash on e.g. "0.4".
    parser.add_argument('--threshold', default=0.5, type=float, help='threshold for evaluation')

    # Optimization
    parser.add_argument('--lr', default=2e-4, type=float, help='learning rate')
    parser.add_argument('--decay', default=1e-6, type=float, help='learning rate decay')
    parser.add_argument('--reduce_lr_on_plateau', default=0, type=int, help='reduce learning rate when no increase (0 or 1)')
    parser.add_argument('--lr_steps', default='[5, 10, 15]', type=str, help='Specify the steps that the lr will be reduced (Only when reduce_lr_on_plateau is 0)')

    # Evaluation/Checkpointing
    parser.add_argument('--checkpoint_path', default=os.path.join(up(os.path.abspath(__file__)), 'trained_models'), help='folder to save checkpoints into (empty = this folder)')
    parser.add_argument('--eval_every', default=1, type=int, help='How frequently to run evaluation (epochs)')

    # misc
    parser.add_argument('--num_workers', default=1, type=int, help='How many cpus for loading data (0 is the main process)')
    parser.add_argument('--pin_memory', default=False, type=str2bool, help='Use pinned memory or not')
    parser.add_argument('--prefetch_factor', default=1, type=int, help='Number of sample loaded in advance by each worker')
    parser.add_argument('--persistent_workers', default=True, type=str2bool, help='This allows to maintain the workers Dataset instances alive.')
    parser.add_argument('--tensorboard', default='tsboard_multilabel', type=str, help='Name for tensorboard run')

    args = parser.parse_args()
    options = vars(args)  # convert to ordinary dict

    # lr_steps: accept either a single int or a list of ints
    lr_steps = ast.literal_eval(options['lr_steps'])
    if isinstance(lr_steps, list):
        pass
    elif isinstance(lr_steps, int):
        lr_steps = [lr_steps]
    else:
        # Bug fix: a bare `raise` with no active exception is a RuntimeError.
        raise ValueError('--lr_steps must be an int or a list of ints, got: %r' % lr_steps)
    options['lr_steps'] = lr_steps

    logging.info('parsed input parameters:')
    logging.info(json.dumps(options, indent=2))
    main(options)
|
import datetime
from decimal import Decimal
import pytest
from pybankreader.exceptions import ValidationError
from pybankreader.fields import Field, IntegerField, CharField, RegexField, \
TimestampField, DecimalField
def _generic_field_test(field_instance, ok_value, long_value, set_value=None):
    """
    As any field is based on the Field class, this can test the same basic
    behavior for all subclasses: max-length enforcement, acceptance of a
    valid value, empty-value handling, and the `required` flag.

    :param Field field_instance: instance of a Field subclass
    :param string ok_value: value that should pass validation
    :param string long_value: value that is longer than allowed
    :param string set_value: value that should pass validation and be set
        instead of ok_value, since the value may be re-cast by the field
    :return:
    """
    field_instance.field_name = 'test_field'
    empty_value = ''

    # Test long value: assignment must raise and report the field name
    with pytest.raises(ValidationError) as e:
        field_instance.value = long_value
    assert e.value.field == "test_field"
    exp_message = "Value '{}' exceeds maximum length of {}". \
        format(long_value, field_instance.length)
    assert e.value.message == exp_message

    # Test ok value and non-required field value
    if set_value:
        # some fields re-cast on assignment (e.g. timestamps), so the raw
        # value assigned differs from the expected string representation
        field_instance.value = set_value
    else:
        field_instance.value = ok_value
    assert str(field_instance.value) == ok_value
    # a non-required field accepts an empty value and stores None
    field_instance.value = empty_value
    assert field_instance.value is None

    # Test required field: the empty value must now be rejected
    field_instance.required = True
    with pytest.raises(ValidationError) as e:
        field_instance.value = empty_value
    assert e.value.field == 'test_field'
    exp_message = "A value is required for this field"
    assert e.value.message == exp_message
def test_base_field():
    """The plain Field obeys the generic length/required contract."""
    field = Field(length=6, required=False)
    _generic_field_test(field, "haha", "hahahaha")
def test_char_field():
    """CharField behaves exactly like the generic Field for string data."""
    field = CharField(length=6, required=False)
    _generic_field_test(field, "haha", "hahahaha")
def test_regex_field():
    """RegexField enforces length first, then its pattern."""
    field = RegexField('^0 1[a-f]+$', length=5, required=False)
    _generic_field_test(field, '0 1ae', '0 1aefaea')

    # a value of valid length that does not match the pattern must fail
    with pytest.raises(ValidationError) as excinfo:
        field.value = '0 1az'
    assert excinfo.value.field == 'test_field'
    expected = "Value '{}' does not match the regex pattern '{}'".format(
        '0 1az', field._regex)
    assert excinfo.value.message == expected
def test_integer_field():
    """IntegerField casts assigned strings to int, including negatives."""
    field = IntegerField(length=3, required=False)
    _generic_field_test(field, '19', '1999')
    field.value = '-19'
    assert field.value == -19
def test_decimal_field():
    """DecimalField casts assigned strings to Decimal, including negatives."""
    field = DecimalField(length=6, required=False)
    _generic_field_test(field, '13.54', '1234.56')
    field.value = '13.54'
    assert field.value == Decimal('13.54')
    field.value = '-13.54'
    assert field.value == Decimal('-13.54')
def test_timestamp_field():
    """TimestampField parses %y%m%d%H%M%S strings into datetimes."""
    field = TimestampField("%y%m%d%H%M%S", length=12, required=False)
    ok_value = str(datetime.datetime(
        year=2014, month=9, day=2, hour=7, minute=9, second=22
    ))
    _generic_field_test(field, ok_value, '1409020709222', '140902070922')

    # an unparseable value must raise with the underlying strptime error
    with pytest.raises(ValidationError) as excinfo:
        field.value = "whatever"
    assert excinfo.value.field == 'test_field'
    expected = "Value 'whatever' cannot be parsed to date using format " \
        "'%y%m%d%H%M%S'. Error is: time data 'whatever' does not match " \
        "format '%y%m%d%H%M%S'"
    assert excinfo.value.message == expected
|
<gh_stars>10-100
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import sys
import math
import xedhash
class hashmul_t(xedhash.hash_fun_interface_t):
    """Implement multiplicative hashing.

    h(k) = floor(table_size * frac(k / phi)) computed in 32-bit fixed-point
    arithmetic (Knuth, TAOCP vol. 3, p. 516).
    """

    def __init__(self, table_size):
        # golden ratio phi is (1+sqrt(5))/2. From Knuth, volume 3, page 516
        # 1/phi = (sqrt(5)-1)/2 (after some arithmetic)
        # We are using 1/phi * 2**n
        # where n is the number of bits in the data type (32)
        self.golden_ratio_recip2to32 = 2654435769
        self.table_size = table_size
        # pow2 is True if the table is a power of 2.
        # ilog2_table_size is only valid if pow2 is True
        self.pow2, self.ilog2_table_size = self.power_of_2()

    def kind(self):
        """Identifier used by the code generator to select this hash kind."""
        return "mult"

    def power_of_2(self):
        """Return (True, log2(table_size)) for a power-of-2 table size,
        otherwise (False, -1).

        Uses int.bit_length() for an exact integer log2; the previous
        math.log() approach relied on floating point and could misround
        for large table sizes.
        """
        ilog2_table_size = self.table_size.bit_length() - 1
        if pow(2, ilog2_table_size) == self.table_size:
            return (True, ilog2_table_size)
        return (False, -1)

    def get_table_size(self):
        return self.table_size

    def __str__(self):
        return "h(x) = hashmul({})".format(self.table_size)

    def apply(self, k):
        """Apply the hash function to the key k"""
        # fraction/2**32 is the fractional part of k/phi in fixed point
        q = self.golden_ratio_recip2to32 * k
        fraction = q & ((1 << 32) - 1)
        # scale the fraction to [0, table_size)
        r = fraction * self.table_size
        v = r >> 32
        return v

    def apply_pow2(self, k):
        """Apply the hash function to the key k, for power of 2 table sizes"""
        q = self.golden_ratio_recip2to32 * k
        fraction = q & ((1 << 32) - 1)
        # for power-of-2 sizes the scale-multiply reduces to a right shift
        v = fraction >> (32 - self.ilog2_table_size)
        return v

    def is_perfect(self, key_list):
        """Return True if no two keys in key_list hash to the same bucket."""
        values = set()
        for k in key_list:
            v = self.apply(k)
            if v in values:
                # collision - not perfect
                return False
            values.add(v)
        # no collisions in the output of the hash: perfect
        return True

    def need_hash_index_validation(self):
        """Need to validate that we landed on a live bucket"""
        return True

    def add_key_validation(self, strings_dict):
        """Emit the C condition verifying the bucket holds the probed key."""
        key_str = strings_dict['key_str']
        hentry_str = '%s[%s]' % (strings_dict['table_name'],
                                 strings_dict['hidx_str'])
        return 'if(%s.key == %s)' % (hentry_str, key_str)

    def emit_cvar_decl(self):
        """Declare the scratch variables used by emit_cexpr()."""
        if self.pow2:
            return "xed_union64_t t"
        return "xed_union64_t t, u"

    def emit_cexpr(self, key_str="key"):
        """Emit a C expression for the hash function given a C variable
        key_str."""
        if self.pow2:
            # power of 2 table size can replace the 2nd multiply with a shift
            c_hash_expr = """(t.u64 = {0} * {1}, t.s.lo32 >> (32-{2}))""".format(
                str(self.golden_ratio_recip2to32),
                key_str,
                self.ilog2_table_size)
        else:
            # the ULL cast on the constant is important to get 64b math.
            c_hash_expr = """(t.u64 = {0} * {1}, u.u64 = t.s.lo32 * {2}ULL, u.s.hi32)""".format(
                str(self.golden_ratio_recip2to32),
                key_str,
                str(self.table_size))
        return c_hash_expr
def find_perfect(keylist):
    """Search table sizes len(keylist)..2*len(keylist)-1 for a perfect hash.

    Returns the first collision-free hashmul_t, or None if none of the
    candidate sizes is perfect.
    """
    n = len(keylist)
    for m in range(n, 2 * n):
        # Bug fix: the original constructed hashmul_t(n) on every iteration,
        # so the loop never actually tried the larger table sizes m.
        f = hashmul_t(m)
        if f.is_perfect(keylist):
            return f
    return None
def test1():
    """Smoke test: 128-entry (power-of-2) table over keys 0..127."""
    hasher = hashmul_t(128)
    for key in range(0, 128):
        print("{} -> {}".format(key, hasher.apply(key)))
    verdict = ("Hash function is perfect" if hasher.is_perfect(range(0, 128))
               else "Hash function has collisions")
    print(verdict)
    print(hasher.emit_cexpr())
    return 0
def test2():
    """Smoke test: non-power-of-2 table (9 entries) over fixed keys."""
    hasher = hashmul_t(9)
    inputs = [225, 2273, 737, 2785, 241, 2289, 753, 2801]
    for key in inputs:
        print("{} -> {}".format(key, hasher.apply(key)))
    verdict = ("Hash function is perfect" if hasher.is_perfect(inputs)
               else "Hash function has collisions")
    print(verdict)
    print(hasher.emit_cexpr())
    return 0
def test3():
    """Check apply() and apply_pow2() agree for a 16-entry table."""
    hasher = hashmul_t(16)
    inputs = [225, 2273, 737, 2785, 241, 2289, 753, 2801]
    for key in inputs:
        general = hasher.apply(key)
        fast = hasher.apply_pow2(key)
        tag = "OK" if general == fast else "ERROR"
        print("{} {} -> {} {}".format(tag, key, general, fast))
    verdict = ("Hash function is perfect" if hasher.is_perfect(inputs)
               else "Hash function has collisions")
    print(verdict)
    print(hasher.emit_cexpr())
    return 0
def test4():
    """Degenerate case: a single-bucket (size 1) table with one key."""
    hasher = hashmul_t(1)
    inputs = [68002]
    for key in inputs:
        general = hasher.apply(key)
        fast = hasher.apply_pow2(key)
        tag = "OK" if general == fast else "ERROR"
        print("{} {} -> {} {}".format(tag, key, general, fast))
    verdict = ("Hash function is perfect" if hasher.is_perfect(inputs)
               else "Hash function has collisions")
    print(verdict)
    print(hasher.emit_cexpr())
    return 0
def test():
    """Run all sub-tests; return 1 if any failed, 0 otherwise."""
    fail = 0
    for case in (test1, test2, test3, test4):
        if case():
            print("FAIL: {}".format(case.__name__))
            fail = 1
        else:
            print("PASS: {}".format(case.__name__))
    return fail
if __name__ == "__main__":
    # exit status mirrors the test result: 0 = all passed, 1 = failure
    sys.exit(test())
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""EdgeQL routines for function call compilation."""
import itertools
import typing
from edgedb.lang.ir import ast as irast
from edgedb.lang.ir import utils as irutils
from edgedb.lang.schema import functions as s_func
from edgedb.lang.schema import name as sn
from edgedb.lang.schema import objects as s_obj
from edgedb.lang.schema import types as s_types
from edgedb.lang.edgeql import ast as qlast
from edgedb.lang.edgeql import errors
from edgedb.lang.edgeql import parser as qlparser
from . import astutils
from . import context
from . import dispatch
from . import pathctx
from . import setgen
from . import typegen
@dispatch.compile.register(qlast.FunctionCall)
def compile_FunctionCall(
        expr: qlast.Base, *, ctx: context.ContextLevel) -> irast.Base:
    """Compile an EdgeQL function call into an IR set.

    Resolves the function name, compiles the arguments, picks the first
    overload whose signature accepts the inferred argument types, and wraps
    the resulting irast.FunctionCall in a set.
    """
    with ctx.new() as fctx:
        if isinstance(expr.func, str):
            funcname = expr.func
        else:
            # expr.func is a (module, name) pair from the parser.
            funcname = sn.Name(expr.func[1], expr.func[0])

        funcs = fctx.schema.get_functions(
            funcname, module_aliases=fctx.modaliases)

        if funcs is None:
            raise errors.EdgeQLError(
                f'could not resolve function name {funcname}',
                context=expr.context)

        fctx.in_func_call = True
        args, kwargs, arg_types = process_func_args(expr, funcname, ctx=fctx)

        # First overload whose parameters accept the argument types wins.
        for funcobj in funcs:
            if check_function(funcobj, arg_types):
                break
        else:
            raise errors.EdgeQLError(
                f'could not find a function variant {funcname}',
                context=expr.context)

        # Collapse the scopes of non-SET OF arguments.
        fixup_param_scope(funcobj, args, kwargs, ctx=fctx)

        node = irast.FunctionCall(func=funcobj, args=args, kwargs=kwargs)

        if funcobj.initial_value is not None:
            # Compile the function's declared initial value, cast to the
            # inferred return type of this particular call.
            rtype = irutils.infer_type(node, fctx.schema)
            iv_ql = qlast.TypeCast(
                expr=qlparser.parse_fragment(funcobj.initial_value),
                type=typegen.type_to_ql_typeref(rtype)
            )
            node.initial_value = dispatch.compile(iv_ql, ctx=fctx)

    ir_set = setgen.ensure_set(node, ctx=ctx)
    return ir_set
def check_function(
        func: s_func.Function,
        arg_types: typing.Iterable[s_obj.Object]) -> bool:
    """Return True if *func* can be called with arguments of *arg_types*."""
    if not func.paramtypes:
        # A parameterless function matches only a zero-argument call.
        return not arg_types

    if not arg_types:
        # Zero-argument call: every non-variadic parameter needs a default.
        return all(
            pd is not None or pi == func.varparam
            for pi, pd in enumerate(func.paramdefaults))

    for pt, pd, at in itertools.zip_longest(func.paramtypes,
                                            func.paramdefaults,
                                            arg_types):
        if pt is None:
            # More arguments than declared parameters: only acceptable
            # when the function has a variadic parameter (the last one).
            if func.varparam is None:
                return False
            # NOTE(review): surplus variadic arguments are intentionally not
            # type-checked here, mirroring the original behavior.
            pt = func.paramtypes[func.varparam]
        elif at is None:
            # Fewer arguments than parameters: the rest must have defaults.
            if pd is None:
                return False
        elif not at.issubclass(pt):
            # Argument type is incompatible with the parameter type.
            return False

    # Match: `func` passed all checks.
    return True
def process_func_args(
        expr: qlast.FunctionCall, funcname: sn.Name, *,
        ctx: context.ContextLevel) \
        -> typing.Tuple[
            typing.List[irast.Base],            # args
            typing.Dict[str, irast.Base],       # kwargs
            typing.List[s_types.Type]]:         # arg_types
    """Compile the arguments of a function call.

    Each argument is compiled inside its own fenced scope; per-argument
    FILTER/ORDER BY clauses are folded into a wrapping statement first.
    Returns (positional args, keyword args, inferred argument types).
    """
    args = []
    kwargs = {}
    arg_types = []

    for ai, a in enumerate(expr.args):
        arg_ql = a.arg

        if a.sort or a.filter:
            # Wrap the argument in a statement so the per-argument
            # FILTER / ORDER BY clauses have somewhere to attach.
            arg_ql = astutils.ensure_qlstmt(arg_ql)
            if a.filter:
                arg_ql.where = astutils.extend_qlbinop(arg_ql.where, a.filter)
            if a.sort:
                arg_ql.orderby = a.sort + arg_ql.orderby

        with ctx.newscope(fenced=True) as fencectx:
            # We put on a SET OF fence preemptively in case this is
            # a SET OF arg, which we don't know yet due to polymorphic
            # matching.
            arg = setgen.scoped_set(
                dispatch.compile(arg_ql, ctx=fencectx),
                ctx=fencectx)

        if a.name:
            kwargs[a.name] = arg
            aname = a.name
        else:
            args.append(arg)
            # positional arguments are reported by index in error messages
            aname = ai

        arg_type = irutils.infer_type(arg, ctx.schema)
        if arg_type is None:
            raise errors.EdgeQLError(
                f'could not resolve the type of argument '
                f'${aname} of function {funcname}',
                context=a.context)
        arg_types.append(arg_type)

    return args, kwargs, arg_types
def fixup_param_scope(
        func: s_func.Function,
        args: typing.List[irast.Set],
        kwargs: typing.Dict[str, irast.Set], *,
        ctx: context.ContextLevel) -> None:
    """Collapse the fenced scope of every argument not bound to a SET OF
    parameter, so those arguments join the enclosing scope.

    SET OF arguments keep the fence installed by process_func_args().
    """
    varparam_kind = None

    for i, arg in enumerate(args):
        if varparam_kind is not None:
            # Arguments past the variadic parameter share its kind.
            paramkind = varparam_kind
        else:
            paramkind = func.paramkinds[i]
            if i == func.varparam:
                # remember the variadic parameter's kind for the rest
                varparam_kind = paramkind
        if paramkind != qlast.SetQualifier.SET_OF:
            arg_scope = pathctx.get_set_scope(arg, ctx=ctx)
            if arg_scope is not None:
                arg_scope.collapse()
                pathctx.assign_set_scope(arg, None, ctx=ctx)

    for name, arg in kwargs.items():
        # Keyword arguments map to parameters by name.
        i = func.paramnames.index(name)
        paramkind = func.paramkinds[i]
        if paramkind != qlast.SetQualifier.SET_OF:
            arg_scope = pathctx.get_set_scope(arg, ctx=ctx)
            if arg_scope is not None:
                arg_scope.collapse()
                pathctx.assign_set_scope(arg, None, ctx=ctx)
|
"""Module to import and decode zs2 files."""
import gzip as _gzip
import struct as _struct
# Author: <NAME>
# Copyright: Copyright 2015,2016,2017, <NAME>
# License: MIT
#####################################
#
# Python 2/3 compatibility
#
# turn byte/str/int/unicode character into ordinal value
# turn byte/str/int/unicode character into ordinal value
_ord= lambda x: x if isinstance(x,int) else ord(x)
# turn byte/str/int/unicode into unicode character(s)
_chr= lambda x: u'%c'%x if isinstance(x,int) else u'%c'%_ord(x)
# decode a byte sequence element-by-element into a unicode string
_to_string= lambda data: u''.join([_chr(elem) for elem in data])
######## convenience function
# unpack a single little-endian value of logical format code `fmt`
# (one of B/H/L/Q/b/h/l/q/d/f, translated via _fmt_map)
_unpack1= lambda fmt, data: _struct.unpack('<'+_fmt_map[fmt],data)[0]
# The I and L format characters have different sizes depending on the platform
# even if presumably fixed with '<'. E.g. windows L = 4 bytes, Travis CI L = 8 bytes
# we want:
# B=1 byte, H=2 bytes, L=4 bytes, Q=8 bytes
_fmt_size = [_struct.calcsize(key) for key in 'BHILQ']
# map the logical codes B/H/L/Q to whichever struct character actually
# has size 1/2/4/8 bytes on this platform
_fmt_map = {key:'BHILQ'[_fmt_size.index(2**idx)] for idx, key in enumerate('BHLQ')}
# lowercase codes map to the signed variants
_fmt_map.update({key.lower():_fmt_map[key].lower() for key in _fmt_map})
_fmt_map.update({'d':'d','f':'f'}) # floating point numbers
#####################################
#
# File functions
#
def load(filename, debug=False):
    """Decompress *filename* and return its contents as a bytearray stream.

    Unless *debug* is set, raises ValueError when the 0xDEADBEAF file marker
    is missing or an unexpected extended binary header is present; a stream
    shorter than 4 bytes is always rejected.
    """
    # returns a bytearray so indexing yields ints on both Py2 and Py3
    with _gzip.open(filename, 'rb') as handle:
        stream = bytearray(handle.read())
    if len(stream) < 4:
        raise ValueError('Data stream is too short.')
    if not debug:
        if not _has_file_marker(stream):
            raise ValueError('File marker is missing. Found 0x%X, expected 0xDEADBEAF.' % _unpack1('L',stream[:4]))
        if _has_extended_header(stream):
            raise ValueError('File has unexpected, extended binary header. Try processing file in debug mode.')
    return stream
#####################################
#
# Data stream functions
#
def data_stream_to_chunks(data_stream, start=0, debug=False):
    """Get all elements and associated data without decoding data beyond
    length information.  Parameter "start" is the beginning of the file
    marker.  Returns a list of [start_index, name, raw_data] entries.
    """
    # we need to find the beginning of the file.
    # (We expect 'start' to be at index 4, but it doesn't matter here.)
    if debug: return _data_stream_to_chunks_debug(data_stream, start)
    chunks=[]
    next_start = start+4 # skip byte header
    while next_start < len(data_stream):
        # get start index of this element
        start = next_start
        if _ord(data_stream[start]) == 0xFF:
            # indicator ending a 0xDD section
            chunks.append([start, None, []])
            next_start += 1
            continue
        # get element name
        # and place of continuation (either data block or next element)
        name, cont = _get_byte_str(data_stream, start)
        # skip associated data block, if any
        if cont >= len(data_stream):
            # end of file
            next_start = len(data_stream)
        else:
            # dispatch on the data-type marker byte
            data_type = _ord(data_stream[cont])
            if data_type == 0xee:
                next_start = _skip_past_data_ee(data_stream, cont)
            elif data_type == 0xaa:
                next_start = _skip_past_data_aa(data_stream, cont)
            elif data_type == 0xdd:
                next_start = _skip_past_data_dd(data_stream, cont)
            else:
                next_start = _skip_past_number_type(data_stream, cont)
                if next_start is None:
                    # presumably, that was a chunk type without data.
                    next_start = cont
        chunks.append([start, name, data_stream[cont:next_start]])
    return chunks
def _data_stream_to_chunks_debug(data_stream, start=0):
    """Heuristically split *data_stream* into [start, name, data] chunks.

    Use this function if unknown chunk types appear.  It scans for the next
    plausible chunk name instead of decoding known types, so it is not
    robust and may identify spurious chunks inside data segments.
    """
    chunks = []
    next_start = _find_next_parameter(data_stream, start)
    if next_start > 4 + start:
        # Bug fix: the original sliced with a comma
        # (data_stream[4+start,:next_start]), which is a tuple index and
        # raises TypeError on a bytearray; a plain slice is intended.
        chunks.append([4 + start, ' * extended header * ',
                       data_stream[4 + start:next_start]])
    while next_start < len(data_stream):
        start = next_start
        name, cont = _get_byte_str(data_stream, start)
        next_start = _find_next_parameter(data_stream, cont)
        chunks.append([start, name, data_stream[cont:next_start]])
    return chunks
def get_data_stream_hex_dump(data_stream, start, rows=4, bytes_per_row=16):
    """Render *rows* lines of a classic address / hex / ASCII dump starting
    at *start* and return them as one printable string."""
    end = start + rows * bytes_per_row
    lines = []
    while start < end:
        row = data_stream[start:start + bytes_per_row]
        address = u'%0.6x: ' % start
        hex_part = u' '.join(['%0.2x' % _ord(b) for b in row])
        # non-printable bytes are shown as a middle dot
        text_part = u''.join([_chr(b) if (32 <= _ord(b) <= 127) else u'\u00b7'
                              for b in row])
        lines.append(address + hex_part + u' ' * 4 + text_part)
        start += bytes_per_row
    return u'\n'.join(lines)
# ##############
def _has_file_marker(data_stream):
    """Return True when the stream begins with the 0xDEADBEAF file marker."""
    marker = _struct.pack("<" + _fmt_map['L'], 0xdeadbeaf)
    return data_stream.startswith(marker)
def _has_extended_header(data_stream):
    """Return True when the first chunk starts later than byte 4, i.e. the
    stream carries extra (unsupported) header bytes after the marker."""
    first_chunk = _find_next_parameter(data_stream, 0)
    return first_chunk > 4
def _find_next_parameter(data_stream, start):
    """Find a length byte followed by at least that many printable ASCII
    characters — a heuristic for the beginning of the next chunk.

    Use should be avoided since it may skip data.  Returns the index of the
    candidate length byte, or None when the end of the stream is reached.
    """
    is_printable = lambda x: 32 <= _ord(x) <= 127
    pos = start
    while True:
        try:
            length = _ord(data_stream[pos])
        except IndexError:
            return None
        if length != 0:
            candidate = data_stream[pos + 1:pos + 1 + length]
            if all([is_printable(char) for char in candidate]):
                return pos
        pos += 1
def _get_byte_str(data_stream, start=0):
    """Read a length-prefixed byte string (no validation).

    Returns (decoded string, index just past the string)."""
    count = _ord(data_stream[start])
    end = start + 1 + count
    return _to_string(data_stream[start + 1:end]), end
def _skip_past_data_dd(data_stream, start):
    """Skip a 0xDD chunk: one marker byte, one length byte, *length* bytes
    of payload.  Returns the index just past the chunk."""
    if _ord(data_stream[start]) != 0xDD:
        raise TypeError('Unexpected block format for 0xDD at 0x%x.' % (start))
    payload = _ord(data_stream[start + 1])
    return start + 2 + payload
def _skip_past_data_aa(data_stream, start):
    """Minimal validation and skip past a 0xAA (UTF-16 string) chunk.

    Layout: marker byte, 4-byte length word with the string bit (bit 31)
    set, then 2 bytes per character."""
    marker = _ord(data_stream[start])
    if marker != 0xAA or not _is_bit31_set(data_stream, start + 1):
        raise TypeError('Unexpected block format for 0xAA (0x%x) with length and string marker 0x%08x at 0x%x.' % (
            marker, _unpack1('L',data_stream[start+1:start+5]), start))
    char_count = _unpack1('L',data_stream[start+1:start+5]) & 0x7FFFFFFF
    # two bytes per UTF-16 code unit
    return start + 5 + char_count * 2
def _skip_past_data_ee(data_stream, start):
    """Validate and skip past a 0xEE (typed list) chunk.

    Layout: marker byte, 2-byte element type, 4-byte entry count, then
    entries of a type-dependent size."""
    if _ord(data_stream[start]) != 0xEE:
        raise TypeError('Unexpected block format for 0xEE at 0x%x.' % (start))
    data_type = _unpack1('H',data_stream[start+1:start+3])
    # per-type element sizes; 0x00 denotes an (expected-empty) list
    sizes = {0x11: 1, 0x04: 4, 0x05: 8, 0x16: 4, 0x00: 0}
    if data_type not in sizes:
        raise TypeError('Unknown data type 0x%02x in block 0xEE at 0x%x.' % (data_type, start))
    byte_length = sizes[data_type]
    data_entries = _unpack1('L',data_stream[start+3:start+7])
    if data_type == 0x00 and data_entries != 0:
        raise ValueError('Expected empty list with data type EE-00 but found list of %i entries at 0x%x.' % (data_entries, start))
    return start + 7 + data_entries * byte_length
def _skip_past_number_type(data_stream, start):
    """Return the index past a numeric chunk, or None when the type
    byte at *start* is not a known number type.

    Returning None (instead of raising) lets callers use this function
    to probe whether the data at *start* is a number at all.
    """
    widths = {
        0x11: 4, 0x22: 4, 0x33: 4, 0x44: 4,
        0x55: 2, 0x66: 2, 0x88: 1, 0x99: 1,
        0xbb: 4, 0xcc: 8,
    }
    width = widths.get(_ord(data_stream[start]))
    return None if width is None else start + 1 + width
def _is_bit31_set(data_stream, start=0):
    """Return True when bit 31 of the little-endian 32-bit word at
    *start* is set (used as the string marker in length words)."""
    word = _unpack1('L', data_stream[start:start + 4])
    return bool(word & 0x80000000)
def _get_unicode_string(data_stream, start=0, check_string_marker=True):
    """Try to read one unicode string at *start*.

    Strings are stored as a 32-bit length word (bit 31 acts as the
    string marker when *check_string_marker* is True) followed by one
    16-bit code unit per character.

    Returns a tuple of (string or None, index-after-string); on any
    failure the original *start* index is returned unchanged.
    """
    # need at least the 4-byte length word
    if len(data_stream)-start<4: return None, start
    if check_string_marker and not _is_bit31_set(data_stream, start): return None, start
    chars, cont = _get_data_list(data_stream, 2, start, raise_error_if_string=False)
    if chars is None: return None, start # probably not enough data for string
    return u''.join([_chr(_unpack1('H',char)) for char in chars]), cont
def _get_data_list(data_stream, item_length, start, raise_error_if_string=True):
    """Try to read a length-prefixed list of fixed-size items.

    Layout: a 32-bit count (bit 31 is the string marker and is masked
    off), followed by count items of *item_length* bytes each. A
    negative *item_length* means each list element is instead a bundle
    of (-item_length) length-prefixed unicode strings.

    Returns (list-of-byte-slices, index-after-list) on success, or
    (None, start) when there is not enough data. Raises ValueError if
    *raise_error_if_string* is set and the string marker bit is present.
    """
    if 4 + start > len(data_stream):
        return None, start  # not enough bytes for the length word
    length = _unpack1('L', data_stream[start:start + 4]) & 0x7FFFFFFF
    if raise_error_if_string and _is_bit31_set(data_stream, start):
        raise ValueError('List of expected item size %i has string marker set.' % (item_length))
    # BUGFIX/cleanup: the original re-masked `length` with 0x7FFFFFFF a
    # second time here; the count is already masked above, so the
    # redundant statement was removed.
    unit_length = item_length if item_length > 0 else 4 * (-item_length)
    if 4 + length * unit_length + start > len(data_stream):
        return None, start  # payload would run past the end of the stream
    if item_length >= 0:
        items = [data_stream[start + 4 + item_length * i:start + 4 + item_length * (i + 1)]
                 for i in range(length)]
        return items, start + 4 + length * item_length
    # item_length < 0: each element bundles several unicode strings
    string_count = (-item_length)
    data_start = start + 4
    data_end = data_start
    error = False
    strings = []
    for j in range(length):
        start_item = data_end
        for i in range(string_count):
            string, data_end = _get_unicode_string(data_stream, data_end)
            error = error or (string is None)
        strings.append(data_stream[start_item:data_end])
    if error:
        return None, start
    return strings, data_end
#####################################
#
# Chunk (data) functions
#
def parse_chunks(chunks, level=None, debug=False):
    """Dispatch function to parse chunk data at increasing levels of
    detail (default: maximum level, currently 3).

    Note that the format of level 3 is subject to change in the
    future. Set *debug* to True to disable most sanity checks and
    interpret as much as possible; this may return spurious chunks.
    """
    level = level or 3
    parsed = _parse_chunk_types(chunks)  # level 1: raw type decoding
    if level >= 2:
        # level 2: EE04, EE16, but return raw data for EE11
        parsed = _parse_chunk_ee_subtypes(parsed, debug)
    if level >= 3:
        # level 3: record decoding inside EE11 chunks
        parsed = _parse_chunk_ee11_data_records(parsed, debug)
    return parsed
def _parse_chunk_types(chunks):
    """Decode element data (level 1).

    Each input chunk is (address, name, raw_data); the output is a
    list of [address, name, type_code, data]. A chunk whose name is
    None is an end-of-nesting marker and is emitted with name '' and
    type_code 'end'.
    """
    # map the leading type byte of the payload to its decoder function
    dispatch={
        0x11: _parse_data_11,
        0x22: _parse_data_22,
        0x33: _parse_data_33,
        0x44: _parse_data_44,
        0x55: _parse_data_55,
        0x66: _parse_data_66,
        0x88: _parse_data_88,
        0x99: _parse_data_99,
        0xaa: _parse_data_aa,
        0xbb: _parse_data_bb,
        0xcc: _parse_data_cc,
        0xdd: _parse_data_dd,
        0xee: _parse_data_ee, # NB: break out sub-types from this function
    }
    out = []
    for chunk in chunks:
        address, name, raw_data = chunk
        if name is not None:
            # normal chunk
            if len(raw_data)>0:
                data_type = _ord(raw_data[0])
                data, type_code = dispatch[data_type](raw_data)
            else:
                # empty payload, e.g., txutil.TUnit, CTSingleGroupDataBlock
                type_code = ''
                data = []
        else:
            # "end" marker
            # each 0xFF ends one level of nesting
            # started by a 0xDD chunk
            # now we can give it a name (that would, technically, be legal (ie. 0x00))
            name, type_code, data = '', 'end', []
        out.append([address, name, type_code, data])
    return out
def _parse_chunk_ee_subtypes(chunks, debug=False):
    """Check all chunks and extract lists for data type EE (level 2).

    Takes level-1 chunks of [address, name, type_code, data] and
    replaces every 'EE' entry with its decoded list; other chunks are
    passed through untouched.
    """
    result = chunks[:]
    for index, chunk in enumerate(chunks):
        address, name, data_type, data = chunk
        if data_type !=u'EE': continue
        try: interpreted_data, type_code = _parse_data_ee_subtypes(data, debug)
        except KeyError:
            # dump the offending chunk as a debugging aid, then re-raise
            print('Address: 0x%X' % address)
            print(get_data_stream_hex_dump(data,0))
            raise
        result[index] = [address, name, type_code, interpreted_data]
    return result
def _parse_chunk_ee11_data_records(chunks, debug=False):
    """Check all chunks and extract records for data type EE11 (level 3).

    'QS_*' and 'Entry' records have dedicated decoders; any other
    EE11 chunk name raises ValueError unless *debug* is set, in which
    case the raw data is passed through unchanged.
    """
    result = chunks[:]
    for index, chunk in enumerate(chunks):
        address, name, data_type, data = chunk
        if data_type !=u'EE11': continue
        if name.startswith(u'QS_'):
            interpreted_data, type_code = _parse_record_data_ee11_formats_QS(name, data, debug)
        elif name==u'Entry':
            interpreted_data, type_code = _parse_record_data_ee11_formats_Entry(data, debug)
        else:
            if not debug: raise ValueError('Unknown data records for chunk type "%s".' % name)
            # in debug mode: copy data verbatim
            interpreted_data, type_code = data[:], u'EE11'
        result[index] = [address, name, type_code, interpreted_data]
    return result
#################
#
# parse data types
#
# Each _parse_data_XX decoder maps a raw chunk payload (type byte
# followed by data) to (value, type_code), or (None, None) when the
# leading type byte does not match.
_parse_data_11 = lambda data: (_unpack1('l',data[1:]),'11') if _ord(data[0])==0x11 else (None, None) # hex number (e.g. color), also can be 0xffffffff for -1 (n.d.), or counter number (decimal), signed index (-1 for not defined)
_parse_data_22 = lambda data: (_unpack1('L',data[1:]),'22') if _ord(data[0])==0x22 else (None, None) # Value
_parse_data_33 = lambda data: (_unpack1('l', data[1:]),'33') if _ord(data[0])==0x33 else (None, None) # prob int -- screen coords, also sometimes negative
_parse_data_44 = lambda data: (_unpack1('L', data[1:]),'44') if _ord(data[0])==0x44 else (None, None) # int 'capabilities', or 0/1 values, color
_parse_data_55 = lambda data: (_unpack1('h', data[1:]),'55') if _ord(data[0])==0x55 else (None, None) # only Flag: 0x0 or 0xffff
_parse_data_66 = lambda data: (_unpack1('H', data[1:]),'66') if _ord(data[0])==0x66 else (None, None) # (ID numbers, basically counters)
_parse_data_88 = lambda data: (_ord(data[1:]),'88') if _ord(data[0])==0x88 else (None, None) # non-binary flag, signed
_parse_data_99 = lambda data: (_ord(data[1:]) != 0,'99') if _ord(data[0])==0x99 else (None, None) # binary flag
_parse_data_aa = lambda data: (_get_unicode_string(data,1)[0],'AA') if _ord(data[0])==0xAA else (None, None)
_parse_data_bb = lambda data: (_s2d(_unpack1('f', data[1:])),'BB') if _ord(data[0])==0xBB else (None, None) # ProgVersion, Percent
_parse_data_cc = lambda data: (_unpack1('d', data[1:]),'CC') if _ord(data[0])==0xCC else (None, None) # ok
# BUGFIX: an earlier duplicate definition of _parse_data_dd (returning
# len(data[1:]), the zero-byte count) was immediately shadowed by the
# definition below and has been removed as dead code.
_parse_data_dd = lambda data: (_get_byte_str(data[1:])[0],'DD') if _ord(data[0])==0xDD else (None, None) # byte-encoded string
_parse_data_ee = lambda data: (data[1:],'EE') if _ord(data[0])==0xEE else (None, None) # raw payload; decoded later by _parse_data_ee_subtypes
########################
#
# parse EE sub-types
#
def _parse_data_ee_subtypes(data, debug=False):
    """Parse known EE sub-types and be particularly lenient in debug mode.

    *data* is the chunk payload after the 0xEE marker: a 16-bit
    sub-type, a 32-bit entry count, then the list payload. In debug
    mode, "-debug" may be appended to the type code, or unparsed data
    with type code "EE" may be returned.

    Raises KeyError for unknown sub-types (non-debug mode; caught and
    reported upstream) and ValueError on payload length mismatches.
    """
    sub_type = _unpack1('H', data[:2])
    byte_lengths = {0x11: 1, 0x04: 4, 0x05: 8, 0x16: 4, 0x00: 0}
    if (sub_type not in byte_lengths) and debug:
        return data[:], 'EE'  # simply return un-interpreted
    type_code = u'EE%0.2X' % (sub_type)
    byte_length = byte_lengths[sub_type]  # KeyError here flags unknown sub-types
    entries = _unpack1('L', data[2:6])
    # perform sanity check on length of data
    # (header is 6 bytes: 2-byte sub-type + 4-byte entry count)
    expected_data_length = byte_length * entries
    if not debug and (len(data) > expected_data_length + 6):
        # BUGFIX: surplus bytes start at offset 6+expected_data_length;
        # the original sliced from 7+..., hiding the first extra byte
        # in the error message.
        raise ValueError('Too much data in %s: %s' % (
            repr(data), repr(data[6 + expected_data_length:])))
    if len(data) < expected_data_length + 6:
        if debug:
            return data[:], 'EE'
        raise ValueError('Not enough data in %s: expected %i bytes for %i entries, got %i bytes.' %
                         (repr(data), expected_data_length, entries, len(data) - 6))
    # get list elements (count word sits at offset 2)
    items, cont = _get_data_list(data, byte_length, 2)
    extra_data = data[cont:]
    if sub_type == 0x04:    # 0x0004 is single precision floats
        interpreted_data = [_s2d(_unpack1('f', item)) for item in items]
    elif sub_type == 0x05:  # 0x0005 is double precision float
        interpreted_data = [_unpack1('d', item) for item in items]
    elif sub_type == 0x16:  # unsigned 32-bit integers
        interpreted_data = [_unpack1('L', item) for item in items]
    elif sub_type == 0x11:  # unsigned bytes
        interpreted_data = [_unpack1('B', item) for item in items]
    elif sub_type == 0x00:
        # probably always an empty list. If not,
        # we return a list of item-length b'', with any data as extra_data
        interpreted_data = items
    else:
        raise ValueError('Unknown data format or sub-type code 0x%x for EE' % sub_type)
    if len(extra_data) > 0:
        # flag trailing bytes by bundling them and marking the type code
        interpreted_data, type_code = [interpreted_data, extra_data], type_code + '-debug'
    return interpreted_data, type_code
#####################
#
# parse EE11 data records
#
# format string:
# Byte unsigned B signed b
# Word unsigned H signed h
# Long unsigned L signed l
# single f
# double d
# list of (tuples): (...)
# string S
#
# shorthand:
# 2S2(LH) expands to SS(LH)(LH)
# 2S2(LH)B=3 evaluates successfully if the last value is equal to 3
# B=3:BH=4 evaluates successfully if B==3 AND the H==4
# B=3:BH=4:BHS will evaluate data as BHS only if B==3 and H==4
def expand_format(fmt):
    """Expand repeat counts in a format string into plain repetition.

    '2S2(LH)' becomes 'SS(LH)(LH)'; bracketed groups are expanded
    recursively. Raises ValueError for unbalanced brackets or a
    closing bracket that precedes the first opening one.
    """
    # this function may be of broader interest to those interpreting
    # format strings and data
    if fmt.count('(') != fmt.count(')'):
        raise ValueError('Brackets do not balance in %r' % fmt)
    if fmt.find(')') < fmt.find('('):
        raise ValueError('Closing bracket before opening in %r' % fmt)
    pieces = []
    idx = 0
    repeat = None
    while idx < len(fmt):
        char = fmt[idx]
        if char in '0123456789':
            # accumulate a (possibly multi-digit) repeat count
            repeat = (repeat or 0) * 10 + int(char)
            idx += 1
            continue
        if char == '(':
            # locate the matching closing bracket, honouring nesting
            depth = 0
            end = idx + 1
            while fmt[end] != ')' or depth > 0:
                if fmt[end] == '(':
                    depth += 1
                elif fmt[end] == ')':
                    depth -= 1
                end += 1
            token = '(%s)' % expand_format(fmt[idx + 1:end])
            idx = end + 1
        else:
            token = char
            idx += 1
        pieces.append(token * (1 if repeat is None else repeat))
        repeat = None
    return ''.join(pieces)
def _get_next_token_compact(fmt, fmt_idx):
    """Helper for _compact_format(): return the next token starting at
    *fmt_idx* -- a single character, or a whole bracketed group which
    is compacted on the fly -- together with the index just past it."""
    if fmt[fmt_idx] != '(':
        return fmt[fmt_idx], fmt_idx + 1
    depth = 0
    close_idx = fmt_idx + 1
    # scan for the matching closing bracket, honouring nesting
    while fmt[close_idx] != ')' or depth > 0:
        if fmt[close_idx] == '(':
            depth += 1
        elif fmt[close_idx] == ')':
            depth -= 1
        close_idx += 1
    return '(%s)' % _compact_format(fmt[fmt_idx + 1:close_idx]), close_idx + 1
def _compact_format(fmt):
    """Consolidate repeated tokens into their number followed by a single token,
    also works with lists and nested lists (inverse of expand_format).

    Example: 'SS(LH)(LH)' -> '2S2(LH)'.
    """
    if not len(fmt): return fmt
    if fmt.count('(') != fmt.count(')'): raise ValueError('Brackets do not balance in %r' % fmt)
    if fmt.find(')') < fmt.find('('): raise ValueError('Closing bracket before opening in %r' % fmt)
    # ensure format is in its expanded form
    if any(check in fmt for check in '0123456789'): fmt = expand_format(fmt)
    out, fmt_idx, times, last_token = '', 0, None, ''
    while fmt_idx < len(fmt):
        token, next_idx = _get_next_token_compact(fmt, fmt_idx)
        if (token == last_token) or (last_token == ''):
            # same run continues (or this is the very first token)
            times = (times or 0) + 1
        else:
            # run ended: flush the previous token with its repeat count
            if (times or 0) > 1:
                out += '%i%s' % (times, last_token)
            else:
                out += '%s' % (last_token)
            times = 1
        fmt_idx = next_idx
        last_token = token
    # flush the final run
    if (times or 0) > 1:
        out += '%i%s' % (times, token)
    else:
        out += '%s' % (token)
    return out
def _parse_data_by_format_helper(fmt, data, strict_unsigned = None):
    """This is the core function for EE11 format string interpretation.

    Walks the (expanded) format string *fmt* over *data* and returns a
    tuple of (success, parsed_fmt, parsed_data, data_idx), where
    data_idx points at the first unconsumed byte.

    Tokens: struct codes (B/H/L/f/d, ...), 'S' for a unicode string,
    '(sub)' for a count-prefixed list of sub-format records, '.' to
    interpret the remainder as bytes, and '*' for the heuristic
    string-or-byte scan. When *strict_unsigned* is False, an all-ones
    unsigned value is reported as -1 and its token is lower-cased in
    parsed_fmt to indicate the signed reinterpretation.
    """
    # returns data_idx rather than residual data[data_idx:]
    strict_unsigned = True if strict_unsigned is None else strict_unsigned
    fmt_idx, data_idx = 0, 0
    parsed_fmt, parsed_data = '', []
    while fmt_idx < len(fmt):
        token = fmt[fmt_idx]
        if token == '.':
            # interpret everything remaining as bytes
            char = 'B'
            length = _struct.calcsize(char)
            new_data = [_unpack1(char,data[idx:idx+length]) for idx in range(data_idx, len(data))]
            parsed_data += new_data
            parsed_fmt += char*len(new_data)
            data_idx = len(data)
            fmt_idx += 1
        elif token == '*':
            # use heuristic to interpret remaining bytes as
            # string or bytes
            _, fmt_tail, data_tail, _ = _parse_heuristic_string_byte(data[data_idx:])
            parsed_data += data_tail
            parsed_fmt += fmt_tail
            data_idx = len(data)
            fmt_idx += 1
        elif token == 'S': # string
            string, cont_idx = _get_unicode_string(data, data_idx, check_string_marker=True)
            if string is None:
                return False, parsed_fmt, parsed_data, data_idx
            parsed_data.append(string)
            parsed_fmt += token
            data_idx = cont_idx
            fmt_idx += 1
        elif token == '(': # list
            # find the matching closing bracket of the sub-format
            closing_idx, nesting = fmt_idx+1, 0
            while fmt[closing_idx] != ')' or nesting > 0:
                if fmt[closing_idx] == ')': nesting -= 1
                elif fmt[closing_idx] == '(': nesting += 1
                closing_idx += 1
            sub_fmt = fmt[fmt_idx+1:closing_idx]
            try:
                # lists are prefixed with a 32-bit element count
                count = _unpack1('L', data[data_idx:data_idx+4])
            except _struct.error:
                return False, parsed_fmt, parsed_data, data_idx
            list_data = []
            list_data_idx = data_idx+4
            for run in range(count):
                success, new_fmt, new_parsed_data, new_data_idx = (
                    _parse_data_by_format_helper(sub_fmt, data[list_data_idx:], strict_unsigned=strict_unsigned))
                if success:
                    # flatten one-tuples to elements
                    if len(new_parsed_data) == 1: new_parsed_data = new_parsed_data[0]
                    list_data.append(new_parsed_data)
                    list_data_idx += new_data_idx
                else:
                    # return state we had before entering the list
                    return False, parsed_fmt, parsed_data, data_idx
            parsed_data.append(list_data)
            data_idx = list_data_idx
            parsed_fmt += '(%s)' % sub_fmt
            fmt_idx = closing_idx + 1
        else:
            # plain struct-coded number
            byte_length = _struct.calcsize(_fmt_map[token])
            try:
                number_raw = _unpack1(token, data[data_idx:data_idx+byte_length])
            except _struct.error:
                return False, parsed_fmt, parsed_data, data_idx
            if not strict_unsigned and token not in 'fd':
                # check if we have to overwrite the highest unsigned number
                # with signed '-1' (since this is typically a flag)
                max_value = 2**(8*byte_length)-1
                if number_raw == max_value:
                    number = -1
                    parsed_fmt += token.lower() # indicate signed
                else:
                    number = number_raw
                    parsed_fmt += token
            elif token == 'f':
                # single precision: recover the shortest equivalent double
                number = _single_as_double(number_raw)
                parsed_fmt += token
            else:
                number = number_raw
                parsed_fmt += token
            parsed_data.append(number)
            data_idx += byte_length
            fmt_idx += 1
    return True, parsed_fmt, parsed_data, data_idx
def _parse_data_by_format(fmt, data, strict_unsigned = None):
    """Entry point for the lowest level (level 1) of data parsing.

    Expands *fmt* when it still contains repeat counts, runs the
    format helper, and returns (success, compacted parsed format,
    parsed data, residual bytes). Success is True only when the
    entire format string could be parsed.
    """
    if any(digit in fmt for digit in '0123456789'):
        fmt = expand_format(fmt)
    success, parsed_fmt, parsed_data, consumed = _parse_data_by_format_helper(
        fmt, data, strict_unsigned=strict_unsigned)
    return success, _compact_format(parsed_fmt), parsed_data, data[consumed:]
def _parse_heuristic_string_byte(data):
    """Greedily interpret *data* as a mix of unicode strings and raw bytes.

    At each position a marker-checked unicode string is attempted
    first; on failure a single byte is consumed instead. Returns the
    same 4-tuple shape as the format helpers:
    (True, format string, values, empty residual).
    """
    values, fmt, idx = [], '', 0
    while idx < len(data):
        string, next_idx = _get_unicode_string(data, idx, check_string_marker=True)
        if string is None:
            values.append(_unpack1('B', data[idx:idx + 1]))
            fmt += 'B'
            idx += 1
        else:
            values.append(string)
            fmt += 'S'
            idx = next_idx
    return True, fmt, values, bytearray()
def _parse_data_by_expression(expr, data, strict_unsigned = None):
    """Evaluate a single parser expression (level 2 of the algorithm).

    An expression is a format string optionally followed by '=value';
    with '=value' the expression only succeeds when the last parsed
    element equals that value ('=' with nothing after it matches
    anything). Returns (success, parsed_fmt, parsed_data, residual).
    """
    fmt = expr.split('=',1)[0]
    l1_success, parsed_fmt, parsed_data, residual = _parse_data_by_format(fmt, data, strict_unsigned = strict_unsigned)
    if '=' not in expr:
        # success means that the string has been parsed on full
        success = l1_success and len(residual) == 0
    else:
        expected = expr.split('=',1)[1]
        if expected == '':
            # matches anything
            success = l1_success
        else:
            # last parameter parsed is equal to whatever is specified
            try:
                # cast to appropriate type
                # NB: eval enables us to test lists
                # SECURITY NOTE: eval() executes the text after '=';
                # expressions must come from trusted, hard-coded
                # grammars only, never from the parsed file itself.
                compared = type(parsed_data[-1])(eval(expected))
            except (ValueError, TypeError):
                # wrong type
                compared = None
            success = l1_success and compared is not None and compared == parsed_data[-1]
    return success, parsed_fmt, parsed_data, residual
def _parse_record(grammar, data, strict_unsigned = None):
    """Evaluate a data record according to the given grammar.

    Main entry point (level 3) for parsing of data in EE11 records.
    *grammar* is either a chain (expressions joined with ':', ALL of
    which must evaluate successfully) or a list/tuple of chains, in
    which case the result of the first successful chain is returned
    (or that of the last attempted chain when none succeed).

    Returns (success, parsed_fmt, parsed_data, residual).
    """
    if isinstance(grammar,(list,tuple)):
        # within a list of chains, return result of first chain
        # that evaluates successfully
        # ROBUSTNESS FIX: an empty grammar sequence previously left
        # `result` unbound and raised UnboundLocalError; report an
        # explicit failure instead
        result = (False, '', [], data)
        for chain in grammar:
            result = _parse_record(chain, data, strict_unsigned = strict_unsigned)
            if result[0]: break # first chain that evaluates successfully
    else:
        # within a chain, ALL expressions have to evaluate successfully
        for expr in grammar.split(':'):
            success, parsed_fmt, parsed_data, residual = (
                _parse_data_by_expression(expr, data,
                                          strict_unsigned=strict_unsigned))
            if not success:
                break
        result = success, parsed_fmt, parsed_data, residual
    return result
def _parse_record_data_ee11_formats_QS(name, data, debug=False):
    """Decode the EE11 data record of a 'QS_*' chunk.

    Looks up the grammar chain list for *name* (falling back to the
    heuristic '*' grammar for unknown names) and parses *data* with
    it; a trailing '*' chain guarantees that parsing always succeeds.
    Returns (parsed_data, type_code) where the type code embeds the
    compacted format actually used.
    """
    # grammar chains per chunk name; each entry is a list of chains
    # tried in order (see the format-string notes above)
    fmt={'QS_Par':['B=1:B4B'],
         'QS_ValPar':['B=1:BdSH9B'],
         'QS_TextPar':['B=1:B4S'],
         'QS_SelPar':['B=2:BL(L)4S'],
         'QS_ValArrPar':['B=2:BSHB(L)'],
         'QS_ValArrParElem':['B=2:B(Ld)'],
         'QS_ArrPar':['B=2:B(L)B'],
         'QS_ParProp':['B=7:B9BH9S3H5SL=0:B9BH9S3H5SL2HBS4B',
                       'B=7:B9BH9S3H5SL=2:B9BH9S3H5SL2HBLS4B',
                       'B=8:B9BH*'],
         'QS_ValProp':['B=1:B4B'],
         'QS_TextProp':['B=1:B8B'],
         'QS_SelProp':['B=4:B3B2(4S)2(S)(H)(L)(S)','B=4:B3B',
                       'B=5:B3B2(4S)2(S)(H)(L)(S)B','B=5:B4B'],
         'QS_ValArrParProp':['B=2:B4BH4B'],
         'QS_SkalProp':['B=2:B2S2B'],
         'QS_ValSetting':['B=2:B2SLS3BH2B(H)(S)11B'],
         'QS_NumFmt':['B=2:B4Bd'],
         'QS_Plaus':['B=1:B9B6BH6BH6B'],
         'QS_Tol':['B=1:B9B6BH6BH3B'],
         }
    grammar = fmt.get(name,'*') # get specific grammar or use default
    # if all fails, ensure success through linear heuristic interpretation
    # (the default is the string '*', whose last character is already
    # '*', so only grammar lists are actually extended here)
    if grammar[-1] != '*': grammar.append('*')
    success, parsed_fmt, parsed_data, residual = _parse_record(grammar, bytearray(data), strict_unsigned=False)
    if not success or len(residual):
        # this should never be reached as long as we parse with '*' or '.'
        raise ValueError('Unexpected parse error of EE11 for %r with %r' % (name, data))
    if debug:
        # Raise awareness of application of heuristics
        actual = expand_format(parsed_fmt)
        for option in grammar:
            requested = expand_format(option.split(':')[-1])
            if actual == requested: break
        else:
            print('Applied heuristic format %s for %s with %s' %
                  (parsed_fmt, name, repr(data)[:200]+('...' if len(repr(data))>200 else '')))
    return parsed_data, (('EE11-%s' % parsed_fmt) if len(parsed_fmt) else 'EE11')
def _parse_record_data_ee11_formats_Entry(data, debug=False):
    """Provisional decoder for Entry record format.
    Note that output format is subject to change.

    Returns (parsed_data, type_code) in the same shape as the QS_
    decoder; raises ValueError when the heuristic parse fails.
    """
    # temporary solution that produces a format string consistent
    # with QS_ chunks. However, we can do better than this.
    success, parsed_fmt, parsed_data, residual = _parse_record('*', bytearray(data), strict_unsigned=True)
    if not success or len(residual):
        raise ValueError('Internal parser error in <Entry>: %r' % data)
    return parsed_data, u'EE11-%s' % parsed_fmt
    ##############################################################
    # NOTE(review): everything below is UNREACHABLE -- it is the old
    # implementation kept behind the unconditional return above.
    # Preserved verbatim for reference; candidate for deletion.
    if (len(data)<1) or _ord(data[0]) != 0x02: return data, 'EE11' # unknown format
    data = bytearray(data)
    format_string = []
    line = []
    start = 0
    # get sub-type
    sub_type = _ord(data[start]) # this is 0x02
    start += 1
    format_string.append('B')
    # next, there will be the ERFC and a 3-tuple of bytes
    for i in range(4):
        line.append(_ord(data[start]))
        start += 1
        format_string.append('B')
    while start < len(data):
        string, start = _get_unicode_string(data, start=start, check_string_marker=True)
        if string is not None: # found a string
            line.append(string)
            format_string.append('S')
            continue
        numbers, cont, fmt = _get_prefixed_data(data, start)
        if numbers is not None:
            if _next_is_prefixed_data_or_string(data, cont):
                line += numbers
                start = cont
                format_string.append(fmt)
                continue
        if _next_is_prefixed_data_or_string(data, start+4) and (len(data)-start>=4):
            line += list(_struct.unpack('<HH',data[start:start+4]))
            start += 4
            format_string.append('HH')
            continue
        if _next_is_prefixed_data_or_string(data, start+2) and (len(data)-start>=2):
            line += list(_struct.unpack('<BB',data[start:start+2]))
            start += 2
            format_string.append('BB')
            continue
        line += list(_struct.unpack('<B',data[start:start+1]))
        start += 1
        format_string.append('B')
    return [sub_type]+line, u'EE11-%s' % (u'B'+u''.join(format_string))
def _get_prefixed_data(data, start):
    """Get a list of numbers introduced by a type prefix specific to the
    Entry record.

    Known prefixes: 0x07 double, 0x64 32-bit integer, 0x01 four bytes,
    0x04 single byte. Returns (values, index-after-data, format tag),
    or (None, start, None) when no known prefix with sufficient data
    follows.
    """
    if len(data)-start < 2: return None, start, None
    prefix = _ord(data[start])
    value, cont, fmt = None, start, None
    if (prefix ==0x07) and (len(data)-start>=9):
        value, cont, fmt = [_unpack1('d',data[start+1:start+9]),], start+9, 'd'
    elif (prefix ==0x64) and (len(data)-start>=5):
        # NOTE(review): unpacked as unsigned ('L') but tagged 'l'
        # (signed) in the format string -- confirm which is intended
        value, cont, fmt = [_unpack1('L',data[start+1:start+5]),], start+5, 'l'
    elif (prefix ==0x01) and (len(data)-start>=5):
        value, cont , fmt = list(_struct.unpack('<BBBB',data[start+1:start+5])), start+5, 'bbbb'
    elif (prefix ==0x04) and (len(data)-start>=2):
        value, cont, fmt = [_ord(data[start+1]),], start+2, '1'
    return value, cont, fmt
def _next_is_prefixed_data_or_string(data, start):
    """Return True when *start* sits at the end of the data, at a
    marker-checked unicode string, or at known prefixed data."""
    if start >= len(data):
        return True
    string, _ = _get_unicode_string(data, start=start, check_string_marker=True)
    if string is not None:
        return True
    numbers, _, _ = _get_prefixed_data(data, start)
    return numbers is not None
###########################################
#
# Single precision conversion
# helper function
#
def _single_as_double(presumed_single):
    """Convert a double-precision number containing a single-precision value
    into a double-precision number with the shortest decimal
    representation that still has the same single-precision value.
    Example: struct.unpack('<f',...) of the single-precision representation
    of 0.1 returns 0.10000000149011612. The current function
    converts the latter value 'back' to 0.1. Note that there
    are cases where the solution is not unique.

    Raises ValueError when the search fails to reproduce the input's
    single-precision bit pattern.
    """
    # helper functions
    def tup2int_str(tup):
        """Return positive integer part of tuple (or '0')"""
        if tup[2]<=0:
            return ''.join('%i' % v for v in tup[1][:max((0,len(tup[1])+tup[2]))]) or '0'
        else:
            return ''.join('%i' % v for v in tup[1])+('0'*tup[2])
    def tup2frac_str(tup):
        """Return positive fractional part of tuple (or '')"""
        return '0'*(-len(tup[1])-tup[2])+''.join('%i' % v for v in tup[1][max((0,len(tup[1])+tup[2])):])
    def add_one_in_last(int_str, frac_str):
        """Add 1 to least significant digit in fractional part (or to integer)"""
        if frac_str == '': return (str(int(int_str)+1), '')
        new_frac = ('%%0%ii' % len(frac_str)) % (int(frac_str)+1)
        # digits overflowing the fractional width carry into the integer
        carry = new_frac[:len(new_frac)-len(frac_str)] or '0'
        return (str(int(int_str)+int(carry)), new_frac[len(new_frac)-len(frac_str):])
    def equal(current, expected):
        """Test expectation, treat overflow as regular failure"""
        try:
            return _struct.pack('<f',current) == expected
        except OverflowError:
            return False
    # NaN is the only value that compares unequal to itself
    if presumed_single != presumed_single:
        return presumed_single # NaN
    if presumed_single in (float('inf'), float('-inf'), float('-0')):
        return presumed_single
    # work with positive numbers, recover sign at the end
    value = abs(presumed_single)
    try:
        required = _struct.pack('<f', value) # this is what we want to maintain
    except OverflowError:
        # value exceeds limit of single-precision floats
        # this function should not have been called in the first place
        # --> fail with debug info
        print('Attempted to interpret %r as single.' % presumed_single)
        raise
    # turn float into tuple of format decimal.Decimal().as_tuple()
    # limit to 9 significant digits in keeping with single-precision resolution
    test = (int(presumed_single<0.), [int(v) for v in ('%.9e' % value).split('e')[0].replace('.','')],
            int(('%.9e' % value).split('e')[1])-9)
    # decompose tuple into string components
    integer = tup2int_str(test)
    fraction = tup2frac_str(test).rstrip('0')
    good = (integer, fraction) # last known correct value
    while fraction:
        # round down by truncation and see if we're still good
        fraction = fraction[:-1]
        if not equal(float(integer+'.'+fraction), required):
            # rounding down didn't work, so try
            # rounding up (i.e., add one to truncated number)
            integer, fraction = add_one_in_last(integer, fraction)
            if not equal(float(integer+'.'+fraction), required):
                # rounding up didn't help either --> we're done
                break
        # new best result
        good = (integer, fraction.rstrip('0'))
    result = float('.'.join(good)) * (-1 if presumed_single<0. else 1)
    # confirm we're good:
    if _struct.pack('<f', result) != _struct.pack('<f', presumed_single):
        raise ValueError('Failed interpretation of %r, obtained %r.' % (
            presumed_single, result))
    return result
# short alias used throughout the chunk decoders
_s2d = _single_as_double
|
<gh_stars>0
#!/usr/bin/env python
import xml.etree.ElementTree as ETree
import numpy as np
import pandas as pd
import pytest
from unify_idents.engine_parsers.ident.xtandem_alanine import (
XTandemAlanine_Parser,
_get_single_spec_df,
)
def test_engine_parsers_xtandem_init():
    """Smoke test: the parser can be constructed from a result file."""
    xml_path = (
        pytest._test_path / "data" / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml"
    )
    modifications = [
        {"aa": "M", "type": "opt", "position": "any", "name": "Oxidation"},
        {"aa": "C", "type": "fix", "position": "any", "name": "Carbamidomethyl"},
        {"aa": "*", "type": "opt", "position": "Prot-N-term", "name": "Acetyl"},
    ]
    XTandemAlanine_Parser(
        xml_path,
        params={
            "cpus": 2,
            "enzyme": "(?<=[KR])(?![P])",
            "terminal_cleavage_site_integrity": "any",
            "validation_score_field": {"xtandem_alanine": "x!tandem:hyperscore"},
            "bigger_scores_better": {"xtandem_alanine": True},
            "modifications": modifications,
        },
    )
def test_engine_parsers_xtandem_file_matches_xtandem_parser():
    """An X!Tandem result file must be accepted by the parser."""
    xml_path = (
        pytest._test_path / "data" / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml"
    )
    assert XTandemAlanine_Parser.check_parser_compatibility(xml_path) is True
def test_engine_parsers_msfragger_file_not_matches_xtandem_parser():
    """A non-X!Tandem (MSFragger) result file must be rejected."""
    tsv_path = (
        pytest._test_path / "data" / "test_Creinhardtii_QE_pH11_msfragger_3.tsv"
    )
    assert XTandemAlanine_Parser.check_parser_compatibility(tsv_path) is False
def test_engine_parsers_xtandem_check_dataframe_integrity():
    """The unified dataframe must contain the expected values and mod counts."""
    data_dir = pytest._test_path / "data"
    modifications = [
        {"aa": "M", "type": "opt", "position": "any", "name": "Oxidation"},
        {"aa": "C", "type": "fix", "position": "any", "name": "Carbamidomethyl"},
        {"aa": "*", "type": "opt", "position": "Prot-N-term", "name": "Acetyl"},
    ]
    parser = XTandemAlanine_Parser(
        data_dir / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml",
        params={
            "cpus": 2,
            "rt_pickle_name": data_dir / "_ursgal_lookup.csv",
            "database": data_dir / "test_Creinhardtii_target_decoy.fasta",
            "enzyme": "(?<=[KR])(?![P])",
            "terminal_cleavage_site_integrity": "any",
            "validation_score_field": {"xtandem_alanine": "x!tandem:hyperscore"},
            "bigger_scores_better": {"xtandem_alanine": True},
            "modifications": modifications,
        },
    )
    df = parser.unify()
    assert len(parser.root) == 79
    assert (df["raw_data_location"] == "path/for/glory.mzML").all()
    assert pytest.approx(df["ucalc_mz"].mean()) == 796.4324
    assert pytest.approx(df["exp_mz"].mean()) == 796.71967
    assert df["modifications"].str.contains("Acetyl:0").sum() == 1
    assert df["modifications"].str.contains("Oxidation:").sum() == 23
    # every cysteine must carry the fixed carbamidomethylation
    assert (
        df["modifications"].str.count("Carbamidomethyl:")
        == df["sequence"].str.count("C")
    ).all()
    assert df["modifications"].str.count(":").sum() == 50
def test_get_single_spec_df():
    """A single spectrum element must become the expected one-row frame."""
    xml_path = (
        pytest._test_path / "data" / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml"
    )
    first_group = ETree.parse(xml_path).getroot()[0]
    # column template: None placeholders plus two fixed values
    ref_dict = dict.fromkeys(
        [
            "exp_mz",
            "calc_mz",
            "spectrum_title",
            "raw_data_location",
            "search_engine",
            "spectrum_id",
            "modifications",
            "retention_time_seconds",
            "x!tandem:delta",
            "x!tandem:nextscore",
            "x!tandem:y_score",
            "x!tandem:y_ions",
            "x!tandem:b_score",
            "x!tandem:b_ions",
            "sequence",
            "charge",
            "x!tandem:hyperscore",
        ]
    )
    ref_dict["raw_data_location"] = "path/for/glory.mgf"
    ref_dict["search_engine"] = "xtandem_alanine"
    mapping_dict = {
        "delta": "x!tandem:delta",
        "nextscore": "x!tandem:nextscore",
        "y_score": "x!tandem:y_score",
        "y_ions": "x!tandem:y_ions",
        "b_score": "x!tandem:b_score",
        "b_ions": "x!tandem:b_ions",
        "seq": "sequence",
        "z": "charge",
        "hyperscore": "x!tandem:hyperscore",
    }
    result = _get_single_spec_df(ref_dict, mapping_dict, first_group)
    assert isinstance(result, pd.DataFrame)
    expected = np.array(
        [
            None,
            "1315.5700",
            "test_Creinhardtii_QE_pH11.10381.10381.3",
            "path/for/glory.mgf",
            "xtandem_alanine",
            "10381",
            ["15.99492:5"],
            None,
            "0.0057",
            "8.0",
            "9.9",
            "5",
            "0.0",
            "0",
            "DDVHNMGADGIR",
            "3",
            "14.2",
        ],
        dtype=object,
    )
    assert (result.values == expected).all()
def test_engine_parsers_xtandem_nterminal_mod():
    """Protein-N-terminal acetylation must be reported at position 0."""
    data_dir = pytest._test_path / "data"
    modifications = [
        {"aa": "M", "type": "opt", "position": "any", "name": "Oxidation"},
        {"aa": "C", "type": "fix", "position": "any", "name": "Carbamidomethyl"},
        {"aa": "*", "type": "opt", "position": "Prot-N-term", "name": "Acetyl"},
    ]
    parser = XTandemAlanine_Parser(
        data_dir / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml",
        params={
            "cpus": 2,
            "rt_pickle_name": data_dir / "_ursgal_lookup.csv",
            "database": data_dir / "test_Creinhardtii_target_decoy.fasta",
            "enzyme": "(?<=[KR])(?![P])",
            "terminal_cleavage_site_integrity": "any",
            "validation_score_field": {"xtandem_alanine": "x!tandem:hyperscore"},
            "bigger_scores_better": {"xtandem_alanine": True},
            "modifications": modifications,
            "raw_file_location": "test_Creinhardtii_QE_pH11.mzML",
            "15N": False,
        },
    )
    df = parser.unify()
    relevant_row = df[df["sequence"] == "WGLVSSELQTSEAETPGLK"]
    assert relevant_row["modifications"].tolist() == ["Acetyl:0"]
def test_engine_parsers_xtandem_multiple_psms():
    """All PSMs of a group must be reported, not only the first one.

    The test file contains one sequence in the first group and three
    PSMs (sharing one sequence) in the second group.
    """
    input_file = pytest._test_path / "data" / "multiple_psms_xtandem.xml"
    rt_lookup_path = pytest._test_path / "data" / "_ursgal_lookup.csv"
    db_path = pytest._test_path / "data" / "human_ecoli_test_target_decoy.fasta"
    parser = XTandemAlanine_Parser(
        input_file,
        params={
            "cpus": 2,
            "rt_pickle_name": rt_lookup_path,
            "database": db_path,
            "enzyme": "(?<=[KR])(?![P])",
            "terminal_cleavage_site_integrity": "any",
            "validation_score_field": {"xtandem_alanine": "x!tandem:hyperscore"},
            "bigger_scores_better": {"xtandem_alanine": True},
            "modifications": [
                {"aa": "M", "type": "opt", "position": "any", "name": "Oxidation"},
                {"aa": "C", "type": "fix", "position": "any", "name": "Carbamidomethyl"},
                {"aa": "*", "type": "opt", "position": "Prot-N-term", "name": "Acetyl"},
            ],
            "raw_file_location": "test_Creinhardtii_QE_pH11.mzML",
            "15N": False,
        },
    )
    df = parser.unify()
    assert len(df) == 4
    # BUGFIX: the original set literal repeated "SMMNGGSSPESDVGTDNK"
    # three times; duplicates collapse in a set and only obscured the
    # intended expectation of two distinct sequences.
    assert set(df["sequence"]) == {
        "ITIPITLRMLIAK",
        "SMMNGGSSPESDVGTDNK",
    }
    assert set(df["spectrum_id"]) == {12833, 14525}
    assert set(df["modifications"]) == {
        "Acetyl:0",
        "",
        "Oxidation:2",
        "Oxidation:3",
    }
def test_engine_parsers_xtandem_map_mod_names():
    """Mass-based modification strings must be mapped to names/positions."""
    xml_path = (
        pytest._test_path / "data" / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml"
    )
    modifications = [
        {"aa": "M", "type": "opt", "position": "any", "name": "Oxidation"},
        {"aa": "C", "type": "fix", "position": "any", "name": "Carbamidomethyl"},
        {"aa": "*", "type": "opt", "position": "Prot-N-term", "name": "Acetyl"},
    ]
    parser = XTandemAlanine_Parser(
        xml_path,
        params={
            "cpus": 2,
            "enzyme": "(?<=[KR])(?![P])",
            "terminal_cleavage_site_integrity": "any",
            "validation_score_field": {"xtandem_alanine": "x!tandem:hyperscore"},
            "bigger_scores_better": {"xtandem_alanine": True},
            "modifications": modifications,
        },
    )
    test_df = pd.DataFrame({"modifications": [["57.021464:0"]], "sequence": ["CERK"]})
    assert parser.map_mod_names(test_df)["modifications"][0] == "Carbamidomethyl:1"
def test_engine_parsers_xtandem_map_mod_names_nterm():
    """Two offsets at position 0 map to the fixed Carbamidomethyl (shifted to
    position 1) and the N-terminal Acetyl (kept at position 0)."""
    input_file = (
        pytest._test_path / "data" / "test_Creinhardtii_QE_pH11_xtandem_alanine.xml"
    )
    # Same modification setup as the non-nterm variant of this test.
    mods = [
        {"aa": "M", "type": "opt", "position": "any", "name": "Oxidation"},
        {"aa": "C", "type": "fix", "position": "any", "name": "Carbamidomethyl"},
        {"aa": "*", "type": "opt", "position": "Prot-N-term", "name": "Acetyl"},
    ]
    parser = XTandemAlanine_Parser(
        input_file,
        params={
            "cpus": 2,
            "enzyme": "(?<=[KR])(?![P])",
            "terminal_cleavage_site_integrity": "any",
            "validation_score_field": {"xtandem_alanine": "x!tandem:hyperscore"},
            "bigger_scores_better": {"xtandem_alanine": True},
            "modifications": mods,
        },
    )
    row = pd.DataFrame(
        {"modifications": [["57.021464:0", "42.010565:0"]], "sequence": ["CERK"]}
    )
    mapped = parser.map_mod_names(row)["modifications"][0]
    assert set(mapped.split(";")) == {
        "Carbamidomethyl:1",
        "Acetyl:0",
    }
|
<reponame>jbrown-xentity/ckan
# encoding: utf-8
import datetime
import json
import pytest
import responses
import sqlalchemy.orm as orm
import ckan.plugins as p
import ckanext.datapusher.interfaces as interfaces
import ckanext.datastore.backend.postgres as db
from ckan.tests import helpers, factories
class FakeDataPusherPlugin(p.SingletonPlugin):
    """Test double implementing the IDataPusher interface.

    It vetoes every upload via ``can_upload`` and counts how many times
    ``after_upload`` is invoked, so tests can assert on the counter.
    """
    p.implements(p.IConfigurable, inherit=True)
    p.implements(interfaces.IDataPusher, inherit=True)

    def configure(self, _config):
        # Reset the hook-invocation counter whenever the plugin is configured.
        self.after_upload_calls = 0

    def can_upload(self, resource_id):
        # Always veto uploads.
        return False

    def after_upload(self, context, resource_dict, package_dict):
        # Record one completed-upload notification.
        self.after_upload_calls += 1
@pytest.mark.ckan_config(
    "ckan.plugins", "datastore datapusher test_datapusher_plugin"
)
@pytest.mark.usefixtures("with_plugins")
class TestInterace(object):
    """Tests for the IDataPusher plugin interface.

    NOTE(review): the class name looks like a typo of ``TestInterface``;
    kept as-is since pytest collects it under this name.
    """

    # Populated by the autouse fixture below.
    sysadmin_user = None
    normal_user = None

    @pytest.fixture(autouse=True)
    def setup_class(self, clean_db, test_request_context):
        """Create a dataset with a datastore resource plus two users."""
        resource = factories.Resource(url_type="datastore")
        self.dataset = factories.Dataset(resources=[resource])
        with test_request_context():
            self.sysadmin_user = factories.User(
                name="testsysadmin", sysadmin=True
            )
            self.normal_user = factories.User(name="annafan")
        engine = db.get_write_engine()
        self.Session = orm.scoped_session(orm.sessionmaker(bind=engine))

    @responses.activate
    def test_send_datapusher_creates_task(self, test_request_context):
        """With FakeDataPusherPlugin vetoing uploads, datapusher_submit
        returns a falsy result and no task_status entry is created."""
        responses.add(
            responses.POST,
            "http://datapusher.ckan.org/job",
            content_type="application/json",
            body=json.dumps({"job_id": "foo", "job_key": "bar"}),
        )
        resource = self.dataset["resources"][0]
        context = {"ignore_auth": True, "user": self.sysadmin_user["name"]}
        with test_request_context():
            result = p.toolkit.get_action("datapusher_submit")(
                context, {"resource_id": resource["id"]}
            )
        # The plugin's can_upload() vetoed the upload, so submit reports failure.
        assert not result

        context.pop("task_status", None)
        # No datapusher task should have been stored for this resource.
        with pytest.raises(p.toolkit.ObjectNotFound):
            p.toolkit.get_action("task_status_show")(
                context,
                {
                    "entity_id": resource["id"],
                    "task_type": "datapusher",
                    "key": "datapusher",
                },
            )

    def test_after_upload_called(self):
        """Each datapusher_hook call with status=complete fires
        IDataPusher.after_upload exactly once."""
        dataset = factories.Dataset()
        resource = factories.Resource(package_id=dataset["id"])

        # Push data directly to the DataStore for the resource to be marked as
        # `datastore_active=True`, so the grid view can be created
        data = {
            "resource_id": resource["id"],
            "fields": [
                {"id": "a", "type": "text"},
                {"id": "b", "type": "text"},
            ],
            "records": [{"a": "1", "b": "2"}],
            "force": True,
        }
        helpers.call_action("datastore_create", **data)

        # Create a task for `datapusher_hook` to update
        task_dict = {
            "entity_id": resource["id"],
            "entity_type": "resource",
            "task_type": "datapusher",
            "key": "datapusher",
            "value": '{"job_id": "my_id", "job_key":"my_key"}',
            "last_updated": str(datetime.datetime.now()),
            "state": "pending",
        }
        helpers.call_action("task_status_update", context={}, **task_dict)

        # Call datapusher_hook with a status of complete to trigger the
        # default views creation
        params = {
            "status": "complete",
            "metadata": {"resource_id": resource["id"]},
        }
        helpers.call_action("datapusher_hook", context={}, **params)
        total = sum(
            plugin.after_upload_calls
            for plugin in p.PluginImplementations(interfaces.IDataPusher)
        )
        assert total == 1, total

        # A second invocation of the hook increments the counter again.
        params = {
            "status": "complete",
            "metadata": {"resource_id": resource["id"]},
        }
        helpers.call_action("datapusher_hook", context={}, **params)
        total = sum(
            plugin.after_upload_calls
            for plugin in p.PluginImplementations(interfaces.IDataPusher)
        )
        assert total == 2, total
|
from enum import Enum
from .errors import JujuError
class Source(Enum):
    """Enumerates the possible origins of a charm.

    Gives the controller a hint about where the charm identified by a URL
    comes from.
    """

    LOCAL = "local"
    CHARM_STORE = "charm-store"
    CHARM_HUB = "charm-hub"

    def __str__(self):
        """Serialize to the wire-level string value."""
        return self.value
class Origin:
    """Bundle of a source, a channel and a platform describing where a
    charm was obtained from."""

    def __init__(self, source, channel, platform):
        self.source = source
        self.channel = channel
        self.platform = platform

    def __str__(self):
        return (
            f"origin using source {self.source} "
            f"for channel {self.channel} and platform {self.platform}"
        )
class Risk(Enum):
    """Risk levels a store channel can carry, from most to least stable."""

    STABLE = "stable"
    CANDIDATE = "candidate"
    BETA = "beta"
    EDGE = "edge"

    def __str__(self):
        return self.value

    @staticmethod
    def valid(potential):
        """Return True when *potential* is the string form of a known risk."""
        # Enum iteration visits members in definition order, covering the
        # same set the original explicit list did.
        return any(str(risk) == potential for risk in Risk)
class Channel:
    """Channel identifies and describes completely a store channel.

    A channel consists of, and is subdivided by, tracks and risk-levels:

    - Tracks enable snap developers to publish multiple supported releases
      of their application under the same snap name.
    - Risk-levels represent a progressive potential trade-off between
      stability and new features.

    The complete channel name is structured as two parts separated by a
    slash: ``<track>/<risk>``.
    """

    def __init__(self, track=None, risk=None):
        if not Risk.valid(risk):
            raise JujuError("unexpected risk {}".format(risk))

        self.track = track or ""
        self.risk = risk

    @staticmethod
    def parse(s):
        """parse a channel from a given string.

        Parse does not take into account branches.
        """
        if not s:
            raise JujuError("channel cannot be empty")

        parts = s.split("/")
        if len(parts) == 1:
            # A lone component is either a bare risk, or a track that
            # implicitly targets the stable risk.
            if Risk.valid(parts[0]):
                track, risk = None, parts[0]
            else:
                track, risk = parts[0], str(Risk.STABLE)
        elif len(parts) == 2:
            track, risk = parts
        else:
            raise JujuError("channel is malformed and has too many components {}".format(s))

        if risk is not None and not Risk.valid(risk):
            raise JujuError("risk in channel {} is not valid".format(s))
        if track is not None and track == "":
            raise JujuError("track in channel {} is not valid".format(s))
        return Channel(track, risk)

    def normalize(self):
        """Return an equivalent channel with the implicit "latest" track dropped."""
        normalized_track = "" if self.track == "latest" else self.track
        # The risk is passed through unchanged (the original's "" check was a
        # no-op: it mapped "" to "").
        return Channel(normalized_track, self.risk)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.track, self.risk) == (other.track, other.risk)

    def __str__(self):
        if self.track == "":
            return self.risk
        return "{}/{}".format(self.track, self.risk)
class Platform:
    """ParsePlatform parses a string representing a store platform.

    Serialized version of platform can be expected to conform to the following:

     1. Architecture is mandatory.
     2. OS is optional and can be dropped. Series is mandatory if OS wants
        to be displayed.
     3. Series is also optional.

    To indicate something is missing `unknown` can be used in place.

    Examples:

     1. `<arch>/<os>/<series>`
     2. `<arch>`
     3. `<arch>/<series>`
     4. `<arch>/unknown/<series>`
    """

    def __init__(self, arch, series=None, os=None):
        self.arch = arch
        self.series = series
        self.os = os

    @staticmethod
    def parse(s):
        """Parse a platform from its serialized string form.

        :raises JujuError: if the string is empty, has too many components,
            or any present component is empty.
        """
        if not s:
            raise JujuError("platform cannot be empty")

        p = s.split("/")
        arch = None
        os = None
        series = None
        if len(p) == 1:
            arch = p[0]
        elif len(p) == 2:
            arch = p[0]
            series = p[1]
        elif len(p) == 3:
            arch = p[0]
            os = p[1]
            series = p[2]
        else:
            raise JujuError("platform is malformed and has too many components {}".format(s))

        if not arch:
            raise JujuError("architecture in platform {} is not valid".format(s))
        if os is not None and os == "":
            raise JujuError("os in platform {} is not valid".format(s))
        if series is not None and series == "":
            raise JujuError("series in platform {} is not valid".format(s))
        return Platform(arch, series, os)

    def normalize(self):
        """Return an equivalent platform with `unknown`/missing parts cleared.

        Bug fix: the original condition used `or`
        (`self.os is not None or self.os != "unknown"`), which is true for
        every non-None os, so a literal "unknown" os was kept instead of
        dropped. `and` implements the intended "present and not unknown".
        """
        os = self.os if self.os is not None and self.os != "unknown" else None
        series = self.series
        if series is None or series == "unknown":
            # Without a series the os cannot be displayed either.
            os = None
            series = None
        return Platform(self.arch, series, os)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.arch == other.arch and self.os == other.os and self.series == other.series
        return False

    def __str__(self):
        path = self.arch
        if self.os is not None and self.os != "":
            path = "{}/{}".format(path, self.os)
        if self.series is not None and self.series != "":
            path = "{}/{}".format(path, self.series)
        return path
|
<filename>release/scripts/modules/bpy_extras/anim_utils.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
__all__ = (
"bake_action",
"bake_action_objects",
"bake_action_iter",
"bake_action_objects_iter",
)
import bpy
def bake_action(
        obj,
        *,
        action, frames,
        **kwargs
):
    """
    Bake a single object over the given frames.

    :arg obj: Object to bake.
    :type obj: :class:`bpy.types.Object`
    :arg action: An action to bake the data into, or None for a new action
       to be created.
    :type action: :class:`bpy.types.Action` or None
    :arg frames: Frames to bake.
    :type frames: iterable of int

    :return: an action or None
    :rtype: :class:`bpy.types.Action`
    """
    # With neither pose nor object baking requested there is nothing to do.
    if not (kwargs.get("do_pose") or kwargs.get("do_object")):
        return None

    # Delegate to the multi-object variant with a single (object, action) pair.
    baked_actions = bake_action_objects(
        [(obj, action)],
        frames=frames,
        **kwargs,
    )
    return baked_actions[0]
def bake_action_objects(
        object_action_pairs,
        *,
        frames,
        **kwargs
):
    """
    A version of :func:`bake_action_objects_iter` that takes frames and returns the output.

    :arg frames: Frames to bake.
    :type frames: iterable of int

    :return: A sequence of Action or None types (aligned with `object_action_pairs`)
    :rtype: sequence of :class:`bpy.types.Action`
    """
    # Fix: the coroutine handle was previously named `iter`, shadowing the
    # built-in of the same name within this function.
    baker = bake_action_objects_iter(object_action_pairs, **kwargs)
    baker.send(None)  # Prime the coroutine.
    for frame in frames:
        baker.send(frame)
    # Sending None signals completion and yields the baked actions.
    return baker.send(None)
def bake_action_objects_iter(
        object_action_pairs,
        **kwargs
):
    """
    A coroutine that bakes actions for multiple objects.

    :arg object_action_pairs: Sequence of object action tuples,
       action is the destination for the baked data. When None a new action will be created.
    :type object_action_pairs: Sequence of (:class:`bpy.types.Object`, :class:`bpy.types.Action`)
    """
    scene = bpy.context.scene
    # Remember the current frame so it can be restored afterwards.
    frame_back = scene.frame_current
    # Fix: loop variables were previously named `iter`, shadowing the built-in.
    bakers = tuple(
        bake_action_iter(obj, action=action, **kwargs)
        for (obj, action) in object_action_pairs
    )
    for baker in bakers:
        baker.send(None)  # Prime each per-object coroutine.
    while True:
        frame = yield None
        if frame is None:
            break
        scene.frame_set(frame)
        bpy.context.view_layer.update()
        for baker in bakers:
            baker.send(frame)
    scene.frame_set(frame_back)
    # Finish each coroutine; collect the actions they produce.
    yield tuple(baker.send(None) for baker in bakers)
# XXX visual keying is actually always considered as True in this code...
def bake_action_iter(
        obj,
        *,
        action,
        only_selected=False,
        do_pose=True,
        do_object=True,
        do_visual_keying=True,
        do_constraint_clear=False,
        do_parents_clear=False,
        do_clean=False
):
    """
    A coroutine that bakes action for a single object.

    :arg obj: Object to bake.
    :type obj: :class:`bpy.types.Object`
    :arg action: An action to bake the data into, or None for a new action
       to be created.
    :type action: :class:`bpy.types.Action` or None
    :arg only_selected: Only bake selected bones.
    :type only_selected: bool
    :arg do_pose: Bake pose channels.
    :type do_pose: bool
    :arg do_object: Bake objects.
    :type do_object: bool
    :arg do_visual_keying: Use the final transformations for baking ('visual keying')
    :type do_visual_keying: bool
    :arg do_constraint_clear: Remove constraints after baking.
    :type do_constraint_clear: bool
    :arg do_parents_clear: Unparent after baking objects.
    :type do_parents_clear: bool
    :arg do_clean: Remove redundant keyframes after baking.
    :type do_clean: bool

    :return: an action or None
    :rtype: :class:`bpy.types.Action`
    """
    # Fix applied: removed a stray debug ``print("quat_prev", ...)`` that was
    # left in the object quaternion-keying branch below.

    # -------------------------------------------------------------------------
    # Helper Functions and vars

    # Note: BBONE_PROPS is a list so we can preserve the ordering
    BBONE_PROPS = [
        'bbone_curveinx', 'bbone_curveoutx',
        'bbone_curveiny', 'bbone_curveouty',
        'bbone_rollin', 'bbone_rollout',
        'bbone_scaleinx', 'bbone_scaleoutx',
        'bbone_scaleiny', 'bbone_scaleouty',
        'bbone_easein', 'bbone_easeout'
    ]

    def pose_frame_info(obj):
        matrix = {}
        bbones = {}
        for name, pbone in obj.pose.bones.items():
            if do_visual_keying:
                # Get the final transform of the bone in its own local space...
                matrix[name] = obj.convert_space(pose_bone=pbone, matrix=pbone.matrix,
                                                 from_space='POSE', to_space='LOCAL')
            else:
                matrix[name] = pbone.matrix_basis.copy()

            # Bendy Bones
            if pbone.bone.bbone_segments > 1:
                bbones[name] = {bb_prop: getattr(pbone, bb_prop) for bb_prop in BBONE_PROPS}
        return matrix, bbones

    if do_parents_clear:
        if do_visual_keying:
            def obj_frame_info(obj):
                return obj.matrix_world.copy()
        else:
            def obj_frame_info(obj):
                parent = obj.parent
                matrix = obj.matrix_basis
                if parent:
                    return parent.matrix_world @ matrix
                else:
                    return matrix.copy()
    else:
        if do_visual_keying:
            def obj_frame_info(obj):
                parent = obj.parent
                matrix = obj.matrix_world
                if parent:
                    return parent.matrix_world.inverted_safe() @ matrix
                else:
                    return matrix.copy()
        else:
            def obj_frame_info(obj):
                return obj.matrix_basis.copy()

    # -------------------------------------------------------------------------
    # Setup the Context

    if obj.pose is None:
        do_pose = False

    if not (do_pose or do_object):
        raise Exception("Pose and object baking is disabled, no action needed")

    pose_info = []
    obj_info = []

    options = {'INSERTKEY_NEEDED'}

    # -------------------------------------------------------------------------
    # Collect transformations

    while True:
        # Caller is responsible for setting the frame and updating the scene.
        frame = yield None

        # Signal we're done!
        if frame is None:
            break

        if do_pose:
            pose_info.append((frame, *pose_frame_info(obj)))
        if do_object:
            obj_info.append((frame, obj_frame_info(obj)))

    # -------------------------------------------------------------------------
    # Clean (store initial data)

    if do_clean and action is not None:
        clean_orig_data = {fcu: {p.co[1] for p in fcu.keyframe_points} for fcu in action.fcurves}
    else:
        clean_orig_data = {}

    # -------------------------------------------------------------------------
    # Create action

    # in case animation data hasn't been created
    atd = obj.animation_data_create()
    if action is None:
        action = bpy.data.actions.new("Action")

    # Leave tweak mode before trying to modify the action (T48397)
    if atd.use_tweak_mode:
        atd.use_tweak_mode = False

    atd.action = action

    # -------------------------------------------------------------------------
    # Apply transformations to action

    # pose
    if do_pose:
        for name, pbone in obj.pose.bones.items():
            if only_selected and not pbone.bone.select:
                continue

            if do_constraint_clear:
                while pbone.constraints:
                    pbone.constraints.remove(pbone.constraints[0])

            # Create compatible eulers, quats.
            euler_prev = None
            quat_prev = None

            for (f, matrix, bbones) in pose_info:
                pbone.matrix_basis = matrix[name].copy()

                pbone.keyframe_insert("location", index=-1, frame=f, group=name, options=options)

                rotation_mode = pbone.rotation_mode
                if rotation_mode == 'QUATERNION':
                    if quat_prev is not None:
                        quat = pbone.rotation_quaternion.copy()
                        quat.make_compatible(quat_prev)
                        pbone.rotation_quaternion = quat
                        quat_prev = quat
                        del quat
                    else:
                        quat_prev = pbone.rotation_quaternion.copy()
                    pbone.keyframe_insert("rotation_quaternion", index=-1, frame=f, group=name, options=options)
                elif rotation_mode == 'AXIS_ANGLE':
                    pbone.keyframe_insert("rotation_axis_angle", index=-1, frame=f, group=name, options=options)
                else:  # euler, XYZ, ZXY etc
                    if euler_prev is not None:
                        euler = pbone.rotation_euler.copy()
                        euler.make_compatible(euler_prev)
                        pbone.rotation_euler = euler
                        euler_prev = euler
                        del euler
                    else:
                        euler_prev = pbone.rotation_euler.copy()
                    pbone.keyframe_insert("rotation_euler", index=-1, frame=f, group=name, options=options)

                pbone.keyframe_insert("scale", index=-1, frame=f, group=name, options=options)

                # Bendy Bones
                if pbone.bone.bbone_segments > 1:
                    bbone_shape = bbones[name]
                    for bb_prop in BBONE_PROPS:
                        # update this property with value from bbone_shape, then key it
                        setattr(pbone, bb_prop, bbone_shape[bb_prop])
                        pbone.keyframe_insert(bb_prop, index=-1, frame=f, group=name, options=options)

    # object. TODO. multiple objects
    if do_object:
        if do_constraint_clear:
            while obj.constraints:
                obj.constraints.remove(obj.constraints[0])

        # Create compatible eulers, quats.
        euler_prev = None
        quat_prev = None

        for (f, matrix) in obj_info:
            name = "Action Bake"  # XXX: placeholder
            obj.matrix_basis = matrix

            obj.keyframe_insert("location", index=-1, frame=f, group=name, options=options)

            rotation_mode = obj.rotation_mode
            if rotation_mode == 'QUATERNION':
                if quat_prev is not None:
                    quat = obj.rotation_quaternion.copy()
                    quat.make_compatible(quat_prev)
                    obj.rotation_quaternion = quat
                    quat_prev = quat
                    del quat
                else:
                    quat_prev = obj.rotation_quaternion.copy()
                obj.keyframe_insert("rotation_quaternion", index=-1, frame=f, group=name, options=options)
            elif rotation_mode == 'AXIS_ANGLE':
                obj.keyframe_insert("rotation_axis_angle", index=-1, frame=f, group=name, options=options)
            else:  # euler, XYZ, ZXY etc
                if euler_prev is not None:
                    euler = obj.rotation_euler.copy()
                    euler.make_compatible(euler_prev)
                    obj.rotation_euler = euler
                    euler_prev = euler
                    del euler
                else:
                    euler_prev = obj.rotation_euler.copy()
                obj.keyframe_insert("rotation_euler", index=-1, frame=f, group=name, options=options)

            obj.keyframe_insert("scale", index=-1, frame=f, group=name, options=options)

        if do_parents_clear:
            obj.parent = None

    # -------------------------------------------------------------------------
    # Clean

    if do_clean:
        for fcu in action.fcurves:
            fcu_orig_data = clean_orig_data.get(fcu, set())

            keyframe_points = fcu.keyframe_points
            i = 1
            while i < len(keyframe_points) - 1:
                val = keyframe_points[i].co[1]

                # Never remove keyframes that existed before baking.
                if val in fcu_orig_data:
                    i += 1
                    continue

                val_prev = keyframe_points[i - 1].co[1]
                val_next = keyframe_points[i + 1].co[1]

                if abs(val - val_prev) + abs(val - val_next) < 0.0001:
                    keyframe_points.remove(keyframe_points[i])
                else:
                    i += 1

    yield action
|
<reponame>DeppMeng/HRNet-MaskRCNN-Benchmark
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import datetime
import logging
import time
import torch
import torch.distributed as dist
from maskrcnn_benchmark.utils.comm import get_world_size
from maskrcnn_benchmark.utils.metric_logger import MetricLogger
def reduce_loss_dict(loss_dict):
    """
    Reduce the loss dictionary from all processes so that process with rank
    0 has the averaged results. Returns a dict with the same fields as
    loss_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single-process run: nothing to reduce.
        return loss_dict
    with torch.no_grad():
        # Keys and values iterate in the same (insertion) order, so the
        # stacked tensor stays aligned with the name list.
        names = list(loss_dict.keys())
        stacked = torch.stack(list(loss_dict.values()), dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            stacked /= world_size
        reduced = dict(zip(names, stacked))
    return reduced
def do_train(
    model,
    data_loader,
    optimizer,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    cfg
):
    """Run the main training loop over ``data_loader``.

    :param model: detection model; switched to train mode here.
    :param data_loader: iterable yielding ``(images, targets, _)`` batches.
    :param optimizer: optimizer; when ``cfg.SOLVER.ENABLE_FP16`` is set it is
        expected to expose a ``backward(losses)`` method (presumably an
        FP16-capable wrapper — confirm against the solver construction).
    :param scheduler: learning-rate scheduler, stepped once per iteration.
    :param checkpointer: saves ``model_{iteration}`` / ``model_final`` snapshots.
    :param device: device the images/targets are moved to.
    :param checkpoint_period: checkpoint every N iterations.
    :param arguments: mutable dict persisted with checkpoints;
        ``arguments["iteration"]`` is read to resume and updated every step.
    :param cfg: global config; only ``cfg.SOLVER.ENABLE_FP16`` is read here.
    """
    logger = logging.getLogger("maskrcnn_benchmark.trainer")
    logger.info("Start training")
    meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    # Resume from the iteration recorded by a previous checkpoint (0 for fresh runs).
    start_iter = arguments["iteration"]
    model.train()
    start_training_time = time.time()
    end = time.time()
    # Use half-precision inputs when FP16 training is enabled.
    dtype = torch.half if cfg.SOLVER.ENABLE_FP16 else torch.float
    for iteration, (images, targets, _) in enumerate(data_loader, start_iter):
        data_time = time.time() - end
        iteration = iteration + 1
        arguments["iteration"] = iteration
        # NOTE(review): scheduler.step() runs before optimizer.step(); newer
        # PyTorch recommends the opposite order — confirm this is intentional
        # for the scheduler used here.
        scheduler.step()
        images = images.to(device=device, dtype=dtype)
        targets = [target.to(device) for target in targets]
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)
        optimizer.zero_grad()
        if cfg.SOLVER.ENABLE_FP16:
            # FP16 path: the optimizer wrapper performs the backward pass.
            optimizer.backward(losses)
        else:
            losses.backward()
        optimizer.step()
        batch_time = time.time() - end
        end = time.time()
        meters.update(time=batch_time, data=data_time)
        # ETA from the smoothed global-average iteration time.
        eta_seconds = meters.time.global_avg * (max_iter - iteration)
        eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
        if iteration % 20 == 0 or iteration == max_iter:
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
        if iteration % checkpoint_period == 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
        if iteration == max_iter:
            checkpointer.save("model_final", **arguments)
    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
|
#!/usr/bin/env python
__author__ = "bitsofinfo"
import importlib
from multiprocessing import Pool, Process
import json
import pprint
import yaml
from dateutil import parser as dateparser
import re
import os
from objectpath import *
import argparse
import collections
import sys
import datetime
import logging
import time
from pygrok import Grok
import http.server
import threading
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler, FileCreatedEvent
import concurrent.futures
from twisted.web.server import Site
from twisted.web.static import File
from twisted.internet import reactor
from twisted.internet import endpoints
# Dict of result handler yaml parsed configs (filename -> object)
result_handler_configs = {}
class ObjectPathContext():
    """Holds an evaluation document plus its ObjectPath ``Tree`` index and
    executes ObjectPath queries against it, normalizing the different result
    shapes (primitive, list, generator) into plain Python values.
    """

    # The Objectpath Tree for the evaluation_doc JSON
    evaluation_doc_objectpath_tree = None

    # the raw evaluation doc
    evaluation_doc = None

    # More debugging
    debug_objectpath_expr = False
    dump_evaldoc_on_error = False

    def __init__(self, evaldoc, debug_objectpath_expressions, dump_evaldoc_on_error):
        """Capture debug flags and index *evaldoc* for querying."""
        self.debug_objectpath_expr = debug_objectpath_expressions
        self.dump_evaldoc_on_error = dump_evaldoc_on_error
        self.update(evaldoc)

    # update the context w/ the most recent
    # evaluation_doc
    def update(self,evaldoc):
        """Replace the current document and rebuild the ObjectPath Tree."""
        self.evaluation_doc = evaldoc
        self.evaluation_doc_objectpath_tree = Tree(self.evaluation_doc)

    # Uses ObjectPath to evaluate the given
    # objectpath_query against the current state of the
    # `evaluation_doc_objectpath_tree`
    #
    # NOTE! If multiple matches, returns the 1st match
    def exec_objectpath_first_match(self,objectpath_query):
        """Run the query; when multiple matches exist return only the first."""
        return self._exec_objectpath(objectpath_query,0)

    # Uses ObjectPath to evaluate the given
    # objectpath_query against the current state of the
    # `evaluation_doc_objectpath_tree`
    #
    # NOTE! If multiple matches, returns the match located
    # at index `force_return_index_on_multiple_results`
    # unless force_return_index_on_multiple_results=None
    # then returns all
    def exec_objectpath_specific_match(self,objectpath_query,force_return_index_on_multiple_results=None):
        """Run the query; on multiple matches return the element at the given index (or all when None)."""
        return self._exec_objectpath(objectpath_query,force_return_index_on_multiple_results)

    # Uses ObjectPath to evaluate the given
    # objectpath_query against the current state of the
    # `evaluation_doc_objectpath_tree`
    #
    # NOTE! this can return lists of values of when multiple matches
    def exec_objectpath(self,objectpath_query):
        """Run the query; may return a list when there are multiple matches."""
        return self._exec_objectpath(objectpath_query,None)

    # Uses ObjectPath to evaluate the given
    # objectpath_query against the current state of the
    # `evaluation_doc_objectpath_tree`
    # Takes a force_return_index_on_multiple_results should multiple matches be found
    # to force the return on a specified element
    def _exec_objectpath(self,objectpath_query,force_return_index_on_multiple_results):
        """Execute *objectpath_query* and normalize the result.

        Primitives are returned unchanged; a non-empty list is returned as-is
        (empty list -> None); a generator is drained into a list which is then
        collapsed to its single element, to the forced index when requested,
        or returned whole. Query failures are logged (when debugging is on)
        and re-raised.
        """
        if self.debug_objectpath_expr:
            logging.debug("exec_objectpath: query: " + objectpath_query)
        qresult = None
        try:
            qresult = self.evaluation_doc_objectpath_tree.execute(objectpath_query)
        except Exception as e:
            if self.debug_objectpath_expr:
                logging.debug("exec_objectpath: query: " + objectpath_query + " failure: " + str(sys.exc_info()[0]))
            raise e
        if self.debug_objectpath_expr:
            logging.debug("exec_objectpath: query: " + objectpath_query + " raw result type(): " + str(type(qresult)))
        # Primitive type
        if isinstance(qresult,(str,bool,int)):
            if self.debug_objectpath_expr:
                logging.debug("exec_objectpath: query: " + objectpath_query + " returning (str|bool|int): " + str(qresult))
            return qresult
        # List or Generator
        elif qresult is not None:
            toreturn = []
            if isinstance(qresult,(list)):
                if len(qresult) > 0:
                    return qresult
                return None
            # assume generator
            else:
                try:
                    # Drain the generator, keeping only non-None values.
                    while True:
                        r = next(qresult)
                        if self.debug_objectpath_expr:
                            logging.debug("exec_objectpath: query: " + objectpath_query + " next() returned val: " + str(r))
                        if r is not None:
                            toreturn.append(r)
                except StopIteration as s:
                    if self.debug_objectpath_expr:
                        logging.debug("exec_objectpath: query: " + objectpath_query + " received StopIteration after " + str(len(toreturn)) + " nexts()..")
                if len(toreturn) == 1:
                    # Single match: unwrap it.
                    toreturn = toreturn[0]
                    if self.debug_objectpath_expr:
                        logging.debug("exec_objectpath: query: " + objectpath_query + " generator had 1 element, returning: " + str(toreturn))
                    return toreturn
                elif len(toreturn) > 1:
                    if self.debug_objectpath_expr:
                        logging.debug("exec_objectpath: query: " + objectpath_query + " generator has %d elements: %s" % (len(toreturn),json.dumps(toreturn)))
                    # if we are forced to return a specific index on multiple..... do it
                    if isinstance(force_return_index_on_multiple_results,(str)):
                        force_return_index_on_multiple_results = int(force_return_index_on_multiple_results)
                    if force_return_index_on_multiple_results is not None:
                        toreturn = toreturn[force_return_index_on_multiple_results]
                        if self.debug_objectpath_expr:
                            logging.debug("exec_objectpath: query: " + objectpath_query + " force_return_index_on_multiple_results=%d , returning val:%s" % (force_return_index_on_multiple_results,str(toreturn)))
                    return toreturn
                else:
                    # Generator produced nothing usable.
                    return None
        # None...
        else:
            if self.debug_objectpath_expr:
                logging.debug("exec_objectpath: query: " + objectpath_query + " yielded None")
            return None
class TestsslResultProcessor(object):
    """Processes a testssl.sh JSON result file against every loaded result
    handler config: builds an evaluation document, evaluates ObjectPath
    triggers against it, and dispatches fired triggers to their reactors.

    Fix applied: the deprecated ``logging.warn`` alias is replaced with
    ``logging.warning`` (three occurrences); log messages are unchanged.
    """

    # for controlling access to job_name_2_metrics_db
    lock = threading.RLock()

    result_handler_configs = {}

    # total threads = total amount of commands
    # per file that can be processed concurrently
    threads = 1

    # More debugging
    debug_objectpath_expr = False
    dump_evaldoc_on_error = False
    debug_dump_evaldoc = False

    def dumpEvalDoc(self,evaluation_doc):
        """Best-effort pretty-print of the evaluation doc when
        ``dump_evaldoc_on_error`` is enabled; never raises."""
        if self.dump_evaldoc_on_error:
            try:
                if evaluation_doc is not None:
                    print("dump_evaldoc_on_error: " + json.dumps(evaluation_doc,indent=2))
                else:
                    print("dump_evaldoc_on_error: evaluation_doc is None!")
            except Exception as etwo:
                logging.exception("Unexpected error attempting dump_evaldoc_on_error")

    # Will process the testssl_json_result_file_path file
    def processResultFile(self,testssl_json_result_file_path,input_dir):
        """Load one testssl.sh JSON result and run it through every config.

        :param testssl_json_result_file_path: path of the JSON result file
        :param input_dir: monitored input dir; stripped from the parent path
            stored in the evaluation doc
        :raises: re-raises errors from opening/parsing the JSON file;
            per-config processing errors are logged and swallowed.
        """
        logging.info("Received event for create of new testssl.sh JSON result file: '%s'", testssl_json_result_file_path)

        # open the file
        testssl_result = None

        # get absolute path & filename optionally
        testssl_json_result_abs_file_path = os.path.abspath(testssl_json_result_file_path)
        testssl_json_result_filename = os.path.basename(testssl_json_result_file_path)

        # init eval doc
        evaluation_doc = None

        # Open the JSON file
        try:
            with open(testssl_json_result_file_path, 'r') as f:
                testssl_result = json.load(f)

                # no scan result
                if 'scanResult' not in testssl_result or len(testssl_result['scanResult']) == 0:
                    logging.info("Result JSON contained empty 'scanResult', skipping: '%s'", testssl_json_result_file_path)
                    return

        except Exception as e:
            logging.exception("Unexpected error in open(): "+testssl_json_result_file_path + " error:" +str(sys.exc_info()[0]))
            raise e

        logging.info("testssl.sh JSON result file loaded OK: '%s'" % testssl_json_result_file_path)

        # for each of our result handler configs
        # lets process the JSON result file through it
        try:
            for config_filename, config in result_handler_configs.items():

                logging.info("Evaluating %s against config '%s' ..." % (testssl_json_result_file_path,config_filename))

                try:
                    # create uberdoc for evaluations
                    evaluation_doc = {
                        config['evaluation_doc_config']['target_keys']['testssl_result_json']: testssl_result,
                        config['evaluation_doc_config']['target_keys']['testssl_result_parent_dir_path']:os.path.dirname(testssl_json_result_file_path).replace(input_dir+"/",""),
                        config['evaluation_doc_config']['target_keys']['testssl_result_parent_dir_abs_path']:os.path.dirname(testssl_json_result_abs_file_path),
                        config['evaluation_doc_config']['target_keys']['testssl_result_file_abs_path']:testssl_json_result_abs_file_path,
                        config['evaluation_doc_config']['target_keys']['testssl_result_filename']:testssl_json_result_filename
                    }

                    # apply any properties found in the path_properties_grok
                    if 'path_properties_grok' in config and config['path_properties_grok'] is not None:
                        grok = Grok(config['path_properties_grok'],custom_patterns=config['custom_groks'])
                        matches = grok.match(testssl_json_result_file_path)

                        # matches?
                        if matches is not None:
                            if 'ignored' in matches:
                                del matches['ignored']
                        else:
                            logging.warning("path_properties_grok: matched nothing! grok:" + config['path_properties_grok'] + " against path: " + testssl_json_result_file_path)
                            matches = {}

                    # NOTE(review): if 'path_properties_grok' is absent from a
                    # config, `matches` is undefined here — verify configs
                    # always define it (structure preserved as-is).
                    result_metadata = {
                        config['evaluation_doc_config']['target_keys']['result_metadata']:matches
                    }
                    evaluation_doc.update(result_metadata)

                    # Create our Tree to do ObjectPath evals
                    # against our evaluation_doc
                    objectpath_ctx = ObjectPathContext(evaluation_doc,self.debug_objectpath_expr,self.dump_evaldoc_on_error)

                    # for debugging
                    if self.debug_dump_evaldoc:
                        logging.warning("debug_dump_evaldoc: dumping evalution_doc pre cert_expires_objectpath evaluation")
                        self.dumpEvalDoc(evaluation_doc)

                    # Lets grab the cert expires to calc number of days till expiration
                    # Note we force grab the first match...
                    cert_expires_at_str = objectpath_ctx.exec_objectpath_first_match(config['cert_expires_objectpath'])
                    cert_expires_at = dateparser.parse(cert_expires_at_str)
                    expires_in_days = cert_expires_at - datetime.datetime.utcnow()
                    evaluation_doc.update({
                        config['evaluation_doc_config']['target_keys']['cert_expires_in_days']:expires_in_days.days
                    })

                    # Rebuild our Tree to do ObjectPath evals
                    # against our evaluation_doc to sure the Tree is up to date
                    objectpath_ctx.update(evaluation_doc)

                    # for debugging, dump again as we updated it
                    if self.debug_dump_evaldoc:
                        logging.warning("debug_dump_evaldoc: dumping evalution_doc pre Trigger evaluations")
                        self.dumpEvalDoc(evaluation_doc)

                    # lets process all triggers
                    triggers_fired = []
                    for trigger_name in config['trigger_on']:
                        trigger = config['trigger_on'][trigger_name]

                        objectpath_result = objectpath_ctx.exec_objectpath(trigger['objectpath'])

                        results = []
                        if objectpath_result is not None:
                            # if a primitive....
                            if isinstance(objectpath_result,(str,int,float,bool)):
                                # if a boolean we only include if True....
                                if isinstance(objectpath_result,(bool)):
                                    if objectpath_result is False:
                                        continue
                                results.append(objectpath_result)
                            # if a list ...
                            elif isinstance(objectpath_result,(list)):
                                results = objectpath_result
                            # some other object, throw in a list
                            else:
                                results.append(objectpath_result)

                        # ok we got at least 1 result back
                        # from the objectpath expression
                        if len(results) > 0:
                            triggers_fired.append({
                                'tag':trigger_name,
                                'title':trigger['title'],
                                'reactors':trigger['reactors'],
                                'objectpath':trigger['objectpath'],
                                'results':results,
                                'config_filename':config_filename,
                                'testssl_json_result_abs_file_path':testssl_json_result_abs_file_path,
                                'testssl_json_result_filename':testssl_json_result_filename,
                                'evaluation_doc':evaluation_doc
                            })

                    # Triggers were fired
                    # lets process their reactors
                    if len(triggers_fired) > 0:

                        # build a map of reactors -> triggers
                        reactor_triggers = {}
                        for t in triggers_fired:
                            # each trigger can have N reactors
                            for reactor_name in t['reactors']:
                                if reactor_name not in reactor_triggers:
                                    reactor_triggers[reactor_name] = []
                                reactor_triggers[reactor_name].append(t)

                        # for each reactor to invoke...
                        for reactor_name,triggers in reactor_triggers.items():

                            # handle misconfig
                            if reactor_name not in config['reactor_engines']:
                                logging.error("Configured reactor_engine '%s' is not configured?... skipping" % (reactor_name))
                                continue

                            # create the reactor
                            reactor = None
                            reactor_config = config['reactor_engines'][reactor_name]
                            class_name = reactor_config['class_name']
                            try:
                                reactor_class = getattr(importlib.import_module('reactors.' + class_name.lower()), class_name)
                                reactor = reactor_class(reactor_config)
                            except Exception as e:
                                logging.exception("Error loading reactor class: " + class_name + ". Failed to find 'reactors/"+class_name.lower() +".py' with class '"+class_name+"' declared within it")
                                self.dumpEvalDoc(evaluation_doc)
                                raise e

                            # react to the fired triggers
                            logging.debug("Invoking reactor: " + reactor_name + " for " + str(len(triggers)) + " fired triggers")
                            reactor.handleTriggers(triggers,objectpath_ctx)

                    else:
                        logging.info("No triggers fired for: " + testssl_json_result_file_path)

                except Exception as e:
                    logging.exception("Unexpected error processing: " + testssl_json_result_file_path + " using: " + config_filename + " err:" + str(sys.exc_info()[0]))
                    self.dumpEvalDoc(evaluation_doc)

        except Exception as e:
            logging.exception("Unexpected error processing: " + testssl_json_result_file_path + " " + str(sys.exc_info()[0]))
            self.dumpEvalDoc(evaluation_doc)
class TestsslResultFileMonitor(FileSystemEventHandler):
    """Watchdog handler that feeds newly written testssl.sh JSON result
    files to a TestsslResultProcessor via a thread pool.

    A result file is only submitted once it parses as JSON (taken as the
    signal that testssl.sh finished writing it) and has not been seen
    before (tracked in a bounded deque of processed paths).
    """
    # We will feed new input files to this processor
    testssl_result_processor = None
    # max worker threads for the processing pool
    threads = 1
    # lazily created ThreadPoolExecutor
    executor = None
    # seconds to wait after an event before reading the file
    input_dir_sleep_seconds = 0
    # the actual input_dir that we are monitoring
    input_dir = None
    # Regex filter to match relevant paths in events received
    input_filename_filter = 'testssloutput.+.json'
    # to keep track of event.src_paths we have processed (bounded, shared)
    processed_result_paths = collections.deque(maxlen=400)

    def set_threads(self, t):
        self.threads = t

    # route to on_modified
    def on_created(self, event):
        super(TestsslResultFileMonitor, self).on_created(event)
        self.on_modified(event)

    # our main impl for processing new json files
    def on_modified(self, event):
        super(TestsslResultFileMonitor, self).on_modified(event)
        if not self.executor:
            self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.threads)
        if event.is_directory:
            return
        # Check if already processed
        if event.src_path in self.processed_result_paths:
            return
        # BUGFIX: previously a None input_filename_filter left the compiled
        # regex as None and the unconditional .match() call raised
        # AttributeError. A None filter now means "accept every path".
        if self.input_filename_filter is not None:
            input_filename_re_filter = re.compile(self.input_filename_filter, re.I)
            if not input_filename_re_filter.match(event.src_path):
                return
        # give write time to close....
        time.sleep(self.input_dir_sleep_seconds)
        # file needs data...
        if os.stat(event.src_path).st_size == 0:
            return
        # Attempt to decode the JSON file; if it parses cleanly we assume
        # testssl.sh is done writing it
        try:
            with open(event.src_path, 'r') as f:
                testssl_result = json.load(f)
                if testssl_result is None:
                    return
        except json.decoder.JSONDecodeError:
            # we just ignore these, it means the file
            # is not done being written
            return
        except Exception:
            logging.exception("Unexpected error in open(): " + event.src_path + " error:" + str(sys.exc_info()[0]))
            return
        # re-check: another event may have processed it while we slept
        if event.src_path in self.processed_result_paths:
            return
        logging.info("Responding to parsable testssl.sh JSON result: %s", event.src_path)
        # mark it as processed
        self.processed_result_paths.append(event.src_path)
        # submit for evaluation
        self.executor.submit(self.testssl_result_processor.processResultFile, event.src_path, self.input_dir)
class HandlerConfigFileMonitor(FileSystemEventHandler):
    """Watchdog handler that loads result-handler YAML config files into
    the module-level result_handler_configs map as they are created."""
    # our Pool (lazily created, single worker)
    executor = None
    # NOTE(review): this attribute is never consulted below -- the code
    # filters on '.yaml' instead; kept unchanged for backward compatibility.
    filename_filter = '.json'

    def on_created(self, event):
        super(HandlerConfigFileMonitor, self).on_created(event)
        if not self.executor:
            self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
        if event.is_directory:
            return
        if '.yaml' in event.src_path:
            logging.info("Responding to creation of result handler config file: %s", event.src_path)
            # attempt to open the config and parse the yaml
            try:
                config = None
                with open(event.src_path, 'r') as stream:
                    try:
                        # SECURITY: safe_load instead of load -- config files
                        # must not be able to construct arbitrary Python
                        # objects (yaml.load without a Loader is unsafe and
                        # deprecated since PyYAML 5.1).
                        config = yaml.safe_load(stream)
                    except yaml.YAMLError:
                        logging.exception(event.src_path + ": Unexpected error in yaml.load(" + event.src_path + ") " + str(sys.exc_info()[0]))
            except Exception:
                logging.exception(event.src_path + ": Unexpected error:" + str(sys.exc_info()[0]))
            # our config name is the filename
            config_filename = os.path.basename(event.src_path)
            result_handler_configs[config_filename] = config
def init_watching(input_dir,
                  config_dir,
                  input_dir_watchdog_threads,
                  input_dir_sleep_seconds,
                  debug_objectpath_expr,
                  input_filename_filter,
                  dump_evaldoc_on_error,
                  debug_dump_evaldoc,
                  httpserver_port,
                  httpserver_root_dir):
    """Wire everything together and block until interrupted.

    Starts a watchdog on config_dir (result-handler YAML configs) and one
    on input_dir (testssl.sh JSON results), creates the shared
    TestsslResultProcessor, optionally starts an embedded twisted HTTP
    server on a daemon thread, then sleeps until KeyboardInterrupt.

    CLEANUP: the duplicated str->int conversion of
    input_dir_watchdog_threads and the computed-but-unused
    config_dir_path_to_startup_scan variable were removed; behavior is
    otherwise unchanged.
    """
    # normalize numeric parameters that may arrive as strings (CLI/env)
    if isinstance(input_dir_watchdog_threads, str):
        input_dir_watchdog_threads = int(input_dir_watchdog_threads)
    if isinstance(input_dir_sleep_seconds, str):
        input_dir_sleep_seconds = int(input_dir_sleep_seconds)
    if isinstance(httpserver_port, str):
        httpserver_port = int(httpserver_port)
    # create watchdog to look for new config files
    result_handler_config_monitor = HandlerConfigFileMonitor()
    # create watchdog to look for new testssl.sh result files
    event_handler = TestsslResultFileMonitor()
    event_handler.set_threads(input_dir_watchdog_threads)
    event_handler.input_dir = input_dir
    event_handler.input_filename_filter = input_filename_filter
    event_handler.input_dir_sleep_seconds = input_dir_sleep_seconds
    # Create a TestsslResultProcessor to consume the result files; it uses
    # the same thread count as the watchdog file processing
    event_handler.testssl_result_processor = TestsslResultProcessor()
    event_handler.testssl_result_processor.debug_objectpath_expr = debug_objectpath_expr
    event_handler.testssl_result_processor.dump_evaldoc_on_error = dump_evaldoc_on_error
    event_handler.testssl_result_processor.debug_dump_evaldoc = debug_dump_evaldoc
    event_handler.testssl_result_processor.threads = input_dir_watchdog_threads
    # schedule our config_dir file watchdog
    observer1 = Observer()
    observer1.schedule(result_handler_config_monitor, config_dir, recursive=True)
    observer1.start()
    logging.getLogger("watchdog.observers.inotify_buffer").setLevel("INFO")
    logging.info("Monitoring for new result handler config YAML files at: %s ", config_dir)
    # load any pre-existing configs by replaying synthetic 'created' events
    for f in os.listdir(config_dir):
        result_handler_config_monitor.on_created(FileCreatedEvent(config_dir + "/" + os.path.basename(f)))
    # schedule our testssl.sh json result file watchdog
    observer2 = Observer()
    observer2.schedule(event_handler, input_dir, recursive=True)
    observer2.start()
    logging.getLogger("watchdog.observers.inotify_buffer").setLevel("INFO")
    logging.info("Monitoring for new testssl.sh result JSON files at: %s ", input_dir)
    # optionally serve httpserver_root_dir over HTTP via twisted
    httpdthread = None
    if httpserver_port is not None and isinstance(httpserver_port, int):
        logging.info("Starting HTTP server listening on: %d and serving up: %s" % (httpserver_port, httpserver_root_dir))
        resource = File(httpserver_root_dir)
        factory = Site(resource)
        endpoint = endpoints.TCP4ServerEndpoint(reactor, httpserver_port)
        endpoint.listen(factory)
        # run the twisted reactor on a daemon thread; False disables its
        # signal handlers (we are not on the main thread)
        httpdthread = threading.Thread(target=reactor.run, args=(False,))
        httpdthread.daemon = True
        httpdthread.start()
    # block the main thread; Ctrl-C shuts the observers down cleanly
    try:
        while True:
            time.sleep(30)
    except KeyboardInterrupt:
        observer1.stop()
        observer2.stop()
        observer1.join()
        observer2.join()
###########################
# Main program
##########################
if __name__ == '__main__':
    # CLI entry point: parse arguments, configure logging, start watching.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input-dir', dest='input_dir', default="./input", help="Directory path to recursively monitor for new `*.json` testssl.sh result files. Default './input'")
    parser.add_argument('-f', '--input-filename-filter', dest='input_filename_filter', default=".*testssloutput.+.json", help="Regex for filter --input-dir files from triggering the watchdog. Default '.*testssloutput.+.json'")
    parser.add_argument('-I', '--config-dir', dest='config_dir', default="./configs", help="Directory path to recursively monitor for new `*.yaml` result handler config files. Default './configs'")
    parser.add_argument('-l', '--log-file', dest='log_file', default=None, help="Path to log file, default None, STDOUT")
    parser.add_argument('-x', '--log-level', dest='log_level', default="DEBUG", help="log level, default DEBUG ")
    parser.add_argument('-w', '--input-dir-watchdog-threads', dest='input_dir_watchdog_threads', default=10, help="max threads for watchdog input-dir file processing, default 10")
    parser.add_argument('-s', '--input-dir-sleep-seconds', dest='input_dir_sleep_seconds', default=5, help="When a new *.json file is detected in --input-dir, how many seconds to wait before processing to allow testssl.sh to finish writing. Default 5")
    parser.add_argument('-d', '--debug-object-path-expr', dest='debug_objectpath_expr', default=False, help="Default False. When True, adds more details on ObjectPath expression parsing to logs")
    parser.add_argument('-D', '--debug-dump-evaldoc', action='store_true', help="Flag to enable dumping the 'evaluation_doc' to STDOUT after it is constructed for evaluations (WARNING: this is large & json pretty printed)")
    parser.add_argument('-E', '--dump-evaldoc-on-error', action='store_true', help="Flag to enable dumping the 'evaluation_doc' to STDOUT (json pretty printed) on any error (WARNING: this is large & json pretty printed)")
    parser.add_argument('-p', '--httpserver-port', dest='httpserver_port', default=None, help="Default None, if a numeric port is specified, this will startup a simple twisted http server who's document root is the --httpserver-root-dir")
    parser.add_argument('-r', '--httpserver-root-dir', dest='httpserver_root_dir', default=None, help="Default None, if specified the embedded http server will serve up content from this directory, has no effect if --httpserver-port is not specified")
    args = parser.parse_args()
    # root logger: level from CLI, mode 'w' truncates any previous log file
    logging.basicConfig(level=logging.getLevelName(args.log_level),
                        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                        filename=args.log_file,filemode='w')
    # log timestamps in UTC
    logging.Formatter.converter = time.gmtime
    # hand off to the long-running watcher loop (blocks until Ctrl-C)
    init_watching(args.input_dir,
                  args.config_dir,
                  args.input_dir_watchdog_threads,
                  int(args.input_dir_sleep_seconds),
                  args.debug_objectpath_expr,
                  args.input_filename_filter,
                  args.dump_evaldoc_on_error,
                  args.debug_dump_evaldoc,
                  args.httpserver_port,
                  args.httpserver_root_dir)
|
from grpclib.health.check import ServiceStatus
from grpclib.health.service import Health
from grpclib.server import Server
from insanic.app import Insanic
from insanic.conf import settings
from interstellar import config as interstellar_common_config
from interstellar.abstracts import AbstractPlugin
from interstellar.server import config
from interstellar.server.server import GRPCServer
from interstellar.utils import load_class
class InterstellarServer(AbstractPlugin):
    """Insanic plugin that hosts the service's GRPC endpoints ("beacons").

    init_app instantiates the configured server classes, wraps them (plus a
    grpclib Health service) in a GRPCServer, and hooks start/stop into the
    Insanic server lifecycle listeners.

    NOTE(review): all state lives in mutable *class* attributes, so only one
    active InterstellarServer per process is supported; reset() clears it.
    """
    plugin_name = "INTERSTELLAR_SERVER"
    app = None
    # vocabulary beacon == server
    base_beacons = []
    # the GRPCServer wrapping all beacons (None until init_app runs)
    warp_beacon = None
    # map of beacon -> [ServiceStatus] consumed by the grpclib Health service
    health_beacons = None
    @classmethod
    def init_app(cls, app: Insanic):
        """
        Initializes application with grpc server functionality
        :param app: Instance of insanic application
        :return:
        """
        # load common interstellar configs
        cls._load_config(settings, interstellar_common_config)
        # NOTE(review): common config goes through _load_config while the
        # server config goes through load_config -- confirm both are intended.
        cls.load_config(settings, config)
        # INTERSTELLAR_SERVERS entries may be dotted-path strings or classes
        for s in app.config.INTERSTELLAR_SERVERS:
            if isinstance(s, str):
                klass = load_class(s)
            else:
                klass = s
            cls.base_beacons.append(klass())
            cls.logger('info', f"Loading {klass.__module__}.{klass.__name__} for GRPC serving.")
        if len(cls.base_beacons):
            # only need to initialize grpc servers if there are actual servers to run
            cls.health_beacons = {s: [ServiceStatus()] for s in cls.base_beacons}
            cls.warp_beacon = GRPCServer(cls.base_beacons + [Health(cls.health_beacons)])
            # attach start stop listeners
            cls.attach_listeners(app)
            # attach grpc server events
            cls.attach_grpc_server_events(app)
        else:
            cls.logger('warning', f"No GRPC Servers have been initialized.")
        super().init_app(app)
    @classmethod
    def attach_listeners(cls, app: Insanic):
        """Hook GRPC start/stop into the Insanic server lifecycle events."""
        @app.listener('after_server_start')
        async def after_server_start_start_grpc(app, loop=None, **kwargs):
            # honor the runtime kill-switch setting
            if app.config.INTERSTELLAR_SERVER_ENABLED:
                await cls.start(loop=loop)
            else:
                cls.logger("info", f"INTERSTELLAR_SERVER_ENABLED is turned off")
        @app.listener('before_server_stop')
        async def before_server_stop_stop_grpc(app, loop=None, **kwargs):
            await cls.stop()
    @classmethod
    def attach_grpc_server_events(cls, app):
        """Attach interstellar event handlers to the wrapped GRPC server."""
        # imported lazily, presumably to avoid an import cycle -- confirm
        from interstellar.server.events import attach_events
        attach_events(cls.warp_beacon)
    @classmethod
    async def start(cls, host=None, port=None, *, reuse_port=True, reuse_address=True, loop=None):
        """
        Start grpc server
        :param host: bind host; defaults to settings.INTERSTELLAR_SERVER_HOST
        :param port: bind port; defaults to settings.INTERSTELLAR_SERVER_PORT,
            else SERVICE_PORT + INTERSTELLAR_SERVER_PORT_DELTA
        :param reuse_port: passed through to the underlying server socket
        :param reuse_address: passed through to the underlying server socket
        :return:
        """
        cls._host = host or settings.INTERSTELLAR_SERVER_HOST
        cls._port = port \
            or settings.INTERSTELLAR_SERVER_PORT \
            or cls.app.config.SERVICE_PORT + cls.app.config.INTERSTELLAR_SERVER_PORT_DELTA
        if cls.warp_beacon:
            if loop:
                cls.warp_beacon._loop = loop
            await cls.warp_beacon.start(host=cls._host, port=cls._port, reuse_port=reuse_port,
                                        reuse_address=reuse_address)
            cls.logger('info', f"Serving GRPC from {cls._host}:{cls._port}")
        else:
            cls.logger('warning', f"Did not start GRPC server because server has not been initialized.")
    @classmethod
    async def stop(cls):
        """
        Gracefully stops grpc server
        :return:
        """
        cls.logger('info', f"Closing GRPC.")
        if cls.warp_beacon is not None:
            cls.warp_beacon.close()
            await cls.warp_beacon.wait_closed()
        cls.logger('info', f"Closed GRPC.")
    @classmethod
    def reset(cls):
        """Clear all shared class-level state (used between tests/reloads)."""
        cls.base_beacons = []
        cls.warp_beacon = None
        cls.health_beacons = None
        cls.app = None
        cls.config_imported = False
# keeping with protoss...
InterstellarWarpBeacon = InterstellarServer
|
<gh_stars>1-10
from requests import Session
import re
import numpy
import time
import sys
import csv
import datetime
import random
import string
def get_letter(letter, follow_subsequent=True):
    """Scrape the Australia Post suburb index page for `letter`.

    Fetches https://auspost.com.au/postcode/suburb-index/<letter>, collects
    every suburb name on the page, and (when follow_subsequent is True)
    recurses one level into numbered continuation pages like 'b1', 'b2'.

    PERF FIX: splitlines() was previously recomputed on every loop
    iteration (O(n^2) in the page size) and both regexes were recompiled
    per line; split once and precompile.

    Args:
        letter: index-page key (a letter, or 'b1'-style continuation key).
        follow_subsequent: recurse into continuation pages (one level).
    Returns:
        List of suburb name strings.
    """
    print("get_letter(\"{}\").".format(letter))
    s = Session()  # this session will hold the cookies
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.2 Safari/605.1.15",
               "Origin" : "https://auspost.com.au/postcode",
               "Referer": "https://auspost.com.au/postcode",
               "Accept-Language" : "en-au",
               "Host" : "auspost.com.au",
               "Accept-Encoding" : "gzip, deflate",
               "X-Requested-With" : "XMLHttpRequest",
               "Accept": "*/*",
               "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
               "Connection": "keep-alive",
               }
    target_url = "https://auspost.com.au/postcode/suburb-index/{}".format(letter)
    response = s.get(target_url, headers=headers)
    result_list = []
    subsequent_page_list = []
    suburb_re = re.compile(r'.*class=\"pol-suburb-index-link js-pol-suburb-index-link\">(.+)</a>', flags=re.IGNORECASE)
    page_re = re.compile(r'.*<a href=\"/postcode/suburb-index/({}\d)\">\d</a>'.format(letter), flags=re.IGNORECASE)
    for line in response.text.splitlines():
        lid = suburb_re.match(line)
        if lid:
            result_list.append(lid.group(1))
        lid = page_re.match(line)
        if lid:
            print("Found subs page: {}".format(lid.group(1)))
            subsequent_page_list.append(lid.group(1))
    if follow_subsequent:
        for subsequent_page in subsequent_page_list:
            # continuation pages never recurse further (False)
            result_list.extend(get_letter(subsequent_page, False))
    return result_list
def main():
    """Scrape every letter's suburb index (in random order, with a small
    random delay between letters) and write the sorted names to a CSV."""
    letters = list(string.ascii_lowercase)
    random.shuffle(letters)
    total = len(letters)
    suburbs = []
    for idx, letter in enumerate(letters, start=1):
        print("Doing letter {} of {}.".format(idx, total))  ########
        # polite randomized delay before every fetch except the first
        if idx > 1:
            delay = numpy.random.exponential(1, 1)[0]
            print("Delaying {} s.".format(delay))
            sys.stdout.flush()
            time.sleep(delay)
        suburbs.extend(get_letter(letter))
    suburbs.sort()
    print(suburbs)
    with open('AP_suburb_list.csv', 'w') as out:
        writer = csv.writer(out)
        writer.writerow(['name'])
        for name in suburbs:
            writer.writerow([name])
# entry point guard: run the full scrape when executed as a script
if __name__ == "__main__":
    main()
|
<reponame>eggfly/WatchIO
print("Hello, world!")
from ST7735 import TFT
from sysfont import sysfont
import machine
from machine import SPI,Pin
import time
import math
# backlight pin (GPIO15) driven low -- whether low means "on" depends on the
# board wiring; TODO confirm polarity
backlight = machine.Pin(15, machine.Pin.OUT)
backlight.value(0)
# software/HW SPI (id -1) at 70 MHz, mode 0; sck=18, mosi=23, miso=19
spi = SPI(-1, baudrate=70000000, polarity=0, phase=0, sck=Pin(18), mosi=Pin(23), miso=Pin(19))
# display control pins 27/33/14 -- presumably dc/reset/cs; confirm against
# the ST7735 TFT driver signature
tft=TFT(spi, 27, 33, 14)
tft.initr()
# True = RGB color order (vs BGR)
tft.rgb(True)
def testlines(color):
    """Draw a fan of lines from each display corner to the two far edges,
    clearing the screen before each corner's pass."""
    width, height = tft.size()
    for corner_y in (0, height - 1):
        for corner_x in (0, width - 1):
            tft.fill(TFT.BLACK)
            # endpoints land on the edge opposite the corner
            far_y = height - 1 if corner_y == 0 else 0
            far_x = width - 1 if corner_x == 0 else 0
            for x in range(0, width, 6):
                tft.line((corner_x, corner_y), (x, far_y), color)
            for y in range(0, height, 6):
                tft.line((corner_x, corner_y), (far_x, y), color)
def testfastlines(color1, color2):
    """Cover the cleared screen with a grid: horizontal lines in color1
    every 5 px, then vertical lines in color2 every 5 px."""
    tft.fill(TFT.BLACK)
    width, height = tft.size()
    for y in range(0, height, 5):
        tft.hline((0, y), width, color1)
    for x in range(0, width, 5):
        tft.vline((x, 0), height, color2)
def testdrawrects(color):
    """Draw concentric centred square outlines of growing size.

    BUGFIX: the y coordinate previously used float division (x/2) while the
    x coordinate used integer division (x//2), yielding float coordinates
    for odd sizes; both now use //.
    """
    tft.fill(TFT.BLACK)
    half_w = tft.size()[0] // 2
    half_h = tft.size()[1] // 2
    for x in range(0, tft.size()[0], 6):
        tft.rect((half_w - x // 2, half_h - x // 2), (x, x), color)
def testfillrects(color1, color2):
    """Draw shrinking centred filled squares (color1) each outlined in
    color2.

    BUGFIX: the y coordinate previously used float division (x/2); both
    axes now use integer division like the x coordinate already did.
    """
    tft.fill(TFT.BLACK)
    half_w = tft.size()[0] // 2
    half_h = tft.size()[1] // 2
    for x in range(tft.size()[0], 0, -6):
        tft.fillrect((half_w - x // 2, half_h - x // 2), (x, x), color1)
        tft.rect((half_w - x // 2, half_h - x // 2), (x, x), color2)
def testfillcircles(radius, color):
    """Tile the screen with filled circles spaced one diameter apart."""
    diameter = radius * 2
    for cx in range(radius, tft.size()[0], diameter):
        for cy in range(radius, tft.size()[1], diameter):
            tft.fillcircle((cx, cy), radius, color)
def testdrawcircles(radius, color):
    """Tile the screen (running slightly past the edges) with circle
    outlines spaced one diameter apart."""
    diameter = radius * 2
    for cx in range(0, tft.size()[0] + radius, diameter):
        for cy in range(0, tft.size()[1] + radius, diameter):
            tft.circle((cx, cy), radius, color)
def testtriangles():
    """Draw 15 nested triangles that shrink toward the apex while the
    color value steps upward each iteration."""
    tft.fill(TFT.BLACK)
    color = 0xF800
    apex_x = tft.size()[0] // 2
    bottom = tft.size()[1] - 1
    top = 0
    right = tft.size()[0]
    for _ in range(15):
        # apex -> bottom-left -> bottom-right -> back to apex
        tft.line((apex_x, top), (top, bottom), color)
        tft.line((top, bottom), (right, bottom), color)
        tft.line((right, bottom), (apex_x, top), color)
        bottom -= 4
        top += 4
        right -= 4
        color += 100
def testroundrects():
    """Draw five batches of 17 nested rectangle outlines; each rectangle
    shifts right/down while shrinking, cycling the color as it goes."""
    tft.fill(TFT.BLACK)
    color = 100
    for _ in range(5):
        left, top = 0, 0
        rect_w = tft.size()[0] - 2
        rect_h = tft.size()[1] - 2
        for _ in range(17):
            tft.rect((left, top), (rect_w, rect_h), color)
            left += 2
            top += 3
            rect_w -= 4
            rect_h -= 6
            color += 1100
        color += 100
def tftprinttest():
    """Exercise text rendering: scaled banners, then a block of status
    lines (pi, hex, elapsed run time)."""
    tft.fill(TFT.BLACK)
    v = 30
    # "Hello World!" at growing scales, then a big number at scale 4
    for scale, color in ((1, TFT.RED), (2, TFT.YELLOW), (3, TFT.GREEN)):
        tft.text((0, v), "Hello World!", color, sysfont, scale, nowrap=True)
        v += sysfont["Height"] * scale
    tft.text((0, v), str(1234.567), TFT.BLUE, sysfont, 4, nowrap=True)
    time.sleep_ms(1500)
    tft.fill(TFT.BLACK)
    v = 0
    # status block: (text, color, line-height multiplier after drawing)
    for text, color, advance in (
        ("Hello World!", TFT.RED, 1),
        (str(math.pi), TFT.GREEN, 1),
        (" Want pi?", TFT.GREEN, 2),
        (hex(8675309), TFT.GREEN, 1),
        (" Print HEX!", TFT.GREEN, 2),
        ("Sketch has been", TFT.WHITE, 1),
        ("running for: ", TFT.WHITE, 1),
        (str(time.ticks_ms() / 1000), TFT.PURPLE, 1),
        (" seconds.", TFT.WHITE, 1),
    ):
        tft.text((0, v), text, color, sysfont)
        v += sysfont["Height"] * advance
def test_main():
    """Run the full demo: wrapped-text screen, print test, then each shape
    test with a short pause between them."""
    tft.fill(TFT.BLACK)
    tft.text((0, 0), "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur adipiscing ante sed nibh tincidunt feugiat. Maecenas enim massa, fringilla sed malesuada et, malesuada sit amet turpis. Sed porttitor neque ut ante pretium vitae malesuada nunc bibendum. Nullam aliquet ultrices massa eu hendrerit. Ut sed nisi lorem. In vestibulum purus a tortor imperdiet posuere. ", TFT.WHITE, sysfont, 1)
    time.sleep_ms(1000)
    tftprinttest()
    time.sleep_ms(4000)
    # each step is (action, milliseconds to pause afterwards)
    for action, pause_ms in (
        (lambda: testlines(TFT.YELLOW), 500),
        (lambda: testfastlines(TFT.RED, TFT.BLUE), 500),
        (lambda: testdrawrects(TFT.GREEN), 500),
        (lambda: testfillrects(TFT.YELLOW, TFT.PURPLE), 500),
        (lambda: (tft.fill(TFT.BLACK),
                  testfillcircles(10, TFT.BLUE),
                  testdrawcircles(10, TFT.WHITE)), 500),
        (testroundrects, 500),
        (testtriangles, 500),
    ):
        action()
        time.sleep_ms(pause_ms)
# kick off the demo immediately on import/boot
test_main()
|
<filename>cca_zoo/deepmodels/architectures.py
from abc import abstractmethod
from math import sqrt
from typing import Iterable
import torch
class BaseEncoder(torch.nn.Module):
    """Abstract base class for encoder networks.

    Subclasses implement forward(); when `variational` is True the
    convention in this module is that forward() returns (mu, logvar).
    """

    @abstractmethod
    def __init__(self, latent_dims: int, variational: bool = False):
        super().__init__()
        # whether forward() yields distribution parameters or a point estimate
        self.variational = variational
        self.latent_dims = latent_dims

    @abstractmethod
    def forward(self, x):
        pass
class BaseDecoder(torch.nn.Module):
    """Abstract base class for decoder networks mapping latent vectors
    back to feature space."""

    @abstractmethod
    def __init__(self, latent_dims: int):
        super().__init__()
        self.latent_dims = latent_dims

    @abstractmethod
    def forward(self, x):
        pass
class Encoder(BaseEncoder):
    """Fully connected encoder: feature_size -> hidden layers -> latent_dims.

    Each hidden layer is Linear+ReLU. When `variational` is True the head
    is split into two parallel linear layers producing (mu, logvar);
    otherwise a single linear head produces the latent code.
    """

    def __init__(
        self,
        latent_dims: int,
        variational: bool = False,
        feature_size: int = 784,
        layer_sizes: Iterable = None,
    ):
        super().__init__(latent_dims, variational=variational)
        if layer_sizes is None:
            layer_sizes = [128]
        # chain of widths: input feature size followed by every hidden size
        widths = [feature_size] + list(layer_sizes)
        hidden = [
            torch.nn.Sequential(torch.nn.Linear(d_in, d_out), torch.nn.ReLU())
            for d_in, d_out in zip(widths[:-1], widths[1:])
        ]
        self.layers = torch.nn.Sequential(*hidden)
        last = widths[-1]
        if self.variational:
            self.fc_mu = torch.nn.Linear(last, latent_dims)
            self.fc_var = torch.nn.Linear(last, latent_dims)
        else:
            self.fc = torch.nn.Linear(last, latent_dims)

    def forward(self, x):
        h = self.layers(x)
        if not self.variational:
            return self.fc(h)
        return self.fc_mu(h), self.fc_var(h)
class Decoder(BaseDecoder):
    """Fully connected decoder: latent_dims -> hidden layers -> feature_size.

    The first layer is Linear+Sigmoid, hidden-to-hidden layers are
    Linear+ReLU, and the final projection is plain Linear (or
    Linear+Sigmoid when `norm_output` is True, squashing output to (0, 1)).
    """

    def __init__(
        self,
        latent_dims: int,
        feature_size: int = 784,
        layer_sizes: list = None,
        norm_output: bool = False,
    ):
        super().__init__(latent_dims)
        if layer_sizes is None:
            layer_sizes = [128]
        # first layer maps latent space into the first hidden width
        blocks = [
            torch.nn.Sequential(
                torch.nn.Linear(latent_dims, layer_sizes[0]), torch.nn.Sigmoid()
            )
        ]
        # hidden-to-hidden layers with ReLU
        for idx in range(len(layer_sizes) - 1):
            blocks.append(
                torch.nn.Sequential(
                    torch.nn.Linear(layer_sizes[idx], layer_sizes[idx + 1]),
                    torch.nn.ReLU(),
                )
            )
        # final projection back to feature space
        if norm_output:
            blocks.append(
                torch.nn.Sequential(
                    torch.nn.Linear(layer_sizes[-1], feature_size),
                    torch.nn.Sigmoid(),
                )
            )
        else:
            blocks.append(
                torch.nn.Sequential(torch.nn.Linear(layer_sizes[-1], feature_size))
            )
        self.layers = torch.nn.Sequential(*blocks)

    def forward(self, x):
        return self.layers(x)
class CNNEncoder(BaseEncoder):
    """Convolutional encoder: stacked Conv2d+ReLU blocks followed by linear
    head(s) mapping the flattened features to latent_dims.

    NOTE(review): the loop below builds one conv block per channel entry
    *except the last* (range(len(channels) - 1)), so channels[-1] never
    gets a layer -- confirm this is intended. Spatial size is assumed
    unchanged through every conv (only valid for the 'same'-style defaults
    of kernel 5 / stride 1 / padding 2) -- confirm before passing other
    strides.
    """
    def __init__(
        self,
        latent_dims: int,
        variational: bool = False,
        feature_size: Iterable = (28, 28),
        channels: list = None,
        kernel_sizes: list = None,
        stride: list = None,
        padding: list = None,
    ):
        super(CNNEncoder, self).__init__(latent_dims, variational=variational)
        # defaults: single 1->1 conv, 5x5 kernel, stride 1, padding 2
        if channels is None:
            channels = [1, 1]
        if kernel_sizes is None:
            kernel_sizes = [5] * (len(channels))
        if stride is None:
            stride = [1] * (len(channels))
        if padding is None:
            padding = [2] * (len(channels))
        # assume square input
        conv_layers = []
        current_size = feature_size[0]
        current_channels = 1
        for l_id in range(len(channels) - 1):
            conv_layers.append(
                torch.nn.Sequential(  # input shape (1, current_size, current_size)
                    torch.nn.Conv2d(
                        in_channels=current_channels,  # input height
                        out_channels=channels[l_id],  # n_filters
                        kernel_size=kernel_sizes[l_id],  # filter size
                        stride=stride[l_id],  # filter movement/step
                        padding=padding[l_id],
                        # if want same width and length of this image after Conv2d, padding=(kernel_size-1)/2 if
                        # stride=1
                    ),  # output shape (out_channels, current_size, current_size)
                    torch.nn.ReLU(),  # activation
                )
            )
            # NOTE(review): no-op -- spatial size is assumed constant
            current_size = current_size
            current_channels = channels[l_id]
        if self.variational:
            # two heads: mean and log-variance of q(z|x)
            self.fc_mu = torch.nn.Sequential(
                torch.nn.Linear(
                    int(current_size * current_size * current_channels), latent_dims
                ),
            )
            self.fc_var = torch.nn.Sequential(
                torch.nn.Linear(
                    int(current_size * current_size * current_channels), latent_dims
                ),
            )
        else:
            self.fc = torch.nn.Sequential(
                torch.nn.Linear(
                    int(current_size * current_size * current_channels), latent_dims
                ),
            )
        self.conv_layers = torch.nn.Sequential(*conv_layers)
    def forward(self, x):
        """Encode a batch of images; returns (mu, logvar) when variational."""
        x = self.conv_layers(x)
        # flatten all spatial/channel dims for the linear head(s)
        x = x.reshape((x.shape[0], -1))
        if self.variational:
            mu = self.fc_mu(x)
            logvar = self.fc_var(x)
            return mu, logvar
        else:
            x = self.fc(x)
            return x
class CNNDecoder(BaseDecoder):
    """Convolutional decoder: a linear layer expands the latent vector,
    which is then reshaped to (B, C, H, W) and passed through
    ConvTranspose2d blocks back to image space.

    NOTE(review): spatial size is assumed constant through every transpose
    conv (current_size is never updated), which only holds for the
    'same'-style defaults (kernel 5 / stride 1 / padding 2) -- confirm
    before passing other strides.
    """
    def __init__(
        self,
        latent_dims: int,
        feature_size: Iterable = (28, 28),
        channels: list = None,
        kernel_sizes=None,
        strides=None,
        paddings=None,
        norm_output: bool = False,
    ):
        super(CNNDecoder, self).__init__(latent_dims)
        if channels is None:
            channels = [1, 1]
        if kernel_sizes is None:
            kernel_sizes = [5] * len(channels)
        if strides is None:
            strides = [1] * len(channels)
        if paddings is None:
            paddings = [2] * len(channels)
        # squash the final output to (0, 1) when requested (e.g. image data)
        if norm_output:
            activation = torch.nn.Sigmoid()
        else:
            activation = torch.nn.ReLU()
        conv_layers = []
        current_channels = 1
        current_size = feature_size[0]
        # Loop backward through decoding layers in order to work out the dimensions at each layer - in particular the first
        # linear layer needs to know B*current_size*current_size*channels
        for l_id, (channel, kernel, stride, padding) in reversed(
                list(enumerate(zip(channels, kernel_sizes, strides, paddings)))
        ):
            conv_layers.append(
                torch.nn.Sequential(
                    torch.nn.ConvTranspose2d(
                        in_channels=channel,  # input height
                        out_channels=current_channels,
                        kernel_size=kernel_sizes[l_id],
                        stride=strides[l_id],  # filter movement/step
                        padding=paddings[l_id],
                        # if want same width and length of this image after Conv2d, padding=(kernel_size-1)/2 if
                        # stride=1
                    ),
                    activation,
                )
            )
            # NOTE(review): no-op -- spatial size is assumed constant
            current_size = current_size
            current_channels = channel
        # reverse layers as constructed in reverse
        self.conv_layers = torch.nn.Sequential(*conv_layers[::-1])
        # expand the latent vector to the flattened size the convs expect
        self.fc_layer = torch.nn.Sequential(
            torch.nn.Linear(
                latent_dims, int(current_size * current_size * current_channels)
            ),
        )
    def forward(self, x):
        """Decode a batch of latent vectors to (B, C, H, W) images."""
        x = self.fc_layer(x)
        x = x.reshape((x.shape[0], self.conv_layers[0][0].in_channels, -1))
        # assume a square spatial layout when restoring H and W
        x = x.reshape(
            (
                x.shape[0],
                self.conv_layers[0][0].in_channels,
                int(sqrt(x.shape[-1])),
                int(sqrt(x.shape[-1])),
            )
        )
        x = self.conv_layers(x)
        return x
class LinearEncoder(BaseEncoder):
    """Single linear projection from feature space into the latent space;
    variational mode uses two parallel heads producing (mu, logvar)."""

    def __init__(self, latent_dims: int, feature_size: int, variational: bool = False):
        super().__init__(latent_dims, variational=variational)
        # redundant with the base-class assignment; kept for parity
        self.variational = variational
        if self.variational:
            self.fc_mu = torch.nn.Linear(feature_size, latent_dims)
            self.fc_var = torch.nn.Linear(feature_size, latent_dims)
        else:
            self.fc = torch.nn.Linear(feature_size, latent_dims)

    def forward(self, x):
        if not self.variational:
            return self.fc(x)
        return self.fc_mu(x), self.fc_var(x)
class LinearDecoder(BaseDecoder):
    """Single linear projection from the latent space back to feature space."""

    def __init__(self, latent_dims: int, feature_size: int):
        super().__init__(latent_dims)
        self.linear = torch.nn.Linear(latent_dims, feature_size)

    def forward(self, x):
        return self.linear(x)
|
<reponame>rauwuckl/CElegansPhototaxis
# Either run python3 evolution.py to start from 0
# or run 'python3 evolution.py population N' to start with population number N which is loaded from filename populationN.npy
import numpy as np
import timeit
import threading
import sys
import random as randomPack
import os.path
from os import remove as os_remove
from multiprocessing import Pipe, Pool,Process,Queue,TimeoutError
from phototaxis import *
# set_noiseTerm('')
# --- GA hyperparameters ---
# std-dev of the per-gene gaussian mutation noise (see mutate())
mutation_rate = 0.00001
# individuals per generation
popsize = 300
# fittest individuals copied unchanged into the next generation
elite_size = 20
# total number of generations to evolve
n_generations = 200
# blend-margin for intermediate recombination (see crossover())
p = 0.02
# set this to 1 for a machine with only one or two cores:
numberThreads = 14
# should not be changed:
length_vector = 33
# offspring are created in pairs, so the non-elite remainder must be even
if (popsize-elite_size)%2 != 0:
    raise ValueError('popsize-elite_size not even')
# each thread uses a different instance of the network. different object
evaluators = [Phototaxis_evaluator(i) for i in range(numberThreads)]
def multiProcessPopulationFitness(pop, NumberOfAssesment=4):
    '''Assess fitness for every individual, NumberOfAssesment times each.

    The population is partitioned across the module-level `evaluators`,
    one worker Process per evaluator; each worker sends its evaluated
    slice back through a one-way Pipe. With numberThreads == 1 the
    evaluation runs in-process instead.

    Args:
        pop: list of {'fitness', 'content'} dicts, must have length popsize.
        NumberOfAssesment: how many times each individual is assessed.
    Returns:
        The population list with fitness values filled in (order of the
        concatenated worker slices).
    '''
    if(popsize != len(pop)):
        raise ValueError('lenghth of population doesnt fit popsize')
    if(numberThreads ==1):
        return evaluators[0].assesPopulationFitness(pop, sender=None, numberAssesments=NumberOfAssesment)
    lowerBound = 0
    processes = []
    receivers = []
    #split the population into numberThreads parts and give each part to a different job
    for i,evaluator in enumerate(evaluators):
        # the first blocks will be one smaller (implicitly rounding down with //) until we can equally fit the rest
        # stepsize = N individuals left / N jobs left
        upperBound = lowerBound+((popsize-lowerBound)//(numberThreads-i))
        print('[{}:{}]'.format(lowerBound,upperBound))
        # to get the data back from the process we use a unidirectional pipe
        receiver, sender = Pipe(duplex=False)
        newprocess = Process(target=evaluator.assesPopulationFitness, args=(pop[lowerBound:upperBound],sender,NumberOfAssesment))
        # newprocess = Process(target=evaluators[i].assesPopulationFitness, args=(subPopulation,sender))
        newprocess.start()
        receivers.append(receiver)
        processes.append(newprocess)
        lowerBound = upperBound
    evaluatedPop = []
    # collect the data (recv blocks until each worker has sent its slice)
    for rec in receivers:
        evaluatedPop.extend(rec.recv())
    # terminate the processes
    for job in processes:
        job.join()
    print('finished threaded Population assesment')
    return evaluatedPop
def sortPopulation(populationToSort):
    """Return the population ordered by descending fitness (fittest first)."""
    return sorted(
        populationToSort,
        key=lambda member: member['fitness'],
        reverse=True,
    )
def evalPopNtimes(name, N):
    '''Assess a saved population N times for a more reliable fitness picture.

    Loads the population from the given .npy file, evaluates it N times,
    sorts by fitness and saves the result to 'properEvalPop.npy'.

    Args:
        name: name of the .npy file in which the population is stored
        N: number of fitness assessments per individual
    '''
    loaded = np.load(name)
    evaluated = multiProcessPopulationFitness(loaded, N)
    np.save('properEvalPop', sortPopulation(evaluated))
def selectParent(pop):
    '''Size-two tournament selection: draw two random members and return
    the genome ('content') of the fitter one (ties go to the second).'''
    first = randomPack.choice(pop)
    second = randomPack.choice(pop)
    winner = first if first['fitness'] > second['fitness'] else second
    return winner['content']
def crossover(ParentA, ParentB):
    """Intermediate recombination (similar to line recombination).

    Each child gene is a random affine blend of the two parent genes,
    resampled until both offspring genes fall strictly inside (0, 1).
    The module-level `p` widens the blend range to (-p, 1+p).

    GENERALIZED: the gene count is now taken from the parents themselves
    instead of the module-level `length_vector` constant, so equal-length
    parents of any size work (unchanged for the standard 33-gene genome).

    Args:
        ParentA, ParentB: equal-length sequences of genes in (0, 1).
    Returns:
        Tuple (ChildA, ChildB) of numpy arrays of the same length.
    """
    n_genes = len(ParentA)
    ChildA = np.zeros(n_genes)
    ChildB = np.zeros(n_genes)
    for i in range(n_genes):
        # resample the blend factors until both offspring genes are valid
        while True:
            alpha = randomPack.uniform(-p, 1 + p)
            beta = randomPack.uniform(-p, 1 + p)
            t = alpha * ParentA[i] + (1 - alpha) * ParentB[i]
            s = beta * ParentB[i] + (1 - beta) * ParentA[i]
            if (0 < t < 1) and (0 < s < 1):
                break
        ChildA[i] = t
        ChildB[i] = s
    return (ChildA, ChildB)
def mutate(child):
    """Add independent gaussian noise to each gene and clip back to [0, 1].

    The module-level `mutation_rate` is the noise standard deviation.

    GENERALIZED: the noise vector is sized from the child itself instead of
    the module-level `length_vector`, so genomes of any length work
    (unchanged for the standard 33-gene genome).
    """
    noise = np.random.normal(loc=0.0, scale=mutation_rate, size=child.shape)
    return np.clip(child + noise, 0, 1)
def do_evolution(population, startGen):
    """Run the genetic algorithm from generation `startGen` to n_generations.

    Each generation keeps the `elite_size` fittest individuals, breeds the
    remainder via tournament selection + crossover + mutation, re-assesses
    fitness, and checkpoints the sorted population to 'population<t>.npy'.
    Creating a file named 'stop' in the working directory halts evolution
    after the currently running generation.

    Args:
        population: sorted list of {'fitness', 'content'} dicts, length popsize.
        startGen: generation number to resume from.
    Returns:
        The final (sorted) population.
    """
    # NOTE: the former `global` statement was removed -- these module-level
    # parameters are only read, never assigned, inside this function.
    if len(population) != popsize:
        raise ValueError('popsize not equal length of population')
    for t in range(startGen, n_generations):
        # keep the best individuals straight away (elitism)
        next_population = population[0:elite_size]
        for i in range(int((popsize - elite_size) / 2)):
            # select parents (biased on their fitness)
            ParentA = selectParent(population)
            ParentB = selectParent(population)
            # create two children from the 2 parents
            ChildA, ChildB = crossover(ParentA, ParentB)
            # mutate the children a little bit
            ChildA = mutate(ChildA)
            ChildB = mutate(ChildB)
            # add them to the population list with their fitness yet undefined
            next_population.append({'fitness': None, 'content': ChildA})
            next_population.append({'fitness': None, 'content': ChildB})
            print('{} of {} children born'.format(2 * (i + 1), (popsize - elite_size)))
        # asses fitness for all the population
        next_population = multiProcessPopulationFitness(next_population)
        population = sortPopulation(next_population)
        # BUGFIX: this previously referenced an undefined name `best`
        # (NameError every generation); the fittest individual is the head
        # of the freshly sorted population.
        best = population[0]
        print('Fitest Individual: {} with fitness: {}'.format(best['content'], best['fitness']))
        np.save('population{}'.format(t), population)
        # we can put a file in the folder and the evolution will stop after
        # the running generation
        if os.path.exists('stop'):
            print('stop file was detected')
            os_remove('stop')
            return population
    return population
def checkAssesment(population):
    '''function used to check that the multiprocess fitness evaluation works as expected. set noiseTerm to '' to use it'''
    evaluator = Phototaxis_evaluator(42)
    print('starting pop single')
    start_single = timeit.default_timer()
    pop_single = sortPopulation(evaluator.assesPopulationFitness(population))
    duration_single = timeit.default_timer() - start_single
    print('starting pop multi')
    start_multi = timeit.default_timer()
    pop_multi = sortPopulation(multiProcessPopulationFitness(population))
    duration_multi = timeit.default_timer() - start_multi
    print('single eval took {}, multi core eval took {}, single/multi {} '.format(duration_single, duration_multi, (duration_single/duration_multi)))
    same = ([ind['fitness'] for ind in pop_single] == [ind['fitness'] for ind in pop_multi])
    print('they are the same: {}'.format(same))
    return pop_single, pop_multi
# Script entry point: with no CLI arguments start a fresh random population;
# with "<fileName> <generation>" resume evolution from a saved .npy file.
if __name__ == '__main__':
    print('bla: {}'.format(sys.argv))
    if len(sys.argv) == 1:
        # fresh start: random genomes with worst-possible placeholder fitness
        population = [{'fitness': -float('inf'), 'content': np.random.rand(length_vector)}
                      for _ in range(popsize)]
        population = multiProcessPopulationFitness(population)
        population = sortPopulation(population)
        do_evolution(population, 0)
    else:
        try:
            startGeneration = int(sys.argv[2])
            fileName = str(sys.argv[1])
        except (ValueError, IndexError):
            # BUG FIX: previously this only printed and fell through to a
            # NameError on 'fileName'; abort cleanly instead.
            print('could not parse arguments: {}'.format(sys.argv))
            sys.exit(1)
        startPopulation = list(np.load('{}{}.npy'.format(fileName, startGeneration)))
        do_evolution(startPopulation, (startGeneration+1))
|
"""Setup script to compile tlsssl to run against py2.7 on macOS."""
# standard libs
from distutils.dir_util import mkpath
import os
import urllib2
import shutil
import sys
import stat
import re
import inspect
import argparse
# our libs. kind of hacky since this isn't a valid python package.
CURRENT_DIR = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
PARENT_DIR = os.path.dirname(CURRENT_DIR)
sys.path.insert(0, PARENT_DIR)
from vendir import config # noqa
from vendir import hash_helper # noqa
from vendir import log # noqa
from vendir import package # noqa
from vendir import runner # noqa
# Parsed build configuration (install paths, expected hashes, version info).
CONFIG = config.ConfigSectionMap()
# where an OpenSSL 1.0.1+ libssl.dylib and libcrypto.dylib are now
LIBS_SRC = os.path.join(CONFIG['base_install_path'], 'openssl/lib')
# where you'll want them eventually installed
LIBS_DEST = os.path.join(CONFIG['tlsssl_install_dir'], 'lib')
# where the associated headers are
HEADER_SRC = os.path.join(CONFIG['base_install_path'], 'openssl/include')
def download_python_source_files():
    """Download CPython v2.7.10 source files from GitHub.

    Files already present with the expected sha256 hash are not downloaded
    again.  The script aborts if a file cannot be downloaded or if its hash
    still does not match after download (per this function's original
    contract of verifying hashes).
    """
    log.info("Downloading and verifying python source files...")
    src_dir = os.path.join(CURRENT_DIR, '_src')
    if not os.path.exists(src_dir):
        log.debug("Creating _src directory...")
        mkpath(src_dir)
    os.chdir(src_dir)
    gh_url = 'https://raw.githubusercontent.com/python/cpython/v2.7.10/'
    # Triplets of (local filename, github url, expected sha256 hash) for each
    # required python source file.
    fp = [
        ['ssl.py', '{}Lib/ssl.py'.format(gh_url), CONFIG['ssl_py_hash']],
        ['_ssl.c', '{}Modules/_ssl.c'.format(gh_url), CONFIG['ssl_c_hash']],
        ['make_ssl_data.py', '{}Tools/ssl/make_ssl_data.py'.format(gh_url),
         CONFIG['make_ssl_data_py_hash']],
        ['socketmodule.h', '{}Modules/socketmodule.h'.format(gh_url),
         CONFIG['socketmodule_h_hash']],
    ]
    # Verify we have the correct python source files else download it
    log.detail("Downloading & checking hash of python source files...")
    for fname, url, sha256 in fp:
        # This is a dual check step for file existence and hash matching
        log.debug("Checking source file: {}...".format(fname))
        if not os.path.isfile(fname) or (
                hash_helper.getsha256hash(fname) != sha256):
            log.info("Downloading '{}' source file...".format(fname))
            log.debug("Download url: {}".format(url))
            try:
                data = urllib2.urlopen(url)
                # BUG FIX: use a context manager so the handle is closed even
                # when read/write raises part way through.
                with open(fname, "w") as f:
                    f.write(data.read())
                # Verify the hash of the source file we just downloaded
                download_file_hash = hash_helper.getsha256hash(fname)
                if download_file_hash != sha256:
                    # BUG FIX: a mismatched hash was previously only a
                    # warning and the build continued with unverified
                    # sources; treat it as fatal.
                    log.error("The hash for '{}' does not match the expected "
                              "hash. The downloaded hash is '{}'".format(
                                  fname, download_file_hash))
                    sys.exit(1)
                else:
                    log.debug("The download file '{}' matches our expected "
                              "hash of '{}'".format(fname, sha256))
            except (urllib2.HTTPError, urllib2.URLError,
                    OSError, IOError) as err:
                log.error("Unable to download '{}' "
                          "due to {}\n".format(fname, err))
                sys.exit(1)
    # We are done with _src directory for now so go back to script root path
    os.chdir(CURRENT_DIR)
def patch():
    """Patch source files for the build phase.

    Creates the ``_patch`` directory, generates patched copies of the source
    files from the diffs in ``_diffs``, and copies over the source files that
    are used unmodified.
    """
    log.info("Creating our patch files for tlsssl...")
    patch_dir = os.path.join(CURRENT_DIR, '_patch')
    if not os.path.exists(patch_dir):
        log.debug("Creating _patch directory...")
        mkpath(patch_dir)
    patch_pairs = [
        # ['_patch/_ssl.c', '_src/_ssl.c', ],
        ['_patch/make_ssl_data.py', '_src/make_ssl_data.py'],
        # ['_patch/ssl.py', '_src/ssl.py'],
    ]
    log.detail("Create our patch files if they do not exist...")
    for dest, source in patch_pairs:
        if not os.path.isfile(os.path.join(CURRENT_DIR, dest)):
            source = os.path.join(CURRENT_DIR, source)
            diff = os.path.join(CURRENT_DIR,
                                '_diffs',
                                "{}.diff".format(os.path.basename(dest)))
            dest = os.path.join(CURRENT_DIR, dest)
            log.debug("Patching '{}'".format(dest))
            # TODO: Validate the return code and exit if something didn't work
            cmd = ['/usr/bin/patch', source, diff, "-o", dest]
            out = runner.Popen(cmd)
            runner.pprint(out)
    # Copy the unmodified files into the _patch dir as well.  CONSISTENCY:
    # this used to be three near-identical copy-pasted if-blocks; one loop
    # produces byte-identical behavior and log output.
    for fname in ("socketmodule.h", "_ssl.c", "ssl.py"):
        if not os.path.isfile(os.path.join(patch_dir, fname)):
            log.debug("Copying '{}' to the _patch dir".format(fname))
            source = os.path.join(CURRENT_DIR, "_src", fname)
            shutil.copy(source, os.path.realpath(patch_dir))
    log.detail("All patch files are created...")
def build():
    """Build our tlsssl patch.

    Generates the ``_ssl_data.h`` header, stages renamed OpenSSL dylibs in
    ``./build``, rewrites their install names and linkage with
    install_name_tool, then compiles the patched ``_ssl.c`` into
    ``build/_ssl.so`` against the system python 2.7 headers.
    """
    log.info("Building tlsssl...")
    patch_dir = os.path.join(CURRENT_DIR, '_patch')
    # Step 2: make sure the _ssl_data.h header has been generated
    ssl_data = os.path.join(patch_dir, "_ssl_data.h")
    if not os.path.isfile(ssl_data):
        log.debug("Generate _ssl_data.h header...")
        tool_path = os.path.join(CURRENT_DIR, "_patch", "make_ssl_data.py")
        # Run the generating script
        cmd = ['/usr/bin/python', tool_path, HEADER_SRC, ssl_data]
        out = runner.Popen(cmd)
        runner.pprint(out, 'debug')
    # Step 3: remove the temporary work directory under the build dir
    build_dir = os.path.join(CURRENT_DIR, 'build')
    if os.path.exists(build_dir):
        log.debug("Removing build directory...")
        shutil.rmtree(build_dir, ignore_errors=True)
    log.debug("Creating build directories...")
    mkpath(build_dir)
    # Step 3.5: copy ssl.py to the build directory
    log.info("Copy 'ssl.py' to the build directory...")
    shutil.copy(os.path.join(CURRENT_DIR, '_patch/ssl.py'), build_dir)
    workspace_rel = os.path.join(build_dir)
    workspace_abs = os.path.realpath(workspace_rel)
    # Step 4: copy and rename the dylibs to there
    log.detail("Copying dylibs to build directory")
    ssl_src = os.path.join(LIBS_SRC, "libssl.dylib")
    crypt_src = os.path.join(LIBS_SRC, "libcrypto.dylib")
    ssl_tmp = os.path.join(workspace_abs, "libtlsssl.dylib")
    crypt_tmp = os.path.join(workspace_abs, "libtlscrypto.dylib")
    try:
        shutil.copy(ssl_src, ssl_tmp)
        shutil.copy(crypt_src, crypt_tmp)
    except (IOError) as err:
        log.warn("tlsssl has a dependency on OpenSSL 1.0.1+ as such you "
                 "must build and install OpenSSL from ../openssl.")
        log.error("Build failed and will now exit!")
        log.error("{}".format(err))
        sys.exit(1)
    # Step 5: change the ids of the dylibs
    log.detail("Changing the ids of the dylibs...")
    ssl_dest = os.path.join(LIBS_DEST, "libtlsssl.dylib")
    crypt_dest = os.path.join(LIBS_DEST, "libtlscrypto.dylib")
    # (need to temporarily mark them as writeable)
    # NOTE: I don't think this is needed any longer
    st = os.stat(ssl_tmp)
    os.chmod(ssl_tmp, st.st_mode | stat.S_IWUSR)
    st = os.stat(crypt_tmp)
    os.chmod(crypt_tmp, st.st_mode | stat.S_IWUSR)
    cmd = ['/usr/bin/install_name_tool', '-id', ssl_dest, ssl_tmp]
    out = runner.Popen(cmd)
    runner.pprint(out, 'debug')
    cmd = ['/usr/bin/install_name_tool', '-id', crypt_dest, crypt_tmp]
    out = runner.Popen(cmd)
    runner.pprint(out, 'debug')
    # Step 6: change the link between ssl and crypto
    # This part is a bit trickier - we need to take the existing entry
    # for libcrypto on libssl and remap it to the new location
    cmd = ['/usr/bin/otool', '-L', ssl_tmp]
    out = runner.Popen(cmd)
    runner.pprint(out, 'debug')
    # FIX: use a raw string so the regex backslashes are not interpreted as
    # string escapes (pattern itself unchanged).
    old_path = re.findall(r'^\t(/[^\(]+?libcrypto.*?.dylib)',
                          out[0],
                          re.MULTILINE)[0]
    log.debug("The old path was: {}".format(old_path))
    cmd = ['/usr/bin/install_name_tool', '-change', old_path, crypt_dest,
           ssl_tmp]
    out = runner.Popen(cmd)
    runner.pprint(out, 'debug')
    # Step 7: cleanup permissions
    # NOTE: Same. I don't think this is needed any longer
    st = os.stat(ssl_tmp)
    os.chmod(ssl_tmp, st.st_mode & ~stat.S_IWUSR)
    st = os.stat(crypt_tmp)
    os.chmod(crypt_tmp, st.st_mode & ~stat.S_IWUSR)
    # Step 8: patch in the additional paths and linkages
    # NOTE: This command will output a few warnings that are hidden at
    #       build time. Just an FYI in case this needs to be resolved in
    #       the future.
    system_python_path = ("/System/Library/Frameworks/Python.framework/"
                          "Versions/2.7/include/python2.7")
    cmd = ["cc", "-fno-strict-aliasing", "-fno-common", "-dynamic", "-arch",
           "x86_64", "-arch", "i386", "-g", "-Os", "-pipe", "-fno-common",
           "-fno-strict-aliasing", "-fwrapv", "-DENABLE_DTRACE", "-DMACOSX",
           "-DNDEBUG", "-Wall", "-Wstrict-prototypes", "-Wshorten-64-to-32",
           "-DNDEBUG", "-g", "-fwrapv", "-Os", "-Wall", "-Wstrict-prototypes",
           "-DENABLE_DTRACE", "-arch", "x86_64", "-arch", "i386", "-pipe",
           "-I{}".format(HEADER_SRC),
           "-I{}".format(system_python_path),
           "-c", "_patch/_ssl.c", "-o", "build/_ssl.o"]
    out = runner.Popen(cmd)
    if out[2] == 0:
        # BUG FIX: message previously read "successfullyly"
        log.debug("Build of '_ssl.o' completed successfully")
    else:
        log.error("Build has failed: {}".format(out[1]))
        # BUG FIX: previously the build continued after a failed compile and
        # later crashed in os.remove on the missing _ssl.o; abort instead.
        sys.exit(1)
    cmd = ["cc", "-bundle", "-undefined", "dynamic_lookup", "-arch",
           "x86_64", "-arch", "i386", "-Wl,-F.", "build/_ssl.o",
           "-L{}".format(workspace_abs), "-ltlsssl", "-ltlsssl", "-o",
           "build/_ssl.so"]
    out = runner.Popen(cmd)
    if out[2] == 0:
        log.debug("Build of '_ssl.so' completed successfully")
    else:
        log.error("Build has failed: {}".format(out[1]))
        sys.exit(1)
    log.debug("Remove temp '_ssl.o' from build directory")
    os.remove(os.path.join(build_dir, "_ssl.o"))
def main():
    """Build and package the tlsssl patch (CLI entry point)."""
    parser = argparse.ArgumentParser(prog='tlsssl setup',
                                     description='This script will compile '
                                                 'tlsssl and optionally create '
                                                 'a native macOS package.')
    parser.add_argument('-b', '--build', action='store_true',
                        help='Compile the tlsssl binaries')
    parser.add_argument('-p', '--pkg', action='store_true',
                        help='Package the tlsssl output directory.')
    parser.add_argument('-v', '--verbose', action='count', default=1,
                        help="Increase verbosity level. Repeatable up to "
                             "2 times (-vv)")
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    # set argument variables
    log.verbose = args.verbose
    if args.build:
        # BUG FIX: message typo "Bulding tslssl" corrected.
        log.info("Building tlsssl...")
        download_python_source_files()
        patch()
        build()
    if args.pkg:
        # FIXME: This has grown out of control. Move this outside of main!
        log.info("Building a package for tlsssl...")
        version = CONFIG['tlsssl_version']
        # we need to setup the payload
        payload_dir = os.path.join(CURRENT_DIR, 'payload')
        if os.path.exists(payload_dir):
            log.debug("Removing payload directory...")
            shutil.rmtree(payload_dir, ignore_errors=True)
        log.debug("Creating payload directory...")
        payload_lib_dir = os.path.join(payload_dir, LIBS_DEST.lstrip('/'))
        payload_root_dir = os.path.join(
            payload_dir, CONFIG['tlsssl_install_dir'].lstrip('/'))
        # creating the lib dir also creates payload_root_dir (its parent)
        mkpath(payload_lib_dir)
        log.detail("Changing file permissions for 'ssl.py'...")
        # ssl.py needs to have chmod 644 so non-root users can import this
        os.chmod('build/ssl.py', stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP |
                 stat.S_IROTH)
        log.detail("Copying build files into payload directory")
        shutil.copy('build/_ssl.so', payload_root_dir)
        shutil.copy('build/ssl.py', payload_root_dir)
        shutil.copy('build/libtlscrypto.dylib', payload_lib_dir)
        shutil.copy('build/libtlsssl.dylib', payload_lib_dir)
        pth_fname = CONFIG['pth_fname']
        # if the pth_fname key is set write the .pth file
        # BUG FIX: was `pth_fname is not ''` - identity comparison against a
        # string literal is interpreter-dependent; use != for equality.
        if pth_fname != '':
            log.debug("Write the '.pth' file so native python can read "
                      "this module without a sys.path.insert")
            python_sys = "/Library/Python/2.7/site-packages/"
            python_sys_local = os.path.join("payload", python_sys.lstrip('/'))
            log.debug("Make site-packages inside of payload")
            mkpath(python_sys_local)
            pth_file = os.path.join(python_sys_local, pth_fname)
            # this hacky method will force this path to have a high priority
            # http://stackoverflow.com/a/37380306
            content = ("""import sys; sys.__plen = len(sys.path)
{}
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; \
p=getattr(sys,'__egginsert',0); \
sys.path[p:p]=new; sys.__egginsert = p+len(new)""".format(
                os.path.dirname(LIBS_DEST)))
            # BUG FIX: context manager instead of a bare open/close pair.
            with open(pth_file, 'w') as f:
                f.write(content)
        rc = package.pkg(root='payload',
                         version=version,
                         identifier="{}.tlsssl".format(CONFIG['pkgid']),
                         output='tlsssl-{}.pkg'.format(version),
                         )
        if rc == 0:
            log.info("tlsssl packaged properly")
        else:
            log.error("Looks like package creation failed")
# Script entry point.
if __name__ == '__main__':
    main()
|
from ast import keyword
import re
import json
from tqdm import tqdm
import os
import datetime
from transformers import pipeline
import sys
import datetime
import codecs
import pandas as pd
import textwrap
from collections import defaultdict
# Register tqdm's pandas integration (enables progress_apply on DataFrames).
tqdm.pandas()
# Load the summarization pipeline once at import time; the model is
# downloaded on first use.
print('downloading model')
summarizer = pipeline("summarization", model="lidiya/bart-large-xsum-samsum")
print('model downloaded')
# Regular expressions applied to transcripts: each pattern (key) is replaced
# by its value, stripping markup tokens and collapsing extra whitespace.
reg_ex = {
    r"( *)\<(.*?)\>": '',
    r"\n": '',
    r"( +)": ' ',
    r"\-": '',
    r"(,+)": ',',
    r"( *)(-+)": '',
    r"\[": '',
    r"\]": '',
}
# utility methods
# open transcript using given file_path
def open_transcript(file_path):
    """Read the transcript at ``file_path`` and return its lines.

    BUG FIX: the file handle was previously never closed; a context manager
    releases it deterministically.
    """
    with open(file_path, "r") as handle:
        return handle.readlines()
# remove punctuations and special tokens
def rem_ntok(reg_ex, text):
    """Apply every pattern -> replacement pair in ``reg_ex`` to ``text``."""
    for pattern, replacement in reg_ex.items():
        text = re.sub(pattern, replacement, text)
    return text
# remove extra space from the text
# remove \n and concat utterance which do not start with (PERSON.*?)
def add_colon(sentence):
    """Ensure the '(PERSON...)' speaker tag is immediately followed by ':'."""
    tag_end = re.search(r'\(PERSON(.*?)\)', sentence).end()
    if sentence[tag_end] == ':':
        return sentence
    return sentence[:tag_end] + ':' + sentence[tag_end:]
# process roles list and remove "( and )"
def process_roles(role):
    """Remove every '(' and ')' character from a role string."""
    return re.sub(r'[()]', '', role)
# remove special tokens from the processed list of roles and utterances
def remove_special_tokens(utterance):
    """Strip leading punctuation artefacts left over from transcript splitting."""
    for pattern in (r'^\.\',', r'^\.\'', r'^\',', r'^,', r'^\'', r'^\.', r'^, ,', r'^\?'):
        utterance = re.sub(pattern, '', utterance)
    return utterance
# retur max_lenght of list of sentences
def max_length(text_list):
    """Return the largest space-separated word count among the sentences."""
    return max(len(text.split(' ')) for text in text_list)
# remove short utterances precisely "4"
def preprocess_utterance(sequence):
    """Keep only utterances longer than 4 characters."""
    return [utterance for utterance in sequence if len(utterance) > 4]
# insert extra roles based on generated sentences
def insert_to_roles(roles, count, idx, role, con_index):
    """Insert ``role`` ``count`` times into ``roles`` at ``idx + con_index``.

    ``con_index`` is the cumulative shift caused by earlier insertions.
    BUG FIX: the second parameter was named ``len``, shadowing the builtin;
    it is only ever called positionally, so the rename is safe for callers.

    Returns the mutated ``roles`` list.
    """
    position = idx + con_index
    for _ in range(count):
        roles.insert(position, role)
    return roles
# text insertion in utterance list at a particular position.
def insert_text(utterances, sequences, idx, con_index):
    """Splice ``sequences`` (in order) into ``utterances`` at ``idx + con_index``."""
    position = idx + con_index
    for text in reversed(sequences):
        utterances.insert(position, text)
    return utterances
# check if folder contains transcripts
def check_for_transcript(file_list):
    """Return the first file whose name contains 'transcript' exactly once.

    Raises:
        ValueError: if no such file exists.  BUG FIX: the ValueError was
        previously *returned* rather than raised, so callers silently got an
        exception instance where they expected a filename.
    """
    for file_ in file_list:
        if len(re.findall("transcript", file_)) == 1:
            return file_
    raise ValueError("File not found!")
# convert to json files and save
def to_JSON_batch(processed_dict, file_path):
    """Serialise ``processed_dict`` as JSON into ``file_path``."""
    with open(file_path, "w") as out:
        out.write(json.dumps(processed_dict))
def to_JSON_single(processed_dict, file_name, file_path):
    """Write ``{file_name: processed_dict}`` as JSON into ``file_path``."""
    with open(file_path, "w") as out:
        json.dump({file_name: processed_dict}, out)
# remove newline character
def preprocess_transcripts(document):
    """Drop blank lines; strip newlines and append a trailing space to each line."""
    return [line.replace("\n", "") + " " for line in document if line != "\n"]
# iterate over transcript and segmentation
def parse_transcript(reg_ex, transcript):
    """Clean each transcript line and merge continuation lines into the
    preceding speaker utterance.

    A line containing exactly one '(PERSON...)' tag opens a new utterance
    (normalised to carry a colon); other lines are appended to the previous
    utterance.  Lines before the first speaker tag are dropped (best effort).
    """
    cleaned = [rem_ntok(reg_ex=reg_ex, text=text) for text in transcript]
    utteranceList = []
    speaker_pattern = r'\(PERSON(.*)\)'
    for text in cleaned:
        if len(re.findall(speaker_pattern, text)) == 1:
            utteranceList.append(add_colon(text))
        else:
            try:
                utteranceList[-1] = utteranceList[-1] + text.strip() + " "
            except Exception:
                # no utterance yet to attach to - skip the fragment
                pass
    return utteranceList
# bifurcate transcripts into roles and utterances.
def split_transcripts(processed_transcript):
    """Split 'ROLE: utterance' lines into parallel role and utterance lists.

    Utterances are cleaned of leading punctuation artefacts; entries that end
    up empty or shorter than 3 characters are dropped together with their
    role.

    BUG FIX: the line was previously split on *every* colon and only the
    second field kept, truncating any utterance that itself contained a
    colon; splitting only at the first colon preserves the full utterance
    (and a line with no colon is now skipped instead of raising IndexError).
    """
    roles, utterances = [], []
    for text in processed_transcript:
        role, _, remainder = text.partition(':')
        tune = remove_special_tokens(remainder.strip()).strip()
        tune = remove_special_tokens(tune.strip()).strip()
        tune = remove_special_tokens(tune.strip()).strip()
        if tune != '' and len(tune) > 2:
            utterances.append(tune)
            roles.append(process_roles(role))
    return roles, utterances
# shortning and splitting utterance sentence and assign roles!
def post_process(roles, utterances):
    """Split over-long utterances (>150 words) into ~150-word chunks and
    re-insert the chunks, each paired with the original speaker's role, at
    the utterance's position.

    NOTE(review): this deletes from ``utterances``/``roles`` while iterating
    ``utterances`` via enumerate; with more than one over-long utterance the
    recorded indices refer to the already-shrunk lists - verify against a
    transcript containing several long utterances.
    """
    # positions, replacement chunks and roles for every split utterance
    mappings = {
        "idx": [],
        "utterances": [],
        "roles": []
    }
    for idx, utterance in enumerate(utterances):
        word_list = [sentence.strip() for sentence in utterance.split(' ')]
        sentence_list = [sentence.strip() for sentence in utterance.split('.')]
        # check if length of word list is greater than 150
        if len(word_list) > 150:
            sequence = []
            temp = ""
            # accumulate sentences until the ~150-word budget is exceeded
            for sentence in sentence_list:
                temp = f'{temp} {sentence}.'
                # if word limit exceeded than create a new sentence
                if len(temp.split(' ')) > 150:
                    sequence.append(temp.strip())
                    temp = ''
            # flush whatever is left after the loop
            sequence.append(temp.strip())
            # delete the sentence present in original list
            del utterances[idx]
            # preprocess and striping and removing small sentence less than 3
            sequence = preprocess_utterance(sequence)
            len_roles = len(sequence)
            # retrieve corresponding role from the roles list
            role = roles[idx]
            # delete the role present in original list
            del roles[idx]
            # mapping index, roles and utterances to mapping dictionary
            mappings["idx"].append(idx)
            mappings["utterances"].append(sequence)
            mappings["roles"].append(role)
    # Applying modifications: splice every recorded chunk list (and its role,
    # repeated once per chunk) back in, tracking the cumulative offset.
    con_index = 0
    for idx, index in enumerate(mappings['idx']):
        sequence = mappings['utterances'][idx]
        len_utterances = len(sequence)
        utterances = insert_text(utterances, sequence, index, con_index)
        roles = insert_to_roles(roles, len_utterances, index, mappings['roles'][idx], con_index)
        # Reflecting to the position of insertion
        # print(f'Inserted @ {index + con_index}')
        con_index = con_index + len_utterances
        # New length after insertion
        # print(f'Length of lists after insertion {len(roles), len(utterances)}')
    # Applying changes to main dictionary
    return roles, utterances
# process single file
def process_single(path_to_file):
    """Run the full preprocessing pipeline on one transcript file.

    Returns a dict with parallel 'roles' and 'utterances' lists.
    """
    lines = open_transcript(path_to_file)
    cleaned = preprocess_transcripts(lines)
    parsed = parse_transcript(reg_ex, cleaned)
    roles, utterances = split_transcripts(parsed)
    roles, utterances = post_process(roles, utterances)
    return {
        "roles": roles,
        "utterances": utterances
    }
# pre-segmentation utility methods
# generate samsum dataset favourable sentences
def generate_dialogues(roles, utterances):
    """Format role/utterance pairs as 'role: utterance' lines (SAMSum style)."""
    return [f'{role}: {utterance}' for role, utterance in zip(roles, utterances)]
# partition document for better processing.
def doc_partitioning(document, max_characters=500):
    """Split ``document`` (a list of sentences) into 'part_<i>' chunks of
    roughly ``max_characters`` characters; inside a chunk each sentence is
    prefixed with a newline."""
    parts = {'part_0': ''}
    running = ''
    index = 0
    for sentence in document:
        key = 'part_{}'.format(index)
        running += sentence
        if len(running) > max_characters:
            # budget exceeded: open a fresh partition and reset the counter
            running = ''
            index += 1
            key = 'part_{}'.format(index)
            parts[key] = ''
        parts[key] = parts[key] + '\n' + sentence
    return parts
# summarization and minute generation.
def apply_summarizer(processed_dict):
    """Summarise each partition with the module-level ``summarizer`` pipeline."""
    summaries = []
    for key in tqdm(processed_dict.keys(), total=len(processed_dict.keys())):
        summaries.append(summarizer(processed_dict[key])[0]['summary_text'])
    return summaries
# split summarized paragraph into separate sentences.
def split_Sentences(summarizer_output):
    """Split each summary on '.' and return the non-empty stripped sentences."""
    return [
        fragment.strip()
        for text in summarizer_output
        for fragment in text.split('.')
        if fragment.strip() != ''
    ]
# get current date.
def return_date():
    """Return today's date formatted as 'Date: YYYY-MM-DD'."""
    today = datetime.datetime.now()
    return 'Date: {}'.format(today.strftime("%Y-%m-%d"))
# prepare main-body of the meeting minutes.
def main_body(output: list):
    """Build the 'SUMMARY-' block: one '-' bullet per sentence of more than 3 words."""
    bullets = ['-{}'.format(sentence) for sentence in output
               if len(sentence.split(' ')) > 3]
    return 'SUMMARY- \n' + '\n'.join(bullets)
# retrieve meeting participants
def generate_person_list(output):
    """Collect the unique PERSON<N> tags mentioned in the summary sentences.

    Only the first tag per sentence is recorded (re.search), matching the
    original behavior.  BUG FIX: a bare ``except`` previously swallowed *all*
    errors; only an absent match needs handling, done with an explicit
    None check.
    """
    pattern = r'PERSON[0-9]{1,2}'
    persons = set()
    for sentence in output:
        match = re.search(pattern, sentence)
        if match is not None:
            persons.add(match.group())
    return list(persons)
# convert generated document to text file
def convert_str_2_txt(document: str, process_code: str):
    """Write ``document`` under output/meeting-minutes/ and return the path.

    Assumes the output directory already exists.
    """
    file_name = 'output/meeting-minutes/{}.txt'.format(process_code)
    with open(file_name, "w") as out:
        out.write(document)
    return file_name
# generate attendee string
def generate_attendees(person_list):
    """Return the 'ATTENDEES: ' header line listing all participants."""
    return 'ATTENDEES: ' + ', '.join(person_list)
# generate keyword list
# def process_generated_keywords(process_code: str):
# path_to_directory = "output/processed-keywords"
# path_to_directory = os.path.normpath(path_to_directory)
# filename = f"{process_code}.csv"
# path_to_file = os.path.join(path_to_directory, filename)
# keywords = pd.read_csv(path_to_file)['text'].to_list()
# return keywords
# def generate_keywords(keyword_list: list):
# keyword_header = "KEYWORDS: "
# keyword_content = keyword_header + "; ".join(keyword_list)
# return keyword_content
# generate meeting minute
def prepare_document(attendee_str, body, annotator='DeepCON'):
    """Assemble the final minutes document: date, attendees, body, sign-off."""
    header = return_date()
    return f'{header}\n{attendee_str}\n\n\n{body}\n\nMinuted by: {annotator}'
def generate_complete_file(path_to_file: str, process_code: str, length: str):
    """Produce a meeting-minutes text file for one transcript.

    ``length`` selects the partition size fed to the summarizer:
    'short' -> 1700 characters, 'medium' -> 1000, anything else -> 500.
    """
    char_budget = {'short': 1700, 'medium': 1000}.get(length, 500)
    # preprocessing pipeline for a single transcript
    trans_dict = process_single(path_to_file)
    # merge roles and utterances in conversational format provided by the samsum dataset.
    meeting_conv = generate_dialogues(trans_dict["roles"], trans_dict["utterances"])
    # partitioned document
    processed_dict = doc_partitioning(meeting_conv, max_characters=char_budget)
    # apply summarizer to processed transcript, then split into sentences
    output = split_Sentences(apply_summarizer(processed_dict))
    # generate different segments of the processed meeting
    attendees = generate_attendees(generate_person_list(output))
    summary_block = main_body(output)
    # Assemble parts and prepare the final minute
    document = prepare_document(attendee_str=attendees, body=summary_block)
    print(document)
    # TODO Change this later
    convert_str_2_txt(document, process_code)
|
import math
import pygame
from ball import Ball
from primitives import Pose
from cue import Cue, BasicCue
import constants as c
from copy import copy
class Player(Ball):
    """The player-controlled cue ball.

    Handles cue-shot mouse input, room door/spawn progression triggers, the
    dotted shot-prediction line, and the fading 'PERFECT' popup graphic.
    """
    def __init__(self, game, x=0, y=0):
        super().__init__(game, x, y)
        # slightly heavier than a standard Ball
        self.mass *= 1.05
        self.color = (255, 255, 0)
        self.active_cue = BasicCue()
        self.is_player = True
        # collision bookkeeping used by the mocked prediction simulation
        self.has_collided = False
        self.collided_with = None
        self.first_spawn = True
        # 'PERFECT' popup sprite and its current fade alpha
        self.perfect = pygame.image.load(c.image_path("perfect_room.png"))
        self.perfect_alpha = 0

    def win_perfect(self):
        """Show the PERFECT popup at full opacity; it fades out in update()."""
        self.perfect_alpha = 255

    def load_back_surface(self):
        """Load the player-specific back-face sprite (overrides Ball's)."""
        self.back_surface = pygame.image.load(c.image_path("player_back.png"))

    def update(self, dt, events):
        """Per-frame update: handle mouse shots, fade the popup, and drive
        room door/enemy-spawn state once all balls have settled."""
        for event in events:
            if event.type == pygame.MOUSEBUTTONUP:
                if event.button == 1:
                    # left-click release: shoot toward the cursor (camera-relative)
                    mouse_pose = Pose(pygame.mouse.get_pos(), 0) + self.game.current_scene.camera.pose
                    my_pose = self.pose.copy() # TODO once camera movement exists, account for it
                    self.cue_hit(mouse_pose - my_pose)
        super().update(dt, events)
        current_room = self.game.current_scene.current_room() #TODO make this check fake player in simulation
        floor_num = self.game.current_floor
        # fade the PERFECT popup; fade speeds up once it drops below 128 alpha
        self.perfect_alpha -= 50 * dt
        if self.perfect_alpha < 128:
            self.perfect_alpha -= 150*dt
        if self.is_completely_in_room() and not current_room.enemies_have_spawned:
            # heavy exponential damping while entering an unstarted room
            self.velocity *= 0.03**dt
        if not self.game.in_simulation:
            if self.is_completely_in_room() and not current_room.enemies_have_spawned and self.game.current_scene.all_balls_below_speed() and current_room.doors_are_open:
                # if(floor_num == 1 and self.first_spawn):
                #     current_room.doors_close()
                #     current_room.spawn_enemies_first_room()
                #     current_room.waves_remaining = 3
                if(current_room.is_boss_room and floor_num != 1):
                    current_room.doors_close()
                    current_room.waves_remaining = 1
                    current_room.spawn_boss()
                else:
                    current_room.doors_close()
                    current_room.set_difficulty()
                    current_room.spawn_enemies()
            elif current_room.enemies_have_spawned and not current_room.doors_are_open and self.game.current_scene.no_enemies() and current_room.waves_remaining >0:
                # waves still pending: spawn the next one
                if (floor_num == 1 and self.first_spawn):
                    current_room.spawn_enemies_first_room()
                else:
                    current_room.spawn_enemies()
            elif current_room.enemies_have_spawned and not current_room.doors_are_open and self.game.current_scene.no_enemies():
                # room cleared: reopen the doors
                if(self.first_spawn):
                    self.first_spawn = False
                current_room.doors_open()

    def take_turn(self):
        """No-op: the player acts via mouse input, not the AI turn system."""
        pass

    def cue_hit(self, hit_vector):
        """Strike the ball along the inverse of ``hit_vector`` (drag-to-shoot),
        capping power at 110, during the BEFORE_HIT phase only."""
        # TODO use self.knock, and account for cue type
        # self.velocity = hit_vector.copy()
        hit_vector *= -1
        if self.turn_phase != c.BEFORE_HIT or not self.turn_in_progress:
            return
        elif self.turn_in_progress:
            # NOTE(review): this branch is always taken when reached (the
            # guard above already returned otherwise)
            self.turn_phase = c.AFTER_HIT
        angle = math.atan2(-hit_vector.y, hit_vector.x) * 180/math.pi
        power = hit_vector.magnitude()*0.55
        if power > 110:
            power = 110
        self.velocity *= 0
        self.knock(self.active_cue, angle, power)

    def draw_prediction_line(self, screen, offset=(0, 0)):
        """Simulate the pending shot on a copy of the player and draw a dotted
        trajectory plus a pointer/ghost circle at the predicted stop or first
        collision."""
        if self.sunk:
            return
        if not self.turn_in_progress or not self.turn_phase == c.BEFORE_HIT:
            return
        # run a throwaway physics simulation on a shallow copy of ourselves
        self.game.in_simulation = True
        player_copy = copy(self)
        player_copy.is_simulating = True
        player_copy.pose = self.pose.copy()
        player_copy.velocity = self.velocity.copy()
        # intercept ball-ball collisions so the sim stops at first contact
        player_copy.collide_with_other_ball_2 = player_copy.mock_collision
        mouse_pose = Pose(pygame.mouse.get_pos(), 0) + self.game.current_scene.camera.pose
        my_pose = self.pose.copy()
        player_copy.cue_hit(mouse_pose - my_pose)
        traveled = 0
        positions = []
        velocities = []
        old = player_copy.pose.copy()
        final_position = None
        for i in range(c.SIM_ITERATIONS):
            new = player_copy.pose.copy()
            traveled += (new - old).magnitude()
            old = new
            if traveled > c.SIM_MAX_DIST:
                break
            if(c.VARIABLE_SIM_SPEED):
                near_wall = False
                if(c.SIM_NEAR_WALL_STEP_REDUCTION != 1):
                    mapTiles = self.game.current_scene.map.tiles_near(player_copy.pose, player_copy.radius + c.SIM_MOVEMENT);
                    for mapTile in mapTiles:
                        if(mapTile.collidable):
                            near_wall = True
                            break
                # adaptive step: smaller steps near walls / at high speed
                if near_wall and player_copy.velocity.magnitude() >3:
                    sim_update = (c.SIM_MOVEMENT / player_copy.velocity.magnitude() / c.SIM_NEAR_WALL_STEP_REDUCTION)
                elif player_copy.velocity.magnitude() > 1:
                    sim_update = (c.SIM_MOVEMENT/player_copy.velocity.magnitude())
                else:
                    break
                # NOTE(review): this unconditionally overwrites the adaptive
                # sim_update computed just above - looks unintentional; confirm
                sim_update = 1 / c.SIM_MIN_FPS
                if(sim_update> 1/1 / c.SIM_MIN_FPS):
                    sim_update = 1 / c.SIM_MIN_FPS
                #mapTiles = self.game.current_scene.map.tiles_near(self.pose, self.radius + );
            else:
                sim_update = 1 / c.SIM_FPS
            player_copy.update(sim_update, [])
            positions.append(player_copy.pose.copy())
            velocities.append(player_copy.velocity.magnitude())
            if player_copy.has_collided:
                final_position = player_copy.pose.copy()
                break
            if player_copy.velocity.magnitude() < 1:
                final_position = player_copy.pose.copy()
                break
            if player_copy.sunk:
                break
        if len(positions) > 1:
            final_direction = positions[-1] - positions[-2]
        else:
            final_direction = Pose((1, 0), 0)
        extra = c.SIM_MAX_DIST - traveled
        surf = pygame.Surface((3, 3))
        surf.fill(c.BLACK)
        pygame.draw.circle(surf, c.WHITE, (surf.get_width()//2, surf.get_width()//2), surf.get_width()//2)
        alpha = 255
        surf.set_colorkey(c.BLACK)
        i = -1
        # draw one dot per simulated position; dot size scales with speed
        for pose in positions[::1]:
            i += 1
            circle_diam = max(3, min(7, (velocities[i]/160)))
            surf = pygame.Surface((circle_diam, circle_diam))
            surf.fill(c.BLACK)
            surf.set_colorkey(c.BLACK)
            pygame.draw.circle(surf, c.WHITE, (surf.get_width() // 2, surf.get_width() // 2), surf.get_width() // 2)
            surf.set_alpha(alpha)
            screen.blit(surf, (pose.x + offset[0] - surf.get_width()//2, pose.y + offset[1] - surf.get_width()//2))
        offset_pose = Pose(offset, 0)
        if player_copy.collided_with:
            # draw an arrow showing the struck ball's departure direction
            other = player_copy.collided_with
            to_other = other.pose - player_copy.pose
            angle = math.degrees(-math.atan2(to_other.y, to_other.x))
            pointer = pygame.transform.rotate(self.pointer, angle)
            pointer_length = 100
            start = other.pose - to_other*(1/to_other.magnitude())*other.radius + offset_pose
            end = start + to_other*(1/to_other.magnitude())*pointer_length
            pygame.draw.line(screen, c.WHITE, start.get_position(), end.get_position())
            screen.blit(pointer, (end.x - pointer.get_width()//2, end.y - pointer.get_height()//2))
        if final_position:
            # ghost circle where the ball is predicted to stop/collide
            final_position += offset_pose
            pygame.draw.circle(screen, c.WHITE, final_position.get_position(), player_copy.radius, 2)
        elif len(positions) >= 1:
            # still moving when the sim ended: draw a direction pointer instead
            final = positions[-1] + offset_pose
            angle = math.degrees(-math.atan2(final_direction.y, final_direction.x))
            pointer = pygame.transform.rotate(self.pointer, angle)
            end = final + final_direction*(1/(final_direction.magnitude()*extra + 1))
            screen.blit(pointer, (end.x - pointer.get_width() // 2, end.y - pointer.get_height() // 2))
        self.game.in_simulation = False

    def draw(self, screen, offset=(0, 0)):
        """Draw the ball, then the fading PERFECT popup above it if active."""
        super().draw(screen, offset=offset)
        if self.perfect_alpha > 0:
            x = self.pose.x + offset[0] - self.perfect.get_width()//2
            y = self.pose.y + offset[1] - self.perfect.get_height() - self.radius - 5
            self.perfect.set_alpha(self.perfect_alpha)
            self.perfect.set_colorkey(c.BLACK)
            screen.blit(self.perfect, (x, y))

    def sink_for_real(self):
        """Sink the player ball and notify the scene (game-over handling)."""
        super().sink_for_real()
        self.game.current_scene.player_just_sunk()

    def mock_collision(self, other): #ONLY FOR MOCK BALL COLLISIONS
        """Record the first ball contact during prediction simulation and back
        the copy off so the ghost outline sits flush against the other ball;
        no impulse is applied."""
        if self.has_collided or other.is_player:
            return
        self.has_collided = True
        self.collided_with = other
        collision_normal = self.pose - other.pose
        collision_normal_unscaled = collision_normal.copy()
        #offset_required = (collision_normal.magnitude() - (self.radius + other.radius) ) / 1.95
        #collision_normal.scale_to(1)
        #self.pose -= collision_normal * offset_required
        #other.pose += collision_normal * offset_required
        #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        collision_normal.scale_to(1)
        velocity_vector = self.velocity.copy()
        velocity_vector.scale_to(1)
        # self.pose += velocity_vector * (offset_required * math.cos(math.atan2(velocity_vector.y-collision_normal.y, velocity_vector.x-collision_normal.x)))
        # angle between travel direction and the collision normal (clamped
        # before acos to guard against float error)
        dot_product_self_norm = collision_normal.x * velocity_vector.x + collision_normal.y * velocity_vector.y;
        if((collision_normal.magnitude() * velocity_vector.magnitude()) != 0):
            acos_input = dot_product_self_norm / (collision_normal.magnitude() * velocity_vector.magnitude())
            if(acos_input>1):
                acos_input = 1
            if(acos_input<-1):
                acos_input = -1
            angle_vel = math.acos(acos_input)
        else:
            angle_vel = 1
        # triangle solve (law of sines) for how far past touching the sim
        # overshot along the travel direction
        angle_b = math.asin((math.sin(angle_vel) / (self.radius + other.radius)) * collision_normal_unscaled.magnitude())
        angle_c = math.pi - (angle_b + angle_vel)
        if(math.sin(angle_vel)== 0):
            angle_vel = 1
        interpolated_offset = ((self.radius + other.radius) / math.sin(angle_vel)) * math.sin(angle_c)
        # print("OFFSET :" + str(interpolated_offset) + " angle C: " + str(math.degrees(angle_c)) + " angle vel: " + str(math.degrees(angle_vel)))
        if(self.velocity.magnitude() + other.velocity.magnitude()) != 0:
            self.pose -= velocity_vector * abs(interpolated_offset) * (self.velocity.magnitude()/(self.velocity.magnitude() + other.velocity.magnitude()))
            #other.pose += velocity_vector * abs(interpolated_offset) * (other.velocity.magnitude()/(self.velocity.magnitude() + other.velocity.magnitude()))
|
<reponame>PKUfudawei/cmssw<filename>L1Trigger/L1TCalorimeter/python/caloParams_2021_v0_2_cfi.py
import FWCore.ParameterSet.Config as cms
from L1Trigger.L1TCalorimeter.caloParams_cfi import caloParamsSource
import L1Trigger.L1TCalorimeter.caloParams_cfi
# Stage-2 calorimeter trigger parameters for 2021 (v0_2).
# Clones the defaults from caloParams_cfi and overrides the EG/tau
# isolation and calibration LUTs, jet PUS/calibration LUTs, energy-sum
# settings and the Layer-1 ECAL/HCAL/HF scale-factor tables.
# Commented-out entries document the defaults they would otherwise override.
caloStage2Params = L1Trigger.L1TCalorimeter.caloParams_cfi.caloParams.clone(
    # towers
    #towerLsbH = 0.5
    #towerLsbE = 0.5
    #towerLsbSum = 0.5
    #towerNBitsH = 8
    #towerNBitsE = 8
    #towerNBitsSum = 9
    #towerNBitsRatio = 3
    #towerEncoding = True
    # regions
    #regionLsb = 0.5
    #regionPUSType = "None"
    #regionPUSParams = []
    # EG
    #egEtaCut = 28
    #egLsb = 0.5
    #egSeedThreshold = 2.
    #egNeighbourThreshold = 1.
    egHcalThreshold = 0.,
    egTrimmingLUTFile = "L1Trigger/L1TCalorimeter/data/egTrimmingLUT_10_v16.01.19.txt",
    #egMaxHcalEt = 0.
    #egMaxPtHOverE = 128.
    egHOverEcutBarrel = 3,
    egHOverEcutEndcap = 4,
    egBypassExtHOverE = 0,
    egMaxHOverELUTFile = "L1Trigger/L1TCalorimeter/data/HoverEIdentification_0.995_v15.12.23.txt",
    egCompressShapesLUTFile = "L1Trigger/L1TCalorimeter/data/egCompressLUT_v4.txt",
    egShapeIdType = "compressed",
    #egShapeIdVersion = 0
    egShapeIdLUTFile = "L1Trigger/L1TCalorimeter/data/shapeIdentification_adapt0.99_compressedieta_compressedE_compressedshape_v15.12.08.txt", #Not used any more in the current emulator version, merged with calibration LUT
    #egPUSType = "None"
    egIsolationType = "compressed",
    egIsoLUTFile = "L1Trigger/L1TCalorimeter/data/EG_Iso_LUT_04_04_2017.2.txt",
    egIsoLUTFile2 = "L1Trigger/L1TCalorimeter/data/EG_LoosestIso_2018.2.txt",
    #egIsoAreaNrTowersEta = 2
    #egIsoAreaNrTowersPhi = 4
    egIsoVetoNrTowersPhi = 2,
    #egIsoPUEstTowerGranularity = cms.uint32(1)
    #egIsoMaxEtaAbsForTowerSum = cms.uint32(4)
    #egIsoMaxEtaAbsForIsoSum = cms.uint32(27)
    egPUSParams = cms.vdouble(1,4,32), #Isolation window in firmware goes up to abs(ieta)=32 for now
    egCalibrationType = "compressed",
    egCalibrationVersion = 0,
    egCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/corrections_Trimming10_compressedieta_compressedE_compressedshape_PANTELIS_v2_NEW_CALIBRATIONS_withShape_v17.04.04.txt",
    # Tau
    #tauLsb = 0.5
    isoTauEtaMax = 25,
    tauSeedThreshold = 0.,
    #tauNeighbourThreshold = 0.
    #tauIsoAreaNrTowersEta = 2
    #tauIsoAreaNrTowersPhi = 4
    #tauIsoVetoNrTowersPhi = 2
    #tauPUSType = "None"
    tauIsoLUTFile = "L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_Option_31_extrap_2018_FW_v10.0.0.txt",
    tauIsoLUTFile2 = "L1Trigger/L1TCalorimeter/data/Tau_Iso_LUT_Option_31_extrap_2018_FW_v10.0.0.txt",
    tauCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/Tau_Calibration_LUT_2018_Layer1CalibrationNewHCAL_FW_v13.0.0.txt",
    tauCompressLUTFile = "L1Trigger/L1TCalorimeter/data/tauCompressAllLUT_12bit_v3.txt",
    tauPUSParams = [1,4,32],
    # jets
    #jetLsb = 0.5
    jetSeedThreshold = 4.0,
    #jetNeighbourThreshold = 0.
    jetPUSType = "ChunkyDonut",
    #jetBypassPUS = 0
    # Calibration options
    jetCalibrationType = "LUT",
    jetCompressPtLUTFile = "L1Trigger/L1TCalorimeter/data/lut_pt_compress_2017v1.txt",
    jetCompressEtaLUTFile = "L1Trigger/L1TCalorimeter/data/lut_eta_compress_2017v1.txt",
    jetCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/lut_calib_2018v1_ECALZS_noHFJEC.txt",
    # sums: 0=ET, 1=HT, 2=MET, 3=MHT
    #etSumLsb = 0.5
    etSumEtaMin = [1, 1, 1, 1, 1],
    etSumEtaMax = [28, 26, 28, 26, 28],
    etSumEtThreshold = [0., 30., 0., 30., 0.], # only 2nd (HT) and 4th (MHT) values applied
    etSumMetPUSType = "LUT", # et threshold from this LUT supercedes et threshold in line above
    #etSumEttPUSType = "None"
    #etSumEcalSumPUSType = "None"
    #etSumBypassMetPUS = 0
    etSumBypassEttPUS = 1,
    etSumBypassEcalSumPUS = 1,
    #etSumXCalibrationType = "None"
    #etSumYCalibrationType = "None"
    #etSumEttCalibrationType = "None"
    #etSumEcalSumCalibrationType = "None"
    etSumMetPUSLUTFile = "L1Trigger/L1TCalorimeter/data/newSFHBHEOnp5_METPUM_211124.txt",
    #etSumEttPUSLUTFile = "L1Trigger/L1TCalorimeter/data/lut_towEtThresh_dummy.txt"
    #etSumEcalSumPUSLUTFile = "L1Trigger/L1TCalorimeter/data/lut_towEtThresh_dummy.txt"
    #etSumXCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt"
    #etSumYCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt"
    #etSumEttCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt"
    #etSumEcalSumCalibrationLUTFile = "L1Trigger/L1TCalorimeter/data/lut_etSumPUS_dummy.txt"
    # Layer 1 SF
    # Each scale-factor table is flattened: one row of 28 (ECAL/HCAL) or
    # 12 (HF) ieta entries per ET bin listed in the matching *ETBins vector.
    layer1ECalScaleETBins = cms.vint32([3, 6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]),
    layer1ECalScaleFactors = cms.vdouble([
        1.13, 1.13, 1.13, 1.12, 1.12, 1.12, 1.12, 1.12, 1.13, 1.12, 1.13, 1.13, 1.13, 1.14, 1.14, 1.13, 1.13, 1.31, 1.15, 1.27, 1.28, 1.31, 1.31, 1.32, 1.35, 0.00, 0.00, 0.00,
        1.13, 1.13, 1.13, 1.12, 1.12, 1.12, 1.12, 1.12, 1.13, 1.12, 1.13, 1.13, 1.13, 1.14, 1.14, 1.13, 1.13, 1.31, 1.15, 1.27, 1.28, 1.31, 1.31, 1.32, 1.35, 1.38, 0.00, 0.00,
        1.08, 1.08, 1.09, 1.08, 1.09, 1.08, 1.08, 1.09, 1.10, 1.09, 1.09, 1.10, 1.09, 1.09, 1.09, 1.10, 1.10, 1.26, 1.11, 1.21, 1.20, 1.23, 1.25, 1.28, 1.31, 1.33, 1.21, 0.00,
        1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.07, 1.07, 1.06, 1.07, 1.07, 1.07, 1.07, 1.08, 1.08, 1.07, 1.08, 1.19, 1.09, 1.16, 1.16, 1.20, 1.22, 1.23, 1.28, 1.29, 1.18, 1.09,
        1.04, 1.04, 1.04, 1.05, 1.05, 1.04, 1.05, 1.05, 1.05, 1.06, 1.05, 1.06, 1.06, 1.06, 1.06, 1.06, 1.06, 1.16, 1.08, 1.15, 1.15, 1.19, 1.20, 1.22, 1.26, 1.31, 1.15, 1.08,
        1.04, 1.03, 1.04, 1.04, 1.03, 1.03, 1.04, 1.04, 1.04, 1.04, 1.04, 1.05, 1.05, 1.06, 1.05, 1.05, 1.05, 1.14, 1.07, 1.12, 1.14, 1.17, 1.18, 1.21, 1.25, 1.27, 1.15, 1.07,
        1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.03, 1.04, 1.03, 1.03, 1.03, 1.04, 1.04, 1.05, 1.05, 1.03, 1.13, 1.06, 1.11, 1.13, 1.15, 1.17, 1.20, 1.23, 1.25, 1.12, 1.08,
        1.03, 1.02, 1.03, 1.02, 1.03, 1.00, 1.03, 1.03, 1.03, 1.03, 1.02, 1.03, 1.04, 1.04, 1.04, 1.04, 1.02, 1.11, 1.05, 1.11, 1.12, 1.14, 1.16, 1.18, 1.22, 1.26, 1.08, 1.05,
        1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.04, 1.03, 1.03, 1.04, 1.11, 1.05, 1.11, 1.11, 1.14, 1.17, 1.17, 1.21, 1.22, 1.07, 1.05,
        1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.03, 1.09, 1.05, 1.10, 1.11, 1.13, 1.15, 1.17, 1.20, 1.21, 1.06, 1.05,
        1.01, 1.02, 1.01, 1.01, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.03, 1.03, 1.03, 1.03, 1.09, 1.05, 1.09, 1.10, 1.12, 1.14, 1.16, 1.20, 1.23, 1.07, 1.05,
        1.00, 1.01, 1.01, 1.01, 1.01, 1.01, 1.01, 1.01, 1.02, 1.02, 1.02, 1.02, 1.02, 1.03, 1.02, 1.03, 1.03, 1.08, 1.05, 1.10, 1.09, 1.12, 1.13, 1.17, 1.19, 1.23, 1.04, 1.03,
        1.00, 1.01, 1.01, 1.01, 1.01, 1.01, 1.00, 1.01, 1.01, 1.02, 1.01, 1.01, 1.02, 1.02, 1.02, 1.02, 1.02, 1.06, 1.05, 1.09, 1.09, 1.11, 1.12, 1.16, 1.18, 1.22, 1.00, 1.03,
        1.00, 1.00, 1.00, 1.01, 1.01, 1.01, 1.00, 1.01, 1.01, 1.02, 1.00, 1.01, 1.02, 1.02, 1.02, 1.02, 1.02, 1.06, 1.04, 1.08, 1.09, 1.10, 1.13, 1.15, 1.18, 1.21, 1.00, 1.03
    ]),
    layer1HCalScaleETBins = cms.vint32([6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]),
    layer1HCalScaleFactors = cms.vdouble([
        1.55, 1.59, 1.60, 1.60, 1.58, 1.62, 1.63, 1.63, 1.63, 1.65, 1.65, 1.71, 1.69, 1.72, 1.84, 1.98, 1.98, 1.51, 1.55, 1.56, 1.42, 1.44, 1.46, 1.46, 1.51, 1.44, 1.29, 1.23,
        1.39, 1.39, 1.40, 1.42, 1.40, 1.42, 1.45, 1.43, 1.43, 1.45, 1.47, 1.49, 1.47, 1.51, 1.57, 1.67, 1.70, 1.32, 1.35, 1.36, 1.24, 1.26, 1.27, 1.30, 1.32, 1.31, 1.16, 1.10,
        1.31, 1.33, 1.33, 1.34, 1.33, 1.34, 1.35, 1.37, 1.36, 1.37, 1.39, 1.39, 1.39, 1.39, 1.45, 1.54, 1.57, 1.22, 1.25, 1.27, 1.16, 1.19, 1.20, 1.22, 1.25, 1.24, 1.10, 1.05,
        1.27, 1.28, 1.29, 1.29, 1.29, 1.28, 1.31, 1.31, 1.30, 1.31, 1.33, 1.34, 1.33, 1.34, 1.41, 1.46, 1.48, 1.19, 1.20, 1.20, 1.12, 1.13, 1.15, 1.17, 1.20, 1.20, 1.06, 1.01,
        1.22, 1.22, 1.23, 1.23, 1.23, 1.24, 1.24, 1.26, 1.25, 1.27, 1.27, 1.28, 1.28, 1.27, 1.32, 1.38, 1.41, 1.12, 1.15, 1.16, 1.08, 1.10, 1.11, 1.13, 1.15, 1.15, 1.03, 0.98,
        1.17, 1.19, 1.17, 1.19, 1.19, 1.19, 1.20, 1.22, 1.20, 1.21, 1.21, 1.22, 1.22, 1.23, 1.26, 1.31, 1.33, 1.10, 1.10, 1.10, 1.04, 1.06, 1.07, 1.09, 1.11, 1.10, 0.99, 0.95,
        1.14, 1.15, 1.14, 1.15, 1.16, 1.15, 1.16, 1.17, 1.16, 1.17, 1.19, 1.18, 1.18, 1.19, 1.22, 1.26, 1.26, 1.06, 1.07, 1.08, 1.02, 1.03, 1.04, 1.06, 1.07, 1.07, 0.96, 0.92,
        1.11, 1.11, 1.13, 1.12, 1.11, 1.13, 1.13, 1.13, 1.12, 1.14, 1.15, 1.15, 1.14, 1.15, 1.17, 1.20, 1.23, 1.03, 1.05, 1.05, 1.00, 1.01, 1.02, 1.03, 1.05, 1.03, 0.95, 0.91,
        1.08, 1.09, 1.09, 1.08, 1.09, 1.10, 1.10, 1.11, 1.11, 1.11, 1.12, 1.11, 1.11, 1.12, 1.13, 1.17, 1.16, 1.01, 1.02, 1.03, 0.98, 0.99, 0.99, 1.01, 1.02, 1.01, 0.94, 0.89,
        1.06, 1.07, 1.06, 1.07, 1.07, 1.07, 1.08, 1.08, 1.07, 1.07, 1.08, 1.08, 1.08, 1.09, 1.10, 1.14, 1.13, 1.00, 1.02, 1.02, 0.97, 0.98, 0.98, 0.99, 1.00, 1.00, 0.92, 0.87,
        1.03, 1.04, 1.04, 1.04, 1.04, 1.05, 1.05, 1.05, 1.05, 1.05, 1.05, 1.05, 1.05, 1.06, 1.06, 1.09, 1.09, 0.97, 0.99, 1.00, 0.95, 0.96, 0.96, 0.97, 0.99, 0.98, 0.90, 0.85,
        1.00, 1.00, 1.00, 1.01, 1.01, 1.01, 1.02, 1.02, 1.01, 1.01, 1.02, 1.02, 1.01, 0.98, 1.01, 1.02, 1.02, 0.96, 0.97, 1.00, 0.93, 0.94, 0.94, 0.95, 0.96, 0.96, 0.89, 0.82,
        0.96, 0.96, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.97, 0.95, 0.95, 0.95, 0.96, 0.95, 0.93, 0.95, 0.95, 0.93, 0.93, 0.94, 0.94, 0.95, 0.95, 0.88, 0.82
    ]),
    layer1HFScaleETBins = cms.vint32([6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 55, 70, 256]),
    layer1HFScaleFactors = cms.vdouble([
        1.35, 1.09, 1.12, 1.10, 1.17, 1.18, 1.19, 1.23, 1.25, 1.32, 1.61, 1.79,
        1.27, 1.01, 1.09, 1.03, 1.04, 1.05, 1.09, 1.11, 1.18, 1.19, 1.48, 1.67,
        1.15, 0.98, 1.05, 1.02, 1.00, 0.99, 1.03, 1.04, 1.10, 1.12, 1.39, 1.66,
        1.14, 0.96, 1.03, 0.97, 0.96, 0.96, 0.98, 1.00, 1.04, 1.07, 1.35, 1.59,
        1.07, 0.97, 1.00, 0.96, 0.91, 0.92, 0.95, 0.96, 1.01, 1.03, 1.28, 1.56,
        1.03, 0.94, 0.97, 0.94, 0.88, 0.90, 0.92, 0.94, 0.98, 1.01, 1.27, 1.53,
        1.01, 0.92, 0.96, 0.90, 0.87, 0.89, 0.91, 0.93, 0.96, 0.99, 1.23, 1.48,
        0.98, 0.89, 0.96, 0.87, 0.86, 0.87, 0.89, 0.91, 0.94, 0.97, 1.19, 1.47,
        0.95, 0.88, 0.94, 0.87, 0.86, 0.86, 0.88, 0.90, 0.94, 0.96, 1.16, 1.43,
        0.93, 0.88, 0.93, 0.87, 0.86, 0.87, 0.88, 0.90, 0.93, 0.95, 1.14, 1.42,
        0.92, 0.86, 0.90, 0.86, 0.85, 0.86, 0.88, 0.89, 0.92, 0.95, 1.12, 1.41,
        0.90, 0.85, 0.90, 0.85, 0.84, 0.86, 0.88, 0.90, 0.93, 0.95, 1.09, 1.35,
        0.86, 0.85, 0.89, 0.85, 0.85, 0.86, 0.88, 0.90, 0.93, 0.95, 1.10, 1.27
    ])
)
|
<reponame>TUW-GEO/qa4sm-reader<gh_stars>0
# -*- coding: utf-8 -*-
"""
Contains helper functions for plotting qa4sm results.
"""
from qa4sm_reader import globals
import numpy as np
import pandas as pd
import os.path
from typing import Union
import copy
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as mcol
import matplotlib.ticker as mticker
import matplotlib.gridspec as gridspec
from matplotlib.patches import Patch, PathPatch
from matplotlib.lines import Line2D
from cartopy import config as cconfig
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from pygeogrids.grids import BasicGrid, genreg_grid
from shapely.geometry import Polygon, Point
import warnings
# point cartopy at the feature-data directory shipped alongside this module
cconfig['data_dir'] = os.path.join(os.path.dirname(__file__), 'cartopy')
def _float_gcd(a, b, atol=1e-08):
"Greatest common divisor (=groesster gemeinsamer teiler)"
while abs(b) > atol:
a, b = b, a % b
return a
def _get_grid(a):
"Find the stepsize of the grid behind a and return the parameters for that grid axis."
a = np.unique(a) # get unique values and sort
das = np.unique(np.diff(a)) # get unique stepsizes and sort
da = das[0] # get smallest stepsize
for d in das[1:]: # make sure, all stepsizes are multiple of da
da = _float_gcd(d, da)
a_min = a[0]
a_max = a[-1]
len_a = int((a_max - a_min) / da + 1)
return a_min, a_max, da, len_a
def _get_grid_for_irregulars(a, grid_stepsize):
"Find the stepsize of the grid behind a for datasets with predeifned grid stepsize, and return the parameters for that grid axis."
a = np.unique(a)
a_min = a[0]
a_max = a[-1]
da = grid_stepsize
len_a = int((a_max - a_min) / da + 1)
return a_min, a_max, da, len_a
def _value2index(a, a_min, da):
"Return the indexes corresponding to a. a and the returned index is a numpy array."
return ((a - a_min) / da).astype('int')
def _format_floats(x):
"""Format floats in the statistsics table"""
if isinstance(x, float):
if abs(x) < 0.000001:
return "~ 0"
elif 0.1 < abs(x) < 1e3:
return np.format_float_positional(x, precision=2)
else:
return np.format_float_scientific(x, precision=2)
else:
return x
def oversample(lon, lat, data, extent, dx, dy):
    """Resample scattered (lon, lat, data) points onto a regular grid.

    A nearest-neighbour lookup table maps each cell of a regular grid with
    spacing (dx, dy) over ``extent`` to the closest input point.

    Returns
    -------
    img : numpy.ma.MaskedArray
        2D gridded values; cells without a neighbour and NaNs are masked.
    reg_grid : the regular grid the data was sampled onto.
    """
    other = BasicGrid(lon, lat)
    reg_grid = genreg_grid(dx, dy, minlat=extent[2], maxlat=extent[3],
                           minlon=extent[0], maxlon=extent[1])
    max_dist = dx * 111 * 1000 # a mean distance for one degree it's around 111 km
    lut = reg_grid.calc_lut(other, max_dist=max_dist)
    img = np.ma.masked_where(lut == -1, data[lut])  # -1 = no neighbour within max_dist
    img[np.isnan(img)] = np.ma.masked
    return img.reshape(-1, reg_grid.shape[1]), reg_grid
def geotraj_to_geo2d(df, index=globals.index_names, grid_stepsize=None):
    """
    Converts geotraj (list of lat, lon, value) to a regular grid over lon, lat.

    The values in df need to be sampled from a regular grid, the order does
    not matter. When used with plt.imshow(), specify data_extent to make
    sure the pixels are exactly where they are expected.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame containing 'lat', 'lon' and 'var' Series.
    index : tuple, optional
        Tuple containing the names of latitude and longitude index. Usually ('lat','lon')
        The default is globals.index_names
    grid_stepsize : None or float, optional
        angular grid stepsize to prepare a regular grid for plotting

    Returns
    -------
    zz : numpy.ndarray
        array holding the gridded values. When using plt.imshow, specify origin='lower'.
        [0,0] : llc (lower left corner)
        first coordinate is longitude.
    data_extent : tuple
        (x_min, x_max, y_min, y_max) in Data coordinates.
    origin : string
        'upper' or 'lower' - define how the plot should be oriented, for irregular grids it should return 'upper'
    """
    xx = df.index.get_level_values(index[1]) # lon
    yy = df.index.get_level_values(index[0]) # lat
    if grid_stepsize not in ['nan', None]:
        # stepsize known a priori: resample scattered points onto the grid
        x_min, x_max, dx, len_x = _get_grid_for_irregulars(xx, grid_stepsize)
        y_min, y_max, dy, len_y = _get_grid_for_irregulars(yy, grid_stepsize)
        data_extent = (x_min - dx/2, x_max + dx/2, y_min - dy/2, y_max + dy/2)
        zz, grid = oversample(xx, yy, df.values, data_extent, dx, dy)
        origin = 'upper'
    else:
        # infer the grid from the coordinates and scatter values into it
        x_min, x_max, dx, len_x = _get_grid(xx)
        y_min, y_max, dy, len_y = _get_grid(yy)
        ii = _value2index(yy, y_min, dy)
        jj = _value2index(xx, x_min, dx)
        zz = np.full((len_y, len_x), np.nan, dtype=np.float64)
        zz[ii, jj] = df
        data_extent = (x_min - dx / 2, x_max + dx / 2, y_min - dy / 2, y_max + dy / 2)
        origin = 'lower'
    return zz, data_extent, origin
def get_value_range(ds, metric=None, force_quantile=False, quantiles=[0.025, 0.975], diff_map=False):
    """
    Get the value range (v_min, v_max) from globals._metric_value_ranges.

    If the range is (None, None), a symmetric range around 0 is created,
    showing at least the symmetric <quantile> quantile of the values.
    If force_quantile is True, the quantile range is always used.

    Parameters
    ----------
    ds : pd.DataFrame or pd.Series
        Series holding the values
    metric : str, optional (default: None)
        name of the metric (e.g. 'R'). None equals to force_quantile=True.
    force_quantile : bool, optional
        always use quantile, regardless of globals.
        The default is False.
    quantiles : list, optional
        quantile of data to include in the range.
        The default is [0.025, 0.975]. (Read-only; never mutated.)
    diff_map : bool, default is False
        Whether the colorbar is for a difference plot

    Returns
    -------
    v_min : float
        lower value range of plot.
    v_max : float
        upper value range of plot.
    """
    if metric is None:  # PEP 8: compare to None with 'is', not '=='
        force_quantile = True
    ranges = globals._metric_value_ranges
    if not force_quantile:  # try to get range from globals
        try:
            v_min = ranges[metric][0]
            v_max = ranges[metric][1]
            if (v_min is None and v_max is None):
                # get quantile range and make symmetric around 0
                v_min, v_max = get_quantiles(ds, quantiles)
                v_max = max(abs(v_min), abs(v_max))  # make sure the range is symmetric around 0
                v_min = -v_max
            elif v_min is None:
                v_min = get_quantiles(ds, quantiles)[0]
            elif v_max is None:
                v_max = get_quantiles(ds, quantiles)[1]
            # else: both bounds fully determined in globals
        except KeyError:  # metric not known, fall back to quantile
            force_quantile = True
            warnings.warn('The metric \'{}\' is not known. \n'.format(metric) + \
                          'Could not get value range from globals._metric_value_ranges\n' + \
                          'Computing quantile range \'{}\' instead.\n'.format(str(quantiles)) +
                          'Known metrics are: \'' + \
                          '\', \''.join([metric for metric in ranges]) + '\'')
    if force_quantile:  # get quantile range
        v_min, v_max = get_quantiles(ds, quantiles)
    # adjust range based on the difference values in the map
    if diff_map:
        extreme = max([abs(v) for v in get_quantiles(ds, quantiles)])
        v_min, v_max = -extreme, extreme
    return v_min, v_max
def get_quantiles(ds, quantiles) -> tuple:
    """
    Get lower and upper quantiles from a pandas.Series or pandas.DataFrame.

    For a DataFrame the lower bound is the minimum of the per-column lower
    quantiles and the upper bound the maximum of the per-column upper ones.

    Parameters
    ----------
    ds : (pandas.Series | pandas.DataFrame)
        Input values.
    quantiles : list
        quantile of values to include in the range

    Returns
    -------
    v_min : float
        lower quantile.
    v_max : float
        upper quantile.
    """
    q = ds.quantile(quantiles)
    if isinstance(ds, pd.Series):
        return q.iloc[0], q.iloc[1]
    if isinstance(ds, pd.DataFrame):
        return min(q.iloc[0]), max(q.iloc[1])
    raise TypeError("Inappropriate argument type. 'ds' must be pandas.Series or pandas.DataFrame.")
def get_plot_extent(df, grid_stepsize=None, grid=False) -> tuple:
    """
    Gets the plot_extent from the values. Uses range of values and
    adds a padding fraction as specified in globals.map_pad.

    Parameters
    ----------
    df : pandas.DataFrame
        Plot values.
    grid_stepsize : float, optional (default: None)
        angular stepsize for data on a known regular grid
    grid : bool
        whether the values in df are on an equally spaced grid
        (for use in mapplot)

    Returns
    -------
    extent : tuple | list
        (x_min, x_max, y_min, y_max) in Data coordinates.
    """
    lat, lon, gpi = globals.index_names
    if grid and grid_stepsize in ['nan', None]:
        # todo: problem if only single lon/lat point is present?
        x_min, x_max, dx, len_x = _get_grid(df.index.get_level_values(lon))
        y_min, y_max, dy, len_y = _get_grid(df.index.get_level_values(lat))
        # BUGFIX: latitude bounds are now padded with dy (previously dx was
        # used for both axes, which is wrong whenever dx != dy)
        extent = [x_min - dx / 2., x_max + dx / 2., y_min - dy / 2., y_max + dy / 2.]
    elif grid and grid_stepsize:
        x_min, x_max, dx, len_x = _get_grid_for_irregulars(df.index.get_level_values(lon), grid_stepsize)
        y_min, y_max, dy, len_y = _get_grid_for_irregulars(df.index.get_level_values(lat), grid_stepsize)
        # BUGFIX: same dy fix as above
        extent = [x_min - dx / 2., x_max + dx / 2., y_min - dy / 2., y_max + dy / 2.]
    else:
        extent = [df.index.get_level_values(lon).min(), df.index.get_level_values(lon).max(),
                  df.index.get_level_values(lat).min(), df.index.get_level_values(lat).max()]
    dx = extent[1] - extent[0]
    dy = extent[3] - extent[2]
    # set map-padding around values to be globals.map_pad percent of the smaller dimension
    padding = min(dx, dy) * globals.map_pad / (1 + globals.map_pad)
    extent[0] -= padding
    extent[1] += padding
    extent[2] -= padding
    extent[3] += padding
    # clamp to valid lon/lat bounds
    extent[0] = max(extent[0], -180)
    extent[1] = min(extent[1], 180)
    extent[2] = max(extent[2], -90)
    extent[3] = min(extent[3], 90)
    return extent
def init_plot(figsize, dpi, add_cbar=None, projection=None) -> tuple:
    """Create the figure, map axis and (optional) colorbar axis for a mapplot.

    Returns
    -------
    (fig, ax, cax) : the figure, the map axis, and the colorbar axis
        (``cax`` is None when ``add_cbar`` is falsy).
    """
    if not projection:
        projection = globals.crs
    fig = plt.figure(figsize=figsize, dpi=dpi)
    if add_cbar:
        # tall map panel on top, thin horizontal colorbar strip below
        gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=[19, 1])
        ax = fig.add_subplot(gs[0], projection=projection)
        cax = fig.add_subplot(gs[1])
        return fig, ax, cax
    gs = gridspec.GridSpec(nrows=1, ncols=1)
    ax = fig.add_subplot(gs[0], projection=projection)
    return fig, ax, None
def get_extend_cbar(metric):
    """
    Find out whether the colorbar should extend, based on
    globals._metric_value_ranges[metric].

    Parameters
    ----------
    metric : str
        metric used in plot

    Returns
    -------
    str
        one of ['neither', 'min', 'max', 'both'].
    """
    vrange = globals._metric_value_ranges[metric]
    # an open (None) bound means values may exceed the colorbar on that side
    open_low = vrange[0] is None
    open_high = vrange[1] is None
    if open_low and open_high:
        return 'both'
    if open_low:
        return 'min'
    if open_high:
        return 'max'
    return 'neither'
def style_map(
        ax, plot_extent, add_grid=True,
        map_resolution=globals.naturalearth_resolution,
        add_topo=False, add_coastline=True,
        add_land=True, add_borders=True, add_us_states=False,
        grid_intervals=globals.grid_intervals,
):
    """Style the mapplot axis: extent, gridlines/labels, and map features.

    Parameters
    ----------
    ax : map axis to style (cartopy GeoAxes — TODO confirm)
    plot_extent : tuple
        (x_min, x_max, y_min, y_max) in data coordinates
    add_grid : bool
        draw gridlines and (where possible) grid labels
    map_resolution : str
        Natural Earth feature resolution
    add_topo, add_coastline, add_land, add_borders, add_us_states : bool
        toggle the respective map feature
    grid_intervals : list
        candidate grid spacings; the one closest to ~1/5 of the larger
        extent dimension is used

    Returns
    -------
    ax : the styled axis
    """
    ax.set_extent(plot_extent, crs=globals.data_crs)
    ax.spines["geo"].set_linewidth(0.4)
    if add_grid:
        # add gridlines. Because of a bug in cartopy, draw gridlines first and then grid labels.
        # https://github.com/SciTools/cartopy/issues/1342
        try:
            grid_interval = max((plot_extent[1] - plot_extent[0]),
                                (plot_extent[3] - plot_extent[2])) / 5  # create approx. 5 gridlines in the bigger dimension
            if grid_interval <= min(grid_intervals):
                raise RuntimeError
            grid_interval = min(grid_intervals, key=lambda x: abs(
                x - grid_interval))  # select the grid spacing from the list which fits best
            gl = ax.gridlines(crs=globals.data_crs, draw_labels=False,
                              linewidth=0.5, color='grey', linestyle='--',
                              zorder=3)  # draw only gridlines.
            # todo: this can slow the plotting down!!
            xticks = np.arange(-180, 180.001, grid_interval)
            yticks = np.arange(-90, 90.001, grid_interval)
            gl.xlocator = mticker.FixedLocator(xticks)
            gl.ylocator = mticker.FixedLocator(yticks)
        except RuntimeError:
            pass
        else:
            try:  # drawing labels fails for most projections
                gltext = ax.gridlines(crs=globals.data_crs, draw_labels=True,
                                      linewidth=0.5, color='grey', alpha=0., linestyle='-',
                                      zorder=4)  # draw only grid labels.
                xticks = xticks[(xticks >= plot_extent[0]) & (xticks <= plot_extent[1])]
                yticks = yticks[(yticks >= plot_extent[2]) & (yticks <= plot_extent[3])]
                gltext.xformatter = LONGITUDE_FORMATTER
                gltext.yformatter = LATITUDE_FORMATTER
                gltext.top_labels = False
                gltext.right_labels = False
                gltext.xlocator = mticker.FixedLocator(xticks)
                gltext.ylocator = mticker.FixedLocator(yticks)
            except RuntimeError as e:
                print("No tick labels plotted.\n" + str(e))
    if add_topo:
        ax.stock_img()
    if add_coastline:
        coastline = cfeature.NaturalEarthFeature('physical', 'coastline',
                                                 map_resolution,
                                                 edgecolor='black', facecolor='none')
        ax.add_feature(coastline, linewidth=0.4, zorder=3)
    if add_land:
        land = cfeature.NaturalEarthFeature('physical', 'land',
                                            map_resolution,
                                            edgecolor='none', facecolor='white')
        ax.add_feature(land, zorder=1)
    if add_borders:
        borders = cfeature.NaturalEarthFeature('cultural', 'admin_0_countries',
                                               map_resolution,
                                               edgecolor='black', facecolor='none')
        ax.add_feature(borders, linewidth=0.2, zorder=3)
    if add_us_states:
        ax.add_feature(cfeature.STATES, linewidth=0.1, zorder=3)
    return ax
def make_watermark(
        fig,
        placement=globals.watermark_pos,
        for_map=False,
        offset=0.03
):
    """
    Adds a watermark to fig and adjusts the current axis to make sure there
    is enough padding around the watermarks.

    Padding can be adjusted in globals.watermark_pad.
    Fontsize can be adjusted in globals.watermark_fontsize.
    plt.tight_layout needs to be called prior to make_watermark,
    because tight_layout does not take into account annotations.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
    placement : str
        'top' : places watermark in top right corner
        'bottom' : places watermark in bottom left corner
    for_map : bool, optional (default: False)
        if True, the subplots are NOT shrunk from below
        (only relevant for 'bottom' placement)
    offset : float, optional (default: 0.03)
        base figure-height fraction reserved for the watermark

    Raises
    ------
    NotImplementedError
        for any placement other than 'top' or 'bottom'
    """
    # ax = fig.gca()
    # pos1 = ax.get_position() #fraction of figure
    fontsize = globals.watermark_fontsize
    pad = globals.watermark_pad
    height = fig.get_size_inches()[1]
    # convert font size + pad (points) into a figure-height fraction
    offset = offset + (((fontsize + pad) / globals.matplotlib_ppi) / height) * 2.2
    if placement == 'top':
        plt.annotate(globals.watermark, xy=[0.5, 1], xytext=[-pad, -pad],
                     fontsize=fontsize, color='grey',
                     horizontalalignment='center', verticalalignment='top',
                     xycoords='figure fraction', textcoords='offset points')
        top = fig.subplotpars.top
        fig.subplots_adjust(top=top - offset)
    elif placement == 'bottom':
        plt.annotate(globals.watermark, xy=[0.5, 0], xytext=[pad, pad],
                     fontsize=fontsize, color='grey',
                     horizontalalignment='center', verticalalignment='bottom',
                     xycoords='figure fraction', textcoords='offset points')
        bottom = fig.subplotpars.bottom
        if not for_map:
            fig.subplots_adjust(bottom=bottom + offset)
    else:
        raise NotImplementedError
def _make_cbar(fig, im, cax, ref_short: str, metric: str, label=None, diff_map=False):
    """
    Make colorbar to use in plots

    Parameters
    ----------
    fig: matplotlib.figure.Figure
        figure of plot
    im: AxesImage
        from method Axes.imshow()
    cax: axes.SubplotBase
        from fig.add_subplot
    ref_short: str
        name of ref dataset
    metric: str
        name of metric
    label: str
        label to describe the colorbar
    diff_map : bool, default is False
        Whether the colorbar is for a difference plot
    """
    if label is None:
        # build "<metric name><description with units>" from globals
        try:
            name = globals._metric_name[metric]
            description = globals._metric_description[metric]
            units = globals._metric_units[ref_short]
            label = name + description.format(units)
        except KeyError as e:
            raise Exception('The metric \'{}\' or reference \'{}\' is not known.\n'.format(metric, ref_short) + str(e))
    extend = get_extend_cbar(metric)
    if diff_map:
        # difference maps always get open-ended arrows on both sides
        extend = "both"
    cbar = fig.colorbar(im, cax=cax, orientation='horizontal', extend=extend)
    cbar.set_label(label, weight='normal')
    outline = cbar.outline
    outline.set_linewidth(0.4)
    outline.set_edgecolor('black')
    cbar.ax.tick_params(width=0.4)
    return fig, im, cax
def _CI_difference(fig, ax, ci):
    """
    Insert the median value of the upper and lower CI difference.

    Parameters
    ----------
    fig: matplotlib.figure.Figure
        figure with CIs
    ax: matplotlib.axes.Axes
        kept for backward compatibility; NOTE it is shadowed by the loop
        below, so annotations land on the LAST axis of ``fig`` (preserving
        the original behavior)
    ci: list
        list of upper and lower ci dataframes
    """
    lower_pos = []
    for ax in fig.axes:
        n = 0
        # iterating through axes artists, searching for the boxplot PathPatches
        for artist in ax.get_children():
            if isinstance(artist, PathPatch):
                # patches are assumed to come in groups of three (see the
                # boxplot layout: CI-lower, CI-upper, central metric box);
                # only the first of each group is measured.
                # BUGFIX: was `n in np.arange(0, 100, 3)`, which silently
                # broke for more than 100 patches; n % 3 == 0 is equivalent
                # for n < 100 and correct beyond.
                if n % 3 == 0:
                    # left edge of the box (last vertex closes the path)
                    verts_sub = artist.get_path().vertices[:-1]
                    lower_pos.append(np.min(verts_sub[:, 0]))
                n += 1
    for ci_df, xmin in zip(ci, lower_pos):
        diff = ci_df["upper"] - ci_df["lower"]
        ci_range = float(diff.mean())
        ypos = float(ci_df["lower"].min())
        ax.annotate(
            "Mean CI\nRange:\n {:.2g}".format(ci_range),
            xy=(xmin - 0.2, ypos),
            horizontalalignment="center"
        )
def _add_dummies(df:pd.DataFrame, to_add:int) -> list:
"""
Add empty columns in dataframe to avoid error in matplotlib when not all boxplot groups have the same
number of values
"""
for n, col in enumerate(np.arange(to_add)):
# add columns while avoiding name clashes
df[str(n)] = np.nan
return df
def patch_styling(
    box_dict,
    facecolor
) -> None:
    """Apply the uniform grey boxplot styling; boxes are filled with ``facecolor``."""
    for patch, median in zip(box_dict["boxes"], box_dict["medians"]):
        patch.set(color="grey", facecolor=facecolor, linewidth=1.6, alpha=0.7)
        median.set(color="grey", linewidth=1.6)
    for whisker, cap in zip(box_dict["whiskers"], box_dict["caps"]):
        whisker.set(color="grey", linewidth=1.6)
        cap.set(color="grey", linewidth=1.6)
def _box_stats(ds:pd.Series, med:bool=True, iqrange:bool=True, count:bool=True) -> str:
"""
Create the metric part with stats of the box (axis) caption
Parameters
----------
ds: pd.Series
data on which stats are found
med: bool
iqrange: bool
count: bool
statistics
Returns
-------
stats: str
caption with summary stats
"""
# interquartile range
iqr = ds.quantile(q=[0.75,0.25]).diff()
iqr = abs(float(iqr.loc[0.25]))
met_str = []
if med:
met_str.append('Median: {:.3g}'.format(ds.median()))
if iqrange:
met_str.append('IQR: {:.3g}'.format(iqr))
if count:
met_str.append('N: {:d}'.format(ds.count()))
stats = '\n'.join(met_str)
return stats
def boxplot(
        df,
        ci=None,
        label=None,
        figsize=None,
        dpi=100,
        spacing=0.35,
        axis=None,
        **plotting_kwargs,
) -> tuple:
    """
    Create a boxplot_basic from the variables in df.

    The box shows the quartiles of the dataset while the whiskers extend
    to show the rest of the distribution, except for points that are
    determined to be "outliers" using a method that is a function of
    the inter-quartile range.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame containing 'lat', 'lon' and (multiple) 'var' Series.
    ci : list
        list of Dataframes containing "upper" and "lower" CIs
    label : str, optional
        Label of the y axis, describing the metric. The default is None.
    figsize : tuple, optional
        Figure size in inches. The default is globals.map_figsize.
    dpi : int, optional
        Resolution for raster graphic output. The default is globals.dpi.
    spacing : float, optional.
        Space between the central boxplot and the CIs. Default is 0.3
    axis : matplotlib.axes.Axes, optional
        draw into this existing axis instead of creating a new figure
    plotting_kwargs :
        accepted for interface compatibility; not used in this function

    Returns
    -------
    fig : matplotlib.figure.Figure
        the boxplot
    ax : matplotlib.axes.Axes
        Note: (fig, ax) is only returned when ``axis`` is None;
        otherwise the function implicitly returns None.
    """
    values = df.copy()
    # each variable gets a slot of width 2 on the x axis
    center_pos = np.arange(len(values.columns))*2
    # make plot
    ax = axis
    if axis is None:
        fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    ticklabels = values.columns
    # styling of the boxes
    kwargs = {"patch_artist": True, "return_type": "dict"}
    # changes necessary to have confidence intervals in the plot
    # could be an empty list or could be 'None', if de-selected from the kwargs
    if ci:
        upper, lower = [], []
        for n, intervals in enumerate(ci):
            lower.append(intervals["lower"])
            upper.append(intervals["upper"])
        # pad with NaN dummy columns so all groups have equally many boxes
        lower = _add_dummies(
            pd.concat(lower, ignore_index=True, axis=1),
            len(center_pos)-len(ci),
        )
        upper = _add_dummies(
            pd.concat(upper, ignore_index=True, axis=1),
            len(center_pos)-len(ci),
        )
        # narrow CI boxes to the left/right of each central box
        low = lower.boxplot(
            positions=center_pos - spacing,
            showfliers=False,
            widths=0.15,
            ax=ax,
            **kwargs
        )
        up = upper.boxplot(
            positions=center_pos + spacing,
            showfliers=False,
            widths=0.15,
            ax=ax,
            **kwargs
        )
        patch_styling(low, 'skyblue')
        patch_styling(up, 'tomato')
    # central (metric) boxes
    cen = values.boxplot(
        positions=center_pos,
        showfliers=False,
        widths=0.3,
        ax=ax,
        **kwargs
    )
    patch_styling(cen, 'white')
    if ci:
        low_ci = Patch(color='skyblue', alpha=0.7, label='Lower CI')
        up_ci = Patch(color='tomato', alpha=0.7, label='Upper CI')
        #_CI_difference(fig, ax, ci)
        plt.legend(
            handles=[low_ci, up_ci],
            fontsize=8,
            loc="best"
        )
    # provide y label
    if label is not None:
        plt.ylabel(label, weight='normal')
    ax.set_xticks(center_pos)
    ax.set_xticklabels(ticklabels)
    ax.tick_params(labelsize=globals.tick_size)
    ax.grid(axis='x')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    if axis is None:
        return fig, ax
# TODO: test?
def resize_bins(sorted, nbins):
    """Compute percentile-based bins for "continuous" metadata types.

    Note: the parameter name ``sorted`` shadows the builtin; it is kept for
    backward compatibility and must hold an ascending numpy array.

    Returns
    -------
    bin_values : np.ndarray
        bin number assigned to each element of ``sorted``
    unique_values : np.ndarray
        the distinct bin numbers
    bin_size : int
        population of the largest bin
    """
    percentile_edges = np.linspace(0, 100, nbins + 1)
    p_rank = 100.0 * (np.arange(sorted.size) + 0.5) / sorted.size
    # use +- 1 to make sure nothing falls outside bins
    value_edges = np.interp(percentile_edges, p_rank, sorted,
                            left=sorted[0] - 1, right=sorted[-1] + 1)
    bin_values = np.digitize(sorted, value_edges)
    unique_values, counts = np.unique(bin_values, return_counts=True)
    bin_size = max(counts)
    return bin_values, unique_values, bin_size
def bin_continuous(
    df:pd.DataFrame,
    metadata_values:pd.DataFrame,
    meta_key:str,
    nbins=4,
    min_size=5,
    **kwargs,
) -> dict:
    """
    Subset the continuous metadata types into value-range bins.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe of the values to plot (one column per dataset)
    metadata_values : pd.DataFrame
        metadata values, one column named `meta_key`
    meta_key : str
        name of the metadata
    nbins : int. Default is 4.
        Bins to divide the metadata range into
    min_size : int. Default is 5
        Minimum number of values to have in a bin
    kwargs: dict
        Keyword arguments for specific metadata types (ignored here)

    Returns
    -------
    binned: dict or None
        dictionary {"<min>-<max> <unit>": sub-DataFrame}; None when no bin
        reaches `min_size`

    Raises
    ------
    ValueError
        if there are fewer than `min_size` points in total
    """
    # unit string for the bin labels, e.g. "m" or "km"
    meta_units = globals.metadata[meta_key][3]
    meta_range = metadata_values[meta_key].to_numpy()
    # NOTE(review): 'sorted' shadows the builtin; kept to preserve the code
    sorted = np.sort(meta_range)
    if len(meta_range) < min_size:
        raise ValueError(
            "There are too few points per metadata to generate the boxplots. You can set 'min_size'"
            "to a lower value to allow for smaller samples."
        )
    bin_values, unique_values, bin_size = resize_bins(sorted, nbins)
    # adjust bins to have the specified number of bins if possible, otherwise
    # reduce the bin count until the largest bin holds at least min_size values
    # (terminates: at nbins == 1 all points share one bin, and the total count
    # was checked above)
    while bin_size < min_size:
        nbins -= 1
        bin_values, unique_values, bin_size = resize_bins(sorted, nbins)
    # sort the data rows by metadata so that positional indices computed on the
    # sorted metadata array also select the matching df rows below
    df = pd.concat([df, metadata_values], axis=1).sort_values(meta_key)
    df.drop(columns=meta_key, inplace=True)
    # put binned data in a dict keyed by a human-readable range label
    binned = {}
    for bin in unique_values:
        bin_index = np.where(bin_values==bin)
        bin_sorted = sorted[bin_index]
        # positional selection relies on df having been sorted by meta_key above
        bin_df = df.iloc[bin_index]
        bin_label = "{:.2f}-{:.2f} {}".format(min(bin_sorted), max(bin_sorted), meta_units)
        # drop bins where any dataset column has fewer than min_size values
        if not all(col >= min_size for col in bin_df.count()):
            continue
        binned[bin_label] = bin_df
    # If too few points are available to make the plots
    if not binned:
        return None
    return binned
def bin_classes(
    df:pd.DataFrame,
    metadata_values:pd.DataFrame,
    meta_key:str,
    min_size=5,
    **kwargs,
):
    """
    Subset the class-type metadata (e.g. climate or land-cover classes).

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe of the values to plot
    metadata_values : pd.DataFrame
        metadata values
    meta_key : str
        name of the metadata
    min_size : int. Default is 5
        Minimum number of values to have in a bin
    kwargs: dict
        Keyword arguments for specific metadata types (ignored here)

    Returns
    -------
    binned: dict or None
        dictionary with class names as keys; None if no class is big enough
    """
    # translate raw metadata values to human-readable class names
    lut = globals.metadata[meta_key][1]
    labelled = metadata_values.applymap(lambda value: lut[value])
    subsets = {}
    for class_name, class_meta in labelled.groupby(meta_key):
        class_values = df.loc[class_meta.index]
        # drop classes where any dataset column has too few points
        if any(count < min_size for count in class_values.count()):
            continue
        subsets[class_name] = class_values
    # If too few points are available to make the plots
    return subsets if subsets else None
def bin_discrete(
    df:pd.DataFrame,
    metadata_values:pd.DataFrame,
    meta_key:str,
    min_size=5,
    **kwargs,
) -> pd.DataFrame:
    """
    Provide a formatted dataframe for discrete type metadata (e.g. station or network)

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe of the values to plot (one column per dataset)
    metadata_values : pd.DataFrame
        metadata values
    meta_key : str
        name of the metadata
    min_size : int. Default is 5
        Minimum number of values to have in a bin
    kwargs: dict
        Keyword arguments for specific metadata types (ignored here)

    Returns
    -------
    formatted: pd.DataFrame or None
        Long-form dataframe with columns ['values', meta_key, 'Dataset']
        for seaborn plotting; None when no group reaches `min_size`
    """
    # melt to long form: one block of rows per dataset column
    groups = []
    for col in df.columns:
        group = pd.concat(
            [df[col], metadata_values],
            axis=1
        )
        group.columns = ["values", meta_key]
        group["Dataset"] = col
        groups.append(group)
    grouped = pd.concat(groups)
    formatted = []
    for meta, meta_df in grouped.groupby(meta_key):
        # drop groups with too few non-null values
        if meta_df["values"].count() < min_size:
            continue
        formatted.append(meta_df)
    # If too few points are available to make the plots.
    # BUGFIX: previously returned 'None, None' (a tuple), which defeated the
    # caller's 'if to_plot is None' check in boxplot_metadata.
    if not formatted:
        return None
    return pd.concat(formatted)
def bin_function_lut(type):
    """Map a metadata type name to its binning function.

    Raises a KeyError for any type other than 'continuous', 'discrete'
    or 'classes'.
    """
    dispatch = {
        "continuous": bin_continuous,
        "discrete": bin_discrete,
        "classes": bin_classes,
    }
    if type not in dispatch:
        raise KeyError(
            "The type '{}' does not correspond to any binning function".format(type)
        )
    return dispatch[type]
def _stats_discrete(df:pd.DataFrame, meta_key:str, stats_key:str) -> list:
    """Collect per-group (box stats, median) tuples, grouping rows by ``meta_key``."""
    collected = []
    for _, subset in df.groupby(meta_key):
        column = subset[stats_key]
        collected.append((_box_stats(column), column.median()))
    return collected
def combine_soils(
    soil_fractions:dict,
    clay_fine:int=35,
    clay_coarse:int=20,
    sand_coarse:int=65,
) -> pd.DataFrame:
    """
    Classify granulometry as 'coarse', 'medium' or 'fine' from soil fractions.

    The sand/clay percentages are projected onto the cartesian plane of the
    soil texture triangle and thresholded there.

    Parameters
    ----------
    soil_fractions: dict
        Dictionary with {'soil type (clay, sand or silt)': qa4sm_handlers.Metadata}
    clay_fine: int
        clay threshold above which the soil is fine
    clay_coarse: int
        clay threshold below which the soil can be coarse
    sand_coarse: int
        sand threshold above which the soil can be coarse

    Returns
    -------
    soil_combined: pd.DataFrame
        single-column ('soil_type') dataframe with the combined class labels
    """
    # height factor of the texture triangle (sin 120 degrees)
    sin60 = np.sin(2 / 3 * np.pi)
    # thresholds mapped to the cartesian plane
    fine_clay_y = clay_fine * sin60
    coarse_clay_y = clay_coarse * sin60
    coarse_sand_x = 100 - sand_coarse
    # project each point's fractions onto the plane
    points = pd.concat(
        [
            soil_fractions["sand_fraction"].values.apply(lambda s: 100 - s),
            soil_fractions["clay_fraction"].values.apply(lambda c: c * sin60),
        ],
        axis=1,
    )
    points.columns = ["x", "y"]

    def classify(row):
        # low clay and high sand -> coarse; intermediate clay -> medium
        if row["x"] < coarse_sand_x and row["y"] < coarse_clay_y:
            return "Coarse\ngranulometry"
        elif coarse_clay_y < row["y"] < fine_clay_y:
            return "Medium\ngranulometry"
        else:
            return "Fine\ngranulometry"

    return points.apply(classify, axis=1).to_frame("soil_type")
def combine_depths(depth_dict:dict) -> pd.DataFrame:
    """
    Derive a single instrument depth as the midpoint between the specified
    upper ('depthfrom') and lower ('depthto') instrument depths.

    Parameters
    ----------
    depth_dict: dict
        Dictionary with {'instrument_depthfrom/instrument_depthto': qa4sm_handlers.Metadata}

    Returns
    -------
    depths_combined: pd.DataFrame
        single-column ('instrument_depth') dataframe with the mean depth
    """
    frames = [meta.values for meta in depth_dict.values()]
    merged = pd.concat(frames, axis=1)
    # row-wise mean of the from/to columns is the midpoint
    return merged.mean(axis=1).to_frame("instrument_depth")
def aggregate_subplots(to_plot:dict, funct, n_bars, common_y=None, **kwargs):
    """
    Aggregate multiple subplots into one image.

    Parameters
    ----------
    to_plot: dict
        dictionary with the data to plot, of the shape 'title of the subplot': pd.Dataframe
        (or data format required by funct)
    funct: method
        function to create the individual subplots. Should have a parameter 'axis',
        where the plt.Axis can be given
    n_bars: int
        number of boxplot bars (one is central + confidence intervals); used to
        scale the figure width
    common_y: str, optional
        shared y-axis label written once on the left edge of the figure
    **kwargs: dict
        arguments to pass on to the plotting function

    Return
    ------
    fig, axes
        NOTE(review): undefined (NameError) if to_plot is empty — callers are
        expected to pass at least one entry
    """
    sub_n = len(to_plot.keys())
    if sub_n == 1:
        # single subplot: let funct create its own figure
        for n, (bin_label, data) in enumerate(to_plot.items()):
            fig, axes = funct(df=data, **kwargs)
    elif sub_n > 1:
        # provide the figure and subplots: two columns, as many rows as needed
        rows = int(np.ceil(sub_n/2))
        fig, axes = plt.subplots(rows, 2, sharey=True)
        for n, (bin_label, data) in enumerate(to_plot.items()):
            # even n -> left column, odd n -> right column
            if n % 2 == 0:
                try:
                    ax=axes[int(n/2), 0]
                except IndexError: # If only two subplots, it is a 1-dimensional array
                    ax=axes[0]
            else:
                try:
                    ax=axes[int(n/2), 1]
                except IndexError:
                    ax=axes[1]
            # Make sure funct has the correct parameters format
            if 'axis' not in funct.__code__.co_varnames:
                raise KeyError(
                    "'axis' should be in the parameters of the given function {}".format(funct)
                )
            funct(df=data, axis=ax, **kwargs)
            ax.set_title(bin_label, fontdict={"fontsize":10})
            # keep the legend only on the first subplot
            if n != 0:
                ax.legend([],[], frameon=False)
        # eliminate extra subplot if odd number
        if rows*2 > sub_n:
            fig.delaxes(axes[rows-1, 1])
        plt.subplots_adjust(wspace=0.1, hspace=0.25)
        # scale figure size with the number of subplots and boxplot bars
        fig.set_figheight(globals.boxplot_height*(np.ceil(sub_n/2) + 0.2))
        fig.set_figwidth(globals.boxplot_width*n_bars*2)
    if common_y:
        fig.text(0.05, 0.5, common_y, va='center', rotation='vertical')
    return fig, axes
def bplot_multiple(to_plot, y_axis, n_bars, **kwargs) -> tuple:
    """
    Create one boxplot subplot per metadata category/range.

    Parameters
    ----------
    to_plot : dict
        dictionary of {'bin name': Dataframe}
    y_axis : str
        Name of the value axis (currently unused here; kept for interface
        compatibility with the other 'generate_plot' functions)
    n_bars : int or float
        Number of datasets/boxplot bars
    **kwargs : dict
        forwarded to the per-subplot boxplot function

    Returns
    -------
    fig, axes
    """
    # 'axis' cannot be forwarded: aggregate_subplots creates its own axes
    if "axis" in kwargs.keys():
        del kwargs["axis"]
    fig, axes = aggregate_subplots(to_plot=to_plot, funct=boxplot, n_bars=n_bars, **kwargs)
    return fig, axes
def _dict2df(to_plot_dict:dict, meta_key:str) -> pd.DataFrame:
"""Transform a dictionary into a DataFrame for catplotting"""
to_plot_df = []
for range, values in to_plot_dict.items():
range_grouped = []
for ds in values:
values_ds = values[ds].to_frame(name="values")
values_ds["Dataset"] = ds
values_ds[meta_key] = "\n[".join(range.split(" ["))
range_grouped.append(values_ds)
range_grouped = pd.concat(range_grouped)
to_plot_df.append(range_grouped)
to_plot_df = pd.concat(to_plot_df)
return to_plot_df
def add_cat_info(to_plot:pd.DataFrame, metadata_name:str) -> pd.DataFrame:
    """Append the sample count (N) of each metadata category to its label.

    Note: mutates and returns the same DataFrame.
    """
    n_per_category = to_plot.groupby(metadata_name)["values"].count()
    to_plot[metadata_name] = to_plot[metadata_name].apply(
        lambda label: label + "\nN: {}".format(n_per_category[label])
    )
    return to_plot
def bplot_catplot(to_plot, y_axis, metadata_name, axis=None, **kwargs) -> tuple:
    """
    Create individual plot with grouped boxplots by metadata value.

    Parameters
    ----------
    to_plot: pd.Dataframe
        Seaborn-formatted (long-form) dataframe with columns
        'values', 'Dataset' and the `metadata_name` column
    y_axis: str
        Label for the value axis
    metadata_name: str
        Name of the metadata type (column in to_plot)
    axis : matplotlib.axes.Axis, optional
        if provided, the plot is drawn on this axis (vertical orientation,
        labels left to the caller) and nothing is returned; otherwise a new
        figure with horizontal boxplots is created and (fig, axis) returned
    """
    labels = None  # NOTE(review): unused; kept as-is
    return_figax = False
    orient = "v"
    if axis is None:
        # stand-alone figure: horizontal boxplots read better for many categories
        return_figax = True
        fig, axis = plt.subplots(1)
        orient = "h"
    # decide which variable goes on which axis depending on orientation
    if orient == "v":
        x = metadata_name
        y = "values"
    elif orient == "h":
        x = "values"
        y = metadata_name
    # add N points to the axis labels
    to_plot = add_cat_info(to_plot, metadata_name=metadata_name)
    box = sns.boxplot(
        x=x,
        y=y,
        hue="Dataset",
        data=to_plot,
        palette="Set2",
        ax=axis,
        showfliers = False,
        orient=orient,
    )
    n_bars = to_plot["Dataset"].nunique()  # NOTE(review): unused below
    n_meta = to_plot[metadata_name].nunique()
    unit_height = 1  # NOTE(review): unused
    unit_width = len(to_plot[metadata_name].unique())  # NOTE(review): unused
    # needed for overlapping station names
    box.tick_params(labelsize=globals.tick_size)
    # figure dimensions scale with the number of metadata categories
    dims = [globals.boxplot_width*n_meta*2, globals.boxplot_height]
    if orient == "v":
        axis.set(xlabel=None, ylabel=y_axis)
        axis.yaxis.grid(True) # Show the horizontal gridlines
        axis.xaxis.grid(False) # Hide the vertical gridlines
    if orient == "h":
        axis.set(ylabel=None, xlabel=y_axis)
        axis.yaxis.grid(False) # Hide the horizontal gridlines
        axis.xaxis.grid(True) # Show the vertical gridlines
    axis.set_axisbelow(True)
    axis.spines['right'].set_visible(False)
    axis.spines['top'].set_visible(False)
    axis.legend(loc="best", fontsize="small")
    if return_figax:
        fig.set_figwidth(dims[0])
        fig.set_figheight(dims[1])
        return fig, axis
    else:
        # embedded in a multi-subplot figure: axis labels handled by the caller
        axis.set(xlabel=None)
        axis.set(ylabel=None)
def boxplot_metadata(
    df:pd.DataFrame,
    metadata_values:pd.DataFrame,
    offset=0.02,
    ax_label=None,
    nbins=4,
    axis=None,
    plot_type:str="catplot",
    **bplot_kwargs,
) -> tuple:
    """
    Boxplots by metadata. The output plot depends on the metadata type:

    - "continuous"
    - "discrete"
    - "classes"

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe with values for all variables (in metric)
    metadata_values : pd.DataFrame
        Dataframe containing the metadata values to use for the plot
    offset: float
        offset of watermark (NOTE(review): unused in this function)
    ax_label : str
        Name of the y axis - cannot be set globally
    nbins: int
        number of bins to divide the plots in (only for continuous type of
        metadata, e.g. elevation; the other binning functions ignore it)
    axis : matplotlib.axes.Axis, optional
        if provided, the function will create the plot on the specified axis
        and returns nothing
    plot_type : str, default is 'catplot'
        one of 'catplot' or 'multiplot', defines the type of plots for the
        'classes' and 'continuous' metadata types

    Returns
    -------
    fig : matplotlib.figure.Figure
        the boxplot
    ax : matplotlib.axes.Axes

    Raises
    ------
    ValueError
        if the binning function finds too few points per bin
    """
    metric_label = "values"  # NOTE(review): unused
    meta_key = metadata_values.columns[0]
    # look up the binning strategy for this metadata type
    # ('continuous' / 'discrete' / 'classes')
    type = globals.metadata[meta_key][2]
    bin_funct = bin_function_lut(type)
    to_plot = bin_funct(
        df=df,
        metadata_values=metadata_values,
        meta_key=meta_key,
        nbins=nbins,
    )
    if to_plot is None:
        # NOTE(review): missing space between the two message fragments below
        raise ValueError(
            "There are too few points per metadata to generate the boxplots. You can set 'min_size'"
            "to a lower value to allow for smaller samples."
        )
    # dict -> binned subsets (continuous/classes); DataFrame -> discrete long-form
    if isinstance(to_plot, dict):
        if plot_type == "catplot":
            to_plot = _dict2df(to_plot, meta_key)
            generate_plot = bplot_catplot
        elif plot_type == "multiplot":
            generate_plot = bplot_multiple
        # NOTE(review): any other plot_type leaves generate_plot unbound
    elif isinstance(to_plot, pd.DataFrame):
        generate_plot = bplot_catplot
    out = generate_plot(
        to_plot=to_plot,
        y_axis=ax_label,
        metadata_name=meta_key,
        n_bars=len(df.columns),
        axis=axis,
        **bplot_kwargs,
    )
    # only the stand-alone call (no axis given) produces a (fig, axes) tuple
    if axis is None:
        fig, axes = out
        return fig, axes
def mapplot(
    df, metric,
    ref_short,
    ref_grid_stepsize=None,
    plot_extent=None,
    colormap=None,
    projection=None,
    add_cbar=True,
    label=None,
    figsize=globals.map_figsize,
    dpi=globals.dpi,
    diff_map=False,
    **style_kwargs
) -> tuple:
    """
    Create an overview map from df using values as color. Plots a scatterplot for ISMN and an image plot for other
    input values.

    Parameters
    ----------
    df : pandas.Series
        values to be plotted. Generally from metric_df[Var]
    metric : str
        name of the metric for the plot
    ref_short : str
        short_name of the reference dataset (read from netCDF file)
    ref_grid_stepsize : float or None, optional (None by default)
        angular grid stepsize, needed only when ref_is_angular == False,
    plot_extent : tuple
        (x_min, x_max, y_min, y_max) in Data coordinates. The default is None.
    colormap : Colormap, optional
        colormap to be used.
        If None, defaults to globals._colormaps.
    projection : cartopy.crs, optional
        Projection to be used. If none, defaults to globals.map_projection.
        The default is None.
    add_cbar : bool, optional
        Add a colorbar. The default is True.
    label : str, optional
        Label of the y axis, describing the metric. If None, a label is autogenerated from metadata.
        The default is None.
    figsize : tuple, optional
        Figure size in inches. The default is globals.map_figsize.
    dpi : int, optional
        Resolution for raster graphic output. The default is globals.dpi.
    diff_map : bool, default is False
        if True, a difference colormap is created
    **style_kwargs :
        Keyword arguments for plotter.style_map().

    Returns
    -------
    fig : matplotlib.figure.Figure
        the map figure
    ax : matplotlib.axes.Axes
    """
    if not colormap:
        cmap = globals._colormaps[metric]
    else:
        cmap = colormap
    v_min, v_max = get_value_range(df, metric)
    # everything changes if the plot is a difference map
    if diff_map:
        v_min, v_max = get_value_range(df, metric=None, diff_map=True)
        cmap = globals._diff_colormaps[metric]
    # mask values outside range (e.g. for negative STDerr from TCA)
    if metric in globals._metric_mask_range.keys():
        mask_under, mask_over = globals._metric_mask_range[metric] # get values from scratch to disregard quantiles
        # copy so the shared global colormap is not modified in place
        cmap = copy.copy(cmap)
        if mask_under is not None:
            v_min = mask_under
            cmap.set_under("red")
        if mask_over is not None:
            v_max = mask_over
            cmap.set_over("red")
    # initialize plot
    fig, ax, cax = init_plot(figsize, dpi, add_cbar, projection)
    # scatter point or mapplot
    if ref_short in globals.scattered_datasets: # scatter
        if not plot_extent:
            plot_extent = get_plot_extent(df)
        markersize = globals.markersize ** 2
        lat, lon, gpi = globals.index_names
        im = ax.scatter(df.index.get_level_values(lon),
                        df.index.get_level_values(lat),
                        c=df, cmap=cmap, s=markersize,
                        vmin=v_min, vmax=v_max,
                        edgecolors='black', linewidths=0.1,
                        zorder=2, transform=globals.data_crs)
    else: # mapplot
        if not plot_extent:
            plot_extent = get_plot_extent(df, grid_stepsize=ref_grid_stepsize, grid=True)
        if isinstance(ref_grid_stepsize, np.ndarray):
            ref_grid_stepsize = ref_grid_stepsize[0]
        zz, zz_extent, origin = geotraj_to_geo2d(df, grid_stepsize=ref_grid_stepsize) # prep values
        im = ax.imshow(zz, cmap=cmap, vmin=v_min,
                       vmax=v_max, interpolation='nearest',
                       origin=origin, extent=zz_extent,
                       transform=globals.data_crs, zorder=2)
    if add_cbar: # colorbar
        _make_cbar(fig, im, cax, ref_short, metric, label=label, diff_map=diff_map)
    style_map(ax, plot_extent, **style_kwargs)
    fig.canvas.draw() # very slow. necessary bcs of a bug in cartopy: https://github.com/SciTools/cartopy/issues/1207
    return fig, ax
def plot_spatial_extent(
    polys:dict,
    ref_points:bool=None,
    overlapping:bool=False,
    intersection_extent:tuple=None,
    reg_grid=False,
    grid_stepsize=None,
    **kwargs,
):
    """
    Plots the given Polygons and optionally the reference points on a map.

    Parameters
    ----------
    polys : dict
        dictionary with shape {name: shapely.geometry.Polygon}; the key
        "selection" is treated specially (shaded, not outlined)
    ref_points : 2D array
        array of lon, lat for the reference points positions
        (NOTE(review): annotated bool but used as an array)
    overlapping : bool, default is False.
        Whether the polygons have an overlap
    intersection_extent : tuple | None
        if given, corresponds to the extent of the intersection. Shape (minlon, maxlon, minlat, maxlat)
    reg_grid : bool, default is False,
        plotting option for regular grids (satellites)
    grid_stepsize : float, optional
        angular stepsize used to rasterize the reference points when
        reg_grid is True
    """
    fig, ax, cax = init_plot(figsize=globals.map_figsize, dpi=globals.dpi)
    legend_elements = []  # NOTE(review): unused; legend built via labels below
    # plot polygons
    for n, items in enumerate(polys.items()):
        name, Pol = items
        if n == 0:
            union = Pol
        # get maximum extent by accumulating the union of all polygons
        union = union.union(Pol)
        style = {'color':'powderblue', 'alpha':0.4}
        # shade the union/intersection of the polygons
        if overlapping:
            x, y = Pol.exterior.xy
            if name == "selection":
                ax.fill(x, y, **style, zorder=5)
                continue
            ax.plot(x, y, label=name)
        # shade the areas individually
        else:
            if name == "selection":
                continue
            x, y = Pol.exterior.xy
            ax.fill(x, y, **style, zorder=6)
            ax.plot(x, y, label=name, zorder=6)
    # add reference points to the figure
    if ref_points is not None:
        # split points into inside/outside the intersection extent
        if overlapping and intersection_extent is not None:
            minlon, maxlon, minlat, maxlat = intersection_extent
            mask = (ref_points[:,0]>=minlon) & (ref_points[:,0]<=maxlon) &\
                   (ref_points[:,1]>=minlat) & (ref_points[:,1]<=maxlat)
            selected = ref_points[mask]
            outside = ref_points[~ mask]
        else:
            selected, outside = ref_points, np.array([])
        marker_styles = [
            {"marker": "o", "c":"turquoise", "s":15},
            {"marker": "o", "c":"tomato", "s":15},
        ]
        # mapplot with imshow for gridded (non-ISMN) references
        if reg_grid:
            plot_df = []
            for n, (point_set, style, name) in enumerate(zip(
                    (selected, outside),
                    marker_styles,
                    ("Selected reference validation points", "Validation points outside selection")
            )):
                if point_set.size != 0:
                    point_set = point_set.transpose()
                    index = pd.MultiIndex.from_arrays(point_set, names=('lon', 'lat'))
                    # value 0/1 selects the colormap end for this point set
                    point_set = pd.Series(
                        data=n,
                        index=index,
                    )
                    plot_df.append(point_set)
                    # plot point to 'fake' legend entry
                    ax.scatter(0, 0, label=name, marker="s", s=10, c=style["c"])
                else:
                    continue
            plot_df = pd.concat(plot_df, axis=0)
            zz, zz_extent, origin = geotraj_to_geo2d(
                plot_df,
                grid_stepsize=grid_stepsize
            )
            cmap = mcol.LinearSegmentedColormap.from_list('mycmap', ['turquoise', 'tomato'])
            im = ax.imshow(
                zz, cmap=cmap,
                origin=origin, extent=zz_extent,
                transform=globals.data_crs, zorder=4
            )
        # scatterplot for ISMN reference
        else:
            for point_set, style, name in zip(
                    (selected, outside),
                    marker_styles,
                    ("Selected reference validation points", "Validation points outside selection")
            ):
                if point_set.size != 0:
                    im = ax.scatter(
                        point_set[:,0], point_set[:,1],
                        edgecolors='black', linewidths=0.1,
                        zorder=4, transform=globals.data_crs,
                        **style, label=name
                    )
                else:
                    continue
    # style plot
    make_watermark(fig, globals.watermark_pos, offset=0)
    title_style = {"fontsize": 12}
    ax.set_title("Spatial extent of the comparison", **title_style)
    # provide extent of plot: union bounds plus a 1/8 margin on each side
    d_lon = abs(union.bounds[0] - union.bounds[2])* 1/8
    d_lat = abs(union.bounds[1] - union.bounds[3])* 1/8
    plot_extent = (union.bounds[0] - d_lon, union.bounds[2] + d_lon,
                   union.bounds[1] - d_lat, union.bounds[3] + d_lat)
    grid_intervals = [1, 5, 10, 30]
    style_map(ax, plot_extent, grid_intervals=grid_intervals)
    # create legend
    plt.legend(
        loc="lower left",
        fontsize='small',
        framealpha=0.4,
        edgecolor='black'
    )
|
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Object ID for Dupli Groups
Say you have a linked character or asset, you can now set an Object ID for the
entire instance (the objects in the group), and use it with the Object Index
pass later in compositing. Something that I always wanted and it wasn't
possible!
In order for the Object ID to be loaded afterwards on computers without
Amaranth installed, it will automatically create a text file (called
AmaranthStartup.py) and save it inside the .blend, this will autorun on
startup and set the OB IDs. Remember to have auto-run python scripts on your
startup preferences.
Set a Pass Index and press "Apply Object ID to Duplis" on the Relations panel,
Object Properties.
"""
import bpy
from amaranth.scene.debug import AMTH_SCENE_OT_blender_instance_open
# Some settings are bound to be saved on a startup py file
# TODO: refactor this, amth_text should not be declared as a global variable,
# otherwise becomes confusing when you call it in the classes below.
def amaranth_text_startup(context):
    """Find or create the AmaranthStartup.py text datablock.

    Sets the module-global ``amth_text`` to the datablock so the operators
    below can append to it.

    Returns True if the text already existed, False if it was just created,
    or None if bpy.data is not writable (e.g. during addon registration).
    """
    amth_text_name = "AmaranthStartup.py"
    amth_text_exists = False
    global amth_text
    try:
        if bpy.data.texts:
            for tx in bpy.data.texts:
                if tx.name == amth_text_name:
                    amth_text_exists = True
                    amth_text = bpy.data.texts[amth_text_name]
                    break
                else:
                    amth_text_exists = False

        if not amth_text_exists:
            bpy.ops.text.new()
            # pick up the datablock just created by text.new()
            # NOTE(review): index (len * -1) + 1 equals 1 - len; this only
            # selects the new text for specific collection orderings — verify
            amth_text = bpy.data.texts[((len(bpy.data.texts) * -1) + 1)]
            amth_text.name = amth_text_name
            amth_text.write("# Amaranth Startup Script\nimport bpy\n")
            # run this text automatically on file load
            amth_text.use_module = True

        return amth_text_exists
    except AttributeError:
        # bpy.data not available in this context
        return None
# FEATURE: Dupli Group Path
def ui_dupli_group_library_path(self, context):
    """Show the source .blend library path of the active object's dupli group."""
    obj = context.object
    path_row = self.layout.row()
    path_row.alignment = "LEFT"
    # only meaningful for linked collection instances
    if not (obj and obj.instance_collection and obj.instance_collection.library):
        return
    lib_path = obj.instance_collection.library.filepath
    open_op = path_row.operator(AMTH_SCENE_OT_blender_instance_open.bl_idname,
                                text="Library: %s" % lib_path,
                                emboss=False,
                                icon="LINK_BLEND")
    open_op.filepath = lib_path
# // FEATURE: Dupli Group Path
# FEATURE: Object ID for objects inside DupliGroups
class AMTH_OBJECT_OT_id_dupligroup(bpy.types.Operator):

    """Set the Object ID for objects in the dupli group"""
    bl_idname = "object.amaranth_object_id_duplis"
    bl_label = "Apply Object ID to Duplis"

    # Class-level flag read by the UI to show the "back to normal" hint
    clear = False

    @classmethod
    def poll(cls, context):
        return context.active_object.instance_collection

    def execute(self, context):
        """Apply the active object's pass_index to its dupli group and
        persist the assignment in the AmaranthStartup.py text block."""
        self.__class__.clear = False
        ob = context.active_object
        # make sure the startup text exists (sets the module-global amth_text)
        amaranth_text_startup(context)
        script_exists = False
        script_intro = "# OB ID: %s" % ob.name
        # BUGFIX: was 'bpy.data.objects[" % s"]' — the "% s" (space-flag)
        # conversion rendered bpy.data.objects[" Name"] with a leading space,
        # so the generated startup script could never find the object.
        obdata = 'bpy.data.objects["%s"]' % ob.name
        # Snippet appended to the startup script. Every line carries the
        # "# OB ID" marker comment so the clear operator can find and blank it.
        # BUGFIX: body lines now use proper 4/8-space indentation so the
        # generated script is valid Python.
        script = (
            "\nif %(obdata)s and %(obdata)s.instance_collection and %(obdata)s.pass_index != 0: %(obname)s \n"
            "    for dob in %(obdata)s.instance_collection.objects: %(obname)s \n"
            "        dob.pass_index = %(obdata)s.pass_index %(obname)s \n" %
            {"obdata": obdata, "obname": script_intro})

        # check whether a snippet for this object is already in the text block
        for txt in bpy.data.texts:
            if txt.name == amth_text.name:
                for li in txt.lines:
                    if script_intro == li.body:
                        script_exists = True
                        continue

        if not script_exists:
            amth_text.write("\n")
            amth_text.write(script_intro)
            amth_text.write(script)

        # apply the ID immediately in the current session as well
        if ob and ob.instance_collection:
            if ob.pass_index != 0:
                for dob in ob.instance_collection.objects:
                    dob.pass_index = ob.pass_index

        self.report({"INFO"},
                    "%s ID: %s to all objects in this Dupli Group" % (
                        "Applied" if not script_exists else "Updated",
                        ob.pass_index))

        return {"FINISHED"}
class AMTH_OBJECT_OT_id_dupligroup_clear(bpy.types.Operator):

    """Clear the Object ID from objects in dupli group"""
    bl_idname = "object.amaranth_object_id_duplis_clear"
    bl_label = "Clear Object ID from Duplis"

    @classmethod
    def poll(cls, context):
        return context.active_object.instance_collection

    def execute(self, context):
        """Reset the pass index and blank the persisted startup-script lines.

        Only clears the current object's lines from AmaranthStartup.py (the
        duplis themselves keep their index until the file is saved/reloaded).
        """
        context.active_object.pass_index = 0
        # tell the UI to show the "back to normal after reload" hint
        AMTH_OBJECT_OT_id_dupligroup.clear = True
        amth_text_exists = amaranth_text_startup(context)
        # marker comment written by the apply operator on every snippet line
        match_first = "# OB ID: %s" % context.active_object.name
        if amth_text_exists:
            for txt in bpy.data.texts:
                if txt.name == amth_text.name:
                    for li in txt.lines:
                        if match_first in li.body:
                            # blank the line instead of deleting it (the text
                            # API has no line removal)
                            li.body = ""
                            continue

        self.report({"INFO"}, "Object IDs back to normal")
        return {"FINISHED"}
def ui_object_id_duplis(self, context):
    """Draw the Apply/Clear Object ID buttons on the Relations panel."""
    active = context.active_object
    if not active.instance_collection:
        return
    split = self.layout.split()
    buttons = split.row(align=True)
    # applying an ID only makes sense once a pass index has been set
    buttons.enabled = active.pass_index != 0
    buttons.operator(
        AMTH_OBJECT_OT_id_dupligroup.bl_idname)
    buttons.operator(
        AMTH_OBJECT_OT_id_dupligroup_clear.bl_idname,
        icon="X", text="")
    split.separator()

    if AMTH_OBJECT_OT_id_dupligroup.clear:
        self.layout.label(text="Next time you save/reload this file, "
                               "object IDs will be back to normal",
                          icon="INFO")
# // FEATURE: Object ID for objects inside DupliGroups
def register():
    """Register the operators and append the panel draw callbacks."""
    for operator_cls in (AMTH_OBJECT_OT_id_dupligroup,
                         AMTH_OBJECT_OT_id_dupligroup_clear):
        bpy.utils.register_class(operator_cls)
    bpy.types.OBJECT_PT_duplication.append(ui_dupli_group_library_path)
    bpy.types.OBJECT_PT_relations.append(ui_object_id_duplis)
def unregister():
    """Unregister the operators and remove the panel draw callbacks."""
    for operator_cls in (AMTH_OBJECT_OT_id_dupligroup,
                         AMTH_OBJECT_OT_id_dupligroup_clear):
        bpy.utils.unregister_class(operator_cls)
    bpy.types.OBJECT_PT_duplication.remove(ui_dupli_group_library_path)
    bpy.types.OBJECT_PT_relations.remove(ui_object_id_duplis)
|
'''
Created on Jun 25, 2021
@author: willg
'''
from typing import List
import os
import discord
import common
import UtilityFunctions
# One list of help text file names per help topic.
main_help_file_list = ['main_help.txt']
tabling_help_file_list = ['tabling_help_1.txt', 'tabling_help_2.txt']
server_defaults_help_file_list = ['server_defaults_help.txt']
flags_help_file_list = ['flags_help.txt']
lounge_reporter_help_file_list = ['lounge_staff_help_1.txt','lounge_staff_help_2.txt','lounge_staff_help_3.txt']
lounge_submitting_tables_help_file_list = ['lounge_table_submission_help.txt']
other_help_file_list = ['other_help_1.txt', 'other_help_2.txt']

# Keys used to look up help topics from user arguments.
default_help_key = 'help'
tabling_help_key = 'tabling'

all_players_help_file_list = ['all_players_help.txt']
change_tag_help_file_list = ['change_tag_help.txt']
dc_help_file_list = ['dc_help.txt']
fcs_help_file_list = ['fcs_help.txt']
graph_help_file_list = ['graph_help.txt']
race_results_help_file_list = ['race_results_help.txt']
race_size_help_file_list = ['race_size_help.txt']
races_help_file_list = ['races_help.txt']
remove_race_help_file_list = ['remove_race_help.txt']
reset_undo_help_file_list = ['reset_undo_help.txt']
start_war_help_file_list = ['start_war_help.txt']
style_help_file_list = ['style_help.txt']

# Numbered tabling sub-topics: users ask e.g. "help 3" for disconnections.
TABLING_HELP_FILES = {"1":start_war_help_file_list,
                      "2":reset_undo_help_file_list,
                      "3":dc_help_file_list,
                      "4":remove_race_help_file_list,
                      "5":change_tag_help_file_list,
                      "6":style_help_file_list,
                      "7":graph_help_file_list,
                      "8":race_size_help_file_list,
                      "9":races_help_file_list,
                      "10":all_players_help_file_list,
                      "11":fcs_help_file_list,
                      "12":race_results_help_file_list}

# Named help topics; several aliases map to the same file list.
HELP_KEY_FILES = {default_help_key:main_help_file_list,
                  tabling_help_key:tabling_help_file_list,
                  "serverdefaults":server_defaults_help_file_list,
                  "server defaults":server_defaults_help_file_list,
                  "flags":flags_help_file_list,
                  "submittable":lounge_submitting_tables_help_file_list,
                  "submitable":lounge_submitting_tables_help_file_list,
                  "submit table":lounge_submitting_tables_help_file_list,
                  "reporter":lounge_reporter_help_file_list,
                  "reporters":lounge_reporter_help_file_list,
                  "updater":lounge_reporter_help_file_list,
                  "updaters":lounge_reporter_help_file_list,
                  "other":other_help_file_list
                  }

# Canonical category names shown to users (one per alias group above).
HELP_CATEGORIES = [
    "tabling",
    "server defaults",
    "flags",
    "submit table",
    "reporter",
    "updater",
    "other"
]

# Prefix every file name with its directory path, in place.
# The startswith() guard makes this idempotent if the module is re-imported.
for tabling_help_list in TABLING_HELP_FILES.values():
    for index, file_name in enumerate(tabling_help_list):
        if not tabling_help_list[index].startswith(common.TABLING_HELP_PATH):
            tabling_help_list[index] = f"{common.TABLING_HELP_PATH}{file_name}"

for help_list in HELP_KEY_FILES.values():
    for index, file_name in enumerate(help_list):
        if not help_list[index].startswith(common.HELP_PATH):
            help_list[index] = f"{common.HELP_PATH}{file_name}"

QUICKSTART_FILE = f"{common.HELP_PATH}quickstart.txt"
def get_help_files(args:List[str]):
    """Resolve (help key, help file list) from the user's command arguments.

    The words after the first argument containing 'help' form the topic key;
    unknown or missing topics fall back to the default help.
    """
    help_position = next(
        (ind for ind, arg in enumerate(args) if 'help' in arg), None
    )
    if help_position is None:
        return default_help_key, HELP_KEY_FILES[default_help_key]
    help_key = " ".join(args[help_position + 1:])
    # named topics take precedence over numbered tabling sub-topics
    for lookup in (HELP_KEY_FILES, TABLING_HELP_FILES):
        if help_key in lookup:
            return help_key, lookup[help_key]
    return default_help_key, HELP_KEY_FILES[default_help_key]
async def send_help(message: discord.Message, args:List[str], prefix=common.default_prefix, is_lounge_server=False):
    """Send the help embed (links to wiki, support server and invite).

    The file-based per-topic help below is currently disabled; only the
    embed is sent. `args`, `prefix` and `is_lounge_server` are kept for the
    disabled path and interface compatibility.
    """
    embed = discord.Embed(description="- [Help Documentation](https://www.github.com/BadWolf1023/MKW-Table-Bot/wiki)\n"
                          f"- [Discord Server](https://discord.gg/{common.TABLEBOT_SERVER_INVITE_CODE})\n"
                          f"- [Invite the bot]({common.INVITE_LINK})")
    embed.set_author(name="MKW Table Bot Help", icon_url="https://64.media.tumblr.com/b0df9696b2c8388dba41ad9724db69a4/tumblr_mh1nebDwp31rsjd4ho1_500.jpg")
    await message.channel.send(embed=embed)
    # NOTE(review): deliberately disabled file-based help, kept for reference
    # help_key, help_files = get_help_files(args)
    # """if is_lounge_server and help_key == tabling_help_key:
    #     await message.channel.send("See the table bot guide and flow charts in <#835555593833414696> or <#835561764322017320>.")
    #     return
    # """
    # for help_text_file in help_files:
    #     if os.path.isfile(help_text_file):
    #         with open(help_text_file, "r", encoding="utf-8") as f:
    #             help_text = f.read()
    #             if len(help_text) > 1:
    #                 help_text = help_text.replace("{SERVER_PREFIX}", prefix)
    #                 help_text_chunks = list(UtilityFunctions.string_chunks(help_text, 2000))
    #                 for chunk in help_text_chunks:
    #                     await message.channel.send(chunk)
    #     else:
    #         break
async def send_quickstart(discord_message_obj):
    """Send the quickstart text to the message's channel.

    BUGFIX: the "No quickstart." placeholder was dead code — open() raised
    before the placeholder could ever be sent. A missing quickstart file now
    falls back to the placeholder instead of crashing the command.
    """
    quick_start = "No quickstart."
    try:
        with open(QUICKSTART_FILE, "r", encoding="utf-8") as f:
            quick_start = f.read()
    except FileNotFoundError:
        pass  # keep the placeholder text
    await discord_message_obj.channel.send(quick_start)
|
<gh_stars>10-100
# misc small utilities
# Author:: <NAME> (<<EMAIL>>)
# Copyright:: Copyright (c) 2014, 2015, 2016 Magnetic Media Online, Inc.
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import collections
import csv
import os
import math
import re
import logging
import random
def weighted_sample(x, weights, n=1):
    """Return a weighted sample of length n, drawn with replacement.

    Weights do not need to sum to 1; they must be non-negative with a
    positive total.
    """
    length = len(x)
    assert length == len(weights) > 0
    assert n >= 1
    totalweight = sum(weights)
    cumulative_sum = 0
    sample = []
    # One uniform draw per requested sample point, scaled by total weight.
    # `range` (not the Python-2-only `xrange`) works on Python 2 and 3.
    r = [random.random() * totalweight for i in range(n)]
    for i in range(length):
        cumulative_sum += weights[i]
        for j in range(n):
            if r[j] < cumulative_sum:
                sample.append(x[i])
                r[j] = totalweight + 1  # sentinel: this draw is consumed
        if len(sample) >= n:
            break
    return sample
def sample_file(inf, outf, rates, pos, logger=None, separator="\t"):
    """Sample lines of INF into OUTF.

    Each line is kept with probability rates[v], where v is the field at
    position POS after splitting on SEPARATOR.  Values missing from RATES
    get rate 1 (always kept) plus a warning.  Per-value read/written
    counts are logged at the end.
    """
    reading(inf, logger)
    written = collections.defaultdict(int)
    wasread = collections.defaultdict(int)
    with open(inf) as i:
        with open(outf, "w") as o:
            for l in i:
                v = l.strip().split(separator)[pos]
                wasread[v] += 1
                try:
                    r = rates[v]
                except KeyError:
                    # Unknown value: default to keeping everything and warn once.
                    r = rates[v] = 1
                    warn("Unexpected value [%s] in [%s], set rate=1" % (v, l.strip()), logger)
                if r == 1 or random.random() <= r:
                    o.write(l)
                    written[v] += 1
    wrote(outf, logger)
    # .values() (not the Python-2-only .itervalues()) works on Python 2 and 3.
    info("Read {i:,d} lines: {c:s}".format(
        i=sum(wasread.values()), c=counter2string(wasread)), logger)
    info("Wrote {i:,d} lines: {c:s}".format(
        i=sum(written.values()), c=counter2string(written)), logger)
def get_logger(name, level=logging.INFO):
    """Create a logger named NAME that writes timestamped records to stderr."""
    handler = logging.StreamHandler()  # defaults to stderr
    formatter = logging.Formatter(
        fmt='%(asctime)s %(levelname)s %(name)s/%(module)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    log = logging.getLogger(name)
    log.addHandler(handler)
    log.setLevel(level)
    return log
def debug(s, logger=None):
    """Log S at DEBUG level, or print it when no logger is given.

    Anything that is neither None nor a logging.Logger silently
    swallows the message (deliberate sink).
    """
    if logger is None:
        print("DEBUG " + s)  # print() call form works on Python 2 and 3
    elif isinstance(logger, logging.Logger):
        logger.debug(s)
    else:
        pass
def info(s, logger=None):
    """Log S at INFO level, or print it when no logger is given.

    Any other "logger" value silently swallows the message.
    """
    if logger is None:
        print(s)  # print() call form works on Python 2 and 3
    elif isinstance(logger, logging.Logger):
        logger.info(s)
    else:
        pass
def warn(s, logger=None):
    """Log S at WARNING level, or print it when no logger is given.

    Any other "logger" value silently swallows the message.
    """
    if logger is None:
        print("WARNING " + s)  # print() call form works on Python 2 and 3
    elif isinstance(logger, logging.Logger):
        # Logger.warning: Logger.warn is a deprecated alias.
        logger.warning(s)
    else:
        pass
# http://stackoverflow.com/questions/497885/python-element-wise-tuple-operations-like-sum
def tuple_sum(a, b):
    """Element-wise sum of two tuples (zip truncates to the shorter)."""
    return tuple(x + y for x, y in zip(a, b))
# http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
def dedup(seq):
    """Return SEQ's elements as a list, duplicates dropped, order preserved."""
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
# http://stackoverflow.com/questions/15889131/how-to-find-the-cumulative-sum-of-numbers-in-a-list
def accumu(seq):
    """Yield the running (cumulative) sums of SEQ."""
    running = 0
    for value in seq:
        running = running + value
        yield running

def cumsum(seq):
    """Return the cumulative sums of SEQ as a list."""
    return list(accumu(seq))
# similar to the system function, but
# - does not support negative step
# - does support dates and such, as long as they support "+" and "<"
# - stop is included in the range
def myrange(start, stop, step):
    """Yield start, start+step, ... up to and INCLUDING stop."""
    current = start
    while current <= stop:
        yield current
        current = current + step
import ast
import pprint
class HumanReadable(object):
    """Save/load Python literal structures as human-readable text files."""
    # http://stackoverflow.com/questions/28055565/how-to-serialize-a-python-dict-to-text-in-a-human-readable-way

    @staticmethod
    def save(x, fname):
        """Pretty-print X into the file FNAME."""
        with open(fname, 'w') as out:
            pprint.PrettyPrinter(stream=out).pprint(x)

    @staticmethod
    def load(fname):
        """Parse the file FNAME back into the Python object it encodes."""
        with open(fname, 'r') as src:
            return ast.literal_eval(src.read())
# execfile('util.py'); test()
def test():
    """Smoke-test the utilities in this module (prints to stdout).

    Uses print() calls (valid on Python 2 and 3) instead of the
    Python-2-only print statements.
    """
    counter = collections.Counter(['a', 'b', 'a', 'c', 'a', 'b'])
    PrintCounter().out(counter, 'test1')
    PrintCounter(max_row=2, min_omit=0, min_row=0).out(counter, 'test2')
    PrintCounter.csv(counter, 'test3', sys.stdout)
    PrintCounter.csv(counter, 'test4', "foo-")
    os.remove("foo-test4.csv")
    counter[u'a\u0437'] = 3
    counter[7] = 5
    print(counter)
    PrintCounter.csv(counter, 'test5', "foo-")
    os.remove("foo-test5.csv")
    print(asBigNumberBin(123456789))
    print(asBigNumberDec(123456789))
    print("bin_entropy")
    for x in range(10):
        print(bin_entropy(10, x))
    print("bin_mutual_info")
    print(bin_mutual_info(200, 100, 100, 50))
    for x in range(10):
        print(bin_mutual_info(200, 20, 20 + 0.8 * x, (200 - x) * 0.1))
    x1 = dict([(a, 2 * a) for a in range(10)])
    x1[(1, 2, 3)] = 6
    x1 = [x1] + [(x1, x1)]
    HumanReadable.save(x1, "tmp")
    x2 = HumanReadable.load("tmp")
    os.remove("tmp")
    if x1 != x2:
        raise Exception("HumanReadable", x1, x2)
    print(x1)
def default_None(x, d):
    """Return X, substituting D when X is None."""
    if x is None:
        return d
    return x
def empty2none(v):
    """Map the empty string to None; pass every other value through."""
    if v == '':
        return None
    return v
# http://stackoverflow.com/questions/29127801/cross-version-portability-in-python
# Pick a bit-length implementation once, at import time.
if hasattr(1, 'bit_length'):
    # Python >= 2.7 / 3.1: ints know their own bit length.
    def bitlen (x): return x.bit_length()
else:
    # Fallback: length of the binary literal minus its '0b' prefix.
    def bitlen (x): return len(bin(x))-2
# http://en.wikipedia.org/wiki/Binary_prefix
# Magnitude prefixes, smallest to largest (Kilo ... Yotta).
binaryPrefixes = ['K','M','G','T','P','E','Z','Y']
# (bit-length threshold, prefix) pairs, largest first: 2**10 -> K, 2**20 -> M, ...
asBigNumberBinCuts = [(10*(y+1),binaryPrefixes[y]) for y in range(len(binaryPrefixes))][::-1]
def asBigNumberBin (v): # valid toString argument
    """Format integer V with a binary prefix, e.g. 123456789 -> '117.7Mi'."""
    l = bitlen(v)
    for b,p in asBigNumberBinCuts:
        if l >= b:
            # Shift down to units of 2**(b-10), then divide by 1024.0
            # to keep one fractional decimal digit.
            return "%.1f%si" % ((v >> (b-10)) / 1024.0, p)
    return str(v)
# (decimal threshold, prefix) pairs, largest first: 10**3 -> K, 10**6 -> M, ...
asBigNumberDecCuts = [(10.0**(3*(y+1)),binaryPrefixes[y]) for y in range(len(binaryPrefixes))][::-1]
def asBigNumberDec (v): # valid toString argument
    """Format V with a decimal prefix, e.g. 123456789 -> '123.5M'."""
    for c,p in asBigNumberDecCuts:
        if v >= c:
            return "%.1f%s" % (v / c, p)
    return str(v)
def nicenum(s):
    """Render S with thousands separators when it parses as an int."""
    try:
        n = int(s)
    except ValueError:
        return s
    return "{n:,d}".format(n=n)
# not needed in python3
def ensure_dir(path, logger=None):
    """Create PATH (with parents) if missing; tolerate an existing directory."""
    if path == "":
        # current directory is presumed to exist
        return
    try:
        os.makedirs(path)
    except OSError:
        # makedirs raises when the leaf already exists; anything else
        # is a real failure and is re-raised.
        if not os.path.isdir(path):
            raise
        debug("Path [%s] already exists" % (path), logger)
    else:
        info("Created [%s]" % (path), logger)
class DirLock(object):
    """Context manager holding an exclusive lock file inside a directory.

    Entering raises ValueError (with the current holder's note) if the
    lock file already exists; exiting removes the lock file.
    """

    def __init__(self, path):
        ensure_dir(path)
        self.dir = path
        self.lock = os.path.join(path, "locked")

    def __enter__(self):
        if os.path.exists(self.lock):
            # Surface who is holding the lock in the exception payload.
            with open(self.lock) as holder:
                raise ValueError("directory is in use", self.dir, holder.read())
        with open(self.lock, "w") as holder:
            holder.write("pid=%s logname=%s" % (os.getpid(), os.getenv("LOGNAME")))
        return self.dir

    def __exit__(self, _exc_type, _exc_value, _traceback):
        os.unlink(self.lock)
# turn exceptions into None
def catching_exceptions(logger, function, arguments):
    """Call function(*arguments); on any exception, log it and return None."""
    try:
        result = function(*arguments)
    except Exception as exc:
        logger.error("%s: %s", function.__name__, exc)
        return None
    return result
# http://nullege.com/codes/search/pyutil.strutil.commonsuffix
def commonsuffix(l):
    """Return the longest common suffix of the strings in L."""
    shortest = min(len(element) for element in l)
    suffix = []
    # Walk backwards from the end; stop at the first disagreement.
    for i in range(1, shortest + 1):
        ch = l[0][-i]
        if all(s[-i] == ch for s in l[1:]):
            suffix.append(ch)
        else:
            break
    suffix.reverse()
    return ''.join(suffix)
def title_from_2paths(first, second):
    """Render two paths as 'common(uniq1|uniq2)common' for display."""
    prefix = os.path.commonprefix([first, second])
    suffix = commonsuffix([first, second])
    mid1 = first[len(prefix):len(first) - len(suffix)]
    mid2 = second[len(prefix):len(second) - len(suffix)]
    return "%s(%s|%s)%s" % (prefix, mid1, mid2, suffix)
def canonicalize_domain (domain):
    """Reduce DOMAIN to a canonical registrable form.

    Returns None for empty input, the marker string 'dotted.quad' for
    IPv4 literals (optionally with a port), and otherwise the
    lower-cased domain with port, trailing dots/colons and leading
    www/web/pub/number labels stripped.
    """
    if domain is None or domain == '':
        return None
    if re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(:[0-9]+)?$',domain):
        return 'dotted.quad'
    domain = re.sub(r'(:[0-9]+|[.:]+)$','',domain.lower()) # strip port & downcase
    # Last label decides how many dots the registrable part keeps.
    tld = re.sub(r'^.*\.([a-z]*)$',r'\1',domain)
    if len(tld) > 2: mindot = 1 # .com .info, .travel, .kitchen &c
    elif len(tld) == 2: mindot = 2 # gov.us com.cn &c
    else:
        # No recognizable TLD: return as-is.
        # logger.info("weird domain [[%s]]",domain)
        return domain
    # Peel leading machine-name/number labels (www, web3-1, ...) until
    # only the registrable part remains or nothing more matches.
    while domain.count('.') > mindot:
        domain1 = re.sub(r'^(pub|web|www*)?-?[0-9]*\.','',domain)
        if domain1 == domain:
            return domain
        else:
            domain = domain1
    return domain
def url2host(url):
    """Extract the host of an http(s) URL.

    Returns None for empty input and 'bad.url' for non-http(s) schemes.
    """
    if url is None or url == '':
        return None
    if not re.match(r'https?://', url):
        return 'bad.url'
    return re.sub(r'^https?://([^/]*)(/.*)?', r'\1', url)
def url2domain(url):
    """Canonical domain of URL (url2host composed with canonicalize_domain)."""
    host = url2host(url)
    return canonicalize_domain(host)
def sigmoid(v):
    """Logistic function: 1 / (1 + e^-v)."""
    return 1 / (1 + math.exp(-v))
def antisigmoid(v):
    """Inverse of sigmoid (logit); V must lie strictly in (0, 1)."""
    odds_reciprocal = 1 / v - 1
    return -math.log(odds_reciprocal)
def bin_entropy(total, first):
    """Return the entropy (in nats) of a binary split FIRST/(TOTAL-FIRST).

    Raises ValueError on an impossible split; degenerate splits have
    entropy 0.
    """
    if total < 0 or first < 0 or first > total:
        raise ValueError("util.bin_entropy", total, first)
    if total == 0 or first == 0 or first == total:
        return 0
    second = total - first
    weighted = first * math.log(first) + second * math.log(second)
    return math.log(total) - weighted / total
def bin_mutual_info(total, actual, predicted, tp):
    """Return the mutual information (in nats) of a 2x2 confusion matrix.

    total: all cases; actual: real positives; predicted: predicted
    positives; tp: true positives.  Raises ValueError when the implied
    matrix has a negative cell; degenerate marginals give 0.
    """
    fn = actual - tp
    fp = predicted - tp
    tn = total - actual - predicted + tp
    if (total < 0 or actual > total or actual < 0 or predicted > total
            or predicted < 0 or tp < 0 or fn < 0 or fp < 0 or tn < 0):
        raise ValueError("util.bin_mutual_info", total, actual, predicted, tp)
    if total == 0 or actual == 0 or actual == total or predicted == 0 or predicted == total:
        return 0
    result = 0
    total = float(total)
    # Sum count * log(count / expected-under-independence) per cell,
    # in the same tp, fn, fp, tn order as before.
    for count, expected in (
            (tp, actual * predicted),
            (fn, actual * (total - predicted)),
            (fp, (total - actual) * predicted),
            (tn, (total - actual) * (total - predicted))):
        if count > 0:
            result += count * math.log(total * count / expected)
    return result / total
def dict_entropy(counts, missing=None, scaledto1=False):
    """Return (total entropy, entropy with MISSING dropped), in bits.

    counts: value -> count mapping.  When MISSING is a key of counts,
    the second element is the entropy over the remaining keys (0 if
    only one remains); otherwise it is None.  With scaledto1 both
    entropies are divided by log2(len(counts)) so the maximum is 1.
    """
    # .values() (not the Python-2-only .itervalues()) works on 2 and 3.
    n = sum(counts.values())
    if n == 0 or len(counts) <= 1:
        return (0, None)
    s = sum(c * math.log(c, 2) for c in counts.values())
    entropy_total = math.log(n, 2) - s / n
    if missing in counts:
        if len(counts) == 2:
            entropy_present = 0
        else:
            # Remove the MISSING bucket's contribution from both n and s.
            nonN = counts[missing]
            n -= nonN
            entropy_present = math.log(n, 2) - (s - nonN * math.log(nonN, 2)) / n
    else:
        entropy_present = None
    if scaledto1:
        if entropy_total is not None:
            entropy_total /= math.log(len(counts), 2)
        if entropy_present is not None:
            entropy_present /= math.log(len(counts), 2)
    return (entropy_total, entropy_present)
def dict__str__(counts, missing=None):
    """Return a short human-readable summary of the counter dict COUNTS."""
    entropy_total, entropy_present = dict_entropy(counts, missing)
    # .values() (not the Python-2-only .itervalues()) works on 2 and 3.
    return "len={l:,d}; sum={s:,d}; entropy={e:g}{p:s}".format(
        l=len(counts), s=sum(counts.values()), e=entropy_total,
        p=("" if entropy_present is None else
           "/{p:g}".format(p=entropy_present)))
def title2missing(title):
    """For a tuple title, the matching all-None tuple; otherwise None."""
    if isinstance(title, tuple):
        return tuple([None] * len(title))
    return None
def title2string(title, sep='-'):
    """Join a non-string title's parts with SEP; pass strings through."""
    if isinstance(title, str):
        return title
    return sep.join(str(part) for part in title)
# http://stackoverflow.com/questions/613183/python-sort-a-dictionary-by-value
# http://stackoverflow.com/questions/28839182/sorting-dictionary-by-value-and-lexicographical
def counter2pairs(counter):
    """Return (key, count) pairs sorted by count descending, then key ascending.

    Uses .items() and an index-based key function: the original
    Python-2-only `.iteritems()` and `lambda (k,v)` tuple-parameter
    syntax (removed by PEP 3113) are SyntaxErrors on Python 3.
    """
    return sorted(counter.items(), key=lambda kv: (-kv[1], kv[0]))
def dict_drop_rare(counter, min_count):
    """Return a dict keeping only the entries whose count is >= MIN_COUNT."""
    # .items() (not the Python-2-only .iteritems()) works on 2 and 3.
    return dict((k, v) for (k, v) in counter.items() if v >= min_count)
def counter_aggregate(dicts):
    """Sum a sequence of value -> count dicts into a single dict."""
    ret = dict()
    for di in dicts:
        # .items() (not the Python-2-only .iteritems()) works on 2 and 3.
        for what, count in di.items():
            ret[what] = ret.get(what, 0) + count
    return ret
def counter2string(counter, sep="; ", maxlen=None):
    """Render COUNTER as '[key: count (pct)]' items joined by SEP.

    At most MAXLEN rows are rendered; a '...N omitted' suffix notes
    the dropped tail.
    """
    # .values() (not the Python-2-only .itervalues()) works on 2 and 3.
    total = sum(counter.values())
    pairs = counter2pairs(counter)
    if maxlen and maxlen < len(pairs):
        suffix = "...%d omitted" % (len(pairs) - maxlen)
        pairs = pairs[:maxlen]
    else:
        suffix = ""
    return sep.join("[{k:s}: {v:,d} ({p:.2%})]".format(
        k=str(k), v=v, p=float(v) / total) for (k, v) in pairs) + suffix
class PrintCounter (object):
    """Configurable pretty-printer for counter dictionaries.

    Rows print sorted by descending count; the low-count tail may be
    collapsed into one "omitted" line per the thresholds below.
    NOTE(review): this class is Python-2-only as written (print
    statements, `unicode`, `sys.maxint`); porting it is a separate task.
    """
    min_count_default = 0 # omit if count less that this OR
    max_row_default = sys.maxint # ... already have this many rows OR
    min_percent_default = 0. # ... percent less that this
    min_row_default = 10 # ... but ONLY IF already printed at least this much
    min_omit_default = 10 # ... AND do NOT omit less than this much
    header_default = '===' # printed before the counter title
    prefix_default = None # printed before the list
    suffix_default = None # printed after the list
    def __init__ (self, *args, **kwargs):
        # args -- tuple of anonymous arguments
        # kwargs -- dictionary of named arguments
        # Either pass pc_* keyword arguments or a single dict of them
        # (e.g. vars(<argparse namespace>)); mixing the two is an error.
        if len(args) > 0:
            if len(kwargs) == 0:
                kwargs = args[0]
            else: raise Exception("PrintCounter: cannot mix anonymous & named")
        self.min_count = default_None(kwargs.get('pc_min_count'), PrintCounter.min_count_default)
        self.max_row = default_None(kwargs.get('pc_max_row'), PrintCounter.max_row_default)
        self.min_percent = default_None(kwargs.get('pc_min_percent'), PrintCounter.min_percent_default)
        self.min_row = default_None(kwargs.get('pc_min_row'), PrintCounter.min_row_default)
        self.min_omit = default_None(kwargs.get('pc_min_omit'), PrintCounter.min_omit_default)
        self.header = default_None(kwargs.get('pc_header'), PrintCounter.header_default)
        self.prefix = default_None(kwargs.get('pc_prefix'), PrintCounter.prefix_default)
        self.suffix = default_None(kwargs.get('pc_suffix'), PrintCounter.suffix_default)
        self.total = dict() # fill it outside for cross-percentages
    @staticmethod
    def add_arguments (parser):
        # Register the pc_* options on an argparse parser so that the
        # parsed namespace dict can feed __init__ directly.
        parser.add_argument('-pc-min_count', type=int, help='for PrintCounter')
        parser.add_argument('-pc-max_row', type=int, default=100, help='for PrintCounter')
        parser.add_argument('-pc-min_percent', type=float, default=1.0, help='for PrintCounter')
        parser.add_argument('-pc-min_row', type=int, help='for PrintCounter')
        parser.add_argument('-pc-min_omit', type=int, help='for PrintCounter')
        parser.add_argument('-pc-header', help='for PrintCounter')
        parser.add_argument('-pc-prefix', help='for PrintCounter')
        parser.add_argument('-pc-suffix', help='for PrintCounter')
    def out (self, counter, title, missing = None):
        """Print COUNTER under TITLE; return True iff rows were omitted."""
        if missing is None:
            missing = title2missing(title)
        title = title2string(title)
        total = sum(counter.itervalues())
        num_rows = len(counter)
        if total == num_rows:
            # Every count is 1 -- just list the keys on one line.
            print "{h:s} {t:s} {n:,d} items: {i:s}".format(
                h=self.header,t=title,n=num_rows,i=counter.keys())
            return False
        small5 = dict_drop_rare(counter,5)
        if len(small5) == len(counter) or len(small5) < 2:
            print "{h:s} {t:s} ({a:s})".format(
                h=self.header,t=title,a=dict__str__(counter,missing))
        else:
            # Also summarize the count>=5 subset separately.
            print "{h:s} {t:s} ({a:s})/({s:s})".format(
                h=self.header,t=title,a=dict__str__(counter,missing),
                s=dict__str__(small5,missing))
        row = 0
        left = 1
        if not self.prefix is None:
            print self.prefix
        def as_ascii (o):
            # Render any key as a byte string for column formatting.
            if isinstance(o,str):
                return o
            if isinstance(o,unicode):
                return o.encode('utf-8')
            return str(o)
        for obj, count in counter2pairs(counter):
            percent = float(count) / total
            row += 1
            omit = num_rows - row + 1
            # Collapse the tail once any threshold trips AND enough rows
            # are already out AND the omission is big enough to matter.
            if ((count < self.min_count or row > self.max_row
                 or 100 * percent < self.min_percent)
                and omit > self.min_omit and row > self.min_row):
                print " - omitted {o:,d} rows ({l:.2%})".format(o=omit,l=left)
                if not self.suffix is None:
                    print self.suffix
                return True # truncated
            left -= percent
            # Optional cross-percentage against the externally-filled totals.
            xp = ("" if obj not in self.total else
                  " ({p:.2%})".format(p=float(count)/self.total[obj]))
            if isinstance(obj,tuple):
                print " {r:5d} {o:s} {c:12,d} {p:6.2%}{xp:s}".format(
                    r=row, o=" ".join(as_ascii(o).rjust(30) for o in obj),
                    c=count, p=percent, xp = xp)
            else:
                print " {r:5d} {o:30s} {c:12,d} {p:6.2%}{xp:s}".format(
                    r=row, o=as_ascii(obj), c=count, p=percent, xp=xp)
        if not self.suffix is None:
            print self.suffix
        return False # no truncation
    @staticmethod
    def csv (counter, title, destination, logger=None, smallest = 0):
        """Write COUNTER as CSV; DESTINATION is a filename prefix or a stream."""
        if isinstance(destination, str):
            # Build the full file name from the prefix and the title.
            if isinstance(title,tuple):
                destination += "-".join(str(o) for o in title) + ".csv"
            else:
                destination += title + ".csv"
            info("Writing {r:,d} rows to [{d:s}]".format(
                r=len(counter),d=destination),logger)
            with open(destination,"wb") as dest:
                # Recurse with the opened stream.
                PrintCounter.csv(counter, title, dest, smallest=smallest)
            wrote(destination,logger=logger)
        else:
            writer = csv.writer(destination)
            if isinstance(title,tuple):
                writer.writerow(list(title)+["count"])
                for observation,count in counter2pairs(counter):
                    if count >= smallest:
                        writer.writerow([unicode(x).encode('utf-8')
                                         for x in observation]+[count])
            else:
                writer.writerow([title,"count"])
                # writer.writerows(counter2pairs(counter))
                for observation,count in counter2pairs(counter):
                    if count >= smallest:
                        writer.writerow([unicode(observation).encode('utf-8'),count])
        # chances are, write() above will write a better message than this
        #if isinstance(destination,file) and os.path.isfile(destination.name):
        #    wrote(destination.name,logger=logger)
# http://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
class CommonMixin(object):
    """Mixin giving attribute-dict equality and a dict-repr __str__."""

    def __eq__(self, other):
        # Equal only to instances of the exact same type with the same
        # instance attributes.
        if type(other) is not type(self):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return str(self.__dict__)
def wilson(success, total):
    """Return (center, half-width) of the 95% Wilson score interval."""
    z = 1.96  # normal quantile for 95% confidence
    p = float(success) / total
    shrink = 1 / (1 + z * z / total)
    center = (p + z * z / (2 * total)) * shrink
    spread = math.sqrt(p * (1 - p) / total + z * z / (4 * total * total))
    return (center, z * spread * shrink)
# pass an empty collections.defaultdict(int) as types
# and it will be filled with type counts
# NB: this will double count objects which appear multiple times in containers
def sizeof(obj, types=None):
    """Recursive sys.getsizeof over builtin containers.

    Descends into list/tuple/set/frozenset elements and dict keys and
    values; when TYPES is a defaultdict(int) it accumulates per-type
    instance counts.
    """
    ret = sys.getsizeof(obj)
    if types is not None:
        types[type(obj).__name__] += 1
    if (isinstance(obj, list) or isinstance(obj, tuple) or
            isinstance(obj, set) or isinstance(obj, frozenset)):
        for x in obj:
            ret += sizeof(x, types=types)
        return ret
    if isinstance(obj, dict):
        # .items() (not the Python-2-only .iteritems()) works on 2 and 3.
        for k, v in obj.items():
            ret += sizeof(k, types=types) + sizeof(v, types=types)
        return ret
    return ret
def bytes2string(s):
    """Format a byte count; values over 1 KiB also get a binary-prefix form."""
    if bitlen(s) <= 10:
        return "{b:,d} bytes".format(b=s)
    return "{b:,d} bytes ({a:s}B)".format(b=s, a=asBigNumberBin(s))
def filesize2string(f):
    """Human-readable size of the file at path F."""
    size = os.path.getsize(f)
    return bytes2string(size)
def reading(f, logger=None):
    """Log that file F (with its size) is about to be read."""
    info("Reading {s:s} from [{f:s}]".format(s=filesize2string(f), f=f), logger)

def wrote(f, logger=None):
    """Log that file F (with its size) has been written."""
    info("Wrote {s:s} into [{f:s}]".format(s=filesize2string(f), f=f), logger)
def enum(name, values):
    """Create a class NAME where each value in VALUES is an attribute of itself."""
    members = dict(zip(values, values))
    return type(name, (), members)

def enum_get(cl, val):
    """Return VAL if it is a member of enum class CL, else raise ValueError."""
    ret = cl.__dict__.get(val)
    if val == ret:
        return ret
    raise ValueError("enum_get: Bad value for Enum", cl.__name__, val)
def read_multimap(inf, delimiter, col1, col2, logger=None,
                  keyproc=None, valproc=None):
    """Read a multi-map (key -> set of values) from a CSV/TSV stream.

    inf: file name or open stream; col1/col2: key and value column
    indices; keyproc/valproc: optional transforms applied to each key
    and value.  Reading stops at the first row that is too short.
    """
    if isinstance(inf, str):
        reading(inf, logger=logger)
        with open(inf) as ins:
            return read_multimap(ins, delimiter, col1, col2, logger=logger,
                                 keyproc=keyproc, valproc=valproc)
    ret = dict()
    lines = 0
    for row in csv.reader(inf, delimiter=delimiter, escapechar='\\'):
        if len(row) <= max(col1, col2):
            warn("Bad line %s, aborting" % (row), logger)
            break
        lines += 1
        key = row[col1].strip()
        if keyproc is not None:
            key = keyproc(key)
        val = row[col2].strip()
        if valproc is not None:
            val = valproc(val)
        try:
            s = ret[key]
        except KeyError:
            s = ret[key] = set()
        if val in s:
            warn("Duplicate value [%s] for key [%s]" % (val, key), logger)
        s.add(val)
    # .values() (not the Python-2-only .itervalues()) works on 2 and 3.
    info("Read {l:,d} lines with {k:,d} keys and {v:,d} values".format(
        l=lines, k=len(ret), v=sum(len(s) for s in ret.values())), logger)
    return ret
if __name__ == '__main__':
    # Run the module's smoke test when executed directly.
    test()
|
<reponame>naiiytom/cms-angular-fastapi-keycloak
import os
from io import StringIO
import pandas as pd
import requests
import json
from flask import Flask, request
from flask_cors import CORS
from .s3_backend.s3_storage import (get_disease_table_presigned_url,
get_export_history_presigned_url,
get_premium_table_presigned_url, put_csv_file_into_s3)
# Frontend origins this API is meant to serve.
# NOTE(review): `origins` is never passed to CORS(app) below -- confirm
# whether CORS(app, origins=origins) was intended.
origins = [
    "http://localhost",
    "http://localhost:4200",
]
# Absolute directory of this module.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
app = Flask(__name__)
CORS(app)
app.config['MAX_CONTENT_LENGTH'] = 1000 * 1024 * 1024  # 1000 MB request-size limit (previous "50 Mb" comment was wrong)
# @app.get('/')
# def root():
# return {'hello': 'world'}
# @app.get('/file')
# def download_file():
# path = 'tmp/return.csv'
# return FileResponse(path=path, filename='return.csv', media_type='text/csv')
# @app.get('/premium-table')
# def download_premium_table():
# path = 'tmp/byproduct/Elite_health.csv'
# return FileResponse(path=path, filename='return.csv', media_type='text/csv')
# @app.post('/premium-table')
# def upload_premium_table(req: Request):
# body = req.body
# print(body)
# return ['success']
@app.route("/export_history", methods=['GET'])
def export_history():
    """Return a presigned S3 URL for the export-history file named by ?filename=."""
    requested_name = request.args.get('filename')
    print(requested_name)
    url = get_export_history_presigned_url(requested_name)
    return {'url': url}
@app.route("/export_premium_table", methods=['GET'])
def export_premium_table():
    """Return a presigned S3 URL for the premium table of ?package_name=."""
    requested_package = request.args.get('package_name')
    print(requested_package)
    url = get_premium_table_presigned_url(requested_package)
    return {'url': url}
@app.route("/export_disease_table", methods=['GET'])
def export_disease_table():
    """Return a presigned S3 URL for the disease table of ?package_name=."""
    requested_package = request.args.get('package_name')
    print(requested_package)
    url = get_disease_table_presigned_url(requested_package)
    return {'url': url}
@app.route("/create_backup_history", methods=['POST'])
def create_backup_history():
    """Export documents of the requested type to a CSV backup in S3.

    Expects a JSON body with 'package_name', 'type' ('disease',
    'premium', 'faq' or 'kb') and 'username'.  Only 'faq' is
    implemented; the other types return a 501 payload.
    NOTE(review): the status code is only inside the JSON body -- the
    HTTP response itself is 200; confirm that is intended.
    """
    csv_buffer = StringIO()
    json_list = request.get_json()
    print(json_list)
    package_name = json_list['package_name']
    doc_type = json_list['type']
    username = json_list['username']
    print(package_name, doc_type, username)
    if doc_type == 'disease':
        # output = None
        # df = pd.DataFrame.from_dict([doc['_source'] for doc in output])
        # df.to_csv(csv_buffer)
        # resp = put_csv_file_into_s3(
        #     username, package_name, doc_type, 'premium-table', csv_buffer)
        # return resp
        return {'status': 501, 'message': 'Not implemented'}
    elif doc_type == 'premium':
        # output = None
        # df = pd.DataFrame.from_dict([doc['_source'] for doc in output])
        # df.to_csv(csv_buffer)
        # resp = put_csv_file_into_s3(
        #     username, package_name, doc_type, 'premium-table', csv_buffer)
        # return resp
        return {'status': 501, 'message': 'Not Implemented'}
    elif doc_type == 'faq':
        # Fetch up to 10000 FAQ docs and upload them under 'edit-history'.
        output = get_faq_from_package(package_name, 10000, 0)
        # print(output['hits']['hits'])
        df = pd.DataFrame.from_dict([doc['_source'] for doc in output['hits']['hits']])
        df.to_csv(csv_buffer)
        resp = put_csv_file_into_s3(
            username, package_name, doc_type, 'edit-history', csv_buffer)
        return resp
    elif doc_type == 'kb':
        # output = get_kb_from_package(package_name, 10000, 0)
        # df = pd.DataFrame.from_dict([doc['_source'] for doc in output])
        # df.to_csv(csv_buffer)
        # resp = put_csv_file_into_s3(
        #     username, package_name, doc_type, 'edit-history', csv_buffer)
        return {'status': 501, 'message': 'Not implemented yet'}
    else:
        return {'status': 403, 'message': 'Forbidden'}
def get_faq_from_package(package_name, size, start_index):
    """Query a search backend for FAQ docs whose package_type matches.

    size/start_index implement paging ("size"/"from" in the query body).
    NOTE(review): `url` is left empty -- the endpoint was stripped from
    this source, so the POST will fail until it is configured (ideally
    from settings, not a hard-coded literal).
    """
    data = {
        "query": {
            "bool": {
                "must": [
                    {"match_phrase": {"package_type": package_name}}
                ]
            }
        },
        "size": size,
        "from": start_index,
        # Skip the bulky question_hub field in the response payload.
        "_source": {
            "excludes": ["question_hub"]
        }
    }
    headers = {'Content-type': 'application/json'}
    url = ''
    r = requests.post(url, data=json.dumps(data), headers=headers)
    outputs = json.loads(r.text)
    return outputs
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import common_functions as common
import argparse
import numpy as np
import math
def hist_ratio(json_file_path_list,
               metric,
               min_npe=None,
               max_npe=None,
               exclude_aborted=False,
               aborted_only=False,
               tel_id=None,
               notebook=False,
               delta_angle_degrees=0.2):
    """Compute the bin-wise ratio of two single-bin score histograms.

    json_file_path_list: exactly two JSON score file paths; the first
        file's histogram is the numerator.
    metric: score name passed to common.extract_score_array.
    min_npe, max_npe: optional cuts on "img_cleaned_sum_pe".
    exclude_aborted, aborted_only: mutually exclusive flags (currently
        only validated here, not applied).
    tel_id: optional single-telescope filter.
    notebook: suppress progress printing when True.
    delta_angle_degrees: the single bin spans [0, sin(radians(delta))].

    Returns (ratio, error, bin_edges); ratio and error are 0 where the
    denominator histogram is empty.
    """
    # ValueError (a subclass of Exception) instead of bare Exception.
    if len(json_file_path_list) != 2:
        raise ValueError('"json_file_path_list" should have exactly 2 elements')
    if exclude_aborted and aborted_only:
        raise ValueError('"exclude-aborted" and "aborted-only" options are not compatible"')

    # FETCH SCORE AND COMPUTE HISTOGRAM #######################################

    data_list = []
    label_list = []
    hist_list = []
    bins_list = []

    for json_file_path in json_file_path_list:
        if not notebook:
            print("Parsing {}...".format(json_file_path))
        json_dict = common.parse_json_file(json_file_path)
        if tel_id is not None:
            json_dict = common.image_filter_equals(json_dict, "tel_id", tel_id)
        if min_npe is not None:
            json_dict = common.image_filter_range(json_dict, "img_cleaned_sum_pe", min_value=min_npe)
        if max_npe is not None:
            json_dict = common.image_filter_range(json_dict, "img_cleaned_sum_pe", max_value=max_npe)
        if not notebook:
            print(len(json_dict["io"]), "images")
        score_array = common.extract_score_array(json_dict, metric)
        data_list.append(score_array)
        label_list.append(json_dict["label"])
        min_range, max_range = 0., math.sin(math.radians(delta_angle_degrees))
        hist, bin_edges = np.histogram(score_array,
                                       range=(min_range, max_range),
                                       bins=1)
        hist_list.append(hist)
        bins_list.append(bin_edges)

    # COMPUTE RATIO ###########################################################

    edges_of_bins = bins_list[0]
    val_of_bins_data_1 = hist_list[0]
    val_of_bins_data_2 = hist_list[1]
    print(val_of_bins_data_1)
    print(val_of_bins_data_2)

    # BUG FIX: np.divide with `where=` leaves masked output entries
    # UNINITIALIZED unless an explicit `out=` array is supplied; provide
    # zero-filled outputs so empty-denominator bins are exactly 0
    # instead of arbitrary memory.
    denom_nonzero = (val_of_bins_data_2 != 0)
    ratio = np.divide(val_of_bins_data_1,
                      val_of_bins_data_2,
                      out=np.zeros(val_of_bins_data_1.shape, dtype=float),
                      where=denom_nonzero)

    # Propagated error of the ratio (0 where it cannot be computed).
    error = np.divide(val_of_bins_data_1 * np.sqrt(val_of_bins_data_2) + val_of_bins_data_2 * np.sqrt(val_of_bins_data_1),
                      np.power(val_of_bins_data_2, 2),
                      out=np.zeros(val_of_bins_data_1.shape, dtype=float),
                      where=denom_nonzero)

    return ratio, error, edges_of_bins
if __name__ == '__main__':

    # PARSE OPTIONS ###########################################################

    parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")

    parser.add_argument("--exclude-aborted", action="store_true", default=False,
                        help="Ignore values from aborted images")
    parser.add_argument("--aborted-only", action="store_true", default=False,
                        help="Only consider aborted images")
    # BUG FIX: default was None, which was forwarded verbatim to
    # hist_ratio(delta_angle_degrees=...) and made math.radians(None)
    # raise a TypeError whenever --angle was omitted.  Default to the
    # same 0.2 degrees that hist_ratio itself documents.
    parser.add_argument("--angle", type=float, default=0.2, metavar="FLOAT",
                        help="Delta angle (in degrees) of the first bin")
    parser.add_argument("--min-npe", type=float, default=None, metavar="FLOAT",
                        help="Only considere images having more than the specified total number of photo electrons")
    parser.add_argument("--max-npe", type=float, default=None, metavar="FLOAT",
                        help="Only considere images having less than the specified total number of photo electrons")
    parser.add_argument("--metric", "-m", required=True,
                        metavar="STRING",
                        help="The metric name to plot")
    parser.add_argument("--telid", type=int, default=None,
                        metavar="INTEGER",
                        help="Only plot results for this telescope")
    parser.add_argument("--notebook", action="store_true",
                        help="Notebook mode")
    parser.add_argument("fileargs", nargs=2, metavar="FILE",
                        help="The JSON file to process")

    args = parser.parse_args()

    exclude_aborted = args.exclude_aborted
    aborted_only = args.aborted_only
    angle = args.angle
    min_npe = args.min_npe
    max_npe = args.max_npe
    metric = args.metric
    tel_id = args.telid
    notebook = args.notebook
    json_file_path_list = args.fileargs

    ratio, error, bins = hist_ratio(json_file_path_list,
                                    metric,
                                    min_npe=min_npe,
                                    max_npe=max_npe,
                                    exclude_aborted=exclude_aborted,
                                    aborted_only=aborted_only,
                                    tel_id=tel_id,
                                    notebook=notebook,
                                    delta_angle_degrees=angle)
    print("ratio:", ratio)
    print("error:", error)
    print("bins:", bins)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.